-rw-r--r--  .gitignore | 3
-rw-r--r--  .gitlab-ci.yml | 37
-rw-r--r--  CHANGELOG.rst | 38
-rw-r--r--  client/pkg/requirements-latest.pip | 2
-rw-r--r--  client/pkg/requirements-leap.pip | 2
-rw-r--r--  client/pkg/requirements-testing.pip | 1
-rw-r--r--  client/setup.py | 14
-rw-r--r--  client/src/leap/soledad/client/adbapi.py | 19
-rw-r--r--  client/src/leap/soledad/client/api.py | 12
-rw-r--r--  client/src/leap/soledad/client/crypto.py | 8
-rw-r--r--  client/src/leap/soledad/client/encdecpool.py | 28
-rw-r--r--  client/src/leap/soledad/client/http_target/__init__.py | 4
-rw-r--r--  client/src/leap/soledad/client/http_target/api.py | 1
-rw-r--r--  client/src/leap/soledad/client/http_target/fetch.py | 4
-rw-r--r--  client/src/leap/soledad/client/http_target/send.py | 5
-rw-r--r--  client/src/leap/soledad/client/pragmas.py | 12
-rw-r--r--  client/src/leap/soledad/client/secrets.py | 28
-rw-r--r--  client/src/leap/soledad/client/sqlcipher.py | 19
-rw-r--r--  client/src/leap/soledad/client/sync.py | 44
-rw-r--r--  common/pkg/requirements-latest.pip | 2
-rw-r--r--  common/pkg/requirements-testing.pip | 14
-rw-r--r--  common/setup.py | 141
-rw-r--r--  common/src/leap/soledad/common/.gitignore | 1
-rw-r--r--  common/src/leap/soledad/common/README.txt | 9
-rw-r--r--  common/src/leap/soledad/common/couch/__init__.py | 356
-rw-r--r--  common/src/leap/soledad/common/couch/errors.py | 144
-rw-r--r--  common/src/leap/soledad/common/couch/state.py | 46
-rw-r--r--  common/src/leap/soledad/common/ddocs/README.txt | 34
-rw-r--r--  common/src/leap/soledad/common/ddocs/docs/views/get/map.js | 20
-rw-r--r--  common/src/leap/soledad/common/ddocs/syncs/updates/state.js | 105
-rw-r--r--  common/src/leap/soledad/common/ddocs/syncs/views/changes_to_return/map.js | 20
-rw-r--r--  common/src/leap/soledad/common/ddocs/syncs/views/seen_ids/map.js | 11
-rw-r--r--  common/src/leap/soledad/common/ddocs/syncs/views/state/map.js | 17
-rw-r--r--  common/src/leap/soledad/common/ddocs/transactions/lists/generation.js | 20
-rw-r--r--  common/src/leap/soledad/common/ddocs/transactions/lists/trans_id_for_gen.js | 19
-rw-r--r--  common/src/leap/soledad/common/ddocs/transactions/lists/whats_changed.js | 22
-rw-r--r--  common/src/leap/soledad/common/ddocs/transactions/views/log/map.js | 7
-rw-r--r--  common/src/leap/soledad/common/errors.py | 13
-rw-r--r--  common/src/leap/soledad/common/l2db/__init__.py | 5
-rw-r--r--  common/src/leap/soledad/common/l2db/backends/__init__.py | 5
-rw-r--r--  common/src/leap/soledad/common/l2db/backends/inmemory.py | 5
-rw-r--r--  common/src/leap/soledad/common/l2db/backends/sqlite_backend.py | 9
-rw-r--r--  common/src/leap/soledad/common/l2db/remote/basic_auth_middleware.py | 6
-rw-r--r--  common/src/leap/soledad/common/l2db/remote/http_app.py | 5
-rw-r--r--  common/src/leap/soledad/common/l2db/remote/http_client.py | 5
-rw-r--r--  common/src/leap/soledad/common/l2db/remote/http_database.py | 5
-rw-r--r--  common/src/leap/soledad/common/l2db/remote/http_target.py | 5
-rw-r--r--  common/src/leap/soledad/common/l2db/remote/server_state.py | 2
-rw-r--r--  common/src/leap/soledad/common/l2db/sync.py | 4
-rw-r--r--  common/src/leap/soledad/common/log.py | 45
-rw-r--r--  scripts/ddocs/update_design_docs.py | 170
-rw-r--r--  scripts/docker/Dockerfile | 53
-rw-r--r--  scripts/docker/Makefile | 35
-rw-r--r--  scripts/docker/README.md | 15
-rw-r--r--  scripts/docker/TODO | 4
-rw-r--r--  scripts/docker/couchdb/Dockerfile | 3
-rw-r--r--  scripts/docker/couchdb/Makefile | 4
-rw-r--r--  scripts/docker/couchdb/README.rst | 12
-rw-r--r--  scripts/docker/couchdb/local.ini | 2
-rwxr-xr-x  scripts/docker/files/bin/run-perf.sh | 22
-rwxr-xr-x  scripts/docker/files/bin/run-tox.sh | 17
-rwxr-xr-x  scripts/docker/files/bin/setup-test-env.py | 16
-rw-r--r--  scripts/migration/0.9.0/.gitignore | 1
-rw-r--r--  scripts/migration/0.9.0/README.md | 73
-rw-r--r--  scripts/migration/0.9.0/log/.empty | 0
-rwxr-xr-x  scripts/migration/0.9.0/migrate.py | 117
-rw-r--r--  scripts/migration/0.9.0/migrate_couch_schema/__init__.py | 192
-rw-r--r--  scripts/migration/0.9.0/requirements.pip | 3
-rw-r--r--  scripts/migration/0.9.0/setup.py | 8
-rw-r--r--  scripts/migration/0.9.0/tests/conftest.py | 54
-rw-r--r--  scripts/migration/0.9.0/tests/test_migrate.py | 67
-rw-r--r--  scripts/migration/0.9.0/tox.ini | 13
-rw-r--r--  scripts/packaging/compile_design_docs.py | 112
-rw-r--r--  scripts/profiling/mail/couchdb_server.py | 5
-rwxr-xr-x  server/pkg/create-user-db | 2
-rw-r--r--  server/pkg/requirements-latest.pip | 2
-rw-r--r--  server/pkg/requirements-leap.pip | 2
-rw-r--r--  server/pkg/requirements.pip | 5
-rw-r--r--  server/pkg/soledad-server | 4
-rw-r--r--  server/setup.py | 14
-rw-r--r--  server/src/leap/soledad/server/__init__.py | 93
-rw-r--r--  server/src/leap/soledad/server/auth.py | 7
-rw-r--r--  server/src/leap/soledad/server/config.py | 67
-rw-r--r--  testing/pytest.ini | 3
-rw-r--r--  testing/setup.py | 2
-rw-r--r--  testing/test_soledad/u1db_tests/test_open.py | 15
-rw-r--r--  testing/test_soledad/util.py | 20
-rw-r--r--  testing/tests/client/test_app.py | 8
-rw-r--r--  testing/tests/client/test_doc.py | 4
-rw-r--r--  testing/tests/client/test_https.py | 4
-rw-r--r--  testing/tests/conftest.py | 18
-rw-r--r--  testing/tests/couch/common.py | 16
-rw-r--r--  testing/tests/couch/conftest.py | 31
-rw-r--r--  testing/tests/couch/couchdb.ini.template | 22
-rw-r--r--  testing/tests/couch/test_atomicity.py | 7
-rw-r--r--  testing/tests/couch/test_backend.py | 8
-rw-r--r--  testing/tests/couch/test_command.py | 10
-rw-r--r--  testing/tests/couch/test_ddocs.py | 157
-rw-r--r--  testing/tests/couch/test_state.py | 25
-rw-r--r--  testing/tests/perf/assets/cert_default.conf | 15
-rw-r--r--  testing/tests/perf/conftest.py | 249
-rw-r--r--  testing/tests/perf/pytest.ini | 2
-rw-r--r--  testing/tests/perf/test_crypto.py | 81
-rw-r--r--  testing/tests/perf/test_encdecpool.py | 78
-rw-r--r--  testing/tests/perf/test_misc.py | 6
-rw-r--r--  testing/tests/perf/test_sqlcipher.py | 38
-rw-r--r--  testing/tests/perf/test_sync.py | 68
-rw-r--r--  testing/tests/server/test_server.py | 14
-rw-r--r--  testing/tests/sqlcipher/test_backend.py | 58
-rw-r--r--  testing/tests/sync/test_encdecpool.py | 19
-rw-r--r--  testing/tests/sync/test_sync.py | 5
-rw-r--r--  testing/tests/sync/test_sync_mutex.py | 7
-rw-r--r--  testing/tox.ini | 37
113 files changed, 1964 insertions, 1640 deletions
diff --git a/.gitignore b/.gitignore
index 6c3e413e..1f278cbf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,3 +18,6 @@ MANIFEST
_trial_temp
.DS_Store
scripts/profiling/sync/profiles
+
+testing/htmlcov
+testing/.coverage
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 647cc43c..dd4e4605 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,3 +1,36 @@
-trial:
+stages:
+ - code-check
+ - tests
+ - benchmark
+
+# Cache tox envs between builds
+cache:
+ paths:
+ - testing/.tox/
+
+code-check:
+ stage: code-check
script:
- - cd testing; tox
+ - cd testing
+ - tox -e code-check
+
+tests:
+ stage: tests
+ image: leapcode/soledad:latest
+ services:
+ - couchdb
+ script:
+ - cd testing
+ - tox -- --couch-url http://couchdb:5984
+
+benchmark:
+ stage: benchmark
+ image: leapcode/soledad:latest
+ services:
+ - couchdb
+ script:
+ - cd testing
+ - tox -e perf -- --couch-url http://couchdb:5984
+ tags:
+ - docker
+ - benchmark
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 24c20641..ded2cac9 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,3 +1,41 @@
+0.9.0 - 11 November, 2016
++++++++++++++++++++++++++
+
+Main features
+~~~~~~~~~~~~~
+
+- Server-side changes in couch backend schema.
+- Use of tox and pytest to run tests.
+- Performance tests.
+
+Server
+======
+
+*** Attention: Migration needed! ***
+
+This version of soledad uses a different database schema in the server couch
+backend. The difference from the old schema is that design documents are no
+longer used for storing and accessing soledad db metadata, because they
+incurred too much memory and time overhead when passing data to the
+javascript interpreter.
+
+Because of that, you need to run a migration script on your database. Check the
+`scripts/migration/0.9.0/` directory for instructions on how to run the
+migration script on your database. Don't forget to back up before running the
+script!
+
+Bugfixes
+~~~~~~~~
+- Fix order of multipart serialization when writing to couch.
+
+Features
+~~~~~~~~
+- Log to syslog.
+- Remove usage of design documents in couch backend.
+- Use _local couch docs for metadata storage.
+- Other small improvements in couch backend.
+
+
0.8.1 - 14 July, 2016
+++++++++++++++++++++
diff --git a/client/pkg/requirements-latest.pip b/client/pkg/requirements-latest.pip
index 46a7ccba..d32e1ffa 100644
--- a/client/pkg/requirements-latest.pip
+++ b/client/pkg/requirements-latest.pip
@@ -1,5 +1,5 @@
--index-url https://pypi.python.org/simple/
--e 'git+https://github.com/pixelated-project/leap_pycommon.git@develop#egg=leap.common'
+-e 'git+https://github.com/leapcode/leap_pycommon.git@develop#egg=leap.common'
-e '../common'
-e .
diff --git a/client/pkg/requirements-leap.pip b/client/pkg/requirements-leap.pip
index 52d1263b..920d4123 100644
--- a/client/pkg/requirements-leap.pip
+++ b/client/pkg/requirements-leap.pip
@@ -1,2 +1,2 @@
leap.common>=0.4.3
-leap.soledad.common>=0.7.0
+leap.soledad.common>=0.9.0
diff --git a/client/pkg/requirements-testing.pip b/client/pkg/requirements-testing.pip
deleted file mode 100644
index 94ab6e8e..00000000
--- a/client/pkg/requirements-testing.pip
+++ /dev/null
@@ -1 +0,0 @@
-pep8
diff --git a/client/setup.py b/client/setup.py
index 90986dde..235e731c 100644
--- a/client/setup.py
+++ b/client/setup.py
@@ -114,13 +114,13 @@ requirements = utils.parse_requirements()
if utils.is_develop_mode():
print
- print ("[WARNING] Skipping leap-specific dependencies "
- "because development mode is detected.")
- print ("[WARNING] You can install "
- "the latest published versions with "
- "'pip install -r pkg/requirements-leap.pip'")
- print ("[WARNING] Or you can instead do 'python setup.py develop' "
- "from the parent folder of each one of them.")
+ print("[WARNING] Skipping leap-specific dependencies "
+ "because development mode is detected.")
+ print("[WARNING] You can install "
+ "the latest published versions with "
+ "'pip install -r pkg/requirements-leap.pip'")
+ print("[WARNING] Or you can instead do 'python setup.py develop' "
+ "from the parent folder of each one of them.")
print
else:
requirements += utils.parse_requirements(
diff --git a/client/src/leap/soledad/client/adbapi.py b/client/src/leap/soledad/client/adbapi.py
index ef0f9066..ce9bec05 100644
--- a/client/src/leap/soledad/client/adbapi.py
+++ b/client/src/leap/soledad/client/adbapi.py
@@ -19,31 +19,25 @@ An asyncrhonous interface to soledad using sqlcipher backend.
It uses twisted.enterprise.adbapi.
"""
import re
-import os
import sys
-import logging
from functools import partial
from twisted.enterprise import adbapi
from twisted.internet.defer import DeferredSemaphore
-from twisted.python import log
from zope.proxy import ProxyBase, setProxiedObject
from pysqlcipher import dbapi2
+from leap.soledad.common.log import getLogger
from leap.soledad.common.errors import DatabaseAccessError
from leap.soledad.client import sqlcipher as soledad_sqlcipher
from leap.soledad.client.pragmas import set_init_pragmas
-logger = logging.getLogger(name=__name__)
+logger = getLogger(__name__)
-DEBUG_SQL = os.environ.get("LEAP_DEBUG_SQL")
-if DEBUG_SQL:
- log.startLogging(sys.stdout)
-
"""
How long the SQLCipher connection should wait for the lock to go away until
raising an exception.
@@ -221,13 +215,12 @@ class U1DBConnectionPool(adbapi.ConnectionPool):
def _errback(failure):
failure.trap(dbapi2.OperationalError)
if failure.getErrorMessage() == "database is locked":
- logger.warning("Database operation timed out.")
+ logger.warn("database operation timed out")
should_retry = semaphore.acquire()
if should_retry:
- logger.warning(
- "Database operation timed out while waiting for "
- "lock, trying again...")
+ logger.warn("trying again...")
return _run_interaction()
+ logger.warn("giving up!")
return failure
d = _run_interaction()
@@ -286,7 +279,7 @@ class U1DBConnectionPool(adbapi.ConnectionPool):
try:
conn.rollback()
except:
- log.err(None, "Rollback failed")
+ logger.error("rollback failed")
raise excType, excValue, excTraceback
def finalClose(self):
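
As an aside, the retry logic in the hunk above is the usual Twisted pattern for SQLCipher's "database is locked" errors: trap the OperationalError, acquire a semaphore slot, and re-run the interaction. A minimal self-contained sketch of that pattern follows; the names (run_with_retry, MAX_WAITING_RETRIES) are illustrative, not part of the Soledad API.

    # Sketch of the retry-on-lock pattern used by U1DBConnectionPool above;
    # names are hypothetical.
    from twisted.internet.defer import DeferredSemaphore

    MAX_WAITING_RETRIES = 10  # assumption: bound the number of queued retries
    _semaphore = DeferredSemaphore(MAX_WAITING_RETRIES)

    def run_with_retry(run_interaction):
        def _errback(failure):
            # retry only when SQLCipher reports a locked database;
            # DeferredSemaphore.run acquires a slot, re-runs, then releases
            if failure.getErrorMessage() == "database is locked":
                return _semaphore.run(run_interaction)
            return failure
        d = run_interaction()
        d.addErrback(_errback)
        return d
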
diff --git a/client/src/leap/soledad/client/api.py b/client/src/leap/soledad/client/api.py
index 1bfbed8a..6870d5ba 100644
--- a/client/src/leap/soledad/client/api.py
+++ b/client/src/leap/soledad/client/api.py
@@ -28,7 +28,6 @@ remote storage in the server side.
import binascii
import errno
import httplib
-import logging
import os
import socket
import ssl
@@ -49,6 +48,7 @@ from leap.common.plugins import collect_plugins
from leap.soledad.common import SHARED_DB_NAME
from leap.soledad.common import soledad_assert
from leap.soledad.common import soledad_assert_type
+from leap.soledad.common.log import getLogger
from leap.soledad.common.l2db.remote import http_client
from leap.soledad.common.l2db.remote.ssl_match_hostname import match_hostname
from leap.soledad.common.errors import DatabaseAccessError
@@ -62,7 +62,7 @@ from leap.soledad.client.shared_db import SoledadSharedDatabase
from leap.soledad.client import sqlcipher
from leap.soledad.client import encdecpool
-logger = logging.getLogger(name=__name__)
+logger = getLogger(__name__)
# we may want to collect statistics from the sync process
@@ -337,7 +337,7 @@ class Soledad(object):
"""
Close underlying U1DB database.
"""
- logger.debug("Closing soledad")
+ logger.debug("closing soledad")
self._dbpool.close()
if getattr(self, '_dbsyncer', None):
self._dbsyncer.close()
@@ -736,6 +736,8 @@ class Soledad(object):
:rtype: twisted.internet.defer.Deferred
"""
sync_url = urlparse.urljoin(self._server_url, 'user-%s' % self.uuid)
+ if not self._dbsyncer:
+ return
d = self._dbsyncer.sync(
sync_url,
creds=self._creds,
@@ -761,7 +763,7 @@ class Soledad(object):
def _sync_errback(failure):
s = StringIO()
failure.printDetailedTraceback(file=s)
- msg = "Soledad exception when syncing!\n" + s.getvalue()
+ msg = "got exception when syncing!\n" + s.getvalue()
logger.error(msg)
return failure
@@ -1003,7 +1005,7 @@ class Soledad(object):
def create_path_if_not_exists(path):
try:
if not os.path.isdir(path):
- logger.info('Creating directory: %s.' % path)
+ logger.info('creating directory: %s.' % path)
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
diff --git a/client/src/leap/soledad/client/crypto.py b/client/src/leap/soledad/client/crypto.py
index f7d92372..d81c883b 100644
--- a/client/src/leap/soledad/client/crypto.py
+++ b/client/src/leap/soledad/client/crypto.py
@@ -22,7 +22,6 @@ import binascii
import hmac
import hashlib
import json
-import logging
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends.multibackend import MultiBackend
@@ -32,9 +31,10 @@ from cryptography.hazmat.backends.openssl.backend \
from leap.soledad.common import soledad_assert
from leap.soledad.common import soledad_assert_type
from leap.soledad.common import crypto
+from leap.soledad.common.log import getLogger
-logger = logging.getLogger(__name__)
+logger = getLogger(__name__)
MAC_KEY_LENGTH = 64
@@ -300,7 +300,7 @@ def encrypt_docstr(docstr, doc_id, doc_rev, key, secret):
# convert binary data to hexadecimal representation so the JSON
# serialization does not complain about what it tries to serialize.
hex_ciphertext = binascii.b2a_hex(ciphertext)
- logger.debug("Encrypting doc: %s" % doc_id)
+ logger.debug("encrypting doc: %s" % doc_id)
return json.dumps({
crypto.ENC_JSON_KEY: hex_ciphertext,
crypto.ENC_SCHEME_KEY: enc_scheme,
@@ -356,7 +356,7 @@ def _verify_doc_mac(doc_id, doc_rev, ciphertext, enc_scheme, enc_method,
calculated_mac_hash = hashlib.sha256(calculated_mac).digest()
if doc_mac_hash != calculated_mac_hash:
- logger.warning("Wrong MAC while decrypting doc...")
+ logger.warn("wrong MAC while decrypting doc...")
raise crypto.WrongMacError("Could not authenticate document's "
"contents.")
diff --git a/client/src/leap/soledad/client/encdecpool.py b/client/src/leap/soledad/client/encdecpool.py
index a6d49b21..056b012f 100644
--- a/client/src/leap/soledad/client/encdecpool.py
+++ b/client/src/leap/soledad/client/encdecpool.py
@@ -23,22 +23,21 @@ during synchronization.
import json
-import logging
from uuid import uuid4
from twisted.internet.task import LoopingCall
from twisted.internet import threads
from twisted.internet import defer
-from twisted.python import log
from leap.soledad.common.document import SoledadDocument
from leap.soledad.common import soledad_assert
+from leap.soledad.common.log import getLogger
from leap.soledad.client.crypto import encrypt_docstr
from leap.soledad.client.crypto import decrypt_doc_dict
-logger = logging.getLogger(__name__)
+logger = getLogger(__name__)
#
@@ -155,7 +154,7 @@ class SyncEncrypterPool(SyncEncryptDecryptPool):
Start the encrypter pool.
"""
SyncEncryptDecryptPool.start(self)
- logger.debug("Starting the encryption loop...")
+ logger.debug("starting the encryption loop...")
def stop(self):
"""
@@ -230,10 +229,10 @@ class SyncEncrypterPool(SyncEncryptDecryptPool):
% self.TABLE_NAME
result = yield self._runQuery(query, (doc_id, doc_rev))
if result:
- logger.debug("Found doc on sync db: %s" % doc_id)
+ logger.debug("found doc on sync db: %s" % doc_id)
val = result.pop()
defer.returnValue(val[0])
- logger.debug("Did not find doc on sync db: %s" % doc_id)
+ logger.debug("did not find doc on sync db: %s" % doc_id)
defer.returnValue(None)
def delete_encrypted_doc(self, doc_id, doc_rev):
@@ -344,6 +343,9 @@ class SyncDecrypterPool(SyncEncryptDecryptPool):
self._loop = LoopingCall(self._decrypt_and_recurse)
+ def _start_pool(self, period):
+ self._loop.start(period)
+
def start(self, docs_to_process):
"""
Set the number of documents we expect to process.
@@ -360,7 +362,7 @@ class SyncDecrypterPool(SyncEncryptDecryptPool):
self._docs_to_process = docs_to_process
self._deferred = defer.Deferred()
d = self._init_db()
- d.addCallback(lambda _: self._loop.start(self.DECRYPT_LOOP_PERIOD))
+ d.addCallback(lambda _: self._start_pool(self.DECRYPT_LOOP_PERIOD))
return d
def stop(self):
@@ -390,7 +392,7 @@ class SyncDecrypterPool(SyncEncryptDecryptPool):
return d
def _errback(self, failure):
- log.err(failure)
+ logger.error(failure)
self._deferred.errback(failure)
self._processed_docs = 0
self._last_inserted_idx = 0
@@ -503,7 +505,7 @@ class SyncDecrypterPool(SyncEncryptDecryptPool):
:rtype: twisted.internet.defer.Deferred
"""
doc_id, rev, content, gen, trans_id, idx = result
- logger.debug("Sync decrypter pool: decrypted doc %s: %s %s %s"
+ logger.debug("sync decrypter pool: decrypted doc %s: %s %s %s"
% (doc_id, rev, gen, trans_id))
return self.insert_received_doc(
doc_id, rev, content, gen, trans_id, idx)
@@ -553,6 +555,12 @@ class SyncDecrypterPool(SyncEncryptDecryptPool):
while next_index in self._decrypted_docs_indexes:
sequence.append(str(next_index))
next_index += 1
+ if len(sequence) > 900:
+ # 999 is the default value of SQLITE_MAX_VARIABLE_NUMBER
+ # if we try to query more, SQLite will refuse
+ # we need to find a way to improve this
+ # being researched in #7669
+ break
# Then fetch all the ones ready for insertion.
if sequence:
insertable_docs = yield self._get_docs(encrypted=False,
@@ -602,7 +610,7 @@ class SyncDecrypterPool(SyncEncryptDecryptPool):
:type trans_id: str
"""
# could pass source_replica in params for callback chain
- logger.debug("Sync decrypter pool: inserting doc in local db: "
+ logger.debug("sync decrypter pool: inserting doc in local db: "
"%s:%s %s" % (doc_id, doc_rev, gen))
# convert deleted documents to avoid error on document creation
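
The 900 cutoff added above stays safely below SQLITE_MAX_VARIABLE_NUMBER, which defaults to 999 bound parameters per statement, so a query naming each decrypted index as a separate variable would otherwise be refused. A standalone sketch of the limit and the chunking workaround, using the stdlib sqlite3 module for brevity; table name and sizes are hypothetical:

    # Sketch: SQLite refuses statements with more bound parameters than
    # SQLITE_MAX_VARIABLE_NUMBER (999 by default), so large IN (...)
    # queries must be chunked.
    import sqlite3

    def fetch_in_chunks(conn, ids, chunk_size=900):
        rows = []
        for i in range(0, len(ids), chunk_size):
            chunk = ids[i:i + chunk_size]
            placeholders = ', '.join('?' * len(chunk))
            query = 'SELECT * FROM docs WHERE id IN (%s)' % placeholders
            rows.extend(conn.execute(query, chunk).fetchall())
        return rows

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE docs (id INTEGER)')
    conn.executemany('INSERT INTO docs VALUES (?)',
                     [(i,) for i in range(2000)])
    assert len(fetch_in_chunks(conn, list(range(2000)))) == 2000
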
diff --git a/client/src/leap/soledad/client/http_target/__init__.py b/client/src/leap/soledad/client/http_target/__init__.py
index b7e54aa4..62e8bcf0 100644
--- a/client/src/leap/soledad/client/http_target/__init__.py
+++ b/client/src/leap/soledad/client/http_target/__init__.py
@@ -23,15 +23,15 @@ after receiving.
import os
-import logging
+from leap.soledad.common.log import getLogger
from leap.common.http import HTTPClient
from leap.soledad.client.http_target.send import HTTPDocSender
from leap.soledad.client.http_target.api import SyncTargetAPI
from leap.soledad.client.http_target.fetch import HTTPDocFetcher
-logger = logging.getLogger(__name__)
+logger = getLogger(__name__)
# we may want to collect statistics from the sync process
diff --git a/client/src/leap/soledad/client/http_target/api.py b/client/src/leap/soledad/client/http_target/api.py
index f8de9a15..3c8e3764 100644
--- a/client/src/leap/soledad/client/http_target/api.py
+++ b/client/src/leap/soledad/client/http_target/api.py
@@ -15,7 +15,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
-import time
import json
import base64
diff --git a/client/src/leap/soledad/client/http_target/fetch.py b/client/src/leap/soledad/client/http_target/fetch.py
index a3f70b02..184c5883 100644
--- a/client/src/leap/soledad/client/http_target/fetch.py
+++ b/client/src/leap/soledad/client/http_target/fetch.py
@@ -14,7 +14,6 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import logging
import json
from twisted.internet import defer
@@ -24,11 +23,12 @@ from leap.soledad.client.events import emit_async
from leap.soledad.client.crypto import is_symmetrically_encrypted
from leap.soledad.client.encdecpool import SyncDecrypterPool
from leap.soledad.client.http_target.support import RequestBody
+from leap.soledad.common.log import getLogger
from leap.soledad.common.document import SoledadDocument
from leap.soledad.common.l2db import errors
from leap.soledad.common.l2db.remote import utils
-logger = logging.getLogger(__name__)
+logger = getLogger(__name__)
class HTTPDocFetcher(object):
diff --git a/client/src/leap/soledad/client/http_target/send.py b/client/src/leap/soledad/client/http_target/send.py
index 13218acf..c7bd057e 100644
--- a/client/src/leap/soledad/client/http_target/send.py
+++ b/client/src/leap/soledad/client/http_target/send.py
@@ -15,15 +15,15 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
-import logging
from twisted.internet import defer
+from leap.soledad.common.log import getLogger
from leap.soledad.client.events import emit_async
from leap.soledad.client.events import SOLEDAD_SYNC_SEND_STATUS
from leap.soledad.client.http_target.support import RequestBody
-logger = logging.getLogger(__name__)
+logger = getLogger(__name__)
class HTTPDocSender(object):
@@ -82,7 +82,6 @@ class HTTPDocSender(object):
if self._defer_encryption:
self._delete_sent(sent)
- user_data = {'uuid': self.uuid, 'userid': self.userid}
_emit_send_status(self.uuid, body.consumed, total)
defer.returnValue(result)
diff --git a/client/src/leap/soledad/client/pragmas.py b/client/src/leap/soledad/client/pragmas.py
index 55397d10..870ed63e 100644
--- a/client/src/leap/soledad/client/pragmas.py
+++ b/client/src/leap/soledad/client/pragmas.py
@@ -17,15 +17,15 @@
"""
Different pragmas used in the initialization of the SQLCipher database.
"""
-import logging
import string
import threading
import os
from leap.soledad.common import soledad_assert
+from leap.soledad.common.log import getLogger
-logger = logging.getLogger(__name__)
+logger = getLogger(__name__)
_db_init_lock = threading.Lock()
@@ -321,7 +321,7 @@ def set_synchronous_off(db_handle):
"""
Change the setting of the "synchronous" flag to OFF.
"""
- logger.debug("SQLCIPHER: SETTING SYNCHRONOUS OFF")
+ logger.debug("sqlcipher: setting synchronous off")
db_handle.cursor().execute('PRAGMA synchronous=OFF')
@@ -329,7 +329,7 @@ def set_synchronous_normal(db_handle):
"""
Change the setting of the "synchronous" flag to NORMAL.
"""
- logger.debug("SQLCIPHER: SETTING SYNCHRONOUS NORMAL")
+ logger.debug("sqlcipher: setting synchronous normal")
db_handle.cursor().execute('PRAGMA synchronous=NORMAL')
@@ -337,7 +337,7 @@ def set_mem_temp_store(db_handle):
"""
Use a in-memory store for temporary tables.
"""
- logger.debug("SQLCIPHER: SETTING TEMP_STORE MEMORY")
+ logger.debug("sqlcipher: setting temp_store memory")
db_handle.cursor().execute('PRAGMA temp_store=MEMORY')
@@ -362,7 +362,7 @@ def set_write_ahead_logging(db_handle):
requirements of the application. The default strategy is to run a
checkpoint once the WAL reaches 1000 pages"
"""
- logger.debug("SQLCIPHER: SETTING WRITE-AHEAD LOGGING")
+ logger.debug("sqlcipher: setting write-ahead logging")
db_handle.cursor().execute('PRAGMA journal_mode=WAL')
# The optimum value can still use a little bit of tuning, but we favor
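
For reference, the journal-mode switch above can be exercised against any SQLite-compatible handle; a small sketch with the stdlib sqlite3 module (pysqlcipher's dbapi2 accepts the same PRAGMA statements; the database path is hypothetical):

    # Sketch: enable write-ahead logging and read the mode back.
    import sqlite3

    db_handle = sqlite3.connect('/tmp/example.db')  # hypothetical path
    db_handle.cursor().execute('PRAGMA journal_mode=WAL')
    mode = db_handle.cursor().execute('PRAGMA journal_mode').fetchone()[0]
    assert mode == 'wal'
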
diff --git a/client/src/leap/soledad/client/secrets.py b/client/src/leap/soledad/client/secrets.py
index 3547a711..1eb6f31d 100644
--- a/client/src/leap/soledad/client/secrets.py
+++ b/client/src/leap/soledad/client/secrets.py
@@ -23,7 +23,6 @@ Soledad secrets handling.
import os
import scrypt
-import logging
import binascii
import errno
import json
@@ -33,11 +32,12 @@ from hashlib import sha256
from leap.soledad.common import soledad_assert
from leap.soledad.common import soledad_assert_type
from leap.soledad.common import document
+from leap.soledad.common.log import getLogger
from leap.soledad.client import events
from leap.soledad.client.crypto import encrypt_sym, decrypt_sym
-logger = logging.getLogger(name=__name__)
+logger = getLogger(__name__)
#
@@ -193,42 +193,42 @@ class SoledadSecrets(object):
"""
# STAGE 1 - verify if secrets exist locally
try:
- logger.info("Trying to load secrets from local storage...")
+ logger.info("trying to load secrets from local storage...")
version = self._load_secrets_from_local_file()
# eventually migrate local and remote stored documents from old
# format version
if version < self.RECOVERY_DOC_VERSION:
self._store_secrets()
self._upload_crypto_secrets()
- logger.info("Found secrets in local storage.")
+ logger.info("found secrets in local storage")
return
except NoStorageSecret:
- logger.info("Could not find secrets in local storage.")
+ logger.info("could not find secrets in local storage")
# STAGE 2 - there are no secrets in local storage and this is the
# first time we are running soledad with the specified
# secrets_path. Try to fetch encrypted secrets from
# server.
try:
- logger.info('Trying to fetch secrets from remote storage...')
+ logger.info('trying to fetch secrets from remote storage...')
version = self._download_crypto_secrets()
self._store_secrets()
# eventually migrate remote stored document from old format
# version
if version < self.RECOVERY_DOC_VERSION:
self._upload_crypto_secrets()
- logger.info('Found secrets in remote storage.')
+ logger.info('found secrets in remote storage.')
return
except NoStorageSecret:
- logger.info("Could not find secrets in remote storage.")
+ logger.info("could not find secrets in remote storage.")
# STAGE 3 - there are no secrets in server also, so we want to
# generate the secrets and store them in the remote
# db.
- logger.info("Generating secrets...")
+ logger.info("generating secrets...")
self._gen_crypto_secrets()
- logger.info("Uploading secrets...")
+ logger.info("uploading secrets...")
self._upload_crypto_secrets()
def _has_secret(self):
@@ -298,7 +298,7 @@ class SoledadSecrets(object):
"""
Generate the crypto secrets.
"""
- logger.info('No cryptographic secrets found, creating new secrets...')
+ logger.info('no cryptographic secrets found, creating new secrets...')
secret_id = self._gen_secret()
self.set_secret_id(secret_id)
@@ -445,7 +445,7 @@ class SoledadSecrets(object):
encrypted_secret)
secret_count += 1
except SecretsException as e:
- logger.error("Failed to decrypt storage secret: %s"
+ logger.error("failed to decrypt storage secret: %s"
% str(e))
return secret_count, active_secret
@@ -461,7 +461,7 @@ class SoledadSecrets(object):
events.emit_async(events.SOLEDAD_DOWNLOADING_KEYS, user_data)
db = self._shared_db
if not db:
- logger.warning('No shared db found')
+ logger.warn('no shared db found')
return
doc = db.get_doc(self._shared_db_doc_id())
user_data = {'userid': self._userid, 'uuid': self._uuid}
@@ -492,7 +492,7 @@ class SoledadSecrets(object):
events.emit_async(events.SOLEDAD_UPLOADING_KEYS, user_data)
db = self._shared_db
if not db:
- logger.warning('No shared db found')
+ logger.warn('no shared db found')
return
db.put_doc(doc)
events.emit_async(events.SOLEDAD_DONE_UPLOADING_KEYS, user_data)
diff --git a/client/src/leap/soledad/client/sqlcipher.py b/client/src/leap/soledad/client/sqlcipher.py
index 166c0783..3921c323 100644
--- a/client/src/leap/soledad/client/sqlcipher.py
+++ b/client/src/leap/soledad/client/sqlcipher.py
@@ -41,7 +41,6 @@ So, as the statements above were introduced for backwards compatibility with
SQLCipher 1.1 databases, we do not implement them as all SQLCipher databases
handled by Soledad should be created by SQLCipher >= 2.0.
"""
-import logging
import os
import json
@@ -55,8 +54,9 @@ from twisted.internet import defer
from twisted.enterprise import adbapi
from leap.soledad.common.document import SoledadDocument
-from leap.soledad.common import l2db
+from leap.soledad.common.log import getLogger
from leap.soledad.common.l2db import errors as u1db_errors
+from leap.soledad.common.l2db import Document
from leap.soledad.common.l2db.backends import sqlite_backend
from leap.soledad.common.errors import DatabaseAccessError
@@ -65,7 +65,7 @@ from leap.soledad.client.sync import SoledadSynchronizer
from leap.soledad.client import pragmas
-logger = logging.getLogger(__name__)
+logger = getLogger(__name__)
# Monkey-patch u1db.backends.sqlite_backend with pysqlcipher.dbapi2
@@ -448,7 +448,6 @@ class SQLCipherU1DBSync(SQLCipherDatabase):
self.received_docs = []
self.running = False
- self.shutdownID = None
self._db_handle = None
# initialize the main db before scheduling a start
@@ -465,8 +464,6 @@ class SQLCipherU1DBSync(SQLCipherDatabase):
def _start(self):
if not self.running:
- self.shutdownID = self._reactor.addSystemEventTrigger(
- 'during', 'shutdown', self.finalClose)
self.running = True
def _initialize_main_db(self):
@@ -561,13 +558,6 @@ class SQLCipherU1DBSync(SQLCipherDatabase):
# XXX this SHOULD BE a callback
return self._get_generation()
- def finalClose(self):
- """
- This should only be called by the shutdown trigger.
- """
- self.shutdownID = None
- self.running = False
-
def close(self):
"""
Close the syncer and syncdb orderly
@@ -578,6 +568,7 @@ class SQLCipherU1DBSync(SQLCipherDatabase):
_, syncer = self._syncers[url]
syncer.close()
del self._syncers[url]
+ self.running = False
class U1DBSQLiteBackend(sqlite_backend.SQLitePartialExpandDatabase):
@@ -595,7 +586,7 @@ class U1DBSQLiteBackend(sqlite_backend.SQLitePartialExpandDatabase):
self._db_handle = conn
self._real_replica_uid = None
self._ensure_schema()
- self._factory = l2db.Document
+ self._factory = Document
class SoledadSQLCipherWrapper(SQLCipherDatabase):
diff --git a/client/src/leap/soledad/client/sync.py b/client/src/leap/soledad/client/sync.py
index 2656a150..7ed5f693 100644
--- a/client/src/leap/soledad/client/sync.py
+++ b/client/src/leap/soledad/client/sync.py
@@ -18,17 +18,16 @@
Soledad synchronization utilities.
"""
import os
-import time
-import logging
from twisted.internet import defer
+from leap.soledad.common.log import getLogger
from leap.soledad.common.l2db import errors
from leap.soledad.common.l2db.sync import Synchronizer
from leap.soledad.common.errors import BackendNotReadyError
-logger = logging.getLogger(__name__)
+logger = getLogger(__name__)
# we may want to collect statistics from the sync process
@@ -97,21 +96,17 @@ class SoledadSynchronizer(Synchronizer):
sync_target.get_sync_info(self.source._replica_uid)
except (errors.DatabaseDoesNotExist, BackendNotReadyError) as e:
logger.debug("Database isn't ready on server. Will be created.")
- logger.debug("Reason: %s", e.__class__)
+ logger.debug("Reason: %s" % e.__class__)
self.target_replica_uid = None
target_gen, target_trans_id = 0, ''
target_my_gen, target_my_trans_id = 0, ''
- logger.debug(
- "Soledad target sync info:\n"
- " target replica uid: %s\n"
- " target generation: %d\n"
- " target trans id: %s\n"
- " target my gen: %d\n"
- " target my trans_id: %s\n"
- " source replica_uid: %s\n"
- % (self.target_replica_uid, target_gen, target_trans_id,
- target_my_gen, target_my_trans_id, self.source._replica_uid))
+ logger.debug("target replica uid: %s" % self.target_replica_uid)
+ logger.debug("target generation: %d" % target_gen)
+ logger.debug("target trans id: %s" % target_trans_id)
+ logger.debug("target my gen: %d" % target_my_gen)
+ logger.debug("target my trans_id: %s" % target_my_trans_id)
+ logger.debug("source replica_uid: %s" % self.source._replica_uid)
# make sure we'll have access to target replica uid once it exists
if self.target_replica_uid is None:
@@ -134,8 +129,7 @@ class SoledadSynchronizer(Synchronizer):
# what's changed since that generation and this current gen
my_gen, _, changes = self.source.whats_changed(target_my_gen)
- logger.debug("Soledad sync: there are %d documents to send."
- % len(changes))
+ logger.debug("there are %d documents to send" % len(changes))
# get source last-seen database generation for the target
if self.target_replica_uid is None:
@@ -144,11 +138,10 @@ class SoledadSynchronizer(Synchronizer):
target_last_known_gen, target_last_known_trans_id = \
self.source._get_replica_gen_and_trans_id(
self.target_replica_uid)
- logger.debug(
- "Soledad source sync info:\n"
- " last target gen known to source: %d\n"
- " last target trans_id known to source: %s"
- % (target_last_known_gen, target_last_known_trans_id))
+ logger.debug(
+ "last known target gen: %d" % target_last_known_gen)
+ logger.debug(
+ "last known target trans_id: %s" % target_last_known_trans_id)
# validate transaction ids
if not changes and target_last_known_gen == target_gen:
@@ -181,11 +174,8 @@ class SoledadSynchronizer(Synchronizer):
target_last_known_gen, target_last_known_trans_id,
self._insert_doc_from_target, ensure_callback=ensure_callback,
defer_decryption=defer_decryption)
- logger.debug(
- "Soledad source sync info after sync exchange:\n"
- " source known target gen: %d\n"
- " source known target trans_id: %s"
- % (new_gen, new_trans_id))
+ logger.debug("target gen after sync: %d" % new_gen)
+ logger.debug("target trans_id after sync: %s" % new_trans_id)
info = {
"target_replica_uid": self.target_replica_uid,
"new_gen": new_gen,
@@ -224,7 +214,7 @@ class SoledadSynchronizer(Synchronizer):
:return: A deferred which will fire when the sync has been completed.
:rtype: twisted.internet.defer.Deferred
"""
- logger.debug("Completing deferred last step in SYNC...")
+ logger.debug("completing deferred last step in sync...")
# record target synced-up-to generation including applying what we
# sent
diff --git a/common/pkg/requirements-latest.pip b/common/pkg/requirements-latest.pip
index 396d77f1..852f2433 100644
--- a/common/pkg/requirements-latest.pip
+++ b/common/pkg/requirements-latest.pip
@@ -1,4 +1,4 @@
--index-url https://pypi.python.org/simple/
--e 'git+https://github.com/pixelated-project/leap_pycommon.git@develop#egg=leap.common'
+-e 'git+https://github.com/leapcode/leap_pycommon.git@develop#egg=leap.common'
-e .
diff --git a/common/pkg/requirements-testing.pip b/common/pkg/requirements-testing.pip
deleted file mode 100644
index 526b7101..00000000
--- a/common/pkg/requirements-testing.pip
+++ /dev/null
@@ -1,14 +0,0 @@
-mock
-testscenarios
-setuptools-trial
-pep8
-
-#----------------------------------------------------------------------
-#Right now, common tests also depend on having the other soledad
-#modules installed. Commenting to avoid versioning problem, you should
-#know what you are testing against :)
-#----------------------------------------------------------------------
-
-#leap.common>=0.4.0
-#leap.soledad.server>=0.7.0
-#leap.soledad.client>=0.7.0
diff --git a/common/setup.py b/common/setup.py
index 7191fa00..bb70d587 100644
--- a/common/setup.py
+++ b/common/setup.py
@@ -17,13 +17,8 @@
"""
setup file for leap.soledad.common
"""
-import binascii
-import json
-from os import listdir
-from os.path import realpath, dirname, isdir, join, isfile, basename
import re
-from distutils.command.build import build as _build
from setuptools import setup
from setuptools import find_packages
from setuptools import Command
@@ -110,117 +105,6 @@ def get_versions():
with open(versioneer_cfg.versionfile_source, 'w') as f:
f.write(subst_template)
-cmdclass = versioneer.get_cmdclass()
-
-#
-# Couch backend design docs file generation.
-#
-
-old_cmd_sdist = cmdclass["sdist"]
-
-
-def build_ddocs_py(basedir=None, with_src=True):
- """
- Build `ddocs.py` file.
-
- For ease of development, couch backend design documents are stored as
- `.js` files in subdirectories of `src/leap/soledad/common/ddocs`. This
- function scans that directory for javascript files, builds the design
- documents structure, and encode those structures in the `ddocs.py` file.
-
- This function is used when installing in develop mode, building or
- generating source distributions (see the next classes and the `cmdclass`
- setuptools parameter.
-
- This funciton uses the following conventions to generate design documents:
-
- - Design documents are represented by directories in the form
- `<prefix>/<ddoc>`, there prefix is the `src/leap/soledad/common/ddocs`
- directory.
- - Design document directories might contain `views`, `lists` and
- `updates` subdirectories.
- - Views subdirectories must contain a `map.js` file and may contain a
- `reduce.js` file.
- - List and updates subdirectories may contain any number of javascript
- files (i.e. ending in `.js`) whose names will be mapped to the
- corresponding list or update function name.
- """
- cur_pwd = dirname(realpath(__file__))
- common_path = ('src', 'leap', 'soledad', 'common')
- dest_common_path = common_path
- if not with_src:
- dest_common_path = common_path[1:]
- prefix = join(cur_pwd, *common_path)
-
- dest_prefix = prefix
- if basedir is not None:
- # we're bulding a sdist
- dest_prefix = join(basedir, *dest_common_path)
-
- ddocs_prefix = join(prefix, 'ddocs')
-
- if not isdir(ddocs_prefix):
- print "No ddocs/ folder, bailing out..."
- return
-
- ddocs = {}
-
- # design docs are represented by subdirectories of `ddocs_prefix`
- for ddoc in [f for f in listdir(ddocs_prefix)
- if isdir(join(ddocs_prefix, f))]:
-
- ddocs[ddoc] = {'_id': '_design/%s' % ddoc}
-
- for t in ['views', 'lists', 'updates']:
- tdir = join(ddocs_prefix, ddoc, t)
- if isdir(tdir):
-
- ddocs[ddoc][t] = {}
-
- if t == 'views': # handle views (with map/reduce functions)
- for view in [f for f in listdir(tdir)
- if isdir(join(tdir, f))]:
- # look for map.js and reduce.js
- mapfile = join(tdir, view, 'map.js')
- reducefile = join(tdir, view, 'reduce.js')
- mapfun = None
- reducefun = None
- try:
- with open(mapfile) as f:
- mapfun = f.read()
- except IOError:
- pass
- try:
- with open(reducefile) as f:
- reducefun = f.read()
- except IOError:
- pass
- ddocs[ddoc]['views'][view] = {}
-
- if mapfun is not None:
- ddocs[ddoc]['views'][view]['map'] = mapfun
- if reducefun is not None:
- ddocs[ddoc]['views'][view]['reduce'] = reducefun
-
- else: # handle lists, updates, etc
- for fun in [f for f in listdir(tdir)
- if isfile(join(tdir, f))]:
- funfile = join(tdir, fun)
- funname = basename(funfile).replace('.js', '')
- try:
- with open(funfile) as f:
- ddocs[ddoc][t][funname] = f.read()
- except IOError:
- pass
- # write file containing design docs strings
- ddoc_filename = "ddocs.py"
- with open(join(dest_prefix, ddoc_filename), 'w') as f:
- for ddoc in ddocs:
- f.write(
- "%s = '%s'\n" %
- (ddoc, binascii.b2a_base64(json.dumps(ddocs[ddoc]))[:-1]))
- print "Wrote design docs in %s" % (dest_prefix + '/' + ddoc_filename,)
-
class cmd_develop(_cmd_develop):
def run(self):
@@ -230,17 +114,10 @@ class cmd_develop(_cmd_develop):
# unless we update this, the command will keep using the old version
self.distribution.metadata.version = versions["version"]
_cmd_develop.run(self)
- build_ddocs_py()
-
-
-class cmd_build(_build):
- def run(self):
- _build.run(self)
- build_ddocs_py(basedir=self.build_lib, with_src=False)
+cmdclass = versioneer.get_cmdclass()
cmdclass["freeze_debianver"] = freeze_debianver
-cmdclass["build"] = cmd_build
cmdclass["develop"] = cmd_develop
@@ -250,13 +127,13 @@ requirements = utils.parse_requirements()
if utils.is_develop_mode():
print
- print ("[WARNING] Skipping leap-specific dependencies "
- "because development mode is detected.")
- print ("[WARNING] You can install "
- "the latest published versions with "
- "'pip install -r pkg/requirements-leap.pip'")
- print ("[WARNING] Or you can instead do 'python setup.py develop' "
- "from the parent folder of each one of them.")
+ print("[WARNING] Skipping leap-specific dependencies "
+ "because development mode is detected.")
+ print("[WARNING] You can install "
+ "the latest published versions with "
+ "'pip install -r pkg/requirements-leap.pip'")
+ print("[WARNING] Or you can instead do 'python setup.py develop' "
+ "from the parent folder of each one of them.")
print
else:
requirements += utils.parse_requirements(
@@ -287,6 +164,4 @@ setup(
package_data={'': ["*.sql"]},
test_suite='leap.soledad.common.tests',
install_requires=requirements,
- tests_require=utils.parse_requirements(
- reqfiles=['pkg/requirements-testing.pip']),
)
diff --git a/common/src/leap/soledad/common/.gitignore b/common/src/leap/soledad/common/.gitignore
deleted file mode 100644
index 3378c78a..00000000
--- a/common/src/leap/soledad/common/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-ddocs.py
diff --git a/common/src/leap/soledad/common/README.txt b/common/src/leap/soledad/common/README.txt
index 38b9858e..0a252650 100644
--- a/common/src/leap/soledad/common/README.txt
+++ b/common/src/leap/soledad/common/README.txt
@@ -60,15 +60,6 @@ implemented in a way that all changes will be pushed with just one operation.
* delete_index
* create_index
-Couch views and update functions are used in order to achieve atomicity on the
-Couch backend. Transactions are stored in the `u1db_transactions` field of the
-couch document. Document's content and conflicted versions are stored as couch
-document attachments with names, respectivelly, `u1db_content` and
-`u1db_conflicts`.
-
-A map of methods and couch query URI can be found on the `./ddocs/README.txt`
-document.
-
Notes:
* Currently, the couch backend does not implement indexing, so what is
diff --git a/common/src/leap/soledad/common/couch/__init__.py b/common/src/leap/soledad/common/couch/__init__.py
index 523a50a0..0f4102db 100644
--- a/common/src/leap/soledad/common/couch/__init__.py
+++ b/common/src/leap/soledad/common/couch/__init__.py
@@ -23,21 +23,17 @@ import json
import re
import uuid
import binascii
-import time
-import functools
from StringIO import StringIO
from urlparse import urljoin
from contextlib import contextmanager
-from multiprocessing.pool import ThreadPool
from couchdb.client import Server, Database
from couchdb.http import (
ResourceConflict,
ResourceNotFound,
- ServerError,
Session,
urljoin as couch_urljoin,
Resource,
@@ -50,9 +46,6 @@ from leap.soledad.common.l2db.errors import (
from leap.soledad.common.l2db.remote import http_app
-from leap.soledad.common import ddocs
-from .errors import raise_server_error
-from .errors import raise_missing_design_doc_error
from .support import MultipartWriter
from leap.soledad.common.errors import InvalidURLError
from leap.soledad.common.document import ServerDocument
@@ -100,7 +93,19 @@ def couch_server(url):
yield server
-THREAD_POOL = ThreadPool(20)
+def _get_gen_doc_id(gen):
+ return 'gen-%s' % str(gen).zfill(10)
+
+
+GENERATION_KEY = 'gen'
+TRANSACTION_ID_KEY = 'trans_id'
+REPLICA_UID_KEY = 'replica_uid'
+DOC_ID_KEY = 'doc_id'
+SCHEMA_VERSION_KEY = 'schema_version'
+
+CONFIG_DOC_ID = '_local/config'
+SYNC_DOC_ID_PREFIX = '_local/sync_'
+SCHEMA_VERSION = 1
class CouchDatabase(object):
@@ -111,7 +116,7 @@ class CouchDatabase(object):
"""
@classmethod
- def open_database(cls, url, create, ensure_ddocs=False, replica_uid=None,
+ def open_database(cls, url, create, replica_uid=None,
database_security=None):
"""
Open a U1DB database using CouchDB as backend.
@@ -122,8 +127,6 @@ class CouchDatabase(object):
:type create: bool
:param replica_uid: an optional unique replica identifier
:type replica_uid: str
- :param ensure_ddocs: Ensure that the design docs exist on server.
- :type ensure_ddocs: bool
:param database_security: security rules as CouchDB security doc
:type database_security: dict
@@ -144,21 +147,20 @@ class CouchDatabase(object):
server.create(dbname)
else:
raise DatabaseDoesNotExist()
- db = cls(url,
- dbname, ensure_ddocs=ensure_ddocs,
+ db = cls(url, dbname, ensure_security=create,
database_security=database_security)
return SoledadBackend(
db, replica_uid=replica_uid)
- def __init__(self, url, dbname, ensure_ddocs=True,
+ def __init__(self, url, dbname, ensure_security=False,
database_security=None):
"""
:param url: Couch server URL with necessary credentials
:type url: string
:param dbname: Couch database name
:type dbname: string
- :param ensure_ddocs: Ensure that the design docs exist on server.
- :type ensure_ddocs: bool
+ :param ensure_security: will PUT a _security ddoc if set
+ :type ensure_security: bool
:param database_security: security rules as CouchDB security doc
:type database_security: dict
"""
@@ -169,8 +171,7 @@ class CouchDatabase(object):
self.batching = False
self.batch_generation = None
self.batch_docs = {}
- if ensure_ddocs:
- self.ensure_ddocs_on_db()
+ if ensure_security:
self.ensure_security_ddoc(database_security)
def batch_start(self):
@@ -205,22 +206,6 @@ class CouchDatabase(object):
except ResourceNotFound:
raise DatabaseDoesNotExist()
- def ensure_ddocs_on_db(self):
- """
- Ensure that the design documents used by the backend exist on the
- couch database.
- """
- for ddoc_name in ['docs', 'syncs', 'transactions']:
- try:
- self.json_from_resource(['_design'] +
- ddoc_name.split('/') + ['_info'],
- check_missing_ddoc=False)
- except ResourceNotFound:
- ddoc = json.loads(
- binascii.a2b_base64(
- getattr(ddocs, ddoc_name)))
- self._database.save(ddoc)
-
def ensure_security_ddoc(self, security_config=None):
"""
Make sure that only soledad user is able to access this database as
@@ -261,13 +246,14 @@ class CouchDatabase(object):
"""
try:
# set on existent config document
- doc = self._database['u1db_config']
- doc['replica_uid'] = replica_uid
+ doc = self._database[CONFIG_DOC_ID]
+ doc[REPLICA_UID_KEY] = replica_uid
except ResourceNotFound:
# or create the config document
doc = {
- '_id': 'u1db_config',
- 'replica_uid': replica_uid,
+ '_id': CONFIG_DOC_ID,
+ REPLICA_UID_KEY: replica_uid,
+ SCHEMA_VERSION_KEY: SCHEMA_VERSION,
}
self._database.save(doc)
@@ -280,8 +266,8 @@ class CouchDatabase(object):
"""
try:
# grab replica_uid from server
- doc = self._database['u1db_config']
- replica_uid = doc['replica_uid']
+ doc = self._database[CONFIG_DOC_ID]
+ replica_uid = doc[REPLICA_UID_KEY]
return replica_uid
except ResourceNotFound:
# create a unique replica_uid
@@ -308,8 +294,8 @@ class CouchDatabase(object):
"""
generation, _ = self.get_generation_info()
- results = list(self.get_docs(self._database,
- include_deleted=include_deleted))
+ results = list(
+ self._get_docs(None, True, include_deleted))
return (generation, results)
def get_docs(self, doc_ids, check_for_conflicts=True,
@@ -330,24 +316,37 @@ class CouchDatabase(object):
in matching doc_ids order.
:rtype: iterable
"""
- # Workaround for:
- #
- # http://bugs.python.org/issue7980
- # https://leap.se/code/issues/5449
- #
- # python-couchdb uses time.strptime, which is not thread safe. In
- # order to avoid the problem described on the issues above, we preload
- # strptime here by evaluating the conversion of an arbitrary date.
- # This will not be needed when/if we switch from python-couchdb to
- # paisley.
- time.strptime('Mar 8 1917', '%b %d %Y')
- get_one = functools.partial(
- self.get_doc, check_for_conflicts=check_for_conflicts)
- docs = [THREAD_POOL.apply_async(get_one, [doc_id])
- for doc_id in doc_ids]
- for doc in docs:
- doc = doc.get()
- if not doc or not include_deleted and doc.is_tombstone():
+ return self._get_docs(doc_ids, check_for_conflicts, include_deleted)
+
+ def _get_docs(self, doc_ids, check_for_conflicts, include_deleted):
+ """
+ Use couch's `_all_docs` view to get the documents indicated in
+ `doc_ids`.
+
+ :param doc_ids: A list of document identifiers or None for all.
+ :type doc_ids: list
+ :param check_for_conflicts: If set to False, then the conflict check
+ will be skipped, and 'None' will be
+ returned instead of True/False.
+ :type check_for_conflicts: bool
+ :param include_deleted: If set to True, deleted documents will be
+ returned with empty content. Otherwise deleted
+ documents will not be included in the results.
+
+ :return: iterable giving the Document object for each document id
+ in matching doc_ids order.
+ :rtype: iterable
+ """
+ params = {'include_docs': 'true', 'attachments': 'true'}
+ if doc_ids is not None:
+ params['keys'] = doc_ids
+ view = self._database.view("_all_docs", **params)
+ for row in view.rows:
+ result = row['doc']
+ doc = self.__parse_doc_from_couch(
+ result, result['_id'], check_for_conflicts=check_for_conflicts)
+ # filter out non-u1db or deleted documents
+ if not doc or (not include_deleted and doc.is_tombstone()):
continue
yield doc
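
The `keys`, `include_docs` and `attachments` parameters used by `_get_docs` are ordinary `_all_docs` query options, so the whole batch is fetched in a single round trip instead of one request per document. A sketch of the equivalent direct query with python-couchdb; the server URL, database name and document ids are hypothetical:

    # Sketch: fetch a fixed set of documents through the _all_docs view,
    # as _get_docs does above; URL, database and ids are hypothetical.
    from couchdb.client import Server

    db = Server('http://127.0.0.1:5984/')['user-db']
    params = {'include_docs': 'true', 'attachments': 'true',
              'keys': ['doc-1', 'doc-2']}
    for row in db.view('_all_docs', **params):
        print(row['doc']['_id'])
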
@@ -434,8 +433,6 @@ class CouchDatabase(object):
result['_attachments']['u1db_conflicts']['data']))))
# store couch revision
doc.couch_rev = result['_rev']
- # store transactions
- doc.transactions = result['u1db_transactions']
return doc
def _build_conflicts(self, doc_id, attached_conflicts):
@@ -471,14 +468,11 @@ class CouchDatabase(object):
"""
if generation == 0:
return ''
- # query a couch list function
- ddoc_path = [
- '_design', 'transactions', '_list', 'trans_id_for_gen', 'log'
- ]
- response = self.json_from_resource(ddoc_path, gen=generation)
- if response == {}:
+ log = self._get_transaction_log(start=generation, end=generation)
+ if not log:
raise InvalidGeneration
- return response['transaction_id']
+ _, _, trans_id = log[0]
+ return trans_id
def get_replica_gen_and_trans_id(self, other_replica_uid):
"""
@@ -499,18 +493,19 @@ class CouchDatabase(object):
synchronized with the replica, this is (0, '').
:rtype: (int, str)
"""
- doc_id = 'u1db_sync_%s' % other_replica_uid
+ doc_id = '%s%s' % (SYNC_DOC_ID_PREFIX, other_replica_uid)
try:
doc = self._database[doc_id]
except ResourceNotFound:
doc = {
'_id': doc_id,
- 'generation': 0,
- 'transaction_id': '',
+ GENERATION_KEY: 0,
+ REPLICA_UID_KEY: str(other_replica_uid),
+ TRANSACTION_ID_KEY: '',
}
self._database.save(doc)
- result = doc['generation'], doc['transaction_id']
- return result
+ gen, trans_id = doc[GENERATION_KEY], doc[TRANSACTION_ID_KEY]
+ return gen, trans_id
def get_doc_conflicts(self, doc_id, couch_rev=None):
"""
@@ -537,7 +532,6 @@ class CouchDatabase(object):
try:
response = self.json_from_resource([doc_id, 'u1db_conflicts'],
- check_missing_ddoc=False,
**params)
return conflicts + self._build_conflicts(
doc_id, json.loads(response.read()))
@@ -562,13 +556,13 @@ class CouchDatabase(object):
generation.
:type other_transaction_id: str
"""
- doc_id = 'u1db_sync_%s' % other_replica_uid
+ doc_id = '%s%s' % (SYNC_DOC_ID_PREFIX, other_replica_uid)
try:
doc = self._database[doc_id]
except ResourceNotFound:
doc = {'_id': doc_id}
- doc['generation'] = other_generation
- doc['transaction_id'] = other_transaction_id
+ doc[GENERATION_KEY] = other_generation
+ doc[TRANSACTION_ID_KEY] = other_transaction_id
self._database.save(doc)
def get_transaction_log(self):
@@ -578,12 +572,35 @@ class CouchDatabase(object):
:return: The complete transaction log.
:rtype: [(str, str)]
"""
- # query a couch view
- ddoc_path = ['_design', 'transactions', '_view', 'log']
- response = self.json_from_resource(ddoc_path)
- return map(
- lambda row: (row['id'], row['value']),
- response['rows'])
+ log = self._get_transaction_log()
+ return map(lambda i: (i[1], i[2]), log)
+
+ def _get_gen_docs(
+ self, start=0, end=9999999999, descending=None, limit=None):
+ params = {}
+ if descending:
+ params['descending'] = 'true'
+ # honor couch way of traversing the view tree in reverse order
+ start, end = end, start
+ params['startkey'] = _get_gen_doc_id(start)
+ params['endkey'] = _get_gen_doc_id(end)
+ params['include_docs'] = 'true'
+ if limit:
+ params['limit'] = limit
+ view = self._database.view("_all_docs", **params)
+ return view.rows
+
+ def _get_transaction_log(self, start=0, end=9999999999):
+ # get current gen and trans_id
+ rows = self._get_gen_docs(start=start, end=end)
+ log = []
+ for row in rows:
+ doc = row['doc']
+ log.append((
+ doc[GENERATION_KEY],
+ doc[DOC_ID_KEY],
+ doc[TRANSACTION_ID_KEY]))
+ return log
def whats_changed(self, old_generation=0):
"""
@@ -602,32 +619,16 @@ class CouchDatabase(object):
changes first)
:rtype: (int, str, [(str, int, str)])
"""
- # query a couch list function
- ddoc_path = [
- '_design', 'transactions', '_list', 'whats_changed', 'log'
- ]
- response = self.json_from_resource(ddoc_path, old_gen=old_generation)
- results = map(
- lambda row:
- (row['generation'], row['doc_id'], row['transaction_id']),
- response['transactions'])
- results.reverse()
- cur_gen = old_generation
- seen = set()
changes = []
- newest_trans_id = ''
- for generation, doc_id, trans_id in results:
+ cur_generation, last_trans_id = self.get_generation_info()
+ relevant_tail = self._get_transaction_log(start=old_generation + 1)
+ seen = set()
+ for generation, doc_id, trans_id in reversed(relevant_tail):
if doc_id not in seen:
changes.append((doc_id, generation, trans_id))
seen.add(doc_id)
- if changes:
- cur_gen = changes[0][1] # max generation
- newest_trans_id = changes[0][2]
- changes.reverse()
- else:
- cur_gen, newest_trans_id = self.get_generation_info()
-
- return cur_gen, newest_trans_id, changes
+ changes.reverse()
+ return (cur_generation, last_trans_id, changes)
def get_generation_info(self):
"""
@@ -638,53 +639,74 @@ class CouchDatabase(object):
"""
if self.batching and self.batch_generation:
return self.batch_generation
- # query a couch list function
- ddoc_path = ['_design', 'transactions', '_list', 'generation', 'log']
- info = self.json_from_resource(ddoc_path)
- return (info['generation'], info['transaction_id'])
+ rows = self._get_gen_docs(descending=True, limit=1)
+ if not rows:
+ return 0, ''
+ gen_doc = rows.pop()['doc']
+ return gen_doc[GENERATION_KEY], gen_doc[TRANSACTION_ID_KEY]
- def json_from_resource(self, ddoc_path, check_missing_ddoc=True,
- **kwargs):
+ def json_from_resource(self, doc_path, **kwargs):
"""
Get a resource from its path and get a doc's JSON using provided
- parameters, also checking for missing design docs by default.
+ parameters.
- :param ddoc_path: The path to resource.
- :type ddoc_path: [str]
- :param check_missing_ddoc: Raises info on what design doc is missing.
- :type check_missin_ddoc: bool
+ :param doc_path: The path to resource.
+ :type doc_path: [str]
:return: The request's data parsed from JSON to a dict.
:rtype: dict
-
- :raise MissingDesignDocError: Raised when tried to access a missing
- design document.
- :raise MissingDesignDocListFunctionError: Raised when trying to access
- a missing list function on a
- design document.
- :raise MissingDesignDocNamedViewError: Raised when trying to access a
- missing named view on a design
- document.
- :raise MissingDesignDocDeletedError: Raised when trying to access a
- deleted design document.
- :raise MissingDesignDocUnknownError: Raised when failed to access a
- design document for an yet
- unknown reason.
- """
- if ddoc_path is not None:
- resource = self._database.resource(*ddoc_path)
+ """
+ if doc_path is not None:
+ resource = self._database.resource(*doc_path)
else:
resource = self._database.resource()
- try:
- _, _, data = resource.get_json(**kwargs)
- return data
- except ResourceNotFound as e:
- if check_missing_ddoc:
- raise_missing_design_doc_error(e, ddoc_path)
- else:
- raise e
- except ServerError as e:
- raise_server_error(e, ddoc_path)
+ _, _, data = resource.get_json(**kwargs)
+ return data
+
+ def _allocate_new_generation(self, doc_id, transaction_id):
+ """
+ Allocate a new generation number for a document modification.
+
+ We need to allocate a new generation to this document modification by
+ creating a new gen doc. In order to avoid concurrent database updates
+ from allocating the same new generation, we will try to create the
+ document until we succeed, meaning that no other piece of code holds
+ the same generation number as ours.
+
+ The loop below would only be executed more than once if:
+
+ 1. there's more than one thread trying to modify the user's database,
+ and
+
+    2. the steps of getting the current generation and saving the gen
+       doc get interleaved across different threads (one of them will
+       succeed and the others will fail and try again).
+
+ Number 1 only happens when more than one user device is syncing at the
+    same time. Number 2 depends on a not-so-frequent coincidence of
+ code execution.
+
+ Also, in the race between threads for a generation number there's
+    always one thread that wins, so if there are N threads in the race, the
+ expected number of repetitions of the loop for each thread would be
+ N/2. If N is equal to the number of devices that the user has, the
+ number of possible repetitions of the loop should always be low.
+ """
+ while True:
+ try:
+ # add the gen document
+ gen, _ = self.get_generation_info()
+ new_gen = gen + 1
+ gen_doc = {
+ '_id': _get_gen_doc_id(new_gen),
+ GENERATION_KEY: new_gen,
+ DOC_ID_KEY: doc_id,
+ TRANSACTION_ID_KEY: transaction_id,
+ }
+ self._database.save(gen_doc)
+ break # succeeded allocating a new generation, proceed
+ except ResourceConflict:
+ pass # try again!
def save_document(self, old_doc, doc, transaction_id):
"""
@@ -701,19 +723,6 @@ class CouchDatabase(object):
:raise RevisionConflict: Raised when trying to update a document but
couch revisions mismatch.
- :raise MissingDesignDocError: Raised when tried to access a missing
- design document.
- :raise MissingDesignDocListFunctionError: Raised when trying to access
- a missing list function on a
- design document.
- :raise MissingDesignDocNamedViewError: Raised when trying to access a
- missing named view on a design
- document.
- :raise MissingDesignDocDeletedError: Raised when trying to access a
- deleted design document.
- :raise MissingDesignDocUnknownError: Raised when failed to access a
- design document for an yet
- unknown reason.
"""
attachments = {} # we save content and conflicts as attachments
parts = [] # and we put it using couch's multipart PUT
@@ -726,6 +735,7 @@ class CouchDatabase(object):
'length': len(content),
}
parts.append(content)
+
# save conflicts as attachment
if doc.has_conflicts is True:
conflicts = json.dumps(
@@ -737,21 +747,11 @@ class CouchDatabase(object):
'length': len(conflicts),
}
parts.append(conflicts)
- # store old transactions, if any
- transactions = old_doc.transactions[:] if old_doc is not None else []
- # create a new transaction id and timestamp it so the transaction log
- # is consistent when querying the database.
- transactions.append(
- # here we store milliseconds to keep consistent with javascript
- # Date.prototype.getTime() which was used before inside a couchdb
- # update handler.
- (int(time.time() * 1000),
- transaction_id))
+
# build the couch document
couch_doc = {
'_id': doc.doc_id,
'u1db_rev': doc.rev,
- 'u1db_transactions': transactions,
'_attachments': attachments,
}
# if we are updating a doc we have to add the couch doc revision
@@ -761,7 +761,19 @@ class CouchDatabase(object):
if not self.batching:
buf = StringIO()
envelope = MultipartWriter(buf)
- envelope.add('application/json', json.dumps(couch_doc))
+ # the order in which attachments are described inside the
+ # serialization of the couch document must match the order in
+ # which they are actually written in the multipart structure.
+            # Because of that, we use `sort_keys=True` in the json
+ # serialization (so "u1db_conflicts" comes before
+ # "u1db_content" on the couch document attachments
+ # description), and also reverse the order of the parts before
+ # writing them, so the "conflict" part is written before the
+ # "content" part.
+ envelope.add(
+ 'application/json',
+ json.dumps(couch_doc, sort_keys=True))
+ parts.reverse()
for part in parts:
envelope.add('application/octet-stream', part)
envelope.close()
@@ -778,12 +790,14 @@ class CouchDatabase(object):
del attachment['follows']
del attachment['length']
             index = 0 if name == 'u1db_content' else 1
- attachment['data'] = binascii.b2a_base64(parts[index]).strip()
+ attachment['data'] = binascii.b2a_base64(
+ parts[index]).strip()
couch_doc['_attachments'] = attachments
self.batch_docs[doc.doc_id] = couch_doc
last_gen, last_trans_id = self.batch_generation
self.batch_generation = (last_gen + 1, transaction_id)
- return transactions[-1][1]
+
+ self._allocate_new_generation(doc.doc_id, transaction_id)
def _new_resource(self, *path):
"""
diff --git a/common/src/leap/soledad/common/couch/errors.py b/common/src/leap/soledad/common/couch/errors.py
deleted file mode 100644
index 9b287c76..00000000
--- a/common/src/leap/soledad/common/couch/errors.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# -*- coding: utf-8 -*-
-# errors.py
-# Copyright (C) 2015 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from leap.soledad.common.errors import SoledadError, BackendNotReadyError
-from leap.soledad.common.errors import register_exception
-
-"""
-Specific errors that can be raised by CouchDatabase.
-"""
-
-
-@register_exception
-class MissingDesignDocError(BackendNotReadyError):
-
- """
- Raised when trying to access a missing couch design document.
- """
-
- wire_description = "missing design document"
- status = 500
-
-
-@register_exception
-class MissingDesignDocNamedViewError(SoledadError):
-
- """
- Raised when trying to access a missing named view on a couch design
- document.
- """
-
- wire_description = "missing design document named function"
- status = 500
-
-
-@register_exception
-class MissingDesignDocListFunctionError(SoledadError):
-
- """
- Raised when trying to access a missing list function on a couch design
- document.
- """
-
- wire_description = "missing design document list function"
- status = 500
-
-
-@register_exception
-class MissingDesignDocDeletedError(SoledadError):
-
- """
- Raised when trying to access a deleted couch design document.
- """
-
- wire_description = "design document was deleted"
- status = 500
-
-
-@register_exception
-class DesignDocUnknownError(SoledadError):
-
- """
- Raised when trying to access a couch design document and getting an
- unknown error.
- """
-
- wire_description = "missing design document unknown error"
- status = 500
-
-
-def raise_missing_design_doc_error(exc, ddoc_path):
- """
- Raise an appropriate exception when catching a ResourceNotFound when
- accessing a design document.
-
- :param exc: The exception cought.
- :type exc: ResourceNotFound
- :param ddoc_path: A list representing the requested path.
- :type ddoc_path: list
-
- :raise MissingDesignDocError: Raised when tried to access a missing design
- document.
- :raise MissingDesignDocListFunctionError: Raised when trying to access a
- missing list function on a
- design document.
- :raise MissingDesignDocNamedViewError: Raised when trying to access a
- missing named view on a design
- document.
- :raise MissingDesignDocDeletedError: Raised when trying to access a
- deleted design document.
- :raise MissingDesignDocUnknownError: Raised when failed to access a design
- document for an yet unknown reason.
- """
- path = "".join(ddoc_path)
- if exc.message[1] == 'missing':
- raise MissingDesignDocError(path)
- elif exc.message[1] == 'missing function' or \
- exc.message[1].startswith('missing lists function'):
- raise MissingDesignDocListFunctionError(path)
- elif exc.message[1] == 'missing_named_view':
- raise MissingDesignDocNamedViewError(path)
- elif exc.message[1] == 'deleted':
- raise MissingDesignDocDeletedError(path)
- # other errors are unknown for now
- raise DesignDocUnknownError("%s: %s" % (path, str(exc.message)))
-
-
-def raise_server_error(exc, ddoc_path):
- """
- Raise an appropriate exception when catching a ServerError when
- accessing a design document.
-
- :param exc: The exception cought.
- :type exc: ResourceNotFound
- :param ddoc_path: A list representing the requested path.
- :type ddoc_path: list
-
- :raise MissingDesignDocListFunctionError: Raised when trying to access a
- missing list function on a
- design document.
- :raise MissingDesignDocUnknownError: Raised when failed to access a design
- document for an yet unknown reason.
- """
- path = "".join(ddoc_path)
- msg = exc.message[1][0]
- if msg == 'unnamed_error':
- raise MissingDesignDocListFunctionError(path)
- elif msg == 'TypeError':
- if 'point is undefined' in exc.message[1][1]:
- raise MissingDesignDocListFunctionError
- # other errors are unknown for now
- raise DesignDocUnknownError("%s: %s" % (path, str(exc.message)))
diff --git a/common/src/leap/soledad/common/couch/state.py b/common/src/leap/soledad/common/couch/state.py
index 9b40a264..523ac0b0 100644
--- a/common/src/leap/soledad/common/couch/state.py
+++ b/common/src/leap/soledad/common/couch/state.py
@@ -17,20 +17,26 @@
"""
Server state using CouchDatabase as backend.
"""
-import logging
+import couchdb
import re
import time
from urlparse import urljoin
from hashlib import sha512
+from leap.soledad.common.log import getLogger
from leap.soledad.common.couch import CouchDatabase
from leap.soledad.common.couch import couch_server
+from leap.soledad.common.couch import CONFIG_DOC_ID
+from leap.soledad.common.couch import SCHEMA_VERSION
+from leap.soledad.common.couch import SCHEMA_VERSION_KEY
from leap.soledad.common.command import exec_validated_cmd
from leap.soledad.common.l2db.remote.server_state import ServerState
from leap.soledad.common.l2db.errors import Unauthorized
+from leap.soledad.common.errors import WrongCouchSchemaVersionError
+from leap.soledad.common.errors import MissingCouchConfigDocumentError
-logger = logging.getLogger(__name__)
+logger = getLogger(__name__)
def is_db_name_valid(name):
@@ -59,15 +65,47 @@ class CouchServerState(ServerState):
TOKENS_TYPE_DEF = "Token"
TOKENS_USER_ID_KEY = "user_id"
- def __init__(self, couch_url, create_cmd=None):
+ def __init__(self, couch_url, create_cmd=None,
+ check_schema_versions=False):
"""
Initialize the couch server state.
:param couch_url: The URL for the couch database.
:type couch_url: str
+ :param create_cmd: Command to be executed for user db creation. It will
+                           receive a properly sanitized parameter with the
+                           user db name and should access CouchDB with the
+                           necessary privileges, which the server lacks for
+                           security reasons.
+ :type create_cmd: str
+        :param check_schema_versions: Whether to check the couch schema
+                                      version of user dbs. Defaults to False
+                                      because this check is only intended to
+                                      run once, during server start-up.
+ :type check_schema_versions: bool
"""
self.couch_url = couch_url
self.create_cmd = create_cmd
+ if check_schema_versions:
+ self._check_schema_versions()
+
+ def _check_schema_versions(self):
+ """
+ Check that all user databases use the correct couch schema.
+ """
+ server = couchdb.client.Server(self.couch_url)
+ for dbname in server:
+ if not dbname.startswith('user-'):
+ continue
+ db = server[dbname]
+
+            # if a config doc exists, check its schema version; otherwise
+            # the db must contain no documents at all
+ config_doc = db.get(CONFIG_DOC_ID)
+ if config_doc:
+ if config_doc[SCHEMA_VERSION_KEY] != SCHEMA_VERSION:
+ raise WrongCouchSchemaVersionError(dbname)
+ else:
+ result = db.view('_all_docs', limit=1)
+ if result.total_rows != 0:
+ raise MissingCouchConfigDocumentError(dbname)
def open_database(self, dbname):
"""
@@ -80,7 +118,7 @@ class CouchServerState(ServerState):
:rtype: SoledadBackend
"""
url = urljoin(self.couch_url, dbname)
- db = CouchDatabase.open_database(url, create=False, ensure_ddocs=False)
+ db = CouchDatabase.open_database(url, create=False)
return db
def ensure_database(self, dbname):
diff --git a/common/src/leap/soledad/common/ddocs/README.txt b/common/src/leap/soledad/common/ddocs/README.txt
deleted file mode 100644
index 5569d929..00000000
--- a/common/src/leap/soledad/common/ddocs/README.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-This directory holds a folder structure containing javascript files that
-represent the design documents needed by the CouchDB U1DB backend. These files
-are compiled into the `../ddocs.py` file by setuptools when creating the
-source distribution.
-
-The following table depicts the U1DB CouchDB backend method and the URI that
-is queried to obtain/update data from/to the server.
-
- +----------------------------------+------------------------------------------------------------------+
- | u1db backend method | URI |
- |----------------------------------+------------------------------------------------------------------|
- | _get_generation | _design/transactions/_list/generation/log |
- | _get_generation_info | _design/transactions/_list/generation/log |
- | _get_trans_id_for_gen | _design/transactions/_list/trans_id_for_gen/log |
- | _get_transaction_log | _design/transactions/_view/log |
- | _get_doc (*) | _design/docs/_view/get?key=<doc_id> |
- | _has_conflicts | _design/docs/_view/get?key=<doc_id> |
- | get_all_docs | _design/docs/_view/get |
- | _put_doc | _design/docs/_update/put/<doc_id> |
- | _whats_changed | _design/transactions/_list/whats_changed/log?old_gen=<gen> |
- | _get_conflicts (*) | _design/docs/_view/conflicts?key=<doc_id> |
- | _get_replica_gen_and_trans_id | _design/syncs/_view/log?other_replica_uid=<uid> |
- | _do_set_replica_gen_and_trans_id | _design/syncs/_update/put/u1db_sync_log |
- | _add_conflict | _design/docs/_update/add_conflict/<doc_id> |
- | _delete_conflicts | _design/docs/_update/delete_conflicts/<doc_id>?doc_rev=<doc_rev> |
- | list_indexes | not implemented |
- | _get_index_definition | not implemented |
- | delete_index | not implemented |
- | _get_indexed_fields | not implemented |
- | _put_and_update_indexes | not implemented |
- +----------------------------------+------------------------------------------------------------------+
-
-(*) These methods also request CouchDB document attachments that store U1DB
- document contents.
diff --git a/common/src/leap/soledad/common/ddocs/docs/views/get/map.js b/common/src/leap/soledad/common/ddocs/docs/views/get/map.js
deleted file mode 100644
index ae08d9e9..00000000
--- a/common/src/leap/soledad/common/ddocs/docs/views/get/map.js
+++ /dev/null
@@ -1,20 +0,0 @@
-function(doc) {
- if (doc.u1db_rev) {
- var is_tombstone = true;
- var has_conflicts = false;
- if (doc._attachments) {
- if (doc._attachments.u1db_content)
- is_tombstone = false;
- if (doc._attachments.u1db_conflicts)
- has_conflicts = true;
- }
- emit(doc._id,
- {
- "couch_rev": doc._rev,
- "u1db_rev": doc.u1db_rev,
- "is_tombstone": is_tombstone,
- "has_conflicts": has_conflicts,
- }
- );
- }
-}
diff --git a/common/src/leap/soledad/common/ddocs/syncs/updates/state.js b/common/src/leap/soledad/common/ddocs/syncs/updates/state.js
deleted file mode 100644
index d62aeb40..00000000
--- a/common/src/leap/soledad/common/ddocs/syncs/updates/state.js
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * This update handler stores information about ongoing synchronization
- * attempts from distinct source replicas.
- *
- * Normally, u1db synchronization occurs during one POST request. In order to
- * split that into many serial POST requests, we store the state of each sync
- * in the server, using a document with id 'u1db_sync_state'. To identify
- * each sync attempt, we use a sync_id sent by the client. If we ever receive
- * a new sync_id, we trash current data for that source replica and start
- * over.
- *
- * We expect the following in the document body:
- *
- * {
- * 'source_replica_uid': '<source_replica_uid>',
- * 'sync_id': '<sync_id>',
- * 'seen_ids': [['<doc_id>', <at_gen>], ...], // optional
- * 'changes_to_return': [ // optional
- * 'gen': <gen>,
- * 'trans_id': '<trans_id>',
- * 'changes_to_return': [[<doc_id>', <gen>, '<trans_id>'], ...]
- * ],
- * }
- *
- * The format of the final document stored on server is:
- *
- * {
- * '_id': '<str>',
- * '_rev' '<str>',
- * 'ongoing_syncs': {
- * '<source_replica_uid>': {
- * 'sync_id': '<sync_id>',
- * 'seen_ids': [['<doc_id>', <at_gen>[, ...],
- * 'changes_to_return': {
- * 'gen': <gen>,
- * 'trans_id': '<trans_id>',
- * 'changes_to_return': [
- * ['<doc_id>', <gen>, '<trans_id>'],
- * ...,
- * ],
- * },
- * },
- * ... // info about other source replicas here
- * }
- * }
- */
-function(doc, req) {
-
- // prevent updates to alien documents
- if (doc != null && doc['_id'] != 'u1db_sync_state')
- return [null, 'invalid data'];
-
- // create the document if it doesn't exist
- if (!doc)
- doc = {
- '_id': 'u1db_sync_state',
- 'ongoing_syncs': {},
- };
-
- // parse and validate incoming data
- var body = JSON.parse(req.body);
- if (body['source_replica_uid'] == null)
- return [null, 'invalid data'];
- var source_replica_uid = body['source_replica_uid'];
-
- if (body['sync_id'] == null)
- return [null, 'invalid data'];
- var sync_id = body['sync_id'];
-
- // trash outdated sync data for that replica if that exists
- if (doc['ongoing_syncs'][source_replica_uid] != null &&
- doc['ongoing_syncs'][source_replica_uid]['sync_id'] != sync_id)
- delete doc['ongoing_syncs'][source_replica_uid];
-
- // create an entry for that source replica
- if (doc['ongoing_syncs'][source_replica_uid] == null)
- doc['ongoing_syncs'][source_replica_uid] = {
- 'sync_id': sync_id,
- 'seen_ids': {},
- 'changes_to_return': null,
- };
-
- // incoming meta-data values should be exclusive, so we count how many
- // arrived and deny to accomplish the transaction if the count is high.
- var incoming_values = 0;
- var info = doc['ongoing_syncs'][source_replica_uid]
-
- // add incoming seen id
- if ('seen_id' in body) {
- info['seen_ids'][body['seen_id'][0]] = body['seen_id'][1];
- incoming_values += 1;
- }
-
- // add incoming changes_to_return
- if ('changes_to_return' in body) {
- info['changes_to_return'] = body['changes_to_return'];
- incoming_values += 1;
- }
-
- if (incoming_values != 1)
- return [null, 'invalid data'];
-
- return [doc, 'ok'];
-}
-
diff --git a/common/src/leap/soledad/common/ddocs/syncs/views/changes_to_return/map.js b/common/src/leap/soledad/common/ddocs/syncs/views/changes_to_return/map.js
deleted file mode 100644
index 94b7e767..00000000
--- a/common/src/leap/soledad/common/ddocs/syncs/views/changes_to_return/map.js
+++ /dev/null
@@ -1,20 +0,0 @@
-function(doc) {
- if (doc['_id'] == 'u1db_sync_state' && doc['ongoing_syncs'] != null)
- for (var source_replica_uid in doc['ongoing_syncs']) {
- var changes = doc['ongoing_syncs'][source_replica_uid]['changes_to_return'];
- var sync_id = doc['ongoing_syncs'][source_replica_uid]['sync_id'];
- if (changes == null)
- emit([source_replica_uid, sync_id, 0], null);
- else if (changes.length == 0)
- emit([source_replica_uid, sync_id, 0], []);
- else
- for (var i = 0; i < changes['changes_to_return'].length; i++)
- emit(
- [source_replica_uid, sync_id, i],
- {
- 'gen': changes['gen'],
- 'trans_id': changes['trans_id'],
- 'next_change_to_return': changes['changes_to_return'][i],
- });
- }
-}
diff --git a/common/src/leap/soledad/common/ddocs/syncs/views/seen_ids/map.js b/common/src/leap/soledad/common/ddocs/syncs/views/seen_ids/map.js
deleted file mode 100644
index 16118e88..00000000
--- a/common/src/leap/soledad/common/ddocs/syncs/views/seen_ids/map.js
+++ /dev/null
@@ -1,11 +0,0 @@
-function(doc) {
- if (doc['_id'] == 'u1db_sync_state' && doc['ongoing_syncs'] != null)
- for (var source_replica_uid in doc['ongoing_syncs']) {
- var sync_id = doc['ongoing_syncs'][source_replica_uid]['sync_id'];
- emit(
- [source_replica_uid, sync_id],
- {
- 'seen_ids': doc['ongoing_syncs'][source_replica_uid]['seen_ids'],
- });
- }
-}
diff --git a/common/src/leap/soledad/common/ddocs/syncs/views/state/map.js b/common/src/leap/soledad/common/ddocs/syncs/views/state/map.js
deleted file mode 100644
index e88c6ebb..00000000
--- a/common/src/leap/soledad/common/ddocs/syncs/views/state/map.js
+++ /dev/null
@@ -1,17 +0,0 @@
-function(doc) {
- if (doc['_id'] == 'u1db_sync_state' && doc['ongoing_syncs'] != null)
- for (var source_replica_uid in doc['ongoing_syncs']) {
- var changes = doc['ongoing_syncs'][source_replica_uid]['changes_to_return'];
- var sync_id = doc['ongoing_syncs'][source_replica_uid]['sync_id'];
- if (changes == null)
- emit([source_replica_uid, sync_id], null);
- else
- emit(
- [source_replica_uid, sync_id],
- {
- 'gen': changes['gen'],
- 'trans_id': changes['trans_id'],
- 'number_of_changes': changes['changes_to_return'].length
- });
- }
-}
diff --git a/common/src/leap/soledad/common/ddocs/transactions/lists/generation.js b/common/src/leap/soledad/common/ddocs/transactions/lists/generation.js
deleted file mode 100644
index dbdfff0d..00000000
--- a/common/src/leap/soledad/common/ddocs/transactions/lists/generation.js
+++ /dev/null
@@ -1,20 +0,0 @@
-function(head, req) {
- var row;
- var rows=[];
- // fetch all rows
- while(row = getRow()) {
- rows.push(row);
- }
- if (rows.length > 0)
- send(JSON.stringify({
- "generation": rows.length,
- "doc_id": rows[rows.length-1]['id'],
- "transaction_id": rows[rows.length-1]['value']
- }));
- else
- send(JSON.stringify({
- "generation": 0,
- "doc_id": "",
- "transaction_id": "",
- }));
-}
diff --git a/common/src/leap/soledad/common/ddocs/transactions/lists/trans_id_for_gen.js b/common/src/leap/soledad/common/ddocs/transactions/lists/trans_id_for_gen.js
deleted file mode 100644
index 2ec91794..00000000
--- a/common/src/leap/soledad/common/ddocs/transactions/lists/trans_id_for_gen.js
+++ /dev/null
@@ -1,19 +0,0 @@
-function(head, req) {
- var row;
- var rows=[];
- var i = 1;
- var gen = 1;
- if (req.query.gen)
- gen = parseInt(req.query['gen']);
- // fetch all rows
- while(row = getRow())
- rows.push(row);
- if (gen <= rows.length)
- send(JSON.stringify({
- "generation": gen,
- "doc_id": rows[gen-1]['id'],
- "transaction_id": rows[gen-1]['value'],
- }));
- else
- send('{}');
-}
diff --git a/common/src/leap/soledad/common/ddocs/transactions/lists/whats_changed.js b/common/src/leap/soledad/common/ddocs/transactions/lists/whats_changed.js
deleted file mode 100644
index b35cdf51..00000000
--- a/common/src/leap/soledad/common/ddocs/transactions/lists/whats_changed.js
+++ /dev/null
@@ -1,22 +0,0 @@
-function(head, req) {
- var row;
- var gen = 1;
- var old_gen = 0;
- if (req.query.old_gen)
- old_gen = parseInt(req.query['old_gen']);
- send('{"transactions":[\n');
- // fetch all rows
- while(row = getRow()) {
- if (gen > old_gen) {
- if (gen > old_gen+1)
- send(',\n');
- send(JSON.stringify({
- "generation": gen,
- "doc_id": row["id"],
- "transaction_id": row["value"]
- }));
- }
- gen++;
- }
- send('\n]}');
-}
diff --git a/common/src/leap/soledad/common/ddocs/transactions/views/log/map.js b/common/src/leap/soledad/common/ddocs/transactions/views/log/map.js
deleted file mode 100644
index 94ef63ca..00000000
--- a/common/src/leap/soledad/common/ddocs/transactions/views/log/map.js
+++ /dev/null
@@ -1,7 +0,0 @@
-function(doc) {
- if (doc.u1db_transactions)
- doc.u1db_transactions.forEach(function(t) {
- emit(t[0], // use timestamp as key so the results are ordered
- t[1]); // value is the transaction_id
- });
-}
diff --git a/common/src/leap/soledad/common/errors.py b/common/src/leap/soledad/common/errors.py
index dec871c9..d543a3de 100644
--- a/common/src/leap/soledad/common/errors.py
+++ b/common/src/leap/soledad/common/errors.py
@@ -77,7 +77,6 @@ http_errors.ERROR_STATUSES = set(
class InvalidURLError(Exception):
-
"""
Exception raised when Soledad encounters a malformed URL.
"""
@@ -90,3 +89,15 @@ class BackendNotReadyError(SoledadError):
"""
wire_description = "backend not ready"
status = 500
+
+
+class WrongCouchSchemaVersionError(SoledadError):
+ """
+ Raised in case there is a user database with wrong couch schema version.
+ """
+
+
+class MissingCouchConfigDocumentError(SoledadError):
+ """
+ Raised if a database has documents but lacks the couch config document.
+ """
diff --git a/common/src/leap/soledad/common/l2db/__init__.py b/common/src/leap/soledad/common/l2db/__init__.py
index c0bd15fe..568897c4 100644
--- a/common/src/leap/soledad/common/l2db/__init__.py
+++ b/common/src/leap/soledad/common/l2db/__init__.py
@@ -16,10 +16,7 @@
"""L2DB"""
-try:
- import simplejson as json
-except ImportError:
- import json # noqa
+import json
from leap.soledad.common.l2db.errors import InvalidJSON, InvalidContent
diff --git a/common/src/leap/soledad/common/l2db/backends/__init__.py b/common/src/leap/soledad/common/l2db/backends/__init__.py
index 922daafd..c731c3d3 100644
--- a/common/src/leap/soledad/common/l2db/backends/__init__.py
+++ b/common/src/leap/soledad/common/l2db/backends/__init__.py
@@ -17,10 +17,7 @@
"""Abstract classes and common implementations for the backends."""
import re
-try:
- import simplejson as json
-except ImportError:
- import json # noqa
+import json
import uuid
from leap.soledad.common import l2db
diff --git a/common/src/leap/soledad/common/l2db/backends/inmemory.py b/common/src/leap/soledad/common/l2db/backends/inmemory.py
index 06a934a6..6fd251af 100644
--- a/common/src/leap/soledad/common/l2db/backends/inmemory.py
+++ b/common/src/leap/soledad/common/l2db/backends/inmemory.py
@@ -16,10 +16,7 @@
"""The in-memory Database class for U1DB."""
-try:
- import simplejson as json
-except ImportError:
- import json # noqa
+import json
from leap.soledad.common.l2db import (
Document, errors,
diff --git a/common/src/leap/soledad/common/l2db/backends/sqlite_backend.py b/common/src/leap/soledad/common/l2db/backends/sqlite_backend.py
index ba273039..d73c0d16 100644
--- a/common/src/leap/soledad/common/l2db/backends/sqlite_backend.py
+++ b/common/src/leap/soledad/common/l2db/backends/sqlite_backend.py
@@ -21,17 +21,14 @@ A L2DB implementation that uses SQLite as its persistence layer.
import errno
import os
-try:
- import simplejson as json
-except ImportError:
- import json # noqa
-from sqlite3 import dbapi2
+import json
import sys
import time
import uuid
-
import pkg_resources
+from sqlite3 import dbapi2
+
from leap.soledad.common.l2db.backends import CommonBackend, CommonSyncTarget
from leap.soledad.common.l2db import (
Document, errors,
diff --git a/common/src/leap/soledad/common/l2db/remote/basic_auth_middleware.py b/common/src/leap/soledad/common/l2db/remote/basic_auth_middleware.py
index a2cbff62..96d0d872 100644
--- a/common/src/leap/soledad/common/l2db/remote/basic_auth_middleware.py
+++ b/common/src/leap/soledad/common/l2db/remote/basic_auth_middleware.py
@@ -15,10 +15,8 @@
# along with u1db. If not, see <http://www.gnu.org/licenses/>.
"""U1DB Basic Auth authorisation WSGI middleware."""
import httplib
-try:
- import simplejson as json
-except ImportError:
- import json # noqa
+import json
+
from wsgiref.util import shift_path_info
diff --git a/common/src/leap/soledad/common/l2db/remote/http_app.py b/common/src/leap/soledad/common/l2db/remote/http_app.py
index 65277bd1..5cf6645e 100644
--- a/common/src/leap/soledad/common/l2db/remote/http_app.py
+++ b/common/src/leap/soledad/common/l2db/remote/http_app.py
@@ -23,10 +23,7 @@ HTTP Application exposing U1DB.
import functools
import httplib
import inspect
-try:
- import simplejson as json
-except ImportError:
- import json # noqa
+import json
import sys
import urlparse
diff --git a/common/src/leap/soledad/common/l2db/remote/http_client.py b/common/src/leap/soledad/common/l2db/remote/http_client.py
index a65264b6..53363c0a 100644
--- a/common/src/leap/soledad/common/l2db/remote/http_client.py
+++ b/common/src/leap/soledad/common/l2db/remote/http_client.py
@@ -17,10 +17,7 @@
"""Base class to make requests to a remote HTTP server."""
import httplib
-try:
- import simplejson as json
-except ImportError:
- import json # noqa
+import json
import socket
import ssl
import sys
diff --git a/common/src/leap/soledad/common/l2db/remote/http_database.py b/common/src/leap/soledad/common/l2db/remote/http_database.py
index b2b48dee..7512379f 100644
--- a/common/src/leap/soledad/common/l2db/remote/http_database.py
+++ b/common/src/leap/soledad/common/l2db/remote/http_database.py
@@ -16,10 +16,7 @@
"""HTTPDatabase to access a remote db over the HTTP API."""
-try:
- import simplejson as json
-except ImportError:
- import json # noqa
+import json
import uuid
from leap.soledad.common.l2db import (
diff --git a/common/src/leap/soledad/common/l2db/remote/http_target.py b/common/src/leap/soledad/common/l2db/remote/http_target.py
index 7e7f366f..38804f01 100644
--- a/common/src/leap/soledad/common/l2db/remote/http_target.py
+++ b/common/src/leap/soledad/common/l2db/remote/http_target.py
@@ -16,10 +16,7 @@
"""SyncTarget API implementation to a remote HTTP server."""
-try:
- import simplejson as json
-except ImportError:
- import json # noqa
+import json
from leap.soledad.common.l2db import Document, SyncTarget
from leap.soledad.common.l2db.errors import BrokenSyncStream
diff --git a/common/src/leap/soledad/common/l2db/remote/server_state.py b/common/src/leap/soledad/common/l2db/remote/server_state.py
index f131e09e..e20b4679 100644
--- a/common/src/leap/soledad/common/l2db/remote/server_state.py
+++ b/common/src/leap/soledad/common/l2db/remote/server_state.py
@@ -15,8 +15,6 @@
# along with u1db. If not, see <http://www.gnu.org/licenses/>.
"""State for servers exposing a set of U1DB databases."""
-import os
-import errno
class ServerState(object):
diff --git a/common/src/leap/soledad/common/l2db/sync.py b/common/src/leap/soledad/common/l2db/sync.py
index c612629f..5e9b22f4 100644
--- a/common/src/leap/soledad/common/l2db/sync.py
+++ b/common/src/leap/soledad/common/l2db/sync.py
@@ -126,8 +126,8 @@ class Synchronizer(object):
target_last_known_gen, target_last_known_trans_id = 0, ''
else:
target_last_known_gen, target_last_known_trans_id = (
- self.source._get_replica_gen_and_trans_id( # nopep8
- self.target_replica_uid))
+ self.source._get_replica_gen_and_trans_id( # nopep8
+ self.target_replica_uid))
if not changes and target_last_known_gen == target_gen:
if target_trans_id != target_last_known_trans_id:
raise errors.InvalidTransactionId
diff --git a/common/src/leap/soledad/common/log.py b/common/src/leap/soledad/common/log.py
new file mode 100644
index 00000000..3f026045
--- /dev/null
+++ b/common/src/leap/soledad/common/log.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# log.py
+# Copyright (C) 2016 LEAP
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+"""
+This module centralizes logging facilities and allows for different
+behaviours, such as using the python logging module instead of the twisted
+logger, or printing logs to stdout, mainly for development purposes.
+"""
+
+
+import os
+import sys
+
+from twisted.logger import Logger
+from twisted.logger import textFileLogObserver
+
+
+def getLogger(*args, **kwargs):
+
+ if os.environ.get('SOLEDAD_USE_PYTHON_LOGGING'):
+ import logging
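+        # note: args and kwargs are ignored when falling back to the
+        # stdlib logging module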
+ return logging.getLogger(__name__)
+
+ if os.environ.get('SOLEDAD_LOG_TO_STDOUT'):
+        kwargs.update({'observer': textFileLogObserver(sys.stdout)})
+
+ return Logger(*args, **kwargs)
+
+
+__all__ = ['getLogger']
diff --git a/scripts/ddocs/update_design_docs.py b/scripts/ddocs/update_design_docs.py
deleted file mode 100644
index 281482b8..00000000
--- a/scripts/ddocs/update_design_docs.py
+++ /dev/null
@@ -1,170 +0,0 @@
-#!/usr/bin/python
-
-# This script updates Soledad's design documents in the session database and
-# all user databases with contents from the installed leap.soledad.common
-# package.
-
-import json
-import logging
-import argparse
-import re
-import threading
-import binascii
-
-from urlparse import urlparse
-from getpass import getpass
-from ConfigParser import ConfigParser
-
-from couchdb.client import Server
-from couchdb.http import Resource
-from couchdb.http import Session
-from couchdb.http import ResourceNotFound
-
-from leap.soledad.common import ddocs
-
-
-MAX_THREADS = 20
-DESIGN_DOCS = {
- '_design/docs': json.loads(binascii.a2b_base64(ddocs.docs)),
- '_design/syncs': json.loads(binascii.a2b_base64(ddocs.syncs)),
- '_design/transactions': json.loads(
- binascii.a2b_base64(ddocs.transactions)),
-}
-
-
-# create a logger
-logger = logging.getLogger(__name__)
-LOG_FORMAT = '%(asctime)s %(message)s'
-logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
-
-
-def _parse_args():
- parser = argparse.ArgumentParser()
- parser.add_argument('-u', dest='uuid', default=None, type=str,
- help='the UUID of the user')
- parser.add_argument('-t', dest='threads', default=MAX_THREADS, type=int,
- help='the number of parallel threads')
- return parser.parse_args()
-
-
-def _get_url():
- # get couch url
- cp = ConfigParser()
- cp.read('/etc/soledad/soledad-server.conf')
- url = urlparse(cp.get('soledad-server', 'couch_url'))
- # get admin password
- netloc = re.sub('^.*@', '', url.netloc)
- url = url._replace(netloc=netloc)
- password = getpass("Admin password for %s: " % url.geturl())
- return url._replace(netloc='admin:%s@%s' % (password, netloc))
-
-
-def _get_server(url):
- resource = Resource(
- url.geturl(), Session(retry_delays=[1, 2, 4, 8], timeout=10))
- return Server(url=resource)
-
-
-def _confirm(url):
- hidden_url = re.sub(
- 'http://(.*):.*@',
- 'http://\\1:xxxxx@',
- url.geturl())
-
- print """
- ==========
- ATTENTION!
- ==========
-
- This script will modify Soledad's shared and user databases in:
-
- %s
-
- This script does not make a backup of the couch db data, so make sure you
- have a copy or you may loose data.
- """ % hidden_url
- confirm = raw_input("Proceed (type uppercase YES)? ")
-
- if confirm != "YES":
- exit(1)
-
-
-#
-# Thread
-#
-
-class DBWorkerThread(threading.Thread):
-
- def __init__(self, server, dbname, db_idx, db_len, release_fun):
- threading.Thread.__init__(self)
- self._dbname = dbname
- self._cdb = server[self._dbname]
- self._db_idx = db_idx
- self._db_len = db_len
- self._release_fun = release_fun
-
- def run(self):
-
- logger.info(
- "(%d/%d) Updating db %s."
- % (self._db_idx, self._db_len, self._dbname))
-
- for doc_id in DESIGN_DOCS:
- try:
- doc = self._cdb[doc_id]
- except ResourceNotFound:
- doc = {'_id': doc_id}
- for key in ['lists', 'views', 'updates']:
- if key in DESIGN_DOCS[doc_id]:
- doc[key] = DESIGN_DOCS[doc_id][key]
- self._cdb.save(doc)
-
- # release the semaphore
- self._release_fun()
-
-
-def _launch_update_design_docs_thread(
- server, dbname, db_idx, db_len, semaphore_pool):
- semaphore_pool.acquire() # wait for an available working slot
- thread = DBWorkerThread(
- server, dbname, db_idx, db_len, semaphore_pool.release)
- thread.daemon = True
- thread.start()
- return thread
-
-
-def _update_design_docs(args, server):
-
- # find the actual databases to be updated
- dbs = []
- if args.uuid:
- dbs.append('user-%s' % args.uuid)
- else:
- for dbname in server:
- if dbname.startswith('user-') or dbname == 'shared':
- dbs.append(dbname)
- else:
- logger.info("Skipping db %s." % dbname)
-
- db_idx = 0
- db_len = len(dbs)
- semaphore_pool = threading.BoundedSemaphore(value=args.threads)
- threads = []
-
- # launch the update
- for db in dbs:
- db_idx += 1
- threads.append(
- _launch_update_design_docs_thread(
- server, db, db_idx, db_len, semaphore_pool))
-
- # wait for all threads to finish
- map(lambda thread: thread.join(), threads)
-
-
-if __name__ == "__main__":
- args = _parse_args()
- url = _get_url()
- _confirm(url)
- server = _get_server(url)
- _update_design_docs(args, server)
diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile
index 915508ea..21764d84 100644
--- a/scripts/docker/Dockerfile
+++ b/scripts/docker/Dockerfile
@@ -1,51 +1,32 @@
# start with a fresh debian image
-FROM debian
-
-# expose soledad server port in case we want to run a server container
-EXPOSE 2424
-
-# install dependencies from debian repos
-COPY files/apt/leap.list /etc/apt/sources.list.d/
-
-RUN apt-get update
-RUN apt-get -y --force-yes install leap-archive-keyring
+# we use backports because of libsqlcipher-dev
+FROM debian:jessie-backports
RUN apt-get update
RUN apt-get -y install git
-RUN apt-get -y install vim
-RUN apt-get -y install python-ipdb
-# install python deps
+# needed to build python twisted module
RUN apt-get -y install libpython2.7-dev
-RUN apt-get -y install libffi-dev
+# needed to build python cryptography module
RUN apt-get -y install libssl-dev
-RUN apt-get -y install libzmq3-dev
-RUN apt-get -y install python-pip
-RUN apt-get -y install couchdb
-RUN apt-get -y install python-srp
-RUN apt-get -y install python-scrypt
-RUN apt-get -y install leap-keymanager
-RUN apt-get -y install python-tz
+RUN apt-get -y install libffi-dev
+# needed to build pysqlcipher
+RUN apt-get -y install libsqlcipher-dev
+# needed to support keymanager
+RUN apt-get -y install libsqlite3-dev
+# install pip and tox
+RUN apt-get -y install python-pip
RUN pip install -U pip
-RUN pip install psutil
-
-# install soledad-perf deps
-RUN pip install klein
-RUN apt-get -y install curl
-RUN apt-get -y install httperf
+RUN pip install tox
# clone repositories
-ENV BASEURL "https://github.com/leapcode"
-ENV VARDIR "/var/local"
-ENV REPOS "soledad leap_pycommon soledad-perf"
-RUN for repo in ${REPOS}; do git clone ${BASEURL}/${repo}.git /var/local/${repo}; done
+RUN mkdir -p /builds/leap
+RUN git clone -b develop https://0xacab.org/leap/soledad.git /builds/leap/soledad
-# copy over files to help setup the environment and run soledad
-RUN mkdir -p /usr/local/soledad
-
-COPY files/build/install-deps-from-repos.sh /usr/local/soledad/
-RUN /usr/local/soledad/install-deps-from-repos.sh
+# use tox to install everything needed to run tests
+RUN cd /builds/leap/soledad/testing && tox -v -r --notest
+RUN mkdir -p /usr/local/soledad
COPY files/bin/ /usr/local/soledad/
diff --git a/scripts/docker/Makefile b/scripts/docker/Makefile
index 4fa2e264..7050526a 100644
--- a/scripts/docker/Makefile
+++ b/scripts/docker/Makefile
@@ -16,7 +16,7 @@
# Some configurations you might override when calling this makefile #
#####################################################################
-IMAGE_NAME ?= leap/soledad:1.0
+IMAGE_NAME ?= leapcode/soledad:latest
SOLEDAD_REMOTE ?= https://0xacab.org/leap/soledad.git
SOLEDAD_BRANCH ?= develop
SOLEDAD_PRELOAD_NUM ?= 100
@@ -27,11 +27,14 @@ MEMORY ?= 512m
# Docker image generation (main make target) #
##############################################
-all: image
+all: soledad-image couchdb-image
-image:
+soledad-image:
docker build -t $(IMAGE_NAME) .
+couchdb-image:
+ (cd couchdb/ && make)
+
##################################################
# Run a Soledad Server inside a docker container #
##################################################
@@ -69,23 +72,37 @@ run-client-bootstrap:
/usr/local/soledad/run-client-bootstrap.sh
#################################################
-# Run all trial tests inside a docker container #
+# Run all tests inside a docker container #
#################################################
-run-trial:
+run-tox:
+ name=$$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 5 | head -n 1); \
+ docker run -d --name $${name} leap/couchdb; \
docker run -t -i \
--memory="$(MEMORY)" \
--env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \
--env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \
+ --env="COUCH_URL=http://$${name}:5984" \
+ --link $${name} \
$(IMAGE_NAME) \
- /usr/local/soledad/run-trial.sh
+ /usr/local/soledad/run-tox.sh
############################################
# Performance tests and graphic generation #
############################################
-run-perf-test:
- helper/run-test.sh perf
+run-perf:
+ name=$$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 5 | head -n 1); \
+ docker run -d --name $${name} leap/couchdb; \
+ docker run -t -i \
+ --memory="$(MEMORY)" \
+ --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \
+ --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \
+ --env="SOLEDAD_PRELOAD_NUM=$(SOLEDAD_PRELOAD_NUM)" \
+ --env="COUCH_URL=http://$${name}:5984" \
+ --link $${name} \
+ $(IMAGE_NAME) \
+ /usr/local/soledad/run-perf.sh
run-client-perf:
@if [ -z "$(CONTAINER_ID_FILE)" ]; then \
@@ -123,7 +140,7 @@ cp-perf-result:
# Other helper targets #
########################
-run-shell: image
+run-shell: soledad-image
docker run -t -i \
--memory="$(MEMORY)" \
$(IMAGE_NAME) \
diff --git a/scripts/docker/README.md b/scripts/docker/README.md
index c4d7ac94..97b39f87 100644
--- a/scripts/docker/README.md
+++ b/scripts/docker/README.md
@@ -11,7 +11,20 @@ Check the `Dockerfile` for the steps for creating the docker image.
Check the `Makefile` for the rules for running containers.
-Check the `helper/` directory for scripts that help running tests.
+
+Installation
+------------
+
+1. Install docker for your system: https://docs.docker.com/
+2. Build images by running `make`
+3. Execute `make run-tox` and `make run-perf` to run tox tests and perf tests,
+   respectively.
+4. You may want to pass some variables to the `make` command to control
+ parameters of execution, for example:
+
+ make run-perf SOLEDAD_PRELOAD_NUM=500
+
+ See more variables below.
Environment variables for docker containers
diff --git a/scripts/docker/TODO b/scripts/docker/TODO
index 5185d754..90597637 100644
--- a/scripts/docker/TODO
+++ b/scripts/docker/TODO
@@ -1 +1,5 @@
- limit resources of containers (mem and cpu)
+- allow running couchdb on another container
+- use a config file to get defaults for running tests
+- use the /builds directory as base of git repo
+- save the test state to a directory to make it reproducible
diff --git a/scripts/docker/couchdb/Dockerfile b/scripts/docker/couchdb/Dockerfile
new file mode 100644
index 00000000..03448da5
--- /dev/null
+++ b/scripts/docker/couchdb/Dockerfile
@@ -0,0 +1,3 @@
+FROM couchdb:latest
+
+COPY local.ini /usr/local/etc/couchdb/
diff --git a/scripts/docker/couchdb/Makefile b/scripts/docker/couchdb/Makefile
new file mode 100644
index 00000000..cf3ac966
--- /dev/null
+++ b/scripts/docker/couchdb/Makefile
@@ -0,0 +1,4 @@
+IMAGE_NAME ?= leap/couchdb
+
+image:
+ docker build -t $(IMAGE_NAME) .
diff --git a/scripts/docker/couchdb/README.rst b/scripts/docker/couchdb/README.rst
new file mode 100644
index 00000000..31a791a8
--- /dev/null
+++ b/scripts/docker/couchdb/README.rst
@@ -0,0 +1,12 @@
+CouchDB Docker image
+====================
+
+This directory contains rules to build a custom couchdb docker image to be
+provided as a backend to the soledad server.
+
+Type `make` to build the image.
+
+Differences between this image and the official one:
+
+ - add the "nodelay" socket option on the httpd section of the config file
+ (see: https://leap.se/code/issues/8264).
diff --git a/scripts/docker/couchdb/local.ini b/scripts/docker/couchdb/local.ini
new file mode 100644
index 00000000..3650e0ed
--- /dev/null
+++ b/scripts/docker/couchdb/local.ini
@@ -0,0 +1,2 @@
+[httpd]
+socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
diff --git a/scripts/docker/files/bin/run-perf.sh b/scripts/docker/files/bin/run-perf.sh
new file mode 100755
index 00000000..72060230
--- /dev/null
+++ b/scripts/docker/files/bin/run-perf.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+REPO=/builds/leap/soledad/testing
+COUCH_URL="${COUCH_URL:-http://127.0.0.1:5984}"
+SOLEDAD_PRELOAD_NUM="${SOLEDAD_PRELOAD_NUM:-100}"
+
+if [ ! -z "${SOLEDAD_REMOTE}" ]; then
+ git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE}
+ git -C ${REPO} fetch origin
+fi
+
+if [ ! -z "${SOLEDAD_BRANCH}" ]; then
+ git -C ${REPO} checkout ${SOLEDAD_BRANCH}
+fi
+
+cd ${REPO}
+
+tox -e perf -- \
+ --durations 0 \
+ --couch-url ${COUCH_URL} \
+ --twisted \
+ --num-docs ${SOLEDAD_PRELOAD_NUM}
diff --git a/scripts/docker/files/bin/run-tox.sh b/scripts/docker/files/bin/run-tox.sh
new file mode 100755
index 00000000..74fde182
--- /dev/null
+++ b/scripts/docker/files/bin/run-tox.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+REPO=/builds/leap/soledad/testing
+COUCH_URL="${COUCH_URL:-http://127.0.0.1:5984}"
+
+if [ ! -z "${SOLEDAD_REMOTE}" ]; then
+ git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE}
+ git -C ${REPO} fetch origin
+fi
+
+if [ ! -z "${SOLEDAD_BRANCH}" ]; then
+ git -C ${REPO} checkout ${SOLEDAD_BRANCH}
+fi
+
+cd ${REPO}
+
+tox -- --couch-url ${COUCH_URL}
diff --git a/scripts/docker/files/bin/setup-test-env.py b/scripts/docker/files/bin/setup-test-env.py
index 0f3ea6f4..4868fd56 100755
--- a/scripts/docker/files/bin/setup-test-env.py
+++ b/scripts/docker/files/bin/setup-test-env.py
@@ -194,12 +194,12 @@ def user_db_create(args):
url = 'http://localhost:%d/user-%s' % (args.port, args.uuid)
try:
CouchDatabase.open_database(
- url=url, create=False, replica_uid=None, ensure_ddocs=True)
+ url=url, create=False, replica_uid=None)
print '[*] error: database "user-%s" already exists' % args.uuid
exit(1)
except DatabaseDoesNotExist:
CouchDatabase.open_database(
- url=url, create=True, replica_uid=None, ensure_ddocs=True)
+ url=url, create=True, replica_uid=None)
print '[+] database created: user-%s' % args.uuid
@@ -372,7 +372,10 @@ CERT_CONFIG_FILE = os.path.join(
def cert_create(args):
private_key = os.path.join(args.basedir, args.private_key)
cert_key = os.path.join(args.basedir, args.cert_key)
- os.mkdir(args.basedir)
+ try:
+ os.mkdir(args.basedir)
+ except OSError:
+ pass
call([
'openssl',
'req',
@@ -389,8 +392,11 @@ def cert_create(args):
def cert_delete(args):
private_key = os.path.join(args.basedir, args.private_key)
cert_key = os.path.join(args.basedir, args.cert_key)
- os.unlink(private_key)
- os.unlink(cert_key)
+ try:
+ os.unlink(private_key)
+ os.unlink(cert_key)
+ except OSError:
+ pass
#
diff --git a/scripts/migration/0.9.0/.gitignore b/scripts/migration/0.9.0/.gitignore
new file mode 100644
index 00000000..6115c109
--- /dev/null
+++ b/scripts/migration/0.9.0/.gitignore
@@ -0,0 +1 @@
+log/*
diff --git a/scripts/migration/0.9.0/README.md b/scripts/migration/0.9.0/README.md
new file mode 100644
index 00000000..919a5235
--- /dev/null
+++ b/scripts/migration/0.9.0/README.md
@@ -0,0 +1,73 @@
+CouchDB schema migration to Soledad 0.8.2
+=========================================
+
+Migrate couch database schema from version <= 0.8.1 to version 0.8.2.
+
+
+ATTENTION!
+----------
+
+ - This script does not backup your data for you. Make sure you have a backup
+ copy of your databases before running this script!
+
+ - Make sure you turn off any service that might be writing to the couch
+ database before running this script.
+
+
+Usage
+-----
+
+To see what the script would do, run:
+
+ ./migrate.py
+
+To actually run the migration, add the --do-migrate command line option:
+
+ ./migrate.py --do-migrate
+
+See command line options:
+
+ ./migrate.py --help
+
+
+Log
+---
+
+If you don't pass a --log-file command line option, a log will be written to
+the `log/` folder.
+
+
+Differences between old and new couch schema
+--------------------------------------------
+
+The differences between old and new schemas are:
+
+ - Transaction metadata was previously stored inside each document, and we
+ used design doc view/list functions to retrieve that information. Now,
+ transaction metadata is stored in documents with special ids
+    (gen-0000000001 to gen-9999999999); see the sketch after this list.
+
+ - Database replica config metadata was stored in a document called
+ "u1db_config", and now we store it in the "_local/config" document.
+
+ - Sync metadata was previously stored in documents with id
+ "u1db_sync_<source-replica-id>", and now are stored in
+ "_local/sync_<source-replica-id>".
+
+ - The new schema doesn't make use of any design documents.
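+
+As an illustration, a migrated transaction entry ("gen doc") could look
+roughly like the following sketch (key names as used by the migration code;
+values are hypothetical):
+
+    {
+        '_id': 'gen-0000000001',
+        'gen': 1,
+        'doc_id': 'some-doc-id',
+        'trans_id': 'some-transaction-id',
+    }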
+
+
+What does this script do
+------------------------
+
+- List all databases starting with "user-".
+- For each one, do:
+ - Check if it contains the old "u1db_config" document.
+ - If it doesn't, skip this db.
+ - Get the transaction log using the usual design doc view/list functions.
+ - Write a new "gen-X" document for each line on the transaction log.
+ - Get the "u1db_config" document, create a new one in "_local/config",
+    and delete the old one.
+ - List all "u1db_sync_X" documents, create new ones in "_local/sync_X",
+ delete the old ones.
+ - Delete unused design documents.
diff --git a/scripts/migration/0.9.0/log/.empty b/scripts/migration/0.9.0/log/.empty
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/migration/0.9.0/log/.empty
diff --git a/scripts/migration/0.9.0/migrate.py b/scripts/migration/0.9.0/migrate.py
new file mode 100755
index 00000000..6ad5bc2d
--- /dev/null
+++ b/scripts/migration/0.9.0/migrate.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+# migrate.py
+
+"""
+Migrate CouchDB schema to Soledad 0.8.2 schema.
+
+******************************************************************************
+ ATTENTION!
+
+ - This script does not backup your data for you. Make sure you have a backup
+ copy of your databases before running this script!
+
+ - Make sure you turn off any service that might be writing to the couch
+ database before running this script.
+
+******************************************************************************
+
+Run this script with the --help option to see command line options.
+
+See the README.md file for more information.
+"""
+
+import datetime
+import logging
+import netrc
+import os
+
+from argparse import ArgumentParser
+
+from leap.soledad.server import load_configuration
+
+from migrate_couch_schema import migrate
+
+
+TARGET_VERSION = '0.8.2'
+DEFAULT_COUCH_URL = 'http://127.0.0.1:5984'
+CONF = load_configuration('/etc/soledad/soledad-server.conf')
+NETRC_PATH = CONF['soledad-server']['admin_netrc']
+
+
+#
+# command line args and execution
+#
+
+def _configure_logger(log_file, level=logging.INFO):
+ if not log_file:
+ fname, _ = os.path.basename(__file__).split('.')
+ timestr = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
+ filename = 'soledad_%s_%s_%s.log' \
+ % (TARGET_VERSION, fname, timestr)
+ dirname = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), 'log')
+ log_file = os.path.join(dirname, filename)
+ logging.basicConfig(
+ filename=log_file,
+ filemode='a',
+ format='%(asctime)s,%(msecs)d %(levelname)s %(message)s',
+ datefmt='%H:%M:%S',
+ level=level)
+
+
+def _default_couch_url():
+ if not os.path.exists(NETRC_PATH):
+ return DEFAULT_COUCH_URL
+ parsed_netrc = netrc.netrc(NETRC_PATH)
+ host, (login, _, password) = parsed_netrc.hosts.items()[0]
+ url = ('http://%(login)s:%(password)s@%(host)s:5984' % {
+ 'login': login,
+ 'password': password,
+ 'host': host})
+ return url
+
+
+def _parse_args():
+ parser = ArgumentParser()
+ parser.add_argument(
+ '--couch_url',
+ help='the url for the couch database',
+ default=_default_couch_url())
+ parser.add_argument(
+ '--do-migrate',
+ help='actually perform the migration (otherwise '
+ 'just print what would be done)',
+ action='store_true')
+ parser.add_argument(
+ '--log-file',
+ help='the log file to use')
+ parser.add_argument(
+ '--pdb', action='store_true',
+ help='escape to pdb shell in case of exception')
+ parser.add_argument(
+ '--verbose', action='store_true',
+ help='output detailed information about the migration '
+ '(i.e. include debug messages)')
+ return parser.parse_args()
+
+
+def _enable_pdb():
+ import sys
+ from IPython.core import ultratb
+ sys.excepthook = ultratb.FormattedTB(
+ mode='Verbose', color_scheme='Linux', call_pdb=1)
+
+
+if __name__ == '__main__':
+ args = _parse_args()
+ if args.pdb:
+ _enable_pdb()
+ _configure_logger(
+ args.log_file,
+ level=logging.DEBUG if args.verbose else logging.INFO)
+ logger = logging.getLogger(__name__)
+ try:
+ migrate(args, TARGET_VERSION)
+ except:
+ logger.exception('Fatal error on migrate script!')
+ raise
diff --git a/scripts/migration/0.9.0/migrate_couch_schema/__init__.py b/scripts/migration/0.9.0/migrate_couch_schema/__init__.py
new file mode 100644
index 00000000..f0b456e4
--- /dev/null
+++ b/scripts/migration/0.9.0/migrate_couch_schema/__init__.py
@@ -0,0 +1,192 @@
+# __init__.py
+"""
+Support functions for migration script.
+"""
+
+import logging
+
+from couchdb import Server
+from couchdb import ResourceNotFound
+from couchdb import ResourceConflict
+
+from leap.soledad.common.couch import GENERATION_KEY
+from leap.soledad.common.couch import TRANSACTION_ID_KEY
+from leap.soledad.common.couch import REPLICA_UID_KEY
+from leap.soledad.common.couch import DOC_ID_KEY
+from leap.soledad.common.couch import SCHEMA_VERSION_KEY
+from leap.soledad.common.couch import CONFIG_DOC_ID
+from leap.soledad.common.couch import SYNC_DOC_ID_PREFIX
+from leap.soledad.common.couch import SCHEMA_VERSION
+
+
+logger = logging.getLogger(__name__)
+
+
+#
+# support functions
+#
+
+def _get_couch_server(couch_url):
+ return Server(couch_url)
+
+
+def _is_migrateable(db):
+ config_doc = db.get('u1db_config')
+ return bool(config_doc)
+
+
+def _get_transaction_log(db):
+ ddoc_path = ['_design', 'transactions', '_view', 'log']
+ resource = db.resource(*ddoc_path)
+ try:
+ _, _, data = resource.get_json()
+ except ResourceNotFound:
+ logger.warning(
+ '[%s] missing transactions design document, '
+ 'can\'t get transaction log.' % db.name)
+ return []
+ rows = data['rows']
+ transaction_log = []
+ gen = 1
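+    # view rows are keyed by transaction timestamp, so they arrive in
+    # transaction order and each row's position gives its generation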
+ for row in rows:
+ transaction_log.append((gen, row['id'], row['value']))
+ gen += 1
+ return transaction_log
+
+
+def _get_user_dbs(server):
+ user_dbs = filter(lambda dbname: dbname.startswith('user-'), server)
+ return user_dbs
+
+
+#
+# migration main functions
+#
+
+def migrate(args, target_version):
+ server = _get_couch_server(args.couch_url)
+ logger.info('starting couch schema migration to %s' % target_version)
+ if not args.do_migrate:
+ logger.warning('dry-run: no changes will be made to databases')
+ user_dbs = _get_user_dbs(server)
+ for dbname in user_dbs:
+ db = server[dbname]
+ if not _is_migrateable(db):
+ logger.warning("[%s] skipping not migrateable user db" % dbname)
+ continue
+ logger.info("[%s] starting migration of user db" % dbname)
+ try:
+ _migrate_user_db(db, args.do_migrate)
+ logger.info("[%s] finished migration of user db" % dbname)
+ except:
+ logger.exception('[%s] error migrating user db' % dbname)
+ logger.error('continuing with next database.')
+ logger.info('finished couch schema migration to %s' % target_version)
+
+
+def _migrate_user_db(db, do_migrate):
+ _migrate_transaction_log(db, do_migrate)
+ _migrate_sync_docs(db, do_migrate)
+ _delete_design_docs(db, do_migrate)
+ _migrate_config_doc(db, do_migrate)
+
+
+def _migrate_transaction_log(db, do_migrate):
+ transaction_log = _get_transaction_log(db)
+ for gen, doc_id, trans_id in transaction_log:
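+        # gen doc ids are zero-padded ("gen-0000000001" to "gen-9999999999")
+        # so that they sort in generation order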
+ gen_doc_id = 'gen-%s' % str(gen).zfill(10)
+ doc = {
+ '_id': gen_doc_id,
+ GENERATION_KEY: gen,
+ DOC_ID_KEY: doc_id,
+ TRANSACTION_ID_KEY: trans_id,
+ }
+ logger.debug('[%s] creating gen doc: %s' % (db.name, gen_doc_id))
+ if do_migrate:
+ try:
+ db.save(doc)
+ except ResourceConflict:
+ # this gen document already exists. if documents are the same,
+ # continue with migration.
+ existing_doc = db.get(gen_doc_id)
+ for key in [GENERATION_KEY, DOC_ID_KEY, TRANSACTION_ID_KEY]:
+ if existing_doc[key] != doc[key]:
+ raise
+
+
+def _migrate_config_doc(db, do_migrate):
+ old_doc = db['u1db_config']
+ new_doc = {
+ '_id': CONFIG_DOC_ID,
+ REPLICA_UID_KEY: old_doc[REPLICA_UID_KEY],
+ SCHEMA_VERSION_KEY: SCHEMA_VERSION,
+ }
+ logger.info("[%s] moving config doc: %s -> %s"
+ % (db.name, old_doc['_id'], new_doc['_id']))
+ if do_migrate:
+ # the config doc must not exist, otherwise we would have skipped this
+ # database.
+ db.save(new_doc)
+ db.delete(old_doc)
+
+
+def _migrate_sync_docs(db, do_migrate):
+ logger.info('[%s] moving sync docs' % db.name)
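+    # 'u1db_synd' is the lexicographic successor of the 'u1db_sync' prefix,
+    # so this startkey/endkey range selects every doc whose id starts with
+    # 'u1db_sync' (a common couchdb prefix-scan idiom)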
+ view = db.view(
+ '_all_docs',
+ startkey='u1db_sync',
+ endkey='u1db_synd',
+ include_docs='true')
+ for row in view.rows:
+ old_doc = row['doc']
+ old_id = old_doc['_id']
+
+ # older schemas used different documents with ids starting with
+ # "u1db_sync" to store sync-related data:
+ #
+ # - u1db_sync_log: was used to store the whole sync log.
+ # - u1db_sync_state: was used to store the sync state.
+ #
+ # if any of these documents exist in the current db, they are leftover
+ # from previous migrations, and should just be removed.
+ if old_id in ['u1db_sync_log', 'u1db_sync_state']:
+ logger.info('[%s] removing leftover document: %s'
+ % (db.name, old_id))
+ if do_migrate:
+ db.delete(old_doc)
+ continue
+
+ replica_uid = old_id.replace('u1db_sync_', '')
+ new_id = "%s%s" % (SYNC_DOC_ID_PREFIX, replica_uid)
+ new_doc = {
+ '_id': new_id,
+ GENERATION_KEY: old_doc['generation'],
+ TRANSACTION_ID_KEY: old_doc['transaction_id'],
+ REPLICA_UID_KEY: replica_uid,
+ }
+ logger.debug("[%s] moving sync doc: %s -> %s"
+ % (db.name, old_id, new_id))
+ if do_migrate:
+ try:
+ db.save(new_doc)
+ except ResourceConflict:
+ # this sync document already exists. if documents are the same,
+ # continue with migration.
+ existing_doc = db.get(new_id)
+ for key in [GENERATION_KEY, TRANSACTION_ID_KEY,
+ REPLICA_UID_KEY]:
+ if existing_doc[key] != new_doc[key]:
+ raise
+ db.delete(old_doc)
+
+
+def _delete_design_docs(db, do_migrate):
+ for ddoc in ['docs', 'syncs', 'transactions']:
+ doc_id = '_design/%s' % ddoc
+ doc = db.get(doc_id)
+ if doc:
+ logger.info("[%s] deleting design doc: %s" % (db.name, doc_id))
+ if do_migrate:
+ db.delete(doc)
+ else:
+ logger.warning("[%s] design doc not found: %s" % (db.name, doc_id))
diff --git a/scripts/migration/0.9.0/requirements.pip b/scripts/migration/0.9.0/requirements.pip
new file mode 100644
index 00000000..ea22a1a4
--- /dev/null
+++ b/scripts/migration/0.9.0/requirements.pip
@@ -0,0 +1,3 @@
+couchdb
+leap.soledad.common==0.9.0
+leap.soledad.server==0.9.0
diff --git a/scripts/migration/0.9.0/setup.py b/scripts/migration/0.9.0/setup.py
new file mode 100644
index 00000000..0467e932
--- /dev/null
+++ b/scripts/migration/0.9.0/setup.py
@@ -0,0 +1,8 @@
+from setuptools import setup
+from setuptools import find_packages
+
+
+setup(
+ name='migrate_couch_schema',
+ packages=find_packages('.'),
+)
diff --git a/scripts/migration/0.9.0/tests/conftest.py b/scripts/migration/0.9.0/tests/conftest.py
new file mode 100644
index 00000000..61f6c7ee
--- /dev/null
+++ b/scripts/migration/0.9.0/tests/conftest.py
@@ -0,0 +1,54 @@
+# conftest.py
+
+"""
+Provide a couch database with content stored in the old schema.
+"""
+
+import couchdb
+import pytest
+import uuid
+
+
+COUCH_URL = 'http://127.0.0.1:5984'
+
+transaction_map = """
+function(doc) {
+ if (doc.u1db_transactions)
+ doc.u1db_transactions.forEach(function(t) {
+ emit(t[0], // use timestamp as key so the results are ordered
+ t[1]); // value is the transaction_id
+ });
+}
+"""
+
+initial_docs = [
+ {'_id': 'u1db_config', 'replica_uid': 'an-uid'},
+ {'_id': 'u1db_sync_A', 'generation': 0, 'replica_uid': 'A',
+ 'transaction_id': ''},
+ {'_id': 'u1db_sync_B', 'generation': 2, 'replica_uid': 'B',
+ 'transaction_id': 'X'},
+ {'_id': 'doc1', 'u1db_transactions': [(1, 'trans-1'), (3, 'trans-3')]},
+ {'_id': 'doc2', 'u1db_transactions': [(2, 'trans-2'), (4, 'trans-4')]},
+ {'_id': '_design/docs'},
+ {'_id': '_design/syncs'},
+ {'_id': '_design/transactions',
+ 'views': {'log': {'map': transaction_map}}},
+ # add some data from previous interrupted migration
+ {'_id': '_local/sync_A', 'gen': 0, 'trans_id': '', 'replica_uid': 'A'},
+ {'_id': 'gen-0000000002',
+ 'gen': 2, 'trans_id': 'trans-2', 'doc_id': 'doc2'},
+ # the following should be removed if found in the dbs
+ {'_id': 'u1db_sync_log'},
+ {'_id': 'u1db_sync_state'},
+]
+
+
+@pytest.fixture(scope='function')
+def db(request):
+ server = couchdb.Server(COUCH_URL)
+ dbname = "user-" + uuid.uuid4().hex
+ db = server.create(dbname)
+ for doc in initial_docs:
+ db.save(doc)
+ request.addfinalizer(lambda: server.delete(dbname))
+ return db
diff --git a/scripts/migration/0.9.0/tests/test_migrate.py b/scripts/migration/0.9.0/tests/test_migrate.py
new file mode 100644
index 00000000..10c8b906
--- /dev/null
+++ b/scripts/migration/0.9.0/tests/test_migrate.py
@@ -0,0 +1,67 @@
+# test_migrate.py
+
+"""
+Ensure that the migration script works!
+"""
+
+from migrate_couch_schema import _migrate_user_db
+
+from leap.soledad.common.couch import GENERATION_KEY
+from leap.soledad.common.couch import TRANSACTION_ID_KEY
+from leap.soledad.common.couch import REPLICA_UID_KEY
+from leap.soledad.common.couch import DOC_ID_KEY
+from leap.soledad.common.couch import SCHEMA_VERSION_KEY
+from leap.soledad.common.couch import CONFIG_DOC_ID
+from leap.soledad.common.couch import SYNC_DOC_ID_PREFIX
+from leap.soledad.common.couch import SCHEMA_VERSION
+
+
+def test__migrate_user_db(db):
+ _migrate_user_db(db, True)
+
+ # we should find exactly 6 documents: 2 normal documents and 4 generation
+ # documents
+ view = db.view('_all_docs')
+ assert len(view.rows) == 6
+
+ # ensure that the ids of the documents found in the database are correct
+ doc_ids = map(lambda doc: doc.id, view.rows)
+ assert 'doc1' in doc_ids
+ assert 'doc2' in doc_ids
+ assert 'gen-0000000001' in doc_ids
+ assert 'gen-0000000002' in doc_ids
+ assert 'gen-0000000003' in doc_ids
+ assert 'gen-0000000004' in doc_ids
+
+ # assert config doc contents
+ config_doc = db.get(CONFIG_DOC_ID)
+ assert config_doc[REPLICA_UID_KEY] == 'an-uid'
+ assert config_doc[SCHEMA_VERSION_KEY] == SCHEMA_VERSION
+
+ # assert sync docs contents
+ sync_doc_A = db.get('%s%s' % (SYNC_DOC_ID_PREFIX, 'A'))
+ assert sync_doc_A[GENERATION_KEY] == 0
+ assert sync_doc_A[REPLICA_UID_KEY] == 'A'
+ assert sync_doc_A[TRANSACTION_ID_KEY] == ''
+ sync_doc_B = db.get('%s%s' % (SYNC_DOC_ID_PREFIX, 'B'))
+ assert sync_doc_B[GENERATION_KEY] == 2
+ assert sync_doc_B[REPLICA_UID_KEY] == 'B'
+ assert sync_doc_B[TRANSACTION_ID_KEY] == 'X'
+
+ # assert gen docs contents
+ gen_1 = db.get('gen-0000000001')
+ assert gen_1[DOC_ID_KEY] == 'doc1'
+ assert gen_1[GENERATION_KEY] == 1
+ assert gen_1[TRANSACTION_ID_KEY] == 'trans-1'
+ gen_2 = db.get('gen-0000000002')
+ assert gen_2[DOC_ID_KEY] == 'doc2'
+ assert gen_2[GENERATION_KEY] == 2
+ assert gen_2[TRANSACTION_ID_KEY] == 'trans-2'
+ gen_3 = db.get('gen-0000000003')
+ assert gen_3[DOC_ID_KEY] == 'doc1'
+ assert gen_3[GENERATION_KEY] == 3
+ assert gen_3[TRANSACTION_ID_KEY] == 'trans-3'
+ gen_4 = db.get('gen-0000000004')
+ assert gen_4[DOC_ID_KEY] == 'doc2'
+ assert gen_4[GENERATION_KEY] == 4
+ assert gen_4[TRANSACTION_ID_KEY] == 'trans-4'
diff --git a/scripts/migration/0.9.0/tox.ini b/scripts/migration/0.9.0/tox.ini
new file mode 100644
index 00000000..2bb6be4c
--- /dev/null
+++ b/scripts/migration/0.9.0/tox.ini
@@ -0,0 +1,13 @@
+[tox]
+envlist = py27
+
+[testenv]
+commands = py.test {posargs}
+changedir = tests
+deps =
+ pytest
+ couchdb
+ pdbpp
+ -e../../../common
+setenv =
+ TERM=xterm
diff --git a/scripts/packaging/compile_design_docs.py b/scripts/packaging/compile_design_docs.py
deleted file mode 100644
index b2b5729a..00000000
--- a/scripts/packaging/compile_design_docs.py
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/usr/bin/python
-
-
-# This script builds files for the design documents represented in the
-# ../common/src/soledad/common/ddocs directory structure (relative to the
-# current location of the script) into a target directory.
-
-
-import argparse
-from os import listdir
-from os.path import realpath, dirname, isdir, join, isfile, basename
-import json
-
-DDOCS_REL_PATH = ('..', 'common', 'src', 'leap', 'soledad', 'common', 'ddocs')
-
-
-def build_ddocs():
- """
- Build design documents.
-
- For ease of development, couch backend design documents are stored as
- `.js` files in subdirectories of
- `../common/src/leap/soledad/common/ddocs`. This function scans that
- directory for javascript files, and builds the design documents structure.
-
- This funciton uses the following conventions to generate design documents:
-
- - Design documents are represented by directories in the form
- `<prefix>/<ddoc>`, there prefix is the `src/leap/soledad/common/ddocs`
- directory.
- - Design document directories might contain `views`, `lists` and
- `updates` subdirectories.
- - Views subdirectories must contain a `map.js` file and may contain a
- `reduce.js` file.
- - List and updates subdirectories may contain any number of javascript
- files (i.e. ending in `.js`) whose names will be mapped to the
- corresponding list or update function name.
- """
- ddocs = {}
-
- # design docs are represented by subdirectories of `DDOCS_REL_PATH`
- cur_pwd = dirname(realpath(__file__))
- ddocs_path = join(cur_pwd, *DDOCS_REL_PATH)
- for ddoc in [f for f in listdir(ddocs_path)
- if isdir(join(ddocs_path, f))]:
-
- ddocs[ddoc] = {'_id': '_design/%s' % ddoc}
-
- for t in ['views', 'lists', 'updates']:
- tdir = join(ddocs_path, ddoc, t)
- if isdir(tdir):
-
- ddocs[ddoc][t] = {}
-
- if t == 'views': # handle views (with map/reduce functions)
- for view in [f for f in listdir(tdir)
- if isdir(join(tdir, f))]:
- # look for map.js and reduce.js
- mapfile = join(tdir, view, 'map.js')
- reducefile = join(tdir, view, 'reduce.js')
- mapfun = None
- reducefun = None
- try:
- with open(mapfile) as f:
- mapfun = f.read()
- except IOError:
- pass
- try:
- with open(reducefile) as f:
- reducefun = f.read()
- except IOError:
- pass
- ddocs[ddoc]['views'][view] = {}
-
- if mapfun is not None:
- ddocs[ddoc]['views'][view]['map'] = mapfun
- if reducefun is not None:
- ddocs[ddoc]['views'][view]['reduce'] = reducefun
-
- else: # handle lists, updates, etc
- for fun in [f for f in listdir(tdir)
- if isfile(join(tdir, f))]:
- funfile = join(tdir, fun)
- funname = basename(funfile).replace('.js', '')
- try:
- with open(funfile) as f:
- ddocs[ddoc][t][funname] = f.read()
- except IOError:
- pass
- return ddocs
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument(
- 'target', type=str,
- help='the target dir where to store design documents')
- args = parser.parse_args()
-
- # check if given target is a directory
- if not isdir(args.target):
- print 'Error: %s is not a directory.' % args.target
- exit(1)
-
- # write desifgn docs files
- ddocs = build_ddocs()
- for ddoc in ddocs:
- ddoc_filename = "%s.json" % ddoc
- with open(join(args.target, ddoc_filename), 'w') as f:
- f.write("%s" % json.dumps(ddocs[ddoc], indent=3))
- print "Wrote _design/%s content in %s" \
- % (ddoc, join(args.target, ddoc_filename,))
diff --git a/scripts/profiling/mail/couchdb_server.py b/scripts/profiling/mail/couchdb_server.py
index 2cf0a3fd..452f8ec2 100644
--- a/scripts/profiling/mail/couchdb_server.py
+++ b/scripts/profiling/mail/couchdb_server.py
@@ -18,8 +18,7 @@ def start_couchdb_wrapper():
def get_u1db_database(dbname, port):
return CouchDatabase.open_database(
'http://127.0.0.1:%d/%s' % (port, dbname),
- True,
- ensure_ddocs=True)
+ True)
def create_tokens_database(port, uuid, token_value):
@@ -38,5 +37,5 @@ def get_couchdb_wrapper_and_u1db(uuid, token_value):
couchdb_u1db = get_u1db_database('user-%s' % uuid, couchdb_wrapper.port)
get_u1db_database('shared', couchdb_wrapper.port)
create_tokens_database(couchdb_wrapper.port, uuid, token_value)
-
+
return couchdb_wrapper, couchdb_u1db
diff --git a/server/pkg/create-user-db b/server/pkg/create-user-db
index 5e48d4de..b955b4c3 100755
--- a/server/pkg/create-user-db
+++ b/server/pkg/create-user-db
@@ -80,7 +80,7 @@ def ensure_database(dbname):
url = url_for_db(dbname)
db_security = CONF['database-security']
db = CouchDatabase.open_database(url=url, create=True,
- replica_uid=None, ensure_ddocs=True,
+ replica_uid=None,
database_security=db_security)
print ('success! Ensured that database %s exists, with replica_uid: %s' %
(db._dbname, db.replica_uid))
diff --git a/server/pkg/requirements-latest.pip b/server/pkg/requirements-latest.pip
index 46a7ccba..d32e1ffa 100644
--- a/server/pkg/requirements-latest.pip
+++ b/server/pkg/requirements-latest.pip
@@ -1,5 +1,5 @@
--index-url https://pypi.python.org/simple/
--e 'git+https://github.com/pixelated-project/leap_pycommon.git@develop#egg=leap.common'
+-e 'git+https://github.com/leapcode/leap_pycommon.git@develop#egg=leap.common'
-e '../common'
-e .
diff --git a/server/pkg/requirements-leap.pip b/server/pkg/requirements-leap.pip
index aaad340c..93b447e5 100644
--- a/server/pkg/requirements-leap.pip
+++ b/server/pkg/requirements-leap.pip
@@ -1 +1 @@
-leap.soledad.common>=0.6.5
+leap.soledad.common>=0.9.0
diff --git a/server/pkg/requirements.pip b/server/pkg/requirements.pip
index 2d845f24..e92dfde6 100644
--- a/server/pkg/requirements.pip
+++ b/server/pkg/requirements.pip
@@ -1,6 +1,5 @@
configparser
PyOpenSSL
twisted>=12.3.0
-#pinned for wheezy compatibility
-Beaker==1.6.3 #wheezy
-couchdb==0.8 #wheezy
+Beaker
+couchdb
diff --git a/server/pkg/soledad-server b/server/pkg/soledad-server
index 74ed122e..9dada6a0 100644
--- a/server/pkg/soledad-server
+++ b/server/pkg/soledad-server
@@ -12,7 +12,6 @@
PATH=/sbin:/bin:/usr/sbin:/usr/bin
PIDFILE=/var/run/soledad.pid
OBJ=leap.soledad.server.application
-LOGFILE=/var/log/soledad.log
HTTPS_PORT=2424
CONFDIR=/etc/soledad
CERT_PATH="${CONFDIR}/soledad-server.pem"
@@ -37,7 +36,8 @@ case "${1}" in
--exec ${TWISTD_PATH} -- \
--uid=${USER} --gid=${GROUP} \
--pidfile=${PIDFILE} \
- --logfile=${LOGFILE} \
+ --syslog \
+ --prefix=soledad-server \
web \
--wsgi=${OBJ} \
--port=ssl:${HTTPS_PORT}:privateKey=${PRIVKEY_PATH}:certKey=${CERT_PATH}:sslmethod=${SSL_METHOD}
diff --git a/server/setup.py b/server/setup.py
index b3b26010..a18d0b2d 100644
--- a/server/setup.py
+++ b/server/setup.py
@@ -122,13 +122,13 @@ requirements = utils.parse_requirements()
if utils.is_develop_mode():
print
- print ("[WARNING] Skipping leap-specific dependencies "
- "because development mode is detected.")
- print ("[WARNING] You can install "
- "the latest published versions with "
- "'pip install -r pkg/requirements-leap.pip'")
- print ("[WARNING] Or you can instead do 'python setup.py develop' "
- "from the parent folder of each one of them.")
+ print("[WARNING] Skipping leap-specific dependencies "
+ "because development mode is detected.")
+ print("[WARNING] You can install "
+ "the latest published versions with "
+ "'pip install -r pkg/requirements-leap.pip'")
+ print("[WARNING] Or you can instead do 'python setup.py develop' "
+ "from the parent folder of each one of them.")
print
else:
requirements += utils.parse_requirements(
diff --git a/server/src/leap/soledad/server/__init__.py b/server/src/leap/soledad/server/__init__.py
index 34570b52..d154e3fe 100644
--- a/server/src/leap/soledad/server/__init__.py
+++ b/server/src/leap/soledad/server/__init__.py
@@ -80,7 +80,6 @@ documents on the shared database is handled by `leap.soledad.server.auth`
module.
"""
-import configparser
import urlparse
import sys
@@ -88,11 +87,10 @@ from leap.soledad.common.l2db.remote import http_app, utils
from leap.soledad.server.auth import SoledadTokenAuthMiddleware
from leap.soledad.server.gzip_middleware import GzipMiddleware
-from leap.soledad.server.sync import (
- SyncResource,
- MAX_REQUEST_SIZE,
- MAX_ENTRY_SIZE,
-)
+from leap.soledad.server.sync import SyncResource
+from leap.soledad.server.sync import MAX_REQUEST_SIZE
+from leap.soledad.server.sync import MAX_ENTRY_SIZE
+from leap.soledad.server.config import load_configuration
from leap.soledad.common import SHARED_DB_NAME
from leap.soledad.common.backend import SoledadBackend
@@ -100,6 +98,14 @@ from leap.soledad.common.couch.state import CouchServerState
from ._version import get_versions
+
+__all__ = [
+ 'SoledadApp',
+ 'application',
+ '__version__',
+]
+
+
# ----------------------------------------------------------------------------
# Soledad WSGI application
# ----------------------------------------------------------------------------
@@ -250,57 +256,6 @@ http_app.HTTPInvocationByMethodWithBody = HTTPInvocationByMethodWithBody
# ----------------------------------------------------------------------------
-# Auxiliary functions
-# ----------------------------------------------------------------------------
-CONFIG_DEFAULTS = {
- 'soledad-server': {
- 'couch_url': 'http://localhost:5984',
- 'create_cmd': None,
- 'admin_netrc': '/etc/couchdb/couchdb-admin.netrc',
- 'batching': False
- },
- 'database-security': {
- 'members': ['soledad'],
- 'members_roles': [],
- 'admins': [],
- 'admins_roles': []
- }
-}
-
-
-def load_configuration(file_path):
- """
- Load server configuration from file.
-
- @param file_path: The path to the configuration file.
- @type file_path: str
-
- @return: A dictionary with the configuration.
- @rtype: dict
- """
- defaults = dict(CONFIG_DEFAULTS)
- config = configparser.SafeConfigParser()
- config.read(file_path)
- for section in defaults:
- if not config.has_section(section):
- continue
- for key, value in defaults[section].items():
- if not config.has_option(section, key):
- continue
- elif type(value) == bool:
- defaults[section][key] = config.getboolean(section, key)
- elif type(value) == list:
- values = config.get(section, key).split(',')
- values = [v.strip() for v in values]
- defaults[section][key] = values
- else:
- defaults[section][key] = config.get(section, key)
- # TODO: implement basic parsing/sanitization of options comming from
- # config file.
- return defaults
-
-
-# ----------------------------------------------------------------------------
# Run as Twisted WSGI Resource
# ----------------------------------------------------------------------------
@@ -312,25 +267,23 @@ def _load_config():
def _get_couch_state():
conf = _load_config()
- state = CouchServerState(conf['couch_url'], create_cmd=conf['create_cmd'])
+ state = CouchServerState(conf['couch_url'], create_cmd=conf['create_cmd'],
+ check_schema_versions=True)
SoledadBackend.BATCH_SUPPORT = conf.get('batching', False)
return state
-
-def application(environ, start_response):
- """return WSGI application that may be used by `twistd -web`"""
- state = _get_couch_state()
+try:
+ _couch_state = _get_couch_state()
+ # a WSGI application that may be used by `twistd -web`
application = GzipMiddleware(
- SoledadTokenAuthMiddleware(SoledadApp(state)))
- return application(environ, start_response)
+ SoledadTokenAuthMiddleware(SoledadApp(_couch_state)))
+except Exception:
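+ # loading the config or checking couch schema versions may fail at
+ # import time (e.g. couch is unreachable); in that case the module
+ # still imports, but `application` is left undefined.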
+ pass
-def debug_local_application_do_not_use(environ, start_response):
- """in where we bypass token auth middleware for ease of mind while
- debugging in your local environment"""
- state = _get_couch_state()
- application = SoledadApp(state)
- return application(environ, start_response)
+# another WSGI application in which we bypass token auth middleware for ease of
+# mind while debugging in your local environment
+# debug_local_application_do_not_use = SoledadApp(_couch_state)
__version__ = get_versions()['version']
diff --git a/server/src/leap/soledad/server/auth.py b/server/src/leap/soledad/server/auth.py
index ecee2d5d..b7186b3b 100644
--- a/server/src/leap/soledad/server/auth.py
+++ b/server/src/leap/soledad/server/auth.py
@@ -22,13 +22,16 @@ import json
from abc import ABCMeta, abstractmethod
from routes.mapper import Mapper
-from twisted.python import log
+from leap.soledad.common.log import getLogger
from leap.soledad.common.l2db import DBNAME_CONSTRAINTS, errors as u1db_errors
from leap.soledad.common import SHARED_DB_NAME
from leap.soledad.common import USER_DB_PREFIX
+logger = getLogger(__name__)
+
+
class URLToAuthorization(object):
"""
Verify if actions can be performed by a user.
@@ -378,7 +381,7 @@ class SoledadTokenAuthMiddleware(SoledadAuthMiddleware):
try:
return self._state.verify_token(uuid, token)
except Exception as e:
- log.err(e)
+ logger.error(e)
return False
def _get_auth_error_string(self):
diff --git a/server/src/leap/soledad/server/config.py b/server/src/leap/soledad/server/config.py
new file mode 100644
index 00000000..4a791cbe
--- /dev/null
+++ b/server/src/leap/soledad/server/config.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+# config.py
+# Copyright (C) 2016 LEAP
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+import configparser
+
+
+CONFIG_DEFAULTS = {
+ 'soledad-server': {
+ 'couch_url': 'http://localhost:5984',
+ 'create_cmd': None,
+ 'admin_netrc': '/etc/couchdb/couchdb-admin.netrc',
+ 'batching': False
+ },
+ 'database-security': {
+ 'members': ['soledad'],
+ 'members_roles': [],
+ 'admins': [],
+ 'admins_roles': []
+ }
+}
+
+
+def load_configuration(file_path):
+ """
+ Load server configuration from file.
+
+ @param file_path: The path to the configuration file.
+ @type file_path: str
+
+ @return: A dictionary with the configuration.
+ @rtype: dict
+ """
+ defaults = dict(CONFIG_DEFAULTS)
+ config = configparser.SafeConfigParser()
+ config.read(file_path)
+ for section in defaults:
+ if not config.has_section(section):
+ continue
+ for key, value in defaults[section].items():
+ if not config.has_option(section, key):
+ continue
+ elif type(value) == bool:
+ defaults[section][key] = config.getboolean(section, key)
+ elif type(value) == list:
+ values = config.get(section, key).split(',')
+ values = [v.strip() for v in values]
+ defaults[section][key] = values
+ else:
+ defaults[section][key] = config.get(section, key)
+ # TODO: implement basic parsing/sanitization of options coming from
+ # config file.
+ return defaults
diff --git a/testing/pytest.ini b/testing/pytest.ini
new file mode 100644
index 00000000..2d34c607
--- /dev/null
+++ b/testing/pytest.ini
@@ -0,0 +1,3 @@
+[pytest]
+testpaths = tests
+norecursedirs = tests/perf
diff --git a/testing/setup.py b/testing/setup.py
index 059b2489..c1204c9a 100644
--- a/testing/setup.py
+++ b/testing/setup.py
@@ -5,5 +5,5 @@ from setuptools import find_packages
setup(
name='test_soledad',
packages=find_packages('.'),
- package_data={'': ['*.conf']}
+ package_data={'': ['*.conf', 'u1db_tests/testing-certs/*']}
)
diff --git a/testing/test_soledad/u1db_tests/test_open.py b/testing/test_soledad/u1db_tests/test_open.py
index 30d4de00..b572fba0 100644
--- a/testing/test_soledad/u1db_tests/test_open.py
+++ b/testing/test_soledad/u1db_tests/test_open.py
@@ -18,24 +18,25 @@
"""Test u1db.open"""
import os
+import pytest
+
from unittest import skip
-from leap.soledad.common.l2db import (
- errors, open as u1db_open,
-)
from test_soledad import u1db_tests as tests
+from test_soledad.u1db_tests.test_backends import TestAlternativeDocument
+
+from leap.soledad.common.l2db import errors
+from leap.soledad.common.l2db import open as u1db_open
from leap.soledad.common.l2db.backends import sqlite_backend
-from test_soledad.u1db_tests.test_backends \
- import TestAlternativeDocument
@skip("Skiping tests imported from U1DB.")
+@pytest.mark.usefixtures('method_tmpdir')
class TestU1DBOpen(tests.TestCase):
def setUp(self):
super(TestU1DBOpen, self).setUp()
- tmpdir = self.createTempDir()
- self.db_path = tmpdir + '/test.db'
+ self.db_path = self.tempdir + '/test.db'
def test_open_no_create(self):
self.assertRaises(errors.DatabaseDoesNotExist,
diff --git a/testing/test_soledad/util.py b/testing/test_soledad/util.py
index 033a55df..d53f6cda 100644
--- a/testing/test_soledad/util.py
+++ b/testing/test_soledad/util.py
@@ -22,11 +22,10 @@ Utilities used by multiple test suites.
import os
-import tempfile
-import shutil
import random
import string
import couchdb
+import pytest
from uuid import uuid4
from mock import Mock
@@ -42,7 +41,6 @@ from leap.soledad.common import l2db
from leap.soledad.common.l2db import sync
from leap.soledad.common.l2db.remote import http_database
-from leap.soledad.common import soledad_assert
from leap.soledad.common.document import SoledadDocument
from leap.soledad.common.couch import CouchDatabase
from leap.soledad.common.couch.state import CouchServerState
@@ -225,6 +223,7 @@ class BaseSoledadTest(BaseLeapTest, MockedSharedDBTest):
"""
defer_sync_encryption = False
+ @pytest.mark.usefixtures("method_tmpdir")
def setUp(self):
# The following snippet comes from BaseLeapTest.setUpClass, but we
# repeat it here because twisted.trial does not work with
@@ -232,7 +231,6 @@ class BaseSoledadTest(BaseLeapTest, MockedSharedDBTest):
self.old_path = os.environ['PATH']
self.old_home = os.environ['HOME']
- self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
self.home = self.tempdir
bin_tdir = os.path.join(
self.tempdir,
@@ -275,14 +273,6 @@ class BaseSoledadTest(BaseLeapTest, MockedSharedDBTest):
self._soledad.secrets.secrets_path]:
if os.path.isfile(f):
os.unlink(f)
- # The following snippet comes from BaseLeapTest.setUpClass, but we
- # repeat it here because twisted.trial does not work with
- # setUpClass/tearDownClass.
- soledad_assert(
- self.tempdir.startswith('/tmp/leap_tests-'),
- "beware! tried to remove a dir which does not "
- "live in temporal folder!")
- shutil.rmtree(self.tempdir)
from twisted.internet import reactor
reactor.addSystemEventTrigger(
@@ -344,6 +334,7 @@ class BaseSoledadTest(BaseLeapTest, MockedSharedDBTest):
self.assertEqual(exp_doc.content, doc.content)
+@pytest.mark.usefixtures("couch_url")
class CouchDBTestCase(unittest.TestCase, MockedSharedDBTest):
"""
@@ -354,8 +345,6 @@ class CouchDBTestCase(unittest.TestCase, MockedSharedDBTest):
"""
Make sure we have a CouchDB instance for a test.
"""
- self.couch_port = 5984
- self.couch_url = 'http://localhost:%d' % self.couch_port
self.couch_server = couchdb.Server(self.couch_url)
def delete_db(self, name):
@@ -391,8 +380,7 @@ class CouchServerStateForTests(CouchServerState):
db = CouchDatabase.open_database(
urljoin(self.couch_url, dbname),
True,
- replica_uid=replica_uid or 'test',
- ensure_ddocs=True)
+ replica_uid=replica_uid or 'test')
self.dbs.append(db)
return db
diff --git a/testing/tests/client/test_app.py b/testing/tests/client/test_app.py
index fef2f371..6867473e 100644
--- a/testing/tests/client/test_app.py
+++ b/testing/tests/client/test_app.py
@@ -17,6 +17,8 @@
"""
Test ObjectStore and Couch backend bits.
"""
+import pytest
+
from testscenarios import TestWithScenarios
from test_soledad.util import BaseSoledadTest
@@ -31,9 +33,15 @@ from test_soledad.u1db_tests import test_backends
# The following tests come from `u1db.tests.test_backends`.
# -----------------------------------------------------------------------------
+@pytest.mark.usefixtures('method_tmpdir')
class SoledadTests(
TestWithScenarios, test_backends.AllDatabaseTests, BaseSoledadTest):
+ def setUp(self):
+ TestWithScenarios.setUp(self)
+ test_backends.AllDatabaseTests.setUp(self)
+ BaseSoledadTest.setUp(self)
+
scenarios = [
('token_http', {
'make_database_for_test': make_token_http_database_for_test,
diff --git a/testing/tests/client/test_doc.py b/testing/tests/client/test_doc.py
index e158d768..36479e90 100644
--- a/testing/tests/client/test_doc.py
+++ b/testing/tests/client/test_doc.py
@@ -17,6 +17,8 @@
"""
Test Leap backend bits: soledad docs
"""
+import pytest
+
from testscenarios import TestWithScenarios
from test_soledad.u1db_tests import test_document
@@ -28,6 +30,7 @@ from test_soledad.util import make_soledad_document_for_test
# The following tests come from `u1db.tests.test_document`.
# -----------------------------------------------------------------------------
+@pytest.mark.usefixtures('method_tmpdir')
class TestSoledadDocument(
TestWithScenarios,
test_document.TestDocument, BaseSoledadTest):
@@ -37,6 +40,7 @@ class TestSoledadDocument(
'make_document_for_test': make_soledad_document_for_test})])
+@pytest.mark.usefixtures('method_tmpdir')
class TestSoledadPyDocument(
TestWithScenarios,
test_document.TestPyDocument, BaseSoledadTest):
diff --git a/testing/tests/client/test_https.py b/testing/tests/client/test_https.py
index caac16da..1b6caed6 100644
--- a/testing/tests/client/test_https.py
+++ b/testing/tests/client/test_https.py
@@ -17,7 +17,7 @@
"""
Test Leap backend bits: https
"""
-from unittest import skip
+import pytest
from testscenarios import TestWithScenarios
@@ -62,7 +62,7 @@ def token_leap_https_sync_target(test, host, path, cert_file=None):
return st
-@skip("Skiping tests imported from U1DB.")
+@pytest.mark.skip
class TestSoledadHTTPSyncTargetHttpsSupport(
TestWithScenarios,
# test_https.TestHttpSyncTargetHttpsSupport,
diff --git a/testing/tests/conftest.py b/testing/tests/conftest.py
new file mode 100644
index 00000000..9e4319ac
--- /dev/null
+++ b/testing/tests/conftest.py
@@ -0,0 +1,18 @@
+import pytest
+
+
+def pytest_addoption(parser):
+ parser.addoption(
+ "--couch-url", type="string", default="http://127.0.0.1:5984",
+ help="the url for the couch server to be used during tests")
+
+
+@pytest.fixture
+def couch_url(request):
+ url = request.config.getoption('--couch-url')
+ request.cls.couch_url = url
+
+
+@pytest.fixture
+def method_tmpdir(request, tmpdir):
+ request.instance.tempdir = tmpdir.strpath
diff --git a/testing/tests/couch/common.py b/testing/tests/couch/common.py
index b08e1fa3..84790059 100644
--- a/testing/tests/couch/common.py
+++ b/testing/tests/couch/common.py
@@ -13,20 +13,17 @@ nested_doc = tests.nested_doc
def make_couch_database_for_test(test, replica_uid):
- port = str(test.couch_port)
dbname = ('test-%s' % uuid4().hex)
db = couch.CouchDatabase.open_database(
- urljoin('http://localhost:' + port, dbname),
+ urljoin(test.couch_url, dbname),
create=True,
- replica_uid=replica_uid or 'test',
- ensure_ddocs=True)
+ replica_uid=replica_uid or 'test')
test.addCleanup(test.delete_db, dbname)
return db
def copy_couch_database_for_test(test, db):
- port = str(test.couch_port)
- couch_url = 'http://localhost:' + port
+ couch_url = test.couch_url
new_dbname = db._dbname + '_copy'
new_db = couch.CouchDatabase.open_database(
urljoin(couch_url, new_dbname),
@@ -41,15 +38,10 @@ def copy_couch_database_for_test(test, db):
# bypass u1db_config document
if doc_id == 'u1db_config':
pass
- # copy design docs
- elif doc_id.startswith('_design'):
- del doc['_rev']
- new_couch_db.save(doc)
# copy u1db docs
elif 'u1db_rev' in doc:
new_doc = {
'_id': doc['_id'],
- 'u1db_transactions': doc['u1db_transactions'],
'u1db_rev': doc['u1db_rev']
}
attachments = []
@@ -65,6 +57,8 @@ def copy_couch_database_for_test(test, db):
if (att is not None):
new_couch_db.put_attachment(new_doc, att,
filename=att_name)
+ elif doc_id.startswith('gen-'):
+ new_couch_db.save(doc)
# cleanup connections to prevent file descriptor leaking
return new_db
diff --git a/testing/tests/couch/conftest.py b/testing/tests/couch/conftest.py
new file mode 100644
index 00000000..1074f091
--- /dev/null
+++ b/testing/tests/couch/conftest.py
@@ -0,0 +1,31 @@
+import couchdb
+import pytest
+import random
+import string
+
+
+@pytest.fixture
+def random_name():
+ return 'user-' + ''.join(
+ random.choice(
+ string.ascii_lowercase) for _ in range(10))
+
+
+class RandomDatabase(object):
+
+ def __init__(self, couch_url, name):
+ self.couch_url = couch_url
+ self.name = name
+ self.server = couchdb.client.Server(couch_url)
+ self.database = self.server.create(name)
+
+ def teardown(self):
+ self.server.delete(self.name)
+
+
+@pytest.fixture
+def db(random_name, request):
+ couch_url = request.config.getoption('--couch-url')
+ db = RandomDatabase(couch_url, random_name)
+ request.addfinalizer(db.teardown)
+ return db
diff --git a/testing/tests/couch/couchdb.ini.template b/testing/tests/couch/couchdb.ini.template
deleted file mode 100644
index 174d9d86..00000000
--- a/testing/tests/couch/couchdb.ini.template
+++ /dev/null
@@ -1,22 +0,0 @@
-; etc/couchdb/default.ini.tpl. Generated from default.ini.tpl.in by configure.
-
-; Upgrading CouchDB will overwrite this file.
-
-[couchdb]
-database_dir = %(tempdir)s/lib
-view_index_dir = %(tempdir)s/lib
-max_document_size = 4294967296 ; 4 GB
-os_process_timeout = 120000 ; 120 seconds. for view and external servers.
-max_dbs_open = 100
-delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned
-uri_file = %(tempdir)s/lib/couch.uri
-file_compression = snappy
-
-[log]
-file = %(tempdir)s/log/couch.log
-level = info
-include_sasl = true
-
-[httpd]
-port = 0
-bind_address = 127.0.0.1
diff --git a/testing/tests/couch/test_atomicity.py b/testing/tests/couch/test_atomicity.py
index aec9c6cf..a3ae0314 100644
--- a/testing/tests/couch/test_atomicity.py
+++ b/testing/tests/couch/test_atomicity.py
@@ -18,7 +18,7 @@
Test atomicity of couch operations.
"""
import os
-import tempfile
+import pytest
import threading
from urlparse import urljoin
@@ -41,6 +41,7 @@ from test_soledad.u1db_tests import TestCaseWithServer
REPEAT_TIMES = 20
+@pytest.mark.usefixtures('method_tmpdir')
class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer):
@staticmethod
@@ -90,9 +91,7 @@ class CouchAtomicityTestCase(CouchDBTestCase, TestCaseWithServer):
self.db = CouchDatabase.open_database(
urljoin(self.couch_url, 'user-' + self.user),
create=True,
- replica_uid='replica',
- ensure_ddocs=True)
- self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
+ replica_uid='replica')
self.startTwistedServer()
def tearDown(self):
diff --git a/testing/tests/couch/test_backend.py b/testing/tests/couch/test_backend.py
index f178e8a5..4fad11cf 100644
--- a/testing/tests/couch/test_backend.py
+++ b/testing/tests/couch/test_backend.py
@@ -39,12 +39,8 @@ class TestCouchBackendImpl(CouchDBTestCase):
def test__allocate_doc_id(self):
db = couch.CouchDatabase.open_database(
- urljoin(
- 'http://localhost:' + str(self.couch_port),
- ('test-%s' % uuid4().hex)
- ),
- create=True,
- ensure_ddocs=True)
+ urljoin(self.couch_url, 'test-%s' % uuid4().hex),
+ create=True)
doc_id1 = db._allocate_doc_id()
self.assertTrue(doc_id1.startswith('D-'))
self.assertEqual(34, len(doc_id1))
diff --git a/testing/tests/couch/test_command.py b/testing/tests/couch/test_command.py
index f61e118d..68097fb1 100644
--- a/testing/tests/couch/test_command.py
+++ b/testing/tests/couch/test_command.py
@@ -1,6 +1,6 @@
from twisted.trial import unittest
-from leap.soledad.common import couch
+from leap.soledad.common.couch import state as couch_state
from leap.soledad.common.l2db import errors as u1db_errors
from mock import Mock
@@ -9,7 +9,8 @@ from mock import Mock
class CommandBasedDBCreationTest(unittest.TestCase):
def test_ensure_db_using_custom_command(self):
- state = couch.state.CouchServerState("url", create_cmd="echo")
+ state = couch_state.CouchServerState(
+ "url", create_cmd="/bin/echo", check_schema_versions=False)
mock_db = Mock()
mock_db.replica_uid = 'replica_uid'
state.open_database = Mock(return_value=mock_db)
@@ -18,11 +19,12 @@ class CommandBasedDBCreationTest(unittest.TestCase):
self.assertEquals(mock_db.replica_uid, replica_uid)
def test_raises_unauthorized_on_failure(self):
- state = couch.state.CouchServerState("url", create_cmd="inexistent")
+ state = couch_state.CouchServerState(
+ "url", create_cmd="inexistent", check_schema_versions=False)
self.assertRaises(u1db_errors.Unauthorized,
state.ensure_database, "user-1337")
def test_raises_unauthorized_by_default(self):
- state = couch.state.CouchServerState("url")
+ state = couch_state.CouchServerState("url", check_schema_versions=False)
self.assertRaises(u1db_errors.Unauthorized,
state.ensure_database, "user-1337")
diff --git a/testing/tests/couch/test_ddocs.py b/testing/tests/couch/test_ddocs.py
index 9ff32633..3937f2de 100644
--- a/testing/tests/couch/test_ddocs.py
+++ b/testing/tests/couch/test_ddocs.py
@@ -1,6 +1,5 @@
from uuid import uuid4
-from leap.soledad.common.couch import errors
from leap.soledad.common import couch
from test_soledad.util import CouchDBTestCase
@@ -10,174 +9,27 @@ class CouchDesignDocsTests(CouchDBTestCase):
def setUp(self):
CouchDBTestCase.setUp(self)
+ self.create_db()
- def create_db(self, ensure=True, dbname=None):
+ def create_db(self, dbname=None):
if not dbname:
dbname = ('test-%s' % uuid4().hex)
if dbname not in self.couch_server:
self.couch_server.create(dbname)
self.db = couch.CouchDatabase(
- ('http://127.0.0.1:%d' % self.couch_port),
- dbname,
- ensure_ddocs=ensure)
+ (self.couch_url),
+ dbname)
def tearDown(self):
self.db.delete_database()
self.db.close()
CouchDBTestCase.tearDown(self)
- def test_missing_design_doc_raises(self):
- """
- Test that all methods that access design documents will raise if the
- design docs are not present.
- """
- self.create_db(ensure=False)
- # get_generation_info()
- self.assertRaises(
- errors.MissingDesignDocError,
- self.db.get_generation_info)
- # get_trans_id_for_gen()
- self.assertRaises(
- errors.MissingDesignDocError,
- self.db.get_trans_id_for_gen, 1)
- # get_transaction_log()
- self.assertRaises(
- errors.MissingDesignDocError,
- self.db.get_transaction_log)
- # whats_changed()
- self.assertRaises(
- errors.MissingDesignDocError,
- self.db.whats_changed)
-
- def test_missing_design_doc_functions_raises(self):
- """
- Test that all methods that access design documents list functions
- will raise if the functions are not present.
- """
- self.create_db(ensure=True)
- # erase views from _design/transactions
- transactions = self.db._database['_design/transactions']
- transactions['lists'] = {}
- self.db._database.save(transactions)
- # get_generation_info()
- self.assertRaises(
- errors.MissingDesignDocListFunctionError,
- self.db.get_generation_info)
- # get_trans_id_for_gen()
- self.assertRaises(
- errors.MissingDesignDocListFunctionError,
- self.db.get_trans_id_for_gen, 1)
- # whats_changed()
- self.assertRaises(
- errors.MissingDesignDocListFunctionError,
- self.db.whats_changed)
-
- def test_absent_design_doc_functions_raises(self):
- """
- Test that all methods that access design documents list functions
- will raise if the functions are not present.
- """
- self.create_db(ensure=True)
- # erase views from _design/transactions
- transactions = self.db._database['_design/transactions']
- del transactions['lists']
- self.db._database.save(transactions)
- # get_generation_info()
- self.assertRaises(
- errors.MissingDesignDocListFunctionError,
- self.db.get_generation_info)
- # _get_trans_id_for_gen()
- self.assertRaises(
- errors.MissingDesignDocListFunctionError,
- self.db.get_trans_id_for_gen, 1)
- # whats_changed()
- self.assertRaises(
- errors.MissingDesignDocListFunctionError,
- self.db.whats_changed)
-
- def test_missing_design_doc_named_views_raises(self):
- """
- Test that all methods that access design documents' named views will
- raise if the views are not present.
- """
- self.create_db(ensure=True)
- # erase views from _design/docs
- docs = self.db._database['_design/docs']
- del docs['views']
- self.db._database.save(docs)
- # erase views from _design/syncs
- syncs = self.db._database['_design/syncs']
- del syncs['views']
- self.db._database.save(syncs)
- # erase views from _design/transactions
- transactions = self.db._database['_design/transactions']
- del transactions['views']
- self.db._database.save(transactions)
- # get_generation_info()
- self.assertRaises(
- errors.MissingDesignDocNamedViewError,
- self.db.get_generation_info)
- # _get_trans_id_for_gen()
- self.assertRaises(
- errors.MissingDesignDocNamedViewError,
- self.db.get_trans_id_for_gen, 1)
- # _get_transaction_log()
- self.assertRaises(
- errors.MissingDesignDocNamedViewError,
- self.db.get_transaction_log)
- # whats_changed()
- self.assertRaises(
- errors.MissingDesignDocNamedViewError,
- self.db.whats_changed)
-
- def test_deleted_design_doc_raises(self):
- """
- Test that all methods that access design documents will raise if the
- design docs are not present.
- """
- self.create_db(ensure=True)
- # delete _design/docs
- del self.db._database['_design/docs']
- # delete _design/syncs
- del self.db._database['_design/syncs']
- # delete _design/transactions
- del self.db._database['_design/transactions']
- # get_generation_info()
- self.assertRaises(
- errors.MissingDesignDocDeletedError,
- self.db.get_generation_info)
- # get_trans_id_for_gen()
- self.assertRaises(
- errors.MissingDesignDocDeletedError,
- self.db.get_trans_id_for_gen, 1)
- # get_transaction_log()
- self.assertRaises(
- errors.MissingDesignDocDeletedError,
- self.db.get_transaction_log)
- # whats_changed()
- self.assertRaises(
- errors.MissingDesignDocDeletedError,
- self.db.whats_changed)
-
- def test_ensure_ddoc_independently(self):
- """
- Test that a missing ddocs other than _design/docs will be ensured
- even if _design/docs is there.
- """
- self.create_db(ensure=True)
- del self.db._database['_design/transactions']
- self.assertRaises(
- errors.MissingDesignDocDeletedError,
- self.db.get_transaction_log)
- self.create_db(ensure=True, dbname=self.db._dbname)
- self.db.get_transaction_log()
-
def test_ensure_security_doc(self):
"""
Ensure_security creates a _security ddoc to ensure that only soledad
will have the lowest privileged access to an user db.
"""
- self.create_db(ensure=False)
self.assertFalse(self.db._database.resource.get_json('_security')[2])
self.db.ensure_security_ddoc()
security_ddoc = self.db._database.resource.get_json('_security')[2]
@@ -190,7 +42,6 @@ class CouchDesignDocsTests(CouchDBTestCase):
"""
Given a configuration, follow it to create the security document
"""
- self.create_db(ensure=False)
configuration = {'members': ['user1', 'user2'],
'members_roles': ['role1', 'role2'],
'admins': ['admin'],
diff --git a/testing/tests/couch/test_state.py b/testing/tests/couch/test_state.py
new file mode 100644
index 00000000..e293b5b8
--- /dev/null
+++ b/testing/tests/couch/test_state.py
@@ -0,0 +1,25 @@
+import pytest
+
+from leap.soledad.common.couch import CONFIG_DOC_ID
+from leap.soledad.common.couch import SCHEMA_VERSION
+from leap.soledad.common.couch import SCHEMA_VERSION_KEY
+from leap.soledad.common.couch.state import CouchServerState
+
+from leap.soledad.common.errors import WrongCouchSchemaVersionError
+from leap.soledad.common.errors import MissingCouchConfigDocumentError
+
+
+def test_wrong_couch_version_raises(db):
+ wrong_schema_version = SCHEMA_VERSION + 1
+ db.database.create(
+ {'_id': CONFIG_DOC_ID, SCHEMA_VERSION_KEY: wrong_schema_version})
+ with pytest.raises(WrongCouchSchemaVersionError):
+ CouchServerState(db.couch_url, create_cmd='/bin/echo',
+ check_schema_versions=True)
+
+
+def test_missing_config_doc_raises(db):
+ db.database.create({})
+ with pytest.raises(MissingCouchConfigDocumentError):
+ CouchServerState(db.couch_url, create_cmd='/bin/echo',
+ check_schema_versions=True)
diff --git a/testing/tests/perf/assets/cert_default.conf b/testing/tests/perf/assets/cert_default.conf
new file mode 100644
index 00000000..8043cea3
--- /dev/null
+++ b/testing/tests/perf/assets/cert_default.conf
@@ -0,0 +1,15 @@
+[ req ]
+default_bits = 1024
+default_keyfile = keyfile.pem
+distinguished_name = req_distinguished_name
+prompt = no
+output_password = mypass
+
+[ req_distinguished_name ]
+C = GB
+ST = Test State or Province
+L = Test Locality
+O = Organization Name
+OU = Organizational Unit Name
+CN = localhost
+emailAddress = test@email.address
diff --git a/testing/tests/perf/conftest.py b/testing/tests/perf/conftest.py
new file mode 100644
index 00000000..5ac1f3c0
--- /dev/null
+++ b/testing/tests/perf/conftest.py
@@ -0,0 +1,249 @@
+import json
+import os
+import pytest
+import requests
+import random
+import base64
+import signal
+import time
+
+from hashlib import sha512
+from uuid import uuid4
+from subprocess import call
+from urlparse import urljoin
+from twisted.internet import threads, reactor
+
+from leap.soledad.client import Soledad
+from leap.soledad.common.couch import CouchDatabase
+
+
+# we have to manually set up the events server in order to be able to signal
+# events. This is usually done by the enclosing application using the soledad
+# client (e.g. the bitmask client).
+from leap.common.events import server
+server.ensure_server()
+
+
+def pytest_addoption(parser):
+ parser.addoption(
+ "--couch-url", type="string", default="http://127.0.0.1:5984",
+ help="the url for the couch server to be used during tests")
+ parser.addoption(
+ "--num-docs", type="int", default=100,
+ help="the number of documents to use in performance tests")
+
+
+#
+# default options for all tests
+#
+
+DEFAULT_PASSPHRASE = '123'
+
+DEFAULT_URL = 'http://127.0.0.1:2424'
+DEFAULT_PRIVKEY = 'soledad_privkey.pem'
+DEFAULT_CERTKEY = 'soledad_certkey.pem'
+DEFAULT_TOKEN = 'an-auth-token'
+
+
+@pytest.fixture()
+def payload():
+ def generate(size):
+ random.seed(1337) # same seed to avoid different bench results
+ payload_bytes = bytearray(random.getrandbits(8) for _ in xrange(size))
+ # encode as base64 to avoid ascii encode/decode errors
+ return base64.b64encode(payload_bytes)[:size] # remove b64 overhead
+ return generate
+
+
+#
+# soledad_dbs fixture: provides all databases needed by soledad server in a per
+# module scope (same databases for all tests in this module).
+#
+
+def _token_dbname():
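+ # the tokens db name embeds the number of 30-day periods since the
+ # epoch, matching the server's monthly rotation of tokens databases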
+ dbname = 'tokens_' + \
+ str(int(time.time() / (30 * 24 * 3600)))
+ return dbname
+
+
+class SoledadDatabases(object):
+
+ def __init__(self, url):
+ self._token_db_url = urljoin(url, _token_dbname())
+ self._shared_db_url = urljoin(url, 'shared')
+
+ def setup(self, uuid):
+ self._create_dbs()
+ self._add_token(uuid)
+
+ def _create_dbs(self):
+ requests.put(self._token_db_url)
+ requests.put(self._shared_db_url)
+
+ def _add_token(self, uuid):
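+ # the token document id is the sha512 hash of the token string, which
+ # is what the server looks up when authenticating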
+ token = sha512(DEFAULT_TOKEN).hexdigest()
+ content = {'type': 'Token', 'user_id': uuid}
+ requests.put(
+ self._token_db_url + '/' + token, data=json.dumps(content))
+
+ def teardown(self):
+ requests.delete(self._token_db_url)
+ requests.delete(self._shared_db_url)
+
+
+@pytest.fixture()
+def soledad_dbs(request):
+ couch_url = request.config.option.couch_url
+
+ def create(uuid):
+ db = SoledadDatabases(couch_url)
+ request.addfinalizer(db.teardown)
+ return db.setup(uuid)
+ return create
+
+
+#
+# remote_db fixture: provides an empty database for a given user in a per
+# function scope.
+#
+
+class UserDatabase(object):
+
+ def __init__(self, url, uuid):
+ self._remote_db_url = urljoin(url, 'user-%s' % uuid)
+
+ def setup(self):
+ return CouchDatabase.open_database(
+ url=self._remote_db_url, create=True, replica_uid=None)
+
+ def teardown(self):
+ requests.delete(self._remote_db_url)
+
+
+@pytest.fixture()
+def remote_db(request):
+ couch_url = request.config.option.couch_url
+
+ def create(uuid):
+ db = UserDatabase(couch_url, uuid)
+ request.addfinalizer(db.teardown)
+ return db.setup()
+ return create
+
+
+def get_pid(pidfile):
+ if not os.path.isfile(pidfile):
+ return 0
+ try:
+ with open(pidfile) as f:
+ return int(f.read())
+ except IOError:
+ return 0
+
+
+#
+# soledad_server fixture: provides a running soledad server in a per module
+# context (same soledad server for all tests in this module).
+#
+
+class SoledadServer(object):
+
+ def __init__(self, tmpdir_factory, couch_url):
+ tmpdir = tmpdir_factory.mktemp('soledad-server')
+ self._pidfile = os.path.join(tmpdir.strpath, 'soledad-server.pid')
+ self._logfile = os.path.join(tmpdir.strpath, 'soledad-server.log')
+ self._couch_url = couch_url
+
+ def start(self):
+ self._create_conf_file()
+ # start the server
+ call([
+ 'twistd',
+ '--logfile=%s' % self._logfile,
+ '--pidfile=%s' % self._pidfile,
+ 'web',
+ '--wsgi=leap.soledad.server.application',
+ '--port=2424'
+ ])
+
+ def _create_conf_file(self):
+ if not os.access('/etc', os.W_OK):
+ return
+ if not os.path.isdir('/etc/soledad'):
+ os.mkdir('/etc/soledad')
+ with open('/etc/soledad/soledad-server.conf', 'w') as f:
+ content = '[soledad-server]\ncouch_url = %s' % self._couch_url
+ f.write(content)
+
+ def stop(self):
+ pid = get_pid(self._pidfile)
+ os.kill(pid, signal.SIGKILL)
+
+
+@pytest.fixture(scope='module')
+def soledad_server(tmpdir_factory, request):
+ couch_url = request.config.option.couch_url
+ server = SoledadServer(tmpdir_factory, couch_url)
+ server.start()
+ request.addfinalizer(server.stop)
+ return server
+
+
+@pytest.fixture()
+def txbenchmark(benchmark):
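+ # the benchmark itself runs on a thread-pool thread; every timed call
+ # is dispatched to the reactor thread with blockingCallFromThread, so
+ # twisted code can be measured without blocking the reactor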
+ def blockOnThread(*args, **kwargs):
+ return threads.deferToThread(
+ benchmark, threads.blockingCallFromThread,
+ reactor, *args, **kwargs)
+ return blockOnThread
+
+
+@pytest.fixture()
+def txbenchmark_with_setup(benchmark):
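+ # same idea as txbenchmark, but wraps benchmark.pedantic so an untimed
+ # setup function can run on the reactor thread before each round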
+ def blockOnThreadWithSetup(setup, f):
+ def blocking_runner(*args, **kwargs):
+ return threads.blockingCallFromThread(reactor, f, *args, **kwargs)
+
+ def blocking_setup():
+ args = threads.blockingCallFromThread(reactor, setup)
+ try:
+ return tuple(arg for arg in args), {}
+ except TypeError:
+ return ((args,), {}) if args else None
+
+ def bench():
+ return benchmark.pedantic(blocking_runner, setup=blocking_setup,
+ rounds=4, warmup_rounds=1)
+ return threads.deferToThread(bench)
+ return blockOnThreadWithSetup
+
+
+#
+# soledad_client fixture: provides a clean soledad client for a test function.
+#
+
+@pytest.fixture()
+def soledad_client(tmpdir, soledad_server, remote_db, soledad_dbs, request):
+ passphrase = DEFAULT_PASSPHRASE
+ server_url = DEFAULT_URL
+ token = DEFAULT_TOKEN
+ default_uuid = uuid4().hex
+ remote_db(default_uuid)
+ soledad_dbs(default_uuid)
+
+ # get a soledad instance
+ def create():
+ secrets_path = os.path.join(tmpdir.strpath, '%s.secret' % uuid4().hex)
+ local_db_path = os.path.join(tmpdir.strpath, '%s.db' % uuid4().hex)
+ soledad_client = Soledad(
+ default_uuid,
+ unicode(passphrase),
+ secrets_path=secrets_path,
+ local_db_path=local_db_path,
+ server_url=server_url,
+ cert_file=None,
+ auth_token=token,
+ defer_encryption=True)
+ request.addfinalizer(soledad_client.close)
+ return soledad_client
+ return create
diff --git a/testing/tests/perf/pytest.ini b/testing/tests/perf/pytest.ini
new file mode 100644
index 00000000..7a0508ce
--- /dev/null
+++ b/testing/tests/perf/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
+twisted = yes
diff --git a/testing/tests/perf/test_crypto.py b/testing/tests/perf/test_crypto.py
new file mode 100644
index 00000000..be00560b
--- /dev/null
+++ b/testing/tests/perf/test_crypto.py
@@ -0,0 +1,81 @@
+import pytest
+import json
+from uuid import uuid4
+from leap.soledad.common.document import SoledadDocument
+from leap.soledad.client.crypto import encrypt_sym
+from leap.soledad.client.crypto import decrypt_sym
+
+
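+# each factory below returns a benchmark test for a single payload size;
+# binding the results to module-level names makes pytest collect one test
+# per size.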
+def create_doc_encryption(size):
+ @pytest.mark.benchmark(group="test_crypto_encrypt_doc")
+ def test_doc_encryption(soledad_client, benchmark, payload):
+ crypto = soledad_client()._crypto
+
+ DOC_CONTENT = {'payload': payload(size)}
+ doc = SoledadDocument(
+ doc_id=uuid4().hex, rev='rev',
+ json=json.dumps(DOC_CONTENT))
+
+ benchmark(crypto.encrypt_doc, doc)
+ return test_doc_encryption
+
+
+def create_doc_decryption(size):
+ @pytest.mark.benchmark(group="test_crypto_decrypt_doc")
+ def test_doc_decryption(soledad_client, benchmark, payload):
+ crypto = soledad_client()._crypto
+
+ DOC_CONTENT = {'payload': payload(size)}
+ doc = SoledadDocument(
+ doc_id=uuid4().hex, rev='rev',
+ json=json.dumps(DOC_CONTENT))
+ encrypted_doc = crypto.encrypt_doc(doc)
+ doc.set_json(encrypted_doc)
+
+ benchmark(crypto.decrypt_doc, doc)
+ return test_doc_decryption
+
+
+test_encrypt_doc_10k = create_doc_encryption(10*1000)
+test_encrypt_doc_100k = create_doc_encryption(100*1000)
+test_encrypt_doc_500k = create_doc_encryption(500*1000)
+test_encrypt_doc_1M = create_doc_encryption(1000*1000)
+test_encrypt_doc_10M = create_doc_encryption(10*1000*1000)
+test_encrypt_doc_50M = create_doc_encryption(50*1000*1000)
+test_decrypt_doc_10k = create_doc_decryption(10*1000)
+test_decrypt_doc_100k = create_doc_decryption(100*1000)
+test_decrypt_doc_500k = create_doc_decryption(500*1000)
+test_decrypt_doc_1M = create_doc_decryption(1000*1000)
+test_decrypt_doc_10M = create_doc_decryption(10*1000*1000)
+test_decrypt_doc_50M = create_doc_decryption(50*1000*1000)
+
+
+def create_raw_encryption(size):
+ @pytest.mark.benchmark(group="test_crypto_raw_encrypt")
+ def test_raw_encrypt(benchmark, payload):
+ key = payload(32)
+ benchmark(encrypt_sym, payload(size), key)
+ return test_raw_encrypt
+
+
+def create_raw_decryption(size):
+ @pytest.mark.benchmark(group="test_crypto_raw_decrypt")
+ def test_raw_decrypt(benchmark, payload):
+ key = payload(32)
+ iv, ciphertext = encrypt_sym(payload(size), key)
+ benchmark(decrypt_sym, ciphertext, key, iv)
+ return test_raw_decrypt
+
+
+test_encrypt_raw_10k = create_raw_encryption(10*1000)
+test_encrypt_raw_100k = create_raw_encryption(100*1000)
+test_encrypt_raw_500k = create_raw_encryption(500*1000)
+test_encrypt_raw_1M = create_raw_encryption(1000*1000)
+test_encrypt_raw_10M = create_raw_encryption(10*1000*1000)
+test_encrypt_raw_50M = create_raw_encryption(50*1000*1000)
+test_decrypt_raw_10k = create_raw_decryption(10*1000)
+test_decrypt_raw_100k = create_raw_decryption(100*1000)
+test_decrypt_raw_500k = create_raw_decryption(500*1000)
+test_decrypt_raw_1M = create_raw_decryption(1000*1000)
+test_decrypt_raw_10M = create_raw_decryption(10*1000*1000)
+test_decrypt_raw_50M = create_raw_decryption(50*1000*1000)
diff --git a/testing/tests/perf/test_encdecpool.py b/testing/tests/perf/test_encdecpool.py
new file mode 100644
index 00000000..77091a41
--- /dev/null
+++ b/testing/tests/perf/test_encdecpool.py
@@ -0,0 +1,78 @@
+import pytest
+import json
+from uuid import uuid4
+from twisted.internet.defer import gatherResults
+from leap.soledad.client.encdecpool import SyncEncrypterPool
+from leap.soledad.client.encdecpool import SyncDecrypterPool
+from leap.soledad.common.document import SoledadDocument
+# FIXME: test load is low due to issue #7370; higher values will run out of memory
+
+
+def create_encrypt(amount, size):
+ @pytest.mark.benchmark(group="test_pool_encrypt")
+ @pytest.inlineCallbacks
+ def test(soledad_client, txbenchmark_with_setup, request, payload):
+ DOC_CONTENT = {'payload': payload(size)}
+
+ def setup():
+ client = soledad_client()
+ pool = SyncEncrypterPool(client._crypto, client._sync_db)
+ pool.start()
+ request.addfinalizer(pool.stop)
+ docs = [
+ SoledadDocument(doc_id=uuid4().hex, rev='rev',
+ json=json.dumps(DOC_CONTENT))
+ for _ in xrange(amount)
+ ]
+ return pool, docs
+
+ @pytest.inlineCallbacks
+ def put_and_wait(pool, docs):
+ yield gatherResults([pool.encrypt_doc(doc) for doc in docs])
+
+ yield txbenchmark_with_setup(setup, put_and_wait)
+ return test
+
+test_encdecpool_encrypt_100_10k = create_encrypt(100, 10*1000)
+test_encdecpool_encrypt_100_100k = create_encrypt(100, 100*1000)
+test_encdecpool_encrypt_100_500k = create_encrypt(100, 500*1000)
+
+
+def create_decrypt(amount, size):
+ @pytest.mark.benchmark(group="test_pool_decrypt")
+ @pytest.inlineCallbacks
+ def test(soledad_client, txbenchmark_with_setup, request, payload):
+ DOC_CONTENT = {'payload': payload(size)}
+ client = soledad_client()
+
+ def setup():
+ pool = SyncDecrypterPool(
+ client._crypto,
+ client._sync_db,
+ source_replica_uid=client._dbpool.replica_uid,
+ insert_doc_cb=lambda x, y, z: False) # ignored
+ pool.start(amount)
+ request.addfinalizer(pool.stop)
+ crypto = client._crypto
+ docs = []
+ for _ in xrange(amount):
+ doc = SoledadDocument(
+ doc_id=uuid4().hex, rev='rev',
+ json=json.dumps(DOC_CONTENT))
+ encrypted_content = json.loads(crypto.encrypt_doc(doc))
+ docs.append((doc.doc_id, encrypted_content))
+ return pool, docs
+
+ def put_and_wait(pool, docs):
+ deferreds = [] # fires on completion
+ for idx, (doc_id, content) in enumerate(docs, 1):
+ deferreds.append(pool.insert_encrypted_received_doc(
+ doc_id, 'rev', content, idx, "trans_id", idx))
+ return gatherResults(deferreds)
+
+ yield txbenchmark_with_setup(setup, put_and_wait)
+ return test
+
+test_encdecpool_decrypt_100_10k = create_decrypt(100, 10*1000)
+test_encdecpool_decrypt_100_100k = create_decrypt(100, 100*1000)
+test_encdecpool_decrypt_100_500k = create_decrypt(100, 500*1000)
diff --git a/testing/tests/perf/test_misc.py b/testing/tests/perf/test_misc.py
new file mode 100644
index 00000000..ead48adf
--- /dev/null
+++ b/testing/tests/perf/test_misc.py
@@ -0,0 +1,6 @@
+import pytest
+
+
+@pytest.mark.benchmark(group="test_instance")
+def test_initialization(soledad_client, benchmark):
+ benchmark(soledad_client)
diff --git a/testing/tests/perf/test_sqlcipher.py b/testing/tests/perf/test_sqlcipher.py
new file mode 100644
index 00000000..e7a54228
--- /dev/null
+++ b/testing/tests/perf/test_sqlcipher.py
@@ -0,0 +1,38 @@
+'''
+Tests SoledadClient/SQLCipher interaction
+'''
+import pytest
+
+from twisted.internet.defer import gatherResults
+
+
+def load_up(client, amount, payload, defer=True):
+ results = [client.create_doc({'content': payload}) for _ in xrange(amount)]
+ if defer:
+ return gatherResults(results)
+
+
+def build_test_sqlcipher_async_create(amount, size):
+ @pytest.inlineCallbacks
+ @pytest.mark.benchmark(group="test_sqlcipher_async_create")
+ def test(soledad_client, txbenchmark, payload):
+ client = soledad_client()
+ yield txbenchmark(load_up, client, amount, payload(size))
+ return test
+
+
+def build_test_sqlcipher_create(amount, size):
+ @pytest.mark.benchmark(group="test_sqlcipher_create")
+ def test(soledad_client, benchmark, payload):
+ client = soledad_client()._dbsyncer
+ benchmark(load_up, client, amount, payload(size), defer=False)
+ return test
+
+
+test_async_create_20_500k = build_test_sqlcipher_async_create(20, 500*1000)
+test_async_create_100_100k = build_test_sqlcipher_async_create(100, 100*1000)
+test_async_create_1000_10k = build_test_sqlcipher_async_create(1000, 10*1000)
+# synchronous
+test_create_20_500k = build_test_sqlcipher_create(20, 500*1000)
+test_create_100_100k = build_test_sqlcipher_create(100, 100*1000)
+test_create_1000_10k = build_test_sqlcipher_create(1000, 10*1000)
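
The asynchronous variants lean on `gatherResults` to fire all document creations concurrently and wait on the aggregate. A standalone illustration of that pattern (`make_doc` is a hypothetical stand-in for `client.create_doc`):

from twisted.internet.defer import gatherResults, maybeDeferred


def create_many(make_doc, amount):
    # fire all creations concurrently, then wait for every result
    deferreds = [maybeDeferred(make_doc, i) for i in xrange(amount)]
    return gatherResults(deferreds)

The synchronous variant skips the aggregation entirely: with `defer=False`, `load_up` issues the creations against `_dbsyncer` and returns nothing, so pytest-benchmark times the plain blocking calls.
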
diff --git a/testing/tests/perf/test_sync.py b/testing/tests/perf/test_sync.py
new file mode 100644
index 00000000..0b48a0b9
--- /dev/null
+++ b/testing/tests/perf/test_sync.py
@@ -0,0 +1,68 @@
+import pytest
+
+from twisted.internet.defer import gatherResults
+
+
+def load_up(client, amount, payload):
+ deferreds = []
+ # create a bunch of local documents
+ for i in xrange(amount):
+ d = client.create_doc({'content': payload})
+ deferreds.append(d)
+ d = gatherResults(deferreds)
+ d.addCallback(lambda _: None)
+ return d
+
+
+def create_upload(uploads, size):
+ @pytest.inlineCallbacks
+ @pytest.mark.benchmark(group="test_upload")
+ def test(soledad_client, txbenchmark_with_setup, payload):
+ client = soledad_client()
+
+ def setup():
+ return load_up(client, uploads, payload(size))
+
+ yield txbenchmark_with_setup(setup, client.sync)
+ return test
+
+
+test_upload_20_500k = create_upload(20, 500*1000)
+test_upload_100_100k = create_upload(100, 100*1000)
+test_upload_1000_10k = create_upload(1000, 10*1000)
+
+
+def create_download(downloads, size):
+ @pytest.inlineCallbacks
+ @pytest.mark.benchmark(group="test_download")
+ def test(soledad_client, txbenchmark_with_setup, payload):
+ client = soledad_client()
+
+ yield load_up(client, downloads, payload(size))
+ yield client.sync()
+ # We could create the docs directly in couch, but syncing them
+ # up ensures we are dealing with properly encrypted docs
+
+ def setup():
+ return soledad_client()
+
+ def sync(clean_client):
+ return clean_client.sync()
+ yield txbenchmark_with_setup(setup, sync)
+ return test
+
+
+test_download_20_500k = create_download(20, 500*1000)
+test_download_100_100k = create_download(100, 100*1000)
+test_download_1000_10k = create_download(1000, 10*1000)
+
+
+@pytest.inlineCallbacks
+@pytest.mark.benchmark(group="test_nothing_to_sync")
+def test_nothing_to_sync(soledad_client, txbenchmark_with_setup):
+ def setup():
+ return soledad_client()
+
+ def sync(clean_client):
+ return clean_client.sync()
+ yield txbenchmark_with_setup(setup, sync)
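
All of the benchmarks above drive `txbenchmark_with_setup`, a fixture defined outside this hunk. A hedged sketch of one way to build it, assuming pytest-twisted's `blockon` to wait on deferreds and pytest-benchmark's pedantic mode to keep setup out of the timed region (names and round counts are illustrative):

import pytest
import pytest_twisted
from twisted.internet import defer


@pytest.fixture
def txbenchmark_with_setup(benchmark):
    def runner(setup, f):
        def blocking_setup():
            # run setup outside the timed region and normalize its
            # result into the (args, kwargs) pair pedantic mode expects
            result = pytest_twisted.blockon(defer.maybeDeferred(setup))
            if result is None:
                return (), {}
            if isinstance(result, tuple):
                return result, {}
            return (result,), {}

        def blocking_run(*args, **kwargs):
            # block until the benchmarked deferred fires
            return pytest_twisted.blockon(
                defer.maybeDeferred(f, *args, **kwargs))

        return benchmark.pedantic(blocking_run, setup=blocking_setup,
                                  rounds=4, warmup_rounds=1)
    return runner
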
diff --git a/testing/tests/server/test_server.py b/testing/tests/server/test_server.py
index b99d1939..6bbcf002 100644
--- a/testing/tests/server/test_server.py
+++ b/testing/tests/server/test_server.py
@@ -20,7 +20,7 @@ Tests for server-related functionality.
import binascii
import mock
import os
-import tempfile
+import pytest
from hashlib import sha512
from pkg_resources import resource_filename
@@ -43,8 +43,8 @@ from test_soledad.util import (
from leap.soledad.common import crypto
from leap.soledad.client import Soledad
-from leap.soledad.server import load_configuration
-from leap.soledad.server import CONFIG_DEFAULTS
+from leap.soledad.server.config import load_configuration
+from leap.soledad.server.config import CONFIG_DEFAULTS
from leap.soledad.server.auth import URLToAuthorization
from leap.soledad.server.auth import SoledadTokenAuthMiddleware
@@ -287,6 +287,7 @@ class ServerAuthorizationTestCase(BaseSoledadTest):
self._make_environ('/%s/sync-from/x' % dbname, 'POST')))
+@pytest.mark.usefixtures("method_tmpdir")
class EncryptedSyncTestCase(
CouchDBTestCase, TestCaseWithServer):
@@ -349,11 +350,7 @@ class EncryptedSyncTestCase(
return self.make_app_with_state(self.request_state)
def setUp(self):
- # the order of the following initializations is crucial because of
- # dependencies.
- # XXX explain better
CouchDBTestCase.setUp(self)
- self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
TestCaseWithServer.setUp(self)
def tearDown(self):
@@ -391,8 +388,7 @@ class EncryptedSyncTestCase(
# ensure remote db exists before syncing
db = CouchDatabase.open_database(
urljoin(self.couch_url, 'user-' + user),
- create=True,
- ensure_ddocs=True)
+ create=True)
def _db1AssertEmptyDocList(results):
_, doclist = results
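
The `method_tmpdir` fixture replaces the hand-rolled `tempfile.mkdtemp` calls throughout these test cases. It is defined in conftest rather than in this hunk; a minimal sketch, assuming it only needs to expose pytest's per-test `tmpdir` on the unittest-style instance:

import pytest


@pytest.fixture
def method_tmpdir(request, tmpdir):
    # attach pytest's managed temporary directory to the TestCase
    # instance, so tests can keep using self.tempdir
    request.instance.tempdir = tmpdir.strpath
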
diff --git a/testing/tests/sqlcipher/test_backend.py b/testing/tests/sqlcipher/test_backend.py
index 11472d46..caacba0d 100644
--- a/testing/tests/sqlcipher/test_backend.py
+++ b/testing/tests/sqlcipher/test_backend.py
@@ -18,10 +18,9 @@
Test sqlcipher backend internals.
"""
import os
+import pytest
import time
import threading
-import tempfile
-import shutil
from pysqlcipher import dbapi2
from testscenarios import TestWithScenarios
@@ -33,7 +32,6 @@ from leap.soledad.common.l2db.backends.sqlite_backend \
import SQLitePartialExpandDatabase
# soledad stuff.
-from leap.soledad.common import soledad_assert
from leap.soledad.common.document import SoledadDocument
from leap.soledad.client.sqlcipher import SQLCipherDatabase
from leap.soledad.client.sqlcipher import SQLCipherOptions
@@ -109,6 +107,7 @@ class SQLCipherIndexTests(
# The following tests come from `u1db.tests.test_sqlite_backend`.
# -----------------------------------------------------------------------------
+@pytest.mark.usefixtures('method_tmpdir')
class TestSQLCipherDatabase(tests.TestCase):
"""
Tests from u1db.tests.test_sqlite_backend.TestSQLiteDatabase.
@@ -117,8 +116,7 @@ class TestSQLCipherDatabase(tests.TestCase):
def test_atomic_initialize(self):
# This test was modified to ensure that db2.close() is called within
# the thread that created the database.
- tmpdir = self.createTempDir()
- dbname = os.path.join(tmpdir, 'atomic.db')
+ dbname = os.path.join(self.tempdir, 'atomic.db')
t2 = None # will be a thread
@@ -164,6 +162,7 @@ class TestSQLCipherDatabase(tests.TestCase):
db1.close()
+@pytest.mark.usefixtures('method_tmpdir')
class TestSQLCipherPartialExpandDatabase(tests.TestCase):
"""
Tests from u1db.tests.test_sqlite_backend.TestSQLitePartialExpandDatabase.
@@ -226,8 +225,7 @@ class TestSQLCipherPartialExpandDatabase(tests.TestCase):
pass
def test__open_database_non_existent(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/non-existent.sqlite'
+ path = self.tempdir + '/non-existent.sqlite'
self.assertRaises(errors.DatabaseDoesNotExist,
sqlcipher_open,
path, PASSWORD, create=False)
@@ -243,8 +241,7 @@ class TestSQLCipherPartialExpandDatabase(tests.TestCase):
# This test was modified to ensure that an empty database file will
# raise a DatabaseIsNotEncrypted exception instead of a
# dbapi2.OperationalError exception.
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path1 = temp_dir + '/invalid1.db'
+ path1 = self.tempdir + '/invalid1.db'
with open(path1, 'wb') as f:
f.write("")
self.assertRaises(DatabaseIsNotEncrypted,
@@ -270,8 +267,7 @@ class TestSQLCipherPartialExpandDatabase(tests.TestCase):
def test_open_database_create(self):
# SQLCipherDatabase has no open_database() method, so we just test for
# the actual database constructor effects.
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/new.sqlite'
+ path = self.tempdir + '/new.sqlite'
db1 = sqlcipher_open(path, PASSWORD, create=True)
db2 = sqlcipher_open(path, PASSWORD, create=False)
self.assertIsInstance(db2, SQLCipherDatabase)
@@ -395,8 +391,7 @@ class TestSQLCipherPartialExpandDatabase(tests.TestCase):
c.fetchall())
def test__ensure_schema_rollback(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/rollback.db'
+ path = self.tempdir + '/rollback.db'
class SQLitePartialExpandDbTesting(SQLCipherDatabase):
@@ -414,15 +409,13 @@ class TestSQLCipherPartialExpandDatabase(tests.TestCase):
db._initialize(db._db_handle.cursor())
def test_open_database_non_existent(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/non-existent.sqlite'
+ path = self.tempdir + '/non-existent.sqlite'
self.assertRaises(errors.DatabaseDoesNotExist,
sqlcipher_open, path, "123",
create=False)
def test_delete_database_existent(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/new.sqlite'
+ path = self.tempdir + '/new.sqlite'
db = sqlcipher_open(path, "123", create=True)
db.close()
SQLCipherDatabase.delete_database(path)
@@ -431,8 +424,7 @@ class TestSQLCipherPartialExpandDatabase(tests.TestCase):
create=False)
def test_delete_database_nonexistent(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/non-existent.sqlite'
+ path = self.tempdir + '/non-existent.sqlite'
self.assertRaises(errors.DatabaseDoesNotExist,
SQLCipherDatabase.delete_database, path)
@@ -630,37 +622,13 @@ class SQLCipherEncryptionTests(BaseSoledadTest):
os.unlink(dbfile)
def setUp(self):
- # the following come from BaseLeapTest.setUpClass, because
- # twisted.trial doesn't support such class methods for setting up
- # test classes.
- self.old_path = os.environ['PATH']
- self.old_home = os.environ['HOME']
- self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
- self.home = self.tempdir
- bin_tdir = os.path.join(
- self.tempdir,
- 'bin')
- os.environ["PATH"] = bin_tdir
- os.environ["HOME"] = self.tempdir
- # this is our own stuff
+ BaseSoledadTest.setUp(self)
self.DB_FILE = os.path.join(self.tempdir, 'test.db')
self._delete_dbfiles()
def tearDown(self):
self._delete_dbfiles()
- # the following come from BaseLeapTest.tearDownClass, because
- # twisted.trial doesn't support such class methods for tearing down
- # test classes.
- os.environ["PATH"] = self.old_path
- os.environ["HOME"] = self.old_home
- # safety check! please do not wipe my home...
- # XXX needs to adapt to non-linuces
- soledad_assert(
- self.tempdir.startswith('/tmp/leap_tests-') or
- self.tempdir.startswith('/var/folder'),
- "beware! tried to remove a dir which does not "
- "live in temporal folder!")
- shutil.rmtree(self.tempdir)
+ BaseSoledadTest.tearDown(self)
def test_try_to_open_encrypted_db_with_sqlite_backend(self):
"""
diff --git a/testing/tests/sync/test_encdecpool.py b/testing/tests/sync/test_encdecpool.py
index 82e99a47..4a32885e 100644
--- a/testing/tests/sync/test_encdecpool.py
+++ b/testing/tests/sync/test_encdecpool.py
@@ -29,7 +29,6 @@ from leap.soledad.client.encdecpool import SyncDecrypterPool
from leap.soledad.common.document import SoledadDocument
from test_soledad.util import BaseSoledadTest
from twisted.internet import defer
-from twisted.test.proto_helpers import MemoryReactorClock
DOC_ID = "mydoc"
DOC_REV = "rev"
@@ -65,17 +64,11 @@ class TestSyncEncrypterPool(BaseSoledadTest):
"""
doc = SoledadDocument(
doc_id=DOC_ID, rev=DOC_REV, json=json.dumps(DOC_CONTENT))
- self._pool.encrypt_doc(doc)
- # exhaustivelly attempt to get the encrypted document
- encrypted = None
- attempts = 0
- while encrypted is None and attempts < 10:
- encrypted = yield self._pool.get_encrypted_doc(DOC_ID, DOC_REV)
- attempts += 1
+ yield self._pool.encrypt_doc(doc)
+ encrypted = yield self._pool.get_encrypted_doc(DOC_ID, DOC_REV)
self.assertIsNotNone(encrypted)
- self.assertTrue(attempts < 10)
class TestSyncDecrypterPool(BaseSoledadTest):
@@ -219,9 +212,6 @@ class TestSyncDecrypterPool(BaseSoledadTest):
This test ensures that processing of documents only occurs if there is
a sequence in place.
"""
- reactor_clock = MemoryReactorClock()
- self._pool._loop.clock = reactor_clock
-
crypto = self._soledad._crypto
docs = []
@@ -234,18 +224,19 @@ class TestSyncDecrypterPool(BaseSoledadTest):
docs.append((doc, encrypted_content))
# insert the encrypted document in the pool
- self._pool.start(10) # pool is expecting to process 10 docs
+ yield self._pool.start(10) # pool is expecting to process 10 docs
+ self._pool._loop.stop() # we are processing manually
# first three arrives, forming a sequence
for i, (doc, encrypted_content) in enumerate(docs[:3]):
gen = idx = i + 1
yield self._pool.insert_encrypted_received_doc(
doc.doc_id, doc.rev, encrypted_content, gen, "trans_id", idx)
+
# last one arrives alone, so it can't be processed
doc, encrypted_content = docs[-1]
yield self._pool.insert_encrypted_received_doc(
doc.doc_id, doc.rev, encrypted_content, 10, "trans_id", 10)
- reactor_clock.advance(self._pool.DECRYPT_LOOP_PERIOD)
yield self._pool._decrypt_and_recurse()
self.assertEqual(3, self._pool._processed_docs)
diff --git a/testing/tests/sync/test_sync.py b/testing/tests/sync/test_sync.py
index 095884ce..5290003e 100644
--- a/testing/tests/sync/test_sync.py
+++ b/testing/tests/sync/test_sync.py
@@ -15,7 +15,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
-import tempfile
import threading
import time
@@ -60,7 +59,6 @@ class InterruptableSyncTestCase(
def setUp(self):
TestCaseWithServer.setUp(self)
CouchDBTestCase.setUp(self)
- self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
def tearDown(self):
CouchDBTestCase.tearDown(self)
@@ -101,8 +99,7 @@ class InterruptableSyncTestCase(
# ensure remote db exists before syncing
db = couch.CouchDatabase.open_database(
urljoin(self.couch_url, 'user-user-uuid'),
- create=True,
- ensure_ddocs=True)
+ create=True)
# create interruptor thread
t = _SyncInterruptor(sol, db)
diff --git a/testing/tests/sync/test_sync_mutex.py b/testing/tests/sync/test_sync_mutex.py
index 787cfee8..2626ab2a 100644
--- a/testing/tests/sync/test_sync_mutex.py
+++ b/testing/tests/sync/test_sync_mutex.py
@@ -24,8 +24,6 @@ be two concurrent synchronization processes at the same time.
import time
import uuid
-import tempfile
-import shutil
from urlparse import urljoin
@@ -91,13 +89,11 @@ class TestSyncMutex(
def setUp(self):
TestCaseWithServer.setUp(self)
CouchDBTestCase.setUp(self)
- self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
self.user = ('user-%s' % uuid.uuid4().hex)
def tearDown(self):
CouchDBTestCase.tearDown(self)
TestCaseWithServer.tearDown(self)
- shutil.rmtree(self.tempdir)
def test_two_concurrent_syncs_do_not_overlap_no_docs(self):
self.startServer()
@@ -105,8 +101,7 @@ class TestSyncMutex(
# ensure remote db exists before syncing
db = CouchDatabase.open_database(
urljoin(self.couch_url, 'user-' + self.user),
- create=True,
- ensure_ddocs=True)
+ create=True)
sol = self._soledad_instance(
user=self.user, server_url=self.getURL())
diff --git a/testing/tox.ini b/testing/tox.ini
index 3663eef3..31cb8a4f 100644
--- a/testing/tox.ini
+++ b/testing/tox.ini
@@ -2,22 +2,49 @@
envlist = py27
[testenv]
-commands = py.test --pep8 {posargs}
-changedir = tests
+basepython = python2.7
+commands = py.test --cov-report=html \
+ --cov-report=term \
+ --cov=leap.soledad \
+ {posargs}
deps =
+ coverage
pytest
- pytest-flake8
- pytest-pep8
+ pytest-cov
+ pytest-twisted
mock
testscenarios
setuptools-trial
- pep8
pdbpp
couchdb
+ requests
# install soledad local packages
-e../common
-e../client
-e../server
setenv =
HOME=/tmp
+ TERM=xterm
+install_command = pip install {opts} {packages}
+
+[testenv:perf]
+deps =
+ {[testenv]deps}
+ pytest-benchmark
+commands = py.test tests/perf {posargs}
+
+[testenv:code-check]
+changedir = ..
+deps =
+ pep8
+ flake8
+commands =
+ pep8 client server common
+ flake8 --ignore=F812,E731 client server common
+
+[testenv:parallel]
+deps =
+ {[testenv]deps}
+ pytest-xdist
install_command = pip install {opts} {packages}
+commands = py.test {posargs} -n 4
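
With these environments in place, a bare `tox` run executes the default py27 suite with coverage reporting (HTML and terminal), `tox -e perf` runs the benchmarks under pytest-benchmark, `tox -e code-check` runs pep8 and flake8 from the repository root, and `tox -e parallel` distributes a run across 4 workers via pytest-xdist.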