author    Micah Anderson <micah@riseup.net>    2013-07-04 14:51:39 -0400
committer Micah Anderson <micah@riseup.net>    2013-07-04 14:51:39 -0400
commit    e7abf1080661b9795e012307d6134c4f5db24315 (patch)
tree      8d8285b8688064cf58e4bbfc9a87b1974a7cae48 /src/leap/soledad
parent    278275b20f717e9e01a4843a871c5fa8d104c2a4 (diff)
import 0.2.1 soledad, incorporating package split
Diffstat (limited to 'src/leap/soledad')
-rw-r--r--  src/leap/soledad/__init__.py  988
-rw-r--r--  src/leap/soledad/auth.py  71
-rw-r--r--  src/leap/soledad/backends/__init__.py  36
-rw-r--r--  src/leap/soledad/backends/couch.py  484
-rw-r--r--  src/leap/soledad/backends/leap_backend.py  546
-rw-r--r--  src/leap/soledad/backends/objectstore.py  296
-rw-r--r--  src/leap/soledad/backends/sqlcipher.py  653
-rw-r--r--  src/leap/soledad/crypto.py  153
-rw-r--r--  src/leap/soledad/server.py  388
-rw-r--r--  src/leap/soledad/shared_db.py  138
-rw-r--r--  src/leap/soledad/tests/__init__.py  267
-rw-r--r--  src/leap/soledad/tests/couchdb.ini.template  222
-rw-r--r--  src/leap/soledad/tests/test_couch.py  439
-rw-r--r--  src/leap/soledad/tests/test_crypto.py  286
-rw-r--r--  src/leap/soledad/tests/test_leap_backend.py  782
-rw-r--r--  src/leap/soledad/tests/test_server.py  239
-rw-r--r--  src/leap/soledad/tests/test_soledad.py  302
-rw-r--r--  src/leap/soledad/tests/test_sqlcipher.py  824
-rw-r--r--  src/leap/soledad/tests/u1db_tests/README  34
-rw-r--r--  src/leap/soledad/tests/u1db_tests/__init__.py  421
-rw-r--r--  src/leap/soledad/tests/u1db_tests/test_backends.py  1907
-rw-r--r--  src/leap/soledad/tests/u1db_tests/test_document.py  150
-rw-r--r--  src/leap/soledad/tests/u1db_tests/test_http_app.py  1135
-rw-r--r--  src/leap/soledad/tests/u1db_tests/test_http_client.py  363
-rw-r--r--  src/leap/soledad/tests/u1db_tests/test_http_database.py  260
-rw-r--r--  src/leap/soledad/tests/u1db_tests/test_https.py  124
-rw-r--r--  src/leap/soledad/tests/u1db_tests/test_open.py  69
-rw-r--r--  src/leap/soledad/tests/u1db_tests/test_remote_sync_target.py  317
-rw-r--r--  src/leap/soledad/tests/u1db_tests/test_sqlite_backend.py  494
-rw-r--r--  src/leap/soledad/tests/u1db_tests/test_sync.py  1242
-rw-r--r--  src/leap/soledad/tests/u1db_tests/testing-certs/Makefile  35
-rw-r--r--  src/leap/soledad/tests/u1db_tests/testing-certs/cacert.pem  58
-rw-r--r--  src/leap/soledad/tests/u1db_tests/testing-certs/testing.cert  61
-rw-r--r--  src/leap/soledad/tests/u1db_tests/testing-certs/testing.key  16
34 files changed, 0 insertions, 13800 deletions
diff --git a/src/leap/soledad/__init__.py b/src/leap/soledad/__init__.py
deleted file mode 100644
index c7f8cff3..00000000
--- a/src/leap/soledad/__init__.py
+++ /dev/null
@@ -1,988 +0,0 @@
-# -*- coding: utf-8 -*-
-# __init__.py
-# Copyright (C) 2013 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-"""
-Soledad - Synchronization Of Locally Encrypted Data Among Devices.
-
-Soledad is the part of LEAP that manages storage and synchronization of
-application data. It is built on top of the U1DB reference Python API and
-implements (1) a SQLCipher backend for local storage in the client, (2) a
-SyncTarget that encrypts data before syncing, and (3) a CouchDB backend for
-remote storage on the server side.
-"""
-
-import os
-import string
-import binascii
-import logging
-import urlparse
-import simplejson as json
-import scrypt
-import httplib
-import socket
-import ssl
-
-
-from xdg import BaseDirectory
-from hashlib import sha256
-from u1db.remote import http_client
-from u1db.remote.ssl_match_hostname import ( # noqa
- CertificateError,
- match_hostname,
-)
-
-
-from leap.common import events
-from leap.common.check import leap_assert
-from leap.common.files import mkdir_p
-from leap.soledad.backends import sqlcipher
-from leap.soledad.backends.leap_backend import (
- LeapDocument,
- LeapSyncTarget,
-)
-
-from leap.soledad import shared_db
-from leap.soledad.shared_db import SoledadSharedDatabase
-from leap.soledad.crypto import SoledadCrypto
-
-
-logger = logging.getLogger(name=__name__)
-
-
-SOLEDAD_CERT = None
-"""
-Path to the certificate file used to certify the SSL connection between
-Soledad client and server.
-"""
-
-SECRETS_DOC_ID_HASH_PREFIX = 'uuid-'
-
-
-#
-# Soledad: local encrypted storage and remote encrypted sync.
-#
-
-class Soledad(object):
- """
- Soledad provides encrypted data storage and sync.
-
- A Soledad instance is used to store and retrieve data in a local encrypted
- database and synchronize this database with Soledad server.
-
- This class is also responsible for bootstrapping the user's account by
- creating cryptographic secrets and/or storing/fetching them on the
- Soledad server.
-
- Soledad uses C{leap.common.events} to signal events. The possible events
- to be signaled are:
-
- SOLEDAD_CREATING_KEYS: emitted during bootstrap sequence when key
- generation starts.
- SOLEDAD_DONE_CREATING_KEYS: emitted during bootstrap sequence when key
- generation finishes.
- SOLEDAD_UPLOADING_KEYS: emitted during bootstrap sequence when soledad
- starts sending keys to server.
- SOLEDAD_DONE_UPLOADING_KEYS: emitted during bootstrap sequence when
- soledad finishes sending keys to server.
- SOLEDAD_DOWNLOADING_KEYS: emitted during bootstrap sequence when
- soledad starts to retrieve keys from server.
- SOLEDAD_DONE_DOWNLOADING_KEYS: emitted during bootstrap sequence when
- soledad finishes downloading keys from server.
- SOLEDAD_NEW_DATA_TO_SYNC: emitted upon call to C{need_sync()} when
- there's indeed new data to be synchronized between local database
- replica and server's replica.
- SOLEDAD_DONE_DATA_SYNC: emitted inside C{sync()} method when it has
- finished synchronizing with remote replica.
- """
-
- LOCAL_DATABASE_FILE_NAME = 'soledad.u1db'
- """
- The name of the local SQLCipher U1DB database file.
- """
-
- STORAGE_SECRETS_FILE_NAME = "soledad.json"
- """
- The name of the file where the storage secrets will be stored.
- """
-
- GENERATED_SECRET_LENGTH = 1024
- """
- The length of the generated secret used to derive keys for symmetric
- encryption for local and remote storage.
- """
-
- LOCAL_STORAGE_SECRET_LENGTH = 512
- """
- The length of the secret used to derive a passphrase for the SQLCipher
- database.
- """
-
- REMOTE_STORAGE_SECRET_LENGTH = \
- GENERATED_SECRET_LENGTH - LOCAL_STORAGE_SECRET_LENGTH
- """
- The length of the secret used to derive an encryption key and a MAC auth
- key for remote storage.
- """
-
- SALT_LENGTH = 64
- """
- The length of the salt used to derive the key for the storage secret
- encryption.
- """
-
- IV_SEPARATOR = ":"
- """
- A separator used for storing the encryption initial value prepended to the
- ciphertext.
- """
-
- UUID_KEY = 'uuid'
- STORAGE_SECRETS_KEY = 'storage_secrets'
- SECRET_KEY = 'secret'
- CIPHER_KEY = 'cipher'
- LENGTH_KEY = 'length'
- KDF_KEY = 'kdf'
- KDF_SALT_KEY = 'kdf_salt'
- KDF_LENGTH_KEY = 'kdf_length'
- """
- Keys used to access storage secrets in recovery documents.
- """
-
- DEFAULT_PREFIX = os.path.join(
- BaseDirectory.xdg_config_home,
- 'leap', 'soledad')
- """
- Prefix for default values for path.
- """
-
- def __init__(self, uuid, passphrase, secrets_path, local_db_path,
- server_url, cert_file, auth_token=None, secret_id=None):
- """
- Initialize configuration, cryptographic keys and dbs.
-
- @param uuid: User's uuid.
- @type uuid: str
- @param passphrase: The passphrase for locking and unlocking encryption
- secrets for local and remote storage.
- @type passphrase: str
- @param secrets_path: Path for storing encrypted key used for
- symmetric encryption.
- @type secrets_path: str
- @param local_db_path: Path for local encrypted storage db.
- @type local_db_path: str
- @param server_url: URL for the Soledad server. This is used both to
- sync with the user's remote db and to interact with the shared
- recovery database.
- @type server_url: str
- @param cert_file: Path to the SSL certificate to use in the
- connection to the server_url.
- @type cert_file: str
- @param auth_token: Authorization token for accessing remote databases.
- @type auth_token: str
- """
- # get config params
- self._uuid = uuid
- self._passphrase = passphrase
- # init crypto variables
- self._secrets = {}
- self._secret_id = secret_id
- # init config (possibly with default values)
- self._init_config(secrets_path, local_db_path, server_url)
- self._set_token(auth_token)
- # configure SSL certificate
- global SOLEDAD_CERT
- SOLEDAD_CERT = cert_file
- # initiate bootstrap sequence
- self._bootstrap()
-
- def _init_config(self, secrets_path, local_db_path, server_url):
- """
- Initialize configuration using default values for missing params.
- """
- # initialize secrets_path
- self._secrets_path = secrets_path
- if self._secrets_path is None:
- self._secrets_path = os.path.join(
- self.DEFAULT_PREFIX, self.STORAGE_SECRETS_FILE_NAME)
- # initialize local_db_path
- self._local_db_path = local_db_path
- if self._local_db_path is None:
- self._local_db_path = os.path.join(
- self.DEFAULT_PREFIX, self.LOCAL_DATABASE_FILE_NAME)
- # initialize server_url
- self._server_url = server_url
- leap_assert(
- self._server_url is not None,
- 'Missing URL for Soledad server.')
-
- #
- # initialization/destruction methods
- #
-
- def _bootstrap(self):
- """
- Bootstrap local Soledad instance.
-
- Soledad Client bootstrap is the following sequence of stages:
-
- * stage 0 - local environment setup.
- - directory initialization.
- - crypto submodule initialization.
- * stage 1 - secret generation/loading:
- - if secrets exist locally, load them.
- - else, if secrets exist on the server, download them.
- - else, generate a new secret.
- * stage 2 - store secrets on the server.
- * stage 3 - database initialization.
-
- This method decides which bootstrap stages have already been performed
- and performs the missing ones in order.
- """
- # TODO: make sure key storage always happens (even if this method is
- # interrupted).
- # TODO: write tests for bootstrap stages.
- # TODO: log each bootstrap step.
- # stage 0 - local environment setup
- self._init_dirs()
- self._crypto = SoledadCrypto(self)
- # stage 1 - secret generation/loading
- if not self._has_secret(): # try to load from local storage.
- logger.info(
- 'Trying to fetch cryptographic secrets from shared recovery '
- 'database...')
- # there are no secrets in local storage, so try to fetch encrypted
- # secrets from server.
- doc = self._get_secrets_from_shared_db()
- if doc:
- # found secrets in server, so import them.
- logger.info(
- 'Found cryptographic secrets in shared recovery '
- 'database.')
- self.import_recovery_document(doc.content)
- else:
- # there are no secrets on the server either, so generate a secret.
- logger.info(
- 'No cryptographic secrets found, creating new secrets...')
- self._set_secret_id(self._gen_secret())
- # Stage 2 - storage of encrypted secrets in the server.
- self._put_secrets_in_shared_db()
- # Stage 3 - Local database initialization
- self._init_db()
-
- def _init_dirs(self):
- """
- Create work directories.
-
- @raise OSError: in case a path exists and is not a directory.
- """
- paths = map(
- lambda x: os.path.dirname(x),
- [self._local_db_path, self._secrets_path])
- for path in paths:
- logger.info('Creating directory: %s.' % path)
- mkdir_p(path)
-
- def _init_db(self):
- """
- Initialize the U1DB SQLCipher database for local storage.
-
- The local storage passphrase is the hexlified version of the last
- C{LOCAL_STORAGE_SECRET_LENGTH} bytes of the storage secret.
- """
- self._db = sqlcipher.open(
- self._local_db_path,
- # storage secret is binary but sqlcipher passphrase must be string
- binascii.b2a_hex(
- self._get_storage_secret()[self.LOCAL_STORAGE_SECRET_LENGTH:]),
- create=True,
- document_factory=LeapDocument,
- crypto=self._crypto)
-
- def close(self):
- """
- Close underlying U1DB database.
- """
- if hasattr(self, '_db') and isinstance(
- self._db,
- sqlcipher.SQLCipherDatabase):
- self._db.close()
-
- def __del__(self):
- """
- Make sure local database is closed when object is destroyed.
- """
- self.close()
-
- #
- # Management of secret for symmetric encryption.
- #
-
- def _get_storage_secret(self):
- """
- Return the storage secret.
-
- Storage secret is encrypted before being stored. This method decrypts
- and returns the stored secret.
-
- @return: The storage secret.
- @rtype: str
- """
- # calculate the encryption key
- key = scrypt.hash(
- self._passphrase,
- # the salt is stored base64 encoded
- binascii.a2b_base64(
- self._secrets[self._secret_id][self.KDF_SALT_KEY]),
- buflen=32, # we need a key with 256 bits (32 bytes).
- )
- # recover the initial value and ciphertext
- iv, ciphertext = self._secrets[self._secret_id][self.SECRET_KEY].split(
- self.IV_SEPARATOR, 1)
- ciphertext = binascii.a2b_base64(ciphertext)
- return self._crypto.decrypt_sym(ciphertext, key, iv=iv)
-
- def _set_secret_id(self, secret_id):
- """
- Define the id of the storage secret to be used.
-
- @param secret_id: The id of the storage secret to be used.
- @type secret_id: str
- """
- self._secret_id = secret_id
-
- def _load_secrets(self):
- """
- Load storage secrets from local file.
-
- The content of the file has the following format:
-
- {
- "storage_secrets": {
- "<secret_id>": {
- 'kdf': 'scrypt',
- 'kdf_salt': '<b64 repr of salt>',
- 'kdf_length': <key length>,
- "cipher": "aes256",
- "length": <secret length>,
- "secret": "<encrypted storage_secret 1>",
- }
- }
- }
- """
- # does the file exist on disk?
- if not os.path.isfile(self._secrets_path):
- raise IOError('File does not exist: %s' % self._secrets_path)
- # read storage secrets from file
- content = None
- with open(self._secrets_path, 'r') as f:
- content = json.loads(f.read())
- self._secrets = content[self.STORAGE_SECRETS_KEY]
- # choose first secret if no secret_id was given
- if self._secret_id is None:
- self._set_secret_id(self._secrets.items()[0][0])
-
- def _has_secret(self):
- """
- Return whether there is a storage secret available for use or not.
-
- @return: Whether there's a storage secret for symmetric encryption.
- @rtype: bool
- """
- if self._secret_id is None or self._secret_id not in self._secrets:
- try:
- self._load_secrets() # try to load from disk
- except IOError, e:
- logger.error('IOError: %s' % str(e))
- try:
- self._get_storage_secret()
- return True
- except:
- return False
-
- def _gen_secret(self):
- """
- Generate a secret for symmetric encryption and store in a local
- encrypted file.
-
- This method emits the following signals:
-
- * leap.common.events.events_pb2.SOLEDAD_CREATING_KEYS
- * leap.common.events.events_pb2.SOLEDAD_DONE_CREATING_KEYS
-
- A secret has the following structure:
-
- {
- '<secret_id>': {
- 'kdf': 'scrypt',
- 'kdf_salt': '<b64 repr of salt>',
- 'kdf_length': <key length>,
- 'cipher': 'aes256',
- 'length': <secret length>,
- 'secret': '<encrypted b64 repr of storage_secret>',
- }
- }
-
- @return: The id of the generated secret.
- @rtype: str
- """
- events.signal(events.events_pb2.SOLEDAD_CREATING_KEYS, self._uuid)
- # generate random secret
- secret = os.urandom(self.GENERATED_SECRET_LENGTH)
- secret_id = sha256(secret).hexdigest()
- # generate random salt
- salt = os.urandom(self.SALT_LENGTH)
- # get a 256-bit key
- key = scrypt.hash(self._passphrase, salt, buflen=32)
- iv, ciphertext = self._crypto.encrypt_sym(secret, key)
- self._secrets[secret_id] = {
- # leap.soledad.crypto submodule uses AES256 for symmetric
- # encryption.
- self.KDF_KEY: 'scrypt', # TODO: remove hard coded kdf
- self.KDF_SALT_KEY: binascii.b2a_base64(salt),
- self.KDF_LENGTH_KEY: len(key),
- self.CIPHER_KEY: 'aes256', # TODO: remove hard coded cipher
- self.LENGTH_KEY: len(secret),
- self.SECRET_KEY: '%s%s%s' % (
- str(iv), self.IV_SEPARATOR, binascii.b2a_base64(ciphertext)),
- }
- self._store_secrets()
- events.signal(
- events.events_pb2.SOLEDAD_DONE_CREATING_KEYS, self._uuid)
- return secret_id
-
- def _store_secrets(self):
- """
- Store the secrets in the file at C{self._secrets_path}.
-
- The contents of the stored file have the following format:
-
- {
- 'storage_secrets': {
- '<secret_id>': {
- 'kdf': 'scrypt',
- 'kdf_salt': '<salt>',
- 'kdf_length': <len>,
- 'cipher': 'aes256',
- 'length': 1024,
- 'secret': '<encrypted storage_secret 1>',
- }
- }
- }
- """
- data = {
- self.STORAGE_SECRETS_KEY: self._secrets,
- }
- with open(self._secrets_path, 'w') as f:
- f.write(json.dumps(data))
-
- #
- # General crypto utility methods.
- #
-
- def _uuid_hash(self):
- """
- Calculate a hash for storing/retrieving key material on shared
- database, based on user's uuid.
-
- @return: the hash
- @rtype: str
- """
- return sha256(
- '%s%s' % (
- SECRETS_DOC_ID_HASH_PREFIX,
- self._uuid)).hexdigest()
-
- def _shared_db(self):
- """
- Return an instance of the shared recovery database object.
- """
- if self.server_url:
- return SoledadSharedDatabase.open_database(
- urlparse.urljoin(self.server_url, 'shared'),
- False, # TODO: eliminate need to create db here.
- creds=self._creds)
-
- def _get_secrets_from_shared_db(self):
- """
- Retrieve the document with encrypted key material from the shared
- database.
-
- @return: a document with encrypted key material in its contents
- @rtype: LeapDocument
- """
- events.signal(
- events.events_pb2.SOLEDAD_DOWNLOADING_KEYS, self._uuid)
- db = self._shared_db()
- if not db:
- logger.warning('No shared db found')
- return
- doc = db.get_doc(self._uuid_hash())
- events.signal(
- events.events_pb2.SOLEDAD_DONE_DOWNLOADING_KEYS, self._uuid)
- return doc
-
- def _put_secrets_in_shared_db(self):
- """
- Assert that local keys are the same as the shared db's.
-
- Try to fetch keys from the shared recovery database. If they already
- exist in the remote db, assert that the remote data is the same as
- the local data. Otherwise, upload the keys to the shared recovery
- database.
-
- """
- leap_assert(
- self._has_secret(),
- 'Tried to send keys to server but they don\'t exist in local '
- 'storage.')
- # try to get secrets doc from server, otherwise create it
- doc = self._get_secrets_from_shared_db()
- if doc is None:
- doc = LeapDocument(doc_id=self._uuid_hash())
- # fill doc with encrypted secrets
- doc.content = self.export_recovery_document(include_uuid=False)
- # upload secrets to server
- events.signal(
- events.events_pb2.SOLEDAD_UPLOADING_KEYS, self._uuid)
- db = self._shared_db()
- if not db:
- logger.warning('No shared db found')
- return
- db.put_doc(doc)
- events.signal(
- events.events_pb2.SOLEDAD_DONE_UPLOADING_KEYS, self._uuid)
-
- #
- # Document storage, retrieval and sync.
- #
-
- def put_doc(self, doc):
- """
- Update a document in the local encrypted database.
-
- @param doc: the document to update
- @type doc: LeapDocument
-
- @return: the new revision identifier for the document
- @rtype: str
- """
- return self._db.put_doc(doc)
-
- def delete_doc(self, doc):
- """
- Delete a document from the local encrypted database.
-
- @param doc: the document to delete
- @type doc: LeapDocument
-
- @return: the new revision identifier for the document
- @rtype: str
- """
- return self._db.delete_doc(doc)
-
- def get_doc(self, doc_id, include_deleted=False):
- """
- Retrieve a document from the local encrypted database.
-
- @param doc_id: the unique document identifier
- @type doc_id: str
- @param include_deleted: if True, deleted documents will be
- returned with empty content; otherwise asking for a deleted
- document will return None
- @type include_deleted: bool
-
- @return: the document object or None
- @rtype: LeapDocument
- """
- return self._db.get_doc(doc_id, include_deleted=include_deleted)
-
- def get_docs(self, doc_ids, check_for_conflicts=True,
- include_deleted=False):
- """
- Get the content for many documents.
-
- @param doc_ids: a list of document identifiers
- @type doc_ids: list
- @param check_for_conflicts: if set to False, the conflict check will
- be skipped, and 'None' will be returned instead of True/False
- @type check_for_conflicts: bool
-
- @return: iterable giving the Document object for each document id
- in matching doc_ids order.
- @rtype: generator
- """
- return self._db.get_docs(doc_ids,
- check_for_conflicts=check_for_conflicts,
- include_deleted=include_deleted)
-
- def get_all_docs(self, include_deleted=False):
- """Get the JSON content for all documents in the database.
-
- @param include_deleted: If set to True, deleted documents will be
- returned with empty content. Otherwise deleted documents will not
- be included in the results.
- @return: (generation, [Document])
- The current generation of the database, followed by a list of all
- the documents in the database.
- """
- return self._db.get_all_docs(include_deleted)
-
- def create_doc(self, content, doc_id=None):
- """
- Create a new document in the local encrypted database.
-
- @param content: the contents of the new document
- @type content: dict
- @param doc_id: an optional identifier specifying the document id
- @type doc_id: str
-
- @return: the new document
- @rtype: LeapDocument
- """
- return self._db.create_doc(content, doc_id=doc_id)
-
- def create_doc_from_json(self, json, doc_id=None):
- """
- Create a new document.
-
- You can optionally specify the document identifier, but the document
- must not already exist. See 'put_doc' if you want to override an
- existing document.
- If the database specifies a maximum document size and the document
- exceeds it, create will fail and raise a DocumentTooBig exception.
-
- @param json: The JSON document string
- @type json: str
- @param doc_id: An optional identifier specifying the document id.
- @type doc_id: str
- @return: The new document
- @rtype: LeapDocument
- """
- return self._db.create_doc_from_json(json, doc_id=doc_id)
-
- def create_index(self, index_name, *index_expressions):
- """
- Create a named index, which can then be queried for future lookups.
- Creating an index which already exists is not an error, and is cheap.
- Creating an index which does not match the index_expressions of the
- existing index is an error.
- Creating an index will block until the expressions have been evaluated
- and the index generated.
-
- @param index_name: A unique name which can be used as a key prefix
- @type index_name: str
- @param index_expressions: index expressions defining the index
- information.
- @type index_expressions: dict
-
- Examples:
-
- "fieldname", or "fieldname.subfieldname" to index alphabetically
- sorted on the contents of a field.
-
- "number(fieldname, width)", "lower(fieldname)"
- """
- return self._db.create_index(index_name, *index_expressions)
-
- def delete_index(self, index_name):
- """
- Remove a named index.
-
- @param index_name: The name of the index we are removing
- @type index_name: str
- """
- return self._db.delete_index(index_name)
-
- def list_indexes(self):
- """
- List the definitions of all known indexes.
-
- @return: A list of [('index-name', ['field', 'field2'])] definitions.
- @rtype: list
- """
- return self._db.list_indexes()
-
- def get_from_index(self, index_name, *key_values):
- """
- Return documents that match the keys supplied.
-
- You must supply exactly the same number of values as have been defined
- in the index. It is possible to do a prefix match by using '*' to
- indicate a wildcard match. You can only supply '*' to trailing entries,
- (eg 'val', '*', '*' is allowed, but '*', 'val', 'val' is not.)
- It is also possible to append a '*' to the last supplied value (eg
- 'val*', '*', '*' or 'val', 'val*', '*', but not 'val*', 'val', '*')
-
- @param index_name: The index to query
- @type index_name: str
- @param key_values: values to match. eg, if you have
- an index with 3 fields then you would have:
- get_from_index(index_name, val1, val2, val3)
- @type key_values: tuple
- @return: List of [Document]
- @rtype: list
- """
- return self._db.get_from_index(index_name, *key_values)
-
- def get_range_from_index(self, index_name, start_value, end_value):
- """
- Return documents that fall within the specified range.
-
- Both ends of the range are inclusive. For both start_value and
- end_value, one must supply exactly the same number of values as have
- been defined in the index, or pass None. In case of a single column
- index, a string is accepted as an alternative for a tuple with a single
- value. It is possible to do a prefix match by using '*' to indicate
- a wildcard match. You can only supply '*' to trailing entries, (eg
- 'val', '*', '*' is allowed, but '*', 'val', 'val' is not.) It is also
- possible to append a '*' to the last supplied value (eg 'val*', '*',
- '*' or 'val', 'val*', '*', but not 'val*', 'val', '*')
-
- @param index_name: The index to query
- @type index_name: str
- @param start_value: tuple of values that define the lower bound of
- the range. eg, if you have an index with 3 fields then you would
- have: (val1, val2, val3)
- @type start_value: tuple
- @param end_value: tuple of values that define the upper bound of the
- range. eg, if you have an index with 3 fields then you would have:
- (val1, val2, val3)
- @type end_value: tuple
- @return: List of [Document]
- @rtype: list
- """
- return self._db.get_range_from_index(
- index_name, start_value, end_value)
-
- def get_index_keys(self, index_name):
- """
- Return all keys under which documents are indexed in this index.
-
- @param index_name: The index to query
- @type index_name: str
- @return: A list of tuples of indexed keys.
- @rtype: list
- """
- return self._db.get_index_keys(index_name)
-
- def get_doc_conflicts(self, doc_id):
- """
- Get the list of conflicts for the given document.
-
- @param doc_id: the document id
- @type doc_id: str
-
- @return: a list of the document entries that are conflicted
- @rtype: list
- """
- return self._db.get_doc_conflicts(doc_id)
-
- def resolve_doc(self, doc, conflicted_doc_revs):
- """
- Mark a document as no longer conflicted.
-
- @param doc: a document with the new content to be inserted.
- @type doc: LeapDocument
- @param conflicted_doc_revs: a list of revisions that the new content
- supersedes.
- @type conflicted_doc_revs: list
- """
- return self._db.resolve_doc(doc, conflicted_doc_revs)
-
- def sync(self):
- """
- Synchronize the local encrypted replica with a remote replica.
-
- The url of the remote replica is derived from C{self.server_url}
- and the user's uuid.
-
- @return: the local generation before the synchronisation was
- performed.
- @rtype: int
- """
- local_gen = self._db.sync(
- urlparse.urljoin(self.server_url, 'user-%s' % self._uuid),
- creds=self._creds, autocreate=True)
- events.signal(events.events_pb2.SOLEDAD_DONE_DATA_SYNC, self._uuid)
- return local_gen
-
- def need_sync(self, url):
- """
- Return whether the local db replica differs from the replica at url.
-
- @param url: The remote replica to compare with local replica.
- @type url: str
-
- @return: Whether remote replica and local replica differ.
- @rtype: bool
- """
- target = LeapSyncTarget(url, creds=self._creds, crypto=self._crypto)
- info = target.get_sync_info(self._db._get_replica_uid())
- # compare source generation with target's last known source generation
- if self._db._get_generation() != info[4]:
- events.signal(
- events.events_pb2.SOLEDAD_NEW_DATA_TO_SYNC, self._uuid)
- return True
- return False
-
- def _set_token(self, token):
- """
- Set the authentication token for remote database access.
-
- Build the credentials dictionary with the following format:
-
- self._creds = {
- 'token': {
- 'uuid': '<uuid>',
- 'token': '<token>',
- }
- }
-
- @param token: The authentication token.
- @type token: str
- """
- self._creds = {
- 'token': {
- 'uuid': self._uuid,
- 'token': token,
- }
- }
-
- def _get_token(self):
- """
- Return current token from credentials dictionary.
- """
- return self._creds['token']['token']
-
- token = property(_get_token, _set_token, doc='The authentication Token.')
-
- #
- # Recovery document export and import methods
- #
- def export_recovery_document(self, include_uuid=True):
- """
- Export the storage secrets and (optionally) the uuid.
-
- A recovery document has the following structure:
-
- {
- self.STORAGE_SECRETS_KEY: <secrets dict>,
- self.UUID_KEY: '<uuid>', # (optional)
- }
-
- @param include_uuid: Should the uuid be included?
- @type include_uuid: bool
-
- @return: The recovery document.
- @rtype: dict
- """
- data = {self.STORAGE_SECRETS_KEY: self._secrets}
- if include_uuid:
- data[self.UUID_KEY] = self._uuid
- return data
-
- def import_recovery_document(self, data):
- """
- Import storage secrets for symmetric encryption and uuid (if present)
- from a recovery document.
-
- A recovery document has the following structure:
-
- {
- self.STORAGE_SECRETS_KEY: <secrets dict>,
- self.UUID_KEY: '<uuid>', # (optional)
- }
-
- @param data: The recovery document.
- @type data: dict
- """
- # include new secrets in our secret pool.
- for secret_id, secret_data in data[self.STORAGE_SECRETS_KEY].items():
- if secret_id not in self._secrets:
- self._secrets[secret_id] = secret_data
- self._store_secrets() # save new secrets in local file
- # set uuid if present
- if self.UUID_KEY in data:
- self._uuid = data[self.UUID_KEY]
- # choose the first secret to use if none is assigned
- if self._secret_id is None:
- self._set_secret_id(data[self.STORAGE_SECRETS_KEY].items()[0][0])
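
[Editorial aside: taken together, export_recovery_document() and import_recovery_document() give a device-migration path. A minimal sketch of the intended round trip follows; soledad_a and soledad_b are hypothetical, already-bootstrapped client instances, and transporting the document between devices is elided.]

    # Hypothetical round trip of a recovery document between two devices.
    recovery_doc = soledad_a.export_recovery_document(include_uuid=True)
    # ... transport recovery_doc out of band ...
    soledad_b.import_recovery_document(recovery_doc)
    # the importing device now shares a storage secret with the exporter
    assert soledad_b.secret_id in recovery_doc['storage_secrets']
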
-
- #
- # Setters/getters
- #
-
- def _get_uuid(self):
- return self._uuid
-
- uuid = property(_get_uuid, doc='The user uuid.')
-
- def _get_secret_id(self):
- return self._secret_id
-
- secret_id = property(
- _get_secret_id,
- doc='The active secret id.')
-
- def _get_secrets_path(self):
- return self._secrets_path
-
- secrets_path = property(
- _get_secrets_path,
- doc='The path for the file containing the encrypted symmetric secret.')
-
- def _get_local_db_path(self):
- return self._local_db_path
-
- local_db_path = property(
- _get_local_db_path,
- doc='The path for the local database replica.')
-
- def _get_server_url(self):
- return self._server_url
-
- server_url = property(
- _get_server_url,
- doc='The URL of the Soledad server.')
-
- storage_secret = property(
- _get_storage_secret,
- doc='The secret used for symmetric encryption.')
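
[Editorial aside: since the class above wraps the whole client lifecycle, a usage sketch may help. Everything here is illustrative: the uuid, token, paths and URLs are made-up placeholders, and a reachable Soledad server is assumed.]

    # Hypothetical client session; all parameter values are placeholders.
    sol = Soledad(
        uuid='some-user-uuid',
        passphrase='a strong passphrase',
        secrets_path='/tmp/soledad.json',
        local_db_path='/tmp/soledad.u1db',
        server_url='https://soledad.example.org:2323',
        cert_file='/tmp/cacert.pem',
        auth_token='some-session-token')  # bootstrap runs inside __init__
    doc = sol.create_doc({'subject': 'hello'})  # stored encrypted locally
    doc.content['body'] = 'world'
    sol.put_doc(doc)
    sol.sync()  # syncs with the user-<uuid> replica on the server
    sol.close()
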
-
-
-#-----------------------------------------------------------------------------
-# Monkey patching u1db to be able to provide a custom SSL cert
-#-----------------------------------------------------------------------------
-
-class VerifiedHTTPSConnection(httplib.HTTPSConnection):
- """HTTPSConnection verifying server side certificates."""
- # derived from httplib.py
-
- def connect(self):
- "Connect to a host on a given (SSL) port."
- sock = socket.create_connection((self.host, self.port),
- self.timeout, self.source_address)
- if self._tunnel_host:
- self.sock = sock
- self._tunnel()
-
- self.sock = ssl.wrap_socket(sock,
- ca_certs=SOLEDAD_CERT,
- cert_reqs=ssl.CERT_REQUIRED)
- match_hostname(self.sock.getpeercert(), self.host)
-
-
-old__VerifiedHTTPSConnection = http_client._VerifiedHTTPSConnection
-http_client._VerifiedHTTPSConnection = VerifiedHTTPSConnection
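
[Editorial aside: the monkey patch above exists because Python 2's httplib did not verify server certificates at all. On modern Python (3.x) the same CA pinning can be expressed without patching; a minimal sketch, where the helper name is illustrative and cert_path is assumed to point at a PEM CA bundle.]

    import http.client
    import ssl

    def make_verified_connection(host, port, cert_path):
        # Trust only the given CA bundle and verify the server hostname.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ctx.load_verify_locations(cafile=cert_path)
        ctx.verify_mode = ssl.CERT_REQUIRED
        ctx.check_hostname = True  # replaces the manual match_hostname() call
        return http.client.HTTPSConnection(host, port, context=ctx)
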
diff --git a/src/leap/soledad/auth.py b/src/leap/soledad/auth.py
deleted file mode 100644
index 8c093099..00000000
--- a/src/leap/soledad/auth.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# -*- coding: utf-8 -*-
-# auth.py
-# Copyright (C) 2013 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-"""
-Methods for token-based authentication.
-
-These methods have to be included in all classes that extend HTTPClient so
-they can do token-based auth requests to the Soledad server.
-"""
-
-
-from u1db.remote.http_client import HTTPClientBase
-from u1db import errors
-
-
-class TokenBasedAuth(object):
- """
- Encapsulate token-auth methods for classes that inherit from
- u1db.remote.http_client.HTTPClient.
- """
-
- def set_token_credentials(self, uuid, token):
- """
- Store given credentials so we can sign the request later.
-
- @param uuid: The user's uuid.
- @type uuid: str
- @param token: The authentication token.
- @type token: str
- """
- self._creds = {'token': (uuid, token)}
-
- def _sign_request(self, method, url_query, params):
- """
- Return an authorization header to be included in the HTTP request, in
- the form:
-
- [('Authorization', 'Token <base64 encoded creds>')]
-
- @param method: The HTTP method.
- @type method: str
- @param url_query: The URL query string.
- @type url_query: str
- @param params: A list with encoded query parameters.
- @type params: list
-
- @return: The Authorization header.
- @rtype: list of tuple
- """
- if 'token' in self._creds:
- uuid, token = self._creds['token']
- auth = '%s:%s' % (uuid, token)
- return [('Authorization', 'Token %s' % auth.encode('base64')[:-1])]
- else:
- raise errors.UnknownAuthMethod(
- 'Wrong credentials: %s' % self._creds)
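
[Editorial aside: for concreteness, the header produced by _sign_request is just 'Token ' plus the base64 of '<uuid>:<token>'. A self-contained sketch of both directions; make_auth_header and parse_auth_header are illustrative names, not part of the module.]

    from base64 import b64encode, b64decode

    def make_auth_header(uuid, token):
        # Same value _sign_request builds; b64encode() adds no trailing
        # newline, so no [:-1] slice is needed here.
        return [('Authorization',
                 'Token %s' % b64encode('%s:%s' % (uuid, token)))]

    def parse_auth_header(value):
        # Inverse operation a server could perform.
        scheme, b64creds = value.split(' ', 1)
        assert scheme == 'Token'
        uuid, token = b64decode(b64creds).split(':', 1)
        return uuid, token

    header = make_auth_header('some-uuid', 'some-token')
    print parse_auth_header(header[0][1])  # ('some-uuid', 'some-token')
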
diff --git a/src/leap/soledad/backends/__init__.py b/src/leap/soledad/backends/__init__.py
deleted file mode 100644
index 720a8118..00000000
--- a/src/leap/soledad/backends/__init__.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# -*- coding: utf-8 -*-
-# __init__.py
-# Copyright (C) 2013 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-"""
-Backends that extend U1DB functionality.
-"""
-
-from leap.soledad.backends import (
- objectstore,
- couch,
- sqlcipher,
- leap_backend,
-)
-
-
-__all__ = [
- 'objectstore',
- 'couch',
- 'sqlcipher',
- 'leap_backend',
-]
diff --git a/src/leap/soledad/backends/couch.py b/src/leap/soledad/backends/couch.py
deleted file mode 100644
index 57885012..00000000
--- a/src/leap/soledad/backends/couch.py
+++ /dev/null
@@ -1,484 +0,0 @@
-# -*- coding: utf-8 -*-
-# couch.py
-# Copyright (C) 2013 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-"""A U1DB backend that uses CouchDB as its persistence layer."""
-
-# general imports
-import uuid
-import re
-import simplejson as json
-
-
-from base64 import b64encode, b64decode
-from u1db import errors
-from u1db.sync import Synchronizer
-from u1db.backends.inmemory import InMemoryIndex
-from u1db.remote.server_state import ServerState
-from u1db.errors import DatabaseDoesNotExist
-from couchdb.client import Server, Document as CouchDocument
-from couchdb.http import ResourceNotFound
-
-
-from leap.soledad.backends.objectstore import (
- ObjectStoreDatabase,
- ObjectStoreSyncTarget,
-)
-from leap.soledad.backends.leap_backend import LeapDocument
-
-
-class InvalidURLError(Exception):
- """
- Exception raised when Soledad encounters a malformed URL.
- """
-
-
-class CouchDatabase(ObjectStoreDatabase):
- """
- A U1DB backend that uses Couch as its persistence layer.
- """
-
- U1DB_TRANSACTION_LOG_KEY = 'transaction_log'
- U1DB_CONFLICTS_KEY = 'conflicts'
- U1DB_OTHER_GENERATIONS_KEY = 'other_generations'
- U1DB_INDEXES_KEY = 'indexes'
- U1DB_REPLICA_UID_KEY = 'replica_uid'
-
- COUCH_ID_KEY = '_id'
- COUCH_REV_KEY = '_rev'
- COUCH_U1DB_ATTACHMENT_KEY = 'u1db_json'
- COUCH_U1DB_REV_KEY = 'u1db_rev'
-
- @classmethod
- def open_database(cls, url, create):
- """
- Open a U1DB database using CouchDB as backend.
-
- @param url: the url of the database replica
- @type url: str
- @param create: should the replica be created if it does not exist?
- @type create: bool
-
- @return: the database instance
- @rtype: CouchDatabase
- """
- # get database from url
- m = re.match('(^https?://[^/]+)/(.+)$', url)
- if not m:
- raise InvalidURLError
- url = m.group(1)
- dbname = m.group(2)
- server = Server(url=url)
- try:
- server[dbname]
- except ResourceNotFound:
- if not create:
- raise DatabaseDoesNotExist()
- return cls(url, dbname)
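
[Editorial aside: the regular expression above splits a replica URL into the server URL and the database name. A quick illustration with a made-up URL.]

    import re

    m = re.match('(^https?://[^/]+)/(.+)$', 'http://localhost:5984/user-db')
    print m.groups()  # ('http://localhost:5984', 'user-db')
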
-
- def __init__(self, url, dbname, replica_uid=None, full_commit=True,
- session=None):
- """
- Create a new Couch data container.
-
- @param url: the url of the couch database
- @type url: str
- @param dbname: the database name
- @type dbname: str
- @param replica_uid: an optional unique replica identifier
- @type replica_uid: str
- @param full_commit: turn on the X-Couch-Full-Commit header
- @type full_commit: bool
- @param session: an http.Session instance or None for a default session
- @type session: http.Session
- """
- self._url = url
- self._full_commit = full_commit
- self._session = session
- self._server = Server(url=self._url,
- full_commit=self._full_commit,
- session=self._session)
- self._dbname = dbname
- # this will ensure that transaction and sync logs exist and are
- # up-to-date.
- try:
- self._database = self._server[self._dbname]
- except ResourceNotFound:
- self._server.create(self._dbname)
- self._database = self._server[self._dbname]
- ObjectStoreDatabase.__init__(self, replica_uid=replica_uid,
- # TODO: move the factory choice
- # away
- document_factory=LeapDocument)
-
- #-------------------------------------------------------------------------
- # methods from Database
- #-------------------------------------------------------------------------
-
- def _get_doc(self, doc_id, check_for_conflicts=False):
- """
- Get just the document content, without fancy handling.
-
- @param doc_id: The unique document identifier
- @type doc_id: str
- @param check_for_conflicts: If set to True, the document will also
- be checked for conflicts.
- @type check_for_conflicts: bool
-
- @return: a Document object.
- @rtype: u1db.Document
- """
- cdoc = self._database.get(doc_id)
- if cdoc is None:
- return None
- has_conflicts = False
- if check_for_conflicts:
- has_conflicts = self._has_conflicts(doc_id)
- doc = self._factory(
- doc_id=doc_id,
- rev=cdoc[self.COUCH_U1DB_REV_KEY],
- has_conflicts=has_conflicts)
- contents = self._database.get_attachment(
- cdoc,
- self.COUCH_U1DB_ATTACHMENT_KEY)
- if contents:
- doc.content = json.loads(contents.read())
- else:
- doc.make_tombstone()
- return doc
-
- def get_all_docs(self, include_deleted=False):
- """
- Get the JSON content for all documents in the database.
-
- @param include_deleted: If set to True, deleted documents will be
- returned with empty content. Otherwise deleted documents will not
- be included in the results.
- @type include_deleted: bool
-
- @return: (generation, [Document])
- The current generation of the database, followed by a list of all
- the documents in the database.
- @rtype: tuple
- """
- generation = self._get_generation()
- results = []
- for doc_id in self._database:
- if doc_id == self.U1DB_DATA_DOC_ID:
- continue
- doc = self._get_doc(doc_id, check_for_conflicts=True)
- if doc.content is None and not include_deleted:
- continue
- results.append(doc)
- return (generation, results)
-
- def _put_doc(self, doc):
- """
- Update a document.
-
- This is called every time we just want to do a raw put on the db (i.e.
- without index updates, document constraint checks, and conflict
- checks).
-
- @param doc: The document to update.
- @type doc: u1db.Document
-
- @return: The new revision identifier for the document.
- @rtype: str
- """
- # prepare couch's Document
- cdoc = CouchDocument()
- cdoc[self.COUCH_ID_KEY] = doc.doc_id
- # we have to guarantee that couch's _rev is consistent
- old_cdoc = self._database.get(doc.doc_id)
- if old_cdoc is not None:
- cdoc[self.COUCH_REV_KEY] = old_cdoc[self.COUCH_REV_KEY]
- # store u1db's rev
- cdoc[self.COUCH_U1DB_REV_KEY] = doc.rev
- # save doc in db
- self._database.save(cdoc)
- # store u1db's content as json string
- if not doc.is_tombstone():
- self._database.put_attachment(
- cdoc, doc.get_json(),
- filename=self.COUCH_U1DB_ATTACHMENT_KEY)
- else:
- self._database.delete_attachment(
- cdoc,
- self.COUCH_U1DB_ATTACHMENT_KEY)
-
- def get_sync_target(self):
- """
- Return a SyncTarget object, for another u1db to synchronize with.
-
- @return: The sync target.
- @rtype: CouchSyncTarget
- """
- return CouchSyncTarget(self)
-
- def create_index(self, index_name, *index_expressions):
- """
- Create a named index, which can then be queried for future lookups.
-
- @param index_name: A unique name which can be used as a key prefix.
- @param index_expressions: Index expressions defining the index
- information.
- """
- if index_name in self._indexes:
- if self._indexes[index_name]._definition == list(
- index_expressions):
- return
- raise errors.IndexNameTakenError
- index = InMemoryIndex(index_name, list(index_expressions))
- for doc_id in self._database:
- if doc_id == self.U1DB_DATA_DOC_ID: # skip special file
- continue
- doc = self._get_doc(doc_id)
- if doc.content is not None:
- index.add_json(doc_id, doc.get_json())
- self._indexes[index_name] = index
- # save data in object store
- self._store_u1db_data()
-
- def close(self):
- """
- Release any resources associated with this database.
-
- @return: True if db was successfully closed.
- @rtype: bool
- """
- # TODO: fix this method so the connection is properly closed and
- # test_close (+tearDown, which deletes the db) works without problems.
- self._url = None
- self._full_commit = None
- self._session = None
- #self._server = None
- self._database = None
- return True
-
- def sync(self, url, creds=None, autocreate=True):
- """
- Synchronize documents with remote replica exposed at url.
-
- @param url: The url of the target replica to sync with.
- @type url: str
- @param creds: optional dictionary giving credentials.
- to authorize the operation with the server.
- @type creds: dict
- @param autocreate: Ask the target to create the db if non-existent.
- @type autocreate: bool
-
- @return: The local generation before the synchronisation was performed.
- @rtype: int
- """
- return Synchronizer(self, CouchSyncTarget(url, creds=creds)).sync(
- autocreate=autocreate)
-
- #-------------------------------------------------------------------------
- # methods from ObjectStoreDatabase
- #-------------------------------------------------------------------------
-
- def _init_u1db_data(self):
- """
- Initialize U1DB info data structure in the couch db.
-
- A U1DB database needs to keep track of all database transactions,
- document conflicts, the generation of other replicas it has seen,
- indexes created by users and so on.
-
- In this implementation, all this information is stored in a special
- document stored in the couch db with id equal to
- CouchDatabase.U1DB_DATA_DOC_ID.
-
- This method initializes the document that will hold such information.
- """
- if self._replica_uid is None:
- self._replica_uid = uuid.uuid4().hex
- # TODO: prevent user from overwriting a document with the same doc_id
- # as this one.
- doc = self._factory(doc_id=self.U1DB_DATA_DOC_ID)
- doc.content = {
- self.U1DB_TRANSACTION_LOG_KEY: b64encode(json.dumps([])),
- self.U1DB_CONFLICTS_KEY: b64encode(json.dumps({})),
- self.U1DB_OTHER_GENERATIONS_KEY: b64encode(json.dumps({})),
- self.U1DB_INDEXES_KEY: b64encode(json.dumps({})),
- self.U1DB_REPLICA_UID_KEY: b64encode(self._replica_uid),
- }
- self._put_doc(doc)
-
- def _fetch_u1db_data(self):
- """
- Fetch U1DB info from the couch db.
-
- See C{_init_u1db_data} documentation.
- """
- # retrieve u1db data from couch db
- cdoc = self._database.get(self.U1DB_DATA_DOC_ID)
- jsonstr = self._database.get_attachment(
- cdoc, self.COUCH_U1DB_ATTACHMENT_KEY).read()
- content = json.loads(jsonstr)
- # set u1db database info
- self._transaction_log = json.loads(
- b64decode(content[self.U1DB_TRANSACTION_LOG_KEY]))
- self._conflicts = json.loads(
- b64decode(content[self.U1DB_CONFLICTS_KEY]))
- self._other_generations = json.loads(
- b64decode(content[self.U1DB_OTHER_GENERATIONS_KEY]))
- self._indexes = self._load_indexes_from_json(
- b64decode(content[self.U1DB_INDEXES_KEY]))
- self._replica_uid = b64decode(content[self.U1DB_REPLICA_UID_KEY])
- # save couch _rev
- self._couch_rev = cdoc[self.COUCH_REV_KEY]
-
- def _store_u1db_data(self):
- """
- Store U1DB info in the couch db.
-
- See C{_init_u1db_data} documentation.
- """
- doc = self._factory(doc_id=self.U1DB_DATA_DOC_ID)
- doc.content = {
- # Here, the b64 encode ensures that document content
- # does not cause strange behaviour in couchdb because
- # of encoding.
- self.U1DB_TRANSACTION_LOG_KEY:
- b64encode(json.dumps(self._transaction_log)),
- self.U1DB_CONFLICTS_KEY: b64encode(json.dumps(self._conflicts)),
- self.U1DB_OTHER_GENERATIONS_KEY:
- b64encode(json.dumps(self._other_generations)),
- self.U1DB_INDEXES_KEY: b64encode(self._dump_indexes_as_json()),
- self.U1DB_REPLICA_UID_KEY: b64encode(self._replica_uid),
- self.COUCH_REV_KEY: self._couch_rev}
- self._put_doc(doc)
-
- #-------------------------------------------------------------------------
- # Couch specific methods
- #-------------------------------------------------------------------------
-
- INDEX_NAME_KEY = 'name'
- INDEX_DEFINITION_KEY = 'definition'
- INDEX_VALUES_KEY = 'values'
-
- def delete_database(self):
- """
- Delete a U1DB CouchDB database.
- """
- del(self._server[self._dbname])
-
- def _dump_indexes_as_json(self):
- """
- Dump index definitions as JSON string.
- """
- indexes = {}
- for name, idx in self._indexes.iteritems():
- indexes[name] = {}
- for attr in [self.INDEX_NAME_KEY, self.INDEX_DEFINITION_KEY,
- self.INDEX_VALUES_KEY]:
- indexes[name][attr] = getattr(idx, '_' + attr)
- return json.dumps(indexes)
-
- def _load_indexes_from_json(self, indexes):
- """
- Load index definitions from JSON string.
-
- @param indexes: A JSON serialization of a list of [('index-name',
- ['field', 'field2'])].
- @type indexes: str
-
- @return: A dictionary with the index definitions.
- @rtype: dict
- """
- dict = {}
- for name, idx_dict in json.loads(indexes).iteritems():
- idx = InMemoryIndex(name, idx_dict[self.INDEX_DEFINITION_KEY])
- idx._values = idx_dict[self.INDEX_VALUES_KEY]
- dict[name] = idx
- return dict
-
-
-class CouchSyncTarget(ObjectStoreSyncTarget):
- """
- Functionality for using a CouchDatabase as a synchronization target.
- """
-
-
-class CouchServerState(ServerState):
- """
- Interface of the WSGI server with the CouchDB backend.
- """
-
- def __init__(self, couch_url):
- self._couch_url = couch_url
-
- def open_database(self, dbname):
- """
- Open a couch database.
-
- @param dbname: The name of the database to open.
- @type dbname: str
-
- @return: The CouchDatabase object.
- @rtype: CouchDatabase
- """
- # TODO: open couch
- return CouchDatabase.open_database(
- self._couch_url + '/' + dbname,
- create=False)
-
- def ensure_database(self, dbname):
- """
- Ensure couch database exists.
-
- @param dbname: The name of the database to ensure.
- @type dbname: str
-
- @return: The CouchDatabase object and the replica uid.
- @rtype: (CouchDatabase, str)
- """
- db = CouchDatabase.open_database(
- self._couch_url + '/' + dbname,
- create=True)
- return db, db._replica_uid
-
- def delete_database(self, dbname):
- """
- Delete couch database.
-
- @param dbname: The name of the database to delete.
- @type dbname: str
- """
- CouchDatabase.delete_database(self._couch_url + '/' + dbname)
-
- def _set_couch_url(self, url):
- """
- Set the couchdb URL
-
- @param url: CouchDB URL
- @type url: str
- """
- self._couch_url = url
-
- def _get_couch_url(self):
- """
- Return CouchDB URL
-
- @rtype: str
- """
- return self._couch_url
-
- couch_url = property(_get_couch_url, _set_couch_url, doc='CouchDB URL')
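
[Editorial aside: worth noting from couch.py above: all U1DB bookkeeping (transaction log, conflicts, other generations, indexes, replica uid) lives base64-wrapped inside one special document, to keep CouchDB from mangling the JSON payloads. A minimal sketch of that wrapping, independent of CouchDB; stdlib json stands in for simplejson.]

    import json
    from base64 import b64encode, b64decode

    # Encode as done in _store_u1db_data ...
    transaction_log = [('doc-1', 'transaction-id-1')]
    stored = b64encode(json.dumps(transaction_log))

    # ... and decode as done in _fetch_u1db_data. Note the JSON round
    # trip turns tuples into lists.
    assert json.loads(b64decode(stored)) == [['doc-1', 'transaction-id-1']]
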
diff --git a/src/leap/soledad/backends/leap_backend.py b/src/leap/soledad/backends/leap_backend.py
deleted file mode 100644
index d92025db..00000000
--- a/src/leap/soledad/backends/leap_backend.py
+++ /dev/null
@@ -1,546 +0,0 @@
-# -*- coding: utf-8 -*-
-# leap_backend.py
-# Copyright (C) 2013 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-"""
-A U1DB backend that encrypts data before sending it to the server and
-decrypts it after receiving.
-"""
-
-import simplejson as json
-import hashlib
-import hmac
-import binascii
-
-
-from u1db import Document
-from u1db.remote import utils
-from u1db.errors import BrokenSyncStream
-from u1db.remote.http_target import HTTPSyncTarget
-
-
-from leap.common.crypto import (
- EncryptionMethods,
- UnknownEncryptionMethod,
- encrypt_sym,
- decrypt_sym,
-)
-from leap.common.check import leap_assert
-from leap.soledad.auth import TokenBasedAuth
-
-
-#
-# Exceptions
-#
-
-class DocumentNotEncrypted(Exception):
- """
- Raised for failures in document encryption.
- """
- pass
-
-
-class UnknownEncryptionScheme(Exception):
- """
- Raised when trying to decrypt from unknown encryption schemes.
- """
- pass
-
-
-class UnknownMacMethod(Exception):
- """
- Raised when trying to authenticate document's content with unknown MAC
- method.
- """
- pass
-
-
-class WrongMac(Exception):
- """
- Raised when failing to authenticate document's contents based on MAC.
- """
-
-
-#
-# Encryption schemes used for encryption.
-#
-
-class EncryptionSchemes(object):
- """
- Representation of encryption schemes used to encrypt documents.
- """
-
- NONE = 'none'
- SYMKEY = 'symkey'
- PUBKEY = 'pubkey'
-
-
-class MacMethods(object):
- """
- Representation of MAC methods used to authenticate document's contents.
- """
-
- HMAC = 'hmac'
-
-
-#
-# Crypto utilities for a LeapDocument.
-#
-
-ENC_JSON_KEY = '_enc_json'
-ENC_SCHEME_KEY = '_enc_scheme'
-ENC_METHOD_KEY = '_enc_method'
-ENC_IV_KEY = '_enc_iv'
-MAC_KEY = '_mac'
-MAC_METHOD_KEY = '_mac_method'
-
-
-def mac_doc(crypto, doc_id, doc_rev, ciphertext, mac_method):
- """
- Calculate a MAC for C{doc} using C{ciphertext}.
-
- Current MAC method used is HMAC, with the following parameters:
-
- * key: sha256(storage_secret, doc_id)
- * msg: doc_id + doc_rev + ciphertext
- * digestmod: sha256
-
- @param crypto: A SoledadCryto instance used to perform the encryption.
- @type crypto: leap.soledad.crypto.SoledadCrypto
- @param doc_id: The id of the document.
- @type doc_id: str
- @param doc_rev: The revision of the document.
- @type doc_rev: str
- @param ciphertext: The content of the document.
- @type ciphertext: str
- @param mac_method: The MAC method to use.
- @type mac_method: str
-
- @return: The calculated MAC.
- @rtype: str
- """
- if mac_method == MacMethods.HMAC:
- return hmac.new(
- crypto.doc_mac_key(doc_id),
- str(doc_id) + str(doc_rev) + ciphertext,
- hashlib.sha256).digest()
- # raise if we do not know how to handle this MAC method
- raise UnknownMacMethod('Unknown MAC method: %s.' % mac_method)
-
-
-def encrypt_doc(crypto, doc):
- """
- Encrypt C{doc}'s content.
-
- Encrypt doc's contents using AES-256 CTR mode and return a valid JSON
- string representing the following:
-
- {
- ENC_JSON_KEY: '<encrypted doc JSON string>',
- ENC_SCHEME_KEY: 'symkey',
- ENC_METHOD_KEY: EncryptionMethods.AES_256_CTR,
- ENC_IV_KEY: '<the initial value used to encrypt>',
- MAC_KEY: '<mac>',
- MAC_METHOD_KEY: 'hmac'
- }
-
- @param crypto: A SoledadCryto instance used to perform the encryption.
- @type crypto: leap.soledad.crypto.SoledadCrypto
- @param doc: The document with contents to be encrypted.
- @type doc: LeapDocument
-
- @return: The JSON serialization of the dict representing the encrypted
- content.
- @rtype: str
- """
- leap_assert(doc.is_tombstone() is False)
- # encrypt content using AES-256 CTR mode
- iv, ciphertext = encrypt_sym(
- doc.get_json(),
- crypto.doc_passphrase(doc.doc_id),
- method=EncryptionMethods.AES_256_CTR)
- # Return a representation for the encrypted content. In the following, we
- # convert binary data to hexadecimal representation so the JSON
- # serialization does not complain about what it tries to serialize.
- hex_ciphertext = binascii.b2a_hex(ciphertext)
- return json.dumps({
- ENC_JSON_KEY: hex_ciphertext,
- ENC_SCHEME_KEY: EncryptionSchemes.SYMKEY,
- ENC_METHOD_KEY: EncryptionMethods.AES_256_CTR,
- ENC_IV_KEY: iv,
- MAC_KEY: binascii.b2a_hex(mac_doc( # store the mac as hex.
- crypto, doc.doc_id, doc.rev,
- ciphertext,
- MacMethods.HMAC)),
- MAC_METHOD_KEY: MacMethods.HMAC,
- })
-
-
-def decrypt_doc(crypto, doc):
- """
- Decrypt C{doc}'s content.
-
- Return the JSON string representation of the document's decrypted content.
-
- The content of the document should have the following structure:
-
- {
- ENC_JSON_KEY: '<enc_blob>',
- ENC_SCHEME_KEY: '<enc_scheme>',
- ENC_METHOD_KEY: '<enc_method>',
- ENC_IV_KEY: '<initial value used to encrypt>', # (optional)
- MAC_KEY: '<mac>',
- MAC_METHOD_KEY: 'hmac'
- }
-
- C{enc_blob} is the encryption of the JSON serialization of the document's
- content. For now Soledad just deals with documents whose C{enc_scheme} is
- EncryptionSchemes.SYMKEY and C{enc_method} is
- EncryptionMethods.AES_256_CTR.
-
- @param crypto: A SoledadCryto instance to perform the encryption.
- @type crypto: leap.soledad.crypto.SoledadCrypto
- @param doc: The document to be decrypted.
- @type doc: LeapDocument
-
- @return: The JSON serialization of the decrypted content.
- @rtype: str
- """
- leap_assert(doc.is_tombstone() is False)
- leap_assert(ENC_JSON_KEY in doc.content)
- leap_assert(ENC_SCHEME_KEY in doc.content)
- leap_assert(ENC_METHOD_KEY in doc.content)
- leap_assert(MAC_KEY in doc.content)
- leap_assert(MAC_METHOD_KEY in doc.content)
- # verify MAC
- ciphertext = binascii.a2b_hex( # content is stored as hex.
- doc.content[ENC_JSON_KEY])
- mac = mac_doc(
- crypto, doc.doc_id, doc.rev,
- ciphertext,
- doc.content[MAC_METHOD_KEY])
- if binascii.a2b_hex(doc.content[MAC_KEY]) != mac: # mac is stored as hex.
- raise WrongMac('Could not authenticate document\'s contents.')
- # decrypt doc's content
- enc_scheme = doc.content[ENC_SCHEME_KEY]
- plainjson = None
- if enc_scheme == EncryptionSchemes.SYMKEY:
- enc_method = doc.content[ENC_METHOD_KEY]
- if enc_method == EncryptionMethods.AES_256_CTR:
- leap_assert(ENC_IV_KEY in doc.content)
- plainjson = decrypt_sym(
- ciphertext,
- crypto.doc_passphrase(doc.doc_id),
- method=enc_method,
- iv=doc.content[ENC_IV_KEY])
- else:
- raise UnknownEncryptionMethod(enc_method)
- else:
- raise UnknownEncryptionScheme(enc_scheme)
- return plainjson
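
[Editorial aside: the MAC scheme implemented by mac_doc() and checked in decrypt_doc() is plain HMAC-SHA256 over doc_id + doc_rev + ciphertext, keyed per document. A standalone sketch; the key below is a made-up placeholder for what crypto.doc_mac_key(doc_id) would return.]

    import binascii
    import hashlib
    import hmac

    doc_mac_key = 'placeholder-per-document-key'
    doc_id, doc_rev, ciphertext = 'doc-1', 'rev-1', '\x00\x01\x02'

    # MAC as computed by mac_doc() ...
    mac = hmac.new(
        doc_mac_key,
        str(doc_id) + str(doc_rev) + ciphertext,
        hashlib.sha256).digest()

    # ... stored hex-encoded under MAC_KEY and compared on decryption.
    stored = binascii.b2a_hex(mac)
    assert binascii.a2b_hex(stored) == mac
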
-
-
-class LeapDocument(Document):
- """
- Encryptable and syncable document.
-
- LEAP Documents can be flagged as syncable or not, so the replicas
- might not sync every document.
- """
-
- def __init__(self, doc_id=None, rev=None, json='{}', has_conflicts=False,
- syncable=True):
- """
- Container for handling an encryptable document.
-
- @param doc_id: The unique document identifier.
- @type doc_id: str
- @param rev: The revision identifier of the document.
- @type rev: str
- @param json: The JSON string for this document.
- @type json: str
- @param has_conflicts: Boolean indicating if this document has conflicts
- @type has_conflicts: bool
- @param syncable: Should this document be synced with remote replicas?
- @type syncable: bool
- """
- Document.__init__(self, doc_id, rev, json, has_conflicts)
- self._syncable = syncable
-
- def _get_syncable(self):
- """
- Return whether this document is syncable.
-
- @return: Is this document syncable?
- @rtype: bool
- """
- return self._syncable
-
- def _set_syncable(self, syncable=True):
- """
- Set whether this document should be synced with remote replicas.
-
- @param syncable: Should this document be synced with remote replicas?
- @type syncable: bool
- """
- self._syncable = syncable
-
- syncable = property(
- _get_syncable,
- _set_syncable,
- doc="Determine if document should be synced with server."
- )
-
- def _get_rev(self):
- """
- Get the document revision.
-
- Returning the revision as string solves the following exception in
- Twisted web:
- exceptions.TypeError: Can only pass-through bytes on Python 2
-
- @return: The document revision.
- @rtype: str
- """
- if self._rev is None:
- return None
- return str(self._rev)
-
- def _set_rev(self, rev):
- """
- Set document revision.
-
- @param rev: The new document revision.
- @type rev: bytes
- """
- self._rev = rev
-
- rev = property(
- _get_rev,
- _set_rev,
- doc="Wrapper to ensure `doc.rev` is always returned as bytes.")
-
-
-#
-# LeapSyncTarget
-#
-
-class LeapSyncTarget(HTTPSyncTarget, TokenBasedAuth):
- """
- A SyncTarget that encrypts data before sending and decrypts data after
- receiving.
- """
-
- #
- # Token auth methods.
- #
-
- def set_token_credentials(self, uuid, token):
- """
- Store given credentials so we can sign the request later.
-
- @param uuid: The user's uuid.
- @type uuid: str
- @param token: The authentication token.
- @type token: str
- """
- TokenBasedAuth.set_token_credentials(self, uuid, token)
-
- def _sign_request(self, method, url_query, params):
- """
- Return an authorization header to be included in the HTTP request.
-
- @param method: The HTTP method.
- @type method: str
- @param url_query: The URL query string.
- @type url_query: str
- @param params: A list with encoded query parameters.
- @type params: list
-
- @return: The Authorization header.
- @rtype: list of tuple
- """
- return TokenBasedAuth._sign_request(self, method, url_query, params)
-
- #
- # Modified HTTPSyncTarget methods.
- #
-
- @staticmethod
- def connect(url, crypto=None):
- return LeapSyncTarget(url, crypto=crypto)
-
- def __init__(self, url, creds=None, crypto=None):
- """
- Initialize the LeapSyncTarget.
-
- @param url: The url of the target replica to sync with.
- @type url: str
- @param creds: optional dictionary giving credentials
- to authorize the operation with the server.
- @type creds: dict
- @param crypto: An instance of SoledadCrypto so we can encrypt/decrypt
- document contents when syncing.
- @type crypto: leap.soledad.crypto.SoledadCrypto
- """
- HTTPSyncTarget.__init__(self, url, creds)
- self._crypto = crypto
-
- def _parse_sync_stream(self, data, return_doc_cb, ensure_callback=None):
- """
- Parse incoming synchronization stream and insert documents in the
- local database.
-
- If an incoming document's encryption scheme is equal to
- EncryptionSchemes.SYMKEY, then this method will decrypt it with
- Soledad's symmetric key.
-
- @param data: The body of the HTTP response.
- @type data: str
- @param return_doc_cb: A callback to insert docs from target.
- @type return_doc_cb: function
- @param ensure_callback: A callback to ensure we have the correct
- target_replica_uid, if it was just created.
- @type ensure_callback: function
-
- @raise BrokenSyncStream: If C{data} is malformed.
-
- @return: A dictionary representing the first line of the response
- received from the remote replica.
- @rtype: dict
- """
- parts = data.splitlines() # one at a time
- if not parts or parts[0] != '[':
- raise BrokenSyncStream
- data = parts[1:-1]
- comma = False
- if data:
- line, comma = utils.check_and_strip_comma(data[0])
- res = json.loads(line)
- if ensure_callback and 'replica_uid' in res:
- ensure_callback(res['replica_uid'])
- for entry in data[1:]:
- if not comma: # missing comma between entries
- raise BrokenSyncStream
- line, comma = utils.check_and_strip_comma(entry)
- entry = json.loads(line)
- #-------------------------------------------------------------
- # symmetric decryption of document's contents
- #-------------------------------------------------------------
- # if arriving content was symmetrically encrypted, we decrypt
- # it.
- doc = LeapDocument(entry['id'], entry['rev'], entry['content'])
- if doc.content and ENC_SCHEME_KEY in doc.content:
- if doc.content[ENC_SCHEME_KEY] == \
- EncryptionSchemes.SYMKEY:
- doc.set_json(decrypt_doc(self._crypto, doc))
- #-------------------------------------------------------------
- # end of symmetric decryption
- #-------------------------------------------------------------
- return_doc_cb(doc, entry['gen'], entry['trans_id'])
- if parts[-1] != ']':
- try:
- partdic = json.loads(parts[-1])
- except ValueError:
- pass
- else:
- if isinstance(partdic, dict):
- self._error(partdic)
- raise BrokenSyncStream
- if not data or comma: # no entries or bad extra comma
- raise BrokenSyncStream
- return res
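-
- # Illustrative only (an assumption sketched from the parsing logic
- # above, not taken from the original source): the sync stream is a
- # line-oriented JSON framing that looks roughly like
- #
- # [
- # {"new_generation": 1, "new_transaction_id": "T-...",
- # "replica_uid": "..."},
- # {"id": "doc-1", "rev": "...", "content": "...", "gen": 1,
- # "trans_id": "T-..."}
- # ]
- #
- # i.e. a header line with replica metadata followed by one JSON object
- # per document, each line ending with a comma except the last.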
-
- def sync_exchange(self, docs_by_generations, source_replica_uid,
- last_known_generation, last_known_trans_id,
- return_doc_cb, ensure_callback=None):
- """
- Find out which documents the remote database does not know about,
- encrypt and send them.
-
- This does the same as the parent's method but encrypts content before
- syncing.
-
- @param docs_by_generations: A list of (doc_id, generation, trans_id)
- of local documents that were changed since the last local
- generation the remote replica knows about.
- @type docs_by_generations: list of tuples
- @param source_replica_uid: The uid of the source replica.
- @type source_replica_uid: str
- @param last_known_generation: Target's last known generation.
- @type last_known_generation: int
- @param last_known_trans_id: Target's last known transaction id.
- @type last_known_trans_id: str
- @param return_doc_cb: A callback for inserting received documents from
- target.
- @type return_doc_cb: function
- @param ensure_callback: A callback that ensures we know the target
- replica uid if the target replica was just created.
- @type ensure_callback: function
-
- @return: The new generation and transaction id of the target replica.
- @rtype: tuple
- """
- self._ensure_connection()
- if self._trace_hook: # for tests
- self._trace_hook('sync_exchange')
- url = '%s/sync-from/%s' % (self._url.path, source_replica_uid)
- self._conn.putrequest('POST', url)
- self._conn.putheader('content-type', 'application/x-u1db-sync-stream')
- for header_name, header_value in self._sign_request('POST', url, {}):
- self._conn.putheader(header_name, header_value)
- entries = ['[']
- size = 1
-
- def prepare(**dic):
- entry = comma + '\r\n' + json.dumps(dic)
- entries.append(entry)
- return len(entry)
-
- comma = ''
- size += prepare(
- last_known_generation=last_known_generation,
- last_known_trans_id=last_known_trans_id,
- ensure=ensure_callback is not None)
- comma = ','
- for doc, gen, trans_id in docs_by_generations:
- # skip non-syncable docs
- if isinstance(doc, LeapDocument) and not doc.syncable:
- continue
- #-------------------------------------------------------------
- # symmetric encryption of document's contents
- #-------------------------------------------------------------
- doc_json = doc.get_json()
- if not doc.is_tombstone():
- doc_json = encrypt_doc(self._crypto, doc)
- #-------------------------------------------------------------
- # end of symmetric encryption
- #-------------------------------------------------------------
- size += prepare(id=doc.doc_id, rev=doc.rev,
- content=doc_json,
- gen=gen, trans_id=trans_id)
- entries.append('\r\n]')
- size += len(entries[-1])
- self._conn.putheader('content-length', str(size))
- self._conn.endheaders()
- for entry in entries:
- self._conn.send(entry)
- entries = None
- data, _ = self._response()
- res = self._parse_sync_stream(data, return_doc_cb, ensure_callback)
- data = None
- return res['new_generation'], res['new_transaction_id']
diff --git a/src/leap/soledad/backends/objectstore.py b/src/leap/soledad/backends/objectstore.py
deleted file mode 100644
index 8afac3ec..00000000
--- a/src/leap/soledad/backends/objectstore.py
+++ /dev/null
@@ -1,296 +0,0 @@
-# -*- coding: utf-8 -*-
-# objectstore.py
-# Copyright (C) 2013 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-"""
-Abstract U1DB backend to handle storage using object stores (like CouchDB, for
-example).
-
-Right now, this is only used by the CouchDatabase backend, but can also be
-extended to implement OpenStack or Amazon S3 storage, for example.
-
-See U1DB documentation for more information on how to use databases.
-"""
-
-from u1db.backends.inmemory import (
- InMemoryDatabase,
- InMemorySyncTarget,
-)
-from u1db import errors
-
-
-class ObjectStoreDatabase(InMemoryDatabase):
- """
- A backend for storing u1db data in an object store.
- """
-
- @classmethod
- def open_database(cls, url, create, document_factory=None):
- """
- Open a U1DB database using an object store as backend.
-
- @param url: the url of the database replica
- @type url: str
- @param create: should the replica be created if it does not exist?
- @type create: bool
- @param document_factory: A function that will be called with the same
- parameters as Document.__init__.
- @type document_factory: callable
-
- @return: the database instance
- @rtype: CouchDatabase
- """
- raise NotImplementedError(cls.open_database)
-
- def __init__(self, replica_uid=None, document_factory=None):
- """
- Initialize the object store database.
-
- @param replica_uid: an optional unique replica identifier
- @type replica_uid: str
- @param document_factory: A function that will be called with the same
- parameters as Document.__init__.
- @type document_factory: callable
- """
- InMemoryDatabase.__init__(
- self,
- replica_uid,
- document_factory=document_factory)
- # sync data in memory with data in object store
- if not self._get_doc(self.U1DB_DATA_DOC_ID):
- self._init_u1db_data()
- self._fetch_u1db_data()
-
- #-------------------------------------------------------------------------
- # methods from Database
- #-------------------------------------------------------------------------
-
- def _set_replica_uid(self, replica_uid):
- """
- Force the replica_uid to be set.
-
- @param replica_uid: The uid of the replica.
- @type replica_uid: str
- """
- InMemoryDatabase._set_replica_uid(self, replica_uid)
- self._store_u1db_data()
-
- def _put_doc(self, doc):
- """
- Update a document.
-
- This is called every time we just want to do a raw put on the db (i.e.
- without index updates, document constraint checks, and conflict
- checks).
-
- @param doc: The document to update.
- @type doc: u1db.Document
-
- @return: The new revision identifier for the document.
- @rtype: str
- """
- raise NotImplementedError(self._put_doc)
-
- def _get_doc(self, doc_id):
- """
- Get just the document content, without fancy handling.
-
- @param doc_id: The unique document identifier
- @type doc_id: str
-
- @return: a Document object.
- @rtype: u1db.Document
- """
- raise NotImplementedError(self._get_doc)
-
- def get_all_docs(self, include_deleted=False):
- """
- Get the JSON content for all documents in the database.
-
- @param include_deleted: If set to True, deleted documents will be
- returned with empty content. Otherwise deleted documents will not
- be included in the results.
- @type include_deleted: bool
-
- @return: (generation, [Document])
- The current generation of the database, followed by a list of all
- the documents in the database.
- @rtype: tuple
- """
- raise NotImplementedError(self.get_all_docs)
-
- def delete_doc(self, doc):
- """
- Mark a document as deleted.
-
- @param doc: The document to mark as deleted.
- @type doc: u1db.Document
-
- @return: The new revision id of the document.
- @rtype: str
- """
- old_doc = self._get_doc(doc.doc_id, check_for_conflicts=True)
- if old_doc is None:
- raise errors.DocumentDoesNotExist
- if old_doc.rev != doc.rev:
- raise errors.RevisionConflict()
- if old_doc.is_tombstone():
- raise errors.DocumentAlreadyDeleted
- if old_doc.has_conflicts:
- raise errors.ConflictedDoc()
- new_rev = self._allocate_doc_rev(doc.rev)
- doc.rev = new_rev
- doc.make_tombstone()
- self._put_and_update_indexes(old_doc, doc)
- return new_rev
-
- # index-related methods
-
- def create_index(self, index_name, *index_expressions):
- """
- Create a named index, which can then be queried for future lookups.
-
- See U1DB documentation for more information.
-
- @param index_name: A unique name which can be used as a key prefix.
- @param index_expressions: Index expressions defining the index
- information.
- """
- raise NotImplementedError(self.create_index)
-
- def delete_index(self, index_name):
- """
- Remove a named index.
-
- Here we just guarantee that the new info will be stored in the backend
- db after update.
-
- @param index_name: The name of the index we are removing.
- @type index_name: str
- """
- InMemoryDatabase.delete_index(self, index_name)
- self._store_u1db_data()
-
- def _replace_conflicts(self, doc, conflicts):
- """
- Set new conflicts for a document.
-
- Here we just guarantee that the new info will be stored in the backend
- db after update.
-
- @param doc: The document with a new set of conflicts.
- @param conflicts: The new set of conflicts.
- @type conflicts: list
- """
- InMemoryDatabase._replace_conflicts(self, doc, conflicts)
- self._store_u1db_data()
-
- def _do_set_replica_gen_and_trans_id(self, other_replica_uid,
- other_generation,
- other_transaction_id):
- """
- Set the last-known generation and transaction id for the other
- database replica.
-
- Here we just guarantee that the new info will be stored in the backend
- db after update.
-
- @param other_replica_uid: The U1DB identifier for the other replica.
- @type other_replica_uid: str
- @param other_generation: The generation number for the other replica.
- @type other_generation: int
- @param other_transaction_id: The transaction id associated with the
- generation.
- @type other_transaction_id: str
- """
- InMemoryDatabase._do_set_replica_gen_and_trans_id(
- self,
- other_replica_uid,
- other_generation,
- other_transaction_id)
- self._store_u1db_data()
-
- #-------------------------------------------------------------------------
- # implemented methods from CommonBackend
- #-------------------------------------------------------------------------
-
- def _put_and_update_indexes(self, old_doc, doc):
- """
- Update a document and all indexes related to it.
-
- @param old_doc: The old version of the document.
- @type old_doc: u1db.Document
- @param doc: The new version of the document.
- @type doc: u1db.Document
- """
- for index in self._indexes.itervalues():
- if old_doc is not None and not old_doc.is_tombstone():
- index.remove_json(old_doc.doc_id, old_doc.get_json())
- if not doc.is_tombstone():
- index.add_json(doc.doc_id, doc.get_json())
- trans_id = self._allocate_transaction_id()
- self._put_doc(doc)
- self._transaction_log.append((doc.doc_id, trans_id))
- self._store_u1db_data()
-
- #-------------------------------------------------------------------------
- # methods specific for object stores
- #-------------------------------------------------------------------------
-
- U1DB_DATA_DOC_ID = 'u1db_data'
-
- def _fetch_u1db_data(self):
- """
- Fetch u1db configuration data from backend storage.
-
- See C{_init_u1db_data} documentation.
- """
- raise NotImplementedError(self._fetch_u1db_data)
-
- def _store_u1db_data(self):
- """
- Store u1db configuration data on backend storage.
-
- See C{_init_u1db_data} documentation.
- """
- raise NotImplementedError(self._store_u1db_data)
-
- def _init_u1db_data(self):
- """
- Initialize u1db configuration data on backend storage.
-
- A U1DB database needs to keep track of all database transactions,
- document conflicts, the generation of other replicas it has seen,
- indexes created by users and so on.
-
- In this implementation, all this information is kept in a special
- document stored in the couch db with id equal to
- CouchDatabase.U1DB_DATA_DOC_ID.
-
- This method initializes the document that will hold such information.
- """
- raise NotImplementedError(self._init_u1db_data)
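-
- # Shape sketch for that special document (all field names below are
- # assumptions for illustration; the concrete schema is defined by the
- # CouchDatabase backend):
- #
- # {
- # '_id': 'u1db_data',
- # 'transaction_log': [...], # (doc_id, trans_id) pairs
- # 'conflicts': {...}, # doc_id -> conflicted revisions
- # 'other_generations': {...}, # replica_uid -> (gen, trans_id)
- # 'indexes': {...}, # index_name -> index expressions
- # }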
-
-
-class ObjectStoreSyncTarget(InMemorySyncTarget):
- """
- Functionality for using an ObjectStore as a synchronization target.
- """
diff --git a/src/leap/soledad/backends/sqlcipher.py b/src/leap/soledad/backends/sqlcipher.py
deleted file mode 100644
index 5825b844..00000000
--- a/src/leap/soledad/backends/sqlcipher.py
+++ /dev/null
@@ -1,653 +0,0 @@
-# -*- coding: utf-8 -*-
-# sqlcipher.py
-# Copyright (C) 2013 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-"""
-A U1DB backend that uses SQLCipher as its persistence layer.
-
-The SQLCipher API (http://sqlcipher.net/sqlcipher-api/) is fully implemented,
-with the exception of the following statements:
-
- * PRAGMA cipher_use_hmac
- * PRAGMA cipher_default_use_hmac
-
-SQLCipher 2.0 introduced a per-page HMAC to validate that the page data has
-not been tampered with. By default, when creating or opening a database using
-SQLCipher 2, SQLCipher will attempt to use an HMAC check. This change in
-database format means that SQLCipher 2 can't operate on version 1.1.x
-databases by default. Thus, in order to provide backward compatibility with
-SQLCipher 1.1.x, PRAGMA cipher_use_hmac can be used to disable the HMAC
-functionality on specific databases.
-
-In some very specific cases, it is not possible to call PRAGMA cipher_use_hmac
-as one of the first operations on a database. An example of this is when
-trying to ATTACH a 1.1.x database to the main database. In these cases PRAGMA
-cipher_default_use_hmac can be used to globally alter the default use of HMAC
-when opening a database.
-
-So, as the statements above were introduced for backwards compatibility with
-SQLCipher 1.1 databases, we do not implement them, as all SQLCipher databases
-handled by Soledad should be created by SQLCipher >= 2.0.
-"""
-
-import os
-import time
-import string
-
-
-from u1db.backends import sqlite_backend
-from pysqlcipher import dbapi2
-from u1db import (
- errors,
-)
-from leap.soledad.backends.leap_backend import LeapDocument
-
-
-# Monkey-patch u1db.backends.sqlite_backend with pysqlcipher.dbapi2
-sqlite_backend.dbapi2 = dbapi2
-
-
-def open(path, password, create=True, document_factory=None, crypto=None,
- raw_key=False, cipher='aes-256-cbc', kdf_iter=4000,
- cipher_page_size=1024):
- """Open a database at the given location.
-
- Will raise u1db.errors.DatabaseDoesNotExist if create=False and the
- database does not already exist.
-
- @param path: The filesystem path for the database to open.
- @type path: str
- @param create: True/False, should the database be created if it doesn't
- already exist?
- @type create: bool
- @param document_factory: A function that will be called with the same
- parameters as Document.__init__.
- @type document_factory: callable
- @param crypto: An instance of SoledadCrypto so we can encrypt/decrypt
- document contents when syncing.
- @type crypto: soledad.crypto.SoledadCrypto
- @param raw_key: Whether C{password} is a raw 64-char hex string or a
- passphrase that should be hashed to obtain the encryption key.
- @type raw_key: bool
- @param cipher: The cipher and mode to use.
- @type cipher: str
- @param kdf_iter: The number of iterations to use.
- @type kdf_iter: int
- @param cipher_page_size: The page size.
- @type cipher_page_size: int
-
- @return: An instance of Database.
- @rtype: SQLCipherDatabase
- """
- return SQLCipherDatabase.open_database(
- path, password, create=create, document_factory=document_factory,
- crypto=crypto, raw_key=raw_key, cipher=cipher, kdf_iter=kdf_iter,
- cipher_page_size=cipher_page_size)
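-
-
-# Usage sketch (illustrative, not part of the original module): open or
-# create an encrypted database keyed by a passphrase, then use the regular
-# U1DB document API on it. The path and passphrase below are made up.
-#
-#     db = open('/tmp/soledad.db', 'my passphrase', create=True)
-#     doc = db.create_doc({'some': 'data'})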
-
-
-#
-# Exceptions
-#
-
-class DatabaseIsNotEncrypted(Exception):
- """
- Exception raised when trying to open non-encrypted databases.
- """
- pass
-
-
-class NotAnHexString(Exception):
- """
- Raised when trying to (raw) key the database with a non-hex string.
- """
- pass
-
-
-#
-# The SQLCipher database
-#
-
-class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
- """A U1DB implementation that uses SQLCipher as its persistence layer."""
-
- _index_storage_value = 'expand referenced encrypted'
-
- def __init__(self, sqlcipher_file, password, document_factory=None,
- crypto=None, raw_key=False, cipher='aes-256-cbc',
- kdf_iter=4000, cipher_page_size=1024):
- """
- Create a new sqlcipher file.
-
- @param sqlcipher_file: The path for the SQLCipher file.
- @type sqlcipher_file: str
- @param password: The password that protects the SQLCipher db.
- @type password: str
- @param document_factory: A function that will be called with the same
- parameters as Document.__init__.
- @type document_factory: callable
- @param crypto: An instance of SoledadCrypto so we can encrypt/decrypt
- document contents when syncing.
- @type crypto: soledad.crypto.SoledadCrypto
- @param raw_key: Whether C{password} is a raw 64-char hex string or a
- passphrase that should be hashed to obtain the encryption key.
- @type raw_key: bool
- @param cipher: The cipher and mode to use.
- @type cipher: str
- @param kdf_iter: The number of iterations to use.
- @type kdf_iter: int
- @param cipher_page_size: The page size.
- @type cipher_page_size: int
- """
- # ensure the db is encrypted if the file already exists
- if os.path.exists(sqlcipher_file):
- self.assert_db_is_encrypted(
- sqlcipher_file, password, raw_key, cipher, kdf_iter,
- cipher_page_size)
- # connect to the database
- self._db_handle = dbapi2.connect(sqlcipher_file)
- # set SQLCipher cryptographic parameters
- self._set_crypto_pragmas(
- self._db_handle, password, raw_key, cipher, kdf_iter,
- cipher_page_size)
- self._real_replica_uid = None
- self._ensure_schema()
- self._crypto = crypto
-
- def factory(doc_id=None, rev=None, json='{}', has_conflicts=False,
- syncable=True):
- return LeapDocument(doc_id=doc_id, rev=rev, json=json,
- has_conflicts=has_conflicts,
- syncable=syncable)
- self.set_document_factory(factory)
-
- @classmethod
- def _open_database(cls, sqlcipher_file, password, document_factory=None,
- crypto=None, raw_key=False, cipher='aes-256-cbc',
- kdf_iter=4000, cipher_page_size=1024):
- """
- Open a SQLCipher database.
-
- @param sqlcipher_file: The path for the SQLCipher file.
- @type sqlcipher_file: str
- @param password: The password that protects the SQLCipher db.
- @type password: str
- @param document_factory: A function that will be called with the same
- parameters as Document.__init__.
- @type document_factory: callable
- @param crypto: An instance of SoledadCrypto so we can encrypt/decrypt
- document contents when syncing.
- @type crypto: soledad.crypto.SoledadCrypto
- @param raw_key: Whether C{password} is a raw 64-char hex string or a
- passphrase that should be hashed to obtain the encryption key.
- @type raw_key: bool
- @param cipher: The cipher and mode to use.
- @type cipher: str
- @param kdf_iter: The number of iterations to use.
- @type kdf_iter: int
- @param cipher_page_size: The page size.
- @type cipher_page_size: int
-
- @return: The database object.
- @rtype: SQLCipherDatabase
- """
- if not os.path.isfile(sqlcipher_file):
- raise errors.DatabaseDoesNotExist()
- tries = 2
- while True:
- # Note: There seems to be a bug in sqlite 3.5.9 (with python2.6)
- # where without re-opening the database on Windows, it
- # doesn't see the transaction that was just committed
- db_handle = dbapi2.connect(sqlcipher_file)
- # set cryptographic params
- cls._set_crypto_pragmas(
- db_handle, password, raw_key, cipher, kdf_iter,
- cipher_page_size)
- c = db_handle.cursor()
- v, err = cls._which_index_storage(c)
- db_handle.close()
- if v is not None:
- break
- # possibly another process is initializing it, wait for it to be
- # done
- if tries == 0:
- raise err # go for the richest error?
- tries -= 1
- time.sleep(cls.WAIT_FOR_PARALLEL_INIT_HALF_INTERVAL)
- return SQLCipherDatabase._sqlite_registry[v](
- sqlcipher_file, password, document_factory=document_factory,
- crypto=crypto, raw_key=raw_key, cipher=cipher, kdf_iter=kdf_iter,
- cipher_page_size=cipher_page_size)
-
- @classmethod
- def open_database(cls, sqlcipher_file, password, create, backend_cls=None,
- document_factory=None, crypto=None, raw_key=False,
- cipher='aes-256-cbc', kdf_iter=4000,
- cipher_page_size=1024):
- """
- Open a SQLCipher database.
-
- @param sqlcipher_file: The path for the SQLCipher file.
- @type sqlcipher_file: str
- @param password: The password that protects the SQLCipher db.
- @type password: str
- @param create: Should the database be created if it does not already
- exist?
- @type create: bool
- @param backend_cls: A class to use as backend.
- @type backend_cls: type
- @param document_factory: A function that will be called with the same
- parameters as Document.__init__.
- @type document_factory: callable
- @param crypto: An instance of SoledadCrypto so we can encrypt/decrypt
- document contents when syncing.
- @type crypto: soledad.crypto.SoledadCrypto
- @param raw_key: Whether C{password} is a raw 64-char hex string or a
- passphrase that should be hashed to obtain the encryption key.
- @type raw_key: bool
- @param cipher: The cipher and mode to use.
- @type cipher: str
- @param kdf_iter: The number of iterations to use.
- @type kdf_iter: int
- @param cipher_page_size: The page size.
- @type cipher_page_size: int
-
- @return: The database object.
- @rtype: SQLCipherDatabase
- """
- try:
- return cls._open_database(
- sqlcipher_file, password, document_factory=document_factory,
- crypto=crypto, raw_key=raw_key, cipher=cipher,
- kdf_iter=kdf_iter, cipher_page_size=cipher_page_size)
- except errors.DatabaseDoesNotExist:
- if not create:
- raise
- # TODO: remove backend class from here.
- if backend_cls is None:
- # default is SQLCipherPartialExpandDatabase
- backend_cls = SQLCipherDatabase
- return backend_cls(
- sqlcipher_file, password, document_factory=document_factory,
- crypto=crypto, raw_key=raw_key, cipher=cipher,
- kdf_iter=kdf_iter, cipher_page_size=cipher_page_size)
-
- def sync(self, url, creds=None, autocreate=True):
- """
- Synchronize documents with remote replica exposed at url.
-
- @param url: The url of the target replica to sync with.
- @type url: str
- @param creds: optional dictionary giving credentials.
- to authorize the operation with the server.
- @type creds: dict
- @param autocreate: Ask the target to create the db if non-existent.
- @type autocreate: bool
-
- @return: The local generation before the synchronisation was performed.
- @rtype: int
- """
- from u1db.sync import Synchronizer
- from leap.soledad.backends.leap_backend import LeapSyncTarget
- return Synchronizer(
- self,
- LeapSyncTarget(url,
- creds=creds,
- crypto=self._crypto)).sync(autocreate=autocreate)
-
- def _extra_schema_init(self, c):
- """
- Add any extra fields, etc to the basic table definitions.
-
- @param c: The cursor for querying the database.
- @type c: dbapi2.cursor
- """
- c.execute(
- 'ALTER TABLE document '
- 'ADD COLUMN syncable BOOL NOT NULL DEFAULT TRUE')
-
- def _put_and_update_indexes(self, old_doc, doc):
- """
- Update a document and all indexes related to it.
-
- @param old_doc: The old version of the document.
- @type old_doc: u1db.Document
- @param doc: The new version of the document.
- @type doc: u1db.Document
- """
- sqlite_backend.SQLitePartialExpandDatabase._put_and_update_indexes(
- self, old_doc, doc)
- c = self._db_handle.cursor()
- c.execute('UPDATE document SET syncable=? '
- 'WHERE doc_id=?',
- (doc.syncable, doc.doc_id))
-
- def _get_doc(self, doc_id, check_for_conflicts=False):
- """
- Get just the document content, without fancy handling.
-
- @param doc_id: The unique document identifier
- @type doc_id: str
- @param check_for_conflicts: If set to True, the document will also be
- checked for conflicts.
- @type check_for_conflicts: bool
-
- @return: a Document object.
- @rtype: u1db.Document
- """
- doc = sqlite_backend.SQLitePartialExpandDatabase._get_doc(
- self, doc_id, check_for_conflicts)
- if doc:
- c = self._db_handle.cursor()
- c.execute('SELECT syncable FROM document '
- 'WHERE doc_id=?',
- (doc.doc_id,))
- result = c.fetchone()
- doc.syncable = bool(result[0])
- return doc
-
- #
- # SQLCipher API methods
- #
-
- @classmethod
- def assert_db_is_encrypted(cls, sqlcipher_file, key, raw_key, cipher,
- kdf_iter, cipher_page_size):
- """
- Assert that C{sqlcipher_file} contains an encrypted database.
-
- When opening an existing database, PRAGMA key will not immediately
- throw an error if the key provided is incorrect. To test that the
- database can be successfully opened with the provided key, it is
- necessary to perform some operation on the database (i.e. read from
- it) and confirm that it succeeds.
-
- The easiest way to do this is to select from the sqlite_master table,
- which will attempt to read the first page of the database and will
- parse the schema.
-
- @param sqlcipher_file: The path for the SQLCipher file.
- @type sqlcipher_file: str
- @param key: The key that protects the SQLCipher db.
- @type key: str
- @param raw_key: Whether C{key} is a raw 64-char hex string or a
- passphrase that should be hashed to obtain the encryption key.
- @type raw_key: bool
- @param cipher: The cipher and mode to use.
- @type cipher: str
- @param kdf_iter: The number of iterations to use.
- @type kdf_iter: int
- @param cipher_page_size: The page size.
- @type cipher_page_size: int
- """
- try:
- # trying to open an encrypted database with the regular u1db
- # backend should raise a DatabaseError exception.
- sqlite_backend.SQLitePartialExpandDatabase(sqlcipher_file)
- raise DatabaseIsNotEncrypted()
- except dbapi2.DatabaseError:
- # assert that we can access it using SQLCipher with the given
- # key
- db_handle = dbapi2.connect(sqlcipher_file)
- cls._set_crypto_pragmas(
- db_handle, key, raw_key, cipher, kdf_iter, cipher_page_size)
- db_handle.cursor().execute('SELECT count(*) FROM sqlite_master')
-
- @classmethod
- def _set_crypto_pragmas(cls, db_handle, key, raw_key, cipher, kdf_iter,
- cipher_page_size):
- """
- Set cryptographic params (key, cipher, KDF number of iterations and
- cipher page size).
- """
- cls._pragma_key(db_handle, key, raw_key)
- cls._pragma_cipher(db_handle, cipher)
- cls._pragma_kdf_iter(db_handle, kdf_iter)
- cls._pragma_cipher_page_size(db_handle, cipher_page_size)
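-
- # Pragma order sketch (illustrative): for a passphrase-keyed database
- # the four calls above issue, in order:
- #
- # PRAGMA key = '<passphrase>';
- # PRAGMA cipher = 'aes-256-cbc';
- # PRAGMA kdf_iter = '4000';
- # PRAGMA cipher_page_size = '1024';
- #
- # SQLCipher requires PRAGMA key to come before any other operation that
- # touches database pages, which is why _pragma_key runs first.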
-
- @classmethod
- def _pragma_key(cls, db_handle, key, raw_key):
- """
- Set the C{key} for use with the database.
-
- The process of creating a new, encrypted database is called 'keying'
- the database. SQLCipher uses just-in-time key derivation at the point
- it is first needed for an operation. This means that the key (and any
- options) must be set before the first operation on the database. As
- soon as the database is touched (e.g. SELECT, CREATE TABLE, UPDATE,
- etc.) and pages need to be read or written, the key is prepared for
- use.
-
- Implementation Notes:
-
- * PRAGMA key should generally be called as the first operation on a
- database.
-
- @param key: The key for use with the database.
- @type key: str
- @param raw_key: Whether C{key} is a raw 64-char hex string or a
- passphrase that should be hashed to obtain the encryption key.
- @type raw_key: bool
- """
- if raw_key:
- cls._pragma_key_raw(db_handle, key)
- else:
- cls._pragma_key_passphrase(db_handle, key)
-
- @classmethod
- def _pragma_key_passphrase(cls, db_handle, passphrase):
- """
- Set a passphrase for encryption key derivation.
-
- The key itself can be a passphrase, which is converted to a key using
- PBKDF2 key derivation. The result is used as the encryption key for
- the database. By using this method, there is no way to alter the KDF;
- if you want to do so you should use a raw key instead and derive the
- key using your own KDF.
-
- @param db_handle: A handle to the SQLCipher database.
- @type db_handle: pysqlcipher.Connection
- @param passphrase: The passphrase used to derive the encryption key.
- @type passphrase: str
- """
- db_handle.cursor().execute("PRAGMA key = '%s'" % passphrase)
-
- @classmethod
- def _pragma_key_raw(cls, db_handle, key):
- """
- Set a raw hexadecimal encryption key.
-
- It is possible to specify an exact byte sequence using a blob literal.
- With this method, it is the calling application's responsibility to
- ensure that the data provided is a 64 character hex string, which will
- be converted directly to 32 bytes (256 bits) of key data.
-
- @param db_handle: A handle to the SQLCipher database.
- @type db_handle: pysqlcipher.Connection
- @param key: A 64 character hex string.
- @type key: str
- """
- if not all(c in string.hexdigits for c in key):
- raise NotAnHexString(key)
- db_handle.cursor().execute('PRAGMA key = "x\'%s\'"' % key)
-
- @classmethod
- def _pragma_cipher(cls, db_handle, cipher='aes-256-cbc'):
- """
- Set the cipher and mode to use for symmetric encryption.
-
- SQLCipher uses aes-256-cbc as the default cipher and mode of
- operation. It is possible to change this, though not generally
- recommended, using PRAGMA cipher.
-
- SQLCipher makes direct use of libssl, so all cipher options available
- to libssl are also available for use with SQLCipher. See `man enc` for
- OpenSSL's supported ciphers.
-
- Implementation Notes:
-
- * PRAGMA cipher must be called after PRAGMA key and before the first
- actual database operation or it will have no effect.
-
- * If a non-default value is used with PRAGMA cipher to create a database,
- it must also be called every time that database is opened.
-
- * SQLCipher does not implement its own encryption. Instead it uses the
- widely available and peer-reviewed OpenSSL libcrypto for all
- cryptographic functions.
-
- @param db_handle: A handle to the SQLCipher database.
- @type db_handle: pysqlcipher.Connection
- @param cipher: The cipher and mode to use.
- @type cipher: str
- """
- db_handle.cursor().execute("PRAGMA cipher = '%s'" % cipher)
-
- @classmethod
- def _pragma_kdf_iter(cls, db_handle, kdf_iter=4000):
- """
- Set the number of iterations for the key derivation function.
-
- SQLCipher uses PBKDF2 key derivation to strengthen the key and make it
- resistant to brute force and dictionary attacks. The default
- configuration uses 4000 PBKDF2 iterations (effectively 16,000 SHA1
- operations). PRAGMA kdf_iter can be used to increase or decrease the
- number of iterations used.
-
- Implementation Notes:
-
- * PRAGMA kdf_iter must be called after PRAGMA key and before the first
- actual database operation or it will have no effect.
-
- * If a non-default value is used with PRAGMA kdf_iter to create a database,
- it must also be called every time that database is opened.
-
- * It is not recommended to reduce the number of iterations if a
- passphrase is in use.
-
- @param db_handle: A handle to the SQLCipher database.
- @type db_handle: pysqlcipher.Connection
- @param kdf_iter: The number of iterations to use.
- @type kdf_iter: int
- """
- db_handle.cursor().execute("PRAGMA kdf_iter = '%d'" % kdf_iter)
-
- @classmethod
- def _pragma_cipher_page_size(cls, db_handle, cipher_page_size=1024):
- """
- Set the page size of the encrypted database.
-
- SQLCipher 2 introduced the new PRAGMA cipher_page_size that can be
- used to adjust the page size for the encrypted database. The default
- page size is 1024 bytes, but it can be desirable for some applications
- to use a larger page size for increased performance. For instance,
- some recent testing shows that increasing the page size can noticeably
- improve performance (5-30%) for certain queries that manipulate a
- large number of pages (e.g. selects without an index, large inserts in
- a transaction, big deletes).
-
- To adjust the page size, call the pragma immediately after setting the
- key for the first time and each subsequent time that you open the
- database.
-
- Implementation Notes:
-
- * PRAGMA cipher_page_size must be called after PRAGMA key and before
- the first actual database operation or it will have no effect.
-
- * If a non-default value is used with PRAGMA cipher_page_size to create a
- database, it must also be called every time that database is opened.
-
- @param db_handle: A handle to the SQLCipher database.
- @type db_handle: pysqlcipher.Connection
- @param cipher_page_size: The page size.
- @type cipher_page_size: int
- """
- db_handle.cursor().execute(
- "PRAGMA cipher_page_size = '%d'" % cipher_page_size)
-
- @classmethod
- def _pragma_rekey(cls, db_handle, new_key, raw_key):
- """
- Change the key of an existing encrypted database.
-
- To change the key on an existing encrypted database, it must first be
- unlocked with the current encryption key. Once the database is
- readable and writeable, PRAGMA rekey can be used to re-encrypt every
- page in the database with a new key.
-
- * PRAGMA rekey must be called after PRAGMA key. It can be called at any
- time once the database is readable.
-
- * PRAGMA rekey cannot be used to encrypt a standard SQLite
- database! It is only useful for changing the key on an existing
- database.
-
- * Previous versions of SQLCipher provided PRAGMA rekey_cipher and
- PRAGMA rekey_kdf_iter. These are deprecated and should not be
- used. Instead, use sqlcipher_export().
-
- @param db_handle: A handle to the SQLCipher database.
- @type db_handle: pysqlcipher.Connection
- @param new_key: The new key.
- @type new_key: str
- @param raw_key: Whether C{new_key} is a raw 64-char hex string or a
- passphrase that should be hashed to obtain the encryption key.
- @type raw_key: bool
- """
- if raw_key:
- cls._pragma_rekey_raw(db_handle, new_key)
- else:
- cls._pragma_rekey_passphrase(db_handle, new_key)
-
- @classmethod
- def _pragma_rekey_passphrase(cls, db_handle, passphrase):
- """
- Change the passphrase for encryption key derivation.
-
- The key itself can be a passphrase, which is converted to a key using
- PBKDF2 key derivation. The result is used as the encryption key for
- the database.
-
- @param db_handle: A handle to the SQLCipher database.
- @type db_handle: pysqlcipher.Connection
- @param passphrase: The passphrase used to derive the encryption key.
- @type passphrase: str
- """
- db_handle.cursor().execute("PRAGMA rekey = '%s'" % passphrase)
-
- @classmethod
- def _pragma_rekey_raw(cls, db_handle, key):
- """
- Change the raw hexadecimal encryption key.
-
- It is possible to specify an exact byte sequence using a blob literal.
- With this method, it is the calling application's responsibility to
- ensure that the data provided is a 64 character hex string, which will
- be converted directly to 32 bytes (256 bits) of key data.
-
- @param db_handle: A handle to the SQLCipher database.
- @type db_handle: pysqlcipher.Connection
- @param key: A 64 character hex string.
- @type key: str
- """
- if not all(c in string.hexdigits for c in key):
- raise NotAnHexString(key)
- db_handle.cursor().execute('PRAGMA rekey = "x\'%s\'"' % key)
-
-
-sqlite_backend.SQLiteDatabase.register_implementation(SQLCipherDatabase)
diff --git a/src/leap/soledad/crypto.py b/src/leap/soledad/crypto.py
deleted file mode 100644
index e020eee6..00000000
--- a/src/leap/soledad/crypto.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# -*- coding: utf-8 -*-
-# crypto.py
-# Copyright (C) 2013 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-"""
-Cryptographic utilities for Soledad.
-"""
-
-
-import hmac
-import hashlib
-
-
-from leap.common import crypto
-
-
-class NoSymmetricSecret(Exception):
- """
- Raised when trying to derive a key and no symmetric secret is available.
- """
-
-
-class SoledadCrypto(object):
- """
- General cryptographic functionality.
- """
-
- MAC_KEY_LENGTH = 64
-
- def __init__(self, soledad):
- """
- Initialize the crypto object.
-
- @param soledad: A Soledad instance for key lookup.
- @type soledad: leap.soledad.Soledad
- """
- self._soledad = soledad
-
- def encrypt_sym(self, data, key,
- method=crypto.EncryptionMethods.AES_256_CTR):
- """
- Encrypt C{data} using C{key}.
-
- Currently, the only encryption method supported is AES-256 CTR mode.
-
- @param data: The data to be encrypted.
- @type data: str
- @param key: The key used to encrypt C{data} (must be 256 bits long).
- @type key: str
- @param method: The encryption method to use.
- @type method: str
-
- @return: A tuple with the initial value and the encrypted data.
- @rtype: (long, str)
- """
- return crypto.encrypt_sym(data, key, method)
-
- def decrypt_sym(self, data, key,
- method=crypto.EncryptionMethods.AES_256_CTR, **kwargs):
- """
- Decrypt data using symmetric secret.
-
- Currently, the only encryption method supported is AES-256 CTR mode.
-
- @param data: The data to be decrypted.
- @type data: str
- @param key: The key used to decrypt C{data} (must be 256 bits long).
- @type key: str
- @param method: The encryption method to use.
- @type method: str
- @param kwargs: Other parameters specific to each encryption method.
- @type kwargs: dict
-
- @return: The decrypted data.
- @rtype: str
- """
- return crypto.decrypt_sym(data, key, method, **kwargs)
-
- def doc_passphrase(self, doc_id):
- """
- Generate a passphrase for symmetric encryption of document's contents.
-
- The password is derived using HMAC having sha256 as underlying hash
- function. The key used for HMAC is the portion of Soledad's storage
- secret that lies between the first MAC_KEY_LENGTH characters and
- C{soledad.REMOTE_STORAGE_SECRET_LENGTH}. The HMAC
- message is C{doc_id}.
-
- @param doc_id: The id of the document that will be encrypted using
- this passphrase.
- @type doc_id: str
-
- @return: The passphrase.
- @rtype: str
-
- @raise NoSymmetricSecret: if no symmetric secret was supplied.
- """
- if self.secret is None:
- raise NoSymmetricSecret()
- return hmac.new(
- self.secret[
- self.MAC_KEY_LENGTH:
- self._soledad.REMOTE_STORAGE_SECRET_LENGTH],
- doc_id,
- hashlib.sha256).digest()
-
- def doc_mac_key(self, doc_id):
- """
- Generate a key for calculating a MAC for a document whose id is
- C{doc_id}.
-
- The key is derived using HMAC having sha256 as underlying hash
- function. The key used for HMAC is the first MAC_KEY_LENGTH characters
- of Soledad's storage secret. The HMAC message is C{doc_id}.
-
- @param doc_id: The id of the document.
- @type doc_id: str
-
- @return: The key.
- @rtype: str
-
- @raise NoSymmetricSecret: if no symmetric secret was supplied.
- """
- if self.secret is None:
- raise NoSymmetricSecret()
- return hmac.new(
- self.secret[:self.MAC_KEY_LENGTH],
- doc_id,
- hashlib.sha256).digest()
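-
- # Key layout sketch (illustrative, derived from the two methods above):
- # the storage secret is sliced into two disjoint HMAC keys,
- #
- # secret[0:MAC_KEY_LENGTH] -> doc_mac_key()
- # secret[MAC_KEY_LENGTH:REMOTE_STORAGE_SECRET_LENGTH] -> doc_passphrase()
- #
- # so per-document encryption passphrases and per-document MAC keys are
- # never derived from the same key material.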
-
- #
- # secret setters/getters
- #
-
- def _get_secret(self):
- return self._soledad.storage_secret
-
- secret = property(
- _get_secret, doc='The secret used for symmetric encryption')
diff --git a/src/leap/soledad/server.py b/src/leap/soledad/server.py
deleted file mode 100644
index 9c9e0ad7..00000000
--- a/src/leap/soledad/server.py
+++ /dev/null
@@ -1,388 +0,0 @@
-# -*- coding: utf-8 -*-
-# server.py
-# Copyright (C) 2013 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-"""
-A U1DB server that stores data using CouchDB as its persistence layer.
-
-This should be run with:
- twistd -n web --wsgi=leap.soledad.server.application --port=2424
-"""
-
-import configparser
-import httplib
-import simplejson as json
-
-
-from hashlib import sha256
-from routes.mapper import Mapper
-from u1db import DBNAME_CONSTRAINTS
-from u1db.remote import http_app
-
-
-# Keep OpenSSL's tsafe before importing Twisted submodules so we can put
-# it back if Twisted==12.0.0 messes with it.
-from OpenSSL import tsafe
-old_tsafe = tsafe
-
-from twisted.web.wsgi import WSGIResource
-from twisted.internet import reactor
-from twisted.python import log
-
-from twisted import version
-if version.base() == "12.0.0":
- # Put OpenSSL's tsafe back into place. This can probably be removed if we
- # come to use Twisted>=12.3.0.
- import sys
- sys.modules['OpenSSL.tsafe'] = old_tsafe
-
-from couchdb.client import Server
-
-from leap.soledad import SECRETS_DOC_ID_HASH_PREFIX
-from leap.soledad.backends.couch import CouchServerState
-
-
-#-----------------------------------------------------------------------------
-# Authentication
-#-----------------------------------------------------------------------------
-
-class Unauthorized(Exception):
- """
- User authentication failed.
- """
-
-
-class URLToAuth(object):
- """
- Verify if actions can be performed by a user.
- """
-
- HTTP_METHOD_GET = 'GET'
- HTTP_METHOD_PUT = 'PUT'
- HTTP_METHOD_DELETE = 'DELETE'
- HTTP_METHOD_POST = 'POST'
-
- def __init__(self, uuid):
- """
- Initialize the mapper.
-
- The C{uuid} is used to create the rules that will either allow or
- disallow the user to perform specific actions.
-
- @param uuid: The user uuid.
- @type uuid: str
- """
- self._map = Mapper(controller_scan=None)
- self._register_auth_info(self._uuid_dbname(uuid))
-
- def is_authorized(self, environ):
- """
- Return whether an HTTP request that produced the CGI C{environ}
- corresponds to an authorized action.
-
- @param environ: Dictionary containing CGI variables.
- @type environ: dict
-
- @return: Whether the action is authorized or not.
- @rtype: bool
- """
- return self._map.match(environ=environ) is not None
-
- def _register(self, pattern, http_methods):
- """
- Register a C{pattern} in the mapper as valid for C{http_methods}.
-
- @param pattern: The URL pattern that corresponds to the user action.
- @type pattern: str
- @param http_methods: A list of authorized HTTP methods.
- @type http_methods: list of str
- """
- self._map.connect(
- None, pattern, http_methods=http_methods,
- conditions=dict(method=http_methods),
- requirements={'dbname': DBNAME_CONSTRAINTS})
-
- def _uuid_dbname(self, uuid):
- """
- Return the database name corresponding to C{uuid}.
-
- @param uuid: The user uuid.
- @type uuid: str
-
- @return: The database name corresponding to C{uuid}.
- @rtype: str
- """
- return sha256('%s%s' % (SECRETS_DOC_ID_HASH_PREFIX, uuid)).hexdigest()
-
- def _register_auth_info(self, dbname):
- """
- Register the authorization info in the mapper using C{dbname} as the
- user's database name.
-
- This method sets up the following authorization rules:
-
- URL path | Authorized actions
- --------------------------------------------------
- / | GET
- /shared-db | GET
- /shared-db/docs | -
- /shared-db/doc/{id} | GET, PUT, DELETE
- /shared-db/sync-from/{source} | -
- /user-db | GET, PUT, DELETE
- /user-db/docs | -
- /user-db/doc/{id} | -
- /user-db/sync-from/{source} | GET, PUT, POST
-
- @param dbname: The name of the user's database.
- @type dbname: str
- """
- # auth info for global resource
- self._register('/', [self.HTTP_METHOD_GET])
- # auth info for shared-db database resource
- self._register(
- '/%s' % SoledadApp.SHARED_DB_NAME,
- [self.HTTP_METHOD_GET])
- # auth info for shared-db doc resource
- self._register(
- '/%s/doc/{id:.*}' % SoledadApp.SHARED_DB_NAME,
- [self.HTTP_METHOD_GET, self.HTTP_METHOD_PUT,
- self.HTTP_METHOD_DELETE])
- # auth info for user-db database resource
- self._register(
- '/%s' % dbname,
- [self.HTTP_METHOD_GET, self.HTTP_METHOD_PUT,
- self.HTTP_METHOD_DELETE])
- # auth info for user-db sync resource
- self._register(
- '/%s/sync-from/{source_replica_uid}' % dbname,
- [self.HTTP_METHOD_GET, self.HTTP_METHOD_PUT,
- self.HTTP_METHOD_POST])
- # generate the regular expressions
- self._map.create_regs()
-
-
-class SoledadAuthMiddleware(object):
- """
- Soledad Authentication WSGI middleware.
-
- In general, databases are accessed using a token provided by the LEAP API.
- Some special databases can be read without authentication.
- """
-
- TOKENS_DB = "tokens"
- TOKENS_TYPE_KEY = "type"
- TOKENS_TYPE_DEF = "Token"
- TOKENS_USER_ID_KEY = "user_id"
-
- HTTP_AUTH_KEY = "HTTP_AUTHORIZATION"
- PATH_INFO_KEY = "PATH_INFO"
-
- CONTENT_TYPE_JSON = ('content-type', 'application/json')
-
- def __init__(self, app):
- """
- Initialize the Soledad Authentication Middleware.
-
- @param app: The application to run on successful authentication.
- @type app: u1db.remote.http_app.HTTPApp
- """
- self._app = app
-
- def _error(self, start_response, status, description, message=None):
- """
- Send a JSON serialized error to WSGI client.
-
- @param start_response: Callable of the form start_response(status,
- response_headers, exc_info=None).
- @type start_response: callable
- @param status: Status string of the form "999 Message here"
- @type status: str
- @param description: The error description.
- @type description: str
- @param message: The error message.
- @type message: str
-
- @return: List with JSON serialized error message.
- @rtype: list
- """
- start_response("%d %s" % (status, httplib.responses[status]),
- [self.CONTENT_TYPE_JSON])
- err = {"error": description}
- if message:
- err['message'] = message
- return [json.dumps(err)]
-
- def __call__(self, environ, start_response):
- """
- Handle a WSGI call to the authentication application.
-
- @param environ: Dictionary containing CGI variables.
- @type environ: dict
- @param start_response: Callable of the form start_response(status,
- response_headers, exc_info=None).
- @type start_response: callable
-
- @return: Target application results if authentication succeeds, an
- error message otherwise.
- @rtype: list
- """
- unauth_err = lambda msg: self._error(start_response,
- 401,
- "unauthorized",
- msg)
-
- auth = environ.get(self.HTTP_AUTH_KEY)
- if not auth:
- return unauth_err("Missing Token Authentication.")
-
- scheme, encoded = auth.split(None, 1)
- if scheme.lower() != 'token':
- return unauth_err("Missing Token Authentication")
-
- uuid, token = encoded.decode('base64').split(':', 1)
- if not self.verify_token(environ, uuid, token):
- return unauth_err("Incorrect address or token.")
-
- if not self.verify_action(uuid, environ):
- return unauth_err("Unauthorized action.")
-
- del environ[self.HTTP_AUTH_KEY]
-
- return self._app(environ, start_response)
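-
- # Illustrative request header (an assumption based on the parsing
- # above, not taken verbatim from the original source): clients are
- # expected to send
- #
- # Authorization: Token base64("<uuid>:<token>")
- #
- # e.g. for uuid "alice" and token "s3cr3t" the header value would be
- # "Token YWxpY2U6czNjcjN0".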
-
- def verify_token(self, environ, uuid, token):
- """
- Verify if token is valid for authenticating this request.
-
- @param environ: Dictionary containing CGI variables.
- @type environ: dict
- @param uuid: The user's uuid.
- @type uuid: str
- @param token: The authentication token.
- @type token: str
-
- @return: Whether the token is valid for authenticating the request.
- @rtype: bool
- """
-
- server = Server(url=self._app.state.couch_url)
- try:
- dbname = self.TOKENS_DB
- db = server[dbname]
- token = db.get(token)
- if token is None:
- return False
- return token[self.TOKENS_TYPE_KEY] == self.TOKENS_TYPE_DEF and \
- token[self.TOKENS_USER_ID_KEY] == uuid
- except Exception as e:
- log.err(e)
- return False
-
- def verify_action(self, uuid, environ):
- """
- Verify if the user is authorized to perform the requested action over
- the requested database.
-
- @param uuid: The user's uuid.
- @type uuid: str
- @param environ: Dictionary containing CGI variables.
- @type environ: dict
-
- @return: Whether the user is authorized to perform the requested action
- over the requested db.
- @rtype: bool
- """
- return URLToAuth(uuid).is_authorized(environ)
-
-
-#-----------------------------------------------------------------------------
-# Soledad WSGI application
-#-----------------------------------------------------------------------------
-
-class SoledadApp(http_app.HTTPApp):
- """
- Soledad WSGI application
- """
-
- SHARED_DB_NAME = 'shared'
- """
- The name of the shared database that holds users' encrypted secrets.
- """
-
- def __call__(self, environ, start_response):
- """
- Handle a WSGI call to the Soledad application.
-
- @param environ: Dictionary containing CGI variables.
- @type environ: dict
- @param start_response: Callable of the form start_response(status,
- response_headers, exc_info=None).
- @type start_response: callable
-
- @return: HTTP application results.
- @rtype: list
- """
- # ensure the shared database exists
- self.state.ensure_database(self.SHARED_DB_NAME)
- return http_app.HTTPApp.__call__(self, environ, start_response)
-
-
-#-----------------------------------------------------------------------------
-# Auxiliary functions
-#-----------------------------------------------------------------------------
-
-def load_configuration(file_path):
- """
- Load server configuration from file.
-
- @param file_path: The path to the configuration file.
- @type file_path: str
-
- @return: A dictionary with the configuration.
- @rtype: dict
- """
- conf = {
- 'couch_url': 'http://localhost:5984',
- }
- config = configparser.ConfigParser()
- config.read(file_path)
- if 'soledad-server' in config:
- for key in conf:
- if key in config['soledad-server']:
- conf[key] = config['soledad-server'][key]
- # TODO: implement basic parsing/sanitization of options coming from
- # config file.
- return conf
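-
-
-# An illustrative configuration file (couch_url is currently the only option
-# that is parsed):
-#
-# [soledad-server]
-# couch_url = http://localhost:5984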
-
-
-#-----------------------------------------------------------------------------
-# Run as Twisted WSGI Resource
-#-----------------------------------------------------------------------------
-
-conf = load_configuration('/etc/leap/soledad-server.conf')
-state = CouchServerState(conf['couch_url'])
-
-# WSGI application that may be used by `twistd -web`
-application = SoledadAuthMiddleware(SoledadApp(state))
-
-resource = WSGIResource(reactor, reactor.getThreadPool(), application)
diff --git a/src/leap/soledad/shared_db.py b/src/leap/soledad/shared_db.py
deleted file mode 100644
index 33c5c484..00000000
--- a/src/leap/soledad/shared_db.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# -*- coding: utf-8 -*-
-# shared_db.py
-# Copyright (C) 2013 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-"""
-A shared database for storing/retrieving encrypted key material.
-"""
-
-import simplejson as json
-
-
-from u1db.remote import http_database
-
-
-from leap.soledad.auth import TokenBasedAuth
-
-
-#-----------------------------------------------------------------------------
-# Soledad shared database
-#-----------------------------------------------------------------------------
-
-class NoTokenForAuth(Exception):
- """
- No token was found for token-based authentication.
- """
-
-
-class Unauthorized(Exception):
- """
- User does not have authorization to perform task.
- """
-
-
-class SoledadSharedDatabase(http_database.HTTPDatabase, TokenBasedAuth):
- """
- This is a shared recovery database that enables users to store their
- encryption secrets in the server and retrieve them afterwards.
- """
- # TODO: prevent client from messing with the shared DB.
- # TODO: define and document API.
-
- #
- # Token auth methods.
- #
-
- def set_token_credentials(self, uuid, token):
- """
- Store given credentials so we can sign the request later.
-
- @param uuid: The user's uuid.
- @type uuid: str
- @param token: The authentication token.
- @type token: str
- """
- TokenBasedAuth.set_token_credentials(self, uuid, token)
-
- def _sign_request(self, method, url_query, params):
- """
- Return an authorization header to be included in the HTTP request.
-
- @param method: The HTTP method.
- @type method: str
- @param url_query: The URL query string.
- @type url_query: str
- @param params: A list with encoded query parameters.
-        @type params: list
-
- @return: The Authorization header.
- @rtype: list of tuple
- """
- return TokenBasedAuth._sign_request(self, method, url_query, params)
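-
-    # Illustrative: with credentials ('user-uuid', 'auth-token'), the header
-    # returned above is expected to be of the form
-    #
-    #   [('Authorization', 'Token ' + b64encode('user-uuid:auth-token'))]
-    #
-    # i.e. the "Token" scheme followed by base64("<uuid>:<token>").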
-
- #
- # Modified HTTPDatabase methods.
- #
-
- @staticmethod
- def open_database(url, create, creds=None):
- # TODO: users should not be able to create the shared database, so we
- # have to remove this from here in the future.
- """
- Open a Soledad shared database.
-
- @param url: URL of the remote database.
- @type url: str
- @param create: Should the database be created if it does not already
- exist?
- @type create: bool
-        @param creds: A tuple containing the authentication method and
-            credentials.
-        @type creds: tuple
-
- @return: The shared database in the given url.
- @rtype: SoledadSharedDatabase
- """
- db = SoledadSharedDatabase(url, creds=creds)
- db.open(create)
- return db
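-
-    # Minimal usage sketch (the URL is hypothetical; the creds format is the
-    # one exercised by the test suite):
-    #
-    #   db = SoledadSharedDatabase.open_database(
-    #       'https://provider.example/shared', create=False,
-    #       creds={'token': {'uuid': 'user-uuid', 'token': 'auth-token'}})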
-
- @staticmethod
- def delete_database(url):
- """
-        Dummy method that prevents deletion of the shared database.
-
- @raise: This will always raise an Unauthorized exception.
-
- @param url: The database URL.
- @type url: str
- """
- raise Unauthorized("Can't delete shared database.")
-
- def __init__(self, url, document_factory=None, creds=None):
- """
- Initialize database with auth token and encryption powers.
-
- @param url: URL of the remote database.
- @type url: str
-        @param document_factory: A factory for U1DB documents.
- @type document_factory: u1db.Document
- @param creds: A tuple containing the authentication method and
- credentials.
- @type creds: tuple
- """
- http_database.HTTPDatabase.__init__(self, url, document_factory,
- creds)
diff --git a/src/leap/soledad/tests/__init__.py b/src/leap/soledad/tests/__init__.py
deleted file mode 100644
index 9fec5530..00000000
--- a/src/leap/soledad/tests/__init__.py
+++ /dev/null
@@ -1,267 +0,0 @@
-"""
-Tests to make sure Soledad provides U1DB functionality and more.
-"""
-
-import os
-import u1db
-from mock import Mock
-
-
-from leap.soledad import Soledad
-from leap.soledad.crypto import SoledadCrypto
-from leap.soledad.backends.leap_backend import (
- LeapDocument,
- decrypt_doc,
- ENC_SCHEME_KEY,
-)
-from leap.common.testing.basetest import BaseLeapTest
-
-
-#-----------------------------------------------------------------------------
-# Some tests inherit from BaseSoledadTest in order to have a working Soledad
-# instance in each test.
-#-----------------------------------------------------------------------------
-
-ADDRESS = 'leap@leap.se'
-
-
-class BaseSoledadTest(BaseLeapTest):
- """
- Instantiates Soledad for usage in tests.
- """
-
- def setUp(self):
- # config info
- self.db1_file = os.path.join(self.tempdir, "db1.u1db")
- self.db2_file = os.path.join(self.tempdir, "db2.u1db")
- self.email = ADDRESS
- # open test dbs
- self._db1 = u1db.open(self.db1_file, create=True,
- document_factory=LeapDocument)
- self._db2 = u1db.open(self.db2_file, create=True,
- document_factory=LeapDocument)
- # initialize soledad by hand so we can control keys
- self._soledad = self._soledad_instance(user=self.email)
-
- def tearDown(self):
- self._db1.close()
- self._db2.close()
- for f in [self._soledad._local_db_path, self._soledad._secrets_path]:
- if os.path.isfile(f):
- os.unlink(f)
- self._soledad.close()
-
- def _soledad_instance(self, user=ADDRESS, passphrase='123',
- prefix='',
- secrets_path=Soledad.STORAGE_SECRETS_FILE_NAME,
- local_db_path='soledad.u1db', server_url='',
- cert_file=None, secret_id=None):
-
- def _put_doc_side_effect(doc):
- self._doc_put = doc
-
- class MockSharedDB(object):
-
- get_doc = Mock(return_value=None)
- put_doc = Mock(side_effect=_put_doc_side_effect)
-
- def __call__(self):
- return self
-
- Soledad._shared_db = MockSharedDB()
- return Soledad(
- user,
- passphrase,
- secrets_path=os.path.join(self.tempdir, prefix, secrets_path),
- local_db_path=os.path.join(
- self.tempdir, prefix, local_db_path),
-            server_url=server_url, # Soledad will fail if not given a URL.
- cert_file=cert_file,
- secret_id=secret_id)
-
- def assertGetEncryptedDoc(
- self, db, doc_id, doc_rev, content, has_conflicts):
- """
- Assert that the document in the database looks correct.
- """
- exp_doc = self.make_document(doc_id, doc_rev, content,
- has_conflicts=has_conflicts)
- doc = db.get_doc(doc_id)
- if ENC_SCHEME_KEY in doc.content:
- doc.set_json(decrypt_doc(self._soledad._crypto, doc))
- self.assertEqual(exp_doc.doc_id, doc.doc_id)
- self.assertEqual(exp_doc.rev, doc.rev)
- self.assertEqual(exp_doc.has_conflicts, doc.has_conflicts)
- self.assertEqual(exp_doc.content, doc.content)
-
-
-# Key material for testing
-KEY_FINGERPRINT = "E36E738D69173C13D709E44F2F455E2824D18DDF"
-PUBLIC_KEY = """
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1.4.10 (GNU/Linux)
-
-mQINBFC9+dkBEADNRfwV23TWEoGc/x0wWH1P7PlXt8MnC2Z1kKaKKmfnglVrpOiz
-iLWoiU58sfZ0L5vHkzXHXCBf6Eiy/EtUIvdiWAn+yASJ1mk5jZTBKO/WMAHD8wTO
-zpMsFmWyg3xc4DkmFa9KQ5EVU0o/nqPeyQxNMQN7px5pPwrJtJFmPxnxm+aDkPYx
-irDmz/4DeDNqXliazGJKw7efqBdlwTHkl9Akw2gwy178pmsKwHHEMOBOFFvX61AT
-huKqHYmlCGSliwbrJppTG7jc1/ls3itrK+CWTg4txREkSpEVmfcASvw/ZqLbjgfs
-d/INMwXnR9U81O8+7LT6yw/ca4ppcFoJD7/XJbkRiML6+bJ4Dakiy6i727BzV17g
-wI1zqNvm5rAhtALKfACha6YO43aJzairO4II1wxVHvRDHZn2IuKDDephQ3Ii7/vb
-hUOf6XCSmchkAcpKXUOvbxm1yfB1LRa64mMc2RcZxf4mW7KQkulBsdV5QG2276lv
-U2UUy2IutXcGP5nXC+f6sJJGJeEToKJ57yiO/VWJFjKN8SvP+7AYsQSqINUuEf6H
-T5gCPCraGMkTUTPXrREvu7NOohU78q6zZNaL3GW8ai7eSeANSuQ8Vzffx7Wd8Y7i
-Pw9sYj0SMFs1UgjbuL6pO5ueHh+qyumbtAq2K0Bci0kqOcU4E9fNtdiovQARAQAB
-tBxMZWFwIFRlc3QgS2V5IDxsZWFwQGxlYXAuc2U+iQI3BBMBCAAhBQJQvfnZAhsD
-BQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEC9FXigk0Y3fT7EQAKH3IuRniOpb
-T/DDIgwwjz3oxB/W0DDMyPXowlhSOuM0rgGfntBpBb3boezEXwL86NPQxNGGruF5
-hkmecSiuPSvOmQlqlS95NGQp6hNG0YaKColh+Q5NTspFXCAkFch9oqUje0LdxfSP
-QfV9UpeEvGyPmk1I9EJV/YDmZ4+Djge1d7qhVZInz4Rx1NrSyF/Tc2EC0VpjQFsU
-Y9Kb2YBBR7ivG6DBc8ty0jJXi7B4WjkFcUEJviQpMF2dCLdonCehYs1PqsN1N7j+
-eFjQd+hqVMJgYuSGKjvuAEfClM6MQw7+FmFwMyLgK/Ew/DttHEDCri77SPSkOGSI
-txCzhTg6798f6mJr7WcXmHX1w1Vcib5FfZ8vTDFVhz/XgAgArdhPo9V6/1dgSSiB
-KPQ/spsco6u5imdOhckERE0lnAYvVT6KE81TKuhF/b23u7x+Wdew6kK0EQhYA7wy
-7LmlaNXc7rMBQJ9Z60CJ4JDtatBWZ0kNrt2VfdDHVdqBTOpl0CraNUjWE5YMDasr
-K2dF5IX8D3uuYtpZnxqg0KzyLg0tzL0tvOL1C2iudgZUISZNPKbS0z0v+afuAAnx
-2pTC3uezbh2Jt8SWTLhll4i0P4Ps5kZ6HQUO56O+/Z1cWovX+mQekYFmERySDR9n
-3k1uAwLilJmRmepGmvYbB8HloV8HqwgguQINBFC9+dkBEAC0I/xn1uborMgDvBtf
-H0sEhwnXBC849/32zic6udB6/3Efk9nzbSpL3FSOuXITZsZgCHPkKarnoQ2ztMcS
-sh1ke1C5gQGms75UVmM/nS+2YI4vY8OX/GC/on2vUyncqdH+bR6xH5hx4NbWpfTs
-iQHmz5C6zzS/kuabGdZyKRaZHt23WQ7JX/4zpjqbC99DjHcP9BSk7tJ8wI4bkMYD
-uFVQdT9O6HwyKGYwUU4sAQRAj7XCTGvVbT0dpgJwH4RmrEtJoHAx4Whg8mJ710E0
-GCmzf2jqkNuOw76ivgk27Kge+Hw00jmJjQhHY0yVbiaoJwcRrPKzaSjEVNgrpgP3
-lXPRGQArgESsIOTeVVHQ8fhK2YtTeCY9rIiO+L0OX2xo9HK7hfHZZWL6rqymXdyS
-fhzh/f6IPyHFWnvj7Brl7DR8heMikygcJqv+ed2yx7iLyCUJ10g12I48+aEj1aLe
-dP7lna32iY8/Z0SHQLNH6PXO9SlPcq2aFUgKqE75A/0FMk7CunzU1OWr2ZtTLNO1
-WT/13LfOhhuEq9jTyTosn0WxBjJKq18lnhzCXlaw6EAtbA7CUwsD3CTPR56aAXFK
-3I7KXOVAqggrvMe5Tpdg5drfYpI8hZovL5aAgb+7Y5ta10TcJdUhS5K3kFAWe/td
-U0cmWUMDP1UMSQ5Jg6JIQVWhSwARAQABiQIfBBgBCAAJBQJQvfnZAhsMAAoJEC9F
-Xigk0Y3fRwsP/i0ElYCyxeLpWJTwo1iCLkMKz2yX1lFVa9nT1BVTPOQwr/IAc5OX
-NdtbJ14fUsKL5pWgW8OmrXtwZm1y4euI1RPWWubG01ouzwnGzv26UcuHeqC5orZj
-cOnKtL40y8VGMm8LoicVkRJH8blPORCnaLjdOtmA3rx/v2EXrJpSa3AhOy0ZSRXk
-ZSrK68AVNwamHRoBSYyo0AtaXnkPX4+tmO8X8BPfj125IljubvwZPIW9VWR9UqCE
-VPfDR1XKegVb6VStIywF7kmrknM1C5qUY28rdZYWgKorw01hBGV4jTW0cqde3N51
-XT1jnIAa+NoXUM9uQoGYMiwrL7vNsLlyyiW5ayDyV92H/rIuiqhFgbJsHTlsm7I8
-oGheR784BagAA1NIKD1qEO9T6Kz9lzlDaeWS5AUKeXrb7ZJLI1TTCIZx5/DxjLqM
-Tt/RFBpVo9geZQrvLUqLAMwdaUvDXC2c6DaCPXTh65oCZj/hqzlJHH+RoTWWzKI+
-BjXxgUWF9EmZUBrg68DSmI+9wuDFsjZ51BcqvJwxyfxtTaWhdoYqH/UQS+D1FP3/
-diZHHlzwVwPICzM9ooNTgbrcDzyxRkIVqsVwBq7EtzcvgYUyX53yG25Giy6YQaQ2
-ZtQ/VymwFL3XdUWV6B/hU4PVAFvO3qlOtdJ6TpE+nEWgcWjCv5g7RjXX
-=MuOY
------END PGP PUBLIC KEY BLOCK-----
-"""
-PRIVATE_KEY = """
------BEGIN PGP PRIVATE KEY BLOCK-----
-Version: GnuPG v1.4.10 (GNU/Linux)
-
-lQcYBFC9+dkBEADNRfwV23TWEoGc/x0wWH1P7PlXt8MnC2Z1kKaKKmfnglVrpOiz
-iLWoiU58sfZ0L5vHkzXHXCBf6Eiy/EtUIvdiWAn+yASJ1mk5jZTBKO/WMAHD8wTO
-zpMsFmWyg3xc4DkmFa9KQ5EVU0o/nqPeyQxNMQN7px5pPwrJtJFmPxnxm+aDkPYx
-irDmz/4DeDNqXliazGJKw7efqBdlwTHkl9Akw2gwy178pmsKwHHEMOBOFFvX61AT
-huKqHYmlCGSliwbrJppTG7jc1/ls3itrK+CWTg4txREkSpEVmfcASvw/ZqLbjgfs
-d/INMwXnR9U81O8+7LT6yw/ca4ppcFoJD7/XJbkRiML6+bJ4Dakiy6i727BzV17g
-wI1zqNvm5rAhtALKfACha6YO43aJzairO4II1wxVHvRDHZn2IuKDDephQ3Ii7/vb
-hUOf6XCSmchkAcpKXUOvbxm1yfB1LRa64mMc2RcZxf4mW7KQkulBsdV5QG2276lv
-U2UUy2IutXcGP5nXC+f6sJJGJeEToKJ57yiO/VWJFjKN8SvP+7AYsQSqINUuEf6H
-T5gCPCraGMkTUTPXrREvu7NOohU78q6zZNaL3GW8ai7eSeANSuQ8Vzffx7Wd8Y7i
-Pw9sYj0SMFs1UgjbuL6pO5ueHh+qyumbtAq2K0Bci0kqOcU4E9fNtdiovQARAQAB
-AA/+JHtlL39G1wsH9R6UEfUQJGXR9MiIiwZoKcnRB2o8+DS+OLjg0JOh8XehtuCs
-E/8oGQKtQqa5bEIstX7IZoYmYFiUQi9LOzIblmp2vxOm+HKkxa4JszWci2/ZmC3t
-KtaA4adl9XVnshoQ7pijuCMUKB3naBEOAxd8s9d/JeReGIYkJErdrnVfNk5N71Ds
-FmH5Ll3XtEDvgBUQP3nkA6QFjpsaB94FHjL3gDwum/cxzj6pCglcvHOzEhfY0Ddb
-J967FozQTaf2JW3O+w3LOqtcKWpq87B7+O61tVidQPSSuzPjCtFF0D2LC9R/Hpky
-KTMQ6CaKja4MPhjwywd4QPcHGYSqjMpflvJqi+kYIt8psUK/YswWjnr3r4fbuqVY
-VhtiHvnBHQjz135lUqWvEz4hM3Xpnxydx7aRlv5NlevK8+YIO5oFbWbGNTWsPZI5
-jpoFBpSsnR1Q5tnvtNHauvoWV+XN2qAOBTG+/nEbDYH6Ak3aaE9jrpTdYh0CotYF
-q7csANsDy3JvkAzeU6WnYpsHHaAjqOGyiZGsLej1UcXPFMosE/aUo4WQhiS8Zx2c
-zOVKOi/X5vQ2GdNT9Qolz8AriwzsvFR+bxPzyd8V6ALwDsoXvwEYinYBKK8j0OPv
-OOihSR6HVsuP9NUZNU9ewiGzte/+/r6pNXHvR7wTQ8EWLcEIAN6Zyrb0bHZTIlxt
-VWur/Ht2mIZrBaO50qmM5RD3T5oXzWXi/pjLrIpBMfeZR9DWfwQwjYzwqi7pxtYx
-nJvbMuY505rfnMoYxb4J+cpRXV8MS7Dr1vjjLVUC9KiwSbM3gg6emfd2yuA93ihv
-Pe3mffzLIiQa4mRE3wtGcioC43nWuV2K2e1KjxeFg07JhrezA/1Cak505ab/tmvP
-4YmjR5c44+yL/YcQ3HdFgs4mV+nVbptRXvRcPpolJsgxPccGNdvHhsoR4gwXMS3F
-RRPD2z6x8xeN73Q4KH3bm01swQdwFBZbWVfmUGLxvN7leCdfs9+iFJyqHiCIB6Iv
-mQfp8F0IAOwSo8JhWN+V1dwML4EkIrM8wUb4yecNLkyR6TpPH/qXx4PxVMC+vy6x
-sCtjeHIwKE+9vqnlhd5zOYh7qYXEJtYwdeDDmDbL8oks1LFfd+FyAuZXY33DLwn0
-cRYsr2OEZmaajqUB3NVmj3H4uJBN9+paFHyFSXrH68K1Fk2o3n+RSf2EiX+eICwI
-L6rqoF5sSVUghBWdNegV7qfy4anwTQwrIMGjgU5S6PKW0Dr/3iO5z3qQpGPAj5OW
-ATqPWkDICLbObPxD5cJlyyNE2wCA9VVc6/1d6w4EVwSq9h3/WTpATEreXXxTGptd
-LNiTA1nmakBYNO2Iyo3djhaqBdWjk+EIAKtVEnJH9FAVwWOvaj1RoZMA5DnDMo7e
-SnhrCXl8AL7Z1WInEaybasTJXn1uQ8xY52Ua4b8cbuEKRKzw/70NesFRoMLYoHTO
-dyeszvhoDHberpGRTciVmpMu7Hyi33rM31K9epA4ib6QbbCHnxkWOZB+Bhgj1hJ8
-xb4RBYWiWpAYcg0+DAC3w9gfxQhtUlZPIbmbrBmrVkO2GVGUj8kH6k4UV6kUHEGY
-HQWQR0HcbKcXW81ZXCCD0l7ROuEWQtTe5Jw7dJ4/QFuqZnPutXVRNOZqpl6eRShw
-7X2/a29VXBpmHA95a88rSQsL+qm7Fb3prqRmuMCtrUZgFz7HLSTuUMR867QcTGVh
-cCBUZXN0IEtleSA8bGVhcEBsZWFwLnNlPokCNwQTAQgAIQUCUL352QIbAwULCQgH
-AwUVCgkICwUWAgMBAAIeAQIXgAAKCRAvRV4oJNGN30+xEACh9yLkZ4jqW0/wwyIM
-MI896MQf1tAwzMj16MJYUjrjNK4Bn57QaQW926HsxF8C/OjT0MTRhq7heYZJnnEo
-rj0rzpkJapUveTRkKeoTRtGGigqJYfkOTU7KRVwgJBXIfaKlI3tC3cX0j0H1fVKX
-hLxsj5pNSPRCVf2A5mePg44HtXe6oVWSJ8+EcdTa0shf03NhAtFaY0BbFGPSm9mA
-QUe4rxugwXPLctIyV4uweFo5BXFBCb4kKTBdnQi3aJwnoWLNT6rDdTe4/nhY0Hfo
-alTCYGLkhio77gBHwpTOjEMO/hZhcDMi4CvxMPw7bRxAwq4u+0j0pDhkiLcQs4U4
-Ou/fH+pia+1nF5h19cNVXIm+RX2fL0wxVYc/14AIAK3YT6PVev9XYEkogSj0P7Kb
-HKOruYpnToXJBERNJZwGL1U+ihPNUyroRf29t7u8flnXsOpCtBEIWAO8Muy5pWjV
-3O6zAUCfWetAieCQ7WrQVmdJDa7dlX3Qx1XagUzqZdAq2jVI1hOWDA2rKytnReSF
-/A97rmLaWZ8aoNCs8i4NLcy9Lbzi9QtornYGVCEmTTym0tM9L/mn7gAJ8dqUwt7n
-s24dibfElky4ZZeItD+D7OZGeh0FDuejvv2dXFqL1/pkHpGBZhEckg0fZ95NbgMC
-4pSZkZnqRpr2GwfB5aFfB6sIIJ0HGARQvfnZARAAtCP8Z9bm6KzIA7wbXx9LBIcJ
-1wQvOPf99s4nOrnQev9xH5PZ820qS9xUjrlyE2bGYAhz5Cmq56ENs7THErIdZHtQ
-uYEBprO+VFZjP50vtmCOL2PDl/xgv6J9r1Mp3KnR/m0esR+YceDW1qX07IkB5s+Q
-us80v5LmmxnWcikWmR7dt1kOyV/+M6Y6mwvfQ4x3D/QUpO7SfMCOG5DGA7hVUHU/
-Tuh8MihmMFFOLAEEQI+1wkxr1W09HaYCcB+EZqxLSaBwMeFoYPJie9dBNBgps39o
-6pDbjsO+or4JNuyoHvh8NNI5iY0IR2NMlW4mqCcHEazys2koxFTYK6YD95Vz0RkA
-K4BErCDk3lVR0PH4StmLU3gmPayIjvi9Dl9saPRyu4Xx2WVi+q6spl3ckn4c4f3+
-iD8hxVp74+wa5ew0fIXjIpMoHCar/nndsse4i8glCddINdiOPPmhI9Wi3nT+5Z2t
-9omPP2dEh0CzR+j1zvUpT3KtmhVICqhO+QP9BTJOwrp81NTlq9mbUyzTtVk/9dy3
-zoYbhKvY08k6LJ9FsQYySqtfJZ4cwl5WsOhALWwOwlMLA9wkz0eemgFxStyOylzl
-QKoIK7zHuU6XYOXa32KSPIWaLy+WgIG/u2ObWtdE3CXVIUuSt5BQFnv7XVNHJllD
-Az9VDEkOSYOiSEFVoUsAEQEAAQAP/1AagnZQZyzHDEgw4QELAspYHCWLXE5aZInX
-wTUJhK31IgIXNn9bJ0hFiSpQR2xeMs9oYtRuPOu0P8oOFMn4/z374fkjZy8QVY3e
-PlL+3EUeqYtkMwlGNmVw5a/NbNuNfm5Darb7pEfbYd1gPcni4MAYw7R2SG/57GbC
-9gucvspHIfOSfBNLBthDzmK8xEKe1yD2eimfc2T7IRYb6hmkYfeds5GsqvGI6mwI
-85h4uUHWRc5JOlhVM6yX8hSWx0L60Z3DZLChmc8maWnFXd7C8eQ6P1azJJbW71Ih
-7CoK0XW4LE82vlQurSRFgTwfl7wFYszW2bOzCuhHDDtYnwH86Nsu0DC78ZVRnvxn
-E8Ke/AJgrdhIOo4UAyR+aZD2+2mKd7/waOUTUrUtTzc7i8N3YXGi/EIaNReBXaq+
-ZNOp24BlFzRp+FCF/pptDW9HjPdiV09x0DgICmeZS4Gq/4vFFIahWctg52NGebT0
-Idxngjj+xDtLaZlLQoOz0n5ByjO/Wi0ANmMv1sMKCHhGvdaSws2/PbMR2r4caj8m
-KXpIgdinM/wUzHJ5pZyF2U/qejsRj8Kw8KH/tfX4JCLhiaP/mgeTuWGDHeZQERAT
-xPmRFHaLP9/ZhvGNh6okIYtrKjWTLGoXvKLHcrKNisBLSq+P2WeFrlme1vjvJMo/
-jPwLT5o9CADQmcbKZ+QQ1ZM9v99iDZol7SAMZX43JC019sx6GK0u6xouJBcLfeB4
-OXacTgmSYdTa9RM9fbfVpti01tJ84LV2SyL/VJq/enJF4XQPSynT/tFTn1PAor6o
-tEAAd8fjKdJ6LnD5wb92SPHfQfXqI84rFEO8rUNIE/1ErT6DYifDzVCbfD2KZdoF
-cOSp7TpD77sY1bs74ocBX5ejKtd+aH99D78bJSMM4pSDZsIEwnomkBHTziubPwJb
-OwnATy0LmSMAWOw5rKbsh5nfwCiUTM20xp0t5JeXd+wPVWbpWqI2EnkCEN+RJr9i
-7dp/ymDQ+Yt5wrsN3NwoyiexPOG91WQVCADdErHsnglVZZq9Z8Wx7KwecGCUurJ2
-H6lKudv5YOxPnAzqZS5HbpZd/nRTMZh2rdXCr5m2YOuewyYjvM757AkmUpM09zJX
-MQ1S67/UX2y8/74TcRF97Ncx9HeELs92innBRXoFitnNguvcO6Esx4BTe1OdU6qR
-ER3zAmVf22Le9ciXbu24DN4mleOH+OmBx7X2PqJSYW9GAMTsRB081R6EWKH7romQ
-waxFrZ4DJzZ9ltyosEJn5F32StyLrFxpcrdLUoEaclZCv2qka7sZvi0EvovDVEBU
-e10jOx9AOwf8Gj2ufhquQ6qgVYCzbP+YrodtkFrXRS3IsljIchj1M2ffB/0bfoUs
-rtER9pLvYzCjBPg8IfGLw0o754Qbhh/ReplCRTusP/fQMybvCvfxreS3oyEriu/G
-GufRomjewZ8EMHDIgUsLcYo2UHZsfF7tcazgxMGmMvazp4r8vpgrvW/8fIN/6Adu
-tF+WjWDTvJLFJCe6O+BFJOWrssNrrra1zGtLC1s8s+Wfpe+bGPL5zpHeebGTwH1U
-22eqgJArlEKxrfarz7W5+uHZJHSjF/K9ZvunLGD0n9GOPMpji3UO3zeM8IYoWn7E
-/EWK1XbjnssNemeeTZ+sDh+qrD7BOi+vCX1IyBxbfqnQfJZvmcPWpruy1UsO+aIC
-0GY8Jr3OL69dDQ21jueJAh8EGAEIAAkFAlC9+dkCGwwACgkQL0VeKCTRjd9HCw/+
-LQSVgLLF4ulYlPCjWIIuQwrPbJfWUVVr2dPUFVM85DCv8gBzk5c121snXh9Swovm
-laBbw6ate3BmbXLh64jVE9Za5sbTWi7PCcbO/bpRy4d6oLmitmNw6cq0vjTLxUYy
-bwuiJxWREkfxuU85EKdouN062YDevH+/YResmlJrcCE7LRlJFeRlKsrrwBU3BqYd
-GgFJjKjQC1peeQ9fj62Y7xfwE9+PXbkiWO5u/Bk8hb1VZH1SoIRU98NHVcp6BVvp
-VK0jLAXuSauSczULmpRjbyt1lhaAqivDTWEEZXiNNbRyp17c3nVdPWOcgBr42hdQ
-z25CgZgyLCsvu82wuXLKJblrIPJX3Yf+si6KqEWBsmwdOWybsjygaF5HvzgFqAAD
-U0goPWoQ71PorP2XOUNp5ZLkBQp5etvtkksjVNMIhnHn8PGMuoxO39EUGlWj2B5l
-Cu8tSosAzB1pS8NcLZzoNoI9dOHrmgJmP+GrOUkcf5GhNZbMoj4GNfGBRYX0SZlQ
-GuDrwNKYj73C4MWyNnnUFyq8nDHJ/G1NpaF2hiof9RBL4PUU/f92JkceXPBXA8gL
-Mz2ig1OButwPPLFGQhWqxXAGrsS3Ny+BhTJfnfIbbkaLLphBpDZm1D9XKbAUvdd1
-RZXoH+FTg9UAW87eqU610npOkT6cRaBxaMK/mDtGNdc=
-=JTFu
------END PGP PRIVATE KEY BLOCK-----
-"""
-
-__all__ = [
- 'test_couch',
-    'test_crypto',
-    'test_leap_backend',
-    'test_server',
-    'test_soledad',
-    'test_sqlcipher',
- 'u1db_tests',
-]
diff --git a/src/leap/soledad/tests/couchdb.ini.template b/src/leap/soledad/tests/couchdb.ini.template
deleted file mode 100644
index 7d0316f0..00000000
--- a/src/leap/soledad/tests/couchdb.ini.template
+++ /dev/null
@@ -1,222 +0,0 @@
-; etc/couchdb/default.ini.tpl. Generated from default.ini.tpl.in by configure.
-
-; Upgrading CouchDB will overwrite this file.
-
-[couchdb]
-database_dir = %(tempdir)s/lib
-view_index_dir = %(tempdir)s/lib
-max_document_size = 4294967296 ; 4 GB
-os_process_timeout = 5000 ; 5 seconds. for view and external servers.
-max_dbs_open = 100
-delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned
-uri_file = %(tempdir)s/lib/couch.uri
-file_compression = snappy
-
-[database_compaction]
-; larger buffer sizes can result in smaller files
-doc_buffer_size = 524288 ; value in bytes
-checkpoint_after = 5242880 ; checkpoint after every N bytes were written
-
-[view_compaction]
-; larger buffer sizes can result in smaller files
-keyvalue_buffer_size = 2097152 ; value in bytes
-
-[httpd]
-port = 0
-bind_address = 127.0.0.1
-authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
-default_handler = {couch_httpd_db, handle_request}
-secure_rewrites = true
-vhost_global_handlers = _utils, _uuids, _session, _oauth, _users
-allow_jsonp = false
-; Options for the MochiWeb HTTP server.
-;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
-; For more socket options, consult Erlang's module 'inet' man page.
-;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
-log_max_chunk_size = 1000000
-
-[log]
-file = %(tempdir)s/log/couch.log
-level = info
-include_sasl = true
-
-[couch_httpd_auth]
-authentication_db = _users
-authentication_redirect = /_utils/session.html
-require_valid_user = false
-timeout = 600 ; number of seconds before automatic logout
-auth_cache_size = 50 ; size is number of cache entries
-allow_persistent_cookies = false ; set to true to allow persistent cookies
-
-[couch_httpd_oauth]
-; If set to 'true', oauth token and consumer secrets will be looked up
-; in the authentication database (_users). These secrets are stored in
-; a top level property named "oauth" in user documents. Example:
-; {
-; "_id": "org.couchdb.user:joe",
-; "type": "user",
-; "name": "joe",
-; "password_sha": "fe95df1ca59a9b567bdca5cbaf8412abd6e06121",
-; "salt": "4e170ffeb6f34daecfd814dfb4001a73"
-; "roles": ["foo", "bar"],
-; "oauth": {
-; "consumer_keys": {
-; "consumerKey1": "key1Secret",
-; "consumerKey2": "key2Secret"
-; },
-; "tokens": {
-; "token1": "token1Secret",
-; "token2": "token2Secret"
-; }
-; }
-; }
-use_users_db = false
-
-[query_servers]
-; javascript = %(tempdir)s/server/main.js
-
-
-; Changing reduce_limit to false will disable reduce_limit.
-; If you think you're hitting reduce_limit with a "good" reduce function,
-; please let us know on the mailing list so we can fine tune the heuristic.
-[query_server_config]
-reduce_limit = true
-os_process_limit = 25
-
-[daemons]
-view_manager={couch_view, start_link, []}
-external_manager={couch_external_manager, start_link, []}
-query_servers={couch_query_servers, start_link, []}
-vhosts={couch_httpd_vhost, start_link, []}
-httpd={couch_httpd, start_link, []}
-stats_aggregator={couch_stats_aggregator, start, []}
-stats_collector={couch_stats_collector, start, []}
-uuids={couch_uuids, start, []}
-auth_cache={couch_auth_cache, start_link, []}
-replication_manager={couch_replication_manager, start_link, []}
-os_daemons={couch_os_daemons, start_link, []}
-compaction_daemon={couch_compaction_daemon, start_link, []}
-
-[httpd_global_handlers]
-/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
-
-_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req}
-_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
-_config = {couch_httpd_misc_handlers, handle_config_req}
-_replicate = {couch_httpd_replicator, handle_req}
-_uuids = {couch_httpd_misc_handlers, handle_uuids_req}
-_restart = {couch_httpd_misc_handlers, handle_restart_req}
-_stats = {couch_httpd_stats_handlers, handle_stats_req}
-_log = {couch_httpd_misc_handlers, handle_log_req}
-_session = {couch_httpd_auth, handle_session_req}
-_oauth = {couch_httpd_oauth, handle_oauth_req}
-
-[httpd_db_handlers]
-_view_cleanup = {couch_httpd_db, handle_view_cleanup_req}
-_compact = {couch_httpd_db, handle_compact_req}
-_design = {couch_httpd_db, handle_design_req}
-_temp_view = {couch_httpd_view, handle_temp_view_req}
-_changes = {couch_httpd_db, handle_changes_req}
-
-; The external module takes an optional argument allowing you to narrow it to a
-; single script. Otherwise the script name is inferred from the first path section
-; after _external's own path.
-; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>}
-; _external = {couch_httpd_external, handle_external_req}
-
-[httpd_design_handlers]
-_view = {couch_httpd_view, handle_view_req}
-_show = {couch_httpd_show, handle_doc_show_req}
-_list = {couch_httpd_show, handle_view_list_req}
-_info = {couch_httpd_db, handle_design_info_req}
-_rewrite = {couch_httpd_rewrite, handle_rewrite_req}
-_update = {couch_httpd_show, handle_doc_update_req}
-
-; enable external as an httpd handler, then link it with commands here.
-; note, this api is still under consideration.
-; [external]
-; mykey = /path/to/mycommand
-
-; Here you can setup commands for CouchDB to manage
-; while it is alive. It will attempt to keep each command
-; alive if it exits.
-; [os_daemons]
-; some_daemon_name = /path/to/script -with args
-
-
-[uuids]
-; Known algorithms:
-; random - 128 bits of random awesome
-; All awesome, all the time.
-; sequential - monotonically increasing ids with random increments
-; First 26 hex characters are random. Last 6 increment in
-; random amounts until an overflow occurs. On overflow, the
-; random prefix is regenerated and the process starts over.
-; utc_random - Time since Jan 1, 1970 UTC with microseconds
-; First 14 characters are the time in hex. Last 18 are random.
-algorithm = sequential
-
-[stats]
-; rate is in milliseconds
-rate = 1000
-; sample intervals are in seconds
-samples = [0, 60, 300, 900]
-
-[attachments]
-compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
-compressible_types = text/*, application/javascript, application/json, application/xml
-
-[replicator]
-db = _replicator
-; Maximum replication retry count can be a non-negative integer or "infinity".
-max_replication_retry_count = 10
-; More worker processes can give higher network throughput but can also
-; imply more disk and network IO.
-worker_processes = 4
-; With lower batch sizes checkpoints are done more frequently. Lower batch sizes
-; also reduce the total amount of RAM used.
-worker_batch_size = 500
-; Maximum number of HTTP connections per replication.
-http_connections = 20
-; HTTP connection timeout per replication.
-; Even for very fast/reliable networks it might need to be increased if a remote
-; database is too busy.
-connection_timeout = 30000
-; If a request fails, the replicator will retry it up to N times.
-retries_per_request = 10
-; Some socket options that might boost performance in some scenarios:
-; {nodelay, boolean()}
-; {sndbuf, integer()}
-; {recbuf, integer()}
-; {priority, integer()}
-; See the `inet` Erlang module's man page for the full list of options.
-socket_options = [{keepalive, true}, {nodelay, false}]
-; Path to a file containing the user's certificate.
-;cert_file = /full/path/to/server_cert.pem
-; Path to file containing user's private PEM encoded key.
-;key_file = /full/path/to/server_key.pem
-; String containing the user's password. Only used if the private keyfile is password protected.
-;password = somepassword
-; Set to true to validate peer certificates.
-verify_ssl_certificates = false
-; File containing a list of peer trusted certificates (in the PEM format).
-;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
-; Maximum peer certificate depth (must be set even if certificate validation is off).
-ssl_certificate_max_depth = 3
-
-[compaction_daemon]
-; The delay, in seconds, between each check for which database and view indexes
-; need to be compacted.
-check_interval = 300
-; If a database or view index file is smaller than this value (in bytes),
-; compaction will not happen. Very small files always have a very high
-; fragmentation, so it is not worth compacting them.
-min_file_size = 131072
-
-[compactions]
-; List of compaction rules for the compaction daemon.
-
-
-;[admins]
-;testuser = -hashed-f50a252c12615697c5ed24ec5cd56b05d66fe91e,b05471ba260132953930cf9f97f327f5
-; pass for above user is 'testpass'
\ No newline at end of file
diff --git a/src/leap/soledad/tests/test_couch.py b/src/leap/soledad/tests/test_couch.py
deleted file mode 100644
index 60a61b71..00000000
--- a/src/leap/soledad/tests/test_couch.py
+++ /dev/null
@@ -1,439 +0,0 @@
-# -*- coding: utf-8 -*-
-# test_couch.py
-# Copyright (C) 2013 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-"""
-Test ObjectStore and Couch backend bits.
-"""
-
-import re
-import copy
-import shutil
-from base64 import b64decode
-
-from leap.common.files import mkdir_p
-
-from leap.soledad.backends import couch
-from leap.soledad.tests import u1db_tests as tests
-from leap.soledad.tests.u1db_tests import test_backends
-from leap.soledad.tests.u1db_tests import test_sync
-import simplejson as json
-from leap.soledad.backends.leap_backend import (
- LeapDocument,
-)
-
-
-#-----------------------------------------------------------------------------
-# A wrapper for running couchdb locally.
-#-----------------------------------------------------------------------------
-
-import os
-import tempfile
-import subprocess
-import time
-import unittest
-
-
-# from: https://github.com/smcq/paisley/blob/master/paisley/test/util.py
-# TODO: include license of above project.
-class CouchDBWrapper(object):
- """
- Wrapper for external CouchDB instance which is started and stopped for
- testing.
- """
-
- def start(self):
- """
- Start a CouchDB instance for a test.
- """
- self.tempdir = tempfile.mkdtemp(suffix='.couch.test')
-
- path = os.path.join(os.path.dirname(__file__),
- 'couchdb.ini.template')
- handle = open(path)
- conf = handle.read() % {
- 'tempdir': self.tempdir,
- }
- handle.close()
-
- confPath = os.path.join(self.tempdir, 'test.ini')
- handle = open(confPath, 'w')
- handle.write(conf)
- handle.close()
-
- # create the dirs from the template
- mkdir_p(os.path.join(self.tempdir, 'lib'))
- mkdir_p(os.path.join(self.tempdir, 'log'))
- args = ['couchdb', '-n', '-a', confPath]
- #null = open('/dev/null', 'w')
- self.process = subprocess.Popen(
- args, env=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
- close_fds=True)
- # find port
- logPath = os.path.join(self.tempdir, 'log', 'couch.log')
- while not os.path.exists(logPath):
- if self.process.poll() is not None:
- raise Exception("""
-couchdb exited with code %d.
-stdout:
-%s
-stderr:
-%s""" % (
- self.process.returncode, self.process.stdout.read(),
- self.process.stderr.read()))
- time.sleep(0.01)
- while os.stat(logPath).st_size == 0:
- time.sleep(0.01)
-        PORT_RE = re.compile(
-            r'Apache CouchDB has started on http://127\.0\.0\.1:(?P<port>\d+)')
-
- handle = open(logPath)
- line = handle.read()
- handle.close()
- m = PORT_RE.search(line)
- if not m:
- self.stop()
- raise Exception("Cannot find port in line %s" % line)
- self.port = int(m.group('port'))
-
- def stop(self):
- """
- Terminate the CouchDB instance.
- """
- self.process.terminate()
- self.process.communicate()
- shutil.rmtree(self.tempdir)
-
-
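-# Typical lifecycle of the wrapper above, as exercised by the test case
-# below (sketch):
-#
-#   wrapper = CouchDBWrapper()
-#   wrapper.start()
-#   url = 'http://localhost:%d' % wrapper.port  # port parsed from the log
-#   # ... run code against the CouchDB instance at `url` ...
-#   wrapper.stop()
-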
-class CouchDBTestCase(unittest.TestCase):
- """
- TestCase base class for tests against a real CouchDB server.
- """
-
- def setUp(self):
- """
- Make sure we have a CouchDB instance for a test.
- """
- self.wrapper = CouchDBWrapper()
- self.wrapper.start()
- #self.db = self.wrapper.db
- unittest.TestCase.setUp(self)
-
- def tearDown(self):
- """
- Stop CouchDB instance for test.
- """
- self.wrapper.stop()
- unittest.TestCase.tearDown(self)
-
-
-#-----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_common_backend`.
-#-----------------------------------------------------------------------------
-
-class TestCouchBackendImpl(CouchDBTestCase):
-
- def test__allocate_doc_id(self):
- db = couch.CouchDatabase('http://localhost:' + str(self.wrapper.port),
- 'u1db_tests')
- doc_id1 = db._allocate_doc_id()
- self.assertTrue(doc_id1.startswith('D-'))
- self.assertEqual(34, len(doc_id1))
- int(doc_id1[len('D-'):], 16)
- self.assertNotEqual(doc_id1, db._allocate_doc_id())
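-        # That is, an allocated id is 'D-' followed by 32 hex digits, for
-        # example (illustrative): 'D-0f1e2d3c4b5a69788796a5b4c3d2e1f0'.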
-
-
-#-----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_backends`.
-#-----------------------------------------------------------------------------
-
-def make_couch_database_for_test(test, replica_uid):
- port = str(test.wrapper.port)
- return couch.CouchDatabase('http://localhost:' + port, replica_uid,
- replica_uid=replica_uid or 'test')
-
-
-def copy_couch_database_for_test(test, db):
- port = str(test.wrapper.port)
- new_db = couch.CouchDatabase('http://localhost:' + port,
- db._replica_uid + '_copy',
- replica_uid=db._replica_uid or 'test')
- gen, docs = db.get_all_docs(include_deleted=True)
- for doc in docs:
- new_db._put_doc(doc)
- new_db._transaction_log = copy.deepcopy(db._transaction_log)
- new_db._conflicts = copy.deepcopy(db._conflicts)
- new_db._other_generations = copy.deepcopy(db._other_generations)
- new_db._indexes = copy.deepcopy(db._indexes)
- new_db._store_u1db_data()
- return new_db
-
-
-def make_document_for_test(test, doc_id, rev, content, has_conflicts=False):
- return LeapDocument(doc_id, rev, content, has_conflicts=has_conflicts)
-
-
-COUCH_SCENARIOS = [
- ('couch', {'make_database_for_test': make_couch_database_for_test,
- 'copy_database_for_test': copy_couch_database_for_test,
- 'make_document_for_test': make_document_for_test, }),
-]
-
-
-class CouchTests(test_backends.AllDatabaseTests, CouchDBTestCase):
-
- scenarios = COUCH_SCENARIOS
-
- def tearDown(self):
- self.db.delete_database()
- test_backends.AllDatabaseTests.tearDown(self)
-
-
-class CouchDatabaseTests(test_backends.LocalDatabaseTests, CouchDBTestCase):
-
- scenarios = COUCH_SCENARIOS
-
- def tearDown(self):
- self.db.delete_database()
- test_backends.LocalDatabaseTests.tearDown(self)
-
-
-class CouchValidateGenNTransIdTests(
- test_backends.LocalDatabaseValidateGenNTransIdTests, CouchDBTestCase):
-
- scenarios = COUCH_SCENARIOS
-
- def tearDown(self):
- self.db.delete_database()
- test_backends.LocalDatabaseValidateGenNTransIdTests.tearDown(self)
-
-
-class CouchValidateSourceGenTests(
- test_backends.LocalDatabaseValidateSourceGenTests, CouchDBTestCase):
-
- scenarios = COUCH_SCENARIOS
-
- def tearDown(self):
- self.db.delete_database()
- test_backends.LocalDatabaseValidateSourceGenTests.tearDown(self)
-
-
-class CouchWithConflictsTests(
- test_backends.LocalDatabaseWithConflictsTests, CouchDBTestCase):
-
- scenarios = COUCH_SCENARIOS
-
- def tearDown(self):
- self.db.delete_database()
- test_backends.LocalDatabaseWithConflictsTests.tearDown(self)
-
-
-# Notice: the CouchDB backend is currently used for storing encrypted data in
-# the server, so indexing makes no sense. Thus, we ignore index testing for
-# now.
-
-class CouchIndexTests(test_backends.DatabaseIndexTests, CouchDBTestCase):
-
- scenarios = COUCH_SCENARIOS
-
- def tearDown(self):
- self.db.delete_database()
- test_backends.DatabaseIndexTests.tearDown(self)
-
-
-#-----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_sync`.
-#-----------------------------------------------------------------------------
-
-target_scenarios = [
- ('local', {'create_db_and_target': test_sync._make_local_db_and_target}), ]
-
-
-simple_doc = tests.simple_doc
-nested_doc = tests.nested_doc
-
-
-class CouchDatabaseSyncTargetTests(test_sync.DatabaseSyncTargetTests,
- CouchDBTestCase):
-
- scenarios = (tests.multiply_scenarios(COUCH_SCENARIOS, target_scenarios))
-
- def tearDown(self):
- self.db.delete_database()
- test_sync.DatabaseSyncTargetTests.tearDown(self)
-
- def test_sync_exchange_returns_many_new_docs(self):
-        # This test was copied from u1db so that dictionaries can be
-        # compared after JSON expansion (because one dictionary may have
-        # many different serialized representations).
- doc = self.db.create_doc_from_json(simple_doc)
- doc2 = self.db.create_doc_from_json(nested_doc)
- self.assertTransactionLog([doc.doc_id, doc2.doc_id], self.db)
- new_gen, _ = self.st.sync_exchange(
- [], 'other-replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=self.receive_doc)
- self.assertTransactionLog([doc.doc_id, doc2.doc_id], self.db)
- self.assertEqual(2, new_gen)
- self.assertEqual(
- [(doc.doc_id, doc.rev, json.loads(simple_doc), 1),
- (doc2.doc_id, doc2.rev, json.loads(nested_doc), 2)],
- [c[:-3] + (json.loads(c[-3]), c[-2]) for c in self.other_changes])
- if self.whitebox:
- self.assertEqual(
- self.db._last_exchange_log['return'],
- {'last_gen': 2, 'docs':
- [(doc.doc_id, doc.rev), (doc2.doc_id, doc2.rev)]})
-
-
-sync_scenarios = []
-for name, scenario in COUCH_SCENARIOS:
- scenario = dict(scenario)
- scenario['do_sync'] = test_sync.sync_via_synchronizer
- sync_scenarios.append((name, scenario))
-
-
-class CouchDatabaseSyncTests(test_sync.DatabaseSyncTests, CouchDBTestCase):
-
- scenarios = sync_scenarios
-
- def setUp(self):
- self.db = None
- self.db1 = None
- self.db2 = None
- self.db3 = None
- test_sync.DatabaseSyncTests.setUp(self)
- CouchDBTestCase.setUp(self)
-
- def tearDown(self):
- self.db and self.db.delete_database()
- self.db1 and self.db1.delete_database()
- self.db2 and self.db2.delete_database()
- self.db3 and self.db3.delete_database()
- db = self.create_database('test1_copy', 'source')
- db.delete_database()
- db = self.create_database('test2_copy', 'target')
- db.delete_database()
- db = self.create_database('test3', 'target')
- db.delete_database()
- test_sync.DatabaseSyncTests.tearDown(self)
-
-
-#-----------------------------------------------------------------------------
-# The following tests test extra functionality introduced by our backends
-#-----------------------------------------------------------------------------
-
-class CouchDatabaseStorageTests(CouchDBTestCase):
-
- def _listify(self, l):
- if type(l) is dict:
- return {
- self._listify(a): self._listify(b) for a, b in l.iteritems()}
- if hasattr(l, '__iter__'):
- return [self._listify(i) for i in l]
- return l
-
- def _fetch_u1db_data(self, db):
- cdoc = db._database.get(db.U1DB_DATA_DOC_ID)
- jsonstr = db._database.get_attachment(cdoc, 'u1db_json').getvalue()
- return json.loads(jsonstr)
-
- def test_transaction_log_storage_after_put(self):
- db = couch.CouchDatabase('http://localhost:' + str(self.wrapper.port),
- 'u1db_tests')
- db.create_doc({'simple': 'doc'})
- content = self._fetch_u1db_data(db)
- self.assertEqual(
- self._listify(db._transaction_log),
- self._listify(
- json.loads(b64decode(content[db.U1DB_TRANSACTION_LOG_KEY]))))
-
- def test_conflict_log_storage_after_put_if_newer(self):
- db = couch.CouchDatabase('http://localhost:' + str(self.wrapper.port),
- 'u1db_tests')
- doc = db.create_doc({'simple': 'doc'})
- doc.set_json(nested_doc)
- doc.rev = db._replica_uid + ':2'
- db._force_doc_sync_conflict(doc)
- content = self._fetch_u1db_data(db)
- self.assertEqual(
- self._listify(db._conflicts),
- self._listify(
- json.loads(b64decode(content[db.U1DB_CONFLICTS_KEY]))))
-
- def test_other_gens_storage_after_set(self):
- db = couch.CouchDatabase('http://localhost:' + str(self.wrapper.port),
- 'u1db_tests')
- doc = db.create_doc({'simple': 'doc'})
- db._set_replica_gen_and_trans_id('a', 'b', 'c')
- content = self._fetch_u1db_data(db)
- self.assertEqual(
- self._listify(db._other_generations),
- self._listify(
- json.loads(b64decode(content[db.U1DB_OTHER_GENERATIONS_KEY]))))
-
- def test_index_storage_after_create(self):
- db = couch.CouchDatabase('http://localhost:' + str(self.wrapper.port),
- 'u1db_tests')
- doc = db.create_doc({'name': 'john'})
- db.create_index('myindex', 'name')
- content = self._fetch_u1db_data(db)
- myind = db._indexes['myindex']
- index = {
- 'myindex': {
- 'definition': myind._definition,
- 'name': myind._name,
- 'values': myind._values,
- }
- }
- self.assertEqual(
- self._listify(index),
- self._listify(
- json.loads(b64decode(content[db.U1DB_INDEXES_KEY]))))
-
- def test_index_storage_after_delete(self):
- db = couch.CouchDatabase('http://localhost:' + str(self.wrapper.port),
- 'u1db_tests')
- doc = db.create_doc({'name': 'john'})
- db.create_index('myindex', 'name')
- db.create_index('myindex2', 'name')
- db.delete_index('myindex')
- content = self._fetch_u1db_data(db)
- myind = db._indexes['myindex2']
- index = {
- 'myindex2': {
- 'definition': myind._definition,
- 'name': myind._name,
- 'values': myind._values,
- }
- }
- self.assertEqual(
- self._listify(index),
- self._listify(
- json.loads(b64decode(content[db.U1DB_INDEXES_KEY]))))
-
- def test_replica_uid_storage_after_db_creation(self):
- db = couch.CouchDatabase('http://localhost:' + str(self.wrapper.port),
- 'u1db_tests')
- content = self._fetch_u1db_data(db)
- self.assertEqual(
- db._replica_uid,
- b64decode(content[db.U1DB_REPLICA_UID_KEY]))
-
-
-load_tests = tests.load_with_scenarios
diff --git a/src/leap/soledad/tests/test_crypto.py b/src/leap/soledad/tests/test_crypto.py
deleted file mode 100644
index ae84dad3..00000000
--- a/src/leap/soledad/tests/test_crypto.py
+++ /dev/null
@@ -1,286 +0,0 @@
-# -*- coding: utf-8 -*-
-# test_crypto.py
-# Copyright (C) 2013 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-"""
-Tests for cryptographic related stuff.
-"""
-
-import os
-import shutil
-import tempfile
-import simplejson as json
-import hashlib
-
-
-from leap.soledad.backends.leap_backend import (
- LeapDocument,
- encrypt_doc,
- decrypt_doc,
- EncryptionSchemes,
- LeapSyncTarget,
- ENC_JSON_KEY,
- ENC_SCHEME_KEY,
- MAC_METHOD_KEY,
- MAC_KEY,
- UnknownMacMethod,
- WrongMac,
-)
-from leap.soledad.backends.couch import CouchDatabase
-from leap.soledad import Soledad
-from leap.soledad.crypto import SoledadCrypto
-from leap.soledad.tests import BaseSoledadTest
-from leap.soledad.tests.test_couch import CouchDBTestCase
-from leap.soledad.tests import (
- KEY_FINGERPRINT,
- PRIVATE_KEY,
-)
-from leap.soledad.tests.u1db_tests import (
- simple_doc,
- nested_doc,
- TestCaseWithServer,
-)
-from leap.soledad.tests.test_leap_backend import make_leap_document_for_test
-from leap.soledad.backends.couch import CouchServerState
-
-
-class EncryptedSyncTestCase(BaseSoledadTest):
- """
- Tests that guarantee that data will always be encrypted when syncing.
- """
-
- def test_encrypt_decrypt_json(self):
- """
- Test encrypting and decrypting documents.
- """
- simpledoc = {'key': 'val'}
- doc1 = LeapDocument(doc_id='id')
- doc1.content = simpledoc
- # encrypt doc
- doc1.set_json(encrypt_doc(self._soledad._crypto, doc1))
- # assert content is different and includes keys
- self.assertNotEqual(
- simpledoc, doc1.content,
- 'incorrect document encryption')
- self.assertTrue(ENC_JSON_KEY in doc1.content)
- self.assertTrue(ENC_SCHEME_KEY in doc1.content)
- # decrypt doc
- doc1.set_json(decrypt_doc(self._soledad._crypto, doc1))
- self.assertEqual(
- simpledoc, doc1.content, 'incorrect document encryption')
-
-
-#from leap.soledad.server import SoledadApp, SoledadAuthMiddleware
-#
-#
-#def make_token_leap_app(test, state):
-# app = SoledadApp(state)
-# application = SoledadAuthMiddleware(app, prefix='/soledad/')
-# return application
-#
-#
-#def leap_sync_target(test, path):
-# return LeapSyncTarget(test.getURL(path))
-#
-#
-#def token_leap_sync_target(test, path):
-# st = leap_sync_target(test, 'soledad/' + path)
-# st.set_token_credentials('any_user', 'any_token')
-# return st
-#
-#
-#class EncryptedCouchSyncTest(CouchDBTestCase, TestCaseWithServer):
-#
-# make_app_with_state = make_token_leap_app
-#
-# make_document_for_test = make_leap_document_for_test
-#
-# sync_target = token_leap_sync_target
-#
-# def make_app(self):
-# # potential hook point
-# self.request_state = CouchServerState(self._couch_url)
-# return self.make_app_with_state(self.request_state)
-#
-# def _soledad_instance(self, user='leap@leap.se', prefix='',
-# bootstrap=False, gnupg_home='/gnupg',
-# secrets_path='/secret.gpg',
-# local_db_path='/soledad.u1db'):
-# return Soledad(
-# user,
-# '123',
-# gnupg_home=self.tempdir+prefix+gnupg_home,
-# secrets_path=self.tempdir+prefix+secrets_path,
-# local_db_path=self.tempdir+prefix+local_db_path,
-# bootstrap=bootstrap)
-#
-# def setUp(self):
-# CouchDBTestCase.setUp(self)
-# TestCaseWithServer.setUp(self)
-# self.tempdir = tempfile.mkdtemp(suffix='.couch.test')
-# # initialize soledad by hand so we can control keys
-# self._soledad = self._soledad_instance('leap@leap.se')
-# self._soledad._init_dirs()
-# self._soledad._crypto = SoledadCrypto(self._soledad)
-# if not self._soledad._has_get_storage_secret()():
-# self._soledad._gen_get_storage_secret()()
-# self._soledad._load_get_storage_secret()()
-# self._soledad._init_db()
-#
-# def tearDown(self):
-# shutil.rmtree(self.tempdir)
-#
-# def test_encrypted_sym_sync(self):
-# # get direct access to couchdb
-# import ipdb; ipdb.set_trace()
-# self._couch_url = 'http://localhost:' + str(self.wrapper.port)
-# db = CouchDatabase(self._couch_url, 'testdb')
-# # create and encrypt a doc to insert directly in couchdb
-# doc = LeapDocument('doc-id')
-# doc.set_json(
-# encrypt_doc(
-# self._soledad._crypto, 'doc-id', json.dumps(simple_doc)))
-# db.put_doc(doc)
-# # setup credentials for access to soledad server
-# creds = {
-# 'token': {
-# 'uuid': 'leap@leap.se',
-# 'token': '1234',
-# }
-# }
-# # sync local soledad db with server
-# self.assertTrue(self._soledad.get_doc('doc-id') is None)
-# self.startServer()
-# # TODO fix sync for test.
-# #self._soledad.sync(self.getURL('soledad/testdb'), creds)
-# # get and check doc
-# doc = self._soledad.get_doc('doc-id')
-# # TODO: fix below.
-# #self.assertTrue(doc is not None)
-# #self.assertTrue(doc.content == simple_doc)
-
-
-class RecoveryDocumentTestCase(BaseSoledadTest):
-
- def test_export_recovery_document_raw(self):
- rd = self._soledad.export_recovery_document()
- secret_id = rd[self._soledad.STORAGE_SECRETS_KEY].items()[0][0]
- secret = rd[self._soledad.STORAGE_SECRETS_KEY][secret_id]
- self.assertEqual(secret_id, self._soledad._secret_id)
- self.assertEqual(secret, self._soledad._secrets[secret_id])
- self.assertTrue(self._soledad.CIPHER_KEY in secret)
- self.assertTrue(secret[self._soledad.CIPHER_KEY] == 'aes256')
- self.assertTrue(self._soledad.LENGTH_KEY in secret)
- self.assertTrue(self._soledad.SECRET_KEY in secret)
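-        # Taken together, the assertions above imply a recovery document
-        # roughly of the form (names stand for the class constants; values
-        # illustrative):
-        #
-        #   {STORAGE_SECRETS_KEY: {
-        #       '<secret-id>': {CIPHER_KEY: 'aes256',
-        #                       LENGTH_KEY: <length>,
-        #                       SECRET_KEY: '<encrypted secret>'}}}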
-
- def test_import_recovery_document(self):
- rd = self._soledad.export_recovery_document()
- s = self._soledad_instance(user='anotheruser@leap.se')
- s.import_recovery_document(rd)
- s._set_secret_id(self._soledad._secret_id)
- self.assertEqual(self._soledad._uuid,
- s._uuid, 'Failed setting user uuid.')
- self.assertEqual(self._soledad._get_storage_secret(),
- s._get_storage_secret(),
-                         'Failed setting secret for symmetric encryption.')
-
-
-class CryptoMethodsTestCase(BaseSoledadTest):
-
- def test__gen_secret(self):
- # instantiate and save secret_id
- sol = self._soledad_instance(user='user@leap.se')
- self.assertTrue(len(sol._secrets) == 1)
- secret_id_1 = sol.secret_id
- # assert id is hash of secret
- self.assertTrue(
- secret_id_1 == hashlib.sha256(sol.storage_secret).hexdigest())
- # generate new secret
- secret_id_2 = sol._gen_secret()
- self.assertTrue(secret_id_1 != secret_id_2)
- # re-instantiate
- sol = self._soledad_instance(
- user='user@leap.se',
- secret_id=secret_id_1)
- # assert ids are valid
- self.assertTrue(len(sol._secrets) == 2)
- self.assertTrue(secret_id_1 in sol._secrets)
- self.assertTrue(secret_id_2 in sol._secrets)
- # assert format of secret 1
- self.assertTrue(sol.storage_secret is not None)
- self.assertIsInstance(sol.storage_secret, str)
- self.assertTrue(len(sol.storage_secret) == sol.GENERATED_SECRET_LENGTH)
- # assert format of secret 2
- sol._set_secret_id(secret_id_2)
- self.assertTrue(sol.storage_secret is not None)
- self.assertIsInstance(sol.storage_secret, str)
- self.assertTrue(len(sol.storage_secret) == sol.GENERATED_SECRET_LENGTH)
- # assert id is hash of new secret
- self.assertTrue(
- secret_id_2 == hashlib.sha256(sol.storage_secret).hexdigest())
-
- def test__has_secret(self):
- sol = self._soledad_instance(user='user@leap.se')
- self.assertTrue(sol._has_secret(), "Should have a secret at "
- "this point")
- # setting secret id to None should not interfere in the fact we have a
- # secret.
- sol._set_secret_id(None)
- self.assertTrue(sol._has_secret(), "Should have a secret at "
- "this point")
- # but not being able to decrypt correctly should
- sol._secrets[sol.secret_id][sol.SECRET_KEY] = None
- self.assertFalse(sol._has_secret())
-
-
-class MacAuthTestCase(BaseSoledadTest):
-
- def test_decrypt_with_wrong_mac_raises(self):
- """
- Trying to decrypt a document with wrong MAC should raise.
- """
- simpledoc = {'key': 'val'}
- doc = LeapDocument(doc_id='id')
- doc.content = simpledoc
- # encrypt doc
- doc.set_json(encrypt_doc(self._soledad._crypto, doc))
- self.assertTrue(MAC_KEY in doc.content)
- self.assertTrue(MAC_METHOD_KEY in doc.content)
- # mess with MAC
- doc.content[MAC_KEY] = '1234567890ABCDEF'
- # try to decrypt doc
- self.assertRaises(
- WrongMac,
- decrypt_doc, self._soledad._crypto, doc)
-
- def test_decrypt_with_unknown_mac_method_raises(self):
- """
- Trying to decrypt a document with unknown MAC method should raise.
- """
- simpledoc = {'key': 'val'}
- doc = LeapDocument(doc_id='id')
- doc.content = simpledoc
- # encrypt doc
- doc.set_json(encrypt_doc(self._soledad._crypto, doc))
- self.assertTrue(MAC_KEY in doc.content)
- self.assertTrue(MAC_METHOD_KEY in doc.content)
- # mess with MAC method
- doc.content[MAC_METHOD_KEY] = 'mymac'
- # try to decrypt doc
- self.assertRaises(
- UnknownMacMethod,
- decrypt_doc, self._soledad._crypto, doc)
diff --git a/src/leap/soledad/tests/test_leap_backend.py b/src/leap/soledad/tests/test_leap_backend.py
deleted file mode 100644
index 6914e869..00000000
--- a/src/leap/soledad/tests/test_leap_backend.py
+++ /dev/null
@@ -1,782 +0,0 @@
-# -*- coding: utf-8 -*-
-# test_leap_backend.py
-# Copyright (C) 2013 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-"""
-Test Leap backend bits.
-"""
-
-import u1db
-import os
-import ssl
-import simplejson as json
-import cStringIO
-
-
-from u1db.sync import Synchronizer
-from u1db.remote import (
- http_client,
- http_database,
- http_target,
-)
-from routes.mapper import Mapper
-
-from leap import soledad
-from leap.soledad.backends import leap_backend
-from leap.soledad.server import (
- SoledadApp,
- SoledadAuthMiddleware,
-)
-from leap.soledad import auth
-
-
-from leap.soledad.tests import u1db_tests as tests
-from leap.soledad.tests import BaseSoledadTest
-from leap.soledad.tests.u1db_tests import test_backends
-from leap.soledad.tests.u1db_tests import test_http_database
-from leap.soledad.tests.u1db_tests import test_http_client
-from leap.soledad.tests.u1db_tests import test_document
-from leap.soledad.tests.u1db_tests import test_remote_sync_target
-from leap.soledad.tests.u1db_tests import test_https
-from leap.soledad.tests.u1db_tests import test_sync
-
-
-#-----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_backends`.
-#-----------------------------------------------------------------------------
-
-def make_leap_document_for_test(test, doc_id, rev, content,
- has_conflicts=False):
- return leap_backend.LeapDocument(
- doc_id, rev, content, has_conflicts=has_conflicts)
-
-
-def make_soledad_app(state):
- return SoledadApp(state)
-
-
-def make_token_soledad_app(state):
- app = SoledadApp(state)
-
- def verify_token(environ, uuid, token):
- if uuid == 'user-uuid' and token == 'auth-token':
- return True
- return False
-
- # we test for action authorization in leap.soledad.tests.test_server
-    def verify_action(uuid, environ):
- return True
-
- application = SoledadAuthMiddleware(app)
- application.verify_token = verify_token
- application.verify_action = verify_action
- return application
-
-
-LEAP_SCENARIOS = [
- ('http', {
- 'make_database_for_test': test_backends.make_http_database_for_test,
- 'copy_database_for_test': test_backends.copy_http_database_for_test,
- 'make_document_for_test': make_leap_document_for_test,
- 'make_app_with_state': make_soledad_app}),
-]
-
-
-def make_token_http_database_for_test(test, replica_uid):
- test.startServer()
- test.request_state._create_database(replica_uid)
-
- class _HTTPDatabaseWithToken(
- http_database.HTTPDatabase, auth.TokenBasedAuth):
-
- def set_token_credentials(self, uuid, token):
- auth.TokenBasedAuth.set_token_credentials(self, uuid, token)
-
- def _sign_request(self, method, url_query, params):
- return auth.TokenBasedAuth._sign_request(
- self, method, url_query, params)
-
- http_db = _HTTPDatabaseWithToken(test.getURL('test'))
- http_db.set_token_credentials('user-uuid', 'auth-token')
- return http_db
-
-
-def copy_token_http_database_for_test(test, db):
- # DO NOT COPY OR REUSE THIS CODE OUTSIDE TESTS: COPYING U1DB DATABASES IS
- # THE WRONG THING TO DO, THE ONLY REASON WE DO SO HERE IS TO TEST THAT WE
- # CORRECTLY DETECT IT HAPPENING SO THAT WE CAN RAISE ERRORS RATHER THAN
- # CORRUPT USER DATA. USE SYNC INSTEAD, OR WE WILL SEND NINJA TO YOUR
- # HOUSE.
- http_db = test.request_state._copy_database(db)
-    http_db.set_token_credentials('user-uuid', 'auth-token')
- return http_db
-
-
-class LeapTests(test_backends.AllDatabaseTests, BaseSoledadTest):
-
- scenarios = LEAP_SCENARIOS + [
- ('token_http', {'make_database_for_test':
- make_token_http_database_for_test,
- 'copy_database_for_test':
- copy_token_http_database_for_test,
- 'make_document_for_test': make_leap_document_for_test,
- 'make_app_with_state': make_token_soledad_app,
- })
- ]
-
-
-#-----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_http_client`.
-#-----------------------------------------------------------------------------
-
-class TestLeapClientBase(test_http_client.TestHTTPClientBase):
- """
- This class should be used to test Token auth.
- """
-
- def getClientWithToken(self, **kwds):
- self.startServer()
-
- class _HTTPClientWithToken(
- http_client.HTTPClientBase, auth.TokenBasedAuth):
-
- def set_token_credentials(self, uuid, token):
- auth.TokenBasedAuth.set_token_credentials(self, uuid, token)
-
- def _sign_request(self, method, url_query, params):
- return auth.TokenBasedAuth._sign_request(
- self, method, url_query, params)
-
- return _HTTPClientWithToken(self.getURL('dbase'), **kwds)
-
- def test_oauth(self):
- """
- Suppress oauth test (we test for token auth here).
- """
- pass
-
- def test_oauth_ctr_creds(self):
- """
- Suppress oauth test (we test for token auth here).
- """
- pass
-
- def test_oauth_Unauthorized(self):
- """
- Suppress oauth test (we test for token auth here).
- """
- pass
-
- def app(self, environ, start_response):
- res = test_http_client.TestHTTPClientBase.app(
- self, environ, start_response)
- if res is not None:
- return res
-        # mimic the soledad application here.
-        if '/token' in environ['PATH_INFO']:
-            auth = environ.get(SoledadAuthMiddleware.HTTP_AUTH_KEY)
-            if not auth:
-                start_response("401 Unauthorized",
-                               [('Content-Type', 'application/json')])
-                return [json.dumps({"error": "unauthorized",
-                                    "message": "Missing authentication."})]
-            scheme, encoded = auth.split(None, 1)
-            if scheme.lower() != 'token':
-                start_response("401 Unauthorized",
-                               [('Content-Type', 'application/json')])
-                return [json.dumps({"error": "unauthorized",
-                                    "message": "Wrong authentication scheme."})]
-            uuid, token = encoded.decode('base64').split(':', 1)
-            if uuid != 'user-uuid' or token != 'auth-token':
-                start_response("401 Unauthorized",
-                               [('Content-Type', 'application/json')])
-                return [json.dumps({"error": "unauthorized",
-                                    "message": "Incorrect address or token."})]
-            start_response("200 OK", [('Content-Type', 'application/json')])
-            return [json.dumps([environ['PATH_INFO'], uuid, token])]
-
- def test_token(self):
- """
- Test if token is sent correctly.
- """
- cli = self.getClientWithToken()
- cli.set_token_credentials('user-uuid', 'auth-token')
- res, headers = cli._request('GET', ['doc', 'token'])
- self.assertEqual(
- ['/dbase/doc/token', 'user-uuid', 'auth-token'], json.loads(res))
-
- def test_token_ctr_creds(self):
- cli = self.getClientWithToken(creds={'token': {
- 'uuid': 'user-uuid',
- 'token': 'auth-token',
- }})
- res, headers = cli._request('GET', ['doc', 'token'])
- self.assertEqual(
- ['/dbase/doc/token', 'user-uuid', 'auth-token'], json.loads(res))
-
-
-#-----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_document`.
-#-----------------------------------------------------------------------------
-
-class TestLeapDocument(test_document.TestDocument, BaseSoledadTest):
-
- scenarios = ([(
- 'leap', {'make_document_for_test': make_leap_document_for_test})])
-
-
-class TestLeapPyDocument(test_document.TestPyDocument, BaseSoledadTest):
-
- scenarios = ([(
- 'leap', {'make_document_for_test': make_leap_document_for_test})])
-
-
-#-----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_remote_sync_target`.
-#-----------------------------------------------------------------------------
-
-class TestLeapSyncTargetBasics(
- test_remote_sync_target.TestHTTPSyncTargetBasics):
- """
- Some tests had to be copied to this class so we can instantiate our own
- target.
- """
-
- def test_parse_url(self):
- remote_target = leap_backend.LeapSyncTarget('http://127.0.0.1:12345/')
- self.assertEqual('http', remote_target._url.scheme)
- self.assertEqual('127.0.0.1', remote_target._url.hostname)
- self.assertEqual(12345, remote_target._url.port)
- self.assertEqual('/', remote_target._url.path)
-
-
-class TestLeapParsingSyncStream(
- test_remote_sync_target.TestParsingSyncStream,
- BaseSoledadTest):
- """
- Some tests had to be copied to this class so we can instantiate our own
- target.
- """
-
- def setUp(self):
- test_remote_sync_target.TestParsingSyncStream.setUp(self)
- BaseSoledadTest.setUp(self)
-
- def tearDown(self):
- test_remote_sync_target.TestParsingSyncStream.tearDown(self)
- BaseSoledadTest.tearDown(self)
-
- def test_extra_comma(self):
- """
- Test adapted to use encrypted content.
- """
- doc = leap_backend.LeapDocument('i', rev='r')
- doc.content = {}
- enc_json = leap_backend.encrypt_doc(self._soledad._crypto, doc)
- tgt = leap_backend.LeapSyncTarget(
- "http://foo/foo", crypto=self._soledad._crypto)
-
- self.assertRaises(u1db.errors.BrokenSyncStream,
- tgt._parse_sync_stream, "[\r\n{},\r\n]", None)
- self.assertRaises(u1db.errors.BrokenSyncStream,
- tgt._parse_sync_stream,
- '[\r\n{},\r\n{"id": "i", "rev": "r", '
- '"content": %s, "gen": 3, "trans_id": "T-sid"}'
- ',\r\n]' % json.dumps(enc_json),
- lambda doc, gen, trans_id: None)
-
- def test_wrong_start(self):
- tgt = leap_backend.LeapSyncTarget("http://foo/foo")
-
- self.assertRaises(u1db.errors.BrokenSyncStream,
- tgt._parse_sync_stream, "{}\r\n]", None)
-
- self.assertRaises(u1db.errors.BrokenSyncStream,
- tgt._parse_sync_stream, "\r\n{}\r\n]", None)
-
- self.assertRaises(u1db.errors.BrokenSyncStream,
- tgt._parse_sync_stream, "", None)
-
- def test_wrong_end(self):
- tgt = leap_backend.LeapSyncTarget("http://foo/foo")
-
- self.assertRaises(u1db.errors.BrokenSyncStream,
- tgt._parse_sync_stream, "[\r\n{}", None)
-
- self.assertRaises(u1db.errors.BrokenSyncStream,
- tgt._parse_sync_stream, "[\r\n", None)
-
- def test_missing_comma(self):
- tgt = leap_backend.LeapSyncTarget("http://foo/foo")
-
- self.assertRaises(u1db.errors.BrokenSyncStream,
- tgt._parse_sync_stream,
- '[\r\n{}\r\n{"id": "i", "rev": "r", '
- '"content": "c", "gen": 3}\r\n]', None)
-
- def test_no_entries(self):
- tgt = leap_backend.LeapSyncTarget("http://foo/foo")
-
- self.assertRaises(u1db.errors.BrokenSyncStream,
- tgt._parse_sync_stream, "[\r\n]", None)
-
- def test_error_in_stream(self):
- tgt = leap_backend.LeapSyncTarget("http://foo/foo")
-
- self.assertRaises(u1db.errors.Unavailable,
- tgt._parse_sync_stream,
- '[\r\n{"new_generation": 0},'
- '\r\n{"error": "unavailable"}\r\n', None)
-
- self.assertRaises(u1db.errors.Unavailable,
- tgt._parse_sync_stream,
- '[\r\n{"error": "unavailable"}\r\n', None)
-
- self.assertRaises(u1db.errors.BrokenSyncStream,
- tgt._parse_sync_stream,
- '[\r\n{"error": "?"}\r\n', None)
-
-
-#
-# functions for TestRemoteSyncTargets
-#
-
-def leap_sync_target(test, path):
- return leap_backend.LeapSyncTarget(
- test.getURL(path), crypto=test._soledad._crypto)
-
-
-def token_leap_sync_target(test, path):
- st = leap_sync_target(test, path)
- st.set_token_credentials('user-uuid', 'auth-token')
- return st
-
-
-class TestLeapSyncTarget(
- test_remote_sync_target.TestRemoteSyncTargets, BaseSoledadTest):
-
- scenarios = [
- ('token_soledad',
- {'make_app_with_state': make_token_soledad_app,
- 'make_document_for_test': make_leap_document_for_test,
- 'sync_target': token_leap_sync_target}),
- ]
-
- def test_sync_exchange_send(self):
- """
- Test sync exchange sending a document.
-
- This test was adapted to decrypt remote content before assert.
- """
- self.startServer()
- db = self.request_state._create_database('test')
- remote_target = self.getSyncTarget('test')
- other_docs = []
-
- def receive_doc(doc):
- other_docs.append((doc.doc_id, doc.rev, doc.get_json()))
-
- doc = self.make_document('doc-here', 'replica:1', '{"value": "here"}')
- new_gen, trans_id = remote_target.sync_exchange(
- [(doc, 10, 'T-sid')], 'replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=receive_doc)
- self.assertEqual(1, new_gen)
- self.assertGetEncryptedDoc(
- db, 'doc-here', 'replica:1', '{"value": "here"}', False)
-
- def test_sync_exchange_send_failure_and_retry_scenario(self):
- """
- Test for sync exchange failure and retry.
-
- This test was adapted to decrypt remote content before assert.
- """
-
- self.startServer()
-
- def blackhole_getstderr(inst):
- return cStringIO.StringIO()
-
- self.patch(self.server.RequestHandlerClass, 'get_stderr',
- blackhole_getstderr)
- db = self.request_state._create_database('test')
- _put_doc_if_newer = db._put_doc_if_newer
- trigger_ids = ['doc-here2']
-
- def bomb_put_doc_if_newer(doc, save_conflict,
- replica_uid=None, replica_gen=None,
- replica_trans_id=None):
- if doc.doc_id in trigger_ids:
- raise Exception
- return _put_doc_if_newer(doc, save_conflict=save_conflict,
- replica_uid=replica_uid,
- replica_gen=replica_gen,
- replica_trans_id=replica_trans_id)
- self.patch(db, '_put_doc_if_newer', bomb_put_doc_if_newer)
- remote_target = self.getSyncTarget('test')
- other_changes = []
-
- def receive_doc(doc, gen, trans_id):
- other_changes.append(
- (doc.doc_id, doc.rev, doc.get_json(), gen, trans_id))
-
- doc1 = self.make_document('doc-here', 'replica:1', '{"value": "here"}')
- doc2 = self.make_document('doc-here2', 'replica:1',
- '{"value": "here2"}')
- self.assertRaises(
- u1db.errors.HTTPError,
- remote_target.sync_exchange,
- [(doc1, 10, 'T-sid'), (doc2, 11, 'T-sud')],
- 'replica', last_known_generation=0, last_known_trans_id=None,
- return_doc_cb=receive_doc)
- self.assertGetEncryptedDoc(
- db, 'doc-here', 'replica:1', '{"value": "here"}',
- False)
- self.assertEqual(
- (10, 'T-sid'), db._get_replica_gen_and_trans_id('replica'))
- self.assertEqual([], other_changes)
- # retry
- trigger_ids = []
- new_gen, trans_id = remote_target.sync_exchange(
- [(doc2, 11, 'T-sud')], 'replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=receive_doc)
- self.assertGetEncryptedDoc(
- db, 'doc-here2', 'replica:1', '{"value": "here2"}',
- False)
- self.assertEqual(
- (11, 'T-sud'), db._get_replica_gen_and_trans_id('replica'))
- self.assertEqual(2, new_gen)
- # bounced back to us
- self.assertEqual(
- ('doc-here', 'replica:1', '{"value": "here"}', 1),
- other_changes[0][:-1])
-
- def test_sync_exchange_send_ensure_callback(self):
- """
- Test that sync exchange invokes the ensure callback.
-
- This test was adapted to decrypt remote content before assert.
- """
- self.startServer()
- remote_target = self.getSyncTarget('test')
- other_docs = []
- replica_uid_box = []
-
- def receive_doc(doc):
- other_docs.append((doc.doc_id, doc.rev, doc.get_json()))
-
- def ensure_cb(replica_uid):
- replica_uid_box.append(replica_uid)
-
- doc = self.make_document('doc-here', 'replica:1', '{"value": "here"}')
- new_gen, trans_id = remote_target.sync_exchange(
- [(doc, 10, 'T-sid')], 'replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=receive_doc,
- ensure_callback=ensure_cb)
- self.assertEqual(1, new_gen)
- db = self.request_state.open_database('test')
- self.assertEqual(1, len(replica_uid_box))
- self.assertEqual(db._replica_uid, replica_uid_box[0])
- self.assertGetEncryptedDoc(
- db, 'doc-here', 'replica:1', '{"value": "here"}', False)
-
-
-#-----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_https`.
-#-----------------------------------------------------------------------------
-
-def token_leap_https_sync_target(test, host, path):
- _, port = test.server.server_address
- st = leap_backend.LeapSyncTarget(
- 'https://%s:%d/%s' % (host, port, path),
- crypto=test._soledad._crypto)
- st.set_token_credentials('user-uuid', 'auth-token')
- return st
-
-
-class TestLeapSyncTargetHttpsSupport(test_https.TestHttpSyncTargetHttpsSupport,
- BaseSoledadTest):
-
- scenarios = [
- ('token_soledad_https',
- {'server_def': test_https.https_server_def,
- 'make_app_with_state': make_token_soledad_app,
- 'make_document_for_test': make_leap_document_for_test,
- 'sync_target': token_leap_https_sync_target}),
- ]
-
- def setUp(self):
- # the parent constructor undoes our SSL monkey patch to ensure tests
- # run smoothly with standard u1db.
- test_https.TestHttpSyncTargetHttpsSupport.setUp(self)
- # so we monkey patch again here to test our own functionality.
- http_client._VerifiedHTTPSConnection = soledad.VerifiedHTTPSConnection
- soledad.SOLEDAD_CERT = http_client.CA_CERTS
-
- def test_working(self):
- """
- Test that SSL connections work well.
-
- This test was adapted to patch Soledad's HTTPS connection custom class
- with the intended CA certificates.
- """
- self.startServer()
- db = self.request_state._create_database('test')
- self.patch(soledad, 'SOLEDAD_CERT', self.cacert_pem)
- remote_target = self.getSyncTarget('localhost', 'test')
- remote_target.record_sync_info('other-id', 2, 'T-id')
- self.assertEqual(
- (2, 'T-id'), db._get_replica_gen_and_trans_id('other-id'))
-
- def test_host_mismatch(self):
- """
- Test that SSL connections to a hostname different than the one in the
- certificate raise CertificateError.
-
- This test was adapted to patch Soledad's HTTPS connection custom class
- with the intended CA certificates.
- """
- self.startServer()
- self.request_state._create_database('test')
- self.patch(soledad, 'SOLEDAD_CERT', self.cacert_pem)
- remote_target = self.getSyncTarget('127.0.0.1', 'test')
- self.assertRaises(
- http_client.CertificateError, remote_target.record_sync_info,
- 'other-id', 2, 'T-id')
-
-
-#-----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_http_database`.
-#-----------------------------------------------------------------------------
-
-class _HTTPDatabase(http_database.HTTPDatabase, auth.TokenBasedAuth):
- """
- Wraps our token auth implementation.
- """
-
- def set_token_credentials(self, uuid, token):
- auth.TokenBasedAuth.set_token_credentials(self, uuid, token)
-
- def _sign_request(self, method, url_query, params):
- return auth.TokenBasedAuth._sign_request(
- self, method, url_query, params)
-
-
-class TestHTTPDatabaseWithCreds(
- test_http_database.TestHTTPDatabaseCtrWithCreds):
-
- def test_get_sync_target_inherits_token_credentials(self):
- # this test was from TestDatabaseSimpleOperations but we put it here
- # for convenience.
- self.db = _HTTPDatabase('dbase')
- self.db.set_token_credentials('user-uuid', 'auth-token')
- st = self.db.get_sync_target()
- self.assertEqual(self.db._creds, st._creds)
-
- def test_ctr_with_creds(self):
- db1 = _HTTPDatabase('http://dbs/db', creds={'token': {
- 'uuid': 'user-uuid',
- 'token': 'auth-token',
- }})
- self.assertIn('token', db1._creds)
-
-
-#-----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_sync`.
-#-----------------------------------------------------------------------------
-
-def _make_local_db_and_leap_target(test, path='test'):
- test.startServer()
- db = test.request_state._create_database(os.path.basename(path))
- st = leap_backend.LeapSyncTarget.connect(
- test.getURL(path), crypto=test._soledad._crypto)
- return db, st
-
-
-def _make_local_db_and_token_leap_target(test):
- db, st = _make_local_db_and_leap_target(test, 'test')
- st.set_token_credentials('user-uuid', 'auth-token')
- return db, st
-
-
-target_scenarios = [
- ('token_leap', {'create_db_and_target':
- _make_local_db_and_token_leap_target,
- 'make_app_with_state': make_token_soledad_app}),
-]
-
-
-class LeapDatabaseSyncTargetTests(
- test_sync.DatabaseSyncTargetTests, BaseSoledadTest):
-
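- # multiply_scenarios builds the cross product of the two scenario
- # lists, so each u1db local-database scenario is paired with each
- # sync-target scenario defined above.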
- scenarios = (
- tests.multiply_scenarios(
- tests.DatabaseBaseTests.scenarios,
- target_scenarios))
-
- def test_sync_exchange(self):
- """
- Test sync exchange.
-
- This test was adapted to decrypt remote content before assert.
- """
- sol = _make_local_db_and_leap_target(self)
- docs_by_gen = [
- (self.make_document('doc-id', 'replica:1', tests.simple_doc), 10,
- 'T-sid')]
- new_gen, trans_id = self.st.sync_exchange(
- docs_by_gen, 'replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=self.receive_doc)
- self.assertGetEncryptedDoc(
- self.db, 'doc-id', 'replica:1', tests.simple_doc, False)
- self.assertTransactionLog(['doc-id'], self.db)
- last_trans_id = self.getLastTransId(self.db)
- self.assertEqual(([], 1, last_trans_id),
- (self.other_changes, new_gen, last_trans_id))
- self.assertEqual(10, self.st.get_sync_info('replica')[3])
-
- def test_sync_exchange_push_many(self):
- """
- Test sync exchange pushing many documents.
-
- This test was adapted to decrypt remote content before assert.
- """
- docs_by_gen = [
- (self.make_document(
- 'doc-id', 'replica:1', tests.simple_doc), 10, 'T-1'),
- (self.make_document(
- 'doc-id2', 'replica:1', tests.nested_doc), 11, 'T-2')]
- new_gen, trans_id = self.st.sync_exchange(
- docs_by_gen, 'replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=self.receive_doc)
- self.assertGetEncryptedDoc(
- self.db, 'doc-id', 'replica:1', tests.simple_doc, False)
- self.assertGetEncryptedDoc(
- self.db, 'doc-id2', 'replica:1', tests.nested_doc, False)
- self.assertTransactionLog(['doc-id', 'doc-id2'], self.db)
- last_trans_id = self.getLastTransId(self.db)
- self.assertEqual(([], 2, last_trans_id),
- (self.other_changes, new_gen, trans_id))
- self.assertEqual(11, self.st.get_sync_info('replica')[3])
-
- def test_sync_exchange_returns_many_new_docs(self):
- """
- Test sync exchange returning many new documents.
-
- This test was adapted to avoid JSON serialization comparison as local
- and remote representations might differ. It looks directly at the
- doc's contents instead.
- """
- doc = self.db.create_doc_from_json(tests.simple_doc)
- doc2 = self.db.create_doc_from_json(tests.nested_doc)
- self.assertTransactionLog([doc.doc_id, doc2.doc_id], self.db)
- new_gen, _ = self.st.sync_exchange(
- [], 'other-replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=self.receive_doc)
- self.assertTransactionLog([doc.doc_id, doc2.doc_id], self.db)
- self.assertEqual(2, new_gen)
- self.assertEqual(
- [(doc.doc_id, doc.rev, 1),
- (doc2.doc_id, doc2.rev, 2)],
- [c[:-3] + c[-2:-1] for c in self.other_changes])
- self.assertEqual(
- json.loads(tests.simple_doc),
- json.loads(self.other_changes[0][2]))
- self.assertEqual(
- json.loads(tests.nested_doc),
- json.loads(self.other_changes[1][2]))
- if self.whitebox:
- self.assertEqual(
- self.db._last_exchange_log['return'],
- {'last_gen': 2, 'docs':
- [(doc.doc_id, doc.rev), (doc2.doc_id, doc2.rev)]})
-
-
-class TestLeapDbSync(test_sync.TestDbSync, BaseSoledadTest):
- """Test db.sync remote sync shortcut"""
-
- scenarios = [
- ('py-http', {
- 'make_app_with_state': make_soledad_app,
- 'make_database_for_test': tests.make_memory_database_for_test,
- }),
- ('py-token-http', {
- 'make_app_with_state': make_token_soledad_app,
- 'make_database_for_test': tests.make_memory_database_for_test,
- 'token': True
- }),
- ]
-
- oauth = False
- token = False
-
- def do_sync(self, target_name):
- """
- Perform sync using LeapSyncTarget and Token auth.
- """
- if self.token:
- extra = dict(creds={'token': {
- 'uuid': 'user-uuid',
- 'token': 'auth-token',
- }})
- target_url = self.getURL(target_name)
- return Synchronizer(
- self.db,
- leap_backend.LeapSyncTarget(
- target_url,
- crypto=self._soledad._crypto,
- **extra)).sync(autocreate=True)
- else:
- return test_sync.TestDbSync.do_sync(self, target_name)
-
- def test_db_sync(self):
- """
- Test sync.
-
- Adapted to check for encrypted content.
- """
- doc1 = self.db.create_doc_from_json(tests.simple_doc)
- doc2 = self.db2.create_doc_from_json(tests.nested_doc)
- local_gen_before_sync = self.do_sync('test2.db')
- gen, _, changes = self.db.whats_changed(local_gen_before_sync)
- self.assertEqual(1, len(changes))
- self.assertEqual(doc2.doc_id, changes[0][0])
- self.assertEqual(1, gen - local_gen_before_sync)
- self.assertGetEncryptedDoc(
- self.db2, doc1.doc_id, doc1.rev, tests.simple_doc, False)
- self.assertGetEncryptedDoc(
- self.db, doc2.doc_id, doc2.rev, tests.nested_doc, False)
-
- def test_db_sync_autocreate(self):
- """
- Test sync.
-
- Adapted to check for encrypted content.
- """
- doc1 = self.db.create_doc_from_json(tests.simple_doc)
- local_gen_before_sync = self.do_sync('test3.db')
- gen, _, changes = self.db.whats_changed(local_gen_before_sync)
- self.assertEqual(0, gen - local_gen_before_sync)
- db3 = self.request_state.open_database('test3.db')
- gen, _, changes = db3.whats_changed()
- self.assertEqual(1, len(changes))
- self.assertEqual(doc1.doc_id, changes[0][0])
- self.assertGetEncryptedDoc(
- db3, doc1.doc_id, doc1.rev, tests.simple_doc, False)
- t_gen, _ = self.db._get_replica_gen_and_trans_id('test3.db')
- s_gen, _ = db3._get_replica_gen_and_trans_id('test1')
- self.assertEqual(1, t_gen)
- self.assertEqual(1, s_gen)
-
-
-load_tests = tests.load_with_scenarios
diff --git a/src/leap/soledad/tests/test_server.py b/src/leap/soledad/tests/test_server.py
deleted file mode 100644
index ec3f636b..00000000
--- a/src/leap/soledad/tests/test_server.py
+++ /dev/null
@@ -1,239 +0,0 @@
-# -*- coding: utf-8 -*-
-# test_server.py
-# Copyright (C) 2013 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-"""
-Tests for server-related functionality.
-"""
-
-
-from leap.soledad.server import URLToAuth
-from leap.common.testing.basetest import BaseLeapTest
-
-
-class SoledadServerTestCase(BaseLeapTest):
- """
- Tests for server-side authorization of u1db remote actions.
- """
-
- def setUp(self):
- pass
-
- def tearDown(self):
- pass
-
- def _make_environ(self, path_info, request_method):
- return {
- 'PATH_INFO': path_info,
- 'REQUEST_METHOD': request_method,
- }
-
- def test_verify_action_with_correct_dbnames(self):
- """
- Test that only the authorized actions are allowed for correct dbnames.
-
- The following table lists the authorized actions among all possible
- u1db remote actions:
-
- URL path | Authorized actions
- --------------------------------------------------
- / | GET
- /shared-db | GET
- /shared-db/docs | -
- /shared-db/doc/{id} | GET, PUT, DELETE
- /shared-db/sync-from/{source} | -
- /user-db | GET, PUT, DELETE
- /user-db/docs | -
- /user-db/doc/{id} | -
- /user-db/sync-from/{source} | GET, PUT, POST
- """
- uuid = 'myuuid'
- authmap = URLToAuth(uuid)
- dbname = authmap._uuid_dbname(uuid)
- # test global auth
- self.assertTrue(
- authmap.is_authorized(self._make_environ('/', 'GET')))
- # test shared-db database resource auth
- self.assertTrue(
- authmap.is_authorized(
- self._make_environ('/shared', 'GET')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/shared', 'PUT')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/shared', 'DELETE')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/shared', 'POST')))
- # test shared-db docs resource auth
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/shared/docs', 'GET')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/shared/docs', 'PUT')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/shared/docs', 'DELETE')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/shared/docs', 'POST')))
- # test shared-db doc resource auth
- self.assertTrue(
- authmap.is_authorized(
- self._make_environ('/shared/doc/x', 'GET')))
- self.assertTrue(
- authmap.is_authorized(
- self._make_environ('/shared/doc/x', 'PUT')))
- self.assertTrue(
- authmap.is_authorized(
- self._make_environ('/shared/doc/x', 'DELETE')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/shared/doc/x', 'POST')))
- # test shared-db sync resource auth
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/shared/sync-from/x', 'GET')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/shared/sync-from/x', 'PUT')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/shared/sync-from/x', 'DELETE')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/shared/sync-from/x', 'POST')))
- # test user-db database resource auth
- self.assertTrue(
- authmap.is_authorized(
- self._make_environ('/%s' % dbname, 'GET')))
- self.assertTrue(
- authmap.is_authorized(
- self._make_environ('/%s' % dbname, 'PUT')))
- self.assertTrue(
- authmap.is_authorized(
- self._make_environ('/%s' % dbname, 'DELETE')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s' % dbname, 'POST')))
- # test user-db docs resource auth
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/docs' % dbname, 'GET')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/docs' % dbname, 'PUT')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/docs' % dbname, 'DELETE')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/docs' % dbname, 'POST')))
- # test user-db doc resource auth
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/doc/x' % dbname, 'GET')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/doc/x' % dbname, 'PUT')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/doc/x' % dbname, 'DELETE')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/doc/x' % dbname, 'POST')))
- # test user-db sync resource auth
- self.assertTrue(
- authmap.is_authorized(
- self._make_environ('/%s/sync-from/x' % dbname, 'GET')))
- self.assertTrue(
- authmap.is_authorized(
- self._make_environ('/%s/sync-from/x' % dbname, 'PUT')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/sync-from/x' % dbname, 'DELETE')))
- self.assertTrue(
- authmap.is_authorized(
- self._make_environ('/%s/sync-from/x' % dbname, 'POST')))
-
- def test_verify_action_with_wrong_dbnames(self):
- """
- Test if authorization fails for a wrong dbname.
- """
- uuid = 'myuuid'
- authmap = URLToAuth(uuid)
- dbname = 'somedb'
- # test wrong-db database resource auth
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s' % dbname, 'GET')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s' % dbname, 'PUT')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s' % dbname, 'DELETE')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s' % dbname, 'POST')))
- # test wrong-db docs resource auth
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/docs' % dbname, 'GET')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/docs' % dbname, 'PUT')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/docs' % dbname, 'DELETE')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/docs' % dbname, 'POST')))
- # test wrong-db doc resource auth
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/doc/x' % dbname, 'GET')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/doc/x' % dbname, 'PUT')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/doc/x' % dbname, 'DELETE')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/doc/x' % dbname, 'POST')))
- # test wrong-db sync resource auth
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/sync-from/x' % dbname, 'GET')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/sync-from/x' % dbname, 'PUT')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/sync-from/x' % dbname, 'DELETE')))
- self.assertFalse(
- authmap.is_authorized(
- self._make_environ('/%s/sync-from/x' % dbname, 'POST')))
diff --git a/src/leap/soledad/tests/test_soledad.py b/src/leap/soledad/tests/test_soledad.py
deleted file mode 100644
index 09711f19..00000000
--- a/src/leap/soledad/tests/test_soledad.py
+++ /dev/null
@@ -1,302 +0,0 @@
-# -*- coding: utf-8 -*-
-# test_soledad.py
-# Copyright (C) 2013 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-"""
-Tests for general Soledad functionality.
-"""
-
-
-import os
-import re
-import tempfile
-import simplejson as json
-from mock import Mock
-
-
-from leap.common.testing.basetest import BaseLeapTest
-from leap.common.events import (
- server,
- component,
- events_pb2 as proto,
- register,
- signal,
-)
-from leap.soledad.tests import (
- BaseSoledadTest,
- ADDRESS,
-)
-from leap import soledad
-from leap.soledad import Soledad
-from leap.soledad.crypto import SoledadCrypto
-from leap.soledad.shared_db import SoledadSharedDatabase
-from leap.soledad.backends.leap_backend import (
- LeapDocument,
- LeapSyncTarget,
-)
-
-
-class AuxMethodsTestCase(BaseSoledadTest):
-
- def test__init_dirs(self):
- sol = self._soledad_instance(prefix='_init_dirs')
- sol._init_dirs()
- local_db_dir = os.path.dirname(sol.local_db_path)
- secrets_path = os.path.dirname(sol.secrets_path)
- self.assertTrue(os.path.isdir(local_db_dir))
- self.assertTrue(os.path.isdir(secrets_path))
-
- def test__init_db(self):
- sol = self._soledad_instance()
- sol._init_dirs()
- sol._crypto = SoledadCrypto(sol)
- #self._soledad._gpg.import_keys(PUBLIC_KEY)
- if not sol._has_secret():
- sol._gen_secret()
- sol._load_secrets()
- sol._init_db()
- from leap.soledad.backends.sqlcipher import SQLCipherDatabase
- self.assertIsInstance(sol._db, SQLCipherDatabase)
-
- def test__init_config_defaults(self):
- """
- Test if configuration defaults point to the correct place.
- """
-
- class SoledadMock(Soledad):
-
- def __init__(self):
- pass
-
- # instantiate without initializing so we just test _init_config()
- sol = SoledadMock()
- Soledad._init_config(sol, None, None, '')
- # assert value of secrets_path
- self.assertEquals(
- os.path.join(
- sol.DEFAULT_PREFIX, Soledad.STORAGE_SECRETS_FILE_NAME),
- sol.secrets_path)
- # assert value of local_db_path
- self.assertEquals(
- os.path.join(sol.DEFAULT_PREFIX, 'soledad.u1db'),
- sol.local_db_path)
-
- def test__init_config_from_params(self):
- """
- Test if configuration is correctly set from constructor parameters.
- """
- sol = self._soledad_instance(
- 'leap@leap.se',
- passphrase='123',
- secrets_path='value_3',
- local_db_path='value_2',
- server_url='value_1',
- cert_file=None)
- self.assertEqual(
- os.path.join(self.tempdir, 'value_3'),
- sol.secrets_path)
- self.assertEqual(
- os.path.join(self.tempdir, 'value_2'),
- sol.local_db_path)
- self.assertEqual('value_1', sol.server_url)
-
-
-class SoledadSharedDBTestCase(BaseSoledadTest):
- """
- These tests ensure the functionality of the shared recovery database.
- """
-
- def setUp(self):
- BaseSoledadTest.setUp(self)
- self._shared_db = SoledadSharedDatabase(
- 'https://provider/', LeapDocument, None)
-
- def test__get_secrets_from_shared_db(self):
- """
- Ensure the shared db is queried with the correct doc_id.
- """
- doc_id = self._soledad._uuid_hash()
- self._soledad._get_secrets_from_shared_db()
- self.assertTrue(
- self._soledad._shared_db().get_doc.assert_called_with(
- doc_id) is None,
- 'Wrong doc_id when fetching recovery document.')
-
- def test__put_secrets_in_shared_db(self):
- """
- Ensure recovery document is put into shared recover db.
- """
- doc_id = self._soledad._uuid_hash()
- self._soledad._put_secrets_in_shared_db()
- self.assertTrue(
- self._soledad._shared_db().get_doc.assert_called_with(
- doc_id) is None,
- 'Wrong doc_id when fetching recovery document.')
- self.assertTrue(
- self._soledad._shared_db.put_doc.assert_called_with(
- self._doc_put) is None,
- 'Wrong document when putting recovery document.')
- self.assertTrue(
- self._doc_put.doc_id == doc_id,
- 'Wrong doc_id when putting recovery document.')
-
-
-class SoledadSignalingTestCase(BaseSoledadTest):
- """
- These tests ensure signals are correctly emitted by Soledad.
- """
-
- EVENTS_SERVER_PORT = 8090
-
- def setUp(self):
- BaseSoledadTest.setUp(self)
- # mock signaling
- soledad.events.signal = Mock()
-
- def tearDown(self):
- pass
-
- def _pop_mock_call(self, mocked):
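- """
- Drop the last recorded call from the mock's (reversed) call lists so
- that call_args advances to the next signal in emission order.
- """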
- mocked.call_args_list.pop()
- mocked.mock_calls.pop()
- mocked.call_args = mocked.call_args_list[-1]
-
- def test_stage2_bootstrap_signals(self):
- """
- Test that a fresh soledad emits all bootstrap signals.
- """
- soledad.events.signal.reset_mock()
- # get a fresh instance so it emits all bootstrap signals
- sol = self._soledad_instance(
- secrets_path='alternative.json',
- local_db_path='alternative.u1db')
- # reverse call order so we can verify in the order the signals were
- # expected
- soledad.events.signal.mock_calls.reverse()
- soledad.events.signal.call_args = \
- soledad.events.signal.call_args_list[0]
- soledad.events.signal.call_args_list.reverse()
- # assert signals
- soledad.events.signal.assert_called_with(
- proto.SOLEDAD_DOWNLOADING_KEYS,
- ADDRESS,
- )
- self._pop_mock_call(soledad.events.signal)
- soledad.events.signal.assert_called_with(
- proto.SOLEDAD_DONE_DOWNLOADING_KEYS,
- ADDRESS,
- )
- self._pop_mock_call(soledad.events.signal)
- soledad.events.signal.assert_called_with(
- proto.SOLEDAD_CREATING_KEYS,
- ADDRESS,
- )
- self._pop_mock_call(soledad.events.signal)
- soledad.events.signal.assert_called_with(
- proto.SOLEDAD_DONE_CREATING_KEYS,
- ADDRESS,
- )
- self._pop_mock_call(soledad.events.signal)
- soledad.events.signal.assert_called_with(
- proto.SOLEDAD_DOWNLOADING_KEYS,
- ADDRESS,
- )
- self._pop_mock_call(soledad.events.signal)
- soledad.events.signal.assert_called_with(
- proto.SOLEDAD_DONE_DOWNLOADING_KEYS,
- ADDRESS,
- )
- self._pop_mock_call(soledad.events.signal)
- soledad.events.signal.assert_called_with(
- proto.SOLEDAD_UPLOADING_KEYS,
- ADDRESS,
- )
- self._pop_mock_call(soledad.events.signal)
- soledad.events.signal.assert_called_with(
- proto.SOLEDAD_DONE_UPLOADING_KEYS,
- ADDRESS,
- )
-
- def test_stage1_bootstrap_signals(self):
- """
- Test that an existing soledad emits some of the bootstrap signals.
- """
- soledad.events.signal.reset_mock()
- # get an existent instance so it emits only some of bootstrap signals
- sol = self._soledad_instance()
- # reverse call order so we can verify in the order the signals were
- # expected
- soledad.events.signal.mock_calls.reverse()
- soledad.events.signal.call_args = \
- soledad.events.signal.call_args_list[0]
- soledad.events.signal.call_args_list.reverse()
- # assert signals
- soledad.events.signal.assert_called_with(
- proto.SOLEDAD_DOWNLOADING_KEYS,
- ADDRESS,
- )
- self._pop_mock_call(soledad.events.signal)
- soledad.events.signal.assert_called_with(
- proto.SOLEDAD_DONE_DOWNLOADING_KEYS,
- ADDRESS,
- )
- self._pop_mock_call(soledad.events.signal)
- soledad.events.signal.assert_called_with(
- proto.SOLEDAD_UPLOADING_KEYS,
- ADDRESS,
- )
- self._pop_mock_call(soledad.events.signal)
- soledad.events.signal.assert_called_with(
- proto.SOLEDAD_DONE_UPLOADING_KEYS,
- ADDRESS,
- )
-
- def test_sync_signals(self):
- """
- Test that Soledad emits the SOLEDAD_DONE_DATA_SYNC signal when syncing.
- """
- soledad.events.signal.reset_mock()
- # get a fresh instance so it emits all bootstrap signals
- sol = self._soledad_instance()
- # mock the actual db sync so soledad does not try to connect to the
- # server
- sol._db.sync = Mock()
- # do the sync
- sol.sync()
- # assert the signal has been emitted
- soledad.events.signal.assert_called_with(
- proto.SOLEDAD_DONE_DATA_SYNC,
- ADDRESS,
- )
-
- def test_need_sync_signals(self):
- """
- Test that Soledad emits the SOLEDAD_NEW_DATA_TO_SYNC signal.
- """
- soledad.events.signal.reset_mock()
- sol = self._soledad_instance()
- # mock the sync target
- LeapSyncTarget.get_sync_info = Mock(return_value=[0, 0, 0, 0, 2])
- # mock our generation so soledad thinks there's new data to sync
- sol._db._get_generation = Mock(return_value=1)
- # check for new data to sync
- sol.need_sync('http://provider/userdb')
- # assert the signal has been emitted
- soledad.events.signal.assert_called_with(
- proto.SOLEDAD_NEW_DATA_TO_SYNC,
- ADDRESS,
- )
diff --git a/src/leap/soledad/tests/test_sqlcipher.py b/src/leap/soledad/tests/test_sqlcipher.py
deleted file mode 100644
index 9741bd4e..00000000
--- a/src/leap/soledad/tests/test_sqlcipher.py
+++ /dev/null
@@ -1,824 +0,0 @@
-# -*- coding: utf-8 -*-
-# test_sqlcipher.py
-# Copyright (C) 2013 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-"""
-Test sqlcipher backend internals.
-"""
-
-
-import os
-import time
-import unittest
-import simplejson as json
-import threading
-
-
-from pysqlcipher import dbapi2
-from StringIO import StringIO
-
-
-# u1db stuff.
-from u1db import (
- errors,
- query_parser,
- sync,
- vectorclock,
-)
-from u1db.backends.sqlite_backend import SQLitePartialExpandDatabase
-
-
-# soledad stuff.
-from leap.soledad.backends.sqlcipher import (
- SQLCipherDatabase,
- DatabaseIsNotEncrypted,
-)
-from leap.soledad.backends.sqlcipher import open as u1db_open
-from leap.soledad.backends.leap_backend import (
- LeapDocument,
- EncryptionSchemes,
- decrypt_doc,
- ENC_JSON_KEY,
- ENC_SCHEME_KEY,
-)
-
-
-# u1db tests stuff.
-from leap.soledad.tests import u1db_tests as tests, BaseSoledadTest
-from leap.soledad.tests.u1db_tests import test_sqlite_backend
-from leap.soledad.tests.u1db_tests import test_backends
-from leap.soledad.tests.u1db_tests import test_open
-from leap.soledad.tests.u1db_tests import test_sync
-from leap.soledad.backends.leap_backend import LeapSyncTarget
-from leap.common.testing.basetest import BaseLeapTest
-
-PASSWORD = '123456'
-
-
-#-----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_common_backend`.
-#-----------------------------------------------------------------------------
-
-class TestSQLCipherBackendImpl(tests.TestCase):
-
- def test__allocate_doc_id(self):
- db = SQLCipherDatabase(':memory:', PASSWORD)
- doc_id1 = db._allocate_doc_id()
- self.assertTrue(doc_id1.startswith('D-'))
- self.assertEqual(34, len(doc_id1))
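- # the suffix after 'D-' must be valid hex; int() raises otherwise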
- int(doc_id1[len('D-'):], 16)
- self.assertNotEqual(doc_id1, db._allocate_doc_id())
-
-
-#-----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_backends`.
-#-----------------------------------------------------------------------------
-
-def make_sqlcipher_database_for_test(test, replica_uid):
- db = SQLCipherDatabase(':memory:', PASSWORD)
- db._set_replica_uid(replica_uid)
- return db
-
-
-def copy_sqlcipher_database_for_test(test, db):
- # DO NOT COPY OR REUSE THIS CODE OUTSIDE TESTS: COPYING U1DB DATABASES IS
- # THE WRONG THING TO DO, THE ONLY REASON WE DO SO HERE IS TO TEST THAT WE
- # CORRECTLY DETECT IT HAPPENING SO THAT WE CAN RAISE ERRORS RATHER THAN
- # CORRUPT USER DATA. USE SYNC INSTEAD, OR WE WILL SEND NINJA TO YOUR
- # HOUSE.
- new_db = SQLCipherDatabase(':memory:', PASSWORD)
- tmpfile = StringIO()
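- # dump the source db as SQL statements and replay them into a plain
- # in-memory connection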
- for line in db._db_handle.iterdump():
- if 'sqlite_sequence' not in line: # work around bug in iterdump
- tmpfile.write('%s\n' % line)
- tmpfile.seek(0)
- new_db._db_handle = dbapi2.connect(':memory:')
- new_db._db_handle.cursor().executescript(tmpfile.read())
- new_db._db_handle.commit()
- new_db._set_replica_uid(db._replica_uid)
- new_db._factory = db._factory
- return new_db
-
-
-def make_document_for_test(test, doc_id, rev, content, has_conflicts=False):
- return LeapDocument(doc_id, rev, content, has_conflicts=has_conflicts)
-
-
-SQLCIPHER_SCENARIOS = [
- ('sqlcipher', {'make_database_for_test': make_sqlcipher_database_for_test,
- 'copy_database_for_test': copy_sqlcipher_database_for_test,
- 'make_document_for_test': make_document_for_test, }),
-]
-
-
-class SQLCipherTests(test_backends.AllDatabaseTests):
- scenarios = SQLCIPHER_SCENARIOS
-
-
-class SQLCipherDatabaseTests(test_backends.LocalDatabaseTests):
- scenarios = SQLCIPHER_SCENARIOS
-
-
-class SQLCipherValidateGenNTransIdTests(
- test_backends.LocalDatabaseValidateGenNTransIdTests):
- scenarios = SQLCIPHER_SCENARIOS
-
-
-class SQLCipherValidateSourceGenTests(
- test_backends.LocalDatabaseValidateSourceGenTests):
- scenarios = SQLCIPHER_SCENARIOS
-
-
-class SQLCipherWithConflictsTests(
- test_backends.LocalDatabaseWithConflictsTests):
- scenarios = SQLCIPHER_SCENARIOS
-
-
-class SQLCipherIndexTests(test_backends.DatabaseIndexTests):
- scenarios = SQLCIPHER_SCENARIOS
-
-
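-# load_tests protocol hook: lets testscenarios multiply each test case
-# in this module by its scenarios.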
-load_tests = tests.load_with_scenarios
-
-
-#-----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_sqlite_backend`.
-#-----------------------------------------------------------------------------
-
-class TestSQLCipherDatabase(test_sqlite_backend.TestSQLiteDatabase):
-
- def test_atomic_initialize(self):
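- """
- Check that two threads racing to initialize the same database file
- both end up with a correctly initialized database.
- """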
- tmpdir = self.createTempDir()
- dbname = os.path.join(tmpdir, 'atomic.db')
-
- t2 = None # will be a thread
-
- class SQLCipherDatabaseTesting(SQLCipherDatabase):
- _index_storage_value = "testing"
-
- def __init__(self, dbname, ntry):
- self._try = ntry
- self._is_initialized_invocations = 0
- SQLCipherDatabase.__init__(self, dbname, PASSWORD)
-
- def _is_initialized(self, c):
- res = \
- SQLCipherDatabase._is_initialized(self, c)
- if self._try == 1:
- self._is_initialized_invocations += 1
- if self._is_initialized_invocations == 2:
- t2.start()
- # hard to do better and have a generic test
- time.sleep(0.05)
- return res
-
- outcome2 = []
-
- def second_try():
- try:
- db2 = SQLCipherDatabaseTesting(dbname, 2)
- except Exception, e:
- outcome2.append(e)
- else:
- outcome2.append(db2)
-
- t2 = threading.Thread(target=second_try)
- db1 = SQLCipherDatabaseTesting(dbname, 1)
- t2.join()
-
- self.assertIsInstance(outcome2[0], SQLCipherDatabaseTesting)
- db2 = outcome2[0]
- self.assertTrue(db2._is_initialized(db1._get_sqlite_handle().cursor()))
-
-
-class TestAlternativeDocument(LeapDocument):
- """A (not very) alternative implementation of Document."""
-
-
-class TestSQLCipherPartialExpandDatabase(
- test_sqlite_backend.TestSQLitePartialExpandDatabase):
-
- # The following tests had to be cloned from u1db because they all
- # instantiate the backend directly, so we had to change them so that
- # our backend is instantiated in their place.
-
- def setUp(self):
- test_sqlite_backend.TestSQLitePartialExpandDatabase.setUp(self)
- self.db = SQLCipherDatabase(':memory:', PASSWORD)
- self.db._set_replica_uid('test')
-
- def test_default_replica_uid(self):
- self.db = SQLCipherDatabase(':memory:', PASSWORD)
- self.assertIsNot(None, self.db._replica_uid)
- self.assertEqual(32, len(self.db._replica_uid))
- int(self.db._replica_uid, 16)
-
- def test__parse_index(self):
- self.db = SQLCipherDatabase(':memory:', PASSWORD)
- g = self.db._parse_index_definition('fieldname')
- self.assertIsInstance(g, query_parser.ExtractField)
- self.assertEqual(['fieldname'], g.field)
-
- def test__update_indexes(self):
- self.db = SQLCipherDatabase(':memory:', PASSWORD)
- g = self.db._parse_index_definition('fieldname')
- c = self.db._get_sqlite_handle().cursor()
- self.db._update_indexes('doc-id', {'fieldname': 'val'},
- [('fieldname', g)], c)
- c.execute('SELECT doc_id, field_name, value FROM document_fields')
- self.assertEqual([('doc-id', 'fieldname', 'val')],
- c.fetchall())
-
- def test__set_replica_uid(self):
- # Start from scratch, so that replica_uid isn't set.
- self.db = SQLCipherDatabase(':memory:', PASSWORD)
- self.assertIsNot(None, self.db._real_replica_uid)
- self.assertIsNot(None, self.db._replica_uid)
- self.db._set_replica_uid('foo')
- c = self.db._get_sqlite_handle().cursor()
- c.execute("SELECT value FROM u1db_config WHERE name='replica_uid'")
- self.assertEqual(('foo',), c.fetchone())
- self.assertEqual('foo', self.db._real_replica_uid)
- self.assertEqual('foo', self.db._replica_uid)
- self.db._close_sqlite_handle()
- self.assertEqual('foo', self.db._replica_uid)
-
- def test__open_database(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/test.sqlite'
- SQLCipherDatabase(path, PASSWORD)
- db2 = SQLCipherDatabase._open_database(path, PASSWORD)
- self.assertIsInstance(db2, SQLCipherDatabase)
-
- def test__open_database_with_factory(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/test.sqlite'
- SQLCipherDatabase(path, PASSWORD)
- db2 = SQLCipherDatabase._open_database(
- path, PASSWORD,
- document_factory=TestAlternativeDocument)
- doc = db2.create_doc({})
- self.assertTrue(isinstance(doc, LeapDocument))
-
- def test__open_database_non_existent(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/non-existent.sqlite'
- self.assertRaises(errors.DatabaseDoesNotExist,
- SQLCipherDatabase._open_database,
- path, PASSWORD)
-
- def test__open_database_during_init(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/initialised.db'
- db = SQLCipherDatabase.__new__(
- SQLCipherDatabase)
- db._db_handle = dbapi2.connect(path) # db is there but not yet init-ed
- c = db._db_handle.cursor()
- c.execute('PRAGMA key="%s"' % PASSWORD)
- self.addCleanup(db.close)
- observed = []
-
- class SQLiteDatabaseTesting(SQLCipherDatabase):
- WAIT_FOR_PARALLEL_INIT_HALF_INTERVAL = 0.1
-
- @classmethod
- def _which_index_storage(cls, c):
- res = SQLCipherDatabase._which_index_storage(c)
- db._ensure_schema() # init db
- observed.append(res[0])
- return res
-
- db2 = SQLiteDatabaseTesting._open_database(path, PASSWORD)
- self.addCleanup(db2.close)
- self.assertIsInstance(db2, SQLCipherDatabase)
- self.assertEqual(
- [None,
- SQLCipherDatabase._index_storage_value],
- observed)
-
- def test__open_database_invalid(self):
- class SQLiteDatabaseTesting(SQLCipherDatabase):
- WAIT_FOR_PARALLEL_INIT_HALF_INTERVAL = 0.1
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path1 = temp_dir + '/invalid1.db'
- with open(path1, 'wb') as f:
- f.write("")
- self.assertRaises(dbapi2.OperationalError,
- SQLiteDatabaseTesting._open_database, path1,
- PASSWORD)
- with open(path1, 'wb') as f:
- f.write("invalid")
- self.assertRaises(dbapi2.DatabaseError,
- SQLiteDatabaseTesting._open_database, path1,
- PASSWORD)
-
- def test_open_database_existing(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/existing.sqlite'
- SQLCipherDatabase(path, PASSWORD)
- db2 = SQLCipherDatabase.open_database(path, PASSWORD, create=False)
- self.assertIsInstance(db2, SQLCipherDatabase)
-
- def test_open_database_with_factory(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/existing.sqlite'
- SQLCipherDatabase(path, PASSWORD)
- db2 = SQLCipherDatabase.open_database(
- path, PASSWORD, create=False,
- document_factory=TestAlternativeDocument)
- doc = db2.create_doc({})
- self.assertTrue(isinstance(doc, LeapDocument))
-
- def test_open_database_create(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/new.sqlite'
- SQLCipherDatabase.open_database(path, PASSWORD, create=True)
- db2 = SQLCipherDatabase.open_database(path, PASSWORD, create=False)
- self.assertIsInstance(db2, SQLCipherDatabase)
-
- def test_create_database_initializes_schema(self):
- # This test had to be cloned because our implementation of SQLCipher
- # backend is referenced with an index_storage_value that includes the
- # word "encrypted". See u1db's sqlite_backend and our
- # sqlcipher_backend for reference.
- raw_db = self.db._get_sqlite_handle()
- c = raw_db.cursor()
- c.execute("SELECT * FROM u1db_config")
- config = dict([(r[0], r[1]) for r in c.fetchall()])
- self.assertEqual({'sql_schema': '0', 'replica_uid': 'test',
- 'index_storage': 'expand referenced encrypted'},
- config)
-
- def test_store_syncable(self):
- doc = self.db.create_doc_from_json(tests.simple_doc)
- # assert that docs are syncable by default
- self.assertEqual(True, doc.syncable)
- # assert that we can store syncable = False
- doc.syncable = False
- self.db.put_doc(doc)
- self.assertEqual(False, self.db.get_doc(doc.doc_id).syncable)
- # assert that we can store syncable = True
- doc.syncable = True
- self.db.put_doc(doc)
- self.assertEqual(True, self.db.get_doc(doc.doc_id).syncable)
-
-
-#-----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_open`.
-#-----------------------------------------------------------------------------
-
-class SQLCipherOpen(test_open.TestU1DBOpen):
-
- def test_open_no_create(self):
- self.assertRaises(errors.DatabaseDoesNotExist,
- u1db_open, self.db_path,
- password=PASSWORD,
- create=False)
- self.assertFalse(os.path.exists(self.db_path))
-
- def test_open_create(self):
- db = u1db_open(self.db_path, password=PASSWORD, create=True)
- self.addCleanup(db.close)
- self.assertTrue(os.path.exists(self.db_path))
- self.assertIsInstance(db, SQLCipherDatabase)
-
- def test_open_with_factory(self):
- db = u1db_open(self.db_path, password=PASSWORD, create=True,
- document_factory=TestAlternativeDocument)
- self.addCleanup(db.close)
- doc = db.create_doc({})
- self.assertTrue(isinstance(doc, LeapDocument))
-
- def test_open_existing(self):
- db = SQLCipherDatabase(self.db_path, PASSWORD)
- self.addCleanup(db.close)
- doc = db.create_doc_from_json(tests.simple_doc)
- # Even though create=True, we shouldn't wipe the db
- db2 = u1db_open(self.db_path, password=PASSWORD, create=True)
- self.addCleanup(db2.close)
- doc2 = db2.get_doc(doc.doc_id)
- self.assertEqual(doc, doc2)
-
- def test_open_existing_no_create(self):
- db = SQLCipherDatabase(self.db_path, PASSWORD)
- self.addCleanup(db.close)
- db2 = u1db_open(self.db_path, password=PASSWORD, create=False)
- self.addCleanup(db2.close)
- self.assertIsInstance(db2, SQLCipherDatabase)
-
-
-#-----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_sync`.
-#-----------------------------------------------------------------------------
-
-sync_scenarios = []
-for name, scenario in SQLCIPHER_SCENARIOS:
- scenario = dict(scenario)
- scenario['do_sync'] = test_sync.sync_via_synchronizer
- sync_scenarios.append((name, scenario))
-
-
-def sync_via_synchronizer_and_leap(test, db_source, db_target,
- trace_hook=None, trace_hook_shallow=None):
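- """
- Sync db_source with db_target by connecting a token-authenticated
- LeapSyncTarget to the HTTP app serving db_target.
- """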
- if trace_hook:
- test.skipTest("full trace hook unsupported over http")
- path = test._http_at[db_target]
- target = LeapSyncTarget.connect(test.getURL(path), test._soledad._crypto)
- target.set_token_credentials('user-uuid', 'auth-token')
- if trace_hook_shallow:
- target._set_trace_hook_shallow(trace_hook_shallow)
- return sync.Synchronizer(db_source, target).sync()
-
-
-sync_scenarios.append(('pyleap', {
- 'make_database_for_test': test_sync.make_database_for_http_test,
- 'copy_database_for_test': test_sync.copy_database_for_http_test,
- 'make_document_for_test': make_document_for_test,
- 'make_app_with_state': tests.test_remote_sync_target.make_http_app,
- 'do_sync': sync_via_synchronizer_and_leap,
-}))
-
-
-class SQLCipherDatabaseSyncTests(
- test_sync.DatabaseSyncTests, BaseSoledadTest):
- """
- Tests for successful sync between SQLCipher and LeapBackend.
-
- Some of the tests in this class had to be adapted because the remote
- backend always receives encrypted content, and so it cannot rely on
- comparing document contents to try to autoresolve conflicts.
- """
-
- scenarios = sync_scenarios
-
- def setUp(self):
- test_sync.DatabaseSyncTests.setUp(self)
- BaseSoledadTest.setUp(self)
-
- def tearDown(self):
- test_sync.DatabaseSyncTests.tearDown(self)
- BaseSoledadTest.tearDown(self)
-
- def test_sync_autoresolves(self):
- """
- Test for sync autoresolve remote.
-
- This test was adapted because the remote database receives encrypted
- content and so it can't compare document contents to autoresolve.
- """
- # The remote database can't autoresolve conflicts based on magic
- # content convergence, so we modify this test to leave the possibility
- # of the remote document ending up in a conflicted state.
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- doc1 = self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc')
- rev1 = doc1.rev
- doc2 = self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc')
- rev2 = doc2.rev
- self.sync(self.db1, self.db2)
- doc = self.db1.get_doc('doc')
- self.assertFalse(doc.has_conflicts)
- # if remote content is in conflicted state, then document revisions
- # will be different.
- #self.assertEqual(doc.rev, self.db2.get_doc('doc').rev)
- v = vectorclock.VectorClockRev(doc.rev)
- self.assertTrue(v.is_newer(vectorclock.VectorClockRev(rev1)))
- self.assertTrue(v.is_newer(vectorclock.VectorClockRev(rev2)))
-
- def test_sync_autoresolves_moar(self):
- """
- Test for sync autoresolve local.
-
- This test was adapted to decrypt remote content before assert.
- """
- # here we test that when a database that has a conflicted document is
- # the source of a sync, and the target database has a revision of the
- # conflicted document that is newer than the source database's, and
- # that target's database's document's content is the same as the
- # source's document's conflict's, the source's document's conflict gets
- # autoresolved, and the source's document's revision bumped.
- #
- # idea is as follows:
- # A B
- # a1 -
- # `------->
- # a1 a1
- # v v
- # a2 a1b1
- # `------->
- # a1b1+a2 a1b1
- # v
- # a1b1+a2 a1b2 (a1b2 has same content as a2)
- # `------->
- # a3b2 a1b2 (autoresolved)
- # `------->
- # a3b2 a3b2
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc')
- self.sync(self.db1, self.db2)
- for db, content in [(self.db1, '{}'), (self.db2, '{"hi": 42}')]:
- doc = db.get_doc('doc')
- doc.set_json(content)
- db.put_doc(doc)
- self.sync(self.db1, self.db2)
- # db1 and db2 now both have a doc of {hi:42}, but db1 has a conflict
- doc = self.db1.get_doc('doc')
- rev1 = doc.rev
- self.assertTrue(doc.has_conflicts)
- # set db2 to have a doc of {} (same as db1 before the conflict)
- doc = self.db2.get_doc('doc')
- doc.set_json('{}')
- self.db2.put_doc(doc)
- rev2 = doc.rev
- # sync it across
- self.sync(self.db1, self.db2)
- # tadaa!
- doc = self.db1.get_doc('doc')
- self.assertFalse(doc.has_conflicts)
- vec1 = vectorclock.VectorClockRev(rev1)
- vec2 = vectorclock.VectorClockRev(rev2)
- vec3 = vectorclock.VectorClockRev(doc.rev)
- self.assertTrue(vec3.is_newer(vec1))
- self.assertTrue(vec3.is_newer(vec2))
- # because the conflict is on the source, sync it another time
- self.sync(self.db1, self.db2)
- # make sure db2 now has the exact same thing
- doc1 = self.db1.get_doc('doc')
- self.assertGetEncryptedDoc(
- self.db2,
- doc1.doc_id, doc1.rev, doc1.get_json(), False)
-
- def test_sync_autoresolves_moar_backwards(self):
- # here we would test that when a database that has a conflicted
- # document is the target of a sync, and the source database has a
- # revision of the conflicted document that is newer than the target
- # database's, and that source's database's document's content is the
- # same as the target's document's conflict's, the target's document's
- # conflict gets autoresolved, and the document's revision bumped.
- #
- # Despite that, in Soledad we suppose that the server never syncs, so
- # it never has conflicted documents. Also, if it had, convergence
- # would not be possible by checking document's contents because they
- # would be encrypted in server.
- #
- # Therefore we suppress this test.
- pass
-
- def test_sync_autoresolves_moar_backwards_three(self):
- # here we would test that when a database that has a conflicted
- # document is the target of a sync, and the source database has a
- # revision of the conflicted document that is newer than the target
- # database's, and that source's database's document's content is the
- # same as the target's document's conflict's, the target's document's
- # conflict gets autoresolved, and the document's revision bumped.
- #
- # We use the same reasoning from the last test to suppress this one.
- pass
-
- def test_sync_propagates_resolution(self):
- """
- Test if synchronization propagates resolution.
-
- This test was adapted to decrypt remote content before assert.
- """
- self.db1 = self.create_database('test1', 'both')
- self.db2 = self.create_database('test2', 'both')
- doc1 = self.db1.create_doc_from_json('{"a": 1}', doc_id='the-doc')
- db3 = self.create_database('test3', 'both')
- self.sync(self.db2, self.db1)
- self.assertEqual(
- self.db1._get_generation_info(),
- self.db2._get_replica_gen_and_trans_id(self.db1._replica_uid))
- self.assertEqual(
- self.db2._get_generation_info(),
- self.db1._get_replica_gen_and_trans_id(self.db2._replica_uid))
- self.sync(db3, self.db1)
- # update on 2
- doc2 = self.make_document('the-doc', doc1.rev, '{"a": 2}')
- self.db2.put_doc(doc2)
- self.sync(self.db2, db3)
- self.assertEqual(db3.get_doc('the-doc').rev, doc2.rev)
- # update on 1
- doc1.set_json('{"a": 3}')
- self.db1.put_doc(doc1)
- # conflicts
- self.sync(self.db2, self.db1)
- self.sync(db3, self.db1)
- self.assertTrue(self.db2.get_doc('the-doc').has_conflicts)
- self.assertTrue(db3.get_doc('the-doc').has_conflicts)
- # resolve
- conflicts = self.db2.get_doc_conflicts('the-doc')
- doc4 = self.make_document('the-doc', None, '{"a": 4}')
- revs = [doc.rev for doc in conflicts]
- self.db2.resolve_doc(doc4, revs)
- doc2 = self.db2.get_doc('the-doc')
- self.assertEqual(doc4.get_json(), doc2.get_json())
- self.assertFalse(doc2.has_conflicts)
- self.sync(self.db2, db3)
- doc3 = db3.get_doc('the-doc')
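- # the copy synced to db3 may still be encrypted, so decrypt it before
- # comparing contents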
- if ENC_SCHEME_KEY in doc3.content:
- doc3.set_json(decrypt_doc(self._soledad._crypto, doc3))
- self.assertEqual(doc4.get_json(), doc3.get_json())
- self.assertFalse(doc3.has_conflicts)
-
- def test_sync_puts_changes(self):
- """
- Test if sync puts changes in remote replica.
-
- This test was adapted to decrypt remote content before assert.
- """
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- doc = self.db1.create_doc_from_json(tests.simple_doc)
- self.assertEqual(1, self.sync(self.db1, self.db2))
- self.assertGetEncryptedDoc(
- self.db2, doc.doc_id, doc.rev, tests.simple_doc, False)
- self.assertEqual(1, self.db1._get_replica_gen_and_trans_id('test2')[0])
- self.assertEqual(1, self.db2._get_replica_gen_and_trans_id('test1')[0])
- self.assertLastExchangeLog(
- self.db2,
- {'receive': {'docs': [(doc.doc_id, doc.rev)],
- 'source_uid': 'test1',
- 'source_gen': 1, 'last_known_gen': 0},
- 'return': {'docs': [], 'last_gen': 1}})
-
-
-def _make_local_db_and_leap_target(test, path='test'):
- test.startServer()
- db = test.request_state._create_database(os.path.basename(path))
- st = LeapSyncTarget.connect(test.getURL(path), test._soledad._crypto)
- st.set_token_credentials('user-uuid', 'auth-token')
- return db, st
-
-
-target_scenarios = [
- ('leap', {
- 'create_db_and_target': _make_local_db_and_leap_target,
- 'make_app_with_state': tests.test_remote_sync_target.make_http_app}),
-]
-
-
-class SQLCipherSyncTargetTests(
- test_sync.DatabaseSyncTargetTests, BaseSoledadTest):
-
- scenarios = (tests.multiply_scenarios(SQLCIPHER_SCENARIOS,
- target_scenarios))
-
- def setUp(self):
- test_sync.DatabaseSyncTargetTests.setUp(self)
- BaseSoledadTest.setUp(self)
-
- def tearDown(self):
- test_sync.DatabaseSyncTargetTests.tearDown(self)
- BaseSoledadTest.tearDown(self)
-
- def test_sync_exchange(self):
- """
- Modified to account for possibly receiving encrypted documents from
- the server side.
- """
- docs_by_gen = [
- (self.make_document('doc-id', 'replica:1', tests.simple_doc), 10,
- 'T-sid')]
- new_gen, trans_id = self.st.sync_exchange(
- docs_by_gen, 'replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=self.receive_doc)
- self.assertGetEncryptedDoc(
- self.db, 'doc-id', 'replica:1', tests.simple_doc, False)
- self.assertTransactionLog(['doc-id'], self.db)
- last_trans_id = self.getLastTransId(self.db)
- self.assertEqual(([], 1, last_trans_id),
- (self.other_changes, new_gen, trans_id))
- self.assertEqual(10, self.st.get_sync_info('replica')[3])
-
- def test_sync_exchange_push_many(self):
- """
- Modified to account for possibly receiving encrypted documents from
- the server side.
- """
- docs_by_gen = [
- (self.make_document(
- 'doc-id', 'replica:1', tests.simple_doc), 10, 'T-1'),
- (self.make_document('doc-id2', 'replica:1', tests.nested_doc), 11,
- 'T-2')]
- new_gen, trans_id = self.st.sync_exchange(
- docs_by_gen, 'replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=self.receive_doc)
- self.assertGetEncryptedDoc(
- self.db, 'doc-id', 'replica:1', tests.simple_doc, False)
- self.assertGetEncryptedDoc(
- self.db, 'doc-id2', 'replica:1', tests.nested_doc, False)
- self.assertTransactionLog(['doc-id', 'doc-id2'], self.db)
- last_trans_id = self.getLastTransId(self.db)
- self.assertEqual(([], 2, last_trans_id),
- (self.other_changes, new_gen, trans_id))
- self.assertEqual(11, self.st.get_sync_info('replica')[3])
-
- def test_sync_exchange_returns_many_new_docs(self):
- """
- Modified to account for JSON serialization differences.
- """
- doc = self.db.create_doc_from_json(tests.simple_doc)
- doc2 = self.db.create_doc_from_json(tests.nested_doc)
- self.assertTransactionLog([doc.doc_id, doc2.doc_id], self.db)
- new_gen, _ = self.st.sync_exchange(
- [], 'other-replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=self.receive_doc)
- self.assertTransactionLog([doc.doc_id, doc2.doc_id], self.db)
- self.assertEqual(2, new_gen)
- self.assertEqual(
- [(doc.doc_id, doc.rev, 1),
- (doc2.doc_id, doc2.rev, 2)],
- [c[:2] + c[3:4] for c in self.other_changes])
- self.assertEqual(
- json.loads(tests.simple_doc),
- json.loads(self.other_changes[0][2]))
- self.assertEqual(
- json.loads(tests.nested_doc),
- json.loads(self.other_changes[1][2]))
- if self.whitebox:
- self.assertEqual(
- self.db._last_exchange_log['return'],
- {'last_gen': 2, 'docs':
- [(doc.doc_id, doc.rev), (doc2.doc_id, doc2.rev)]})
-
-
-#-----------------------------------------------------------------------------
-# Tests for actual encryption of the database
-#-----------------------------------------------------------------------------
-
-class SQLCipherEncryptionTest(BaseLeapTest):
- """
- Tests to guarantee SQLCipher is indeed encrypting data when storing.
- """
-
- def _delete_dbfiles(self):
- for dbfile in [self.DB_FILE]:
- if os.path.exists(dbfile):
- os.unlink(dbfile)
-
- def setUp(self):
- self.DB_FILE = os.path.join(self.tempdir, 'test.db')
- self._delete_dbfiles()
-
- def tearDown(self):
- self._delete_dbfiles()
-
- def test_try_to_open_encrypted_db_with_sqlite_backend(self):
- """
- The SQLite backend should not be able to open SQLCipher databases.
- """
- db = SQLCipherDatabase(self.DB_FILE, PASSWORD)
- doc = db.create_doc_from_json(tests.simple_doc)
- db.close()
- try:
- # trying to open an encrypted database with the regular u1db
- # backend should raise a DatabaseError exception.
- SQLitePartialExpandDatabase(self.DB_FILE,
- document_factory=LeapDocument)
- raise DatabaseIsNotEncrypted()
- except dbapi2.DatabaseError:
- # at this point we know that the regular U1DB SQLite backend
- # did not succeed in opening the database, so it was indeed
- # encrypted.
- db = SQLCipherDatabase(self.DB_FILE, PASSWORD)
- doc = db.get_doc(doc.doc_id)
- self.assertEqual(tests.simple_doc, doc.get_json(),
- 'decrypted content mismatch')
-
- def test_try_to_open_raw_db_with_sqlcipher_backend(self):
- """
- The SQLCipher backend should not be able to open unencrypted databases.
- """
- db = SQLitePartialExpandDatabase(self.DB_FILE,
- document_factory=LeapDocument)
- db.create_doc_from_json(tests.simple_doc)
- db.close()
- try:
- # trying to open a non-encrypted database with the sqlcipher
- # backend should raise a DatabaseIsNotEncrypted exception.
- SQLCipherDatabase(self.DB_FILE, PASSWORD)
- raise dbapi2.DatabaseError(
- "SQLCipher backend should not be able to open non-encrypted "
- "dbs.")
- except DatabaseIsNotEncrypted:
- pass
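-
- # Sketch of a complementary check (an addition, not from the original
- # test suite): plaintext SQLite files always begin with the 16-byte
- # header "SQLite format 3\x00", while a SQLCipher database stores a
- # random salt in those bytes, so inspecting the raw file header also
- # detects encryption.
- def test_db_file_header_is_not_plaintext_sqlite(self):
- db = SQLCipherDatabase(self.DB_FILE, PASSWORD)
- db.create_doc_from_json(tests.simple_doc)
- db.close()
- f = open(self.DB_FILE, 'rb')
- header = f.read(16)
- f.close()
- self.assertNotEqual('SQLite format 3\x00', header)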
-
-
-load_tests = tests.load_with_scenarios
diff --git a/src/leap/soledad/tests/u1db_tests/README b/src/leap/soledad/tests/u1db_tests/README
deleted file mode 100644
index 605f01fa..00000000
--- a/src/leap/soledad/tests/u1db_tests/README
+++ /dev/null
@@ -1,34 +0,0 @@
-General info
-------------
-
-Test files in this directory are derived from the u1db-0.1.4 tests. The main
-differences are that:
-
- (1) they include the test infrastructure packed with soledad; and
- (2) they do not include c_backend_wrapper testing.
-
-Dependencies
-------------
-
-u1db tests depend on the following python packages:
-
- nose2
- unittest2
- mercurial
- hgtools
- testtools
- discover
- oauth
- testscenarios
- dirspec
- paste
- routes
- simplejson
- cython
-
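-One way to install them (an example; assuming pip is available):
-
- pip install nose2 unittest2 mercurial hgtools testtools discover oauth testscenarios dirspec paste routes simplejson cython
-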
-Running tests
--------------
-
-Use nose2 to run tests:
-
- nose2 leap.soledad.tests.u1db_tests
diff --git a/src/leap/soledad/tests/u1db_tests/__init__.py b/src/leap/soledad/tests/u1db_tests/__init__.py
deleted file mode 100644
index 43304b43..00000000
--- a/src/leap/soledad/tests/u1db_tests/__init__.py
+++ /dev/null
@@ -1,421 +0,0 @@
-# Copyright 2011-2012 Canonical Ltd.
-#
-# This file is part of u1db.
-#
-# u1db is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3
-# as published by the Free Software Foundation.
-#
-# u1db is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with u1db. If not, see <http://www.gnu.org/licenses/>.
-
-"""Test infrastructure for U1DB"""
-
-import copy
-import shutil
-import socket
-import tempfile
-import threading
-
-try:
- import simplejson as json
-except ImportError:
- import json # noqa
-
-from wsgiref import simple_server
-
-from oauth import oauth
-from pysqlcipher import dbapi2
-from StringIO import StringIO
-
-import testscenarios
-import testtools
-
-from u1db import (
- errors,
- Document,
-)
-from u1db.backends import (
- inmemory,
- sqlite_backend,
-)
-from u1db.remote import (
- server_state,
-)
-
-
-class TestCase(testtools.TestCase):
-
- def createTempDir(self, prefix='u1db-tmp-'):
- """Create a temporary directory to do some work in.
-
- This directory will be scheduled for cleanup when the test ends.
- """
- tempdir = tempfile.mkdtemp(prefix=prefix)
- self.addCleanup(shutil.rmtree, tempdir)
- return tempdir
-
- def make_document(self, doc_id, doc_rev, content, has_conflicts=False):
- return self.make_document_for_test(
- self, doc_id, doc_rev, content, has_conflicts)
-
- def make_document_for_test(self, test, doc_id, doc_rev, content,
- has_conflicts):
- return make_document_for_test(
- test, doc_id, doc_rev, content, has_conflicts)
-
- def assertGetDoc(self, db, doc_id, doc_rev, content, has_conflicts):
- """Assert that the document in the database looks correct."""
- exp_doc = self.make_document(doc_id, doc_rev, content,
- has_conflicts=has_conflicts)
- self.assertEqual(exp_doc, db.get_doc(doc_id))
-
- def assertGetDocIncludeDeleted(self, db, doc_id, doc_rev, content,
- has_conflicts):
- """Assert that the document in the database looks correct."""
- exp_doc = self.make_document(doc_id, doc_rev, content,
- has_conflicts=has_conflicts)
- self.assertEqual(exp_doc, db.get_doc(doc_id, include_deleted=True))
-
- def assertGetDocConflicts(self, db, doc_id, conflicts):
- """Assert what conflicts are stored for a given doc_id.
-
- :param conflicts: A list of (doc_rev, content) pairs.
- The first item must match the first item returned from the
- database, however the rest can be returned in any order.
- """
- if conflicts:
- conflicts = [(rev,
- (json.loads(cont) if isinstance(cont, basestring)
- else cont)) for (rev, cont) in conflicts]
- conflicts = conflicts[:1] + sorted(conflicts[1:])
- actual = db.get_doc_conflicts(doc_id)
- if actual:
- actual = [
- (doc.rev, (json.loads(doc.get_json())
- if doc.get_json() is not None else None))
- for doc in actual]
- actual = actual[:1] + sorted(actual[1:])
- self.assertEqual(conflicts, actual)
-
-
-def multiply_scenarios(a_scenarios, b_scenarios):
- """Create the cross-product of scenarios."""
-
- all_scenarios = []
- for a_name, a_attrs in a_scenarios:
- for b_name, b_attrs in b_scenarios:
- name = '%s,%s' % (a_name, b_name)
- attrs = dict(a_attrs)
- attrs.update(b_attrs)
- all_scenarios.append((name, attrs))
- return all_scenarios
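-
-# For example (a sketch): crossing one scenario from each list combines
-# the names with a comma and merges the attribute dicts:
-#
-#   multiply_scenarios([('mem', {'a': 1})], [('http', {'b': 2})])
-#   => [('mem,http', {'a': 1, 'b': 2})]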
-
-
-simple_doc = '{"key": "value"}'
-nested_doc = '{"key": "value", "sub": {"doc": "underneath"}}'
-
-
-def make_memory_database_for_test(test, replica_uid):
- return inmemory.InMemoryDatabase(replica_uid)
-
-
-def copy_memory_database_for_test(test, db):
- # DO NOT COPY OR REUSE THIS CODE OUTSIDE TESTS: COPYING U1DB DATABASES IS
- # THE WRONG THING TO DO, THE ONLY REASON WE DO SO HERE IS TO TEST THAT WE
- # CORRECTLY DETECT IT HAPPENING SO THAT WE CAN RAISE ERRORS RATHER THAN
- # CORRUPT USER DATA. USE SYNC INSTEAD, OR WE WILL SEND NINJA TO YOUR
- # HOUSE.
- new_db = inmemory.InMemoryDatabase(db._replica_uid)
- new_db._transaction_log = db._transaction_log[:]
- new_db._docs = copy.deepcopy(db._docs)
- new_db._conflicts = copy.deepcopy(db._conflicts)
- new_db._indexes = copy.deepcopy(db._indexes)
- new_db._factory = db._factory
- return new_db
-
-
-def make_sqlite_partial_expanded_for_test(test, replica_uid):
- db = sqlite_backend.SQLitePartialExpandDatabase(':memory:')
- db._set_replica_uid(replica_uid)
- return db
-
-
-def copy_sqlite_partial_expanded_for_test(test, db):
- # DO NOT COPY OR REUSE THIS CODE OUTSIDE TESTS: COPYING U1DB DATABASES IS
- # THE WRONG THING TO DO, THE ONLY REASON WE DO SO HERE IS TO TEST THAT WE
- # CORRECTLY DETECT IT HAPPENING SO THAT WE CAN RAISE ERRORS RATHER THAN
- # CORRUPT USER DATA. USE SYNC INSTEAD, OR WE WILL SEND NINJA TO YOUR
- # HOUSE.
- new_db = sqlite_backend.SQLitePartialExpandDatabase(':memory:')
- tmpfile = StringIO()
- for line in db._db_handle.iterdump():
- if 'sqlite_sequence' not in line: # work around bug in iterdump
- tmpfile.write('%s\n' % line)
- tmpfile.seek(0)
- new_db._db_handle = dbapi2.connect(':memory:')
- new_db._db_handle.cursor().executescript(tmpfile.read())
- new_db._db_handle.commit()
- new_db._set_replica_uid(db._replica_uid)
- new_db._factory = db._factory
- return new_db
-
-
-def make_document_for_test(test, doc_id, rev, content, has_conflicts=False):
- return Document(doc_id, rev, content, has_conflicts=has_conflicts)
-
-
-LOCAL_DATABASES_SCENARIOS = [
- ('mem', {'make_database_for_test': make_memory_database_for_test,
- 'copy_database_for_test': copy_memory_database_for_test,
- 'make_document_for_test': make_document_for_test}),
- ('sql', {'make_database_for_test':
- make_sqlite_partial_expanded_for_test,
- 'copy_database_for_test':
- copy_sqlite_partial_expanded_for_test,
- 'make_document_for_test': make_document_for_test}),
-]
-
-
-class DatabaseBaseTests(TestCase):
-
- accept_fixed_trans_id = False # set to True so that assertTransactionLog
- # accepts all trans ids = ''
-
- scenarios = LOCAL_DATABASES_SCENARIOS
-
- def create_database(self, replica_uid):
- return self.make_database_for_test(self, replica_uid)
-
- def copy_database(self, db):
- # DO NOT COPY OR REUSE THIS CODE OUTSIDE TESTS: COPYING U1DB DATABASES
- # IS THE WRONG THING TO DO, THE ONLY REASON WE DO SO HERE IS TO TEST
- # THAT WE CORRECTLY DETECT IT HAPPENING SO THAT WE CAN RAISE ERRORS
- # RATHER THAN CORRUPT USER DATA. USE SYNC INSTEAD, OR WE WILL SEND
- # NINJA TO YOUR HOUSE.
- return self.copy_database_for_test(self, db)
-
- def setUp(self):
- super(DatabaseBaseTests, self).setUp()
- self.db = self.create_database('test')
-
- def tearDown(self):
- # TODO: Add close_database parameterization
- # self.close_database(self.db)
- super(DatabaseBaseTests, self).tearDown()
-
- def assertTransactionLog(self, doc_ids, db):
- """Assert that the given docs are in the transaction log."""
- log = db._get_transaction_log()
- just_ids = []
- seen_transactions = set()
- for doc_id, transaction_id in log:
- just_ids.append(doc_id)
- self.assertIsNot(None, transaction_id,
- "Transaction id should not be None")
- if transaction_id == '' and self.accept_fixed_trans_id:
- continue
- self.assertNotEqual('', transaction_id,
- "Transaction id should be a unique string")
- self.assertTrue(transaction_id.startswith('T-'))
- self.assertNotIn(transaction_id, seen_transactions)
- seen_transactions.add(transaction_id)
- self.assertEqual(doc_ids, just_ids)
-
- def getLastTransId(self, db):
- """Return the transaction id for the last database update."""
- return db._get_transaction_log()[-1][-1]
-
-
-class ServerStateForTests(server_state.ServerState):
- """Used in the test suite, so we don't have to touch disk, etc."""
-
- def __init__(self):
- super(ServerStateForTests, self).__init__()
- self._dbs = {}
-
- def open_database(self, path):
- try:
- return self._dbs[path]
- except KeyError:
- raise errors.DatabaseDoesNotExist
-
- def check_database(self, path):
- # cares only about the possible exception
- self.open_database(path)
-
- def ensure_database(self, path):
- try:
- db = self.open_database(path)
- except errors.DatabaseDoesNotExist:
- db = self._create_database(path)
- return db, db._replica_uid
-
- def _copy_database(self, db):
- # DO NOT COPY OR REUSE THIS CODE OUTSIDE TESTS: COPYING U1DB DATABASES
- # IS THE WRONG THING TO DO, THE ONLY REASON WE DO SO HERE IS TO TEST
- # THAT WE CORRECTLY DETECT IT HAPPENING SO THAT WE CAN RAISE ERRORS
- # RATHER THAN CORRUPT USER DATA. USE SYNC INSTEAD, OR WE WILL SEND
- # NINJA TO YOUR HOUSE.
- new_db = copy_memory_database_for_test(None, db)
- path = db._replica_uid
- while path in self._dbs:
- path += 'copy'
- self._dbs[path] = new_db
- return new_db
-
- def _create_database(self, path):
- db = inmemory.InMemoryDatabase(path)
- self._dbs[path] = db
- return db
-
- def delete_database(self, path):
- del self._dbs[path]
-
-
-class ResponderForTests(object):
- """Responder for tests."""
- _started = False
- sent_response = False
- status = None
-
- def start_response(self, status='success', **kwargs):
- self._started = True
- self.status = status
- self.kwargs = kwargs
-
- def send_response(self, status='success', **kwargs):
- self.start_response(status, **kwargs)
- self.finish_response()
-
- def finish_response(self):
- self.sent_response = True
-
-
-class TestCaseWithServer(TestCase):
-
- @staticmethod
- def server_def():
- # hook point
- # should return (ServerClass, "shutdown method name", "url_scheme")
- class _RequestHandler(simple_server.WSGIRequestHandler):
- def log_request(*args):
- pass # suppress
-
- def make_server(host_port, application):
- assert application, "forgot to override make_app(_with_state)?"
- srv = simple_server.WSGIServer(host_port, _RequestHandler)
- # patch the value in if it's None
- if getattr(application, 'base_url', 1) is None:
- application.base_url = "http://%s:%s" % srv.server_address
- srv.set_app(application)
- return srv
-
- return make_server, "shutdown", "http"
-
- @staticmethod
- def make_app_with_state(state):
- # hook point
- return None
-
- def make_app(self):
- # potential hook point
- self.request_state = ServerStateForTests()
- return self.make_app_with_state(self.request_state)
-
- def setUp(self):
- super(TestCaseWithServer, self).setUp()
- self.server = self.server_thread = None
-
- @property
- def url_scheme(self):
- return self.server_def()[-1]
-
- def startServer(self):
- server_def = self.server_def()
- server_class, shutdown_meth, _ = server_def
- application = self.make_app()
- self.server = server_class(('127.0.0.1', 0), application)
- self.server_thread = threading.Thread(target=self.server.serve_forever,
- kwargs=dict(poll_interval=0.01))
- self.server_thread.start()
- self.addCleanup(self.server_thread.join)
- self.addCleanup(getattr(self.server, shutdown_meth))
-
- def getURL(self, path=None):
- host, port = self.server.server_address
- if path is None:
- path = ''
- return '%s://%s:%s/%s' % (self.url_scheme, host, port, path)
-
-
-def socket_pair():
- """Return a pair of TCP sockets connected to each other.
-
- Unlike socket.socketpair, this should work on Windows.
- """
- sock_pair = getattr(socket, 'socket_pair', None)
- if sock_pair:
- return sock_pair(socket.AF_INET, socket.SOCK_STREAM)
- listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- listen_sock.bind(('127.0.0.1', 0))
- listen_sock.listen(1)
- client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- client_sock.connect(listen_sock.getsockname())
- server_sock, addr = listen_sock.accept()
- listen_sock.close()
- return server_sock, client_sock
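-
-# Example use (a sketch): the two ends are ordinary connected TCP sockets,
-# so bytes written to one end can be read from the other:
-#
-#   server_sock, client_sock = socket_pair()
-#   client_sock.sendall('ping')
-#   assert server_sock.recv(4) == 'ping'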
-
-
-# OAuth related testing
-
-consumer1 = oauth.OAuthConsumer('K1', 'S1')
-token1 = oauth.OAuthToken('kkkk1', 'XYZ')
-consumer2 = oauth.OAuthConsumer('K2', 'S2')
-token2 = oauth.OAuthToken('kkkk2', 'ZYX')
-token3 = oauth.OAuthToken('kkkk3', 'ZYX')
-
-
-class TestingOAuthDataStore(oauth.OAuthDataStore):
- """In memory predefined OAuthDataStore for testing."""
-
- consumers = {
- consumer1.key: consumer1,
- consumer2.key: consumer2,
- }
-
- tokens = {
- token1.key: token1,
- token2.key: token2
- }
-
- def lookup_consumer(self, key):
- return self.consumers.get(key)
-
- def lookup_token(self, token_type, token_token):
- return self.tokens.get(token_token)
-
- def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
- return None
-
-testingOAuthStore = TestingOAuthDataStore()
-
-sign_meth_HMAC_SHA1 = oauth.OAuthSignatureMethod_HMAC_SHA1()
-sign_meth_PLAINTEXT = oauth.OAuthSignatureMethod_PLAINTEXT()
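-
-# A minimal signing sketch using the fixtures above (assuming the classic
-# python-oauth API, where OAuthRequest.from_consumer_and_token() builds a
-# request and sign_request() fills in the oauth_signature parameter):
-#
-#   req = oauth.OAuthRequest.from_consumer_and_token(
-#       consumer1, token=token1, http_method='GET',
-#       http_url='http://127.0.0.1/test')
-#   req.sign_request(sign_meth_HMAC_SHA1, consumer1, token1)
-#   headers = req.to_header()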
-
-
-def load_with_scenarios(loader, standard_tests, pattern):
- """Load the tests in a given module.
-
- This just applies testscenarios.generate_scenarios to all the tests that
- are present. We do it at load time rather than at run time, because it
- plays nicer with various tools.
- """
- suite = loader.suiteClass()
- suite.addTests(testscenarios.generate_scenarios(standard_tests))
- return suite
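-
-# Test modules opt in by assigning this at module level (as the sqlcipher
-# tests above do):
-#
-#   load_tests = load_with_scenarios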
diff --git a/src/leap/soledad/tests/u1db_tests/test_backends.py b/src/leap/soledad/tests/u1db_tests/test_backends.py
deleted file mode 100644
index a53b01ba..00000000
--- a/src/leap/soledad/tests/u1db_tests/test_backends.py
+++ /dev/null
@@ -1,1907 +0,0 @@
-# Copyright 2011 Canonical Ltd.
-#
-# This file is part of u1db.
-#
-# u1db is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3
-# as published by the Free Software Foundation.
-#
-# u1db is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with u1db. If not, see <http://www.gnu.org/licenses/>.
-
-"""The backend class for U1DB. This deals with hiding storage details."""
-
-try:
- import simplejson as json
-except ImportError:
- import json # noqa
-from u1db import (
- DocumentBase,
- errors,
- vectorclock,
-)
-
-from leap.soledad.tests import u1db_tests as tests
-
-simple_doc = tests.simple_doc
-nested_doc = tests.nested_doc
-
-from leap.soledad.tests.u1db_tests.test_remote_sync_target import (
- make_http_app,
- make_oauth_http_app,
-)
-
-from u1db.remote import (
- http_database,
-)
-
-
-def make_http_database_for_test(test, replica_uid, path='test'):
- test.startServer()
- test.request_state._create_database(replica_uid)
- return http_database.HTTPDatabase(test.getURL(path))
-
-
-def copy_http_database_for_test(test, db):
- # DO NOT COPY OR REUSE THIS CODE OUTSIDE TESTS: COPYING U1DB DATABASES IS
- # THE WRONG THING TO DO, THE ONLY REASON WE DO SO HERE IS TO TEST THAT WE
- # CORRECTLY DETECT IT HAPPENING SO THAT WE CAN RAISE ERRORS RATHER THAN
- # CORRUPT USER DATA. USE SYNC INSTEAD, OR WE WILL SEND NINJA TO YOUR
- # HOUSE.
- return test.request_state._copy_database(db)
-
-
-def make_oauth_http_database_for_test(test, replica_uid):
- http_db = make_http_database_for_test(test, replica_uid, '~/test')
- http_db.set_oauth_credentials(tests.consumer1.key, tests.consumer1.secret,
- tests.token1.key, tests.token1.secret)
- return http_db
-
-
-def copy_oauth_http_database_for_test(test, db):
- # DO NOT COPY OR REUSE THIS CODE OUTSIDE TESTS: COPYING U1DB DATABASES IS
- # THE WRONG THING TO DO, THE ONLY REASON WE DO SO HERE IS TO TEST THAT WE
- # CORRECTLY DETECT IT HAPPENING SO THAT WE CAN RAISE ERRORS RATHER THAN
- # CORRUPT USER DATA. USE SYNC INSTEAD, OR WE WILL SEND NINJA TO YOUR
- # HOUSE.
- http_db = test.request_state._copy_database(db)
- http_db.set_oauth_credentials(tests.consumer1.key, tests.consumer1.secret,
- tests.token1.key, tests.token1.secret)
- return http_db
-
-
-class TestAlternativeDocument(DocumentBase):
- """A (not very) alternative implementation of Document."""
-
-
-class AllDatabaseTests(tests.DatabaseBaseTests, tests.TestCaseWithServer):
-
- scenarios = tests.LOCAL_DATABASES_SCENARIOS + [
- ('http', {'make_database_for_test': make_http_database_for_test,
- 'copy_database_for_test': copy_http_database_for_test,
- 'make_document_for_test': tests.make_document_for_test,
- 'make_app_with_state': make_http_app}),
- ('oauth_http', {'make_database_for_test':
- make_oauth_http_database_for_test,
- 'copy_database_for_test':
- copy_oauth_http_database_for_test,
- 'make_document_for_test': tests.make_document_for_test,
- 'make_app_with_state': make_oauth_http_app})
- ]
-
- def test_close(self):
- self.db.close()
-
- def test_create_doc_allocating_doc_id(self):
- doc = self.db.create_doc_from_json(simple_doc)
- self.assertNotEqual(None, doc.doc_id)
- self.assertNotEqual(None, doc.rev)
- self.assertGetDoc(self.db, doc.doc_id, doc.rev, simple_doc, False)
-
- def test_create_doc_different_ids_same_db(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- doc2 = self.db.create_doc_from_json(nested_doc)
- self.assertNotEqual(doc1.doc_id, doc2.doc_id)
-
- def test_create_doc_with_id(self):
- doc = self.db.create_doc_from_json(simple_doc, doc_id='my-id')
- self.assertEqual('my-id', doc.doc_id)
- self.assertNotEqual(None, doc.rev)
- self.assertGetDoc(self.db, doc.doc_id, doc.rev, simple_doc, False)
-
- def test_create_doc_existing_id(self):
- doc = self.db.create_doc_from_json(simple_doc)
- new_content = '{"something": "else"}'
- self.assertRaises(
- errors.RevisionConflict, self.db.create_doc_from_json,
- new_content, doc.doc_id)
- self.assertGetDoc(self.db, doc.doc_id, doc.rev, simple_doc, False)
-
- def test_put_doc_creating_initial(self):
- doc = self.make_document('my_doc_id', None, simple_doc)
- new_rev = self.db.put_doc(doc)
- self.assertIsNot(None, new_rev)
- self.assertGetDoc(self.db, 'my_doc_id', new_rev, simple_doc, False)
-
- def test_put_doc_space_in_id(self):
- doc = self.make_document('my doc id', None, simple_doc)
- self.assertRaises(errors.InvalidDocId, self.db.put_doc, doc)
-
- def test_put_doc_update(self):
- doc = self.db.create_doc_from_json(simple_doc, doc_id='my_doc_id')
- orig_rev = doc.rev
- doc.set_json('{"updated": "stuff"}')
- new_rev = self.db.put_doc(doc)
- self.assertNotEqual(new_rev, orig_rev)
- self.assertGetDoc(self.db, 'my_doc_id', new_rev,
- '{"updated": "stuff"}', False)
- self.assertEqual(doc.rev, new_rev)
-
- def test_put_non_ascii_key(self):
- content = json.dumps({u'key\xe5': u'val'})
- doc = self.db.create_doc_from_json(content, doc_id='my_doc')
- self.assertGetDoc(self.db, 'my_doc', doc.rev, content, False)
-
- def test_put_non_ascii_value(self):
- content = json.dumps({'key': u'\xe5'})
- doc = self.db.create_doc_from_json(content, doc_id='my_doc')
- self.assertGetDoc(self.db, 'my_doc', doc.rev, content, False)
-
- def test_put_doc_refuses_no_id(self):
- doc = self.make_document(None, None, simple_doc)
- self.assertRaises(errors.InvalidDocId, self.db.put_doc, doc)
- doc = self.make_document("", None, simple_doc)
- self.assertRaises(errors.InvalidDocId, self.db.put_doc, doc)
-
- def test_put_doc_refuses_slashes(self):
- doc = self.make_document('a/b', None, simple_doc)
- self.assertRaises(errors.InvalidDocId, self.db.put_doc, doc)
- doc = self.make_document(r'\b', None, simple_doc)
- self.assertRaises(errors.InvalidDocId, self.db.put_doc, doc)
-
- def test_put_doc_url_quoting_is_fine(self):
- doc_id = "%2F%2Ffoo%2Fbar"
- doc = self.make_document(doc_id, None, simple_doc)
- new_rev = self.db.put_doc(doc)
- self.assertGetDoc(self.db, doc_id, new_rev, simple_doc, False)
-
- def test_put_doc_refuses_non_existing_old_rev(self):
- doc = self.make_document('doc-id', 'test:4', simple_doc)
- self.assertRaises(errors.RevisionConflict, self.db.put_doc, doc)
-
- def test_put_doc_refuses_non_ascii_doc_id(self):
- doc = self.make_document('d\xc3\xa5c-id', None, simple_doc)
- self.assertRaises(errors.InvalidDocId, self.db.put_doc, doc)
-
- def test_put_fails_with_bad_old_rev(self):
- doc = self.db.create_doc_from_json(simple_doc, doc_id='my_doc_id')
- old_rev = doc.rev
- bad_doc = self.make_document(doc.doc_id, 'other:1',
- '{"something": "else"}')
- self.assertRaises(errors.RevisionConflict, self.db.put_doc, bad_doc)
- self.assertGetDoc(self.db, 'my_doc_id', old_rev, simple_doc, False)
-
- def test_create_succeeds_after_delete(self):
- doc = self.db.create_doc_from_json(simple_doc, doc_id='my_doc_id')
- self.db.delete_doc(doc)
- deleted_doc = self.db.get_doc('my_doc_id', include_deleted=True)
- deleted_vc = vectorclock.VectorClockRev(deleted_doc.rev)
- new_doc = self.db.create_doc_from_json(simple_doc, doc_id='my_doc_id')
- self.assertGetDoc(self.db, 'my_doc_id', new_doc.rev, simple_doc, False)
- new_vc = vectorclock.VectorClockRev(new_doc.rev)
- self.assertTrue(
- new_vc.is_newer(deleted_vc),
- "%s does not supersede %s" % (new_doc.rev, deleted_doc.rev))
-
- def test_put_succeeds_after_delete(self):
- doc = self.db.create_doc_from_json(simple_doc, doc_id='my_doc_id')
- self.db.delete_doc(doc)
- deleted_doc = self.db.get_doc('my_doc_id', include_deleted=True)
- deleted_vc = vectorclock.VectorClockRev(deleted_doc.rev)
- doc2 = self.make_document('my_doc_id', None, simple_doc)
- self.db.put_doc(doc2)
- self.assertGetDoc(self.db, 'my_doc_id', doc2.rev, simple_doc, False)
- new_vc = vectorclock.VectorClockRev(doc2.rev)
- self.assertTrue(
- new_vc.is_newer(deleted_vc),
- "%s does not supersede %s" % (doc2.rev, deleted_doc.rev))
-
- def test_get_doc_after_put(self):
- doc = self.db.create_doc_from_json(simple_doc, doc_id='my_doc_id')
- self.assertGetDoc(self.db, 'my_doc_id', doc.rev, simple_doc, False)
-
- def test_get_doc_nonexisting(self):
- self.assertIs(None, self.db.get_doc('non-existing'))
-
- def test_get_doc_deleted(self):
- doc = self.db.create_doc_from_json(simple_doc, doc_id='my_doc_id')
- self.db.delete_doc(doc)
- self.assertIs(None, self.db.get_doc('my_doc_id'))
-
- def test_get_doc_include_deleted(self):
- doc = self.db.create_doc_from_json(simple_doc, doc_id='my_doc_id')
- self.db.delete_doc(doc)
- self.assertGetDocIncludeDeleted(
- self.db, doc.doc_id, doc.rev, None, False)
-
- def test_get_docs(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- doc2 = self.db.create_doc_from_json(nested_doc)
- self.assertEqual([doc1, doc2],
- list(self.db.get_docs([doc1.doc_id, doc2.doc_id])))
-
- def test_get_docs_deleted(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- doc2 = self.db.create_doc_from_json(nested_doc)
- self.db.delete_doc(doc1)
- self.assertEqual([doc2],
- list(self.db.get_docs([doc1.doc_id, doc2.doc_id])))
-
- def test_get_docs_include_deleted(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- doc2 = self.db.create_doc_from_json(nested_doc)
- self.db.delete_doc(doc1)
- self.assertEqual(
- [doc1, doc2],
- list(self.db.get_docs([doc1.doc_id, doc2.doc_id],
- include_deleted=True)))
-
- def test_get_docs_request_ordered(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- doc2 = self.db.create_doc_from_json(nested_doc)
- self.assertEqual([doc1, doc2],
- list(self.db.get_docs([doc1.doc_id, doc2.doc_id])))
- self.assertEqual([doc2, doc1],
- list(self.db.get_docs([doc2.doc_id, doc1.doc_id])))
-
- def test_get_docs_empty_list(self):
- self.assertEqual([], list(self.db.get_docs([])))
-
- def test_handles_nested_content(self):
- doc = self.db.create_doc_from_json(nested_doc)
- self.assertGetDoc(self.db, doc.doc_id, doc.rev, nested_doc, False)
-
- def test_handles_doc_with_null(self):
- doc = self.db.create_doc_from_json('{"key": null}')
- self.assertGetDoc(self.db, doc.doc_id, doc.rev, '{"key": null}', False)
-
- def test_delete_doc(self):
- doc = self.db.create_doc_from_json(simple_doc)
- self.assertGetDoc(self.db, doc.doc_id, doc.rev, simple_doc, False)
- orig_rev = doc.rev
- self.db.delete_doc(doc)
- self.assertNotEqual(orig_rev, doc.rev)
- self.assertGetDocIncludeDeleted(
- self.db, doc.doc_id, doc.rev, None, False)
- self.assertIs(None, self.db.get_doc(doc.doc_id))
-
- def test_delete_doc_non_existent(self):
- doc = self.make_document('non-existing', 'other:1', simple_doc)
- self.assertRaises(errors.DocumentDoesNotExist, self.db.delete_doc, doc)
-
- def test_delete_doc_already_deleted(self):
- doc = self.db.create_doc_from_json(simple_doc)
- self.db.delete_doc(doc)
- self.assertRaises(errors.DocumentAlreadyDeleted,
- self.db.delete_doc, doc)
- self.assertGetDocIncludeDeleted(
- self.db, doc.doc_id, doc.rev, None, False)
-
- def test_delete_doc_bad_rev(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- self.assertGetDoc(self.db, doc1.doc_id, doc1.rev, simple_doc, False)
- doc2 = self.make_document(doc1.doc_id, 'other:1', simple_doc)
- self.assertRaises(errors.RevisionConflict, self.db.delete_doc, doc2)
- self.assertGetDoc(self.db, doc1.doc_id, doc1.rev, simple_doc, False)
-
- def test_delete_doc_sets_content_to_None(self):
- doc = self.db.create_doc_from_json(simple_doc)
- self.db.delete_doc(doc)
- self.assertIs(None, doc.get_json())
-
- def test_delete_doc_rev_supersedes(self):
- doc = self.db.create_doc_from_json(simple_doc)
- doc.set_json(nested_doc)
- self.db.put_doc(doc)
- doc.set_json('{"fishy": "content"}')
- self.db.put_doc(doc)
- old_rev = doc.rev
- self.db.delete_doc(doc)
- cur_vc = vectorclock.VectorClockRev(old_rev)
- deleted_vc = vectorclock.VectorClockRev(doc.rev)
- self.assertTrue(deleted_vc.is_newer(cur_vc),
- "%s does not supersede %s" % (doc.rev, old_rev))
-
- def test_delete_then_put(self):
- doc = self.db.create_doc_from_json(simple_doc)
- self.db.delete_doc(doc)
- self.assertGetDocIncludeDeleted(
- self.db, doc.doc_id, doc.rev, None, False)
- doc.set_json(nested_doc)
- self.db.put_doc(doc)
- self.assertGetDoc(self.db, doc.doc_id, doc.rev, nested_doc, False)
-
-
-class DocumentSizeTests(tests.DatabaseBaseTests):
-
- scenarios = tests.LOCAL_DATABASES_SCENARIOS
-
- def test_put_doc_refuses_oversized_documents(self):
- self.db.set_document_size_limit(1)
- doc = self.make_document('doc-id', None, simple_doc)
- self.assertRaises(errors.DocumentTooBig, self.db.put_doc, doc)
-
- def test_create_doc_refuses_oversized_documents(self):
- self.db.set_document_size_limit(1)
- self.assertRaises(
- errors.DocumentTooBig, self.db.create_doc_from_json, simple_doc,
- doc_id='my_doc_id')
-
- def test_set_document_size_limit_zero(self):
- self.db.set_document_size_limit(0)
- self.assertEqual(0, self.db.document_size_limit)
-
- def test_set_document_size_limit(self):
- self.db.set_document_size_limit(1000000)
- self.assertEqual(1000000, self.db.document_size_limit)
-
-
-class LocalDatabaseTests(tests.DatabaseBaseTests):
-
- scenarios = tests.LOCAL_DATABASES_SCENARIOS
-
- def test_create_doc_different_ids_diff_db(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- db2 = self.create_database('other-uid')
- doc2 = db2.create_doc_from_json(simple_doc)
- self.assertNotEqual(doc1.doc_id, doc2.doc_id)
-
- def test_put_doc_refuses_slashes_picky(self):
- doc = self.make_document('/a', None, simple_doc)
- self.assertRaises(errors.InvalidDocId, self.db.put_doc, doc)
-
- def test_get_all_docs_empty(self):
- self.assertEqual([], list(self.db.get_all_docs()[1]))
-
- def test_get_all_docs(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- doc2 = self.db.create_doc_from_json(nested_doc)
- self.assertEqual(
- sorted([doc1, doc2]), sorted(list(self.db.get_all_docs()[1])))
-
- def test_get_all_docs_exclude_deleted(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- doc2 = self.db.create_doc_from_json(nested_doc)
- self.db.delete_doc(doc2)
- self.assertEqual([doc1], list(self.db.get_all_docs()[1]))
-
- def test_get_all_docs_include_deleted(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- doc2 = self.db.create_doc_from_json(nested_doc)
- self.db.delete_doc(doc2)
- self.assertEqual(
- sorted([doc1, doc2]),
- sorted(list(self.db.get_all_docs(include_deleted=True)[1])))
-
- def test_get_all_docs_generation(self):
- self.db.create_doc_from_json(simple_doc)
- self.db.create_doc_from_json(nested_doc)
- self.assertEqual(2, self.db.get_all_docs()[0])
-
- def test_simple_put_doc_if_newer(self):
- doc = self.make_document('my-doc-id', 'test:1', simple_doc)
- state_at_gen = self.db._put_doc_if_newer(
- doc, save_conflict=False, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertEqual(('inserted', 1), state_at_gen)
- self.assertGetDoc(self.db, 'my-doc-id', 'test:1', simple_doc, False)
-
- def test_simple_put_doc_if_newer_deleted(self):
- self.db.create_doc_from_json('{}', doc_id='my-doc-id')
- doc = self.make_document('my-doc-id', 'test:2', None)
- state_at_gen = self.db._put_doc_if_newer(
- doc, save_conflict=False, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertEqual(('inserted', 2), state_at_gen)
- self.assertGetDocIncludeDeleted(
- self.db, 'my-doc-id', 'test:2', None, False)
-
- def test_put_doc_if_newer_already_superseded(self):
- orig_doc = '{"new": "doc"}'
- doc1 = self.db.create_doc_from_json(orig_doc)
- doc1_rev1 = doc1.rev
- doc1.set_json(simple_doc)
- self.db.put_doc(doc1)
- doc1_rev2 = doc1.rev
- # Nothing is inserted, because the document is already superseded
- doc = self.make_document(doc1.doc_id, doc1_rev1, orig_doc)
- state, _ = self.db._put_doc_if_newer(
- doc, save_conflict=False, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertEqual('superseded', state)
- self.assertGetDoc(self.db, doc1.doc_id, doc1_rev2, simple_doc, False)
-
- def test_put_doc_if_newer_autoresolve(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- rev = doc1.rev
- doc = self.make_document(doc1.doc_id, "whatever:1", doc1.get_json())
- state, _ = self.db._put_doc_if_newer(
- doc, save_conflict=False, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertEqual('superseded', state)
- doc2 = self.db.get_doc(doc1.doc_id)
- v2 = vectorclock.VectorClockRev(doc2.rev)
- self.assertTrue(v2.is_newer(vectorclock.VectorClockRev("whatever:1")))
- self.assertTrue(v2.is_newer(vectorclock.VectorClockRev(rev)))
- # strictly newer locally
- self.assertTrue(rev not in doc2.rev)
-
- def test_put_doc_if_newer_already_converged(self):
- orig_doc = '{"new": "doc"}'
- doc1 = self.db.create_doc_from_json(orig_doc)
- state_at_gen = self.db._put_doc_if_newer(
- doc1, save_conflict=False, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertEqual(('converged', 1), state_at_gen)
-
- def test_put_doc_if_newer_conflicted(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- # Nothing is inserted, the document id is returned as would-conflict
- alt_doc = self.make_document(doc1.doc_id, 'alternate:1', nested_doc)
- state, _ = self.db._put_doc_if_newer(
- alt_doc, save_conflict=False, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertEqual('conflicted', state)
- # The database wasn't altered
- self.assertGetDoc(self.db, doc1.doc_id, doc1.rev, simple_doc, False)
-
- def test_put_doc_if_newer_newer_generation(self):
- self.db._set_replica_gen_and_trans_id('other', 1, 'T-sid')
- doc = self.make_document('doc_id', 'other:2', simple_doc)
- state, _ = self.db._put_doc_if_newer(
- doc, save_conflict=False, replica_uid='other', replica_gen=2,
- replica_trans_id='T-irrelevant')
- self.assertEqual('inserted', state)
-
- def test_put_doc_if_newer_same_generation_same_txid(self):
- self.db._set_replica_gen_and_trans_id('other', 1, 'T-sid')
- doc = self.db.create_doc_from_json(simple_doc)
- self.make_document(doc.doc_id, 'other:1', simple_doc)
- state, _ = self.db._put_doc_if_newer(
- doc, save_conflict=False, replica_uid='other', replica_gen=1,
- replica_trans_id='T-sid')
- self.assertEqual('converged', state)
-
- def test_put_doc_if_newer_wrong_transaction_id(self):
- self.db._set_replica_gen_and_trans_id('other', 1, 'T-sid')
- doc = self.make_document('doc_id', 'other:1', simple_doc)
- self.assertRaises(
- errors.InvalidTransactionId,
- self.db._put_doc_if_newer, doc, save_conflict=False,
- replica_uid='other', replica_gen=1, replica_trans_id='T-sad')
-
- def test_put_doc_if_newer_old_generation_older_doc(self):
- orig_doc = '{"new": "doc"}'
- doc = self.db.create_doc_from_json(orig_doc)
- doc_rev1 = doc.rev
- doc.set_json(simple_doc)
- self.db.put_doc(doc)
- self.db._set_replica_gen_and_trans_id('other', 3, 'T-sid')
- older_doc = self.make_document(doc.doc_id, doc_rev1, simple_doc)
- state, _ = self.db._put_doc_if_newer(
- older_doc, save_conflict=False, replica_uid='other', replica_gen=8,
- replica_trans_id='T-irrelevant')
- self.assertEqual('superseded', state)
-
- def test_put_doc_if_newer_old_generation_newer_doc(self):
- self.db._set_replica_gen_and_trans_id('other', 5, 'T-sid')
- doc = self.make_document('doc_id', 'other:1', simple_doc)
- self.assertRaises(
- errors.InvalidGeneration,
- self.db._put_doc_if_newer, doc, save_conflict=False,
- replica_uid='other', replica_gen=1, replica_trans_id='T-sad')
-
- def test_put_doc_if_newer_replica_uid(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- self.db._set_replica_gen_and_trans_id('other', 1, 'T-sid')
- doc2 = self.make_document(doc1.doc_id, doc1.rev + '|other:1',
- nested_doc)
- self.assertEqual('inserted',
- self.db._put_doc_if_newer(
- doc2,
- save_conflict=False,
- replica_uid='other',
- replica_gen=2,
- replica_trans_id='T-id2')[0])
- self.assertEqual((2, 'T-id2'), self.db._get_replica_gen_and_trans_id(
- 'other'))
- # Compare to the old rev, should be superseded
- doc2 = self.make_document(doc1.doc_id, doc1.rev, nested_doc)
- self.assertEqual('superseded',
- self.db._put_doc_if_newer(
- doc2,
- save_conflict=False,
- replica_uid='other',
- replica_gen=3,
- replica_trans_id='T-id3')[0])
- self.assertEqual(
- (3, 'T-id3'), self.db._get_replica_gen_and_trans_id('other'))
- # A conflict that isn't saved still records the sync gen, because we
- # don't need to see it again
- doc2 = self.make_document(doc1.doc_id, doc1.rev + '|fourth:1',
- '{}')
- self.assertEqual('conflicted',
- self.db._put_doc_if_newer(
- doc2,
- save_conflict=False,
- replica_uid='other',
- replica_gen=4,
- replica_trans_id='T-id4')[0])
- self.assertEqual(
- (4, 'T-id4'), self.db._get_replica_gen_and_trans_id('other'))
-
- def test__get_replica_gen_and_trans_id(self):
- self.assertEqual(
- (0, ''), self.db._get_replica_gen_and_trans_id('other-db'))
- self.db._set_replica_gen_and_trans_id('other-db', 2, 'T-transaction')
- self.assertEqual(
- (2, 'T-transaction'),
- self.db._get_replica_gen_and_trans_id('other-db'))
-
- def test_put_updates_transaction_log(self):
- doc = self.db.create_doc_from_json(simple_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- doc.set_json('{"something": "else"}')
- self.db.put_doc(doc)
- self.assertTransactionLog([doc.doc_id, doc.doc_id], self.db)
- last_trans_id = self.getLastTransId(self.db)
- self.assertEqual((2, last_trans_id, [(doc.doc_id, 2, last_trans_id)]),
- self.db.whats_changed())
-
- def test_delete_updates_transaction_log(self):
- doc = self.db.create_doc_from_json(simple_doc)
- db_gen, _, _ = self.db.whats_changed()
- self.db.delete_doc(doc)
- last_trans_id = self.getLastTransId(self.db)
- self.assertEqual((2, last_trans_id, [(doc.doc_id, 2, last_trans_id)]),
- self.db.whats_changed(db_gen))
-
- def test_whats_changed_initial_database(self):
- self.assertEqual((0, '', []), self.db.whats_changed())
-
- def test_whats_changed_returns_one_id_for_multiple_changes(self):
- doc = self.db.create_doc_from_json(simple_doc)
- doc.set_json('{"new": "contents"}')
- self.db.put_doc(doc)
- last_trans_id = self.getLastTransId(self.db)
- self.assertEqual((2, last_trans_id, [(doc.doc_id, 2, last_trans_id)]),
- self.db.whats_changed())
- self.assertEqual((2, last_trans_id, []), self.db.whats_changed(2))
-
- def test_whats_changed_returns_last_edits_ascending(self):
- doc = self.db.create_doc_from_json(simple_doc)
- doc1 = self.db.create_doc_from_json(simple_doc)
- doc.set_json('{"new": "contents"}')
- self.db.delete_doc(doc1)
- delete_trans_id = self.getLastTransId(self.db)
- self.db.put_doc(doc)
- put_trans_id = self.getLastTransId(self.db)
- self.assertEqual((4, put_trans_id,
- [(doc1.doc_id, 3, delete_trans_id),
- (doc.doc_id, 4, put_trans_id)]),
- self.db.whats_changed())
-
- def test_whats_changed_doesnt_include_old_gen(self):
- self.db.create_doc_from_json(simple_doc)
- self.db.create_doc_from_json(simple_doc)
- doc2 = self.db.create_doc_from_json(simple_doc)
- last_trans_id = self.getLastTransId(self.db)
- self.assertEqual((3, last_trans_id, [(doc2.doc_id, 3, last_trans_id)]),
- self.db.whats_changed(2))
-
-
-class LocalDatabaseValidateGenNTransIdTests(tests.DatabaseBaseTests):
-
- scenarios = tests.LOCAL_DATABASES_SCENARIOS
-
- def test_validate_gen_and_trans_id(self):
- self.db.create_doc_from_json(simple_doc)
- gen, trans_id = self.db._get_generation_info()
- self.db.validate_gen_and_trans_id(gen, trans_id)
-
- def test_validate_gen_and_trans_id_invalid_txid(self):
- self.db.create_doc_from_json(simple_doc)
- gen, _ = self.db._get_generation_info()
- self.assertRaises(
- errors.InvalidTransactionId,
- self.db.validate_gen_and_trans_id, gen, 'wrong')
-
- def test_validate_gen_and_trans_id_invalid_gen(self):
- self.db.create_doc_from_json(simple_doc)
- gen, trans_id = self.db._get_generation_info()
- self.assertRaises(
- errors.InvalidGeneration,
- self.db.validate_gen_and_trans_id, gen + 1, trans_id)
-
-
-class LocalDatabaseValidateSourceGenTests(tests.DatabaseBaseTests):
-
- scenarios = tests.LOCAL_DATABASES_SCENARIOS
-
- def test_validate_source_gen_and_trans_id_same(self):
- self.db._set_replica_gen_and_trans_id('other', 1, 'T-sid')
- self.db._validate_source('other', 1, 'T-sid')
-
- def test_validate_source_gen_newer(self):
- self.db._set_replica_gen_and_trans_id('other', 1, 'T-sid')
- self.db._validate_source('other', 2, 'T-whatevs')
-
- def test_validate_source_wrong_txid(self):
- self.db._set_replica_gen_and_trans_id('other', 1, 'T-sid')
- self.assertRaises(
- errors.InvalidTransactionId,
- self.db._validate_source, 'other', 1, 'T-sad')
-
-
-class LocalDatabaseWithConflictsTests(tests.DatabaseBaseTests):
- # test supporting/functionality around storing conflicts
-
- scenarios = tests.LOCAL_DATABASES_SCENARIOS
-
- def test_get_docs_conflicted(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- doc2 = self.make_document(doc1.doc_id, 'alternate:1', nested_doc)
- self.db._put_doc_if_newer(
- doc2, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertEqual([doc2], list(self.db.get_docs([doc1.doc_id])))
-
- def test_get_docs_conflicts_ignored(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- doc2 = self.db.create_doc_from_json(nested_doc)
- alt_doc = self.make_document(doc1.doc_id, 'alternate:1', nested_doc)
- self.db._put_doc_if_newer(
- alt_doc, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- no_conflict_doc = self.make_document(doc1.doc_id, 'alternate:1',
- nested_doc)
- self.assertEqual([no_conflict_doc, doc2],
- list(self.db.get_docs([doc1.doc_id, doc2.doc_id],
- check_for_conflicts=False)))
-
- def test_get_doc_conflicts(self):
- doc = self.db.create_doc_from_json(simple_doc)
- alt_doc = self.make_document(doc.doc_id, 'alternate:1', nested_doc)
- self.db._put_doc_if_newer(
- alt_doc, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertEqual([alt_doc, doc],
- self.db.get_doc_conflicts(doc.doc_id))
-
- def test_get_all_docs_sees_conflicts(self):
- doc = self.db.create_doc_from_json(simple_doc)
- alt_doc = self.make_document(doc.doc_id, 'alternate:1', nested_doc)
- self.db._put_doc_if_newer(
- alt_doc, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- _, docs = self.db.get_all_docs()
- self.assertTrue(list(docs)[0].has_conflicts)
-
- def test_get_doc_conflicts_unconflicted(self):
- doc = self.db.create_doc_from_json(simple_doc)
- self.assertEqual([], self.db.get_doc_conflicts(doc.doc_id))
-
- def test_get_doc_conflicts_no_such_id(self):
- self.assertEqual([], self.db.get_doc_conflicts('doc-id'))
-
- def test_resolve_doc(self):
- doc = self.db.create_doc_from_json(simple_doc)
- alt_doc = self.make_document(doc.doc_id, 'alternate:1', nested_doc)
- self.db._put_doc_if_newer(
- alt_doc, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertGetDocConflicts(self.db, doc.doc_id,
- [('alternate:1', nested_doc),
- (doc.rev, simple_doc)])
- orig_rev = doc.rev
- self.db.resolve_doc(doc, [alt_doc.rev, doc.rev])
- self.assertNotEqual(orig_rev, doc.rev)
- self.assertFalse(doc.has_conflicts)
- self.assertGetDoc(self.db, doc.doc_id, doc.rev, simple_doc, False)
- self.assertGetDocConflicts(self.db, doc.doc_id, [])
-
- def test_resolve_doc_picks_biggest_vcr(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- doc2 = self.make_document(doc1.doc_id, 'alternate:1', nested_doc)
- self.db._put_doc_if_newer(
- doc2, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertGetDocConflicts(self.db, doc1.doc_id,
- [(doc2.rev, nested_doc),
- (doc1.rev, simple_doc)])
- orig_doc1_rev = doc1.rev
- self.db.resolve_doc(doc1, [doc2.rev, doc1.rev])
- self.assertFalse(doc1.has_conflicts)
- self.assertNotEqual(orig_doc1_rev, doc1.rev)
- self.assertGetDoc(self.db, doc1.doc_id, doc1.rev, simple_doc, False)
- self.assertGetDocConflicts(self.db, doc1.doc_id, [])
- vcr_1 = vectorclock.VectorClockRev(orig_doc1_rev)
- vcr_2 = vectorclock.VectorClockRev(doc2.rev)
- vcr_new = vectorclock.VectorClockRev(doc1.rev)
- self.assertTrue(vcr_new.is_newer(vcr_1))
- self.assertTrue(vcr_new.is_newer(vcr_2))
-
- def test_resolve_doc_partial_not_winning(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- doc2 = self.make_document(doc1.doc_id, 'alternate:1', nested_doc)
- self.db._put_doc_if_newer(
- doc2, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertGetDocConflicts(self.db, doc1.doc_id,
- [(doc2.rev, nested_doc),
- (doc1.rev, simple_doc)])
- content3 = '{"key": "valin3"}'
- doc3 = self.make_document(doc1.doc_id, 'third:1', content3)
- self.db._put_doc_if_newer(
- doc3, save_conflict=True, replica_uid='r', replica_gen=2,
- replica_trans_id='bar')
- self.assertGetDocConflicts(self.db, doc1.doc_id,
- [(doc3.rev, content3),
- (doc1.rev, simple_doc),
- (doc2.rev, nested_doc)])
- self.db.resolve_doc(doc1, [doc2.rev, doc1.rev])
- self.assertTrue(doc1.has_conflicts)
- self.assertGetDoc(self.db, doc1.doc_id, doc3.rev, content3, True)
- self.assertGetDocConflicts(self.db, doc1.doc_id,
- [(doc3.rev, content3),
- (doc1.rev, simple_doc)])
-
- def test_resolve_doc_partial_winning(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- doc2 = self.make_document(doc1.doc_id, 'alternate:1', nested_doc)
- self.db._put_doc_if_newer(
- doc2, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- content3 = '{"key": "valin3"}'
- doc3 = self.make_document(doc1.doc_id, 'third:1', content3)
- self.db._put_doc_if_newer(
- doc3, save_conflict=True, replica_uid='r', replica_gen=2,
- replica_trans_id='bar')
- self.assertGetDocConflicts(self.db, doc1.doc_id,
- [(doc3.rev, content3),
- (doc1.rev, simple_doc),
- (doc2.rev, nested_doc)])
- self.db.resolve_doc(doc1, [doc3.rev, doc1.rev])
- self.assertTrue(doc1.has_conflicts)
- self.assertGetDocConflicts(self.db, doc1.doc_id,
- [(doc1.rev, simple_doc),
- (doc2.rev, nested_doc)])
-
- def test_resolve_doc_with_delete_conflict(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- self.db.delete_doc(doc1)
- doc2 = self.make_document(doc1.doc_id, 'alternate:1', nested_doc)
- self.db._put_doc_if_newer(
- doc2, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertGetDocConflicts(self.db, doc1.doc_id,
- [(doc2.rev, nested_doc),
- (doc1.rev, None)])
- self.db.resolve_doc(doc2, [doc1.rev, doc2.rev])
- self.assertGetDocConflicts(self.db, doc1.doc_id, [])
- self.assertGetDoc(self.db, doc2.doc_id, doc2.rev, nested_doc, False)
-
- def test_resolve_doc_with_delete_to_delete(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- self.db.delete_doc(doc1)
- doc2 = self.make_document(doc1.doc_id, 'alternate:1', nested_doc)
- self.db._put_doc_if_newer(
- doc2, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertGetDocConflicts(self.db, doc1.doc_id,
- [(doc2.rev, nested_doc),
- (doc1.rev, None)])
- self.db.resolve_doc(doc1, [doc1.rev, doc2.rev])
- self.assertGetDocConflicts(self.db, doc1.doc_id, [])
- self.assertGetDocIncludeDeleted(
- self.db, doc1.doc_id, doc1.rev, None, False)
-
- def test_put_doc_if_newer_save_conflicted(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- # Document is inserted as a conflict
- doc2 = self.make_document(doc1.doc_id, 'alternate:1', nested_doc)
- state, _ = self.db._put_doc_if_newer(
- doc2, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertEqual('conflicted', state)
- # The database was updated
- self.assertGetDoc(self.db, doc1.doc_id, doc2.rev, nested_doc, True)
-
- def test_force_doc_conflict_supersedes_properly(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- doc2 = self.make_document(doc1.doc_id, 'alternate:1', '{"b": 1}')
- self.db._put_doc_if_newer(
- doc2, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- doc3 = self.make_document(doc1.doc_id, 'altalt:1', '{"c": 1}')
- self.db._put_doc_if_newer(
- doc3, save_conflict=True, replica_uid='r', replica_gen=2,
- replica_trans_id='bar')
- doc22 = self.make_document(doc1.doc_id, 'alternate:2', '{"b": 2}')
- self.db._put_doc_if_newer(
- doc22, save_conflict=True, replica_uid='r', replica_gen=3,
- replica_trans_id='zed')
- self.assertGetDocConflicts(self.db, doc1.doc_id,
- [('alternate:2', doc22.get_json()),
- ('altalt:1', doc3.get_json()),
- (doc1.rev, simple_doc)])
-
- def test_put_doc_if_newer_save_conflict_was_deleted(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- self.db.delete_doc(doc1)
- doc2 = self.make_document(doc1.doc_id, 'alternate:1', nested_doc)
- self.db._put_doc_if_newer(
- doc2, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertTrue(doc2.has_conflicts)
- self.assertGetDoc(
- self.db, doc1.doc_id, 'alternate:1', nested_doc, True)
- self.assertGetDocConflicts(self.db, doc1.doc_id,
- [('alternate:1', nested_doc),
- (doc1.rev, None)])
-
- def test_put_doc_if_newer_propagates_full_resolution(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- doc2 = self.make_document(doc1.doc_id, 'alternate:1', nested_doc)
- self.db._put_doc_if_newer(
- doc2, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- resolved_vcr = vectorclock.VectorClockRev(doc1.rev)
- vcr_2 = vectorclock.VectorClockRev(doc2.rev)
- resolved_vcr.maximize(vcr_2)
- resolved_vcr.increment('alternate')
- doc_resolved = self.make_document(doc1.doc_id, resolved_vcr.as_str(),
- '{"good": 1}')
- state, _ = self.db._put_doc_if_newer(
- doc_resolved, save_conflict=True, replica_uid='r', replica_gen=2,
- replica_trans_id='foo2')
- self.assertEqual('inserted', state)
- self.assertFalse(doc_resolved.has_conflicts)
- self.assertGetDocConflicts(self.db, doc1.doc_id, [])
- doc3 = self.db.get_doc(doc1.doc_id)
- self.assertFalse(doc3.has_conflicts)
-
- def test_put_doc_if_newer_propagates_partial_resolution(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- doc2 = self.make_document(doc1.doc_id, 'altalt:1', '{}')
- self.db._put_doc_if_newer(
- doc2, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- doc3 = self.make_document(doc1.doc_id, 'alternate:1', nested_doc)
- self.db._put_doc_if_newer(
- doc3, save_conflict=True, replica_uid='r', replica_gen=2,
- replica_trans_id='foo2')
- self.assertGetDocConflicts(self.db, doc1.doc_id,
- [('alternate:1', nested_doc),
- ('test:1', simple_doc),
- ('altalt:1', '{}')])
- resolved_vcr = vectorclock.VectorClockRev(doc1.rev)
- vcr_3 = vectorclock.VectorClockRev(doc3.rev)
- resolved_vcr.maximize(vcr_3)
- resolved_vcr.increment('alternate')
- doc_resolved = self.make_document(doc1.doc_id, resolved_vcr.as_str(),
- '{"good": 1}')
- state, _ = self.db._put_doc_if_newer(
- doc_resolved, save_conflict=True, replica_uid='r', replica_gen=3,
- replica_trans_id='foo3')
- self.assertEqual('inserted', state)
- self.assertTrue(doc_resolved.has_conflicts)
- doc4 = self.db.get_doc(doc1.doc_id)
- self.assertTrue(doc4.has_conflicts)
- self.assertGetDocConflicts(self.db, doc1.doc_id,
- [('alternate:2|test:1', '{"good": 1}'),
- ('altalt:1', '{}')])
-
- def test_put_doc_if_newer_replica_uid(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- self.db._set_replica_gen_and_trans_id('other', 1, 'T-id')
- doc2 = self.make_document(doc1.doc_id, doc1.rev + '|other:1',
- nested_doc)
- self.db._put_doc_if_newer(doc2, save_conflict=True,
- replica_uid='other', replica_gen=2,
- replica_trans_id='T-id2')
- # Conflict vs the current update
- doc2 = self.make_document(doc1.doc_id, doc1.rev + '|third:3',
- '{}')
- self.assertEqual('conflicted',
- self.db._put_doc_if_newer(
- doc2,
- save_conflict=True,
- replica_uid='other',
- replica_gen=3,
- replica_trans_id='T-id3')[0])
- self.assertEqual(
- (3, 'T-id3'), self.db._get_replica_gen_and_trans_id('other'))
-
- def test_put_doc_if_newer_autoresolve_2(self):
- # this is an ordering variant of _3, but that already works
- # adding the test explicitly to catch the regression easily
- doc_a1 = self.db.create_doc_from_json(simple_doc)
- doc_a2 = self.make_document(doc_a1.doc_id, 'test:2', "{}")
- doc_a1b1 = self.make_document(doc_a1.doc_id, 'test:1|other:1',
- '{"a":"42"}')
- doc_a3 = self.make_document(doc_a1.doc_id, 'test:2|other:1', "{}")
- state, _ = self.db._put_doc_if_newer(
- doc_a2, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertEqual(state, 'inserted')
- state, _ = self.db._put_doc_if_newer(
- doc_a1b1, save_conflict=True, replica_uid='r', replica_gen=2,
- replica_trans_id='foo2')
- self.assertEqual(state, 'conflicted')
- state, _ = self.db._put_doc_if_newer(
- doc_a3, save_conflict=True, replica_uid='r', replica_gen=3,
- replica_trans_id='foo3')
- self.assertEqual(state, 'inserted')
- self.assertFalse(self.db.get_doc(doc_a1.doc_id).has_conflicts)
-
- def test_put_doc_if_newer_autoresolve_3(self):
- doc_a1 = self.db.create_doc_from_json(simple_doc)
- doc_a1b1 = self.make_document(doc_a1.doc_id, 'test:1|other:1', "{}")
- doc_a2 = self.make_document(doc_a1.doc_id, 'test:2', '{"a":"42"}')
- doc_a3 = self.make_document(doc_a1.doc_id, 'test:3', "{}")
- state, _ = self.db._put_doc_if_newer(
- doc_a1b1, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertEqual(state, 'inserted')
- state, _ = self.db._put_doc_if_newer(
- doc_a2, save_conflict=True, replica_uid='r', replica_gen=2,
- replica_trans_id='foo2')
- self.assertEqual(state, 'conflicted')
- state, _ = self.db._put_doc_if_newer(
- doc_a3, save_conflict=True, replica_uid='r', replica_gen=3,
- replica_trans_id='foo3')
- self.assertEqual(state, 'superseded')
- doc = self.db.get_doc(doc_a1.doc_id, True)
- self.assertFalse(doc.has_conflicts)
- rev = vectorclock.VectorClockRev(doc.rev)
- rev_a3 = vectorclock.VectorClockRev('test:3')
- rev_a1b1 = vectorclock.VectorClockRev('test:1|other:1')
- self.assertTrue(rev.is_newer(rev_a3))
- self.assertTrue('test:4' in doc.rev) # locally increased
- self.assertTrue(rev.is_newer(rev_a1b1))
-
- def test_put_doc_if_newer_autoresolve_4(self):
- doc_a1 = self.db.create_doc_from_json(simple_doc)
- doc_a1b1 = self.make_document(doc_a1.doc_id, 'test:1|other:1', None)
- doc_a2 = self.make_document(doc_a1.doc_id, 'test:2', '{"a":"42"}')
- doc_a3 = self.make_document(doc_a1.doc_id, 'test:3', None)
- state, _ = self.db._put_doc_if_newer(
- doc_a1b1, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertEqual(state, 'inserted')
- state, _ = self.db._put_doc_if_newer(
- doc_a2, save_conflict=True, replica_uid='r', replica_gen=2,
- replica_trans_id='foo2')
- self.assertEqual(state, 'conflicted')
- state, _ = self.db._put_doc_if_newer(
- doc_a3, save_conflict=True, replica_uid='r', replica_gen=3,
- replica_trans_id='foo3')
- self.assertEqual(state, 'superseded')
- doc = self.db.get_doc(doc_a1.doc_id, True)
- self.assertFalse(doc.has_conflicts)
- rev = vectorclock.VectorClockRev(doc.rev)
- rev_a3 = vectorclock.VectorClockRev('test:3')
- rev_a1b1 = vectorclock.VectorClockRev('test:1|other:1')
- self.assertTrue(rev.is_newer(rev_a3))
- self.assertTrue('test:4' in doc.rev) # locally increased
- self.assertTrue(rev.is_newer(rev_a1b1))
-
- def test_put_refuses_to_update_conflicted(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- content2 = '{"key": "altval"}'
- doc2 = self.make_document(doc1.doc_id, 'altrev:1', content2)
- self.db._put_doc_if_newer(
- doc2, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertGetDoc(self.db, doc1.doc_id, doc2.rev, content2, True)
- content3 = '{"key": "local"}'
- doc2.set_json(content3)
- self.assertRaises(errors.ConflictedDoc, self.db.put_doc, doc2)
-
- def test_delete_refuses_for_conflicted(self):
- doc1 = self.db.create_doc_from_json(simple_doc)
- doc2 = self.make_document(doc1.doc_id, 'altrev:1', nested_doc)
- self.db._put_doc_if_newer(
- doc2, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertGetDoc(self.db, doc2.doc_id, doc2.rev, nested_doc, True)
- self.assertRaises(errors.ConflictedDoc, self.db.delete_doc, doc2)
-
-
-class DatabaseIndexTests(tests.DatabaseBaseTests):
-
- scenarios = tests.LOCAL_DATABASES_SCENARIOS
-
- def assertParseError(self, definition):
- self.db.create_doc_from_json(nested_doc)
- self.assertRaises(
- errors.IndexDefinitionParseError, self.db.create_index, 'idx',
- definition)
-
- def assertIndexCreatable(self, definition):
- name = "idx"
- self.db.create_doc_from_json(nested_doc)
- self.db.create_index(name, definition)
- self.assertEqual(
- [(name, [definition])], self.db.list_indexes())
-
- def test_create_index(self):
- self.db.create_index('test-idx', 'name')
- self.assertEqual([('test-idx', ['name'])],
- self.db.list_indexes())
-
- def test_create_index_on_non_ascii_field_name(self):
- doc = self.db.create_doc_from_json(json.dumps({u'\xe5': 'value'}))
- self.db.create_index('test-idx', u'\xe5')
- self.assertEqual([doc], self.db.get_from_index('test-idx', 'value'))
-
- def test_list_indexes_with_non_ascii_field_names(self):
- self.db.create_index('test-idx', u'\xe5')
- self.assertEqual(
- [('test-idx', [u'\xe5'])], self.db.list_indexes())
-
- def test_create_index_evaluates_it(self):
- doc = self.db.create_doc_from_json(simple_doc)
- self.db.create_index('test-idx', 'key')
- self.assertEqual([doc], self.db.get_from_index('test-idx', 'value'))
-
- def test_wildcard_matches_unicode_value(self):
- doc = self.db.create_doc_from_json(json.dumps({"key": u"valu\xe5"}))
- self.db.create_index('test-idx', 'key')
- self.assertEqual([doc], self.db.get_from_index('test-idx', '*'))
-
- def test_retrieve_unicode_value_from_index(self):
- doc = self.db.create_doc_from_json(json.dumps({"key": u"valu\xe5"}))
- self.db.create_index('test-idx', 'key')
- self.assertEqual(
- [doc], self.db.get_from_index('test-idx', u"valu\xe5"))
-
- def test_create_index_fails_if_name_taken(self):
- self.db.create_index('test-idx', 'key')
- self.assertRaises(errors.IndexNameTakenError,
- self.db.create_index,
- 'test-idx', 'stuff')
-
- def test_create_index_does_not_fail_if_name_taken_with_same_index(self):
- self.db.create_index('test-idx', 'key')
- self.db.create_index('test-idx', 'key')
- self.assertEqual([('test-idx', ['key'])], self.db.list_indexes())
-
- def test_create_index_does_not_duplicate_indexed_fields(self):
- self.db.create_doc_from_json(simple_doc)
- self.db.create_index('test-idx', 'key')
- self.db.delete_index('test-idx')
- self.db.create_index('test-idx', 'key')
- self.assertEqual(1, len(self.db.get_from_index('test-idx', 'value')))
-
- def test_delete_index_does_not_remove_fields_from_other_indexes(self):
- self.db.create_doc_from_json(simple_doc)
- self.db.create_index('test-idx', 'key')
- self.db.create_index('test-idx2', 'key')
- self.db.delete_index('test-idx')
- self.assertEqual(1, len(self.db.get_from_index('test-idx2', 'value')))
-
- def test_create_index_after_deleting_document(self):
- doc = self.db.create_doc_from_json(simple_doc)
- doc2 = self.db.create_doc_from_json(simple_doc)
- self.db.delete_doc(doc2)
- self.db.create_index('test-idx', 'key')
- self.assertEqual([doc], self.db.get_from_index('test-idx', 'value'))
-
- def test_delete_index(self):
- self.db.create_index('test-idx', 'key')
- self.assertEqual([('test-idx', ['key'])], self.db.list_indexes())
- self.db.delete_index('test-idx')
- self.assertEqual([], self.db.list_indexes())
-
- def test_create_adds_to_index(self):
- self.db.create_index('test-idx', 'key')
- doc = self.db.create_doc_from_json(simple_doc)
- self.assertEqual([doc], self.db.get_from_index('test-idx', 'value'))
-
- def test_get_from_index_unmatched(self):
- self.db.create_doc_from_json(simple_doc)
- self.db.create_index('test-idx', 'key')
- self.assertEqual([], self.db.get_from_index('test-idx', 'novalue'))
-
- def test_create_index_multiple_exact_matches(self):
- doc = self.db.create_doc_from_json(simple_doc)
- doc2 = self.db.create_doc_from_json(simple_doc)
- self.db.create_index('test-idx', 'key')
- self.assertEqual(
- sorted([doc, doc2]),
- sorted(self.db.get_from_index('test-idx', 'value')))
-
- def test_get_from_index(self):
- doc = self.db.create_doc_from_json(simple_doc)
- self.db.create_index('test-idx', 'key')
- self.assertEqual([doc], self.db.get_from_index('test-idx', 'value'))
-
- def test_get_from_index_multi(self):
- content = '{"key": "value", "key2": "value2"}'
- doc = self.db.create_doc_from_json(content)
- self.db.create_index('test-idx', 'key', 'key2')
- self.assertEqual(
- [doc], self.db.get_from_index('test-idx', 'value', 'value2'))
-
- def test_get_from_index_multi_list(self):
- doc = self.db.create_doc_from_json(
- '{"key": "value", "key2": ["value2-1", "value2-2", "value2-3"]}')
- self.db.create_index('test-idx', 'key', 'key2')
- self.assertEqual(
- [doc], self.db.get_from_index('test-idx', 'value', 'value2-1'))
- self.assertEqual(
- [doc], self.db.get_from_index('test-idx', 'value', 'value2-2'))
- self.assertEqual(
- [doc], self.db.get_from_index('test-idx', 'value', 'value2-3'))
- self.assertEqual(
- [('value', 'value2-1'), ('value', 'value2-2'),
- ('value', 'value2-3')],
- sorted(self.db.get_index_keys('test-idx')))
-
- def test_get_from_index_sees_conflicts(self):
- doc = self.db.create_doc_from_json(simple_doc)
- self.db.create_index('test-idx', 'key', 'key2')
- alt_doc = self.make_document(
- doc.doc_id, 'alternate:1',
- '{"key": "value", "key2": ["value2-1", "value2-2", "value2-3"]}')
- self.db._put_doc_if_newer(
- alt_doc, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- docs = self.db.get_from_index('test-idx', 'value', 'value2-1')
- self.assertTrue(docs[0].has_conflicts)
-
- def test_get_index_keys_multi_list_list(self):
- self.db.create_doc_from_json(
- '{"key": "value1-1 value1-2 value1-3", '
- '"key2": ["value2-1", "value2-2", "value2-3"]}')
- self.db.create_index('test-idx', 'split_words(key)', 'key2')
- self.assertEqual(
- [(u'value1-1', u'value2-1'), (u'value1-1', u'value2-2'),
- (u'value1-1', u'value2-3'), (u'value1-2', u'value2-1'),
- (u'value1-2', u'value2-2'), (u'value1-2', u'value2-3'),
- (u'value1-3', u'value2-1'), (u'value1-3', u'value2-2'),
- (u'value1-3', u'value2-3')],
- sorted(self.db.get_index_keys('test-idx')))
-
- def test_get_from_index_multi_ordered(self):
- doc1 = self.db.create_doc_from_json(
- '{"key": "value3", "key2": "value4"}')
- doc2 = self.db.create_doc_from_json(
- '{"key": "value2", "key2": "value3"}')
- doc3 = self.db.create_doc_from_json(
- '{"key": "value2", "key2": "value2"}')
- doc4 = self.db.create_doc_from_json(
- '{"key": "value1", "key2": "value1"}')
- self.db.create_index('test-idx', 'key', 'key2')
- self.assertEqual(
- [doc4, doc3, doc2, doc1],
- self.db.get_from_index('test-idx', 'v*', '*'))
-
- def test_get_range_from_index_start_end(self):
- doc1 = self.db.create_doc_from_json('{"key": "value3"}')
- doc2 = self.db.create_doc_from_json('{"key": "value2"}')
- self.db.create_doc_from_json('{"key": "value4"}')
- self.db.create_doc_from_json('{"key": "value1"}')
- self.db.create_index('test-idx', 'key')
- self.assertEqual(
- [doc2, doc1],
- self.db.get_range_from_index('test-idx', 'value2', 'value3'))
-
- def test_get_range_from_index_start(self):
- doc1 = self.db.create_doc_from_json('{"key": "value3"}')
- doc2 = self.db.create_doc_from_json('{"key": "value2"}')
- doc3 = self.db.create_doc_from_json('{"key": "value4"}')
- self.db.create_doc_from_json('{"key": "value1"}')
- self.db.create_index('test-idx', 'key')
- self.assertEqual(
- [doc2, doc1, doc3],
- self.db.get_range_from_index('test-idx', 'value2'))
-
- def test_get_range_from_index_sees_conflicts(self):
- doc = self.db.create_doc_from_json(simple_doc)
- self.db.create_index('test-idx', 'key')
- alt_doc = self.make_document(
- doc.doc_id, 'alternate:1', '{"key": "valuedepalue"}')
- self.db._put_doc_if_newer(
- alt_doc, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- docs = self.db.get_range_from_index('test-idx', 'a')
- self.assertTrue(docs[0].has_conflicts)
-
- def test_get_range_from_index_end(self):
- self.db.create_doc_from_json('{"key": "value3"}')
- doc2 = self.db.create_doc_from_json('{"key": "value2"}')
- self.db.create_doc_from_json('{"key": "value4"}')
- doc4 = self.db.create_doc_from_json('{"key": "value1"}')
- self.db.create_index('test-idx', 'key')
- self.assertEqual(
- [doc4, doc2],
- self.db.get_range_from_index('test-idx', None, 'value2'))
-
- def test_get_wildcard_range_from_index_start(self):
- doc1 = self.db.create_doc_from_json('{"key": "value4"}')
- doc2 = self.db.create_doc_from_json('{"key": "value23"}')
- doc3 = self.db.create_doc_from_json('{"key": "value2"}')
- doc4 = self.db.create_doc_from_json('{"key": "value22"}')
- self.db.create_doc_from_json('{"key": "value1"}')
- self.db.create_index('test-idx', 'key')
- self.assertEqual(
- [doc3, doc4, doc2, doc1],
- self.db.get_range_from_index('test-idx', 'value2*'))
-
- def test_get_wildcard_range_from_index_end(self):
- self.db.create_doc_from_json('{"key": "value4"}')
- doc2 = self.db.create_doc_from_json('{"key": "value23"}')
- doc3 = self.db.create_doc_from_json('{"key": "value2"}')
- doc4 = self.db.create_doc_from_json('{"key": "value22"}')
- doc5 = self.db.create_doc_from_json('{"key": "value1"}')
- self.db.create_index('test-idx', 'key')
- self.assertEqual(
- [doc5, doc3, doc4, doc2],
- self.db.get_range_from_index('test-idx', None, 'value2*'))
-
- def test_get_wildcard_range_from_index_start_end(self):
- self.db.create_doc_from_json('{"key": "a"}')
- self.db.create_doc_from_json('{"key": "boo3"}')
- doc3 = self.db.create_doc_from_json('{"key": "catalyst"}')
- doc4 = self.db.create_doc_from_json('{"key": "whatever"}')
- self.db.create_doc_from_json('{"key": "zerg"}')
- self.db.create_index('test-idx', 'key')
- self.assertEqual(
- [doc3, doc4],
- self.db.get_range_from_index('test-idx', 'cat*', 'zap*'))
-
- def test_get_range_from_index_multi_column_start_end(self):
- self.db.create_doc_from_json('{"key": "value3", "key2": "value4"}')
- doc2 = self.db.create_doc_from_json(
- '{"key": "value2", "key2": "value3"}')
- doc3 = self.db.create_doc_from_json(
- '{"key": "value2", "key2": "value2"}')
- self.db.create_doc_from_json('{"key": "value1", "key2": "value1"}')
- self.db.create_index('test-idx', 'key', 'key2')
- self.assertEqual(
- [doc3, doc2],
- self.db.get_range_from_index(
- 'test-idx', ('value2', 'value2'), ('value2', 'value3')))
-
- def test_get_range_from_index_multi_column_start(self):
- doc1 = self.db.create_doc_from_json(
- '{"key": "value3", "key2": "value4"}')
- doc2 = self.db.create_doc_from_json(
- '{"key": "value2", "key2": "value3"}')
- self.db.create_doc_from_json('{"key": "value2", "key2": "value2"}')
- self.db.create_doc_from_json('{"key": "value1", "key2": "value1"}')
- self.db.create_index('test-idx', 'key', 'key2')
- self.assertEqual(
- [doc2, doc1],
- self.db.get_range_from_index('test-idx', ('value2', 'value3')))
-
- def test_get_range_from_index_multi_column_end(self):
- self.db.create_doc_from_json('{"key": "value3", "key2": "value4"}')
- doc2 = self.db.create_doc_from_json(
- '{"key": "value2", "key2": "value3"}')
- doc3 = self.db.create_doc_from_json(
- '{"key": "value2", "key2": "value2"}')
- doc4 = self.db.create_doc_from_json(
- '{"key": "value1", "key2": "value1"}')
- self.db.create_index('test-idx', 'key', 'key2')
- self.assertEqual(
- [doc4, doc3, doc2],
- self.db.get_range_from_index(
- 'test-idx', None, ('value2', 'value3')))
-
- def test_get_wildcard_range_from_index_multi_column_start(self):
- doc1 = self.db.create_doc_from_json(
- '{"key": "value3", "key2": "value4"}')
- doc2 = self.db.create_doc_from_json(
- '{"key": "value2", "key2": "value23"}')
- doc3 = self.db.create_doc_from_json(
- '{"key": "value2", "key2": "value2"}')
- self.db.create_doc_from_json('{"key": "value1", "key2": "value1"}')
- self.db.create_index('test-idx', 'key', 'key2')
- self.assertEqual(
- [doc3, doc2, doc1],
- self.db.get_range_from_index('test-idx', ('value2', 'value2*')))
-
- def test_get_wildcard_range_from_index_multi_column_end(self):
- self.db.create_doc_from_json('{"key": "value3", "key2": "value4"}')
- doc2 = self.db.create_doc_from_json(
- '{"key": "value2", "key2": "value23"}')
- doc3 = self.db.create_doc_from_json(
- '{"key": "value2", "key2": "value2"}')
- doc4 = self.db.create_doc_from_json(
- '{"key": "value1", "key2": "value1"}')
- self.db.create_index('test-idx', 'key', 'key2')
- self.assertEqual(
- [doc4, doc3, doc2],
- self.db.get_range_from_index(
- 'test-idx', None, ('value2', 'value2*')))
-
- def test_get_glob_range_from_index_multi_column_start(self):
- doc1 = self.db.create_doc_from_json(
- '{"key": "value3", "key2": "value4"}')
- doc2 = self.db.create_doc_from_json(
- '{"key": "value2", "key2": "value23"}')
- self.db.create_doc_from_json('{"key": "value1", "key2": "value2"}')
- self.db.create_doc_from_json('{"key": "value1", "key2": "value1"}')
- self.db.create_index('test-idx', 'key', 'key2')
- self.assertEqual(
- [doc2, doc1],
- self.db.get_range_from_index('test-idx', ('value2', '*')))
-
- def test_get_glob_range_from_index_multi_column_end(self):
- self.db.create_doc_from_json('{"key": "value3", "key2": "value4"}')
- doc2 = self.db.create_doc_from_json(
- '{"key": "value2", "key2": "value23"}')
- doc3 = self.db.create_doc_from_json(
- '{"key": "value1", "key2": "value2"}')
- doc4 = self.db.create_doc_from_json(
- '{"key": "value1", "key2": "value1"}')
- self.db.create_index('test-idx', 'key', 'key2')
- self.assertEqual(
- [doc4, doc3, doc2],
- self.db.get_range_from_index('test-idx', None, ('value2', '*')))
-
- def test_get_range_from_index_illegal_wildcard_order(self):
- self.db.create_index('test-idx', 'k1', 'k2')
- self.assertRaises(
- errors.InvalidGlobbing,
- self.db.get_range_from_index, 'test-idx', ('*', 'v2'))
-
- def test_get_range_from_index_illegal_glob_after_wildcard(self):
- self.db.create_index('test-idx', 'k1', 'k2')
- self.assertRaises(
- errors.InvalidGlobbing,
- self.db.get_range_from_index, 'test-idx', ('*', 'v*'))
-
- def test_get_range_from_index_illegal_wildcard_order_end(self):
- self.db.create_index('test-idx', 'k1', 'k2')
- self.assertRaises(
- errors.InvalidGlobbing,
- self.db.get_range_from_index, 'test-idx', None, ('*', 'v2'))
-
- def test_get_range_from_index_illegal_glob_after_wildcard_end(self):
- self.db.create_index('test-idx', 'k1', 'k2')
- self.assertRaises(
- errors.InvalidGlobbing,
- self.db.get_range_from_index, 'test-idx', None, ('*', 'v*'))
-
- def test_get_from_index_fails_if_no_index(self):
- self.assertRaises(
- errors.IndexDoesNotExist, self.db.get_from_index, 'foo')
-
- def test_get_index_keys_fails_if_no_index(self):
- self.assertRaises(errors.IndexDoesNotExist,
- self.db.get_index_keys,
- 'foo')
-
- def test_get_index_keys_works_if_no_docs(self):
- self.db.create_index('test-idx', 'key')
- self.assertEqual([], self.db.get_index_keys('test-idx'))
-
- def test_put_updates_index(self):
- doc = self.db.create_doc_from_json(simple_doc)
- self.db.create_index('test-idx', 'key')
- new_content = '{"key": "altval"}'
- doc.set_json(new_content)
- self.db.put_doc(doc)
- self.assertEqual([], self.db.get_from_index('test-idx', 'value'))
- self.assertEqual([doc], self.db.get_from_index('test-idx', 'altval'))
-
- def test_delete_updates_index(self):
- doc = self.db.create_doc_from_json(simple_doc)
- doc2 = self.db.create_doc_from_json(simple_doc)
- self.db.create_index('test-idx', 'key')
- self.assertEqual(
- sorted([doc, doc2]),
- sorted(self.db.get_from_index('test-idx', 'value')))
- self.db.delete_doc(doc)
- self.assertEqual([doc2], self.db.get_from_index('test-idx', 'value'))
-
- def test_get_from_index_illegal_number_of_entries(self):
- self.db.create_index('test-idx', 'k1', 'k2')
- self.assertRaises(
- errors.InvalidValueForIndex, self.db.get_from_index, 'test-idx')
- self.assertRaises(
- errors.InvalidValueForIndex,
- self.db.get_from_index, 'test-idx', 'v1')
- self.assertRaises(
- errors.InvalidValueForIndex,
- self.db.get_from_index, 'test-idx', 'v1', 'v2', 'v3')
-
- def test_get_from_index_illegal_wildcard_order(self):
- self.db.create_index('test-idx', 'k1', 'k2')
- self.assertRaises(
- errors.InvalidGlobbing,
- self.db.get_from_index, 'test-idx', '*', 'v2')
-
- def test_get_from_index_illegal_glob_after_wildcard(self):
- self.db.create_index('test-idx', 'k1', 'k2')
- self.assertRaises(
- errors.InvalidGlobbing,
- self.db.get_from_index, 'test-idx', '*', 'v*')
-
- def test_get_all_from_index(self):
- self.db.create_index('test-idx', 'key')
- doc1 = self.db.create_doc_from_json(simple_doc)
- doc2 = self.db.create_doc_from_json(nested_doc)
- # This one should not be in the index
- self.db.create_doc_from_json('{"no": "key"}')
- diff_value_doc = '{"key": "diff value"}'
- doc4 = self.db.create_doc_from_json(diff_value_doc)
- # This is essentially a 'prefix' match, but we match every entry.
- self.assertEqual(
- sorted([doc1, doc2, doc4]),
- sorted(self.db.get_from_index('test-idx', '*')))
-
- def test_get_all_from_index_ordered(self):
- self.db.create_index('test-idx', 'key')
- doc1 = self.db.create_doc_from_json('{"key": "value x"}')
- doc2 = self.db.create_doc_from_json('{"key": "value b"}')
- doc3 = self.db.create_doc_from_json('{"key": "value a"}')
- doc4 = self.db.create_doc_from_json('{"key": "value m"}')
- # This is essentially a 'prefix' match, but we match every entry.
- self.assertEqual(
- [doc3, doc2, doc4, doc1], self.db.get_from_index('test-idx', '*'))
-
- def test_put_updates_when_adding_key(self):
- doc = self.db.create_doc_from_json("{}")
- self.db.create_index('test-idx', 'key')
- self.assertEqual([], self.db.get_from_index('test-idx', '*'))
- doc.set_json(simple_doc)
- self.db.put_doc(doc)
- self.assertEqual([doc], self.db.get_from_index('test-idx', '*'))
-
- def test_get_from_index_empty_string(self):
- self.db.create_index('test-idx', 'key')
- doc1 = self.db.create_doc_from_json(simple_doc)
- content2 = '{"key": ""}'
- doc2 = self.db.create_doc_from_json(content2)
- self.assertEqual([doc2], self.db.get_from_index('test-idx', ''))
- # Empty string matches the wildcard.
- self.assertEqual(
- sorted([doc1, doc2]),
- sorted(self.db.get_from_index('test-idx', '*')))
-
- def test_get_from_index_not_null(self):
- self.db.create_index('test-idx', 'key')
- doc1 = self.db.create_doc_from_json(simple_doc)
- self.db.create_doc_from_json('{"key": null}')
- self.assertEqual([doc1], self.db.get_from_index('test-idx', '*'))
-
- def test_get_partial_from_index(self):
- content1 = '{"k1": "v1", "k2": "v2"}'
- content2 = '{"k1": "v1", "k2": "x2"}'
- content3 = '{"k1": "v1", "k2": "y2"}'
- # doc4 has a different k1 value, so it doesn't match the prefix.
- content4 = '{"k1": "NN", "k2": "v2"}'
- doc1 = self.db.create_doc_from_json(content1)
- doc2 = self.db.create_doc_from_json(content2)
- doc3 = self.db.create_doc_from_json(content3)
- self.db.create_doc_from_json(content4)
- self.db.create_index('test-idx', 'k1', 'k2')
- self.assertEqual(
- sorted([doc1, doc2, doc3]),
- sorted(self.db.get_from_index('test-idx', "v1", "*")))
-
- def test_get_glob_match(self):
- # Note: the exact glob syntax is probably subject to change
- content1 = '{"k1": "v1", "k2": "v1"}'
- content2 = '{"k1": "v1", "k2": "v2"}'
- content3 = '{"k1": "v1", "k2": "v3"}'
- # doc4 has a different k2 prefix value, so it doesn't match
- content4 = '{"k1": "v1", "k2": "ZZ"}'
- self.db.create_index('test-idx', 'k1', 'k2')
- doc1 = self.db.create_doc_from_json(content1)
- doc2 = self.db.create_doc_from_json(content2)
- doc3 = self.db.create_doc_from_json(content3)
- self.db.create_doc_from_json(content4)
- self.assertEqual(
- sorted([doc1, doc2, doc3]),
- sorted(self.db.get_from_index('test-idx', "v1", "v*")))
-
- def test_nested_index(self):
- doc = self.db.create_doc_from_json(nested_doc)
- self.db.create_index('test-idx', 'sub.doc')
- self.assertEqual(
- [doc], self.db.get_from_index('test-idx', 'underneath'))
- doc2 = self.db.create_doc_from_json(nested_doc)
- self.assertEqual(
- sorted([doc, doc2]),
- sorted(self.db.get_from_index('test-idx', 'underneath')))
-
- def test_nested_nonexistent(self):
- self.db.create_doc_from_json(nested_doc)
- # sub exists, but sub.foo does not:
- self.db.create_index('test-idx', 'sub.foo')
- self.assertEqual([], self.db.get_from_index('test-idx', '*'))
-
- def test_nested_nonexistent2(self):
- self.db.create_doc_from_json(nested_doc)
- self.db.create_index('test-idx', 'sub.foo.bar.baz.qux.fnord')
- self.assertEqual([], self.db.get_from_index('test-idx', '*'))
-
- def test_nested_traverses_lists(self):
- # subpath finds dicts in list
- doc = self.db.create_doc_from_json(
- '{"foo": [{"zap": "bar"}, {"zap": "baz"}]}')
- # subpath only finds dicts in list
- self.db.create_doc_from_json('{"foo": ["zap", "baz"]}')
- self.db.create_index('test-idx', 'foo.zap')
- self.assertEqual([doc], self.db.get_from_index('test-idx', 'bar'))
- self.assertEqual([doc], self.db.get_from_index('test-idx', 'baz'))
-
- def test_nested_list_traversal(self):
- # subpath finds dicts in list
- doc = self.db.create_doc_from_json(
- '{"foo": [{"zap": [{"qux": "fnord"}, {"qux": "zombo"}]},'
- '{"zap": "baz"}]}')
- # the subpath traverses the nested lists of dicts at every level
- self.db.create_index('test-idx', 'foo.zap.qux')
- self.assertEqual([doc], self.db.get_from_index('test-idx', 'fnord'))
- self.assertEqual([doc], self.db.get_from_index('test-idx', 'zombo'))
-
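- # A sketch of the dotted-path semantics the two tests above exercise,
- # inferred from their assertions: an expression like 'foo.zap.qux'
- # descends through dicts and lists alike at every level, e.g.
- #
- #   {"foo": [{"zap": [{"qux": "fnord"}]}]}  -> indexed under "fnord"
- #   {"foo": ["zap", "baz"]}                 -> not indexed (no dicts)
-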
- def test_index_list1(self):
- self.db.create_index("index", "name")
- content = '{"name": ["foo", "bar"]}'
- doc = self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "bar")
- self.assertEqual([doc], rows)
-
- def test_index_list2(self):
- self.db.create_index("index", "name")
- content = '{"name": ["foo", "bar"]}'
- doc = self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "foo")
- self.assertEqual([doc], rows)
-
- def test_get_from_index_case_sensitive(self):
- self.db.create_index('test-idx', 'key')
- doc1 = self.db.create_doc_from_json(simple_doc)
- self.assertEqual([], self.db.get_from_index('test-idx', 'V*'))
- self.assertEqual([doc1], self.db.get_from_index('test-idx', 'v*'))
-
- def test_get_from_index_illegal_glob_before_value(self):
- self.db.create_index('test-idx', 'k1', 'k2')
- self.assertRaises(
- errors.InvalidGlobbing,
- self.db.get_from_index, 'test-idx', 'v*', 'v2')
-
- def test_get_from_index_illegal_glob_after_glob(self):
- self.db.create_index('test-idx', 'k1', 'k2')
- self.assertRaises(
- errors.InvalidGlobbing,
- self.db.get_from_index, 'test-idx', 'v*', 'v*')
-
- def test_get_from_index_with_sql_wildcards(self):
- self.db.create_index('test-idx', 'key')
- content1 = '{"key": "va%lue"}'
- content2 = '{"key": "value"}'
- content3 = '{"key": "va_lue"}'
- doc1 = self.db.create_doc_from_json(content1)
- self.db.create_doc_from_json(content2)
- doc3 = self.db.create_doc_from_json(content3)
- # The '%' in the search should be treated literally, not as a SQL
- # LIKE wildcard.
- self.assertEqual([doc1], self.db.get_from_index('test-idx', 'va%*'))
- # Same for '_'
- self.assertEqual([doc3], self.db.get_from_index('test-idx', 'va_*'))
-
- def test_get_from_index_with_lower(self):
- self.db.create_index("index", "lower(name)")
- content = '{"name": "Foo"}'
- doc = self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "foo")
- self.assertEqual([doc], rows)
-
- def test_get_from_index_with_lower_matches_same_case(self):
- self.db.create_index("index", "lower(name)")
- content = '{"name": "foo"}'
- doc = self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "foo")
- self.assertEqual([doc], rows)
-
- def test_index_lower_doesnt_match_different_case(self):
- self.db.create_index("index", "lower(name)")
- content = '{"name": "Foo"}'
- self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "Foo")
- self.assertEqual([], rows)
-
- def test_index_lower_doesnt_match_other_index(self):
- self.db.create_index("index", "lower(name)")
- self.db.create_index("other_index", "name")
- content = '{"name": "Foo"}'
- self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "Foo")
- self.assertEqual(0, len(rows))
-
- def test_index_split_words_match_first(self):
- self.db.create_index("index", "split_words(name)")
- content = '{"name": "foo bar"}'
- doc = self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "foo")
- self.assertEqual([doc], rows)
-
- def test_index_split_words_match_second(self):
- self.db.create_index("index", "split_words(name)")
- content = '{"name": "foo bar"}'
- doc = self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "bar")
- self.assertEqual([doc], rows)
-
- def test_index_split_words_match_both(self):
- self.db.create_index("index", "split_words(name)")
- content = '{"name": "foo foo"}'
- doc = self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "foo")
- self.assertEqual([doc], rows)
-
- def test_index_split_words_double_space(self):
- self.db.create_index("index", "split_words(name)")
- content = '{"name": "foo bar"}'
- doc = self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "bar")
- self.assertEqual([doc], rows)
-
- def test_index_split_words_leading_space(self):
- self.db.create_index("index", "split_words(name)")
- content = '{"name": " foo bar"}'
- doc = self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "foo")
- self.assertEqual([doc], rows)
-
- def test_index_split_words_trailing_space(self):
- self.db.create_index("index", "split_words(name)")
- content = '{"name": "foo bar "}'
- doc = self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "bar")
- self.assertEqual([doc], rows)
-
- def test_get_from_index_with_number(self):
- self.db.create_index("index", "number(foo, 5)")
- content = '{"foo": 12}'
- doc = self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "00012")
- self.assertEqual([doc], rows)
-
- def test_get_from_index_with_number_bigger_than_padding(self):
- self.db.create_index("index", "number(foo, 5)")
- content = '{"foo": 123456}'
- doc = self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "123456")
- self.assertEqual([doc], rows)
-
- def test_number_mapping_ignores_non_numbers(self):
- self.db.create_index("index", "number(foo, 5)")
- content = '{"foo": 56}'
- doc1 = self.db.create_doc_from_json(content)
- content = '{"foo": "this is not a maigret painting"}'
- self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "*")
- self.assertEqual([doc1], rows)
-
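- # Read together, these three cases pin down number(field, width):
- # integer values are left-padded with zeros to the given width
- # ("00012"), values wider than the padding are kept whole ("123456"),
- # and non-numeric values are simply not indexed (a gloss on the
- # assertions above, not an independent spec).
-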
- def test_get_from_index_with_bool(self):
- self.db.create_index("index", "bool(foo)")
- content = '{"foo": true}'
- doc = self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "1")
- self.assertEqual([doc], rows)
-
- def test_get_from_index_with_bool_false(self):
- self.db.create_index("index", "bool(foo)")
- content = '{"foo": false}'
- doc = self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "0")
- self.assertEqual([doc], rows)
-
- def test_get_from_index_with_non_bool(self):
- self.db.create_index("index", "bool(foo)")
- content = '{"foo": 42}'
- self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "*")
- self.assertEqual([], rows)
-
- def test_get_from_index_with_combine(self):
- self.db.create_index("index", "combine(foo, bar)")
- content = '{"foo": "value1", "bar": "value2"}'
- doc = self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "value1")
- self.assertEqual([doc], rows)
- rows = self.db.get_from_index("index", "value2")
- self.assertEqual([doc], rows)
-
- def test_get_complex_combine(self):
- self.db.create_index(
- "index", "combine(number(foo, 5), lower(bar), split_words(baz))")
- content = '{"foo": 12, "bar": "ALLCAPS", "baz": "qux nox"}'
- doc = self.db.create_doc_from_json(content)
- content = '{"foo": "not a number", "bar": "something"}'
- doc2 = self.db.create_doc_from_json(content)
- rows = self.db.get_from_index("index", "00012")
- self.assertEqual([doc], rows)
- rows = self.db.get_from_index("index", "allcaps")
- self.assertEqual([doc], rows)
- rows = self.db.get_from_index("index", "nox")
- self.assertEqual([doc], rows)
- rows = self.db.get_from_index("index", "something")
- self.assertEqual([doc2], rows)
-
- def test_get_index_keys_from_index(self):
- self.db.create_index('test-idx', 'key')
- content1 = '{"key": "value1"}'
- content2 = '{"key": "value2"}'
- content3 = '{"key": "value2"}'
- self.db.create_doc_from_json(content1)
- self.db.create_doc_from_json(content2)
- self.db.create_doc_from_json(content3)
- self.assertEqual(
- [('value1',), ('value2',)],
- sorted(self.db.get_index_keys('test-idx')))
-
- def test_get_index_keys_from_multicolumn_index(self):
- self.db.create_index('test-idx', 'key1', 'key2')
- content1 = '{"key1": "value1", "key2": "val2-1"}'
- content2 = '{"key1": "value2", "key2": "val2-2"}'
- content3 = '{"key1": "value2", "key2": "val2-2"}'
- content4 = '{"key1": "value2", "key2": "val3"}'
- self.db.create_doc_from_json(content1)
- self.db.create_doc_from_json(content2)
- self.db.create_doc_from_json(content3)
- self.db.create_doc_from_json(content4)
- self.assertEqual([
- ('value1', 'val2-1'),
- ('value2', 'val2-2'),
- ('value2', 'val3')],
- sorted(self.db.get_index_keys('test-idx')))
-
- def test_empty_expr(self):
- self.assertParseError('')
-
- def test_nested_unknown_operation(self):
- self.assertParseError('unknown_operation(field1)')
-
- def test_parse_missing_close_paren(self):
- self.assertParseError("lower(a")
-
- def test_parse_trailing_close_paren(self):
- self.assertParseError("lower(ab))")
-
- def test_parse_trailing_chars(self):
- self.assertParseError("lower(ab)adsf")
-
- def test_parse_empty_op(self):
- self.assertParseError("(ab)")
-
- def test_parse_top_level_commas(self):
- self.assertParseError("a, b")
-
- def test_invalid_field_name(self):
- self.assertParseError("a.")
-
- def test_invalid_inner_field_name(self):
- self.assertParseError("lower(a.)")
-
- def test_gobbledigook(self):
- self.assertParseError("(@#@cc @#!*DFJSXV(()jccd")
-
- def test_leading_space(self):
- self.assertIndexCreatable(" lower(a)")
-
- def test_trailing_space(self):
- self.assertIndexCreatable("lower(a) ")
-
- def test_spaces_before_open_paren(self):
- self.assertIndexCreatable("lower (a)")
-
- def test_spaces_after_open_paren(self):
- self.assertIndexCreatable("lower( a)")
-
- def test_spaces_before_close_paren(self):
- self.assertIndexCreatable("lower(a )")
-
- def test_spaces_before_comma(self):
- self.assertIndexCreatable("combine(a , b , c)")
-
- def test_spaces_after_comma(self):
- self.assertIndexCreatable("combine(a, b, c)")
-
- def test_all_together_now(self):
- self.assertParseError(' (a) ')
-
- def test_all_together_now2(self):
- self.assertParseError('combine(lower(x)x,foo)')
-
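- # Taken together, the parse-error and creatable cases above sketch the
- # index-expression grammar (inferred from these assertions, not a spec):
- #
- #   expr  := field | op '(' expr (',' expr)* ')'
- #   field := name ('.' name)*
- #
- # where op is one of the known transformations (lower, split_words,
- # number, bool, combine), whitespace may surround tokens, and trailing
- # characters are rejected.
-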
-
-class PythonBackendTests(tests.DatabaseBaseTests):
-
- def setUp(self):
- super(PythonBackendTests, self).setUp()
- self.simple_doc = json.loads(simple_doc)
-
- def test_create_doc_with_factory(self):
- self.db.set_document_factory(TestAlternativeDocument)
- doc = self.db.create_doc(self.simple_doc, doc_id='my_doc_id')
- self.assertTrue(isinstance(doc, TestAlternativeDocument))
-
- def test_get_doc_after_put_with_factory(self):
- doc = self.db.create_doc(self.simple_doc, doc_id='my_doc_id')
- self.db.set_document_factory(TestAlternativeDocument)
- result = self.db.get_doc('my_doc_id')
- self.assertTrue(isinstance(result, TestAlternativeDocument))
- self.assertEqual(doc.doc_id, result.doc_id)
- self.assertEqual(doc.rev, result.rev)
- self.assertEqual(doc.get_json(), result.get_json())
- self.assertEqual(False, result.has_conflicts)
-
- def test_get_doc_nonexisting_with_factory(self):
- self.db.set_document_factory(TestAlternativeDocument)
- self.assertIs(None, self.db.get_doc('non-existing'))
-
- def test_get_all_docs_with_factory(self):
- self.db.set_document_factory(TestAlternativeDocument)
- self.db.create_doc(self.simple_doc)
- self.assertTrue(isinstance(
- list(self.db.get_all_docs()[1])[0], TestAlternativeDocument))
-
- def test_get_docs_conflicted_with_factory(self):
- self.db.set_document_factory(TestAlternativeDocument)
- doc1 = self.db.create_doc(self.simple_doc)
- doc2 = self.make_document(doc1.doc_id, 'alternate:1', nested_doc)
- self.db._put_doc_if_newer(
- doc2, save_conflict=True, replica_uid='r', replica_gen=1,
- replica_trans_id='foo')
- self.assertTrue(
- isinstance(
- list(self.db.get_docs([doc1.doc_id]))[0],
- TestAlternativeDocument))
-
- def test_get_from_index_with_factory(self):
- self.db.set_document_factory(TestAlternativeDocument)
- self.db.create_doc(self.simple_doc)
- self.db.create_index('test-idx', 'key')
- self.assertTrue(
- isinstance(
- self.db.get_from_index('test-idx', 'value')[0],
- TestAlternativeDocument))
-
- def test_sync_exchange_updates_indexes(self):
- doc = self.db.create_doc(self.simple_doc)
- self.db.create_index('test-idx', 'key')
- new_content = '{"key": "altval"}'
- other_rev = 'test:1|z:2'
- st = self.db.get_sync_target()
-
- def ignore(doc_id, doc_rev, doc):
- pass
-
- doc_other = self.make_document(doc.doc_id, other_rev, new_content)
- docs_by_gen = [(doc_other, 10, 'T-sid')]
- st.sync_exchange(
- docs_by_gen, 'other-replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=ignore)
- self.assertGetDoc(self.db, doc.doc_id, other_rev, new_content, False)
- self.assertEqual(
- [doc_other], self.db.get_from_index('test-idx', 'altval'))
- self.assertEqual([], self.db.get_from_index('test-idx', 'value'))
-
-
-# Use a custom loader to apply the scenarios at load time.
-load_tests = tests.load_with_scenarios
diff --git a/src/leap/soledad/tests/u1db_tests/test_document.py b/src/leap/soledad/tests/u1db_tests/test_document.py
deleted file mode 100644
index e706e1a9..00000000
--- a/src/leap/soledad/tests/u1db_tests/test_document.py
+++ /dev/null
@@ -1,150 +0,0 @@
-# Copyright 2011 Canonical Ltd.
-#
-# This file is part of u1db.
-#
-# u1db is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3
-# as published by the Free Software Foundation.
-#
-# u1db is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with u1db. If not, see <http://www.gnu.org/licenses/>.
-
-
-from u1db import errors
-
-from leap.soledad.tests import u1db_tests as tests
-
-
-class TestDocument(tests.TestCase):
-
- scenarios = ([(
- 'py', {'make_document_for_test': tests.make_document_for_test})])
- # + tests.C_DATABASE_SCENARIOS
-
- def test_create_doc(self):
- doc = self.make_document('doc-id', 'uid:1', tests.simple_doc)
- self.assertEqual('doc-id', doc.doc_id)
- self.assertEqual('uid:1', doc.rev)
- self.assertEqual(tests.simple_doc, doc.get_json())
- self.assertFalse(doc.has_conflicts)
-
- def test__repr__(self):
- doc = self.make_document('doc-id', 'uid:1', tests.simple_doc)
- self.assertEqual(
- '%s(doc-id, uid:1, \'{"key": "value"}\')'
- % (doc.__class__.__name__,),
- repr(doc))
-
- def test__repr__conflicted(self):
- doc = self.make_document('doc-id', 'uid:1', tests.simple_doc,
- has_conflicts=True)
- self.assertEqual(
- '%s(doc-id, uid:1, conflicted, \'{"key": "value"}\')'
- % (doc.__class__.__name__,),
- repr(doc))
-
- def test__lt__(self):
- doc_a = self.make_document('a', 'b', '{}')
- doc_b = self.make_document('b', 'b', '{}')
- self.assertTrue(doc_a < doc_b)
- self.assertTrue(doc_b > doc_a)
- doc_aa = self.make_document('a', 'a', '{}')
- self.assertTrue(doc_aa < doc_a)
-
- def test__eq__(self):
- doc_a = self.make_document('a', 'b', '{}')
- doc_b = self.make_document('a', 'b', '{}')
- self.assertTrue(doc_a == doc_b)
- doc_b = self.make_document('a', 'b', '{}', has_conflicts=True)
- self.assertFalse(doc_a == doc_b)
-
- def test_non_json_dict(self):
- self.assertRaises(
- errors.InvalidJSON, self.make_document, 'id', 'uid:1',
- '"not a json dictionary"')
-
- def test_non_json(self):
- self.assertRaises(
- errors.InvalidJSON, self.make_document, 'id', 'uid:1',
- 'not a json dictionary')
-
- def test_get_size(self):
- doc_a = self.make_document('a', 'b', '{"some": "content"}')
- self.assertEqual(
- len('a' + 'b' + '{"some": "content"}'), doc_a.get_size())
-
- def test_get_size_empty_document(self):
- doc_a = self.make_document('a', 'b', None)
- self.assertEqual(len('a' + 'b'), doc_a.get_size())
-
-
-class TestPyDocument(tests.TestCase):
-
- scenarios = ([(
- 'py', {'make_document_for_test': tests.make_document_for_test})])
-
- def test_get_content(self):
- doc = self.make_document('id', 'rev', '{"content":""}')
- self.assertEqual({"content": ""}, doc.content)
- doc.set_json('{"content": "new"}')
- self.assertEqual({"content": "new"}, doc.content)
-
- def test_set_content(self):
- doc = self.make_document('id', 'rev', '{"content":""}')
- doc.content = {"content": "new"}
- self.assertEqual('{"content": "new"}', doc.get_json())
-
- def test_set_bad_content(self):
- doc = self.make_document('id', 'rev', '{"content":""}')
- self.assertRaises(
- errors.InvalidContent, setattr, doc, 'content',
- '{"content": "new"}')
-
- def test_is_tombstone(self):
- doc_a = self.make_document('a', 'b', '{}')
- self.assertFalse(doc_a.is_tombstone())
- doc_a.set_json(None)
- self.assertTrue(doc_a.is_tombstone())
-
- def test_make_tombstone(self):
- doc_a = self.make_document('a', 'b', '{}')
- self.assertFalse(doc_a.is_tombstone())
- doc_a.make_tombstone()
- self.assertTrue(doc_a.is_tombstone())
-
- def test_same_content_as(self):
- doc_a = self.make_document('a', 'b', '{}')
- doc_b = self.make_document('d', 'e', '{}')
- self.assertTrue(doc_a.same_content_as(doc_b))
- doc_b = self.make_document('p', 'q', '{}', has_conflicts=True)
- self.assertTrue(doc_a.same_content_as(doc_b))
- doc_b.content['key'] = 'value'
- self.assertFalse(doc_a.same_content_as(doc_b))
-
- def test_same_content_as_json_order(self):
- doc_a = self.make_document(
- 'a', 'b', '{"key1": "val1", "key2": "val2"}')
- doc_b = self.make_document(
- 'c', 'd', '{"key2": "val2", "key1": "val1"}')
- self.assertTrue(doc_a.same_content_as(doc_b))
-
- def test_set_json(self):
- doc = self.make_document('id', 'rev', '{"content":""}')
- doc.set_json('{"content": "new"}')
- self.assertEqual('{"content": "new"}', doc.get_json())
-
- def test_set_json_non_dict(self):
- doc = self.make_document('id', 'rev', '{"content":""}')
- self.assertRaises(errors.InvalidJSON, doc.set_json, '"is not a dict"')
-
- def test_set_json_error(self):
- doc = self.make_document('id', 'rev', '{"content":""}')
- self.assertRaises(errors.InvalidJSON, doc.set_json, 'is not json')
-
-
-load_tests = tests.load_with_scenarios
diff --git a/src/leap/soledad/tests/u1db_tests/test_http_app.py b/src/leap/soledad/tests/u1db_tests/test_http_app.py
deleted file mode 100644
index e0729aa2..00000000
--- a/src/leap/soledad/tests/u1db_tests/test_http_app.py
+++ /dev/null
@@ -1,1135 +0,0 @@
-# Copyright 2011-2012 Canonical Ltd.
-#
-# This file is part of u1db.
-#
-# u1db is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3
-# as published by the Free Software Foundation.
-#
-# u1db is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with u1db. If not, see <http://www.gnu.org/licenses/>.
-
-"""Test the WSGI app."""
-
-import paste.fixture
-import sys
-try:
- import simplejson as json
-except ImportError:
- import json # noqa
-import StringIO
-
-from u1db import (
- __version__ as _u1db_version,
- errors,
- sync,
-)
-
-from leap.soledad.tests import u1db_tests as tests
-
-from u1db.remote import (
- http_app,
- http_errors,
-)
-
-
-class TestFencedReader(tests.TestCase):
-
- def test_init(self):
- reader = http_app._FencedReader(StringIO.StringIO(""), 25, 100)
- self.assertEqual(25, reader.remaining)
-
- def test_read_chunk(self):
- inp = StringIO.StringIO("abcdef")
- reader = http_app._FencedReader(inp, 5, 10)
- data = reader.read_chunk(2)
- self.assertEqual("ab", data)
- self.assertEqual(2, inp.tell())
- self.assertEqual(3, reader.remaining)
-
- def test_read_chunk_remaining(self):
- inp = StringIO.StringIO("abcdef")
- reader = http_app._FencedReader(inp, 4, 10)
- data = reader.read_chunk(9999)
- self.assertEqual("abcd", data)
- self.assertEqual(4, inp.tell())
- self.assertEqual(0, reader.remaining)
-
- def test_read_chunk_nothing_left(self):
- inp = StringIO.StringIO("abc")
- reader = http_app._FencedReader(inp, 2, 10)
- reader.read_chunk(2)
- self.assertEqual(2, inp.tell())
- self.assertEqual(0, reader.remaining)
- data = reader.read_chunk(2)
- self.assertEqual("", data)
- self.assertEqual(2, inp.tell())
- self.assertEqual(0, reader.remaining)
-
- def test_read_chunk_kept(self):
- inp = StringIO.StringIO("abcde")
- reader = http_app._FencedReader(inp, 4, 10)
- reader._kept = "xyz"
- data = reader.read_chunk(2) # the size argument is ignored while data is kept
- self.assertEqual("xyz", data)
- self.assertEqual(0, inp.tell())
- self.assertEqual(4, reader.remaining)
- self.assertIsNone(reader._kept)
-
- def test_getline(self):
- inp = StringIO.StringIO("abc\r\nde")
- reader = http_app._FencedReader(inp, 6, 10)
- reader.MAXCHUNK = 6
- line = reader.getline()
- self.assertEqual("abc\r\n", line)
- self.assertEqual("d", reader._kept)
-
- def test_getline_exact(self):
- inp = StringIO.StringIO("abcd\r\nef")
- reader = http_app._FencedReader(inp, 6, 10)
- reader.MAXCHUNK = 6
- line = reader.getline()
- self.assertEqual("abcd\r\n", line)
- self.assertIs(None, reader._kept)
-
- def test_getline_no_newline(self):
- inp = StringIO.StringIO("abcd")
- reader = http_app._FencedReader(inp, 4, 10)
- reader.MAXCHUNK = 6
- line = reader.getline()
- self.assertEqual("abcd", line)
-
- def test_getline_many_chunks(self):
- inp = StringIO.StringIO("abcde\r\nf")
- reader = http_app._FencedReader(inp, 8, 10)
- reader.MAXCHUNK = 4
- line = reader.getline()
- self.assertEqual("abcde\r\n", line)
- self.assertEqual("f", reader._kept)
- line = reader.getline()
- self.assertEqual("f", line)
-
- def test_getline_empty(self):
- inp = StringIO.StringIO("")
- reader = http_app._FencedReader(inp, 0, 10)
- reader.MAXCHUNK = 4
- line = reader.getline()
- self.assertEqual("", line)
- line = reader.getline()
- self.assertEqual("", line)
-
- def test_getline_just_newline(self):
- inp = StringIO.StringIO("\r\n")
- reader = http_app._FencedReader(inp, 2, 10)
- reader.MAXCHUNK = 4
- line = reader.getline()
- self.assertEqual("\r\n", line)
- line = reader.getline()
- self.assertEqual("", line)
-
- def test_getline_too_large(self):
- inp = StringIO.StringIO("x" * 50)
- reader = http_app._FencedReader(inp, 50, 25)
- reader.MAXCHUNK = 4
- self.assertRaises(http_app.BadRequest, reader.getline)
-
- def test_getline_too_large_complete(self):
- inp = StringIO.StringIO("x" * 25 + "\r\n")
- reader = http_app._FencedReader(inp, 50, 25)
- reader.MAXCHUNK = 4
- self.assertRaises(http_app.BadRequest, reader.getline)
-
-
-class TestHTTPMethodDecorator(tests.TestCase):
-
- def test_args(self):
- @http_app.http_method()
- def f(self, a, b):
- return self, a, b
- res = f("self", {"a": "x", "b": "y"}, None)
- self.assertEqual(("self", "x", "y"), res)
-
- def test_args_missing(self):
- @http_app.http_method()
- def f(self, a, b):
- return a, b
- self.assertRaises(http_app.BadRequest, f, "self", {"a": "x"}, None)
-
- def test_args_unexpected(self):
- @http_app.http_method()
- def f(self, a):
- return a
- self.assertRaises(http_app.BadRequest, f, "self",
- {"a": "x", "c": "z"}, None)
-
- def test_args_default(self):
- @http_app.http_method()
- def f(self, a, b="z"):
- return a, b
- res = f("self", {"a": "x"}, None)
- self.assertEqual(("x", "z"), res)
-
- def test_args_conversion(self):
- @http_app.http_method(b=int)
- def f(self, a, b):
- return self, a, b
- res = f("self", {"a": "x", "b": "2"}, None)
- self.assertEqual(("self", "x", 2), res)
-
- self.assertRaises(http_app.BadRequest, f, "self",
- {"a": "x", "b": "foo"}, None)
-
- def test_args_conversion_with_default(self):
- @http_app.http_method(b=str)
- def f(self, a, b=None):
- return self, a, b
- res = f("self", {"a": "x"}, None)
- self.assertEqual(("self", "x", None), res)
-
- def test_args_content(self):
- @http_app.http_method()
- def f(self, a, content):
- return a, content
- res = f(self, {"a": "x"}, "CONTENT")
- self.assertEqual(("x", "CONTENT"), res)
-
- def test_args_content_as_args(self):
- @http_app.http_method(b=int, content_as_args=True)
- def f(self, a, b):
- return self, a, b
- res = f("self", {"a": "x"}, '{"b": "2"}')
- self.assertEqual(("self", "x", 2), res)
-
- self.assertRaises(http_app.BadRequest, f, "self", {}, 'not-json')
-
- def test_args_content_no_query(self):
- @http_app.http_method(no_query=True,
- content_as_args=True)
- def f(self, a='a', b='b'):
- return a, b
- res = f("self", {}, '{"b": "y"}')
- self.assertEqual(('a', 'y'), res)
-
- self.assertRaises(http_app.BadRequest, f, "self", {'a': 'x'},
- '{"b": "y"}')
-
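- # In sum (a sketch inferred from the cases above): http_method() maps
- # query parameters onto the decorated function's arguments, applies the
- # given converters (e.g. b=int), passes the request body as 'content'
- # or, with content_as_args=True, decodes it as JSON into keyword
- # arguments, and raises BadRequest on anything missing or unexpected.
-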
-
-class TestResource(object):
-
- @http_app.http_method()
- def get(self, a, b):
- self.args = dict(a=a, b=b)
- return 'Get'
-
- @http_app.http_method()
- def put(self, a, content):
- self.args = dict(a=a)
- self.content = content
- return 'Put'
-
- @http_app.http_method(content_as_args=True)
- def put_args(self, a, b):
- self.args = dict(a=a, b=b)
- self.order = ['a']
- self.entries = []
-
- @http_app.http_method()
- def put_stream_entry(self, content):
- self.entries.append(content)
- self.order.append('s')
-
- def put_end(self):
- self.order.append('e')
- return "Put/end"
-
-
-class parameters:
- max_request_size = 200000
- max_entry_size = 100000
-
-
-class TestHTTPInvocationByMethodWithBody(tests.TestCase):
-
- def test_get(self):
- resource = TestResource()
- environ = {'QUERY_STRING': 'a=1&b=2', 'REQUEST_METHOD': 'GET'}
- invoke = http_app.HTTPInvocationByMethodWithBody(resource, environ,
- parameters)
- res = invoke()
- self.assertEqual('Get', res)
- self.assertEqual({'a': '1', 'b': '2'}, resource.args)
-
- def test_put_json(self):
- resource = TestResource()
- body = '{"body": true}'
- environ = {'QUERY_STRING': 'a=1', 'REQUEST_METHOD': 'PUT',
- 'wsgi.input': StringIO.StringIO(body),
- 'CONTENT_LENGTH': str(len(body)),
- 'CONTENT_TYPE': 'application/json'}
- invoke = http_app.HTTPInvocationByMethodWithBody(resource, environ,
- parameters)
- res = invoke()
- self.assertEqual('Put', res)
- self.assertEqual({'a': '1'}, resource.args)
- self.assertEqual('{"body": true}', resource.content)
-
- def test_put_sync_stream(self):
- resource = TestResource()
- body = (
- '[\r\n'
- '{"b": 2},\r\n' # args
- '{"entry": "x"},\r\n' # stream entry
- '{"entry": "y"}\r\n' # stream entry
- ']'
- )
- environ = {'QUERY_STRING': 'a=1', 'REQUEST_METHOD': 'PUT',
- 'wsgi.input': StringIO.StringIO(body),
- 'CONTENT_LENGTH': str(len(body)),
- 'CONTENT_TYPE': 'application/x-u1db-sync-stream'}
- invoke = http_app.HTTPInvocationByMethodWithBody(resource, environ,
- parameters)
- res = invoke()
- self.assertEqual('Put/end', res)
- self.assertEqual({'a': '1', 'b': 2}, resource.args)
- self.assertEqual(
- ['{"entry": "x"}', '{"entry": "y"}'], resource.entries)
- self.assertEqual(['a', 's', 's', 'e'], resource.order)
-
- def _put_sync_stream(self, body):
- resource = TestResource()
- environ = {'QUERY_STRING': 'a=1&b=2', 'REQUEST_METHOD': 'PUT',
- 'wsgi.input': StringIO.StringIO(body),
- 'CONTENT_LENGTH': str(len(body)),
- 'CONTENT_TYPE': 'application/x-u1db-sync-stream'}
- invoke = http_app.HTTPInvocationByMethodWithBody(resource, environ,
- parameters)
- invoke()
-
- def test_put_sync_stream_wrong_start(self):
- self.assertRaises(http_app.BadRequest,
- self._put_sync_stream, "{}\r\n]")
-
- self.assertRaises(http_app.BadRequest,
- self._put_sync_stream, "\r\n{}\r\n]")
-
- self.assertRaises(http_app.BadRequest,
- self._put_sync_stream, "")
-
- def test_put_sync_stream_wrong_end(self):
- self.assertRaises(http_app.BadRequest,
- self._put_sync_stream, "[\r\n{}")
-
- self.assertRaises(http_app.BadRequest,
- self._put_sync_stream, "[\r\n")
-
- self.assertRaises(http_app.BadRequest,
- self._put_sync_stream, "[\r\n{}\r\n]\r\n...")
-
- def test_put_sync_stream_missing_comma(self):
- self.assertRaises(http_app.BadRequest,
- self._put_sync_stream, "[\r\n{}\r\n{}\r\n]")
-
- def test_put_sync_stream_extra_comma(self):
- self.assertRaises(http_app.BadRequest,
- self._put_sync_stream, "[\r\n{},\r\n]")
-
- self.assertRaises(http_app.BadRequest,
- self._put_sync_stream, "[\r\n{},\r\n{},\r\n]")
-
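- # Summed up, these cases fix the sync-stream framing (inferred from the
- # assertions above): the body must be exactly
- #
- #   '[' CRLF entry (',' CRLF entry)* CRLF ']'
- #
- # with no missing or extra commas and nothing after the closing ']'.
-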
- def test_bad_request_decode_failure(self):
- resource = TestResource()
- environ = {'QUERY_STRING': 'a=\xff', 'REQUEST_METHOD': 'PUT',
- 'wsgi.input': StringIO.StringIO('{}'),
- 'CONTENT_LENGTH': '2',
- 'CONTENT_TYPE': 'application/json'}
- invoke = http_app.HTTPInvocationByMethodWithBody(resource, environ,
- parameters)
- self.assertRaises(http_app.BadRequest, invoke)
-
- def test_bad_request_unsupported_content_type(self):
- resource = TestResource()
- environ = {'QUERY_STRING': '', 'REQUEST_METHOD': 'PUT',
- 'wsgi.input': StringIO.StringIO('{}'),
- 'CONTENT_LENGTH': '2',
- 'CONTENT_TYPE': 'text/plain'}
- invoke = http_app.HTTPInvocationByMethodWithBody(resource, environ,
- parameters)
- self.assertRaises(http_app.BadRequest, invoke)
-
- def test_bad_request_content_length_too_large(self):
- resource = TestResource()
- environ = {'QUERY_STRING': '', 'REQUEST_METHOD': 'PUT',
- 'wsgi.input': StringIO.StringIO('{}'),
- 'CONTENT_LENGTH': '10000',
- 'CONTENT_TYPE': 'text/plain'}
-
- resource.max_request_size = 5000
- resource.max_entry_size = sys.maxint # we don't get to use this
-
- invoke = http_app.HTTPInvocationByMethodWithBody(resource, environ,
- parameters)
- self.assertRaises(http_app.BadRequest, invoke)
-
- def test_bad_request_no_content_length(self):
- resource = TestResource()
- environ = {'QUERY_STRING': '', 'REQUEST_METHOD': 'PUT',
- 'wsgi.input': StringIO.StringIO('a'),
- 'CONTENT_TYPE': 'application/json'}
- invoke = http_app.HTTPInvocationByMethodWithBody(resource, environ,
- parameters)
- self.assertRaises(http_app.BadRequest, invoke)
-
- def test_bad_request_invalid_content_length(self):
- resource = TestResource()
- environ = {'QUERY_STRING': '', 'REQUEST_METHOD': 'PUT',
- 'wsgi.input': StringIO.StringIO('abc'),
- 'CONTENT_LENGTH': '1unk',
- 'CONTENT_TYPE': 'application/json'}
- invoke = http_app.HTTPInvocationByMethodWithBody(resource, environ,
- parameters)
- self.assertRaises(http_app.BadRequest, invoke)
-
- def test_bad_request_empty_body(self):
- resource = TestResource()
- environ = {'QUERY_STRING': '', 'REQUEST_METHOD': 'PUT',
- 'wsgi.input': StringIO.StringIO(''),
- 'CONTENT_LENGTH': '0',
- 'CONTENT_TYPE': 'application/json'}
- invoke = http_app.HTTPInvocationByMethodWithBody(resource, environ,
- parameters)
- self.assertRaises(http_app.BadRequest, invoke)
-
- def test_bad_request_unsupported_method_get_like(self):
- resource = TestResource()
- environ = {'QUERY_STRING': '', 'REQUEST_METHOD': 'DELETE'}
- invoke = http_app.HTTPInvocationByMethodWithBody(resource, environ,
- parameters)
- self.assertRaises(http_app.BadRequest, invoke)
-
- def test_bad_request_unsupported_method_put_like(self):
- resource = TestResource()
- environ = {'QUERY_STRING': '', 'REQUEST_METHOD': 'PUT',
- 'wsgi.input': StringIO.StringIO('{}'),
- 'CONTENT_LENGTH': '2',
- 'CONTENT_TYPE': 'application/json'}
- invoke = http_app.HTTPInvocationByMethodWithBody(resource, environ,
- parameters)
- self.assertRaises(http_app.BadRequest, invoke)
-
- def test_bad_request_unsupported_method_put_like_multi_json(self):
- resource = TestResource()
- body = '{}\r\n{}\r\n'
- environ = {'QUERY_STRING': '', 'REQUEST_METHOD': 'POST',
- 'wsgi.input': StringIO.StringIO(body),
- 'CONTENT_LENGTH': str(len(body)),
- 'CONTENT_TYPE': 'application/x-u1db-multi-json'}
- invoke = http_app.HTTPInvocationByMethodWithBody(resource, environ,
- parameters)
- self.assertRaises(http_app.BadRequest, invoke)
-
-
-class TestHTTPResponder(tests.TestCase):
-
- def start_response(self, status, headers):
- self.status = status
- self.headers = dict(headers)
- self.response_body = []
-
- def write(data):
- self.response_body.append(data)
-
- return write
-
- def test_send_response_content_w_headers(self):
- responder = http_app.HTTPResponder(self.start_response)
- responder.send_response_content('foo', headers={'x-a': '1'})
- self.assertEqual('200 OK', self.status)
- self.assertEqual({'content-type': 'application/json',
- 'cache-control': 'no-cache',
- 'x-a': '1', 'content-length': '3'}, self.headers)
- self.assertEqual([], self.response_body)
- self.assertEqual(['foo'], responder.content)
-
- def test_send_response_json(self):
- responder = http_app.HTTPResponder(self.start_response)
- responder.send_response_json(value='success')
- self.assertEqual('200 OK', self.status)
- expected_body = '{"value": "success"}\r\n'
- self.assertEqual({'content-type': 'application/json',
- 'content-length': str(len(expected_body)),
- 'cache-control': 'no-cache'}, self.headers)
- self.assertEqual([], self.response_body)
- self.assertEqual([expected_body], responder.content)
-
- def test_send_response_json_status_fail(self):
- responder = http_app.HTTPResponder(self.start_response)
- responder.send_response_json(400)
- self.assertEqual('400 Bad Request', self.status)
- expected_body = '{}\r\n'
- self.assertEqual({'content-type': 'application/json',
- 'content-length': str(len(expected_body)),
- 'cache-control': 'no-cache'}, self.headers)
- self.assertEqual([], self.response_body)
- self.assertEqual([expected_body], responder.content)
-
- def test_start_finish_response_status_fail(self):
- responder = http_app.HTTPResponder(self.start_response)
- responder.start_response(404, {'error': 'not found'})
- responder.finish_response()
- self.assertEqual('404 Not Found', self.status)
- self.assertEqual({'content-type': 'application/json',
- 'cache-control': 'no-cache'}, self.headers)
- self.assertEqual(['{"error": "not found"}\r\n'], self.response_body)
- self.assertEqual([], responder.content)
-
- def test_send_stream_entry(self):
- responder = http_app.HTTPResponder(self.start_response)
- responder.content_type = "application/x-u1db-multi-json"
- responder.start_response(200)
- responder.start_stream()
- responder.stream_entry({'entry': 1})
- responder.stream_entry({'entry': 2})
- responder.end_stream()
- responder.finish_response()
- self.assertEqual('200 OK', self.status)
- self.assertEqual({'content-type': 'application/x-u1db-multi-json',
- 'cache-control': 'no-cache'}, self.headers)
- self.assertEqual(['[',
- '\r\n', '{"entry": 1}',
- ',\r\n', '{"entry": 2}',
- '\r\n]\r\n'], self.response_body)
- self.assertEqual([], responder.content)
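-
- # The framing asserted above is the application/x-u1db-multi-json
- # stream format: an opening '[', one JSON object per entry separated
- # by ',\r\n', and a closing ']\r\n'.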
-
- def test_send_stream_w_error(self):
- responder = http_app.HTTPResponder(self.start_response)
- responder.content_type = "application/x-u1db-multi-json"
- responder.start_response(200)
- responder.start_stream()
- responder.stream_entry({'entry': 1})
- responder.send_response_json(503, error="unavailable")
- self.assertEqual('200 OK', self.status)
- self.assertEqual({'content-type': 'application/x-u1db-multi-json',
- 'cache-control': 'no-cache'}, self.headers)
- self.assertEqual(['[',
- '\r\n', '{"entry": 1}'], self.response_body)
- self.assertEqual([',\r\n', '{"error": "unavailable"}\r\n'],
- responder.content)
-
-
-class TestHTTPApp(tests.TestCase):
-
- def setUp(self):
- super(TestHTTPApp, self).setUp()
- self.state = tests.ServerStateForTests()
- self.http_app = http_app.HTTPApp(self.state)
- self.app = paste.fixture.TestApp(self.http_app)
- self.db0 = self.state._create_database('db0')
-
- def test_bad_request_broken(self):
- resp = self.app.put('/db0/doc/doc1', params='{"x": 1}',
- headers={'content-type': 'application/foo'},
- expect_errors=True)
- self.assertEqual(400, resp.status)
-
- def test_bad_request_dispatch(self):
- resp = self.app.put('/db0/foo/doc1', params='{"x": 1}',
- headers={'content-type': 'application/json'},
- expect_errors=True)
- self.assertEqual(400, resp.status)
-
- def test_version(self):
- resp = self.app.get('/')
- self.assertEqual(200, resp.status)
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual({"version": _u1db_version}, json.loads(resp.body))
-
- def test_create_database(self):
- resp = self.app.put('/db1', params='{}',
- headers={'content-type': 'application/json'})
- self.assertEqual(200, resp.status)
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual({'ok': True}, json.loads(resp.body))
-
- resp = self.app.put('/db1', params='{}',
- headers={'content-type': 'application/json'})
- self.assertEqual(200, resp.status)
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual({'ok': True}, json.loads(resp.body))
-
- def test_delete_database(self):
- resp = self.app.delete('/db0')
- self.assertEqual(200, resp.status)
- self.assertRaises(errors.DatabaseDoesNotExist,
- self.state.check_database, 'db0')
-
- def test_get_database(self):
- resp = self.app.get('/db0')
- self.assertEqual(200, resp.status)
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual({}, json.loads(resp.body))
-
- def test_valid_database_names(self):
- resp = self.app.get('/a-database', expect_errors=True)
- self.assertEqual(404, resp.status)
-
- resp = self.app.get('/db1', expect_errors=True)
- self.assertEqual(404, resp.status)
-
- resp = self.app.get('/0', expect_errors=True)
- self.assertEqual(404, resp.status)
-
- resp = self.app.get('/0-0', expect_errors=True)
- self.assertEqual(404, resp.status)
-
- resp = self.app.get('/org.future', expect_errors=True)
- self.assertEqual(404, resp.status)
-
- def test_invalid_database_names(self):
- resp = self.app.get('/.a', expect_errors=True)
- self.assertEqual(400, resp.status)
-
- resp = self.app.get('/-a', expect_errors=True)
- self.assertEqual(400, resp.status)
-
- resp = self.app.get('/_a', expect_errors=True)
- self.assertEqual(400, resp.status)
-
- def test_put_doc_create(self):
- resp = self.app.put('/db0/doc/doc1', params='{"x": 1}',
- headers={'content-type': 'application/json'})
- doc = self.db0.get_doc('doc1')
- self.assertEqual(201, resp.status) # created
- self.assertEqual('{"x": 1}', doc.get_json())
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual({'rev': doc.rev}, json.loads(resp.body))
-
- def test_put_doc(self):
- doc = self.db0.create_doc_from_json('{"x": 1}', doc_id='doc1')
- resp = self.app.put('/db0/doc/doc1?old_rev=%s' % doc.rev,
- params='{"x": 2}',
- headers={'content-type': 'application/json'})
- doc = self.db0.get_doc('doc1')
- self.assertEqual(200, resp.status)
- self.assertEqual('{"x": 2}', doc.get_json())
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual({'rev': doc.rev}, json.loads(resp.body))
-
- def test_put_doc_too_large(self):
- self.http_app.max_request_size = 15000
- doc = self.db0.create_doc_from_json('{"x": 1}', doc_id='doc1')
- resp = self.app.put('/db0/doc/doc1?old_rev=%s' % doc.rev,
- params='{"%s": 2}' % ('z' * 16000),
- headers={'content-type': 'application/json'},
- expect_errors=True)
- self.assertEqual(400, resp.status)
-
- def test_delete_doc(self):
- doc = self.db0.create_doc_from_json('{"x": 1}', doc_id='doc1')
- resp = self.app.delete('/db0/doc/doc1?old_rev=%s' % doc.rev)
- doc = self.db0.get_doc('doc1', include_deleted=True)
- self.assertEqual(None, doc.content)
- self.assertEqual(200, resp.status)
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual({'rev': doc.rev}, json.loads(resp.body))
-
- def test_get_doc(self):
- doc = self.db0.create_doc_from_json('{"x": 1}', doc_id='doc1')
- resp = self.app.get('/db0/doc/%s' % doc.doc_id)
- self.assertEqual(200, resp.status)
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual('{"x": 1}', resp.body)
- self.assertEqual(doc.rev, resp.header('x-u1db-rev'))
- self.assertEqual('false', resp.header('x-u1db-has-conflicts'))
-
- def test_get_doc_non_existing(self):
- resp = self.app.get('/db0/doc/not-there', expect_errors=True)
- self.assertEqual(404, resp.status)
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual(
- {"error": "document does not exist"}, json.loads(resp.body))
- self.assertEqual('', resp.header('x-u1db-rev'))
- self.assertEqual('false', resp.header('x-u1db-has-conflicts'))
-
- def test_get_doc_deleted(self):
- doc = self.db0.create_doc_from_json('{"x": 1}', doc_id='doc1')
- self.db0.delete_doc(doc)
- resp = self.app.get('/db0/doc/doc1', expect_errors=True)
- self.assertEqual(404, resp.status)
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual(
- {"error": errors.DocumentDoesNotExist.wire_description},
- json.loads(resp.body))
-
- def test_get_doc_deleted_explicit_exclude(self):
- doc = self.db0.create_doc_from_json('{"x": 1}', doc_id='doc1')
- self.db0.delete_doc(doc)
- resp = self.app.get(
- '/db0/doc/doc1?include_deleted=false', expect_errors=True)
- self.assertEqual(404, resp.status)
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual(
- {"error": errors.DocumentDoesNotExist.wire_description},
- json.loads(resp.body))
-
- def test_get_deleted_doc(self):
- doc = self.db0.create_doc_from_json('{"x": 1}', doc_id='doc1')
- self.db0.delete_doc(doc)
- resp = self.app.get(
- '/db0/doc/doc1?include_deleted=true', expect_errors=True)
- self.assertEqual(404, resp.status)
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual(
- {"error": errors.DOCUMENT_DELETED}, json.loads(resp.body))
- self.assertEqual(doc.rev, resp.header('x-u1db-rev'))
- self.assertEqual('false', resp.header('x-u1db-has-conflicts'))
-
- def test_get_doc_non_existing_database(self):
- resp = self.app.get('/not-there/doc/doc1', expect_errors=True)
- self.assertEqual(404, resp.status)
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual(
- {"error": "database does not exist"}, json.loads(resp.body))
-
- def test_get_docs(self):
- doc1 = self.db0.create_doc_from_json('{"x": 1}', doc_id='doc1')
- doc2 = self.db0.create_doc_from_json('{"x": 1}', doc_id='doc2')
- ids = ','.join([doc1.doc_id, doc2.doc_id])
- resp = self.app.get('/db0/docs?doc_ids=%s' % ids)
- self.assertEqual(200, resp.status)
- self.assertEqual(
- 'application/json', resp.header('content-type'))
- expected = [
- {"content": '{"x": 1}', "doc_rev": "db0:1", "doc_id": "doc1",
- "has_conflicts": False},
- {"content": '{"x": 1}', "doc_rev": "db0:1", "doc_id": "doc2",
- "has_conflicts": False}]
- self.assertEqual(expected, json.loads(resp.body))
-
- def test_get_docs_missing_doc_ids(self):
- resp = self.app.get('/db0/docs', expect_errors=True)
- self.assertEqual(400, resp.status)
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual(
- {"error": "missing document ids"}, json.loads(resp.body))
-
- def test_get_docs_empty_doc_ids(self):
- resp = self.app.get('/db0/docs?doc_ids=', expect_errors=True)
- self.assertEqual(400, resp.status)
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual(
- {"error": "missing document ids"}, json.loads(resp.body))
-
- def test_get_docs_percent(self):
- doc1 = self.db0.create_doc_from_json('{"x": 1}', doc_id='doc%1')
- doc2 = self.db0.create_doc_from_json('{"x": 1}', doc_id='doc2')
- ids = ','.join([doc1.doc_id, doc2.doc_id])
- resp = self.app.get('/db0/docs?doc_ids=%s' % ids)
- self.assertEqual(200, resp.status)
- self.assertEqual(
- 'application/json', resp.header('content-type'))
- expected = [
- {"content": '{"x": 1}', "doc_rev": "db0:1", "doc_id": "doc%1",
- "has_conflicts": False},
- {"content": '{"x": 1}', "doc_rev": "db0:1", "doc_id": "doc2",
- "has_conflicts": False}]
- self.assertEqual(expected, json.loads(resp.body))
-
- def test_get_docs_deleted(self):
- doc1 = self.db0.create_doc_from_json('{"x": 1}', doc_id='doc1')
- doc2 = self.db0.create_doc_from_json('{"x": 1}', doc_id='doc2')
- self.db0.delete_doc(doc2)
- ids = ','.join([doc1.doc_id, doc2.doc_id])
- resp = self.app.get('/db0/docs?doc_ids=%s' % ids)
- self.assertEqual(200, resp.status)
- self.assertEqual(
- 'application/json', resp.header('content-type'))
- expected = [
- {"content": '{"x": 1}', "doc_rev": "db0:1", "doc_id": "doc1",
- "has_conflicts": False}]
- self.assertEqual(expected, json.loads(resp.body))
-
- def test_get_docs_include_deleted(self):
- doc1 = self.db0.create_doc_from_json('{"x": 1}', doc_id='doc1')
- doc2 = self.db0.create_doc_from_json('{"x": 1}', doc_id='doc2')
- self.db0.delete_doc(doc2)
- ids = ','.join([doc1.doc_id, doc2.doc_id])
- resp = self.app.get('/db0/docs?doc_ids=%s&include_deleted=true' % ids)
- self.assertEqual(200, resp.status)
- self.assertEqual(
- 'application/json', resp.header('content-type'))
- expected = [
- {"content": '{"x": 1}', "doc_rev": "db0:1", "doc_id": "doc1",
- "has_conflicts": False},
- {"content": None, "doc_rev": "db0:2", "doc_id": "doc2",
- "has_conflicts": False}]
- self.assertEqual(expected, json.loads(resp.body))
-
- def test_get_sync_info(self):
- self.db0._set_replica_gen_and_trans_id('other-id', 1, 'T-transid')
- resp = self.app.get('/db0/sync-from/other-id')
- self.assertEqual(200, resp.status)
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual(dict(target_replica_uid='db0',
- target_replica_generation=0,
- target_replica_transaction_id='',
- source_replica_uid='other-id',
- source_replica_generation=1,
- source_transaction_id='T-transid'),
- json.loads(resp.body))
-
- def test_record_sync_info(self):
- resp = self.app.put('/db0/sync-from/other-id',
- params='{"generation": 2, "transaction_id": '
- '"T-transid"}',
- headers={'content-type': 'application/json'})
- self.assertEqual(200, resp.status)
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual({'ok': True}, json.loads(resp.body))
- self.assertEqual(
- (2, 'T-transid'),
- self.db0._get_replica_gen_and_trans_id('other-id'))
-
- def test_sync_exchange_send(self):
- entries = {
- 10: {'id': 'doc-here', 'rev': 'replica:1', 'content':
- '{"value": "here"}', 'gen': 10, 'trans_id': 'T-sid'},
- 11: {'id': 'doc-here2', 'rev': 'replica:1', 'content':
- '{"value": "here2"}', 'gen': 11, 'trans_id': 'T-sed'}
- }
-
- gens = []
- _do_set_replica_gen_and_trans_id = \
- self.db0._do_set_replica_gen_and_trans_id
-
- def set_sync_generation_witness(other_uid, other_gen, other_trans_id):
- gens.append((other_uid, other_gen))
- _do_set_replica_gen_and_trans_id(
- other_uid, other_gen, other_trans_id)
- self.assertGetDoc(self.db0, entries[other_gen]['id'],
- entries[other_gen]['rev'],
- entries[other_gen]['content'], False)
-
- self.patch(
- self.db0, '_do_set_replica_gen_and_trans_id',
- set_sync_generation_witness)
-
- args = dict(last_known_generation=0)
- body = ("[\r\n" +
- "%s,\r\n" % json.dumps(args) +
- "%s,\r\n" % json.dumps(entries[10]) +
- "%s\r\n" % json.dumps(entries[11]) +
- "]\r\n")
- resp = self.app.post('/db0/sync-from/replica',
- params=body,
- headers={'content-type':
- 'application/x-u1db-sync-stream'})
- self.assertEqual(200, resp.status)
- self.assertEqual('application/x-u1db-sync-stream',
- resp.header('content-type'))
- bits = resp.body.split('\r\n')
- self.assertEqual('[', bits[0])
- last_trans_id = self.db0._get_transaction_log()[-1][1]
- self.assertEqual({'new_generation': 2,
- 'new_transaction_id': last_trans_id},
- json.loads(bits[1]))
- self.assertEqual(']', bits[2])
- self.assertEqual('', bits[3])
- self.assertEqual([('replica', 10), ('replica', 11)], gens)
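-
- # For reference, the body built above follows the
- # application/x-u1db-sync-stream request framing: the first element
- # carries the sync arguments and each subsequent element one document
- # record, roughly:
- #
- # [
- # {"last_known_generation": 0},
- # {"id": ..., "rev": ..., "content": ..., "gen": ..., "trans_id": ...},
- # ...
- # ]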
-
- def test_sync_exchange_send_ensure(self):
- entries = {
- 10: {'id': 'doc-here', 'rev': 'replica:1', 'content':
- '{"value": "here"}', 'gen': 10, 'trans_id': 'T-sid'},
- 11: {'id': 'doc-here2', 'rev': 'replica:1', 'content':
- '{"value": "here2"}', 'gen': 11, 'trans_id': 'T-sed'}
- }
-
- args = dict(last_known_generation=0, ensure=True)
- body = ("[\r\n" +
- "%s,\r\n" % json.dumps(args) +
- "%s,\r\n" % json.dumps(entries[10]) +
- "%s\r\n" % json.dumps(entries[11]) +
- "]\r\n")
- resp = self.app.post('/dbnew/sync-from/replica',
- params=body,
- headers={'content-type':
- 'application/x-u1db-sync-stream'})
- self.assertEqual(200, resp.status)
- self.assertEqual('application/x-u1db-sync-stream',
- resp.header('content-type'))
- bits = resp.body.split('\r\n')
- self.assertEqual('[', bits[0])
- dbnew = self.state.open_database("dbnew")
- last_trans_id = dbnew._get_transaction_log()[-1][1]
- self.assertEqual({'new_generation': 2,
- 'new_transaction_id': last_trans_id,
- 'replica_uid': dbnew._replica_uid},
- json.loads(bits[1]))
- self.assertEqual(']', bits[2])
- self.assertEqual('', bits[3])
-
- def test_sync_exchange_send_entry_too_large(self):
- self.patch(http_app.SyncResource, 'max_request_size', 20000)
- self.patch(http_app.SyncResource, 'max_entry_size', 10000)
- entries = {
- 10: {'id': 'doc-here', 'rev': 'replica:1', 'content':
- '{"value": "%s"}' % ('H' * 11000), 'gen': 10},
- }
- args = dict(last_known_generation=0)
- body = ("[\r\n" +
- "%s,\r\n" % json.dumps(args) +
- "%s\r\n" % json.dumps(entries[10]) +
- "]\r\n")
- resp = self.app.post('/db0/sync-from/replica',
- params=body,
- headers={'content-type':
- 'application/x-u1db-sync-stream'},
- expect_errors=True)
- self.assertEqual(400, resp.status)
-
- def test_sync_exchange_receive(self):
- doc = self.db0.create_doc_from_json('{"value": "there"}')
- doc2 = self.db0.create_doc_from_json('{"value": "there2"}')
- args = dict(last_known_generation=0)
- body = "[\r\n%s\r\n]" % json.dumps(args)
- resp = self.app.post('/db0/sync-from/replica',
- params=body,
- headers={'content-type':
- 'application/x-u1db-sync-stream'})
- self.assertEqual(200, resp.status)
- self.assertEqual('application/x-u1db-sync-stream',
- resp.header('content-type'))
- parts = resp.body.splitlines()
- self.assertEqual(5, len(parts))
- self.assertEqual('[', parts[0])
- last_trans_id = self.db0._get_transaction_log()[-1][1]
- self.assertEqual({'new_generation': 2,
- 'new_transaction_id': last_trans_id},
- json.loads(parts[1].rstrip(",")))
- part2 = json.loads(parts[2].rstrip(","))
- self.assertTrue(part2['trans_id'].startswith('T-'))
- self.assertEqual('{"value": "there"}', part2['content'])
- self.assertEqual(doc.rev, part2['rev'])
- self.assertEqual(doc.doc_id, part2['id'])
- self.assertEqual(1, part2['gen'])
- part3 = json.loads(parts[3].rstrip(","))
- self.assertTrue(part3['trans_id'].startswith('T-'))
- self.assertEqual('{"value": "there2"}', part3['content'])
- self.assertEqual(doc2.rev, part3['rev'])
- self.assertEqual(doc2.doc_id, part3['id'])
- self.assertEqual(2, part3['gen'])
- self.assertEqual(']', parts[4])
-
- def test_sync_exchange_error_in_stream(self):
- args = dict(last_known_generation=0)
- body = "[\r\n%s\r\n]" % json.dumps(args)
-
- def boom(self, return_doc_cb):
- raise errors.Unavailable
-
- self.patch(sync.SyncExchange, 'return_docs',
- boom)
- resp = self.app.post('/db0/sync-from/replica',
- params=body,
- headers={'content-type':
- 'application/x-u1db-sync-stream'})
- self.assertEqual(200, resp.status)
- self.assertEqual('application/x-u1db-sync-stream',
- resp.header('content-type'))
- parts = resp.body.splitlines()
- self.assertEqual(3, len(parts))
- self.assertEqual('[', parts[0])
- self.assertEqual({'new_generation': 0, 'new_transaction_id': ''},
- json.loads(parts[1].rstrip(",")))
- self.assertEqual({'error': 'unavailable'}, json.loads(parts[2]))
-
-
-class TestRequestHooks(tests.TestCase):
-
- def setUp(self):
- super(TestRequestHooks, self).setUp()
- self.state = tests.ServerStateForTests()
- self.http_app = http_app.HTTPApp(self.state)
- self.app = paste.fixture.TestApp(self.http_app)
- self.db0 = self.state._create_database('db0')
-
- def test_begin_and_done(self):
- calls = []
-
- def begin(environ):
- self.assertTrue('PATH_INFO' in environ)
- calls.append('begin')
-
- def done(environ):
- self.assertTrue('PATH_INFO' in environ)
- calls.append('done')
-
- self.http_app.request_begin = begin
- self.http_app.request_done = done
-
- doc = self.db0.create_doc_from_json('{"x": 1}', doc_id='doc1')
- self.app.get('/db0/doc/%s' % doc.doc_id)
-
- self.assertEqual(['begin', 'done'], calls)
-
- def test_bad_request(self):
- calls = []
-
- def begin(environ):
- self.assertTrue('PATH_INFO' in environ)
- calls.append('begin')
-
- def bad_request(environ):
- self.assertTrue('PATH_INFO' in environ)
- calls.append('bad-request')
-
- self.http_app.request_begin = begin
- self.http_app.request_bad_request = bad_request
- # shouldn't be called
- self.http_app.request_done = lambda env: 1 / 0
-
- resp = self.app.put('/db0/foo/doc1', params='{"x": 1}',
- headers={'content-type': 'application/json'},
- expect_errors=True)
- self.assertEqual(400, resp.status)
- self.assertEqual(['begin', 'bad-request'], calls)
-
-
-class TestHTTPErrors(tests.TestCase):
-
- def test_wire_description_to_status(self):
- self.assertNotIn("error", http_errors.wire_description_to_status)
-
-
-class TestHTTPAppErrorHandling(tests.TestCase):
-
- def setUp(self):
- super(TestHTTPAppErrorHandling, self).setUp()
- self.exc = None
- self.state = tests.ServerStateForTests()
-
- class ErroringResource(object):
-
- def post(_, args, content):
- raise self.exc
-
- def lookup_resource(environ, responder):
- return ErroringResource()
-
- self.http_app = http_app.HTTPApp(self.state)
- self.http_app._lookup_resource = lookup_resource
- self.app = paste.fixture.TestApp(self.http_app)
-
- def test_RevisionConflict_etc(self):
- self.exc = errors.RevisionConflict()
- resp = self.app.post('/req', params='{}',
- headers={'content-type': 'application/json'},
- expect_errors=True)
- self.assertEqual(409, resp.status)
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual({"error": "revision conflict"},
- json.loads(resp.body))
-
- def test_Unavailable(self):
- self.exc = errors.Unavailable
- resp = self.app.post('/req', params='{}',
- headers={'content-type': 'application/json'},
- expect_errors=True)
- self.assertEqual(503, resp.status)
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual({"error": "unavailable"},
- json.loads(resp.body))
-
- def test_generic_u1db_errors(self):
- self.exc = errors.U1DBError()
- resp = self.app.post('/req', params='{}',
- headers={'content-type': 'application/json'},
- expect_errors=True)
- self.assertEqual(500, resp.status)
- self.assertEqual('application/json', resp.header('content-type'))
- self.assertEqual({"error": "error"},
- json.loads(resp.body))
-
- def test_generic_u1db_errors_hooks(self):
- calls = []
-
- def begin(environ):
- self.assertTrue('PATH_INFO' in environ)
- calls.append('begin')
-
- def u1db_error(environ, exc):
- self.assertTrue('PATH_INFO' in environ)
- calls.append(('error', exc))
-
- self.http_app.request_begin = begin
- self.http_app.request_u1db_error = u1db_error
- # shouldn't be called
- self.http_app.request_done = lambda env: 1 / 0
-
- self.exc = errors.U1DBError()
- resp = self.app.post('/req', params='{}',
- headers={'content-type': 'application/json'},
- expect_errors=True)
- self.assertEqual(500, resp.status)
- self.assertEqual(['begin', ('error', self.exc)], calls)
-
- def test_failure(self):
- class Failure(Exception):
- pass
- self.exc = Failure()
- self.assertRaises(Failure, self.app.post, '/req', params='{}',
- headers={'content-type': 'application/json'})
-
- def test_failure_hooks(self):
- class Failure(Exception):
- pass
- calls = []
-
- def begin(environ):
- calls.append('begin')
-
- def failed(environ):
- self.assertTrue('PATH_INFO' in environ)
- calls.append(('failed', sys.exc_info()))
-
- self.http_app.request_begin = begin
- self.http_app.request_failed = failed
- # shouldn't be called
- self.http_app.request_done = lambda env: 1 / 0
-
- self.exc = Failure()
- self.assertRaises(Failure, self.app.post, '/req', params='{}',
- headers={'content-type': 'application/json'})
-
- self.assertEqual(2, len(calls))
- self.assertEqual('begin', calls[0])
- marker, (exc_type, exc, tb) = calls[1]
- self.assertEqual('failed', marker)
- self.assertEqual(self.exc, exc)
-
-
-class TestPluggableSyncExchange(tests.TestCase):
-
- def setUp(self):
- super(TestPluggableSyncExchange, self).setUp()
- self.state = tests.ServerStateForTests()
- self.state.ensure_database('foo')
-
- def test_plugging(self):
-
- class MySyncExchange(object):
- def __init__(self, db, source_replica_uid, last_known_generation):
- pass
-
- class MySyncResource(http_app.SyncResource):
- sync_exchange_class = MySyncExchange
-
- sync_res = MySyncResource('foo', 'src', self.state, None)
- sync_res.post_args(
- {'last_known_generation': 0, 'last_known_trans_id': None}, '{}')
- self.assertIsInstance(sync_res.sync_exch, MySyncExchange)
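-
- # post_args is where SyncResource instantiates its
- # sync_exchange_class, so overriding that single class attribute (as
- # above) is enough to plug in a custom exchange implementation.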
diff --git a/src/leap/soledad/tests/u1db_tests/test_http_client.py b/src/leap/soledad/tests/u1db_tests/test_http_client.py
deleted file mode 100644
index 42e98461..00000000
--- a/src/leap/soledad/tests/u1db_tests/test_http_client.py
+++ /dev/null
@@ -1,363 +0,0 @@
-# Copyright 2011-2012 Canonical Ltd.
-#
-# This file is part of u1db.
-#
-# u1db is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3
-# as published by the Free Software Foundation.
-#
-# u1db is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with u1db. If not, see <http://www.gnu.org/licenses/>.
-
-"""Tests for HTTPDatabase"""
-
-from oauth import oauth
-try:
- import simplejson as json
-except ImportError:
- import json # noqa
-
-from u1db import (
- errors,
-)
-
-from leap.soledad.tests import u1db_tests as tests
-
-from u1db.remote import (
- http_client,
-)
-
-
-class TestEncoder(tests.TestCase):
-
- def test_encode_string(self):
- self.assertEqual("foo", http_client._encode_query_parameter("foo"))
-
- def test_encode_true(self):
- self.assertEqual("true", http_client._encode_query_parameter(True))
-
- def test_encode_false(self):
- self.assertEqual("false", http_client._encode_query_parameter(False))
-
-
-class TestHTTPClientBase(tests.TestCaseWithServer):
-
- def setUp(self):
- super(TestHTTPClientBase, self).setUp()
- self.errors = 0
-
- def app(self, environ, start_response):
- if environ['PATH_INFO'].endswith('echo'):
- start_response("200 OK", [('Content-Type', 'application/json')])
- ret = {}
- for name in ('REQUEST_METHOD', 'PATH_INFO', 'QUERY_STRING'):
- ret[name] = environ[name]
- if environ['REQUEST_METHOD'] in ('PUT', 'POST'):
- ret['CONTENT_TYPE'] = environ['CONTENT_TYPE']
- content_length = int(environ['CONTENT_LENGTH'])
- ret['body'] = environ['wsgi.input'].read(content_length)
- return [json.dumps(ret)]
- elif environ['PATH_INFO'].endswith('error_then_accept'):
- if self.errors >= 3:
- start_response(
- "200 OK", [('Content-Type', 'application/json')])
- ret = {}
- for name in ('REQUEST_METHOD', 'PATH_INFO', 'QUERY_STRING'):
- ret[name] = environ[name]
- if environ['REQUEST_METHOD'] in ('PUT', 'POST'):
- ret['CONTENT_TYPE'] = environ['CONTENT_TYPE']
- content_length = int(environ['CONTENT_LENGTH'])
- ret['body'] = '{"oki": "doki"}'
- return [json.dumps(ret)]
- self.errors += 1
- content_length = int(environ['CONTENT_LENGTH'])
- error = json.loads(
- environ['wsgi.input'].read(content_length))
- response = error['response']
- # In debug mode, wsgiref has an assertion that the status parameter
- # is a 'str' object. However, error['status'] returns a unicode
- # object.
- status = str(error['status'])
- if isinstance(response, unicode):
- response = str(response)
- if isinstance(response, str):
- start_response(status, [('Content-Type', 'text/plain')])
- return [str(response)]
- else:
- start_response(status, [('Content-Type', 'application/json')])
- return [json.dumps(response)]
- elif environ['PATH_INFO'].endswith('error'):
- self.errors += 1
- content_length = int(environ['CONTENT_LENGTH'])
- error = json.loads(
- environ['wsgi.input'].read(content_length))
- response = error['response']
- # In debug mode, wsgiref has an assertion that the status parameter
- # is a 'str' object. However, error['status'] returns a unicode
- # object.
- status = str(error['status'])
- if isinstance(response, unicode):
- response = str(response)
- if isinstance(response, str):
- start_response(status, [('Content-Type', 'text/plain')])
- return [str(response)]
- else:
- start_response(status, [('Content-Type', 'application/json')])
- return [json.dumps(response)]
- elif '/oauth' in environ['PATH_INFO']:
- base_url = self.getURL('').rstrip('/')
- oauth_req = oauth.OAuthRequest.from_request(
- http_method=environ['REQUEST_METHOD'],
- http_url=base_url + environ['PATH_INFO'],
- headers={'Authorization': environ['HTTP_AUTHORIZATION']},
- query_string=environ['QUERY_STRING']
- )
- oauth_server = oauth.OAuthServer(tests.testingOAuthStore)
- oauth_server.add_signature_method(tests.sign_meth_HMAC_SHA1)
- try:
- consumer, token, params = oauth_server.verify_request(
- oauth_req)
- except oauth.OAuthError, e:
- start_response("401 Unauthorized",
- [('Content-Type', 'application/json')])
- return [json.dumps({"error": "unauthorized",
- "message": e.message})]
- start_response("200 OK", [('Content-Type', 'application/json')])
- return [json.dumps([environ['PATH_INFO'], token.key, params])]
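-
- # In short, this fake WSGI app dispatches on PATH_INFO: '*echo'
- # mirrors the request back as JSON, '*error' replays a
- # client-supplied error response, '*error_then_accept' fails three
- # times and then succeeds, and '/oauth' paths verify the request
- # signature against the testing OAuth store.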
-
- def make_app(self):
- return self.app
-
- def getClient(self, **kwds):
- self.startServer()
- return http_client.HTTPClientBase(self.getURL('dbase'), **kwds)
-
- def test_construct(self):
- self.startServer()
- url = self.getURL()
- cli = http_client.HTTPClientBase(url)
- self.assertEqual(url, cli._url.geturl())
- self.assertIs(None, cli._conn)
-
- def test_parse_url(self):
- cli = http_client.HTTPClientBase(
- '%s://127.0.0.1:12345/' % self.url_scheme)
- self.assertEqual(self.url_scheme, cli._url.scheme)
- self.assertEqual('127.0.0.1', cli._url.hostname)
- self.assertEqual(12345, cli._url.port)
- self.assertEqual('/', cli._url.path)
-
- def test__ensure_connection(self):
- cli = self.getClient()
- self.assertIs(None, cli._conn)
- cli._ensure_connection()
- self.assertIsNot(None, cli._conn)
- conn = cli._conn
- cli._ensure_connection()
- self.assertIs(conn, cli._conn)
-
- def test_close(self):
- cli = self.getClient()
- cli._ensure_connection()
- cli.close()
- self.assertIs(None, cli._conn)
-
- def test__request(self):
- cli = self.getClient()
- res, headers = cli._request('PUT', ['echo'], {}, {})
- self.assertEqual({'CONTENT_TYPE': 'application/json',
- 'PATH_INFO': '/dbase/echo',
- 'QUERY_STRING': '',
- 'body': '{}',
- 'REQUEST_METHOD': 'PUT'}, json.loads(res))
-
- res, headers = cli._request('GET', ['doc', 'echo'], {'a': 1})
- self.assertEqual({'PATH_INFO': '/dbase/doc/echo',
- 'QUERY_STRING': 'a=1',
- 'REQUEST_METHOD': 'GET'}, json.loads(res))
-
- res, headers = cli._request('GET', ['doc', '%FFFF', 'echo'], {'a': 1})
- self.assertEqual({'PATH_INFO': '/dbase/doc/%FFFF/echo',
- 'QUERY_STRING': 'a=1',
- 'REQUEST_METHOD': 'GET'}, json.loads(res))
-
- res, headers = cli._request('POST', ['echo'], {'b': 2}, 'Body',
- 'application/x-test')
- self.assertEqual({'CONTENT_TYPE': 'application/x-test',
- 'PATH_INFO': '/dbase/echo',
- 'QUERY_STRING': 'b=2',
- 'body': 'Body',
- 'REQUEST_METHOD': 'POST'}, json.loads(res))
-
- def test__request_json(self):
- cli = self.getClient()
- res, headers = cli._request_json(
- 'POST', ['echo'], {'b': 2}, {'a': 'x'})
- self.assertEqual('application/json', headers['content-type'])
- self.assertEqual({'CONTENT_TYPE': 'application/json',
- 'PATH_INFO': '/dbase/echo',
- 'QUERY_STRING': 'b=2',
- 'body': '{"a": "x"}',
- 'REQUEST_METHOD': 'POST'}, res)
-
- def test_unspecified_http_error(self):
- cli = self.getClient()
- self.assertRaises(errors.HTTPError,
- cli._request_json, 'POST', ['error'], {},
- {'status': "500 Internal Error",
- 'response': "Crash."})
- try:
- cli._request_json('POST', ['error'], {},
- {'status': "500 Internal Error",
- 'response': "Fail."})
- except errors.HTTPError, e:
- pass
-
- self.assertEqual(500, e.status)
- self.assertEqual("Fail.", e.message)
- self.assertTrue("content-type" in e.headers)
-
- def test_revision_conflict(self):
- cli = self.getClient()
- self.assertRaises(errors.RevisionConflict,
- cli._request_json, 'POST', ['error'], {},
- {'status': "409 Conflict",
- 'response': {"error": "revision conflict"}})
-
- def test_unavailable_proper(self):
- cli = self.getClient()
- cli._delays = (0, 0, 0, 0, 0)
- self.assertRaises(errors.Unavailable,
- cli._request_json, 'POST', ['error'], {},
- {'status': "503 Service Unavailable",
- 'response': {"error": "unavailable"}})
- self.assertEqual(5, self.errors)
-
- def test_unavailable_then_available(self):
- cli = self.getClient()
- cli._delays = (0, 0, 0, 0, 0)
- res, headers = cli._request_json(
- 'POST', ['error_then_accept'], {'b': 2},
- {'status': "503 Service Unavailable",
- 'response': {"error": "unavailable"}})
- self.assertEqual('application/json', headers['content-type'])
- self.assertEqual({'CONTENT_TYPE': 'application/json',
- 'PATH_INFO': '/dbase/error_then_accept',
- 'QUERY_STRING': 'b=2',
- 'body': '{"oki": "doki"}',
- 'REQUEST_METHOD': 'POST'}, res)
- self.assertEqual(3, self.errors)
-
- def test_unavailable_random_source(self):
- cli = self.getClient()
- cli._delays = (0, 0, 0, 0, 0)
- try:
- cli._request_json('POST', ['error'], {},
- {'status': "503 Service Unavailable",
- 'response': "random unavailable."})
- except errors.Unavailable, e:
- pass
-
- self.assertEqual(503, e.status)
- self.assertEqual("random unavailable.", e.message)
- self.assertTrue("content-type" in e.headers)
- self.assertEqual(5, self.errors)
-
- def test_document_too_big(self):
- cli = self.getClient()
- self.assertRaises(errors.DocumentTooBig,
- cli._request_json, 'POST', ['error'], {},
- {'status': "403 Forbidden",
- 'response': {"error": "document too big"}})
-
- def test_user_quota_exceeded(self):
- cli = self.getClient()
- self.assertRaises(errors.UserQuotaExceeded,
- cli._request_json, 'POST', ['error'], {},
- {'status': "403 Forbidden",
- 'response': {"error": "user quota exceeded"}})
-
- def test_user_needs_subscription(self):
- cli = self.getClient()
- self.assertRaises(errors.SubscriptionNeeded,
- cli._request_json, 'POST', ['error'], {},
- {'status': "403 Forbidden",
- 'response': {"error": "user needs subscription"}})
-
- def test_generic_u1db_error(self):
- cli = self.getClient()
- self.assertRaises(errors.U1DBError,
- cli._request_json, 'POST', ['error'], {},
- {'status': "400 Bad Request",
- 'response': {"error": "error"}})
- try:
- cli._request_json('POST', ['error'], {},
- {'status': "400 Bad Request",
- 'response': {"error": "error"}})
- except errors.U1DBError, e:
- pass
- self.assertIs(e.__class__, errors.U1DBError)
-
- def test_unspecified_bad_request(self):
- cli = self.getClient()
- self.assertRaises(errors.HTTPError,
- cli._request_json, 'POST', ['error'], {},
- {'status': "400 Bad Request",
- 'response': "<Bad Request>"})
- try:
- cli._request_json('POST', ['error'], {},
- {'status': "400 Bad Request",
- 'response': "<Bad Request>"})
- except errors.HTTPError, e:
- pass
-
- self.assertEqual(400, e.status)
- self.assertEqual("<Bad Request>", e.message)
- self.assertTrue("content-type" in e.headers)
-
- def test_oauth(self):
- cli = self.getClient()
- cli.set_oauth_credentials(tests.consumer1.key, tests.consumer1.secret,
- tests.token1.key, tests.token1.secret)
- params = {'x': u'\xf0', 'y': "foo"}
- res, headers = cli._request('GET', ['doc', 'oauth'], params)
- self.assertEqual(
- ['/dbase/doc/oauth', tests.token1.key, params], json.loads(res))
-
- # oauth does its own internal quoting
- params = {'x': u'\xf0', 'y': "foo"}
- res, headers = cli._request('GET', ['doc', 'oauth', 'foo bar'], params)
- self.assertEqual(
- ['/dbase/doc/oauth/foo bar', tests.token1.key, params],
- json.loads(res))
-
- def test_oauth_ctr_creds(self):
- cli = self.getClient(creds={'oauth': {
- 'consumer_key': tests.consumer1.key,
- 'consumer_secret': tests.consumer1.secret,
- 'token_key': tests.token1.key,
- 'token_secret': tests.token1.secret,
- }})
- params = {'x': u'\xf0', 'y': "foo"}
- res, headers = cli._request('GET', ['doc', 'oauth'], params)
- self.assertEqual(
- ['/dbase/doc/oauth', tests.token1.key, params], json.loads(res))
-
- def test_unknown_creds(self):
- self.assertRaises(errors.UnknownAuthMethod,
- self.getClient, creds={'foo': {}})
- self.assertRaises(errors.UnknownAuthMethod,
- self.getClient, creds={})
-
- def test_oauth_Unauthorized(self):
- cli = self.getClient()
- cli.set_oauth_credentials(tests.consumer1.key, tests.consumer1.secret,
- tests.token1.key, "WRONG")
- params = {'y': 'foo'}
- self.assertRaises(errors.Unauthorized, cli._request, 'GET',
- ['doc', 'oauth'], params)
diff --git a/src/leap/soledad/tests/u1db_tests/test_http_database.py b/src/leap/soledad/tests/u1db_tests/test_http_database.py
deleted file mode 100644
index f21e6da1..00000000
--- a/src/leap/soledad/tests/u1db_tests/test_http_database.py
+++ /dev/null
@@ -1,260 +0,0 @@
-# Copyright 2011 Canonical Ltd.
-#
-# This file is part of u1db.
-#
-# u1db is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3
-# as published by the Free Software Foundation.
-#
-# u1db is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with u1db. If not, see <http://www.gnu.org/licenses/>.
-
-"""Tests for HTTPDatabase"""
-
-import inspect
-try:
- import simplejson as json
-except ImportError:
- import json # noqa
-
-from u1db import (
- errors,
- Document,
-)
-
-from leap.soledad.tests import u1db_tests as tests
-
-from u1db.remote import (
- http_database,
- http_target,
-)
-from leap.soledad.tests.u1db_tests.test_remote_sync_target import (
- make_http_app,
-)
-
-
-class TestHTTPDatabaseSimpleOperations(tests.TestCase):
-
- def setUp(self):
- super(TestHTTPDatabaseSimpleOperations, self).setUp()
- self.db = http_database.HTTPDatabase('dbase')
- self.db._conn = object() # crash if used
- self.got = None
- self.response_val = None
-
- def _request(method, url_parts, params=None, body=None,
- content_type=None):
- self.got = method, url_parts, params, body, content_type
- if isinstance(self.response_val, Exception):
- raise self.response_val
- return self.response_val
-
- def _request_json(method, url_parts, params=None, body=None,
- content_type=None):
- self.got = method, url_parts, params, body, content_type
- if isinstance(self.response_val, Exception):
- raise self.response_val
- return self.response_val
-
- self.db._request = _request
- self.db._request_json = _request_json
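-
- # With the transport stubbed out like this, each test below drives
- # the public HTTPDatabase API and then asserts on the captured
- # (method, url_parts, params, body, content_type) tuple, e.g.:
- #
- # self.response_val = {'ok': True}, {}
- # self.db._ensure()
- # self.assertEqual(('PUT', [], {}, {}, None), self.got)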
-
- def test__sanity_same_signature(self):
- my_request_sig = inspect.getargspec(self.db._request)
- my_request_sig = (['self'] + my_request_sig[0],) + my_request_sig[1:]
- self.assertEqual(
- my_request_sig,
- inspect.getargspec(http_database.HTTPDatabase._request))
- my_request_json_sig = inspect.getargspec(self.db._request_json)
- my_request_json_sig = ((['self'] + my_request_json_sig[0],) +
- my_request_json_sig[1:])
- self.assertEqual(
- my_request_json_sig,
- inspect.getargspec(http_database.HTTPDatabase._request_json))
-
- def test__ensure(self):
- self.response_val = {'ok': True}, {}
- self.db._ensure()
- self.assertEqual(('PUT', [], {}, {}, None), self.got)
-
- def test__delete(self):
- self.response_val = {'ok': True}, {}
- self.db._delete()
- self.assertEqual(('DELETE', [], {}, {}, None), self.got)
-
- def test__check(self):
- self.response_val = {}, {}
- res = self.db._check()
- self.assertEqual({}, res)
- self.assertEqual(('GET', [], None, None, None), self.got)
-
- def test_put_doc(self):
- self.response_val = {'rev': 'doc-rev'}, {}
- doc = Document('doc-id', None, '{"v": 1}')
- res = self.db.put_doc(doc)
- self.assertEqual('doc-rev', res)
- self.assertEqual('doc-rev', doc.rev)
- self.assertEqual(('PUT', ['doc', 'doc-id'], {},
- '{"v": 1}', 'application/json'), self.got)
-
- self.response_val = {'rev': 'doc-rev-2'}, {}
- doc.content = {"v": 2}
- res = self.db.put_doc(doc)
- self.assertEqual('doc-rev-2', res)
- self.assertEqual('doc-rev-2', doc.rev)
- self.assertEqual(('PUT', ['doc', 'doc-id'], {'old_rev': 'doc-rev'},
- '{"v": 2}', 'application/json'), self.got)
-
- def test_get_doc(self):
- self.response_val = '{"v": 2}', {'x-u1db-rev': 'doc-rev',
- 'x-u1db-has-conflicts': 'false'}
- self.assertGetDoc(self.db, 'doc-id', 'doc-rev', '{"v": 2}', False)
- self.assertEqual(
- ('GET', ['doc', 'doc-id'], {'include_deleted': False}, None, None),
- self.got)
-
- def test_get_doc_non_existing(self):
- self.response_val = errors.DocumentDoesNotExist()
- self.assertIs(None, self.db.get_doc('not-there'))
- self.assertEqual(
- ('GET', ['doc', 'not-there'], {'include_deleted': False}, None,
- None), self.got)
-
- def test_get_doc_deleted(self):
- self.response_val = errors.DocumentDoesNotExist()
- self.assertIs(None, self.db.get_doc('deleted'))
- self.assertEqual(
- ('GET', ['doc', 'deleted'], {'include_deleted': False}, None,
- None), self.got)
-
- def test_get_doc_deleted_include_deleted(self):
- self.response_val = errors.HTTPError(404,
- json.dumps(
- {"error": errors.DOCUMENT_DELETED}
- ),
- {'x-u1db-rev': 'doc-rev-gone',
- 'x-u1db-has-conflicts': 'false'})
- doc = self.db.get_doc('deleted', include_deleted=True)
- self.assertEqual('deleted', doc.doc_id)
- self.assertEqual('doc-rev-gone', doc.rev)
- self.assertIs(None, doc.content)
- self.assertEqual(
- ('GET', ['doc', 'deleted'], {'include_deleted': True}, None, None),
- self.got)
-
- def test_get_doc_pass_through_errors(self):
- self.response_val = errors.HTTPError(500, 'Crash.')
- self.assertRaises(errors.HTTPError,
- self.db.get_doc, 'something-something')
-
- def test_create_doc_with_id(self):
- self.response_val = {'rev': 'doc-rev'}, {}
- new_doc = self.db.create_doc_from_json('{"v": 1}', doc_id='doc-id')
- self.assertEqual('doc-rev', new_doc.rev)
- self.assertEqual('doc-id', new_doc.doc_id)
- self.assertEqual('{"v": 1}', new_doc.get_json())
- self.assertEqual(('PUT', ['doc', 'doc-id'], {},
- '{"v": 1}', 'application/json'), self.got)
-
- def test_create_doc_without_id(self):
- self.response_val = {'rev': 'doc-rev-2'}, {}
- new_doc = self.db.create_doc_from_json('{"v": 3}')
- self.assertEqual('D-', new_doc.doc_id[:2])
- self.assertEqual('doc-rev-2', new_doc.rev)
- self.assertEqual('{"v": 3}', new_doc.get_json())
- self.assertEqual(('PUT', ['doc', new_doc.doc_id], {},
- '{"v": 3}', 'application/json'), self.got)
-
- def test_delete_doc(self):
- self.response_val = {'rev': 'doc-rev-gone'}, {}
- doc = Document('doc-id', 'doc-rev', None)
- self.db.delete_doc(doc)
- self.assertEqual('doc-rev-gone', doc.rev)
- self.assertEqual(('DELETE', ['doc', 'doc-id'], {'old_rev': 'doc-rev'},
- None, None), self.got)
-
- def test_get_sync_target(self):
- st = self.db.get_sync_target()
- self.assertIsInstance(st, http_target.HTTPSyncTarget)
- self.assertEqual(st._url, self.db._url)
-
- def test_get_sync_target_inherits_oauth_credentials(self):
- self.db.set_oauth_credentials(tests.consumer1.key,
- tests.consumer1.secret,
- tests.token1.key, tests.token1.secret)
- st = self.db.get_sync_target()
- self.assertEqual(self.db._creds, st._creds)
-
-
-class TestHTTPDatabaseCtrWithCreds(tests.TestCase):
-
- def test_ctr_with_creds(self):
- db1 = http_database.HTTPDatabase('http://dbs/db', creds={'oauth': {
- 'consumer_key': tests.consumer1.key,
- 'consumer_secret': tests.consumer1.secret,
- 'token_key': tests.token1.key,
- 'token_secret': tests.token1.secret
- }})
- self.assertIn('oauth', db1._creds)
-
-
-class TestHTTPDatabaseIntegration(tests.TestCaseWithServer):
-
- make_app_with_state = staticmethod(make_http_app)
-
- def setUp(self):
- super(TestHTTPDatabaseIntegration, self).setUp()
- self.startServer()
-
- def test_non_existing_db(self):
- db = http_database.HTTPDatabase(self.getURL('not-there'))
- self.assertRaises(errors.DatabaseDoesNotExist, db.get_doc, 'doc1')
-
- def test__ensure(self):
- db = http_database.HTTPDatabase(self.getURL('new'))
- db._ensure()
- self.assertIs(None, db.get_doc('doc1'))
-
- def test__delete(self):
- self.request_state._create_database('db0')
- db = http_database.HTTPDatabase(self.getURL('db0'))
- db._delete()
- self.assertRaises(errors.DatabaseDoesNotExist,
- self.request_state.check_database, 'db0')
-
- def test_open_database_existing(self):
- self.request_state._create_database('db0')
- db = http_database.HTTPDatabase.open_database(self.getURL('db0'),
- create=False)
- self.assertIs(None, db.get_doc('doc1'))
-
- def test_open_database_non_existing(self):
- self.assertRaises(errors.DatabaseDoesNotExist,
- http_database.HTTPDatabase.open_database,
- self.getURL('not-there'),
- create=False)
-
- def test_open_database_create(self):
- db = http_database.HTTPDatabase.open_database(self.getURL('new'),
- create=True)
- self.assertIs(None, db.get_doc('doc1'))
-
- def test_delete_database_existing(self):
- self.request_state._create_database('db0')
- http_database.HTTPDatabase.delete_database(self.getURL('db0'))
- self.assertRaises(errors.DatabaseDoesNotExist,
- self.request_state.check_database, 'db0')
-
- def test_doc_ids_needing_quoting(self):
- db0 = self.request_state._create_database('db0')
- db = http_database.HTTPDatabase.open_database(self.getURL('db0'),
- create=False)
- doc = Document('%fff', None, '{}')
- db.put_doc(doc)
- self.assertGetDoc(db0, '%fff', doc.rev, '{}', False)
- self.assertGetDoc(db, '%fff', doc.rev, '{}', False)
diff --git a/src/leap/soledad/tests/u1db_tests/test_https.py b/src/leap/soledad/tests/u1db_tests/test_https.py
deleted file mode 100644
index 62180f8c..00000000
--- a/src/leap/soledad/tests/u1db_tests/test_https.py
+++ /dev/null
@@ -1,124 +0,0 @@
-"""Test support for client-side https support."""
-
-import os
-import ssl
-import sys
-
-from paste import httpserver
-
-from u1db.remote import (
- http_client,
- http_target,
-)
-
-from leap import soledad
-from leap.soledad.tests import u1db_tests as tests
-from leap.soledad.tests.u1db_tests.test_remote_sync_target import (
- make_oauth_http_app,
-)
-
-
-def https_server_def():
- def make_server(host_port, application):
- from OpenSSL import SSL
- cert_file = os.path.join(os.path.dirname(__file__), 'testing-certs',
- 'testing.cert')
- key_file = os.path.join(os.path.dirname(__file__), 'testing-certs',
- 'testing.key')
- ssl_context = SSL.Context(SSL.SSLv23_METHOD)
- ssl_context.use_privatekey_file(key_file)
- ssl_context.use_certificate_chain_file(cert_file)
- srv = httpserver.WSGIServerBase(application, host_port,
- httpserver.WSGIHandler,
- ssl_context=ssl_context
- )
-
- def shutdown_request(req):
- req.shutdown()
- srv.close_request(req)
-
- srv.shutdown_request = shutdown_request
- application.base_url = "https://localhost:%s" % srv.server_address[1]
- return srv
- return make_server, "shutdown", "https"
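-
-# A note on the helper above: it wraps paste's WSGIServerBase in a
-# pyOpenSSL context built from the self-signed testing-certs pair, so
-# the client-side verification tests below have a real TLS endpoint to
-# talk to.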
-
-
-def oauth_https_sync_target(test, host, path):
- _, port = test.server.server_address
- st = http_target.HTTPSyncTarget('https://%s:%d/~/%s' % (host, port, path))
- st.set_oauth_credentials(tests.consumer1.key, tests.consumer1.secret,
- tests.token1.key, tests.token1.secret)
- return st
-
-
-class TestHttpSyncTargetHttpsSupport(tests.TestCaseWithServer):
-
- scenarios = [
- ('oauth_https', {'server_def': https_server_def,
- 'make_app_with_state': make_oauth_http_app,
- 'make_document_for_test':
- tests.make_document_for_test,
- 'sync_target': oauth_https_sync_target
- }),
- ]
-
- def setUp(self):
- try:
- import OpenSSL # noqa
- except ImportError:
- self.skipTest("Requires pyOpenSSL")
- self.cacert_pem = os.path.join(os.path.dirname(__file__),
- 'testing-certs', 'cacert.pem')
- # The default u1db http_client class for doing HTTPS only does
- # verified HTTPS on linux. Because of this, soledad replaces that
- # class with one that does verified HTTPS independently of the
- # platform. To stay compatible with the default u1db tests, we undo
- # that replacement here.
- http_client._VerifiedHTTPSConnection = \
- soledad.old__VerifiedHTTPSConnection
- super(TestHttpSyncTargetHttpsSupport, self).setUp()
-
- def getSyncTarget(self, host, path=None):
- if self.server is None:
- self.startServer()
- return self.sync_target(self, host, path)
-
- def test_working(self):
- self.startServer()
- db = self.request_state._create_database('test')
- self.patch(http_client, 'CA_CERTS', self.cacert_pem)
- remote_target = self.getSyncTarget('localhost', 'test')
- remote_target.record_sync_info('other-id', 2, 'T-id')
- self.assertEqual(
- (2, 'T-id'), db._get_replica_gen_and_trans_id('other-id'))
-
- def test_cannot_verify_cert(self):
- if not sys.platform.startswith('linux'):
- self.skipTest(
- "XXX certificate verification happens on linux only for now")
- self.startServer()
- # don't print expected traceback server-side
- self.server.handle_error = lambda req, cli_addr: None
- self.request_state._create_database('test')
- remote_target = self.getSyncTarget('localhost', 'test')
- try:
- remote_target.record_sync_info('other-id', 2, 'T-id')
- except ssl.SSLError, e:
- self.assertIn("certificate verify failed", str(e))
- else:
- self.fail("certificate verification should have failed.")
-
- def test_host_mismatch(self):
- if not sys.platform.startswith('linux'):
- self.skipTest(
- "XXX certificate verification happens on linux only for now")
- self.startServer()
- self.request_state._create_database('test')
- self.patch(http_client, 'CA_CERTS', self.cacert_pem)
- remote_target = self.getSyncTarget('127.0.0.1', 'test')
- self.assertRaises(
- http_client.CertificateError, remote_target.record_sync_info,
- 'other-id', 2, 'T-id')
-
-
-load_tests = tests.load_with_scenarios
diff --git a/src/leap/soledad/tests/u1db_tests/test_open.py b/src/leap/soledad/tests/u1db_tests/test_open.py
deleted file mode 100644
index 0ff307e8..00000000
--- a/src/leap/soledad/tests/u1db_tests/test_open.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright 2011 Canonical Ltd.
-#
-# This file is part of u1db.
-#
-# u1db is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3
-# as published by the Free Software Foundation.
-#
-# u1db is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with u1db. If not, see <http://www.gnu.org/licenses/>.
-
-"""Test u1db.open"""
-
-import os
-
-from u1db import (
- errors,
- open as u1db_open,
-)
-from leap.soledad.tests import u1db_tests as tests
-from u1db.backends import sqlite_backend
-from leap.soledad.tests.u1db_tests.test_backends import TestAlternativeDocument
-
-
-class TestU1DBOpen(tests.TestCase):
-
- def setUp(self):
- super(TestU1DBOpen, self).setUp()
- tmpdir = self.createTempDir()
- self.db_path = tmpdir + '/test.db'
-
- def test_open_no_create(self):
- self.assertRaises(errors.DatabaseDoesNotExist,
- u1db_open, self.db_path, create=False)
- self.assertFalse(os.path.exists(self.db_path))
-
- def test_open_create(self):
- db = u1db_open(self.db_path, create=True)
- self.addCleanup(db.close)
- self.assertTrue(os.path.exists(self.db_path))
- self.assertIsInstance(db, sqlite_backend.SQLiteDatabase)
-
- def test_open_with_factory(self):
- db = u1db_open(self.db_path, create=True,
- document_factory=TestAlternativeDocument)
- self.addCleanup(db.close)
- self.assertEqual(TestAlternativeDocument, db._factory)
-
- def test_open_existing(self):
- db = sqlite_backend.SQLitePartialExpandDatabase(self.db_path)
- self.addCleanup(db.close)
- doc = db.create_doc_from_json(tests.simple_doc)
- # Even though create=True, we shouldn't wipe the db
- db2 = u1db_open(self.db_path, create=True)
- self.addCleanup(db2.close)
- doc2 = db2.get_doc(doc.doc_id)
- self.assertEqual(doc, doc2)
-
- def test_open_existing_no_create(self):
- db = sqlite_backend.SQLitePartialExpandDatabase(self.db_path)
- self.addCleanup(db.close)
- db2 = u1db_open(self.db_path, create=False)
- self.addCleanup(db2.close)
- self.assertIsInstance(db2, sqlite_backend.SQLitePartialExpandDatabase)
diff --git a/src/leap/soledad/tests/u1db_tests/test_remote_sync_target.py b/src/leap/soledad/tests/u1db_tests/test_remote_sync_target.py
deleted file mode 100644
index 66d404d2..00000000
--- a/src/leap/soledad/tests/u1db_tests/test_remote_sync_target.py
+++ /dev/null
@@ -1,317 +0,0 @@
-# Copyright 2011-2012 Canonical Ltd.
-#
-# This file is part of u1db.
-#
-# u1db is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3
-# as published by the Free Software Foundation.
-#
-# u1db is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with u1db. If not, see <http://www.gnu.org/licenses/>.
-
-"""Tests for the remote sync targets"""
-
-import cStringIO
-
-from u1db import (
- errors,
-)
-
-from leap.soledad.tests import u1db_tests as tests
-
-from u1db.remote import (
- http_app,
- http_target,
- oauth_middleware,
-)
-
-
-class TestHTTPSyncTargetBasics(tests.TestCase):
-
- def test_parse_url(self):
- remote_target = http_target.HTTPSyncTarget('http://127.0.0.1:12345/')
- self.assertEqual('http', remote_target._url.scheme)
- self.assertEqual('127.0.0.1', remote_target._url.hostname)
- self.assertEqual(12345, remote_target._url.port)
- self.assertEqual('/', remote_target._url.path)
-
-
-class TestParsingSyncStream(tests.TestCase):
-
- def test_wrong_start(self):
- tgt = http_target.HTTPSyncTarget("http://foo/foo")
-
- self.assertRaises(errors.BrokenSyncStream,
- tgt._parse_sync_stream, "{}\r\n]", None)
-
- self.assertRaises(errors.BrokenSyncStream,
- tgt._parse_sync_stream, "\r\n{}\r\n]", None)
-
- self.assertRaises(errors.BrokenSyncStream,
- tgt._parse_sync_stream, "", None)
-
- def test_wrong_end(self):
- tgt = http_target.HTTPSyncTarget("http://foo/foo")
-
- self.assertRaises(errors.BrokenSyncStream,
- tgt._parse_sync_stream, "[\r\n{}", None)
-
- self.assertRaises(errors.BrokenSyncStream,
- tgt._parse_sync_stream, "[\r\n", None)
-
- def test_missing_comma(self):
- tgt = http_target.HTTPSyncTarget("http://foo/foo")
-
- self.assertRaises(errors.BrokenSyncStream,
- tgt._parse_sync_stream,
- '[\r\n{}\r\n{"id": "i", "rev": "r", '
- '"content": "c", "gen": 3}\r\n]', None)
-
- def test_no_entries(self):
- tgt = http_target.HTTPSyncTarget("http://foo/foo")
-
- self.assertRaises(errors.BrokenSyncStream,
- tgt._parse_sync_stream, "[\r\n]", None)
-
- def test_extra_comma(self):
- tgt = http_target.HTTPSyncTarget("http://foo/foo")
-
- self.assertRaises(errors.BrokenSyncStream,
- tgt._parse_sync_stream, "[\r\n{},\r\n]", None)
-
- self.assertRaises(errors.BrokenSyncStream,
- tgt._parse_sync_stream,
- '[\r\n{},\r\n{"id": "i", "rev": "r", '
- '"content": "{}", "gen": 3, "trans_id": "T-sid"}'
- ',\r\n]',
- lambda doc, gen, trans_id: None)
-
- def test_error_in_stream(self):
- tgt = http_target.HTTPSyncTarget("http://foo/foo")
-
- self.assertRaises(errors.Unavailable,
- tgt._parse_sync_stream,
- '[\r\n{"new_generation": 0},'
- '\r\n{"error": "unavailable"}\r\n', None)
-
- self.assertRaises(errors.Unavailable,
- tgt._parse_sync_stream,
- '[\r\n{"error": "unavailable"}\r\n', None)
-
- self.assertRaises(errors.BrokenSyncStream,
- tgt._parse_sync_stream,
- '[\r\n{"error": "?"}\r\n', None)
-
-
-def make_http_app(state):
- return http_app.HTTPApp(state)
-
-
-def http_sync_target(test, path):
- return http_target.HTTPSyncTarget(test.getURL(path))
-
-
-def make_oauth_http_app(state):
- app = http_app.HTTPApp(state)
- application = oauth_middleware.OAuthMiddleware(app, None, prefix='/~/')
- application.get_oauth_data_store = lambda: tests.testingOAuthStore
- return application
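-
-# make_oauth_http_app mounts the plain HTTPApp behind OAuthMiddleware
-# under the '/~/' prefix, which is why oauth_http_sync_target below
-# prepends '~/' to the path before signing with the test credentials.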
-
-
-def oauth_http_sync_target(test, path):
- st = http_sync_target(test, '~/' + path)
- st.set_oauth_credentials(tests.consumer1.key, tests.consumer1.secret,
- tests.token1.key, tests.token1.secret)
- return st
-
-
-class TestRemoteSyncTargets(tests.TestCaseWithServer):
-
- scenarios = [
- ('http', {'make_app_with_state': make_http_app,
- 'make_document_for_test': tests.make_document_for_test,
- 'sync_target': http_sync_target}),
- ('oauth_http', {'make_app_with_state': make_oauth_http_app,
- 'make_document_for_test': tests.make_document_for_test,
- 'sync_target': oauth_http_sync_target}),
- ]
-
- def getSyncTarget(self, path=None):
- if self.server is None:
- self.startServer()
- return self.sync_target(self, path)
-
- def test_get_sync_info(self):
- self.startServer()
- db = self.request_state._create_database('test')
- db._set_replica_gen_and_trans_id('other-id', 1, 'T-transid')
- remote_target = self.getSyncTarget('test')
- self.assertEqual(('test', 0, '', 1, 'T-transid'),
- remote_target.get_sync_info('other-id'))
-
- def test_record_sync_info(self):
- self.startServer()
- db = self.request_state._create_database('test')
- remote_target = self.getSyncTarget('test')
- remote_target.record_sync_info('other-id', 2, 'T-transid')
- self.assertEqual(
- (2, 'T-transid'), db._get_replica_gen_and_trans_id('other-id'))
-
- def test_sync_exchange_send(self):
- self.startServer()
- db = self.request_state._create_database('test')
- remote_target = self.getSyncTarget('test')
- other_docs = []
-
- def receive_doc(doc):
- other_docs.append((doc.doc_id, doc.rev, doc.get_json()))
-
- doc = self.make_document('doc-here', 'replica:1', '{"value": "here"}')
- new_gen, trans_id = remote_target.sync_exchange(
- [(doc, 10, 'T-sid')], 'replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=receive_doc)
- self.assertEqual(1, new_gen)
- self.assertGetDoc(
- db, 'doc-here', 'replica:1', '{"value": "here"}', False)
-
- def test_sync_exchange_send_failure_and_retry_scenario(self):
- self.startServer()
-
- def blackhole_getstderr(inst):
- return cStringIO.StringIO()
-
- self.patch(self.server.RequestHandlerClass, 'get_stderr',
- blackhole_getstderr)
- db = self.request_state._create_database('test')
- _put_doc_if_newer = db._put_doc_if_newer
- trigger_ids = ['doc-here2']
-
- def bomb_put_doc_if_newer(doc, save_conflict,
- replica_uid=None, replica_gen=None,
- replica_trans_id=None):
- if doc.doc_id in trigger_ids:
- raise Exception
- return _put_doc_if_newer(doc, save_conflict=save_conflict,
- replica_uid=replica_uid,
- replica_gen=replica_gen,
- replica_trans_id=replica_trans_id)
- self.patch(db, '_put_doc_if_newer', bomb_put_doc_if_newer)
- remote_target = self.getSyncTarget('test')
- other_changes = []
-
- def receive_doc(doc, gen, trans_id):
- other_changes.append(
- (doc.doc_id, doc.rev, doc.get_json(), gen, trans_id))
-
- doc1 = self.make_document('doc-here', 'replica:1', '{"value": "here"}')
- doc2 = self.make_document('doc-here2', 'replica:1',
- '{"value": "here2"}')
- self.assertRaises(
- errors.HTTPError,
- remote_target.sync_exchange,
- [(doc1, 10, 'T-sid'), (doc2, 11, 'T-sud')],
- 'replica', last_known_generation=0, last_known_trans_id=None,
- return_doc_cb=receive_doc)
- self.assertGetDoc(db, 'doc-here', 'replica:1', '{"value": "here"}',
- False)
- self.assertEqual(
- (10, 'T-sid'), db._get_replica_gen_and_trans_id('replica'))
- self.assertEqual([], other_changes)
- # retry
- trigger_ids = []
- new_gen, trans_id = remote_target.sync_exchange(
- [(doc2, 11, 'T-sud')], 'replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=receive_doc)
- self.assertGetDoc(db, 'doc-here2', 'replica:1', '{"value": "here2"}',
- False)
- self.assertEqual(
- (11, 'T-sud'), db._get_replica_gen_and_trans_id('replica'))
- self.assertEqual(2, new_gen)
- # bounced back to us
- self.assertEqual(
- ('doc-here', 'replica:1', '{"value": "here"}', 1),
- other_changes[0][:-1])
-
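- # (Note on the scenario above: the first sync_exchange fails midway,
- # but the target has already applied doc1 and recorded (10, 'T-sid')
- # for 'replica'. The retry therefore resends only doc2, and because
- # it still claims last_known_generation=0 the target sends doc1 back
- # to the caller, which is the "bounced back to us" assertion.)
-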
- def test_sync_exchange_in_stream_error(self):
- self.startServer()
-
- def blackhole_getstderr(inst):
- return cStringIO.StringIO()
-
- self.patch(self.server.RequestHandlerClass, 'get_stderr',
- blackhole_getstderr)
- db = self.request_state._create_database('test')
- doc = db.create_doc_from_json('{"value": "there"}')
-
- def bomb_get_docs(doc_ids, check_for_conflicts=None,
- include_deleted=False):
- yield doc
- # delayed failure case
- raise errors.Unavailable
-
- self.patch(db, 'get_docs', bomb_get_docs)
- remote_target = self.getSyncTarget('test')
- other_changes = []
-
- def receive_doc(doc, gen, trans_id):
- other_changes.append(
- (doc.doc_id, doc.rev, doc.get_json(), gen, trans_id))
-
- self.assertRaises(
- errors.Unavailable, remote_target.sync_exchange, [], 'replica',
- last_known_generation=0, last_known_trans_id=None,
- return_doc_cb=receive_doc)
- self.assertEqual(
- (doc.doc_id, doc.rev, '{"value": "there"}', 1),
- other_changes[0][:-1])
-
- def test_sync_exchange_receive(self):
- self.startServer()
- db = self.request_state._create_database('test')
- doc = db.create_doc_from_json('{"value": "there"}')
- remote_target = self.getSyncTarget('test')
- other_changes = []
-
- def receive_doc(doc, gen, trans_id):
- other_changes.append(
- (doc.doc_id, doc.rev, doc.get_json(), gen, trans_id))
-
- new_gen, trans_id = remote_target.sync_exchange(
- [], 'replica', last_known_generation=0, last_known_trans_id=None,
- return_doc_cb=receive_doc)
- self.assertEqual(1, new_gen)
- self.assertEqual(
- (doc.doc_id, doc.rev, '{"value": "there"}', 1),
- other_changes[0][:-1])
-
- def test_sync_exchange_send_ensure_callback(self):
- self.startServer()
- remote_target = self.getSyncTarget('test')
- other_docs = []
- replica_uid_box = []
-
- def receive_doc(doc):
- other_docs.append((doc.doc_id, doc.rev, doc.get_json()))
-
- def ensure_cb(replica_uid):
- replica_uid_box.append(replica_uid)
-
- doc = self.make_document('doc-here', 'replica:1', '{"value": "here"}')
- new_gen, trans_id = remote_target.sync_exchange(
- [(doc, 10, 'T-sid')], 'replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=receive_doc,
- ensure_callback=ensure_cb)
- self.assertEqual(1, new_gen)
- db = self.request_state.open_database('test')
- self.assertEqual(1, len(replica_uid_box))
- self.assertEqual(db._replica_uid, replica_uid_box[0])
- self.assertGetDoc(
- db, 'doc-here', 'replica:1', '{"value": "here"}', False)
-
-
-load_tests = tests.load_with_scenarios
diff --git a/src/leap/soledad/tests/u1db_tests/test_sqlite_backend.py b/src/leap/soledad/tests/u1db_tests/test_sqlite_backend.py
deleted file mode 100644
index 1380e4b1..00000000
--- a/src/leap/soledad/tests/u1db_tests/test_sqlite_backend.py
+++ /dev/null
@@ -1,494 +0,0 @@
-# Copyright 2011 Canonical Ltd.
-#
-# This file is part of u1db.
-#
-# u1db is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3
-# as published by the Free Software Foundation.
-#
-# u1db is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with u1db. If not, see <http://www.gnu.org/licenses/>.
-
-"""Test sqlite backend internals."""
-
-import os
-import time
-import threading
-
-from pysqlcipher import dbapi2
-
-from u1db import (
- errors,
- query_parser,
-)
-
-from leap.soledad.tests import u1db_tests as tests
-
-from u1db.backends import sqlite_backend
-from leap.soledad.tests.u1db_tests.test_backends import TestAlternativeDocument
-
-
-simple_doc = '{"key": "value"}'
-nested_doc = '{"key": "value", "sub": {"doc": "underneath"}}'
-
-
-class TestSQLiteDatabase(tests.TestCase):
-
- def test_atomic_initialize(self):
- tmpdir = self.createTempDir()
- dbname = os.path.join(tmpdir, 'atomic.db')
-
- t2 = None # will be a thread
-
- class SQLiteDatabaseTesting(sqlite_backend.SQLiteDatabase):
- _index_storage_value = "testing"
-
- def __init__(self, dbname, ntry):
- self._try = ntry
- self._is_initialized_invocations = 0
- super(SQLiteDatabaseTesting, self).__init__(dbname)
-
- def _is_initialized(self, c):
- res = super(SQLiteDatabaseTesting, self)._is_initialized(c)
- if self._try == 1:
- self._is_initialized_invocations += 1
- if self._is_initialized_invocations == 2:
- t2.start()
- # hard to do better and have a generic test
- time.sleep(0.05)
- return res
-
- outcome2 = []
-
- def second_try():
- try:
- db2 = SQLiteDatabaseTesting(dbname, 2)
- except Exception as e:
- outcome2.append(e)
- else:
- outcome2.append(db2)
-
- t2 = threading.Thread(target=second_try)
- db1 = SQLiteDatabaseTesting(dbname, 1)
- t2.join()
-
- self.assertIsInstance(outcome2[0], SQLiteDatabaseTesting)
- db2 = outcome2[0]
- self.assertTrue(db2._is_initialized(db1._get_sqlite_handle().cursor()))
-
-
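-# (The test above engineers the race it cares about: t2 is started from
-# inside _is_initialized during db1's initialization, so the second
-# opener hits the database file mid-init; the assertion then checks that
-# it still ends up with a fully initialized database rather than an
-# error.)
-
-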
-class TestSQLitePartialExpandDatabase(tests.TestCase):
-
- def setUp(self):
- super(TestSQLitePartialExpandDatabase, self).setUp()
- self.db = sqlite_backend.SQLitePartialExpandDatabase(':memory:')
- self.db._set_replica_uid('test')
-
- def test_create_database(self):
- raw_db = self.db._get_sqlite_handle()
- self.assertNotEqual(None, raw_db)
-
- def test_default_replica_uid(self):
- self.db = sqlite_backend.SQLitePartialExpandDatabase(':memory:')
- self.assertIsNot(None, self.db._replica_uid)
- self.assertEqual(32, len(self.db._replica_uid))
- int(self.db._replica_uid, 16)
-
- def test__close_sqlite_handle(self):
- raw_db = self.db._get_sqlite_handle()
- self.db._close_sqlite_handle()
- self.assertRaises(dbapi2.ProgrammingError,
- raw_db.cursor)
-
- def test_create_database_initializes_schema(self):
- raw_db = self.db._get_sqlite_handle()
- c = raw_db.cursor()
- c.execute("SELECT * FROM u1db_config")
- config = dict([(r[0], r[1]) for r in c.fetchall()])
- self.assertEqual({'sql_schema': '0', 'replica_uid': 'test',
- 'index_storage': 'expand referenced'}, config)
-
- # These tables must exist, though we don't care what is in them yet
- c.execute("SELECT * FROM transaction_log")
- c.execute("SELECT * FROM document")
- c.execute("SELECT * FROM document_fields")
- c.execute("SELECT * FROM sync_log")
- c.execute("SELECT * FROM conflicts")
- c.execute("SELECT * FROM index_definitions")
-
- def test__parse_index(self):
- self.db = sqlite_backend.SQLitePartialExpandDatabase(':memory:')
- g = self.db._parse_index_definition('fieldname')
- self.assertIsInstance(g, query_parser.ExtractField)
- self.assertEqual(['fieldname'], g.field)
-
- def test__update_indexes(self):
- self.db = sqlite_backend.SQLitePartialExpandDatabase(':memory:')
- g = self.db._parse_index_definition('fieldname')
- c = self.db._get_sqlite_handle().cursor()
- self.db._update_indexes('doc-id', {'fieldname': 'val'},
- [('fieldname', g)], c)
- c.execute('SELECT doc_id, field_name, value FROM document_fields')
- self.assertEqual([('doc-id', 'fieldname', 'val')],
- c.fetchall())
-
- def test__set_replica_uid(self):
- # Start from scratch, so that replica_uid isn't set.
- self.db = sqlite_backend.SQLitePartialExpandDatabase(':memory:')
- self.assertIsNot(None, self.db._real_replica_uid)
- self.assertIsNot(None, self.db._replica_uid)
- self.db._set_replica_uid('foo')
- c = self.db._get_sqlite_handle().cursor()
- c.execute("SELECT value FROM u1db_config WHERE name='replica_uid'")
- self.assertEqual(('foo',), c.fetchone())
- self.assertEqual('foo', self.db._real_replica_uid)
- self.assertEqual('foo', self.db._replica_uid)
- self.db._close_sqlite_handle()
- self.assertEqual('foo', self.db._replica_uid)
-
- def test__get_generation(self):
- self.assertEqual(0, self.db._get_generation())
-
- def test__get_generation_info(self):
- self.assertEqual((0, ''), self.db._get_generation_info())
-
- def test_create_index(self):
- self.db.create_index('test-idx', "key")
- self.assertEqual([('test-idx', ["key"])], self.db.list_indexes())
-
- def test_create_index_multiple_fields(self):
- self.db.create_index('test-idx', "key", "key2")
- self.assertEqual([('test-idx', ["key", "key2"])],
- self.db.list_indexes())
-
- def test__get_index_definition(self):
- self.db.create_index('test-idx', "key", "key2")
- # TODO: How would you test that an index is getting used for an SQL
- # request?
- self.assertEqual(["key", "key2"],
- self.db._get_index_definition('test-idx'))
-
- def test_list_index_mixed(self):
- # Make sure that we properly order the output
- c = self.db._get_sqlite_handle().cursor()
- # We intentionally insert the data in weird ordering, to make sure the
- # query still gets it back correctly.
- c.executemany("INSERT INTO index_definitions VALUES (?, ?, ?)",
- [('idx-1', 0, 'key10'),
- ('idx-2', 2, 'key22'),
- ('idx-1', 1, 'key11'),
- ('idx-2', 0, 'key20'),
- ('idx-2', 1, 'key21')])
- self.assertEqual([('idx-1', ['key10', 'key11']),
- ('idx-2', ['key20', 'key21', 'key22'])],
- self.db.list_indexes())
-
- def test_no_indexes_no_document_fields(self):
- self.db.create_doc_from_json(
- '{"key1": "val1", "key2": "val2"}')
- c = self.db._get_sqlite_handle().cursor()
- c.execute("SELECT doc_id, field_name, value FROM document_fields"
- " ORDER BY doc_id, field_name, value")
- self.assertEqual([], c.fetchall())
-
- def test_create_extracts_fields(self):
- doc1 = self.db.create_doc_from_json('{"key1": "val1", "key2": "val2"}')
- doc2 = self.db.create_doc_from_json('{"key1": "valx", "key2": "valy"}')
- c = self.db._get_sqlite_handle().cursor()
- c.execute("SELECT doc_id, field_name, value FROM document_fields"
- " ORDER BY doc_id, field_name, value")
- self.assertEqual([], c.fetchall())
- self.db.create_index('test', 'key1', 'key2')
- c.execute("SELECT doc_id, field_name, value FROM document_fields"
- " ORDER BY doc_id, field_name, value")
- self.assertEqual(sorted(
- [(doc1.doc_id, "key1", "val1"),
- (doc1.doc_id, "key2", "val2"),
- (doc2.doc_id, "key1", "valx"),
- (doc2.doc_id, "key2", "valy"), ]), sorted(c.fetchall()))
-
- def test_put_updates_fields(self):
- self.db.create_index('test', 'key1', 'key2')
- doc1 = self.db.create_doc_from_json(
- '{"key1": "val1", "key2": "val2"}')
- doc1.content = {"key1": "val1", "key2": "valy"}
- self.db.put_doc(doc1)
- c = self.db._get_sqlite_handle().cursor()
- c.execute("SELECT doc_id, field_name, value FROM document_fields"
- " ORDER BY doc_id, field_name, value")
- self.assertEqual([(doc1.doc_id, "key1", "val1"),
- (doc1.doc_id, "key2", "valy"), ], c.fetchall())
-
- def test_put_updates_nested_fields(self):
- self.db.create_index('test', 'key', 'sub.doc')
- doc1 = self.db.create_doc_from_json(nested_doc)
- c = self.db._get_sqlite_handle().cursor()
- c.execute("SELECT doc_id, field_name, value FROM document_fields"
- " ORDER BY doc_id, field_name, value")
- self.assertEqual([(doc1.doc_id, "key", "value"),
- (doc1.doc_id, "sub.doc", "underneath"), ],
- c.fetchall())
-
- def test__ensure_schema_rollback(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/rollback.db'
-
- class SQLitePartialExpandDbTesting(
- sqlite_backend.SQLitePartialExpandDatabase):
-
- def _set_replica_uid_in_transaction(self, uid):
- super(SQLitePartialExpandDbTesting,
- self)._set_replica_uid_in_transaction(uid)
- if fail:
- raise Exception()
-
- db = SQLitePartialExpandDbTesting.__new__(SQLitePartialExpandDbTesting)
- db._db_handle = dbapi2.connect(path) # db is there but not yet init-ed
- fail = True
- self.assertRaises(Exception, db._ensure_schema)
- fail = False
- db._initialize(db._db_handle.cursor())
-
- def test__open_database(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/test.sqlite'
- sqlite_backend.SQLitePartialExpandDatabase(path)
- db2 = sqlite_backend.SQLiteDatabase._open_database(path)
- self.assertIsInstance(db2, sqlite_backend.SQLitePartialExpandDatabase)
-
- def test__open_database_with_factory(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/test.sqlite'
- sqlite_backend.SQLitePartialExpandDatabase(path)
- db2 = sqlite_backend.SQLiteDatabase._open_database(
- path, document_factory=TestAlternativeDocument)
- self.assertEqual(TestAlternativeDocument, db2._factory)
-
- def test__open_database_non_existent(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/non-existent.sqlite'
- self.assertRaises(errors.DatabaseDoesNotExist,
- sqlite_backend.SQLiteDatabase._open_database, path)
-
- def test__open_database_during_init(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/initialised.db'
- db = sqlite_backend.SQLitePartialExpandDatabase.__new__(
- sqlite_backend.SQLitePartialExpandDatabase)
- db._db_handle = dbapi2.connect(path) # db is there but not yet init-ed
- self.addCleanup(db.close)
- observed = []
-
- class SQLiteDatabaseTesting(sqlite_backend.SQLiteDatabase):
- WAIT_FOR_PARALLEL_INIT_HALF_INTERVAL = 0.1
-
- @classmethod
- def _which_index_storage(cls, c):
- res = super(SQLiteDatabaseTesting, cls)._which_index_storage(c)
- db._ensure_schema() # init db
- observed.append(res[0])
- return res
-
- db2 = SQLiteDatabaseTesting._open_database(path)
- self.addCleanup(db2.close)
- self.assertIsInstance(db2, sqlite_backend.SQLitePartialExpandDatabase)
- self.assertEqual(
- [None,
- sqlite_backend.SQLitePartialExpandDatabase._index_storage_value],
- observed)
-
- def test__open_database_invalid(self):
- class SQLiteDatabaseTesting(sqlite_backend.SQLiteDatabase):
- WAIT_FOR_PARALLEL_INIT_HALF_INTERVAL = 0.1
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path1 = temp_dir + '/invalid1.db'
- with open(path1, 'wb') as f:
- f.write("")
- self.assertRaises(dbapi2.OperationalError,
- SQLiteDatabaseTesting._open_database, path1)
- with open(path1, 'wb') as f:
- f.write("invalid")
- self.assertRaises(dbapi2.DatabaseError,
- SQLiteDatabaseTesting._open_database, path1)
-
- def test_open_database_existing(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/existing.sqlite'
- sqlite_backend.SQLitePartialExpandDatabase(path)
- db2 = sqlite_backend.SQLiteDatabase.open_database(path, create=False)
- self.assertIsInstance(db2, sqlite_backend.SQLitePartialExpandDatabase)
-
- def test_open_database_with_factory(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/existing.sqlite'
- sqlite_backend.SQLitePartialExpandDatabase(path)
- db2 = sqlite_backend.SQLiteDatabase.open_database(
- path, create=False, document_factory=TestAlternativeDocument)
- self.assertEqual(TestAlternativeDocument, db2._factory)
-
- def test_open_database_create(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/new.sqlite'
- sqlite_backend.SQLiteDatabase.open_database(path, create=True)
- db2 = sqlite_backend.SQLiteDatabase.open_database(path, create=False)
- self.assertIsInstance(db2, sqlite_backend.SQLitePartialExpandDatabase)
-
- def test_open_database_non_existent(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/non-existent.sqlite'
- self.assertRaises(errors.DatabaseDoesNotExist,
- sqlite_backend.SQLiteDatabase.open_database, path,
- create=False)
-
- def test_delete_database_existent(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/new.sqlite'
- db = sqlite_backend.SQLiteDatabase.open_database(path, create=True)
- db.close()
- sqlite_backend.SQLiteDatabase.delete_database(path)
- self.assertRaises(errors.DatabaseDoesNotExist,
- sqlite_backend.SQLiteDatabase.open_database, path,
- create=False)
-
- def test_delete_database_nonexistent(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/non-existent.sqlite'
- self.assertRaises(errors.DatabaseDoesNotExist,
- sqlite_backend.SQLiteDatabase.delete_database, path)
-
- def test__get_indexed_fields(self):
- self.db.create_index('idx1', 'a', 'b')
- self.assertEqual(set(['a', 'b']), self.db._get_indexed_fields())
- self.db.create_index('idx2', 'b', 'c')
- self.assertEqual(set(['a', 'b', 'c']), self.db._get_indexed_fields())
-
- def test_indexed_fields_expanded(self):
- self.db.create_index('idx1', 'key1')
- doc1 = self.db.create_doc_from_json('{"key1": "val1", "key2": "val2"}')
- self.assertEqual(set(['key1']), self.db._get_indexed_fields())
- c = self.db._get_sqlite_handle().cursor()
- c.execute("SELECT doc_id, field_name, value FROM document_fields"
- " ORDER BY doc_id, field_name, value")
- self.assertEqual([(doc1.doc_id, 'key1', 'val1')], c.fetchall())
-
- def test_create_index_updates_fields(self):
- doc1 = self.db.create_doc_from_json('{"key1": "val1", "key2": "val2"}')
- self.db.create_index('idx1', 'key1')
- self.assertEqual(set(['key1']), self.db._get_indexed_fields())
- c = self.db._get_sqlite_handle().cursor()
- c.execute("SELECT doc_id, field_name, value FROM document_fields"
- " ORDER BY doc_id, field_name, value")
- self.assertEqual([(doc1.doc_id, 'key1', 'val1')], c.fetchall())
-
- def assertFormatQueryEquals(self, exp_statement, exp_args, definition,
- values):
- statement, args = self.db._format_query(definition, values)
- self.assertEqual(exp_statement, statement)
- self.assertEqual(exp_args, args)
-
- def test__format_query(self):
- self.assertFormatQueryEquals(
- "SELECT d.doc_id, d.doc_rev, d.content, count(c.doc_rev) FROM "
- "document d, document_fields d0 LEFT OUTER JOIN conflicts c ON "
- "c.doc_id = d.doc_id WHERE d.doc_id = d0.doc_id AND d0.field_name "
- "= ? AND d0.value = ? GROUP BY d.doc_id, d.doc_rev, d.content "
- "ORDER BY d0.value;", ["key1", "a"],
- ["key1"], ["a"])
-
- def test__format_query2(self):
- self.assertFormatQueryEquals(
- 'SELECT d.doc_id, d.doc_rev, d.content, count(c.doc_rev) FROM '
- 'document d, document_fields d0, document_fields d1, '
- 'document_fields d2 LEFT OUTER JOIN conflicts c ON c.doc_id = '
- 'd.doc_id WHERE d.doc_id = d0.doc_id AND d0.field_name = ? AND '
- 'd0.value = ? AND d.doc_id = d1.doc_id AND d1.field_name = ? AND '
- 'd1.value = ? AND d.doc_id = d2.doc_id AND d2.field_name = ? AND '
- 'd2.value = ? GROUP BY d.doc_id, d.doc_rev, d.content ORDER BY '
- 'd0.value, d1.value, d2.value;',
- ["key1", "a", "key2", "b", "key3", "c"],
- ["key1", "key2", "key3"], ["a", "b", "c"])
-
- def test__format_query_wildcard(self):
- self.assertFormatQueryEquals(
- 'SELECT d.doc_id, d.doc_rev, d.content, count(c.doc_rev) FROM '
- 'document d, document_fields d0, document_fields d1, '
- 'document_fields d2 LEFT OUTER JOIN conflicts c ON c.doc_id = '
- 'd.doc_id WHERE d.doc_id = d0.doc_id AND d0.field_name = ? AND '
- 'd0.value = ? AND d.doc_id = d1.doc_id AND d1.field_name = ? AND '
- 'd1.value GLOB ? AND d.doc_id = d2.doc_id AND d2.field_name = ? '
- 'AND d2.value NOT NULL GROUP BY d.doc_id, d.doc_rev, d.content '
- 'ORDER BY d0.value, d1.value, d2.value;',
- ["key1", "a", "key2", "b*", "key3"], ["key1", "key2", "key3"],
- ["a", "b*", "*"])
-
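- # A sketch of the wildcard rules exercised above, inferred from the
- # expected statements: an exact value becomes "dN.value = ?", a
- # trailing glob such as "b*" becomes "dN.value GLOB ?", and a lone
- # "*" only requires "dN.value NOT NULL". In the range queries below,
- # a trailing glob on the lower bound compares ">= ?" against the
- # prefix, while on the upper bound it expands to
- # "(dN.value < ? OR dN.value GLOB ?)".
-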
- def assertFormatRangeQueryEquals(self, exp_statement, exp_args, definition,
- start_value, end_value):
- statement, args = self.db._format_range_query(
- definition, start_value, end_value)
- self.assertEqual(exp_statement, statement)
- self.assertEqual(exp_args, args)
-
- def test__format_range_query(self):
- self.assertFormatRangeQueryEquals(
- 'SELECT d.doc_id, d.doc_rev, d.content, count(c.doc_rev) FROM '
- 'document d, document_fields d0, document_fields d1, '
- 'document_fields d2 LEFT OUTER JOIN conflicts c ON c.doc_id = '
- 'd.doc_id WHERE d.doc_id = d0.doc_id AND d0.field_name = ? AND '
- 'd0.value >= ? AND d.doc_id = d1.doc_id AND d1.field_name = ? AND '
- 'd1.value >= ? AND d.doc_id = d2.doc_id AND d2.field_name = ? AND '
- 'd2.value >= ? AND d.doc_id = d0.doc_id AND d0.field_name = ? AND '
- 'd0.value <= ? AND d.doc_id = d1.doc_id AND d1.field_name = ? AND '
- 'd1.value <= ? AND d.doc_id = d2.doc_id AND d2.field_name = ? AND '
- 'd2.value <= ? GROUP BY d.doc_id, d.doc_rev, d.content ORDER BY '
- 'd0.value, d1.value, d2.value;',
- ['key1', 'a', 'key2', 'b', 'key3', 'c', 'key1', 'p', 'key2', 'q',
- 'key3', 'r'],
- ["key1", "key2", "key3"], ["a", "b", "c"], ["p", "q", "r"])
-
- def test__format_range_query_no_start(self):
- self.assertFormatRangeQueryEquals(
- 'SELECT d.doc_id, d.doc_rev, d.content, count(c.doc_rev) FROM '
- 'document d, document_fields d0, document_fields d1, '
- 'document_fields d2 LEFT OUTER JOIN conflicts c ON c.doc_id = '
- 'd.doc_id WHERE d.doc_id = d0.doc_id AND d0.field_name = ? AND '
- 'd0.value <= ? AND d.doc_id = d1.doc_id AND d1.field_name = ? AND '
- 'd1.value <= ? AND d.doc_id = d2.doc_id AND d2.field_name = ? AND '
- 'd2.value <= ? GROUP BY d.doc_id, d.doc_rev, d.content ORDER BY '
- 'd0.value, d1.value, d2.value;',
- ['key1', 'a', 'key2', 'b', 'key3', 'c'],
- ["key1", "key2", "key3"], None, ["a", "b", "c"])
-
- def test__format_range_query_no_end(self):
- self.assertFormatRangeQueryEquals(
- 'SELECT d.doc_id, d.doc_rev, d.content, count(c.doc_rev) FROM '
- 'document d, document_fields d0, document_fields d1, '
- 'document_fields d2 LEFT OUTER JOIN conflicts c ON c.doc_id = '
- 'd.doc_id WHERE d.doc_id = d0.doc_id AND d0.field_name = ? AND '
- 'd0.value >= ? AND d.doc_id = d1.doc_id AND d1.field_name = ? AND '
- 'd1.value >= ? AND d.doc_id = d2.doc_id AND d2.field_name = ? AND '
- 'd2.value >= ? GROUP BY d.doc_id, d.doc_rev, d.content ORDER BY '
- 'd0.value, d1.value, d2.value;',
- ['key1', 'a', 'key2', 'b', 'key3', 'c'],
- ["key1", "key2", "key3"], ["a", "b", "c"], None)
-
- def test__format_range_query_wildcard(self):
- self.assertFormatRangeQueryEquals(
- 'SELECT d.doc_id, d.doc_rev, d.content, count(c.doc_rev) FROM '
- 'document d, document_fields d0, document_fields d1, '
- 'document_fields d2 LEFT OUTER JOIN conflicts c ON c.doc_id = '
- 'd.doc_id WHERE d.doc_id = d0.doc_id AND d0.field_name = ? AND '
- 'd0.value >= ? AND d.doc_id = d1.doc_id AND d1.field_name = ? AND '
- 'd1.value >= ? AND d.doc_id = d2.doc_id AND d2.field_name = ? AND '
- 'd2.value NOT NULL AND d.doc_id = d0.doc_id AND d0.field_name = ? '
- 'AND d0.value <= ? AND d.doc_id = d1.doc_id AND d1.field_name = ? '
- 'AND (d1.value < ? OR d1.value GLOB ?) AND d.doc_id = d2.doc_id '
- 'AND d2.field_name = ? AND d2.value NOT NULL GROUP BY d.doc_id, '
- 'd.doc_rev, d.content ORDER BY d0.value, d1.value, d2.value;',
- ['key1', 'a', 'key2', 'b', 'key3', 'key1', 'p', 'key2', 'q', 'q*',
- 'key3'],
- ["key1", "key2", "key3"], ["a", "b*", "*"], ["p", "q*", "*"])
diff --git a/src/leap/soledad/tests/u1db_tests/test_sync.py b/src/leap/soledad/tests/u1db_tests/test_sync.py
deleted file mode 100644
index 96aa2736..00000000
--- a/src/leap/soledad/tests/u1db_tests/test_sync.py
+++ /dev/null
@@ -1,1242 +0,0 @@
-# Copyright 2011-2012 Canonical Ltd.
-#
-# This file is part of u1db.
-#
-# u1db is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3
-# as published by the Free Software Foundation.
-#
-# u1db is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with u1db. If not, see <http://www.gnu.org/licenses/>.
-
-"""The Synchronization class for U1DB."""
-
-import os
-from wsgiref import simple_server
-
-from u1db import (
- errors,
- sync,
- vectorclock,
- SyncTarget,
-)
-
-from leap.soledad.tests import u1db_tests as tests
-
-from u1db.backends import (
- inmemory,
-)
-from u1db.remote import (
- http_target,
-)
-
-from leap.soledad.tests.u1db_tests.test_remote_sync_target import (
- make_http_app,
- make_oauth_http_app,
-)
-
-simple_doc = tests.simple_doc
-nested_doc = tests.nested_doc
-
-
-def _make_local_db_and_target(test):
- db = test.create_database('test')
- st = db.get_sync_target()
- return db, st
-
-
-def _make_local_db_and_http_target(test, path='test'):
- test.startServer()
- db = test.request_state._create_database(os.path.basename(path))
- st = http_target.HTTPSyncTarget.connect(test.getURL(path))
- return db, st
-
-
-def _make_local_db_and_oauth_http_target(test):
- db, st = _make_local_db_and_http_target(test, '~/test')
- st.set_oauth_credentials(tests.consumer1.key, tests.consumer1.secret,
- tests.token1.key, tests.token1.secret)
- return db, st
-
-
-target_scenarios = [
- ('local', {'create_db_and_target': _make_local_db_and_target}),
- ('http', {'create_db_and_target': _make_local_db_and_http_target,
- 'make_app_with_state': make_http_app}),
- ('oauth_http', {'create_db_and_target':
- _make_local_db_and_oauth_http_target,
- 'make_app_with_state': make_oauth_http_app}),
-]
-
-
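-# multiply_scenarios() below is assumed to build the cross product of the
-# local database scenarios and the target scenarios above (testscenarios
-# semantics), so every backend variant is exercised against every kind of
-# sync target; roughly:
-#
-#   [('<db-scenario>,<target-scenario>', <merged params dict>), ...]
-
-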
-class DatabaseSyncTargetTests(tests.DatabaseBaseTests,
- tests.TestCaseWithServer):
-
- scenarios = tests.multiply_scenarios(tests.DatabaseBaseTests.scenarios,
- target_scenarios)
- # whitebox == True means self.db is the actual local db object
- # against which the sync is performed
- whitebox = True
-
- def setUp(self):
- super(DatabaseSyncTargetTests, self).setUp()
- self.db, self.st = self.create_db_and_target(self)
- self.other_changes = []
-
- def tearDown(self):
- # We delete them explicitly, so that connections are cleanly closed
- del self.st
- self.db.close()
- del self.db
- super(DatabaseSyncTargetTests, self).tearDown()
-
- def receive_doc(self, doc, gen, trans_id):
- self.other_changes.append(
- (doc.doc_id, doc.rev, doc.get_json(), gen, trans_id))
-
- def set_trace_hook(self, callback, shallow=False):
- setter = (self.st._set_trace_hook if not shallow else
- self.st._set_trace_hook_shallow)
- try:
- setter(callback)
- except NotImplementedError:
- self.skipTest("%s does not implement _set_trace_hook"
- % (self.st.__class__.__name__,))
-
- def test_get_sync_target(self):
- self.assertIsNot(None, self.st)
-
- def test_get_sync_info(self):
- self.assertEqual(
- ('test', 0, '', 0, ''), self.st.get_sync_info('other'))
-
- def test_create_doc_updates_sync_info(self):
- self.assertEqual(
- ('test', 0, '', 0, ''), self.st.get_sync_info('other'))
- self.db.create_doc_from_json(simple_doc)
- self.assertEqual(1, self.st.get_sync_info('other')[1])
-
- def test_record_sync_info(self):
- self.st.record_sync_info('replica', 10, 'T-transid')
- self.assertEqual(
- ('test', 0, '', 10, 'T-transid'), self.st.get_sync_info('replica'))
-
- def test_sync_exchange(self):
- docs_by_gen = [
- (self.make_document('doc-id', 'replica:1', simple_doc), 10,
- 'T-sid')]
- new_gen, trans_id = self.st.sync_exchange(
- docs_by_gen, 'replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=self.receive_doc)
- self.assertGetDoc(self.db, 'doc-id', 'replica:1', simple_doc, False)
- self.assertTransactionLog(['doc-id'], self.db)
- last_trans_id = self.getLastTransId(self.db)
- self.assertEqual(([], 1, last_trans_id),
- (self.other_changes, new_gen, last_trans_id))
- self.assertEqual(10, self.st.get_sync_info('replica')[3])
-
- def test_sync_exchange_deleted(self):
- doc = self.db.create_doc_from_json('{}')
- edit_rev = 'replica:1|' + doc.rev
- docs_by_gen = [
- (self.make_document(doc.doc_id, edit_rev, None), 10, 'T-sid')]
- new_gen, trans_id = self.st.sync_exchange(
- docs_by_gen, 'replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=self.receive_doc)
- self.assertGetDocIncludeDeleted(
- self.db, doc.doc_id, edit_rev, None, False)
- self.assertTransactionLog([doc.doc_id, doc.doc_id], self.db)
- last_trans_id = self.getLastTransId(self.db)
- self.assertEqual(([], 2, last_trans_id),
- (self.other_changes, new_gen, trans_id))
- self.assertEqual(10, self.st.get_sync_info('replica')[3])
-
- def test_sync_exchange_push_many(self):
- docs_by_gen = [
- (self.make_document('doc-id', 'replica:1', simple_doc), 10, 'T-1'),
- (self.make_document('doc-id2', 'replica:1', nested_doc), 11,
- 'T-2')]
- new_gen, trans_id = self.st.sync_exchange(
- docs_by_gen, 'replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=self.receive_doc)
- self.assertGetDoc(self.db, 'doc-id', 'replica:1', simple_doc, False)
- self.assertGetDoc(self.db, 'doc-id2', 'replica:1', nested_doc, False)
- self.assertTransactionLog(['doc-id', 'doc-id2'], self.db)
- last_trans_id = self.getLastTransId(self.db)
- self.assertEqual(([], 2, last_trans_id),
- (self.other_changes, new_gen, trans_id))
- self.assertEqual(11, self.st.get_sync_info('replica')[3])
-
- def test_sync_exchange_refuses_conflicts(self):
- doc = self.db.create_doc_from_json(simple_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- new_doc = '{"key": "altval"}'
- docs_by_gen = [
- (self.make_document(doc.doc_id, 'replica:1', new_doc), 10,
- 'T-sid')]
- new_gen, _ = self.st.sync_exchange(
- docs_by_gen, 'replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=self.receive_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- self.assertEqual(
- (doc.doc_id, doc.rev, simple_doc, 1), self.other_changes[0][:-1])
- self.assertEqual(1, new_gen)
- if self.whitebox:
- self.assertEqual(self.db._last_exchange_log['return'],
- {'last_gen': 1, 'docs': [(doc.doc_id, doc.rev)]})
-
- def test_sync_exchange_ignores_convergence(self):
- doc = self.db.create_doc_from_json(simple_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- gen, txid = self.db._get_generation_info()
- docs_by_gen = [
- (self.make_document(doc.doc_id, doc.rev, simple_doc), 10, 'T-sid')]
- new_gen, _ = self.st.sync_exchange(
- docs_by_gen, 'replica', last_known_generation=gen,
- last_known_trans_id=txid, return_doc_cb=self.receive_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- self.assertEqual(([], 1), (self.other_changes, new_gen))
-
- def test_sync_exchange_returns_new_docs(self):
- doc = self.db.create_doc_from_json(simple_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- new_gen, _ = self.st.sync_exchange(
- [], 'other-replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=self.receive_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- self.assertEqual(
- (doc.doc_id, doc.rev, simple_doc, 1), self.other_changes[0][:-1])
- self.assertEqual(1, new_gen)
- if self.whitebox:
- self.assertEqual(self.db._last_exchange_log['return'],
- {'last_gen': 1, 'docs': [(doc.doc_id, doc.rev)]})
-
- def test_sync_exchange_returns_deleted_docs(self):
- doc = self.db.create_doc_from_json(simple_doc)
- self.db.delete_doc(doc)
- self.assertTransactionLog([doc.doc_id, doc.doc_id], self.db)
- new_gen, _ = self.st.sync_exchange(
- [], 'other-replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=self.receive_doc)
- self.assertTransactionLog([doc.doc_id, doc.doc_id], self.db)
- self.assertEqual(
- (doc.doc_id, doc.rev, None, 2), self.other_changes[0][:-1])
- self.assertEqual(2, new_gen)
- if self.whitebox:
- self.assertEqual(self.db._last_exchange_log['return'],
- {'last_gen': 2, 'docs': [(doc.doc_id, doc.rev)]})
-
- def test_sync_exchange_returns_many_new_docs(self):
- doc = self.db.create_doc_from_json(simple_doc)
- doc2 = self.db.create_doc_from_json(nested_doc)
- self.assertTransactionLog([doc.doc_id, doc2.doc_id], self.db)
- new_gen, _ = self.st.sync_exchange(
- [], 'other-replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=self.receive_doc)
- self.assertTransactionLog([doc.doc_id, doc2.doc_id], self.db)
- self.assertEqual(2, new_gen)
- self.assertEqual(
- [(doc.doc_id, doc.rev, simple_doc, 1),
- (doc2.doc_id, doc2.rev, nested_doc, 2)],
- [c[:-1] for c in self.other_changes])
- if self.whitebox:
- self.assertEqual(
- self.db._last_exchange_log['return'],
- {'last_gen': 2, 'docs':
- [(doc.doc_id, doc.rev), (doc2.doc_id, doc2.rev)]})
-
- def test_sync_exchange_getting_newer_docs(self):
- doc = self.db.create_doc_from_json(simple_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- new_doc = '{"key": "altval"}'
- docs_by_gen = [
- (self.make_document(doc.doc_id, 'test:1|z:2', new_doc), 10,
- 'T-sid')]
- new_gen, _ = self.st.sync_exchange(
- docs_by_gen, 'other-replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=self.receive_doc)
- self.assertTransactionLog([doc.doc_id, doc.doc_id], self.db)
- self.assertEqual(([], 2), (self.other_changes, new_gen))
-
- def test_sync_exchange_with_concurrent_updates_of_synced_doc(self):
- expected = []
-
- def before_whatschanged_cb(state):
- if state != 'before whats_changed':
- return
- cont = '{"key": "cuncurrent"}'
- conc_rev = self.db.put_doc(
- self.make_document(doc.doc_id, 'test:1|z:2', cont))
- expected.append((doc.doc_id, conc_rev, cont, 3))
-
- self.set_trace_hook(before_whatschanged_cb)
- doc = self.db.create_doc_from_json(simple_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- new_doc = '{"key": "altval"}'
- docs_by_gen = [
- (self.make_document(doc.doc_id, 'test:1|z:2', new_doc), 10,
- 'T-sid')]
- new_gen, _ = self.st.sync_exchange(
- docs_by_gen, 'other-replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=self.receive_doc)
- self.assertEqual(expected, [c[:-1] for c in self.other_changes])
- self.assertEqual(3, new_gen)
-
- def test_sync_exchange_with_concurrent_updates(self):
-
- def after_whatschanged_cb(state):
- if state != 'after whats_changed':
- return
- self.db.create_doc_from_json('{"new": "doc"}')
-
- self.set_trace_hook(after_whatschanged_cb)
- doc = self.db.create_doc_from_json(simple_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- new_doc = '{"key": "altval"}'
- docs_by_gen = [
- (self.make_document(doc.doc_id, 'test:1|z:2', new_doc), 10,
- 'T-sid')]
- new_gen, _ = self.st.sync_exchange(
- docs_by_gen, 'other-replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=self.receive_doc)
- self.assertEqual(([], 2), (self.other_changes, new_gen))
-
- def test_sync_exchange_converged_handling(self):
- doc = self.db.create_doc_from_json(simple_doc)
- docs_by_gen = [
- (self.make_document('new', 'other:1', '{}'), 4, 'T-foo'),
- (self.make_document(doc.doc_id, doc.rev, doc.get_json()), 5,
- 'T-bar')]
- new_gen, _ = self.st.sync_exchange(
- docs_by_gen, 'other-replica', last_known_generation=0,
- last_known_trans_id=None, return_doc_cb=self.receive_doc)
- self.assertEqual(([], 2), (self.other_changes, new_gen))
-
- def test_sync_exchange_detect_incomplete_exchange(self):
- def before_get_docs_explode(state):
- if state != 'before get_docs':
- return
- raise errors.U1DBError("fail")
- self.set_trace_hook(before_get_docs_explode)
- # suppress traceback printing in the wsgiref server
- self.patch(simple_server.ServerHandler,
- 'log_exception', lambda h, exc_info: None)
- doc = self.db.create_doc_from_json(simple_doc)
- self.assertTransactionLog([doc.doc_id], self.db)
- self.assertRaises(
- (errors.U1DBError, errors.BrokenSyncStream),
- self.st.sync_exchange, [], 'other-replica',
- last_known_generation=0, last_known_trans_id=None,
- return_doc_cb=self.receive_doc)
-
- def test_sync_exchange_doc_ids(self):
- sync_exchange_doc_ids = getattr(self.st, 'sync_exchange_doc_ids', None)
- if sync_exchange_doc_ids is None:
- self.skipTest("sync_exchange_doc_ids not implemented")
- db2 = self.create_database('test2')
- doc = db2.create_doc_from_json(simple_doc)
- new_gen, trans_id = sync_exchange_doc_ids(
- db2, [(doc.doc_id, 10, 'T-sid')], 0, None,
- return_doc_cb=self.receive_doc)
- self.assertGetDoc(self.db, doc.doc_id, doc.rev, simple_doc, False)
- self.assertTransactionLog([doc.doc_id], self.db)
- last_trans_id = self.getLastTransId(self.db)
- self.assertEqual(([], 1, last_trans_id),
- (self.other_changes, new_gen, trans_id))
- self.assertEqual(10, self.st.get_sync_info(db2._replica_uid)[3])
-
- def test__set_trace_hook(self):
- called = []
-
- def cb(state):
- called.append(state)
-
- self.set_trace_hook(cb)
- self.st.sync_exchange([], 'replica', 0, None, self.receive_doc)
- self.st.record_sync_info('replica', 0, 'T-sid')
- self.assertEqual(['before whats_changed',
- 'after whats_changed',
- 'before get_docs',
- 'record_sync_info',
- ],
- called)
-
- def test__set_trace_hook_shallow(self):
- if (self.st._set_trace_hook_shallow == self.st._set_trace_hook or
- self.st._set_trace_hook_shallow.im_func ==
- SyncTarget._set_trace_hook_shallow.im_func):
- # shallow same as full
- expected = ['before whats_changed',
- 'after whats_changed',
- 'before get_docs',
- 'record_sync_info',
- ]
- else:
- expected = ['sync_exchange', 'record_sync_info']
-
- called = []
-
- def cb(state):
- called.append(state)
-
- self.set_trace_hook(cb, shallow=True)
- self.st.sync_exchange([], 'replica', 0, None, self.receive_doc)
- self.st.record_sync_info('replica', 0, 'T-sid')
- self.assertEqual(expected, called)
-
-
-def sync_via_synchronizer(test, db_source, db_target, trace_hook=None,
- trace_hook_shallow=None):
- target = db_target.get_sync_target()
- trace_hook = trace_hook or trace_hook_shallow
- if trace_hook:
- target._set_trace_hook(trace_hook)
- return sync.Synchronizer(db_source, target).sync()
-
-
-sync_scenarios = []
-for name, scenario in tests.LOCAL_DATABASES_SCENARIOS:
- scenario = dict(scenario)
- scenario['do_sync'] = sync_via_synchronizer
- sync_scenarios.append((name, scenario))
-
-
-def make_database_for_http_test(test, replica_uid):
- if test.server is None:
- test.startServer()
- db = test.request_state._create_database(replica_uid)
- try:
- http_at = test._http_at
- except AttributeError:
- http_at = test._http_at = {}
- http_at[db] = replica_uid
- return db
-
-
-def copy_database_for_http_test(test, db):
- # DO NOT COPY OR REUSE THIS CODE OUTSIDE TESTS: COPYING U1DB DATABASES IS
- # THE WRONG THING TO DO, THE ONLY REASON WE DO SO HERE IS TO TEST THAT WE
- # CORRECTLY DETECT IT HAPPENING SO THAT WE CAN RAISE ERRORS RATHER THAN
- # CORRUPT USER DATA. USE SYNC INSTEAD, OR WE WILL SEND NINJA TO YOUR HOUSE.
- if test.server is None:
- test.startServer()
- new_db = test.request_state._copy_database(db)
- try:
- http_at = test._http_at
- except AttributeError:
- http_at = test._http_at = {}
- path = db._replica_uid
- while path in http_at.values():
- path += 'copy'
- http_at[new_db] = path
- return new_db
-
-
-def sync_via_synchronizer_and_http(test, db_source, db_target,
- trace_hook=None, trace_hook_shallow=None):
- if trace_hook:
- test.skipTest("full trace hook unsupported over http")
- path = test._http_at[db_target]
- target = http_target.HTTPSyncTarget.connect(test.getURL(path))
- if trace_hook_shallow:
- target._set_trace_hook_shallow(trace_hook_shallow)
- return sync.Synchronizer(db_source, target).sync()
-
-
-sync_scenarios.append(('pyhttp', {
- 'make_database_for_test': make_database_for_http_test,
- 'copy_database_for_test': copy_database_for_http_test,
- 'make_document_for_test': tests.make_document_for_test,
- 'make_app_with_state': make_http_app,
- 'do_sync': sync_via_synchronizer_and_http
-}))
-
-
-class DatabaseSyncTests(tests.DatabaseBaseTests,
- tests.TestCaseWithServer):
-
- scenarios = sync_scenarios
- do_sync = None # set by scenarios
-
- def create_database(self, replica_uid, sync_role=None):
- if replica_uid == 'test' and sync_role is None:
- # created up the chain by base class but unused
- return None
- db = self.create_database_for_role(replica_uid, sync_role)
- if sync_role:
- self._use_tracking[db] = (replica_uid, sync_role)
- return db
-
- def create_database_for_role(self, replica_uid, sync_role):
- # hook point for reuse
- return super(DatabaseSyncTests, self).create_database(replica_uid)
-
- def copy_database(self, db, sync_role=None):
- # DO NOT COPY OR REUSE THIS CODE OUTSIDE TESTS: COPYING U1DB DATABASES
- # IS THE WRONG THING TO DO, THE ONLY REASON WE DO SO HERE IS TO TEST
- # THAT WE CORRECTLY DETECT IT HAPPENING SO THAT WE CAN RAISE ERRORS
- # RATHER THAN CORRUPT USER DATA. USE SYNC INSTEAD, OR WE WILL SEND
- # NINJA TO YOUR HOUSE.
- db_copy = super(DatabaseSyncTests, self).copy_database(db)
- name, orig_sync_role = self._use_tracking[db]
- self._use_tracking[db_copy] = (name + '(copy)', sync_role
- or orig_sync_role)
- return db_copy
-
- def sync(self, db_from, db_to, trace_hook=None,
- trace_hook_shallow=None):
- from_name, from_sync_role = self._use_tracking[db_from]
- to_name, to_sync_role = self._use_tracking[db_to]
- if from_sync_role not in ('source', 'both'):
- raise Exception("%s marked for %s use but used as source" %
- (from_name, from_sync_role))
- if to_sync_role not in ('target', 'both'):
- raise Exception("%s marked for %s use but used as target" %
- (to_name, to_sync_role))
- return self.do_sync(self, db_from, db_to, trace_hook,
- trace_hook_shallow)
-
- def setUp(self):
- self._use_tracking = {}
- super(DatabaseSyncTests, self).setUp()
-
- def assertLastExchangeLog(self, db, expected):
- log = getattr(db, '_last_exchange_log', None)
- if log is None:
- return
- self.assertEqual(expected, log)
-
- def test_sync_tracks_db_generation_of_other(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.assertEqual(0, self.sync(self.db1, self.db2))
- self.assertEqual(
- (0, ''), self.db1._get_replica_gen_and_trans_id('test2'))
- self.assertEqual(
- (0, ''), self.db2._get_replica_gen_and_trans_id('test1'))
- self.assertLastExchangeLog(self.db2,
- {'receive':
- {'docs': [], 'last_known_gen': 0},
- 'return':
- {'docs': [], 'last_gen': 0}})
-
- def test_sync_autoresolves(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- doc1 = self.db1.create_doc_from_json(simple_doc, doc_id='doc')
- rev1 = doc1.rev
- doc2 = self.db2.create_doc_from_json(simple_doc, doc_id='doc')
- rev2 = doc2.rev
- self.sync(self.db1, self.db2)
- doc = self.db1.get_doc('doc')
- self.assertFalse(doc.has_conflicts)
- self.assertEqual(doc.rev, self.db2.get_doc('doc').rev)
- v = vectorclock.VectorClockRev(doc.rev)
- self.assertTrue(v.is_newer(vectorclock.VectorClockRev(rev1)))
- self.assertTrue(v.is_newer(vectorclock.VectorClockRev(rev2)))
-
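- # (On the vector-clock ordering asserted above and relied on below: a
- # rev such as 'test1:2|test2:1' is_newer than 'test1:1' because every
- # per-replica counter is >= the other's and at least one is strictly
- # greater; two revs that each bump a different replica's counter are
- # concurrent, neither is_newer, and that is what produces the
- # conflicts these tests resolve.)
-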
- def test_sync_autoresolves_moar(self):
- # here we test that when the source of a sync holds a conflicted
- # document, the target holds a newer revision of that document, and
- # the target's content matches the content of the source's conflict,
- # then the conflict on the source gets autoresolved and the source's
- # document revision is bumped.
- #
- # idea is as follows:
- # A B
- # a1 -
- # `------->
- # a1 a1
- # v v
- # a2 a1b1
- # `------->
- # a1b1+a2 a1b1
- # v
- # a1b1+a2 a1b2 (a1b2 has same content as a2)
- # `------->
- # a3b2 a1b2 (autoresolved)
- # `------->
- # a3b2 a3b2
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.db1.create_doc_from_json(simple_doc, doc_id='doc')
- self.sync(self.db1, self.db2)
- for db, content in [(self.db1, '{}'), (self.db2, '{"hi": 42}')]:
- doc = db.get_doc('doc')
- doc.set_json(content)
- db.put_doc(doc)
- self.sync(self.db1, self.db2)
- # db1 and db2 now both have a doc of {hi:42}, but db1 has a conflict
- doc = self.db1.get_doc('doc')
- rev1 = doc.rev
- self.assertTrue(doc.has_conflicts)
- # set db2 to have a doc of {} (same as db1 before the conflict)
- doc = self.db2.get_doc('doc')
- doc.set_json('{}')
- self.db2.put_doc(doc)
- rev2 = doc.rev
- # sync it across
- self.sync(self.db1, self.db2)
- # tadaa!
- doc = self.db1.get_doc('doc')
- self.assertFalse(doc.has_conflicts)
- vec1 = vectorclock.VectorClockRev(rev1)
- vec2 = vectorclock.VectorClockRev(rev2)
- vec3 = vectorclock.VectorClockRev(doc.rev)
- self.assertTrue(vec3.is_newer(vec1))
- self.assertTrue(vec3.is_newer(vec2))
- # because the conflict is on the source, sync it another time
- self.sync(self.db1, self.db2)
- # make sure db2 now has the exact same thing
- self.assertEqual(self.db1.get_doc('doc'), self.db2.get_doc('doc'))
-
- def test_sync_autoresolves_moar_backwards(self):
- # here we test that when the target of a sync holds a conflicted
- # document, the source holds a newer revision of that document, and
- # the source's content matches the content of the target's conflict,
- # then the conflict on the target gets autoresolved and the document
- # revision is bumped.
- #
- # idea is as follows:
- # A B
- # a1 -
- # `------->
- # a1 a1
- # v v
- # a2 a1b1
- # `------->
- # a1b1+a2 a1b1
- # v
- # a1b1+a2 a1b2 (a1b2 has same content as a2)
- # <-------'
- # a3b2 a3b2 (autoresolved and propagated)
- self.db1 = self.create_database('test1', 'both')
- self.db2 = self.create_database('test2', 'both')
- self.db1.create_doc_from_json(simple_doc, doc_id='doc')
- self.sync(self.db1, self.db2)
- for db, content in [(self.db1, '{}'), (self.db2, '{"hi": 42}')]:
- doc = db.get_doc('doc')
- doc.set_json(content)
- db.put_doc(doc)
- self.sync(self.db1, self.db2)
- # db1 and db2 now both have a doc of {hi:42}, but db1 has a conflict
- doc = self.db1.get_doc('doc')
- rev1 = doc.rev
- self.assertTrue(doc.has_conflicts)
- revc = self.db1.get_doc_conflicts('doc')[-1].rev
- # set db2 to have a doc of {} (same as db1 before the conflict)
- doc = self.db2.get_doc('doc')
- doc.set_json('{}')
- self.db2.put_doc(doc)
- rev2 = doc.rev
- # sync it across
- self.sync(self.db2, self.db1)
- # tadaa!
- doc = self.db1.get_doc('doc')
- self.assertFalse(doc.has_conflicts)
- vec1 = vectorclock.VectorClockRev(rev1)
- vec2 = vectorclock.VectorClockRev(rev2)
- vec3 = vectorclock.VectorClockRev(doc.rev)
- vecc = vectorclock.VectorClockRev(revc)
- self.assertTrue(vec3.is_newer(vec1))
- self.assertTrue(vec3.is_newer(vec2))
- self.assertTrue(vec3.is_newer(vecc))
- # make sure db2 now has the exact same thing
- self.assertEqual(self.db1.get_doc('doc'), self.db2.get_doc('doc'))
-
- def test_sync_autoresolves_moar_backwards_three(self):
- # same as autoresolves_moar_backwards, but with three databases (note
- # all the syncs go in the same direction -- this is a more natural
- # scenario):
- #
- # A B C
- # a1 - -
- # `------->
- # a1 a1 -
- # `------->
- # a1 a1 a1
- # v v
- # a2 a1b1 a1
- # `------------------->
- # a2 a1b1 a2
- # `------->
- # a2+a1b1 a2
- # v
- # a2 a2+a1b1 a2c1 (same as a1b1)
- # `------------------->
- # a2c1 a2+a1b1 a2c1
- # `------->
- # a2b2c1 a2b2c1 a2c1
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'both')
- self.db3 = self.create_database('test3', 'target')
- self.db1.create_doc_from_json(simple_doc, doc_id='doc')
- self.sync(self.db1, self.db2)
- self.sync(self.db2, self.db3)
- for db, content in [(self.db2, '{"hi": 42}'),
- (self.db1, '{}'),
- ]:
- doc = db.get_doc('doc')
- doc.set_json(content)
- db.put_doc(doc)
- self.sync(self.db1, self.db3)
- self.sync(self.db2, self.db3)
- # db2 and db3 now both have a doc of {}, but db2 has a
- # conflict
- doc = self.db2.get_doc('doc')
- self.assertTrue(doc.has_conflicts)
- revc = self.db2.get_doc_conflicts('doc')[-1].rev
- self.assertEqual('{}', doc.get_json())
- self.assertEqual(self.db3.get_doc('doc').get_json(), doc.get_json())
- self.assertEqual(self.db3.get_doc('doc').rev, doc.rev)
- # set db3 to have a doc of {hi:42} (same as db2 before the conflict)
- doc = self.db3.get_doc('doc')
- doc.set_json('{"hi": 42}')
- self.db3.put_doc(doc)
- rev3 = doc.rev
- # sync it across to db1
- self.sync(self.db1, self.db3)
- # db1 now has hi:42, with a rev that is newer than db2's doc
- doc = self.db1.get_doc('doc')
- rev1 = doc.rev
- self.assertFalse(doc.has_conflicts)
- self.assertEqual('{"hi": 42}', doc.get_json())
- VCR = vectorclock.VectorClockRev
- self.assertTrue(VCR(rev1).is_newer(VCR(self.db2.get_doc('doc').rev)))
- # so sync it to db2
- self.sync(self.db1, self.db2)
- # tadaa!
- doc = self.db2.get_doc('doc')
- self.assertFalse(doc.has_conflicts)
- # db2's revision of the document is strictly newer than db1's before
- # the sync, and db3's before that sync way back when
- self.assertTrue(VCR(doc.rev).is_newer(VCR(rev1)))
- self.assertTrue(VCR(doc.rev).is_newer(VCR(rev3)))
- self.assertTrue(VCR(doc.rev).is_newer(VCR(revc)))
- # make sure both dbs now have the exact same thing
- self.assertEqual(self.db1.get_doc('doc'), self.db2.get_doc('doc'))
-
- def test_sync_puts_changes(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- doc = self.db1.create_doc_from_json(simple_doc)
- self.assertEqual(1, self.sync(self.db1, self.db2))
- self.assertGetDoc(self.db2, doc.doc_id, doc.rev, simple_doc, False)
- self.assertEqual(1, self.db1._get_replica_gen_and_trans_id('test2')[0])
- self.assertEqual(1, self.db2._get_replica_gen_and_trans_id('test1')[0])
- self.assertLastExchangeLog(self.db2,
- {'receive':
- {'docs': [(doc.doc_id, doc.rev)],
- 'source_uid': 'test1',
- 'source_gen': 1,
- 'last_known_gen': 0},
- 'return': {'docs': [], 'last_gen': 1}})
-
- def test_sync_pulls_changes(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- doc = self.db2.create_doc_from_json(simple_doc)
- self.db1.create_index('test-idx', 'key')
- self.assertEqual(0, self.sync(self.db1, self.db2))
- self.assertGetDoc(self.db1, doc.doc_id, doc.rev, simple_doc, False)
- self.assertEqual(1, self.db1._get_replica_gen_and_trans_id('test2')[0])
- self.assertEqual(1, self.db2._get_replica_gen_and_trans_id('test1')[0])
- self.assertLastExchangeLog(self.db2,
- {'receive':
- {'docs': [], 'last_known_gen': 0},
- 'return':
- {'docs': [(doc.doc_id, doc.rev)],
- 'last_gen': 1}})
- self.assertEqual([doc], self.db1.get_from_index('test-idx', 'value'))
-
- def test_sync_pulling_doesnt_update_other_if_changed(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- doc = self.db2.create_doc_from_json(simple_doc)
- # After the local side has sent its list of docs, but before we start
- # receiving the target's response, we update the local database with a
- # new record. When synchronization finishes we notice that something
- # was updated locally beyond the sync itself, so we cannot safely tell
- # the target our new generation.
-
- def before_get_docs(state):
- if state != 'before get_docs':
- return
- self.db1.create_doc_from_json(simple_doc)
-
- self.assertEqual(0, self.sync(self.db1, self.db2,
- trace_hook=before_get_docs))
- self.assertLastExchangeLog(self.db2,
- {'receive':
- {'docs': [], 'last_known_gen': 0},
- 'return':
- {'docs': [(doc.doc_id, doc.rev)],
- 'last_gen': 1}})
- self.assertEqual(1, self.db1._get_replica_gen_and_trans_id('test2')[0])
- # db2 should not have gotten a '_record_sync_info' call, because the
- # local database was updated by more than just the documents
- # returned from db2.
- self.assertEqual(
- (0, ''), self.db2._get_replica_gen_and_trans_id('test1'))
-
- def test_sync_doesnt_update_other_if_nothing_pulled(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.db1.create_doc_from_json(simple_doc)
-
- def no_record_sync_info(state):
- if state != 'record_sync_info':
- return
- self.fail('SyncTarget.record_sync_info was called')
- self.assertEqual(1, self.sync(self.db1, self.db2,
- trace_hook_shallow=no_record_sync_info))
- self.assertEqual(
- 1,
- self.db2._get_replica_gen_and_trans_id(self.db1._replica_uid)[0])
-
- def test_sync_ignores_convergence(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'both')
- doc = self.db1.create_doc_from_json(simple_doc)
- self.db3 = self.create_database('test3', 'target')
- self.assertEqual(1, self.sync(self.db1, self.db3))
- self.assertEqual(0, self.sync(self.db2, self.db3))
- self.assertEqual(1, self.sync(self.db1, self.db2))
- self.assertLastExchangeLog(self.db2,
- {'receive':
- {'docs': [(doc.doc_id, doc.rev)],
- 'source_uid': 'test1',
- 'source_gen': 1, 'last_known_gen': 0},
- 'return': {'docs': [], 'last_gen': 1}})
-
- def test_sync_ignores_superseded(self):
- self.db1 = self.create_database('test1', 'both')
- self.db2 = self.create_database('test2', 'both')
- doc = self.db1.create_doc_from_json(simple_doc)
- doc_rev1 = doc.rev
- self.db3 = self.create_database('test3', 'target')
- self.sync(self.db1, self.db3)
- self.sync(self.db2, self.db3)
- new_content = '{"key": "altval"}'
- doc.set_json(new_content)
- self.db1.put_doc(doc)
- doc_rev2 = doc.rev
- self.sync(self.db2, self.db1)
- self.assertLastExchangeLog(self.db1,
- {'receive':
- {'docs': [(doc.doc_id, doc_rev1)],
- 'source_uid': 'test2',
- 'source_gen': 1, 'last_known_gen': 0},
- 'return':
- {'docs': [(doc.doc_id, doc_rev2)],
- 'last_gen': 2}})
- self.assertGetDoc(self.db1, doc.doc_id, doc_rev2, new_content, False)
-
- def test_sync_sees_remote_conflicted(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- doc1 = self.db1.create_doc_from_json(simple_doc)
- doc_id = doc1.doc_id
- doc1_rev = doc1.rev
- self.db1.create_index('test-idx', 'key')
- new_doc = '{"key": "altval"}'
- doc2 = self.db2.create_doc_from_json(new_doc, doc_id=doc_id)
- doc2_rev = doc2.rev
- self.assertTransactionLog([doc1.doc_id], self.db1)
- self.sync(self.db1, self.db2)
- self.assertLastExchangeLog(self.db2,
- {'receive':
- {'docs': [(doc_id, doc1_rev)],
- 'source_uid': 'test1',
- 'source_gen': 1, 'last_known_gen': 0},
- 'return':
- {'docs': [(doc_id, doc2_rev)],
- 'last_gen': 1}})
- self.assertTransactionLog([doc_id, doc_id], self.db1)
- self.assertGetDoc(self.db1, doc_id, doc2_rev, new_doc, True)
- self.assertGetDoc(self.db2, doc_id, doc2_rev, new_doc, False)
- from_idx = self.db1.get_from_index('test-idx', 'altval')[0]
- self.assertEqual(doc2.doc_id, from_idx.doc_id)
- self.assertEqual(doc2.rev, from_idx.rev)
- self.assertTrue(from_idx.has_conflicts)
- self.assertEqual([], self.db1.get_from_index('test-idx', 'value'))
-
- def test_sync_sees_remote_delete_conflicted(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- doc1 = self.db1.create_doc_from_json(simple_doc)
- doc_id = doc1.doc_id
- self.db1.create_index('test-idx', 'key')
- self.sync(self.db1, self.db2)
- doc2 = self.make_document(doc1.doc_id, doc1.rev, doc1.get_json())
- new_doc = '{"key": "altval"}'
- doc1.set_json(new_doc)
- self.db1.put_doc(doc1)
- self.db2.delete_doc(doc2)
- self.assertTransactionLog([doc_id, doc_id], self.db1)
- self.sync(self.db1, self.db2)
- self.assertLastExchangeLog(self.db2,
- {'receive':
- {'docs': [(doc_id, doc1.rev)],
- 'source_uid': 'test1',
- 'source_gen': 2, 'last_known_gen': 1},
- 'return': {'docs': [(doc_id, doc2.rev)],
- 'last_gen': 2}})
- self.assertTransactionLog([doc_id, doc_id, doc_id], self.db1)
- self.assertGetDocIncludeDeleted(self.db1, doc_id, doc2.rev, None, True)
- self.assertGetDocIncludeDeleted(
- self.db2, doc_id, doc2.rev, None, False)
- self.assertEqual([], self.db1.get_from_index('test-idx', 'value'))
-
- def test_sync_local_race_conflicted(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- doc = self.db1.create_doc_from_json(simple_doc)
- doc_id = doc.doc_id
- doc1_rev = doc.rev
- self.db1.create_index('test-idx', 'key')
- self.sync(self.db1, self.db2)
- content1 = '{"key": "localval"}'
- content2 = '{"key": "altval"}'
- doc.set_json(content2)
- self.db2.put_doc(doc)
- doc2_rev2 = doc.rev
- triggered = []
-
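- # The trace hook below fires after the source has listed its changes,
- # simulating a local write that races the sync: the remote content
- # should win locally but be flagged as conflicted.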
- def after_whatschanged(state):
- if state != 'after whats_changed':
- return
- triggered.append(True)
- doc = self.make_document(doc_id, doc1_rev, content1)
- self.db1.put_doc(doc)
-
- self.sync(self.db1, self.db2, trace_hook=after_whatschanged)
- self.assertEqual([True], triggered)
- self.assertGetDoc(self.db1, doc_id, doc2_rev2, content2, True)
- from_idx = self.db1.get_from_index('test-idx', 'altval')[0]
- self.assertEqual(doc.doc_id, from_idx.doc_id)
- self.assertEqual(doc.rev, from_idx.rev)
- self.assertTrue(from_idx.has_conflicts)
- self.assertEqual([], self.db1.get_from_index('test-idx', 'value'))
- self.assertEqual([], self.db1.get_from_index('test-idx', 'localval'))
-
- def test_sync_propagates_deletes(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'both')
- doc1 = self.db1.create_doc_from_json(simple_doc)
- doc_id = doc1.doc_id
- self.db1.create_index('test-idx', 'key')
- self.sync(self.db1, self.db2)
- self.db2.create_index('test-idx', 'key')
- self.db3 = self.create_database('test3', 'target')
- self.sync(self.db1, self.db3)
- self.db1.delete_doc(doc1)
- deleted_rev = doc1.rev
- self.sync(self.db1, self.db2)
- self.assertLastExchangeLog(self.db2,
- {'receive':
- {'docs': [(doc_id, deleted_rev)],
- 'source_uid': 'test1',
- 'source_gen': 2, 'last_known_gen': 1},
- 'return': {'docs': [], 'last_gen': 2}})
- self.assertGetDocIncludeDeleted(
- self.db1, doc_id, deleted_rev, None, False)
- self.assertGetDocIncludeDeleted(
- self.db2, doc_id, deleted_rev, None, False)
- self.assertEqual([], self.db1.get_from_index('test-idx', 'value'))
- self.assertEqual([], self.db2.get_from_index('test-idx', 'value'))
- self.sync(self.db2, self.db3)
- self.assertLastExchangeLog(self.db3,
- {'receive':
- {'docs': [(doc_id, deleted_rev)],
- 'source_uid': 'test2',
- 'source_gen': 2,
- 'last_known_gen': 0},
- 'return':
- {'docs': [], 'last_gen': 2}})
- self.assertGetDocIncludeDeleted(
- self.db3, doc_id, deleted_rev, None, False)
-
- def test_sync_propagates_resolution(self):
- self.db1 = self.create_database('test1', 'both')
- self.db2 = self.create_database('test2', 'both')
- doc1 = self.db1.create_doc_from_json('{"a": 1}', doc_id='the-doc')
- db3 = self.create_database('test3', 'both')
- self.sync(self.db2, self.db1)
- self.assertEqual(
- self.db1._get_generation_info(),
- self.db2._get_replica_gen_and_trans_id(self.db1._replica_uid))
- self.assertEqual(
- self.db2._get_generation_info(),
- self.db1._get_replica_gen_and_trans_id(self.db2._replica_uid))
- self.sync(db3, self.db1)
- # update on 2
- doc2 = self.make_document('the-doc', doc1.rev, '{"a": 2}')
- self.db2.put_doc(doc2)
- self.sync(self.db2, db3)
- self.assertEqual(db3.get_doc('the-doc').rev, doc2.rev)
- # update on 1
- doc1.set_json('{"a": 3}')
- self.db1.put_doc(doc1)
- # these syncs surface the competing revisions, leaving 'the-doc' conflicted
- self.sync(self.db2, self.db1)
- self.sync(db3, self.db1)
- self.assertTrue(self.db2.get_doc('the-doc').has_conflicts)
- self.assertTrue(db3.get_doc('the-doc').has_conflicts)
- # resolve on db2; the resolution should then propagate to db3 via sync
- conflicts = self.db2.get_doc_conflicts('the-doc')
- doc4 = self.make_document('the-doc', None, '{"a": 4}')
- revs = [doc.rev for doc in conflicts]
- self.db2.resolve_doc(doc4, revs)
- doc2 = self.db2.get_doc('the-doc')
- self.assertEqual(doc4.get_json(), doc2.get_json())
- self.assertFalse(doc2.has_conflicts)
- self.sync(self.db2, db3)
- doc3 = db3.get_doc('the-doc')
- self.assertEqual(doc4.get_json(), doc3.get_json())
- self.assertFalse(doc3.has_conflicts)
-
- def test_sync_supersedes_conflicts(self):
- self.db1 = self.create_database('test1', 'both')
- self.db2 = self.create_database('test2', 'target')
- db3 = self.create_database('test3', 'both')
- doc1 = self.db1.create_doc_from_json('{"a": 1}', doc_id='the-doc')
- self.db2.create_doc_from_json('{"b": 1}', doc_id='the-doc')
- db3.create_doc_from_json('{"c": 1}', doc_id='the-doc')
- self.sync(db3, self.db1)
- self.assertEqual(
- self.db1._get_generation_info(),
- db3._get_replica_gen_and_trans_id(self.db1._replica_uid))
- self.assertEqual(
- db3._get_generation_info(),
- self.db1._get_replica_gen_and_trans_id(db3._replica_uid))
- self.sync(db3, self.db2)
- self.assertEqual(
- self.db2._get_generation_info(),
- db3._get_replica_gen_and_trans_id(self.db2._replica_uid))
- self.assertEqual(
- db3._get_generation_info(),
- self.db2._get_replica_gen_and_trans_id(db3._replica_uid))
- self.assertEqual(3, len(db3.get_doc_conflicts('the-doc')))
- doc1.set_json('{"a": 2}')
- self.db1.put_doc(doc1)
- self.sync(db3, self.db1)
- # the updated doc1 revision supersedes the original in the conflict
- # set, so the count stays at 3
- self.assertEqual(3, len(db3.get_doc_conflicts('the-doc')))
-
- def test_sync_stops_after_get_sync_info(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.db1.create_doc_from_json(tests.simple_doc)
- self.sync(self.db1, self.db2)
-
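- # Nothing has changed since the sync above, so this second sync should
- # stop right after get_sync_info; any later traced state fails the test.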
- def put_hook(state):
- self.fail("Tracehook triggered for %s" % (state,))
-
- self.sync(self.db1, self.db2, trace_hook_shallow=put_hook)
-
- def test_sync_detects_rollback_in_source(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc1')
- self.sync(self.db1, self.db2)
- db1_copy = self.copy_database(self.db1)
- self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc2')
- self.sync(self.db1, self.db2)
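- # The copy acts like db1 restored from an older backup: db2 already
- # recorded a newer generation for this replica, so syncing the stale
- # copy must be rejected.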
- self.assertRaises(
- errors.InvalidGeneration, self.sync, db1_copy, self.db2)
-
- def test_sync_detects_rollback_in_target(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent")
- self.sync(self.db1, self.db2)
- db2_copy = self.copy_database(self.db2)
- self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc2')
- self.sync(self.db1, self.db2)
- self.assertRaises(
- errors.InvalidGeneration, self.sync, self.db1, db2_copy)
-
- def test_sync_detects_diverged_source(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- db3 = self.copy_database(self.db1)
- self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent")
- db3.create_doc_from_json(tests.simple_doc, doc_id="divergent")
- self.sync(self.db1, self.db2)
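- # db3 reached the same generation as db1 through a different write, so
- # the generations match but the transaction ids differ, which the
- # target must detect as divergence.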
- self.assertRaises(
- errors.InvalidTransactionId, self.sync, db3, self.db2)
-
- def test_sync_detects_diverged_target(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- db3 = self.copy_database(self.db2)
- db3.create_doc_from_json(tests.nested_doc, doc_id="divergent")
- self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent")
- self.sync(self.db1, self.db2)
- self.assertRaises(
- errors.InvalidTransactionId, self.sync, self.db1, db3)
-
- def test_sync_detects_rollback_and_divergence_in_source(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc1')
- self.sync(self.db1, self.db2)
- db1_copy = self.copy_database(self.db1)
- self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc2')
- self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc3')
- self.sync(self.db1, self.db2)
- db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2')
- db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3')
- self.assertRaises(
- errors.InvalidTransactionId, self.sync, db1_copy, self.db2)
-
- def test_sync_detects_rollback_and_divergence_in_target(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent")
- self.sync(self.db1, self.db2)
- db2_copy = self.copy_database(self.db2)
- self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc2')
- self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc3')
- self.sync(self.db1, self.db2)
- db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2')
- db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3')
- self.assertRaises(
- errors.InvalidTransactionId, self.sync, self.db1, db2_copy)
-
-
-class TestDbSync(tests.TestCaseWithServer):
- """Test db.sync remote sync shortcut"""
-
- scenarios = [
- ('py-http', {
- 'make_app_with_state': make_http_app,
- 'make_database_for_test': tests.make_memory_database_for_test,
- }),
- ('py-oauth-http', {
- 'make_app_with_state': make_oauth_http_app,
- 'make_database_for_test': tests.make_memory_database_for_test,
- 'oauth': True
- }),
- ]
-
- oauth = False
-
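- # In the OAuth scenario the target database lives under '~/' and
- # db.sync must be handed the consumer and token credentials; the plain
- # HTTP scenario needs neither.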
- def do_sync(self, target_name):
- if self.oauth:
- path = '~/' + target_name
- extra = dict(creds={'oauth': {
- 'consumer_key': tests.consumer1.key,
- 'consumer_secret': tests.consumer1.secret,
- 'token_key': tests.token1.key,
- 'token_secret': tests.token1.secret,
- }})
- else:
- path = target_name
- extra = {}
- target_url = self.getURL(path)
- return self.db.sync(target_url, **extra)
-
- def setUp(self):
- super(TestDbSync, self).setUp()
- self.startServer()
- self.db = self.make_database_for_test(self, 'test1')
- self.db2 = self.request_state._create_database('test2.db')
-
- def test_db_sync(self):
- doc1 = self.db.create_doc_from_json(tests.simple_doc)
- doc2 = self.db2.create_doc_from_json(tests.nested_doc)
- local_gen_before_sync = self.do_sync('test2.db')
- gen, _, changes = self.db.whats_changed(local_gen_before_sync)
- self.assertEqual(1, len(changes))
- self.assertEqual(doc2.doc_id, changes[0][0])
- self.assertEqual(1, gen - local_gen_before_sync)
- self.assertGetDoc(self.db2, doc1.doc_id, doc1.rev, tests.simple_doc,
- False)
- self.assertGetDoc(self.db, doc2.doc_id, doc2.rev, tests.nested_doc,
- False)
-
- def test_db_sync_autocreate(self):
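- # Syncing against a database that does not yet exist on the server
- # should create it there and push the local document.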
- doc1 = self.db.create_doc_from_json(tests.simple_doc)
- local_gen_before_sync = self.do_sync('test3.db')
- gen, _, changes = self.db.whats_changed(local_gen_before_sync)
- self.assertEqual(0, gen - local_gen_before_sync)
- db3 = self.request_state.open_database('test3.db')
- gen, _, changes = db3.whats_changed()
- self.assertEqual(1, len(changes))
- self.assertEqual(doc1.doc_id, changes[0][0])
- self.assertGetDoc(db3, doc1.doc_id, doc1.rev, tests.simple_doc,
- False)
- t_gen, _ = self.db._get_replica_gen_and_trans_id('test3.db')
- s_gen, _ = db3._get_replica_gen_and_trans_id('test1')
- self.assertEqual(1, t_gen)
- self.assertEqual(1, s_gen)
-
-
-class TestRemoteSyncIntegration(tests.TestCaseWithServer):
- """Integration tests for the most common sync scenario local -> remote"""
-
- make_app_with_state = staticmethod(make_http_app)
-
- def setUp(self):
- super(TestRemoteSyncIntegration, self).setUp()
- self.startServer()
- self.db1 = inmemory.InMemoryDatabase('test1')
- self.db2 = self.request_state._create_database('test2')
-
- def test_sync_tracks_generations_incrementally(self):
- doc11 = self.db1.create_doc_from_json('{"a": 1}')
- doc12 = self.db1.create_doc_from_json('{"a": 2}')
- doc21 = self.db2.create_doc_from_json('{"b": 1}')
- doc22 = self.db2.create_doc_from_json('{"b": 2}')
- # sanity check: each database starts with two transactions
- self.assertEqual(2, len(self.db1._get_transaction_log()))
- self.assertEqual(2, len(self.db2._get_transaction_log()))
- progress1 = []
- progress2 = []
- _do_set_replica_gen_and_trans_id = \
- self.db1._do_set_replica_gen_and_trans_id
-
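- # Wrap _do_set_replica_gen_and_trans_id on both replicas to record
- # every intermediate (uid, generation, docs-seen) tuple, so the
- # assertions below can check that generations are tracked step by
- # step rather than only once at the end.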
- def set_sync_generation_witness1(other_uid, other_gen, trans_id):
- progress1.append((other_uid, other_gen,
- [d for d, t in
- self.db1._get_transaction_log()[2:]]))
- _do_set_replica_gen_and_trans_id(other_uid, other_gen, trans_id)
- self.patch(self.db1, '_do_set_replica_gen_and_trans_id',
- set_sync_generation_witness1)
- _do_set_replica_gen_and_trans_id2 = \
- self.db2._do_set_replica_gen_and_trans_id
-
- def set_sync_generation_witness2(other_uid, other_gen, trans_id):
- progress2.append((other_uid, other_gen,
- [d for d, t in
- self.db2._get_transaction_log()[2:]]))
- _do_set_replica_gen_and_trans_id2(other_uid, other_gen, trans_id)
- self.patch(self.db2, '_do_set_replica_gen_and_trans_id',
- set_sync_generation_witness2)
-
- db2_url = self.getURL('test2')
- self.db1.sync(db2_url)
-
- self.assertEqual([('test2', 1, [doc21.doc_id]),
- ('test2', 2, [doc21.doc_id, doc22.doc_id]),
- ('test2', 4, [doc21.doc_id, doc22.doc_id])],
- progress1)
- self.assertEqual([('test1', 1, [doc11.doc_id]),
- ('test1', 2, [doc11.doc_id, doc12.doc_id]),
- ('test1', 4, [doc11.doc_id, doc12.doc_id])],
- progress2)
-
-
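-# load_tests is the unittest hook that multiplies each test case above by
-# its declared scenarios, so every backend/transport combination runs.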
-load_tests = tests.load_with_scenarios
diff --git a/src/leap/soledad/tests/u1db_tests/testing-certs/Makefile b/src/leap/soledad/tests/u1db_tests/testing-certs/Makefile
deleted file mode 100644
index 2385e75b..00000000
--- a/src/leap/soledad/tests/u1db_tests/testing-certs/Makefile
+++ /dev/null
@@ -1,35 +0,0 @@
-CATOP=./demoCA
-ORIG_CONF=/usr/lib/ssl/openssl.cnf
-ELEVEN_YEARS=-days 4015
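-# 4015 days is roughly eleven years.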
-
-init:
- cp $(ORIG_CONF) ca.conf
- install -d $(CATOP)
- install -d $(CATOP)/certs
- install -d $(CATOP)/crl
- install -d $(CATOP)/newcerts
- install -d $(CATOP)/private
- touch $(CATOP)/index.txt
- echo 01>$(CATOP)/crlnumber
- @echo '**** Making CA certificate ...'
- openssl req -nodes -new \
- -newkey rsa -keyout $(CATOP)/private/cakey.pem \
- -out $(CATOP)/careq.pem \
- -multivalue-rdn \
- -subj "/C=UK/ST=-/O=u1db LOCAL TESTING ONLY, DO NO TRUST/CN=u1db testing CA"
- openssl ca -config ./ca.conf -create_serial \
- -out $(CATOP)/cacert.pem $(ELEVEN_YEARS) -batch \
- -keyfile $(CATOP)/private/cakey.pem -selfsign \
- -extensions v3_ca -infiles $(CATOP)/careq.pem
-
-pems:
- cp ./demoCA/cacert.pem .
- openssl req -new -config ca.conf \
- -multivalue-rdn \
- -subj "/O=u1db LOCAL TESTING ONLY, DO NOT TRUST/CN=localhost" \
- -nodes -keyout testing.key -out newreq.pem $(ELEVEN_YEARS)
- openssl ca -batch -config ./ca.conf $(ELEVEN_YEARS) \
- -policy policy_anything \
- -out testing.cert -infiles newreq.pem
-
-.PHONY: init pems
diff --git a/src/leap/soledad/tests/u1db_tests/testing-certs/cacert.pem b/src/leap/soledad/tests/u1db_tests/testing-certs/cacert.pem
deleted file mode 100644
index c019a730..00000000
--- a/src/leap/soledad/tests/u1db_tests/testing-certs/cacert.pem
+++ /dev/null
@@ -1,58 +0,0 @@
-Certificate:
- Data:
- Version: 3 (0x2)
- Serial Number:
- e4:de:01:76:c4:78:78:7e
- Signature Algorithm: sha1WithRSAEncryption
- Issuer: C=UK, ST=-, O=u1db LOCAL TESTING ONLY, DO NO TRUST, CN=u1db testing CA
- Validity
- Not Before: May 3 11:11:11 2012 GMT
- Not After : May 1 11:11:11 2023 GMT
- Subject: C=UK, ST=-, O=u1db LOCAL TESTING ONLY, DO NO TRUST, CN=u1db testing CA
- Subject Public Key Info:
- Public Key Algorithm: rsaEncryption
- Public-Key: (1024 bit)
- Modulus:
- 00:bc:91:a5:7f:7d:37:f7:06:c7:db:5b:83:6a:6b:
- 63:c3:8b:5c:f7:84:4d:97:6d:d4:be:bf:e7:79:a8:
- c1:03:57:ec:90:d4:20:e7:02:95:d9:a6:49:e3:f9:
- 9a:ea:37:b9:b2:02:62:ab:40:d3:42:bb:4a:4e:a2:
- 47:71:0f:1d:a2:c5:94:a1:cf:35:d3:23:32:42:c0:
- 1e:8d:cb:08:58:fb:8a:5c:3e:ea:eb:d5:2c:ed:d6:
- aa:09:b4:b5:7d:e3:45:c9:ae:c2:82:b2:ae:c0:81:
- bc:24:06:65:a9:e7:e0:61:ac:25:ee:53:d3:d7:be:
- 22:f7:00:a2:ad:c6:0e:3a:39
- Exponent: 65537 (0x10001)
- X509v3 extensions:
- X509v3 Subject Key Identifier:
- DB:3D:93:51:6C:32:15:54:8F:10:50:FC:49:4F:36:15:28:BB:95:6D
- X509v3 Authority Key Identifier:
- keyid:DB:3D:93:51:6C:32:15:54:8F:10:50:FC:49:4F:36:15:28:BB:95:6D
-
- X509v3 Basic Constraints:
- CA:TRUE
- Signature Algorithm: sha1WithRSAEncryption
- 72:9b:c1:f7:07:65:83:36:25:4e:01:2f:b7:4a:f2:a4:00:28:
- 80:c7:56:2c:32:39:90:13:61:4b:bb:12:c5:44:9d:42:57:85:
- 28:19:70:69:e1:43:c8:bd:11:f6:94:df:91:2d:c3:ea:82:8d:
- b4:8f:5d:47:a3:00:99:53:29:93:27:6c:c5:da:c1:20:6f:ab:
- ec:4a:be:34:f3:8f:02:e5:0c:c0:03:ac:2b:33:41:71:4f:0a:
- 72:5a:b4:26:1a:7f:81:bc:c0:95:8a:06:87:a8:11:9f:5c:73:
- 38:df:5a:69:40:21:29:ad:46:23:56:75:e1:e9:8b:10:18:4c:
- 7b:54
------BEGIN CERTIFICATE-----
-MIICkjCCAfugAwIBAgIJAOTeAXbEeHh+MA0GCSqGSIb3DQEBBQUAMGIxCzAJBgNV
-BAYTAlVLMQowCAYDVQQIDAEtMS0wKwYDVQQKDCR1MWRiIExPQ0FMIFRFU1RJTkcg
-T05MWSwgRE8gTk8gVFJVU1QxGDAWBgNVBAMMD3UxZGIgdGVzdGluZyBDQTAeFw0x
-MjA1MDMxMTExMTFaFw0yMzA1MDExMTExMTFaMGIxCzAJBgNVBAYTAlVLMQowCAYD
-VQQIDAEtMS0wKwYDVQQKDCR1MWRiIExPQ0FMIFRFU1RJTkcgT05MWSwgRE8gTk8g
-VFJVU1QxGDAWBgNVBAMMD3UxZGIgdGVzdGluZyBDQTCBnzANBgkqhkiG9w0BAQEF
-AAOBjQAwgYkCgYEAvJGlf3039wbH21uDamtjw4tc94RNl23Uvr/neajBA1fskNQg
-5wKV2aZJ4/ma6je5sgJiq0DTQrtKTqJHcQ8dosWUoc810yMyQsAejcsIWPuKXD7q
-69Us7daqCbS1feNFya7CgrKuwIG8JAZlqefgYawl7lPT174i9wCircYOOjkCAwEA
-AaNQME4wHQYDVR0OBBYEFNs9k1FsMhVUjxBQ/ElPNhUou5VtMB8GA1UdIwQYMBaA
-FNs9k1FsMhVUjxBQ/ElPNhUou5VtMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF
-BQADgYEAcpvB9wdlgzYlTgEvt0rypAAogMdWLDI5kBNhS7sSxUSdQleFKBlwaeFD
-yL0R9pTfkS3D6oKNtI9dR6MAmVMpkydsxdrBIG+r7Eq+NPOPAuUMwAOsKzNBcU8K
-clq0Jhp/gbzAlYoGh6gRn1xzON9aaUAhKa1GI1Z14emLEBhMe1Q=
------END CERTIFICATE-----
diff --git a/src/leap/soledad/tests/u1db_tests/testing-certs/testing.cert b/src/leap/soledad/tests/u1db_tests/testing-certs/testing.cert
deleted file mode 100644
index 985684fb..00000000
--- a/src/leap/soledad/tests/u1db_tests/testing-certs/testing.cert
+++ /dev/null
@@ -1,61 +0,0 @@
-Certificate:
- Data:
- Version: 3 (0x2)
- Serial Number:
- e4:de:01:76:c4:78:78:7f
- Signature Algorithm: sha1WithRSAEncryption
- Issuer: C=UK, ST=-, O=u1db LOCAL TESTING ONLY, DO NO TRUST, CN=u1db testing CA
- Validity
- Not Before: May 3 11:11:14 2012 GMT
- Not After : May 1 11:11:14 2023 GMT
- Subject: O=u1db LOCAL TESTING ONLY, DO NOT TRUST, CN=localhost
- Subject Public Key Info:
- Public Key Algorithm: rsaEncryption
- Public-Key: (1024 bit)
- Modulus:
- 00:c6:1d:72:d3:c5:e4:fc:d1:4c:d9:e4:08:3e:90:
- 10:ce:3f:1f:87:4a:1d:4f:7f:2a:5a:52:c9:65:4f:
- d9:2c:bf:69:75:18:1a:b5:c9:09:32:00:47:f5:60:
- aa:c6:dd:3a:87:37:5f:16:be:de:29:b5:ea:fc:41:
- 7e:eb:77:bb:df:63:c3:06:1e:ed:e9:a0:67:1a:f1:
- ec:e1:9d:f7:9c:8f:1c:fa:c3:66:7b:39:dc:70:ae:
- 09:1b:9c:c0:9a:c4:90:77:45:8e:39:95:a9:2f:92:
- 43:bd:27:07:5a:99:51:6e:76:a0:af:dd:b1:2c:8f:
- ca:8b:8c:47:0d:f6:6e:fc:69
- Exponent: 65537 (0x10001)
- X509v3 extensions:
- X509v3 Basic Constraints:
- CA:FALSE
- Netscape Comment:
- OpenSSL Generated Certificate
- X509v3 Subject Key Identifier:
- 1C:63:85:E1:1D:F3:89:2E:6C:4E:3F:FB:D0:10:64:5A:C1:22:6A:2A
- X509v3 Authority Key Identifier:
- keyid:DB:3D:93:51:6C:32:15:54:8F:10:50:FC:49:4F:36:15:28:BB:95:6D
-
- Signature Algorithm: sha1WithRSAEncryption
- 1d:6d:3e:bd:93:fd:bd:3e:17:b8:9f:f0:99:7f:db:50:5c:b2:
- 01:42:03:b5:d5:94:05:d3:f6:8e:80:82:55:47:1f:58:f2:18:
- 6c:ab:ef:43:2c:2f:10:e1:7c:c4:5c:cc:ac:50:50:22:42:aa:
- 35:33:f5:b9:f3:a6:66:55:d9:36:f4:f2:e4:d4:d9:b5:2c:52:
- 66:d4:21:17:97:22:b8:9b:d7:0e:7c:3d:ce:85:19:ca:c4:d2:
- 58:62:31:c6:18:3e:44:fc:f4:30:b6:95:87:ee:21:4a:08:f0:
- af:3c:8f:c4:ba:5e:a1:5c:37:1a:7d:7b:fe:66:ae:62:50:17:
- 31:ca
------BEGIN CERTIFICATE-----
-MIICnzCCAgigAwIBAgIJAOTeAXbEeHh/MA0GCSqGSIb3DQEBBQUAMGIxCzAJBgNV
-BAYTAlVLMQowCAYDVQQIDAEtMS0wKwYDVQQKDCR1MWRiIExPQ0FMIFRFU1RJTkcg
-T05MWSwgRE8gTk8gVFJVU1QxGDAWBgNVBAMMD3UxZGIgdGVzdGluZyBDQTAeFw0x
-MjA1MDMxMTExMTRaFw0yMzA1MDExMTExMTRaMEQxLjAsBgNVBAoMJXUxZGIgTE9D
-QUwgVEVTVElORyBPTkxZLCBETyBOT1QgVFJVU1QxEjAQBgNVBAMMCWxvY2FsaG9z
-dDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAxh1y08Xk/NFM2eQIPpAQzj8f
-h0odT38qWlLJZU/ZLL9pdRgatckJMgBH9WCqxt06hzdfFr7eKbXq/EF+63e732PD
-Bh7t6aBnGvHs4Z33nI8c+sNmeznccK4JG5zAmsSQd0WOOZWpL5JDvScHWplRbnag
-r92xLI/Ki4xHDfZu/GkCAwEAAaN7MHkwCQYDVR0TBAIwADAsBglghkgBhvhCAQ0E
-HxYdT3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFBxjheEd
-84kubE4/+9AQZFrBImoqMB8GA1UdIwQYMBaAFNs9k1FsMhVUjxBQ/ElPNhUou5Vt
-MA0GCSqGSIb3DQEBBQUAA4GBAB1tPr2T/b0+F7if8Jl/21BcsgFCA7XVlAXT9o6A
-glVHH1jyGGyr70MsLxDhfMRczKxQUCJCqjUz9bnzpmZV2Tb08uTU2bUsUmbUIReX
-Irib1w58Pc6FGcrE0lhiMcYYPkT89DC2lYfuIUoI8K88j8S6XqFcNxp9e/5mrmJQ
-FzHK
------END CERTIFICATE-----
diff --git a/src/leap/soledad/tests/u1db_tests/testing-certs/testing.key b/src/leap/soledad/tests/u1db_tests/testing-certs/testing.key
deleted file mode 100644
index d83d4920..00000000
--- a/src/leap/soledad/tests/u1db_tests/testing-certs/testing.key
+++ /dev/null
@@ -1,16 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAMYdctPF5PzRTNnk
-CD6QEM4/H4dKHU9/KlpSyWVP2Sy/aXUYGrXJCTIAR/VgqsbdOoc3Xxa+3im16vxB
-fut3u99jwwYe7emgZxrx7OGd95yPHPrDZns53HCuCRucwJrEkHdFjjmVqS+SQ70n
-B1qZUW52oK/dsSyPyouMRw32bvxpAgMBAAECgYBs3lXxhjg1rhabTjIxnx19GTcM
-M3Az9V+izweZQu3HJ1CeZiaXauhAr+LbNsniCkRVddotN6oCJdQB10QVxXBZc9Jz
-HPJ4zxtZfRZlNMTMmG7eLWrfxpgWnb/BUjDb40yy1nhr9yhDUnI/8RoHDRHnAEHZ
-/CnHGUrqcVcrY5zJAQJBAPLhBJg9W88JVmcOKdWxRgs7dLHnZb999Kv1V5mczmAi
-jvGvbUmucqOqke6pTUHNYyNHqU6pySzGUi2cH+BAkFECQQDQ0VoAOysg6FVoT15v
-tGh57t5sTiCZZ7PS8jwvtThsgA+vcf6c16XWzXgjGXSap4r2QDOY2rI5lsWLaQ8T
-+fyZAkAfyFJRmbXp4c7srW3MCOahkaYzoZQu+syJtBFCiMJ40gzik5I5khpuUGPI
-V19EvRu8AiSlppIsycb3MPb64XgBAkEAy7DrUf5le5wmc7G4NM6OeyJ+5LbxJbL6
-vnJ8My1a9LuWkVVpQCU7J+UVo2dZTuLPspW9vwTVhUeFOxAoHRxlQQJAFem93f7m
-el2BkB2EFqU3onPejkZ5UrDmfmeOQR1axMQNSXqSxcJxqa16Ru1BWV2gcWRbwajQ
-oc+kuJThu/r/Ug==
------END PRIVATE KEY-----