author    Kali Kaneko <kali@leap.se>  2014-08-15 13:24:25 -0500
committer Kali Kaneko <kali@leap.se>  2014-08-15 13:24:25 -0500
commit    6ba98af198db2884d1b43da90683bdb399a589c8 (patch)
tree      1390542fcecc318a18206101c2720144106cf025 /client
parent    365fa1603a977040a1891880a66118f196a54ac0 (diff)
parent    ab7850bbdcded8b0e36cb27a2468f55d1910c218 (diff)
Merge remote-tracking branch 'drebs-github/feature/5895_store-all-incoming-documents-in-sync-db' into develop
Diffstat (limited to 'client')
-rw-r--r--  client/changes/feature_5895-store-all-incoming-documents-in-sync-db | 1
-rw-r--r--  client/src/leap/soledad/client/__init__.py | 670
-rw-r--r--  client/src/leap/soledad/client/crypto.py | 263
-rw-r--r--  client/src/leap/soledad/client/mp_safe_db.py | 112
-rw-r--r--  client/src/leap/soledad/client/secrets.py | 776
-rw-r--r--  client/src/leap/soledad/client/sqlcipher.py | 119
-rw-r--r--  client/src/leap/soledad/client/sync.py | 2
-rw-r--r--  client/src/leap/soledad/client/target.py | 87
8 files changed, 1236 insertions, 794 deletions
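
Note on the merge: incoming documents are no longer parked in an in-memory dict keyed by generation; every received document now lands in the sync database with an `encrypted` flag saying whether its payload still needs decryption (see the crypto.py hunks below). As a rough, hypothetical sketch of the table shape implied by the new FIELD_NAMES; the real schema is created in sqlcipher.py (not fully shown here) and may differ in types and constraints:

# Hypothetical sketch only, derived from FIELD_NAMES =
# "doc_id, rev, content, gen, trans_id, encrypted".
CREATE_DOCS_RECEIVED = (
    "CREATE TABLE IF NOT EXISTS docs_received ("
    " doc_id TEXT, rev TEXT, content TEXT,"
    " gen INTEGER, trans_id TEXT, encrypted INTEGER)")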
diff --git a/client/changes/feature_5895-store-all-incoming-documents-in-sync-db b/client/changes/feature_5895-store-all-incoming-documents-in-sync-db
new file mode 100644
index 00000000..71d5a91f
--- /dev/null
+++ b/client/changes/feature_5895-store-all-incoming-documents-in-sync-db
@@ -0,0 +1 @@
+ o Store all incoming documents in the sync db (#5895).
diff --git a/client/src/leap/soledad/client/__init__.py b/client/src/leap/soledad/client/__init__.py
index 586e3389..c76e4a4a 100644
--- a/client/src/leap/soledad/client/__init__.py
+++ b/client/src/leap/soledad/client/__init__.py
@@ -31,9 +31,7 @@ import os
import socket
import ssl
import urlparse
-import hmac
-from hashlib import sha256
try:
import cchardet as chardet
@@ -43,41 +41,20 @@ except ImportError:
from u1db.remote import http_client
from u1db.remote.ssl_match_hostname import match_hostname
-import scrypt
-import simplejson as json
-
from leap.common.config import get_path_prefix
from leap.soledad.common import (
SHARED_DB_NAME,
soledad_assert,
soledad_assert_type
)
-from leap.soledad.common.errors import (
- InvalidTokenError,
- NotLockedError,
- AlreadyLockedError,
- LockTimedOutError,
-)
-from leap.soledad.common.crypto import (
- MacMethods,
- UnknownMacMethod,
- WrongMac,
- MAC_KEY,
- MAC_METHOD_KEY,
-)
from leap.soledad.client.events import (
- SOLEDAD_CREATING_KEYS,
- SOLEDAD_DONE_CREATING_KEYS,
- SOLEDAD_DOWNLOADING_KEYS,
- SOLEDAD_DONE_DOWNLOADING_KEYS,
- SOLEDAD_UPLOADING_KEYS,
- SOLEDAD_DONE_UPLOADING_KEYS,
SOLEDAD_NEW_DATA_TO_SYNC,
SOLEDAD_DONE_DATA_SYNC,
signal,
)
from leap.soledad.common.document import SoledadDocument
from leap.soledad.client.crypto import SoledadCrypto
+from leap.soledad.client.secrets import SoledadSecrets
from leap.soledad.client.shared_db import SoledadSharedDatabase
from leap.soledad.client.sqlcipher import open as sqlcipher_open
from leap.soledad.client.sqlcipher import SQLCipherDatabase
@@ -102,27 +79,6 @@ Soledad client and server.
# Soledad: local encrypted storage and remote encrypted sync.
#
-class NoStorageSecret(Exception):
- """
- Raised when trying to use a storage secret but none is available.
- """
- pass
-
-
-class PassphraseTooShort(Exception):
- """
- Raised when trying to change the passphrase but the provided passphrase is
- too short.
- """
-
-
-class BootstrapSequenceError(Exception):
- """
- Raised when an attempt to generate a secret and store it in a recovery
- documents on server failed.
- """
-
-
class Soledad(object):
"""
Soledad provides encrypted data storage and sync.
@@ -166,57 +122,6 @@ class Soledad(object):
The name of the file where the storage secrets will be stored.
"""
- GENERATED_SECRET_LENGTH = 1024
- """
- The length of the generated secret used to derive keys for symmetric
- encryption for local and remote storage.
- """
-
- LOCAL_STORAGE_SECRET_LENGTH = 512
- """
- The length of the secret used to derive a passphrase for the SQLCipher
- database.
- """
-
- REMOTE_STORAGE_SECRET_LENGTH = \
- GENERATED_SECRET_LENGTH - LOCAL_STORAGE_SECRET_LENGTH
- """
- The length of the secret used to derive an encryption key and a MAC auth
- key for remote storage.
- """
-
- SALT_LENGTH = 64
- """
- The length of the salt used to derive the key for the storage secret
- encryption.
- """
-
- MINIMUM_PASSPHRASE_LENGTH = 6
- """
- The minimum length for a passphrase. The passphrase length is only checked
- when the user changes her passphrase, not when she instantiates Soledad.
- """
-
- IV_SEPARATOR = ":"
- """
- A separator used for storing the encryption initial value prepended to the
- ciphertext.
- """
-
- UUID_KEY = 'uuid'
- STORAGE_SECRETS_KEY = 'storage_secrets'
- SECRET_KEY = 'secret'
- CIPHER_KEY = 'cipher'
- LENGTH_KEY = 'length'
- KDF_KEY = 'kdf'
- KDF_SALT_KEY = 'kdf_salt'
- KDF_LENGTH_KEY = 'kdf_length'
- KDF_SCRYPT = 'scrypt'
- CIPHER_AES256 = 'aes256'
- """
- Keys used to access storage secrets in recovery documents.
- """
-
DEFAULT_PREFIX = os.path.join(get_path_prefix(), 'leap', 'soledad')
"""
Prefix for default values for path.
@@ -266,41 +171,49 @@ class Soledad(object):
storage on server sequence has failed
for some reason.
"""
- # get config params
+ # store config params
self._uuid = uuid
- soledad_assert_type(passphrase, unicode)
self._passphrase = passphrase
- # init crypto variables
- self._secrets = {}
- self._secret_id = secret_id
+ self._secrets_path = secrets_path
+ self._local_db_path = local_db_path
+ self._server_url = server_url
+ # configure SSL certificate
+ global SOLEDAD_CERT
+ SOLEDAD_CERT = cert_file
+ self._set_token(auth_token)
self._defer_encryption = defer_encryption
- self._init_config(secrets_path, local_db_path, server_url)
+ self._init_config()
+ self._init_dirs()
- self._set_token(auth_token)
+ # init crypto variables
self._shared_db_instance = None
- # configure SSL certificate
- global SOLEDAD_CERT
- SOLEDAD_CERT = cert_file
+ self._crypto = SoledadCrypto(self)
+ self._secrets = SoledadSecrets(
+ self._uuid,
+ self._passphrase,
+ self._secrets_path,
+ self._shared_db,
+ self._crypto,
+ secret_id=secret_id)
+
# initiate bootstrap sequence
self._bootstrap() # might raise BootstrapSequenceError()
- def _init_config(self, secrets_path, local_db_path, server_url):
+ def _init_config(self):
"""
Initialize configuration using default values for missing params.
"""
+ soledad_assert_type(self._passphrase, unicode)
# initialize secrets_path
- self._secrets_path = secrets_path
if self._secrets_path is None:
self._secrets_path = os.path.join(
self.DEFAULT_PREFIX, self.STORAGE_SECRETS_FILE_NAME)
# initialize local_db_path
- self._local_db_path = local_db_path
if self._local_db_path is None:
self._local_db_path = os.path.join(
self.DEFAULT_PREFIX, self.LOCAL_DATABASE_FILE_NAME)
# initialize server_url
- self._server_url = server_url
soledad_assert(
self._server_url is not None,
'Missing URL for Soledad server.')
@@ -309,129 +222,18 @@ class Soledad(object):
# initialization/destruction methods
#
- def _get_or_gen_crypto_secrets(self):
- """
- Retrieves or generates the crypto secrets.
-
- Might raise BootstrapSequenceError
- """
- doc = self._get_secrets_from_shared_db()
-
- if doc:
- logger.info(
- 'Found cryptographic secrets in shared recovery '
- 'database.')
- _, mac = self.import_recovery_document(doc.content)
- if mac is False:
- self.put_secrets_in_shared_db()
- self._store_secrets() # save new secrets in local file
- if self._secret_id is None:
- self._set_secret_id(self._secrets.items()[0][0])
- else:
- # STAGE 3 - there are no secrets in server also, so
- # generate a secret and store it in remote db.
- logger.info(
- 'No cryptographic secrets found, creating new '
- ' secrets...')
- self._set_secret_id(self._gen_secret())
- try:
- self._put_secrets_in_shared_db()
- except Exception as ex:
- # storing generated secret in shared db failed for
- # some reason, so we erase the generated secret and
- # raise.
- try:
- os.unlink(self._secrets_path)
- except OSError as e:
- if e.errno != errno.ENOENT: # no such file or directory
- logger.exception(e)
- logger.exception(ex)
- raise BootstrapSequenceError(
- 'Could not store generated secret in the shared '
- 'database, bailing out...')
-
def _bootstrap(self):
"""
Bootstrap local Soledad instance.
- Soledad Client bootstrap is the following sequence of stages:
-
- * stage 0 - local environment setup.
- - directory initialization.
- - crypto submodule initialization
- * stage 1 - local secret loading:
- - if secrets exist locally, load them.
- * stage 2 - remote secret loading:
- - else, if secrets exist in server, download them.
- * stage 3 - secret generation:
- - else, generate a new secret and store in server.
- * stage 4 - database initialization.
-
- This method decides which bootstrap stages have already been performed
- and performs the missing ones in order.
-
:raise BootstrapSequenceError: Raised when the secret generation and
storage on server sequence has failed for some reason.
"""
- # STAGE 0 - local environment setup
- self._init_dirs()
- self._crypto = SoledadCrypto(self)
-
- secrets_problem = None
-
- # STAGE 1 - verify if secrets exist locally
- if not self._has_secret(): # try to load from local storage.
-
- # STAGE 2 - there are no secrets in local storage, so try to fetch
- # encrypted secrets from server.
- logger.info(
- 'Trying to fetch cryptographic secrets from shared recovery '
- 'database...')
-
- # --- start of atomic operation in shared db ---
-
- # obtain lock on shared db
- token = timeout = None
- try:
- token, timeout = self._shared_db.lock()
- except AlreadyLockedError:
- raise BootstrapSequenceError('Database is already locked.')
- except LockTimedOutError:
- raise BootstrapSequenceError('Lock operation timed out.')
-
- try:
- self._get_or_gen_crypto_secrets()
- except Exception as e:
- secrets_problem = e
-
- # release the lock on shared db
- try:
- self._shared_db.unlock(token)
- self._shared_db.close()
- except NotLockedError:
- # for some reason the lock expired. Despite that, secret
- # loading or generation/storage must have been executed
- # successfully, so we pass.
- pass
- except InvalidTokenError:
- # here, our lock has not only expired but also some other
- # client application has obtained a new lock and is currently
- # doing its thing in the shared database. Using the same
- # reasoning as above, we assume everything went smooth and
- # pass.
- pass
- except Exception as e:
- logger.error("Unhandled exception when unlocking shared "
- "database.")
- logger.exception(e)
-
- # --- end of atomic operation in shared db ---
-
- # STAGE 4 - local database initialization
- if secrets_problem is None:
+ try:
+ self._secrets.bootstrap()
self._init_db()
- else:
- raise secrets_problem
+ except:
+ raise
def _init_dirs(self):
"""
@@ -460,27 +262,9 @@ class Soledad(object):
Currently, Soledad uses the default SQLCipher cipher, i.e.
'aes-256-cbc'. We use scrypt to derive a 256-bit encryption key and
uses the 'raw PRAGMA key' format to handle the key to SQLCipher.
-
- The first C{self.REMOTE_STORAGE_SECRET_LENGTH} bytes of the storage
- secret are used for remote storage encryption. We use the next
- C{self.LOCAL_STORAGE_SECRET} bytes to derive a key for local storage.
- From these bytes, the first C{self.SALT_LENGTH} are used as the salt
- and the rest as the password for the scrypt hashing.
- """
- # salt indexes
- salt_start = self.REMOTE_STORAGE_SECRET_LENGTH
- salt_end = salt_start + self.SALT_LENGTH
- # password indexes
- pwd_start = salt_end
- pwd_end = salt_start + self.LOCAL_STORAGE_SECRET_LENGTH
- # calculate the key for local encryption
- secret = self._get_storage_secret()
- key = scrypt.hash(
- secret[pwd_start:pwd_end], # the password
- secret[salt_start:salt_end], # the salt
- buflen=32, # we need a key with 256 bits (32 bytes)
- )
-
+ """
+ key = self._secrets.get_local_storage_key()
+ sync_db_key = self._secrets.get_sync_db_key()
self._db = sqlcipher_open(
self._local_db_path,
binascii.b2a_hex(key), # sqlcipher only accepts the hex version
@@ -488,7 +272,8 @@ class Soledad(object):
document_factory=SoledadDocument,
crypto=self._crypto,
raw_key=True,
- defer_encryption=self._defer_encryption)
+ defer_encryption=self._defer_encryption,
+ sync_db_key=binascii.b2a_hex(sync_db_key))
def close(self):
"""
@@ -501,186 +286,6 @@ class Soledad(object):
self._db.stop_sync()
self._db.close()
- #
- # Management of secret for symmetric encryption.
- #
-
- def _get_storage_secret(self):
- """
- Return the storage secret.
-
- Storage secret is encrypted before being stored. This method decrypts
- and returns the stored secret.
-
- :return: The storage secret.
- :rtype: str
- """
- # calculate the encryption key
- key = scrypt.hash(
- self._passphrase_as_string(),
- # the salt is stored base64 encoded
- binascii.a2b_base64(
- self._secrets[self._secret_id][self.KDF_SALT_KEY]),
- buflen=32, # we need a key with 256 bits (32 bytes).
- )
- # recover the initial value and ciphertext
- iv, ciphertext = self._secrets[self._secret_id][self.SECRET_KEY].split(
- self.IV_SEPARATOR, 1)
- ciphertext = binascii.a2b_base64(ciphertext)
- return self._crypto.decrypt_sym(ciphertext, key, iv=iv)
-
- def _set_secret_id(self, secret_id):
- """
- Define the id of the storage secret to be used.
-
- This method will also replace the secret in the crypto object.
-
- :param secret_id: The id of the storage secret to be used.
- :type secret_id: str
- """
- self._secret_id = secret_id
-
- def _load_secrets(self):
- """
- Load storage secrets from local file.
- """
- # does the file exist in disk?
- if not os.path.isfile(self._secrets_path):
- raise IOError('File does not exist: %s' % self._secrets_path)
- # read storage secrets from file
- content = None
- with open(self._secrets_path, 'r') as f:
- content = json.loads(f.read())
- _, mac = self.import_recovery_document(content)
- if mac is False:
- self._store_secrets()
- self._put_secrets_in_shared_db()
- # choose first secret if no secret_id was given
- if self._secret_id is None:
- self._set_secret_id(self._secrets.items()[0][0])
-
- def _has_secret(self):
- """
- Return whether there is a storage secret available for use or not.
-
- :return: Whether there's a storage secret for symmetric encryption.
- :rtype: bool
- """
- if self._secret_id is None or self._secret_id not in self._secrets:
- try:
- self._load_secrets() # try to load from disk
- except IOError, e:
- logger.warning('IOError: %s' % str(e))
- try:
- self._get_storage_secret()
- return True
- except Exception:
- return False
-
- def _gen_secret(self):
- """
- Generate a secret for symmetric encryption and store in a local
- encrypted file.
-
- This method emits the following signals:
-
- * SOLEDAD_CREATING_KEYS
- * SOLEDAD_DONE_CREATING_KEYS
-
- A secret has the following structure:
-
- {
- '<secret_id>': {
- 'kdf': 'scrypt',
- 'kdf_salt': '<b64 repr of salt>'
- 'kdf_length': <key length>
- 'cipher': 'aes256',
- 'length': <secret length>,
- 'secret': '<encrypted b64 repr of storage_secret>',
- }
- }
-
- :return: The id of the generated secret.
- :rtype: str
- """
- signal(SOLEDAD_CREATING_KEYS, self._uuid)
- # generate random secret
- secret = os.urandom(self.GENERATED_SECRET_LENGTH)
- secret_id = sha256(secret).hexdigest()
- # generate random salt
- salt = os.urandom(self.SALT_LENGTH)
- # get a 256-bit key
- key = scrypt.hash(self._passphrase_as_string(), salt, buflen=32)
- iv, ciphertext = self._crypto.encrypt_sym(secret, key)
- self._secrets[secret_id] = {
- # leap.soledad.crypto submodule uses AES256 for symmetric
- # encryption.
- self.KDF_KEY: self.KDF_SCRYPT,
- self.KDF_SALT_KEY: binascii.b2a_base64(salt),
- self.KDF_LENGTH_KEY: len(key),
- self.CIPHER_KEY: self.CIPHER_AES256,
- self.LENGTH_KEY: len(secret),
- self.SECRET_KEY: '%s%s%s' % (
- str(iv), self.IV_SEPARATOR, binascii.b2a_base64(ciphertext)),
- }
- self._store_secrets()
- signal(SOLEDAD_DONE_CREATING_KEYS, self._uuid)
- return secret_id
-
- def _store_secrets(self):
- """
- Store secrets in C{Soledad.STORAGE_SECRETS_FILE_PATH}.
- """
- with open(self._secrets_path, 'w') as f:
- f.write(
- json.dumps(
- self.export_recovery_document()))
-
- def change_passphrase(self, new_passphrase):
- """
- Change the passphrase that encrypts the storage secret.
-
- :param new_passphrase: The new passphrase.
- :type new_passphrase: unicode
-
- :raise NoStorageSecret: Raised if there's no storage secret available.
- """
- # maybe we want to add more checks to guarantee passphrase is
- # reasonable?
- soledad_assert_type(new_passphrase, unicode)
- if len(new_passphrase) < self.MINIMUM_PASSPHRASE_LENGTH:
- raise PassphraseTooShort(
- 'Passphrase must be at least %d characters long!' %
- self.MINIMUM_PASSPHRASE_LENGTH)
- # ensure there's a secret for which the passphrase will be changed.
- if not self._has_secret():
- raise NoStorageSecret()
- secret = self._get_storage_secret()
- # generate random salt
- new_salt = os.urandom(self.SALT_LENGTH)
- # get a 256-bit key
- key = scrypt.hash(new_passphrase.encode('utf-8'), new_salt, buflen=32)
- iv, ciphertext = self._crypto.encrypt_sym(secret, key)
- # XXX update all secrets in the dict
- self._secrets[self._secret_id] = {
- # leap.soledad.crypto submodule uses AES256 for symmetric
- # encryption.
- self.KDF_KEY: self.KDF_SCRYPT, # TODO: remove hard coded kdf
- self.KDF_SALT_KEY: binascii.b2a_base64(new_salt),
- self.KDF_LENGTH_KEY: len(key),
- self.CIPHER_KEY: self.CIPHER_AES256,
- self.LENGTH_KEY: len(secret),
- self.SECRET_KEY: '%s%s%s' % (
- str(iv), self.IV_SEPARATOR, binascii.b2a_base64(ciphertext)),
- }
- self._passphrase = new_passphrase
- self._store_secrets()
- self._put_secrets_in_shared_db()
-
- #
- # General crypto utility methods.
- #
-
@property
def _shared_db(self):
"""
@@ -697,63 +302,6 @@ class Soledad(object):
creds=self._creds)
return self._shared_db_instance
- def _shared_db_doc_id(self):
- """
- Calculate the doc_id of the document in the shared db that stores key
- material.
-
- :return: the hash
- :rtype: str
- """
- return sha256(
- '%s%s' %
- (self._passphrase_as_string(), self.uuid)).hexdigest()
-
- def _get_secrets_from_shared_db(self):
- """
- Retrieve the document with encrypted key material from the shared
- database.
-
- :return: a document with encrypted key material in its contents
- :rtype: SoledadDocument
- """
- signal(SOLEDAD_DOWNLOADING_KEYS, self._uuid)
- db = self._shared_db
- if not db:
- logger.warning('No shared db found')
- return
- doc = db.get_doc(self._shared_db_doc_id())
- signal(SOLEDAD_DONE_DOWNLOADING_KEYS, self._uuid)
- return doc
-
- def _put_secrets_in_shared_db(self):
- """
- Assert local keys are the same as shared db's ones.
-
- Try to fetch keys from shared recovery database. If they already exist
- in the remote db, assert that that data is the same as local data.
- Otherwise, upload keys to shared recovery database.
- """
- soledad_assert(
- self._has_secret(),
- 'Tried to send keys to server but they don\'t exist in local '
- 'storage.')
- # try to get secrets doc from server, otherwise create it
- doc = self._get_secrets_from_shared_db()
- if doc is None:
- doc = SoledadDocument(
- doc_id=self._shared_db_doc_id())
- # fill doc with encrypted secrets
- doc.content = self.export_recovery_document()
- # upload secrets to server
- signal(SOLEDAD_UPLOADING_KEYS, self._uuid)
- db = self._shared_db
- if not db:
- logger.warning('No shared db found')
- return
- db.put_doc(doc)
- signal(SOLEDAD_DONE_UPLOADING_KEYS, self._uuid)
-
#
# Document storage, retrieval and sync.
#
@@ -1153,104 +701,6 @@ class Soledad(object):
token = property(_get_token, _set_token, doc='The authentication Token.')
#
- # Recovery document export and import methods
- #
-
- def export_recovery_document(self):
- """
- Export the storage secrets.
-
- A recovery document has the following structure:
-
- {
- 'storage_secrets': {
- '<storage_secret id>': {
- 'kdf': 'scrypt',
- 'kdf_salt': '<b64 repr of salt>'
- 'kdf_length': <key length>
- 'cipher': 'aes256',
- 'length': <secret length>,
- 'secret': '<encrypted storage_secret>',
- },
- },
- 'kdf': 'scrypt',
- 'kdf_salt': '<b64 repr of salt>',
- 'kdf_length: <key length>,
- '_mac_method': 'hmac',
- '_mac': '<mac>'
- }
-
- Note that multiple storage secrets might be stored in one recovery
- document. This method will also calculate a MAC of a string
- representation of the secrets dictionary.
-
- :return: The recovery document.
- :rtype: dict
- """
- # create salt and key for calculating MAC
- salt = os.urandom(self.SALT_LENGTH)
- key = scrypt.hash(self._passphrase_as_string(), salt, buflen=32)
- data = {
- self.STORAGE_SECRETS_KEY: self._secrets,
- self.KDF_KEY: self.KDF_SCRYPT,
- self.KDF_SALT_KEY: binascii.b2a_base64(salt),
- self.KDF_LENGTH_KEY: len(key),
- MAC_METHOD_KEY: MacMethods.HMAC,
- MAC_KEY: hmac.new(
- key,
- json.dumps(self._secrets),
- sha256).hexdigest(),
- }
- return data
-
- def import_recovery_document(self, data):
- """
- Import storage secrets for symmetric encryption and uuid (if present)
- from a recovery document.
-
- Note that this method does not store the imported data on disk. For
- that, use C{self._store_secrets()}.
-
- :param data: The recovery document.
- :type data: dict
-
- :return: A tuple containing the number of imported secrets and whether
- there was MAC informationa available for authenticating.
- :rtype: (int, bool)
- """
- soledad_assert(self.STORAGE_SECRETS_KEY in data)
- # check mac of the recovery document
- mac = None
- if MAC_KEY in data:
- soledad_assert(data[MAC_KEY] is not None)
- soledad_assert(MAC_METHOD_KEY in data)
- soledad_assert(self.KDF_KEY in data)
- soledad_assert(self.KDF_SALT_KEY in data)
- soledad_assert(self.KDF_LENGTH_KEY in data)
- if data[MAC_METHOD_KEY] == MacMethods.HMAC:
- key = scrypt.hash(
- self._passphrase_as_string(),
- binascii.a2b_base64(data[self.KDF_SALT_KEY]),
- buflen=32)
- mac = hmac.new(
- key,
- json.dumps(data[self.STORAGE_SECRETS_KEY]),
- sha256).hexdigest()
- else:
- raise UnknownMacMethod('Unknown MAC method: %s.' %
- data[MAC_METHOD_KEY])
- if mac != data[MAC_KEY]:
- raise WrongMac('Could not authenticate recovery document\'s '
- 'contents.')
- # include secrets in the secret pool.
- secrets = 0
- for secret_id, secret_data in data[self.STORAGE_SECRETS_KEY].items():
- if secret_id not in self._secrets:
- secrets += 1
- self._secrets[secret_id] = secret_data
- return secrets, mac
-
- #
# Setters/getters
#
@@ -1259,18 +709,26 @@ class Soledad(object):
uuid = property(_get_uuid, doc='The user uuid.')
- def _get_secret_id(self):
- return self._secret_id
+ def get_secret_id(self):
+ return self._secrets.secret_id
+
+ def set_secret_id(self, secret_id):
+ self._secrets.set_secret_id(secret_id)
secret_id = property(
- _get_secret_id,
+ get_secret_id,
+ set_secret_id,
doc='The active secret id.')
+ def _set_secrets_path(self, secrets_path):
+ self._secrets.secrets_path = secrets_path
+
def _get_secrets_path(self):
- return self._secrets_path
+ return self._secrets.secrets_path
secrets_path = property(
_get_secrets_path,
+ _set_secrets_path,
doc='The path for the file containing the encrypted symmetric secret.')
def _get_local_db_path(self):
@@ -1287,20 +745,38 @@ class Soledad(object):
_get_server_url,
doc='The URL of the Soledad server.')
- storage_secret = property(
- _get_storage_secret,
- doc='The secret used for symmetric encryption.')
+ @property
+ def storage_secret(self):
+ """
+ Return the secret used for symmetric encryption.
+ """
+ return self._secrets.storage_secret
+
+ @property
+ def remote_storage_secret(self):
+ """
+ Return the secret used for encryption of remotely stored data.
+ """
+ return self._secrets.remote_storage_secret
+
+ @property
+ def secrets(self):
+ return self._secrets
- def _get_passphrase(self):
- return self._passphrase
+ @property
+ def passphrase(self):
+ return self._secrets.passphrase
- passphrase = property(
- _get_passphrase,
- doc='The passphrase for locking and unlocking encryption secrets for '
- 'local and remote storage.')
+ def change_passphrase(self, new_passphrase):
+ """
+ Change the passphrase that encrypts the storage secret.
- def _passphrase_as_string(self):
- return self._passphrase.encode('utf-8')
+ :param new_passphrase: The new passphrase.
+ :type new_passphrase: unicode
+
+ :raise NoStorageSecret: Raised if there's no storage secret available.
+ """
+ self._secrets.change_passphrase(new_passphrase)
# ----------------------------------------------------------------------------
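
With the lines above, all secret handling moves out of Soledad and into the new SoledadSecrets object; the attributes kept on Soledad are thin proxies. A minimal sketch of that delegation, assuming an already-bootstrapped instance `sol` (names taken from the properties added in this diff):

# Sketch, not new API: these attributes now forward to sol.secrets.
secret = sol.storage_secret                 # full decrypted storage secret
remote = sol.remote_storage_secret          # part used for remote encryption
active_id = sol.secret_id                   # proxies sol.secrets.secret_id
sol.change_passphrase(u'a new, longer passphrase')  # delegates to secrets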
diff --git a/client/src/leap/soledad/client/crypto.py b/client/src/leap/soledad/client/crypto.py
index 7133f804..5e3760b3 100644
--- a/client/src/leap/soledad/client/crypto.py
+++ b/client/src/leap/soledad/client/crypto.py
@@ -224,7 +224,7 @@ class SoledadCrypto(object):
The password is derived using HMAC having sha256 as underlying hash
function. The key used for HMAC are the first
- C{soledad.REMOTE_STORAGE_SECRET_KENGTH} bytes of Soledad's storage
+ C{soledad.REMOTE_STORAGE_SECRET_LENGTH} bytes of Soledad's storage
secret stripped from the first MAC_KEY_LENGTH characters. The HMAC
message is C{doc_id}.
@@ -240,9 +240,7 @@ class SoledadCrypto(object):
if self.secret is None:
raise NoSymmetricSecret()
return hmac.new(
- self.secret[
- MAC_KEY_LENGTH:
- self._soledad.REMOTE_STORAGE_SECRET_LENGTH],
+ self.secret[MAC_KEY_LENGTH:],
doc_id,
hashlib.sha256).digest()
@@ -251,16 +249,16 @@ class SoledadCrypto(object):
#
def _get_secret(self):
- return self._soledad.storage_secret
+ return self._soledad.secrets.remote_storage_secret
secret = property(
_get_secret, doc='The secret used for symmetric encryption')
+
#
# Crypto utilities for a SoledadDocument.
#
-
def mac_doc(doc_id, doc_rev, ciphertext, mac_method, secret):
"""
Calculate a MAC for C{doc} using C{ciphertext}.
@@ -623,9 +621,8 @@ class SyncEncrypterPool(SyncEncryptDecryptPool):
con = self._sync_db
with self._sync_db_write_lock:
- with con:
- con.execute(sql_del, (doc_id, ))
- con.execute(sql_ins, (doc_id, doc_rev, content))
+ con.execute(sql_del, (doc_id, ))
+ con.execute(sql_ins, (doc_id, doc_rev, content))
def decrypt_doc_task(doc_id, doc_rev, content, gen, trans_id, key, secret):
@@ -657,26 +654,6 @@ def decrypt_doc_task(doc_id, doc_rev, content, gen, trans_id, key, secret):
return doc_id, doc_rev, decrypted_content, gen, trans_id
-def get_insertable_docs_by_gen(expected, got):
- """
- Return a list of documents ready to be inserted. This list is computed
- by aligning the expected list with the already gotten docs, and returning
- the maximum number of docs that can be processed in the expected order
- before finding a gap.
-
- :param expected: A list of generations to be inserted.
- :type expected: list
-
- :param got: A dictionary whose values are the docs to be inserted.
- :type got: dict
- """
- ordered = [got.get(i) for i in expected]
- if None in ordered:
- return ordered[:ordered.index(None)]
- else:
- return ordered
-
-
class SyncDecrypterPool(SyncEncryptDecryptPool):
"""
Pool of workers that spawn subprocesses to execute the symmetric decryption
@@ -691,7 +668,7 @@ class SyncDecrypterPool(SyncEncryptDecryptPool):
"""
# TODO implement throttling to reduce cpu usage??
TABLE_NAME = "docs_received"
- FIELD_NAMES = "doc_id, rev, content, gen, trans_id"
+ FIELD_NAMES = "doc_id, rev, content, gen, trans_id, encrypted"
write_encrypted_lock = threading.Lock()
@@ -700,10 +677,17 @@ class SyncDecrypterPool(SyncEncryptDecryptPool):
Initialize the decrypter pool, and setup a dict for putting the
results of the decrypted docs until they are picked by the insert
routine that gets them in order.
+
+ :param insert_doc_cb: A callback for inserting received documents from
+ target. If not overriden, this will call u1db
+ insert_doc_from_target in synchronizer, which
+ implements the TAKE OTHER semantics.
+ :type insert_doc_cb: function
+ :param last_known_generation: Target's last known generation.
+ :type last_known_generation: int
"""
self._insert_doc_cb = kwargs.pop("insert_doc_cb")
SyncEncryptDecryptPool.__init__(self, *args, **kwargs)
- self.decrypted_docs = {}
self.source_replica_uid = None
def set_source_replica_uid(self, source_replica_uid):
@@ -733,36 +717,16 @@ class SyncDecrypterPool(SyncEncryptDecryptPool):
:type trans_id: str
"""
docstr = json.dumps(content)
- sql_ins = "INSERT INTO '%s' VALUES (?, ?, ?, ?, ?)" % (
+ sql_del = "DELETE FROM '%s' WHERE doc_id=?" % (self.TABLE_NAME,)
+ sql_ins = "INSERT INTO '%s' VALUES (?, ?, ?, ?, ?, ?)" % (
self.TABLE_NAME,)
con = self._sync_db
with self._sync_db_write_lock:
- with con:
- con.execute(sql_ins, (doc_id, doc_rev, docstr, gen, trans_id))
-
- def insert_marker_for_received_doc(self, doc_id, doc_rev, gen):
- """
- Insert a marker with the document id, revision and generation on the
- sync db. This document does not have an encrypted payload, so the
- content has already been inserted into the decrypted_docs dictionary
- from where it can be picked following generation order.
- We need to leave here the marker to be able to calculate the expected
- insertion order for a synchronization batch.
-
- :param doc_id: The Document ID.
- :type doc_id: str
- :param doc_rev: The Document Revision
- :param doc_rev: str
- :param gen: the Document Generation
- :type gen: int
- """
- sql_ins = "INSERT INTO '%s' VALUES (?, ?, ?, ?, ?)" % (
- self.TABLE_NAME,)
- con = self._sync_db
- with self._sync_db_write_lock:
- with con:
- con.execute(sql_ins, (doc_id, doc_rev, '', gen, ''))
+ con.execute(sql_del, (doc_id, ))
+ con.execute(
+ sql_ins,
+ (doc_id, doc_rev, docstr, gen, trans_id, 1))
def insert_received_doc(self, doc_id, doc_rev, content, gen, trans_id):
"""
@@ -781,21 +745,22 @@ class SyncDecrypterPool(SyncEncryptDecryptPool):
:param trans_id: Transaction ID
:type trans_id: str
"""
- # XXX this need a deeper review / testing.
- # I believe that what I'm doing here is prone to problems
- # if the sync is interrupted (ie, client crash) in the worst possible
- # moment. We would need a recover strategy in that case
- # (or, insert the document in the table all the same, but with a flag
- # saying if the document is sym-encrypted or not),
- content = json.dumps(content)
- result = doc_id, doc_rev, content, gen, trans_id
- self.decrypted_docs[gen] = result
- self.insert_marker_for_received_doc(doc_id, doc_rev, gen)
+ if not isinstance(content, str):
+ content = json.dumps(content)
+ sql_del = "DELETE FROM '%s' WHERE doc_id=?" % (
+ self.TABLE_NAME,)
+ sql_ins = "INSERT INTO '%s' VALUES (?, ?, ?, ?, ?, ?)" % (
+ self.TABLE_NAME,)
+ con = self._sync_db
+ with self._sync_db_write_lock:
+ con.execute(sql_del, (doc_id,))
+ con.execute(
+ sql_ins,
+ (doc_id, doc_rev, content, gen, trans_id, 0))
- def delete_encrypted_received_doc(self, doc_id, doc_rev):
+ def delete_received_doc(self, doc_id, doc_rev):
"""
- Delete a encrypted received doc after it was inserted into the local
- db.
+ Delete a received doc after it was inserted into the local db.
:param doc_id: Document ID.
:type doc_id: str
@@ -806,10 +771,10 @@ class SyncDecrypterPool(SyncEncryptDecryptPool):
self.TABLE_NAME,)
con = self._sync_db
with self._sync_db_write_lock:
- with con:
- con.execute(sql_del, (doc_id, doc_rev))
+ con.execute(sql_del, (doc_id, doc_rev))
- def decrypt_doc(self, doc_id, rev, source_replica_uid, workers=True):
+ def decrypt_doc(self, doc_id, rev, content, gen, trans_id,
+ source_replica_uid, workers=True):
"""
Symmetrically decrypt a document.
@@ -817,6 +782,14 @@ class SyncDecrypterPool(SyncEncryptDecryptPool):
:type doc: str
:param rev: The revision of the document.
:type rev: str
+ :param content: The serialized content of the document.
+ :type content: str
+ :param gen: The generation corresponding to the modification of that
+ document.
+ :type gen: int
+ :param trans_id: The transaction id corresponding to the modification
+ of that document.
+ :type trans_id: str
:param source_replica_uid:
:type source_replica_uid: str
@@ -835,35 +808,17 @@ class SyncDecrypterPool(SyncEncryptDecryptPool):
logger.debug("Sync decrypter pool: no insert_doc_cb() yet.")
return
- # XXX move to get_doc function...
- c = self._sync_db.cursor()
- sql = "SELECT * FROM '%s' WHERE doc_id=? AND rev=?" % (
- self.TABLE_NAME,)
- try:
- c.execute(sql, (doc_id, rev))
- res = c.fetchone()
- except Exception as exc:
- logger.warning("Error getting docs from syncdb: %r" % (exc,))
- return
- if res is None:
- logger.debug("Doc %s:%s does not exist in sync db" % (doc_id, rev))
- return
-
soledad_assert(self._crypto is not None, "need a crypto object")
- try:
- doc_id, rev, docstr, gen, trans_id = res
- except ValueError:
- logger.warning("Wrong entry in sync db")
- return
- if len(docstr) == 0:
+ if len(content) == 0:
# not encrypted payload
return
try:
- content = json.loads(docstr)
+ content = json.loads(content)
except TypeError:
- logger.warning("Wrong type while decoding json: %s" % repr(docstr))
+ logger.warning("Wrong type while decoding json: %s"
+ % repr(content))
return
key = self._crypto.doc_passphrase(doc_id)
@@ -889,34 +844,71 @@ class SyncDecrypterPool(SyncEncryptDecryptPool):
def decrypt_doc_cb(self, result):
"""
- Temporarily store the decryption result in a dictionary where it will
- be picked by process_decrypted.
+ Store the decryption result in the sync db from where it will later be
+ picked by process_decrypted.
:param result: A tuple containing the doc id, revision and encrypted
content.
:type result: tuple(str, str, str)
"""
doc_id, rev, content, gen, trans_id = result
- logger.debug("Sync decrypter pool: decrypted doc %s: %s %s" % (doc_id, rev, gen))
- self.decrypted_docs[gen] = result
+ logger.debug("Sync decrypter pool: decrypted doc %s: %s %s %s"
+ % (doc_id, rev, gen, trans_id))
+ self.insert_received_doc(doc_id, rev, content, gen, trans_id)
- def get_docs_by_generation(self):
+ def get_docs_by_generation(self, encrypted=None):
"""
Get all documents in the received table from the sync db,
ordered by generation.
- :return: list of doc_id, rev, generation
- """
- c = self._sync_db.cursor()
- sql = "SELECT doc_id, rev, gen FROM %s ORDER BY gen" % (
- self.TABLE_NAME,)
- c.execute(sql)
- return c.fetchall()
-
- def count_received_encrypted_docs(self):
- """
- Count how many documents we have in the table for received and
- encrypted docs.
+ :param encrypted: If not None, only return documents with encrypted
+ field equal to given parameter.
+ :type encrypted: bool or None
+
+ :return: list of doc_id, rev, generation, gen, trans_id
+ :rtype: list
+ """
+ sql = "SELECT doc_id, rev, content, gen, trans_id, encrypted FROM %s" \
+ % self.TABLE_NAME
+ if encrypted is not None:
+ sql += " WHERE encrypted = %d" % int(encrypted)
+ sql += " ORDER BY gen ASC"
+ docs = self._sync_db.select(sql)
+ return docs
+
+ def get_insertable_docs_by_gen(self):
+ """
+ Return a list of non-encrypted documents ready to be inserted.
+ """
+ # here, we compare the list of all available docs with the list of
+ # decrypted docs and find the longest common prefix between these two
+ # lists. Note that the order of lists fetch matters: if instead we
+ # first fetch the list of decrypted docs and then the list of all
+ # docs, then some document might have been decrypted between these two
+ # calls, and if it is just the right doc then it might not be caught
+ # by the next loop.
+ all_docs = self.get_docs_by_generation()
+ decrypted_docs = self.get_docs_by_generation(encrypted=False)
+ insertable = []
+ for doc_id, rev, _, gen, trans_id, encrypted in all_docs:
+ try:
+ next_doc_id, _, next_content, _, _, _ = decrypted_docs.next()
+ if doc_id == next_doc_id:
+ content = next_content
+ insertable.append((doc_id, rev, content, gen, trans_id))
+ else:
+ break
+ except StopIteration:
+ break
+ return insertable
+
+ def count_docs_in_sync_db(self, encrypted=None):
+ """
+ Count how many documents we have in the table for received docs.
+
+ :param encrypted: If not None, return count of documents with
+ encrypted field equal to given parameter.
+ :type encrypted: bool or None
:return: The count of documents.
:rtype: int
@@ -924,12 +916,13 @@ class SyncDecrypterPool(SyncEncryptDecryptPool):
if self._sync_db is None:
logger.warning("cannot return count with null sync_db")
return
- c = self._sync_db.cursor()
sql = "SELECT COUNT(*) FROM %s" % (self.TABLE_NAME,)
- c.execute(sql)
- res = c.fetchone()
+ if encrypted is not None:
+ sql += " WHERE encrypted = %d" % int(encrypted)
+ res = self._sync_db.select(sql)
if res is not None:
- return res[0]
+ val = res.next()
+ return val[0]
else:
return 0
@@ -938,11 +931,11 @@ class SyncDecrypterPool(SyncEncryptDecryptPool):
Get all the encrypted documents from the sync database and dispatch a
decrypt worker to decrypt each one of them.
"""
- docs_by_generation = self.get_docs_by_generation()
- logger.debug("Sync decrypter pool: There are %d documents to " \
- "decrypt." % len(docs_by_generation))
- for doc_id, rev, gen in filter(None, docs_by_generation):
- self.decrypt_doc(doc_id, rev, self.source_replica_uid)
+ docs_by_generation = self.get_docs_by_generation(encrypted=True)
+ for doc_id, rev, content, gen, trans_id, _ \
+ in filter(None, docs_by_generation):
+ self.decrypt_doc(
+ doc_id, rev, content, gen, trans_id, self.source_replica_uid)
def process_decrypted(self):
"""
@@ -956,15 +949,9 @@ class SyncDecrypterPool(SyncEncryptDecryptPool):
# getting data from the syncing stream, to avoid InvalidGeneration
# problems.
with self.write_encrypted_lock:
- already_decrypted = self.decrypted_docs
- docs = self.get_docs_by_generation()
- docs = filter(lambda entry: len(entry) > 0, docs)
- expected = [gen for doc_id, rev, gen in docs]
- docs_to_insert = get_insertable_docs_by_gen(
- expected, already_decrypted)
- for doc_fields in docs_to_insert:
+ for doc_fields in self.get_insertable_docs_by_gen():
self.insert_decrypted_local_doc(*doc_fields)
- remaining = self.count_received_encrypted_docs()
+ remaining = self.count_docs_in_sync_db()
return remaining == 0
def insert_decrypted_local_doc(self, doc_id, doc_rev, content,
@@ -989,21 +976,27 @@ class SyncDecrypterPool(SyncEncryptDecryptPool):
"""
# could pass source_replica in params for callback chain
insert_fun = self._insert_doc_cb[self.source_replica_uid]
- logger.debug("Sync decrypter pool: inserting doc in local db: " \
+ logger.debug("Sync decrypter pool: inserting doc in local db: "
"%s:%s %s" % (doc_id, doc_rev, gen))
try:
# convert deleted documents to avoid error on document creation
if content == 'null':
content = None
doc = SoledadDocument(doc_id, doc_rev, content)
- insert_fun(doc, int(gen), trans_id)
+ gen = int(gen)
+ insert_fun(doc, gen, trans_id)
except Exception as exc:
logger.error("Sync decrypter pool: error while inserting "
"decrypted doc into local db.")
logger.exception(exc)
else:
- # If no errors found, remove it from the local temporary dict
- # and from the received database.
- self.decrypted_docs.pop(gen)
- self.delete_encrypted_received_doc(doc_id, doc_rev)
+ # If no errors found, remove it from the received database.
+ self.delete_received_doc(doc_id, doc_rev)
+
+ def empty(self):
+ """
+ Empty the received docs table of the sync database.
+ """
+ sql = "DELETE FROM %s WHERE 1" % (self.TABLE_NAME,)
+ res = self._sync_db.execute(sql)
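
The comment inside get_insertable_docs_by_gen above is the heart of the change: received docs are read in generation order and handed to u1db only up to the first one that is still encrypted, so insertion order is never violated. A toy rendering of that prefix-matching loop, with in-memory rows standing in for the sync-db queries:

# Toy illustration of the prefix matching in get_insertable_docs_by_gen;
# rows follow the (doc_id, rev, content, gen, trans_id, encrypted) layout.
all_docs = [('a', 'r1', '{"k": 1}', 1, 't1', 0),
            ('b', 'r1', '',         2, 't2', 1),  # still encrypted
            ('c', 'r1', '{"k": 3}', 3, 't3', 0)]
decrypted = iter([row for row in all_docs if row[5] == 0])

insertable = []
for doc_id, rev, _, gen, trans_id, encrypted in all_docs:
    try:
        next_doc_id, _, next_content, _, _, _ = decrypted.next()
    except StopIteration:
        break
    if doc_id != next_doc_id:
        break
    insertable.append((doc_id, rev, next_content, gen, trans_id))

# Only doc 'a' is insertable: 'b' is not decrypted yet, and inserting 'c'
# before 'b' would break generation order.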
diff --git a/client/src/leap/soledad/client/mp_safe_db.py b/client/src/leap/soledad/client/mp_safe_db.py
new file mode 100644
index 00000000..780b7153
--- /dev/null
+++ b/client/src/leap/soledad/client/mp_safe_db.py
@@ -0,0 +1,112 @@
+# -*- coding: utf-8 -*-
+# mp_safe_db.py
+# Copyright (C) 2014 LEAP
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+"""
+Multiprocessing-safe SQLite database.
+"""
+
+
+from threading import Thread
+from Queue import Queue
+from pysqlcipher import dbapi2
+
+
+# Thanks to http://code.activestate.com/recipes/526618/
+
+class MPSafeSQLiteDB(Thread):
+ """
+ A multiprocessing-safe SQLite database accessor.
+ """
+
+ CLOSE = "--close--"
+ NO_MORE = "--no more--"
+
+ def __init__(self, db_path):
+ """
+ Initialize the process
+ """
+ Thread.__init__(self)
+ self._db_path = db_path
+ self._requests = Queue()
+ self.start()
+
+ def run(self):
+ """
+ Run the multiprocessing-safe database accessor.
+ """
+ conn = dbapi2.connect(self._db_path)
+ while True:
+ req, arg, res = self._requests.get()
+ if req == self.CLOSE:
+ break
+ with conn:
+ cursor = conn.cursor()
+ cursor.execute(req, arg)
+ if res:
+ for rec in cursor.fetchall():
+ res.put(rec)
+ res.put(self.NO_MORE)
+ conn.close()
+
+ def execute(self, req, arg=None, res=None):
+ """
+ Execute a request on the database.
+
+ :param req: The request to be executed.
+ :type req: str
+ :param arg: The arguments for the request.
+ :type arg: tuple
+ :param res: A queue to write request results.
+ :type res: multiprocessing.Queue
+ """
+ self._requests.put((req, arg or tuple(), res))
+
+ def select(self, req, arg=None):
+ """
+ Run a select query on the database and yield results.
+
+ :param req: The request to be executed.
+ :type req: str
+ :param arg: The arguments for the request.
+ :type arg: tuple
+ """
+ res = Queue()
+ self.execute(req, arg, res)
+ while True:
+            rec = res.get()
+ if rec == self.NO_MORE:
+ break
+ yield rec
+
+ def close(self):
+ """
+ Close the database connection.
+ """
+ self.execute(self.CLOSE)
+ self.join()
+
+ def cursor(self):
+ """
+ Return a fake cursor object.
+
+ Not really a cursor, but allows for calling db.cursor().execute().
+
+ :return: Self.
+        :rtype: MPSafeSQLiteDB
+ """
+ return self
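
A short usage sketch for the accessor above; the database path and table are placeholders, not anything the module itself creates:

# All statements are serialized through the MPSafeSQLiteDB worker thread.
db = MPSafeSQLiteDB('/tmp/example-sync.db')    # placeholder path
db.execute("CREATE TABLE IF NOT EXISTS kv (k TEXT, v TEXT)")
db.execute("INSERT INTO kv VALUES (?, ?)", ('answer', '42'))
for row in db.select("SELECT k, v FROM kv WHERE k = ?", ('answer',)):
    print row                                  # -> (u'answer', u'42')
db.close()                                     # CLOSE sentinel + join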
diff --git a/client/src/leap/soledad/client/secrets.py b/client/src/leap/soledad/client/secrets.py
new file mode 100644
index 00000000..b1c22371
--- /dev/null
+++ b/client/src/leap/soledad/client/secrets.py
@@ -0,0 +1,776 @@
+# -*- coding: utf-8 -*-
+# secrets.py
+# Copyright (C) 2014 LEAP
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+"""
+Soledad secrets handling.
+"""
+
+
+import os
+import scrypt
+import hmac
+import logging
+import binascii
+import errno
+
+
+from hashlib import sha256
+import simplejson as json
+
+
+from leap.soledad.common import (
+ soledad_assert,
+ soledad_assert_type
+)
+from leap.soledad.common.document import SoledadDocument
+from leap.soledad.common.crypto import (
+ MacMethods,
+ UnknownMacMethod,
+ WrongMac,
+ MAC_KEY,
+ MAC_METHOD_KEY,
+)
+from leap.soledad.common.errors import (
+ InvalidTokenError,
+ NotLockedError,
+ AlreadyLockedError,
+ LockTimedOutError,
+)
+from leap.soledad.client.events import (
+ SOLEDAD_CREATING_KEYS,
+ SOLEDAD_DONE_CREATING_KEYS,
+ SOLEDAD_DOWNLOADING_KEYS,
+ SOLEDAD_DONE_DOWNLOADING_KEYS,
+ SOLEDAD_UPLOADING_KEYS,
+ SOLEDAD_DONE_UPLOADING_KEYS,
+ signal,
+)
+
+
+logger = logging.getLogger(name=__name__)
+
+
+#
+# Exceptions
+#
+
+
+class SecretsException(Exception):
+ """
+ Generic exception type raised by this module.
+ """
+
+
+class NoStorageSecret(SecretsException):
+ """
+ Raised when trying to use a storage secret but none is available.
+ """
+ pass
+
+
+class PassphraseTooShort(SecretsException):
+ """
+ Raised when trying to change the passphrase but the provided passphrase is
+ too short.
+ """
+
+
+class BootstrapSequenceError(SecretsException):
+ """
+ Raised when an attempt to generate a secret and store it in a recovery
+ document on server failed.
+ """
+
+
+#
+# Secrets handler
+#
+
+class SoledadSecrets(object):
+ """
+ Soledad secrets handler.
+
+ The first C{self.REMOTE_STORAGE_SECRET_LENGTH} bytes of the storage
+ secret are used for remote storage encryption. We use the next
+ C{self.LOCAL_STORAGE_SECRET} bytes to derive a key for local storage.
+ From these bytes, the first C{self.SALT_LENGTH} bytes are used as the
+ salt and the rest as the password for the scrypt hashing.
+ """
+
+ LOCAL_STORAGE_SECRET_LENGTH = 512
+ """
+ The length, in bytes, of the secret used to derive a passphrase for the
+ SQLCipher database.
+ """
+
+ REMOTE_STORAGE_SECRET_LENGTH = 512
+ """
+ The length, in bytes, of the secret used to derive an encryption key and a
+ MAC auth key for remote storage.
+ """
+
+ SALT_LENGTH = 64
+ """
+ The length, in bytes, of the salt used to derive the key for the storage
+ secret encryption.
+ """
+
+ GEN_SECRET_LENGTH = LOCAL_STORAGE_SECRET_LENGTH \
+ + REMOTE_STORAGE_SECRET_LENGTH \
+ + SALT_LENGTH # for sync db
+ """
+ The length, in bytes, of the secret to be generated. This includes local
+ and remote secrets, and the salt for deriving the sync db secret.
+ """
+
+ MINIMUM_PASSPHRASE_LENGTH = 6
+ """
+ The minimum length, in bytes, for a passphrase. The passphrase length is
+ only checked when the user changes her passphrase, not when she
+ instantiates Soledad.
+ """
+
+ IV_SEPARATOR = ":"
+ """
+ A separator used for storing the encryption initial value prepended to the
+ ciphertext.
+ """
+
+ UUID_KEY = 'uuid'
+ STORAGE_SECRETS_KEY = 'storage_secrets'
+ SECRET_KEY = 'secret'
+ CIPHER_KEY = 'cipher'
+ LENGTH_KEY = 'length'
+ KDF_KEY = 'kdf'
+ KDF_SALT_KEY = 'kdf_salt'
+ KDF_LENGTH_KEY = 'kdf_length'
+ KDF_SCRYPT = 'scrypt'
+ CIPHER_AES256 = 'aes256'
+ """
+ Keys used to access storage secrets in recovery documents.
+ """
+
+ def __init__(self, uuid, passphrase, secrets_path, shared_db, crypto,
+ secret_id=None):
+ """
+ Initialize the secrets manager.
+
+ :param uuid: User's unique id.
+ :type uuid: str
+ :param passphrase: The passphrase for locking and unlocking encryption
+ secrets for local and remote storage.
+ :type passphrase: unicode
+ :param secrets_path: Path for storing encrypted key used for
+ symmetric encryption.
+ :type secrets_path: str
+ :param shared_db: The shared database that stores user secrets.
+ :type shared_db: leap.soledad.client.shared_db.SoledadSharedDatabase
+ :param crypto: A soledad crypto object.
+ :type crypto: SoledadCrypto
+ :param secret_id: The id of the storage secret to be used.
+ :type secret_id: str
+ """
+ self._uuid = uuid
+ self._passphrase = passphrase
+ self._secrets_path = secrets_path
+ self._shared_db = shared_db
+ self._crypto = crypto
+ self._secret_id = secret_id
+ self._secrets = {}
+
+ def bootstrap(self):
+ """
+ Bootstrap secrets.
+
+ Soledad secrets bootstrap is the following sequence of stages:
+
+ * stage 1 - local secret loading:
+ - if secrets exist locally, load them.
+ * stage 2 - remote secret loading:
+ - else, if secrets exist in server, download them.
+ * stage 3 - secret generation:
+ - else, generate a new secret and store in server.
+
+ This method decides which bootstrap stages have already been performed
+ and performs the missing ones in order.
+
+ :raise BootstrapSequenceError: Raised when the secret generation and
+ storage on server sequence has failed for some reason.
+ """
+ # STAGE 1 - verify if secrets exist locally
+ if not self._has_secret(): # try to load from local storage.
+
+ # STAGE 2 - there are no secrets in local storage, so try to fetch
+ # encrypted secrets from server.
+ logger.info(
+ 'Trying to fetch cryptographic secrets from shared recovery '
+ 'database...')
+
+ # --- start of atomic operation in shared db ---
+
+ # obtain lock on shared db
+ token = timeout = None
+ try:
+ token, timeout = self._shared_db.lock()
+ except AlreadyLockedError:
+ raise BootstrapSequenceError('Database is already locked.')
+ except LockTimedOutError:
+ raise BootstrapSequenceError('Lock operation timed out.')
+
+ self._get_or_gen_crypto_secrets()
+
+ # release the lock on shared db
+ try:
+ self._shared_db.unlock(token)
+ self._shared_db.close()
+ except NotLockedError:
+ # for some reason the lock expired. Despite that, secret
+ # loading or generation/storage must have been executed
+ # successfully, so we pass.
+ pass
+ except InvalidTokenError:
+ # here, our lock has not only expired but also some other
+ # client application has obtained a new lock and is currently
+ # doing its thing in the shared database. Using the same
+ # reasoning as above, we assume everything went smooth and
+ # pass.
+ pass
+ except Exception as e:
+ logger.error("Unhandled exception when unlocking shared "
+ "database.")
+ logger.exception(e)
+
+ # --- end of atomic operation in shared db ---
+
+ def _has_secret(self):
+ """
+ Return whether there is a storage secret available for use or not.
+
+ :return: Whether there's a storage secret for symmetric encryption.
+ :rtype: bool
+ """
+ if self._secret_id is None or self._secret_id not in self._secrets:
+ try:
+ self._load_secrets() # try to load from disk
+ except IOError as e:
+ logger.warning('IOError while loading secrets from disk: %s' % str(e))
+ return False
+ return self.storage_secret is not None
+
+ def _load_secrets(self):
+ """
+ Load storage secrets from local file.
+ """
+ # does the file exist in disk?
+ if not os.path.isfile(self._secrets_path):
+ raise IOError('File does not exist: %s' % self._secrets_path)
+ # read storage secrets from file
+ content = None
+ with open(self._secrets_path, 'r') as f:
+ content = json.loads(f.read())
+ _, mac = self._import_recovery_document(content)
+ # choose first secret if no secret_id was given
+ if self._secret_id is None:
+ self.set_secret_id(self._secrets.items()[0][0])
+ # enlarge secret if needed
+ enlarged = False
+ if len(self._secrets[self._secret_id]) < self.GEN_SECRET_LENGTH:
+ gen_len = self.GEN_SECRET_LENGTH \
+ - len(self._secrets[self._secret_id])
+ new_piece = os.urandom(gen_len)
+ self._secrets[self._secret_id] += new_piece
+ enlarged = True
+ # store and save in shared db if needed
+ if not mac or enlarged:
+ self._store_secrets()
+ self._put_secrets_in_shared_db()
+
+ def _get_or_gen_crypto_secrets(self):
+ """
+ Retrieves or generates the crypto secrets.
+
+ :raises BootstrapSequenceError: Raised when unable to store secrets in
+ shared database.
+ """
+ doc = self._get_secrets_from_shared_db()
+
+ if doc:
+ logger.info(
+ 'Found cryptographic secrets in shared recovery '
+ 'database.')
+ _, mac = self._import_recovery_document(doc.content)
+ if mac is False:
+ self.put_secrets_in_shared_db()
+ self._store_secrets() # save new secrets in local file
+ if self._secret_id is None:
+ self.set_secret_id(self._secrets.items()[0][0])
+ else:
+ # STAGE 3 - there are no secrets in server also, so
+ # generate a secret and store it in remote db.
+ logger.info(
+ 'No cryptographic secrets found, creating new '
+                'secrets...')
+ self.set_secret_id(self._gen_secret())
+ try:
+ self._put_secrets_in_shared_db()
+ except Exception as ex:
+ # storing generated secret in shared db failed for
+ # some reason, so we erase the generated secret and
+ # raise.
+ try:
+ os.unlink(self._secrets_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT: # no such file or directory
+ logger.exception(e)
+ logger.exception(ex)
+ raise BootstrapSequenceError(
+ 'Could not store generated secret in the shared '
+ 'database, bailing out...')
+
+ #
+ # Shared DB related methods
+ #
+
+ def _shared_db_doc_id(self):
+ """
+ Calculate the doc_id of the document in the shared db that stores key
+ material.
+
+ :return: the hash
+ :rtype: str
+ """
+ return sha256(
+ '%s%s' %
+ (self._passphrase_as_string(), self._uuid)).hexdigest()
+
+ def _export_recovery_document(self):
+ """
+ Export the storage secrets.
+
+ A recovery document has the following structure:
+
+ {
+ 'storage_secrets': {
+ '<storage_secret id>': {
+ 'kdf': 'scrypt',
+ 'kdf_salt': '<b64 repr of salt>'
+ 'kdf_length': <key length>
+ 'cipher': 'aes256',
+ 'length': <secret length>,
+ 'secret': '<encrypted storage_secret>',
+ },
+ },
+ 'kdf': 'scrypt',
+ 'kdf_salt': '<b64 repr of salt>',
+ 'kdf_length: <key length>,
+ '_mac_method': 'hmac',
+ '_mac': '<mac>'
+ }
+
+ Note that multiple storage secrets might be stored in one recovery
+ document. This method will also calculate a MAC of a string
+ representation of the secrets dictionary.
+
+ :return: The recovery document.
+ :rtype: dict
+ """
+ # create salt and key for calculating MAC
+ salt = os.urandom(self.SALT_LENGTH)
+ key = scrypt.hash(self._passphrase_as_string(), salt, buflen=32)
+ # encrypt secrets
+ encrypted_secrets = {}
+ for secret_id in self._secrets:
+ encrypted_secrets[secret_id] = self._encrypt_storage_secret(
+ self._secrets[secret_id])
+ # create the recovery document
+ data = {
+ self.STORAGE_SECRETS_KEY: encrypted_secrets,
+ self.KDF_KEY: self.KDF_SCRYPT,
+ self.KDF_SALT_KEY: binascii.b2a_base64(salt),
+ self.KDF_LENGTH_KEY: len(key),
+ MAC_METHOD_KEY: MacMethods.HMAC,
+ MAC_KEY: hmac.new(
+ key,
+ json.dumps(encrypted_secrets),
+ sha256).hexdigest(),
+ }
+ return data
+
+ def _import_recovery_document(self, data):
+ """
+ Import storage secrets for symmetric encryption and uuid (if present)
+ from a recovery document.
+
+ Note that this method does not store the imported data on disk. For
+ that, use C{self._store_secrets()}.
+
+ :param data: The recovery document.
+ :type data: dict
+
+ :return: A tuple containing the number of imported secrets and whether
+                 there was MAC information available for authenticating.
+ :rtype: (int, bool)
+ """
+ soledad_assert(self.STORAGE_SECRETS_KEY in data)
+ # check mac of the recovery document
+ mac = None
+ if MAC_KEY in data:
+ soledad_assert(data[MAC_KEY] is not None)
+ soledad_assert(MAC_METHOD_KEY in data)
+ soledad_assert(self.KDF_KEY in data)
+ soledad_assert(self.KDF_SALT_KEY in data)
+ soledad_assert(self.KDF_LENGTH_KEY in data)
+ if data[MAC_METHOD_KEY] == MacMethods.HMAC:
+ key = scrypt.hash(
+ self._passphrase_as_string(),
+ binascii.a2b_base64(data[self.KDF_SALT_KEY]),
+ buflen=32)
+ mac = hmac.new(
+ key,
+ json.dumps(data[self.STORAGE_SECRETS_KEY]),
+ sha256).hexdigest()
+ else:
+ raise UnknownMacMethod('Unknown MAC method: %s.' %
+ data[MAC_METHOD_KEY])
+ if mac != data[MAC_KEY]:
+ raise WrongMac('Could not authenticate recovery document\'s '
+ 'contents.')
+ # include secrets in the secret pool.
+ secret_count = 0
+ for secret_id, encrypted_secret in data[self.STORAGE_SECRETS_KEY].items():
+ if secret_id not in self._secrets:
+ try:
+ self._secrets[secret_id] = \
+ self._decrypt_storage_secret(encrypted_secret)
+ secret_count += 1
+ except SecretsException as e:
+ logger.error("Failed to decrypt storage secret: %s"
+ % str(e))
+ return secret_count, mac
+
+ def _get_secrets_from_shared_db(self):
+ """
+ Retrieve the document with encrypted key material from the shared
+ database.
+
+ :return: a document with encrypted key material in its contents
+ :rtype: SoledadDocument
+ """
+ signal(SOLEDAD_DOWNLOADING_KEYS, self._uuid)
+ db = self._shared_db
+ if not db:
+ logger.warning('No shared db found')
+ return
+ doc = db.get_doc(self._shared_db_doc_id())
+ signal(SOLEDAD_DONE_DOWNLOADING_KEYS, self._uuid)
+ return doc
+
+ def _put_secrets_in_shared_db(self):
+ """
+ Assert local keys are the same as shared db's ones.
+
+ Try to fetch keys from shared recovery database. If they already exist
+ in the remote db, assert that that data is the same as local data.
+ Otherwise, upload keys to shared recovery database.
+ """
+ soledad_assert(
+ self._has_secret(),
+ 'Tried to send keys to server but they don\'t exist in local '
+ 'storage.')
+ # try to get secrets doc from server, otherwise create it
+ doc = self._get_secrets_from_shared_db()
+ if doc is None:
+ doc = SoledadDocument(
+ doc_id=self._shared_db_doc_id())
+ # fill doc with encrypted secrets
+ doc.content = self._export_recovery_document()
+ # upload secrets to server
+ signal(SOLEDAD_UPLOADING_KEYS, self._uuid)
+ db = self._shared_db
+ if not db:
+ logger.warning('No shared db found')
+ return
+ db.put_doc(doc)
+ signal(SOLEDAD_DONE_UPLOADING_KEYS, self._uuid)
+
+ #
+ # Management of secret for symmetric encryption.
+ #
+
+ def _decrypt_storage_secret(self, encrypted_secret_dict):
+ """
+ Decrypt the storage secret.
+
+ Storage secret is encrypted before being stored. This method decrypts
+ and returns the decrypted storage secret.
+
+ :param encrypted_secret_dict: The encrypted storage secret.
+ :type encrypted_secret_dict: dict
+
+ :return: The decrypted storage secret.
+ :rtype: str
+
+ :raise SecretsException: Raised in case the decryption of the storage
+ secret fails for some reason.
+ """
+ # calculate the encryption key
+ if encrypted_secret_dict[self.KDF_KEY] != self.KDF_SCRYPT:
+ raise SecretsException("Unknown KDF in stored secret.")
+ key = scrypt.hash(
+ self._passphrase_as_string(),
+ # the salt is stored base64 encoded
+ binascii.a2b_base64(
+ encrypted_secret_dict[self.KDF_SALT_KEY]),
+ buflen=32, # we need a key with 256 bits (32 bytes).
+ )
+ if encrypted_secret_dict[self.KDF_LENGTH_KEY] != len(key):
+ raise SecretsException("Wrong length of decryption key.")
+ if encrypted_secret_dict[self.CIPHER_KEY] != self.CIPHER_AES256:
+ raise SecretsException("Unknown cipher in stored secret.")
+ # recover the initial value and ciphertext
+        # recover the initialization vector (IV) and the ciphertext
+ self.IV_SEPARATOR, 1)
+ ciphertext = binascii.a2b_base64(ciphertext)
+ decrypted_secret = self._crypto.decrypt_sym(ciphertext, key, iv=iv)
+ if encrypted_secret_dict[self.LENGTH_KEY] != len(decrypted_secret):
+ raise SecretsException("Wrong length of decrypted secret.")
+ return decrypted_secret
+
+ def _encrypt_storage_secret(self, decrypted_secret):
+ """
+ Encrypt the storage secret.
+
+ An encrypted secret has the following structure:
+
+ {
+ '<secret_id>': {
+ 'kdf': 'scrypt',
+                'kdf_salt': '<b64 repr of salt>',
+                'kdf_length': <key length>,
+ 'cipher': 'aes256',
+ 'length': <secret length>,
+ 'secret': '<encrypted b64 repr of storage_secret>',
+ }
+ }
+
+ :param decrypted_secret: The decrypted storage secret.
+ :type decrypted_secret: str
+
+ :return: The encrypted storage secret.
+ :rtype: dict
+ """
+ # generate random salt
+ salt = os.urandom(self.SALT_LENGTH)
+ # get a 256-bit key
+ key = scrypt.hash(self._passphrase_as_string(), salt, buflen=32)
+ iv, ciphertext = self._crypto.encrypt_sym(decrypted_secret, key)
+ encrypted_secret_dict = {
+ # leap.soledad.crypto submodule uses AES256 for symmetric
+ # encryption.
+ self.KDF_KEY: self.KDF_SCRYPT,
+ self.KDF_SALT_KEY: binascii.b2a_base64(salt),
+ self.KDF_LENGTH_KEY: len(key),
+ self.CIPHER_KEY: self.CIPHER_AES256,
+ self.LENGTH_KEY: len(decrypted_secret),
+ self.SECRET_KEY: '%s%s%s' % (
+ str(iv), self.IV_SEPARATOR, binascii.b2a_base64(ciphertext)),
+ }
+ return encrypted_secret_dict
+
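A quick way to read the structure documented above is as a round trip between the two methods. The following usage sketch is not part of the patch; it assumes `secrets` is an instance of this class with a passphrase set and a working crypto object behind `self._crypto`.

    import os

    # encrypt a random 64-byte secret, then recover it with the passphrase
    plain = os.urandom(64)
    encrypted = secrets._encrypt_storage_secret(plain)
    assert encrypted[secrets.CIPHER_KEY] == secrets.CIPHER_AES256
    assert secrets._decrypt_storage_secret(encrypted) == plain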
+ @property
+ def storage_secret(self):
+ """
+ Return the storage secret.
+
+ :return: The decrypted storage secret.
+ :rtype: str
+ """
+ return self._secrets.get(self._secret_id)
+
+ def set_secret_id(self, secret_id):
+ """
+ Define the id of the storage secret to be used.
+
+ :param secret_id: The id of the storage secret to be used.
+ :type secret_id: str
+ """
+ self._secret_id = secret_id
+
+ def _gen_secret(self):
+ """
+        Generate a secret for symmetric encryption and store it in a local
+        encrypted file.
+
+ This method emits the following signals:
+
+ * SOLEDAD_CREATING_KEYS
+ * SOLEDAD_DONE_CREATING_KEYS
+
+ :return: The id of the generated secret.
+ :rtype: str
+ """
+ signal(SOLEDAD_CREATING_KEYS, self._uuid)
+ # generate random secret
+ secret = os.urandom(self.GEN_SECRET_LENGTH)
+ secret_id = sha256(secret).hexdigest()
+ self._secrets[secret_id] = secret
+ self._store_secrets()
+ signal(SOLEDAD_DONE_CREATING_KEYS, self._uuid)
+ return secret_id
+
+ def _store_secrets(self):
+ """
+        Store secrets in the file at C{self._secrets_path}.
+ """
+ with open(self._secrets_path, 'w') as f:
+ f.write(
+ json.dumps(
+ self._export_recovery_document()))
+
+ def change_passphrase(self, new_passphrase):
+ """
+ Change the passphrase that encrypts the storage secret.
+
+ :param new_passphrase: The new passphrase.
+ :type new_passphrase: unicode
+
+ :raise NoStorageSecret: Raised if there's no storage secret available.
+ """
+ # TODO: maybe we want to add more checks to guarantee passphrase is
+ # reasonable?
+ soledad_assert_type(new_passphrase, unicode)
+ if len(new_passphrase) < self.MINIMUM_PASSPHRASE_LENGTH:
+ raise PassphraseTooShort(
+ 'Passphrase must be at least %d characters long!' %
+ self.MINIMUM_PASSPHRASE_LENGTH)
+ # ensure there's a secret for which the passphrase will be changed.
+ if not self._has_secret():
+ raise NoStorageSecret()
+ self._passphrase = new_passphrase
+ self._store_secrets()
+ self._put_secrets_in_shared_db()
+
+ #
+ # Setters and getters
+ #
+
+ @property
+ def secret_id(self):
+ return self._secret_id
+
+ def _get_secrets_path(self):
+ return self._secrets_path
+
+ def _set_secrets_path(self, secrets_path):
+ self._secrets_path = secrets_path
+
+ secrets_path = property(
+ _get_secrets_path,
+ _set_secrets_path,
+ doc='The path for the file containing the encrypted symmetric secret.')
+
+ @property
+ def passphrase(self):
+ """
+ Return the passphrase for locking and unlocking encryption secrets for
+ local and remote storage.
+ """
+ return self._passphrase
+
+ def _passphrase_as_string(self):
+ return self._passphrase.encode('utf-8')
+
+ #
+ # remote storage secret
+ #
+
+ @property
+ def remote_storage_secret(self):
+ """
+ Return the secret for remote storage.
+ """
+ key_start = 0
+ key_end = self.REMOTE_STORAGE_SECRET_LENGTH
+ return self.storage_secret[key_start:key_end]
+
+ #
+ # local storage key
+ #
+
+ def _get_local_storage_secret(self):
+ """
+ Return the local storage secret.
+
+ :return: The local storage secret.
+ :rtype: str
+ """
+ pwd_start = self.REMOTE_STORAGE_SECRET_LENGTH + self.SALT_LENGTH
+ pwd_end = self.REMOTE_STORAGE_SECRET_LENGTH + self.LOCAL_STORAGE_SECRET_LENGTH
+ return self.storage_secret[pwd_start:pwd_end]
+
+ def _get_local_storage_salt(self):
+ """
+ Return the local storage salt.
+
+ :return: The local storage salt.
+ :rtype: str
+ """
+ salt_start = self.REMOTE_STORAGE_SECRET_LENGTH
+ salt_end = salt_start + self.SALT_LENGTH
+ return self.storage_secret[salt_start:salt_end]
+
+ def get_local_storage_key(self):
+ """
+ Return the local storage key derived from the local storage secret.
+
+ :return: The key for protecting the local database.
+ :rtype: str
+ """
+ return scrypt.hash(
+ password=self._get_local_storage_secret(),
+ salt=self._get_local_storage_salt(),
+ buflen=32, # we need a key with 256 bits (32 bytes)
+ )
+
+ #
+ # sync db key
+ #
+
+ def _get_sync_db_salt(self):
+ """
+ Return the salt for sync db.
+ """
+ salt_start = self.LOCAL_STORAGE_SECRET_LENGTH \
+ + self.REMOTE_STORAGE_SECRET_LENGTH
+ salt_end = salt_start + self.SALT_LENGTH
+ return self.storage_secret[salt_start:salt_end]
+
+ def get_sync_db_key(self):
+ """
+ Return the key for protecting the sync database.
+
+ :return: The key for protecting the sync database.
+ :rtype: str
+ """
+ return scrypt.hash(
+ password=self._get_local_storage_secret(),
+ salt=self._get_sync_db_salt(),
+ buflen=32, # we need a key with 256 bits (32 bytes)
+ )
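The accessors above all slice the same storage secret: the first bytes feed remote storage, the next chunk holds the local-storage salt and secret, and the sync-db salt comes right after the local part. The helper below is purely illustrative (not part of the patch) and mirrors those offsets, with the lengths passed in rather than read from the class constants.

    def split_storage_secret(secret, remote_len, local_len, salt_len):
        # Mirrors the slicing done by remote_storage_secret,
        # _get_local_storage_salt/_get_local_storage_secret and
        # _get_sync_db_salt above.
        remote_secret = secret[0:remote_len]
        local_salt = secret[remote_len:remote_len + salt_len]
        local_secret = secret[remote_len + salt_len:remote_len + local_len]
        sync_db_salt = secret[remote_len + local_len:
                              remote_len + local_len + salt_len]
        return remote_secret, local_salt, local_secret, sync_db_salt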
diff --git a/client/src/leap/soledad/client/sqlcipher.py b/client/src/leap/soledad/client/sqlcipher.py
index 2df9606e..b7de2fba 100644
--- a/client/src/leap/soledad/client/sqlcipher.py
+++ b/client/src/leap/soledad/client/sqlcipher.py
@@ -44,7 +44,6 @@ handled by Soledad should be created by SQLCipher >= 2.0.
import logging
import multiprocessing
import os
-import sqlite3
import string
import threading
import time
@@ -63,6 +62,8 @@ from leap.soledad.client.crypto import SyncEncrypterPool, SyncDecrypterPool
from leap.soledad.client.target import SoledadSyncTarget
from leap.soledad.client.target import PendingReceivedDocsSyncError
from leap.soledad.client.sync import SoledadSynchronizer
+from leap.soledad.client.mp_safe_db import MPSafeSQLiteDB
+from leap.soledad.common import soledad_assert
from leap.soledad.common.document import SoledadDocument
@@ -91,8 +92,17 @@ SQLITE_ISOLATION_LEVEL = None
def open(path, password, create=True, document_factory=None, crypto=None,
raw_key=False, cipher='aes-256-cbc', kdf_iter=4000,
- cipher_page_size=1024, defer_encryption=False):
- """Open a database at the given location.
+ cipher_page_size=1024, defer_encryption=False, sync_db_key=None):
+ """
+ Open a database at the given location.
+
+ *** IMPORTANT ***
+
+    Don't forget to close the database after use by calling the close()
+    method; otherwise some resources might not be freed and you may
+    experience resource leaks.
+
+ *** IMPORTANT ***
Will raise u1db.errors.DatabaseDoesNotExist if create=False and the
database does not already exist.
@@ -127,7 +137,8 @@ def open(path, password, create=True, document_factory=None, crypto=None,
return SQLCipherDatabase.open_database(
path, password, create=create, document_factory=document_factory,
crypto=crypto, raw_key=raw_key, cipher=cipher, kdf_iter=kdf_iter,
- cipher_page_size=cipher_page_size, defer_encryption=defer_encryption)
+ cipher_page_size=cipher_page_size, defer_encryption=defer_encryption,
+ sync_db_key=sync_db_key)
#
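As the expanded docstring stresses, callers of open() are now responsible for closing the returned database so that the sync db, queue and worker pools get released. The usage sketch below is not part of the patch; the path, passphrase and sync_db_key values are placeholders, and sync_db_key would normally come from get_sync_db_key() in the secrets code above.

    from leap.soledad.client import sqlcipher

    db = sqlcipher.open(
        '/tmp/example.db', 'my passphrase',
        sync_db_key='<hex key from get_sync_db_key()>')
    try:
        db.create_doc({'example': True})
    finally:
        db.close()  # frees the SQLCipher handle and any sync-db resources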
@@ -190,11 +201,19 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
def __init__(self, sqlcipher_file, password, document_factory=None,
crypto=None, raw_key=False, cipher='aes-256-cbc',
- kdf_iter=4000, cipher_page_size=1024):
+ kdf_iter=4000, cipher_page_size=1024, sync_db_key=None):
"""
Connect to an existing SQLCipher database, creating a new sqlcipher
database file if needed.
+ *** IMPORTANT ***
+
+        Don't forget to close the database after use by calling the close()
+        method; otherwise some resources might not be freed and you may
+        experience resource leaks.
+
+ *** IMPORTANT ***
+
:param sqlcipher_file: The path for the SQLCipher file.
:type sqlcipher_file: str
:param password: The password that protects the SQLCipher db.
@@ -243,19 +262,17 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
self._ensure_schema()
self._crypto = crypto
+ # define sync-db attrs
+ self._sqlcipher_file = sqlcipher_file
+ self._sync_db_key = sync_db_key
self._sync_db = None
self._sync_db_write_lock = None
self._sync_enc_pool = None
+ self.sync_queue = None
if self.defer_encryption:
- if sqlcipher_file != ":memory:":
- self._sync_db_path = "%s-sync" % sqlcipher_file
- else:
- self._sync_db_path = ":memory:"
-
# initialize sync db
self._init_sync_db()
-
# initialize syncing queue encryption pool
self._sync_enc_pool = SyncEncrypterPool(
self._crypto, self._sync_db, self._sync_db_write_lock)
@@ -281,7 +298,7 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
def _open_database(cls, sqlcipher_file, password, document_factory=None,
crypto=None, raw_key=False, cipher='aes-256-cbc',
kdf_iter=4000, cipher_page_size=1024,
- defer_encryption=False):
+ defer_encryption=False, sync_db_key=None):
"""
Open a SQLCipher database.
@@ -351,16 +368,25 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
return SQLCipherDatabase._sqlite_registry[v](
sqlcipher_file, password, document_factory=document_factory,
crypto=crypto, raw_key=raw_key, cipher=cipher, kdf_iter=kdf_iter,
- cipher_page_size=cipher_page_size)
+ cipher_page_size=cipher_page_size, sync_db_key=sync_db_key)
@classmethod
def open_database(cls, sqlcipher_file, password, create, backend_cls=None,
document_factory=None, crypto=None, raw_key=False,
cipher='aes-256-cbc', kdf_iter=4000,
- cipher_page_size=1024, defer_encryption=False):
+ cipher_page_size=1024, defer_encryption=False,
+ sync_db_key=None):
"""
Open a SQLCipher database.
+ *** IMPORTANT ***
+
+        Don't forget to close the database after use by calling the close()
+        method; otherwise some resources might not be freed and you may
+        experience resource leaks.
+
+ *** IMPORTANT ***
+
:param sqlcipher_file: The path for the SQLCipher file.
:type sqlcipher_file: str
@@ -409,7 +435,7 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
sqlcipher_file, password, document_factory=document_factory,
crypto=crypto, raw_key=raw_key, cipher=cipher,
kdf_iter=kdf_iter, cipher_page_size=cipher_page_size,
- defer_encryption=defer_encryption)
+ defer_encryption=defer_encryption, sync_db_key=sync_db_key)
except u1db_errors.DatabaseDoesNotExist:
if not create:
raise
@@ -420,7 +446,8 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
return backend_cls(
sqlcipher_file, password, document_factory=document_factory,
crypto=crypto, raw_key=raw_key, cipher=cipher,
- kdf_iter=kdf_iter, cipher_page_size=cipher_page_size)
+ kdf_iter=kdf_iter, cipher_page_size=cipher_page_size,
+ sync_db_key=sync_db_key)
def sync(self, url, creds=None, autocreate=True, defer_decryption=True):
"""
@@ -448,8 +475,9 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
res = None
# the following context manager blocks until the syncing lock can be
# acquired.
+ if defer_decryption:
+ self._init_sync_db()
with self.syncer(url, creds=creds) as syncer:
-
# XXX could mark the critical section here...
try:
res = syncer.sync(autocreate=autocreate,
@@ -547,12 +575,22 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
        Initialize the database of symmetrically-encrypted documents to be
        synced, and the queue used to communicate with subprocess workers.
"""
- self._sync_db = sqlite3.connect(self._sync_db_path,
- check_same_thread=False)
-
- self._sync_db_write_lock = threading.Lock()
- self._create_sync_db_tables()
- self.sync_queue = multiprocessing.Queue()
+ if self._sync_db is None:
+ soledad_assert(self._sync_db_key is not None)
+ sync_db_path = None
+ if self._sqlcipher_file != ":memory:":
+ sync_db_path = "%s-sync" % self._sqlcipher_file
+ else:
+ sync_db_path = ":memory:"
+ self._sync_db = MPSafeSQLiteDB(sync_db_path)
+ # protect the sync db with a password
+ if self._sync_db_key is not None:
+ self._set_crypto_pragmas(
+ self._sync_db, self._sync_db_key, False,
+ 'aes-256-cbc', 4000, 1024)
+ self._sync_db_write_lock = threading.Lock()
+ self._create_sync_db_tables()
+ self.sync_queue = multiprocessing.Queue()
def _create_sync_db_tables(self):
"""
@@ -566,9 +604,8 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
decr.TABLE_NAME, decr.FIELD_NAMES))
with self._sync_db_write_lock:
- with self._sync_db:
- self._sync_db.execute(sql_encr)
- self._sync_db.execute(sql_decr)
+ self._sync_db.execute(sql_encr)
+ self._sync_db.execute(sql_decr)
#
# Symmetric encryption of syncing docs
@@ -1074,17 +1111,45 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
"""
Close db_handle and close syncer.
"""
- logger.debug("Sqlcipher backend: closing")
+        if logger is not None:  # logger might be None if called from __del__
+ logger.debug("Sqlcipher backend: closing")
+ # stop the sync watcher for deferred encryption
if self._sync_watcher is not None:
self._sync_watcher.stop()
self._sync_watcher.shutdown()
+ self._sync_watcher = None
+ # close all open syncers
for url in self._syncers:
_, syncer = self._syncers[url]
syncer.close()
+ self._syncers = []
+ # stop the encryption pool
if self._sync_enc_pool is not None:
self._sync_enc_pool.close()
+ self._sync_enc_pool = None
+ # close the actual database
if self._db_handle is not None:
self._db_handle.close()
+ self._db_handle = None
+ # close the sync database
+ if self._sync_db is not None:
+ self._sync_db.close()
+ self._sync_db = None
+ # close the sync queue
+ if self.sync_queue is not None:
+ self.sync_queue.close()
+ del self.sync_queue
+ self.sync_queue = None
+
+ def __del__(self):
+ """
+ Free resources when deleting or garbage collecting the database.
+
+        This is only here to minimize problems if someone ever forgets to
+        call the close() method after using the database; you should not
+        rely on garbage collection to free up the database resources.
+ """
+ self.close()
@property
def replica_uid(self):
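Since __del__ is only a safety net, explicit teardown remains the recommended pattern, and contextlib.closing makes it hard to forget. The sketch below is not part of the patch; the path and passphrase are placeholders.

    from contextlib import closing

    from leap.soledad.client import sqlcipher

    with closing(sqlcipher.open('/tmp/example.db', 'my passphrase')) as db:
        db.create_doc({'example': True})
    # close() has run here, releasing the db handle, sync db, queue and pools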
diff --git a/client/src/leap/soledad/client/sync.py b/client/src/leap/soledad/client/sync.py
index 5d545a77..c158f2a7 100644
--- a/client/src/leap/soledad/client/sync.py
+++ b/client/src/leap/soledad/client/sync.py
@@ -29,8 +29,6 @@ Extend u1db Synchronizer with the ability to:
"""
-import json
-
import logging
import traceback
from threading import Lock
diff --git a/client/src/leap/soledad/client/target.py b/client/src/leap/soledad/client/target.py
index 70e4d3a2..ae2010a6 100644
--- a/client/src/leap/soledad/client/target.py
+++ b/client/src/leap/soledad/client/target.py
@@ -28,12 +28,10 @@ import logging
import re
import urllib
import threading
-import urlparse
from collections import defaultdict
from time import sleep
from uuid import uuid4
-from contextlib import contextmanager
import simplejson as json
from taskthread import TimerTask
@@ -44,7 +42,6 @@ from u1db.remote.http_client import _encode_query_parameter, HTTPClientBase
from zope.proxy import ProxyBase
from zope.proxy import sameProxiedObjects, setProxiedObject
-from leap.soledad.common import soledad_assert
from leap.soledad.common.document import SoledadDocument
from leap.soledad.client.auth import TokenBasedAuth
from leap.soledad.client.crypto import is_symmetrically_encrypted
@@ -87,7 +84,7 @@ class DocumentSyncerThread(threading.Thread):
"""
def __init__(self, doc_syncer, release_method, failed_method,
- idx, total, last_request_lock=None, last_callback_lock=None):
+ idx, total, last_request_lock=None, last_callback_lock=None):
"""
Initialize a new syncer thread.
@@ -246,7 +243,7 @@ class DocumentSyncerPool(object):
"""
def __init__(self, raw_url, raw_creds, query_string, headers,
- ensure_callback, stop_method):
+ ensure_callback, stop_method):
"""
Initialize the document syncer pool.
@@ -279,7 +276,7 @@ class DocumentSyncerPool(object):
self._threads = []
def new_syncer_thread(self, idx, total, last_request_lock=None,
- last_callback_lock=None):
+ last_callback_lock=None):
"""
Yield a new document syncer thread.
@@ -376,6 +373,12 @@ class DocumentSyncerPool(object):
t.request_lock.release()
t.callback_lock.acquire(False) # just in case
t.callback_lock.release()
+ # release any blocking semaphores
+ for i in xrange(DocumentSyncerPool.POOL_SIZE):
+ try:
+ self._semaphore_pool.release()
+ except ValueError:
+ break
logger.warning("Soledad sync: cancelled sync threads.")
def cleanup(self):
@@ -613,7 +616,7 @@ class HTTPDocumentSyncer(HTTPClientBase, TokenBasedAuth):
self._conn.endheaders()
def _get_doc(self, received, sync_id, last_known_generation,
- last_known_trans_id):
+ last_known_trans_id):
"""
Get a sync document from server by means of a POST request.
@@ -652,7 +655,7 @@ class HTTPDocumentSyncer(HTTPClientBase, TokenBasedAuth):
return self._response()
def _put_doc(self, sync_id, last_known_generation, last_known_trans_id,
- id, rev, content, gen, trans_id, number_of_docs, doc_idx):
+ id, rev, content, gen, trans_id, number_of_docs, doc_idx):
"""
Put a sync document on server by means of a POST request.
@@ -759,7 +762,7 @@ class SoledadSyncTarget(HTTPSyncTarget, TokenBasedAuth):
#
def __init__(self, url, source_replica_uid=None, creds=None, crypto=None,
- sync_db=None, sync_db_write_lock=None):
+ sync_db=None, sync_db_write_lock=None):
"""
Initialize the SoledadSyncTarget.
@@ -916,7 +919,7 @@ class SoledadSyncTarget(HTTPSyncTarget, TokenBasedAuth):
"""
new_generation, new_transaction_id, number_of_changes, doc_id, \
rev, content, gen, trans_id = \
- self._parse_received_doc_response(response)
+ self._parse_received_doc_response(response)
if doc_id is not None:
# decrypt incoming document and insert into local database
# -------------------------------------------------------------
@@ -1125,11 +1128,14 @@ class SoledadSyncTarget(HTTPSyncTarget, TokenBasedAuth):
"""
self._ensure_callback = ensure_callback
- if defer_decryption:
+ if defer_decryption and self._sync_db is not None:
self._sync_exchange_lock.acquire()
self._setup_sync_decr_pool()
self._setup_sync_watcher()
self._defer_decryption = True
+ else:
+ # fall back
+ defer_decryption = False
self.start()
@@ -1140,8 +1146,9 @@ class SoledadSyncTarget(HTTPSyncTarget, TokenBasedAuth):
setProxiedObject(self._insert_doc_cb[source_replica_uid],
return_doc_cb)
- if not self.clear_to_sync():
- raise PendingReceivedDocsSyncError
+ # empty the database before starting a new sync
+ if defer_decryption is True and not self.clear_to_sync():
+ self._sync_decr_pool.empty()
self._ensure_connection()
if self._trace_hook: # for tests
@@ -1162,7 +1169,6 @@ class SoledadSyncTarget(HTTPSyncTarget, TokenBasedAuth):
self._raw_url, self._raw_creds, url, headers, ensure_callback,
self.stop)
threads = []
- last_request_lock = None
last_callback_lock = None
sent = 0
total = len(docs_by_generations)
@@ -1218,7 +1224,8 @@ class SoledadSyncTarget(HTTPSyncTarget, TokenBasedAuth):
t.doc_syncer.set_request_method(
'put', sync_id, cur_target_gen, cur_target_trans_id,
id=doc.doc_id, rev=doc.rev, content=doc_json, gen=gen,
- trans_id=trans_id, number_of_docs=number_of_docs, doc_idx=sent + 1)
+ trans_id=trans_id, number_of_docs=number_of_docs,
+ doc_idx=sent + 1)
            # set the success callback
def _success_callback(idx, total, response):
@@ -1242,20 +1249,30 @@ class SoledadSyncTarget(HTTPSyncTarget, TokenBasedAuth):
# save thread and append
t.start()
threads.append((t, doc))
- last_request_lock = t.request_lock
last_callback_lock = t.callback_lock
sent += 1
# make sure all threads finished and we have up-to-date info
+ last_successful_thread = None
while threads:
# check if there are failures
t, doc = threads.pop(0)
t.join()
if t.success:
synced.append((doc.doc_id, doc.rev))
+ last_successful_thread = t
- if defer_decryption:
- self._sync_watcher.start()
+ # delete documents from the sync database
+ if defer_encryption:
+ self.delete_encrypted_docs_from_db(synced)
+
+ # get target gen and trans_id after docs
+ gen_after_send = None
+ trans_id_after_send = None
+ if last_successful_thread is not None:
+ response_dict = json.loads(last_successful_thread.response[0])[0]
+ gen_after_send = response_dict['new_generation']
+ trans_id_after_send = response_dict['new_transaction_id']
# get docs from target
if self.stopped is False:
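The new generation bookkeeping assumes the body returned by a successful put decodes to a list whose first element carries the new generation and transaction id. The decode below is only an illustration (not part of the patch); the values are made up and only the two key names are taken from the code above.

    import simplejson as json

    body = '[{"new_generation": 12, "new_transaction_id": "T-abc"}]'
    info = json.loads(body)[0]
    assert info['new_generation'] == 12
    assert info['new_transaction_id'] == 'T-abc'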
@@ -1264,20 +1281,24 @@ class SoledadSyncTarget(HTTPSyncTarget, TokenBasedAuth):
last_known_generation, last_known_trans_id, headers,
return_doc_cb, ensure_callback, sync_id, syncer_pool,
defer_decryption=defer_decryption)
- syncer_pool.cleanup()
- # delete documents from the sync database
- if defer_encryption:
- self.delete_encrypted_docs_from_db(synced)
+ syncer_pool.cleanup()
- # wait for deferred decryption to finish
+ # decrypt docs in case of deferred decryption
if defer_decryption:
+ self._sync_watcher.start()
while self.clear_to_sync() is False:
sleep(self.DECRYPT_TASK_PERIOD)
self._teardown_sync_watcher()
self._teardown_sync_decr_pool()
self._sync_exchange_lock.release()
+ # update gen and trans id info in case we just sent and did not
+ # receive docs.
+ if gen_after_send is not None and gen_after_send > cur_target_gen:
+ cur_target_gen = gen_after_send
+ cur_target_trans_id = trans_id_after_send
+
self.stop()
return cur_target_gen, cur_target_trans_id
@@ -1322,13 +1343,15 @@ class SoledadSyncTarget(HTTPSyncTarget, TokenBasedAuth):
:type doc_rev: str
"""
encr = SyncEncrypterPool
- c = self._sync_db.cursor()
sql = ("SELECT content FROM %s WHERE doc_id=? and rev=?" % (
encr.TABLE_NAME,))
- c.execute(sql, (doc_id, doc_rev))
- res = c.fetchall()
- if len(res) != 0:
- return res[0][0]
+ res = self._sync_db.select(sql, (doc_id, doc_rev))
+ try:
+ val = res.next()
+ return val[0]
+ except StopIteration:
+ # no doc found
+ return None
def delete_encrypted_docs_from_db(self, docs_ids):
"""
@@ -1341,12 +1364,10 @@ class SoledadSyncTarget(HTTPSyncTarget, TokenBasedAuth):
"""
if docs_ids:
encr = SyncEncrypterPool
- c = self._sync_db.cursor()
for doc_id, doc_rev in docs_ids:
sql = ("DELETE FROM %s WHERE doc_id=? and rev=?" % (
encr.TABLE_NAME,))
- c.execute(sql, (doc_id, doc_rev))
- self._sync_db.commit()
+ self._sync_db.execute(sql, (doc_id, doc_rev))
def _save_encrypted_received_doc(self, doc, gen, trans_id, idx, total):
"""
@@ -1402,7 +1423,7 @@ class SoledadSyncTarget(HTTPSyncTarget, TokenBasedAuth):
:rtype: bool
"""
if self._sync_decr_pool is not None:
- return self._sync_decr_pool.count_received_encrypted_docs() == 0
+ return self._sync_decr_pool.count_docs_in_sync_db() == 0
else:
return True
@@ -1442,7 +1463,7 @@ class SoledadSyncTarget(HTTPSyncTarget, TokenBasedAuth):
decrypter = self._sync_decr_pool
decrypter.decrypt_received_docs()
- done = decrypter.process_decrypted()
+ decrypter.process_decrypted()
def _sign_request(self, method, url_query, params):
"""