From dbe5e37ef742617c93c7975a612582a77c7724a8 Mon Sep 17 00:00:00 2001 From: drebs Date: Sun, 16 Jun 2013 21:45:16 -0300 Subject: Split client and server in two different packages and refactor. --- src/leap/soledad/backends/__init__.py | 21 - src/leap/soledad/backends/couch.py | 484 ---------------------- src/leap/soledad/backends/leap_backend.py | 544 ------------------------- src/leap/soledad/backends/objectstore.py | 296 -------------- src/leap/soledad/backends/sqlcipher.py | 653 ------------------------------ 5 files changed, 1998 deletions(-) delete mode 100644 src/leap/soledad/backends/__init__.py delete mode 100644 src/leap/soledad/backends/couch.py delete mode 100644 src/leap/soledad/backends/leap_backend.py delete mode 100644 src/leap/soledad/backends/objectstore.py delete mode 100644 src/leap/soledad/backends/sqlcipher.py (limited to 'src/leap/soledad/backends') diff --git a/src/leap/soledad/backends/__init__.py b/src/leap/soledad/backends/__init__.py deleted file mode 100644 index 737c08e6..00000000 --- a/src/leap/soledad/backends/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# -*- coding: utf-8 -*- -# __init__.py -# Copyright (C) 2013 LEAP -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - - -""" -Backends that extend U1DB functionality. -""" diff --git a/src/leap/soledad/backends/couch.py b/src/leap/soledad/backends/couch.py deleted file mode 100644 index 57885012..00000000 --- a/src/leap/soledad/backends/couch.py +++ /dev/null @@ -1,484 +0,0 @@ -# -*- coding: utf-8 -*- -# couch.py -# Copyright (C) 2013 LEAP -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - - -"""A U1DB backend that uses CouchDB as its persistence layer.""" - -# general imports -import uuid -import re -import simplejson as json - - -from base64 import b64encode, b64decode -from u1db import errors -from u1db.sync import Synchronizer -from u1db.backends.inmemory import InMemoryIndex -from u1db.remote.server_state import ServerState -from u1db.errors import DatabaseDoesNotExist -from couchdb.client import Server, Document as CouchDocument -from couchdb.http import ResourceNotFound - - -from leap.soledad.backends.objectstore import ( - ObjectStoreDatabase, - ObjectStoreSyncTarget, -) -from leap.soledad.backends.leap_backend import LeapDocument - - -class InvalidURLError(Exception): - """ - Exception raised when Soledad encounters a malformed URL. 
- """ - - -class CouchDatabase(ObjectStoreDatabase): - """ - A U1DB backend that uses Couch as its persistence layer. - """ - - U1DB_TRANSACTION_LOG_KEY = 'transaction_log' - U1DB_CONFLICTS_KEY = 'conflicts' - U1DB_OTHER_GENERATIONS_KEY = 'other_generations' - U1DB_INDEXES_KEY = 'indexes' - U1DB_REPLICA_UID_KEY = 'replica_uid' - - COUCH_ID_KEY = '_id' - COUCH_REV_KEY = '_rev' - COUCH_U1DB_ATTACHMENT_KEY = 'u1db_json' - COUCH_U1DB_REV_KEY = 'u1db_rev' - - @classmethod - def open_database(cls, url, create): - """ - Open a U1DB database using CouchDB as backend. - - @param url: the url of the database replica - @type url: str - @param create: should the replica be created if it does not exist? - @type create: bool - - @return: the database instance - @rtype: CouchDatabase - """ - # get database from url - m = re.match('(^https?://[^/]+)/(.+)$', url) - if not m: - raise InvalidURLError - url = m.group(1) - dbname = m.group(2) - server = Server(url=url) - try: - server[dbname] - except ResourceNotFound: - if not create: - raise DatabaseDoesNotExist() - return cls(url, dbname) - - def __init__(self, url, dbname, replica_uid=None, full_commit=True, - session=None): - """ - Create a new Couch data container. - - @param url: the url of the couch database - @type url: str - @param dbname: the database name - @type dbname: str - @param replica_uid: an optional unique replica identifier - @type replica_uid: str - @param full_commit: turn on the X-Couch-Full-Commit header - @type full_commit: bool - @param session: an http.Session instance or None for a default session - @type session: http.Session - """ - self._url = url - self._full_commit = full_commit - self._session = session - self._server = Server(url=self._url, - full_commit=self._full_commit, - session=self._session) - self._dbname = dbname - # this will ensure that transaction and sync logs exist and are - # up-to-date. - try: - self._database = self._server[self._dbname] - except ResourceNotFound: - self._server.create(self._dbname) - self._database = self._server[self._dbname] - ObjectStoreDatabase.__init__(self, replica_uid=replica_uid, - # TODO: move the factory choice - # away - document_factory=LeapDocument) - - #------------------------------------------------------------------------- - # methods from Database - #------------------------------------------------------------------------- - - def _get_doc(self, doc_id, check_for_conflicts=False): - """ - Get just the document content, without fancy handling. - - @param doc_id: The unique document identifier - @type doc_id: str - @param include_deleted: If set to True, deleted documents will be - returned with empty content. Otherwise asking for a deleted - document will return None. - @type include_deleted: bool - - @return: a Document object. - @type: u1db.Document - """ - cdoc = self._database.get(doc_id) - if cdoc is None: - return None - has_conflicts = False - if check_for_conflicts: - has_conflicts = self._has_conflicts(doc_id) - doc = self._factory( - doc_id=doc_id, - rev=cdoc[self.COUCH_U1DB_REV_KEY], - has_conflicts=has_conflicts) - contents = self._database.get_attachment( - cdoc, - self.COUCH_U1DB_ATTACHMENT_KEY) - if contents: - doc.content = json.loads(contents.read()) - else: - doc.make_tombstone() - return doc - - def get_all_docs(self, include_deleted=False): - """ - Get the JSON content for all documents in the database. - - @param include_deleted: If set to True, deleted documents will be - returned with empty content. 
Otherwise deleted documents will not - be included in the results. - @type include_deleted: bool - - @return: (generation, [Document]) - The current generation of the database, followed by a list of all - the documents in the database. - @rtype: tuple - """ - generation = self._get_generation() - results = [] - for doc_id in self._database: - if doc_id == self.U1DB_DATA_DOC_ID: - continue - doc = self._get_doc(doc_id, check_for_conflicts=True) - if doc.content is None and not include_deleted: - continue - results.append(doc) - return (generation, results) - - def _put_doc(self, doc): - """ - Update a document. - - This is called everytime we just want to do a raw put on the db (i.e. - without index updates, document constraint checks, and conflict - checks). - - @param doc: The document to update. - @type doc: u1db.Document - - @return: The new revision identifier for the document. - @rtype: str - """ - # prepare couch's Document - cdoc = CouchDocument() - cdoc[self.COUCH_ID_KEY] = doc.doc_id - # we have to guarantee that couch's _rev is consistent - old_cdoc = self._database.get(doc.doc_id) - if old_cdoc is not None: - cdoc[self.COUCH_REV_KEY] = old_cdoc[self.COUCH_REV_KEY] - # store u1db's rev - cdoc[self.COUCH_U1DB_REV_KEY] = doc.rev - # save doc in db - self._database.save(cdoc) - # store u1db's content as json string - if not doc.is_tombstone(): - self._database.put_attachment( - cdoc, doc.get_json(), - filename=self.COUCH_U1DB_ATTACHMENT_KEY) - else: - self._database.delete_attachment( - cdoc, - self.COUCH_U1DB_ATTACHMENT_KEY) - - def get_sync_target(self): - """ - Return a SyncTarget object, for another u1db to synchronize with. - - @return: The sync target. - @rtype: CouchSyncTarget - """ - return CouchSyncTarget(self) - - def create_index(self, index_name, *index_expressions): - """ - Create a named index, which can then be queried for future lookups. - - @param index_name: A unique name which can be used as a key prefix. - @param index_expressions: Index expressions defining the index - information. - """ - if index_name in self._indexes: - if self._indexes[index_name]._definition == list( - index_expressions): - return - raise errors.IndexNameTakenError - index = InMemoryIndex(index_name, list(index_expressions)) - for doc_id in self._database: - if doc_id == self.U1DB_DATA_DOC_ID: # skip special file - continue - doc = self._get_doc(doc_id) - if doc.content is not None: - index.add_json(doc_id, doc.get_json()) - self._indexes[index_name] = index - # save data in object store - self._store_u1db_data() - - def close(self): - """ - Release any resources associated with this database. - - @return: True if db was succesfully closed. - @rtype: bool - """ - # TODO: fix this method so the connection is properly closed and - # test_close (+tearDown, which deletes the db) works without problems. - self._url = None - self._full_commit = None - self._session = None - #self._server = None - self._database = None - return True - - def sync(self, url, creds=None, autocreate=True): - """ - Synchronize documents with remote replica exposed at url. - - @param url: The url of the target replica to sync with. - @type url: str - @param creds: optional dictionary giving credentials. - to authorize the operation with the server. - @type creds: dict - @param autocreate: Ask the target to create the db if non-existent. - @type autocreate: bool - - @return: The local generation before the synchronisation was performed. 
- @rtype: int - """ - return Synchronizer(self, CouchSyncTarget(url, creds=creds)).sync( - autocreate=autocreate) - - #------------------------------------------------------------------------- - # methods from ObjectStoreDatabase - #------------------------------------------------------------------------- - - def _init_u1db_data(self): - """ - Initialize U1DB info data structure in the couch db. - - A U1DB database needs to keep track of all database transactions, - document conflicts, the generation of other replicas it has seen, - indexes created by users and so on. - - In this implementation, all this information is stored in a special - document stored in the couch db with id equals to - CouchDatabse.U1DB_DATA_DOC_ID. - - This method initializes the document that will hold such information. - """ - if self._replica_uid is None: - self._replica_uid = uuid.uuid4().hex - # TODO: prevent user from overwriting a document with the same doc_id - # as this one. - doc = self._factory(doc_id=self.U1DB_DATA_DOC_ID) - doc.content = { - self.U1DB_TRANSACTION_LOG_KEY: b64encode(json.dumps([])), - self.U1DB_CONFLICTS_KEY: b64encode(json.dumps({})), - self.U1DB_OTHER_GENERATIONS_KEY: b64encode(json.dumps({})), - self.U1DB_INDEXES_KEY: b64encode(json.dumps({})), - self.U1DB_REPLICA_UID_KEY: b64encode(self._replica_uid), - } - self._put_doc(doc) - - def _fetch_u1db_data(self): - """ - Fetch U1DB info from the couch db. - - See C{_init_u1db_data} documentation. - """ - # retrieve u1db data from couch db - cdoc = self._database.get(self.U1DB_DATA_DOC_ID) - jsonstr = self._database.get_attachment( - cdoc, self.COUCH_U1DB_ATTACHMENT_KEY).read() - content = json.loads(jsonstr) - # set u1db database info - self._transaction_log = json.loads( - b64decode(content[self.U1DB_TRANSACTION_LOG_KEY])) - self._conflicts = json.loads( - b64decode(content[self.U1DB_CONFLICTS_KEY])) - self._other_generations = json.loads( - b64decode(content[self.U1DB_OTHER_GENERATIONS_KEY])) - self._indexes = self._load_indexes_from_json( - b64decode(content[self.U1DB_INDEXES_KEY])) - self._replica_uid = b64decode(content[self.U1DB_REPLICA_UID_KEY]) - # save couch _rev - self._couch_rev = cdoc[self.COUCH_REV_KEY] - - def _store_u1db_data(self): - """ - Store U1DB info in the couch db. - - See C{_init_u1db_data} documentation. - """ - doc = self._factory(doc_id=self.U1DB_DATA_DOC_ID) - doc.content = { - # Here, the b64 encode ensures that document content - # does not cause strange behaviour in couchdb because - # of encoding. - self.U1DB_TRANSACTION_LOG_KEY: - b64encode(json.dumps(self._transaction_log)), - self.U1DB_CONFLICTS_KEY: b64encode(json.dumps(self._conflicts)), - self.U1DB_OTHER_GENERATIONS_KEY: - b64encode(json.dumps(self._other_generations)), - self.U1DB_INDEXES_KEY: b64encode(self._dump_indexes_as_json()), - self.U1DB_REPLICA_UID_KEY: b64encode(self._replica_uid), - self.COUCH_REV_KEY: self._couch_rev} - self._put_doc(doc) - - #------------------------------------------------------------------------- - # Couch specific methods - #------------------------------------------------------------------------- - - INDEX_NAME_KEY = 'name' - INDEX_DEFINITION_KEY = 'definition' - INDEX_VALUES_KEY = 'values' - - def delete_database(self): - """ - Delete a U1DB CouchDB database. - """ - del(self._server[self._dbname]) - - def _dump_indexes_as_json(self): - """ - Dump index definitions as JSON string. 
- """ - indexes = {} - for name, idx in self._indexes.iteritems(): - indexes[name] = {} - for attr in [self.INDEX_NAME_KEY, self.INDEX_DEFINITION_KEY, - self.INDEX_VALUES_KEY]: - indexes[name][attr] = getattr(idx, '_' + attr) - return json.dumps(indexes) - - def _load_indexes_from_json(self, indexes): - """ - Load index definitions from JSON string. - - @param indexes: A JSON serialization of a list of [('index-name', - ['field', 'field2'])]. - @type indexes: str - - @return: A dictionary with the index definitions. - @rtype: dict - """ - dict = {} - for name, idx_dict in json.loads(indexes).iteritems(): - idx = InMemoryIndex(name, idx_dict[self.INDEX_DEFINITION_KEY]) - idx._values = idx_dict[self.INDEX_VALUES_KEY] - dict[name] = idx - return dict - - -class CouchSyncTarget(ObjectStoreSyncTarget): - """ - Functionality for using a CouchDatabase as a synchronization target. - """ - - -class CouchServerState(ServerState): - """ - Inteface of the WSGI server with the CouchDB backend. - """ - - def __init__(self, couch_url): - self._couch_url = couch_url - - def open_database(self, dbname): - """ - Open a couch database. - - @param dbname: The name of the database to open. - @type dbname: str - - @return: The CouchDatabase object. - @rtype: CouchDatabase - """ - # TODO: open couch - return CouchDatabase.open_database( - self._couch_url + '/' + dbname, - create=False) - - def ensure_database(self, dbname): - """ - Ensure couch database exists. - - @param dbname: The name of the database to ensure. - @type dbname: str - - @return: The CouchDatabase object and the replica uid. - @rtype: (CouchDatabase, str) - """ - db = CouchDatabase.open_database( - self._couch_url + '/' + dbname, - create=True) - return db, db._replica_uid - - def delete_database(self, dbname): - """ - Delete couch database. - - @param dbname: The name of the database to delete. - @type dbname: str - """ - CouchDatabase.delete_database(self._couch_url + '/' + dbname) - - def _set_couch_url(self, url): - """ - Set the couchdb URL - - @param url: CouchDB URL - @type url: str - """ - self._couch_url = url - - def _get_couch_url(self): - """ - Return CouchDB URL - - @rtype: str - """ - return self._couch_url - - couch_url = property(_get_couch_url, _set_couch_url, doc='CouchDB URL') diff --git a/src/leap/soledad/backends/leap_backend.py b/src/leap/soledad/backends/leap_backend.py deleted file mode 100644 index 4d92db37..00000000 --- a/src/leap/soledad/backends/leap_backend.py +++ /dev/null @@ -1,544 +0,0 @@ -# -*- coding: utf-8 -*- -# leap_backend.py -# Copyright (C) 2013 LEAP -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - - -""" -A U1DB backend for encrypting data before sending to server and decrypting -after receiving. 
-""" - -import simplejson as json -import hashlib -import hmac -import binascii - - -from u1db import Document -from u1db.remote import utils -from u1db.errors import BrokenSyncStream -from u1db.remote.http_target import HTTPSyncTarget - - -from leap.soledad import soledad_assert -from leap.soledad.crypto import ( - EncryptionMethods, - UnknownEncryptionMethod, -) -from leap.soledad.auth import TokenBasedAuth - - -# -# Exceptions -# - -class DocumentNotEncrypted(Exception): - """ - Raised for failures in document encryption. - """ - pass - - -class UnknownEncryptionScheme(Exception): - """ - Raised when trying to decrypt from unknown encryption schemes. - """ - pass - - -class UnknownMacMethod(Exception): - """ - Raised when trying to authenticate document's content with unknown MAC - mehtod. - """ - pass - - -class WrongMac(Exception): - """ - Raised when failing to authenticate document's contents based on MAC. - """ - - -# -# Encryption schemes used for encryption. -# - -class EncryptionSchemes(object): - """ - Representation of encryption schemes used to encrypt documents. - """ - - NONE = 'none' - SYMKEY = 'symkey' - PUBKEY = 'pubkey' - - -class MacMethods(object): - """ - Representation of MAC methods used to authenticate document's contents. - """ - - HMAC = 'hmac' - - -# -# Crypto utilities for a LeapDocument. -# - -ENC_JSON_KEY = '_enc_json' -ENC_SCHEME_KEY = '_enc_scheme' -ENC_METHOD_KEY = '_enc_method' -ENC_IV_KEY = '_enc_iv' -MAC_KEY = '_mac' -MAC_METHOD_KEY = '_mac_method' - - -def mac_doc(crypto, doc_id, doc_rev, ciphertext, mac_method): - """ - Calculate a MAC for C{doc} using C{ciphertext}. - - Current MAC method used is HMAC, with the following parameters: - - * key: sha256(storage_secret, doc_id) - * msg: doc_id + doc_rev + ciphertext - * digestmod: sha256 - - @param crypto: A SoledadCryto instance used to perform the encryption. - @type crypto: leap.soledad.crypto.SoledadCrypto - @param doc_id: The id of the document. - @type doc_id: str - @param doc_rev: The revision of the document. - @type doc_rev: str - @param ciphertext: The content of the document. - @type ciphertext: str - @param mac_method: The MAC method to use. - @type mac_method: str - - @return: The calculated MAC. - @rtype: str - """ - if mac_method == MacMethods.HMAC: - return hmac.new( - crypto.doc_mac_key(doc_id), - str(doc_id) + str(doc_rev) + ciphertext, - hashlib.sha256).digest() - # raise if we do not know how to handle this MAC method - raise UnknownMacMethod('Unknown MAC method: %s.' % mac_method) - - -def encrypt_doc(crypto, doc): - """ - Encrypt C{doc}'s content. - - Encrypt doc's contents using AES-256 CTR mode and return a valid JSON - string representing the following: - - { - ENC_JSON_KEY: '', - ENC_SCHEME_KEY: 'symkey', - ENC_METHOD_KEY: EncryptionMethods.AES_256_CTR, - ENC_IV_KEY: '', - MAC_KEY: '' - MAC_METHOD_KEY: 'hmac' - } - - @param crypto: A SoledadCryto instance used to perform the encryption. - @type crypto: leap.soledad.crypto.SoledadCrypto - @param doc: The document with contents to be encrypted. - @type doc: LeapDocument - - @return: The JSON serialization of the dict representing the encrypted - content. - @rtype: str - """ - soledad_assert(doc.is_tombstone() is False) - # encrypt content using AES-256 CTR mode - iv, ciphertext = crypto.encrypt_sym( - doc.get_json(), - crypto.doc_passphrase(doc.doc_id), - method=EncryptionMethods.AES_256_CTR) - # Return a representation for the encrypted content. 
In the following, we - # convert binary data to hexadecimal representation so the JSON - # serialization does not complain about what it tries to serialize. - hex_ciphertext = binascii.b2a_hex(ciphertext) - return json.dumps({ - ENC_JSON_KEY: hex_ciphertext, - ENC_SCHEME_KEY: EncryptionSchemes.SYMKEY, - ENC_METHOD_KEY: EncryptionMethods.AES_256_CTR, - ENC_IV_KEY: iv, - MAC_KEY: binascii.b2a_hex(mac_doc( # store the mac as hex. - crypto, doc.doc_id, doc.rev, - ciphertext, - MacMethods.HMAC)), - MAC_METHOD_KEY: MacMethods.HMAC, - }) - - -def decrypt_doc(crypto, doc): - """ - Decrypt C{doc}'s content. - - Return the JSON string representation of the document's decrypted content. - - The content of the document should have the following structure: - - { - ENC_JSON_KEY: '', - ENC_SCHEME_KEY: '', - ENC_METHOD_KEY: '', - ENC_IV_KEY: '', # (optional) - MAC_KEY: '' - MAC_METHOD_KEY: 'hmac' - } - - C{enc_blob} is the encryption of the JSON serialization of the document's - content. For now Soledad just deals with documents whose C{enc_scheme} is - EncryptionSchemes.SYMKEY and C{enc_method} is - EncryptionMethods.AES_256_CTR. - - @param crypto: A SoledadCryto instance to perform the encryption. - @type crypto: leap.soledad.crypto.SoledadCrypto - @param doc: The document to be decrypted. - @type doc: LeapDocument - - @return: The JSON serialization of the decrypted content. - @rtype: str - """ - soledad_assert(doc.is_tombstone() is False) - soledad_assert(ENC_JSON_KEY in doc.content) - soledad_assert(ENC_SCHEME_KEY in doc.content) - soledad_assert(ENC_METHOD_KEY in doc.content) - soledad_assert(MAC_KEY in doc.content) - soledad_assert(MAC_METHOD_KEY in doc.content) - # verify MAC - ciphertext = binascii.a2b_hex( # content is stored as hex. - doc.content[ENC_JSON_KEY]) - mac = mac_doc( - crypto, doc.doc_id, doc.rev, - ciphertext, - doc.content[MAC_METHOD_KEY]) - if binascii.a2b_hex(doc.content[MAC_KEY]) != mac: # mac is stored as hex. - raise WrongMac('Could not authenticate document\'s contents.') - # decrypt doc's content - enc_scheme = doc.content[ENC_SCHEME_KEY] - plainjson = None - if enc_scheme == EncryptionSchemes.SYMKEY: - enc_method = doc.content[ENC_METHOD_KEY] - if enc_method == EncryptionMethods.AES_256_CTR: - soledad_assert(ENC_IV_KEY in doc.content) - plainjson = crypto.decrypt_sym( - ciphertext, - crypto.doc_passphrase(doc.doc_id), - method=enc_method, - iv=doc.content[ENC_IV_KEY]) - else: - raise UnknownEncryptionMethod(enc_method) - else: - raise UnknownEncryptionScheme(enc_scheme) - return plainjson - - -class LeapDocument(Document): - """ - Encryptable and syncable document. - - LEAP Documents can be flagged as syncable or not, so the replicas - might not sync every document. - """ - - def __init__(self, doc_id=None, rev=None, json='{}', has_conflicts=False, - syncable=True): - """ - Container for handling an encryptable document. - - @param doc_id: The unique document identifier. - @type doc_id: str - @param rev: The revision identifier of the document. - @type rev: str - @param json: The JSON string for this document. - @type json: str - @param has_conflicts: Boolean indicating if this document has conflicts - @type has_conflicts: bool - @param syncable: Should this document be synced with remote replicas? - @type syncable: bool - """ - Document.__init__(self, doc_id, rev, json, has_conflicts) - self._syncable = syncable - - def _get_syncable(self): - """ - Return whether this document is syncable. - - @return: Is this document syncable? 
- @rtype: bool - """ - return self._syncable - - def _set_syncable(self, syncable=True): - """ - Determine if this document should be synced with remote replicas. - - @param syncable: Should this document be synced with remote replicas? - @type syncable: bool - """ - self._syncable = syncable - - syncable = property( - _get_syncable, - _set_syncable, - doc="Determine if document should be synced with server." - ) - - def _get_rev(self): - """ - Get the document revision. - - Returning the revision as string solves the following exception in - Twisted web: - exceptions.TypeError: Can only pass-through bytes on Python 2 - - @return: The document revision. - @rtype: str - """ - if self._rev is None: - return None - return str(self._rev) - - def _set_rev(self, rev): - """ - Set document revision. - - @param rev: The new document revision. - @type rev: bytes - """ - self._rev = rev - - rev = property( - _get_rev, - _set_rev, - doc="Wrapper to ensure `doc.rev` is always returned as bytes.") - - -# -# LeapSyncTarget -# - -class LeapSyncTarget(HTTPSyncTarget, TokenBasedAuth): - """ - A SyncTarget that encrypts data before sending and decrypts data after - receiving. - """ - - # - # Token auth methods. - # - - def set_token_credentials(self, uuid, token): - """ - Store given credentials so we can sign the request later. - - @param uuid: The user's uuid. - @type uuid: str - @param token: The authentication token. - @type token: str - """ - TokenBasedAuth.set_token_credentials(self, uuid, token) - - def _sign_request(self, method, url_query, params): - """ - Return an authorization header to be included in the HTTP request. - - @param method: The HTTP method. - @type method: str - @param url_query: The URL query string. - @type url_query: str - @param params: A list with encoded query parameters. - @type param: list - - @return: The Authorization header. - @rtype: list of tuple - """ - return TokenBasedAuth._sign_request(self, method, url_query, params) - - # - # Modified HTTPSyncTarget methods. - # - - @staticmethod - def connect(url, crypto=None): - return LeapSyncTarget(url, crypto=crypto) - - def __init__(self, url, creds=None, crypto=None): - """ - Initialize the LeapSyncTarget. - - @param url: The url of the target replica to sync with. - @type url: str - @param creds: optional dictionary giving credentials. - to authorize the operation with the server. - @type creds: dict - @param soledad: An instance of Soledad so we can encrypt/decrypt - document contents when syncing. - @type soledad: soledad.Soledad - """ - HTTPSyncTarget.__init__(self, url, creds) - self._crypto = crypto - - def _parse_sync_stream(self, data, return_doc_cb, ensure_callback=None): - """ - Parse incoming synchronization stream and insert documents in the - local database. - - If an incoming document's encryption scheme is equal to - EncryptionSchemes.SYMKEY, then this method will decrypt it with - Soledad's symmetric key. - - @param data: The body of the HTTP response. - @type data: str - @param return_doc_cb: A callback to insert docs from target. - @type return_doc_cb: function - @param ensure_callback: A callback to ensure we have the correct - target_replica_uid, if it was just created. - @type ensure_callback: function - - @raise BrokenSyncStream: If C{data} is malformed. - - @return: A dictionary representing the first line of the response got - from remote replica. 
- @rtype: list of str - """ - parts = data.splitlines() # one at a time - if not parts or parts[0] != '[': - raise BrokenSyncStream - data = parts[1:-1] - comma = False - if data: - line, comma = utils.check_and_strip_comma(data[0]) - res = json.loads(line) - if ensure_callback and 'replica_uid' in res: - ensure_callback(res['replica_uid']) - for entry in data[1:]: - if not comma: # missing in between comma - raise BrokenSyncStream - line, comma = utils.check_and_strip_comma(entry) - entry = json.loads(line) - #------------------------------------------------------------- - # symmetric decryption of document's contents - #------------------------------------------------------------- - # if arriving content was symmetrically encrypted, we decrypt - # it. - doc = LeapDocument(entry['id'], entry['rev'], entry['content']) - if doc.content and ENC_SCHEME_KEY in doc.content: - if doc.content[ENC_SCHEME_KEY] == \ - EncryptionSchemes.SYMKEY: - doc.set_json(decrypt_doc(self._crypto, doc)) - #------------------------------------------------------------- - # end of symmetric decryption - #------------------------------------------------------------- - return_doc_cb(doc, entry['gen'], entry['trans_id']) - if parts[-1] != ']': - try: - partdic = json.loads(parts[-1]) - except ValueError: - pass - else: - if isinstance(partdic, dict): - self._error(partdic) - raise BrokenSyncStream - if not data or comma: # no entries or bad extra comma - raise BrokenSyncStream - return res - - def sync_exchange(self, docs_by_generations, source_replica_uid, - last_known_generation, last_known_trans_id, - return_doc_cb, ensure_callback=None): - """ - Find out which documents the remote database does not know about, - encrypt and send them. - - This does the same as the parent's method but encrypts content before - syncing. - - @param docs_by_generations: A list of (doc_id, generation, trans_id) - of local documents that were changed since the last local - generation the remote replica knows about. - @type docs_by_generations: list of tuples - @param source_replica_uid: The uid of the source replica. - @type source_replica_uid: str - @param last_known_generation: Target's last known generation. - @type last_known_generation: int - @param last_known_trans_id: Target's last known transaction id. - @type last_known_trans_id: str - @param return_doc_cb: A callback for inserting received documents from - target. - @type return_doc_cb: function - @param ensure_callback: A callback that ensures we know the target - replica uid if the target replica was just created. - @type ensure_callback: function - - @return: The new generation and transaction id of the target replica. 
- @rtype: tuple - """ - self._ensure_connection() - if self._trace_hook: # for tests - self._trace_hook('sync_exchange') - url = '%s/sync-from/%s' % (self._url.path, source_replica_uid) - self._conn.putrequest('POST', url) - self._conn.putheader('content-type', 'application/x-u1db-sync-stream') - for header_name, header_value in self._sign_request('POST', url, {}): - self._conn.putheader(header_name, header_value) - entries = ['['] - size = 1 - - def prepare(**dic): - entry = comma + '\r\n' + json.dumps(dic) - entries.append(entry) - return len(entry) - - comma = '' - size += prepare( - last_known_generation=last_known_generation, - last_known_trans_id=last_known_trans_id, - ensure=ensure_callback is not None) - comma = ',' - for doc, gen, trans_id in docs_by_generations: - # skip non-syncable docs - if isinstance(doc, LeapDocument) and not doc.syncable: - continue - #------------------------------------------------------------- - # symmetric encryption of document's contents - #------------------------------------------------------------- - doc_json = doc.get_json() - if not doc.is_tombstone(): - doc_json = encrypt_doc(self._crypto, doc) - #------------------------------------------------------------- - # end of symmetric encryption - #------------------------------------------------------------- - size += prepare(id=doc.doc_id, rev=doc.rev, - content=doc_json, - gen=gen, trans_id=trans_id) - entries.append('\r\n]') - size += len(entries[-1]) - self._conn.putheader('content-length', str(size)) - self._conn.endheaders() - for entry in entries: - self._conn.send(entry) - entries = None - data, _ = self._response() - res = self._parse_sync_stream(data, return_doc_cb, ensure_callback) - data = None - return res['new_generation'], res['new_transaction_id'] diff --git a/src/leap/soledad/backends/objectstore.py b/src/leap/soledad/backends/objectstore.py deleted file mode 100644 index 8afac3ec..00000000 --- a/src/leap/soledad/backends/objectstore.py +++ /dev/null @@ -1,296 +0,0 @@ -# -*- coding: utf-8 -*- -# objectstore.py -# Copyright (C) 2013 LEAP -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - - -""" -Abstract U1DB backend to handle storage using object stores (like CouchDB, for -example). - -Right now, this is only used by CouchDatabase backend, but can also be -extended to implement OpenStack or Amazon S3 storage, for example. - -See U1DB documentation for more information on how to use databases. -""" - -from u1db.backends.inmemory import ( - InMemoryDatabase, - InMemorySyncTarget, -) -from u1db import errors - - -class ObjectStoreDatabase(InMemoryDatabase): - """ - A backend for storing u1db data in an object store. - """ - - @classmethod - def open_database(cls, url, create, document_factory=None): - """ - Open a U1DB database using an object store as backend. - - @param url: the url of the database replica - @type url: str - @param create: should the replica be created if it does not exist? 
- @type create: bool - @param document_factory: A function that will be called with the same - parameters as Document.__init__. - @type document_factory: callable - - @return: the database instance - @rtype: CouchDatabase - """ - raise NotImplementedError(cls.open_database) - - def __init__(self, replica_uid=None, document_factory=None): - """ - Initialize the object store database. - - @param replica_uid: an optional unique replica identifier - @type replica_uid: str - @param document_factory: A function that will be called with the same - parameters as Document.__init__. - @type document_factory: callable - """ - InMemoryDatabase.__init__( - self, - replica_uid, - document_factory=document_factory) - # sync data in memory with data in object store - if not self._get_doc(self.U1DB_DATA_DOC_ID): - self._init_u1db_data() - self._fetch_u1db_data() - - #------------------------------------------------------------------------- - # methods from Database - #------------------------------------------------------------------------- - - def _set_replica_uid(self, replica_uid): - """ - Force the replica_uid to be set. - - @param replica_uid: The uid of the replica. - @type replica_uid: str - """ - InMemoryDatabase._set_replica_uid(self, replica_uid) - self._store_u1db_data() - - def _put_doc(self, doc): - """ - Update a document. - - This is called everytime we just want to do a raw put on the db (i.e. - without index updates, document constraint checks, and conflict - checks). - - @param doc: The document to update. - @type doc: u1db.Document - - @return: The new revision identifier for the document. - @rtype: str - """ - raise NotImplementedError(self._put_doc) - - def _get_doc(self, doc_id): - """ - Get just the document content, without fancy handling. - - @param doc_id: The unique document identifier - @type doc_id: str - @param include_deleted: If set to True, deleted documents will be - returned with empty content. Otherwise asking for a deleted - document will return None. - @type include_deleted: bool - - @return: a Document object. - @type: u1db.Document - """ - raise NotImplementedError(self._get_doc) - - def get_all_docs(self, include_deleted=False): - """ - Get the JSON content for all documents in the database. - - @param include_deleted: If set to True, deleted documents will be - returned with empty content. Otherwise deleted documents will not - be included in the results. - @type include_deleted: bool - - @return: (generation, [Document]) - The current generation of the database, followed by a list of all - the documents in the database. - @rtype: tuple - """ - raise NotImplementedError(self.get_all_docs) - - def delete_doc(self, doc): - """ - Mark a document as deleted. - - @param doc: The document to mark as deleted. - @type doc: u1db.Document - - @return: The new revision id of the document. - @type: str - """ - old_doc = self._get_doc(doc.doc_id, check_for_conflicts=True) - if old_doc is None: - raise errors.DocumentDoesNotExist - if old_doc.rev != doc.rev: - raise errors.RevisionConflict() - if old_doc.is_tombstone(): - raise errors.DocumentAlreadyDeleted - if old_doc.has_conflicts: - raise errors.ConflictedDoc() - new_rev = self._allocate_doc_rev(doc.rev) - doc.rev = new_rev - doc.make_tombstone() - self._put_and_update_indexes(old_doc, doc) - return new_rev - - # index-related methods - - def create_index(self, index_name, *index_expressions): - """ - Create a named index, which can then be queried for future lookups. - - See U1DB documentation for more information. 
- - @param index_name: A unique name which can be used as a key prefix. - @param index_expressions: Index expressions defining the index - information. - """ - raise NotImplementedError(self.create_index) - - def delete_index(self, index_name): - """ - Remove a named index. - - Here we just guarantee that the new info will be stored in the backend - db after update. - - @param index_name: The name of the index we are removing. - @type index_name: str - """ - InMemoryDatabase.delete_index(self, index_name) - self._store_u1db_data() - - def _replace_conflicts(self, doc, conflicts): - """ - Set new conflicts for a document. - - Here we just guarantee that the new info will be stored in the backend - db after update. - - @param doc: The document with a new set of conflicts. - @param conflicts: The new set of conflicts. - @type conflicts: list - """ - InMemoryDatabase._replace_conflicts(self, doc, conflicts) - self._store_u1db_data() - - def _do_set_replica_gen_and_trans_id(self, other_replica_uid, - other_generation, - other_transaction_id): - """ - Set the last-known generation and transaction id for the other - database replica. - - Here we just guarantee that the new info will be stored in the backend - db after update. - - @param other_replica_uid: The U1DB identifier for the other replica. - @type other_replica_uid: str - @param other_generation: The generation number for the other replica. - @type other_generation: int - @param other_transaction_id: The transaction id associated with the - generation. - @type other_transaction_id: str - """ - InMemoryDatabase._do_set_replica_gen_and_trans_id( - self, - other_replica_uid, - other_generation, - other_transaction_id) - self._store_u1db_data() - - #------------------------------------------------------------------------- - # implemented methods from CommonBackend - #------------------------------------------------------------------------- - - def _put_and_update_indexes(self, old_doc, doc): - """ - Update a document and all indexes related to it. - - @param old_doc: The old version of the document. - @type old_doc: u1db.Document - @param doc: The new version of the document. - @type doc: u1db.Document - """ - for index in self._indexes.itervalues(): - if old_doc is not None and not old_doc.is_tombstone(): - index.remove_json(old_doc.doc_id, old_doc.get_json()) - if not doc.is_tombstone(): - index.add_json(doc.doc_id, doc.get_json()) - trans_id = self._allocate_transaction_id() - self._put_doc(doc) - self._transaction_log.append((doc.doc_id, trans_id)) - self._store_u1db_data() - - #------------------------------------------------------------------------- - # methods specific for object stores - #------------------------------------------------------------------------- - - U1DB_DATA_DOC_ID = 'u1db_data' - - def _fetch_u1db_data(self): - """ - Fetch u1db configuration data from backend storage. - - See C{_init_u1db_data} documentation. - """ - NotImplementedError(self._fetch_u1db_data) - - def _store_u1db_data(self): - """ - Store u1db configuration data on backend storage. - - See C{_init_u1db_data} documentation. - """ - NotImplementedError(self._store_u1db_data) - - def _init_u1db_data(self): - """ - Initialize u1db configuration data on backend storage. - - A U1DB database needs to keep track of all database transactions, - document conflicts, the generation of other replicas it has seen, - indexes created by users and so on. 
- - In this implementation, all this information is stored in a special - document stored in the couch db with id equals to - CouchDatabse.U1DB_DATA_DOC_ID. - - This method initializes the document that will hold such information. - """ - NotImplementedError(self._init_u1db_data) - - -class ObjectStoreSyncTarget(InMemorySyncTarget): - """ - Functionality for using an ObjectStore as a synchronization target. - """ diff --git a/src/leap/soledad/backends/sqlcipher.py b/src/leap/soledad/backends/sqlcipher.py deleted file mode 100644 index d6d62f21..00000000 --- a/src/leap/soledad/backends/sqlcipher.py +++ /dev/null @@ -1,653 +0,0 @@ -# -*- coding: utf-8 -*- -# sqlcipher.py -# Copyright (C) 2013 LEAP -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - - -""" -A U1DB backend that uses SQLCipher as its persistence layer. - -The SQLCipher API (http://sqlcipher.net/sqlcipher-api/) is fully implemented, -with the exception of the following statements: - - * PRAGMA cipher_use_hmac - * PRAGMA cipher_default_use_mac - -SQLCipher 2.0 introduced a per-page HMAC to validate that the page data has -not be tampered with. By default, when creating or opening a database using -SQLCipher 2, SQLCipher will attempt to use an HMAC check. This change in -database format means that SQLCipher 2 can't operate on version 1.1.x -databases by default. Thus, in order to provide backward compatibility with -SQLCipher 1.1.x, PRAGMA cipher_use_hmac can be used to disable the HMAC -functionality on specific databases. - -In some very specific cases, it is not possible to call PRAGMA cipher_use_hmac -as one of the first operations on a database. An example of this is when -trying to ATTACH a 1.1.x database to the main database. In these cases PRAGMA -cipher_default_use_hmac can be used to globally alter the default use of HMAC -when opening a database. - -So, as the statements above were introduced for backwards compatibility with -SLCipher 1.1 databases, we do not implement them as all SQLCipher databases -handled by Soledad should be created by SQLCipher >= 2.0. -""" - -import os -import time -import string - - -from u1db.backends import sqlite_backend -from pysqlcipher import dbapi2 -from u1db import ( - errors, -) -from leap.soledad.backends.leap_backend import LeapDocument - - -# Monkey-patch u1db.backends.sqlite_backend with pysqlcipher.dbapi2 -sqlite_backend.dbapi2 = dbapi2 - - -def open(path, password, create=True, document_factory=None, crypto=None, - raw_key=False, cipher='aes-256-cbc', kdf_iter=4000, - cipher_page_size=1024): - """Open a database at the given location. - - Will raise u1db.errors.DatabaseDoesNotExist if create=False and the - database does not already exist. - - @param path: The filesystem path for the database to open. - @param type: str - @param create: True/False, should the database be created if it doesn't - already exist? 
- @param type: bool - @param document_factory: A function that will be called with the same - parameters as Document.__init__. - @type document_factory: callable - @param crypto: An instance of SoledadCrypto so we can encrypt/decrypt - document contents when syncing. - @type crypto: soledad.crypto.SoledadCrypto - @param raw_key: Whether C{password} is a raw 64-char hex string or a - passphrase that should be hashed to obtain the encyrption key. - @type raw_key: bool - @param cipher: The cipher and mode to use. - @type cipher: str - @param kdf_iter: The number of iterations to use. - @type kdf_iter: int - @param cipher_page_size: The page size. - @type cipher_page_size: int - - @return: An instance of Database. - @rtype SQLCipherDatabase - """ - return SQLCipherDatabase.open_database( - path, password, create=create, document_factory=document_factory, - crypto=crypto, raw_key=raw_key, cipher=cipher, kdf_iter=kdf_iter, - cipher_page_size=cipher_page_size) - - -# -# Exceptions -# - -class DatabaseIsNotEncrypted(Exception): - """ - Exception raised when trying to open non-encrypted databases. - """ - pass - - -class NotAnHexString(Exception): - """ - Raised when trying to (raw) key the database with a non-hex string. - """ - pass - - -# -# The SQLCipher database -# - -class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase): - """A U1DB implementation that uses SQLCipher as its persistence layer.""" - - _index_storage_value = 'expand referenced encrypted' - - def __init__(self, sqlcipher_file, password, document_factory=None, - crypto=None, raw_key=False, cipher='aes-256-cbc', - kdf_iter=4000, cipher_page_size=1024): - """ - Create a new sqlcipher file. - - @param sqlcipher_file: The path for the SQLCipher file. - @type sqlcipher_file: str - @param password: The password that protects the SQLCipher db. - @type password: str - @param document_factory: A function that will be called with the same - parameters as Document.__init__. - @type document_factory: callable - @param crypto: An instance of SoledadCrypto so we can encrypt/decrypt - document contents when syncing. - @type crypto: soledad.crypto.SoledadCrypto - @param raw_key: Whether C{password} is a raw 64-char hex string or a - passphrase that should be hashed to obtain the encyrption key. - @type raw_key: bool - @param cipher: The cipher and mode to use. - @type cipher: str - @param kdf_iter: The number of iterations to use. - @type kdf_iter: int - @param cipher_page_size: The page size. - @type cipher_page_size: int - """ - # ensure the db is encrypted if the file already exists - if os.path.exists(sqlcipher_file): - self.assert_db_is_encrypted( - sqlcipher_file, password, raw_key, cipher, kdf_iter, - cipher_page_size) - # connect to the database - self._db_handle = dbapi2.connect(sqlcipher_file) - # set SQLCipher cryptographic parameters - self._set_crypto_pragmas( - self._db_handle, password, raw_key, cipher, kdf_iter, - cipher_page_size) - self._real_replica_uid = None - self._ensure_schema() - self._crypto = crypto - - def factory(doc_id=None, rev=None, json='{}', has_conflicts=False, - syncable=True): - return LeapDocument(doc_id=doc_id, rev=rev, json=json, - has_conflicts=has_conflicts, - syncable=syncable) - self.set_document_factory(factory) - - @classmethod - def _open_database(cls, sqlcipher_file, password, document_factory=None, - crypto=None, raw_key=False, cipher='aes-256-cbc', - kdf_iter=4000, cipher_page_size=1024): - """ - Open a SQLCipher database. - - @param sqlcipher_file: The path for the SQLCipher file. 
- @type sqlcipher_file: str - @param password: The password that protects the SQLCipher db. - @type password: str - @param document_factory: A function that will be called with the same - parameters as Document.__init__. - @type document_factory: callable - @param crypto: An instance of SoledadCrypto so we can encrypt/decrypt - document contents when syncing. - @type crypto: soledad.crypto.SoledadCrypto - @param raw_key: Whether C{password} is a raw 64-char hex string or a - passphrase that should be hashed to obtain the encyrption key. - @type raw_key: bool - @param cipher: The cipher and mode to use. - @type cipher: str - @param kdf_iter: The number of iterations to use. - @type kdf_iter: int - @param cipher_page_size: The page size. - @type cipher_page_size: int - - @return: The database object. - @rtype: SQLCipherDatabase - """ - if not os.path.isfile(sqlcipher_file): - raise errors.DatabaseDoesNotExist() - tries = 2 - while True: - # Note: There seems to be a bug in sqlite 3.5.9 (with python2.6) - # where without re-opening the database on Windows, it - # doesn't see the transaction that was just committed - db_handle = dbapi2.connect(sqlcipher_file) - # set cryptographic params - cls._set_crypto_pragmas( - db_handle, password, raw_key, cipher, kdf_iter, - cipher_page_size) - c = db_handle.cursor() - v, err = cls._which_index_storage(c) - db_handle.close() - if v is not None: - break - # possibly another process is initializing it, wait for it to be - # done - if tries == 0: - raise err # go for the richest error? - tries -= 1 - time.sleep(cls.WAIT_FOR_PARALLEL_INIT_HALF_INTERVAL) - return SQLCipherDatabase._sqlite_registry[v]( - sqlcipher_file, password, document_factory=document_factory, - crypto=crypto, raw_key=raw_key, cipher=cipher, kdf_iter=kdf_iter, - cipher_page_size=cipher_page_size) - - @classmethod - def open_database(cls, sqlcipher_file, password, create, backend_cls=None, - document_factory=None, crypto=None, raw_key=False, - cipher='aes-256-cbc', kdf_iter=4000, - cipher_page_size=1024): - """ - Open a SQLCipher database. - - @param sqlcipher_file: The path for the SQLCipher file. - @type sqlcipher_file: str - @param password: The password that protects the SQLCipher db. - @type password: str - @param create: Should the datbase be created if it does not already - exist? - @type: bool - @param backend_cls: A class to use as backend. - @type backend_cls: type - @param document_factory: A function that will be called with the same - parameters as Document.__init__. - @type document_factory: callable - @param crypto: An instance of SoledadCrypto so we can encrypt/decrypt - document contents when syncing. - @type crypto: soledad.crypto.SoledadCrypto - @param raw_key: Whether C{password} is a raw 64-char hex string or a - passphrase that should be hashed to obtain the encyrption key. - @type raw_key: bool - @param cipher: The cipher and mode to use. - @type cipher: str - @param kdf_iter: The number of iterations to use. - @type kdf_iter: int - @param cipher_page_size: The page size. - @type cipher_page_size: int - - @return: The database object. - @rtype: SQLCipherDatabase - """ - try: - return cls._open_database( - sqlcipher_file, password, document_factory=document_factory, - crypto=crypto, raw_key=raw_key, cipher=cipher, - kdf_iter=kdf_iter, cipher_page_size=cipher_page_size) - except errors.DatabaseDoesNotExist: - if not create: - raise - # TODO: remove backend class from here. 
- if backend_cls is None: - # default is SQLCipherPartialExpandDatabase - backend_cls = SQLCipherDatabase - return backend_cls( - sqlcipher_file, password, document_factory=document_factory, - crypto=crypto, raw_key=raw_key, cipher=cipher, - kdf_iter=kdf_iter, cipher_page_size=cipher_page_size) - - def sync(self, url, creds=None, autocreate=True): - """ - Synchronize documents with remote replica exposed at url. - - @param url: The url of the target replica to sync with. - @type url: str - @param creds: optional dictionary giving credentials. - to authorize the operation with the server. - @type creds: dict - @param autocreate: Ask the target to create the db if non-existent. - @type autocreate: bool - - @return: The local generation before the synchronisation was performed. - @rtype: int - """ - from u1db.sync import Synchronizer - from leap.soledad.backends.leap_backend import LeapSyncTarget - return Synchronizer( - self, - LeapSyncTarget(url, - creds=creds, - crypto=self._crypto)).sync(autocreate=autocreate) - - def _extra_schema_init(self, c): - """ - Add any extra fields, etc to the basic table definitions. - - @param c: The cursor for querying the database. - @type c: dbapi2.cursor - """ - c.execute( - 'ALTER TABLE document ' - 'ADD COLUMN syncable BOOL NOT NULL DEFAULT TRUE') - - def _put_and_update_indexes(self, old_doc, doc): - """ - Update a document and all indexes related to it. - - @param old_doc: The old version of the document. - @type old_doc: u1db.Document - @param doc: The new version of the document. - @type doc: u1db.Document - """ - sqlite_backend.SQLitePartialExpandDatabase._put_and_update_indexes( - self, old_doc, doc) - c = self._db_handle.cursor() - c.execute('UPDATE document SET syncable=? ' - 'WHERE doc_id=?', - (doc.syncable, doc.doc_id)) - - def _get_doc(self, doc_id, check_for_conflicts=False): - """ - Get just the document content, without fancy handling. - - @param doc_id: The unique document identifier - @type doc_id: str - @param include_deleted: If set to True, deleted documents will be - returned with empty content. Otherwise asking for a deleted - document will return None. - @type include_deleted: bool - - @return: a Document object. - @type: u1db.Document - """ - doc = sqlite_backend.SQLitePartialExpandDatabase._get_doc( - self, doc_id, check_for_conflicts) - if doc: - c = self._db_handle.cursor() - c.execute('SELECT syncable FROM document ' - 'WHERE doc_id=?', - (doc.doc_id,)) - result = c.fetchone() - doc.syncable = bool(result[0]) - return doc - - # - # SQLCipher API methods - # - - @classmethod - def assert_db_is_encrypted(cls, sqlcipher_file, key, raw_key, cipher, - kdf_iter, cipher_page_size): - """ - Assert that C{sqlcipher_file} contains an encrypted database. - - When opening an existing database, PRAGMA key will not immediately - throw an error if the key provided is incorrect. To test that the - database can be successfully opened with the provided key, it is - necessary to perform some operation on the database (i.e. read from - it) and confirm it is success. - - The easiest way to do this is select off the sqlite_master table, - which will attempt to read the first page of the database and will - parse the schema. - - @param sqlcipher_file: The path for the SQLCipher file. - @type sqlcipher_file: str - @param key: The key that protects the SQLCipher db. - @type key: str - @param raw_key: Whether C{key} is a raw 64-char hex string or a - passphrase that should be hashed to obtain the encyrption key. 
- @type raw_key: bool - @param cipher: The cipher and mode to use. - @type cipher: str - @param kdf_iter: The number of iterations to use. - @type kdf_iter: int - @param cipher_page_size: The page size. - @type cipher_page_size: int - """ - try: - # try to open an encrypted database with the regular u1db - # backend should raise a DatabaseError exception. - sqlite_backend.SQLitePartialExpandDatabase(sqlcipher_file) - raise DatabaseIsNotEncrypted() - except dbapi2.DatabaseError: - # assert that we can access it using SQLCipher with the given - # key - db_handle = dbapi2.connect(sqlcipher_file) - cls._set_crypto_pragmas( - db_handle, key, raw_key, cipher, kdf_iter, cipher_page_size) - db_handle.cursor().execute('SELECT count(*) FROM sqlite_master') - - @classmethod - def _set_crypto_pragmas(cls, db_handle, key, raw_key, cipher, kdf_iter, - cipher_page_size): - """ - Set cryptographic params (key, cipher, KDF number of iterations and - cipher page size). - """ - cls._pragma_key(db_handle, key, raw_key) - cls._pragma_cipher(db_handle, cipher) - cls._pragma_kdf_iter(db_handle, kdf_iter) - cls._pragma_cipher_page_size(db_handle, cipher_page_size) - - @classmethod - def _pragma_key(cls, db_handle, key, raw_key): - """ - Set the C{key} for use with the database. - - The process of creating a new, encrypted database is called 'keying' - the database. SQLCipher uses just-in-time key derivation at the point - it is first needed for an operation. This means that the key (and any - options) must be set before the first operation on the database. As - soon as the database is touched (e.g. SELECT, CREATE TABLE, UPDATE, - etc.) and pages need to be read or written, the key is prepared for - use. - - Implementation Notes: - - * PRAGMA key should generally be called as the first operation on a - database. - - @param key: The key for use with the database. - @type key: str - @param raw_key: Whether C{key} is a raw 64-char hex string or a - passphrase that should be hashed to obtain the encyrption key. - @type raw_key: bool - """ - if raw_key: - cls._pragma_key_raw(db_handle, key) - else: - cls._pragma_key_passphrase(db_handle, key) - - @classmethod - def _pragma_key_passphrase(cls, db_handle, passphrase): - """ - Set a passphrase for encryption key derivation. - - The key itself can be a passphrase, which is converted to a key using - PBKDF2 key derivation. The result is used as the encryption key for - the database. By using this method, there is no way to alter the KDF; - if you want to do so you should use a raw key instead and derive the - key using your own KDF. - - @param db_handle: A handle to the SQLCipher database. - @type db_handle: pysqlcipher.Connection - @param passphrase: The passphrase used to derive the encryption key. - @type passphrase: str - """ - db_handle.cursor().execute("PRAGMA key = '%s'" % passphrase) - - @classmethod - def _pragma_key_raw(cls, db_handle, key): - """ - Set a raw hexadecimal encryption key. - - It is possible to specify an exact byte sequence using a blob literal. - With this method, it is the calling application's responsibility to - ensure that the data provided is a 64 character hex string, which will - be converted directly to 32 bytes (256 bits) of key data. - - @param db_handle: A handle to the SQLCipher database. - @type db_handle: pysqlcipher.Connection - @param key: A 64 character hex string. 
-    @classmethod
-    def _pragma_cipher(cls, db_handle, cipher='aes-256-cbc'):
-        """
-        Set the cipher and mode to use for symmetric encryption.
-
-        SQLCipher uses aes-256-cbc as the default cipher and mode of
-        operation. It is possible to change this, though not generally
-        recommended, using PRAGMA cipher.
-
-        SQLCipher makes direct use of libssl, so all cipher options
-        available to libssl are also available for use with SQLCipher. See
-        `man enc` for OpenSSL's supported ciphers.
-
-        Implementation Notes:
-
-        * PRAGMA cipher must be called after PRAGMA key and before the first
-          actual database operation or it will have no effect.
-
-        * If a non-default value is used with PRAGMA cipher to create a
-          database, it must also be called every time that database is
-          opened.
-
-        * SQLCipher does not implement its own encryption. Instead it uses
-          the widely available and peer-reviewed OpenSSL libcrypto for all
-          cryptographic functions.
-
-        @param db_handle: A handle to the SQLCipher database.
-        @type db_handle: pysqlcipher.Connection
-        @param cipher: The cipher and mode to use.
-        @type cipher: str
-        """
-        db_handle.cursor().execute("PRAGMA cipher = '%s'" % cipher)
-
-    @classmethod
-    def _pragma_kdf_iter(cls, db_handle, kdf_iter=4000):
-        """
-        Set the number of iterations for the key derivation function.
-
-        SQLCipher uses PBKDF2 key derivation to strengthen the key and make
-        it resistant to brute force and dictionary attacks. The default
-        configuration uses 4000 PBKDF2 iterations (effectively 16,000 SHA1
-        operations). PRAGMA kdf_iter can be used to increase or decrease the
-        number of iterations used.
-
-        Implementation Notes:
-
-        * PRAGMA kdf_iter must be called after PRAGMA key and before the
-          first actual database operation or it will have no effect.
-
-        * If a non-default value is used with PRAGMA kdf_iter to create a
-          database, it must also be called every time that database is
-          opened.
-
-        * It is not recommended to reduce the number of iterations if a
-          passphrase is in use.
-
-        @param db_handle: A handle to the SQLCipher database.
-        @type db_handle: pysqlcipher.Connection
-        @param kdf_iter: The number of iterations to use.
-        @type kdf_iter: int
-        """
-        db_handle.cursor().execute("PRAGMA kdf_iter = '%d'" % kdf_iter)
-
-    @classmethod
-    def _pragma_cipher_page_size(cls, db_handle, cipher_page_size=1024):
-        """
-        Set the page size of the encrypted database.
-
-        SQLCipher 2 introduced the new PRAGMA cipher_page_size that can be
-        used to adjust the page size for the encrypted database. The default
-        page size is 1024 bytes, but it can be desirable for some
-        applications to use a larger page size for increased performance.
-        For instance, some recent testing shows that increasing the page
-        size can noticeably improve performance (5-30%) for certain queries
-        that manipulate a large number of pages (e.g. selects without an
-        index, large inserts in a transaction, big deletes).
-
-        To adjust the page size, call the pragma immediately after setting
-        the key for the first time and each subsequent time that you open
-        the database.
-
-        Implementation Notes:
-
-        * PRAGMA cipher_page_size must be called after PRAGMA key and before
-          the first actual database operation or it will have no effect.
-
-        * If a non-default value is used with PRAGMA cipher_page_size to
-          create a database, it must also be called every time that database
-          is opened.
-
-        @param db_handle: A handle to the SQLCipher database.
-        @type db_handle: pysqlcipher.Connection
-        @param cipher_page_size: The page size.
-        @type cipher_page_size: int
-        """
-        db_handle.cursor().execute(
-            "PRAGMA cipher_page_size = '%d'" % cipher_page_size)
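
Aside (not part of the patch): the ordering constraint repeated in the
implementation notes above, shown as a single sketch. Every pragma must
follow PRAGMA key and precede the first real operation, and non-default
values must be re-issued on every open. Assumes pysqlcipher; path,
passphrase and values are placeholders.

    from pysqlcipher import dbapi2

    conn = dbapi2.connect('/tmp/example.db')
    cur = conn.cursor()
    cur.execute("PRAGMA key = 'my passphrase'")         # 1. key first
    cur.execute("PRAGMA cipher = 'aes-256-cbc'")        # 2. cipher and mode
    cur.execute("PRAGMA kdf_iter = 4000")               # 3. KDF iterations
    cur.execute("PRAGMA cipher_page_size = 1024")       # 4. page size
    cur.execute('SELECT count(*) FROM sqlite_master')   # first real operation
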
-    @classmethod
-    def _pragma_rekey(cls, db_handle, new_key, raw_key):
-        """
-        Change the key of an existing encrypted database.
-
-        To change the key on an existing encrypted database, it must first
-        be unlocked with the current encryption key. Once the database is
-        readable and writeable, PRAGMA rekey can be used to re-encrypt every
-        page in the database with a new key.
-
-        * PRAGMA rekey must be called after PRAGMA key. It can be called at
-          any time once the database is readable.
-
-        * PRAGMA rekey cannot be used to encrypt a standard SQLite database!
-          It is only useful for changing the key on an existing database.
-
-        * Previous versions of SQLCipher provided a PRAGMA rekey_cipher and
-          PRAGMA rekey_kdf_iter. These are deprecated and should not be
-          used. Instead, use sqlcipher_export().
-
-        @param db_handle: A handle to the SQLCipher database.
-        @type db_handle: pysqlcipher.Connection
-        @param new_key: The new key.
-        @type new_key: str
-        @param raw_key: Whether C{new_key} is a raw 64-char hex string or a
-            passphrase that should be hashed to obtain the encryption key.
-        @type raw_key: bool
-        """
-        if raw_key:
-            cls._pragma_rekey_raw(db_handle, new_key)
-        else:
-            cls._pragma_rekey_passphrase(db_handle, new_key)
-
-    @classmethod
-    def _pragma_rekey_passphrase(cls, db_handle, passphrase):
-        """
-        Change the passphrase for encryption key derivation.
-
-        The key itself can be a passphrase, which is converted to a key
-        using PBKDF2 key derivation. The result is used as the encryption
-        key for the database.
-
-        @param db_handle: A handle to the SQLCipher database.
-        @type db_handle: pysqlcipher.Connection
-        @param passphrase: The passphrase used to derive the encryption key.
-        @type passphrase: str
-        """
-        db_handle.cursor().execute("PRAGMA rekey = '%s'" % passphrase)
-
-    @classmethod
-    def _pragma_rekey_raw(cls, db_handle, key):
-        """
-        Change the raw hexadecimal encryption key.
-
-        It is possible to specify an exact byte sequence using a blob
-        literal. With this method, it is the calling application's
-        responsibility to ensure that the data provided is a 64 character
-        hex string, which will be converted directly to 32 bytes (256 bits)
-        of key data.
-
-        @param db_handle: A handle to the SQLCipher database.
-        @type db_handle: pysqlcipher.Connection
-        @param key: A 64 character hex string.
-        @type key: str
-        """
-        if not all(c in string.hexdigits for c in key):
-            raise NotAnHexString(key)
-        db_handle.cursor().execute('PRAGMA rekey = "x\'%s\'"' % key)
-
-
-sqlite_backend.SQLiteDatabase.register_implementation(SQLCipherDatabase)
-- 
cgit v1.2.3
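
Aside (not part of the patch): a minimal sketch of the rekeying flow the
deleted _pragma_rekey helpers implement: unlock with the current key, touch
the database to make sure it is readable, then issue PRAGMA rekey so every
page is re-encrypted with the new key. Assumes pysqlcipher; path and
passphrases are placeholders.

    from pysqlcipher import dbapi2

    conn = dbapi2.connect('/tmp/example.db')
    cur = conn.cursor()
    cur.execute("PRAGMA key = 'old passphrase'")        # unlock with current key
    cur.execute('SELECT count(*) FROM sqlite_master')   # confirm it is readable
    cur.execute("PRAGMA rekey = 'new passphrase'")      # re-encrypt with new key
    conn.close()
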