Diffstat (limited to 'soledad/backends')
-rw-r--r--  soledad/backends/__init__.py       9
-rw-r--r--  soledad/backends/couch.py        270
-rw-r--r--  soledad/backends/leap_backend.py 224
-rw-r--r--  soledad/backends/objectstore.py  135
-rw-r--r--  soledad/backends/sqlcipher.py    163
5 files changed, 801 insertions, 0 deletions
diff --git a/soledad/backends/__init__.py b/soledad/backends/__init__.py
new file mode 100644
index 00000000..61438e8a
--- /dev/null
+++ b/soledad/backends/__init__.py
@@ -0,0 +1,9 @@
+"""
+Backends that extend U1DB functionality.
+"""
+
+import objectstore
+
+
+__all__ = [
+ 'objectstore']
diff --git a/soledad/backends/couch.py b/soledad/backends/couch.py
new file mode 100644
index 00000000..b7a77054
--- /dev/null
+++ b/soledad/backends/couch.py
@@ -0,0 +1,270 @@
+"""A U1DB backend that uses CouchDB as its persistence layer."""
+
+# general imports
+import uuid
+from base64 import b64encode, b64decode
+import re
+# u1db
+from u1db import errors
+from u1db.sync import LocalSyncTarget
+from u1db.backends.inmemory import InMemoryIndex
+from u1db.remote.server_state import ServerState
+from u1db.errors import DatabaseDoesNotExist
+# couchdb
+from couchdb.client import Server, Document as CouchDocument
+from couchdb.http import ResourceNotFound
+# leap
+from leap.soledad.backends.objectstore import (
+ ObjectStoreDatabase,
+ ObjectStoreSyncTarget,
+)
+from leap.soledad.backends.leap_backend import LeapDocument
+
+try:
+ import simplejson as json
+except ImportError:
+ import json # noqa
+
+
+class InvalidURLError(Exception):
+ """Exception raised when Soledad encounters a malformed URL."""
+ pass
+
+
+class CouchDatabase(ObjectStoreDatabase):
+ """A U1DB backend that uses Couch as its persistence layer."""
+
+ @classmethod
+ def open_database(cls, url, create):
+ """Open a U1DB database using CouchDB as backend."""
+ # get database from url
+ m = re.match('(^https?://[^/]+)/(.+)$', url)
+ if not m:
+ raise InvalidURLError
+ url = m.group(1)
+ dbname = m.group(2)
+ server = Server(url=url)
+ try:
+ server[dbname]
+ except ResourceNotFound:
+ if not create:
+ raise DatabaseDoesNotExist()
+ return cls(url, dbname)
+
+ def __init__(self, url, database, replica_uid=None, full_commit=True,
+ session=None):
+ """Create a new Couch data container."""
+ self._url = url
+ self._full_commit = full_commit
+ self._session = session
+ self._server = Server(url=self._url,
+ full_commit=self._full_commit,
+ session=self._session)
+ self._dbname = database
+ # this will ensure that transaction and sync logs exist and are
+ # up-to-date.
+ try:
+ self._database = self._server[database]
+ except ResourceNotFound:
+ self._server.create(database)
+ self._database = self._server[database]
+ super(CouchDatabase, self).__init__(replica_uid=replica_uid,
+ document_factory=LeapDocument)
+
+ #-------------------------------------------------------------------------
+ # methods from Database
+ #-------------------------------------------------------------------------
+
+ def _get_doc(self, doc_id, check_for_conflicts=False):
+ """Get just the document content, without fancy handling."""
+ cdoc = self._database.get(doc_id)
+ if cdoc is None:
+ return None
+ has_conflicts = False
+ if check_for_conflicts:
+ has_conflicts = self._has_conflicts(doc_id)
+ doc = self._factory(
+ doc_id=doc_id,
+ rev=cdoc['u1db_rev'],
+ has_conflicts=has_conflicts)
+ contents = self._database.get_attachment(cdoc, 'u1db_json')
+ if contents:
+ doc.content = json.loads(contents.getvalue())
+ else:
+ doc.make_tombstone()
+ return doc
+
+ def get_all_docs(self, include_deleted=False):
+ """Get the JSON content for all documents in the database."""
+ generation = self._get_generation()
+ results = []
+ for doc_id in self._database:
+ if doc_id == self.U1DB_DATA_DOC_ID:
+ continue
+ doc = self._get_doc(doc_id, check_for_conflicts=True)
+ if doc.content is None and not include_deleted:
+ continue
+ results.append(doc)
+ return (generation, results)
+
+ def _put_doc(self, doc):
+ """Store document in database."""
+ # prepare couch's Document
+ cdoc = CouchDocument()
+ cdoc['_id'] = doc.doc_id
+        # we have to guarantee that couch's _rev is consistent
+ old_cdoc = self._database.get(doc.doc_id)
+ if old_cdoc is not None:
+ cdoc['_rev'] = old_cdoc['_rev']
+ # store u1db's rev
+ cdoc['u1db_rev'] = doc.rev
+ # save doc in db
+ self._database.save(cdoc)
+ # store u1db's content as json string
+ if not doc.is_tombstone():
+ self._database.put_attachment(cdoc, doc.get_json(),
+ filename='u1db_json')
+ else:
+ self._database.delete_attachment(cdoc, 'u1db_json')
+
+ def get_sync_target(self):
+ """
+ Return a SyncTarget object, for another u1db to synchronize with.
+ """
+ return CouchSyncTarget(self)
+
+ def create_index(self, index_name, *index_expressions):
+ """
+ Create a named index, which can then be queried for future lookups.
+ """
+ if index_name in self._indexes:
+ if self._indexes[index_name]._definition == list(
+ index_expressions):
+ return
+ raise errors.IndexNameTakenError
+ index = InMemoryIndex(index_name, list(index_expressions))
+ for doc_id in self._database:
+ if doc_id == self.U1DB_DATA_DOC_ID:
+ continue
+ doc = self._get_doc(doc_id)
+ if doc.content is not None:
+ index.add_json(doc_id, doc.get_json())
+ self._indexes[index_name] = index
+ # save data in object store
+ self._store_u1db_data()
+
+ def close(self):
+ """Release any resources associated with this database."""
+ # TODO: fix this method so the connection is properly closed and
+ # test_close (+tearDown, which deletes the db) works without problems.
+ self._url = None
+ self._full_commit = None
+ self._session = None
+ #self._server = None
+ self._database = None
+ return True
+
+    def sync(self, url, creds=None, autocreate=True):
+        """Synchronize documents with remote replica exposed at url."""
+        from u1db.sync import Synchronizer
+        # CouchSyncTarget derives from LocalSyncTarget and so wraps a
+        # database object, not a URL; open the target database first
+        # (creds are not used for direct couch-to-couch sync).
+        target = CouchSyncTarget(
+            CouchDatabase.open_database(url, create=autocreate))
+        return Synchronizer(self, target).sync(autocreate=autocreate)
+
+ #-------------------------------------------------------------------------
+ # methods from ObjectStoreDatabase
+ #-------------------------------------------------------------------------
+
+ def _init_u1db_data(self):
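+        """Initialize u1db metadata and store it in the database."""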
+ if self._replica_uid is None:
+ self._replica_uid = uuid.uuid4().hex
+ doc = self._factory(doc_id=self.U1DB_DATA_DOC_ID)
+ doc.content = {'transaction_log': [],
+ 'conflicts': b64encode(json.dumps({})),
+ 'other_generations': {},
+ 'indexes': b64encode(json.dumps({})),
+ 'replica_uid': self._replica_uid}
+ self._put_doc(doc)
+
+ def _fetch_u1db_data(self):
+ # retrieve u1db data from couch db
+ cdoc = self._database.get(self.U1DB_DATA_DOC_ID)
+ jsonstr = self._database.get_attachment(cdoc, 'u1db_json').getvalue()
+ content = json.loads(jsonstr)
+ # set u1db database info
+ #self._sync_log = content['sync_log']
+ self._transaction_log = content['transaction_log']
+ self._conflicts = json.loads(b64decode(content['conflicts']))
+ self._other_generations = content['other_generations']
+ self._indexes = self._load_indexes_from_json(
+ b64decode(content['indexes']))
+ self._replica_uid = content['replica_uid']
+ # save couch _rev
+ self._couch_rev = cdoc['_rev']
+
+ def _store_u1db_data(self):
+ doc = self._factory(doc_id=self.U1DB_DATA_DOC_ID)
+ doc.content = {
+ 'transaction_log': self._transaction_log,
+ # Here, the b64 encode ensures that document content
+ # does not cause strange behaviour in couchdb because
+ # of encoding.
+ 'conflicts': b64encode(json.dumps(self._conflicts)),
+ 'other_generations': self._other_generations,
+ 'indexes': b64encode(self._dump_indexes_as_json()),
+ 'replica_uid': self._replica_uid,
+ '_rev': self._couch_rev}
+ self._put_doc(doc)
+
+ #-------------------------------------------------------------------------
+ # Couch specific methods
+ #-------------------------------------------------------------------------
+
+ def delete_database(self):
+ """Delete a U1DB CouchDB database."""
+        del self._server[self._dbname]
+
+ def _dump_indexes_as_json(self):
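+        """Serialize the in-memory indexes as a JSON string."""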
+ indexes = {}
+ for name, idx in self._indexes.iteritems():
+ indexes[name] = {}
+ for attr in ['name', 'definition', 'values']:
+ indexes[name][attr] = getattr(idx, '_' + attr)
+ return json.dumps(indexes)
+
+    def _load_indexes_from_json(self, indexes):
+        """Load index definitions from a JSON string into memory."""
+        loaded = {}
+        for name, idx_dict in json.loads(indexes).iteritems():
+            idx = InMemoryIndex(name, idx_dict['definition'])
+            idx._values = idx_dict['values']
+            loaded[name] = idx
+        return loaded
+
+
+class CouchSyncTarget(ObjectStoreSyncTarget):
+ pass
+
+
+class CouchServerState(ServerState):
+ """Inteface of the WSGI server with the CouchDB backend."""
+
+ def __init__(self, couch_url):
+ self.couch_url = couch_url
+
+ def open_database(self, dbname):
+ """Open a database at the given location."""
+ # TODO: open couch
+ from leap.soledad.backends.couch import CouchDatabase
+ return CouchDatabase.open_database(self.couch_url + '/' + dbname,
+ create=False)
+
+ def ensure_database(self, dbname):
+ """Ensure database at the given location."""
+ from leap.soledad.backends.couch import CouchDatabase
+ db = CouchDatabase.open_database(self.couch_url + '/' + dbname,
+ create=True)
+ return db, db._replica_uid
+
+    def delete_database(self, dbname):
+        """Delete database at the given location."""
+        from leap.soledad.backends.couch import CouchDatabase
+        # delete_database() is an instance method, so open the db first.
+        db = CouchDatabase.open_database(self.couch_url + '/' + dbname,
+                                         create=False)
+        db.delete_database()
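
A minimal usage sketch of the couch backend above, for orientation. The
server URL, database name and document content are illustrative
assumptions; a CouchDB server must be reachable at that address:

from leap.soledad.backends.couch import CouchDatabase

# open_database() splits the URL into server address and database name;
# create=True creates the couch database if it does not exist yet.
db = CouchDatabase.open_database('http://localhost:5984/example-db',
                                 create=True)

# Documents behave like regular u1db documents; their JSON content is
# stored as a 'u1db_json' attachment on the corresponding couch document.
doc = db.create_doc({'title': 'example'})
assert db.get_doc(doc.doc_id).content['title'] == 'example'

# couch-specific cleanup
db.delete_database()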
diff --git a/soledad/backends/leap_backend.py b/soledad/backends/leap_backend.py
new file mode 100644
index 00000000..a37f9d25
--- /dev/null
+++ b/soledad/backends/leap_backend.py
@@ -0,0 +1,224 @@
+"""
+A U1DB backend that encrypts data before sending to server and decrypts after
+receiving.
+"""
+
+try:
+ import simplejson as json
+except ImportError:
+ import json # noqa
+
+from u1db import Document
+from u1db.remote import utils
+from u1db.remote.http_target import HTTPSyncTarget
+from u1db.remote.http_database import HTTPDatabase
+from u1db.errors import BrokenSyncStream
+
+import uuid
+
+
+class NoDefaultKey(Exception):
+ """
+ Exception to signal that there's no default OpenPGP key configured.
+ """
+ pass
+
+
+class NoSoledadInstance(Exception):
+ """
+ Exception to signal that no Soledad instance was found.
+ """
+ pass
+
+
+class DocumentNotEncrypted(Exception):
+ """
+ Exception to signal failures in document encryption.
+ """
+ pass
+
+
+class LeapDocument(Document):
+ """
+ Encryptable and syncable document.
+
+    LEAP Documents are standard u1db documents with the capability of
+    returning an encrypted version of the document's JSON string, as well as
+    setting document content based on an encrypted version of the JSON
+    string.
+ """
+
+ def __init__(self, doc_id=None, rev=None, json='{}', has_conflicts=False,
+ encrypted_json=None, soledad=None, syncable=True):
+ super(LeapDocument, self).__init__(doc_id, rev, json, has_conflicts)
+ self._soledad = soledad
+ self._syncable = syncable
+ if encrypted_json:
+ self.set_encrypted_json(encrypted_json)
+
+ def get_encrypted_content(self):
+ """
+ Return an encrypted JSON serialization of document's contents.
+ """
+ if not self._soledad:
+ raise NoSoledadInstance()
+ return self._soledad.encrypt_symmetric(self.doc_id,
+ self.get_json())
+
+ def set_encrypted_content(self, cyphertext):
+ """
+ Set document's content based on an encrypted JSON serialization of
+ contents.
+ """
+ plaintext = self._soledad.decrypt_symmetric(self.doc_id, cyphertext)
+ return self.set_json(plaintext)
+
+ def get_encrypted_json(self):
+ """
+ Return a valid JSON string containing document's content encrypted to
+ the user's public key.
+ """
+ return json.dumps({'_encrypted_json': self.get_encrypted_content()})
+
+ def set_encrypted_json(self, encrypted_json):
+ """
+ Set document's content based on a valid JSON string containing the
+ encrypted document's contents.
+ """
+ if not self._soledad:
+ raise NoSoledadInstance()
+ cyphertext = json.loads(encrypted_json)['_encrypted_json']
+ self.set_encrypted_content(cyphertext)
+
+ def _get_syncable(self):
+ return self._syncable
+
+ def _set_syncable(self, syncable=True):
+ self._syncable = syncable
+
+ syncable = property(
+ _get_syncable,
+ _set_syncable,
+ doc="Determine if document should be synced with server."
+ )
+
+    # Returning the revision as a string solves the following exception in
+    # Twisted web:
+    #     exceptions.TypeError: Can only pass-through bytes on Python 2
+ def _get_rev(self):
+ if self._rev is None:
+ return None
+ return str(self._rev)
+
+ def _set_rev(self, rev):
+ self._rev = rev
+
+ rev = property(
+ _get_rev,
+ _set_rev,
+ doc="Wrapper to ensure `doc.rev` is always returned as bytes.")
+
+
+class LeapSyncTarget(HTTPSyncTarget):
+ """
+ A SyncTarget that encrypts data before sending and decrypts data after
+ receiving.
+ """
+
+ def __init__(self, url, creds=None, soledad=None):
+ super(LeapSyncTarget, self).__init__(url, creds)
+ self._soledad = soledad
+
+ def _parse_sync_stream(self, data, return_doc_cb, ensure_callback=None):
+ """
+ Does the same as parent's method but ensures incoming content will be
+ decrypted.
+ """
+        parts = data.splitlines()  # one entry per line
+ if not parts or parts[0] != '[':
+ raise BrokenSyncStream
+ data = parts[1:-1]
+ comma = False
+ if data:
+ line, comma = utils.check_and_strip_comma(data[0])
+ res = json.loads(line)
+ if ensure_callback and 'replica_uid' in res:
+ ensure_callback(res['replica_uid'])
+ for entry in data[1:]:
+            if not comma:  # missing comma in between entries
+ raise BrokenSyncStream
+ line, comma = utils.check_and_strip_comma(entry)
+ entry = json.loads(line)
+ # decrypt after receiving from server.
+ if not self._soledad:
+ raise NoSoledadInstance()
+ enc_json = json.loads(entry['content'])['_encrypted_json']
+ if not self._soledad.is_encrypted_sym(enc_json):
+ raise DocumentNotEncrypted(
+ "Incoming document from sync is not encrypted.")
+ doc = LeapDocument(entry['id'], entry['rev'],
+ encrypted_json=entry['content'],
+ soledad=self._soledad)
+ return_doc_cb(doc, entry['gen'], entry['trans_id'])
+ if parts[-1] != ']':
+ try:
+ partdic = json.loads(parts[-1])
+ except ValueError:
+ pass
+ else:
+ if isinstance(partdic, dict):
+ self._error(partdic)
+ raise BrokenSyncStream
+ if not data or comma: # no entries or bad extra comma
+ raise BrokenSyncStream
+ return res
+
+ def sync_exchange(self, docs_by_generations, source_replica_uid,
+ last_known_generation, last_known_trans_id,
+ return_doc_cb, ensure_callback=None):
+ """
+ Does the same as parent's method but encrypts content before syncing.
+ """
+ self._ensure_connection()
+ if self._trace_hook: # for tests
+ self._trace_hook('sync_exchange')
+ url = '%s/sync-from/%s' % (self._url.path, source_replica_uid)
+ self._conn.putrequest('POST', url)
+ self._conn.putheader('content-type', 'application/x-u1db-sync-stream')
+ for header_name, header_value in self._sign_request('POST', url, {}):
+ self._conn.putheader(header_name, header_value)
+ entries = ['[']
+ size = 1
+
+ def prepare(**dic):
+ entry = comma + '\r\n' + json.dumps(dic)
+ entries.append(entry)
+ return len(entry)
+
+ comma = ''
+ size += prepare(
+ last_known_generation=last_known_generation,
+ last_known_trans_id=last_known_trans_id,
+ ensure=ensure_callback is not None)
+ comma = ','
+ for doc, gen, trans_id in docs_by_generations:
+ if doc.syncable:
+ # encrypt and verify before sending to server.
+ enc_json = json.loads(
+ doc.get_encrypted_json())['_encrypted_json']
+ if not self._soledad.is_encrypted_sym(enc_json):
+ raise DocumentNotEncrypted(
+ "Could not encrypt document before sync.")
+ size += prepare(id=doc.doc_id, rev=doc.rev,
+ content=doc.get_encrypted_json(),
+ gen=gen, trans_id=trans_id)
+ entries.append('\r\n]')
+ size += len(entries[-1])
+ self._conn.putheader('content-length', str(size))
+ self._conn.endheaders()
+ for entry in entries:
+ self._conn.send(entry)
+ entries = None
+ data, _ = self._response()
+ res = self._parse_sync_stream(data, return_doc_cb, ensure_callback)
+ data = None
+ return res['new_generation'], res['new_transaction_id']
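
To make the envelope format above concrete, here is a toy round-trip
through LeapDocument. FakeSoledad is a hypothetical stand-in for the real
Soledad crypto object; base64 encoding replaces actual encryption only so
the sketch is self-contained:

from base64 import b64encode, b64decode
import json

from leap.soledad.backends.leap_backend import LeapDocument


class FakeSoledad(object):
    """Hypothetical stand-in: encodes, does NOT actually encrypt."""

    def encrypt_symmetric(self, doc_id, plaintext):
        return b64encode(plaintext)

    def decrypt_symmetric(self, doc_id, cyphertext):
        return b64decode(cyphertext)

    def is_encrypted_sym(self, text):
        return True


doc = LeapDocument(doc_id='doc-1', soledad=FakeSoledad())
doc.content = {'secret': 42}

# What travels over the sync stream: a JSON envelope whose
# '_encrypted_json' value is the encrypted serialization of the document.
envelope = doc.get_encrypted_json()
assert '_encrypted_json' in json.loads(envelope)

# The receiving side reverses the process via set_encrypted_json().
doc2 = LeapDocument(doc_id='doc-1', soledad=FakeSoledad(),
                    encrypted_json=envelope)
assert doc2.content == {'secret': 42}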
diff --git a/soledad/backends/objectstore.py b/soledad/backends/objectstore.py
new file mode 100644
index 00000000..7c5d1177
--- /dev/null
+++ b/soledad/backends/objectstore.py
@@ -0,0 +1,135 @@
+"""
+Abstract U1DB backend to handle storage using object stores (like CouchDB, for
+example.
+
+Right now, this is only used by CouchDatabase backend, but can also be
+extended to implement OpenStack or Amazon S3 storage, for example.
+"""
+
+from u1db.backends.inmemory import (
+ InMemoryDatabase,
+ InMemorySyncTarget,
+)
+from u1db import errors
+
+
+class ObjectStoreDatabase(InMemoryDatabase):
+ """
+ A backend for storing u1db data in an object store.
+ """
+
+ @classmethod
+ def open_database(cls, url, create, document_factory=None):
+ raise NotImplementedError(cls.open_database)
+
+ def __init__(self, replica_uid=None, document_factory=None):
+ super(ObjectStoreDatabase, self).__init__(
+ replica_uid,
+ document_factory=document_factory)
+ # sync data in memory with data in object store
+ if not self._get_doc(self.U1DB_DATA_DOC_ID):
+ self._init_u1db_data()
+ self._fetch_u1db_data()
+
+ #-------------------------------------------------------------------------
+ # methods from Database
+ #-------------------------------------------------------------------------
+
+ def _set_replica_uid(self, replica_uid):
+ super(ObjectStoreDatabase, self)._set_replica_uid(replica_uid)
+ self._store_u1db_data()
+
+ def _put_doc(self, doc):
+ raise NotImplementedError(self._put_doc)
+
+    def _get_doc(self, doc_id, check_for_conflicts=False):
+        raise NotImplementedError(self._get_doc)
+
+ def get_all_docs(self, include_deleted=False):
+ raise NotImplementedError(self.get_all_docs)
+
+ def delete_doc(self, doc):
+ """Mark a document as deleted."""
+ old_doc = self._get_doc(doc.doc_id, check_for_conflicts=True)
+ if old_doc is None:
+ raise errors.DocumentDoesNotExist
+ if old_doc.rev != doc.rev:
+ raise errors.RevisionConflict()
+ if old_doc.is_tombstone():
+ raise errors.DocumentAlreadyDeleted
+ if old_doc.has_conflicts:
+ raise errors.ConflictedDoc()
+ new_rev = self._allocate_doc_rev(doc.rev)
+ doc.rev = new_rev
+ doc.make_tombstone()
+ self._put_and_update_indexes(old_doc, doc)
+ return new_rev
+
+ # index-related methods
+
+ def create_index(self, index_name, *index_expressions):
+ """
+        Create a named index, which can then be queried for future lookups.
+ """
+ raise NotImplementedError(self.create_index)
+
+ def delete_index(self, index_name):
+ """Remove a named index."""
+ super(ObjectStoreDatabase, self).delete_index(index_name)
+ self._store_u1db_data()
+
+ def _replace_conflicts(self, doc, conflicts):
+ super(ObjectStoreDatabase, self)._replace_conflicts(doc, conflicts)
+ self._store_u1db_data()
+
+ def _do_set_replica_gen_and_trans_id(self, other_replica_uid,
+ other_generation,
+ other_transaction_id):
+ super(ObjectStoreDatabase, self)._do_set_replica_gen_and_trans_id(
+ other_replica_uid,
+ other_generation,
+ other_transaction_id)
+ self._store_u1db_data()
+
+ #-------------------------------------------------------------------------
+ # implemented methods from CommonBackend
+ #-------------------------------------------------------------------------
+
+ def _put_and_update_indexes(self, old_doc, doc):
+ for index in self._indexes.itervalues():
+ if old_doc is not None and not old_doc.is_tombstone():
+ index.remove_json(old_doc.doc_id, old_doc.get_json())
+ if not doc.is_tombstone():
+ index.add_json(doc.doc_id, doc.get_json())
+ trans_id = self._allocate_transaction_id()
+ self._put_doc(doc)
+ self._transaction_log.append((doc.doc_id, trans_id))
+ self._store_u1db_data()
+
+ #-------------------------------------------------------------------------
+ # methods specific for object stores
+ #-------------------------------------------------------------------------
+
+ U1DB_DATA_DOC_ID = 'u1db_data'
+
+ def _fetch_u1db_data(self):
+ """
+ Fetch u1db configuration data from backend storage.
+ """
+        raise NotImplementedError(self._fetch_u1db_data)
+
+ def _store_u1db_data(self):
+ """
+ Save u1db configuration data on backend storage.
+ """
+        raise NotImplementedError(self._store_u1db_data)
+
+ def _init_u1db_data(self):
+ """
+ Initialize u1db configuration data on backend storage.
+ """
+        raise NotImplementedError(self._init_u1db_data)
+
+
+class ObjectStoreSyncTarget(InMemorySyncTarget):
+ pass
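
As a sketch of what a concrete subclass must fill in, the hypothetical
DictDatabase below keeps documents in a plain dict. A real backend would
also persist the U1DB_DATA_DOC_ID metadata document, as couch.py does:

import json

from leap.soledad.backends.objectstore import ObjectStoreDatabase


class DictDatabase(ObjectStoreDatabase):
    """Hypothetical object-store backend over a plain dict."""

    def __init__(self, replica_uid=None, document_factory=None):
        # the "object store": maps doc_id -> (rev, json string or None)
        self._store = {}
        super(DictDatabase, self).__init__(
            replica_uid, document_factory=document_factory)

    def _put_doc(self, doc):
        # a None json string marks a tombstone (deleted document)
        self._store[doc.doc_id] = (doc.rev, doc.get_json())

    def _get_doc(self, doc_id, check_for_conflicts=False):
        entry = self._store.get(doc_id)
        if entry is None:
            return None
        rev, content = entry
        doc = self._factory(doc_id=doc_id, rev=rev)
        if content is None:
            doc.make_tombstone()
        else:
            doc.content = json.loads(content)
        return doc

    # A complete backend would serialize self._transaction_log,
    # self._conflicts, self._indexes etc. in these hooks, as couch.py
    # does; empty hooks keep the in-memory defaults.
    def _init_u1db_data(self):
        pass

    def _fetch_u1db_data(self):
        pass

    def _store_u1db_data(self):
        pass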
diff --git a/soledad/backends/sqlcipher.py b/soledad/backends/sqlcipher.py
new file mode 100644
index 00000000..5d2569bf
--- /dev/null
+++ b/soledad/backends/sqlcipher.py
@@ -0,0 +1,163 @@
+"""A U1DB backend that uses SQLCipher as its persistence layer."""
+
+import os
+from pysqlcipher import dbapi2
+import time
+
+from leap import util
+from u1db.backends import sqlite_backend
+util.logger.debug(
+ "Monkey-patching u1db.backends.sqlite_backend with pysqlcipher.dbapi2..."
+)
+sqlite_backend.dbapi2 = dbapi2
+
+from u1db import (
+ errors,
+)
+
+from leap.soledad.backends.leap_backend import LeapDocument
+
+
+def open(path, password, create=True, document_factory=None, soledad=None):
+ """Open a database at the given location.
+
+ Will raise u1db.errors.DatabaseDoesNotExist if create=False and the
+ database does not already exist.
+
+    :param path: The filesystem path for the database to open.
+    :param password: The passphrase used by SQLCipher to encrypt the
+        database file.
+ :param create: True/False, should the database be created if it doesn't
+ already exist?
+ :param document_factory: A function that will be called with the same
+ parameters as Document.__init__.
+ :return: An instance of Database.
+ """
+ return SQLCipherDatabase.open_database(
+ path, password, create=create, document_factory=document_factory,
+ soledad=soledad)
+
+
+class DatabaseIsNotEncrypted(Exception):
+ """
+ Exception raised when trying to open non-encrypted databases.
+ """
+ pass
+
+
+class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
+ """A U1DB implementation that uses SQLCipher as its persistence layer."""
+
+ _index_storage_value = 'expand referenced encrypted'
+
+ @classmethod
+ def set_pragma_key(cls, db_handle, key):
+ db_handle.cursor().execute("PRAGMA key = '%s'" % key)
+
+ def __init__(self, sqlite_file, password, document_factory=None,
+ soledad=None):
+ """Create a new sqlcipher file."""
+ self._check_if_db_is_encrypted(sqlite_file)
+ self._db_handle = dbapi2.connect(sqlite_file)
+ SQLCipherDatabase.set_pragma_key(self._db_handle, password)
+ self._real_replica_uid = None
+ self._ensure_schema()
+ self._soledad = soledad
+
+ def factory(doc_id=None, rev=None, json='{}', has_conflicts=False,
+ encrypted_json=None, syncable=True):
+ return LeapDocument(doc_id=doc_id, rev=rev, json=json,
+ has_conflicts=has_conflicts,
+ encrypted_json=encrypted_json,
+ syncable=syncable, soledad=self._soledad)
+ self.set_document_factory(factory)
+
+ def _check_if_db_is_encrypted(self, sqlite_file):
+ if not os.path.exists(sqlite_file):
+ return
+ else:
+ try:
+                # trying to open an encrypted database with the regular
+                # u1db backend should raise a DatabaseError exception.
+ sqlite_backend.SQLitePartialExpandDatabase(sqlite_file)
+ raise DatabaseIsNotEncrypted()
+ except dbapi2.DatabaseError:
+ pass
+
+ @classmethod
+ def _open_database(cls, sqlite_file, password, document_factory=None,
+ soledad=None):
+ if not os.path.isfile(sqlite_file):
+ raise errors.DatabaseDoesNotExist()
+ tries = 2
+ while True:
+ # Note: There seems to be a bug in sqlite 3.5.9 (with python2.6)
+ # where without re-opening the database on Windows, it
+ # doesn't see the transaction that was just committed
+ db_handle = dbapi2.connect(sqlite_file)
+ SQLCipherDatabase.set_pragma_key(db_handle, password)
+ c = db_handle.cursor()
+ v, err = cls._which_index_storage(c)
+ db_handle.close()
+ if v is not None:
+ break
+ # possibly another process is initializing it, wait for it to be
+ # done
+ if tries == 0:
+ raise err # go for the richest error?
+ tries -= 1
+ time.sleep(cls.WAIT_FOR_PARALLEL_INIT_HALF_INTERVAL)
+ return SQLCipherDatabase._sqlite_registry[v](
+ sqlite_file, password, document_factory=document_factory,
+ soledad=soledad)
+
+ @classmethod
+ def open_database(cls, sqlite_file, password, create, backend_cls=None,
+ document_factory=None, soledad=None):
+ """Open U1DB database using SQLCipher as backend."""
+ try:
+ return cls._open_database(sqlite_file, password,
+ document_factory=document_factory,
+ soledad=soledad)
+ except errors.DatabaseDoesNotExist:
+ if not create:
+ raise
+ if backend_cls is None:
+ # default is SQLCipherPartialExpandDatabase
+ backend_cls = SQLCipherDatabase
+ return backend_cls(sqlite_file, password,
+ document_factory=document_factory,
+ soledad=soledad)
+
+ def sync(self, url, creds=None, autocreate=True):
+ """
+ Synchronize encrypted documents with remote replica exposed at url.
+ """
+ from u1db.sync import Synchronizer
+ from leap.soledad.backends.leap_backend import LeapSyncTarget
+ return Synchronizer(
+ self,
+ LeapSyncTarget(url,
+ creds=creds,
+ soledad=self._soledad)).sync(autocreate=autocreate)
+
+ def _extra_schema_init(self, c):
+ c.execute(
+ 'ALTER TABLE document '
+ 'ADD COLUMN syncable BOOL NOT NULL DEFAULT TRUE')
+
+ def _put_and_update_indexes(self, old_doc, doc):
+ super(SQLCipherDatabase, self)._put_and_update_indexes(old_doc, doc)
+ c = self._db_handle.cursor()
+ c.execute('UPDATE document SET syncable=? WHERE doc_id=?',
+ (doc.syncable, doc.doc_id))
+
+ def _get_doc(self, doc_id, check_for_conflicts=False):
+ doc = super(SQLCipherDatabase, self)._get_doc(doc_id,
+ check_for_conflicts)
+ if doc:
+ c = self._db_handle.cursor()
+ c.execute('SELECT syncable FROM document WHERE doc_id=?',
+ (doc.doc_id,))
+ doc.syncable = bool(c.fetchone()[0])
+ return doc
+
+sqlite_backend.SQLiteDatabase.register_implementation(SQLCipherDatabase)
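
Finally, a minimal usage sketch of the SQLCipher backend. The file path
and passphrase are illustrative; pysqlcipher must be installed:

from leap.soledad.backends import sqlcipher

# open() creates and encrypts the database file when create=True;
# reopening it with a wrong passphrase raises a DatabaseError as soon
# as the schema is first read.
db = sqlcipher.open('/tmp/example.u1db', 'a secret passphrase',
                    create=True)
doc = db.create_doc({'number': 1})

# The 'syncable' column added by _extra_schema_init() surfaces as a
# document attribute; unsyncable docs are skipped by LeapSyncTarget.
doc.syncable = False
db.put_doc(doc)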