path: root/src/leap/soledad/backends
Diffstat (limited to 'src/leap/soledad/backends')
-rw-r--r--  src/leap/soledad/backends/__init__.py       5
-rw-r--r--  src/leap/soledad/backends/couch.py         217
-rw-r--r--  src/leap/soledad/backends/leap_backend.py  210
-rw-r--r--  src/leap/soledad/backends/objectstore.py   109
-rw-r--r--  src/leap/soledad/backends/openstack.py      98
-rw-r--r--  src/leap/soledad/backends/sqlcipher.py     159
6 files changed, 0 insertions, 798 deletions
diff --git a/src/leap/soledad/backends/__init__.py b/src/leap/soledad/backends/__init__.py
deleted file mode 100644
index 72907f37..00000000
--- a/src/leap/soledad/backends/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import objectstore
-
-
-__all__ = [
- 'objectstore']
diff --git a/src/leap/soledad/backends/couch.py b/src/leap/soledad/backends/couch.py
deleted file mode 100644
index c8dadfa8..00000000
--- a/src/leap/soledad/backends/couch.py
+++ /dev/null
@@ -1,217 +0,0 @@
-import uuid
-from base64 import b64encode, b64decode
-from u1db import errors
-from u1db.sync import LocalSyncTarget
-from u1db.backends.inmemory import InMemoryIndex
-from couchdb.client import Server, Document as CouchDocument
-from couchdb.http import ResourceNotFound
-from leap.soledad.backends.objectstore import ObjectStore
-from leap.soledad.backends.leap_backend import LeapDocument
-
-try:
- import simplejson as json
-except ImportError:
- import json # noqa
-
-
-class CouchDatabase(ObjectStore):
- """A U1DB implementation that uses Couch as its persistence layer."""
-
- def __init__(self, url, database, replica_uid=None, full_commit=True,
- session=None):
- """Create a new Couch data container."""
- self._url = url
- self._full_commit = full_commit
- self._session = session
- self._server = Server(url=self._url,
- full_commit=self._full_commit,
- session=self._session)
- self._dbname = database
- self.set_document_factory(LeapDocument)
- try:
- self._database = self._server[database]
- except ResourceNotFound:
- self._server.create(database)
- self._database = self._server[database]
- # this will ensure that transaction and sync logs exist and are
- # up-to-date.
- super(CouchDatabase, self).__init__(replica_uid=replica_uid)
-
- #-------------------------------------------------------------------------
- # methods from Database
- #-------------------------------------------------------------------------
-
- def _get_doc(self, doc_id, check_for_conflicts=False):
- """
- Get just the document content, without fancy handling.
- """
- cdoc = self._database.get(doc_id)
- if cdoc is None:
- return None
- has_conflicts = False
- if check_for_conflicts:
- has_conflicts = self._has_conflicts(doc_id)
- doc = self._factory(
- doc_id=doc_id,
- rev=cdoc['u1db_rev'],
- has_conflicts=has_conflicts)
- contents = self._database.get_attachment(cdoc, 'u1db_json')
- if contents:
- doc.content = json.loads(contents.getvalue())
- else:
- doc.make_tombstone()
- return doc
-
- def get_all_docs(self, include_deleted=False):
- """Get all documents from the database."""
- generation = self._get_generation()
- results = []
- for doc_id in self._database:
- if doc_id == self.U1DB_DATA_DOC_ID:
- continue
- doc = self._get_doc(doc_id, check_for_conflicts=True)
- if doc.content is None and not include_deleted:
- continue
- results.append(doc)
- return (generation, results)
-
- def _put_doc(self, doc):
- # prepare couch's Document
- cdoc = CouchDocument()
- cdoc['_id'] = doc.doc_id
- # we have to guarantee that couch's _rev is consistent
- old_cdoc = self._database.get(doc.doc_id)
- if old_cdoc is not None:
- cdoc['_rev'] = old_cdoc['_rev']
- # store u1db's rev
- cdoc['u1db_rev'] = doc.rev
- # save doc in db
- self._database.save(cdoc)
- # store u1db's content as json string
- if not doc.is_tombstone():
- self._database.put_attachment(cdoc, doc.get_json(),
- filename='u1db_json')
- else:
- self._database.delete_attachment(cdoc, 'u1db_json')
-
- def get_sync_target(self):
- return CouchSyncTarget(self)
-
- def create_index(self, index_name, *index_expressions):
- if index_name in self._indexes:
- if self._indexes[index_name]._definition == list(
- index_expressions):
- return
- raise errors.IndexNameTakenError
- index = InMemoryIndex(index_name, list(index_expressions))
- for doc_id in self._database:
- if doc_id == self.U1DB_DATA_DOC_ID:
- continue
- doc = self._get_doc(doc_id)
- if doc.content is not None:
- index.add_json(doc_id, doc.get_json())
- self._indexes[index_name] = index
- # save data in object store
- self._set_u1db_data()
-
- def close(self):
- # TODO: fix this method so the connection is properly closed and
- # test_close (+tearDown, which deletes the db) works without problems.
- self._url = None
- self._full_commit = None
- self._session = None
- #self._server = None
- self._database = None
- return True
-
- def sync(self, url, creds=None, autocreate=True):
- from u1db.sync import Synchronizer
- return Synchronizer(self, CouchSyncTarget(url, creds=creds)).sync(
- autocreate=autocreate)
-
- #-------------------------------------------------------------------------
- # methods from ObjectStore
- #-------------------------------------------------------------------------
-
- def _init_u1db_data(self):
- if self._replica_uid is None:
- self._replica_uid = uuid.uuid4().hex
- doc = self._factory(doc_id=self.U1DB_DATA_DOC_ID)
- doc.content = {'transaction_log': [],
- 'conflicts': b64encode(json.dumps({})),
- 'other_generations': {},
- 'indexes': b64encode(json.dumps({})),
- 'replica_uid': self._replica_uid}
- self._put_doc(doc)
-
- def _get_u1db_data(self):
- # retrieve u1db data from couch db
- cdoc = self._database.get(self.U1DB_DATA_DOC_ID)
- jsonstr = self._database.get_attachment(cdoc, 'u1db_json').getvalue()
- content = json.loads(jsonstr)
- # set u1db database info
- #self._sync_log = content['sync_log']
- self._transaction_log = content['transaction_log']
- self._conflicts = json.loads(b64decode(content['conflicts']))
- self._other_generations = content['other_generations']
- self._indexes = self._load_indexes_from_json(
- b64decode(content['indexes']))
- self._replica_uid = content['replica_uid']
- # save couch _rev
- self._couch_rev = cdoc['_rev']
-
- def _set_u1db_data(self):
- doc = self._factory(doc_id=self.U1DB_DATA_DOC_ID)
- doc.content = {
- 'transaction_log': self._transaction_log,
- # Here, the b64 encode ensures that document content
- # does not cause strange behaviour in couchdb because
- # of encoding.
- 'conflicts': b64encode(json.dumps(self._conflicts)),
- 'other_generations': self._other_generations,
- 'indexes': b64encode(self._dump_indexes_as_json()),
- 'replica_uid': self._replica_uid,
- '_rev': self._couch_rev}
- self._put_doc(doc)
-
- #-------------------------------------------------------------------------
- # Couch specific methods
- #-------------------------------------------------------------------------
-
- def delete_database(self):
- del self._server[self._dbname]
-
- def _dump_indexes_as_json(self):
- indexes = {}
- for name, idx in self._indexes.iteritems():
- indexes[name] = {}
- for attr in ['name', 'definition', 'values']:
- indexes[name][attr] = getattr(idx, '_' + attr)
- return json.dumps(indexes)
-
- def _load_indexes_from_json(self, indexes):
- parsed = {}
- for name, idx_dict in json.loads(indexes).iteritems():
- idx = InMemoryIndex(name, idx_dict['definition'])
- idx._values = idx_dict['values']
- parsed[name] = idx
- return parsed
-
-
-class CouchSyncTarget(LocalSyncTarget):
-
- def get_sync_info(self, source_replica_uid):
- source_gen, source_trans_id = self._db._get_replica_gen_and_trans_id(
- source_replica_uid)
- my_gen, my_trans_id = self._db._get_generation_info()
- return (
- self._db._replica_uid, my_gen, my_trans_id, source_gen,
- source_trans_id)
-
- def record_sync_info(self, source_replica_uid, source_replica_generation,
- source_replica_transaction_id):
- if self._trace_hook:
- self._trace_hook('record_sync_info')
- self._db._set_replica_gen_and_trans_id(
- source_replica_uid, source_replica_generation,
- source_replica_transaction_id)
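For orientation, a minimal usage sketch of the deleted CouchDatabase (the server
URL and database name below are placeholders; the document API comes from the
u1db ObjectStore/InMemoryDatabase parents):

    # Sketch only, assuming a reachable CouchDB server and the classes above.
    from leap.soledad.backends.couch import CouchDatabase

    db = CouchDatabase('http://localhost:5984', 'user-db')
    doc = db.create_doc({'msg': 'hello'})  # inherited u1db API
    fetched = db.get_doc(doc.doc_id)       # couch doc + 'u1db_json' attachment
    db.delete_database()                   # couch-specific cleanup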
diff --git a/src/leap/soledad/backends/leap_backend.py b/src/leap/soledad/backends/leap_backend.py
deleted file mode 100644
index f73698f2..00000000
--- a/src/leap/soledad/backends/leap_backend.py
+++ /dev/null
@@ -1,210 +0,0 @@
-try:
- import simplejson as json
-except ImportError:
- import json # noqa
-
-from u1db import Document
-from u1db.remote import utils
-from u1db.remote.http_target import HTTPSyncTarget
-from u1db.remote.http_database import HTTPDatabase
-from u1db.errors import BrokenSyncStream
-
-import uuid
-
-
-class NoDefaultKey(Exception):
- pass
-
-
-class NoSoledadInstance(Exception):
- pass
-
-
-class DocumentEncryptionFailed(Exception):
- pass
-
-
-class LeapDocument(Document):
- """
- LEAP Documents are standard u1db documents with the capability of
- returning an encrypted version of the document's json string, as well as
- setting document content from an encrypted version of a json string.
- """
-
- def __init__(self, doc_id=None, rev=None, json='{}', has_conflicts=False,
- encrypted_json=None, soledad=None, syncable=True):
- super(LeapDocument, self).__init__(doc_id, rev, json, has_conflicts)
- self._soledad = soledad
- self._syncable = syncable
- if encrypted_json:
- self.set_encrypted_json(encrypted_json)
-
- def get_encrypted_json(self):
- """
- Return the document's json serialization encrypted with a symmetric key.
- """
- if not self._soledad:
- raise NoSoledadInstance()
- ciphertext = self._soledad.encrypt_symmetric(self.doc_id,
- self.get_json())
- return json.dumps({'_encrypted_json': ciphertext})
-
- def set_encrypted_json(self, encrypted_json):
- """
- Set the document's content based on an encrypted version of its json string.
- """
- if not self._soledad:
- raise NoSoledadInstance()
- ciphertext = json.loads(encrypted_json)['_encrypted_json']
- plaintext = self._soledad.decrypt_symmetric(self.doc_id, ciphertext)
- return self.set_json(plaintext)
-
- def _get_syncable(self):
- return self._syncable
-
- def _set_syncable(self, syncable=True):
- self._syncable = syncable
-
- syncable = property(
- _get_syncable,
- _set_syncable,
- doc="Determine if document should be synced with server."
- )
-
-
-class LeapDatabase(HTTPDatabase):
- """Implement the HTTP remote database API to a Leap server."""
-
- def __init__(self, url, document_factory=None, creds=None, soledad=None):
- super(LeapDatabase, self).__init__(url, creds=creds)
- self._soledad = soledad
- self._factory = LeapDocument
-
- @staticmethod
- def open_database(url, create):
- db = LeapDatabase(url)
- db.open(create)
- return db
-
- @staticmethod
- def delete_database(url):
- db = LeapDatabase(url)
- db._delete()
- db.close()
-
- def _allocate_doc_id(self):
- """Generate a unique identifier for this document."""
- return 'D-' + uuid.uuid4().hex # 'D-' stands for document
-
- def get_sync_target(self):
- st = LeapSyncTarget(self._url.geturl())
- st._creds = self._creds
- return st
-
- def create_doc_from_json(self, content, doc_id=None):
- if doc_id is None:
- doc_id = self._allocate_doc_id()
- res, headers = self._request_json('PUT', ['doc', doc_id], {},
- content, 'application/json')
- new_doc = self._factory(doc_id, res['rev'], content,
- soledad=self._soledad)
- return new_doc
-
-
-class LeapSyncTarget(HTTPSyncTarget):
-
- def __init__(self, url, creds=None, soledad=None):
- super(LeapSyncTarget, self).__init__(url, creds)
- self._soledad = soledad
-
- def _parse_sync_stream(self, data, return_doc_cb, ensure_callback=None):
- """
- Does the same as parent's method but ensures incoming content will be
- decrypted.
- """
- parts = data.splitlines() # one at a time
- if not parts or parts[0] != '[':
- raise BrokenSyncStream
- data = parts[1:-1]
- comma = False
- if data:
- line, comma = utils.check_and_strip_comma(data[0])
- res = json.loads(line)
- if ensure_callback and 'replica_uid' in res:
- ensure_callback(res['replica_uid'])
- for entry in data[1:]:
- if not comma: # missing comma between entries
- raise BrokenSyncStream
- line, comma = utils.check_and_strip_comma(entry)
- entry = json.loads(line)
- # decrypt after receiving from server.
- doc = LeapDocument(entry['id'], entry['rev'],
- encrypted_json=entry['content'],
- soledad=self._soledad)
- return_doc_cb(doc, entry['gen'], entry['trans_id'])
- if parts[-1] != ']':
- try:
- partdic = json.loads(parts[-1])
- except ValueError:
- pass
- else:
- if isinstance(partdic, dict):
- self._error(partdic)
- raise BrokenSyncStream
- if not data or comma: # no entries or bad extra comma
- raise BrokenSyncStream
- return res
-
- def sync_exchange(self, docs_by_generations, source_replica_uid,
- last_known_generation, last_known_trans_id,
- return_doc_cb, ensure_callback=None):
- """
- Does the same as parent's method but encrypts content before syncing.
- """
- self._ensure_connection()
- if self._trace_hook: # for tests
- self._trace_hook('sync_exchange')
- url = '%s/sync-from/%s' % (self._url.path, source_replica_uid)
- self._conn.putrequest('POST', url)
- self._conn.putheader('content-type', 'application/x-u1db-sync-stream')
- for header_name, header_value in self._sign_request('POST', url, {}):
- self._conn.putheader(header_name, header_value)
- entries = ['[']
- size = 1
-
- def prepare(**dic):
- entry = comma + '\r\n' + json.dumps(dic)
- entries.append(entry)
- return len(entry)
-
- comma = ''
- size += prepare(
- last_known_generation=last_known_generation,
- last_known_trans_id=last_known_trans_id,
- ensure=ensure_callback is not None)
- comma = ','
- for doc, gen, trans_id in docs_by_generations:
- if doc.syncable:
- # encrypt and verify before sending to server.
- doc_content = doc.get_encrypted_json()
- if doc_content == doc.get_json():
- raise DocumentEncryptionFailed
- enc_doc = LeapDocument(doc.doc_id, doc.rev,
- encrypted_json=doc_content,
- soledad=self._soledad)
- if doc.get_json() != enc_doc.get_json():
- raise DocumentEncryptionFailed
- size += prepare(id=doc.doc_id, rev=doc.rev,
- content=doc_content,
- gen=gen, trans_id=trans_id)
- entries.append('\r\n]')
- size += len(entries[-1])
- self._conn.putheader('content-length', str(size))
- self._conn.endheaders()
- for entry in entries:
- self._conn.send(entry)
- entries = None
- data, _ = self._response()
- res = self._parse_sync_stream(data, return_doc_cb, ensure_callback)
- data = None
- return res['new_generation'], res['new_transaction_id']
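The LeapDocument encrypt/decrypt round trip can be sketched with a stand-in for
the Soledad instance; the real object supplied encrypt_symmetric and
decrypt_symmetric, so the reversal below is a placeholder cipher, not the
actual crypto:

    from leap.soledad.backends.leap_backend import LeapDocument

    class FakeSoledad(object):
        # Placeholder for the real Soledad crypto provider.
        def encrypt_symmetric(self, doc_id, plaintext):
            return plaintext[::-1]

        def decrypt_symmetric(self, doc_id, ciphertext):
            return ciphertext[::-1]

    doc = LeapDocument(doc_id='D-1', soledad=FakeSoledad())
    doc.set_json('{"a": 1}')
    enc = doc.get_encrypted_json()  # json with an '_encrypted_json' field
    doc.set_encrypted_json(enc)     # decrypts and restores the content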
diff --git a/src/leap/soledad/backends/objectstore.py b/src/leap/soledad/backends/objectstore.py
deleted file mode 100644
index 588fc7a1..00000000
--- a/src/leap/soledad/backends/objectstore.py
+++ /dev/null
@@ -1,109 +0,0 @@
-from u1db.backends.inmemory import InMemoryDatabase
-from u1db import errors
-
-
-class ObjectStore(InMemoryDatabase):
- """
- A backend for storing u1db data in an object store.
- """
-
- def __init__(self, replica_uid=None):
- super(ObjectStore, self).__init__(replica_uid)
- # sync data in memory with data in object store
- if not self._get_doc(self.U1DB_DATA_DOC_ID):
- self._init_u1db_data()
- self._get_u1db_data()
-
- #-------------------------------------------------------------------------
- # methods from Database
- #-------------------------------------------------------------------------
-
- def _set_replica_uid(self, replica_uid):
- super(ObjectStore, self)._set_replica_uid(replica_uid)
- self._set_u1db_data()
-
- def _put_doc(self, doc):
- raise NotImplementedError(self._put_doc)
-
- def _get_doc(self, doc_id, check_for_conflicts=False):
- raise NotImplementedError(self._get_doc)
-
- def get_all_docs(self, include_deleted=False):
- raise NotImplementedError(self.get_all_docs)
-
- def delete_doc(self, doc):
- old_doc = self._get_doc(doc.doc_id, check_for_conflicts=True)
- if old_doc is None:
- raise errors.DocumentDoesNotExist
- if old_doc.rev != doc.rev:
- raise errors.RevisionConflict()
- if old_doc.is_tombstone():
- raise errors.DocumentAlreadyDeleted
- if old_doc.has_conflicts:
- raise errors.ConflictedDoc()
- new_rev = self._allocate_doc_rev(doc.rev)
- doc.rev = new_rev
- doc.make_tombstone()
- self._put_and_update_indexes(old_doc, doc)
- return new_rev
-
- # index-related methods
-
- def create_index(self, index_name, *index_expressions):
- raise NotImplementedError(self.create_index)
-
- def delete_index(self, index_name):
- super(ObjectStore, self).delete_index(index_name)
- self._set_u1db_data()
-
- def _replace_conflicts(self, doc, conflicts):
- super(ObjectStore, self)._replace_conflicts(doc, conflicts)
- self._set_u1db_data()
-
- def _do_set_replica_gen_and_trans_id(self, other_replica_uid,
- other_generation,
- other_transaction_id):
- super(ObjectStore, self)._do_set_replica_gen_and_trans_id(
- other_replica_uid,
- other_generation,
- other_transaction_id)
- self._set_u1db_data()
-
- #-------------------------------------------------------------------------
- # implemented methods from CommonBackend
- #-------------------------------------------------------------------------
-
- def _put_and_update_indexes(self, old_doc, doc):
- for index in self._indexes.itervalues():
- if old_doc is not None and not old_doc.is_tombstone():
- index.remove_json(old_doc.doc_id, old_doc.get_json())
- if not doc.is_tombstone():
- index.add_json(doc.doc_id, doc.get_json())
- trans_id = self._allocate_transaction_id()
- self._put_doc(doc)
- self._transaction_log.append((doc.doc_id, trans_id))
- self._set_u1db_data()
-
- #-------------------------------------------------------------------------
- # methods specific for object stores
- #-------------------------------------------------------------------------
-
- U1DB_DATA_DOC_ID = 'u1db_data'
-
- def _get_u1db_data(self):
- """
- Fetch u1db configuration data from backend storage.
- """
- raise NotImplementedError(self._get_u1db_data)
-
- def _set_u1db_data(self):
- """
- Save u1db configuration data on backend storage.
- """
- raise NotImplementedError(self._set_u1db_data)
-
- def _init_u1db_data(self):
- """
- Initialize u1db configuration data on backend storage.
- """
- raise NotImplementedError(self._init_u1db_data)
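The contract above is easiest to see in a toy subclass that fills in the
abstract hooks (a sketch only; real backends, like couch.py above, persist
documents remotely):

    class DictStore(ObjectStore):
        """Sketch: smallest possible subclass, keeping docs in a dict."""

        def __init__(self, replica_uid=None):
            self._docs = {}
            super(DictStore, self).__init__(replica_uid)

        def _put_doc(self, doc):
            self._docs[doc.doc_id] = doc

        def _get_doc(self, doc_id, check_for_conflicts=False):
            return self._docs.get(doc_id)

        def get_all_docs(self, include_deleted=False):
            docs = [d for d in self._docs.values()
                    if d.doc_id != self.U1DB_DATA_DOC_ID
                    and (include_deleted or not d.is_tombstone())]
            return self._get_generation(), docs

        # u1db metadata already lives in memory here, so the storage
        # hooks have nothing extra to do.
        def _init_u1db_data(self):
            pass

        def _get_u1db_data(self):
            pass

        def _set_u1db_data(self):
            pass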
diff --git a/src/leap/soledad/backends/openstack.py b/src/leap/soledad/backends/openstack.py
deleted file mode 100644
index a9615736..00000000
--- a/src/leap/soledad/backends/openstack.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# TODO: this backend is not tested yet.
-from u1db.remote.http_target import HTTPSyncTarget
-import swiftclient
-from leap.soledad.backends.objectstore import ObjectStore
-
-
-class OpenStackDatabase(ObjectStore):
- """A U1DB implementation that uses OpenStack as its persistence layer."""
-
- def __init__(self, auth_url, user, auth_key, container):
- """Create a new OpenStack data container."""
- self._auth_url = auth_url
- self._user = user
- self._auth_key = auth_key
- self._container = container
- self._connection = swiftclient.Connection(self._auth_url, self._user,
- self._auth_key)
- self._get_auth()
- # this will ensure transaction and sync logs exist and are up-to-date.
- super(OpenStackDatabase, self).__init__()
-
- #-------------------------------------------------------------------------
- # implemented methods from Database
- #-------------------------------------------------------------------------
-
- def _get_doc(self, doc_id, check_for_conflicts=False):
- """Get just the document content, without fancy handling.
-
- Conflicts do not happen on server side, so there's no need to check
- for them.
- """
- try:
- response, contents = self._connection.get_object(self._container,
- doc_id)
- # TODO: change revision to be a dictionary element?
- rev = response['x-object-meta-rev']
- return self._factory(doc_id, rev, contents)
- except swiftclient.ClientException:
- return None
-
- def get_all_docs(self, include_deleted=False):
- """Get all documents from the database."""
- generation = self._get_generation()
- results = []
- _, doc_ids = self._connection.get_container(self._container,
- full_listing=True)
- for doc_id in doc_ids:
- doc = self._get_doc(doc_id)
- if doc.content is None and not include_deleted:
- continue
- results.append(doc)
- return (generation, results)
-
- def _put_doc(self, doc):
- new_rev = self._allocate_doc_rev(doc.rev)
- # TODO: change revision to be a dictionary element?
- headers = {'X-Object-Meta-Rev': new_rev}
- self._connection.put_object(self._container, doc.doc_id, doc.get_json(),
- headers=headers)
-
- def get_sync_target(self):
- return OpenStackSyncTarget(self)
-
- def close(self):
- raise NotImplementedError(self.close)
-
- def sync(self, url, creds=None, autocreate=True):
- from u1db.sync import Synchronizer
- # OpenStackSyncTarget is defined below in this module.
- return Synchronizer(self, OpenStackSyncTarget(url, creds=creds)).sync(
- autocreate=autocreate)
-
- #-------------------------------------------------------------------------
- # OpenStack specific methods
- #-------------------------------------------------------------------------
-
- def _get_auth(self):
- self._url, self._auth_token = self._connection.get_auth()
- return self._url, self._auth_token
-
-
-class OpenStackSyncTarget(HTTPSyncTarget):
-
- def get_sync_info(self, source_replica_uid):
- source_gen, source_trans_id = self._db._get_replica_gen_and_trans_id(
- source_replica_uid)
- my_gen, my_trans_id = self._db._get_generation_info()
- return (
- self._db._replica_uid, my_gen, my_trans_id, source_gen,
- source_trans_id)
-
- def record_sync_info(self, source_replica_uid, source_replica_generation,
- source_replica_transaction_id):
- if self._trace_hook:
- self._trace_hook('record_sync_info')
- self._db._set_replica_gen_and_trans_id(
- source_replica_uid, source_replica_generation,
- source_replica_transaction_id)
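Since the module header flags this backend as untested, only an instantiation
sketch is warranted; the endpoint and credentials below are placeholders:

    from leap.soledad.backends.openstack import OpenStackDatabase

    db = OpenStackDatabase(
        auth_url='https://objects.example.net/auth/v1.0',  # placeholder
        user='account:user',
        auth_key='secret',
        container='soledad-docs')
    generation, docs = db.get_all_docs()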
diff --git a/src/leap/soledad/backends/sqlcipher.py b/src/leap/soledad/backends/sqlcipher.py
deleted file mode 100644
index 6cebcf7d..00000000
--- a/src/leap/soledad/backends/sqlcipher.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# Copyright 2011 Canonical Ltd.
-#
-# This file is part of u1db.
-#
-# u1db is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3
-# as published by the Free Software Foundation.
-#
-# u1db is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with u1db. If not, see <http://www.gnu.org/licenses/>.
-
-"""A U1DB implementation that uses SQLCipher as its persistence layer."""
-
-import os
-from sqlite3 import dbapi2, DatabaseError
-import time
-
-from u1db.backends.sqlite_backend import (
- SQLiteDatabase,
- SQLitePartialExpandDatabase,
-)
-from u1db import (
- errors,
-)
-
-from leap.soledad.backends.leap_backend import LeapDocument
-
-
-def open(path, password, create=True, document_factory=None):
- """Open a database at the given location.
-
- Will raise u1db.errors.DatabaseDoesNotExist if create=False and the
- database does not already exist.
-
- :param path: The filesystem path for the database to open.
- :param password: The passphrase used by SQLCipher to encrypt the
- database.
- :param create: True/False, should the database be created if it doesn't
- already exist?
- :param document_factory: A function that will be called with the same
- parameters as Document.__init__.
- :return: An instance of Database.
- """
- return SQLCipherDatabase.open_database(
- path, password, create=create, document_factory=document_factory)
-
-
-class DatabaseIsNotEncrypted(Exception):
- """
- Exception raised when trying to open non-encrypted databases.
- """
- pass
-
-
-class SQLCipherDatabase(SQLitePartialExpandDatabase):
- """A U1DB implementation that uses SQLCipher as its persistence layer."""
-
- _index_storage_value = 'expand referenced encrypted'
-
- @classmethod
- def set_pragma_key(cls, db_handle, key):
- db_handle.cursor().execute("PRAGMA key = '%s'" % key)
-
- def __init__(self, sqlite_file, password, document_factory=None):
- """Create a new sqlcipher file."""
- self._check_if_db_is_encrypted(sqlite_file)
- self._db_handle = dbapi2.connect(sqlite_file)
- SQLCipherDatabase.set_pragma_key(self._db_handle, password)
- self._real_replica_uid = None
- self._ensure_schema()
- self._factory = document_factory or LeapDocument
-
- def _check_if_db_is_encrypted(self, sqlite_file):
- if not os.path.exists(sqlite_file):
- return
- else:
- try:
- # trying to open an encrypted database with the regular u1db
- # backend should raise a DatabaseError exception.
- SQLitePartialExpandDatabase(sqlite_file)
- raise DatabaseIsNotEncrypted()
- except DatabaseError:
- pass
-
- @classmethod
- def _open_database(cls, sqlite_file, password, document_factory=None):
- if not os.path.isfile(sqlite_file):
- raise errors.DatabaseDoesNotExist()
- tries = 2
- while True:
- # Note: There seems to be a bug in sqlite 3.5.9 (with python2.6)
- # where without re-opening the database on Windows, it
- # doesn't see the transaction that was just committed
- db_handle = dbapi2.connect(sqlite_file)
- SQLCipherDatabase.set_pragma_key(db_handle, password)
- c = db_handle.cursor()
- v, err = cls._which_index_storage(c)
- db_handle.close()
- if v is not None:
- break
- # possibly another process is initializing it, wait for it to be
- # done
- if tries == 0:
- raise err # go for the richest error?
- tries -= 1
- time.sleep(cls.WAIT_FOR_PARALLEL_INIT_HALF_INTERVAL)
- return SQLCipherDatabase._sqlite_registry[v](
- sqlite_file, password, document_factory=document_factory)
-
- @classmethod
- def open_database(cls, sqlite_file, password, create, backend_cls=None,
- document_factory=None):
- try:
- return cls._open_database(sqlite_file, password,
- document_factory=document_factory)
- except errors.DatabaseDoesNotExist:
- if not create:
- raise
- if backend_cls is None:
- # default is SQLCipherDatabase
- backend_cls = SQLCipherDatabase
- return backend_cls(sqlite_file, password,
- document_factory=document_factory)
-
- def sync(self, url, creds=None, autocreate=True, soledad=None):
- """
- Synchronize encrypted documents with remote replica exposed at url.
- """
- from u1db.sync import Synchronizer
- from leap.soledad.backends.leap_backend import LeapSyncTarget
- return Synchronizer(self, LeapSyncTarget(url, creds=creds,
- soledad=soledad)).sync(autocreate=autocreate)
-
- def _extra_schema_init(self, c):
- c.execute(
- 'ALTER TABLE document '
- 'ADD COLUMN syncable BOOL NOT NULL DEFAULT TRUE')
-
- def _put_and_update_indexes(self, old_doc, doc):
- super(SQLCipherDatabase, self)._put_and_update_indexes(old_doc, doc)
- c = self._db_handle.cursor()
- c.execute('UPDATE document SET syncable=? WHERE doc_id=?',
- (doc.syncable, doc.doc_id))
-
- def _get_doc(self, doc_id, check_for_conflicts=False):
- doc = super(SQLCipherDatabase, self)._get_doc(doc_id,
- check_for_conflicts)
- if doc:
- c = self._db_handle.cursor()
- c.execute('SELECT syncable FROM document WHERE doc_id=?',
- (doc.doc_id,))
- doc.syncable = bool(c.fetchone()[0])
- return doc
-
-
-SQLiteDatabase.register_implementation(SQLCipherDatabase)
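A short sketch of the module-level open() documented above; the path and
passphrase are placeholders, and syncable is the extra column added by
_extra_schema_init:

    from leap.soledad.backends import sqlcipher

    db = sqlcipher.open('/tmp/docs.db', 'secret', create=True)
    doc = db.create_doc({'msg': 'hello'})  # documents default to LeapDocument
    doc.syncable = False                   # persisted by _put_and_update_indexes
    db.put_doc(doc)
    db.close()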