path: root/src/leap/soledad/backends
Diffstat (limited to 'src/leap/soledad/backends')
-rw-r--r--  src/leap/soledad/backends/__init__.py      18
-rw-r--r--  src/leap/soledad/backends/couch.py         232
-rw-r--r--  src/leap/soledad/backends/leap_backend.py  115
-rw-r--r--  src/leap/soledad/backends/objectstore.py   173
-rw-r--r--  src/leap/soledad/backends/sqlcipher.py     162
5 files changed, 632 insertions(+), 68 deletions(-)
diff --git a/src/leap/soledad/backends/__init__.py b/src/leap/soledad/backends/__init__.py
index f5e2497a..720a8118 100644
--- a/src/leap/soledad/backends/__init__.py
+++ b/src/leap/soledad/backends/__init__.py
@@ -1,3 +1,21 @@
+# -*- coding: utf-8 -*-
+# __init__.py
+# Copyright (C) 2013 LEAP
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
"""
Backends that extend U1DB functionality.
"""
diff --git a/src/leap/soledad/backends/couch.py b/src/leap/soledad/backends/couch.py
index b7a77054..5407f992 100644
--- a/src/leap/soledad/backends/couch.py
+++ b/src/leap/soledad/backends/couch.py
@@ -1,42 +1,71 @@
+# -*- coding: utf-8 -*-
+# couch.py
+# Copyright (C) 2013 LEAP
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
"""A U1DB backend that uses CouchDB as its persistence layer."""
# general imports
import uuid
-from base64 import b64encode, b64decode
import re
-# u1db
+try:
+ import simplejson as json
+except ImportError:
+ import json # noqa
+
+
+from base64 import b64encode, b64decode
from u1db import errors
from u1db.sync import LocalSyncTarget
from u1db.backends.inmemory import InMemoryIndex
from u1db.remote.server_state import ServerState
from u1db.errors import DatabaseDoesNotExist
-# couchdb
from couchdb.client import Server, Document as CouchDocument
from couchdb.http import ResourceNotFound
-# leap
from leap.soledad.backends.objectstore import (
ObjectStoreDatabase,
ObjectStoreSyncTarget,
)
from leap.soledad.backends.leap_backend import LeapDocument
-try:
- import simplejson as json
-except ImportError:
- import json # noqa
-
class InvalidURLError(Exception):
- """Exception raised when Soledad encounters a malformed URL."""
- pass
+ """
+ Exception raised when Soledad encounters a malformed URL.
+ """
class CouchDatabase(ObjectStoreDatabase):
- """A U1DB backend that uses Couch as its persistence layer."""
+ """
+ A U1DB backend that uses Couch as its persistence layer.
+ """
@classmethod
def open_database(cls, url, create):
- """Open a U1DB database using CouchDB as backend."""
+ """
+ Open a U1DB database using CouchDB as backend.
+
+ @param url: the url of the database replica
+ @type url: str
+ @param create: should the replica be created if it does not exist?
+ @type create: bool
+
+ @return: the database instance
+ @rtype: CouchDatabase
+ """
# get database from url
m = re.match('(^https?://[^/]+)/(.+)$', url)
if not m:
@@ -51,24 +80,39 @@ class CouchDatabase(ObjectStoreDatabase):
raise DatabaseDoesNotExist()
return cls(url, dbname)
- def __init__(self, url, database, replica_uid=None, full_commit=True,
+ def __init__(self, url, dbname, replica_uid=None, full_commit=True,
session=None):
- """Create a new Couch data container."""
+ """
+ Create a new Couch data container.
+
+ @param url: the url of the couch database
+ @type url: str
+ @param dbname: the database name
+ @type dbname: str
+ @param replica_uid: an optional unique replica identifier
+ @type replica_uid: str
+ @param full_commit: turn on the X-Couch-Full-Commit header
+ @type full_commit: bool
+ @param session: an http.Session instance or None for a default session
+ @type session: http.Session
+ """
self._url = url
self._full_commit = full_commit
self._session = session
self._server = Server(url=self._url,
full_commit=self._full_commit,
session=self._session)
- self._dbname = database
+ self._dbname = dbname
# this will ensure that transaction and sync logs exist and are
# up-to-date.
try:
- self._database = self._server[database]
+ self._database = self._server[self._dbname]
except ResourceNotFound:
- self._server.create(database)
- self._database = self._server[database]
+ self._server.create(self._dbname)
+ self._database = self._server[self._dbname]
super(CouchDatabase, self).__init__(replica_uid=replica_uid,
+ # TODO: move the factory choice
+ # away
document_factory=LeapDocument)
#-------------------------------------------------------------------------
@@ -76,7 +120,19 @@ class CouchDatabase(ObjectStoreDatabase):
#-------------------------------------------------------------------------
def _get_doc(self, doc_id, check_for_conflicts=False):
- """Get just the document content, without fancy handling."""
+ """
+ Get just the document content, without fancy handling.
+
+ @param doc_id: The unique document identifier
+ @type doc_id: str
+ @param check_for_conflicts: If set to True, the document is also
+ checked for conflicts and its has_conflicts flag is set accordingly.
+ @type check_for_conflicts: bool
+
+ @return: a Document object.
+ @rtype: u1db.Document
+ """
cdoc = self._database.get(doc_id)
if cdoc is None:
return None
@@ -95,7 +151,19 @@ class CouchDatabase(ObjectStoreDatabase):
return doc
def get_all_docs(self, include_deleted=False):
- """Get the JSON content for all documents in the database."""
+ """
+ Get the JSON content for all documents in the database.
+
+ @param include_deleted: If set to True, deleted documents will be
+ returned with empty content. Otherwise deleted documents will not
+ be included in the results.
+ @type include_deleted: bool
+
+ @return: (generation, [Document])
+ The current generation of the database, followed by a list of all
+ the documents in the database.
+ @rtype: tuple
+ """
generation = self._get_generation()
results = []
for doc_id in self._database:
@@ -108,7 +176,19 @@ class CouchDatabase(ObjectStoreDatabase):
return (generation, results)
def _put_doc(self, doc):
- """Store document in database."""
+ """
+ Update a document.
+
+ This is called every time we just want to do a raw put on the db (i.e.
+ without index updates, document constraint checks, and conflict
+ checks).
+
+ @param doc: The document to update.
+ @type doc: u1db.Document
+
+ @return: The new revision identifier for the document.
+ @rtype: str
+ """
# prepare couch's Document
cdoc = CouchDocument()
cdoc['_id'] = doc.doc_id
@@ -130,12 +210,19 @@ class CouchDatabase(ObjectStoreDatabase):
def get_sync_target(self):
"""
Return a SyncTarget object, for another u1db to synchronize with.
+
+ @return: The sync target.
+ @rtype: CouchSyncTarget
"""
return CouchSyncTarget(self)
def create_index(self, index_name, *index_expressions):
"""
Create a named index, which can then be queried for future lookups.
+
+ @param index_name: A unique name which can be used as a key prefix.
+ @param index_expressions: Index expressions defining the index
+ information.
"""
if index_name in self._indexes:
if self._indexes[index_name]._definition == list(
@@ -144,7 +231,7 @@ class CouchDatabase(ObjectStoreDatabase):
raise errors.IndexNameTakenError
index = InMemoryIndex(index_name, list(index_expressions))
for doc_id in self._database:
- if doc_id == self.U1DB_DATA_DOC_ID:
+ if doc_id == self.U1DB_DATA_DOC_ID: # skip special file
continue
doc = self._get_doc(doc_id)
if doc.content is not None:
@@ -154,7 +241,12 @@ class CouchDatabase(ObjectStoreDatabase):
self._store_u1db_data()
def close(self):
- """Release any resources associated with this database."""
+ """
+ Release any resources associated with this database.
+
+ @return: True if the db was successfully closed.
+ @rtype: bool
+ """
# TODO: fix this method so the connection is properly closed and
# test_close (+tearDown, which deletes the db) works without problems.
self._url = None
@@ -165,7 +257,20 @@ class CouchDatabase(ObjectStoreDatabase):
return True
def sync(self, url, creds=None, autocreate=True):
- """Synchronize documents with remote replica exposed at url."""
+ """
+ Synchronize documents with remote replica exposed at url.
+
+ @param url: The url of the target replica to sync with.
+ @type url: str
+ @param creds: optional dictionary giving credentials to authorize the
+ operation with the server.
+ @type creds: dict
+ @param autocreate: Ask the target to create the db if non-existent.
+ @type autocreate: bool
+
+ @return: The local generation before the synchronisation was performed.
+ @rtype: int
+ """
from u1db.sync import Synchronizer
return Synchronizer(self, CouchSyncTarget(url, creds=creds)).sync(
autocreate=autocreate)
@@ -175,8 +280,23 @@ class CouchDatabase(ObjectStoreDatabase):
#-------------------------------------------------------------------------
def _init_u1db_data(self):
+ """
+ Initialize U1DB info data structure in the couch db.
+
+ A U1DB database needs to keep track of all database transactions,
+ document conflicts, the generation of other replicas it has seen,
+ indexes created by users and so on.
+
+ In this implementation, all this information is stored in a special
+ document stored in the couch db with id equal to
+ CouchDatabase.U1DB_DATA_DOC_ID.
+
+ This method initializes the document that will hold such information.
+ """
if self._replica_uid is None:
self._replica_uid = uuid.uuid4().hex
+ # TODO: prevent user from overwriting a document with the same doc_id
+ # as this one.
doc = self._factory(doc_id=self.U1DB_DATA_DOC_ID)
doc.content = {'transaction_log': [],
'conflicts': b64encode(json.dumps({})),
@@ -186,6 +306,11 @@ class CouchDatabase(ObjectStoreDatabase):
self._put_doc(doc)
def _fetch_u1db_data(self):
+ """
+ Fetch U1DB info from the couch db.
+
+ See C{_init_u1db_data} documentation.
+ """
# retrieve u1db data from couch db
cdoc = self._database.get(self.U1DB_DATA_DOC_ID)
jsonstr = self._database.get_attachment(cdoc, 'u1db_json').getvalue()
@@ -202,6 +327,11 @@ class CouchDatabase(ObjectStoreDatabase):
self._couch_rev = cdoc['_rev']
def _store_u1db_data(self):
+ """
+ Store U1DB info in the couch db.
+
+ See C{_init_u1db_data} documentation.
+ """
doc = self._factory(doc_id=self.U1DB_DATA_DOC_ID)
doc.content = {
'transaction_log': self._transaction_log,
@@ -220,10 +350,15 @@ class CouchDatabase(ObjectStoreDatabase):
#-------------------------------------------------------------------------
def delete_database(self):
- """Delete a U1DB CouchDB database."""
+ """
+ Delete a U1DB CouchDB database.
+ """
del(self._server[self._dbname])
def _dump_indexes_as_json(self):
+ """
+ Dump index definitions as JSON string.
+ """
indexes = {}
for name, idx in self._indexes.iteritems():
indexes[name] = {}
@@ -232,6 +367,16 @@ class CouchDatabase(ObjectStoreDatabase):
return json.dumps(indexes)
def _load_indexes_from_json(self, indexes):
+ """
+ Load index definitions from JSON string.
+
+ @param indexes: A JSON serialization of a list of [('index-name',
+ ['field', 'field2'])].
+ @type indexes: str
+
+ @return: A dictionary with the index definitions.
+ @rtype: dict
+ """
dict = {}
for name, idx_dict in json.loads(indexes).iteritems():
idx = InMemoryIndex(name, idx_dict['definition'])
@@ -241,30 +386,55 @@ class CouchDatabase(ObjectStoreDatabase):
class CouchSyncTarget(ObjectStoreSyncTarget):
- pass
+ """
+ Functionality for using a CouchDatabase as a synchronization target.
+ """
class CouchServerState(ServerState):
- """Inteface of the WSGI server with the CouchDB backend."""
+ """
+ Interface of the WSGI server with the CouchDB backend.
+ """
def __init__(self, couch_url):
self.couch_url = couch_url
def open_database(self, dbname):
- """Open a database at the given location."""
+ """
+ Open a couch database.
+
+ @param dbname: The name of the database to open.
+ @type dbname: str
+
+ @return: The CouchDatabase object.
+ @rtype: CouchDatabase
+ """
# TODO: open couch
from leap.soledad.backends.couch import CouchDatabase
return CouchDatabase.open_database(self.couch_url + '/' + dbname,
create=False)
def ensure_database(self, dbname):
- """Ensure database at the given location."""
+ """
+ Ensure couch database exists.
+
+ @param dbname: The name of the database to ensure.
+ @type dbname: str
+
+ @return: The CouchDatabase object and the replica uid.
+ @rtype: (CouchDatabase, str)
+ """
from leap.soledad.backends.couch import CouchDatabase
db = CouchDatabase.open_database(self.couch_url + '/' + dbname,
create=True)
return db, db._replica_uid
def delete_database(self, dbname):
- """Delete database at the given location."""
+ """
+ Delete couch database.
+
+ @param dbname: The name of the database to delete.
+ @type dbname: str
+ """
from leap.soledad.backends.couch import CouchDatabase
CouchDatabase.delete_database(self.couch_url + '/' + dbname)
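For orientation, a minimal usage sketch of the CouchDatabase API documented in the hunks above (open_database, get_all_docs, sync, close); the server URL and database name below are placeholder values, not taken from the patch:

    from leap.soledad.backends.couch import CouchDatabase

    # Open (or create) a replica on a CouchDB server.
    db = CouchDatabase.open_database('http://localhost:5984/example-db',
                                     create=True)
    # Current generation plus all documents in the replica.
    generation, docs = db.get_all_docs()
    # Synchronize with another replica; returns the local generation
    # before the synchronization was performed.
    db.sync('http://localhost:5984/other-replica')
    db.close()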
diff --git a/src/leap/soledad/backends/leap_backend.py b/src/leap/soledad/backends/leap_backend.py
index a37f9d25..3110c662 100644
--- a/src/leap/soledad/backends/leap_backend.py
+++ b/src/leap/soledad/backends/leap_backend.py
@@ -1,21 +1,39 @@
+# -*- coding: utf-8 -*-
+# leap_backend.py
+# Copyright (C) 2013 LEAP
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
"""
-A U1DB backend that encrypts data before sending to server and decrypts after
-receiving.
+A U1DB backend for encrypting data before sending to server and decrypting
+after receiving.
"""
+import uuid
try:
import simplejson as json
except ImportError:
import json # noqa
+
from u1db import Document
from u1db.remote import utils
from u1db.remote.http_target import HTTPSyncTarget
from u1db.remote.http_database import HTTPDatabase
from u1db.errors import BrokenSyncStream
-import uuid
-
class NoDefaultKey(Exception):
"""
@@ -33,7 +51,7 @@ class NoSoledadInstance(Exception):
class DocumentNotEncrypted(Exception):
"""
- Exception to signal failures in document encryption.
+ Raised for failures in document encryption.
"""
pass
@@ -45,10 +63,33 @@ class LeapDocument(Document):
LEAP Documents are standard u1db documents with the capability of returning an
encrypted version of the document json string as well as setting document
content based on an encrypted version of json string.
+
+ Also, LEAP Documents can be flagged as syncable or not, so the replicas
+ might not sync every document.
"""
def __init__(self, doc_id=None, rev=None, json='{}', has_conflicts=False,
encrypted_json=None, soledad=None, syncable=True):
+ """
+ Container for handling an encryptable document.
+
+ @param doc_id: The unique document identifier.
+ @type doc_id: str
+ @param rev: The revision identifier of the document.
+ @type rev: str
+ @param json: The JSON string for this document.
+ @type json: str
+ @param has_conflicts: Boolean indicating if this document has conflicts
+ @type has_conflicts: bool
+ @param encrypted_json: The encrypted JSON string for this document. If
+ given, the decrypted value supersedes any raw json string given.
+ @type encrypted_json: str
+ @param soledad: An instance of Soledad so we can encrypt/decrypt
+ document contents when syncing.
+ @type soledad: soledad.Soledad
+ @param syncable: Should this document be synced with remote replicas?
+ @type syncable: bool
+ """
super(LeapDocument, self).__init__(doc_id, rev, json, has_conflicts)
self._soledad = soledad
self._syncable = syncable
@@ -58,6 +99,9 @@ class LeapDocument(Document):
def get_encrypted_content(self):
"""
Return an encrypted JSON serialization of document's contents.
+
+ @return: The encrypted JSON serialization of the document's contents.
+ @rtype: str
"""
if not self._soledad:
raise NoSoledadInstance()
@@ -66,16 +110,19 @@ class LeapDocument(Document):
def set_encrypted_content(self, cyphertext):
"""
- Set document's content based on an encrypted JSON serialization of
- contents.
+ Decrypt C{cyphertext} and set document's content.
"""
plaintext = self._soledad.decrypt_symmetric(self.doc_id, cyphertext)
- return self.set_json(plaintext)
+ self.set_json(plaintext)
def get_encrypted_json(self):
"""
Return a valid JSON string containing document's content encrypted to
the user's public key.
+
+ @return: The encrypted JSON string.
+ @rtype: str
"""
return json.dumps({'_encrypted_json': self.get_encrypted_content()})
@@ -90,9 +137,21 @@ class LeapDocument(Document):
self.set_encrypted_content(cyphertext)
def _get_syncable(self):
+ """
+ Return whether this document is syncable.
+
+ @return: Is this document syncable?
+ @rtype: bool
+ """
return self._syncable
def _set_syncable(self, syncable=True):
+ """
+ Determine if this document should be synced with remote replicas.
+
+ @param syncable: Should this document be synced with remote replicas?
+ @type syncable: bool
+ """
self._syncable = syncable
syncable = property(
@@ -101,15 +160,28 @@ class LeapDocument(Document):
doc="Determine if document should be synced with server."
)
- # Returning the revision as string solves the following exception in
- # Twisted web:
- # exceptions.TypeError: Can only pass-through bytes on Python 2
def _get_rev(self):
+ """
+ Get the document revision.
+
+ Returning the revision as string solves the following exception in
+ Twisted web:
+ exceptions.TypeError: Can only pass-through bytes on Python 2
+
+ @return: The document revision.
+ @rtype: str
+ """
if self._rev is None:
return None
return str(self._rev)
def _set_rev(self, rev):
+ """
+ Set document revision.
+
+ @param rev: The new document revision.
+ @type rev: bytes
+ """
self._rev = rev
rev = property(
@@ -125,6 +197,18 @@ class LeapSyncTarget(HTTPSyncTarget):
"""
def __init__(self, url, creds=None, soledad=None):
+ """
+ Initialize the LeapSyncTarget.
+
+ @param url: The url of the target replica to sync with.
+ @type url: str
+ @param creds: optional dictionary giving credentials to authorize the
+ operation with the server.
+ @type creds: dict
+ @param soledad: An instance of Soledad so we can encrypt/decrypt
+ document contents when syncing.
+ @type soledad: soledad.Soledad
+ """
super(LeapSyncTarget, self).__init__(url, creds)
self._soledad = soledad
@@ -132,6 +216,17 @@ class LeapSyncTarget(HTTPSyncTarget):
"""
Does the same as parent's method but ensures incoming content will be
decrypted.
+
+ @param data: The body of the HTTP response.
+ @type data: str
+ @param return_doc_cb: A callback to insert docs from target.
+ @type return_doc_cb: function
+ @param ensure_callback: A callback to ensure we have the correct
+ target_replica_uid, if it was just created.
+ @type ensure_callback: function
+
+ @return: The parsed sync stream.
+ @rtype: list of str
"""
parts = data.splitlines() # one at a time
if not parts or parts[0] != '[':
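A short, illustrative-only sketch of the LeapDocument behaviour described above (the syncable flag and the encrypted JSON export); the document id and content are hypothetical values:

    from leap.soledad.backends.leap_backend import LeapDocument

    # A document flagged as not syncable will be skipped by replicas.
    doc = LeapDocument(doc_id='doc-1', json='{"secret": "value"}')
    doc.syncable = False
    # With a Soledad instance (a hypothetical `soledad` object providing
    # encrypt/decrypt), the content can be exported encrypted:
    #   doc = LeapDocument(doc_id='doc-1', json='{...}', soledad=soledad)
    #   cyphertext = doc.get_encrypted_json()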
diff --git a/src/leap/soledad/backends/objectstore.py b/src/leap/soledad/backends/objectstore.py
index 7c5d1177..38de421f 100644
--- a/src/leap/soledad/backends/objectstore.py
+++ b/src/leap/soledad/backends/objectstore.py
@@ -1,9 +1,29 @@
+# -*- coding: utf-8 -*-
+# objectstore.py
+# Copyright (C) 2013 LEAP
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
"""
Abstract U1DB backend to handle storage using object stores (like CouchDB, for
-example.
+example).
Right now, this is only used by CouchDatabase backend, but can also be
extended to implement OpenStack or Amazon S3 storage, for example.
+
+See U1DB documentation for more information on how to use databases.
"""
from u1db.backends.inmemory import (
@@ -20,9 +40,32 @@ class ObjectStoreDatabase(InMemoryDatabase):
@classmethod
def open_database(cls, url, create, document_factory=None):
+ """
+ Open a U1DB database using an object store as backend.
+
+ @param url: the url of the database replica
+ @type url: str
+ @param create: should the replica be created if it does not exist?
+ @type create: bool
+ @param document_factory: A function that will be called with the same
+ parameters as Document.__init__.
+ @type document_factory: callable
+
+ @return: the database instance
+ @rtype: CouchDatabase
+ """
raise NotImplementedError(cls.open_database)
def __init__(self, replica_uid=None, document_factory=None):
+ """
+ Initialize the object store database.
+
+ @param replica_uid: an optional unique replica identifier
+ @type replica_uid: str
+ @param document_factory: A function that will be called with the same
+ parameters as Document.__init__.
+ @type document_factory: callable
+ """
super(ObjectStoreDatabase, self).__init__(
replica_uid,
document_factory=document_factory)
@@ -36,20 +79,73 @@ class ObjectStoreDatabase(InMemoryDatabase):
#-------------------------------------------------------------------------
def _set_replica_uid(self, replica_uid):
+ """
+ Force the replica_uid to be set.
+
+ @param replica_uid: The uid of the replica.
+ @type replica_uid: str
+ """
super(ObjectStoreDatabase, self)._set_replica_uid(replica_uid)
self._store_u1db_data()
def _put_doc(self, doc):
+ """
+ Update a document.
+
+ This is called every time we just want to do a raw put on the db (i.e.
+ without index updates, document constraint checks, and conflict
+ checks).
+
+ @param doc: The document to update.
+ @type doc: u1db.Document
+
+ @return: The new revision identifier for the document.
+ @rtype: str
+ """
raise NotImplementedError(self._put_doc)
- def _get_doc(self, doc):
+ def _get_doc(self, doc_id):
+ """
+ Get just the document content, without fancy handling.
+
+ @param doc_id: The unique document identifier
+ @type doc_id: str
+
+ @return: a Document object.
+ @rtype: u1db.Document
+ """
raise NotImplementedError(self._get_doc)
def get_all_docs(self, include_deleted=False):
+ """
+ Get the JSON content for all documents in the database.
+
+ @param include_deleted: If set to True, deleted documents will be
+ returned with empty content. Otherwise deleted documents will not
+ be included in the results.
+ @type include_deleted: bool
+
+ @return: (generation, [Document])
+ The current generation of the database, followed by a list of all
+ the documents in the database.
+ @rtype: tuple
+ """
raise NotImplementedError(self.get_all_docs)
def delete_doc(self, doc):
- """Mark a document as deleted."""
+ """
+ Mark a document as deleted.
+
+ @param doc: The document to mark as deleted.
+ @type doc: u1db.Document
+
+ @return: The new revision id of the document.
+ @rtype: str
+ """
old_doc = self._get_doc(doc.doc_id, check_for_conflicts=True)
if old_doc is None:
raise errors.DocumentDoesNotExist
@@ -69,22 +165,61 @@ class ObjectStoreDatabase(InMemoryDatabase):
def create_index(self, index_name, *index_expressions):
"""
- Create an named index, which can then be queried for future lookups.
+ Create a named index, which can then be queried for future lookups.
+
+ See U1DB documentation for more information.
+
+ @param index_name: A unique name which can be used as a key prefix.
+ @param index_expressions: Index expressions defining the index
+ information.
"""
raise NotImplementedError(self.create_index)
def delete_index(self, index_name):
- """Remove a named index."""
+ """
+ Remove a named index.
+
+ Here we just guarantee that the new info will be stored in the backend
+ db after update.
+
+ @param index_name: The name of the index we are removing.
+ @type index_name: str
+ """
super(ObjectStoreDatabase, self).delete_index(index_name)
self._store_u1db_data()
def _replace_conflicts(self, doc, conflicts):
+ """
+ Set new conflicts for a document.
+
+ Here we just guarantee that the new info will be stored in the backend
+ db after update.
+
+ @param doc: The document with a new set of conflicts.
+ @param conflicts: The new set of conflicts.
+ @type conflicts: list
+ """
super(ObjectStoreDatabase, self)._replace_conflicts(doc, conflicts)
self._store_u1db_data()
def _do_set_replica_gen_and_trans_id(self, other_replica_uid,
other_generation,
other_transaction_id):
+ """
+ Set the last-known generation and transaction id for the other
+ database replica.
+
+ Here we just guarantee that the new info will be stored in the backend
+ db after update.
+
+ @param other_replica_uid: The U1DB identifier for the other replica.
+ @type other_replica_uid: str
+ @param other_generation: The generation number for the other replica.
+ @type other_generation: int
+ @param other_transaction_id: The transaction id associated with the
+ generation.
+ @type other_transaction_id: str
+ """
super(ObjectStoreDatabase, self)._do_set_replica_gen_and_trans_id(
other_replica_uid,
other_generation,
@@ -96,6 +231,14 @@ class ObjectStoreDatabase(InMemoryDatabase):
#-------------------------------------------------------------------------
def _put_and_update_indexes(self, old_doc, doc):
+ """
+ Update a document and all indexes related to it.
+
+ @param old_doc: The old version of the document.
+ @type old_doc: u1db.Document
+ @param doc: The new version of the document.
+ @type doc: u1db.Document
+ """
for index in self._indexes.itervalues():
if old_doc is not None and not old_doc.is_tombstone():
index.remove_json(old_doc.doc_id, old_doc.get_json())
@@ -115,21 +258,37 @@ class ObjectStoreDatabase(InMemoryDatabase):
def _fetch_u1db_data(self):
"""
Fetch u1db configuration data from backend storage.
+
+ See C{_init_u1db_data} documentation.
"""
NotImplementedError(self._fetch_u1db_data)
def _store_u1db_data(self):
"""
- Save u1db configuration data on backend storage.
+ Store u1db configuration data on backend storage.
+
+ See C{_init_u1db_data} documentation.
"""
NotImplementedError(self._store_u1db_data)
def _init_u1db_data(self):
"""
Initialize u1db configuration data on backend storage.
+
+ A U1DB database needs to keep track of all database transactions,
+ document conflicts, the generation of other replicas it has seen,
+ indexes created by users and so on.
+
+ In this implementation, all this information is stored in a special
+ document stored in the couch db with id equal to
+ CouchDatabase.U1DB_DATA_DOC_ID.
+
+ This method initializes the document that will hold such information.
"""
NotImplementedError(self._init_u1db_data)
class ObjectStoreSyncTarget(InMemorySyncTarget):
- pass
+ """
+ Functionality for using an ObjectStore as a synchronization target.
+ """
diff --git a/src/leap/soledad/backends/sqlcipher.py b/src/leap/soledad/backends/sqlcipher.py
index ab74bab1..9e3c38c9 100644
--- a/src/leap/soledad/backends/sqlcipher.py
+++ b/src/leap/soledad/backends/sqlcipher.py
@@ -1,3 +1,21 @@
+# -*- coding: utf-8 -*-
+# sqlcipher.py
+# Copyright (C) 2013 LEAP
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
"""A U1DB backend that uses SQLCipher as its persistence layer."""
import os
@@ -26,12 +44,17 @@ def open(path, password, create=True, document_factory=None, soledad=None):
Will raise u1db.errors.DatabaseDoesNotExist if create=False and the
database does not already exist.
- :param path: The filesystem path for the database to open.
- :param create: True/False, should the database be created if it doesn't
+ @param path: The filesystem path for the database to open.
+ @type path: str
+ @param create: True/False, should the database be created if it doesn't
already exist?
- :param document_factory: A function that will be called with the same
+ @type create: bool
+ @param document_factory: A function that will be called with the same
parameters as Document.__init__.
- :return: An instance of Database.
+ @type document_factory: callable
+
+ @return: An instance of Database.
+ @rtype: SQLCipherDatabase
"""
return SQLCipherDatabase.open_database(
path, password, create=create, document_factory=document_factory,
@@ -54,11 +77,24 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
def set_pragma_key(cls, db_handle, key):
db_handle.cursor().execute("PRAGMA key = '%s'" % key)
- def __init__(self, sqlite_file, password, document_factory=None,
+ def __init__(self, sqlcipher_file, password, document_factory=None,
soledad=None):
- """Create a new sqlcipher file."""
- self._check_if_db_is_encrypted(sqlite_file)
- self._db_handle = dbapi2.connect(sqlite_file)
+ """
+ Create a new sqlcipher file.
+
+ @param sqlcipher_file: The path for the SQLCipher file.
+ @type sqlcipher_file: str
+ @param password: The password that protects the SQLCipher db.
+ @type password: str
+ @param document_factory: A function that will be called with the same
+ parameters as Document.__init__.
+ @type document_factory: callable
+ @param soledad: An instance of Soledad so we can encrypt/decrypt
+ document contents when syncing.
+ @type soledad: soledad.Soledad
+ """
+ self._check_if_db_is_encrypted(sqlcipher_file)
+ self._db_handle = dbapi2.connect(sqlcipher_file)
SQLCipherDatabase.set_pragma_key(self._db_handle, password)
self._real_replica_uid = None
self._ensure_schema()
@@ -72,29 +108,55 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
syncable=syncable, soledad=self._soledad)
self.set_document_factory(factory)
- def _check_if_db_is_encrypted(self, sqlite_file):
- if not os.path.exists(sqlite_file):
+ def _check_if_db_is_encrypted(self, sqlcipher_file):
+ """
+ Verify whether the local file is an encrypted database.
+
+ @param sqlcipher_file: The path for the SQLCipher file.
+ @type sqlcipher_file: str
+
+ @raise DatabaseIsNotEncrypted: if the database at C{sqlcipher_file} is
+ not encrypted.
+ """
+ if not os.path.exists(sqlcipher_file):
return
else:
try:
# try to open an encrypted database with the regular u1db
# backend should raise a DatabaseError exception.
- sqlite_backend.SQLitePartialExpandDatabase(sqlite_file)
+ sqlite_backend.SQLitePartialExpandDatabase(sqlcipher_file)
raise DatabaseIsNotEncrypted()
except dbapi2.DatabaseError:
pass
@classmethod
- def _open_database(cls, sqlite_file, password, document_factory=None,
+ def _open_database(cls, sqlcipher_file, password, document_factory=None,
soledad=None):
- if not os.path.isfile(sqlite_file):
+ """
+ Open a SQLCipher database.
+
+ @param sqlcipher_file: The path for the SQLCipher file.
+ @type sqlcipher_file: str
+ @param password: The password that protects the SQLCipher db.
+ @type password: str
+ @param document_factory: A function that will be called with the same
+ parameters as Document.__init__.
+ @type document_factory: callable
+ @param soledad: An instance of Soledad so we can encrypt/decrypt
+ document contents when syncing.
+ @type soledad: soledad.Soledad
+
+ @return: The database object.
+ @rtype: SQLCipherDatabase
+ """
+ if not os.path.isfile(sqlcipher_file):
raise errors.DatabaseDoesNotExist()
tries = 2
while True:
# Note: There seems to be a bug in sqlite 3.5.9 (with python2.6)
# where without re-opening the database on Windows, it
# doesn't see the transaction that was just committed
- db_handle = dbapi2.connect(sqlite_file)
+ db_handle = dbapi2.connect(sqlcipher_file)
SQLCipherDatabase.set_pragma_key(db_handle, password)
c = db_handle.cursor()
v, err = cls._which_index_storage(c)
@@ -108,30 +170,63 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
tries -= 1
time.sleep(cls.WAIT_FOR_PARALLEL_INIT_HALF_INTERVAL)
return SQLCipherDatabase._sqlite_registry[v](
- sqlite_file, password, document_factory=document_factory,
+ sqlcipher_file, password, document_factory=document_factory,
soledad=soledad)
@classmethod
- def open_database(cls, sqlite_file, password, create, backend_cls=None,
+ def open_database(cls, sqlcipher_file, password, create, backend_cls=None,
document_factory=None, soledad=None):
- """Open U1DB database using SQLCipher as backend."""
+ """
+ Open a SQLCipher database.
+
+ @param sqlcipher_file: The path for the SQLCipher file.
+ @type sqlcipher_file: str
+ @param password: The password that protects the SQLCipher db.
+ @type password: str
+ @param create: Should the database be created if it does not already
+ exist?
+ @type create: bool
+ @param backend_cls: A class to use as backend.
+ @type backend_cls: type
+ @param document_factory: A function that will be called with the same
+ parameters as Document.__init__.
+ @type document_factory: callable
+ @param soledad: An instance of Soledad so we can encrypt/decrypt
+ document contents when syncing.
+ @type soledad: soledad.Soledad
+
+ @return: The database object.
+ @rtype: SQLCipherDatabase
+ """
try:
- return cls._open_database(sqlite_file, password,
+ return cls._open_database(sqlcipher_file, password,
document_factory=document_factory,
soledad=soledad)
except errors.DatabaseDoesNotExist:
if not create:
raise
+ # TODO: remove backend class from here.
if backend_cls is None:
# default is SQLCipherPartialExpandDatabase
backend_cls = SQLCipherDatabase
- return backend_cls(sqlite_file, password,
+ return backend_cls(sqlcipher_file, password,
document_factory=document_factory,
soledad=soledad)
def sync(self, url, creds=None, autocreate=True):
"""
- Synchronize encrypted documents with remote replica exposed at url.
+ Synchronize documents with remote replica exposed at url.
+
+ @param url: The url of the target replica to sync with.
+ @type url: str
+ @param creds: optional dictionary giving credentials to authorize the
+ operation with the server.
+ @type creds: dict
+ @param autocreate: Ask the target to create the db if non-existent.
+ @type autocreate: bool
+
+ @return: The local generation before the synchronisation was performed.
+ @rtype: int
"""
from u1db.sync import Synchronizer
from leap.soledad.backends.leap_backend import LeapSyncTarget
@@ -142,17 +237,44 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
soledad=self._soledad)).sync(autocreate=autocreate)
def _extra_schema_init(self, c):
+ """
+ Add any extra fields, etc to the basic table definitions.
+
+ @param c: The cursor for querying the database.
+ @type c: dbapi2.cursor
+ """
c.execute(
'ALTER TABLE document '
'ADD COLUMN syncable BOOL NOT NULL DEFAULT TRUE')
def _put_and_update_indexes(self, old_doc, doc):
+ """
+ Update a document and all indexes related to it.
+
+ @param old_doc: The old version of the document.
+ @type old_doc: u1db.Document
+ @param doc: The new version of the document.
+ @type doc: u1db.Document
+ """
super(SQLCipherDatabase, self)._put_and_update_indexes(old_doc, doc)
c = self._db_handle.cursor()
c.execute('UPDATE document SET syncable=? WHERE doc_id=?',
(doc.syncable, doc.doc_id))
def _get_doc(self, doc_id, check_for_conflicts=False):
+ """
+ Get just the document content, without fancy handling.
+
+ @param doc_id: The unique document identifier
+ @type doc_id: str
+ @param check_for_conflicts: If set to True, the document is also
+ checked for conflicts and its has_conflicts flag is set accordingly.
+ @type check_for_conflicts: bool
+
+ @return: a Document object.
+ @rtype: u1db.Document
+ """
doc = super(SQLCipherDatabase, self)._get_doc(doc_id,
check_for_conflicts)
if doc:
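For completeness, a minimal sketch of the module-level open() documented earlier in this file's diff; the path and passphrase are example values only, and the Soledad instance normally passed for sync-time encryption is omitted:

    from leap.soledad.backends import sqlcipher

    # Creates /tmp/example.u1db if it does not exist; the passphrase is
    # applied as the SQLCipher PRAGMA key.
    db = sqlcipher.open('/tmp/example.u1db', 'a passphrase', create=True)
    generation, docs = db.get_all_docs()  # standard u1db read API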