Diffstat (limited to 'client/src')
-rw-r--r--  client/src/leap/soledad/client/adbapi.py               |  8
-rw-r--r--  client/src/leap/soledad/client/api.py                  | 52
-rw-r--r--  client/src/leap/soledad/client/examples/use_adbapi.py  |  4
-rw-r--r--  client/src/leap/soledad/client/examples/use_api.py     |  4
-rw-r--r--  client/src/leap/soledad/client/interfaces.py           | 15
-rw-r--r--  client/src/leap/soledad/client/shared_db.py            |  9
-rw-r--r--  client/src/leap/soledad/client/sqlcipher.py            | 35
7 files changed, 47 insertions, 80 deletions
diff --git a/client/src/leap/soledad/client/adbapi.py b/client/src/leap/soledad/client/adbapi.py
index 733fce23..0cdc90eb 100644
--- a/client/src/leap/soledad/client/adbapi.py
+++ b/client/src/leap/soledad/client/adbapi.py
@@ -86,7 +86,7 @@ class U1DBConnectionPool(adbapi.ConnectionPool):
def __init__(self, *args, **kwargs):
adbapi.ConnectionPool.__init__(self, *args, **kwargs)
# all u1db connections, hashed by thread-id
- self.u1dbconnections = {}
+ self._u1dbconnections = {}
# The replica uid, primed by the connections on init.
self.replica_uid = ProxyBase(None)
@@ -101,7 +101,7 @@ class U1DBConnectionPool(adbapi.ConnectionPool):
def _runInteraction(self, interaction, *args, **kw):
tid = self.threadID()
- u1db = self.u1dbconnections.get(tid)
+ u1db = self._u1dbconnections.get(tid)
conn = self.connectionFactory(self, init_u1db=not bool(u1db))
if self.replica_uid is None:
@@ -110,7 +110,7 @@ class U1DBConnectionPool(adbapi.ConnectionPool):
print "GOT REPLICA UID IN DBPOOL", self.replica_uid
if u1db is None:
- self.u1dbconnections[tid] = conn._u1db
+ self._u1dbconnections[tid] = conn._u1db
else:
conn._u1db = u1db
@@ -134,6 +134,6 @@ class U1DBConnectionPool(adbapi.ConnectionPool):
self.running = False
for conn in self.connections.values():
self._close(conn)
- for u1db in self.u1dbconnections.values():
+ for u1db in self._u1dbconnections.values():
self._close(u1db)
self.connections.clear()
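The rename above is cosmetic: the pool still keeps one u1db connection per worker thread, hashed by thread id, and closes them all on shutdown. A minimal, self-contained sketch of that caching pattern, with sqlite3 and threading standing in for the real u1db/adbapi pieces (every name here is illustrative):

    import sqlite3
    import threading

    class PerThreadConnections(object):
        """
        Illustrative stand-in for the pool's `_u1dbconnections` dict: one
        connection per thread, keyed by thread id, closed in one sweep.
        """

        def __init__(self, db_path):
            self._db_path = db_path
            self._connections = {}          # thread-id -> connection
            self._lock = threading.Lock()

        def get(self):
            tid = threading.current_thread().ident
            with self._lock:
                conn = self._connections.get(tid)
                if conn is None:
                    # check_same_thread=False, as elsewhere in this changeset,
                    # so the pool may later close the connection from another
                    # thread.
                    conn = sqlite3.connect(self._db_path,
                                           check_same_thread=False)
                    self._connections[tid] = conn
            return conn

        def close_all(self):
            with self._lock:
                for conn in self._connections.values():
                    conn.close()
                self._connections.clear()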
diff --git a/client/src/leap/soledad/client/api.py b/client/src/leap/soledad/client/api.py
index 7886f397..00884a12 100644
--- a/client/src/leap/soledad/client/api.py
+++ b/client/src/leap/soledad/client/api.py
@@ -105,7 +105,7 @@ class Soledad(object):
"""
implements(soledad_interfaces.ILocalStorage,
soledad_interfaces.ISyncableStorage,
- soledad_interfaces.ISharedSecretsStorage)
+ soledad_interfaces.ISecretsStorage)
local_db_file_name = 'soledad.u1db'
secrets_file_name = "soledad.json"
@@ -292,10 +292,7 @@ class Soledad(object):
call.
============================== WARNING ==============================
"""
- # TODO what happens with this warning during the deferred life cycle?
- # Isn't it better to defend ourselves from the mutability, to avoid
- # nasty surprises?
- doc.content = self._convert_to_unicode(doc.content)
+ doc.content = _convert_to_unicode(doc.content)
return self._defer("put_doc", doc)
def delete_doc(self, doc):
@@ -388,7 +385,7 @@ class Soledad(object):
soledad_events.SOLEDAD_DONE_DATA_SYNC, self.uuid)
return local_gen
- sync_url = urlparse.urljoin(self.server_url, 'user-%s' % self.uuid)
+ sync_url = urlparse.urljoin(self._server_url, 'user-%s' % self.uuid)
try:
d = self._dbsyncer.sync(
sync_url,
@@ -405,29 +402,6 @@ class Soledad(object):
def stop_sync(self):
self._dbsyncer.stop_sync()
- # FIXME -------------------------------------------------------
- # review if we really need this. I think that we can the sync
- # fail silently if nothing is to be synced.
- #def need_sync(self, url):
- # XXX dispatch this method in the dbpool .................
- #replica_uid = self._dbpool.replica_uid
- #target = SoledadSyncTarget(
- #url, replica_uid, creds=self._creds, crypto=self._crypto)
-#
- # XXX does it matter if we get this from the general dbpool or the
- # syncer pool?
- #generation = self._dbpool.get_generation()
-#
- # XXX better unpack it?
- #info = target.get_sync_info(replica_uid)
-#
- # compare source generation with target's last known source generation
- #if generation != info[4]:
- #soledad_events.signal(
- #soledad_events.SOLEDAD_NEW_DATA_TO_SYNC, self.uuid)
- #return True
- #return False
-
@property
def syncing(self):
return self._dbsyncer.syncing
@@ -463,15 +437,8 @@ class Soledad(object):
token = property(_get_token, _set_token, doc='The authentication Token.')
- def _get_server_url(self):
- return self._server_url
-
- server_url = property(
- _get_server_url,
- doc='The URL of the Soledad server.')
-
#
- # ISharedSecretsStorage
+ # ISecretsStorage
#
def init_shared_db(self, server_url, uuid, creds, syncable=True):
@@ -483,17 +450,6 @@ class Soledad(object):
create=False, # db should exist at this point.
syncable=syncable)
- def _set_secrets_path(self, secrets_path):
- self._secrets.secrets_path = secrets_path
-
- def _get_secrets_path(self):
- return self._secrets.secrets_path
-
- secrets_path = property(
- _get_secrets_path,
- _set_secrets_path,
- doc='The path for the file containing the encrypted symmetric secret.')
-
@property
def storage_secret(self):
"""
diff --git a/client/src/leap/soledad/client/examples/use_adbapi.py b/client/src/leap/soledad/client/examples/use_adbapi.py
index d3ee8527..d7bd21f2 100644
--- a/client/src/leap/soledad/client/examples/use_adbapi.py
+++ b/client/src/leap/soledad/client/examples/use_adbapi.py
@@ -88,10 +88,10 @@ def allDone(_):
reactor.stop()
deferreds = []
+payload = open('manifest.phk').read()
for i in range(times):
- doc = {"number": i,
- "payload": open('manifest.phk').read()}
+ doc = {"number": i, "payload": payload}
d = createDoc(doc)
d.addCallbacks(printResult, lambda e: e.printTraceback())
deferreds.append(d)
diff --git a/client/src/leap/soledad/client/examples/use_api.py b/client/src/leap/soledad/client/examples/use_api.py
index 4268fe71..e2501c98 100644
--- a/client/src/leap/soledad/client/examples/use_api.py
+++ b/client/src/leap/soledad/client/examples/use_api.py
@@ -52,10 +52,10 @@ db = sqlcipher.SQLCipherDatabase(opts)
def allDone():
debug("ALL DONE!")
+payload = open('manifest.phk').read()
for i in range(times):
- doc = {"number": i,
- "payload": open('manifest.phk').read()}
+ doc = {"number": i, "payload": payload}
d = db.create_doc(doc)
debug(d.doc_id, d.content['number'])
diff --git a/client/src/leap/soledad/client/interfaces.py b/client/src/leap/soledad/client/interfaces.py
index 6bd3f200..4f7b0779 100644
--- a/client/src/leap/soledad/client/interfaces.py
+++ b/client/src/leap/soledad/client/interfaces.py
@@ -22,7 +22,8 @@ from zope.interface import Interface, Attribute
class ILocalStorage(Interface):
"""
- I implement core methods for the u1db local storage.
+ I implement core methods for the u1db local storage of documents and
+ indexes.
"""
local_db_path = Attribute(
"The path for the local database replica")
@@ -285,7 +286,6 @@ class ISyncableStorage(Interface):
I implement methods to synchronize with a remote replica.
"""
replica_uid = Attribute("The uid of the local replica")
- server_url = Attribute("The URL of the Soledad server.")
syncing = Attribute(
"Property, True if the syncer is syncing.")
token = Attribute("The authentication Token.")
@@ -317,12 +317,11 @@ class ISyncableStorage(Interface):
"""
-class ISharedSecretsStorage(Interface):
+class ISecretsStorage(Interface):
"""
- I implement methods needed for the Shared Recovery Database.
+ I implement methods needed for initializing and accessing secrets that are
+ synced against the Shared Recovery Database.
"""
- secrets_path = Attribute(
- "Path for storing encrypted key used for symmetric encryption.")
secrets_file_name = Attribute(
"The name of the file where the storage secrets will be stored")
@@ -332,7 +331,9 @@ class ISharedSecretsStorage(Interface):
# XXX this used internally from secrets, so it might be good to preserve
# as a public boundary with other components.
- secrets = Attribute("")
+
+ # We should also probably document its interface.
+ secrets = Attribute("A SoledadSecrets object containing access to secrets")
def init_shared_db(self, server_url, uuid, creds):
"""
diff --git a/client/src/leap/soledad/client/shared_db.py b/client/src/leap/soledad/client/shared_db.py
index 7ec71991..77a7db68 100644
--- a/client/src/leap/soledad/client/shared_db.py
+++ b/client/src/leap/soledad/client/shared_db.py
@@ -55,6 +55,8 @@ class SoledadSharedDatabase(http_database.HTTPDatabase, TokenBasedAuth):
# TODO: prevent client from messing with the shared DB.
# TODO: define and document API.
+ # If syncable is False, the database will not attempt to sync against
+ # a remote replica. Default is True.
syncable = True
#
@@ -109,6 +111,11 @@ class SoledadSharedDatabase(http_database.HTTPDatabase, TokenBasedAuth):
:param token: An authentication token for accessing the shared db.
:type token: str
+ :param syncable:
+ If syncable is False, the database will not attempt to sync against
+ a remote replica.
+ :type syncable: bool
+
:return: The shared database in the given url.
:rtype: SoledadSharedDatabase
"""
@@ -161,8 +168,6 @@ class SoledadSharedDatabase(http_database.HTTPDatabase, TokenBasedAuth):
:raise HTTPError: Raised if any HTTP error occurs.
"""
- # TODO ----- if the shared_db is not syncable, should not
- # attempt to resolve.
if self.syncable:
res, headers = self._request_json(
'PUT', ['lock', self._uuid], body={})
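The new `syncable` flag documented above simply short-circuits remote operations, as the `if self.syncable:` guard around the lock request shows. A small sketch of that guard pattern with hypothetical names (the real class wires the flag through its constructor, which this diff only hints at):

    class RemoteStore(object):
        """
        Hypothetical stand-in for a store with a `syncable` switch: when
        syncing is disabled, remote operations return without touching the
        network.
        """

        def __init__(self, request_json, syncable=True):
            self._request_json = request_json   # callable doing the HTTP work
            self.syncable = syncable

        def lock(self, uuid):
            if not self.syncable:
                return None
            res, _headers = self._request_json('PUT', ['lock', uuid], body={})
            return res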
diff --git a/client/src/leap/soledad/client/sqlcipher.py b/client/src/leap/soledad/client/sqlcipher.py
index c8e14176..323d78f1 100644
--- a/client/src/leap/soledad/client/sqlcipher.py
+++ b/client/src/leap/soledad/client/sqlcipher.py
@@ -69,7 +69,6 @@ from leap.soledad.client.target import SoledadSyncTarget
from leap.soledad.client.target import PendingReceivedDocsSyncError
from leap.soledad.client.sync import SoledadSynchronizer
-# TODO use adbapi too
from leap.soledad.client import pragmas
from leap.soledad.common import soledad_assert
from leap.soledad.common.document import SoledadDocument
@@ -115,7 +114,7 @@ def set_init_pragmas(conn, opts=None, extra_queries=None):
This includes the crypto pragmas, and any other options that must
be passed early to sqlcipher db.
"""
- assert opts is not None
+ soledad_assert(opts is not None)
extra_queries = [] if extra_queries is None else extra_queries
with _db_init_lock:
# only one execution path should initialize the db
@@ -196,8 +195,8 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
"""
defer_encryption = False
- # The attribute _index_storage_value will be used as the lookup key.
- # Here we extend it with `encrypted`
+ # The attribute _index_storage_value will be used as the lookup key for the
+ # implementation of the SQLCipher storage backend.
_index_storage_value = 'expand referenced encrypted'
def __init__(self, opts):
@@ -227,7 +226,7 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
# TODO ---------------------------------------------------
# Everything else in this initialization has to be factored
- # out, so it can be used from U1DBSqlcipherWrapper __init__
+ # out, so it can be used from SoledadSQLCipherWrapper.__init__
# too.
# ---------------------------------------------------------
@@ -406,6 +405,9 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
class SQLCipherU1DBSync(SQLCipherDatabase):
+ """
+ Soledad syncer implementation.
+ """
_sync_loop = None
_sync_enc_pool = None
@@ -454,7 +456,7 @@ class SQLCipherU1DBSync(SQLCipherDatabase):
# we store syncers in a dictionary indexed by the target URL. We also
# store a hash of the auth info in case auth info expires and we need
# to rebuild the syncer for that target. The final self._syncers
- # format is the following::
+ # format is the following:
#
# self._syncers = {'<url>': ('<auth_hash>', syncer), ...}
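The comment above documents the `_syncers` bookkeeping: one syncer per target URL, paired with a hash of the auth info so the syncer can be rebuilt when credentials change. A standalone sketch of that caching pattern; the factory callable and hashing choice are illustrative, only the dict layout comes from the comment:

    import hashlib

    class SyncerCache(object):
        """
        Illustrative sketch of the `_syncers` bookkeeping: one syncer per
        target URL, rebuilt whenever the auth info (and therefore its hash)
        changes.
        """

        def __init__(self, factory):
            self._factory = factory   # hypothetical callable(url, creds) -> syncer
            self._syncers = {}        # {'<url>': ('<auth_hash>', syncer), ...}

        def get_syncer(self, url, creds):
            auth_hash = hashlib.sha256(repr(creds).encode('utf-8')).hexdigest()
            cached = self._syncers.get(url)
            if cached is None or cached[0] != auth_hash:
                # no syncer for this target yet, or the auth info changed:
                # build a fresh one and replace the cache entry.
                self._syncers[url] = (auth_hash, self._factory(url, creds))
            return self._syncers[url][1]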
@@ -514,10 +516,12 @@ class SQLCipherU1DBSync(SQLCipherDatabase):
def init_db():
- # XXX DEBUG ---------------------------------------------
- import thread
- print "initializing in thread", thread.get_ident()
- # XXX DEBUG ---------------------------------------------
+ # XXX DEBUG -----------------------------------------
+ # REMOVE ME when merging.
+
+ #import thread
+ #print "initializing in thread", thread.get_ident()
+ # ---------------------------------------------------
self._db_handle = initialize_sqlcipher_db(
self._opts, check_same_thread=False)
@@ -553,11 +557,6 @@ class SQLCipherU1DBSync(SQLCipherDatabase):
else:
sync_db_path = ":memory:"
- # XXX use initialize_sqlcipher_db here too
- # TODO pass on_init queries to initialize_sqlcipher_db
- self._sync_db = None#MPSafeSQLiteDB(sync_db_path)
- pragmas.set_crypto_pragmas(self._sync_db, opts)
-
opts.path = sync_db_path
self._sync_db = initialize_sqlcipher_db(
@@ -799,6 +798,9 @@ class U1DBSQLiteBackend(sqlite_backend.SQLitePartialExpandDatabase):
Instead of initializing the database on the fly, it just uses an existing
connection that is passed to it in the initializer.
+
+ It can be used in tests and debug runs to initialize the adbapi with plain
+ sqlite connections, decoupled from the sqlcipher layer.
"""
def __init__(self, conn):
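Given the docstring above, wiring this backend up for a test or debug run is just a matter of handing it an already-open connection. A hedged sketch; the in-memory database is an assumption, only the pass-a-connection contract comes from the docstring:

    import sqlite3

    from leap.soledad.client.sqlcipher import U1DBSQLiteBackend

    # An in-memory database keeps the debug run fully decoupled from
    # sqlcipher; check_same_thread=False because adbapi will use the
    # connection from its pool threads.
    conn = sqlite3.connect(':memory:', check_same_thread=False)
    backend = U1DBSQLiteBackend(conn)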
@@ -814,6 +816,9 @@ class SoledadSQLCipherWrapper(SQLCipherDatabase):
Instead of initializing the database on the fly, it just uses an existing
connection that is passed to it in the initializer.
+
+ It can be used from adbapi to initialize a soledad database after
+ getting a regular connection to a sqlcipher database.
"""
def __init__(self, conn):
self._db_handle = conn
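Analogously, SoledadSQLCipherWrapper is handed a connection that has already been opened (and keyed) against a sqlcipher database. A hedged sketch; the pysqlcipher import, the path, and the PRAGMA key call are assumptions, only the pass-a-connection contract comes from the docstring:

    from pysqlcipher import dbapi2

    from leap.soledad.client.sqlcipher import SoledadSQLCipherWrapper

    conn = dbapi2.connect('/tmp/soledad-test.db')
    conn.cursor().execute("PRAGMA key = 'a passphrase'")
    wrapper = SoledadSQLCipherWrapper(conn)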