author     drebs <drebs@leap.se>    2014-06-05 10:21:29 -0300
committer  drebs <drebs@leap.se>    2014-06-05 10:45:29 -0300
commit     08a2f350690b3a66212f3d4f63b24b20f682f88e
tree       202bd62f642ba3e1020e0349285db9797d1b7032
parent     68443c469e224d93fc36c7c1014191e883edaf67
Move the syncing lock to inside SQLCipherDatabase.
Diffstat (limited to 'client/src')
-rw-r--r--  client/src/leap/soledad/client/__init__.py   | 11
-rw-r--r--  client/src/leap/soledad/client/sqlcipher.py  | 26
2 files changed, 23 insertions(+), 14 deletions(-)
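In short: the commit moves the per-replica syncing lock out of the Soledad class and into SQLCipherDatabase, where a contextmanager-based accessor holds the lock for the whole duration of a sync. A minimal sketch of that pattern follows; only the names syncing_lock, syncer and _get_replica_uid mirror the diff, the ExampleDatabase class and its sync body are illustrative stand-ins.

# Minimal sketch of the locking pattern this commit introduces. ExampleDatabase
# and its sync body are illustrative stand-ins, not the real SQLCipherDatabase.
import threading
from collections import defaultdict
from contextlib import contextmanager

class ExampleDatabase(object):

    # one lock per replica uid, created lazily on first access
    syncing_lock = defaultdict(threading.Lock)

    def __init__(self, replica_uid):
        self._replica_uid = replica_uid

    def _get_replica_uid(self):
        return self._replica_uid

    @contextmanager
    def syncer(self, url, creds=None):
        # block until no other caller is syncing this same replica
        with ExampleDatabase.syncing_lock[self._get_replica_uid()]:
            yield (url, creds)  # stand-in for the real synchronizer object

    def sync(self, url, creds=None, autocreate=False):
        # the lock is held for as long as the with-block below runs
        with self.syncer(url, creds=creds) as syncer:
            return 0  # a real implementation would run the u1db sync here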
diff --git a/client/src/leap/soledad/client/__init__.py b/client/src/leap/soledad/client/__init__.py
index 656c0e77..0d3a21fd 100644
--- a/client/src/leap/soledad/client/__init__.py
+++ b/client/src/leap/soledad/client/__init__.py
@@ -34,8 +34,6 @@ import urlparse
import hmac
from hashlib import sha256
-from threading import Lock
-from collections import defaultdict
try:
import cchardet as chardet
@@ -224,12 +222,6 @@ class Soledad(object):
Prefix for default values for path.
"""
- syncing_lock = defaultdict(Lock)
- """
- A dictionary that holds locks which prevent multiple sync attempts from the
- same database replica.
- """
-
def __init__(self, uuid, passphrase, secrets_path, local_db_path,
server_url, cert_file, auth_token=None, secret_id=None):
"""
@@ -1064,9 +1056,6 @@ class Soledad(object):
:rtype: str
"""
if self._db:
- # acquire lock before attempt to sync
- # TODO: move this lock to inside SQLCipherDatabase.
- with Soledad.syncing_lock[self._db._get_replica_uid()]:
local_gen = self._db.sync(
urlparse.urljoin(self.server_url, 'user-%s' % self._uuid),
creds=self._creds, autocreate=False)
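With the lock removed from the client class, Soledad.sync() reduces to a plain delegation; serializing concurrent syncs is now the database's job. A rough outline of the resulting caller (only the self._db.sync(...) call comes from the diff above, the surrounding method body is assumed):

# Rough outline of the caller after the lock removal (assumed shape; only the
# self._db.sync(...) call itself appears in the diff above).
import urlparse

class SoledadOutline(object):

    def sync(self):
        if self._db:
            # no lock here any more: SQLCipherDatabase.sync() serializes
            # concurrent syncs of the same replica internally
            local_gen = self._db.sync(
                urlparse.urljoin(self.server_url, 'user-%s' % self._uuid),
                creds=self._creds, autocreate=False)
            return local_gen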
diff --git a/client/src/leap/soledad/client/sqlcipher.py b/client/src/leap/soledad/client/sqlcipher.py
index 576b51ad..5ffa9c7e 100644
--- a/client/src/leap/soledad/client/sqlcipher.py
+++ b/client/src/leap/soledad/client/sqlcipher.py
@@ -52,6 +52,7 @@ import json
from hashlib import sha256
from contextlib import contextmanager
+from collections import defaultdict
from pysqlcipher import dbapi2
from u1db.backends import sqlite_backend
@@ -153,6 +154,13 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
create_doc_lock = threading.Lock()
update_indexes_lock = threading.Lock()
+ syncing_lock = defaultdict(threading.Lock)
+ """
+ A dictionary that holds locks which prevent multiple sync attempts from the
+ same database replica.
+ """
+
+
def __init__(self, sqlcipher_file, password, document_factory=None,
crypto=None, raw_key=False, cipher='aes-256-cbc',
kdf_iter=4000, cipher_page_size=1024):
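The defaultdict added above means each replica uid lazily gets its own lock, so syncs of different replicas never contend with each other; only repeated syncs of the same replica share a lock. A small stand-alone illustration (the replica uids are made up):

# Illustration only: defaultdict(threading.Lock) hands out one independent
# lock per key, created lazily the first time that key is looked up.
import threading
from collections import defaultdict

syncing_lock = defaultdict(threading.Lock)

lock_a = syncing_lock['replica-a']
lock_b = syncing_lock['replica-b']

assert lock_a is syncing_lock['replica-a']  # same replica uid -> same lock
assert lock_a is not lock_b                 # different replicas never contend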
@@ -343,6 +351,10 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
"""
Synchronize documents with remote replica exposed at url.
+ There can be at most one instance syncing the same database replica at
+ the same time, so this method will block until the syncing lock can be
+ acquired.
+
:param url: The url of the target replica to sync with.
:type url: str
:param creds: optional dictionary giving credentials.
@@ -355,6 +367,8 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
:rtype: int
"""
res = None
+ # the following context manager blocks until the syncing lock can be
+ # acquired.
with self.syncer(url, creds=creds) as syncer:
res = syncer.sync(autocreate=autocreate)
return res
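The blocking described in the comment above can be seen with two threads syncing the same replica: the second call only proceeds once the first releases the lock. A stand-in demonstration (the names and the sleep are placeholders, not Soledad code):

# Stand-in demonstration of the blocking behaviour: two concurrent sync()
# calls on the same replica run strictly one after the other.
import threading
import time
from collections import defaultdict
from contextlib import contextmanager

syncing_lock = defaultdict(threading.Lock)

@contextmanager
def syncer(replica_uid):
    # block until the previous sync of this replica has finished
    with syncing_lock[replica_uid]:
        yield

def sync(replica_uid):
    with syncer(replica_uid):
        time.sleep(0.1)  # placeholder for the actual synchronization work

start = time.time()
threads = [threading.Thread(target=sync, args=('replica-1',)) for _ in range(2)]
for t in threads:
    t.start()
for t in threads:
    t.join()
elapsed = time.time() - start  # roughly 0.2s: the second sync waited its turn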
@@ -371,10 +385,16 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
def syncer(self, url, creds=None):
"""
Accessor for the synchronizer.
+
+ As we reuse the same synchronizer for every sync, there can be only
+ one instance synchronizing the same database replica at the same time.
+ Because of that, this method blocks until the syncing lock can be
+ acquired.
"""
- syncer = self._get_syncer(url, creds=creds)
- yield syncer
- syncer.sync_target.close()
+ with SQLCipherDatabase.syncing_lock[self._get_replica_uid()]:
+ syncer = self._get_syncer(url, creds=creds)
+ yield syncer
+ syncer.sync_target.close()
def _get_syncer(self, url, creds=None):
"""