Diffstat (limited to 'client/src')
 client/src/leap/soledad/client/__init__.py  | 77
 client/src/leap/soledad/client/sqlcipher.py | 76
 2 files changed, 100 insertions(+), 53 deletions(-)
diff --git a/client/src/leap/soledad/client/__init__.py b/client/src/leap/soledad/client/__init__.py
index 3fb037c8..f0abf130 100644
--- a/client/src/leap/soledad/client/__init__.py
+++ b/client/src/leap/soledad/client/__init__.py
@@ -249,7 +249,6 @@ class Soledad(object):
"""
syncing_lock = defaultdict(Lock)
- rw_lock = Lock()
"""
A dictionary that holds locks to avoid multiple simultaneous sync attempts
from the same database replica.
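
The class-level syncing_lock maps each replica uid to its own lock, so sync
attempts against the same replica serialize while syncs of different replicas
proceed in parallel. A minimal sketch of the pattern (standalone, names
hypothetical, not from this patch):

    from collections import defaultdict
    from threading import Lock

    syncing_lock = defaultdict(Lock)

    def sync(replica_uid):
        # defaultdict hands each new uid its own Lock on first access,
        # so two threads syncing the same replica take turns.
        with syncing_lock[replica_uid]:
            pass  # ... perform the actual sync here ...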
@@ -791,8 +790,7 @@ class Soledad(object):
:rtype: str
"""
doc.content = self._convert_to_unicode(doc.content)
- with self.rw_lock:
- return self._db.put_doc(doc)
+ return self._db.put_doc(doc)
def delete_doc(self, doc):
"""
@@ -804,8 +802,7 @@ class Soledad(object):
:return: the new revision identifier for the document
:rtype: str
"""
- with self.rw_lock:
- return self._db.delete_doc(doc)
+ return self._db.delete_doc(doc)
def get_doc(self, doc_id, include_deleted=False):
"""
@@ -821,8 +818,7 @@ class Soledad(object):
:return: the document object or None
:rtype: SoledadDocument
"""
- with self.rw_lock:
- return self._db.get_doc(doc_id, include_deleted=include_deleted)
+ return self._db.get_doc(doc_id, include_deleted=include_deleted)
def get_docs(self, doc_ids, check_for_conflicts=True,
include_deleted=False):
@@ -839,10 +835,9 @@ class Soledad(object):
in matching doc_ids order.
:rtype: generator
"""
- with self.rw_lock:
- return self._db.get_docs(
- doc_ids, check_for_conflicts=check_for_conflicts,
- include_deleted=include_deleted)
+ return self._db.get_docs(
+ doc_ids, check_for_conflicts=check_for_conflicts,
+ include_deleted=include_deleted)
def get_all_docs(self, include_deleted=False):
"""Get the JSON content for all documents in the database.
@@ -854,8 +849,7 @@ class Soledad(object):
The current generation of the database, followed by a list of
all the documents in the database.
"""
- with self.rw_lock:
- return self._db.get_all_docs(include_deleted)
+ return self._db.get_all_docs(include_deleted)
def _convert_to_unicode(self, content):
"""
@@ -901,9 +895,8 @@ class Soledad(object):
:return: the new document
:rtype: SoledadDocument
"""
- with self.rw_lock:
- return self._db.create_doc(
- self._convert_to_unicode(content), doc_id=doc_id)
+ return self._db.create_doc(
+ self._convert_to_unicode(content), doc_id=doc_id)
def create_doc_from_json(self, json, doc_id=None):
"""
@@ -922,8 +915,7 @@ class Soledad(object):
:return: The new document
:rtype: SoledadDocument
"""
- with self.rw_lock:
- return self._db.create_doc_from_json(json, doc_id=doc_id)
+ return self._db.create_doc_from_json(json, doc_id=doc_id)
def create_index(self, index_name, *index_expressions):
"""
@@ -947,10 +939,9 @@ class Soledad(object):
"number(fieldname, width)", "lower(fieldname)"
"""
- with self.rw_lock:
- if self._db:
- return self._db.create_index(
- index_name, *index_expressions)
+ if self._db:
+ return self._db.create_index(
+ index_name, *index_expressions)
def delete_index(self, index_name):
"""
@@ -959,9 +950,8 @@ class Soledad(object):
:param index_name: The name of the index we are removing
:type index_name: str
"""
- with self.rw_lock:
- if self._db:
- return self._db.delete_index(index_name)
+ if self._db:
+ return self._db.delete_index(index_name)
def list_indexes(self):
"""
@@ -970,9 +960,8 @@ class Soledad(object):
:return: A list of [('index-name', ['field', 'field2'])] definitions.
:rtype: list
"""
- with self.rw_lock:
- if self._db:
- return self._db.list_indexes()
+ if self._db:
+ return self._db.list_indexes()
def get_from_index(self, index_name, *key_values):
"""
@@ -994,9 +983,8 @@ class Soledad(object):
:return: List of [Document]
:rtype: list
"""
- with self.rw_lock:
- if self._db:
- return self._db.get_from_index(index_name, *key_values)
+ if self._db:
+ return self._db.get_from_index(index_name, *key_values)
def get_count_from_index(self, index_name, *key_values):
"""
@@ -1012,9 +1000,8 @@ class Soledad(object):
:return: count.
:rtype: int
"""
- with self.rw_lock:
- if self._db:
- return self._db.get_count_from_index(index_name, *key_values)
+ if self._db:
+ return self._db.get_count_from_index(index_name, *key_values)
def get_range_from_index(self, index_name, start_value, end_value):
"""
@@ -1043,10 +1030,9 @@ class Soledad(object):
:return: List of [Document]
:rtype: list
"""
- with self.rw_lock:
- if self._db:
- return self._db.get_range_from_index(
- index_name, start_value, end_value)
+ if self._db:
+ return self._db.get_range_from_index(
+ index_name, start_value, end_value)
def get_index_keys(self, index_name):
"""
@@ -1057,9 +1043,8 @@ class Soledad(object):
:return: A list of tuples of indexed keys.
:rtype: list
"""
- with self.rw_lock:
- if self._db:
- return self._db.get_index_keys(index_name)
+ if self._db:
+ return self._db.get_index_keys(index_name)
def get_doc_conflicts(self, doc_id):
"""
@@ -1071,9 +1056,8 @@ class Soledad(object):
:return: a list of the document entries that are conflicted
:rtype: list
"""
- with self.rw_lock:
- if self._db:
- return self._db.get_doc_conflicts(doc_id)
+ if self._db:
+ return self._db.get_doc_conflicts(doc_id)
def resolve_doc(self, doc, conflicted_doc_revs):
"""
@@ -1085,9 +1069,8 @@ class Soledad(object):
supersedes.
:type conflicted_doc_revs: list
"""
- with self.rw_lock:
- if self._db:
- return self._db.resolve_doc(doc, conflicted_doc_revs)
+ if self._db:
+ return self._db.resolve_doc(doc, conflicted_doc_revs)
def sync(self):
"""
diff --git a/client/src/leap/soledad/client/sqlcipher.py b/client/src/leap/soledad/client/sqlcipher.py
index ef059e9b..09efa592 100644
--- a/client/src/leap/soledad/client/sqlcipher.py
+++ b/client/src/leap/soledad/client/sqlcipher.py
@@ -147,6 +147,8 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
_index_storage_value = 'expand referenced encrypted'
k_lock = threading.Lock()
+ create_doc_lock = threading.Lock()
+ update_indexes_lock = threading.Lock()
_syncer = None
def __init__(self, sqlcipher_file, password, document_factory=None,
@@ -192,8 +194,11 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
cipher_page_size)
if os.environ.get('LEAP_SQLITE_NOSYNC'):
self._pragma_synchronous_off(self._db_handle)
+ else:
+ self._pragma_synchronous_normal(self._db_handle)
if os.environ.get('LEAP_SQLITE_MEMSTORE'):
self._pragma_mem_temp_store(self._db_handle)
+ self._pragma_write_ahead_logging(self._db_handle)
self._real_replica_uid = None
self._ensure_schema()
self._crypto = crypto
@@ -400,6 +405,22 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
'ALTER TABLE document '
'ADD COLUMN syncable BOOL NOT NULL DEFAULT TRUE')
+ def create_doc(self, content, doc_id=None):
+ """
+ Create a new document in the local encrypted database.
+
+ :param content: the contents of the new document
+ :type content: dict
+ :param doc_id: an optional identifier specifying the document id
+ :type doc_id: str
+
+ :return: the new document
+ :rtype: SoledadDocument
+ """
+ with self.create_doc_lock:
+ return sqlite_backend.SQLitePartialExpandDatabase.create_doc(
+ self, content, doc_id=doc_id)
+
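
The new class-level create_doc_lock means concurrent create_doc calls take
turns instead of interleaving inside the underlying u1db insert. A
hypothetical exercise of that behavior (file path and passphrase are
placeholders):

    import threading

    db = SQLCipherDatabase('/tmp/soledad-test.db', 'a passphrase')

    def worker(n):
        for i in range(10):
            # Each call runs under create_doc_lock, so concurrent
            # creations are serialized across all four threads.
            db.create_doc({'worker': n, 'seq': i})

    threads = [threading.Thread(target=worker, args=(n,)) for n in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()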
def _put_and_update_indexes(self, old_doc, doc):
"""
Update a document and all indexes related to it.
@@ -409,12 +430,13 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
:param doc: The new version of the document.
:type doc: u1db.Document
"""
- sqlite_backend.SQLitePartialExpandDatabase._put_and_update_indexes(
- self, old_doc, doc)
- c = self._db_handle.cursor()
- c.execute('UPDATE document SET syncable=? '
- 'WHERE doc_id=?',
- (doc.syncable, doc.doc_id))
+ with self.update_indexes_lock:
+ sqlite_backend.SQLitePartialExpandDatabase._put_and_update_indexes(
+ self, old_doc, doc)
+ c = self._db_handle.cursor()
+ c.execute('UPDATE document SET syncable=? '
+ 'WHERE doc_id=?',
+ (doc.syncable, doc.doc_id))
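
Taking update_indexes_lock around both the base-class call and the syncable
UPDATE makes the pair atomic with respect to other threads: nobody can observe
the document between the index update and the flag update. The same
two-statements-under-one-lock pattern in isolation (a sketch, not this class):

    import threading

    _lock = threading.Lock()

    def put_and_flag(cursor, doc_id, content, syncable):
        with _lock:
            # Both statements complete before any other thread enters.
            cursor.execute('UPDATE document SET content=? WHERE doc_id=?',
                           (content, doc_id))
            cursor.execute('UPDATE document SET syncable=? WHERE doc_id=?',
                           (syncable, doc_id))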
def _get_doc(self, doc_id, check_for_conflicts=False):
"""
@@ -747,6 +769,14 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
db_handle.cursor().execute('PRAGMA synchronous=OFF')
@classmethod
+ def _pragma_synchronous_normal(cls, db_handle):
+ """
+ Change the setting of the "synchronous" flag to NORMAL.
+ """
+ logger.debug("SQLCIPHER: SETTING SYNCHRONOUS NORMAL")
+ db_handle.cursor().execute('PRAGMA synchronous=NORMAL')
+
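
NORMAL is the middle ground among SQLite's synchronous settings: fewer fsyncs
than FULL, but unlike OFF it still syncs at the most critical moments, which
pairs well with the WAL journal enabled below. For reference, the three levels
on a plain sqlite3 handle (illustration only; the patch issues the pragma
through the SQLCipher handle):

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('PRAGMA synchronous=FULL')    # safest; SQLite's default
    conn.execute('PRAGMA synchronous=NORMAL')  # fsync only at critical moments
    conn.execute('PRAGMA synchronous=OFF')     # fastest; unsafe on power loss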
+ @classmethod
def _pragma_mem_temp_store(cls, db_handle):
"""
Use an in-memory store for temporary tables.
@@ -754,6 +784,40 @@ class SQLCipherDatabase(sqlite_backend.SQLitePartialExpandDatabase):
logger.debug("SQLCIPHER: SETTING TEMP_STORE MEMORY")
db_handle.cursor().execute('PRAGMA temp_store=MEMORY')
+ @classmethod
+ def _pragma_write_ahead_logging(cls, db_handle):
+ """
+ Enable write-ahead logging, and set the autocheckpoint to 50 pages.
+
+ By setting the autocheckpoint to a small value, we keep reads from
+ suffering too much performance degradation.
+
+ From the sqlite docs:
+
+ "There is a tradeoff between average read performance and average write
+ performance. To maximize the read performance, one wants to keep the
+ WAL as small as possible and hence run checkpoints frequently, perhaps
+ as often as every COMMIT. To maximize write performance, one wants to
+ amortize the cost of each checkpoint over as many writes as possible,
+ meaning that one wants to run checkpoints infrequently and let the WAL
+ grow as large as possible before each checkpoint. The decision of how
+ often to run checkpoints may therefore vary from one application to
+ another depending on the relative read and write performance
+ requirements of the application. The default strategy is to run a
+ checkpoint once the WAL reaches 1000 pages"
+ """
+ logger.debug("SQLCIPHER: SETTING WRITE-AHEAD LOGGING")
+ db_handle.cursor().execute('PRAGMA journal_mode=WAL')
+ # The optimum value may still need some tuning, but we favor small
+ # WAL file sizes to get fast reads, since we assume the writes will
+ # be quick enough not to block for too long.
+
+ # TODO
+ # As a further improvement, we might want to set autocheckpoint to 0
+ # here and do the checkpoints manually in a separate thread, to avoid
+ # any blocks in the main thread (we should run a loopingcall from here)
+ db_handle.cursor().execute('PRAGMA wal_autocheckpoint=50')
+
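
The TODO above might look roughly like this: leave all checkpointing to a
background thread that runs PASSIVE checkpoints, so readers in the main thread
are never blocked by one. A sketch under two assumptions: the database is
plain SQLite (a SQLCipher handle would first need its key pragma), and the
main connection has set PRAGMA wal_autocheckpoint=0:

    import sqlite3
    import threading
    import time

    def start_checkpointer(path, interval=5.0):
        # Assumes the main connection disabled autocheckpointing
        # (PRAGMA wal_autocheckpoint=0), leaving the work to this thread.
        def loop():
            # WAL checkpoints work across connections, so this thread
            # opens its own handle on the same database file.
            conn = sqlite3.connect(path)
            while True:
                time.sleep(interval)
                # PASSIVE checkpoints as much of the WAL as it can
                # without blocking concurrent readers or writers.
                conn.execute('PRAGMA wal_checkpoint(PASSIVE)')

        t = threading.Thread(target=loop)
        t.daemon = True  # don't keep the process alive just for this
        t.start()
        return t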
# Extra query methods: extensions to the base sqlite implementation.
def get_count_from_index(self, index_name, *key_values):