author    drebs <drebs@leap.se>  2016-07-10 10:38:04 +0200
committer Kali Kaneko <kali@leap.se>  2016-07-12 03:09:33 +0200
commit    f406ccbaf2b79db1d65827463f830f4ffbe5856c (patch)
tree      fb061e6782615ad6cb327dd8bafdb4155134ab9c /testing/tests/client
parent    d99198046e07abb0d19fde1695d22267bc5e1433 (diff)
[refactor] make u1db connection pool args explicit
Diffstat (limited to 'testing/tests/client')
-rw-r--r--  testing/tests/client/test_async.py           146
-rw-r--r--  testing/tests/client/test_sqlcipher.py       705
-rw-r--r--  testing/tests/client/test_sqlcipher_sync.py  730
3 files changed, 0 insertions, 1581 deletions
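
For context, the deleted tests below exercised the pre-refactor pool API by handing a whole options object to the factory. A minimal sketch of that usage, reconstructed from test_async.py below (the explicit-argument form this commit introduces does not appear in this hunk):

    from leap.soledad.client import adbapi
    from leap.soledad.client.sqlcipher import SQLCipherOptions

    # Pre-refactor: getConnectionPool() receives the whole options
    # object and extracts the u1db connection arguments internally.
    opts = SQLCipherOptions("/tmp/test.soledad", "secret", create=True)
    dbpool = adbapi.getConnectionPool(opts)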
diff --git a/testing/tests/client/test_async.py b/testing/tests/client/test_async.py
deleted file mode 100644
index 42c315fe..00000000
--- a/testing/tests/client/test_async.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# -*- coding: utf-8 -*-
-# test_async.py
-# Copyright (C) 2013, 2014 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import os
-import hashlib
-
-from twisted.internet import defer
-
-from test_soledad.util import BaseSoledadTest
-from leap.soledad.client import adbapi
-from leap.soledad.client.sqlcipher import SQLCipherOptions
-
-
-class ASyncSQLCipherRetryTestCase(BaseSoledadTest):
-
- """
- Test asynchronous SQLCipher operation.
- """
-
- NUM_DOCS = 5000
-
- def setUp(self):
- BaseSoledadTest.setUp(self)
- self._dbpool = self._get_dbpool()
-
- def tearDown(self):
- self._dbpool.close()
- BaseSoledadTest.tearDown(self)
-
- def _get_dbpool(self):
- tmpdb = os.path.join(self.tempdir, "test.soledad")
- opts = SQLCipherOptions(tmpdb, "secret", create=True)
- return adbapi.getConnectionPool(opts)
-
- def _get_sample(self):
- if not getattr(self, "_sample", None):
- dirname = os.path.dirname(os.path.realpath(__file__))
- sample_file = os.path.join(dirname, "hacker_crackdown.txt")
- with open(sample_file) as f:
- self._sample = f.readlines()
- return self._sample
-
- def test_concurrent_puts_fail_with_few_retries_and_small_timeout(self):
- """
- Test if concurrent updates to the database with small timeout and
- small number of retries fail with "database is locked" error.
-
- Many concurrent write attempts to the same sqlcipher database may fail
- when the timeout is small and there are no retries. This test passes if
- any of the attempts to write to the database fails.
-
- This test is highly dependent on the environment, and its result is
- meant to contrast with the test for the workaround for the "database
- is locked" problem, which is exercised by the "test_concurrent_puts"
- test below.
-
- If this test ever fails, it means that either (1) the platform where
- you are running it is very powerful and you should try an even lower
- timeout value, or (2) the bug has been solved by a better
- implementation of the underlying database pool, and thus this test
- should be removed from the test suite.
- """
-
- old_timeout = adbapi.SQLCIPHER_CONNECTION_TIMEOUT
- old_max_retries = adbapi.SQLCIPHER_MAX_RETRIES
-
- adbapi.SQLCIPHER_CONNECTION_TIMEOUT = 1
- adbapi.SQLCIPHER_MAX_RETRIES = 1
-
- def _create_doc(doc):
- return self._dbpool.runU1DBQuery("create_doc", doc)
-
- def _insert_docs():
- deferreds = []
- for i in range(self.NUM_DOCS):
- payload = self._get_sample()[i]
- chash = hashlib.sha256(payload).hexdigest()
- doc = {"number": i, "payload": payload, 'chash': chash}
- d = _create_doc(doc)
- deferreds.append(d)
- return defer.gatherResults(deferreds, consumeErrors=True)
-
- def _errback(e):
- if e.value[0].getErrorMessage() == "database is locked":
- adbapi.SQLCIPHER_CONNECTION_TIMEOUT = old_timeout
- adbapi.SQLCIPHER_MAX_RETRIES = old_max_retries
- return defer.succeed("")
- raise Exception
-
- d = _insert_docs()
- d.addCallback(lambda _: self._dbpool.runU1DBQuery("get_all_docs"))
- d.addErrback(_errback)
- return d
-
- def test_concurrent_puts(self):
- """
- Test that many concurrent puts succeed.
-
- Currently there is a known problem with the concurrent database pool:
- many concurrent attempts to write to the database may fail when the
- lock timeout is small and there are no (or few) retries. We currently
- work around this problem by increasing the timeout and the number of
- retries.
-
- Should this test ever fail, it probably means that the timeout and/or
- the number of retries should be increased for the platform you're
- running the test on. If the underlying database pool is ever fixed,
- the test above will fail and this comment should be removed.
- """
-
- def _create_doc(doc):
- return self._dbpool.runU1DBQuery("create_doc", doc)
-
- def _insert_docs():
- deferreds = []
- for i in range(self.NUM_DOCS):
- payload = self._get_sample()[i]
- chash = hashlib.sha256(payload).hexdigest()
- doc = {"number": i, "payload": payload, 'chash': chash}
- d = _create_doc(doc)
- deferreds.append(d)
- return defer.gatherResults(deferreds, consumeErrors=True)
-
- def _count_docs(results):
- _, docs = results
- if self.NUM_DOCS == len(docs):
- return defer.succeed("")
- raise Exception
-
- d = _insert_docs()
- d.addCallback(lambda _: self._dbpool.runU1DBQuery("get_all_docs"))
- d.addCallback(_count_docs)
- return d
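
The removed file above drove the connection pool entirely through Twisted Deferreds. A condensed sketch of that pattern, using only names that appear in the deleted tests (the docs argument stands in for the sample payloads):

    from twisted.internet import defer

    def insert_and_fetch(dbpool, docs):
        # each create_doc call returns a Deferred that fires once the
        # write has run on the pool's database thread
        ds = [dbpool.runU1DBQuery("create_doc", doc) for doc in docs]
        d = defer.gatherResults(ds, consumeErrors=True)
        # when all writes are done, fetch everything back for checking
        d.addCallback(lambda _: dbpool.runU1DBQuery("get_all_docs"))
        return d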
diff --git a/testing/tests/client/test_sqlcipher.py b/testing/tests/client/test_sqlcipher.py
deleted file mode 100644
index 11472d46..00000000
--- a/testing/tests/client/test_sqlcipher.py
+++ /dev/null
@@ -1,705 +0,0 @@
-# -*- coding: utf-8 -*-
-# test_sqlcipher.py
-# Copyright (C) 2013 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-Test sqlcipher backend internals.
-"""
-import os
-import time
-import threading
-import tempfile
-import shutil
-
-from pysqlcipher import dbapi2
-from testscenarios import TestWithScenarios
-
-# l2db stuff.
-from leap.soledad.common.l2db import errors
-from leap.soledad.common.l2db import query_parser
-from leap.soledad.common.l2db.backends.sqlite_backend \
- import SQLitePartialExpandDatabase
-
-# soledad stuff.
-from leap.soledad.common import soledad_assert
-from leap.soledad.common.document import SoledadDocument
-from leap.soledad.client.sqlcipher import SQLCipherDatabase
-from leap.soledad.client.sqlcipher import SQLCipherOptions
-from leap.soledad.client.sqlcipher import DatabaseIsNotEncrypted
-
-# u1db tests stuff.
-from test_soledad import u1db_tests as tests
-from test_soledad.u1db_tests import test_backends
-from test_soledad.u1db_tests import test_open
-from test_soledad.util import SQLCIPHER_SCENARIOS
-from test_soledad.util import PASSWORD
-from test_soledad.util import BaseSoledadTest
-
-
-def sqlcipher_open(path, passphrase, create=True, document_factory=None):
- return SQLCipherDatabase(
- SQLCipherOptions(path, passphrase, create=create))
-
-
-# -----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_common_backend`.
-# -----------------------------------------------------------------------------
-
-class TestSQLCipherBackendImpl(tests.TestCase):
-
- def test__allocate_doc_id(self):
- db = sqlcipher_open(':memory:', PASSWORD)
- doc_id1 = db._allocate_doc_id()
- self.assertTrue(doc_id1.startswith('D-'))
- self.assertEqual(34, len(doc_id1))
- int(doc_id1[len('D-'):], 16)
- self.assertNotEqual(doc_id1, db._allocate_doc_id())
- db.close()
-
-
-# -----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_backends`.
-# -----------------------------------------------------------------------------
-
-class SQLCipherTests(TestWithScenarios, test_backends.AllDatabaseTests):
- scenarios = SQLCIPHER_SCENARIOS
-
-
-class SQLCipherDatabaseTests(TestWithScenarios,
- test_backends.LocalDatabaseTests):
- scenarios = SQLCIPHER_SCENARIOS
-
-
-class SQLCipherValidateGenNTransIdTests(
- TestWithScenarios,
- test_backends.LocalDatabaseValidateGenNTransIdTests):
- scenarios = SQLCIPHER_SCENARIOS
-
-
-class SQLCipherValidateSourceGenTests(
- TestWithScenarios,
- test_backends.LocalDatabaseValidateSourceGenTests):
- scenarios = SQLCIPHER_SCENARIOS
-
-
-class SQLCipherWithConflictsTests(
- TestWithScenarios,
- test_backends.LocalDatabaseWithConflictsTests):
- scenarios = SQLCIPHER_SCENARIOS
-
-
-class SQLCipherIndexTests(
- TestWithScenarios, test_backends.DatabaseIndexTests):
- scenarios = SQLCIPHER_SCENARIOS
-
-
-# -----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_sqlite_backend`.
-# -----------------------------------------------------------------------------
-
-class TestSQLCipherDatabase(tests.TestCase):
- """
- Tests from u1db.tests.test_sqlite_backend.TestSQLiteDatabase.
- """
-
- def test_atomic_initialize(self):
- # This test was modified to ensure that db2.close() is called within
- # the thread that created the database.
- tmpdir = self.createTempDir()
- dbname = os.path.join(tmpdir, 'atomic.db')
-
- t2 = None # will be a thread
-
- class SQLCipherDatabaseTesting(SQLCipherDatabase):
- _index_storage_value = "testing"
-
- def __init__(self, dbname, ntry):
- self._try = ntry
- self._is_initialized_invocations = 0
- SQLCipherDatabase.__init__(
- self,
- SQLCipherOptions(dbname, PASSWORD))
-
- def _is_initialized(self, c):
- res = \
- SQLCipherDatabase._is_initialized(self, c)
- if self._try == 1:
- self._is_initialized_invocations += 1
- if self._is_initialized_invocations == 2:
- t2.start()
- # hard to do better and have a generic test
- time.sleep(0.05)
- return res
-
- class SecondTry(threading.Thread):
-
- outcome2 = []
-
- def run(self):
- try:
- db2 = SQLCipherDatabaseTesting(dbname, 2)
- except Exception, e:
- SecondTry.outcome2.append(e)
- else:
- SecondTry.outcome2.append(db2)
-
- t2 = SecondTry()
- db1 = SQLCipherDatabaseTesting(dbname, 1)
- t2.join()
-
- self.assertIsInstance(SecondTry.outcome2[0], SQLCipherDatabaseTesting)
- self.assertTrue(db1._is_initialized(db1._get_sqlite_handle().cursor()))
- db1.close()
-
-
-class TestSQLCipherPartialExpandDatabase(tests.TestCase):
- """
- Tests from u1db.tests.test_sqlite_backend.TestSQLitePartialExpandDatabase.
- """
-
- # The following tests had to be cloned from u1db because they all
- # instantiate the backend directly, so we need to change that so
- # that our backend is instantiated in its place.
-
- def setUp(self):
- self.db = sqlcipher_open(':memory:', PASSWORD)
-
- def tearDown(self):
- self.db.close()
-
- def test_default_replica_uid(self):
- self.assertIsNot(None, self.db._replica_uid)
- self.assertEqual(32, len(self.db._replica_uid))
- int(self.db._replica_uid, 16)
-
- def test__parse_index(self):
- g = self.db._parse_index_definition('fieldname')
- self.assertIsInstance(g, query_parser.ExtractField)
- self.assertEqual(['fieldname'], g.field)
-
- def test__update_indexes(self):
- g = self.db._parse_index_definition('fieldname')
- c = self.db._get_sqlite_handle().cursor()
- self.db._update_indexes('doc-id', {'fieldname': 'val'},
- [('fieldname', g)], c)
- c.execute('SELECT doc_id, field_name, value FROM document_fields')
- self.assertEqual([('doc-id', 'fieldname', 'val')],
- c.fetchall())
-
- def test_create_database(self):
- raw_db = self.db._get_sqlite_handle()
- self.assertNotEqual(None, raw_db)
-
- def test__set_replica_uid(self):
- # Start from scratch, so that replica_uid isn't set.
- self.assertIsNot(None, self.db._real_replica_uid)
- self.assertIsNot(None, self.db._replica_uid)
- self.db._set_replica_uid('foo')
- c = self.db._get_sqlite_handle().cursor()
- c.execute("SELECT value FROM u1db_config WHERE name='replica_uid'")
- self.assertEqual(('foo',), c.fetchone())
- self.assertEqual('foo', self.db._real_replica_uid)
- self.assertEqual('foo', self.db._replica_uid)
- self.db._close_sqlite_handle()
- self.assertEqual('foo', self.db._replica_uid)
-
- def test__open_database(self):
- # SQLCipherDatabase has no _open_database() method, so we just pass
- # (and test for the same functionality in test_open_database_existing()
- # below).
- pass
-
- def test__open_database_with_factory(self):
- # SQLCipherDatabase has no _open_database() method.
- pass
-
- def test__open_database_non_existent(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/non-existent.sqlite'
- self.assertRaises(errors.DatabaseDoesNotExist,
- sqlcipher_open,
- path, PASSWORD, create=False)
-
- def test__open_database_during_init(self):
- # The purpose of this test is to ensure that _open_database() parallel
- # db initialization behaviour is correct. As SQLCipherDatabase does
- # not have an _open_database() method, we just do not implement this
- # test.
- pass
-
- def test__open_database_invalid(self):
- # This test was modified to ensure that an empty database file will
- # raise a DatabaseIsNotEncrypted exception instead of a
- # dbapi2.OperationalError exception.
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path1 = temp_dir + '/invalid1.db'
- with open(path1, 'wb') as f:
- f.write("")
- self.assertRaises(DatabaseIsNotEncrypted,
- sqlcipher_open, path1,
- PASSWORD)
- with open(path1, 'wb') as f:
- f.write("invalid")
- self.assertRaises(dbapi2.DatabaseError,
- sqlcipher_open, path1,
- PASSWORD)
-
- def test_open_database_existing(self):
- # In the context of SQLCipherDatabase, where no _open_database()
- # method exists and thus there is no call to _which_index_storage(),
- # this test covers the same functionality as
- # test_open_database_create() below. So, we just pass.
- pass
-
- def test_open_database_with_factory(self):
- # SQLCipherDatabase's constructor has no factory parameter.
- pass
-
- def test_open_database_create(self):
- # SQLCipherDatabase has no open_database() method, so we just test
- # for the actual effects of the database constructor.
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/new.sqlite'
- db1 = sqlcipher_open(path, PASSWORD, create=True)
- db2 = sqlcipher_open(path, PASSWORD, create=False)
- self.assertIsInstance(db2, SQLCipherDatabase)
- db1.close()
- db2.close()
-
- def test_create_database_initializes_schema(self):
- # This test had to be cloned because our SQLCipher backend
- # implementation uses an index_storage_value that includes the word
- # "encrypted". See u1db's sqlite_backend and our sqlcipher_backend
- # for reference.
- raw_db = self.db._get_sqlite_handle()
- c = raw_db.cursor()
- c.execute("SELECT * FROM u1db_config")
- config = dict([(r[0], r[1]) for r in c.fetchall()])
- replica_uid = self.db._replica_uid
- self.assertEqual({'sql_schema': '0', 'replica_uid': replica_uid,
- 'index_storage': 'expand referenced encrypted'},
- config)
-
- def test_store_syncable(self):
- doc = self.db.create_doc_from_json(tests.simple_doc)
- # assert that docs are syncable by default
- self.assertEqual(True, doc.syncable)
- # assert that we can store syncable = False
- doc.syncable = False
- self.db.put_doc(doc)
- self.assertEqual(False, self.db.get_doc(doc.doc_id).syncable)
- # assert that we can store syncable = True
- doc.syncable = True
- self.db.put_doc(doc)
- self.assertEqual(True, self.db.get_doc(doc.doc_id).syncable)
-
- def test__close_sqlite_handle(self):
- raw_db = self.db._get_sqlite_handle()
- self.db._close_sqlite_handle()
- self.assertRaises(dbapi2.ProgrammingError,
- raw_db.cursor)
-
- def test__get_generation(self):
- self.assertEqual(0, self.db._get_generation())
-
- def test__get_generation_info(self):
- self.assertEqual((0, ''), self.db._get_generation_info())
-
- def test_create_index(self):
- self.db.create_index('test-idx', "key")
- self.assertEqual([('test-idx', ["key"])], self.db.list_indexes())
-
- def test_create_index_multiple_fields(self):
- self.db.create_index('test-idx', "key", "key2")
- self.assertEqual([('test-idx', ["key", "key2"])],
- self.db.list_indexes())
-
- def test__get_index_definition(self):
- self.db.create_index('test-idx', "key", "key2")
- # TODO: How would you test that an index is getting used for an SQL
- # request?
- self.assertEqual(["key", "key2"],
- self.db._get_index_definition('test-idx'))
-
- def test_list_index_mixed(self):
- # Make sure that we properly order the output
- c = self.db._get_sqlite_handle().cursor()
- # We intentionally insert the data in weird ordering, to make sure the
- # query still gets it back correctly.
- c.executemany("INSERT INTO index_definitions VALUES (?, ?, ?)",
- [('idx-1', 0, 'key10'),
- ('idx-2', 2, 'key22'),
- ('idx-1', 1, 'key11'),
- ('idx-2', 0, 'key20'),
- ('idx-2', 1, 'key21')])
- self.assertEqual([('idx-1', ['key10', 'key11']),
- ('idx-2', ['key20', 'key21', 'key22'])],
- self.db.list_indexes())
-
- def test_no_indexes_no_document_fields(self):
- self.db.create_doc_from_json(
- '{"key1": "val1", "key2": "val2"}')
- c = self.db._get_sqlite_handle().cursor()
- c.execute("SELECT doc_id, field_name, value FROM document_fields"
- " ORDER BY doc_id, field_name, value")
- self.assertEqual([], c.fetchall())
-
- def test_create_extracts_fields(self):
- doc1 = self.db.create_doc_from_json('{"key1": "val1", "key2": "val2"}')
- doc2 = self.db.create_doc_from_json('{"key1": "valx", "key2": "valy"}')
- c = self.db._get_sqlite_handle().cursor()
- c.execute("SELECT doc_id, field_name, value FROM document_fields"
- " ORDER BY doc_id, field_name, value")
- self.assertEqual([], c.fetchall())
- self.db.create_index('test', 'key1', 'key2')
- c.execute("SELECT doc_id, field_name, value FROM document_fields"
- " ORDER BY doc_id, field_name, value")
- self.assertEqual(sorted(
- [(doc1.doc_id, "key1", "val1"),
- (doc1.doc_id, "key2", "val2"),
- (doc2.doc_id, "key1", "valx"),
- (doc2.doc_id, "key2", "valy"), ]), sorted(c.fetchall()))
-
- def test_put_updates_fields(self):
- self.db.create_index('test', 'key1', 'key2')
- doc1 = self.db.create_doc_from_json(
- '{"key1": "val1", "key2": "val2"}')
- doc1.content = {"key1": "val1", "key2": "valy"}
- self.db.put_doc(doc1)
- c = self.db._get_sqlite_handle().cursor()
- c.execute("SELECT doc_id, field_name, value FROM document_fields"
- " ORDER BY doc_id, field_name, value")
- self.assertEqual([(doc1.doc_id, "key1", "val1"),
- (doc1.doc_id, "key2", "valy"), ], c.fetchall())
-
- def test_put_updates_nested_fields(self):
- self.db.create_index('test', 'key', 'sub.doc')
- doc1 = self.db.create_doc_from_json(tests.nested_doc)
- c = self.db._get_sqlite_handle().cursor()
- c.execute("SELECT doc_id, field_name, value FROM document_fields"
- " ORDER BY doc_id, field_name, value")
- self.assertEqual([(doc1.doc_id, "key", "value"),
- (doc1.doc_id, "sub.doc", "underneath"), ],
- c.fetchall())
-
- def test__ensure_schema_rollback(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/rollback.db'
-
- class SQLitePartialExpandDbTesting(SQLCipherDatabase):
-
- def _set_replica_uid_in_transaction(self, uid):
- super(SQLitePartialExpandDbTesting,
- self)._set_replica_uid_in_transaction(uid)
- if fail:
- raise Exception()
-
- db = SQLitePartialExpandDbTesting.__new__(SQLitePartialExpandDbTesting)
- db._db_handle = dbapi2.connect(path) # db is there but not yet init-ed
- fail = True
- self.assertRaises(Exception, db._ensure_schema)
- fail = False
- db._initialize(db._db_handle.cursor())
-
- def test_open_database_non_existent(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/non-existent.sqlite'
- self.assertRaises(errors.DatabaseDoesNotExist,
- sqlcipher_open, path, "123",
- create=False)
-
- def test_delete_database_existent(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/new.sqlite'
- db = sqlcipher_open(path, "123", create=True)
- db.close()
- SQLCipherDatabase.delete_database(path)
- self.assertRaises(errors.DatabaseDoesNotExist,
- sqlcipher_open, path, "123",
- create=False)
-
- def test_delete_database_nonexistent(self):
- temp_dir = self.createTempDir(prefix='u1db-test-')
- path = temp_dir + '/non-existent.sqlite'
- self.assertRaises(errors.DatabaseDoesNotExist,
- SQLCipherDatabase.delete_database, path)
-
- def test__get_indexed_fields(self):
- self.db.create_index('idx1', 'a', 'b')
- self.assertEqual(set(['a', 'b']), self.db._get_indexed_fields())
- self.db.create_index('idx2', 'b', 'c')
- self.assertEqual(set(['a', 'b', 'c']), self.db._get_indexed_fields())
-
- def test_indexed_fields_expanded(self):
- self.db.create_index('idx1', 'key1')
- doc1 = self.db.create_doc_from_json('{"key1": "val1", "key2": "val2"}')
- self.assertEqual(set(['key1']), self.db._get_indexed_fields())
- c = self.db._get_sqlite_handle().cursor()
- c.execute("SELECT doc_id, field_name, value FROM document_fields"
- " ORDER BY doc_id, field_name, value")
- self.assertEqual([(doc1.doc_id, 'key1', 'val1')], c.fetchall())
-
- def test_create_index_updates_fields(self):
- doc1 = self.db.create_doc_from_json('{"key1": "val1", "key2": "val2"}')
- self.db.create_index('idx1', 'key1')
- self.assertEqual(set(['key1']), self.db._get_indexed_fields())
- c = self.db._get_sqlite_handle().cursor()
- c.execute("SELECT doc_id, field_name, value FROM document_fields"
- " ORDER BY doc_id, field_name, value")
- self.assertEqual([(doc1.doc_id, 'key1', 'val1')], c.fetchall())
-
- def assertFormatQueryEquals(self, exp_statement, exp_args, definition,
- values):
- statement, args = self.db._format_query(definition, values)
- self.assertEqual(exp_statement, statement)
- self.assertEqual(exp_args, args)
-
- def test__format_query(self):
- self.assertFormatQueryEquals(
- "SELECT d.doc_id, d.doc_rev, d.content, count(c.doc_rev) FROM "
- "document d, document_fields d0 LEFT OUTER JOIN conflicts c ON "
- "c.doc_id = d.doc_id WHERE d.doc_id = d0.doc_id AND d0.field_name "
- "= ? AND d0.value = ? GROUP BY d.doc_id, d.doc_rev, d.content "
- "ORDER BY d0.value;", ["key1", "a"],
- ["key1"], ["a"])
-
- def test__format_query2(self):
- self.assertFormatQueryEquals(
- 'SELECT d.doc_id, d.doc_rev, d.content, count(c.doc_rev) FROM '
- 'document d, document_fields d0, document_fields d1, '
- 'document_fields d2 LEFT OUTER JOIN conflicts c ON c.doc_id = '
- 'd.doc_id WHERE d.doc_id = d0.doc_id AND d0.field_name = ? AND '
- 'd0.value = ? AND d.doc_id = d1.doc_id AND d1.field_name = ? AND '
- 'd1.value = ? AND d.doc_id = d2.doc_id AND d2.field_name = ? AND '
- 'd2.value = ? GROUP BY d.doc_id, d.doc_rev, d.content ORDER BY '
- 'd0.value, d1.value, d2.value;',
- ["key1", "a", "key2", "b", "key3", "c"],
- ["key1", "key2", "key3"], ["a", "b", "c"])
-
- def test__format_query_wildcard(self):
- self.assertFormatQueryEquals(
- 'SELECT d.doc_id, d.doc_rev, d.content, count(c.doc_rev) FROM '
- 'document d, document_fields d0, document_fields d1, '
- 'document_fields d2 LEFT OUTER JOIN conflicts c ON c.doc_id = '
- 'd.doc_id WHERE d.doc_id = d0.doc_id AND d0.field_name = ? AND '
- 'd0.value = ? AND d.doc_id = d1.doc_id AND d1.field_name = ? AND '
- 'd1.value GLOB ? AND d.doc_id = d2.doc_id AND d2.field_name = ? '
- 'AND d2.value NOT NULL GROUP BY d.doc_id, d.doc_rev, d.content '
- 'ORDER BY d0.value, d1.value, d2.value;',
- ["key1", "a", "key2", "b*", "key3"], ["key1", "key2", "key3"],
- ["a", "b*", "*"])
-
- def assertFormatRangeQueryEquals(self, exp_statement, exp_args, definition,
- start_value, end_value):
- statement, args = self.db._format_range_query(
- definition, start_value, end_value)
- self.assertEqual(exp_statement, statement)
- self.assertEqual(exp_args, args)
-
- def test__format_range_query(self):
- self.assertFormatRangeQueryEquals(
- 'SELECT d.doc_id, d.doc_rev, d.content, count(c.doc_rev) FROM '
- 'document d, document_fields d0, document_fields d1, '
- 'document_fields d2 LEFT OUTER JOIN conflicts c ON c.doc_id = '
- 'd.doc_id WHERE d.doc_id = d0.doc_id AND d0.field_name = ? AND '
- 'd0.value >= ? AND d.doc_id = d1.doc_id AND d1.field_name = ? AND '
- 'd1.value >= ? AND d.doc_id = d2.doc_id AND d2.field_name = ? AND '
- 'd2.value >= ? AND d.doc_id = d0.doc_id AND d0.field_name = ? AND '
- 'd0.value <= ? AND d.doc_id = d1.doc_id AND d1.field_name = ? AND '
- 'd1.value <= ? AND d.doc_id = d2.doc_id AND d2.field_name = ? AND '
- 'd2.value <= ? GROUP BY d.doc_id, d.doc_rev, d.content ORDER BY '
- 'd0.value, d1.value, d2.value;',
- ['key1', 'a', 'key2', 'b', 'key3', 'c', 'key1', 'p', 'key2', 'q',
- 'key3', 'r'],
- ["key1", "key2", "key3"], ["a", "b", "c"], ["p", "q", "r"])
-
- def test__format_range_query_no_start(self):
- self.assertFormatRangeQueryEquals(
- 'SELECT d.doc_id, d.doc_rev, d.content, count(c.doc_rev) FROM '
- 'document d, document_fields d0, document_fields d1, '
- 'document_fields d2 LEFT OUTER JOIN conflicts c ON c.doc_id = '
- 'd.doc_id WHERE d.doc_id = d0.doc_id AND d0.field_name = ? AND '
- 'd0.value <= ? AND d.doc_id = d1.doc_id AND d1.field_name = ? AND '
- 'd1.value <= ? AND d.doc_id = d2.doc_id AND d2.field_name = ? AND '
- 'd2.value <= ? GROUP BY d.doc_id, d.doc_rev, d.content ORDER BY '
- 'd0.value, d1.value, d2.value;',
- ['key1', 'a', 'key2', 'b', 'key3', 'c'],
- ["key1", "key2", "key3"], None, ["a", "b", "c"])
-
- def test__format_range_query_no_end(self):
- self.assertFormatRangeQueryEquals(
- 'SELECT d.doc_id, d.doc_rev, d.content, count(c.doc_rev) FROM '
- 'document d, document_fields d0, document_fields d1, '
- 'document_fields d2 LEFT OUTER JOIN conflicts c ON c.doc_id = '
- 'd.doc_id WHERE d.doc_id = d0.doc_id AND d0.field_name = ? AND '
- 'd0.value >= ? AND d.doc_id = d1.doc_id AND d1.field_name = ? AND '
- 'd1.value >= ? AND d.doc_id = d2.doc_id AND d2.field_name = ? AND '
- 'd2.value >= ? GROUP BY d.doc_id, d.doc_rev, d.content ORDER BY '
- 'd0.value, d1.value, d2.value;',
- ['key1', 'a', 'key2', 'b', 'key3', 'c'],
- ["key1", "key2", "key3"], ["a", "b", "c"], None)
-
- def test__format_range_query_wildcard(self):
- self.assertFormatRangeQueryEquals(
- 'SELECT d.doc_id, d.doc_rev, d.content, count(c.doc_rev) FROM '
- 'document d, document_fields d0, document_fields d1, '
- 'document_fields d2 LEFT OUTER JOIN conflicts c ON c.doc_id = '
- 'd.doc_id WHERE d.doc_id = d0.doc_id AND d0.field_name = ? AND '
- 'd0.value >= ? AND d.doc_id = d1.doc_id AND d1.field_name = ? AND '
- 'd1.value >= ? AND d.doc_id = d2.doc_id AND d2.field_name = ? AND '
- 'd2.value NOT NULL AND d.doc_id = d0.doc_id AND d0.field_name = ? '
- 'AND d0.value <= ? AND d.doc_id = d1.doc_id AND d1.field_name = ? '
- 'AND (d1.value < ? OR d1.value GLOB ?) AND d.doc_id = d2.doc_id '
- 'AND d2.field_name = ? AND d2.value NOT NULL GROUP BY d.doc_id, '
- 'd.doc_rev, d.content ORDER BY d0.value, d1.value, d2.value;',
- ['key1', 'a', 'key2', 'b', 'key3', 'key1', 'p', 'key2', 'q', 'q*',
- 'key3'],
- ["key1", "key2", "key3"], ["a", "b*", "*"], ["p", "q*", "*"])
-
-
-# -----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_open`.
-# -----------------------------------------------------------------------------
-
-
-class SQLCipherOpen(test_open.TestU1DBOpen):
-
- def test_open_no_create(self):
- self.assertRaises(errors.DatabaseDoesNotExist,
- sqlcipher_open, self.db_path,
- PASSWORD,
- create=False)
- self.assertFalse(os.path.exists(self.db_path))
-
- def test_open_create(self):
- db = sqlcipher_open(self.db_path, PASSWORD, create=True)
- self.addCleanup(db.close)
- self.assertTrue(os.path.exists(self.db_path))
- self.assertIsInstance(db, SQLCipherDatabase)
-
- def test_open_with_factory(self):
- db = sqlcipher_open(self.db_path, PASSWORD, create=True,
- document_factory=SoledadDocument)
- self.addCleanup(db.close)
- doc = db.create_doc({})
- self.assertTrue(isinstance(doc, SoledadDocument))
-
- def test_open_existing(self):
- db = sqlcipher_open(self.db_path, PASSWORD)
- self.addCleanup(db.close)
- doc = db.create_doc_from_json(tests.simple_doc)
- # Even though create=True, we shouldn't wipe the db
- db2 = sqlcipher_open(self.db_path, PASSWORD, create=True)
- self.addCleanup(db2.close)
- doc2 = db2.get_doc(doc.doc_id)
- self.assertEqual(doc, doc2)
-
- def test_open_existing_no_create(self):
- db = sqlcipher_open(self.db_path, PASSWORD)
- self.addCleanup(db.close)
- db2 = sqlcipher_open(self.db_path, PASSWORD, create=False)
- self.addCleanup(db2.close)
- self.assertIsInstance(db2, SQLCipherDatabase)
-
-
-# -----------------------------------------------------------------------------
-# Tests for actual encryption of the database
-# -----------------------------------------------------------------------------
-
-class SQLCipherEncryptionTests(BaseSoledadTest):
-
- """
- Tests to guarantee SQLCipher is indeed encrypting data when storing.
- """
-
- def _delete_dbfiles(self):
- for dbfile in [self.DB_FILE]:
- if os.path.exists(dbfile):
- os.unlink(dbfile)
-
- def setUp(self):
- # the following come from BaseLeapTest.setUpClass, because
- # twisted.trial doesn't support such class methods for setting up
- # test classes.
- self.old_path = os.environ['PATH']
- self.old_home = os.environ['HOME']
- self.tempdir = tempfile.mkdtemp(prefix="leap_tests-")
- self.home = self.tempdir
- bin_tdir = os.path.join(
- self.tempdir,
- 'bin')
- os.environ["PATH"] = bin_tdir
- os.environ["HOME"] = self.tempdir
- # this is our own stuff
- self.DB_FILE = os.path.join(self.tempdir, 'test.db')
- self._delete_dbfiles()
-
- def tearDown(self):
- self._delete_dbfiles()
- # the following come from BaseLeapTest.tearDownClass, because
- # twisted.trial doesn't support such class methods for tearing down
- # test classes.
- os.environ["PATH"] = self.old_path
- os.environ["HOME"] = self.old_home
- # safety check! please do not wipe my home...
- # XXX needs to adapt to non-linuces
- soledad_assert(
- self.tempdir.startswith('/tmp/leap_tests-') or
- self.tempdir.startswith('/var/folder'),
- "beware! tried to remove a dir which does not "
- "live in temporal folder!")
- shutil.rmtree(self.tempdir)
-
- def test_try_to_open_encrypted_db_with_sqlite_backend(self):
- """
- The SQLite backend should not succeed in opening SQLCipher databases.
- """
- db = sqlcipher_open(self.DB_FILE, PASSWORD)
- doc = db.create_doc_from_json(tests.simple_doc)
- db.close()
- try:
- # trying to open an encrypted database with the regular u1db
- # backend should raise a DatabaseError exception.
- SQLitePartialExpandDatabase(self.DB_FILE,
- document_factory=SoledadDocument)
- raise DatabaseIsNotEncrypted()
- except dbapi2.DatabaseError:
- # at this point we know that the regular u1db sqlite backend did
- # not succeed in opening the database, so it was indeed
- # encrypted.
- db = sqlcipher_open(self.DB_FILE, PASSWORD)
- doc = db.get_doc(doc.doc_id)
- self.assertEqual(tests.simple_doc, doc.get_json(),
- 'decrypted content mismatch')
- db.close()
-
- def test_try_to_open_raw_db_with_sqlcipher_backend(self):
- """
- The SQLCipher backend should not succeed in opening unencrypted databases.
- """
- db = SQLitePartialExpandDatabase(self.DB_FILE,
- document_factory=SoledadDocument)
- db.create_doc_from_json(tests.simple_doc)
- db.close()
- try:
- # trying to open a non-encrypted database with the sqlcipher
- # backend should raise a DatabaseIsNotEncrypted exception.
- db = sqlcipher_open(self.DB_FILE, PASSWORD)
- db.close()
- raise dbapi2.DatabaseError(
- "SQLCipher backend should not be able to open non-encrypted "
- "dbs.")
- except DatabaseIsNotEncrypted:
- pass
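
The two encryption tests above check encryption at rest in both directions: an encrypted database must be unreadable by the plain SQLite backend, and a plaintext database must be rejected by the SQLCipher backend. Distilled into a standalone helper (hypothetical name; the classes and exceptions are the ones imported at the top of the deleted file):

    from pysqlcipher import dbapi2

    def assert_db_is_encrypted(path):
        # the plain sqlite backend must fail to parse an encrypted file...
        try:
            SQLitePartialExpandDatabase(path)
        except dbapi2.DatabaseError:
            return True
        # ...otherwise the file was readable as cleartext
        raise DatabaseIsNotEncrypted()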
diff --git a/testing/tests/client/test_sqlcipher_sync.py b/testing/tests/client/test_sqlcipher_sync.py
deleted file mode 100644
index 3cbefc8b..00000000
--- a/testing/tests/client/test_sqlcipher_sync.py
+++ /dev/null
@@ -1,730 +0,0 @@
-# -*- coding: utf-8 -*-
-# test_sqlcipher_sync.py
-# Copyright (C) 2013-2016 LEAP
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-Test sqlcipher backend sync.
-"""
-import os
-
-from uuid import uuid4
-
-from testscenarios import TestWithScenarios
-
-from leap.soledad.common.l2db import sync
-from leap.soledad.common.l2db import vectorclock
-from leap.soledad.common.l2db import errors
-
-from leap.soledad.common.crypto import ENC_SCHEME_KEY
-from leap.soledad.client.crypto import decrypt_doc_dict
-from leap.soledad.client.http_target import SoledadHTTPSyncTarget
-
-from test_soledad import u1db_tests as tests
-from test_soledad.util import SQLCIPHER_SCENARIOS
-from test_soledad.util import make_soledad_app
-from test_soledad.util import soledad_sync_target
-from test_soledad.util import BaseSoledadTest
-
-
-# -----------------------------------------------------------------------------
-# The following tests come from `u1db.tests.test_sync`.
-# -----------------------------------------------------------------------------
-
-def sync_via_synchronizer_and_soledad(test, db_source, db_target,
- trace_hook=None,
- trace_hook_shallow=None):
- if trace_hook:
- test.skipTest("full trace hook unsupported over http")
- path = test._http_at[db_target]
- target = SoledadHTTPSyncTarget.connect(
- test.getURL(path), test._soledad._crypto)
- target.set_token_credentials('user-uuid', 'auth-token')
- if trace_hook_shallow:
- target._set_trace_hook_shallow(trace_hook_shallow)
- return sync.Synchronizer(db_source, target).sync()
-
-
-def sync_via_synchronizer(test, db_source, db_target,
- trace_hook=None,
- trace_hook_shallow=None):
- target = db_target.get_sync_target()
- trace_hook = trace_hook or trace_hook_shallow
- if trace_hook:
- target._set_trace_hook(trace_hook)
- return sync.Synchronizer(db_source, target).sync()
-
-
-sync_scenarios = []
-for name, scenario in SQLCIPHER_SCENARIOS:
- scenario['do_sync'] = sync_via_synchronizer
- sync_scenarios.append((name, scenario))
-
-
-class SQLCipherDatabaseSyncTests(
- TestWithScenarios,
- tests.DatabaseBaseTests,
- BaseSoledadTest):
-
- """
- Tests for successful sync between SQLCipher and LeapBackend.
-
- Some of the tests in this class had to be adapted because the remote
- backend always receives encrypted content, and so it cannot rely on
- comparing document contents to try to autoresolve conflicts.
- """
-
- scenarios = sync_scenarios
-
- def setUp(self):
- self._use_tracking = {}
- super(tests.DatabaseBaseTests, self).setUp()
-
- def create_database(self, replica_uid, sync_role=None):
- if replica_uid == 'test' and sync_role is None:
- # created up the chain by base class but unused
- return None
- db = self.create_database_for_role(replica_uid, sync_role)
- if sync_role:
- self._use_tracking[db] = (replica_uid, sync_role)
- self.addCleanup(db.close)
- return db
-
- def create_database_for_role(self, replica_uid, sync_role):
- # hook point for reuse
- return tests.DatabaseBaseTests.create_database(self, replica_uid)
-
- def sync(self, db_from, db_to, trace_hook=None,
- trace_hook_shallow=None):
- from_name, from_sync_role = self._use_tracking[db_from]
- to_name, to_sync_role = self._use_tracking[db_to]
- if from_sync_role not in ('source', 'both'):
- raise Exception("%s marked for %s use but used as source" %
- (from_name, from_sync_role))
- if to_sync_role not in ('target', 'both'):
- raise Exception("%s marked for %s use but used as target" %
- (to_name, to_sync_role))
- return self.do_sync(self, db_from, db_to, trace_hook,
- trace_hook_shallow)
-
- def assertLastExchangeLog(self, db, expected):
- log = getattr(db, '_last_exchange_log', None)
- if log is None:
- return
- self.assertEqual(expected, log)
-
- def copy_database(self, db, sync_role=None):
- # DO NOT COPY OR REUSE THIS CODE OUTSIDE TESTS: COPYING U1DB DATABASES
- # IS THE WRONG THING TO DO, THE ONLY REASON WE DO SO HERE IS TO TEST
- # THAT WE CORRECTLY DETECT IT HAPPENING SO THAT WE CAN RAISE ERRORS
- # RATHER THAN CORRUPT USER DATA. USE SYNC INSTEAD, OR WE WILL SEND
- # NINJA TO YOUR HOUSE.
- db_copy = tests.DatabaseBaseTests.copy_database(self, db)
- name, orig_sync_role = self._use_tracking[db]
- self._use_tracking[db_copy] = (name + '(copy)', sync_role or
- orig_sync_role)
- return db_copy
-
- def test_sync_tracks_db_generation_of_other(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.assertEqual(0, self.sync(self.db1, self.db2))
- self.assertEqual(
- (0, ''), self.db1._get_replica_gen_and_trans_id('test2'))
- self.assertEqual(
- (0, ''), self.db2._get_replica_gen_and_trans_id('test1'))
- self.assertLastExchangeLog(self.db2,
- {'receive':
- {'docs': [], 'last_known_gen': 0},
- 'return':
- {'docs': [], 'last_gen': 0}})
-
- def test_sync_autoresolves(self):
- """
- Test for sync autoresolve remote.
-
- This test was adapted because the remote database receives encrypted
- content, and so it can't compare document contents to autoresolve.
- """
- # The remote database can't autoresolve conflicts based on magic
- # content convergence, so we modify this test to leave open the
- # possibility of the remote document ending up in a conflicted state.
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- doc1 = self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc')
- rev1 = doc1.rev
- doc2 = self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc')
- rev2 = doc2.rev
- self.sync(self.db1, self.db2)
- doc = self.db1.get_doc('doc')
- self.assertFalse(doc.has_conflicts)
- # if the remote content is in a conflicted state, then the document
- # revisions will be different.
- # self.assertEqual(doc.rev, self.db2.get_doc('doc').rev)
- v = vectorclock.VectorClockRev(doc.rev)
- self.assertTrue(v.is_newer(vectorclock.VectorClockRev(rev1)))
- self.assertTrue(v.is_newer(vectorclock.VectorClockRev(rev2)))
-
- def test_sync_autoresolves_moar(self):
- """
- Test for sync autoresolve local.
-
- This test was adapted to decrypt remote content before assert.
- """
- # here we test that when a database that has a conflicted document is
- # the source of a sync, and the target database has a revision of the
- # conflicted document that is newer than the source database's, and
- # that target's database's document's content is the same as the
- # source's document's conflict's, the source's document's conflict gets
- # autoresolved, and the source's document's revision bumped.
- #
- # idea is as follows:
- # A B
- # a1 -
- # `------->
- # a1 a1
- # v v
- # a2 a1b1
- # `------->
- # a1b1+a2 a1b1
- # v
- # a1b1+a2 a1b2 (a1b2 has same content as a2)
- # `------->
- # a3b2 a1b2 (autoresolved)
- # `------->
- # a3b2 a3b2
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc')
- self.sync(self.db1, self.db2)
- for db, content in [(self.db1, '{}'), (self.db2, '{"hi": 42}')]:
- doc = db.get_doc('doc')
- doc.set_json(content)
- db.put_doc(doc)
- self.sync(self.db1, self.db2)
- # db1 and db2 now both have a doc of {hi:42}, but db1 has a conflict
- doc = self.db1.get_doc('doc')
- rev1 = doc.rev
- self.assertTrue(doc.has_conflicts)
- # set db2 to have a doc of {} (same as db1 before the conflict)
- doc = self.db2.get_doc('doc')
- doc.set_json('{}')
- self.db2.put_doc(doc)
- rev2 = doc.rev
- # sync it across
- self.sync(self.db1, self.db2)
- # tadaa!
- doc = self.db1.get_doc('doc')
- self.assertFalse(doc.has_conflicts)
- vec1 = vectorclock.VectorClockRev(rev1)
- vec2 = vectorclock.VectorClockRev(rev2)
- vec3 = vectorclock.VectorClockRev(doc.rev)
- self.assertTrue(vec3.is_newer(vec1))
- self.assertTrue(vec3.is_newer(vec2))
- # because the conflict is on the source, sync it another time
- self.sync(self.db1, self.db2)
- # make sure db2 now has the exact same thing
- doc1 = self.db1.get_doc('doc')
- self.assertGetEncryptedDoc(
- self.db2,
- doc1.doc_id, doc1.rev, doc1.get_json(), False)
-
- def test_sync_autoresolves_moar_backwards(self):
- # here we would test that when a database that has a conflicted
- # document is the target of a sync, and the source database has a
- # revision of the conflicted document that is newer than the target
- # database's, and that source's database's document's content is the
- # same as the target's document's conflict's, the target's document's
- # conflict gets autoresolved, and the document's revision bumped.
- #
- # Despite that, in Soledad we assume that the server never syncs, so
- # it never has conflicted documents. Also, even if it had, convergence
- # would not be possible by checking document contents, because they
- # would be encrypted on the server.
- #
- # Therefore we suppress this test.
- pass
-
- def test_sync_autoresolves_moar_backwards_three(self):
- # here we would test that when a database that has a conflicted
- # document is the target of a sync, and the source database has a
- # revision of the conflicted document that is newer than the target
- # database's, and that source's database's document's content is the
- # same as the target's document's conflict's, the target's document's
- # conflict gets autoresolved, and the document's revision bumped.
- #
- # We use the same reasoning from the last test to suppress this one.
- pass
-
- def test_sync_pulling_doesnt_update_other_if_changed(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- doc = self.db2.create_doc_from_json(tests.simple_doc)
- # After the local side has sent its list of docs, but before we start
- # receiving the target's response, we update the local database with a
- # new record.
- # When we finish synchronizing, we notice that something was updated
- # locally, and we cannot tell c2 our new updated generation.
-
- def before_get_docs(state):
- if state != 'before get_docs':
- return
- self.db1.create_doc_from_json(tests.simple_doc)
-
- self.assertEqual(0, self.sync(self.db1, self.db2,
- trace_hook=before_get_docs))
- self.assertLastExchangeLog(self.db2,
- {'receive':
- {'docs': [], 'last_known_gen': 0},
- 'return':
- {'docs': [(doc.doc_id, doc.rev)],
- 'last_gen': 1}})
- self.assertEqual(1, self.db1._get_replica_gen_and_trans_id('test2')[0])
- # c2 should not have gotten a '_record_sync_info' call, because the
- # local database had been updated more than just by the messages
- # returned from c2.
- self.assertEqual(
- (0, ''), self.db2._get_replica_gen_and_trans_id('test1'))
-
- def test_sync_doesnt_update_other_if_nothing_pulled(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.db1.create_doc_from_json(tests.simple_doc)
-
- def no_record_sync_info(state):
- if state != 'record_sync_info':
- return
- self.fail('SyncTarget.record_sync_info was called')
- self.assertEqual(1, self.sync(self.db1, self.db2,
- trace_hook_shallow=no_record_sync_info))
- self.assertEqual(
- 1,
- self.db2._get_replica_gen_and_trans_id(self.db1._replica_uid)[0])
-
- def test_sync_ignores_convergence(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'both')
- doc = self.db1.create_doc_from_json(tests.simple_doc)
- self.db3 = self.create_database('test3', 'target')
- self.assertEqual(1, self.sync(self.db1, self.db3))
- self.assertEqual(0, self.sync(self.db2, self.db3))
- self.assertEqual(1, self.sync(self.db1, self.db2))
- self.assertLastExchangeLog(self.db2,
- {'receive':
- {'docs': [(doc.doc_id, doc.rev)],
- 'source_uid': 'test1',
- 'source_gen': 1, 'last_known_gen': 0},
- 'return': {'docs': [], 'last_gen': 1}})
-
- def test_sync_ignores_superseded(self):
- self.db1 = self.create_database('test1', 'both')
- self.db2 = self.create_database('test2', 'both')
- doc = self.db1.create_doc_from_json(tests.simple_doc)
- doc_rev1 = doc.rev
- self.db3 = self.create_database('test3', 'target')
- self.sync(self.db1, self.db3)
- self.sync(self.db2, self.db3)
- new_content = '{"key": "altval"}'
- doc.set_json(new_content)
- self.db1.put_doc(doc)
- doc_rev2 = doc.rev
- self.sync(self.db2, self.db1)
- self.assertLastExchangeLog(self.db1,
- {'receive':
- {'docs': [(doc.doc_id, doc_rev1)],
- 'source_uid': 'test2',
- 'source_gen': 1, 'last_known_gen': 0},
- 'return':
- {'docs': [(doc.doc_id, doc_rev2)],
- 'last_gen': 2}})
- self.assertGetDoc(self.db1, doc.doc_id, doc_rev2, new_content, False)
-
- def test_sync_sees_remote_conflicted(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- doc1 = self.db1.create_doc_from_json(tests.simple_doc)
- doc_id = doc1.doc_id
- doc1_rev = doc1.rev
- self.db1.create_index('test-idx', 'key')
- new_doc = '{"key": "altval"}'
- doc2 = self.db2.create_doc_from_json(new_doc, doc_id=doc_id)
- doc2_rev = doc2.rev
- self.assertTransactionLog([doc1.doc_id], self.db1)
- self.sync(self.db1, self.db2)
- self.assertLastExchangeLog(self.db2,
- {'receive':
- {'docs': [(doc_id, doc1_rev)],
- 'source_uid': 'test1',
- 'source_gen': 1, 'last_known_gen': 0},
- 'return':
- {'docs': [(doc_id, doc2_rev)],
- 'last_gen': 1}})
- self.assertTransactionLog([doc_id, doc_id], self.db1)
- self.assertGetDoc(self.db1, doc_id, doc2_rev, new_doc, True)
- self.assertGetDoc(self.db2, doc_id, doc2_rev, new_doc, False)
- from_idx = self.db1.get_from_index('test-idx', 'altval')[0]
- self.assertEqual(doc2.doc_id, from_idx.doc_id)
- self.assertEqual(doc2.rev, from_idx.rev)
- self.assertTrue(from_idx.has_conflicts)
- self.assertEqual([], self.db1.get_from_index('test-idx', 'value'))
-
- def test_sync_sees_remote_delete_conflicted(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- doc1 = self.db1.create_doc_from_json(tests.simple_doc)
- doc_id = doc1.doc_id
- self.db1.create_index('test-idx', 'key')
- self.sync(self.db1, self.db2)
- doc2 = self.make_document(doc1.doc_id, doc1.rev, doc1.get_json())
- new_doc = '{"key": "altval"}'
- doc1.set_json(new_doc)
- self.db1.put_doc(doc1)
- self.db2.delete_doc(doc2)
- self.assertTransactionLog([doc_id, doc_id], self.db1)
- self.sync(self.db1, self.db2)
- self.assertLastExchangeLog(self.db2,
- {'receive':
- {'docs': [(doc_id, doc1.rev)],
- 'source_uid': 'test1',
- 'source_gen': 2, 'last_known_gen': 1},
- 'return': {'docs': [(doc_id, doc2.rev)],
- 'last_gen': 2}})
- self.assertTransactionLog([doc_id, doc_id, doc_id], self.db1)
- self.assertGetDocIncludeDeleted(self.db1, doc_id, doc2.rev, None, True)
- self.assertGetDocIncludeDeleted(
- self.db2, doc_id, doc2.rev, None, False)
- self.assertEqual([], self.db1.get_from_index('test-idx', 'value'))
-
- def test_sync_local_race_conflicted(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- doc = self.db1.create_doc_from_json(tests.simple_doc)
- doc_id = doc.doc_id
- doc1_rev = doc.rev
- self.db1.create_index('test-idx', 'key')
- self.sync(self.db1, self.db2)
- content1 = '{"key": "localval"}'
- content2 = '{"key": "altval"}'
- doc.set_json(content2)
- self.db2.put_doc(doc)
- doc2_rev2 = doc.rev
- triggered = []
-
- def after_whatschanged(state):
- if state != 'after whats_changed':
- return
- triggered.append(True)
- doc = self.make_document(doc_id, doc1_rev, content1)
- self.db1.put_doc(doc)
-
- self.sync(self.db1, self.db2, trace_hook=after_whatschanged)
- self.assertEqual([True], triggered)
- self.assertGetDoc(self.db1, doc_id, doc2_rev2, content2, True)
- from_idx = self.db1.get_from_index('test-idx', 'altval')[0]
- self.assertEqual(doc.doc_id, from_idx.doc_id)
- self.assertEqual(doc.rev, from_idx.rev)
- self.assertTrue(from_idx.has_conflicts)
- self.assertEqual([], self.db1.get_from_index('test-idx', 'value'))
- self.assertEqual([], self.db1.get_from_index('test-idx', 'localval'))
-
- def test_sync_propagates_deletes(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'both')
- doc1 = self.db1.create_doc_from_json(tests.simple_doc)
- doc_id = doc1.doc_id
- self.db1.create_index('test-idx', 'key')
- self.sync(self.db1, self.db2)
- self.db2.create_index('test-idx', 'key')
- self.db3 = self.create_database('test3', 'target')
- self.sync(self.db1, self.db3)
- self.db1.delete_doc(doc1)
- deleted_rev = doc1.rev
- self.sync(self.db1, self.db2)
- self.assertLastExchangeLog(self.db2,
- {'receive':
- {'docs': [(doc_id, deleted_rev)],
- 'source_uid': 'test1',
- 'source_gen': 2, 'last_known_gen': 1},
- 'return': {'docs': [], 'last_gen': 2}})
- self.assertGetDocIncludeDeleted(
- self.db1, doc_id, deleted_rev, None, False)
- self.assertGetDocIncludeDeleted(
- self.db2, doc_id, deleted_rev, None, False)
- self.assertEqual([], self.db1.get_from_index('test-idx', 'value'))
- self.assertEqual([], self.db2.get_from_index('test-idx', 'value'))
- self.sync(self.db2, self.db3)
- self.assertLastExchangeLog(self.db3,
- {'receive':
- {'docs': [(doc_id, deleted_rev)],
- 'source_uid': 'test2',
- 'source_gen': 2,
- 'last_known_gen': 0},
- 'return':
- {'docs': [], 'last_gen': 2}})
- self.assertGetDocIncludeDeleted(
- self.db3, doc_id, deleted_rev, None, False)
-
- def test_sync_propagates_deletes_2(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.db1.create_doc_from_json('{"a": "1"}', doc_id='the-doc')
- self.sync(self.db1, self.db2)
- doc1_2 = self.db2.get_doc('the-doc')
- self.db2.delete_doc(doc1_2)
- self.sync(self.db1, self.db2)
- self.assertGetDocIncludeDeleted(
- self.db1, 'the-doc', doc1_2.rev, None, False)
-
- def test_sync_detects_identical_replica_uid(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test1', 'target')
- self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc1')
- self.assertRaises(
- errors.InvalidReplicaUID, self.sync, self.db1, self.db2)
-
- def test_optional_sync_preserve_json(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- cont1 = '{ "a": 2 }'
- cont2 = '{ "b":3}'
- self.db1.create_doc_from_json(cont1, doc_id="1")
- self.db2.create_doc_from_json(cont2, doc_id="2")
- self.sync(self.db1, self.db2)
- self.assertEqual(cont1, self.db2.get_doc("1").get_json())
- self.assertEqual(cont2, self.db1.get_doc("2").get_json())
-
- def test_sync_propagates_resolution(self):
- """
- Test if synchronization propagates resolution.
-
- This test was adapted to decrypt remote content before assert.
- """
- self.db1 = self.create_database('test1', 'both')
- self.db2 = self.create_database('test2', 'both')
- doc1 = self.db1.create_doc_from_json('{"a": 1}', doc_id='the-doc')
- db3 = self.create_database('test3', 'both')
- self.sync(self.db2, self.db1)
- self.assertEqual(
- self.db1._get_generation_info(),
- self.db2._get_replica_gen_and_trans_id(self.db1._replica_uid))
- self.assertEqual(
- self.db2._get_generation_info(),
- self.db1._get_replica_gen_and_trans_id(self.db2._replica_uid))
- self.sync(db3, self.db1)
- # update on 2
- doc2 = self.make_document('the-doc', doc1.rev, '{"a": 2}')
- self.db2.put_doc(doc2)
- self.sync(self.db2, db3)
- self.assertEqual(db3.get_doc('the-doc').rev, doc2.rev)
- # update on 1
- doc1.set_json('{"a": 3}')
- self.db1.put_doc(doc1)
- # conflicts
- self.sync(self.db2, self.db1)
- self.sync(db3, self.db1)
- self.assertTrue(self.db2.get_doc('the-doc').has_conflicts)
- self.assertTrue(db3.get_doc('the-doc').has_conflicts)
- # resolve
- conflicts = self.db2.get_doc_conflicts('the-doc')
- doc4 = self.make_document('the-doc', None, '{"a": 4}')
- revs = [doc.rev for doc in conflicts]
- self.db2.resolve_doc(doc4, revs)
- doc2 = self.db2.get_doc('the-doc')
- self.assertEqual(doc4.get_json(), doc2.get_json())
- self.assertFalse(doc2.has_conflicts)
- self.sync(self.db2, db3)
- doc3 = db3.get_doc('the-doc')
- if ENC_SCHEME_KEY in doc3.content:
- _crypto = self._soledad._crypto
- key = _crypto.doc_passphrase(doc3.doc_id)
- secret = _crypto.secret
- doc3.set_json(decrypt_doc_dict(
- doc3.content,
- doc3.doc_id, doc3.rev, key, secret))
- self.assertEqual(doc4.get_json(), doc3.get_json())
- self.assertFalse(doc3.has_conflicts)
- self.db1.close()
- self.db2.close()
- db3.close()
-
- def test_sync_puts_changes(self):
- """
- Test if sync puts changes in remote replica.
-
- This test was adapted to decrypt remote content before assert.
- """
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- doc = self.db1.create_doc_from_json(tests.simple_doc)
- self.assertEqual(1, self.sync(self.db1, self.db2))
- self.assertGetEncryptedDoc(
- self.db2, doc.doc_id, doc.rev, tests.simple_doc, False)
- self.assertEqual(1, self.db1._get_replica_gen_and_trans_id('test2')[0])
- self.assertEqual(1, self.db2._get_replica_gen_and_trans_id('test1')[0])
- self.assertLastExchangeLog(
- self.db2,
- {'receive': {'docs': [(doc.doc_id, doc.rev)],
- 'source_uid': 'test1',
- 'source_gen': 1, 'last_known_gen': 0},
- 'return': {'docs': [], 'last_gen': 1}})
-
- def test_sync_pulls_changes(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- doc = self.db2.create_doc_from_json(tests.simple_doc)
- self.db1.create_index('test-idx', 'key')
- self.assertEqual(0, self.sync(self.db1, self.db2))
- self.assertGetDoc(self.db1, doc.doc_id, doc.rev,
- tests.simple_doc, False)
- self.assertEqual(1, self.db1._get_replica_gen_and_trans_id('test2')[0])
- self.assertEqual(1, self.db2._get_replica_gen_and_trans_id('test1')[0])
- self.assertLastExchangeLog(self.db2,
- {'receive':
- {'docs': [], 'last_known_gen': 0},
- 'return':
- {'docs': [(doc.doc_id, doc.rev)],
- 'last_gen': 1}})
- self.assertEqual([doc], self.db1.get_from_index('test-idx', 'value'))
-
- def test_sync_supersedes_conflicts(self):
- self.db1 = self.create_database('test1', 'both')
- self.db2 = self.create_database('test2', 'target')
- self.db3 = self.create_database('test3', 'both')
- doc1 = self.db1.create_doc_from_json('{"a": 1}', doc_id='the-doc')
- self.db2.create_doc_from_json('{"b": 1}', doc_id='the-doc')
- self.db3.create_doc_from_json('{"c": 1}', doc_id='the-doc')
- self.sync(self.db3, self.db1)
- self.assertEqual(
- self.db1._get_generation_info(),
- self.db3._get_replica_gen_and_trans_id(self.db1._replica_uid))
- self.assertEqual(
- self.db3._get_generation_info(),
- self.db1._get_replica_gen_and_trans_id(self.db3._replica_uid))
- self.sync(self.db3, self.db2)
- self.assertEqual(
- self.db2._get_generation_info(),
- self.db3._get_replica_gen_and_trans_id(self.db2._replica_uid))
- self.assertEqual(
- self.db3._get_generation_info(),
- self.db2._get_replica_gen_and_trans_id(self.db3._replica_uid))
- self.assertEqual(3, len(self.db3.get_doc_conflicts('the-doc')))
- doc1.set_json('{"a": 2}')
- self.db1.put_doc(doc1)
- self.sync(self.db3, self.db1)
- # original doc1 should have been removed from conflicts
- self.assertEqual(3, len(self.db3.get_doc_conflicts('the-doc')))
-
- def test_sync_stops_after_get_sync_info(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.db1.create_doc_from_json(tests.simple_doc)
- self.sync(self.db1, self.db2)
-
- def put_hook(state):
- self.fail("Tracehook triggered for %s" % (state,))
-
- self.sync(self.db1, self.db2, trace_hook_shallow=put_hook)
-
- def test_sync_detects_rollback_in_source(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc1')
- self.sync(self.db1, self.db2)
- self.db1_copy = self.copy_database(self.db1)
- self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc2')
- self.sync(self.db1, self.db2)
- self.assertRaises(
- errors.InvalidGeneration, self.sync, self.db1_copy, self.db2)
-
- def test_sync_detects_rollback_in_target(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent")
- self.sync(self.db1, self.db2)
- self.db2_copy = self.copy_database(self.db2)
- self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc2')
- self.sync(self.db1, self.db2)
- self.assertRaises(
- errors.InvalidGeneration, self.sync, self.db1, self.db2_copy)
-
- def test_sync_detects_diverged_source(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.db3 = self.copy_database(self.db1)
- self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent")
- self.db3.create_doc_from_json(tests.simple_doc, doc_id="divergent")
- self.sync(self.db1, self.db2)
- self.assertRaises(
- errors.InvalidTransactionId, self.sync, self.db3, self.db2)
-
- def test_sync_detects_diverged_target(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.db3 = self.copy_database(self.db2)
- self.db3.create_doc_from_json(tests.nested_doc, doc_id="divergent")
- self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent")
- self.sync(self.db1, self.db2)
- self.assertRaises(
- errors.InvalidTransactionId, self.sync, self.db1, self.db3)
-
- def test_sync_detects_rollback_and_divergence_in_source(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc1')
- self.sync(self.db1, self.db2)
- self.db1_copy = self.copy_database(self.db1)
- self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc2')
- self.db1.create_doc_from_json(tests.simple_doc, doc_id='doc3')
- self.sync(self.db1, self.db2)
- self.db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2')
- self.db1_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3')
- self.assertRaises(
- errors.InvalidTransactionId, self.sync, self.db1_copy, self.db2)
-
- def test_sync_detects_rollback_and_divergence_in_target(self):
- self.db1 = self.create_database('test1', 'source')
- self.db2 = self.create_database('test2', 'target')
- self.db1.create_doc_from_json(tests.simple_doc, doc_id="divergent")
- self.sync(self.db1, self.db2)
- self.db2_copy = self.copy_database(self.db2)
- self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc2')
- self.db2.create_doc_from_json(tests.simple_doc, doc_id='doc3')
- self.sync(self.db1, self.db2)
- self.db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc2')
- self.db2_copy.create_doc_from_json(tests.simple_doc, doc_id='doc3')
- self.assertRaises(
- errors.InvalidTransactionId, self.sync, self.db1, self.db2_copy)
-
-
-def make_local_db_and_soledad_target(
- test, path='test',
- source_replica_uid=uuid4().hex):
- test.startTwistedServer()
- replica_uid = os.path.basename(path)
- db = test.request_state._create_database(replica_uid)
- sync_db = test._soledad._sync_db
- sync_enc_pool = test._soledad._sync_enc_pool
- st = soledad_sync_target(
- test, db._dbname,
- source_replica_uid=source_replica_uid,
- sync_db=sync_db,
- sync_enc_pool=sync_enc_pool)
- return db, st
-
-target_scenarios = [
- ('leap', {
- 'create_db_and_target': make_local_db_and_soledad_target,
- 'make_app_with_state': make_soledad_app,
- 'do_sync': sync_via_synchronizer_and_soledad}),
-]