Diffstat (limited to 'common/src/leap/soledad/common')
-rw-r--r--   common/src/leap/soledad/common/couch.py            | 269
-rw-r--r--   common/src/leap/soledad/common/objectstore.py      | 189
-rw-r--r--   common/src/leap/soledad/common/tests/test_couch.py |  45
3 files changed, 288 insertions(+), 215 deletions(-)
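
The substance of the couch.py change: the single b64-encoded 'u1db_data' document is replaced by one document per metadata attribute, stored under the reserved doc_id prefix 'u1db/', and a new persistent_class decorator rewires the methods listed in PERSISTENCY_MAP so that each attribute is loaded from its document before the wrapped call and written back after calls flagged as mutating it. Below is a minimal, runnable sketch of that wrapping pattern only, not the patch itself; TinyStore and its in-memory _docs dict are hypothetical stand-ins for the real _get_doc()/_put_doc() round trip against CouchDB.

def persistent_class(cls):
    """Wrap the methods listed in cls.PERSISTENCY_MAP so the backing
    attribute is loaded before each call and stored back after calls
    that are flagged as mutating it."""

    def _make_wrapper(method, key, store):
        def wrapper(self, *args, **kwargs):
            doc_id = '%s%s' % (self.U1DB_DATA_DOC_ID_PREFIX, key)
            # load the attribute from its dedicated "document"
            setattr(self, key, self._docs[doc_id])
            result = method(self, *args, **kwargs)
            if store:
                # the method mutates the attribute, so write it back
                self._docs[doc_id] = getattr(self, key)
            return result
        return wrapper

    for key, (_, methods) in cls.PERSISTENCY_MAP.items():
        for name, store in methods:
            setattr(cls, name, _make_wrapper(getattr(cls, name), key, store))
    return cls


@persistent_class
class TinyStore(object):
    # same prefix as ObjectStoreDatabase in the patch
    U1DB_DATA_DOC_ID_PREFIX = 'u1db/'
    # same shape as the map in the patch:
    #   key: [(load_method, dump_method), [(method_name, stores?), ...]]
    PERSISTENCY_MAP = {
        '_transaction_log': [
            (None, None),
            [('get_log', False),
             ('append_log', True)]],
    }

    def __init__(self):
        # hypothetical in-memory stand-in for the couch database
        self._docs = {self.U1DB_DATA_DOC_ID_PREFIX + '_transaction_log': []}

    def get_log(self):
        return list(self._transaction_log)

    def append_log(self, doc_id, trans_id):
        self._transaction_log.append((doc_id, trans_id))


store = TinyStore()
store.append_log('doc-1', 'trans-1')   # loads, mutates and stores the log
print(store.get_log())                 # [('doc-1', 'trans-1')]
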
diff --git a/common/src/leap/soledad/common/couch.py b/common/src/leap/soledad/common/couch.py index 973f8b49..187d3035 100644 --- a/common/src/leap/soledad/common/couch.py +++ b/common/src/leap/soledad/common/couch.py @@ -18,14 +18,12 @@ """A U1DB backend that uses CouchDB as its persistence layer.""" -import uuid import re import simplejson as json import socket import logging -from base64 import b64encode, b64decode from u1db import errors from u1db.sync import Synchronizer from u1db.backends.inmemory import InMemoryIndex @@ -50,22 +48,146 @@ class InvalidURLError(Exception): """ +def persistent_class(cls): + """ + Decorator that modifies a class to ensure u1db metadata persists on + underlying storage. + + @param cls: The class that will be modified. + @type cls: type + """ + + def _create_persistent_method(old_method_name, key, load_method_name, + dump_method_name, store): + """ + Create a persistent method to replace C{old_method_name}. + + The new method will load C{key} using C{load_method_name} and stores + it using C{dump_method_name} depending on the value of C{store}. + """ + # get methods + old_method = getattr(cls, old_method_name) + load_method = getattr(cls, load_method_name) \ + if load_method_name is not None \ + else lambda self, data: setattr(self, key, data) + dump_method = getattr(cls, dump_method_name) \ + if dump_method_name is not None \ + else lambda self: getattr(self, key) + + def _new_method(self, *args, **kwargs): + # get u1db data from couch db + doc = self._get_doc('%s%s' % + (self.U1DB_DATA_DOC_ID_PREFIX, key)) + load_method(self, doc.content['content']) + # run old method + retval = old_method(self, *args, **kwargs) + # store u1db data on couch + if store: + doc.content = {'content': dump_method(self)} + self._put_doc(doc) + return retval + + return _new_method + + # ensure the class has a persistency map + if not hasattr(cls, 'PERSISTENCY_MAP'): + logger.error('Class %s has no PERSISTENCY_MAP attribute, skipping ' + 'persistent methods substitution.' % cls) + return cls + # replace old methods with new persistent ones + for key, ((load_method_name, dump_method_name), + persistent_methods) in cls.PERSISTENCY_MAP.iteritems(): + for (method_name, store) in persistent_methods: + setattr(cls, method_name, + _create_persistent_method( + method_name, + key, + load_method_name, + dump_method_name, + store)) + return cls + + +@persistent_class class CouchDatabase(ObjectStoreDatabase): """ A U1DB backend that uses Couch as its persistence layer. """ - U1DB_TRANSACTION_LOG_KEY = 'transaction_log' - U1DB_CONFLICTS_KEY = 'conflicts' - U1DB_OTHER_GENERATIONS_KEY = 'other_generations' - U1DB_INDEXES_KEY = 'indexes' - U1DB_REPLICA_UID_KEY = 'replica_uid' + U1DB_TRANSACTION_LOG_KEY = '_transaction_log' + U1DB_CONFLICTS_KEY = '_conflicts' + U1DB_OTHER_GENERATIONS_KEY = '_other_generations' + U1DB_INDEXES_KEY = '_indexes' + U1DB_REPLICA_UID_KEY = '_replica_uid' + + U1DB_DATA_KEYS = [ + U1DB_TRANSACTION_LOG_KEY, + U1DB_CONFLICTS_KEY, + U1DB_OTHER_GENERATIONS_KEY, + U1DB_INDEXES_KEY, + U1DB_REPLICA_UID_KEY, + ] COUCH_ID_KEY = '_id' COUCH_REV_KEY = '_rev' COUCH_U1DB_ATTACHMENT_KEY = 'u1db_json' COUCH_U1DB_REV_KEY = 'u1db_rev' + # the following map describes information about methods usage of + # properties that have to persist on the underlying database. The format + # of the map is assumed to be: + # + # { + # 'property_name': [ + # ('property_load_method_name', 'property_dump_method_name'), + # [('method_1_name', bool), + # ... + # ('method_N_name', bool)]], + # ... 
+ # } + # + # where the booleans indicate if the property should be stored after + # each method execution (i.e. if the method alters the property). Property + # load/dump methods will be run after/before properties are read/written + # to the underlying db. + PERSISTENCY_MAP = { + U1DB_TRANSACTION_LOG_KEY: [ + ('_load_transaction_log_from_json', None), + [('_get_transaction_log', False), + ('_get_generation', False), + ('_get_generation_info', False), + ('_get_trans_id_for_gen', False), + ('whats_changed', False), + ('_put_and_update_indexes', True)]], + U1DB_CONFLICTS_KEY: [ + (None, None), + [('_has_conflicts', False), + ('get_doc_conflicts', False), + ('_prune_conflicts', False), + ('resolve_doc', False), + ('_replace_conflicts', True), + ('_force_doc_sync_conflict', True)]], + U1DB_OTHER_GENERATIONS_KEY: [ + ('_load_other_generations_from_json', None), + [('_get_replica_gen_and_trans_id', False), + ('_do_set_replica_gen_and_trans_id', True)]], + U1DB_INDEXES_KEY: [ + ('_load_indexes_from_json', '_dump_indexes_as_json'), + [('list_indexes', False), + ('get_from_index', False), + ('get_range_from_index', False), + ('get_index_keys', False), + ('_put_and_update_indexes', True), + ('create_index', True), + ('delete_index', True)]], + U1DB_REPLICA_UID_KEY: [ + (None, None), + [('_allocate_doc_rev', False), + ('_put_doc_if_newer', False), + ('_ensure_maximal_rev', False), + ('_prune_conflicts', False), + ('_set_replica_uid', True)]]} + @classmethod def open_database(cls, url, create): """ @@ -109,9 +231,11 @@ class CouchDatabase(ObjectStoreDatabase): @param session: an http.Session instance or None for a default session @type session: http.Session """ + # save params self._url = url self._full_commit = full_commit self._session = session + # configure couch self._server = Server(url=self._url, full_commit=self._full_commit, session=self._session) @@ -179,7 +303,7 @@ class CouchDatabase(ObjectStoreDatabase): generation = self._get_generation() results = [] for doc_id in self._database: - if doc_id == self.U1DB_DATA_DOC_ID: + if doc_id.startswith(self.U1DB_DATA_DOC_ID_PREFIX): continue doc = self._get_doc(doc_id, check_for_conflicts=True) if doc.content is None and not include_deleted: @@ -246,14 +370,12 @@ class CouchDatabase(ObjectStoreDatabase): raise errors.IndexNameTakenError index = InMemoryIndex(index_name, list(index_expressions)) for doc_id in self._database: - if doc_id == self.U1DB_DATA_DOC_ID: # skip special file - continue + if doc_id.startswith(self.U1DB_DATA_DOC_ID_PREFIX): + continue # skip special files doc = self._get_doc(doc_id) if doc.content is not None: index.add_json(doc_id, doc.get_json()) self._indexes[index_name] = index - # save data in object store - self._store_u1db_data() def close(self): """ @@ -295,76 +417,25 @@ class CouchDatabase(ObjectStoreDatabase): def _init_u1db_data(self): """ - Initialize U1DB info data structure in the couch db. + Initialize u1db configuration data on backend storage. A U1DB database needs to keep track of all database transactions, document conflicts, the generation of other replicas it has seen, indexes created by users and so on. - In this implementation, all this information is stored in a special - document stored in the couch db with id equals to - CouchDatabse.U1DB_DATA_DOC_ID. - - This method initializes the document that will hold such information. - """ - if self._replica_uid is None: - self._replica_uid = uuid.uuid4().hex - # TODO: prevent user from overwriting a document with the same doc_id - # as this one. 
- doc = self._factory(doc_id=self.U1DB_DATA_DOC_ID) - doc.content = { - self.U1DB_TRANSACTION_LOG_KEY: b64encode(json.dumps([])), - self.U1DB_CONFLICTS_KEY: b64encode(json.dumps({})), - self.U1DB_OTHER_GENERATIONS_KEY: b64encode(json.dumps({})), - self.U1DB_INDEXES_KEY: b64encode(json.dumps({})), - self.U1DB_REPLICA_UID_KEY: b64encode(self._replica_uid), - } - self._put_doc(doc) - - def _fetch_u1db_data(self): - """ - Fetch U1DB info from the couch db. - - See C{_init_u1db_data} documentation. - """ - # retrieve u1db data from couch db - cdoc = self._database.get(self.U1DB_DATA_DOC_ID) - jsonstr = self._database.get_attachment( - cdoc, self.COUCH_U1DB_ATTACHMENT_KEY).read() - content = json.loads(jsonstr) - # set u1db database info - self._transaction_log = json.loads( - b64decode(content[self.U1DB_TRANSACTION_LOG_KEY])) - self._conflicts = json.loads( - b64decode(content[self.U1DB_CONFLICTS_KEY])) - self._other_generations = json.loads( - b64decode(content[self.U1DB_OTHER_GENERATIONS_KEY])) - self._indexes = self._load_indexes_from_json( - b64decode(content[self.U1DB_INDEXES_KEY])) - self._replica_uid = b64decode(content[self.U1DB_REPLICA_UID_KEY]) - # save couch _rev - self._couch_rev = cdoc[self.COUCH_REV_KEY] - - def _store_u1db_data(self): - """ - Store U1DB info in the couch db. - - See C{_init_u1db_data} documentation. - """ - doc = self._factory(doc_id=self.U1DB_DATA_DOC_ID) - doc.content = { - # Here, the b64 encode ensures that document content - # does not cause strange behaviour in couchdb because - # of encoding. - self.U1DB_TRANSACTION_LOG_KEY: - b64encode(json.dumps(self._transaction_log)), - self.U1DB_CONFLICTS_KEY: b64encode(json.dumps(self._conflicts)), - self.U1DB_OTHER_GENERATIONS_KEY: - b64encode(json.dumps(self._other_generations)), - self.U1DB_INDEXES_KEY: b64encode(self._dump_indexes_as_json()), - self.U1DB_REPLICA_UID_KEY: b64encode(self._replica_uid), - self.COUCH_REV_KEY: self._couch_rev} - self._put_doc(doc) + In this implementation, all this information is stored in special + documents stored in the underlying with doc_id prefix equal to + U1DB_DATA_DOC_ID_PREFIX. Those documents ids are reserved: put_doc(), + get_doc() and delete_doc() will not allow documents with a doc_id with + that prefix to be accessed or modified. + """ + for key in self.U1DB_DATA_KEYS: + doc_id = '%s%s' % (self.U1DB_DATA_DOC_ID_PREFIX, key) + doc = self._get_doc(doc_id) + if doc is None: + doc = self._factory(doc_id) + doc.content = {'content': getattr(self, key)} + self._put_doc(doc) #------------------------------------------------------------------------- # Couch specific methods @@ -382,7 +453,7 @@ class CouchDatabase(ObjectStoreDatabase): def _dump_indexes_as_json(self): """ - Dump index definitions as JSON string. + Dump index definitions as JSON. """ indexes = {} for name, idx in self._indexes.iteritems(): @@ -390,25 +461,45 @@ class CouchDatabase(ObjectStoreDatabase): for attr in [self.INDEX_NAME_KEY, self.INDEX_DEFINITION_KEY, self.INDEX_VALUES_KEY]: indexes[name][attr] = getattr(idx, '_' + attr) - return json.dumps(indexes) + return indexes def _load_indexes_from_json(self, indexes): """ - Load index definitions from JSON string. + Load index definitions from stored JSON. - @param indexes: A JSON serialization of a list of [('index-name', - ['field', 'field2'])]. + @param indexes: A JSON representation of indexes as + [('index-name', ['field', 'field2', ...]), ...]. @type indexes: str - - @return: A dictionary with the index definitions. 
- @rtype: dict """ - dict = {} - for name, idx_dict in json.loads(indexes).iteritems(): + self._indexes = {} + for name, idx_dict in indexes.iteritems(): idx = InMemoryIndex(name, idx_dict[self.INDEX_DEFINITION_KEY]) idx._values = idx_dict[self.INDEX_VALUES_KEY] - dict[name] = idx - return dict + self._indexes[name] = idx + + def _load_transaction_log_from_json(self, transaction_log): + """ + Load transaction log from stored JSON. + + @param transaction_log: A JSON representation of transaction_log as + [('generation', 'transaction_id'), ...]. + @type transaction_log: list + """ + self._transaction_log = [] + for gen, trans_id in transaction_log: + self._transaction_log.append((gen, trans_id)) + + def _load_other_generations_from_json(self, other_generations): + """ + Load other generations from stored JSON. + + @param other_generations: A JSON representation of other_generations + as {'replica_uid': ('generation', 'transaction_id'), ...}. + @type other_generations: dict + """ + self._other_generations = {} + for replica_uid, [gen, trans_id] in other_generations.iteritems(): + self._other_generations[replica_uid] = (gen, trans_id) class CouchSyncTarget(ObjectStoreSyncTarget): diff --git a/common/src/leap/soledad/common/objectstore.py b/common/src/leap/soledad/common/objectstore.py index 921cf075..7aff3e32 100644 --- a/common/src/leap/soledad/common/objectstore.py +++ b/common/src/leap/soledad/common/objectstore.py @@ -20,6 +20,10 @@ Abstract U1DB backend to handle storage using object stores (like CouchDB, for example). +This backend uses special documents to store all U1DB data (replica uid, +indexes, transaction logs and info about other dbs). The id of these documents +are reserved and have prefix equal to ObjectStore.U1DB_DATA_DOC_ID_PREFIX. + Right now, this is only used by CouchDatabase backend, but can also be extended to implement OpenStack or Amazon S3 storage, for example. @@ -27,6 +31,13 @@ See U1DB documentation for more information on how to use databases. """ +from base64 import b64encode, b64decode + + +import uuid +import simplejson as json + + from u1db import errors from u1db.backends.inmemory import ( InMemoryDatabase, @@ -39,6 +50,8 @@ class ObjectStoreDatabase(InMemoryDatabase): A backend for storing u1db data in an object store. """ + U1DB_DATA_DOC_ID_PREFIX = 'u1db/' + @classmethod def open_database(cls, url, create, document_factory=None): """ @@ -71,24 +84,52 @@ class ObjectStoreDatabase(InMemoryDatabase): self, replica_uid, document_factory=document_factory) - # sync data in memory with data in object store - if not self._get_doc(self.U1DB_DATA_DOC_ID): - self._init_u1db_data() - self._fetch_u1db_data() + if self._replica_uid is None: + self._replica_uid = uuid.uuid4().hex + self._init_u1db_data() + + def _init_u1db_data(self): + """ + Initialize u1db configuration data on backend storage. + + A U1DB database needs to keep track of all database transactions, + document conflicts, the generation of other replicas it has seen, + indexes created by users and so on. + + In this implementation, all this information is stored in special + documents stored in the couch db with id prefix equal to + U1DB_DATA_DOC_ID_PREFIX. Those documents ids are reserved: + put_doc(), get_doc() and delete_doc() will not allow documents with + a doc_id with that prefix to be accessed or modified. 
+ """ + raise NotImplementedError(self._init_u1db_data) #------------------------------------------------------------------------- # methods from Database #------------------------------------------------------------------------- - def _set_replica_uid(self, replica_uid): + def put_doc(self, doc): """ - Force the replica_uid to be set. + Update a document. - @param replica_uid: The uid of the replica. - @type replica_uid: str + If the document currently has conflicts, put will fail. + If the database specifies a maximum document size and the document + exceeds it, put will fail and raise a DocumentTooBig exception. + + This method prevents from updating the document with doc_id equals to + self.U1DB_DATA_DOC_ID, which contains U1DB data. + + @param doc: A Document with new content. + @type doc: Document + + @return: new_doc_rev - The new revision identifier for the document. + The Document object will also be updated. + @rtype: str """ - InMemoryDatabase._set_replica_uid(self, replica_uid) - self._store_u1db_data() + if doc.doc_id is not None and \ + doc.doc_id.startswith(self.U1DB_DATA_DOC_ID_PREFIX): + raise errors.InvalidDocId() + return InMemoryDatabase.put_doc(self, doc) def _put_doc(self, doc): """ @@ -106,6 +147,27 @@ class ObjectStoreDatabase(InMemoryDatabase): """ raise NotImplementedError(self._put_doc) + def get_doc(self, doc_id, include_deleted=False): + """ + Get the JSON string for the given document. + + This method prevents from getting the document with doc_id equals to + self.U1DB_DATA_DOC_ID, which contains U1DB data. + + @param doc_id: The unique document identifier + @type doc_id: str + @param include_deleted: If set to True, deleted documents will be + returned with empty content. Otherwise asking for a deleted + document will return None. + @type include_deleted: bool + + @return: a Document object. + @rtype: Document + """ + if doc_id.startswith(self.U1DB_DATA_DOC_ID_PREFIX): + raise errors.InvalidDocId() + return InMemoryDatabase.get_doc(self, doc_id, include_deleted) + def _get_doc(self, doc_id): """ Get just the document content, without fancy handling. @@ -136,18 +198,32 @@ class ObjectStoreDatabase(InMemoryDatabase): the documents in the database. @rtype: tuple """ - raise NotImplementedError(self.get_all_docs) + generation = self._get_generation() + results = [] + for doc_id in self._database: + if doc_id.startswith(self.U1DB_DATA_DOC_ID_PREFIX): + continue + doc = self._get_doc(doc_id, check_for_conflicts=True) + if doc.content is None and not include_deleted: + continue + results.append(doc) + return (generation, results) def delete_doc(self, doc): """ Mark a document as deleted. + This method prevents from deleting the document with doc_id equals to + self.U1DB_DATA_DOC_ID, which contains U1DB data. + @param doc: The document to mark as deleted. @type doc: u1db.Document @return: The new revision id of the document. @type: str """ + if doc.doc_id.startswith(self.U1DB_DATA_DOC_ID_PREFIX): + raise errors.InvalidDocId() old_doc = self._get_doc(doc.doc_id, check_for_conflicts=True) if old_doc is None: raise errors.DocumentDoesNotExist @@ -177,58 +253,6 @@ class ObjectStoreDatabase(InMemoryDatabase): """ raise NotImplementedError(self.create_index) - def delete_index(self, index_name): - """ - Remove a named index. - - Here we just guarantee that the new info will be stored in the backend - db after update. - - @param index_name: The name of the index we are removing. 
- @type index_name: str - """ - InMemoryDatabase.delete_index(self, index_name) - self._store_u1db_data() - - def _replace_conflicts(self, doc, conflicts): - """ - Set new conflicts for a document. - - Here we just guarantee that the new info will be stored in the backend - db after update. - - @param doc: The document with a new set of conflicts. - @param conflicts: The new set of conflicts. - @type conflicts: list - """ - InMemoryDatabase._replace_conflicts(self, doc, conflicts) - self._store_u1db_data() - - def _do_set_replica_gen_and_trans_id(self, other_replica_uid, - other_generation, - other_transaction_id): - """ - Set the last-known generation and transaction id for the other - database replica. - - Here we just guarantee that the new info will be stored in the backend - db after update. - - @param other_replica_uid: The U1DB identifier for the other replica. - @type other_replica_uid: str - @param other_generation: The generation number for the other replica. - @type other_generation: int - @param other_transaction_id: The transaction id associated with the - generation. - @type other_transaction_id: str - """ - InMemoryDatabase._do_set_replica_gen_and_trans_id( - self, - other_replica_uid, - other_generation, - other_transaction_id) - self._store_u1db_data() - #------------------------------------------------------------------------- # implemented methods from CommonBackend #------------------------------------------------------------------------- @@ -250,45 +274,6 @@ class ObjectStoreDatabase(InMemoryDatabase): trans_id = self._allocate_transaction_id() self._put_doc(doc) self._transaction_log.append((doc.doc_id, trans_id)) - self._store_u1db_data() - - #------------------------------------------------------------------------- - # methods specific for object stores - #------------------------------------------------------------------------- - - U1DB_DATA_DOC_ID = 'u1db_data' - - def _fetch_u1db_data(self): - """ - Fetch u1db configuration data from backend storage. - - See C{_init_u1db_data} documentation. - """ - NotImplementedError(self._fetch_u1db_data) - - def _store_u1db_data(self): - """ - Store u1db configuration data on backend storage. - - See C{_init_u1db_data} documentation. - """ - NotImplementedError(self._store_u1db_data) - - def _init_u1db_data(self): - """ - Initialize u1db configuration data on backend storage. - - A U1DB database needs to keep track of all database transactions, - document conflicts, the generation of other replicas it has seen, - indexes created by users and so on. - - In this implementation, all this information is stored in a special - document stored in the couch db with id equals to - CouchDatabse.U1DB_DATA_DOC_ID. - - This method initializes the document that will hold such information. 
- """ - NotImplementedError(self._init_u1db_data) class ObjectStoreSyncTarget(InMemorySyncTarget): diff --git a/common/src/leap/soledad/common/tests/test_couch.py b/common/src/leap/soledad/common/tests/test_couch.py index 3b49f39e..42edf9fe 100644 --- a/common/src/leap/soledad/common/tests/test_couch.py +++ b/common/src/leap/soledad/common/tests/test_couch.py @@ -178,7 +178,12 @@ def copy_couch_database_for_test(test, db): new_db._conflicts = copy.deepcopy(db._conflicts) new_db._other_generations = copy.deepcopy(db._other_generations) new_db._indexes = copy.deepcopy(db._indexes) - new_db._store_u1db_data() + # save u1db data on couch + for key in new_db.U1DB_DATA_KEYS: + doc_id = '%s%s' % (new_db.U1DB_DATA_DOC_ID_PREFIX, key) + doc = new_db._get_doc(doc_id) + doc.content = {'content': getattr(new_db, key)} + new_db._put_doc(doc) return new_db @@ -353,20 +358,18 @@ class CouchDatabaseStorageTests(CouchDBTestCase): return [self._listify(i) for i in l] return l - def _fetch_u1db_data(self, db): - cdoc = db._database.get(db.U1DB_DATA_DOC_ID) - jsonstr = db._database.get_attachment(cdoc, 'u1db_json').getvalue() - return json.loads(jsonstr) + def _fetch_u1db_data(self, db, key): + doc = db._get_doc("%s%s" % (db.U1DB_DATA_DOC_ID_PREFIX, key)) + return doc.content['content'] def test_transaction_log_storage_after_put(self): db = couch.CouchDatabase('http://localhost:' + str(self.wrapper.port), 'u1db_tests') db.create_doc({'simple': 'doc'}) - content = self._fetch_u1db_data(db) + content = self._fetch_u1db_data(db, db.U1DB_TRANSACTION_LOG_KEY) self.assertEqual( self._listify(db._transaction_log), - self._listify( - json.loads(b64decode(content[db.U1DB_TRANSACTION_LOG_KEY])))) + self._listify(content)) def test_conflict_log_storage_after_put_if_newer(self): db = couch.CouchDatabase('http://localhost:' + str(self.wrapper.port), @@ -375,29 +378,27 @@ class CouchDatabaseStorageTests(CouchDBTestCase): doc.set_json(nested_doc) doc.rev = db._replica_uid + ':2' db._force_doc_sync_conflict(doc) - content = self._fetch_u1db_data(db) + content = self._fetch_u1db_data(db, db.U1DB_CONFLICTS_KEY) self.assertEqual( self._listify(db._conflicts), - self._listify( - json.loads(b64decode(content[db.U1DB_CONFLICTS_KEY])))) + self._listify(content)) def test_other_gens_storage_after_set(self): db = couch.CouchDatabase('http://localhost:' + str(self.wrapper.port), 'u1db_tests') doc = db.create_doc({'simple': 'doc'}) db._set_replica_gen_and_trans_id('a', 'b', 'c') - content = self._fetch_u1db_data(db) + content = self._fetch_u1db_data(db, db.U1DB_OTHER_GENERATIONS_KEY) self.assertEqual( self._listify(db._other_generations), - self._listify( - json.loads(b64decode(content[db.U1DB_OTHER_GENERATIONS_KEY])))) + self._listify(content)) def test_index_storage_after_create(self): db = couch.CouchDatabase('http://localhost:' + str(self.wrapper.port), 'u1db_tests') doc = db.create_doc({'name': 'john'}) db.create_index('myindex', 'name') - content = self._fetch_u1db_data(db) + content = self._fetch_u1db_data(db, db.U1DB_INDEXES_KEY) myind = db._indexes['myindex'] index = { 'myindex': { @@ -408,8 +409,7 @@ class CouchDatabaseStorageTests(CouchDBTestCase): } self.assertEqual( self._listify(index), - self._listify( - json.loads(b64decode(content[db.U1DB_INDEXES_KEY])))) + self._listify(content)) def test_index_storage_after_delete(self): db = couch.CouchDatabase('http://localhost:' + str(self.wrapper.port), @@ -418,7 +418,7 @@ class CouchDatabaseStorageTests(CouchDBTestCase): db.create_index('myindex', 'name') 
db.create_index('myindex2', 'name') db.delete_index('myindex') - content = self._fetch_u1db_data(db) + content = self._fetch_u1db_data(db, db.U1DB_INDEXES_KEY) myind = db._indexes['myindex2'] index = { 'myindex2': { @@ -429,16 +429,13 @@ class CouchDatabaseStorageTests(CouchDBTestCase): } self.assertEqual( self._listify(index), - self._listify( - json.loads(b64decode(content[db.U1DB_INDEXES_KEY])))) + self._listify(content)) def test_replica_uid_storage_after_db_creation(self): db = couch.CouchDatabase('http://localhost:' + str(self.wrapper.port), 'u1db_tests') - content = self._fetch_u1db_data(db) - self.assertEqual( - db._replica_uid, - b64decode(content[db.U1DB_REPLICA_UID_KEY])) + content = self._fetch_u1db_data(db, db.U1DB_REPLICA_UID_KEY) + self.assertEqual(db._replica_uid, content) load_tests = tests.load_with_scenarios |
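
On the objectstore.py side the 'u1db/' prefix becomes a reserved namespace: put_doc(), get_doc() and delete_doc() raise InvalidDocId for any doc_id that starts with U1DB_DATA_DOC_ID_PREFIX, while the metadata itself is kept as {'content': value} in per-key documents reachable through the private _get_doc(), which is how the updated tests read it back. A rough caller-side sketch of that contract follows; it assumes a CouchDB server on localhost:5984 and a made-up database name ('example_db'), and only the class, method and attribute names come from the patch.

from u1db import errors
from leap.soledad.common.couch import CouchDatabase

# assumes a local CouchDB instance; URL and db name are illustrative
db = CouchDatabase('http://localhost:5984', 'example_db')

# regular documents behave as before
doc = db.create_doc({'simple': 'doc'})

# doc_ids under the reserved prefix are rejected by the public API
try:
    db.get_doc('u1db/_replica_uid')
except errors.InvalidDocId:
    print('reserved u1db/ ids are hidden from get_doc()')

# the per-key metadata documents are still visible to the private helper,
# which is what _fetch_u1db_data() in the tests relies on
meta = db._get_doc('%s%s' % (db.U1DB_DATA_DOC_ID_PREFIX,
                             db.U1DB_REPLICA_UID_KEY))
print(meta.content['content'] == db._replica_uid)  # True
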