Diffstat (limited to 'scripts')
24 files changed, 662 insertions, 336 deletions
diff --git a/scripts/ddocs/update_design_docs.py b/scripts/ddocs/update_design_docs.py
deleted file mode 100644
index 281482b8..00000000
--- a/scripts/ddocs/update_design_docs.py
+++ /dev/null
@@ -1,170 +0,0 @@
-#!/usr/bin/python
-
-# This script updates Soledad's design documents in the session database and
-# all user databases with contents from the installed leap.soledad.common
-# package.
-
-import json
-import logging
-import argparse
-import re
-import threading
-import binascii
-
-from urlparse import urlparse
-from getpass import getpass
-from ConfigParser import ConfigParser
-
-from couchdb.client import Server
-from couchdb.http import Resource
-from couchdb.http import Session
-from couchdb.http import ResourceNotFound
-
-from leap.soledad.common import ddocs
-
-
-MAX_THREADS = 20
-DESIGN_DOCS = {
-    '_design/docs': json.loads(binascii.a2b_base64(ddocs.docs)),
-    '_design/syncs': json.loads(binascii.a2b_base64(ddocs.syncs)),
-    '_design/transactions': json.loads(
-        binascii.a2b_base64(ddocs.transactions)),
-}
-
-
-# create a logger
-logger = logging.getLogger(__name__)
-LOG_FORMAT = '%(asctime)s %(message)s'
-logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
-
-
-def _parse_args():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('-u', dest='uuid', default=None, type=str,
-                        help='the UUID of the user')
-    parser.add_argument('-t', dest='threads', default=MAX_THREADS, type=int,
-                        help='the number of parallel threads')
-    return parser.parse_args()
-
-
-def _get_url():
-    # get couch url
-    cp = ConfigParser()
-    cp.read('/etc/soledad/soledad-server.conf')
-    url = urlparse(cp.get('soledad-server', 'couch_url'))
-    # get admin password
-    netloc = re.sub('^.*@', '', url.netloc)
-    url = url._replace(netloc=netloc)
-    password = getpass("Admin password for %s: " % url.geturl())
-    return url._replace(netloc='admin:%s@%s' % (password, netloc))
-
-
-def _get_server(url):
-    resource = Resource(
-        url.geturl(), Session(retry_delays=[1, 2, 4, 8], timeout=10))
-    return Server(url=resource)
-
-
-def _confirm(url):
-    hidden_url = re.sub(
-        'http://(.*):.*@',
-        'http://\\1:xxxxx@',
-        url.geturl())
-
-    print """
-    ==========
-    ATTENTION!
-    ==========
-
-    This script will modify Soledad's shared and user databases in:
-
-      %s
-
-    This script does not make a backup of the couch db data, so make sure you
-    have a copy or you may loose data.
-    """ % hidden_url
-    confirm = raw_input("Proceed (type uppercase YES)? ")
-
-    if confirm != "YES":
-        exit(1)
-
-
-#
-# Thread
-#
-
-class DBWorkerThread(threading.Thread):
-
-    def __init__(self, server, dbname, db_idx, db_len, release_fun):
-        threading.Thread.__init__(self)
-        self._dbname = dbname
-        self._cdb = server[self._dbname]
-        self._db_idx = db_idx
-        self._db_len = db_len
-        self._release_fun = release_fun
-
-    def run(self):
-
-        logger.info(
-            "(%d/%d) Updating db %s."
-            % (self._db_idx, self._db_len, self._dbname))
-
-        for doc_id in DESIGN_DOCS:
-            try:
-                doc = self._cdb[doc_id]
-            except ResourceNotFound:
-                doc = {'_id': doc_id}
-            for key in ['lists', 'views', 'updates']:
-                if key in DESIGN_DOCS[doc_id]:
-                    doc[key] = DESIGN_DOCS[doc_id][key]
-            self._cdb.save(doc)
-
-        # release the semaphore
-        self._release_fun()
-
-
-def _launch_update_design_docs_thread(
-        server, dbname, db_idx, db_len, semaphore_pool):
-    semaphore_pool.acquire()  # wait for an available working slot
-    thread = DBWorkerThread(
-        server, dbname, db_idx, db_len, semaphore_pool.release)
-    thread.daemon = True
-    thread.start()
-    return thread
-
-
-def _update_design_docs(args, server):
-
-    # find the actual databases to be updated
-    dbs = []
-    if args.uuid:
-        dbs.append('user-%s' % args.uuid)
-    else:
-        for dbname in server:
-            if dbname.startswith('user-') or dbname == 'shared':
-                dbs.append(dbname)
-            else:
-                logger.info("Skipping db %s." % dbname)
-
-    db_idx = 0
-    db_len = len(dbs)
-    semaphore_pool = threading.BoundedSemaphore(value=args.threads)
-    threads = []
-
-    # launch the update
-    for db in dbs:
-        db_idx += 1
-        threads.append(
-            _launch_update_design_docs_thread(
-                server, db, db_idx, db_len, semaphore_pool))
-
-    # wait for all threads to finish
-    map(lambda thread: thread.join(), threads)
-
-
-if __name__ == "__main__":
-    args = _parse_args()
-    url = _get_url()
-    _confirm(url)
-    server = _get_server(url)
-    _update_design_docs(args, server)
diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile
index 915508ea..21764d84 100644
--- a/scripts/docker/Dockerfile
+++ b/scripts/docker/Dockerfile
@@ -1,51 +1,32 @@
 # start with a fresh debian image
-FROM debian
-
-# expose soledad server port in case we want to run a server container
-EXPOSE 2424
-
-# install dependencies from debian repos
-COPY files/apt/leap.list /etc/apt/sources.list.d/
-
-RUN apt-get update
-RUN apt-get -y --force-yes install leap-archive-keyring
+# we use backports because of libsqlcipher-dev
+FROM debian:jessie-backports
 
 RUN apt-get update
 RUN apt-get -y install git
-RUN apt-get -y install vim
-RUN apt-get -y install python-ipdb
 
-# install python deps
+# needed to build python twisted module
 RUN apt-get -y install libpython2.7-dev
-RUN apt-get -y install libffi-dev
+# needed to build python cryptography module
 RUN apt-get -y install libssl-dev
-RUN apt-get -y install libzmq3-dev
-RUN apt-get -y install python-pip
-RUN apt-get -y install couchdb
-RUN apt-get -y install python-srp
-RUN apt-get -y install python-scrypt
-RUN apt-get -y install leap-keymanager
-RUN apt-get -y install python-tz
+RUN apt-get -y install libffi-dev
+# needed to build pysqlcipher
+RUN apt-get -y install libsqlcipher-dev
+# needed to support keymanager
+RUN apt-get -y install libsqlite3-dev
 
+# install pip and tox
+RUN apt-get -y install python-pip
 RUN pip install -U pip
-RUN pip install psutil
-
-# install soledad-perf deps
-RUN pip install klein
-RUN apt-get -y install curl
-RUN apt-get -y install httperf
+RUN pip install tox
 
 # clone repositories
-ENV BASEURL "https://github.com/leapcode"
-ENV VARDIR "/var/local"
-ENV REPOS "soledad leap_pycommon soledad-perf"
-RUN for repo in ${REPOS}; do git clone ${BASEURL}/${repo}.git /var/local/${repo}; done
+RUN mkdir -p /builds/leap
+RUN git clone -b develop https://0xacab.org/leap/soledad.git /builds/leap/soledad
 
-# copy over files to help setup the environment and run soledad
-RUN mkdir -p /usr/local/soledad
-
-COPY files/build/install-deps-from-repos.sh /usr/local/soledad/
-RUN /usr/local/soledad/install-deps-from-repos.sh
+# use tox to install everything needed to run tests
+RUN cd /builds/leap/soledad/testing && tox -v -r --notest
 
+RUN mkdir -p /usr/local/soledad
 COPY files/bin/ /usr/local/soledad/
diff --git a/scripts/docker/Makefile b/scripts/docker/Makefile
index 4fa2e264..7050526a 100644
--- a/scripts/docker/Makefile
+++ b/scripts/docker/Makefile
@@ -16,7 +16,7 @@
 # Some configurations you might override when calling this makefile #
 #####################################################################
 
-IMAGE_NAME ?= leap/soledad:1.0
+IMAGE_NAME ?= leapcode/soledad:latest
 SOLEDAD_REMOTE ?= https://0xacab.org/leap/soledad.git
 SOLEDAD_BRANCH ?= develop
 SOLEDAD_PRELOAD_NUM ?= 100
@@ -27,11 +27,14 @@ MEMORY ?= 512m
 # Docker image generation (main make target) #
 ##############################################
 
-all: image
+all: soledad-image couchdb-image
 
-image:
+soledad-image:
	docker build -t $(IMAGE_NAME) .
 
+couchdb-image:
+	(cd couchdb/ && make)
+
 ##################################################
 # Run a Soledad Server inside a docker container #
 ##################################################
@@ -69,23 +72,37 @@ run-client-bootstrap:
		/usr/local/soledad/run-client-bootstrap.sh
 
 #################################################
-# Run all trial tests inside a docker container #
+# Run all tests inside a docker container       #
 #################################################
 
-run-trial:
+run-tox:
+	name=$$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 5 | head -n 1); \
+	docker run -d --name $${name} leap/couchdb; \
	docker run -t -i \
		--memory="$(MEMORY)" \
		--env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \
		--env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \
+		--env="COUCH_URL=http://$${name}:5984" \
+		--link $${name} \
		$(IMAGE_NAME) \
-		/usr/local/soledad/run-trial.sh
+		/usr/local/soledad/run-tox.sh
 
 ############################################
 # Performance tests and graphic generation #
 ############################################
 
-run-perf-test:
-	helper/run-test.sh perf
+run-perf:
+	name=$$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 5 | head -n 1); \
+	docker run -d --name $${name} leap/couchdb; \
+	docker run -t -i \
+		--memory="$(MEMORY)" \
+		--env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \
+		--env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \
+		--env="SOLEDAD_PRELOAD_NUM=$(SOLEDAD_PRELOAD_NUM)" \
+		--env="COUCH_URL=http://$${name}:5984" \
+		--link $${name} \
+		$(IMAGE_NAME) \
+		/usr/local/soledad/run-perf.sh
 
 run-client-perf:
	@if [ -z "$(CONTAINER_ID_FILE)" ]; then \
@@ -123,7 +140,7 @@ cp-perf-result:
 # Other helper targets #
 ########################
 
-run-shell: image
+run-shell: soledad-image
	docker run -t -i \
		--memory="$(MEMORY)" \
		$(IMAGE_NAME) \
diff --git a/scripts/docker/README.md b/scripts/docker/README.md
index c4d7ac94..97b39f87 100644
--- a/scripts/docker/README.md
+++ b/scripts/docker/README.md
@@ -11,7 +11,20 @@ Check the `Dockerfile` for the steps for creating the docker image.
 
 Check the `Makefile` for the rules for running containers.
 
-Check the `helper/` directory for scripts that help running tests.
+
+Installation
+------------
+
+1. Install docker for your system: https://docs.docker.com/
+2. Build images by running `make`
+3. Execute `make run-tox` and `make run-perf` to run tox tests and perf tests,
+   respectively.
+4. You may want to pass some variables to the `make` command to control
+   parameters of execution, for example:
+
+       make run-perf SOLEDAD_PRELOAD_NUM=500
+
+   See more variables below.
 
 Environment variables for docker containers
diff --git a/scripts/docker/TODO b/scripts/docker/TODO
index 5185d754..90597637 100644
--- a/scripts/docker/TODO
+++ b/scripts/docker/TODO
@@ -1 +1,5 @@
 - limit resources of containers (mem and cpu)
+- allow running couchdb on another container
+- use a config file to get defaults for running tests
+- use the /builds directory as base of git repo
+- save the test state to a directory to make it reproducible
diff --git a/scripts/docker/couchdb/Dockerfile b/scripts/docker/couchdb/Dockerfile
new file mode 100644
index 00000000..03448da5
--- /dev/null
+++ b/scripts/docker/couchdb/Dockerfile
@@ -0,0 +1,3 @@
+FROM couchdb:latest
+
+COPY local.ini /usr/local/etc/couchdb/
diff --git a/scripts/docker/couchdb/Makefile b/scripts/docker/couchdb/Makefile
new file mode 100644
index 00000000..cf3ac966
--- /dev/null
+++ b/scripts/docker/couchdb/Makefile
@@ -0,0 +1,4 @@
+IMAGE_NAME ?= leap/couchdb
+
+image:
+	docker build -t $(IMAGE_NAME) .
diff --git a/scripts/docker/couchdb/README.rst b/scripts/docker/couchdb/README.rst
new file mode 100644
index 00000000..31a791a8
--- /dev/null
+++ b/scripts/docker/couchdb/README.rst
@@ -0,0 +1,12 @@
+Couchdb Docker image
+====================
+
+This directory contains rules to build a custom couchdb docker image to be
+provided as backend to soledad server.
+
+Type `make` to build the image.
+
+Differences between this image and the official one:
+
+  - add the "nodelay" socket option on the httpd section of the config file
+    (see: https://leap.se/code/issues/8264).
diff --git a/scripts/docker/couchdb/local.ini b/scripts/docker/couchdb/local.ini
new file mode 100644
index 00000000..3650e0ed
--- /dev/null
+++ b/scripts/docker/couchdb/local.ini
@@ -0,0 +1,2 @@
+[httpd]
+socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
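The "nodelay" option in local.ini above sets TCP_NODELAY on CouchDB's httpd sockets, disabling Nagle's algorithm so small sync requests are not held back while the kernel waits to coalesce packets (the motivation behind the issue linked in README.rst). A quick way to confirm a running container picked the option up is sketched below; it assumes CouchDB 1.x's `_config` HTTP API and the `requests` library, neither of which is part of this diff:

    import requests

    # CouchDB 1.x serves its runtime configuration over HTTP; if the
    # local.ini above was loaded, the httpd socket options returned
    # here should mention nodelay.
    opts = requests.get(
        'http://127.0.0.1:5984/_config/httpd/socket_options').json()
    assert 'nodelay' in opts, opts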
-z "${SOLEDAD_BRANCH}" ]; then + git -C ${REPO} checkout ${SOLEDAD_BRANCH} +fi + +cd ${REPO} + +tox -- --couch-url ${COUCH_URL} diff --git a/scripts/docker/files/bin/setup-test-env.py b/scripts/docker/files/bin/setup-test-env.py index 0f3ea6f4..4868fd56 100755 --- a/scripts/docker/files/bin/setup-test-env.py +++ b/scripts/docker/files/bin/setup-test-env.py @@ -194,12 +194,12 @@ def user_db_create(args): url = 'http://localhost:%d/user-%s' % (args.port, args.uuid) try: CouchDatabase.open_database( - url=url, create=False, replica_uid=None, ensure_ddocs=True) + url=url, create=False, replica_uid=None) print '[*] error: database "user-%s" already exists' % args.uuid exit(1) except DatabaseDoesNotExist: CouchDatabase.open_database( - url=url, create=True, replica_uid=None, ensure_ddocs=True) + url=url, create=True, replica_uid=None) print '[+] database created: user-%s' % args.uuid @@ -372,7 +372,10 @@ CERT_CONFIG_FILE = os.path.join( def cert_create(args): private_key = os.path.join(args.basedir, args.private_key) cert_key = os.path.join(args.basedir, args.cert_key) - os.mkdir(args.basedir) + try: + os.mkdir(args.basedir) + except OSError: + pass call([ 'openssl', 'req', @@ -389,8 +392,11 @@ def cert_create(args): def cert_delete(args): private_key = os.path.join(args.basedir, args.private_key) cert_key = os.path.join(args.basedir, args.cert_key) - os.unlink(private_key) - os.unlink(cert_key) + try: + os.unlink(private_key) + os.unlink(cert_key) + except OSError: + pass # diff --git a/scripts/migration/0.9.0/.gitignore b/scripts/migration/0.9.0/.gitignore new file mode 100644 index 00000000..6115c109 --- /dev/null +++ b/scripts/migration/0.9.0/.gitignore @@ -0,0 +1 @@ +log/* diff --git a/scripts/migration/0.9.0/README.md b/scripts/migration/0.9.0/README.md new file mode 100644 index 00000000..919a5235 --- /dev/null +++ b/scripts/migration/0.9.0/README.md @@ -0,0 +1,73 @@ +CouchDB schema migration to Soledad 0.8.2 +========================================= + +Migrate couch database schema from <= 0.8.1 version to 0.8.2 version. + + +ATTENTION! +---------- + + - This script does not backup your data for you. Make sure you have a backup + copy of your databases before running this script! + + - Make sure you turn off any service that might be writing to the couch + database before running this script. + + +Usage +----- + +To see what the script would do, run: + + ./migrate.py + +To actually run the migration, add the --do-migrate command line option: + + ./migrate.py --do-migrate + +See command line options: + + ./migrate.py --help + + +Log +--- + +If you don't pass a --log-file command line option, a log will be written to +the `log/` folder. + + +Differences between old and new couch schema +-------------------------------------------- + +The differences between old and new schemas are: + + - Transaction metadata was previously stored inside each document, and we + used design doc view/list functions to retrieve that information. Now, + transaction metadata is stored in documents with special ids + (gen-0000000001 to gen-9999999999). + + - Database replica config metadata was stored in a document called + "u1db_config", and now we store it in the "_local/config" document. + + - Sync metadata was previously stored in documents with id + "u1db_sync_<source-replica-id>", and now are stored in + "_local/sync_<source-replica-id>". + + - The new schema doesn't make use of any design documents. + + +What does this script do +------------------------ + +- List all databases starting with "user-". 
+- For each one, do: + - Check if it contains the old "u1db_config" document. + - If it doesn't, skip this db. + - Get the transaction log using the usual design doc view/list functions. + - Write a new "gen-X" document for each line on the transaction log. + - Get the "u1db_config" document, create a new one in "_local/config", + Delete the old one. + - List all "u1db_sync_X" documents, create new ones in "_local/sync_X", + delete the old ones. + - Delete unused design documents. diff --git a/scripts/migration/0.9.0/log/.empty b/scripts/migration/0.9.0/log/.empty new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/scripts/migration/0.9.0/log/.empty diff --git a/scripts/migration/0.9.0/migrate.py b/scripts/migration/0.9.0/migrate.py new file mode 100755 index 00000000..6ad5bc2d --- /dev/null +++ b/scripts/migration/0.9.0/migrate.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python +# migrate.py + +""" +Migrate CouchDB schema to Soledad 0.8.2 schema. + +****************************************************************************** + ATTENTION! + + - This script does not backup your data for you. Make sure you have a backup + copy of your databases before running this script! + + - Make sure you turn off any service that might be writing to the couch + database before running this script. + +****************************************************************************** + +Run this script with the --help option to see command line options. + +See the README.md file for more information. +""" + +import datetime +import logging +import netrc +import os + +from argparse import ArgumentParser + +from leap.soledad.server import load_configuration + +from migrate_couch_schema import migrate + + +TARGET_VERSION = '0.8.2' +DEFAULT_COUCH_URL = 'http://127.0.0.1:5984' +CONF = load_configuration('/etc/soledad/soledad-server.conf') +NETRC_PATH = CONF['soledad-server']['admin_netrc'] + + +# +# command line args and execution +# + +def _configure_logger(log_file, level=logging.INFO): + if not log_file: + fname, _ = os.path.basename(__file__).split('.') + timestr = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S') + filename = 'soledad_%s_%s_%s.log' \ + % (TARGET_VERSION, fname, timestr) + dirname = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'log') + log_file = os.path.join(dirname, filename) + logging.basicConfig( + filename=log_file, + filemode='a', + format='%(asctime)s,%(msecs)d %(levelname)s %(message)s', + datefmt='%H:%M:%S', + level=level) + + +def _default_couch_url(): + if not os.path.exists(NETRC_PATH): + return DEFAULT_COUCH_URL + parsed_netrc = netrc.netrc(NETRC_PATH) + host, (login, _, password) = parsed_netrc.hosts.items()[0] + url = ('http://%(login)s:%(password)s@%(host)s:5984' % { + 'login': login, + 'password': password, + 'host': host}) + return url + + +def _parse_args(): + parser = ArgumentParser() + parser.add_argument( + '--couch_url', + help='the url for the couch database', + default=_default_couch_url()) + parser.add_argument( + '--do-migrate', + help='actually perform the migration (otherwise ' + 'just print what would be done)', + action='store_true') + parser.add_argument( + '--log-file', + help='the log file to use') + parser.add_argument( + '--pdb', action='store_true', + help='escape to pdb shell in case of exception') + parser.add_argument( + '--verbose', action='store_true', + help='output detailed information about the migration ' + '(i.e. 
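To make the schema change concrete, here is a sketch of the before and after document shapes for one entry of the transaction log; the field names ('gen', 'doc_id', 'trans_id') are taken from the test fixtures later in this diff:

    # old schema: transaction metadata embedded in the document itself,
    # readable only through the design doc view/list functions
    old_doc = {
        '_id': 'doc1',
        'u1db_transactions': [(1, 'trans-1'), (3, 'trans-3')],
    }

    # new schema: one standalone generation document per log entry
    gen_doc = {
        '_id': 'gen-0000000001',  # generation number padded to ten digits
        'gen': 1,
        'doc_id': 'doc1',
        'trans_id': 'trans-1',
    }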
diff --git a/scripts/migration/0.9.0/log/.empty b/scripts/migration/0.9.0/log/.empty
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/migration/0.9.0/log/.empty
diff --git a/scripts/migration/0.9.0/migrate.py b/scripts/migration/0.9.0/migrate.py
new file mode 100755
index 00000000..6ad5bc2d
--- /dev/null
+++ b/scripts/migration/0.9.0/migrate.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+# migrate.py
+
+"""
+Migrate CouchDB schema to Soledad 0.8.2 schema.
+
+******************************************************************************
+                                 ATTENTION!
+
+  - This script does not back up your data for you. Make sure you have a
+    backup copy of your databases before running this script!
+
+  - Make sure you turn off any service that might be writing to the couch
+    database before running this script.
+
+******************************************************************************
+
+Run this script with the --help option to see command line options.
+
+See the README.md file for more information.
+"""
+
+import datetime
+import logging
+import netrc
+import os
+
+from argparse import ArgumentParser
+
+from leap.soledad.server import load_configuration
+
+from migrate_couch_schema import migrate
+
+
+TARGET_VERSION = '0.8.2'
+DEFAULT_COUCH_URL = 'http://127.0.0.1:5984'
+CONF = load_configuration('/etc/soledad/soledad-server.conf')
+NETRC_PATH = CONF['soledad-server']['admin_netrc']
+
+
+#
+# command line args and execution
+#
+
+def _configure_logger(log_file, level=logging.INFO):
+    if not log_file:
+        fname, _ = os.path.basename(__file__).split('.')
+        timestr = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
+        filename = 'soledad_%s_%s_%s.log' \
+                   % (TARGET_VERSION, fname, timestr)
+        dirname = os.path.join(
+            os.path.dirname(os.path.realpath(__file__)), 'log')
+        log_file = os.path.join(dirname, filename)
+    logging.basicConfig(
+        filename=log_file,
+        filemode='a',
+        format='%(asctime)s,%(msecs)d %(levelname)s %(message)s',
+        datefmt='%H:%M:%S',
+        level=level)
+
+
+def _default_couch_url():
+    if not os.path.exists(NETRC_PATH):
+        return DEFAULT_COUCH_URL
+    parsed_netrc = netrc.netrc(NETRC_PATH)
+    host, (login, _, password) = parsed_netrc.hosts.items()[0]
+    url = ('http://%(login)s:%(password)s@%(host)s:5984' % {
+        'login': login,
+        'password': password,
+        'host': host})
+    return url
+
+
+def _parse_args():
+    parser = ArgumentParser()
+    parser.add_argument(
+        '--couch_url',
+        help='the url for the couch database',
+        default=_default_couch_url())
+    parser.add_argument(
+        '--do-migrate',
+        help='actually perform the migration (otherwise '
+             'just print what would be done)',
+        action='store_true')
+    parser.add_argument(
+        '--log-file',
+        help='the log file to use')
+    parser.add_argument(
+        '--pdb', action='store_true',
+        help='escape to pdb shell in case of exception')
+    parser.add_argument(
+        '--verbose', action='store_true',
+        help='output detailed information about the migration '
+             '(i.e. include debug messages)')
+    return parser.parse_args()
+
+
+def _enable_pdb():
+    import sys
+    from IPython.core import ultratb
+    sys.excepthook = ultratb.FormattedTB(
+        mode='Verbose', color_scheme='Linux', call_pdb=1)
+
+
+if __name__ == '__main__':
+    args = _parse_args()
+    if args.pdb:
+        _enable_pdb()
+    _configure_logger(
+        args.log_file,
+        level=logging.DEBUG if args.verbose else logging.INFO)
+    logger = logging.getLogger(__name__)
+    try:
+        migrate(args, TARGET_VERSION)
+    except:
+        logger.exception('Fatal error on migrate script!')
+        raise
diff --git a/scripts/migration/0.9.0/migrate_couch_schema/__init__.py b/scripts/migration/0.9.0/migrate_couch_schema/__init__.py
new file mode 100644
index 00000000..f0b456e4
--- /dev/null
+++ b/scripts/migration/0.9.0/migrate_couch_schema/__init__.py
@@ -0,0 +1,192 @@
+# __init__.py
+"""
+Support functions for migration script.
+"""
+
+import logging
+
+from couchdb import Server
+from couchdb import ResourceNotFound
+from couchdb import ResourceConflict
+
+from leap.soledad.common.couch import GENERATION_KEY
+from leap.soledad.common.couch import TRANSACTION_ID_KEY
+from leap.soledad.common.couch import REPLICA_UID_KEY
+from leap.soledad.common.couch import DOC_ID_KEY
+from leap.soledad.common.couch import SCHEMA_VERSION_KEY
+from leap.soledad.common.couch import CONFIG_DOC_ID
+from leap.soledad.common.couch import SYNC_DOC_ID_PREFIX
+from leap.soledad.common.couch import SCHEMA_VERSION
+
+
+logger = logging.getLogger(__name__)
+
+
+#
+# support functions
+#
+
+def _get_couch_server(couch_url):
+    return Server(couch_url)
+
+
+def _is_migrateable(db):
+    config_doc = db.get('u1db_config')
+    return bool(config_doc)
+
+
+def _get_transaction_log(db):
+    ddoc_path = ['_design', 'transactions', '_view', 'log']
+    resource = db.resource(*ddoc_path)
+    try:
+        _, _, data = resource.get_json()
+    except ResourceNotFound:
+        logger.warning(
+            '[%s] missing transactions design document, '
+            'can\'t get transaction log.' % db.name)
+        return []
+    rows = data['rows']
+    transaction_log = []
+    gen = 1
+    for row in rows:
+        transaction_log.append((gen, row['id'], row['value']))
+        gen += 1
+    return transaction_log
+
+
+def _get_user_dbs(server):
+    user_dbs = filter(lambda dbname: dbname.startswith('user-'), server)
+    return user_dbs
+
+
+#
+# migration main functions
+#
+
+def migrate(args, target_version):
+    server = _get_couch_server(args.couch_url)
+    logger.info('starting couch schema migration to %s' % target_version)
+    if not args.do_migrate:
+        logger.warning('dry-run: no changes will be made to databases')
+    user_dbs = _get_user_dbs(server)
+    for dbname in user_dbs:
+        db = server[dbname]
+        if not _is_migrateable(db):
+            logger.warning("[%s] skipping not migrateable user db" % dbname)
+            continue
+        logger.info("[%s] starting migration of user db" % dbname)
+        try:
+            _migrate_user_db(db, args.do_migrate)
+            logger.info("[%s] finished migration of user db" % dbname)
+        except:
+            logger.exception('[%s] error migrating user db' % dbname)
+            logger.error('continuing with next database.')
+    logger.info('finished couch schema migration to %s' % target_version)
+
+
+def _migrate_user_db(db, do_migrate):
+    _migrate_transaction_log(db, do_migrate)
+    _migrate_sync_docs(db, do_migrate)
+    _delete_design_docs(db, do_migrate)
+    _migrate_config_doc(db, do_migrate)
+
+
+def _migrate_transaction_log(db, do_migrate):
+    transaction_log = _get_transaction_log(db)
+    for gen, doc_id, trans_id in transaction_log:
+        gen_doc_id = 'gen-%s' % str(gen).zfill(10)
+        doc = {
+            '_id': gen_doc_id,
+            GENERATION_KEY: gen,
+            DOC_ID_KEY: doc_id,
+            TRANSACTION_ID_KEY: trans_id,
+        }
+        logger.debug('[%s] creating gen doc: %s' % (db.name, gen_doc_id))
+        if do_migrate:
+            try:
+                db.save(doc)
+            except ResourceConflict:
+                # this gen document already exists. if documents are the
+                # same, continue with migration.
+                existing_doc = db.get(gen_doc_id)
+                for key in [GENERATION_KEY, DOC_ID_KEY, TRANSACTION_ID_KEY]:
+                    if existing_doc[key] != doc[key]:
+                        raise
+
+
+def _migrate_config_doc(db, do_migrate):
+    old_doc = db['u1db_config']
+    new_doc = {
+        '_id': CONFIG_DOC_ID,
+        REPLICA_UID_KEY: old_doc[REPLICA_UID_KEY],
+        SCHEMA_VERSION_KEY: SCHEMA_VERSION,
+    }
+    logger.info("[%s] moving config doc: %s -> %s"
+                % (db.name, old_doc['_id'], new_doc['_id']))
+    if do_migrate:
+        # the config doc must not exist, otherwise we would have skipped
+        # this database.
+        db.save(new_doc)
+        db.delete(old_doc)
+
+
+def _migrate_sync_docs(db, do_migrate):
+    logger.info('[%s] moving sync docs' % db.name)
+    view = db.view(
+        '_all_docs',
+        startkey='u1db_sync',
+        endkey='u1db_synd',
+        include_docs='true')
+    for row in view.rows:
+        old_doc = row['doc']
+        old_id = old_doc['_id']
+
+        # older schemas used different documents with ids starting with
+        # "u1db_sync" to store sync-related data:
+        #
+        #   - u1db_sync_log: was used to store the whole sync log.
+        #   - u1db_sync_state: was used to store the sync state.
+        #
+        # if any of these documents exist in the current db, they are
+        # leftover from previous migrations, and should just be removed.
+        if old_id in ['u1db_sync_log', 'u1db_sync_state']:
+            logger.info('[%s] removing leftover document: %s'
+                        % (db.name, old_id))
+            if do_migrate:
+                db.delete(old_doc)
+            continue
+
+        replica_uid = old_id.replace('u1db_sync_', '')
+        new_id = "%s%s" % (SYNC_DOC_ID_PREFIX, replica_uid)
+        new_doc = {
+            '_id': new_id,
+            GENERATION_KEY: old_doc['generation'],
+            TRANSACTION_ID_KEY: old_doc['transaction_id'],
+            REPLICA_UID_KEY: replica_uid,
+        }
+        logger.debug("[%s] moving sync doc: %s -> %s"
+                     % (db.name, old_id, new_id))
+        if do_migrate:
+            try:
+                db.save(new_doc)
+            except ResourceConflict:
+                # this sync document already exists. if documents are the
+                # same, continue with migration.
+                existing_doc = db.get(new_id)
+                for key in [GENERATION_KEY, TRANSACTION_ID_KEY,
+                            REPLICA_UID_KEY]:
+                    if existing_doc[key] != new_doc[key]:
+                        raise
+            db.delete(old_doc)
+
+
+def _delete_design_docs(db, do_migrate):
+    for ddoc in ['docs', 'syncs', 'transactions']:
+        doc_id = '_design/%s' % ddoc
+        doc = db.get(doc_id)
+        if doc:
+            logger.info("[%s] deleting design doc: %s" % (db.name, doc_id))
+            if do_migrate:
+                db.delete(doc)
+        else:
+            logger.warning("[%s] design doc not found: %s"
+                           % (db.name, doc_id))
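A note on the id scheme used by `_migrate_transaction_log` above: `str(gen).zfill(10)` pads the generation number to ten digits, presumably so that the lexicographic id ordering CouchDB applies to `_all_docs` matches numeric generation order across the whole gen-0000000001 to gen-9999999999 range mentioned in the README. A two-line illustration:

    # zero-padded ids sort lexicographically in the same order as the
    # numeric generations they encode
    ids = ['gen-%s' % str(gen).zfill(10) for gen in (1, 2, 10, 200, 3000)]
    assert ids == sorted(ids)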
diff --git a/scripts/migration/0.9.0/requirements.pip b/scripts/migration/0.9.0/requirements.pip
new file mode 100644
index 00000000..ea22a1a4
--- /dev/null
+++ b/scripts/migration/0.9.0/requirements.pip
@@ -0,0 +1,3 @@
+couchdb
+leap.soledad.common==0.9.0
+leap.soledad.server==0.9.0
diff --git a/scripts/migration/0.9.0/setup.py b/scripts/migration/0.9.0/setup.py
new file mode 100644
index 00000000..0467e932
--- /dev/null
+++ b/scripts/migration/0.9.0/setup.py
@@ -0,0 +1,8 @@
+from setuptools import setup
+from setuptools import find_packages
+
+
+setup(
+    name='migrate_couch_schema',
+    packages=find_packages('.'),
+)
diff --git a/scripts/migration/0.9.0/tests/conftest.py b/scripts/migration/0.9.0/tests/conftest.py
new file mode 100644
index 00000000..61f6c7ee
--- /dev/null
+++ b/scripts/migration/0.9.0/tests/conftest.py
@@ -0,0 +1,54 @@
+# conftest.py
+
+"""
+Provide a couch database with content stored in old schema.
+"""
+
+import couchdb
+import pytest
+import uuid
+
+
+COUCH_URL = 'http://127.0.0.1:5984'
+
+transaction_map = """
+function(doc) {
+    if (doc.u1db_transactions)
+        doc.u1db_transactions.forEach(function(t) {
+            emit(t[0],  // use timestamp as key so the results are ordered
+                 t[1]); // value is the transaction_id
+        });
+}
+"""
+
+initial_docs = [
+    {'_id': 'u1db_config', 'replica_uid': 'an-uid'},
+    {'_id': 'u1db_sync_A', 'generation': 0, 'replica_uid': 'A',
+     'transaction_id': ''},
+    {'_id': 'u1db_sync_B', 'generation': 2, 'replica_uid': 'B',
+     'transaction_id': 'X'},
+    {'_id': 'doc1', 'u1db_transactions': [(1, 'trans-1'), (3, 'trans-3')]},
+    {'_id': 'doc2', 'u1db_transactions': [(2, 'trans-2'), (4, 'trans-4')]},
+    {'_id': '_design/docs'},
+    {'_id': '_design/syncs'},
+    {'_id': '_design/transactions',
+     'views': {'log': {'map': transaction_map}}},
+    # add some data from a previous interrupted migration
+    {'_id': '_local/sync_A', 'gen': 0, 'trans_id': '', 'replica_uid': 'A'},
+    {'_id': 'gen-0000000002',
+     'gen': 2, 'trans_id': 'trans-2', 'doc_id': 'doc2'},
+    # the following should be removed if found in the dbs
+    {'_id': 'u1db_sync_log'},
+    {'_id': 'u1db_sync_state'},
+]
+
+
+@pytest.fixture(scope='function')
+def db(request):
+    server = couchdb.Server(COUCH_URL)
+    dbname = "user-" + uuid.uuid4().hex
+    db = server.create(dbname)
+    for doc in initial_docs:
+        db.save(doc)
+    request.addfinalizer(lambda: server.delete(dbname))
+    return db
diff --git a/scripts/migration/0.9.0/tests/test_migrate.py b/scripts/migration/0.9.0/tests/test_migrate.py
new file mode 100644
index 00000000..10c8b906
--- /dev/null
+++ b/scripts/migration/0.9.0/tests/test_migrate.py
@@ -0,0 +1,67 @@
+# test_migrate.py
+
+"""
+Ensure that the migration script works!
+"""
+
+from migrate_couch_schema import _migrate_user_db
+
+from leap.soledad.common.couch import GENERATION_KEY
+from leap.soledad.common.couch import TRANSACTION_ID_KEY
+from leap.soledad.common.couch import REPLICA_UID_KEY
+from leap.soledad.common.couch import DOC_ID_KEY
+from leap.soledad.common.couch import SCHEMA_VERSION_KEY
+from leap.soledad.common.couch import CONFIG_DOC_ID
+from leap.soledad.common.couch import SYNC_DOC_ID_PREFIX
+from leap.soledad.common.couch import SCHEMA_VERSION
+
+
+def test__migrate_user_db(db):
+    _migrate_user_db(db, True)
+
+    # we should find exactly 6 documents: 2 normal documents and 4
+    # generation documents
+    view = db.view('_all_docs')
+    assert len(view.rows) == 6
+
+    # ensure that the ids of the documents we found in the database are
+    # correct
+    doc_ids = map(lambda doc: doc.id, view.rows)
+    assert 'doc1' in doc_ids
+    assert 'doc2' in doc_ids
+    assert 'gen-0000000001' in doc_ids
+    assert 'gen-0000000002' in doc_ids
+    assert 'gen-0000000003' in doc_ids
+    assert 'gen-0000000004' in doc_ids
+
+    # assert config doc contents
+    config_doc = db.get(CONFIG_DOC_ID)
+    assert config_doc[REPLICA_UID_KEY] == 'an-uid'
+    assert config_doc[SCHEMA_VERSION_KEY] == SCHEMA_VERSION
+
+    # assert sync docs contents
+    sync_doc_A = db.get('%s%s' % (SYNC_DOC_ID_PREFIX, 'A'))
+    assert sync_doc_A[GENERATION_KEY] == 0
+    assert sync_doc_A[REPLICA_UID_KEY] == 'A'
+    assert sync_doc_A[TRANSACTION_ID_KEY] == ''
+    sync_doc_B = db.get('%s%s' % (SYNC_DOC_ID_PREFIX, 'B'))
+    assert sync_doc_B[GENERATION_KEY] == 2
+    assert sync_doc_B[REPLICA_UID_KEY] == 'B'
+    assert sync_doc_B[TRANSACTION_ID_KEY] == 'X'
+
+    # assert gen docs contents
+    gen_1 = db.get('gen-0000000001')
+    assert gen_1[DOC_ID_KEY] == 'doc1'
+    assert gen_1[GENERATION_KEY] == 1
+    assert gen_1[TRANSACTION_ID_KEY] == 'trans-1'
+    gen_2 = db.get('gen-0000000002')
+    assert gen_2[DOC_ID_KEY] == 'doc2'
+    assert gen_2[GENERATION_KEY] == 2
+    assert gen_2[TRANSACTION_ID_KEY] == 'trans-2'
+    gen_3 = db.get('gen-0000000003')
+    assert gen_3[DOC_ID_KEY] == 'doc1'
+    assert gen_3[GENERATION_KEY] == 3
+    assert gen_3[TRANSACTION_ID_KEY] == 'trans-3'
+    gen_4 = db.get('gen-0000000004')
+    assert gen_4[DOC_ID_KEY] == 'doc2'
+    assert gen_4[GENERATION_KEY] == 4
+    assert gen_4[TRANSACTION_ID_KEY] == 'trans-4'
diff --git a/scripts/migration/0.9.0/tox.ini b/scripts/migration/0.9.0/tox.ini
new file mode 100644
index 00000000..2bb6be4c
--- /dev/null
+++ b/scripts/migration/0.9.0/tox.ini
@@ -0,0 +1,13 @@
+[tox]
+envlist = py27
+
+[testenv]
+commands = py.test {posargs}
+changedir = tests
+deps =
+    pytest
+    couchdb
+    pdbpp
+    -e../../../common
+setenv =
+    TERM=xterm
diff --git a/scripts/packaging/compile_design_docs.py b/scripts/packaging/compile_design_docs.py
deleted file mode 100644
index b2b5729a..00000000
--- a/scripts/packaging/compile_design_docs.py
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/usr/bin/python
-
-
-# This script builds files for the design documents represented in the
-# ../common/src/soledad/common/ddocs directory structure (relative to the
-# current location of the script) into a target directory.
-
-
-import argparse
-from os import listdir
-from os.path import realpath, dirname, isdir, join, isfile, basename
-import json
-
-DDOCS_REL_PATH = ('..', 'common', 'src', 'leap', 'soledad', 'common', 'ddocs')
-
-
-def build_ddocs():
-    """
-    Build design documents.
-
-    For ease of development, couch backend design documents are stored as
-    `.js` files in subdirectories of
-    `../common/src/leap/soledad/common/ddocs`. This function scans that
-    directory for javascript files, and builds the design documents structure.
-
-    This funciton uses the following conventions to generate design documents:
-
-      - Design documents are represented by directories in the form
-        `<prefix>/<ddoc>`, there prefix is the `src/leap/soledad/common/ddocs`
-        directory.
-      - Design document directories might contain `views`, `lists` and
-        `updates` subdirectories.
-      - Views subdirectories must contain a `map.js` file and may contain a
-        `reduce.js` file.
-      - List and updates subdirectories may contain any number of javascript
-        files (i.e. ending in `.js`) whose names will be mapped to the
-        corresponding list or update function name.
-    """
-    ddocs = {}
-
-    # design docs are represented by subdirectories of `DDOCS_REL_PATH`
-    cur_pwd = dirname(realpath(__file__))
-    ddocs_path = join(cur_pwd, *DDOCS_REL_PATH)
-    for ddoc in [f for f in listdir(ddocs_path)
-                 if isdir(join(ddocs_path, f))]:
-
-        ddocs[ddoc] = {'_id': '_design/%s' % ddoc}
-
-        for t in ['views', 'lists', 'updates']:
-            tdir = join(ddocs_path, ddoc, t)
-            if isdir(tdir):
-
-                ddocs[ddoc][t] = {}
-
-                if t == 'views':  # handle views (with map/reduce functions)
-                    for view in [f for f in listdir(tdir)
-                                 if isdir(join(tdir, f))]:
-                        # look for map.js and reduce.js
-                        mapfile = join(tdir, view, 'map.js')
-                        reducefile = join(tdir, view, 'reduce.js')
-                        mapfun = None
-                        reducefun = None
-                        try:
-                            with open(mapfile) as f:
-                                mapfun = f.read()
-                        except IOError:
-                            pass
-                        try:
-                            with open(reducefile) as f:
-                                reducefun = f.read()
-                        except IOError:
-                            pass
-                        ddocs[ddoc]['views'][view] = {}
-
-                        if mapfun is not None:
-                            ddocs[ddoc]['views'][view]['map'] = mapfun
-                        if reducefun is not None:
-                            ddocs[ddoc]['views'][view]['reduce'] = reducefun
-
-                else:  # handle lists, updates, etc
-                    for fun in [f for f in listdir(tdir)
-                                if isfile(join(tdir, f))]:
-                        funfile = join(tdir, fun)
-                        funname = basename(funfile).replace('.js', '')
-                        try:
-                            with open(funfile) as f:
-                                ddocs[ddoc][t][funname] = f.read()
-                        except IOError:
-                            pass
-    return ddocs
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        'target', type=str,
-        help='the target dir where to store design documents')
-    args = parser.parse_args()
-
-    # check if given target is a directory
-    if not isdir(args.target):
-        print 'Error: %s is not a directory.' % args.target
-        exit(1)
-
-    # write desifgn docs files
-    ddocs = build_ddocs()
-    for ddoc in ddocs:
-        ddoc_filename = "%s.json" % ddoc
-        with open(join(args.target, ddoc_filename), 'w') as f:
-            f.write("%s" % json.dumps(ddocs[ddoc], indent=3))
-        print "Wrote _design/%s content in %s" \
-            % (ddoc, join(args.target, ddoc_filename,))
diff --git a/scripts/profiling/mail/couchdb_server.py b/scripts/profiling/mail/couchdb_server.py
index 2cf0a3fd..452f8ec2 100644
--- a/scripts/profiling/mail/couchdb_server.py
+++ b/scripts/profiling/mail/couchdb_server.py
@@ -18,8 +18,7 @@ def start_couchdb_wrapper():
 def get_u1db_database(dbname, port):
     return CouchDatabase.open_database(
         'http://127.0.0.1:%d/%s' % (port, dbname),
-        True,
-        ensure_ddocs=True)
+        True)
 
 
 def create_tokens_database(port, uuid, token_value):
@@ -38,5 +37,5 @@ def get_couchdb_wrapper_and_u1db(uuid, token_value):
     couchdb_u1db = get_u1db_database('user-%s' % uuid, couchdb_wrapper.port)
     get_u1db_database('shared', couchdb_wrapper.port)
     create_tokens_database(couchdb_wrapper.port, uuid, token_value)
-    
+
     return couchdb_wrapper, couchdb_u1db