From 0514978eeed0e4db41fe13b3352ab55ccf299ef1 Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 22 Jul 2016 20:14:26 +0200 Subject: [test] fail gracefully on cert delete --- scripts/docker/files/bin/setup-test-env.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'scripts') diff --git a/scripts/docker/files/bin/setup-test-env.py b/scripts/docker/files/bin/setup-test-env.py index 0f3ea6f4..c0487e8f 100755 --- a/scripts/docker/files/bin/setup-test-env.py +++ b/scripts/docker/files/bin/setup-test-env.py @@ -389,8 +389,11 @@ def cert_create(args): def cert_delete(args): private_key = os.path.join(args.basedir, args.private_key) cert_key = os.path.join(args.basedir, args.cert_key) - os.unlink(private_key) - os.unlink(cert_key) + try: + os.unlink(private_key) + os.unlink(cert_key) + except OSError: + pass # -- cgit v1.2.3 From c2849c2f13adfc7c1388de50f41cd234868113ec Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 22 Jul 2016 20:15:35 +0200 Subject: [test] update docker readme and todo --- scripts/docker/README.md | 9 +++++++++ scripts/docker/TODO | 4 ++++ 2 files changed, 13 insertions(+) (limited to 'scripts') diff --git a/scripts/docker/README.md b/scripts/docker/README.md index c4d7ac94..fda1c04a 100644 --- a/scripts/docker/README.md +++ b/scripts/docker/README.md @@ -14,6 +14,15 @@ Check the `Makefile` for the rules for running containers. Check the `helper/` directory for scripts that help running tests. +Installation +------------ + +0. update and install +1. Install docker for your system: https://docs.docker.com/ +2. Build the image by running `make` +3. Use one of the scripts in the `helper/` directory + + Environment variables for docker containers ------------------------------------------- diff --git a/scripts/docker/TODO b/scripts/docker/TODO index 5185d754..90597637 100644 --- a/scripts/docker/TODO +++ b/scripts/docker/TODO @@ -1 +1,5 @@ - limit resources of containers (mem and cpu) +- allow running couchdb on another container +- use a config file to get defaults for running tests +- use the /builds directory as base of git repo +- save the test state to a directory to make it reproducible -- cgit v1.2.3 From 23fe0be50c7c9408eab47e8286e19b16a77f66ad Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 08:06:39 -0300 Subject: [test] remove ddocs param from docker setup script --- scripts/docker/files/bin/setup-test-env.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'scripts') diff --git a/scripts/docker/files/bin/setup-test-env.py b/scripts/docker/files/bin/setup-test-env.py index c0487e8f..4868fd56 100755 --- a/scripts/docker/files/bin/setup-test-env.py +++ b/scripts/docker/files/bin/setup-test-env.py @@ -194,12 +194,12 @@ def user_db_create(args): url = 'http://localhost:%d/user-%s' % (args.port, args.uuid) try: CouchDatabase.open_database( - url=url, create=False, replica_uid=None, ensure_ddocs=True) + url=url, create=False, replica_uid=None) print '[*] error: database "user-%s" already exists' % args.uuid exit(1) except DatabaseDoesNotExist: CouchDatabase.open_database( - url=url, create=True, replica_uid=None, ensure_ddocs=True) + url=url, create=True, replica_uid=None) print '[+] database created: user-%s' % args.uuid @@ -372,7 +372,10 @@ CERT_CONFIG_FILE = os.path.join( def cert_create(args): private_key = os.path.join(args.basedir, args.private_key) cert_key = os.path.join(args.basedir, args.cert_key) - os.mkdir(args.basedir) + try: + os.mkdir(args.basedir) + except OSError: + pass call([ 'openssl', 'req', -- cgit 
v1.2.3 From bf9355077c2f190c82d660ad9b7059a1c3f32a8d Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 11:37:47 -0300 Subject: [test] use tox to create docker image --- scripts/docker/Dockerfile | 46 ++++++++++------------------------------------ 1 file changed, 10 insertions(+), 36 deletions(-) (limited to 'scripts') diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile index 915508ea..2ec310a9 100644 --- a/scripts/docker/Dockerfile +++ b/scripts/docker/Dockerfile @@ -1,51 +1,25 @@ # start with a fresh debian image FROM debian -# expose soledad server port in case we want to run a server container -EXPOSE 2424 - -# install dependencies from debian repos -COPY files/apt/leap.list /etc/apt/sources.list.d/ - -RUN apt-get update -RUN apt-get -y --force-yes install leap-archive-keyring - RUN apt-get update RUN apt-get -y install git -RUN apt-get -y install vim -RUN apt-get -y install python-ipdb - -# install python deps +# needed to build python twisted module RUN apt-get -y install libpython2.7-dev -RUN apt-get -y install libffi-dev +# needed to build python cryptography module RUN apt-get -y install libssl-dev -RUN apt-get -y install libzmq3-dev -RUN apt-get -y install python-pip -RUN apt-get -y install couchdb -RUN apt-get -y install python-srp -RUN apt-get -y install python-scrypt -RUN apt-get -y install leap-keymanager -RUN apt-get -y install python-tz +# install pip and tox +RUN apt-get -y install python-pip RUN pip install -U pip -RUN pip install psutil - -# install soledad-perf deps -RUN pip install klein -RUN apt-get -y install curl -RUN apt-get -y install httperf +RUN pip install tox # clone repositories -ENV BASEURL "https://github.com/leapcode" -ENV VARDIR "/var/local" -ENV REPOS "soledad leap_pycommon soledad-perf" -RUN for repo in ${REPOS}; do git clone ${BASEURL}/${repo}.git /var/local/${repo}; done +RUN mkdir -p /builds/leap +RUN git clone -b develop https://0xacab.org/leap/soledad.git /builds/leap/soledad -# copy over files to help setup the environment and run soledad -RUN mkdir -p /usr/local/soledad - -COPY files/build/install-deps-from-repos.sh /usr/local/soledad/ -RUN /usr/local/soledad/install-deps-from-repos.sh +# use tox to install everything needed to run tests +RUN cd /builds/leap/soledad/testing && tox -v -r --notest +RUN mkdir -p /usr/local/soledad COPY files/bin/ /usr/local/soledad/ -- cgit v1.2.3 From 8d08016b6e5985569ca5d04ef3e2690e78809f54 Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 11:38:19 -0300 Subject: [test] use tox and couchdb image to run tests --- scripts/docker/Makefile | 17 +++++++++++------ scripts/docker/files/bin/run-tox.sh | 14 ++++++++++++++ 2 files changed, 25 insertions(+), 6 deletions(-) create mode 100755 scripts/docker/files/bin/run-tox.sh (limited to 'scripts') diff --git a/scripts/docker/Makefile b/scripts/docker/Makefile index 4fa2e264..6f30a341 100644 --- a/scripts/docker/Makefile +++ b/scripts/docker/Makefile @@ -27,11 +27,14 @@ MEMORY ?= 512m # Docker image generation (main make target) # ############################################## -all: image +all: soledad-image couchdb-image -image: +soledad-image: docker build -t $(IMAGE_NAME) . 
+couchdb-image: + docker pull couchdb + ################################################## # Run a Soledad Server inside a docker container # ################################################## @@ -69,16 +72,18 @@ run-client-bootstrap: /usr/local/soledad/run-client-bootstrap.sh ################################################# -# Run all trial tests inside a docker container # +# Run all tests inside a docker container # ################################################# -run-trial: +run-tox: + docker run -d --name couchdb couchdb docker run -t -i \ --memory="$(MEMORY)" \ --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \ --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \ + --link couchdb \ $(IMAGE_NAME) \ - /usr/local/soledad/run-trial.sh + /usr/local/soledad/run-tox.sh ############################################ # Performance tests and graphic generation # @@ -123,7 +128,7 @@ cp-perf-result: # Other helper targets # ######################## -run-shell: image +run-shell: soledad-image docker run -t -i \ --memory="$(MEMORY)" \ $(IMAGE_NAME) \ diff --git a/scripts/docker/files/bin/run-tox.sh b/scripts/docker/files/bin/run-tox.sh new file mode 100755 index 00000000..793ce6e1 --- /dev/null +++ b/scripts/docker/files/bin/run-tox.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +REPO=/builds/leap/soledad/testing + +if [ ! -z "${SOLEDAD_REMOTE}" ]; then + git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE} + git -C ${REPO} fetch origin +fi + +if [ ! -z "${SOLEDAD_BRANCH}" ]; then + git -C ${REPO} checkout ${SOLEDAD_BRANCH} +fi + +(cd ${REPO}; tox -- -v --durations 0 --couch-url http://couchdb:5984) -- cgit v1.2.3 From 1a8ea1fde14eb5b2d2c4d4165c4bc21031512d06 Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 11:39:38 -0300 Subject: [test] use docker image with couchdb service to run tests --- scripts/docker/Dockerfile | 3 +++ 1 file changed, 3 insertions(+) (limited to 'scripts') diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile index 2ec310a9..1e46fda3 100644 --- a/scripts/docker/Dockerfile +++ b/scripts/docker/Dockerfile @@ -4,13 +4,16 @@ FROM debian RUN apt-get update RUN apt-get -y install git + # needed to build python twisted module RUN apt-get -y install libpython2.7-dev # needed to build python cryptography module RUN apt-get -y install libssl-dev +RUN apt-get -y install libffi-dev # install pip and tox RUN apt-get -y install python-pip + RUN pip install -U pip RUN pip install tox -- cgit v1.2.3 From 92813593b93d9788fd978acdeeba59d32c311d48 Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 21:25:46 -0300 Subject: [test] use pip download cache for tests and docker image --- scripts/docker/Dockerfile | 1 - 1 file changed, 1 deletion(-) (limited to 'scripts') diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile index 1e46fda3..8c6bfdb3 100644 --- a/scripts/docker/Dockerfile +++ b/scripts/docker/Dockerfile @@ -13,7 +13,6 @@ RUN apt-get -y install libffi-dev # install pip and tox RUN apt-get -y install python-pip - RUN pip install -U pip RUN pip install tox -- cgit v1.2.3 From 6f98a8d61c33a4fc3619f998eeea0075d51c739b Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 21:31:46 -0300 Subject: [test] add rules to run perf test on docker with separate couchdb server container --- scripts/docker/Makefile | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'scripts') diff --git a/scripts/docker/Makefile b/scripts/docker/Makefile index 6f30a341..6ad4cced 100644 --- a/scripts/docker/Makefile +++ b/scripts/docker/Makefile @@ -89,8 +89,15 @@ 
run-tox: # Performance tests and graphic generation # ############################################ -run-perf-test: - helper/run-test.sh perf +run-perf: + docker run -d --name couchdb couchdb + docker run -t -i \ + --memory="$(MEMORY)" \ + --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \ + --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \ + --link couchdb \ + $(IMAGE_NAME) \ + /usr/local/soledad/run-tox-perf.sh run-client-perf: @if [ -z "$(CONTAINER_ID_FILE)" ]; then \ -- cgit v1.2.3 From 76acb8f39a32b6b61f00af571bae9bd48c0a5d62 Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 21:51:45 -0300 Subject: [test] use random name for couchdb container in docker perf test --- scripts/docker/Makefile | 15 ++++++++++----- scripts/docker/files/bin/run-perf.sh | 20 ++++++++++++++++++++ scripts/docker/files/bin/run-tox.sh | 5 ++++- 3 files changed, 34 insertions(+), 6 deletions(-) create mode 100755 scripts/docker/files/bin/run-perf.sh (limited to 'scripts') diff --git a/scripts/docker/Makefile b/scripts/docker/Makefile index 6ad4cced..4b4d4496 100644 --- a/scripts/docker/Makefile +++ b/scripts/docker/Makefile @@ -76,12 +76,14 @@ run-client-bootstrap: ################################################# run-tox: - docker run -d --name couchdb couchdb + name=$$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 5 | head -n 1); \ + docker run -d --name $${name} couchdb; \ docker run -t -i \ --memory="$(MEMORY)" \ --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \ --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \ - --link couchdb \ + --env="COUCH_URL=http://$${name}:5984" \ + --link $${name} \ $(IMAGE_NAME) \ /usr/local/soledad/run-tox.sh @@ -90,14 +92,17 @@ run-tox: ############################################ run-perf: - docker run -d --name couchdb couchdb + name=$$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 5 | head -n 1); \ + docker run -d --name $${name} couchdb; \ docker run -t -i \ --memory="$(MEMORY)" \ --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \ --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \ - --link couchdb \ + --env="SOLEDAD_PRELOAD_NUM=$(SOLEDAD_PRELOAD_NUM)" \ + --env="COUCH_URL=http://$${name}:5984" \ + --link $${name} \ $(IMAGE_NAME) \ - /usr/local/soledad/run-tox-perf.sh + /usr/local/soledad/run-perf.sh run-client-perf: @if [ -z "$(CONTAINER_ID_FILE)" ]; then \ diff --git a/scripts/docker/files/bin/run-perf.sh b/scripts/docker/files/bin/run-perf.sh new file mode 100755 index 00000000..35c7f006 --- /dev/null +++ b/scripts/docker/files/bin/run-perf.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +REPO=/builds/leap/soledad/testing +COUCH_URL="${COUCH_URL:-http://127.0.0.1:5984}" + +if [ ! -z "${SOLEDAD_REMOTE}" ]; then + git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE} + git -C ${REPO} fetch origin +fi + +if [ ! -z "${SOLEDAD_BRANCH}" ]; then + git -C ${REPO} checkout ${SOLEDAD_BRANCH} +fi + +cd ${REPO} + +tox perf -- \ + --durations 0 \ + --couch-url ${COUCH_URL} \ + --twisted diff --git a/scripts/docker/files/bin/run-tox.sh b/scripts/docker/files/bin/run-tox.sh index 793ce6e1..74fde182 100755 --- a/scripts/docker/files/bin/run-tox.sh +++ b/scripts/docker/files/bin/run-tox.sh @@ -1,6 +1,7 @@ #!/bin/bash REPO=/builds/leap/soledad/testing +COUCH_URL="${COUCH_URL:-http://127.0.0.1:5984}" if [ ! -z "${SOLEDAD_REMOTE}" ]; then git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE} @@ -11,4 +12,6 @@ if [ ! 
-z "${SOLEDAD_BRANCH}" ]; then git -C ${REPO} checkout ${SOLEDAD_BRANCH} fi -(cd ${REPO}; tox -- -v --durations 0 --couch-url http://couchdb:5984) +cd ${REPO} + +tox -- --couch-url ${COUCH_URL} -- cgit v1.2.3 From de5cd462cc3f04275e22d9267ecb8e6c2b23dfda Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 25 Jul 2016 21:34:23 -0300 Subject: [test] allow passing number of docs on command line on perf tests --- scripts/docker/files/bin/run-perf.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/docker/files/bin/run-perf.sh b/scripts/docker/files/bin/run-perf.sh index 35c7f006..72060230 100755 --- a/scripts/docker/files/bin/run-perf.sh +++ b/scripts/docker/files/bin/run-perf.sh @@ -2,6 +2,7 @@ REPO=/builds/leap/soledad/testing COUCH_URL="${COUCH_URL:-http://127.0.0.1:5984}" +SOLEDAD_PRELOAD_NUM="${SOLEDAD_PRELOAD_NUM:-100}" if [ ! -z "${SOLEDAD_REMOTE}" ]; then git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE} @@ -17,4 +18,5 @@ cd ${REPO} tox perf -- \ --durations 0 \ --couch-url ${COUCH_URL} \ - --twisted + --twisted \ + --num-docs ${SOLEDAD_PRELOAD_NUM} -- cgit v1.2.3 From 682aab0b30e479ea4e826f0636340bb100b36c0a Mon Sep 17 00:00:00 2001 From: drebs Date: Tue, 26 Jul 2016 13:44:50 -0300 Subject: [test] add custom couchdb docker image --- scripts/docker/Makefile | 6 +++--- scripts/docker/README.md | 14 +++++++++----- scripts/docker/couchdb/Dockerfile | 3 +++ scripts/docker/couchdb/Makefile | 4 ++++ scripts/docker/couchdb/README.rst | 12 ++++++++++++ scripts/docker/couchdb/local.ini | 2 ++ 6 files changed, 33 insertions(+), 8 deletions(-) create mode 100644 scripts/docker/couchdb/Dockerfile create mode 100644 scripts/docker/couchdb/Makefile create mode 100644 scripts/docker/couchdb/README.rst create mode 100644 scripts/docker/couchdb/local.ini (limited to 'scripts') diff --git a/scripts/docker/Makefile b/scripts/docker/Makefile index 4b4d4496..1bb57757 100644 --- a/scripts/docker/Makefile +++ b/scripts/docker/Makefile @@ -33,7 +33,7 @@ soledad-image: docker build -t $(IMAGE_NAME) . couchdb-image: - docker pull couchdb + (cd couchdb/ && make) ################################################## # Run a Soledad Server inside a docker container # @@ -77,7 +77,7 @@ run-client-bootstrap: run-tox: name=$$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 5 | head -n 1); \ - docker run -d --name $${name} couchdb; \ + docker run -d --name $${name} leap/couchdb; \ docker run -t -i \ --memory="$(MEMORY)" \ --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \ @@ -93,7 +93,7 @@ run-tox: run-perf: name=$$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 5 | head -n 1); \ - docker run -d --name $${name} couchdb; \ + docker run -d --name $${name} leap/couchdb; \ docker run -t -i \ --memory="$(MEMORY)" \ --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \ diff --git a/scripts/docker/README.md b/scripts/docker/README.md index fda1c04a..97b39f87 100644 --- a/scripts/docker/README.md +++ b/scripts/docker/README.md @@ -11,16 +11,20 @@ Check the `Dockerfile` for the steps for creating the docker image. Check the `Makefile` for the rules for running containers. -Check the `helper/` directory for scripts that help running tests. - Installation ------------ -0. update and install 1. Install docker for your system: https://docs.docker.com/ -2. Build the image by running `make` -3. Use one of the scripts in the `helper/` directory +2. Build images by running `make` +3. Execute `make run-tox` and `make run-perf` to run tox tests and perf tests, + respectivelly. +4. 
You may want to pass some variables to the `make` command to control + parameters of execution, for example: + + make run-perf SOLEDAD_PRELOAD_NUM=500 + + See more variables below. Environment variables for docker containers diff --git a/scripts/docker/couchdb/Dockerfile b/scripts/docker/couchdb/Dockerfile new file mode 100644 index 00000000..03448da5 --- /dev/null +++ b/scripts/docker/couchdb/Dockerfile @@ -0,0 +1,3 @@ +FROM couchdb:latest + +COPY local.ini /usr/local/etc/couchdb/ diff --git a/scripts/docker/couchdb/Makefile b/scripts/docker/couchdb/Makefile new file mode 100644 index 00000000..cf3ac966 --- /dev/null +++ b/scripts/docker/couchdb/Makefile @@ -0,0 +1,4 @@ +IMAGE_NAME ?= leap/couchdb + +image: + docker build -t $(IMAGE_NAME) . diff --git a/scripts/docker/couchdb/README.rst b/scripts/docker/couchdb/README.rst new file mode 100644 index 00000000..31a791a8 --- /dev/null +++ b/scripts/docker/couchdb/README.rst @@ -0,0 +1,12 @@ +Couchdb Docker image +==================== + +This directory contains rules to build a custom couchdb docker image to be +provided as backend to soledad server. + +Type `make` to build the image. + +Differences between this image and the official one: + + - add the "nodelay" socket option on the httpd section of the config file + (see: https://leap.se/code/issues/8264). diff --git a/scripts/docker/couchdb/local.ini b/scripts/docker/couchdb/local.ini new file mode 100644 index 00000000..3650e0ed --- /dev/null +++ b/scripts/docker/couchdb/local.ini @@ -0,0 +1,2 @@ +[httpd] +socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}] -- cgit v1.2.3 From bc28ea6e652418791dcf63fadcc81db9c50e2d45 Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 29 Jul 2016 10:01:46 -0300 Subject: [pkg] add couch schema migration script --- scripts/migration/0.8.2/README.md | 73 +++++++++++ scripts/migration/0.8.2/log/.empty | 0 scripts/migration/0.8.2/migrate.py | 77 +++++++++++ .../0.8.2/migrate_couch_schema/__init__.py | 142 +++++++++++++++++++++ scripts/migration/0.8.2/setup.py | 8 ++ scripts/migration/0.8.2/tests/conftest.py | 46 +++++++ scripts/migration/0.8.2/tests/test_migrate.py | 67 ++++++++++ scripts/migration/0.8.2/tox.ini | 13 ++ 8 files changed, 426 insertions(+) create mode 100644 scripts/migration/0.8.2/README.md create mode 100644 scripts/migration/0.8.2/log/.empty create mode 100755 scripts/migration/0.8.2/migrate.py create mode 100644 scripts/migration/0.8.2/migrate_couch_schema/__init__.py create mode 100644 scripts/migration/0.8.2/setup.py create mode 100644 scripts/migration/0.8.2/tests/conftest.py create mode 100644 scripts/migration/0.8.2/tests/test_migrate.py create mode 100644 scripts/migration/0.8.2/tox.ini (limited to 'scripts') diff --git a/scripts/migration/0.8.2/README.md b/scripts/migration/0.8.2/README.md new file mode 100644 index 00000000..919a5235 --- /dev/null +++ b/scripts/migration/0.8.2/README.md @@ -0,0 +1,73 @@ +CouchDB schema migration to Soledad 0.8.2 +========================================= + +Migrate couch database schema from <= 0.8.1 version to 0.8.2 version. + + +ATTENTION! +---------- + + - This script does not backup your data for you. Make sure you have a backup + copy of your databases before running this script! + + - Make sure you turn off any service that might be writing to the couch + database before running this script. 
+ + +Usage +----- + +To see what the script would do, run: + + ./migrate.py + +To actually run the migration, add the --do-migrate command line option: + + ./migrate.py --do-migrate + +See command line options: + + ./migrate.py --help + + +Log +--- + +If you don't pass a --log-file command line option, a log will be written to +the `log/` folder. + + +Differences between old and new couch schema +-------------------------------------------- + +The differences between old and new schemas are: + + - Transaction metadata was previously stored inside each document, and we + used design doc view/list functions to retrieve that information. Now, + transaction metadata is stored in documents with special ids + (gen-0000000001 to gen-9999999999). + + - Database replica config metadata was stored in a document called + "u1db_config", and now we store it in the "_local/config" document. + + - Sync metadata was previously stored in documents with id + "u1db_sync_", and now are stored in + "_local/sync_". + + - The new schema doesn't make use of any design documents. + + +What does this script do +------------------------ + +- List all databases starting with "user-". +- For each one, do: + - Check if it contains the old "u1db_config" document. + - If it doesn't, skip this db. + - Get the transaction log using the usual design doc view/list functions. + - Write a new "gen-X" document for each line on the transaction log. + - Get the "u1db_config" document, create a new one in "_local/config", + Delete the old one. + - List all "u1db_sync_X" documents, create new ones in "_local/sync_X", + delete the old ones. + - Delete unused design documents. diff --git a/scripts/migration/0.8.2/log/.empty b/scripts/migration/0.8.2/log/.empty new file mode 100644 index 00000000..e69de29b diff --git a/scripts/migration/0.8.2/migrate.py b/scripts/migration/0.8.2/migrate.py new file mode 100755 index 00000000..159905ef --- /dev/null +++ b/scripts/migration/0.8.2/migrate.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python +# migrate.py + +""" +Migrate CouchDB schema to Soledad 0.8.2 schema. + +****************************************************************************** + ATTENTION! + + - This script does not backup your data for you. Make sure you have a backup + copy of your databases before running this script! + + - Make sure you turn off any service that might be writing to the couch + database before running this script. + +****************************************************************************** + +Run this script with the --help option to see command line options. + +See the README.md file for more information. 
+""" + +import datetime +import logging +import os + +from argparse import ArgumentParser + +from migrate_couch_schema import migrate + + +TARGET_VERSION = '0.8.2' +DEFAULT_COUCH_URL = 'http://127.0.0.1:5984' + + +# +# command line args and execution +# + +def _configure_logger(log_file): + if not log_file: + fname, _ = os.path.basename(__file__).split('.') + timestr = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S') + filename = 'soledad_%s_%s_%s.log' \ + % (TARGET_VERSION, fname, timestr) + dirname = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'log') + log_file = os.path.join(dirname, filename) + logging.basicConfig( + filename=log_file, + filemode='a', + format='%(asctime)s,%(msecs)d %(levelname)s %(message)s', + datefmt='%H:%M:%S', + level=logging.DEBUG) + + +def _parse_args(): + parser = ArgumentParser() + parser.add_argument( + '--couch_url', + help='the url for the couch database', + default=DEFAULT_COUCH_URL) + parser.add_argument( + '--do-migrate', + help='actually perform the migration (otherwise ' + 'just print what would be done)', + action='store_true') + parser.add_argument( + '--log-file', + help='the log file to use') + return parser.parse_args() + + +if __name__ == '__main__': + args = _parse_args() + _configure_logger(args.log_file) + migrate(args, TARGET_VERSION) diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py new file mode 100644 index 00000000..37e5a525 --- /dev/null +++ b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py @@ -0,0 +1,142 @@ +# __init__.py +""" +Support functions for migration script. +""" + +import logging + +from couchdb import Server + +from leap.soledad.common.couch import GENERATION_KEY +from leap.soledad.common.couch import TRANSACTION_ID_KEY +from leap.soledad.common.couch import REPLICA_UID_KEY +from leap.soledad.common.couch import DOC_ID_KEY +from leap.soledad.common.couch import SCHEMA_VERSION_KEY +from leap.soledad.common.couch import CONFIG_DOC_ID +from leap.soledad.common.couch import SYNC_DOC_ID_PREFIX +from leap.soledad.common.couch import SCHEMA_VERSION + + +logger = logging.getLogger(__name__) + + +# +# support functions +# + +def _get_couch_server(couch_url): + return Server(couch_url) + + +def _is_migrateable(db): + config_doc = db.get('u1db_config') + if config_doc is None: + return False + return True + + +def _get_transaction_log(db): + ddoc_path = ['_design', 'transactions', '_view', 'log'] + resource = db.resource(*ddoc_path) + _, _, data = resource.get_json() + rows = data['rows'] + transaction_log = [] + gen = 1 + for row in rows: + transaction_log.append((gen, row['id'], row['value'])) + gen += 1 + return transaction_log + + +def _get_user_dbs(server): + user_dbs = filter(lambda dbname: dbname.startswith('user-'), server) + return user_dbs + + +# +# migration main functions +# + +def migrate(args, target_version): + server = _get_couch_server(args.couch_url) + logger.info('starting couch schema migration to %s...' 
% target_version) + if not args.do_migrate: + logger.warning('dry-run: no changes will be made to databases') + user_dbs = _get_user_dbs(server) + for dbname in user_dbs: + db = server[dbname] + if not _is_migrateable(db): + logger.warning("skipping user db: %s" % dbname) + continue + logger.info("starting migration of user db: %s" % dbname) + _migrate_user_db(db, args.do_migrate) + logger.info("finished migration of user db: %s" % dbname) + logger.info('finished couch schema migration to %s' % target_version) + + +def _migrate_user_db(db, do_migrate): + _migrate_transaction_log(db, do_migrate) + _migrate_config_doc(db, do_migrate) + _migrate_sync_docs(db, do_migrate) + _delete_design_docs(db, do_migrate) + + +def _migrate_transaction_log(db, do_migrate): + transaction_log = _get_transaction_log(db) + for gen, doc_id, trans_id in transaction_log: + gen_doc_id = 'gen-%s' % str(gen).zfill(10) + doc = { + '_id': gen_doc_id, + GENERATION_KEY: gen, + DOC_ID_KEY: doc_id, + TRANSACTION_ID_KEY: trans_id, + } + logger.info('creating gen doc: %s' % (gen_doc_id)) + if do_migrate: + db.save(doc) + + +def _migrate_config_doc(db, do_migrate): + old_doc = db['u1db_config'] + new_doc = { + '_id': CONFIG_DOC_ID, + REPLICA_UID_KEY: old_doc[REPLICA_UID_KEY], + SCHEMA_VERSION_KEY: SCHEMA_VERSION, + } + logger.info("moving config doc: %s -> %s" + % (old_doc['_id'], new_doc['_id'])) + if do_migrate: + db.save(new_doc) + db.delete(old_doc) + + +def _migrate_sync_docs(db, do_migrate): + view = db.view( + '_all_docs', + startkey='u1db_sync', + endkey='u1db_synd', + include_docs='true') + for row in view.rows: + old_doc = row['doc'] + old_id = old_doc['_id'] + replica_uid = old_id.replace('u1db_sync_', '') + new_id = "%s%s" % (SYNC_DOC_ID_PREFIX, replica_uid) + new_doc = { + '_id': new_id, + GENERATION_KEY: old_doc['generation'], + TRANSACTION_ID_KEY: old_doc['transaction_id'], + REPLICA_UID_KEY: replica_uid, + } + logger.info("moving sync doc: %s -> %s" % (old_id, new_id)) + if do_migrate: + db.save(new_doc) + db.delete(old_doc) + + +def _delete_design_docs(db, do_migrate): + for ddoc in ['docs', 'syncs', 'transactions']: + doc_id = '_design/%s' % ddoc + doc = db.get(doc_id) + logger.info("deleting design doc: %s" % doc_id) + if do_migrate: + db.delete(doc) diff --git a/scripts/migration/0.8.2/setup.py b/scripts/migration/0.8.2/setup.py new file mode 100644 index 00000000..0467e932 --- /dev/null +++ b/scripts/migration/0.8.2/setup.py @@ -0,0 +1,8 @@ +from setuptools import setup +from setuptools import find_packages + + +setup( + name='migrate_couch_schema', + packages=find_packages('.'), +) diff --git a/scripts/migration/0.8.2/tests/conftest.py b/scripts/migration/0.8.2/tests/conftest.py new file mode 100644 index 00000000..92d1e17e --- /dev/null +++ b/scripts/migration/0.8.2/tests/conftest.py @@ -0,0 +1,46 @@ +# conftest.py + +""" +Provide a couch database with content stored in old schema. 
+""" + +import couchdb +import pytest +import uuid + + +COUCH_URL = 'http://127.0.0.1:5984' + +transaction_map = """ +function(doc) { + if (doc.u1db_transactions) + doc.u1db_transactions.forEach(function(t) { + emit(t[0], // use timestamp as key so the results are ordered + t[1]); // value is the transaction_id + }); +} +""" + +initial_docs = [ + {'_id': 'u1db_config', 'replica_uid': 'an-uid'}, + {'_id': 'u1db_sync_A', 'generation': 0, 'replica_uid': 'A', + 'transaction_id': ''}, + {'_id': 'u1db_sync_B', 'generation': 2, 'replica_uid': 'B', + 'transaction_id': 'X'}, + {'_id': 'doc1', 'u1db_transactions': [(1, 'trans-1'), (3, 'trans-3')]}, + {'_id': 'doc2', 'u1db_transactions': [(2, 'trans-2'), (4, 'trans-4')]}, + {'_id': '_design/docs'}, + {'_id': '_design/syncs'}, + {'_id': '_design/transactions', 'views': {'log': {'map': transaction_map}}} +] + + +@pytest.fixture(scope='function') +def db(request): + server = couchdb.Server(COUCH_URL) + dbname = "user-" + uuid.uuid4().hex + db = server.create(dbname) + for doc in initial_docs: + db.save(doc) + request.addfinalizer(lambda: server.delete(dbname)) + return db diff --git a/scripts/migration/0.8.2/tests/test_migrate.py b/scripts/migration/0.8.2/tests/test_migrate.py new file mode 100644 index 00000000..10c8b906 --- /dev/null +++ b/scripts/migration/0.8.2/tests/test_migrate.py @@ -0,0 +1,67 @@ +# test_migrate.py + +""" +Ensure that the migration script works! +""" + +from migrate_couch_schema import _migrate_user_db + +from leap.soledad.common.couch import GENERATION_KEY +from leap.soledad.common.couch import TRANSACTION_ID_KEY +from leap.soledad.common.couch import REPLICA_UID_KEY +from leap.soledad.common.couch import DOC_ID_KEY +from leap.soledad.common.couch import SCHEMA_VERSION_KEY +from leap.soledad.common.couch import CONFIG_DOC_ID +from leap.soledad.common.couch import SYNC_DOC_ID_PREFIX +from leap.soledad.common.couch import SCHEMA_VERSION + + +def test__migrate_user_db(db): + _migrate_user_db(db, True) + + # we should find exactly 6 documents: 2 normal documents and 4 generation + # documents + view = db.view('_all_docs') + assert len(view.rows) == 6 + + # ensure that the ids of the documents we found on the database are correct + doc_ids = map(lambda doc: doc.id, view.rows) + assert 'doc1' in doc_ids + assert 'doc2' in doc_ids + assert 'gen-0000000001' in doc_ids + assert 'gen-0000000002' in doc_ids + assert 'gen-0000000003' in doc_ids + assert 'gen-0000000004' in doc_ids + + # assert config doc contents + config_doc = db.get(CONFIG_DOC_ID) + assert config_doc[REPLICA_UID_KEY] == 'an-uid' + assert config_doc[SCHEMA_VERSION_KEY] == SCHEMA_VERSION + + # assert sync docs contents + sync_doc_A = db.get('%s%s' % (SYNC_DOC_ID_PREFIX, 'A')) + assert sync_doc_A[GENERATION_KEY] == 0 + assert sync_doc_A[REPLICA_UID_KEY] == 'A' + assert sync_doc_A[TRANSACTION_ID_KEY] == '' + sync_doc_B = db.get('%s%s' % (SYNC_DOC_ID_PREFIX, 'B')) + assert sync_doc_B[GENERATION_KEY] == 2 + assert sync_doc_B[REPLICA_UID_KEY] == 'B' + assert sync_doc_B[TRANSACTION_ID_KEY] == 'X' + + # assert gen docs contents + gen_1 = db.get('gen-0000000001') + assert gen_1[DOC_ID_KEY] == 'doc1' + assert gen_1[GENERATION_KEY] == 1 + assert gen_1[TRANSACTION_ID_KEY] == 'trans-1' + gen_2 = db.get('gen-0000000002') + assert gen_2[DOC_ID_KEY] == 'doc2' + assert gen_2[GENERATION_KEY] == 2 + assert gen_2[TRANSACTION_ID_KEY] == 'trans-2' + gen_3 = db.get('gen-0000000003') + assert gen_3[DOC_ID_KEY] == 'doc1' + assert gen_3[GENERATION_KEY] == 3 + assert gen_3[TRANSACTION_ID_KEY] == 
'trans-3' + gen_4 = db.get('gen-0000000004') + assert gen_4[DOC_ID_KEY] == 'doc2' + assert gen_4[GENERATION_KEY] == 4 + assert gen_4[TRANSACTION_ID_KEY] == 'trans-4' diff --git a/scripts/migration/0.8.2/tox.ini b/scripts/migration/0.8.2/tox.ini new file mode 100644 index 00000000..2bb6be4c --- /dev/null +++ b/scripts/migration/0.8.2/tox.ini @@ -0,0 +1,13 @@ +[tox] +envlist = py27 + +[testenv] +commands = py.test {posargs} +changedir = tests +deps = + pytest + couchdb + pdbpp + -e../../../common +setenv = + TERM=xterm -- cgit v1.2.3 From 2ce01514d42e9fcd4bf97a9a06655ceebca5c394 Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 1 Aug 2016 19:58:49 -0300 Subject: [refactor] remove unused design docs compilation code --- scripts/ddocs/update_design_docs.py | 170 ------------------------------- scripts/packaging/compile_design_docs.py | 112 -------------------- 2 files changed, 282 deletions(-) delete mode 100644 scripts/ddocs/update_design_docs.py delete mode 100644 scripts/packaging/compile_design_docs.py (limited to 'scripts') diff --git a/scripts/ddocs/update_design_docs.py b/scripts/ddocs/update_design_docs.py deleted file mode 100644 index 281482b8..00000000 --- a/scripts/ddocs/update_design_docs.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/python - -# This script updates Soledad's design documents in the session database and -# all user databases with contents from the installed leap.soledad.common -# package. - -import json -import logging -import argparse -import re -import threading -import binascii - -from urlparse import urlparse -from getpass import getpass -from ConfigParser import ConfigParser - -from couchdb.client import Server -from couchdb.http import Resource -from couchdb.http import Session -from couchdb.http import ResourceNotFound - -from leap.soledad.common import ddocs - - -MAX_THREADS = 20 -DESIGN_DOCS = { - '_design/docs': json.loads(binascii.a2b_base64(ddocs.docs)), - '_design/syncs': json.loads(binascii.a2b_base64(ddocs.syncs)), - '_design/transactions': json.loads( - binascii.a2b_base64(ddocs.transactions)), -} - - -# create a logger -logger = logging.getLogger(__name__) -LOG_FORMAT = '%(asctime)s %(message)s' -logging.basicConfig(format=LOG_FORMAT, level=logging.INFO) - - -def _parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument('-u', dest='uuid', default=None, type=str, - help='the UUID of the user') - parser.add_argument('-t', dest='threads', default=MAX_THREADS, type=int, - help='the number of parallel threads') - return parser.parse_args() - - -def _get_url(): - # get couch url - cp = ConfigParser() - cp.read('/etc/soledad/soledad-server.conf') - url = urlparse(cp.get('soledad-server', 'couch_url')) - # get admin password - netloc = re.sub('^.*@', '', url.netloc) - url = url._replace(netloc=netloc) - password = getpass("Admin password for %s: " % url.geturl()) - return url._replace(netloc='admin:%s@%s' % (password, netloc)) - - -def _get_server(url): - resource = Resource( - url.geturl(), Session(retry_delays=[1, 2, 4, 8], timeout=10)) - return Server(url=resource) - - -def _confirm(url): - hidden_url = re.sub( - 'http://(.*):.*@', - 'http://\\1:xxxxx@', - url.geturl()) - - print """ - ========== - ATTENTION! - ========== - - This script will modify Soledad's shared and user databases in: - - %s - - This script does not make a backup of the couch db data, so make sure you - have a copy or you may loose data. - """ % hidden_url - confirm = raw_input("Proceed (type uppercase YES)? 
") - - if confirm != "YES": - exit(1) - - -# -# Thread -# - -class DBWorkerThread(threading.Thread): - - def __init__(self, server, dbname, db_idx, db_len, release_fun): - threading.Thread.__init__(self) - self._dbname = dbname - self._cdb = server[self._dbname] - self._db_idx = db_idx - self._db_len = db_len - self._release_fun = release_fun - - def run(self): - - logger.info( - "(%d/%d) Updating db %s." - % (self._db_idx, self._db_len, self._dbname)) - - for doc_id in DESIGN_DOCS: - try: - doc = self._cdb[doc_id] - except ResourceNotFound: - doc = {'_id': doc_id} - for key in ['lists', 'views', 'updates']: - if key in DESIGN_DOCS[doc_id]: - doc[key] = DESIGN_DOCS[doc_id][key] - self._cdb.save(doc) - - # release the semaphore - self._release_fun() - - -def _launch_update_design_docs_thread( - server, dbname, db_idx, db_len, semaphore_pool): - semaphore_pool.acquire() # wait for an available working slot - thread = DBWorkerThread( - server, dbname, db_idx, db_len, semaphore_pool.release) - thread.daemon = True - thread.start() - return thread - - -def _update_design_docs(args, server): - - # find the actual databases to be updated - dbs = [] - if args.uuid: - dbs.append('user-%s' % args.uuid) - else: - for dbname in server: - if dbname.startswith('user-') or dbname == 'shared': - dbs.append(dbname) - else: - logger.info("Skipping db %s." % dbname) - - db_idx = 0 - db_len = len(dbs) - semaphore_pool = threading.BoundedSemaphore(value=args.threads) - threads = [] - - # launch the update - for db in dbs: - db_idx += 1 - threads.append( - _launch_update_design_docs_thread( - server, db, db_idx, db_len, semaphore_pool)) - - # wait for all threads to finish - map(lambda thread: thread.join(), threads) - - -if __name__ == "__main__": - args = _parse_args() - url = _get_url() - _confirm(url) - server = _get_server(url) - _update_design_docs(args, server) diff --git a/scripts/packaging/compile_design_docs.py b/scripts/packaging/compile_design_docs.py deleted file mode 100644 index b2b5729a..00000000 --- a/scripts/packaging/compile_design_docs.py +++ /dev/null @@ -1,112 +0,0 @@ -#!/usr/bin/python - - -# This script builds files for the design documents represented in the -# ../common/src/soledad/common/ddocs directory structure (relative to the -# current location of the script) into a target directory. - - -import argparse -from os import listdir -from os.path import realpath, dirname, isdir, join, isfile, basename -import json - -DDOCS_REL_PATH = ('..', 'common', 'src', 'leap', 'soledad', 'common', 'ddocs') - - -def build_ddocs(): - """ - Build design documents. - - For ease of development, couch backend design documents are stored as - `.js` files in subdirectories of - `../common/src/leap/soledad/common/ddocs`. This function scans that - directory for javascript files, and builds the design documents structure. - - This funciton uses the following conventions to generate design documents: - - - Design documents are represented by directories in the form - `/`, there prefix is the `src/leap/soledad/common/ddocs` - directory. - - Design document directories might contain `views`, `lists` and - `updates` subdirectories. - - Views subdirectories must contain a `map.js` file and may contain a - `reduce.js` file. - - List and updates subdirectories may contain any number of javascript - files (i.e. ending in `.js`) whose names will be mapped to the - corresponding list or update function name. 
- """ - ddocs = {} - - # design docs are represented by subdirectories of `DDOCS_REL_PATH` - cur_pwd = dirname(realpath(__file__)) - ddocs_path = join(cur_pwd, *DDOCS_REL_PATH) - for ddoc in [f for f in listdir(ddocs_path) - if isdir(join(ddocs_path, f))]: - - ddocs[ddoc] = {'_id': '_design/%s' % ddoc} - - for t in ['views', 'lists', 'updates']: - tdir = join(ddocs_path, ddoc, t) - if isdir(tdir): - - ddocs[ddoc][t] = {} - - if t == 'views': # handle views (with map/reduce functions) - for view in [f for f in listdir(tdir) - if isdir(join(tdir, f))]: - # look for map.js and reduce.js - mapfile = join(tdir, view, 'map.js') - reducefile = join(tdir, view, 'reduce.js') - mapfun = None - reducefun = None - try: - with open(mapfile) as f: - mapfun = f.read() - except IOError: - pass - try: - with open(reducefile) as f: - reducefun = f.read() - except IOError: - pass - ddocs[ddoc]['views'][view] = {} - - if mapfun is not None: - ddocs[ddoc]['views'][view]['map'] = mapfun - if reducefun is not None: - ddocs[ddoc]['views'][view]['reduce'] = reducefun - - else: # handle lists, updates, etc - for fun in [f for f in listdir(tdir) - if isfile(join(tdir, f))]: - funfile = join(tdir, fun) - funname = basename(funfile).replace('.js', '') - try: - with open(funfile) as f: - ddocs[ddoc][t][funname] = f.read() - except IOError: - pass - return ddocs - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument( - 'target', type=str, - help='the target dir where to store design documents') - args = parser.parse_args() - - # check if given target is a directory - if not isdir(args.target): - print 'Error: %s is not a directory.' % args.target - exit(1) - - # write desifgn docs files - ddocs = build_ddocs() - for ddoc in ddocs: - ddoc_filename = "%s.json" % ddoc - with open(join(args.target, ddoc_filename), 'w') as f: - f.write("%s" % json.dumps(ddocs[ddoc], indent=3)) - print "Wrote _design/%s content in %s" \ - % (ddoc, join(args.target, ddoc_filename,)) -- cgit v1.2.3 From 9084597674130682d84cd1884c8dbd24b866096e Mon Sep 17 00:00:00 2001 From: drebs Date: Wed, 3 Aug 2016 21:30:08 -0300 Subject: [pkg] support netrc couch access in migrate script --- scripts/migration/0.8.2/migrate.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/migration/0.8.2/migrate.py b/scripts/migration/0.8.2/migrate.py index 159905ef..adc0f7d9 100755 --- a/scripts/migration/0.8.2/migrate.py +++ b/scripts/migration/0.8.2/migrate.py @@ -22,15 +22,20 @@ See the README.md file for more information. 
import datetime import logging +import netrc import os from argparse import ArgumentParser +from leap.soledad.server import load_configuration + from migrate_couch_schema import migrate TARGET_VERSION = '0.8.2' DEFAULT_COUCH_URL = 'http://127.0.0.1:5984' +CONF = load_configuration('/etc/soledad/soledad-server.conf') +NETRC_PATH = CONF['soledad-server']['admin_netrc'] # @@ -54,12 +59,24 @@ def _configure_logger(log_file): level=logging.DEBUG) +def _default_couch_url(): + if not os.path.exists(NETRC_PATH): + return DEFAULT_COUCH_URL + parsed_netrc = netrc.netrc(NETRC_PATH) + host, (login, _, password) = parsed_netrc.hosts.items()[0] + url = ('http://%(login)s:%(password)s@%(host)s:5984' % { + 'login': login, + 'password': password, + 'host': host}) + return url + + def _parse_args(): parser = ArgumentParser() parser.add_argument( '--couch_url', help='the url for the couch database', - default=DEFAULT_COUCH_URL) + default=_default_couch_url()) parser.add_argument( '--do-migrate', help='actually perform the migration (otherwise ' -- cgit v1.2.3 From 6fd0062c4c2199e610d7832bbfbd57a07abab9e1 Mon Sep 17 00:00:00 2001 From: drebs Date: Tue, 9 Aug 2016 15:38:39 -0300 Subject: [test] use "leapcode" in docker image name "leapcode" is the LEAP docker hub organisation varac could squat (https://hub.docker.com/r/leap/ was already taken). --- scripts/docker/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/docker/Makefile b/scripts/docker/Makefile index 1bb57757..0fdc93fa 100644 --- a/scripts/docker/Makefile +++ b/scripts/docker/Makefile @@ -16,7 +16,7 @@ # Some configurations you might override when calling this makefile # ##################################################################### -IMAGE_NAME ?= leap/soledad:1.0 +IMAGE_NAME ?= leapcode/soledad:1.0 SOLEDAD_REMOTE ?= https://0xacab.org/leap/soledad.git SOLEDAD_BRANCH ?= develop SOLEDAD_PRELOAD_NUM ?= 100 -- cgit v1.2.3 From dc0bae8b6025a060297b55520674cd7238f0186b Mon Sep 17 00:00:00 2001 From: Victor Shyba Date: Wed, 17 Aug 2016 23:00:34 -0300 Subject: [bug] remove misleading ensure_ddoc ensure_ddoc doesn't make sense anymore as we don't have any ddoc other than _security, which has its own method for setting. 'ensure_security' is explicit and is set internally when user is creating a database, otherwise it will be False as it's only used during creation. This isn't exposed externally (of couch module) to avoid confusion. This confusion was making create-user-db fail to create a security ddoc as it wasn't passing ensure_ddocs=True.
-- Resolves: #8388 --- scripts/profiling/mail/couchdb_server.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'scripts') diff --git a/scripts/profiling/mail/couchdb_server.py b/scripts/profiling/mail/couchdb_server.py index 2cf0a3fd..452f8ec2 100644 --- a/scripts/profiling/mail/couchdb_server.py +++ b/scripts/profiling/mail/couchdb_server.py @@ -18,8 +18,7 @@ def start_couchdb_wrapper(): def get_u1db_database(dbname, port): return CouchDatabase.open_database( 'http://127.0.0.1:%d/%s' % (port, dbname), - True, - ensure_ddocs=True) + True) def create_tokens_database(port, uuid, token_value): @@ -38,5 +37,5 @@ def get_couchdb_wrapper_and_u1db(uuid, token_value): couchdb_u1db = get_u1db_database('user-%s' % uuid, couchdb_wrapper.port) get_u1db_database('shared', couchdb_wrapper.port) create_tokens_database(couchdb_wrapper.port, uuid, token_value) - + return couchdb_wrapper, couchdb_u1db -- cgit v1.2.3 From 46bb2b65e6fe642b07dee1de6c628c6f2cd303fd Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 19 Aug 2016 11:50:33 -0300 Subject: [pkg] add --pdb option to migration script --- scripts/migration/0.8.2/migrate.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'scripts') diff --git a/scripts/migration/0.8.2/migrate.py b/scripts/migration/0.8.2/migrate.py index adc0f7d9..fe612221 100755 --- a/scripts/migration/0.8.2/migrate.py +++ b/scripts/migration/0.8.2/migrate.py @@ -85,10 +85,22 @@ def _parse_args(): parser.add_argument( '--log-file', help='the log file to use') + parser.add_argument( + '--pdb', action='store_true', + help='escape to pdb shell in case of exception') return parser.parse_args() +def _enable_pdb(): + import sys + from IPython.core import ultratb + sys.excepthook = ultratb.FormattedTB( + mode='Verbose', color_scheme='Linux', call_pdb=1) + + if __name__ == '__main__': args = _parse_args() + if args.pdb: + _enable_pdb() _configure_logger(args.log_file) migrate(args, TARGET_VERSION) -- cgit v1.2.3 From 4ed6bb54f1cc96ee0a8b98914c98b94edc1d1b1c Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 19 Aug 2016 12:14:40 -0300 Subject: [pkg] add leftovers deletion to couch scehma migration script Previous versions of the couchdb schema used documents "u1db_sync_log" and "u1db_sync_state" to store sync metadata. At some point this was changed, but the documents might have stayed as leftovers. This commit adds the deletion of such documents to the migration script. --- scripts/migration/0.8.2/migrate_couch_schema/__init__.py | 15 +++++++++++++++ scripts/migration/0.8.2/tests/conftest.py | 6 +++++- 2 files changed, 20 insertions(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py index 37e5a525..60214aae 100644 --- a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py +++ b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py @@ -119,6 +119,21 @@ def _migrate_sync_docs(db, do_migrate): for row in view.rows: old_doc = row['doc'] old_id = old_doc['_id'] + + # older schemas used different documents with ids starting with + # "u1db_sync" to store sync-related data: + # + # - u1db_sync_log: was used to store the whole sync log. + # - u1db_sync_state: was used to store the sync state. + # + # if any of these documents exist in the current db, they are leftover + # from previous migrations, and should just be removed. 
+ if old_id in ['u1db_sync_log', 'u1db_sync_state']: + logger.info('removing leftover "u1db_sync_log" document...') + if do_migrate: + db.delete(old_doc) + continue + replica_uid = old_id.replace('u1db_sync_', '') new_id = "%s%s" % (SYNC_DOC_ID_PREFIX, replica_uid) new_doc = { diff --git a/scripts/migration/0.8.2/tests/conftest.py b/scripts/migration/0.8.2/tests/conftest.py index 92d1e17e..8e49891c 100644 --- a/scripts/migration/0.8.2/tests/conftest.py +++ b/scripts/migration/0.8.2/tests/conftest.py @@ -31,7 +31,11 @@ initial_docs = [ {'_id': 'doc2', 'u1db_transactions': [(2, 'trans-2'), (4, 'trans-4')]}, {'_id': '_design/docs'}, {'_id': '_design/syncs'}, - {'_id': '_design/transactions', 'views': {'log': {'map': transaction_map}}} + {'_id': '_design/transactions', + 'views': {'log': {'map': transaction_map}}}, + # the following should be removed if found in the dbs + {'_id': 'u1db_sync_log'}, + {'_id': 'u1db_sync_state'}, ] -- cgit v1.2.3 From 099f2b7453ee6486ccc23c0766f613709aacbde0 Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 19 Aug 2016 12:17:52 -0300 Subject: [pkg] move config doc as last action of couch schema migration script If the moving of the config document is the last action of the couch schema migration script, then we can test for successful migration of a certain db by checking if the config document was already moved. This commit just changes the order of migration actions to enforce this situation. --- scripts/migration/0.8.2/migrate_couch_schema/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py index 60214aae..d0dd41e3 100644 --- a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py +++ b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py @@ -76,9 +76,9 @@ def migrate(args, target_version): def _migrate_user_db(db, do_migrate): _migrate_transaction_log(db, do_migrate) - _migrate_config_doc(db, do_migrate) _migrate_sync_docs(db, do_migrate) _delete_design_docs(db, do_migrate) + _migrate_config_doc(db, do_migrate) def _migrate_transaction_log(db, do_migrate): -- cgit v1.2.3 From 5f7395ebe9a8419de51c43ad189ca30af4b371f0 Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 19 Aug 2016 13:36:43 -0300 Subject: [pkg] fail gracefully for missing design doc on couch schema migration script --- .../migration/0.8.2/migrate_couch_schema/__init__.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) (limited to 'scripts') diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py index d0dd41e3..456eadf0 100644 --- a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py +++ b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py @@ -6,6 +6,7 @@ Support functions for migration script. 
import logging from couchdb import Server +from couchdb import ResourceNotFound from leap.soledad.common.couch import GENERATION_KEY from leap.soledad.common.couch import TRANSACTION_ID_KEY @@ -38,7 +39,13 @@ def _is_migrateable(db): def _get_transaction_log(db): ddoc_path = ['_design', 'transactions', '_view', 'log'] resource = db.resource(*ddoc_path) - _, _, data = resource.get_json() + try: + _, _, data = resource.get_json() + except ResourceNotFound: + logger.warning( + 'Missing transactions design document, ' + 'can\'t get transaction log.') + return [] rows = data['rows'] transaction_log = [] gen = 1 @@ -152,6 +159,9 @@ def _delete_design_docs(db, do_migrate): for ddoc in ['docs', 'syncs', 'transactions']: doc_id = '_design/%s' % ddoc doc = db.get(doc_id) - logger.info("deleting design doc: %s" % doc_id) - if do_migrate: - db.delete(doc) + if doc: + logger.info("deleting design doc: %s" % doc_id) + if do_migrate: + db.delete(doc) + else: + logger.warning("design doc not found: %s" % doc_id) -- cgit v1.2.3 From 9ae70f3cd0eaad378c73416a0cc18f62199082b0 Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 19 Aug 2016 13:38:40 -0300 Subject: [pkg] improve log message for skipped dbs on couch schema migration script --- scripts/migration/0.8.2/migrate_couch_schema/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py index 456eadf0..66ae960b 100644 --- a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py +++ b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py @@ -73,7 +73,7 @@ def migrate(args, target_version): for dbname in user_dbs: db = server[dbname] if not _is_migrateable(db): - logger.warning("skipping user db: %s" % dbname) + logger.warning("skipping not migrateable user db: %s" % dbname) continue logger.info("starting migration of user db: %s" % dbname) _migrate_user_db(db, args.do_migrate) -- cgit v1.2.3 From 2025916a1c4d4518e714086e2144be0e83c95d9e Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 19 Aug 2016 14:31:52 -0300 Subject: [pkg] ignore existing correct gen docs in couch schema migrate script --- scripts/migration/0.8.2/migrate_couch_schema/__init__.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py index 66ae960b..c3eb9c3d 100644 --- a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py +++ b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py @@ -7,6 +7,7 @@ import logging from couchdb import Server from couchdb import ResourceNotFound +from couchdb import ResourceConflict from leap.soledad.common.couch import GENERATION_KEY from leap.soledad.common.couch import TRANSACTION_ID_KEY @@ -100,7 +101,15 @@ def _migrate_transaction_log(db, do_migrate): } logger.info('creating gen doc: %s' % (gen_doc_id)) if do_migrate: - db.save(doc) + try: + db.save(doc) + except ResourceConflict: + # this gen document already exists. if documents are the same, + # continue with migration. 
+ existing_doc = db.get(gen_doc_id) + for key in [GENERATION_KEY, DOC_ID_KEY, TRANSACTION_ID_KEY]: + if existing_doc[key] != doc[key]: + raise def _migrate_config_doc(db, do_migrate): -- cgit v1.2.3 From 3f74c450c37046cdd04c515e0797084a01426a80 Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 19 Aug 2016 14:33:50 -0300 Subject: [pkg] log any errors in couch schema migration script --- scripts/migration/0.8.2/migrate.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'scripts') diff --git a/scripts/migration/0.8.2/migrate.py b/scripts/migration/0.8.2/migrate.py index fe612221..c9c8a9a0 100755 --- a/scripts/migration/0.8.2/migrate.py +++ b/scripts/migration/0.8.2/migrate.py @@ -103,4 +103,9 @@ if __name__ == '__main__': if args.pdb: _enable_pdb() _configure_logger(args.log_file) - migrate(args, TARGET_VERSION) + logger = logging.getLogger(__name__) + try: + migrate(args, TARGET_VERSION) + except: + logger.exception('Fatal error on migrate script!') + raise -- cgit v1.2.3 From a601f8ddd7b8cd3a9cecbdb7fb16788becadb667 Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 19 Aug 2016 14:51:58 -0300 Subject: [pkg] log errors and continue with next db in couch schema migration script --- scripts/migration/0.8.2/migrate_couch_schema/__init__.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'scripts') diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py index c3eb9c3d..c9ec4910 100644 --- a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py +++ b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py @@ -77,8 +77,12 @@ def migrate(args, target_version): logger.warning("skipping not migrateable user db: %s" % dbname) continue logger.info("starting migration of user db: %s" % dbname) - _migrate_user_db(db, args.do_migrate) - logger.info("finished migration of user db: %s" % dbname) + try: + _migrate_user_db(db, args.do_migrate) + logger.info("finished migration of user db: %s" % dbname) + except: + logger.exception('Error migrating user db: %s' % dbname) + logger.error('Continuing with next database.') logger.info('finished couch schema migration to %s' % target_version) -- cgit v1.2.3 From 9b178bfc632ea9dbd584029af05bb688f801b0e3 Mon Sep 17 00:00:00 2001 From: drebs Date: Sun, 21 Aug 2016 10:53:51 -0300 Subject: [pkg] improve logging of couch schema migration script --- scripts/migration/0.8.2/migrate.py | 12 ++++++-- .../0.8.2/migrate_couch_schema/__init__.py | 33 ++++++++++++---------- 2 files changed, 27 insertions(+), 18 deletions(-) (limited to 'scripts') diff --git a/scripts/migration/0.8.2/migrate.py b/scripts/migration/0.8.2/migrate.py index c9c8a9a0..6ad5bc2d 100755 --- a/scripts/migration/0.8.2/migrate.py +++ b/scripts/migration/0.8.2/migrate.py @@ -42,7 +42,7 @@ NETRC_PATH = CONF['soledad-server']['admin_netrc'] # command line args and execution # -def _configure_logger(log_file): +def _configure_logger(log_file, level=logging.INFO): if not log_file: fname, _ = os.path.basename(__file__).split('.') timestr = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S') @@ -56,7 +56,7 @@ def _configure_logger(log_file): filemode='a', format='%(asctime)s,%(msecs)d %(levelname)s %(message)s', datefmt='%H:%M:%S', - level=logging.DEBUG) + level=level) def _default_couch_url(): @@ -88,6 +88,10 @@ def _parse_args(): parser.add_argument( '--pdb', action='store_true', help='escape to pdb shell in case of exception') + parser.add_argument( + '--verbose', action='store_true', + 
help='output detailed information about the migration ' + '(i.e. include debug messages)') return parser.parse_args() @@ -102,7 +106,9 @@ if __name__ == '__main__': args = _parse_args() if args.pdb: _enable_pdb() - _configure_logger(args.log_file) + _configure_logger( + args.log_file, + level=logging.DEBUG if args.verbose else logging.INFO) logger = logging.getLogger(__name__) try: migrate(args, TARGET_VERSION) diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py index c9ec4910..edf671ae 100644 --- a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py +++ b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py @@ -44,8 +44,8 @@ def _get_transaction_log(db): _, _, data = resource.get_json() except ResourceNotFound: logger.warning( - 'Missing transactions design document, ' - 'can\'t get transaction log.') + '[%s] missing transactions design document, ' + 'can\'t get transaction log.' % db.name) return [] rows = data['rows'] transaction_log = [] @@ -67,22 +67,22 @@ def _get_user_dbs(server): def migrate(args, target_version): server = _get_couch_server(args.couch_url) - logger.info('starting couch schema migration to %s...' % target_version) + logger.info('starting couch schema migration to %s' % target_version) if not args.do_migrate: logger.warning('dry-run: no changes will be made to databases') user_dbs = _get_user_dbs(server) for dbname in user_dbs: db = server[dbname] if not _is_migrateable(db): - logger.warning("skipping not migrateable user db: %s" % dbname) + logger.warning("[%s] skipping not migrateable user db" % dbname) continue - logger.info("starting migration of user db: %s" % dbname) + logger.info("[%s] starting migration of user db" % dbname) try: _migrate_user_db(db, args.do_migrate) - logger.info("finished migration of user db: %s" % dbname) + logger.info("[%s] finished migration of user db" % dbname) except: - logger.exception('Error migrating user db: %s' % dbname) - logger.error('Continuing with next database.') + logger.exception('[%s] error migrating user db' % dbname) + logger.error('continuing with next database.') logger.info('finished couch schema migration to %s' % target_version) @@ -103,7 +103,7 @@ def _migrate_transaction_log(db, do_migrate): DOC_ID_KEY: doc_id, TRANSACTION_ID_KEY: trans_id, } - logger.info('creating gen doc: %s' % (gen_doc_id)) + logger.debug('[%s] creating gen doc: %s' % (db.name, gen_doc_id)) if do_migrate: try: db.save(doc) @@ -123,14 +123,15 @@ def _migrate_config_doc(db, do_migrate): REPLICA_UID_KEY: old_doc[REPLICA_UID_KEY], SCHEMA_VERSION_KEY: SCHEMA_VERSION, } - logger.info("moving config doc: %s -> %s" - % (old_doc['_id'], new_doc['_id'])) + logger.info("[%s] moving config doc: %s -> %s" + % (db.name, old_doc['_id'], new_doc['_id'])) if do_migrate: db.save(new_doc) db.delete(old_doc) def _migrate_sync_docs(db, do_migrate): + logger.info('[%s] moving sync docs' % db.name) view = db.view( '_all_docs', startkey='u1db_sync', @@ -149,7 +150,8 @@ def _migrate_sync_docs(db, do_migrate): # if any of these documents exist in the current db, they are leftover # from previous migrations, and should just be removed. 
if old_id in ['u1db_sync_log', 'u1db_sync_state']: - logger.info('removing leftover "u1db_sync_log" document...') + logger.info('[%s] removing leftover document: %s' + % (db.name, old_id)) if do_migrate: db.delete(old_doc) continue @@ -162,7 +164,8 @@ def _migrate_sync_docs(db, do_migrate): TRANSACTION_ID_KEY: old_doc['transaction_id'], REPLICA_UID_KEY: replica_uid, } - logger.info("moving sync doc: %s -> %s" % (old_id, new_id)) + logger.debug("[%s] moving sync doc: %s -> %s" + % (db.name, old_id, new_id)) if do_migrate: db.save(new_doc) db.delete(old_doc) @@ -173,8 +176,8 @@ def _delete_design_docs(db, do_migrate): doc_id = '_design/%s' % ddoc doc = db.get(doc_id) if doc: - logger.info("deleting design doc: %s" % doc_id) + logger.info("[%s] deleting design doc: %s" % (db.name, doc_id)) if do_migrate: db.delete(doc) else: - logger.warning("design doc not found: %s" % doc_id) + logger.warning("[%s] design doc not found: %s" % (db.name, doc_id)) -- cgit v1.2.3 From 8e87fecc4f9262ee290c0a148cdbfb214cc0417d Mon Sep 17 00:00:00 2001 From: drebs Date: Mon, 22 Aug 2016 20:56:49 -0300 Subject: [test] avoid failing on interrupted couch schema migrations --- .../migration/0.8.2/migrate_couch_schema/__init__.py | 17 +++++++++++++---- scripts/migration/0.8.2/tests/conftest.py | 4 ++++ 2 files changed, 17 insertions(+), 4 deletions(-) (limited to 'scripts') diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py index edf671ae..f0b456e4 100644 --- a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py +++ b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py @@ -32,9 +32,7 @@ def _get_couch_server(couch_url): def _is_migrateable(db): config_doc = db.get('u1db_config') - if config_doc is None: - return False - return True + return bool(config_doc) def _get_transaction_log(db): @@ -126,6 +124,8 @@ def _migrate_config_doc(db, do_migrate): logger.info("[%s] moving config doc: %s -> %s" % (db.name, old_doc['_id'], new_doc['_id'])) if do_migrate: + # the config doc must not exist, otherwise we would have skipped this + # database. db.save(new_doc) db.delete(old_doc) @@ -167,7 +167,16 @@ def _migrate_sync_docs(db, do_migrate): logger.debug("[%s] moving sync doc: %s -> %s" % (db.name, old_id, new_id)) if do_migrate: - db.save(new_doc) + try: + db.save(new_doc) + except ResourceConflict: + # this sync document already exists. if documents are the same, + # continue with migration. 
+                existing_doc = db.get(new_id)
+                for key in [GENERATION_KEY, TRANSACTION_ID_KEY,
+                            REPLICA_UID_KEY]:
+                    if existing_doc[key] != new_doc[key]:
+                        raise
         db.delete(old_doc)

diff --git a/scripts/migration/0.8.2/tests/conftest.py b/scripts/migration/0.8.2/tests/conftest.py
index 8e49891c..61f6c7ee 100644
--- a/scripts/migration/0.8.2/tests/conftest.py
+++ b/scripts/migration/0.8.2/tests/conftest.py
@@ -33,6 +33,10 @@ initial_docs = [
     {'_id': '_design/syncs'},
     {'_id': '_design/transactions',
      'views': {'log': {'map': transaction_map}}},
+    # add some data from previous interrupted migration
+    {'_id': '_local/sync_A', 'gen': 0, 'trans_id': '', 'replica_uid': 'A'},
+    {'_id': 'gen-0000000002',
+     'gen': 2, 'trans_id': 'trans-2', 'doc_id': 'doc2'},
     # the following should be removed if found in the dbs
     {'_id': 'u1db_sync_log'},
     {'_id': 'u1db_sync_state'},
-- cgit v1.2.3

From c326dae7b824366208220da94ca730788bb50a18 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Wed, 5 Oct 2016 01:15:50 -0300
Subject: [bug] adds libsqlcipher to docker

The current docker image is broken due to the missing libsqlcipher. This
commit adds it, together with jessie-backports, which the package needs.

Resolves: #8508
---
 scripts/docker/Dockerfile | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'scripts')

diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile
index 8c6bfdb3..26d5f782 100644
--- a/scripts/docker/Dockerfile
+++ b/scripts/docker/Dockerfile
@@ -1,5 +1,5 @@
 # start with a fresh debian image
-FROM debian
+FROM debian:jessie-backports

 RUN apt-get update

@@ -10,6 +10,8 @@ RUN apt-get -y install libpython2.7-dev
 # needed to build python cryptography module
 RUN apt-get -y install libssl-dev
 RUN apt-get -y install libffi-dev
+# needed to build pysqlcipher
+RUN apt-get -y install libsqlcipher-dev

 # install pip and tox
 RUN apt-get -y install python-pip
-- cgit v1.2.3

From 308384eba7da58fbfdc17ead35a98216880206b5 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Wed, 5 Oct 2016 01:19:51 -0300
Subject: [feature] use latest image

Instead of hardcoding a version, use the latest image. This should give us
the flexibility to change images without changing code.
---
 scripts/docker/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'scripts')

diff --git a/scripts/docker/Makefile b/scripts/docker/Makefile
index 0fdc93fa..7050526a 100644
--- a/scripts/docker/Makefile
+++ b/scripts/docker/Makefile
@@ -16,7 +16,7 @@
 # Some configurations you might override when calling this makefile #
 #####################################################################

-IMAGE_NAME ?= leapcode/soledad:1.0
+IMAGE_NAME ?= leapcode/soledad:latest
 SOLEDAD_REMOTE ?= https://0xacab.org/leap/soledad.git
 SOLEDAD_BRANCH ?= develop
 SOLEDAD_PRELOAD_NUM ?= 100
-- cgit v1.2.3

From e121a92161d3a18cebc8796d43b98c05b6916088 Mon Sep 17 00:00:00 2001
From: Victor Shyba
Date: Thu, 6 Oct 2016 18:56:25 -0300
Subject: [feature] adds libsqlite3-dev on docker image

This is necessary for keymanager, and since this image is shared, it is
added here with a comment explaining why. The reason for using
jessie-backports is also explained.
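
As a quick sanity check that an image built from these Dockerfile changes can
compile and load pysqlcipher against the system libsqlcipher, something along
these lines could be run inside the container. This sketch is not part of the
patch series; the pysqlcipher.dbapi2 module path and the cipher_version pragma
are assumptions about the pysqlcipher/SQLCipher packages, not anything this
changeset guarantees.

    # check_sqlcipher.py - hypothetical sanity check, not shipped with this series.
    # Assumes pysqlcipher exposes a DB-API module at pysqlcipher.dbapi2 and that
    # the linked SQLCipher answers the cipher_version pragma.
    from pysqlcipher import dbapi2

    conn = dbapi2.connect(':memory:')
    cur = conn.cursor()
    cur.execute("PRAGMA key = 'test-key'")  # set an encryption key for this connection
    cur.execute("PRAGMA cipher_version")    # report which SQLCipher version is linked in
    row = cur.fetchone()
    print('SQLCipher version: %s' % (row[0] if row else 'unknown'))
    conn.close()

If the import or the pragma fails, the image is missing the SQLCipher support
these Dockerfile changes are meant to provide.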
--- scripts/docker/Dockerfile | 3 +++ 1 file changed, 3 insertions(+) (limited to 'scripts') diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile index 26d5f782..21764d84 100644 --- a/scripts/docker/Dockerfile +++ b/scripts/docker/Dockerfile @@ -1,4 +1,5 @@ # start with a fresh debian image +# we use backports because of libsqlcipher-dev FROM debian:jessie-backports RUN apt-get update @@ -12,6 +13,8 @@ RUN apt-get -y install libssl-dev RUN apt-get -y install libffi-dev # needed to build pysqlcipher RUN apt-get -y install libsqlcipher-dev +# needed to support keymanager +RUN apt-get -y install libsqlite3-dev # install pip and tox RUN apt-get -y install python-pip -- cgit v1.2.3 From 18b4cb0aa61a4f935362cf268afc543280461dda Mon Sep 17 00:00:00 2001 From: drebs Date: Fri, 30 Sep 2016 09:20:21 -0300 Subject: [pkg] use correct folder name for migrate script --- scripts/migration/0.8.2/README.md | 73 -------- scripts/migration/0.8.2/log/.empty | 0 scripts/migration/0.8.2/migrate.py | 117 ------------- .../0.8.2/migrate_couch_schema/__init__.py | 192 --------------------- scripts/migration/0.8.2/setup.py | 8 - scripts/migration/0.8.2/tests/conftest.py | 54 ------ scripts/migration/0.8.2/tests/test_migrate.py | 67 ------- scripts/migration/0.8.2/tox.ini | 13 -- scripts/migration/0.9.0/.gitignore | 1 + scripts/migration/0.9.0/README.md | 73 ++++++++ scripts/migration/0.9.0/log/.empty | 0 scripts/migration/0.9.0/migrate.py | 117 +++++++++++++ .../0.9.0/migrate_couch_schema/__init__.py | 192 +++++++++++++++++++++ scripts/migration/0.9.0/requirements.pip | 3 + scripts/migration/0.9.0/setup.py | 8 + scripts/migration/0.9.0/tests/conftest.py | 54 ++++++ scripts/migration/0.9.0/tests/test_migrate.py | 67 +++++++ scripts/migration/0.9.0/tox.ini | 13 ++ 18 files changed, 528 insertions(+), 524 deletions(-) delete mode 100644 scripts/migration/0.8.2/README.md delete mode 100644 scripts/migration/0.8.2/log/.empty delete mode 100755 scripts/migration/0.8.2/migrate.py delete mode 100644 scripts/migration/0.8.2/migrate_couch_schema/__init__.py delete mode 100644 scripts/migration/0.8.2/setup.py delete mode 100644 scripts/migration/0.8.2/tests/conftest.py delete mode 100644 scripts/migration/0.8.2/tests/test_migrate.py delete mode 100644 scripts/migration/0.8.2/tox.ini create mode 100644 scripts/migration/0.9.0/.gitignore create mode 100644 scripts/migration/0.9.0/README.md create mode 100644 scripts/migration/0.9.0/log/.empty create mode 100755 scripts/migration/0.9.0/migrate.py create mode 100644 scripts/migration/0.9.0/migrate_couch_schema/__init__.py create mode 100644 scripts/migration/0.9.0/requirements.pip create mode 100644 scripts/migration/0.9.0/setup.py create mode 100644 scripts/migration/0.9.0/tests/conftest.py create mode 100644 scripts/migration/0.9.0/tests/test_migrate.py create mode 100644 scripts/migration/0.9.0/tox.ini (limited to 'scripts') diff --git a/scripts/migration/0.8.2/README.md b/scripts/migration/0.8.2/README.md deleted file mode 100644 index 919a5235..00000000 --- a/scripts/migration/0.8.2/README.md +++ /dev/null @@ -1,73 +0,0 @@ -CouchDB schema migration to Soledad 0.8.2 -========================================= - -Migrate couch database schema from <= 0.8.1 version to 0.8.2 version. - - -ATTENTION! ----------- - - - This script does not backup your data for you. Make sure you have a backup - copy of your databases before running this script! - - - Make sure you turn off any service that might be writing to the couch - database before running this script. 
- - -Usage ------ - -To see what the script would do, run: - - ./migrate.py - -To actually run the migration, add the --do-migrate command line option: - - ./migrate.py --do-migrate - -See command line options: - - ./migrate.py --help - - -Log ---- - -If you don't pass a --log-file command line option, a log will be written to -the `log/` folder. - - -Differences between old and new couch schema --------------------------------------------- - -The differences between old and new schemas are: - - - Transaction metadata was previously stored inside each document, and we - used design doc view/list functions to retrieve that information. Now, - transaction metadata is stored in documents with special ids - (gen-0000000001 to gen-9999999999). - - - Database replica config metadata was stored in a document called - "u1db_config", and now we store it in the "_local/config" document. - - - Sync metadata was previously stored in documents with id - "u1db_sync_", and now are stored in - "_local/sync_". - - - The new schema doesn't make use of any design documents. - - -What does this script do ------------------------- - -- List all databases starting with "user-". -- For each one, do: - - Check if it contains the old "u1db_config" document. - - If it doesn't, skip this db. - - Get the transaction log using the usual design doc view/list functions. - - Write a new "gen-X" document for each line on the transaction log. - - Get the "u1db_config" document, create a new one in "_local/config", - Delete the old one. - - List all "u1db_sync_X" documents, create new ones in "_local/sync_X", - delete the old ones. - - Delete unused design documents. diff --git a/scripts/migration/0.8.2/log/.empty b/scripts/migration/0.8.2/log/.empty deleted file mode 100644 index e69de29b..00000000 diff --git a/scripts/migration/0.8.2/migrate.py b/scripts/migration/0.8.2/migrate.py deleted file mode 100755 index 6ad5bc2d..00000000 --- a/scripts/migration/0.8.2/migrate.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python -# migrate.py - -""" -Migrate CouchDB schema to Soledad 0.8.2 schema. - -****************************************************************************** - ATTENTION! - - - This script does not backup your data for you. Make sure you have a backup - copy of your databases before running this script! - - - Make sure you turn off any service that might be writing to the couch - database before running this script. - -****************************************************************************** - -Run this script with the --help option to see command line options. - -See the README.md file for more information. 
-""" - -import datetime -import logging -import netrc -import os - -from argparse import ArgumentParser - -from leap.soledad.server import load_configuration - -from migrate_couch_schema import migrate - - -TARGET_VERSION = '0.8.2' -DEFAULT_COUCH_URL = 'http://127.0.0.1:5984' -CONF = load_configuration('/etc/soledad/soledad-server.conf') -NETRC_PATH = CONF['soledad-server']['admin_netrc'] - - -# -# command line args and execution -# - -def _configure_logger(log_file, level=logging.INFO): - if not log_file: - fname, _ = os.path.basename(__file__).split('.') - timestr = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S') - filename = 'soledad_%s_%s_%s.log' \ - % (TARGET_VERSION, fname, timestr) - dirname = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'log') - log_file = os.path.join(dirname, filename) - logging.basicConfig( - filename=log_file, - filemode='a', - format='%(asctime)s,%(msecs)d %(levelname)s %(message)s', - datefmt='%H:%M:%S', - level=level) - - -def _default_couch_url(): - if not os.path.exists(NETRC_PATH): - return DEFAULT_COUCH_URL - parsed_netrc = netrc.netrc(NETRC_PATH) - host, (login, _, password) = parsed_netrc.hosts.items()[0] - url = ('http://%(login)s:%(password)s@%(host)s:5984' % { - 'login': login, - 'password': password, - 'host': host}) - return url - - -def _parse_args(): - parser = ArgumentParser() - parser.add_argument( - '--couch_url', - help='the url for the couch database', - default=_default_couch_url()) - parser.add_argument( - '--do-migrate', - help='actually perform the migration (otherwise ' - 'just print what would be done)', - action='store_true') - parser.add_argument( - '--log-file', - help='the log file to use') - parser.add_argument( - '--pdb', action='store_true', - help='escape to pdb shell in case of exception') - parser.add_argument( - '--verbose', action='store_true', - help='output detailed information about the migration ' - '(i.e. include debug messages)') - return parser.parse_args() - - -def _enable_pdb(): - import sys - from IPython.core import ultratb - sys.excepthook = ultratb.FormattedTB( - mode='Verbose', color_scheme='Linux', call_pdb=1) - - -if __name__ == '__main__': - args = _parse_args() - if args.pdb: - _enable_pdb() - _configure_logger( - args.log_file, - level=logging.DEBUG if args.verbose else logging.INFO) - logger = logging.getLogger(__name__) - try: - migrate(args, TARGET_VERSION) - except: - logger.exception('Fatal error on migrate script!') - raise diff --git a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py b/scripts/migration/0.8.2/migrate_couch_schema/__init__.py deleted file mode 100644 index f0b456e4..00000000 --- a/scripts/migration/0.8.2/migrate_couch_schema/__init__.py +++ /dev/null @@ -1,192 +0,0 @@ -# __init__.py -""" -Support functions for migration script. 
-""" - -import logging - -from couchdb import Server -from couchdb import ResourceNotFound -from couchdb import ResourceConflict - -from leap.soledad.common.couch import GENERATION_KEY -from leap.soledad.common.couch import TRANSACTION_ID_KEY -from leap.soledad.common.couch import REPLICA_UID_KEY -from leap.soledad.common.couch import DOC_ID_KEY -from leap.soledad.common.couch import SCHEMA_VERSION_KEY -from leap.soledad.common.couch import CONFIG_DOC_ID -from leap.soledad.common.couch import SYNC_DOC_ID_PREFIX -from leap.soledad.common.couch import SCHEMA_VERSION - - -logger = logging.getLogger(__name__) - - -# -# support functions -# - -def _get_couch_server(couch_url): - return Server(couch_url) - - -def _is_migrateable(db): - config_doc = db.get('u1db_config') - return bool(config_doc) - - -def _get_transaction_log(db): - ddoc_path = ['_design', 'transactions', '_view', 'log'] - resource = db.resource(*ddoc_path) - try: - _, _, data = resource.get_json() - except ResourceNotFound: - logger.warning( - '[%s] missing transactions design document, ' - 'can\'t get transaction log.' % db.name) - return [] - rows = data['rows'] - transaction_log = [] - gen = 1 - for row in rows: - transaction_log.append((gen, row['id'], row['value'])) - gen += 1 - return transaction_log - - -def _get_user_dbs(server): - user_dbs = filter(lambda dbname: dbname.startswith('user-'), server) - return user_dbs - - -# -# migration main functions -# - -def migrate(args, target_version): - server = _get_couch_server(args.couch_url) - logger.info('starting couch schema migration to %s' % target_version) - if not args.do_migrate: - logger.warning('dry-run: no changes will be made to databases') - user_dbs = _get_user_dbs(server) - for dbname in user_dbs: - db = server[dbname] - if not _is_migrateable(db): - logger.warning("[%s] skipping not migrateable user db" % dbname) - continue - logger.info("[%s] starting migration of user db" % dbname) - try: - _migrate_user_db(db, args.do_migrate) - logger.info("[%s] finished migration of user db" % dbname) - except: - logger.exception('[%s] error migrating user db' % dbname) - logger.error('continuing with next database.') - logger.info('finished couch schema migration to %s' % target_version) - - -def _migrate_user_db(db, do_migrate): - _migrate_transaction_log(db, do_migrate) - _migrate_sync_docs(db, do_migrate) - _delete_design_docs(db, do_migrate) - _migrate_config_doc(db, do_migrate) - - -def _migrate_transaction_log(db, do_migrate): - transaction_log = _get_transaction_log(db) - for gen, doc_id, trans_id in transaction_log: - gen_doc_id = 'gen-%s' % str(gen).zfill(10) - doc = { - '_id': gen_doc_id, - GENERATION_KEY: gen, - DOC_ID_KEY: doc_id, - TRANSACTION_ID_KEY: trans_id, - } - logger.debug('[%s] creating gen doc: %s' % (db.name, gen_doc_id)) - if do_migrate: - try: - db.save(doc) - except ResourceConflict: - # this gen document already exists. if documents are the same, - # continue with migration. - existing_doc = db.get(gen_doc_id) - for key in [GENERATION_KEY, DOC_ID_KEY, TRANSACTION_ID_KEY]: - if existing_doc[key] != doc[key]: - raise - - -def _migrate_config_doc(db, do_migrate): - old_doc = db['u1db_config'] - new_doc = { - '_id': CONFIG_DOC_ID, - REPLICA_UID_KEY: old_doc[REPLICA_UID_KEY], - SCHEMA_VERSION_KEY: SCHEMA_VERSION, - } - logger.info("[%s] moving config doc: %s -> %s" - % (db.name, old_doc['_id'], new_doc['_id'])) - if do_migrate: - # the config doc must not exist, otherwise we would have skipped this - # database. 
- db.save(new_doc) - db.delete(old_doc) - - -def _migrate_sync_docs(db, do_migrate): - logger.info('[%s] moving sync docs' % db.name) - view = db.view( - '_all_docs', - startkey='u1db_sync', - endkey='u1db_synd', - include_docs='true') - for row in view.rows: - old_doc = row['doc'] - old_id = old_doc['_id'] - - # older schemas used different documents with ids starting with - # "u1db_sync" to store sync-related data: - # - # - u1db_sync_log: was used to store the whole sync log. - # - u1db_sync_state: was used to store the sync state. - # - # if any of these documents exist in the current db, they are leftover - # from previous migrations, and should just be removed. - if old_id in ['u1db_sync_log', 'u1db_sync_state']: - logger.info('[%s] removing leftover document: %s' - % (db.name, old_id)) - if do_migrate: - db.delete(old_doc) - continue - - replica_uid = old_id.replace('u1db_sync_', '') - new_id = "%s%s" % (SYNC_DOC_ID_PREFIX, replica_uid) - new_doc = { - '_id': new_id, - GENERATION_KEY: old_doc['generation'], - TRANSACTION_ID_KEY: old_doc['transaction_id'], - REPLICA_UID_KEY: replica_uid, - } - logger.debug("[%s] moving sync doc: %s -> %s" - % (db.name, old_id, new_id)) - if do_migrate: - try: - db.save(new_doc) - except ResourceConflict: - # this sync document already exists. if documents are the same, - # continue with migration. - existing_doc = db.get(new_id) - for key in [GENERATION_KEY, TRANSACTION_ID_KEY, - REPLICA_UID_KEY]: - if existing_doc[key] != new_doc[key]: - raise - db.delete(old_doc) - - -def _delete_design_docs(db, do_migrate): - for ddoc in ['docs', 'syncs', 'transactions']: - doc_id = '_design/%s' % ddoc - doc = db.get(doc_id) - if doc: - logger.info("[%s] deleting design doc: %s" % (db.name, doc_id)) - if do_migrate: - db.delete(doc) - else: - logger.warning("[%s] design doc not found: %s" % (db.name, doc_id)) diff --git a/scripts/migration/0.8.2/setup.py b/scripts/migration/0.8.2/setup.py deleted file mode 100644 index 0467e932..00000000 --- a/scripts/migration/0.8.2/setup.py +++ /dev/null @@ -1,8 +0,0 @@ -from setuptools import setup -from setuptools import find_packages - - -setup( - name='migrate_couch_schema', - packages=find_packages('.'), -) diff --git a/scripts/migration/0.8.2/tests/conftest.py b/scripts/migration/0.8.2/tests/conftest.py deleted file mode 100644 index 61f6c7ee..00000000 --- a/scripts/migration/0.8.2/tests/conftest.py +++ /dev/null @@ -1,54 +0,0 @@ -# conftest.py - -""" -Provide a couch database with content stored in old schema. 
-""" - -import couchdb -import pytest -import uuid - - -COUCH_URL = 'http://127.0.0.1:5984' - -transaction_map = """ -function(doc) { - if (doc.u1db_transactions) - doc.u1db_transactions.forEach(function(t) { - emit(t[0], // use timestamp as key so the results are ordered - t[1]); // value is the transaction_id - }); -} -""" - -initial_docs = [ - {'_id': 'u1db_config', 'replica_uid': 'an-uid'}, - {'_id': 'u1db_sync_A', 'generation': 0, 'replica_uid': 'A', - 'transaction_id': ''}, - {'_id': 'u1db_sync_B', 'generation': 2, 'replica_uid': 'B', - 'transaction_id': 'X'}, - {'_id': 'doc1', 'u1db_transactions': [(1, 'trans-1'), (3, 'trans-3')]}, - {'_id': 'doc2', 'u1db_transactions': [(2, 'trans-2'), (4, 'trans-4')]}, - {'_id': '_design/docs'}, - {'_id': '_design/syncs'}, - {'_id': '_design/transactions', - 'views': {'log': {'map': transaction_map}}}, - # add some data from previous interrupted migration - {'_id': '_local/sync_A', 'gen': 0, 'trans_id': '', 'replica_uid': 'A'}, - {'_id': 'gen-0000000002', - 'gen': 2, 'trans_id': 'trans-2', 'doc_id': 'doc2'}, - # the following should be removed if found in the dbs - {'_id': 'u1db_sync_log'}, - {'_id': 'u1db_sync_state'}, -] - - -@pytest.fixture(scope='function') -def db(request): - server = couchdb.Server(COUCH_URL) - dbname = "user-" + uuid.uuid4().hex - db = server.create(dbname) - for doc in initial_docs: - db.save(doc) - request.addfinalizer(lambda: server.delete(dbname)) - return db diff --git a/scripts/migration/0.8.2/tests/test_migrate.py b/scripts/migration/0.8.2/tests/test_migrate.py deleted file mode 100644 index 10c8b906..00000000 --- a/scripts/migration/0.8.2/tests/test_migrate.py +++ /dev/null @@ -1,67 +0,0 @@ -# test_migrate.py - -""" -Ensure that the migration script works! -""" - -from migrate_couch_schema import _migrate_user_db - -from leap.soledad.common.couch import GENERATION_KEY -from leap.soledad.common.couch import TRANSACTION_ID_KEY -from leap.soledad.common.couch import REPLICA_UID_KEY -from leap.soledad.common.couch import DOC_ID_KEY -from leap.soledad.common.couch import SCHEMA_VERSION_KEY -from leap.soledad.common.couch import CONFIG_DOC_ID -from leap.soledad.common.couch import SYNC_DOC_ID_PREFIX -from leap.soledad.common.couch import SCHEMA_VERSION - - -def test__migrate_user_db(db): - _migrate_user_db(db, True) - - # we should find exactly 6 documents: 2 normal documents and 4 generation - # documents - view = db.view('_all_docs') - assert len(view.rows) == 6 - - # ensure that the ids of the documents we found on the database are correct - doc_ids = map(lambda doc: doc.id, view.rows) - assert 'doc1' in doc_ids - assert 'doc2' in doc_ids - assert 'gen-0000000001' in doc_ids - assert 'gen-0000000002' in doc_ids - assert 'gen-0000000003' in doc_ids - assert 'gen-0000000004' in doc_ids - - # assert config doc contents - config_doc = db.get(CONFIG_DOC_ID) - assert config_doc[REPLICA_UID_KEY] == 'an-uid' - assert config_doc[SCHEMA_VERSION_KEY] == SCHEMA_VERSION - - # assert sync docs contents - sync_doc_A = db.get('%s%s' % (SYNC_DOC_ID_PREFIX, 'A')) - assert sync_doc_A[GENERATION_KEY] == 0 - assert sync_doc_A[REPLICA_UID_KEY] == 'A' - assert sync_doc_A[TRANSACTION_ID_KEY] == '' - sync_doc_B = db.get('%s%s' % (SYNC_DOC_ID_PREFIX, 'B')) - assert sync_doc_B[GENERATION_KEY] == 2 - assert sync_doc_B[REPLICA_UID_KEY] == 'B' - assert sync_doc_B[TRANSACTION_ID_KEY] == 'X' - - # assert gen docs contents - gen_1 = db.get('gen-0000000001') - assert gen_1[DOC_ID_KEY] == 'doc1' - assert gen_1[GENERATION_KEY] == 1 - assert 
gen_1[TRANSACTION_ID_KEY] == 'trans-1' - gen_2 = db.get('gen-0000000002') - assert gen_2[DOC_ID_KEY] == 'doc2' - assert gen_2[GENERATION_KEY] == 2 - assert gen_2[TRANSACTION_ID_KEY] == 'trans-2' - gen_3 = db.get('gen-0000000003') - assert gen_3[DOC_ID_KEY] == 'doc1' - assert gen_3[GENERATION_KEY] == 3 - assert gen_3[TRANSACTION_ID_KEY] == 'trans-3' - gen_4 = db.get('gen-0000000004') - assert gen_4[DOC_ID_KEY] == 'doc2' - assert gen_4[GENERATION_KEY] == 4 - assert gen_4[TRANSACTION_ID_KEY] == 'trans-4' diff --git a/scripts/migration/0.8.2/tox.ini b/scripts/migration/0.8.2/tox.ini deleted file mode 100644 index 2bb6be4c..00000000 --- a/scripts/migration/0.8.2/tox.ini +++ /dev/null @@ -1,13 +0,0 @@ -[tox] -envlist = py27 - -[testenv] -commands = py.test {posargs} -changedir = tests -deps = - pytest - couchdb - pdbpp - -e../../../common -setenv = - TERM=xterm diff --git a/scripts/migration/0.9.0/.gitignore b/scripts/migration/0.9.0/.gitignore new file mode 100644 index 00000000..6115c109 --- /dev/null +++ b/scripts/migration/0.9.0/.gitignore @@ -0,0 +1 @@ +log/* diff --git a/scripts/migration/0.9.0/README.md b/scripts/migration/0.9.0/README.md new file mode 100644 index 00000000..919a5235 --- /dev/null +++ b/scripts/migration/0.9.0/README.md @@ -0,0 +1,73 @@ +CouchDB schema migration to Soledad 0.8.2 +========================================= + +Migrate couch database schema from <= 0.8.1 version to 0.8.2 version. + + +ATTENTION! +---------- + + - This script does not backup your data for you. Make sure you have a backup + copy of your databases before running this script! + + - Make sure you turn off any service that might be writing to the couch + database before running this script. + + +Usage +----- + +To see what the script would do, run: + + ./migrate.py + +To actually run the migration, add the --do-migrate command line option: + + ./migrate.py --do-migrate + +See command line options: + + ./migrate.py --help + + +Log +--- + +If you don't pass a --log-file command line option, a log will be written to +the `log/` folder. + + +Differences between old and new couch schema +-------------------------------------------- + +The differences between old and new schemas are: + + - Transaction metadata was previously stored inside each document, and we + used design doc view/list functions to retrieve that information. Now, + transaction metadata is stored in documents with special ids + (gen-0000000001 to gen-9999999999). + + - Database replica config metadata was stored in a document called + "u1db_config", and now we store it in the "_local/config" document. + + - Sync metadata was previously stored in documents with id + "u1db_sync_", and now are stored in + "_local/sync_". + + - The new schema doesn't make use of any design documents. + + +What does this script do +------------------------ + +- List all databases starting with "user-". +- For each one, do: + - Check if it contains the old "u1db_config" document. + - If it doesn't, skip this db. + - Get the transaction log using the usual design doc view/list functions. + - Write a new "gen-X" document for each line on the transaction log. + - Get the "u1db_config" document, create a new one in "_local/config", + Delete the old one. + - List all "u1db_sync_X" documents, create new ones in "_local/sync_X", + delete the old ones. + - Delete unused design documents. 
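
To make the gen-document naming mentioned above concrete (ids gen-0000000001
through gen-9999999999), here is a small illustration of how a generation
number maps to and from its document id. It simply mirrors the zfill(10) call
used by the migration code further below; it is not part of the patch itself,
and gen_from_doc_id is an illustrative helper of ours.

    # Illustration only: mapping between generation numbers and gen-doc ids.
    def gen_doc_id(gen):
        # generation N becomes 'gen-' plus N zero-padded to ten digits
        return 'gen-%s' % str(gen).zfill(10)

    def gen_from_doc_id(doc_id):
        # inverse mapping, assuming a well-formed gen-doc id
        return int(doc_id[len('gen-'):])

    assert gen_doc_id(1) == 'gen-0000000001'
    assert gen_doc_id(42) == 'gen-0000000042'
    assert gen_from_doc_id(gen_doc_id(1234)) == 1234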
diff --git a/scripts/migration/0.9.0/log/.empty b/scripts/migration/0.9.0/log/.empty new file mode 100644 index 00000000..e69de29b diff --git a/scripts/migration/0.9.0/migrate.py b/scripts/migration/0.9.0/migrate.py new file mode 100755 index 00000000..6ad5bc2d --- /dev/null +++ b/scripts/migration/0.9.0/migrate.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python +# migrate.py + +""" +Migrate CouchDB schema to Soledad 0.8.2 schema. + +****************************************************************************** + ATTENTION! + + - This script does not backup your data for you. Make sure you have a backup + copy of your databases before running this script! + + - Make sure you turn off any service that might be writing to the couch + database before running this script. + +****************************************************************************** + +Run this script with the --help option to see command line options. + +See the README.md file for more information. +""" + +import datetime +import logging +import netrc +import os + +from argparse import ArgumentParser + +from leap.soledad.server import load_configuration + +from migrate_couch_schema import migrate + + +TARGET_VERSION = '0.8.2' +DEFAULT_COUCH_URL = 'http://127.0.0.1:5984' +CONF = load_configuration('/etc/soledad/soledad-server.conf') +NETRC_PATH = CONF['soledad-server']['admin_netrc'] + + +# +# command line args and execution +# + +def _configure_logger(log_file, level=logging.INFO): + if not log_file: + fname, _ = os.path.basename(__file__).split('.') + timestr = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S') + filename = 'soledad_%s_%s_%s.log' \ + % (TARGET_VERSION, fname, timestr) + dirname = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'log') + log_file = os.path.join(dirname, filename) + logging.basicConfig( + filename=log_file, + filemode='a', + format='%(asctime)s,%(msecs)d %(levelname)s %(message)s', + datefmt='%H:%M:%S', + level=level) + + +def _default_couch_url(): + if not os.path.exists(NETRC_PATH): + return DEFAULT_COUCH_URL + parsed_netrc = netrc.netrc(NETRC_PATH) + host, (login, _, password) = parsed_netrc.hosts.items()[0] + url = ('http://%(login)s:%(password)s@%(host)s:5984' % { + 'login': login, + 'password': password, + 'host': host}) + return url + + +def _parse_args(): + parser = ArgumentParser() + parser.add_argument( + '--couch_url', + help='the url for the couch database', + default=_default_couch_url()) + parser.add_argument( + '--do-migrate', + help='actually perform the migration (otherwise ' + 'just print what would be done)', + action='store_true') + parser.add_argument( + '--log-file', + help='the log file to use') + parser.add_argument( + '--pdb', action='store_true', + help='escape to pdb shell in case of exception') + parser.add_argument( + '--verbose', action='store_true', + help='output detailed information about the migration ' + '(i.e. 
include debug messages)') + return parser.parse_args() + + +def _enable_pdb(): + import sys + from IPython.core import ultratb + sys.excepthook = ultratb.FormattedTB( + mode='Verbose', color_scheme='Linux', call_pdb=1) + + +if __name__ == '__main__': + args = _parse_args() + if args.pdb: + _enable_pdb() + _configure_logger( + args.log_file, + level=logging.DEBUG if args.verbose else logging.INFO) + logger = logging.getLogger(__name__) + try: + migrate(args, TARGET_VERSION) + except: + logger.exception('Fatal error on migrate script!') + raise diff --git a/scripts/migration/0.9.0/migrate_couch_schema/__init__.py b/scripts/migration/0.9.0/migrate_couch_schema/__init__.py new file mode 100644 index 00000000..f0b456e4 --- /dev/null +++ b/scripts/migration/0.9.0/migrate_couch_schema/__init__.py @@ -0,0 +1,192 @@ +# __init__.py +""" +Support functions for migration script. +""" + +import logging + +from couchdb import Server +from couchdb import ResourceNotFound +from couchdb import ResourceConflict + +from leap.soledad.common.couch import GENERATION_KEY +from leap.soledad.common.couch import TRANSACTION_ID_KEY +from leap.soledad.common.couch import REPLICA_UID_KEY +from leap.soledad.common.couch import DOC_ID_KEY +from leap.soledad.common.couch import SCHEMA_VERSION_KEY +from leap.soledad.common.couch import CONFIG_DOC_ID +from leap.soledad.common.couch import SYNC_DOC_ID_PREFIX +from leap.soledad.common.couch import SCHEMA_VERSION + + +logger = logging.getLogger(__name__) + + +# +# support functions +# + +def _get_couch_server(couch_url): + return Server(couch_url) + + +def _is_migrateable(db): + config_doc = db.get('u1db_config') + return bool(config_doc) + + +def _get_transaction_log(db): + ddoc_path = ['_design', 'transactions', '_view', 'log'] + resource = db.resource(*ddoc_path) + try: + _, _, data = resource.get_json() + except ResourceNotFound: + logger.warning( + '[%s] missing transactions design document, ' + 'can\'t get transaction log.' 
% db.name) + return [] + rows = data['rows'] + transaction_log = [] + gen = 1 + for row in rows: + transaction_log.append((gen, row['id'], row['value'])) + gen += 1 + return transaction_log + + +def _get_user_dbs(server): + user_dbs = filter(lambda dbname: dbname.startswith('user-'), server) + return user_dbs + + +# +# migration main functions +# + +def migrate(args, target_version): + server = _get_couch_server(args.couch_url) + logger.info('starting couch schema migration to %s' % target_version) + if not args.do_migrate: + logger.warning('dry-run: no changes will be made to databases') + user_dbs = _get_user_dbs(server) + for dbname in user_dbs: + db = server[dbname] + if not _is_migrateable(db): + logger.warning("[%s] skipping not migrateable user db" % dbname) + continue + logger.info("[%s] starting migration of user db" % dbname) + try: + _migrate_user_db(db, args.do_migrate) + logger.info("[%s] finished migration of user db" % dbname) + except: + logger.exception('[%s] error migrating user db' % dbname) + logger.error('continuing with next database.') + logger.info('finished couch schema migration to %s' % target_version) + + +def _migrate_user_db(db, do_migrate): + _migrate_transaction_log(db, do_migrate) + _migrate_sync_docs(db, do_migrate) + _delete_design_docs(db, do_migrate) + _migrate_config_doc(db, do_migrate) + + +def _migrate_transaction_log(db, do_migrate): + transaction_log = _get_transaction_log(db) + for gen, doc_id, trans_id in transaction_log: + gen_doc_id = 'gen-%s' % str(gen).zfill(10) + doc = { + '_id': gen_doc_id, + GENERATION_KEY: gen, + DOC_ID_KEY: doc_id, + TRANSACTION_ID_KEY: trans_id, + } + logger.debug('[%s] creating gen doc: %s' % (db.name, gen_doc_id)) + if do_migrate: + try: + db.save(doc) + except ResourceConflict: + # this gen document already exists. if documents are the same, + # continue with migration. + existing_doc = db.get(gen_doc_id) + for key in [GENERATION_KEY, DOC_ID_KEY, TRANSACTION_ID_KEY]: + if existing_doc[key] != doc[key]: + raise + + +def _migrate_config_doc(db, do_migrate): + old_doc = db['u1db_config'] + new_doc = { + '_id': CONFIG_DOC_ID, + REPLICA_UID_KEY: old_doc[REPLICA_UID_KEY], + SCHEMA_VERSION_KEY: SCHEMA_VERSION, + } + logger.info("[%s] moving config doc: %s -> %s" + % (db.name, old_doc['_id'], new_doc['_id'])) + if do_migrate: + # the config doc must not exist, otherwise we would have skipped this + # database. + db.save(new_doc) + db.delete(old_doc) + + +def _migrate_sync_docs(db, do_migrate): + logger.info('[%s] moving sync docs' % db.name) + view = db.view( + '_all_docs', + startkey='u1db_sync', + endkey='u1db_synd', + include_docs='true') + for row in view.rows: + old_doc = row['doc'] + old_id = old_doc['_id'] + + # older schemas used different documents with ids starting with + # "u1db_sync" to store sync-related data: + # + # - u1db_sync_log: was used to store the whole sync log. + # - u1db_sync_state: was used to store the sync state. + # + # if any of these documents exist in the current db, they are leftover + # from previous migrations, and should just be removed. 
+ if old_id in ['u1db_sync_log', 'u1db_sync_state']: + logger.info('[%s] removing leftover document: %s' + % (db.name, old_id)) + if do_migrate: + db.delete(old_doc) + continue + + replica_uid = old_id.replace('u1db_sync_', '') + new_id = "%s%s" % (SYNC_DOC_ID_PREFIX, replica_uid) + new_doc = { + '_id': new_id, + GENERATION_KEY: old_doc['generation'], + TRANSACTION_ID_KEY: old_doc['transaction_id'], + REPLICA_UID_KEY: replica_uid, + } + logger.debug("[%s] moving sync doc: %s -> %s" + % (db.name, old_id, new_id)) + if do_migrate: + try: + db.save(new_doc) + except ResourceConflict: + # this sync document already exists. if documents are the same, + # continue with migration. + existing_doc = db.get(new_id) + for key in [GENERATION_KEY, TRANSACTION_ID_KEY, + REPLICA_UID_KEY]: + if existing_doc[key] != new_doc[key]: + raise + db.delete(old_doc) + + +def _delete_design_docs(db, do_migrate): + for ddoc in ['docs', 'syncs', 'transactions']: + doc_id = '_design/%s' % ddoc + doc = db.get(doc_id) + if doc: + logger.info("[%s] deleting design doc: %s" % (db.name, doc_id)) + if do_migrate: + db.delete(doc) + else: + logger.warning("[%s] design doc not found: %s" % (db.name, doc_id)) diff --git a/scripts/migration/0.9.0/requirements.pip b/scripts/migration/0.9.0/requirements.pip new file mode 100644 index 00000000..ea22a1a4 --- /dev/null +++ b/scripts/migration/0.9.0/requirements.pip @@ -0,0 +1,3 @@ +couchdb +leap.soledad.common==0.9.0 +leap.soledad.server==0.9.0 diff --git a/scripts/migration/0.9.0/setup.py b/scripts/migration/0.9.0/setup.py new file mode 100644 index 00000000..0467e932 --- /dev/null +++ b/scripts/migration/0.9.0/setup.py @@ -0,0 +1,8 @@ +from setuptools import setup +from setuptools import find_packages + + +setup( + name='migrate_couch_schema', + packages=find_packages('.'), +) diff --git a/scripts/migration/0.9.0/tests/conftest.py b/scripts/migration/0.9.0/tests/conftest.py new file mode 100644 index 00000000..61f6c7ee --- /dev/null +++ b/scripts/migration/0.9.0/tests/conftest.py @@ -0,0 +1,54 @@ +# conftest.py + +""" +Provide a couch database with content stored in old schema. 
+""" + +import couchdb +import pytest +import uuid + + +COUCH_URL = 'http://127.0.0.1:5984' + +transaction_map = """ +function(doc) { + if (doc.u1db_transactions) + doc.u1db_transactions.forEach(function(t) { + emit(t[0], // use timestamp as key so the results are ordered + t[1]); // value is the transaction_id + }); +} +""" + +initial_docs = [ + {'_id': 'u1db_config', 'replica_uid': 'an-uid'}, + {'_id': 'u1db_sync_A', 'generation': 0, 'replica_uid': 'A', + 'transaction_id': ''}, + {'_id': 'u1db_sync_B', 'generation': 2, 'replica_uid': 'B', + 'transaction_id': 'X'}, + {'_id': 'doc1', 'u1db_transactions': [(1, 'trans-1'), (3, 'trans-3')]}, + {'_id': 'doc2', 'u1db_transactions': [(2, 'trans-2'), (4, 'trans-4')]}, + {'_id': '_design/docs'}, + {'_id': '_design/syncs'}, + {'_id': '_design/transactions', + 'views': {'log': {'map': transaction_map}}}, + # add some data from previous interrupted migration + {'_id': '_local/sync_A', 'gen': 0, 'trans_id': '', 'replica_uid': 'A'}, + {'_id': 'gen-0000000002', + 'gen': 2, 'trans_id': 'trans-2', 'doc_id': 'doc2'}, + # the following should be removed if found in the dbs + {'_id': 'u1db_sync_log'}, + {'_id': 'u1db_sync_state'}, +] + + +@pytest.fixture(scope='function') +def db(request): + server = couchdb.Server(COUCH_URL) + dbname = "user-" + uuid.uuid4().hex + db = server.create(dbname) + for doc in initial_docs: + db.save(doc) + request.addfinalizer(lambda: server.delete(dbname)) + return db diff --git a/scripts/migration/0.9.0/tests/test_migrate.py b/scripts/migration/0.9.0/tests/test_migrate.py new file mode 100644 index 00000000..10c8b906 --- /dev/null +++ b/scripts/migration/0.9.0/tests/test_migrate.py @@ -0,0 +1,67 @@ +# test_migrate.py + +""" +Ensure that the migration script works! +""" + +from migrate_couch_schema import _migrate_user_db + +from leap.soledad.common.couch import GENERATION_KEY +from leap.soledad.common.couch import TRANSACTION_ID_KEY +from leap.soledad.common.couch import REPLICA_UID_KEY +from leap.soledad.common.couch import DOC_ID_KEY +from leap.soledad.common.couch import SCHEMA_VERSION_KEY +from leap.soledad.common.couch import CONFIG_DOC_ID +from leap.soledad.common.couch import SYNC_DOC_ID_PREFIX +from leap.soledad.common.couch import SCHEMA_VERSION + + +def test__migrate_user_db(db): + _migrate_user_db(db, True) + + # we should find exactly 6 documents: 2 normal documents and 4 generation + # documents + view = db.view('_all_docs') + assert len(view.rows) == 6 + + # ensure that the ids of the documents we found on the database are correct + doc_ids = map(lambda doc: doc.id, view.rows) + assert 'doc1' in doc_ids + assert 'doc2' in doc_ids + assert 'gen-0000000001' in doc_ids + assert 'gen-0000000002' in doc_ids + assert 'gen-0000000003' in doc_ids + assert 'gen-0000000004' in doc_ids + + # assert config doc contents + config_doc = db.get(CONFIG_DOC_ID) + assert config_doc[REPLICA_UID_KEY] == 'an-uid' + assert config_doc[SCHEMA_VERSION_KEY] == SCHEMA_VERSION + + # assert sync docs contents + sync_doc_A = db.get('%s%s' % (SYNC_DOC_ID_PREFIX, 'A')) + assert sync_doc_A[GENERATION_KEY] == 0 + assert sync_doc_A[REPLICA_UID_KEY] == 'A' + assert sync_doc_A[TRANSACTION_ID_KEY] == '' + sync_doc_B = db.get('%s%s' % (SYNC_DOC_ID_PREFIX, 'B')) + assert sync_doc_B[GENERATION_KEY] == 2 + assert sync_doc_B[REPLICA_UID_KEY] == 'B' + assert sync_doc_B[TRANSACTION_ID_KEY] == 'X' + + # assert gen docs contents + gen_1 = db.get('gen-0000000001') + assert gen_1[DOC_ID_KEY] == 'doc1' + assert gen_1[GENERATION_KEY] == 1 + assert 
gen_1[TRANSACTION_ID_KEY] == 'trans-1' + gen_2 = db.get('gen-0000000002') + assert gen_2[DOC_ID_KEY] == 'doc2' + assert gen_2[GENERATION_KEY] == 2 + assert gen_2[TRANSACTION_ID_KEY] == 'trans-2' + gen_3 = db.get('gen-0000000003') + assert gen_3[DOC_ID_KEY] == 'doc1' + assert gen_3[GENERATION_KEY] == 3 + assert gen_3[TRANSACTION_ID_KEY] == 'trans-3' + gen_4 = db.get('gen-0000000004') + assert gen_4[DOC_ID_KEY] == 'doc2' + assert gen_4[GENERATION_KEY] == 4 + assert gen_4[TRANSACTION_ID_KEY] == 'trans-4' diff --git a/scripts/migration/0.9.0/tox.ini b/scripts/migration/0.9.0/tox.ini new file mode 100644 index 00000000..2bb6be4c --- /dev/null +++ b/scripts/migration/0.9.0/tox.ini @@ -0,0 +1,13 @@ +[tox] +envlist = py27 + +[testenv] +commands = py.test {posargs} +changedir = tests +deps = + pytest + couchdb + pdbpp + -e../../../common +setenv = + TERM=xterm -- cgit v1.2.3
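
A pattern that recurs in the migration commits above is the conflict-tolerant
save: try to write a document, and if it already exists (for example because
an earlier run was interrupted), accept the existing copy only when its
relevant fields match what would have been written. The following stand-alone
sketch shows that pattern, assuming a couchdb-python Database object; the
helper name is ours, not the script's.

    # Stand-alone sketch of the conflict-tolerant save used for gen and sync docs.
    from couchdb import ResourceConflict

    def save_if_missing_or_equal(db, doc, keys):
        """Save doc, tolerating an identical copy left by an interrupted run."""
        try:
            db.save(doc)
        except ResourceConflict:
            existing = db.get(doc['_id'])
            for key in keys:
                if existing[key] != doc[key]:
                    # same id but different contents: surface the real conflict
                    raise

In the migration script itself this logic is inlined twice: once for gen
documents (comparing generation, doc id and transaction id) and once for sync
documents (comparing generation, transaction id and replica uid).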