From 40f2cb8b1f07b1346e01ff69b57e14c492f1cd0b Mon Sep 17 00:00:00 2001 From: drebs Date: Wed, 19 Apr 2017 10:18:27 +0200 Subject: [test] remove docker scripts from this repo Docker scripts are only used for CI and do not need to be in this repository. Because of that, we decided to move the docker scripts to a private repository where dockerfiles for other parts of leap also live. --- scripts/docker/Dockerfile | 38 -- scripts/docker/Makefile | 151 ----- scripts/docker/README.md | 62 -- scripts/docker/TODO | 5 - scripts/docker/couchdb/Dockerfile | 3 - scripts/docker/couchdb/Makefile | 4 - scripts/docker/couchdb/README.rst | 12 - scripts/docker/couchdb/local.ini | 2 - scripts/docker/files/apt/leap.list | 4 - scripts/docker/files/bin/client_side_db.py | 321 ---------- scripts/docker/files/bin/conf/cert_default.conf | 15 - scripts/docker/files/bin/conf/couchdb_default.ini | 361 ------------ .../files/bin/conf/soledad-server_default.conf | 5 - scripts/docker/files/bin/run-client-bootstrap.sh | 20 - scripts/docker/files/bin/run-client-perf.sh | 128 ---- scripts/docker/files/bin/run-perf.sh | 22 - scripts/docker/files/bin/run-server.sh | 89 --- scripts/docker/files/bin/run-tox.sh | 17 - .../docker/files/bin/run-trial-from-gitlab-ci.sh | 50 -- scripts/docker/files/bin/run-trial.sh | 23 - scripts/docker/files/bin/setup-test-env.py | 647 --------------------- scripts/docker/files/bin/util.py | 75 --- scripts/docker/files/bin/util.sh | 12 - .../docker/files/build/install-deps-from-repos.sh | 30 - scripts/docker/helper/get-container-ip.sh | 18 - scripts/docker/helper/run-test.sh | 75 --- scripts/docker/helper/run-until-error.sh | 12 - 27 files changed, 2201 deletions(-) delete mode 100644 scripts/docker/Dockerfile delete mode 100644 scripts/docker/Makefile delete mode 100644 scripts/docker/README.md delete mode 100644 scripts/docker/TODO delete mode 100644 scripts/docker/couchdb/Dockerfile delete mode 100644 scripts/docker/couchdb/Makefile delete mode 100644 scripts/docker/couchdb/README.rst delete mode 100644 scripts/docker/couchdb/local.ini delete mode 100644 scripts/docker/files/apt/leap.list delete mode 100644 scripts/docker/files/bin/client_side_db.py delete mode 100644 scripts/docker/files/bin/conf/cert_default.conf delete mode 100644 scripts/docker/files/bin/conf/couchdb_default.ini delete mode 100644 scripts/docker/files/bin/conf/soledad-server_default.conf delete mode 100755 scripts/docker/files/bin/run-client-bootstrap.sh delete mode 100755 scripts/docker/files/bin/run-client-perf.sh delete mode 100755 scripts/docker/files/bin/run-perf.sh delete mode 100755 scripts/docker/files/bin/run-server.sh delete mode 100755 scripts/docker/files/bin/run-tox.sh delete mode 100755 scripts/docker/files/bin/run-trial-from-gitlab-ci.sh delete mode 100755 scripts/docker/files/bin/run-trial.sh delete mode 100755 scripts/docker/files/bin/setup-test-env.py delete mode 100644 scripts/docker/files/bin/util.py delete mode 100644 scripts/docker/files/bin/util.sh delete mode 100755 scripts/docker/files/build/install-deps-from-repos.sh delete mode 100755 scripts/docker/helper/get-container-ip.sh delete mode 100755 scripts/docker/helper/run-test.sh delete mode 100755 scripts/docker/helper/run-until-error.sh diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile deleted file mode 100644 index 7a741e84..00000000 --- a/scripts/docker/Dockerfile +++ /dev/null @@ -1,38 +0,0 @@ -# start with a fresh debian image -# we use backports because of libsqlcipher-dev -FROM debian:jessie-backports
- -RUN apt-get update - -# needed to build python twisted module -RUN apt-get -y install libpython2.7-dev -# needed to build python cryptography module -RUN apt-get -y install libssl-dev -RUN apt-get -y install libffi-dev -# needed to build pysqlcipher -RUN apt-get -y install libsqlcipher-dev -# needed to support keymanager -RUN apt-get -y install libsqlite3-dev - -# install pip and tox -RUN apt-get -y install python-pip -# We need git from backports because it has -# the "%cI: committer date, strict ISO 8601 format" -# pretty format which is used by pytest-benchmark -RUN apt-get -y install -t jessie-backports git - -# We use curl to show connection to couchdb during CI -RUN apt-get -y install curl - -RUN pip install -U pip -RUN pip install tox - -# clone repositories -RUN mkdir -p /builds/leap -RUN git clone https://0xacab.org/leap/soledad.git /builds/leap/soledad - -# use tox to install everything needed to run tests -RUN cd /builds/leap/soledad/testing && tox -v -r --notest - -RUN mkdir -p /usr/local/soledad -COPY files/bin/ /usr/local/soledad/ diff --git a/scripts/docker/Makefile b/scripts/docker/Makefile deleted file mode 100644 index 499b1a3f..00000000 --- a/scripts/docker/Makefile +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/env make - -# This makefile is intended to aid in running soledad docker images for -# specific purposes, such as running a server, a client or tests. -# -# In order to communicate the IP address of one container to another, we make -# use of a file containing the container id. You have to explicitly pass the -# CONTAINER_ID_FILE variable when invoking some of the targets below. -# -# Example usage: -# -# make run-server CONTAINER_ID_FILE=/tmp/container-id.txt -# make run-client-perf CONTAINER_ID_FILE=/tmp/container-id.txt - -##################################################################### -# Some configurations you might override when calling this makefile # -##################################################################### - -IMAGE_NAME ?= leapcode/soledad:latest -SOLEDAD_REMOTE ?= https://0xacab.org/leap/soledad.git -SOLEDAD_BRANCH ?= develop -SOLEDAD_PRELOAD_NUM ?= 100 -SOLEDAD_PRELOAD_SIZE ?= 500 -MEMORY ?= 512m - -############################################## -# Docker image generation (main make target) # -############################################## - -all: soledad-image couchdb-image - -soledad-image: - docker build ${DOCKER_BUILD_OPTS} -t $(IMAGE_NAME) .
- -couchdb-image: - (cd couchdb/ && make) - -################################################## -# Run a Soledad Server inside a docker container # -################################################## - -run-server: - @if [ -z "$(CONTAINER_ID_FILE)" ]; then \ - echo "Error: you have to pass a value to CONTAINER_ID_FILE."; \ - exit 2; \ - fi - docker run \ - --memory="$(MEMORY)" \ - --cpuset-cpus=0 \ - --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \ - --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \ - --env="SOLEDAD_PRELOAD_NUM=$(SOLEDAD_PRELOAD_NUM)" \ - --env="SOLEDAD_PRELOAD_SIZE=$(SOLEDAD_PRELOAD_SIZE)" \ - --cidfile=$(CONTAINER_ID_FILE) \ - --detach \ - $(IMAGE_NAME) \ - /usr/local/soledad/run-server.sh # --drop-to-shell - -run-client-bootstrap: - @if [ -z "$(CONTAINER_ID_FILE)" ]; then \ - echo "Error: you have to pass a value to CONTAINER_ID_FILE."; \ - exit 2; \ - fi - container_id=`cat $(CONTAINER_ID_FILE)`; \ - server_ip=`./helper/get-container-ip.sh $${container_id}`; \ - docker run -t -i \ - --memory="$(MEMORY)" \ - --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \ - --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \ - --env="SOLEDAD_SERVER_URL=http://$${server_ip}:2424" \ - $(IMAGE_NAME) \ - /usr/local/soledad/run-client-bootstrap.sh - -################################################# -# Run all tests inside a docker container # -################################################# - -run-tox: - name=$$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 5 | head -n 1); \ - docker run -d --name $${name} leap/couchdb; \ - docker run -t -i \ - --memory="$(MEMORY)" \ - --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \ - --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \ - --env="COUCH_URL=http://$${name}:5984" \ - --link $${name} \ - $(IMAGE_NAME) \ - /usr/local/soledad/run-tox.sh - -############################################ -# Performance tests and graphic generation # -############################################ - -run-perf: - name=$$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 5 | head -n 1); \ - docker run -d --name $${name} leap/couchdb; \ - docker run -t -i \ - --memory="$(MEMORY)" \ - --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \ - --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \ - --env="SOLEDAD_PRELOAD_NUM=$(SOLEDAD_PRELOAD_NUM)" \ - --env="COUCH_URL=http://$${name}:5984" \ - --link $${name} \ - $(IMAGE_NAME) \ - /usr/local/soledad/run-perf.sh - -run-client-perf: - @if [ -z "$(CONTAINER_ID_FILE)" ]; then \ - echo "Error: you have to pass a value to CONTAINER_ID_FILE."; \ - exit 2; \ - fi - container_id=`cat $(CONTAINER_ID_FILE)`; \ - server_ip=`./helper/get-container-ip.sh $${container_id}`; \ - docker run -t -i \ - --memory="$(MEMORY)" \ - --cpuset-cpus=1 \ - --cidfile=$(CONTAINER_ID_FILE)-perf \ - --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \ - --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \ - --env="SOLEDAD_PERF_REMOTE=https://0xacab.org/drebs/soledad-perf.git" \ - --env="SOLEDAD_PERF_BRANCH=bug/ensure-events-server" \ - --env="SOLEDAD_PRELOAD_NUM=$(SOLEDAD_PRELOAD_NUM)" \ - --env="SOLEDAD_PRELOAD_SIZE=$(SOLEDAD_PRELOAD_SIZE)" \ - --env="SOLEDAD_STATS=1" \ - --env="SOLEDAD_SERVER_URL=http://$${server_ip}:2424" \ - --env="SOLEDAD_LOG=1" \ - $(IMAGE_NAME) \ - /usr/local/soledad/run-client-perf.sh # --drop-to-shell - -cp-perf-result: - @if [ -z "$(CONTAINER_ID_FILE)" ]; then \ - echo "Error: you have to pass a value to CONTAINER_ID_FILE."; \ - exit 2; \ - fi - perf_id=`cat $(CONTAINER_ID_FILE)-perf`; \ - docker cp $${perf_id}:/var/local/soledad-perf/out/sync-stats.png /tmp/; \ - docker cp 
$${perf_id}:/var/local/soledad-perf/out/series.log /tmp/ - -######################## -# Other helper targets # -######################## - -run-shell: soledad-image - docker run -t -i \ - --memory="$(MEMORY)" \ - $(IMAGE_NAME) \ - /bin/bash - -rm-all-containers: - containers=`docker ps -a | cut -d" " -f 1 | tail -n +2 | xargs`; \ - if [ ! -z "$${containers}" ]; then docker rm -f $${containers}; fi diff --git a/scripts/docker/README.md b/scripts/docker/README.md deleted file mode 100644 index 97b39f87..00000000 --- a/scripts/docker/README.md +++ /dev/null @@ -1,62 +0,0 @@ -Soledad Docker Images -===================== - -The files in this directory help create a docker image that is usable for -running soledad server and client in an isolated docker context. This is -especially useful for testing purposes as you can limit/reserve a certain -amount of resources for the soledad process, and thus provide a baseline for -comparison of time and resource consumption between distinct runs. - -Check the `Dockerfile` for the steps for creating the docker image. - -Check the `Makefile` for the rules for running containers. - - -Installation ------------- - -1. Install docker for your system: https://docs.docker.com/ -2. Build images by running `make` -3. Execute `make run-tox` and `make run-perf` to run tox tests and perf tests, - respectively. -4. You may want to pass some variables to the `make` command to control - parameters of execution, for example: - - make run-perf SOLEDAD_PRELOAD_NUM=500 - - See more variables below. - - -Environment variables for docker containers ------------------------------------------- - -Different environment variables can be set for docker containers and will -cause the scripts to behave differently: - - SOLEDAD_REMOTE - a git url for a remote repository that is added at run time - to the local soledad git repository. - - SOLEDAD_BRANCH - the name of a branch to be checked out from the configured - remote repository. - - SOLEDAD_PRELOAD_NUM - The number of documents to be preloaded in the - container database (either client or server). - - SOLEDAD_PRELOAD_SIZE - The size of the payload of the documents to be - preloaded in the container database (either client or - server). - - SOLEDAD_SERVER_URL - The URL of the soledad server to be used during the - test. - -Check the Makefile for examples of how to use these and other -variables not documented here. - - -Communication between client and server containers --------------------------------------------------- -A CONTAINER_ID_FILE variable can be passed to the Makefile target so that the -container id is recorded in a file for further use. This makes it possible to -extract a container's IP and pass it to another container so they can -communicate, as sketched below.
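For illustration, a minimal sketch of what that id file enables (assuming the default docker bridge network and `/tmp/container-id.txt` as the id file; the `helper/get-container-ip.sh` script in this directory encapsulates the equivalent lookup):

    # read the container id recorded by `make run-server`
    container_id=$(cat /tmp/container-id.txt)
    # ask docker for that container's IP on the default bridge network
    docker inspect --format '{{ .NetworkSettings.IPAddress }}' ${container_id}

The Makefile then hands the resulting IP to the client container through the SOLEDAD_SERVER_URL environment variable.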
diff --git a/scripts/docker/TODO b/scripts/docker/TODO deleted file mode 100644 index 90597637..00000000 --- a/scripts/docker/TODO +++ /dev/null @@ -1,5 +0,0 @@ -- limit resources of containers (mem and cpu) -- allow running couchdb on another container -- use a config file to get defaults for running tests -- use the /builds directory as base of git repo -- save the test state to a directory to make it reproducible diff --git a/scripts/docker/couchdb/Dockerfile b/scripts/docker/couchdb/Dockerfile deleted file mode 100644 index 03448da5..00000000 --- a/scripts/docker/couchdb/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM couchdb:latest - -COPY local.ini /usr/local/etc/couchdb/ diff --git a/scripts/docker/couchdb/Makefile b/scripts/docker/couchdb/Makefile deleted file mode 100644 index cf3ac966..00000000 --- a/scripts/docker/couchdb/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -IMAGE_NAME ?= leap/couchdb - -image: - docker build -t $(IMAGE_NAME) . diff --git a/scripts/docker/couchdb/README.rst b/scripts/docker/couchdb/README.rst deleted file mode 100644 index 31a791a8..00000000 --- a/scripts/docker/couchdb/README.rst +++ /dev/null @@ -1,12 +0,0 @@ -Couchdb Docker image -==================== - -This directory contains rules to build a custom couchdb docker image to be -provided as backend to soledad server. - -Type `make` to build the image. - -Differences between this image and the official one: - - - add the "nodelay" socket option on the httpd section of the config file - (see: https://leap.se/code/issues/8264). diff --git a/scripts/docker/couchdb/local.ini b/scripts/docker/couchdb/local.ini deleted file mode 100644 index 3650e0ed..00000000 --- a/scripts/docker/couchdb/local.ini +++ /dev/null @@ -1,2 +0,0 @@ -[httpd] -socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}] diff --git a/scripts/docker/files/apt/leap.list b/scripts/docker/files/apt/leap.list deleted file mode 100644 index 7eb474d8..00000000 --- a/scripts/docker/files/apt/leap.list +++ /dev/null @@ -1,4 +0,0 @@ -# This file is meant to be copied into the `/etc/apt/sources.list.d` directory -# inside a docker image to provide a source for leap-specific packages. - -deb http://deb.leap.se/0.8 jessie main diff --git a/scripts/docker/files/bin/client_side_db.py b/scripts/docker/files/bin/client_side_db.py deleted file mode 100644 index 80da7392..00000000 --- a/scripts/docker/files/bin/client_side_db.py +++ /dev/null @@ -1,321 +0,0 @@ -#!/usr/bin/python - -import os -import argparse -import tempfile -import getpass -import requests -import srp._pysrp as srp -import binascii -import logging -import json -import time - -from twisted.internet import reactor -from twisted.internet.defer import inlineCallbacks - -from leap.soledad.client import Soledad -from leap.keymanager import KeyManager -from leap.keymanager.openpgp import OpenPGPKey - -from leap.common.events import server -server.ensure_server() - -from util import ValidateUserHandle - - -""" -Script to give access to the client-side Soledad database. - -This is mainly used for tests, but can also be used to recover data from a -Soledad database (public/private keys, export documents, etc). - -To speed up testing/debugging, this script can dump the auth data after -logging in. Use the --export-auth-data option to export auth data to a file. -The content of the file is a json dictionary containing the uuid, server_url, -cert_file and token, which is enough info to instantiate a soledad client -without having to interact with the webapp again.
Use the --use-auth-data -option to use the auth data stored in a file. - -Use the --help option to see available options. -""" - - -# create a logger -logger = logging.getLogger(__name__) -LOG_FORMAT = '%(asctime)s %(message)s' -logging.basicConfig(format=LOG_FORMAT, level=logging.DEBUG) - - -safe_unhexlify = lambda x: binascii.unhexlify(x) if ( - len(x) % 2 == 0) else binascii.unhexlify('0' + x) - - -def _fail(reason): - logger.error('Fail: ' + reason) - exit(2) - - -def _get_api_info(provider): - info = requests.get( - 'https://' + provider + '/provider.json', verify=False).json() - return info['api_uri'], info['api_version'] - - -def _login(username, passphrase, provider, api_uri, api_version): - usr = srp.User(username, passphrase, srp.SHA256, srp.NG_1024) - auth = None - try: - auth = _authenticate(api_uri, api_version, usr).json() - except requests.exceptions.ConnectionError: - _fail('Could not connect to server.') - if 'errors' in auth: - _fail(str(auth['errors'])) - return api_uri, api_version, auth - - -def _authenticate(api_uri, api_version, usr): - api_url = "%s/%s" % (api_uri, api_version) - session = requests.session() - uname, A = usr.start_authentication() - params = {'login': uname, 'A': binascii.hexlify(A)} - init = session.post( - api_url + '/sessions', data=params, verify=False).json() - if 'errors' in init: - _fail('test user not found') - M = usr.process_challenge( - safe_unhexlify(init['salt']), safe_unhexlify(init['B'])) - return session.put(api_url + '/sessions/' + uname, verify=False, - data={'client_auth': binascii.hexlify(M)}) - - -def _get_soledad_info(username, provider, passphrase, basedir): - api_uri, api_version = _get_api_info(provider) - auth = _login(username, passphrase, provider, api_uri, api_version) - # get soledad server url - service_url = '%s/%s/config/soledad-service.json' % \ - (api_uri, api_version) - soledad_hosts = requests.get(service_url, verify=False).json()['hosts'] - hostnames = soledad_hosts.keys() - # allow for choosing the host - host = hostnames[0] - if len(hostnames) > 1: - i = 1 - print "There are many available hosts:" - for h in hostnames: - print " (%d) %s.%s" % (i, h, provider) - i += 1 - choice = raw_input("Choose a host to use (default: 1): ") - if choice != '': - host = hostnames[int(choice) - 1] - server_url = 'https://%s:%d/user-%s' % \ - (soledad_hosts[host]['hostname'], soledad_hosts[host]['port'], - auth[2]['id']) - # get provider ca certificate - ca_cert = requests.get('https://%s/ca.crt' % provider, verify=False).text - cert_file = os.path.join(basedir, 'ca.crt') - with open(cert_file, 'w') as f: - f.write(ca_cert) - return auth[2]['id'], server_url, cert_file, auth[2]['token'] - - -def _get_soledad_instance(uuid, passphrase, basedir, server_url, cert_file, - token): - # setup soledad info - logger.info('UUID is %s' % uuid) - logger.info('Server URL is %s' % server_url) - secrets_path = os.path.join( - basedir, '%s.secret' % uuid) - local_db_path = os.path.join( - basedir, '%s.db' % uuid) - # instantiate soledad - return Soledad( - uuid, - unicode(passphrase), - secrets_path=secrets_path, - local_db_path=local_db_path, - server_url=server_url, - cert_file=cert_file, - auth_token=token) - - -def _get_keymanager_instance(username, provider, soledad, token, - ca_cert_path=None, api_uri=None, api_version=None, - uid=None, gpgbinary=None): - return KeyManager( - "{username}@{provider}".format(username=username, provider=provider), - "http://uri", - soledad, - token=token, - ca_cert_path=ca_cert_path, - api_uri=api_uri, - 
api_version=api_version, - uid=uid, - gpgbinary=gpgbinary) - - -def _parse_args(): - # parse command line - parser = argparse.ArgumentParser() - parser.add_argument( - 'user@provider', action=ValidateUserHandle, help='the user handle') - parser.add_argument( - '--basedir', '-b', default=None, - help='soledad base directory') - parser.add_argument( - '--passphrase', '-p', default=None, - help='the user passphrase') - parser.add_argument( - '--get-all-docs', '-a', action='store_true', - help='get all documents from the local database') - parser.add_argument( - '--create-docs', '-c', default=0, type=int, - help='create a number of documents') - parser.add_argument( - '--sync', '-s', action='store_true', - help='synchronize with the server replica') - parser.add_argument( - '--repeat-sync', '-r', action='store_true', - help='repeat synchronization until no new data is received') - parser.add_argument( - '--export-public-key', help="export the public key to a file") - parser.add_argument( - '--export-private-key', help="export the private key to a file") - parser.add_argument( - '--export-incoming-messages', - help="export incoming messages to a directory") - parser.add_argument( - '--export-auth-data', - help="export authentication data to a file") - parser.add_argument( - '--use-auth-data', - help="use authentication data from a file") - return parser.parse_args() - - -def _get_passphrase(args): - passphrase = args.passphrase - if passphrase is None: - passphrase = getpass.getpass( - 'Password for %s@%s: ' % (args.username, args.provider)) - return passphrase - - -def _get_basedir(args): - basedir = args.basedir - if basedir is None: - basedir = tempfile.mkdtemp() - elif not os.path.isdir(basedir): - os.mkdir(basedir) - logger.info('Using %s as base directory.' % basedir) - return basedir - - -@inlineCallbacks -def _export_key(args, km, fname, private=False): - address = args.username + "@" + args.provider - pkey = yield km.get_key( - address, OpenPGPKey, private=private, fetch_remote=False) - with open(fname, "w") as f: - f.write(pkey.key_data) - - -@inlineCallbacks -def _export_incoming_messages(soledad, directory): - yield soledad.create_index("by-incoming", "bool(incoming)") - docs = yield soledad.get_from_index("by-incoming", '1') - i = 1 - for doc in docs: - with open(os.path.join(directory, "message_%d.gpg" % i), "w") as f: - f.write(doc.content["_enc_json"]) - i += 1 - - -@inlineCallbacks -def _get_all_docs(soledad): - _, docs = yield soledad.get_all_docs() - for doc in docs: - print json.dumps(doc.content, indent=4) - - -# main program - -@inlineCallbacks -def _main(soledad, km, args): - try: - if args.create_docs: - for i in xrange(args.create_docs): - t = time.time() - logger.debug( - "Creating doc %d/%d..."
% (i + 1, args.create_docs)) - content = { - 'datetime': time.strftime( - "%Y-%m-%d %H:%M:%S", time.gmtime(t)), - 'timestamp': t, - 'index': i, - 'total': args.create_docs, - } - yield soledad.create_doc(content) - if args.sync: - yield soledad.sync() - if args.repeat_sync: - old_gen = 0 - new_gen = yield soledad.sync() - while old_gen != new_gen: - old_gen = new_gen - new_gen = yield soledad.sync() - if args.get_all_docs: - yield _get_all_docs(soledad) - if args.export_private_key: - yield _export_key(args, km, args.export_private_key, private=True) - if args.export_public_key: - yield _export_key(args, km, args.export_public_key, private=False) - if args.export_incoming_messages: - yield _export_incoming_messages( - soledad, args.export_incoming_messages) - except Exception as e: - logger.error(e) - finally: - soledad.close() - reactor.callWhenRunning(reactor.stop) - - -if __name__ == '__main__': - args = _parse_args() - passphrase = _get_passphrase(args) - basedir = _get_basedir(args) - - if not args.use_auth_data: - # get auth data from server - uuid, server_url, cert_file, token = \ - _get_soledad_info( - args.username, args.provider, passphrase, basedir) - else: - # load auth data from file - with open(args.use_auth_data) as f: - auth_data = json.loads(f.read()) - uuid = auth_data['uuid'] - server_url = auth_data['server_url'] - cert_file = auth_data['cert_file'] - token = auth_data['token'] - - # export auth data to a file - if args.export_auth_data: - with open(args.export_auth_data, "w") as f: - f.write(json.dumps({ - 'uuid': uuid, - 'server_url': server_url, - 'cert_file': cert_file, - 'token': token, - })) - - soledad = _get_soledad_instance( - uuid, passphrase, basedir, server_url, cert_file, token) - km = _get_keymanager_instance( - args.username, - args.provider, - soledad, - token, - uid=uuid) - _main(soledad, km, args) - reactor.run() diff --git a/scripts/docker/files/bin/conf/cert_default.conf b/scripts/docker/files/bin/conf/cert_default.conf deleted file mode 100644 index 8043cea3..00000000 --- a/scripts/docker/files/bin/conf/cert_default.conf +++ /dev/null @@ -1,15 +0,0 @@ -[ req ] -default_bits = 1024 -default_keyfile = keyfile.pem -distinguished_name = req_distinguished_name -prompt = no -output_password = mypass - -[ req_distinguished_name ] -C = GB -ST = Test State or Province -L = Test Locality -O = Organization Name -OU = Organizational Unit Name -CN = localhost -emailAddress = test@email.address diff --git a/scripts/docker/files/bin/conf/couchdb_default.ini b/scripts/docker/files/bin/conf/couchdb_default.ini deleted file mode 100644 index 5ab72d7b..00000000 --- a/scripts/docker/files/bin/conf/couchdb_default.ini +++ /dev/null @@ -1,361 +0,0 @@ -; etc/couchdb/default.ini.tpl. Generated from default.ini.tpl.in by configure. - -; Upgrading CouchDB will overwrite this file. [vendor] -name = The Apache Software Foundation -version = 1.6.0 - [couchdb] -database_dir = BASEDIR -view_index_dir = BASEDIR -util_driver_dir = /usr/lib/x86_64-linux-gnu/couchdb/erlang/lib/couch-1.6.0/priv/lib -max_document_size = 4294967296 ; 4 GB -os_process_timeout = 5000 ; 5 seconds. for view and external servers. -max_dbs_open = 100 -delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned -uri_file = BASEDIR/couch.uri -; Method used to compress everything that is appended to database and view index files, except -; for attachments (see the attachments section).
Available methods are: -; -; none - no compression -; snappy - use google snappy, a very fast compressor/decompressor -uuid = bc2f8b84ecb0b13a31cf7f6881a52194 - -; deflate_[N] - use zlib's deflate, N is the compression level which ranges from 1 (fastest, -; lowest compression ratio) to 9 (slowest, highest compression ratio) -file_compression = snappy -; Higher values may give better read performance due to fewer read operations -; and/or more OS page cache hits, but they can also increase overall response -; time for writes when there are many attachment write requests in parallel. -attachment_stream_buffer_size = 4096 - -plugin_dir = /usr/lib/x86_64-linux-gnu/couchdb/plugins - -[database_compaction] -; larger buffer sizes can result in smaller files -doc_buffer_size = 524288 ; value in bytes -checkpoint_after = 5242880 ; checkpoint after every N bytes were written - -[view_compaction] -; larger buffer sizes can result in smaller files -keyvalue_buffer_size = 2097152 ; value in bytes - -[httpd] -port = 5984 -bind_address = 127.0.0.1 -authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler} -default_handler = {couch_httpd_db, handle_request} -secure_rewrites = true -vhost_global_handlers = _utils, _uuids, _session, _oauth, _users -allow_jsonp = false -; Options for the MochiWeb HTTP server. -;server_options = [{backlog, 128}, {acceptor_pool_size, 16}] -; For more socket options, consult Erlang's module 'inet' man page. -;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}] -socket_options = [{recbuf, 262144}, {sndbuf, 262144}] -log_max_chunk_size = 1000000 -enable_cors = false -; CouchDB can optionally enforce a maximum uri length; -; max_uri_length = 8000 - -[ssl] -port = 6984 - -[log] -file = BASEDIR/couch.log -level = info -include_sasl = true - -[couch_httpd_auth] -authentication_db = _users -authentication_redirect = /_utils/session.html -require_valid_user = false -timeout = 600 ; number of seconds before automatic logout -auth_cache_size = 50 ; size is number of cache entries -allow_persistent_cookies = false ; set to true to allow persistent cookies -iterations = 10 ; iterations for password hashing -; min_iterations = 1 -; max_iterations = 1000000000 -; comma-separated list of public fields, 404 if empty -; public_fields = - -[cors] -credentials = false -; List of origins separated by a comma, * means accept all -; Origins must include the scheme: http://example.com -; You can’t set origins: * and credentials = true at the same time. -;origins = * -; List of accepted headers separated by a comma -; headers = -; List of accepted methods -; methods = - - -; Configuration for a vhost -;[cors:http://example.com] -; credentials = false -; List of origins separated by a comma -; Origins must include the scheme: http://example.com -; You can’t set origins: * and credentials = true at the same time. -;origins = -; List of accepted headers separated by a comma -; headers = -; List of accepted methods -; methods = - -[couch_httpd_oauth] -; If set to 'true', oauth token and consumer secrets will be looked up -; in the authentication database (_users). These secrets are stored in -; a top level property named "oauth" in user documents.
Example: -; { -; "_id": "org.couchdb.user:joe", -; "type": "user", -; "name": "joe", -; "password_sha": "fe95df1ca59a9b567bdca5cbaf8412abd6e06121", -; "salt": "4e170ffeb6f34daecfd814dfb4001a73", -; "roles": ["foo", "bar"], -; "oauth": { -; "consumer_keys": { -; "consumerKey1": "key1Secret", -; "consumerKey2": "key2Secret" -; }, -; "tokens": { -; "token1": "token1Secret", -; "token2": "token2Secret" -; } -; } -; } -use_users_db = false - -[query_servers] -javascript = /usr/bin/couchjs /usr/share/couchdb/server/main.js -coffeescript = /usr/bin/couchjs /usr/share/couchdb/server/main-coffee.js - - -; Changing reduce_limit to false will disable reduce_limit. -; If you think you're hitting reduce_limit with a "good" reduce function, -; please let us know on the mailing list so we can fine tune the heuristic. -[query_server_config] -reduce_limit = true -os_process_limit = 25 - -[daemons] -index_server={couch_index_server, start_link, []} -external_manager={couch_external_manager, start_link, []} -query_servers={couch_query_servers, start_link, []} -vhosts={couch_httpd_vhost, start_link, []} -httpd={couch_httpd, start_link, []} -stats_aggregator={couch_stats_aggregator, start, []} -stats_collector={couch_stats_collector, start, []} -uuids={couch_uuids, start, []} -auth_cache={couch_auth_cache, start_link, []} -replicator_manager={couch_replicator_manager, start_link, []} -os_daemons={couch_os_daemons, start_link, []} -compaction_daemon={couch_compaction_daemon, start_link, []} - -[httpd_global_handlers] -/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>} -favicon.ico = {couch_httpd_misc_handlers, handle_favicon_req, "/usr/share/couchdb/www"} - -_utils = {couch_httpd_misc_handlers, handle_utils_dir_req, "/usr/share/couchdb/www"} -_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req} -_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req} -_config = {couch_httpd_misc_handlers, handle_config_req} -_replicate = {couch_replicator_httpd, handle_req} -_uuids = {couch_httpd_misc_handlers, handle_uuids_req} -_restart = {couch_httpd_misc_handlers, handle_restart_req} -_stats = {couch_httpd_stats_handlers, handle_stats_req} -_log = {couch_httpd_misc_handlers, handle_log_req} -_session = {couch_httpd_auth, handle_session_req} -_oauth = {couch_httpd_oauth, handle_oauth_req} -_db_updates = {couch_dbupdates_httpd, handle_req} -_plugins = {couch_plugins_httpd, handle_req} - -[httpd_db_handlers] -_all_docs = {couch_mrview_http, handle_all_docs_req} -_changes = {couch_httpd_db, handle_changes_req} -_compact = {couch_httpd_db, handle_compact_req} -_design = {couch_httpd_db, handle_design_req} -_temp_view = {couch_mrview_http, handle_temp_view_req} -_view_cleanup = {couch_mrview_http, handle_cleanup_req} - -; The external module takes an optional argument allowing you to narrow it to a -; single script. Otherwise the script name is inferred from the first path section -; after _external's own path. -; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>} -; _external = {couch_httpd_external, handle_external_req} - -[httpd_design_handlers] -_compact = {couch_mrview_http, handle_compact_req} -_info = {couch_mrview_http, handle_info_req} -_list = {couch_mrview_show, handle_view_list_req} -_rewrite = {couch_httpd_rewrite, handle_rewrite_req} -_show = {couch_mrview_show, handle_doc_show_req} -_update = {couch_mrview_show, handle_doc_update_req} -_view = {couch_mrview_http, handle_view_req} - -; enable external as an httpd handler, then link it with commands here.
-; note, this api is still under consideration. -; [external] -; mykey = /path/to/mycommand - -; Here you can set up commands for CouchDB to manage -; while it is alive. It will attempt to keep each command -; alive if it exits. -; [os_daemons] -; some_daemon_name = /path/to/script -with args - - -[uuids] -; Known algorithms: -; random - 128 bits of random awesome -; All awesome, all the time. -; sequential - monotonically increasing ids with random increments -; First 26 hex characters are random. Last 6 increment in -; random amounts until an overflow occurs. On overflow, the -; random prefix is regenerated and the process starts over. -; utc_random - Time since Jan 1, 1970 UTC with microseconds -; First 14 characters are the time in hex. Last 18 are random. -; utc_id - Time since Jan 1, 1970 UTC with microseconds, plus utc_id_suffix string -; First 14 characters are the time in hex. uuids/utc_id_suffix string value is appended to these. -algorithm = sequential -; The utc_id_suffix value will be appended to uuids generated by the utc_id algorithm. -; Replicating instances should have unique utc_id_suffix values to ensure uniqueness of utc_id ids. -utc_id_suffix = -# Maximum number of UUIDs retrievable from /_uuids in a single request -max_count = 1000 - -[stats] -; rate is in milliseconds -rate = 1000 -; sample intervals are in seconds -samples = [0, 60, 300, 900] - -[attachments] -compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression -compressible_types = text/*, application/javascript, application/json, application/xml - -[replicator] -db = _replicator -; Maximum replication retry count can be a non-negative integer or "infinity". -max_replication_retry_count = 10 -; More worker processes can give higher network throughput but can also -; imply more disk and network IO. -worker_processes = 4 -; With lower batch sizes checkpoints are done more frequently. Lower batch sizes -; also reduce the total amount of used RAM. -worker_batch_size = 500 -; Maximum number of HTTP connections per replication. -http_connections = 20 -; HTTP connection timeout per replication. -; Even for very fast/reliable networks it might need to be increased if a remote -; database is too busy. -connection_timeout = 30000 -; If a request fails, the replicator will retry it up to N times. -retries_per_request = 10 -; Some socket options that might boost performance in some scenarios: -; {nodelay, boolean()} -; {sndbuf, integer()} -; {recbuf, integer()} -; {priority, integer()} -; See the `inet` Erlang module's man page for the full list of options. -socket_options = [{keepalive, true}, {nodelay, false}] -; Path to a file containing the user's certificate. -;cert_file = /full/path/to/server_cert.pem -; Path to file containing user's private PEM encoded key. -;key_file = /full/path/to/server_key.pem -; String containing the user's password. Only used if the private keyfile is password protected. -;password = somepassword -; Set to true to validate peer certificates. -verify_ssl_certificates = false -; File containing a list of peer trusted certificates (in the PEM format). -;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt -; Maximum peer certificate depth (must be set even if certificate validation is off). -ssl_certificate_max_depth = 3 - -[compaction_daemon] -; The delay, in seconds, between each check for which database and view indexes -; need to be compacted.
-check_interval = 300 -; If a database or view index file is smaller than this value (in bytes), -; compaction will not happen. Very small files always have a very high -; fragmentation, therefore it's not worth compacting them. -min_file_size = 131072 - -[compactions] -; List of compaction rules for the compaction daemon. -; The daemon compacts databases and their respective view groups when all the -; condition parameters are satisfied. Configuration can be per database or -; global, and it has the following format: -; -; database_name = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ] -; _default = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ] -; -; Possible parameters: -; -; * db_fragmentation - If the ratio (as an integer percentage), of the amount -; of old data (and its supporting metadata) over the database -; file size is equal to or greater than this value, this -; database compaction condition is satisfied. -; This value is computed as: -; -; (file_size - data_size) / file_size * 100 -; -; The data_size and file_size values can be obtained when -; querying a database's information URI (GET /dbname/). -; -; * view_fragmentation - If the ratio (as an integer percentage), of the amount -; of old data (and its supporting metadata) over the view -; index (view group) file size is equal to or greater than -; this value, then this view index compaction condition is -; satisfied. This value is computed as: -; -; (file_size - data_size) / file_size * 100 -; -; The data_size and file_size values can be obtained when -; querying a view group's information URI -; (GET /dbname/_design/groupname/_info). -; -; * from _and_ to - The period for which a database (and its view groups) compaction -; is allowed. The value for these parameters must obey the format: -; -; HH:MM - HH:MM (HH in [0..23], MM in [0..59]) -; -; * strict_window - If a compaction is still running after the end of the allowed -; period, it will be canceled if this parameter is set to 'true'. -; It defaults to 'false' and it's meaningful only if the *period* -; parameter is also specified. -; -; * parallel_view_compaction - If set to 'true', the database and its views are -; compacted in parallel. This is only useful on -; certain setups, like for example when the database -; and view index directories point to different -; disks. It defaults to 'false'. -; -; Before a compaction is triggered, an estimation of how much free disk space is -; needed is computed. This estimation corresponds to 2 times the data size of -; the database or view index. When there's not enough free disk space to compact -; a particular database or view index, a warning message is logged. -; -; Examples: -; -; 1) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}] -; The `foo` database is compacted if its fragmentation is 70% or more. -; Any view index of this database is compacted only if its fragmentation -; is 60% or more. -; -; 2) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}] -; Similar to the preceding example but a compaction (database or view index) -; is only triggered if the current time is between midnight and 4 AM. -; -; 3) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}] -; Similar to the preceding example - a compaction (database or view index) -; is only triggered if the current time is between midnight and 4 AM.
If at -; 4 AM the database or one of its views is still compacting, the compaction -; process will be canceled. -; -; 4) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}, {parallel_view_compaction, true}] -; Similar to the preceding example, but a database and its views can be -; compacted in parallel. -; -;_default = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "23:00"}, {to, "04:00"}] diff --git a/scripts/docker/files/bin/conf/soledad-server_default.conf b/scripts/docker/files/bin/conf/soledad-server_default.conf deleted file mode 100644 index 5e286374..00000000 --- a/scripts/docker/files/bin/conf/soledad-server_default.conf +++ /dev/null @@ -1,5 +0,0 @@ -[soledad-server] -couch_url = http://localhost:5984 -create_cmd = sudo -u soledad-admin /usr/bin/create-user-db -admin_netrc = /etc/couchdb/couchdb-soledad-admin.netrc -batching = 0 diff --git a/scripts/docker/files/bin/run-client-bootstrap.sh b/scripts/docker/files/bin/run-client-bootstrap.sh deleted file mode 100755 index fbbb42e8..00000000 --- a/scripts/docker/files/bin/run-client-bootstrap.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -# Run a Soledad client connection test. -# -# This script is meant to be copied to the docker container and run upon -# container start. - -CMD="/usr/local/soledad/setup-test-env.py" -REPO="/var/local/soledad" - -if [ ! -z "${SOLEDAD_REMOTE}" ]; then - git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE} - git -C ${REPO} fetch origin -fi - -if [ ! -z "${SOLEDAD_BRANCH}" ]; then - git -C ${REPO} checkout ${SOLEDAD_BRANCH} -fi - -${CMD} soledad-client test --server-url ${SOLEDAD_SERVER_URL} diff --git a/scripts/docker/files/bin/run-client-perf.sh b/scripts/docker/files/bin/run-client-perf.sh deleted file mode 100755 index 01b27b98..00000000 --- a/scripts/docker/files/bin/run-client-perf.sh +++ /dev/null @@ -1,128 +0,0 @@ -#!/bin/sh - -# Start a soledad-perf test using a remote server. -# -# The script does the following: -# -# - configure a remote repository for soledad repo if SOLEDAD_REMOTE is set. -# -# - checkout a specific branch if SOLEDAD_BRANCH is set. -# -# - run the soledad-perf local twisted server that runs the client. Note -# that the actual soledad server should be running on another docker -# container. This local server is only used to measure responsiveness of -# soledad client. The script waits for the server to come up before -# continuing, or else times out after TIMEOUT seconds. -# -# - trigger the creation of documents for sync. -# -# - start the measurement of server responsiveness and sync stages. -# -# - stop the test. -# -# This script is meant to be copied to the docker container and run upon -# container start. - -CMD="/usr/local/soledad/setup-test-env.py" -REPO="/var/local/soledad" -TIMEOUT=20 - -#----------------------------------------------------------------------------- -# configure a remote and checkout a branch -#----------------------------------------------------------------------------- - -if [ ! -z "${SOLEDAD_REMOTE}" ]; then - git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE} - git -C ${REPO} fetch origin -fi - -if [ ! -z "${SOLEDAD_BRANCH}" ]; then - git -C ${REPO} checkout ${SOLEDAD_BRANCH} -fi - -if [ ! -z "${SOLEDAD_PERF_REMOTE}" ]; then - git -C /var/local/soledad-perf remote set-url origin ${SOLEDAD_PERF_REMOTE} - git -C /var/local/soledad-perf fetch origin -fi - -if [ ! 
-z "${SOLEDAD_PERF_BRANCH}" ]; then - git -C /var/local/soledad-perf checkout ${SOLEDAD_PERF_BRANCH} -fi - -#----------------------------------------------------------------------------- -# write a configuration file for the perf test -#----------------------------------------------------------------------------- - -cd /var/local/soledad-perf - -cat > defaults.conf < /dev/null & -sleep 5 # wait a bit for some data points - -# run a sync and generate a graph -make trigger-sync -make trigger-stop diff --git a/scripts/docker/files/bin/run-perf.sh b/scripts/docker/files/bin/run-perf.sh deleted file mode 100755 index 72060230..00000000 --- a/scripts/docker/files/bin/run-perf.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -REPO=/builds/leap/soledad/testing -COUCH_URL="${COUCH_URL:-http://127.0.0.1:5984}" -SOLEDAD_PRELOAD_NUM="${SOLEDAD_PRELOAD_NUM:-100}" - -if [ ! -z "${SOLEDAD_REMOTE}" ]; then - git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE} - git -C ${REPO} fetch origin -fi - -if [ ! -z "${SOLEDAD_BRANCH}" ]; then - git -C ${REPO} checkout ${SOLEDAD_BRANCH} -fi - -cd ${REPO} - -tox perf -- \ - --durations 0 \ - --couch-url ${COUCH_URL} \ - --twisted \ - --num-docs ${SOLEDAD_PRELOAD_NUM} diff --git a/scripts/docker/files/bin/run-server.sh b/scripts/docker/files/bin/run-server.sh deleted file mode 100755 index feedee7e..00000000 --- a/scripts/docker/files/bin/run-server.sh +++ /dev/null @@ -1,89 +0,0 @@ -#!/bin/sh - -# Start a soledad server inside a docker container. -# -# This script will: -# -# - eventually checkout a specific branch from a specific soledad remote. -# -# - create everything a soledad server needs to run (certificate, backend -# server database, tables, etc. -# -# - eventually preload the server database with a number of documents equal -# to SOLEDAD_PRELOAD_NUM, and with payload size equal to -# SOLEDAD_PRELOAD_SIZE. -# -# - run the soledad server. -# -# This script is meant to be copied to the docker container and run upon -# container start. - -CMD="/usr/local/soledad/setup-test-env.py" - -#--------------------------------------------------------------------------- -# eventually checkout a specific branch from a specific remote -#--------------------------------------------------------------------------- - -REPO="/var/local/soledad" - -if [ ! -z "${SOLEDAD_REMOTE}" ]; then - git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE} - git -C ${REPO} fetch origin -fi - -if [ ! -z "${SOLEDAD_BRANCH}" ]; then - git -C ${REPO} checkout ${SOLEDAD_BRANCH} -fi - -#--------------------------------------------------------------------------- -# setup environment for running soledad server -#--------------------------------------------------------------------------- - -${CMD} couch start -${CMD} user-db create -${CMD} token-db create -${CMD} token-db insert-token -${CMD} shared-db create -${CMD} cert create - -#--------------------------------------------------------------------------- -# write a configuration file for the perf test -#--------------------------------------------------------------------------- - -if [ "${SOLEDAD_PRELOAD_NUM}" -gt 0 ]; then - cd /var/local/soledad-perf - - cat > defaults.conf < /dev/null - if [ ${?} -eq 0 ]; then - echo "Soledad server container is up!" - break - else - sleep 1 - fi - now=`date +%s` - elapsed=`expr ${now} - ${start}` -done - -# exit with an error code if timed out waiting for server -if [ ${elapsed} -ge ${TIMEOUT} ]; then - echo "Error: server unreachable at ${server_ip} after ${TIMEOUT} seconds." 
- exit 1 -fi - -set -e - -# run the test -make run-client-${test} CONTAINER_ID_FILE=${tempfile} ${branch} -rm -r ${tempfile} diff --git a/scripts/docker/helper/run-until-error.sh b/scripts/docker/helper/run-until-error.sh deleted file mode 100755 index a4cab6ec..00000000 --- a/scripts/docker/helper/run-until-error.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh - -status=0 -runs=10 - -while [ ${status} -eq 0 -a ${runs} -gt 0 ]; do - echo "=== RUN ${runs}" - make rm-all-containers - make run-perf-test - status=${?} - runs=`expr ${runs} - 1` -done
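For reference, a typical end-to-end perf run with the Makefile targets removed above might have looked like this (a sketch assuming the images were already built with `make` and that `/tmp/container-id.txt` is used as the id file):

    make run-server CONTAINER_ID_FILE=/tmp/container-id.txt
    make run-client-perf CONTAINER_ID_FILE=/tmp/container-id.txt
    make cp-perf-result CONTAINER_ID_FILE=/tmp/container-id.txt
    make rm-all-containers

The server target records the new container's id in the given file, the client target reads it back to discover the server's IP, and `cp-perf-result` copies `sync-stats.png` and `series.log` out of the perf container to `/tmp/`.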