Diffstat (limited to 'scripts')
-rw-r--r--  scripts/docker/Dockerfile                                  |  38
-rw-r--r--  scripts/docker/Makefile                                    | 151
-rw-r--r--  scripts/docker/README.md                                   |  62
-rw-r--r--  scripts/docker/TODO                                        |   5
-rw-r--r--  scripts/docker/couchdb/Dockerfile                          |   3
-rw-r--r--  scripts/docker/couchdb/Makefile                            |   4
-rw-r--r--  scripts/docker/couchdb/README.rst                          |  12
-rw-r--r--  scripts/docker/couchdb/local.ini                           |   2
-rw-r--r--  scripts/docker/files/apt/leap.list                         |   4
-rw-r--r--  scripts/docker/files/bin/client_side_db.py                 | 321
-rw-r--r--  scripts/docker/files/bin/conf/cert_default.conf            |  15
-rw-r--r--  scripts/docker/files/bin/conf/couchdb_default.ini          | 361
-rw-r--r--  scripts/docker/files/bin/conf/soledad-server_default.conf  |   5
-rwxr-xr-x  scripts/docker/files/bin/run-client-bootstrap.sh           |  20
-rwxr-xr-x  scripts/docker/files/bin/run-client-perf.sh                | 128
-rwxr-xr-x  scripts/docker/files/bin/run-perf.sh                       |  22
-rwxr-xr-x  scripts/docker/files/bin/run-server.sh                     |  89
-rwxr-xr-x  scripts/docker/files/bin/run-tox.sh                        |  17
-rwxr-xr-x  scripts/docker/files/bin/run-trial-from-gitlab-ci.sh       |  50
-rwxr-xr-x  scripts/docker/files/bin/run-trial.sh                      |  23
-rwxr-xr-x  scripts/docker/files/bin/setup-test-env.py                 | 647
-rw-r--r--  scripts/docker/files/bin/util.py                           |  75
-rw-r--r--  scripts/docker/files/bin/util.sh                           |  12
-rwxr-xr-x  scripts/docker/files/build/install-deps-from-repos.sh      |  30
-rwxr-xr-x  scripts/docker/helper/get-container-ip.sh                  |  18
-rwxr-xr-x  scripts/docker/helper/run-test.sh                          |  75
-rwxr-xr-x  scripts/docker/helper/run-until-error.sh                   |  12
27 files changed, 0 insertions, 2201 deletions
diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile
deleted file mode 100644
index 7a741e84..00000000
--- a/scripts/docker/Dockerfile
+++ /dev/null
@@ -1,38 +0,0 @@
-# start with a fresh debian image
-# we use backports because of libsqlcipher-dev
-FROM debian:jessie-backports
-
-RUN apt-get update
-
-# needed to build python twisted module
-RUN apt-get -y install libpython2.7-dev
-# needed to build python cryptography module
-RUN apt-get -y install libssl-dev
-RUN apt-get -y install libffi-dev
-# needed to build pysqlcipher
-RUN apt-get -y install libsqlcipher-dev
-# needed to support keymanager
-RUN apt-get -y install libsqlite3-dev
-
-# install pip and tox
-RUN apt-get -y install python-pip
-# We need git from backports because it has
-# the "%cI: committer date, strict ISO 8601 format"
-# pretty format which is used by pytest-benchmark
-RUN apt-get -y install -t jessie-backports git
-
-# curl is used to check the connection to couchdb during CI
-RUN apt-get -y install curl
-
-RUN pip install -U pip
-RUN pip install tox
-
-# clone repositories
-RUN mkdir -p /builds/leap
-RUN git clone https://0xacab.org/leap/soledad.git /builds/leap/soledad
-
-# use tox to install everything needed to run tests
-RUN cd /builds/leap/soledad/testing && tox -v -r --notest
-
-RUN mkdir -p /usr/local/soledad
-COPY files/bin/ /usr/local/soledad/
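-
-# For illustration, the Makefile's soledad-image target builds this image
-# essentially with:
-#
-#   docker build -t leapcode/soledad:latest .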
diff --git a/scripts/docker/Makefile b/scripts/docker/Makefile
deleted file mode 100644
index 499b1a3f..00000000
--- a/scripts/docker/Makefile
+++ /dev/null
@@ -1,151 +0,0 @@
-# This makefile is intended to aid in running soledad docker images for
-# specific purposes, such as running a server, a client, or tests.
-#
-# In order to communicate the IP address of one container to another, we make
-# use of a file containing the container id. You have to explicitly pass the
-# CONTAINER_ID_FILE variable when invoking some of the targets below.
-#
-# Example usage:
-#
-# make run-server CONTAINER_ID_FILE=/tmp/container-id.txt
-# make run-client-perf CONTAINER_ID_FILE=/tmp/container-id.txt
-
-#####################################################################
-# Some configurations you might override when calling this makefile #
-#####################################################################
-
-IMAGE_NAME ?= leapcode/soledad:latest
-SOLEDAD_REMOTE ?= https://0xacab.org/leap/soledad.git
-SOLEDAD_BRANCH ?= develop
-SOLEDAD_PRELOAD_NUM ?= 100
-SOLEDAD_PRELOAD_SIZE ?= 500
-MEMORY ?= 512m
-
-##############################################
-# Docker image generation (main make target) #
-##############################################
-
-all: soledad-image couchdb-image
-
-soledad-image:
- docker build ${DOCKER_BUILD_OPTS} -t $(IMAGE_NAME) .
-
-couchdb-image:
- (cd couchdb/ && make)
-
-##################################################
-# Run a Soledad Server inside a docker container #
-##################################################
-
-run-server:
- @if [ -z "$(CONTAINER_ID_FILE)" ]; then \
- echo "Error: you have to pass a value to CONTAINER_ID_FILE."; \
- exit 2; \
- fi
- docker run \
- --memory="$(MEMORY)" \
- --cpuset-cpus=0 \
- --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \
- --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \
- --env="SOLEDAD_PRELOAD_NUM=$(SOLEDAD_PRELOAD_NUM)" \
- --env="SOLEDAD_PRELOAD_SIZE=$(SOLEDAD_PRELOAD_SIZE)" \
- --cidfile=$(CONTAINER_ID_FILE) \
- --detach \
- $(IMAGE_NAME) \
- /usr/local/soledad/run-server.sh # --drop-to-shell
-
-run-client-bootstrap:
- @if [ -z "$(CONTAINER_ID_FILE)" ]; then \
- echo "Error: you have to pass a value to CONTAINER_ID_FILE."; \
- exit 2; \
- fi
- container_id=`cat $(CONTAINER_ID_FILE)`; \
- server_ip=`./helper/get-container-ip.sh $${container_id}`; \
- docker run -t -i \
- --memory="$(MEMORY)" \
- --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \
- --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \
- --env="SOLEDAD_SERVER_URL=http://$${server_ip}:2424" \
- $(IMAGE_NAME) \
- /usr/local/soledad/run-client-bootstrap.sh
-
-#################################################
-# Run all tests inside a docker container #
-#################################################
-
-run-tox:
- name=$$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 5 | head -n 1); \
- docker run -d --name $${name} leap/couchdb; \
- docker run -t -i \
- --memory="$(MEMORY)" \
- --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \
- --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \
- --env="COUCH_URL=http://$${name}:5984" \
- --link $${name} \
- $(IMAGE_NAME) \
- /usr/local/soledad/run-tox.sh
-
-############################################
-# Performance tests and graphic generation #
-############################################
-
-run-perf:
- name=$$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 5 | head -n 1); \
- docker run -d --name $${name} leap/couchdb; \
- docker run -t -i \
- --memory="$(MEMORY)" \
- --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \
- --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \
- --env="SOLEDAD_PRELOAD_NUM=$(SOLEDAD_PRELOAD_NUM)" \
- --env="COUCH_URL=http://$${name}:5984" \
- --link $${name} \
- $(IMAGE_NAME) \
- /usr/local/soledad/run-perf.sh
-
-run-client-perf:
- @if [ -z "$(CONTAINER_ID_FILE)" ]; then \
- echo "Error: you have to pass a value to CONTAINER_ID_FILE."; \
- exit 2; \
- fi
- container_id=`cat $(CONTAINER_ID_FILE)`; \
- server_ip=`./helper/get-container-ip.sh $${container_id}`; \
- docker run -t -i \
- --memory="$(MEMORY)" \
- --cpuset-cpus=1 \
- --cidfile=$(CONTAINER_ID_FILE)-perf \
- --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \
- --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \
- --env="SOLEDAD_PERF_REMOTE=https://0xacab.org/drebs/soledad-perf.git" \
- --env="SOLEDAD_PERF_BRANCH=bug/ensure-events-server" \
- --env="SOLEDAD_PRELOAD_NUM=$(SOLEDAD_PRELOAD_NUM)" \
- --env="SOLEDAD_PRELOAD_SIZE=$(SOLEDAD_PRELOAD_SIZE)" \
- --env="SOLEDAD_STATS=1" \
- --env="SOLEDAD_SERVER_URL=http://$${server_ip}:2424" \
- --env="SOLEDAD_LOG=1" \
- $(IMAGE_NAME) \
- /usr/local/soledad/run-client-perf.sh # --drop-to-shell
-
-cp-perf-result:
- @if [ -z "$(CONTAINER_ID_FILE)" ]; then \
- echo "Error: you have to pass a value to CONTAINER_ID_FILE."; \
- exit 2; \
- fi
- perf_id=`cat $(CONTAINER_ID_FILE)-perf`; \
- docker cp $${perf_id}:/var/local/soledad-perf/out/sync-stats.png /tmp/; \
- docker cp $${perf_id}:/var/local/soledad-perf/out/series.log /tmp/
-
-########################
-# Other helper targets #
-########################
-
-run-shell: soledad-image
- docker run -t -i \
- --memory="$(MEMORY)" \
- $(IMAGE_NAME) \
- /bin/bash
-
-rm-all-containers:
- containers=`docker ps -a | cut -d" " -f 1 | tail -n +2 | xargs`; \
- if [ ! -z "$${containers}" ]; then docker rm -f $${containers}; fi
diff --git a/scripts/docker/README.md b/scripts/docker/README.md
deleted file mode 100644
index 97b39f87..00000000
--- a/scripts/docker/README.md
+++ /dev/null
@@ -1,62 +0,0 @@
-Soledad Docker Images
-=====================
-
-The files in this directory help create a docker image that is usable for
-running soledad server and client in an isolated docker context. This is
-especially useful for testing purposes as you can limit/reserve a certain
-amount of resources for the soledad process, and thus provide a baseline for
-comparison of time and resource consumption between distinct runs.
-
-Check the `Dockerfile` for the steps for creating the docker image.
-
-Check the `Makefile` for the rules for running containers.
-
-
-Installation
-------------
-
-1. Install docker for your system: https://docs.docker.com/
-2. Build images by running `make`
-3. Execute `make run-tox` and `make run-perf` to run tox tests and perf tests,
-   respectively.
-4. You may want to pass some variables to the `make` command to control
- parameters of execution, for example:
-
- make run-perf SOLEDAD_PRELOAD_NUM=500
-
- See more variables below.
-
-
-Environment variables for docker containers
--------------------------------------------
-
-Different environment variables can be set for docker containers and will
-cause the scripts to behave differently:
-
- SOLEDAD_REMOTE - a git url for a remote repository that is added at run time
- to the local soledad git repository.
-
- SOLEDAD_BRANCH - the name of a branch to be checked out from the configured
- remote repository.
-
- SOLEDAD_PRELOAD_NUM - The number of documents to be preloaded in the
- container database (either client or server).
-
-    SOLEDAD_PRELOAD_SIZE - The size of the payload of the documents to be
-                           preloaded in the container database (either client
-                           or server).
-
- SOLEDAD_SERVER_URL - The URL of the soledad server to be used during the
- test.
-
-Check the Makefile for examples of how to use these and other variables not
-documented here.
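-
-For example, one could run the tox tests against a fork and branch of choice
-(the URL and branch name below are placeholders):
-
-    make run-tox SOLEDAD_REMOTE=https://example.org/fork/soledad.git \
-                 SOLEDAD_BRANCH=my-feature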
-
-
-Communication between client and server containers
---------------------------------------------------
-
-A CONTAINER_ID_FILE variable can be passed to the Makefile target so that the
-container id is recorded in a file for further use. This makes it possible to
-extract a container's IP and pass it to another container so they can
-communicate.
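-
-For illustration, a typical session using the targets defined in the Makefile
-might look like this:
-
-    make run-server CONTAINER_ID_FILE=/tmp/container-id.txt
-    make run-client-perf CONTAINER_ID_FILE=/tmp/container-id.txt
-    make cp-perf-result CONTAINER_ID_FILE=/tmp/container-id.txt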
diff --git a/scripts/docker/TODO b/scripts/docker/TODO
deleted file mode 100644
index 90597637..00000000
--- a/scripts/docker/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
-- limit resources of containers (mem and cpu)
-- allow running couchdb on another container
-- use a config file to get defaults for running tests
-- use the /builds directory as base of git repo
-- save the test state to a directory to make it reproducible
diff --git a/scripts/docker/couchdb/Dockerfile b/scripts/docker/couchdb/Dockerfile
deleted file mode 100644
index 03448da5..00000000
--- a/scripts/docker/couchdb/Dockerfile
+++ /dev/null
@@ -1,3 +0,0 @@
-FROM couchdb:latest
-
-COPY local.ini /usr/local/etc/couchdb/
diff --git a/scripts/docker/couchdb/Makefile b/scripts/docker/couchdb/Makefile
deleted file mode 100644
index cf3ac966..00000000
--- a/scripts/docker/couchdb/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-IMAGE_NAME ?= leap/couchdb
-
-image:
- docker build -t $(IMAGE_NAME) .
diff --git a/scripts/docker/couchdb/README.rst b/scripts/docker/couchdb/README.rst
deleted file mode 100644
index 31a791a8..00000000
--- a/scripts/docker/couchdb/README.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-Couchdb Docker image
-====================
-
-This directory contains rules to build a custom couchdb docker image to be
-used as a backend for the soledad server.
-
-Type `make` to build the image.
-
-Differences between this image and the official one:
-
-  - add the "nodelay" socket option in the httpd section of the config file
-    (see: https://leap.se/code/issues/8264).
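-
-For illustration, the image can be built and run by hand (``leap/couchdb`` is
-the default image name set in the Makefile; the container name below is
-arbitrary)::
-
-    make
-    docker run -d --name couchdb-test leap/couchdb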
diff --git a/scripts/docker/couchdb/local.ini b/scripts/docker/couchdb/local.ini
deleted file mode 100644
index 3650e0ed..00000000
--- a/scripts/docker/couchdb/local.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-[httpd]
-socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
diff --git a/scripts/docker/files/apt/leap.list b/scripts/docker/files/apt/leap.list
deleted file mode 100644
index 7eb474d8..00000000
--- a/scripts/docker/files/apt/leap.list
+++ /dev/null
@@ -1,4 +0,0 @@
-# This file is meant to be copied into the `/etc/apt/sources.list.d` directory
-# inside a docker image to provide a source for leap-specific packages.
-
-deb http://deb.leap.se/0.8 jessie main
diff --git a/scripts/docker/files/bin/client_side_db.py b/scripts/docker/files/bin/client_side_db.py
deleted file mode 100644
index 80da7392..00000000
--- a/scripts/docker/files/bin/client_side_db.py
+++ /dev/null
@@ -1,321 +0,0 @@
-#!/usr/bin/python
-
-import os
-import argparse
-import tempfile
-import getpass
-import requests
-import srp._pysrp as srp
-import binascii
-import logging
-import json
-import time
-
-from twisted.internet import reactor
-from twisted.internet.defer import inlineCallbacks
-
-from leap.soledad.client import Soledad
-from leap.keymanager import KeyManager
-from leap.keymanager.openpgp import OpenPGPKey
-
-from leap.common.events import server
-server.ensure_server()
-
-from util import ValidateUserHandle
-
-
-"""
-Script to give access to client-side Soledad database.
-
-This is mainly used for tests, but can also be used to recover data from a
-Soledad database (public/private keys, export documents, etc).
-
-To speed up testing/debugging, this script can dump the auth data after
-logging in. Use the --export-auth-data option to export auth data to a file.
-The contents of the file is a json dictionary containing the uuid, server_url,
-cert_file and token, which is enough info to instantiate a soledad client
-without having to interact with the webapp again. Use the --use-auth-data
-option to use the auth data stored in a file.
-
-Use the --help option to see available options.
-"""
-
-
-# create a logger
-logger = logging.getLogger(__name__)
-LOG_FORMAT = '%(asctime)s %(message)s'
-logging.basicConfig(format=LOG_FORMAT, level=logging.DEBUG)
-
-
-safe_unhexlify = lambda x: binascii.unhexlify(x) if (
- len(x) % 2 == 0) else binascii.unhexlify('0' + x)
-
-
-def _fail(reason):
- logger.error('Fail: ' + reason)
- exit(2)
-
-
-def _get_api_info(provider):
- info = requests.get(
- 'https://' + provider + '/provider.json', verify=False).json()
- return info['api_uri'], info['api_version']
-
-
-def _login(username, passphrase, provider, api_uri, api_version):
- usr = srp.User(username, passphrase, srp.SHA256, srp.NG_1024)
- auth = None
- try:
- auth = _authenticate(api_uri, api_version, usr).json()
- except requests.exceptions.ConnectionError:
- _fail('Could not connect to server.')
- if 'errors' in auth:
- _fail(str(auth['errors']))
- return api_uri, api_version, auth
-
-
-def _authenticate(api_uri, api_version, usr):
- api_url = "%s/%s" % (api_uri, api_version)
- session = requests.session()
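-    # SRP handshake: send the client ephemeral A, receive the salt and the
-    # server ephemeral B, then answer the challenge with the proof M.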
- uname, A = usr.start_authentication()
- params = {'login': uname, 'A': binascii.hexlify(A)}
- init = session.post(
- api_url + '/sessions', data=params, verify=False).json()
- if 'errors' in init:
- _fail('test user not found')
- M = usr.process_challenge(
- safe_unhexlify(init['salt']), safe_unhexlify(init['B']))
- return session.put(api_url + '/sessions/' + uname, verify=False,
- data={'client_auth': binascii.hexlify(M)})
-
-
-def _get_soledad_info(username, provider, passphrase, basedir):
- api_uri, api_version = _get_api_info(provider)
- auth = _login(username, passphrase, provider, api_uri, api_version)
- # get soledad server url
- service_url = '%s/%s/config/soledad-service.json' % \
- (api_uri, api_version)
- soledad_hosts = requests.get(service_url, verify=False).json()['hosts']
- hostnames = soledad_hosts.keys()
- # allow for choosing the host
- host = hostnames[0]
- if len(hostnames) > 1:
- i = 1
-        print "There are multiple available hosts:"
- for h in hostnames:
- print " (%d) %s.%s" % (i, h, provider)
- i += 1
- choice = raw_input("Choose a host to use (default: 1): ")
- if choice != '':
- host = hostnames[int(choice) - 1]
- server_url = 'https://%s:%d/user-%s' % \
- (soledad_hosts[host]['hostname'], soledad_hosts[host]['port'],
- auth[2]['id'])
- # get provider ca certificate
- ca_cert = requests.get('https://%s/ca.crt' % provider, verify=False).text
- cert_file = os.path.join(basedir, 'ca.crt')
- with open(cert_file, 'w') as f:
- f.write(ca_cert)
- return auth[2]['id'], server_url, cert_file, auth[2]['token']
-
-
-def _get_soledad_instance(uuid, passphrase, basedir, server_url, cert_file,
- token):
- # setup soledad info
- logger.info('UUID is %s' % uuid)
- logger.info('Server URL is %s' % server_url)
- secrets_path = os.path.join(
- basedir, '%s.secret' % uuid)
- local_db_path = os.path.join(
- basedir, '%s.db' % uuid)
- # instantiate soledad
- return Soledad(
- uuid,
- unicode(passphrase),
- secrets_path=secrets_path,
- local_db_path=local_db_path,
- server_url=server_url,
- cert_file=cert_file,
- auth_token=token)
-
-
-def _get_keymanager_instance(username, provider, soledad, token,
- ca_cert_path=None, api_uri=None, api_version=None,
- uid=None, gpgbinary=None):
- return KeyManager(
- "{username}@{provider}".format(username=username, provider=provider),
- "http://uri",
- soledad,
- token=token,
- ca_cert_path=ca_cert_path,
- api_uri=api_uri,
- api_version=api_version,
- uid=uid,
- gpgbinary=gpgbinary)
-
-
-def _parse_args():
- # parse command line
- parser = argparse.ArgumentParser()
- parser.add_argument(
- 'user@provider', action=ValidateUserHandle, help='the user handle')
- parser.add_argument(
- '--basedir', '-b', default=None,
- help='soledad base directory')
- parser.add_argument(
- '--passphrase', '-p', default=None,
- help='the user passphrase')
- parser.add_argument(
- '--get-all-docs', '-a', action='store_true',
- help='get all documents from the local database')
- parser.add_argument(
- '--create-docs', '-c', default=0, type=int,
- help='create a number of documents')
- parser.add_argument(
- '--sync', '-s', action='store_true',
- help='synchronize with the server replica')
- parser.add_argument(
- '--repeat-sync', '-r', action='store_true',
- help='repeat synchronization until no new data is received')
- parser.add_argument(
- '--export-public-key', help="export the public key to a file")
- parser.add_argument(
- '--export-private-key', help="export the private key to a file")
- parser.add_argument(
- '--export-incoming-messages',
- help="export incoming messages to a directory")
- parser.add_argument(
- '--export-auth-data',
- help="export authentication data to a file")
- parser.add_argument(
- '--use-auth-data',
- help="use authentication data from a file")
- return parser.parse_args()
-
-
-def _get_passphrase(args):
- passphrase = args.passphrase
- if passphrase is None:
- passphrase = getpass.getpass(
- 'Password for %s@%s: ' % (args.username, args.provider))
- return passphrase
-
-
-def _get_basedir(args):
- basedir = args.basedir
- if basedir is None:
- basedir = tempfile.mkdtemp()
- elif not os.path.isdir(basedir):
- os.mkdir(basedir)
- logger.info('Using %s as base directory.' % basedir)
- return basedir
-
-
-@inlineCallbacks
-def _export_key(args, km, fname, private=False):
- address = args.username + "@" + args.provider
- pkey = yield km.get_key(
- address, OpenPGPKey, private=private, fetch_remote=False)
-    with open(fname, "w") as f:
- f.write(pkey.key_data)
-
-
-@inlineCallbacks
-def _export_incoming_messages(soledad, directory):
- yield soledad.create_index("by-incoming", "bool(incoming)")
- docs = yield soledad.get_from_index("by-incoming", '1')
- i = 1
- for doc in docs:
- with open(os.path.join(directory, "message_%d.gpg" % i), "w") as f:
- f.write(doc.content["_enc_json"])
- i += 1
-
-
-@inlineCallbacks
-def _get_all_docs(soledad):
- _, docs = yield soledad.get_all_docs()
- for doc in docs:
- print json.dumps(doc.content, indent=4)
-
-
-# main program
-
-@inlineCallbacks
-def _main(soledad, km, args):
- try:
- if args.create_docs:
- for i in xrange(args.create_docs):
- t = time.time()
- logger.debug(
- "Creating doc %d/%d..." % (i + 1, args.create_docs))
- content = {
- 'datetime': time.strftime(
- "%Y-%m-%d %H:%M:%S", time.gmtime(t)),
- 'timestamp': t,
- 'index': i,
- 'total': args.create_docs,
- }
- yield soledad.create_doc(content)
- if args.sync:
- yield soledad.sync()
- if args.repeat_sync:
- old_gen = 0
- new_gen = yield soledad.sync()
- while old_gen != new_gen:
- old_gen = new_gen
- new_gen = yield soledad.sync()
- if args.get_all_docs:
- yield _get_all_docs(soledad)
- if args.export_private_key:
- yield _export_key(args, km, args.export_private_key, private=True)
- if args.export_public_key:
-            yield _export_key(args, km, args.export_public_key, private=False)
- if args.export_incoming_messages:
- yield _export_incoming_messages(
- soledad, args.export_incoming_messages)
- except Exception as e:
- logger.error(e)
- finally:
- soledad.close()
- reactor.callWhenRunning(reactor.stop)
-
-
-if __name__ == '__main__':
- args = _parse_args()
- passphrase = _get_passphrase(args)
- basedir = _get_basedir(args)
-
- if not args.use_auth_data:
- # get auth data from server
- uuid, server_url, cert_file, token = \
- _get_soledad_info(
- args.username, args.provider, passphrase, basedir)
- else:
- # load auth data from file
- with open(args.use_auth_data) as f:
- auth_data = json.loads(f.read())
- uuid = auth_data['uuid']
- server_url = auth_data['server_url']
- cert_file = auth_data['cert_file']
- token = auth_data['token']
-
- # export auth data to a file
- if args.export_auth_data:
- with open(args.export_auth_data, "w") as f:
- f.write(json.dumps({
- 'uuid': uuid,
- 'server_url': server_url,
- 'cert_file': cert_file,
- 'token': token,
- }))
-
- soledad = _get_soledad_instance(
- uuid, passphrase, basedir, server_url, cert_file, token)
- km = _get_keymanager_instance(
- args.username,
- args.provider,
- soledad,
- token,
- uid=uuid)
- _main(soledad, km, args)
- reactor.run()
diff --git a/scripts/docker/files/bin/conf/cert_default.conf b/scripts/docker/files/bin/conf/cert_default.conf
deleted file mode 100644
index 8043cea3..00000000
--- a/scripts/docker/files/bin/conf/cert_default.conf
+++ /dev/null
@@ -1,15 +0,0 @@
-[ req ]
-default_bits = 1024
-default_keyfile = keyfile.pem
-distinguished_name = req_distinguished_name
-prompt = no
-output_password = mypass
-
-[ req_distinguished_name ]
-C = GB
-ST = Test State or Province
-L = Test Locality
-O = Organization Name
-OU = Organizational Unit Name
-CN = localhost
-emailAddress = test@email.address
diff --git a/scripts/docker/files/bin/conf/couchdb_default.ini b/scripts/docker/files/bin/conf/couchdb_default.ini
deleted file mode 100644
index 5ab72d7b..00000000
--- a/scripts/docker/files/bin/conf/couchdb_default.ini
+++ /dev/null
@@ -1,361 +0,0 @@
-; etc/couchdb/default.ini.tpl. Generated from default.ini.tpl.in by configure.
-
-; Upgrading CouchDB will overwrite this file.
-[vendor]
-name = The Apache Software Foundation
-version = 1.6.0
-
-[couchdb]
-database_dir = BASEDIR
-view_index_dir = BASEDIR
-util_driver_dir = /usr/lib/x86_64-linux-gnu/couchdb/erlang/lib/couch-1.6.0/priv/lib
-max_document_size = 4294967296 ; 4 GB
-os_process_timeout = 5000 ; 5 seconds. for view and external servers.
-max_dbs_open = 100
-delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned
-uri_file = BASEDIR/couch.uri
-uuid = bc2f8b84ecb0b13a31cf7f6881a52194
-
-; Method used to compress everything that is appended to database and view index files, except
-; for attachments (see the attachments section). Available methods are:
-;
-; none - no compression
-; snappy - use google snappy, a very fast compressor/decompressor
-; deflate_[N] - use zlib's deflate, N is the compression level which ranges from 1 (fastest,
-;               lowest compression ratio) to 9 (slowest, highest compression ratio)
-file_compression = snappy
-; Higher values may give better read performance due to less read operations
-; and/or more OS page cache hits, but they can also increase overall response
-; time for writes when there are many attachment write requests in parallel.
-attachment_stream_buffer_size = 4096
-
-plugin_dir = /usr/lib/x86_64-linux-gnu/couchdb/plugins
-
-[database_compaction]
-; larger buffer sizes can result in smaller files
-doc_buffer_size = 524288 ; value in bytes
-checkpoint_after = 5242880 ; checkpoint after every N bytes were written
-
-[view_compaction]
-; larger buffer sizes can result in smaller files
-keyvalue_buffer_size = 2097152 ; value in bytes
-
-[httpd]
-port = 5984
-bind_address = 127.0.0.1
-authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
-default_handler = {couch_httpd_db, handle_request}
-secure_rewrites = true
-vhost_global_handlers = _utils, _uuids, _session, _oauth, _users
-allow_jsonp = false
-; Options for the MochiWeb HTTP server.
-;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
-; For more socket options, consult Erlang's module 'inet' man page.
-;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
-socket_options = [{recbuf, 262144}, {sndbuf, 262144}]
-log_max_chunk_size = 1000000
-enable_cors = false
-; CouchDB can optionally enforce a maximum uri length;
-; max_uri_length = 8000
-
-[ssl]
-port = 6984
-
-[log]
-file = BASEDIR/couch.log
-level = info
-include_sasl = true
-
-[couch_httpd_auth]
-authentication_db = _users
-authentication_redirect = /_utils/session.html
-require_valid_user = false
-timeout = 600 ; number of seconds before automatic logout
-auth_cache_size = 50 ; size is number of cache entries
-allow_persistent_cookies = false ; set to true to allow persistent cookies
-iterations = 10 ; iterations for password hashing
-; min_iterations = 1
-; max_iterations = 1000000000
-; comma-separated list of public fields, 404 if empty
-; public_fields =
-
-[cors]
-credentials = false
-; List of origins separated by a comma, * means accept all
-; Origins must include the scheme: http://example.com
-; You can’t set origins: * and credentials = true at the same time.
-;origins = *
-; List of accepted headers separated by a comma
-; headers =
-; List of accepted methods
-; methods =
-
-
-; Configuration for a vhost
-;[cors:http://example.com]
-; credentials = false
-; List of origins separated by a comma
-; Origins must include the scheme: http://example.com
-; You can’t set origins: * and credentials = true at the same time.
-;origins =
-; List of accepted headers separated by a comma
-; headers =
-; List of accepted methods
-; methods =
-
-[couch_httpd_oauth]
-; If set to 'true', oauth token and consumer secrets will be looked up
-; in the authentication database (_users). These secrets are stored in
-; a top level property named "oauth" in user documents. Example:
-; {
-; "_id": "org.couchdb.user:joe",
-; "type": "user",
-; "name": "joe",
-; "password_sha": "fe95df1ca59a9b567bdca5cbaf8412abd6e06121",
-; "salt": "4e170ffeb6f34daecfd814dfb4001a73"
-; "roles": ["foo", "bar"],
-; "oauth": {
-; "consumer_keys": {
-; "consumerKey1": "key1Secret",
-; "consumerKey2": "key2Secret"
-; },
-; "tokens": {
-; "token1": "token1Secret",
-; "token2": "token2Secret"
-; }
-; }
-; }
-use_users_db = false
-
-[query_servers]
-javascript = /usr/bin/couchjs /usr/share/couchdb/server/main.js
-coffeescript = /usr/bin/couchjs /usr/share/couchdb/server/main-coffee.js
-
-
-; Changing reduce_limit to false will disable reduce_limit.
-; If you think you're hitting reduce_limit with a "good" reduce function,
-; please let us know on the mailing list so we can fine tune the heuristic.
-[query_server_config]
-reduce_limit = true
-os_process_limit = 25
-
-[daemons]
-index_server={couch_index_server, start_link, []}
-external_manager={couch_external_manager, start_link, []}
-query_servers={couch_query_servers, start_link, []}
-vhosts={couch_httpd_vhost, start_link, []}
-httpd={couch_httpd, start_link, []}
-stats_aggregator={couch_stats_aggregator, start, []}
-stats_collector={couch_stats_collector, start, []}
-uuids={couch_uuids, start, []}
-auth_cache={couch_auth_cache, start_link, []}
-replicator_manager={couch_replicator_manager, start_link, []}
-os_daemons={couch_os_daemons, start_link, []}
-compaction_daemon={couch_compaction_daemon, start_link, []}
-
-[httpd_global_handlers]
-/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
-favicon.ico = {couch_httpd_misc_handlers, handle_favicon_req, "/usr/share/couchdb/www"}
-
-_utils = {couch_httpd_misc_handlers, handle_utils_dir_req, "/usr/share/couchdb/www"}
-_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req}
-_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
-_config = {couch_httpd_misc_handlers, handle_config_req}
-_replicate = {couch_replicator_httpd, handle_req}
-_uuids = {couch_httpd_misc_handlers, handle_uuids_req}
-_restart = {couch_httpd_misc_handlers, handle_restart_req}
-_stats = {couch_httpd_stats_handlers, handle_stats_req}
-_log = {couch_httpd_misc_handlers, handle_log_req}
-_session = {couch_httpd_auth, handle_session_req}
-_oauth = {couch_httpd_oauth, handle_oauth_req}
-_db_updates = {couch_dbupdates_httpd, handle_req}
-_plugins = {couch_plugins_httpd, handle_req}
-
-[httpd_db_handlers]
-_all_docs = {couch_mrview_http, handle_all_docs_req}
-_changes = {couch_httpd_db, handle_changes_req}
-_compact = {couch_httpd_db, handle_compact_req}
-_design = {couch_httpd_db, handle_design_req}
-_temp_view = {couch_mrview_http, handle_temp_view_req}
-_view_cleanup = {couch_mrview_http, handle_cleanup_req}
-
-; The external module takes an optional argument allowing you to narrow it to a
-; single script. Otherwise the script name is inferred from the first path section
-; after _external's own path.
-; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>}
-; _external = {couch_httpd_external, handle_external_req}
-
-[httpd_design_handlers]
-_compact = {couch_mrview_http, handle_compact_req}
-_info = {couch_mrview_http, handle_info_req}
-_list = {couch_mrview_show, handle_view_list_req}
-_rewrite = {couch_httpd_rewrite, handle_rewrite_req}
-_show = {couch_mrview_show, handle_doc_show_req}
-_update = {couch_mrview_show, handle_doc_update_req}
-_view = {couch_mrview_http, handle_view_req}
-
-; enable external as an httpd handler, then link it with commands here.
-; note, this api is still under consideration.
-; [external]
-; mykey = /path/to/mycommand
-
-; Here you can setup commands for CouchDB to manage
-; while it is alive. It will attempt to keep each command
-; alive if it exits.
-; [os_daemons]
-; some_daemon_name = /path/to/script -with args
-
-
-[uuids]
-; Known algorithms:
-; random - 128 bits of random awesome
-; All awesome, all the time.
-; sequential - monotonically increasing ids with random increments
-; First 26 hex characters are random. Last 6 increment in
-; random amounts until an overflow occurs. On overflow, the
-; random prefix is regenerated and the process starts over.
-; utc_random - Time since Jan 1, 1970 UTC with microseconds
-; First 14 characters are the time in hex. Last 18 are random.
-; utc_id - Time since Jan 1, 1970 UTC with microseconds, plus utc_id_suffix string
-; First 14 characters are the time in hex. uuids/utc_id_suffix string value is appended to these.
-algorithm = sequential
-; The utc_id_suffix value will be appended to uuids generated by the utc_id algorithm.
-; Replicating instances should have unique utc_id_suffix values to ensure uniqueness of utc_id ids.
-utc_id_suffix =
-; Maximum number of UUIDs retrievable from /_uuids in a single request
-max_count = 1000
-
-[stats]
-; rate is in milliseconds
-rate = 1000
-; sample intervals are in seconds
-samples = [0, 60, 300, 900]
-
-[attachments]
-compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
-compressible_types = text/*, application/javascript, application/json, application/xml
-
-[replicator]
-db = _replicator
-; Maximum replication retry count can be a non-negative integer or "infinity".
-max_replication_retry_count = 10
-; More worker processes can give higher network throughput but can also
-; imply more disk and network IO.
-worker_processes = 4
-; With lower batch sizes checkpoints are done more frequently. Lower batch sizes
-; also reduce the total amount of used RAM memory.
-worker_batch_size = 500
-; Maximum number of HTTP connections per replication.
-http_connections = 20
-; HTTP connection timeout per replication.
-; Even for very fast/reliable networks it might need to be increased if a remote
-; database is too busy.
-connection_timeout = 30000
-; If a request fails, the replicator will retry it up to N times.
-retries_per_request = 10
-; Some socket options that might boost performance in some scenarios:
-; {nodelay, boolean()}
-; {sndbuf, integer()}
-; {recbuf, integer()}
-; {priority, integer()}
-; See the `inet` Erlang module's man page for the full list of options.
-socket_options = [{keepalive, true}, {nodelay, false}]
-; Path to a file containing the user's certificate.
-;cert_file = /full/path/to/server_cert.pem
-; Path to file containing user's private PEM encoded key.
-;key_file = /full/path/to/server_key.pem
-; String containing the user's password. Only used if the private keyfile is password protected.
-;password = somepassword
-; Set to true to validate peer certificates.
-verify_ssl_certificates = false
-; File containing a list of peer trusted certificates (in the PEM format).
-;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
-; Maximum peer certificate depth (must be set even if certificate validation is off).
-ssl_certificate_max_depth = 3
-
-[compaction_daemon]
-; The delay, in seconds, between each check for which database and view indexes
-; need to be compacted.
-check_interval = 300
-; If a database or view index file is smaller than this value (in bytes),
-; compaction will not happen. Very small files always have a very high
-; fragmentation, therefore it's not worth compacting them.
-min_file_size = 131072
-
-[compactions]
-; List of compaction rules for the compaction daemon.
-; The daemon compacts databases and their respective view groups when all the
-; condition parameters are satisfied. Configuration can be per database or
-; global, and it has the following format:
-;
-; database_name = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
-; _default = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
-;
-; Possible parameters:
-;
-; * db_fragmentation - If the ratio (as an integer percentage), of the amount
-; of old data (and its supporting metadata) over the database
-; file size is equal to or greater than this value, this
-; database compaction condition is satisfied.
-; This value is computed as:
-;
-; (file_size - data_size) / file_size * 100
-;
-; The data_size and file_size values can be obtained when
-; querying a database's information URI (GET /dbname/).
-;
-; * view_fragmentation - If the ratio (as an integer percentage), of the amount
-; of old data (and its supporting metadata) over the view
-; index (view group) file size is equal to or greater than
-; this value, then this view index compaction condition is
-; satisfied. This value is computed as:
-;
-; (file_size - data_size) / file_size * 100
-;
-; The data_size and file_size values can be obtained when
-; querying a view group's information URI
-; (GET /dbname/_design/groupname/_info).
-;
-; * from _and_ to - The period for which a database (and its view groups) compaction
-; is allowed. The value for these parameters must obey the format:
-;
-; HH:MM - HH:MM (HH in [0..23], MM in [0..59])
-;
-; * strict_window - If a compaction is still running after the end of the allowed
-; period, it will be canceled if this parameter is set to 'true'.
-; It defaults to 'false' and it's meaningful only if the *period*
-; parameter is also specified.
-;
-; * parallel_view_compaction - If set to 'true', the database and its views are
-; compacted in parallel. This is only useful on
-; certain setups, like for example when the database
-; and view index directories point to different
-; disks. It defaults to 'false'.
-;
-; Before a compaction is triggered, an estimation of how much free disk space is
-; needed is computed. This estimation corresponds to 2 times the data size of
-; the database or view index. When there's not enough free disk space to compact
-; a particular database or view index, a warning message is logged.
-;
-; Examples:
-;
-; 1) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}]
-; The `foo` database is compacted if its fragmentation is 70% or more.
-; Any view index of this database is compacted only if its fragmentation
-; is 60% or more.
-;
-; 2) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}]
-; Similar to the preceding example but a compaction (database or view index)
-; is only triggered if the current time is between midnight and 4 AM.
-;
-; 3) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}]
-; Similar to the preceding example - a compaction (database or view index)
-; is only triggered if the current time is between midnight and 4 AM. If at
-; 4 AM the database or one of its views is still compacting, the compaction
-; process will be canceled.
-;
-; 4) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}, {parallel_view_compaction, true}]
-; Similar to the preceding example, but a database and its views can be
-; compacted in parallel.
-;
-;_default = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "23:00"}, {to, "04:00"}]
diff --git a/scripts/docker/files/bin/conf/soledad-server_default.conf b/scripts/docker/files/bin/conf/soledad-server_default.conf
deleted file mode 100644
index 5e286374..00000000
--- a/scripts/docker/files/bin/conf/soledad-server_default.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-[soledad-server]
-couch_url = http://localhost:5984
-create_cmd = sudo -u soledad-admin /usr/bin/create-user-db
-admin_netrc = /etc/couchdb/couchdb-soledad-admin.netrc
-batching = 0
diff --git a/scripts/docker/files/bin/run-client-bootstrap.sh b/scripts/docker/files/bin/run-client-bootstrap.sh
deleted file mode 100755
index fbbb42e8..00000000
--- a/scripts/docker/files/bin/run-client-bootstrap.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-# Run a Soledad client connection test.
-#
-# This script is meant to be copied to the docker container and run upon
-# container start.
-
-CMD="/usr/local/soledad/setup-test-env.py"
-REPO="/var/local/soledad"
-
-if [ ! -z "${SOLEDAD_REMOTE}" ]; then
- git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE}
- git -C ${REPO} fetch origin
-fi
-
-if [ ! -z "${SOLEDAD_BRANCH}" ]; then
- git -C ${REPO} checkout ${SOLEDAD_BRANCH}
-fi
-
-${CMD} soledad-client test --server-url ${SOLEDAD_SERVER_URL}
diff --git a/scripts/docker/files/bin/run-client-perf.sh b/scripts/docker/files/bin/run-client-perf.sh
deleted file mode 100755
index 01b27b98..00000000
--- a/scripts/docker/files/bin/run-client-perf.sh
+++ /dev/null
@@ -1,128 +0,0 @@
-#!/bin/sh
-
-# Start a soledad-perf test using a remote server.
-#
-# The script does the following:
-#
-# - configure a remote repository for soledad repo if SOLEDAD_REMOTE is set.
-#
-# - checkout a specific branch if SOLEDAD_BRANCH is set.
-#
-# - run the soledad-perf local twisted server that runs the client. Note
-# that the actual soledad server should be running on another docker
-#   container. This local server is only used to measure the responsiveness
-#   of the soledad client. The script waits for the server to come up before
-# continuing, or else times out after TIMEOUT seconds.
-#
-# - trigger the creation of documents for sync.
-#
-# - start the measurement of server responsiveness and sync stages.
-#
-# - stop the test.
-#
-# This script is meant to be copied to the docker container and run upon
-# container start.
-
-CMD="/usr/local/soledad/setup-test-env.py"
-REPO="/var/local/soledad"
-TIMEOUT=20
-
-#-----------------------------------------------------------------------------
-# configure a remote and checkout a branch
-#-----------------------------------------------------------------------------
-
-if [ ! -z "${SOLEDAD_REMOTE}" ]; then
- git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE}
- git -C ${REPO} fetch origin
-fi
-
-if [ ! -z "${SOLEDAD_BRANCH}" ]; then
- git -C ${REPO} checkout ${SOLEDAD_BRANCH}
-fi
-
-if [ ! -z "${SOLEDAD_PERF_REMOTE}" ]; then
- git -C /var/local/soledad-perf remote set-url origin ${SOLEDAD_PERF_REMOTE}
- git -C /var/local/soledad-perf fetch origin
-fi
-
-if [ ! -z "${SOLEDAD_PERF_BRANCH}" ]; then
- git -C /var/local/soledad-perf checkout ${SOLEDAD_PERF_BRANCH}
-fi
-
-#-----------------------------------------------------------------------------
-# write a configuration file for the perf test
-#-----------------------------------------------------------------------------
-
-cd /var/local/soledad-perf
-
-cat > defaults.conf <<EOF
-[server]
-host = ${SOLEDAD_SERVER_URL}
-
-[client]
-uuid = 1234567890abcdef
-basedir = /tmp/soledad_client_test
-passphrase = 12345678
-
-[sync]
-num_docs = ${SOLEDAD_PRELOAD_NUM}
-payload = /tmp/payload
-payload_size = ${SOLEDAD_PRELOAD_SIZE}
-auth_token = an-auth-token
-
-[test]
-stats_file = ./out/stats.json
-EOF
-
-if [ "${1}" = "--drop-to-shell" ]; then
- /bin/bash
- exit 0
-fi
-
-#-----------------------------------------------------------------------------
-# start the local server and wait for it to come up
-#-----------------------------------------------------------------------------
-
-# start local test server on background
-make soledad-sync-server | grep -v stats | grep -v ping &
-
-# wait for server until timeout
-start=`date +%s`
-elapsed=0
-
-echo "Waiting for perf server to come up..."
-
-while [ ${elapsed} -lt ${TIMEOUT} ]; do
- result=`curl -s http://127.0.0.1:8080/ping`
- if [ ${?} -eq 0 -a "${result}" = "easy!" ]; then
- echo "Perf server (running soledad client) is up!"
- break
- else
- sleep 1
- fi
- now=`date +%s`
- elapsed=`expr ${now} - ${start}`
-done
-
-# exit with an error code if timed out waiting for server
-if [ ${elapsed} -ge ${TIMEOUT} ]; then
- echo "Error: server unreachable at http://127.0.0.1:8080 after ${TIMEOUT} seconds."
- exit 1
-fi
-
-#-----------------------------------------------------------------------------
-# create docs and run test
-#-----------------------------------------------------------------------------
-
-set -e
-
-# create documents in client
-make trigger-create-docs
-
-# launch background series measurement
-make measure-series > /dev/null &
-sleep 5 # wait a bit for some data points
-
-# run a sync and generate a graph
-make trigger-sync
-make trigger-stop
diff --git a/scripts/docker/files/bin/run-perf.sh b/scripts/docker/files/bin/run-perf.sh
deleted file mode 100755
index 72060230..00000000
--- a/scripts/docker/files/bin/run-perf.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-REPO=/builds/leap/soledad/testing
-COUCH_URL="${COUCH_URL:-http://127.0.0.1:5984}"
-SOLEDAD_PRELOAD_NUM="${SOLEDAD_PRELOAD_NUM:-100}"
-
-if [ ! -z "${SOLEDAD_REMOTE}" ]; then
- git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE}
- git -C ${REPO} fetch origin
-fi
-
-if [ ! -z "${SOLEDAD_BRANCH}" ]; then
- git -C ${REPO} checkout ${SOLEDAD_BRANCH}
-fi
-
-cd ${REPO}
-
-tox -e perf -- \
- --durations 0 \
- --couch-url ${COUCH_URL} \
- --twisted \
- --num-docs ${SOLEDAD_PRELOAD_NUM}
diff --git a/scripts/docker/files/bin/run-server.sh b/scripts/docker/files/bin/run-server.sh
deleted file mode 100755
index feedee7e..00000000
--- a/scripts/docker/files/bin/run-server.sh
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/bin/sh
-
-# Start a soledad server inside a docker container.
-#
-# This script will:
-#
-# - optionally check out a specific branch from a specific soledad remote.
-#
-# - create everything a soledad server needs to run (certificate, backend
-#   server database, tables, etc.).
-#
-# - optionally preload the server database with a number of documents equal
-#   to SOLEDAD_PRELOAD_NUM, each with payload size equal to
-#   SOLEDAD_PRELOAD_SIZE.
-#
-# - run the soledad server.
-#
-# This script is meant to be copied to the docker container and run upon
-# container start.
-
-CMD="/usr/local/soledad/setup-test-env.py"
-
-#---------------------------------------------------------------------------
-# optionally check out a specific branch from a specific remote
-#---------------------------------------------------------------------------
-
-REPO="/var/local/soledad"
-
-if [ ! -z "${SOLEDAD_REMOTE}" ]; then
- git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE}
- git -C ${REPO} fetch origin
-fi
-
-if [ ! -z "${SOLEDAD_BRANCH}" ]; then
- git -C ${REPO} checkout ${SOLEDAD_BRANCH}
-fi
-
-#---------------------------------------------------------------------------
-# setup environment for running soledad server
-#---------------------------------------------------------------------------
-
-${CMD} couch start
-${CMD} user-db create
-${CMD} token-db create
-${CMD} token-db insert-token
-${CMD} shared-db create
-${CMD} cert create
-
-#---------------------------------------------------------------------------
-# write a configuration file for the perf test
-#---------------------------------------------------------------------------
-
-if [ "${SOLEDAD_PRELOAD_NUM}" -gt 0 ]; then
- cd /var/local/soledad-perf
-
- cat > defaults.conf <<EOF
-[server]
-host = http://127.0.0.1:2424
-
-[client]
-uuid = 1234567890abcdef
-basedir = /tmp/soledad_client_test
-passphrase = 12345678
-
-[sync]
-num_docs = ${SOLEDAD_PRELOAD_NUM}
-payload = /tmp/payload
-payload_size = ${SOLEDAD_PRELOAD_SIZE}
-auth_token = an-auth-token
-
-[test]
-stats_file = ./out/stats.json
-EOF
-
- echo "Preloading server database..."
- ./scripts/preload_server_database.py
-fi
-
-#---------------------------------------------------------------------------
-# actually run the server
-#---------------------------------------------------------------------------
-
-if [ "${1}" = "--drop-to-shell" ]; then
- /bin/bash
- exit 0
-fi
-
-echo "Starting soledad server..."
-${CMD} soledad-server start --no-daemonize
diff --git a/scripts/docker/files/bin/run-tox.sh b/scripts/docker/files/bin/run-tox.sh
deleted file mode 100755
index 74fde182..00000000
--- a/scripts/docker/files/bin/run-tox.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-REPO=/builds/leap/soledad/testing
-COUCH_URL="${COUCH_URL:-http://127.0.0.1:5984}"
-
-if [ ! -z "${SOLEDAD_REMOTE}" ]; then
- git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE}
- git -C ${REPO} fetch origin
-fi
-
-if [ ! -z "${SOLEDAD_BRANCH}" ]; then
- git -C ${REPO} checkout ${SOLEDAD_BRANCH}
-fi
-
-cd ${REPO}
-
-tox -- --couch-url ${COUCH_URL}
diff --git a/scripts/docker/files/bin/run-trial-from-gitlab-ci.sh b/scripts/docker/files/bin/run-trial-from-gitlab-ci.sh
deleted file mode 100755
index 96436e26..00000000
--- a/scripts/docker/files/bin/run-trial-from-gitlab-ci.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-
-# Run Soledad trial tests in a docker container created by gitlab-ci.
-#
-# Gitlab-ci will copy the current test code into /builds/leap/soledad, so this
-# script has to uninstall currently installed soledad packages and re-install
-# from that location instead.
-#
-# This script is meant to be copied to the docker container and run upon
-# container start.
-
-CMD="/usr/local/soledad/setup-test-env.py"
-BASEDIR="/builds/leap/soledad"
-
-
-install_deps() {
- # ensure all dependencies are installed
- for pkg in common client server; do
- testing="--testing"
- if [ "${pkg}" = "server" ]; then
- # soledad server doesn't currently have a requirements-testing.pip file,
- # so we don't pass the option when that is the case
- testing=""
- fi
-        pip uninstall -y leap.soledad.${pkg}
- (cd ${BASEDIR}/${pkg} \
- && ./pkg/pip_install_requirements.sh ${testing} --use-leap-wheels \
- && python setup.py develop)
- done
-}
-
-
-start_couch() {
-    # currently soledad trial tests need a running couch in the environment
- ${CMD} couch start
-}
-
-
-run_tests() {
- trial leap.soledad.common
-}
-
-
-main() {
- install_deps
- start_couch
- run_tests
-}
-
-main
diff --git a/scripts/docker/files/bin/run-trial.sh b/scripts/docker/files/bin/run-trial.sh
deleted file mode 100755
index f38f3124..00000000
--- a/scripts/docker/files/bin/run-trial.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-
-# Run Soledad trial tests.
-#
-# This script is meant to be copied to the docker container and run upon
-# container start.
-
-CMD="/usr/local/soledad/setup-test-env.py"
-REPO="/var/local/soledad"
-
-if [ ! -z "${SOLEDAD_REMOTE}" ]; then
- git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE}
- git -C ${REPO} fetch origin
-fi
-
-if [ ! -z "${SOLEDAD_BRANCH}" ]; then
- git -C ${REPO} checkout ${SOLEDAD_BRANCH}
-fi
-
-# currently soledad trial tests need a running couch in the environment
-${CMD} couch start
-
-trial leap.soledad.common
diff --git a/scripts/docker/files/bin/setup-test-env.py b/scripts/docker/files/bin/setup-test-env.py
deleted file mode 100755
index bbf5267c..00000000
--- a/scripts/docker/files/bin/setup-test-env.py
+++ /dev/null
@@ -1,647 +0,0 @@
-#!/usr/bin/env python
-
-
-"""
-This script knows how to build a minimum environment for Soledad Server, which
-includes the following:
-
- - Couch server startup
- - Token and shared database initialization
- - Soledad Server startup
-
-Options can be passed for configuring the different environments, so this may
-be used by other programs to set up different environments for arbitrary tests.
-Use the --help option to get information on usage.
-
-For some commands you will need an environment with Soledad python packages
-available, thus you might want to explicitly call python and not rely on the
-shebang line.
-"""
-
-
-import time
-import os
-import signal
-import tempfile
-import psutil
-from argparse import ArgumentParser
-from subprocess import call
-from couchdb import Server
-from couchdb.http import PreconditionFailed
-from couchdb.http import ResourceConflict
-from couchdb.http import ResourceNotFound
-from hashlib import sha512
-
-from leap.soledad.common.l2db.errors import DatabaseDoesNotExist
-
-
-#
-# Utilities
-#
-
-def get_pid(pidfile):
- if not os.path.isfile(pidfile):
- return 0
- try:
- with open(pidfile) as f:
- return int(f.read())
- except IOError:
- return 0
-
-
-def pid_is_running(pid):
- try:
- psutil.Process(pid)
- return True
- except psutil.NoSuchProcess:
- return False
-
-
-def pidfile_is_running(pidfile):
- try:
- pid = get_pid(pidfile)
- psutil.Process(pid)
- return pid
- except psutil.NoSuchProcess:
- return False
-
-
-def status_from_pidfile(args, default_basedir, name):
- basedir = _get_basedir(args, default_basedir)
- pidfile = os.path.join(basedir, args.pidfile)
- try:
- pid = get_pid(pidfile)
- psutil.Process(pid)
- print "[+] %s is running with pid %d" % (name, pid)
- except (IOError, psutil.NoSuchProcess):
- print "[-] %s stopped" % name
-
-
-def kill_all_executables(args):
- basename = os.path.basename(args.executable)
- pids = [int(pid) for pid in os.listdir('/proc') if pid.isdigit()]
- for pid in pids:
- try:
- p = psutil.Process(pid)
- if p.name() == basename:
- print '[!] killing - pid: %d' % pid
- os.kill(pid, signal.SIGKILL)
- except:
- pass
-
-
-#
-# Couch Server control
-#
-
-COUCH_EXECUTABLE = '/usr/bin/couchdb'
-ERLANG_EXECUTABLE = 'beam.smp'
-COUCH_TEMPLATE = os.path.join(
- os.path.dirname(os.path.realpath(__file__)),
- './conf/couchdb_default.ini')
-COUCH_PIDFILE = 'couchdb.pid'
-COUCH_LOGFILE = 'couchdb.log'
-COUCH_PORT = 5984
-COUCH_HOST = '127.0.0.1'
-COUCH_BASEDIR = '/tmp/couch_test'
-
-
-def _get_basedir(args, default):
- basedir = args.basedir
- if not basedir:
- basedir = default
- if not os.path.isdir(basedir):
- os.mkdir(basedir)
- return basedir
-
-
-def couch_server_start(args):
- basedir = _get_basedir(args, COUCH_BASEDIR)
- pidfile = os.path.join(basedir, args.pidfile)
- logfile = os.path.join(basedir, args.logfile)
-
- # check if already running
- pid = get_pid(pidfile)
- if pid_is_running(pid):
- print '[*] error: already running - pid: %d' % pid
- exit(1)
- if os.path.isfile(pidfile):
- os.unlink(pidfile)
-
- # generate a config file from template if needed
- config_file = args.config_file
- if not config_file:
- config_file = tempfile.mktemp(prefix='couch_config_', dir=basedir)
- lines = []
- with open(args.template) as f:
- lines = f.readlines()
- lines = map(lambda l: l.replace('BASEDIR', basedir), lines)
- with open(config_file, 'w') as f:
- f.writelines(lines)
-
- # start couch server
- try:
- call([
- args.executable,
- '-n', # reset configuration file chain (including system default)
- '-a %s' % config_file, # add configuration FILE to chain
- '-b', # spawn as a background process
- '-p %s' % pidfile, # set the background PID FILE
- '-o %s' % logfile, # redirect background stdout to FILE
- '-e %s' % logfile]) # redirect background stderr to FILE
- except Exception as e:
- print '[*] error: could not start couch server - %s' % str(e)
- exit(1)
-
- # couch may take a bit to store the pid in the pidfile, so we just wait
- # until it does
-    pid = get_pid(pidfile)
-    while not pid:
-        time.sleep(0.1)
-        pid = get_pid(pidfile)
-
- print '[+] couch is running with pid: %d' % pid
-
-
-def couch_server_stop(args):
- basedir = _get_basedir(args, COUCH_BASEDIR)
- pidfile = os.path.join(basedir, args.pidfile)
- pid = get_pid(pidfile)
- if not pid_is_running(pid):
- print '[*] error: no running server found'
- exit(1)
- call([
- args.executable,
- '-p %s' % pidfile, # set the background PID FILE
- '-k']) # kill the background process, will respawn if needed
- print '[-] stopped couch server with pid %d ' % pid
-
-
-def couch_status_from_pidfile(args):
- status_from_pidfile(args, COUCH_BASEDIR, 'couch')
-
-
-#
-# User DB maintenance #
-#
-
-def user_db_create(args):
- from leap.soledad.common.couch import CouchDatabase
- url = 'http://localhost:%d/user-%s' % (args.port, args.uuid)
- try:
- CouchDatabase.open_database(
- url=url, create=False, replica_uid=None)
- print '[*] error: database "user-%s" already exists' % args.uuid
- exit(1)
- except DatabaseDoesNotExist:
- CouchDatabase.open_database(
- url=url, create=True, replica_uid=None)
- print '[+] database created: user-%s' % args.uuid
-
-
-def user_db_delete(args):
- s = _couch_get_server(args)
- try:
- dbname = 'user-%s' % args.uuid
- s.delete(dbname)
- print '[-] database deleted: %s' % dbname
- except ResourceNotFound:
- print '[*] error: database "%s" does not exist' % dbname
- exit(1)
-
-
-#
-# Soledad Server control
-#
-
-TWISTD_EXECUTABLE = 'twistd' # use whatever is available on path
-
-SOLEDAD_SERVER_BASEDIR = '/tmp/soledad_server_test'
-SOLEDAD_SERVER_CONFIG_FILE = './conf/soledad_default.ini'
-SOLEDAD_SERVER_PIDFILE = 'soledad.pid'
-SOLEDAD_SERVER_LOGFILE = 'soledad.log'
-SOLEDAD_SERVER_PRIVKEY = 'soledad_privkey.pem'
-SOLEDAD_SERVER_CERTKEY = 'soledad_certkey.pem'
-SOLEDAD_SERVER_PORT = 2424
-SOLEDAD_SERVER_AUTH_TOKEN = 'an-auth-token'
-SOLEDAD_SERVER_URL = 'https://localhost:2424'
-
-SOLEDAD_CLIENT_PASS = '12345678'
-SOLEDAD_CLIENT_BASEDIR = '/tmp/soledad_client_test'
-SOLEDAD_CLIENT_UUID = '1234567890abcdef'
-
-
-def soledad_server_start(args):
- basedir = _get_basedir(args, SOLEDAD_SERVER_BASEDIR)
- pidfile = os.path.join(basedir, args.pidfile)
- logfile = os.path.join(basedir, args.logfile)
- private_key = os.path.join(basedir, args.private_key)
- cert_key = os.path.join(basedir, args.cert_key)
-
-    pid = get_pid(pidfile)
-    if pid_is_running(pid):
-        print "[*] error: already running - pid: %d" % pid
- exit(1)
-
- port = args.port
- if args.tls:
- port = 'ssl:%d:privateKey=%s:certKey=%s:sslmethod=SSLv23_METHOD' \
- % (args.port, private_key, cert_key)
- params = [
- '--logfile=%s' % logfile,
- '--pidfile=%s' % pidfile,
- 'web',
- '--wsgi=leap.soledad.server.application.wsgi_application',
- '--port=%s' % port
- ]
- if args.no_daemonize:
- params.insert(0, '--nodaemon')
-
- call([args.executable] + params)
-
- pid = get_pid(pidfile)
- print '[+] soledad-server is running with pid %d' % pid
-
-
-def soledad_server_stop(args):
- basedir = _get_basedir(args, SOLEDAD_SERVER_BASEDIR)
- pidfile = os.path.join(basedir, args.pidfile)
- pid = get_pid(pidfile)
- if not pid_is_running(pid):
- print '[*] error: no running server found'
- exit(1)
- os.kill(pid, signal.SIGKILL)
- print '[-] stopped - pid: %d' % pid
-
-
-def soledad_server_status_from_pidfile(args):
- status_from_pidfile(args, SOLEDAD_SERVER_BASEDIR, 'soledad-server')
-
-
-# couch helpers
-
-def _couch_get_server(args):
- url = 'http://%s:%d/' % (args.host, args.port)
- return Server(url=url)
-
-
-def _couch_create_db(args, dbname):
- s = _couch_get_server(args)
- # maybe create the database
- try:
- s.create(dbname)
- print '[+] database created: %s' % dbname
- except PreconditionFailed as e:
- error_code, _ = e.message
- if error_code == 'file_exists':
- print '[*] error: "%s" database already exists' % dbname
- exit(1)
- return s
-
-
-def _couch_delete_db(args, dbname):
- s = _couch_get_server(args)
- # maybe delete the database
- try:
- s.delete(dbname)
- print '[-] database deleted: %s' % dbname
- except ResourceNotFound:
- print '[*] error: "%s" database does not exist' % dbname
- exit(1)
-
-
-def _token_dbname():
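- # the tokens database rotates every 30 days; the db name carries the index
- # of the current 30-day period since the epoch. Illustrative example:
- # int(1467331200 / (30 * 24 * 3600)) == 566, so on 2016-07-01 (UTC) the
- # name would be 'tokens_566'.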
- return 'tokens_' + str(int(time.time() / (30 * 24 * 3600)))
-
-
-def token_db_create(args):
- dbname = _token_dbname()
- _couch_create_db(args, dbname)
-
-
-def token_db_insert_token(args):
- s = _couch_get_server(args)
- try:
- dbname = _token_dbname()
- db = s[dbname]
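- # the document id is the sha512 hex digest of the token string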
- token = sha512(args.auth_token).hexdigest()
- db[token] = {
- 'type': 'Token',
- 'user_id': args.uuid,
- }
- print '[+] token for uuid "%s" created in tokens database' % args.uuid
- except ResourceConflict:
- print '[*] error: token for uuid "%s" already exists in tokens database' \
- % args.uuid
- exit(1)
-
-
-def token_db_delete(args):
- dbname = _token_dbname()
- _couch_delete_db(args, dbname)
-
-
-#
-# Shared DB creation
-#
-
-def shared_db_create(args):
- _couch_create_db(args, 'shared')
-
-
-def shared_db_delete(args):
- _couch_delete_db(args, 'shared')
-
-
-#
-# Certificate creation
-#
-
-CERT_CONFIG_FILE = os.path.join(
- os.path.dirname(os.path.realpath(__file__)),
- './conf/cert_default.conf')
-
-
-def cert_create(args):
- private_key = os.path.join(args.basedir, args.private_key)
- cert_key = os.path.join(args.basedir, args.cert_key)
- try:
- os.mkdir(args.basedir)
- except OSError:
- pass
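- # generate a self-signed certificate (2048-bit RSA, SHA-256, unencrypted
- # key, valid for 365 days) using the subject data from the config file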
- call([
- 'openssl',
- 'req',
- '-x509',
- '-sha256',
- '-nodes',
- '-days', '365',
- '-newkey', 'rsa:2048',
- '-config', args.config_file,
- '-keyout', private_key,
- '-out', cert_key])
-
-
-def cert_delete(args):
- private_key = os.path.join(args.basedir, args.private_key)
- cert_key = os.path.join(args.basedir, args.cert_key)
- try:
- os.unlink(private_key)
- os.unlink(cert_key)
- except OSError:
- pass
-
-
-#
-# Soledad Client Control
-#
-
-def soledad_client_test(args):
- # maybe infer missing parameters
- basedir = args.basedir
- if not basedir:
- basedir = tempfile.mkdtemp()
- server_url = args.server_url
- if not server_url:
- server_url = 'http://127.0.0.1:%d' % args.port
-
- # get a soledad instance
- from client_side_db import _get_soledad_instance
- _get_soledad_instance(
- args.uuid,
- unicode(args.passphrase),
- basedir,
- server_url,
- args.cert_key,
- args.auth_token)
-
-
-#
-# Command line parsing helper
-#
-
-class Command(object):
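- """Thin wrapper around argparse subparsers, associating each
- subcommand with a handler function."""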
-
- def __init__(self, parser=None):
- self.commands = []
- self.parser = parser or ArgumentParser()
- self.subparsers = None
-
- def add_command(self, *args, **kwargs):
- # pop out the func parameter to use later
- func = kwargs.pop('func', None)
- # create the subparsers handler on first use
- if not self.subparsers:
- self.subparsers = self.parser.add_subparsers()
- # create command and associate a function with it
- command = Command(self.subparsers.add_parser(*args, **kwargs))
- if func:
- command.parser.set_defaults(func=func)
- self.commands.append(command)
- return command
-
- def set_func(self, func):
- self.parser.set_defaults(func=func)
-
- def add_argument(self, *args, **kwargs):
- self.parser.add_argument(*args, **kwargs)
-
- def add_arguments(self, arglist):
- for args, kwargs in arglist:
- self.add_argument(*args, **kwargs)
-
- def parse_args(self):
- return self.parser.parse_args()
-
-
-#
-# Command Line Interface
-#
-
-def run_cli():
- cli = Command()
-
- # couch command with subcommands
- cmd_couch = cli.add_command('couch', help="manage couch server")
-
- cmd_couch_start = cmd_couch.add_command('start', func=couch_server_start)
- cmd_couch_start.add_arguments([
- (['--executable', '-e'], {'default': COUCH_EXECUTABLE}),
- (['--basedir', '-b'], {}),
- (['--config-file', '-c'], {}),
- (['--template', '-t'], {'default': COUCH_TEMPLATE}),
- (['--pidfile', '-p'], {'default': COUCH_PIDFILE}),
- (['--logfile', '-l'], {'default': COUCH_LOGFILE})
- ])
-
- cmd_couch_stop = cmd_couch.add_command('stop', func=couch_server_stop)
- cmd_couch_stop.add_arguments([
- (['--executable', '-e'], {'default': COUCH_EXECUTABLE}),
- (['--basedir', '-b'], {}),
- (['--pidfile', '-p'], {'default': COUCH_PIDFILE}),
- ])
-
- cmd_couch_status = cmd_couch.add_command(
- 'status', func=couch_status_from_pidfile)
- cmd_couch_status.add_arguments([
- (['--basedir', '-b'], {}),
- (['--pidfile', '-p'], {'default': COUCH_PIDFILE})])
-
- cmd_couch_kill = cmd_couch.add_command('kill', func=kill_all_executables)
- cmd_couch_kill.add_argument(
- '--executable', '-e', default=ERLANG_EXECUTABLE)
-
- # user database maintenance
- cmd_user_db = cli.add_command('user-db')
-
- cmd_user_db_create = cmd_user_db.add_command('create', func=user_db_create)
- cmd_user_db_create.add_arguments([
- (['--host', '-H'], {'default': COUCH_HOST}),
- (['--port', '-P'], {'type': int, 'default': COUCH_PORT}),
- (['--uuid', '-u'], {'default': SOLEDAD_CLIENT_UUID}),
- ])
-
- cmd_user_db_delete = cmd_user_db.add_command(
- 'delete', func=user_db_delete)
- cmd_user_db_delete.add_arguments([
- (['--host', '-H'], {'default': COUCH_HOST}),
- (['--port', '-P'], {'type': int, 'default': COUCH_PORT}),
- (['--uuid', '-u'], {'default': SOLEDAD_CLIENT_UUID})
- ])
-
- # soledad server command with subcommands
- cmd_sol_server = cli.add_command(
- 'soledad-server', help="manage soledad server")
-
- cmd_sol_server_start = cmd_sol_server.add_command(
- 'start', func=soledad_server_start)
- cmd_sol_server_start.add_arguments([
- (['--executable', '-e'], {'default': TWISTD_EXECUTABLE}),
- (['--config-file', '-c'], {'default': SOLEDAD_SERVER_CONFIG_FILE}),
- (['--pidfile', '-p'], {'default': SOLEDAD_SERVER_PIDFILE}),
- (['--logfile', '-l'], {'default': SOLEDAD_SERVER_LOGFILE}),
- (['--port', '-P'], {'type': int, 'default': SOLEDAD_SERVER_PORT}),
- (['--tls', '-t'], {'action': 'store_true'}),
- (['--private-key', '-K'], {'default': SOLEDAD_SERVER_PRIVKEY}),
- (['--cert-key', '-C'], {'default': SOLEDAD_SERVER_CERTKEY}),
- (['--no-daemonize', '-n'], {'action': 'store_true'}),
- (['--basedir', '-b'], {'default': SOLEDAD_SERVER_BASEDIR}),
- ])
-
- cmd_sol_server_stop = cmd_sol_server.add_command(
- 'stop', func=soledad_server_stop)
- cmd_sol_server_stop.add_arguments([
- (['--basedir', '-b'], {'default': SOLEDAD_SERVER_BASEDIR}),
- (['--pidfile', '-p'], {'default': SOLEDAD_SERVER_PIDFILE}),
- ])
-
- cmd_sol_server_status = cmd_sol_server.add_command(
- 'status', func=soledad_server_status_from_pidfile)
- cmd_sol_server_status.add_arguments([
- (['--basedir', '-b'], {'default': SOLEDAD_SERVER_BASEDIR}),
- (['--pidfile', '-p'], {'default': SOLEDAD_SERVER_PIDFILE}),
- ])
-
- cmd_sol_server_kill = cmd_sol_server.add_command(
- 'kill', func=kill_all_executables)
- cmd_sol_server_kill.add_argument(
- '--executable', '-e', default=TWISTD_EXECUTABLE)
-
- # token db maintenance
- cmd_token_db = cli.add_command('token-db')
- cmd_token_db_create = cmd_token_db.add_command(
- 'create', func=token_db_create)
- cmd_token_db_create.add_arguments([
- (['--host', '-H'], {'default': COUCH_HOST}),
- (['--uuid', '-u'], {'default': SOLEDAD_CLIENT_UUID}),
- (['--port', '-P'], {'type': int, 'default': COUCH_PORT}),
- ])
-
- cmd_token_db_insert_token = cmd_token_db.add_command(
- 'insert-token', func=token_db_insert_token)
- cmd_token_db_insert_token.add_arguments([
- (['--host', '-H'], {'default': COUCH_HOST}),
- (['--uuid', '-u'], {'default': SOLEDAD_CLIENT_UUID}),
- (['--port', '-P'], {'type': int, 'default': COUCH_PORT}),
- (['--auth-token', '-a'], {'default': SOLEDAD_SERVER_AUTH_TOKEN}),
- ])
-
- cmd_token_db_delete = cmd_token_db.add_command(
- 'delete', func=token_db_delete)
- cmd_token_db_delete.add_arguments([
- (['--host', '-H'], {'default': COUCH_HOST}),
- (['--uuid', '-u'], {'default': SOLEDAD_CLIENT_UUID}),
- (['--port', '-P'], {'type': int, 'default': COUCH_PORT}),
- ])
-
- # shared db creation
- cmd_shared_db = cli.add_command('shared-db')
-
- cmd_shared_db_create = cmd_shared_db.add_command(
- 'create', func=shared_db_create)
- cmd_shared_db_create.add_arguments([
- (['--host', '-H'], {'default': COUCH_HOST}),
- (['--port', '-P'], {'type': int, 'default': COUCH_PORT}),
- ])
-
- cmd_shared_db_delete = cmd_shared_db.add_command(
- 'delete', func=shared_db_delete)
- cmd_shared_db_delete.add_arguments([
- (['--host', '-H'], {'default': COUCH_HOST}),
- (['--port', '-P'], {'type': int, 'default': COUCH_PORT}),
- ])
-
- # certificate generation
- cmd_cert = cli.add_command('cert', help="create tls certificates")
-
- cmd_cert_create = cmd_cert.add_command('create', func=cert_create)
- cmd_cert_create.add_arguments([
- (['--basedir', '-b'], {'default': SOLEDAD_SERVER_BASEDIR}),
- (['--config-file', '-c'], {'default': CERT_CONFIG_FILE}),
- (['--private-key', '-K'], {'default': SOLEDAD_SERVER_PRIVKEY}),
- (['--cert-key', '-C'], {'default': SOLEDAD_SERVER_CERTKEY}),
- ])
-
- cmd_cert_delete = cmd_cert.add_command('delete', func=cert_delete)
- cmd_cert_delete.add_arguments([
- (['--basedir', '-b'], {'default': SOLEDAD_SERVER_BASEDIR}),
- (['--private-key', '-K'], {'default': SOLEDAD_SERVER_PRIVKEY}),
- (['--cert-key', '-C'], {'default': SOLEDAD_SERVER_CERTKEY}),
- ])
-
- # soledad client command with subcommands
- cmd_sol_client = cli.add_command(
- 'soledad-client', help="manage soledad client")
-
- cmd_sol_client_test = cmd_sol_client.add_command(
- 'test', func=soledad_client_test)
- cmd_sol_client_test.add_arguments([
- (['--port', '-P'], {'type': int, 'default': SOLEDAD_SERVER_PORT}),
- (['--tls', '-t'], {'action': 'store_true'}),
- (['--uuid', '-u'], {'default': SOLEDAD_CLIENT_UUID}),
- (['--passphrase', '-k'], {'default': SOLEDAD_CLIENT_PASS}),
- (['--basedir', '-b'], {'default': SOLEDAD_CLIENT_BASEDIR}),
- (['--server-url', '-s'], {'default': SOLEDAD_SERVER_URL}),
- (['--cert-key', '-C'], {'default': os.path.join(
- SOLEDAD_SERVER_BASEDIR,
- SOLEDAD_SERVER_CERTKEY)}),
- (['--auth-token', '-a'], {'default': SOLEDAD_SERVER_AUTH_TOKEN}),
- ])
-
- # parse and run cli
- args = cli.parse_args()
- args.func(args)
-
-
-if __name__ == '__main__':
- run_cli()
diff --git a/scripts/docker/files/bin/util.py b/scripts/docker/files/bin/util.py
deleted file mode 100644
index e7e2ef9a..00000000
--- a/scripts/docker/files/bin/util.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import re
-import psutil
-import time
-import threading
-import argparse
-import pytz
-import datetime
-
-
-class ValidateUserHandle(argparse.Action):
- def __call__(self, parser, namespace, values, option_string=None):
- m = re.compile(r'^([^@]+)@([^@]+\.[^@]+)$')
- res = m.match(values)
- if res is None:
- parser.error('User handle should have the form user@provider.')
- setattr(namespace, 'username', res.groups()[0])
- setattr(namespace, 'provider', res.groups()[1])
-
-
-class StatsLogger(threading.Thread):
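- """Log global and per-process CPU and memory statistics to a file at
- regular intervals until stopped. Illustrative output (values and test
- name are made up; the proc columns repeat for each tracked process):
-
- # test_name: perf
- # start_time: 2016-07-01 00:00:00+00:00
- # elapsed_time total_cpu total_memory proc_cpu proc_memory
- 0.000100 12.300000 42.100000 1.200000 3.400000
- # end_time: 2016-07-01 00:01:40+00:00
- """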
-
- def __init__(self, name, fname, procs=None, interval=0.01):
- threading.Thread.__init__(self)
- self._stopped = True
- self._name = name
- self._fname = fname
- self._procs = self._find_procs(procs or [])
- self._interval = interval
-
- def _find_procs(self, procs):
- return filter(lambda p: p.name in procs, psutil.process_iter())
-
- def run(self):
- self._stopped = False
- with open(self._fname, 'w') as f:
- self._start = time.time()
- f.write(self._make_header())
- while not self._stopped:
- f.write('%s %s\n' %
- (self._make_general_stats(), self._make_proc_stats()))
- time.sleep(self._interval)
- f.write(self._make_footer())
-
- def _make_general_stats(self):
- now = time.time()
- stats = []
- stats.append("%f" % (now - self._start)) # elapsed time
- stats.append("%f" % psutil.cpu_percent()) # total cpu
- stats.append("%f" % psutil.virtual_memory().percent) # total memory
- return ' '.join(stats)
-
- def _make_proc_stats(self):
- stats = []
- for p in self._procs:
- stats.append('%f' % p.get_cpu_percent()) # proc cpu
- stats.append('%f' % p.get_memory_percent()) # proc memory
- return ' '.join(stats)
-
- def _make_header(self):
- header = []
- header.append('# test_name: %s' % self._name)
- header.append('# start_time: %s' % datetime.datetime.now(pytz.utc))
- header.append(
- '# elapsed_time total_cpu total_memory proc_cpu proc_memory ')
- return '\n'.join(header) + '\n'
-
- def _make_footer(self):
- footer = []
- footer.append('# end_time: %s' % datetime.datetime.now(pytz.utc))
- return '\n'.join(footer)
-
- def stop(self):
- self._stopped = True
-
-
diff --git a/scripts/docker/files/bin/util.sh b/scripts/docker/files/bin/util.sh
deleted file mode 100644
index 77287d0d..00000000
--- a/scripts/docker/files/bin/util.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh
-
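-# Point the soledad repository at an alternative remote and/or branch.
-# The caller is expected to set REPO and, optionally, SOLEDAD_REMOTE and
-# SOLEDAD_BRANCH. Illustrative usage:
-#
-#   REPO=/builds/leap/soledad SOLEDAD_BRANCH=develop configure_soledad_repos
-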
-configure_soledad_repos() {
- if [ ! -z "${SOLEDAD_REMOTE}" ]; then
- git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE}
- git -C ${REPO} fetch origin
- fi
-
- if [ ! -z "${SOLEDAD_BRANCH}" ]; then
- git -C ${REPO} checkout ${SOLEDAD_BRANCH}
- fi
-}
diff --git a/scripts/docker/files/build/install-deps-from-repos.sh b/scripts/docker/files/build/install-deps-from-repos.sh
deleted file mode 100755
index 46530c86..00000000
--- a/scripts/docker/files/build/install-deps-from-repos.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-# Install dependencies needed to run client and server in a test environment.
-#
-# In detail, this script does the following:
-#
-# - install dependencies for packages in /var/local from their requirements
-# files in each of the repositories, using python wheels when possible.
-#
-# - install the python packages in development mode
-#
-# This script is meant to be copied to the docker container during container
-# build and run after system dependencies have been installed.
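-#
-# For each package, the loop below roughly amounts to (illustrative, for the
-# soledad/client package, when a requirements-testing.pip file is present):
-#
-#   cd /var/local/soledad/client
-#   pkg/pip_install_requirements.sh --testing --use-leap-wheels
-#   python setup.py develop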
-
-BASEDIR="/var/local"
-
-# install dependencies and packages
-install_script="pkg/pip_install_requirements.sh"
-opts="--use-leap-wheels"
-pkgs="leap_pycommon soledad/common soledad/client soledad/server"
-
-for pkg in ${pkgs}; do
- pkgdir=${BASEDIR}/${pkg}
- testing=""
- if [ -f ${pkgdir}/pkg/requirements-testing.pip ]; then
- testing="--testing"
- fi
- (cd ${pkgdir} && ${install_script} ${testing} ${opts})
- (cd ${pkgdir} && python setup.py develop)
-done
diff --git a/scripts/docker/helper/get-container-ip.sh b/scripts/docker/helper/get-container-ip.sh
deleted file mode 100755
index 2b392350..00000000
--- a/scripts/docker/helper/get-container-ip.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/sh
-
-# Print the IP of a container to stdout, given its id. Check the output of
-# the `docker inspect` command for more details:
-#
-# https://docs.docker.com/engine/reference/commandline/inspect/
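-#
-# Example usage (container id and address are illustrative):
-#
-#   $ ./get-container-ip.sh c0ffee123456
-#   172.17.0.2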
-
-if [ ${#} -ne 1 ]; then
- echo "Usage: ${0} container_id"
- exit 1
-fi
-
-container_id=${1}
-
-/usr/bin/docker \
- inspect \
- --format='{{.NetworkSettings.IPAddress}}' \
- ${container_id}
diff --git a/scripts/docker/helper/run-test.sh b/scripts/docker/helper/run-test.sh
deleted file mode 100755
index 9b3ec0c9..00000000
--- a/scripts/docker/helper/run-test.sh
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/bin/sh
-
-# Run two docker containers, one with a soledad server and another with a
-# soledad client running a test.
-#
-# As there are many possible tests, you have to pass an argument to the
-# script saying which test you want to run. Currently, possible values are
-# "perf" and "bootstrap".
-#
-# After launching the server container, the script waits for TIMEOUT seconds
-# for it to come up. If we fail to detect the server, the script exits with
-# nonzero status.
-
-# seconds to wait before giving up waiting for the server
-TIMEOUT=20
-
-# parse command
-if [ ${#} -lt 1 -o ${#} -gt 2 ]; then
- echo "Usage: ${0} perf|bootstrap"
- exit 1
-fi
-
-test=${1}
-if [ "${test}" != "perf" -a "${test}" != "bootstrap" ]; then
- echo "Usage: ${0} perf|bootstrap"
- exit 1
-fi
-
-branch=""
-if [ ${#} -eq 2 ]; then
- branch="SOLEDAD_BRANCH=${2}"
-fi
-
-# make sure the image is up to date
-make image
-
-# get script name and path
-script=$(readlink -f "$0")
-scriptpath=$(dirname "${script}")
-
-# run the server
-tempfile=`mktemp -u`
-make run-server CONTAINER_ID_FILE=${tempfile} ${branch}
-
-# wait for server until timeout
-container_id=`cat ${tempfile}`
-server_ip=`${scriptpath}/get-container-ip.sh ${container_id}`
-start=`date +%s`
-elapsed=0
-
-echo "Waiting for soledad server container to come up..."
-
-while [ ${elapsed} -lt ${TIMEOUT} ]; do
- curl -s http://${server_ip}:2424 > /dev/null
- if [ ${?} -eq 0 ]; then
- echo "Soledad server container is up!"
- break
- else
- sleep 1
- fi
- now=`date +%s`
- elapsed=`expr ${now} - ${start}`
-done
-
-# exit with an error code if timed out waiting for server
-if [ ${elapsed} -ge ${TIMEOUT} ]; then
- echo "Error: server unreachable at ${server_ip} after ${TIMEOUT} seconds."
- exit 1
-fi
-
-set -e
-
-# run the test
-make run-client-${test} CONTAINER_ID_FILE=${tempfile} ${branch}
-rm -f ${tempfile}
diff --git a/scripts/docker/helper/run-until-error.sh b/scripts/docker/helper/run-until-error.sh
deleted file mode 100755
index a4cab6ec..00000000
--- a/scripts/docker/helper/run-until-error.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh
-
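-# Repeatedly recreate the containers and run the perf test, stopping on the
-# first failing run or after 10 successful runs.
-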
-status=0
-runs=10
-
-while [ ${status} -eq 0 -a ${runs} -gt 0 ]; do
- echo "=== RUN ${runs}"
- make rm-all-containers
- make run-perf-test
- status=${?}
- runs=`expr ${runs} - 1`
-done