From 78d61dfaadf9bcac7258a33738c660b238b7bf27 Mon Sep 17 00:00:00 2001
From: drebs
Date: Fri, 10 Jun 2016 22:07:35 -0300
Subject: [test] refactor of docker scripts

---
 scripts/docker/Dockerfile | 41 +-
 scripts/docker/Makefile | 18 +-
 scripts/docker/files/apt/leap.list | 4 +
 scripts/docker/files/bin/client_side_db.py | 322 +++++++++++
 scripts/docker/files/bin/conf/cert_default.conf | 15 +
 scripts/docker/files/bin/conf/couchdb_default.ini | 361 ++++++++++++
 .../files/bin/conf/soledad-server_default.conf | 5 +
 scripts/docker/files/bin/run-client-bootstrap.sh | 20 +
 scripts/docker/files/bin/run-client-perf.sh | 128 +++++
 scripts/docker/files/bin/run-server.sh | 89 +++
 scripts/docker/files/bin/run-trial.sh | 23 +
 scripts/docker/files/bin/setup-test-env.py | 640 +++++++++++++++++++++
 scripts/docker/files/bin/util.py | 75 +++
 scripts/docker/files/bin/util.sh | 12 +
 .../docker/files/build/install-deps-from-repos.sh | 30 +
 scripts/docker/files/client_side_db.py | 322 -----------
 scripts/docker/files/conf/cert_default.conf | 15 -
 scripts/docker/files/conf/couchdb_default.ini | 361 ------------
 .../docker/files/conf/soledad-server_default.conf | 5 -
 scripts/docker/files/leap.list | 4 -
 scripts/docker/files/run-perf-test.sh | 124 ----
 scripts/docker/files/setup-env.sh | 55 --
 scripts/docker/files/start-client-test.sh | 20 -
 scripts/docker/files/start-server.sh | 84 ---
 scripts/docker/files/start-trial-test.sh | 23 -
 scripts/docker/files/test-env.py | 640 ---------------------
 scripts/docker/files/util.py | 75 ---
 scripts/docker/helper/run-test.sh | 8 +-
 scripts/docker/helper/run-until-error.sh | 12 +
 29 files changed, 1769 insertions(+), 1762 deletions(-)
 create mode 100644 scripts/docker/files/apt/leap.list
 create mode 100644 scripts/docker/files/bin/client_side_db.py
 create mode 100644 scripts/docker/files/bin/conf/cert_default.conf
 create mode 100644 scripts/docker/files/bin/conf/couchdb_default.ini
 create mode 100644 scripts/docker/files/bin/conf/soledad-server_default.conf
 create mode 100755 scripts/docker/files/bin/run-client-bootstrap.sh
 create mode 100755 scripts/docker/files/bin/run-client-perf.sh
 create mode 100755 scripts/docker/files/bin/run-server.sh
 create mode 100755 scripts/docker/files/bin/run-trial.sh
 create mode 100755 scripts/docker/files/bin/setup-test-env.py
 create mode 100644 scripts/docker/files/bin/util.py
 create mode 100644 scripts/docker/files/bin/util.sh
 create mode 100755 scripts/docker/files/build/install-deps-from-repos.sh
 delete mode 100644 scripts/docker/files/client_side_db.py
 delete mode 100644 scripts/docker/files/conf/cert_default.conf
 delete mode 100644 scripts/docker/files/conf/couchdb_default.ini
 delete mode 100644 scripts/docker/files/conf/soledad-server_default.conf
 delete mode 100644 scripts/docker/files/leap.list
 delete mode 100755 scripts/docker/files/run-perf-test.sh
 delete mode 100755 scripts/docker/files/setup-env.sh
 delete mode 100755 scripts/docker/files/start-client-test.sh
 delete mode 100755 scripts/docker/files/start-server.sh
 delete mode 100755 scripts/docker/files/start-trial-test.sh
 delete mode 100755 scripts/docker/files/test-env.py
 delete mode 100644 scripts/docker/files/util.py
 create mode 100755 scripts/docker/helper/run-until-error.sh

diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile
index 36180633..915508ea 100644
--- a/scripts/docker/Dockerfile
+++ b/scripts/docker/Dockerfile
@@ -5,13 +5,18 @@ FROM debian
 EXPOSE 2424
 # install dependencies from debian repos
-COPY files/leap.list /etc/apt/sources.list.d/
+COPY 
files/apt/leap.list /etc/apt/sources.list.d/ RUN apt-get update RUN apt-get -y --force-yes install leap-archive-keyring RUN apt-get update + RUN apt-get -y install git +RUN apt-get -y install vim +RUN apt-get -y install python-ipdb + +# install python deps RUN apt-get -y install libpython2.7-dev RUN apt-get -y install libffi-dev RUN apt-get -y install libssl-dev @@ -23,30 +28,24 @@ RUN apt-get -y install python-scrypt RUN apt-get -y install leap-keymanager RUN apt-get -y install python-tz -# soledad-perf deps +RUN pip install -U pip +RUN pip install psutil + +# install soledad-perf deps RUN pip install klein -RUN apt-get -y install gnuplot RUN apt-get -y install curl RUN apt-get -y install httperf -# debugging deps -RUN apt-get -y install vim -RUN apt-get -y install python-ipdb +# clone repositories +ENV BASEURL "https://github.com/leapcode" +ENV VARDIR "/var/local" +ENV REPOS "soledad leap_pycommon soledad-perf" +RUN for repo in ${REPOS}; do git clone ${BASEURL}/${repo}.git /var/local/${repo}; done # copy over files to help setup the environment and run soledad RUN mkdir -p /usr/local/soledad -RUN mkdir -p /usr/local/soledad/conf - -# setup the enviroment for running soledad client and server -COPY files/setup-env.sh /usr/local/soledad/ -RUN /usr/local/soledad/setup-env.sh - -# copy runtime files for running server, client, tests, etc on a container -COPY files/client_side_db.py /usr/local/soledad/ -COPY files/start-client-test.sh /usr/local/soledad/ -COPY files/run-perf-test.sh /usr/local/soledad/ -COPY files/start-server.sh /usr/local/soledad/ -COPY files/start-trial-test.sh /usr/local/soledad/ -COPY files/test-env.py /usr/local/soledad/ -COPY files/util.py /usr/local/soledad/ -COPY files/conf/* /usr/local/soledad/conf/ + +COPY files/build/install-deps-from-repos.sh /usr/local/soledad/ +RUN /usr/local/soledad/install-deps-from-repos.sh + +COPY files/bin/ /usr/local/soledad/ diff --git a/scripts/docker/Makefile b/scripts/docker/Makefile index 080fd16c..9dbe9062 100644 --- a/scripts/docker/Makefile +++ b/scripts/docker/Makefile @@ -10,7 +10,7 @@ # Example usage: # # make run-server CONTAINER_ID_FILE=/tmp/container-id.txt -# make run-client-test CONTAINER_ID_FILE=/tmp/container-id.txt +# make run-client-perf CONTAINER_ID_FILE=/tmp/container-id.txt ##################################################################### # Some configurations you might override when calling this makefile # @@ -50,9 +50,9 @@ run-server: --cidfile=$(CONTAINER_ID_FILE) \ --detach \ $(IMAGE_NAME) \ - /usr/local/soledad/start-server.sh + /usr/local/soledad/run-server.sh # --drop-to-shell -run-client-test: +run-client-bootstrap: @if [ -z "$(CONTAINER_ID_FILE)" ]; then \ echo "Error: you have to pass a value to CONTAINER_ID_FILE."; \ exit 2; \ @@ -65,28 +65,28 @@ run-client-test: --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \ --env="SOLEDAD_SERVER_URL=http://$${server_ip}:2424" \ $(IMAGE_NAME) \ - /usr/local/soledad/start-client-test.sh + /usr/local/soledad/run-client-bootstrap.sh ################################################# # Run all trial tests inside a docker container # ################################################# -run-trial-test: +run-trial: docker run -t -i \ --memory="$(MEMORY)" \ --env="SOLEDAD_REMOTE=$(SOLEDAD_REMOTE)" \ --env="SOLEDAD_BRANCH=$(SOLEDAD_BRANCH)" \ $(IMAGE_NAME) \ - /usr/local/soledad/start-trial-test.sh + /usr/local/soledad/run-trial.sh ############################################ # Performance tests and graphic generation # ############################################ -run-perf: 
+run-perf-test: helper/run-test.sh perf -run-perf-test: +run-client-perf: @if [ -z "$(CONTAINER_ID_FILE)" ]; then \ echo "Error: you have to pass a value to CONTAINER_ID_FILE."; \ exit 2; \ @@ -105,7 +105,7 @@ run-perf-test: --env="SOLEDAD_STATS=1" \ --env="SOLEDAD_SERVER_URL=http://$${server_ip}:2424" \ $(IMAGE_NAME) \ - /usr/local/soledad/run-perf-test.sh + /usr/local/soledad/run-client-perf.sh # --drop-to-shell cp-perf-result: @if [ -z "$(CONTAINER_ID_FILE)" ]; then \ diff --git a/scripts/docker/files/apt/leap.list b/scripts/docker/files/apt/leap.list new file mode 100644 index 00000000..7eb474d8 --- /dev/null +++ b/scripts/docker/files/apt/leap.list @@ -0,0 +1,4 @@ +# This file is meant to be copied into the `/etc/apt/sources.list.d` directory +# inside a docker image to provide a source for leap-specific packages. + +deb http://deb.leap.se/0.8 jessie main diff --git a/scripts/docker/files/bin/client_side_db.py b/scripts/docker/files/bin/client_side_db.py new file mode 100644 index 00000000..4be33d13 --- /dev/null +++ b/scripts/docker/files/bin/client_side_db.py @@ -0,0 +1,322 @@ +#!/usr/bin/python + +import os +import argparse +import tempfile +import getpass +import requests +import srp._pysrp as srp +import binascii +import logging +import json +import time + +from twisted.internet import reactor +from twisted.internet.defer import inlineCallbacks + +from leap.soledad.client import Soledad +from leap.keymanager import KeyManager +from leap.keymanager.openpgp import OpenPGPKey + +from leap.common.events import server +server.ensure_server() + +from util import ValidateUserHandle + + +""" +Script to give access to client-side Soledad database. + +This is mainly used for tests, but can also be used to recover data from a +Soledad database (public/private keys, export documents, etc). + +To speed up testing/debugging, this script can dump the auth data after +logging in. Use the --export-auth-data option to export auth data to a file. +The contents of the file is a json dictionary containing the uuid, server_url, +cert_file and token, which is enough info to instantiate a soledad client +without having to interact with the webapp again. Use the --use-auth-data +option to use the auth data stored in a file. + +Use the --help option to see available options. 
+""" + + +# create a logger +logger = logging.getLogger(__name__) +LOG_FORMAT = '%(asctime)s %(message)s' +logging.basicConfig(format=LOG_FORMAT, level=logging.DEBUG) + + +safe_unhexlify = lambda x: binascii.unhexlify(x) if ( + len(x) % 2 == 0) else binascii.unhexlify('0' + x) + + +def _fail(reason): + logger.error('Fail: ' + reason) + exit(2) + + +def _get_api_info(provider): + info = requests.get( + 'https://' + provider + '/provider.json', verify=False).json() + return info['api_uri'], info['api_version'] + + +def _login(username, passphrase, provider, api_uri, api_version): + usr = srp.User(username, passphrase, srp.SHA256, srp.NG_1024) + auth = None + try: + auth = _authenticate(api_uri, api_version, usr).json() + except requests.exceptions.ConnectionError: + _fail('Could not connect to server.') + if 'errors' in auth: + _fail(str(auth['errors'])) + return api_uri, api_version, auth + + +def _authenticate(api_uri, api_version, usr): + api_url = "%s/%s" % (api_uri, api_version) + session = requests.session() + uname, A = usr.start_authentication() + params = {'login': uname, 'A': binascii.hexlify(A)} + init = session.post( + api_url + '/sessions', data=params, verify=False).json() + if 'errors' in init: + _fail('test user not found') + M = usr.process_challenge( + safe_unhexlify(init['salt']), safe_unhexlify(init['B'])) + return session.put(api_url + '/sessions/' + uname, verify=False, + data={'client_auth': binascii.hexlify(M)}) + + +def _get_soledad_info(username, provider, passphrase, basedir): + api_uri, api_version = _get_api_info(provider) + auth = _login(username, passphrase, provider, api_uri, api_version) + # get soledad server url + service_url = '%s/%s/config/soledad-service.json' % \ + (api_uri, api_version) + soledad_hosts = requests.get(service_url, verify=False).json()['hosts'] + hostnames = soledad_hosts.keys() + # allow for choosing the host + host = hostnames[0] + if len(hostnames) > 1: + i = 1 + print "There are many available hosts:" + for h in hostnames: + print " (%d) %s.%s" % (i, h, provider) + i += 1 + choice = raw_input("Choose a host to use (default: 1): ") + if choice != '': + host = hostnames[int(choice) - 1] + server_url = 'https://%s:%d/user-%s' % \ + (soledad_hosts[host]['hostname'], soledad_hosts[host]['port'], + auth[2]['id']) + # get provider ca certificate + ca_cert = requests.get('https://%s/ca.crt' % provider, verify=False).text + cert_file = os.path.join(basedir, 'ca.crt') + with open(cert_file, 'w') as f: + f.write(ca_cert) + return auth[2]['id'], server_url, cert_file, auth[2]['token'] + + +def _get_soledad_instance(uuid, passphrase, basedir, server_url, cert_file, + token): + # setup soledad info + logger.info('UUID is %s' % uuid) + logger.info('Server URL is %s' % server_url) + secrets_path = os.path.join( + basedir, '%s.secret' % uuid) + local_db_path = os.path.join( + basedir, '%s.db' % uuid) + # instantiate soledad + return Soledad( + uuid, + unicode(passphrase), + secrets_path=secrets_path, + local_db_path=local_db_path, + server_url=server_url, + cert_file=cert_file, + auth_token=token, + defer_encryption=True) + + +def _get_keymanager_instance(username, provider, soledad, token, + ca_cert_path=None, api_uri=None, api_version=None, + uid=None, gpgbinary=None): + return KeyManager( + "{username}@{provider}".format(username=username, provider=provider), + "http://uri", + soledad, + token=token, + ca_cert_path=ca_cert_path, + api_uri=api_uri, + api_version=api_version, + uid=uid, + gpgbinary=gpgbinary) + + +def _parse_args(): + # parse 
command line + parser = argparse.ArgumentParser() + parser.add_argument( + 'user@provider', action=ValidateUserHandle, help='the user handle') + parser.add_argument( + '--basedir', '-b', default=None, + help='soledad base directory') + parser.add_argument( + '--passphrase', '-p', default=None, + help='the user passphrase') + parser.add_argument( + '--get-all-docs', '-a', action='store_true', + help='get all documents from the local database') + parser.add_argument( + '--create-docs', '-c', default=0, type=int, + help='create a number of documents') + parser.add_argument( + '--sync', '-s', action='store_true', + help='synchronize with the server replica') + parser.add_argument( + '--repeat-sync', '-r', action='store_true', + help='repeat synchronization until no new data is received') + parser.add_argument( + '--export-public-key', help="export the public key to a file") + parser.add_argument( + '--export-private-key', help="export the private key to a file") + parser.add_argument( + '--export-incoming-messages', + help="export incoming messages to a directory") + parser.add_argument( + '--export-auth-data', + help="export authentication data to a file") + parser.add_argument( + '--use-auth-data', + help="use authentication data from a file") + return parser.parse_args() + + +def _get_passphrase(args): + passphrase = args.passphrase + if passphrase is None: + passphrase = getpass.getpass( + 'Password for %s@%s: ' % (args.username, args.provider)) + return passphrase + + +def _get_basedir(args): + basedir = args.basedir + if basedir is None: + basedir = tempfile.mkdtemp() + elif not os.path.isdir(basedir): + os.mkdir(basedir) + logger.info('Using %s as base directory.' % basedir) + return basedir + + +@inlineCallbacks +def _export_key(args, km, fname, private=False): + address = args.username + "@" + args.provider + pkey = yield km.get_key( + address, OpenPGPKey, private=private, fetch_remote=False) + with open(args.export_private_key, "w") as f: + f.write(pkey.key_data) + + +@inlineCallbacks +def _export_incoming_messages(soledad, directory): + yield soledad.create_index("by-incoming", "bool(incoming)") + docs = yield soledad.get_from_index("by-incoming", '1') + i = 1 + for doc in docs: + with open(os.path.join(directory, "message_%d.gpg" % i), "w") as f: + f.write(doc.content["_enc_json"]) + i += 1 + + +@inlineCallbacks +def _get_all_docs(soledad): + _, docs = yield soledad.get_all_docs() + for doc in docs: + print json.dumps(doc.content, indent=4) + + +# main program + +@inlineCallbacks +def _main(soledad, km, args): + try: + if args.create_docs: + for i in xrange(args.create_docs): + t = time.time() + logger.debug( + "Creating doc %d/%d..." 
% (i + 1, args.create_docs)) + content = { + 'datetime': time.strftime( + "%Y-%m-%d %H:%M:%S", time.gmtime(t)), + 'timestamp': t, + 'index': i, + 'total': args.create_docs, + } + yield soledad.create_doc(content) + if args.sync: + yield soledad.sync() + if args.repeat_sync: + old_gen = 0 + new_gen = yield soledad.sync() + while old_gen != new_gen: + old_gen = new_gen + new_gen = yield soledad.sync() + if args.get_all_docs: + yield _get_all_docs(soledad) + if args.export_private_key: + yield _export_key(args, km, args.export_private_key, private=True) + if args.export_public_key: + yield _export_key(args, km, args.expoert_public_key, private=False) + if args.export_incoming_messages: + yield _export_incoming_messages( + soledad, args.export_incoming_messages) + except Exception as e: + logger.error(e) + finally: + soledad.close() + reactor.callWhenRunning(reactor.stop) + + +if __name__ == '__main__': + args = _parse_args() + passphrase = _get_passphrase(args) + basedir = _get_basedir(args) + + if not args.use_auth_data: + # get auth data from server + uuid, server_url, cert_file, token = \ + _get_soledad_info( + args.username, args.provider, passphrase, basedir) + else: + # load auth data from file + with open(args.use_auth_data) as f: + auth_data = json.loads(f.read()) + uuid = auth_data['uuid'] + server_url = auth_data['server_url'] + cert_file = auth_data['cert_file'] + token = auth_data['token'] + + # export auth data to a file + if args.export_auth_data: + with open(args.export_auth_data, "w") as f: + f.write(json.dumps({ + 'uuid': uuid, + 'server_url': server_url, + 'cert_file': cert_file, + 'token': token, + })) + + soledad = _get_soledad_instance( + uuid, passphrase, basedir, server_url, cert_file, token) + km = _get_keymanager_instance( + args.username, + args.provider, + soledad, + token, + uid=uuid) + _main(soledad, km, args) + reactor.run() diff --git a/scripts/docker/files/bin/conf/cert_default.conf b/scripts/docker/files/bin/conf/cert_default.conf new file mode 100644 index 00000000..8043cea3 --- /dev/null +++ b/scripts/docker/files/bin/conf/cert_default.conf @@ -0,0 +1,15 @@ +[ req ] +default_bits = 1024 +default_keyfile = keyfile.pem +distinguished_name = req_distinguished_name +prompt = no +output_password = mypass + +[ req_distinguished_name ] +C = GB +ST = Test State or Province +L = Test Locality +O = Organization Name +OU = Organizational Unit Name +CN = localhost +emailAddress = test@email.address diff --git a/scripts/docker/files/bin/conf/couchdb_default.ini b/scripts/docker/files/bin/conf/couchdb_default.ini new file mode 100644 index 00000000..5ab72d7b --- /dev/null +++ b/scripts/docker/files/bin/conf/couchdb_default.ini @@ -0,0 +1,361 @@ +; etc/couchdb/default.ini.tpl. Generated from default.ini.tpl.in by configure. + +; Upgrading CouchDB will overwrite this file. +[vendor] +name = The Apache Software Foundation +version = 1.6.0 + +[couchdb] +database_dir = BASEDIR +view_index_dir = BASEDIR +util_driver_dir = /usr/lib/x86_64-linux-gnu/couchdb/erlang/lib/couch-1.6.0/priv/lib +max_document_size = 4294967296 ; 4 GB +os_process_timeout = 5000 ; 5 seconds. for view and external servers. +max_dbs_open = 100 +delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned +uri_file = BASEDIR/couch.uri +; Method used to compress everything that is appended to database and view index files, except +; for attachments (see the attachments section). 
Available methods are: +; +; none - no compression +; snappy - use google snappy, a very fast compressor/decompressor +uuid = bc2f8b84ecb0b13a31cf7f6881a52194 + +; deflate_[N] - use zlib's deflate, N is the compression level which ranges from 1 (fastest, +; lowest compression ratio) to 9 (slowest, highest compression ratio) +file_compression = snappy +; Higher values may give better read performance due to less read operations +; and/or more OS page cache hits, but they can also increase overall response +; time for writes when there are many attachment write requests in parallel. +attachment_stream_buffer_size = 4096 + +plugin_dir = /usr/lib/x86_64-linux-gnu/couchdb/plugins + +[database_compaction] +; larger buffer sizes can originate smaller files +doc_buffer_size = 524288 ; value in bytes +checkpoint_after = 5242880 ; checkpoint after every N bytes were written + +[view_compaction] +; larger buffer sizes can originate smaller files +keyvalue_buffer_size = 2097152 ; value in bytes + +[httpd] +port = 5984 +bind_address = 127.0.0.1 +authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler} +default_handler = {couch_httpd_db, handle_request} +secure_rewrites = true +vhost_global_handlers = _utils, _uuids, _session, _oauth, _users +allow_jsonp = false +; Options for the MochiWeb HTTP server. +;server_options = [{backlog, 128}, {acceptor_pool_size, 16}] +; For more socket options, consult Erlang's module 'inet' man page. +;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}] +socket_options = [{recbuf, 262144}, {sndbuf, 262144}] +log_max_chunk_size = 1000000 +enable_cors = false +; CouchDB can optionally enforce a maximum uri length; +; max_uri_length = 8000 + +[ssl] +port = 6984 + +[log] +file = BASEDIR/couch.log +level = info +include_sasl = true + +[couch_httpd_auth] +authentication_db = _users +authentication_redirect = /_utils/session.html +require_valid_user = false +timeout = 600 ; number of seconds before automatic logout +auth_cache_size = 50 ; size is number of cache entries +allow_persistent_cookies = false ; set to true to allow persistent cookies +iterations = 10 ; iterations for password hashing +; min_iterations = 1 +; max_iterations = 1000000000 +; comma-separated list of public fields, 404 if empty +; public_fields = + +[cors] +credentials = false +; List of origins separated by a comma, * means accept all +; Origins must include the scheme: http://example.com +; You can’t set origins: * and credentials = true at the same time. +;origins = * +; List of accepted headers separated by a comma +; headers = +; List of accepted methods +; methods = + + +; Configuration for a vhost +;[cors:http://example.com] +; credentials = false +; List of origins separated by a comma +; Origins must include the scheme: http://example.com +; You can’t set origins: * and credentials = true at the same time. +;origins = +; List of accepted headers separated by a comma +; headers = +; List of accepted methods +; methods = + +[couch_httpd_oauth] +; If set to 'true', oauth token and consumer secrets will be looked up +; in the authentication database (_users). These secrets are stored in +; a top level property named "oauth" in user documents. 
Example: +; { +; "_id": "org.couchdb.user:joe", +; "type": "user", +; "name": "joe", +; "password_sha": "fe95df1ca59a9b567bdca5cbaf8412abd6e06121", +; "salt": "4e170ffeb6f34daecfd814dfb4001a73" +; "roles": ["foo", "bar"], +; "oauth": { +; "consumer_keys": { +; "consumerKey1": "key1Secret", +; "consumerKey2": "key2Secret" +; }, +; "tokens": { +; "token1": "token1Secret", +; "token2": "token2Secret" +; } +; } +; } +use_users_db = false + +[query_servers] +javascript = /usr/bin/couchjs /usr/share/couchdb/server/main.js +coffeescript = /usr/bin/couchjs /usr/share/couchdb/server/main-coffee.js + + +; Changing reduce_limit to false will disable reduce_limit. +; If you think you're hitting reduce_limit with a "good" reduce function, +; please let us know on the mailing list so we can fine tune the heuristic. +[query_server_config] +reduce_limit = true +os_process_limit = 25 + +[daemons] +index_server={couch_index_server, start_link, []} +external_manager={couch_external_manager, start_link, []} +query_servers={couch_query_servers, start_link, []} +vhosts={couch_httpd_vhost, start_link, []} +httpd={couch_httpd, start_link, []} +stats_aggregator={couch_stats_aggregator, start, []} +stats_collector={couch_stats_collector, start, []} +uuids={couch_uuids, start, []} +auth_cache={couch_auth_cache, start_link, []} +replicator_manager={couch_replicator_manager, start_link, []} +os_daemons={couch_os_daemons, start_link, []} +compaction_daemon={couch_compaction_daemon, start_link, []} + +[httpd_global_handlers] +/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>} +favicon.ico = {couch_httpd_misc_handlers, handle_favicon_req, "/usr/share/couchdb/www"} + +_utils = {couch_httpd_misc_handlers, handle_utils_dir_req, "/usr/share/couchdb/www"} +_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req} +_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req} +_config = {couch_httpd_misc_handlers, handle_config_req} +_replicate = {couch_replicator_httpd, handle_req} +_uuids = {couch_httpd_misc_handlers, handle_uuids_req} +_restart = {couch_httpd_misc_handlers, handle_restart_req} +_stats = {couch_httpd_stats_handlers, handle_stats_req} +_log = {couch_httpd_misc_handlers, handle_log_req} +_session = {couch_httpd_auth, handle_session_req} +_oauth = {couch_httpd_oauth, handle_oauth_req} +_db_updates = {couch_dbupdates_httpd, handle_req} +_plugins = {couch_plugins_httpd, handle_req} + +[httpd_db_handlers] +_all_docs = {couch_mrview_http, handle_all_docs_req} +_changes = {couch_httpd_db, handle_changes_req} +_compact = {couch_httpd_db, handle_compact_req} +_design = {couch_httpd_db, handle_design_req} +_temp_view = {couch_mrview_http, handle_temp_view_req} +_view_cleanup = {couch_mrview_http, handle_cleanup_req} + +; The external module takes an optional argument allowing you to narrow it to a +; single script. Otherwise the script name is inferred from the first path section +; after _external's own path. +; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>} +; _external = {couch_httpd_external, handle_external_req} + +[httpd_design_handlers] +_compact = {couch_mrview_http, handle_compact_req} +_info = {couch_mrview_http, handle_info_req} +_list = {couch_mrview_show, handle_view_list_req} +_rewrite = {couch_httpd_rewrite, handle_rewrite_req} +_show = {couch_mrview_show, handle_doc_show_req} +_update = {couch_mrview_show, handle_doc_update_req} +_view = {couch_mrview_http, handle_view_req} + +; enable external as an httpd handler, then link it with commands here. 
+; note, this api is still under consideration. +; [external] +; mykey = /path/to/mycommand + +; Here you can setup commands for CouchDB to manage +; while it is alive. It will attempt to keep each command +; alive if it exits. +; [os_daemons] +; some_daemon_name = /path/to/script -with args + + +[uuids] +; Known algorithms: +; random - 128 bits of random awesome +; All awesome, all the time. +; sequential - monotonically increasing ids with random increments +; First 26 hex characters are random. Last 6 increment in +; random amounts until an overflow occurs. On overflow, the +; random prefix is regenerated and the process starts over. +; utc_random - Time since Jan 1, 1970 UTC with microseconds +; First 14 characters are the time in hex. Last 18 are random. +; utc_id - Time since Jan 1, 1970 UTC with microseconds, plus utc_id_suffix string +; First 14 characters are the time in hex. uuids/utc_id_suffix string value is appended to these. +algorithm = sequential +; The utc_id_suffix value will be appended to uuids generated by the utc_id algorithm. +; Replicating instances should have unique utc_id_suffix values to ensure uniqueness of utc_id ids. +utc_id_suffix = +# Maximum number of UUIDs retrievable from /_uuids in a single request +max_count = 1000 + +[stats] +; rate is in milliseconds +rate = 1000 +; sample intervals are in seconds +samples = [0, 60, 300, 900] + +[attachments] +compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression +compressible_types = text/*, application/javascript, application/json, application/xml + +[replicator] +db = _replicator +; Maximum replicaton retry count can be a non-negative integer or "infinity". +max_replication_retry_count = 10 +; More worker processes can give higher network throughput but can also +; imply more disk and network IO. +worker_processes = 4 +; With lower batch sizes checkpoints are done more frequently. Lower batch sizes +; also reduce the total amount of used RAM memory. +worker_batch_size = 500 +; Maximum number of HTTP connections per replication. +http_connections = 20 +; HTTP connection timeout per replication. +; Even for very fast/reliable networks it might need to be increased if a remote +; database is too busy. +connection_timeout = 30000 +; If a request fails, the replicator will retry it up to N times. +retries_per_request = 10 +; Some socket options that might boost performance in some scenarios: +; {nodelay, boolean()} +; {sndbuf, integer()} +; {recbuf, integer()} +; {priority, integer()} +; See the `inet` Erlang module's man page for the full list of options. +socket_options = [{keepalive, true}, {nodelay, false}] +; Path to a file containing the user's certificate. +;cert_file = /full/path/to/server_cert.pem +; Path to file containing user's private PEM encoded key. +;key_file = /full/path/to/server_key.pem +; String containing the user's password. Only used if the private keyfile is password protected. +;password = somepassword +; Set to true to validate peer certificates. +verify_ssl_certificates = false +; File containing a list of peer trusted certificates (in the PEM format). +;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt +; Maximum peer certificate depth (must be set even if certificate validation is off). +ssl_certificate_max_depth = 3 + +[compaction_daemon] +; The delay, in seconds, between each check for which database and view indexes +; need to be compacted. 
+check_interval = 300 +; If a database or view index file is smaller then this value (in bytes), +; compaction will not happen. Very small files always have a very high +; fragmentation therefore it's not worth to compact them. +min_file_size = 131072 + +[compactions] +; List of compaction rules for the compaction daemon. +; The daemon compacts databases and their respective view groups when all the +; condition parameters are satisfied. Configuration can be per database or +; global, and it has the following format: +; +; database_name = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ] +; _default = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ] +; +; Possible parameters: +; +; * db_fragmentation - If the ratio (as an integer percentage), of the amount +; of old data (and its supporting metadata) over the database +; file size is equal to or greater then this value, this +; database compaction condition is satisfied. +; This value is computed as: +; +; (file_size - data_size) / file_size * 100 +; +; The data_size and file_size values can be obtained when +; querying a database's information URI (GET /dbname/). +; +; * view_fragmentation - If the ratio (as an integer percentage), of the amount +; of old data (and its supporting metadata) over the view +; index (view group) file size is equal to or greater then +; this value, then this view index compaction condition is +; satisfied. This value is computed as: +; +; (file_size - data_size) / file_size * 100 +; +; The data_size and file_size values can be obtained when +; querying a view group's information URI +; (GET /dbname/_design/groupname/_info). +; +; * from _and_ to - The period for which a database (and its view groups) compaction +; is allowed. The value for these parameters must obey the format: +; +; HH:MM - HH:MM (HH in [0..23], MM in [0..59]) +; +; * strict_window - If a compaction is still running after the end of the allowed +; period, it will be canceled if this parameter is set to 'true'. +; It defaults to 'false' and it's meaningful only if the *period* +; parameter is also specified. +; +; * parallel_view_compaction - If set to 'true', the database and its views are +; compacted in parallel. This is only useful on +; certain setups, like for example when the database +; and view index directories point to different +; disks. It defaults to 'false'. +; +; Before a compaction is triggered, an estimation of how much free disk space is +; needed is computed. This estimation corresponds to 2 times the data size of +; the database or view index. When there's not enough free disk space to compact +; a particular database or view index, a warning message is logged. +; +; Examples: +; +; 1) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}] +; The `foo` database is compacted if its fragmentation is 70% or more. +; Any view index of this database is compacted only if its fragmentation +; is 60% or more. +; +; 2) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}] +; Similar to the preceding example but a compaction (database or view index) +; is only triggered if the current time is between midnight and 4 AM. +; +; 3) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}] +; Similar to the preceding example - a compaction (database or view index) +; is only triggered if the current time is between midnight and 4 AM. 
If at +; 4 AM the database or one of its views is still compacting, the compaction +; process will be canceled. +; +; 4) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}, {parallel_view_compaction, true}] +; Similar to the preceding example, but a database and its views can be +; compacted in parallel. +; +;_default = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "23:00"}, {to, "04:00"}] diff --git a/scripts/docker/files/bin/conf/soledad-server_default.conf b/scripts/docker/files/bin/conf/soledad-server_default.conf new file mode 100644 index 00000000..5e286374 --- /dev/null +++ b/scripts/docker/files/bin/conf/soledad-server_default.conf @@ -0,0 +1,5 @@ +[soledad-server] +couch_url = http://localhost:5984 +create_cmd = sudo -u soledad-admin /usr/bin/create-user-db +admin_netrc = /etc/couchdb/couchdb-soledad-admin.netrc +batching = 0 diff --git a/scripts/docker/files/bin/run-client-bootstrap.sh b/scripts/docker/files/bin/run-client-bootstrap.sh new file mode 100755 index 00000000..fbbb42e8 --- /dev/null +++ b/scripts/docker/files/bin/run-client-bootstrap.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +# Run a Soledad client connection test. +# +# This script is meant to be copied to the docker container and run upon +# container start. + +CMD="/usr/local/soledad/setup-test-env.py" +REPO="/var/local/soledad" + +if [ ! -z "${SOLEDAD_REMOTE}" ]; then + git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE} + git -C ${REPO} fetch origin +fi + +if [ ! -z "${SOLEDAD_BRANCH}" ]; then + git -C ${REPO} checkout ${SOLEDAD_BRANCH} +fi + +${CMD} soledad-client test --server-url ${SOLEDAD_SERVER_URL} diff --git a/scripts/docker/files/bin/run-client-perf.sh b/scripts/docker/files/bin/run-client-perf.sh new file mode 100755 index 00000000..01b27b98 --- /dev/null +++ b/scripts/docker/files/bin/run-client-perf.sh @@ -0,0 +1,128 @@ +#!/bin/sh + +# Start a soledad-perf test using a remote server. +# +# The script does the following: +# +# - configure a remote repository for soledad repo if SOLEDAD_REMOTE is set. +# +# - checkout a specific branch if SOLEDAD_BRANCH is set. +# +# - run the soledad-perf local twisted server that runs the client. Note +# that the actual soledad server should be running on another docker +# container. This local server is only used to measure responsiveness of +# soledad client. The script waits for the server to come up before +# continuing, or else times out after TIMEOUT seconds. +# +# - trigger the creation of documents for sync. +# +# - start the measurement of server responsiveness and sync stages. +# +# - stop the test. +# +# This script is meant to be copied to the docker container and run upon +# container start. + +CMD="/usr/local/soledad/setup-test-env.py" +REPO="/var/local/soledad" +TIMEOUT=20 + +#----------------------------------------------------------------------------- +# configure a remote and checkout a branch +#----------------------------------------------------------------------------- + +if [ ! -z "${SOLEDAD_REMOTE}" ]; then + git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE} + git -C ${REPO} fetch origin +fi + +if [ ! -z "${SOLEDAD_BRANCH}" ]; then + git -C ${REPO} checkout ${SOLEDAD_BRANCH} +fi + +if [ ! -z "${SOLEDAD_PERF_REMOTE}" ]; then + git -C /var/local/soledad-perf remote set-url origin ${SOLEDAD_PERF_REMOTE} + git -C /var/local/soledad-perf fetch origin +fi + +if [ ! 
-z "${SOLEDAD_PERF_BRANCH}" ]; then + git -C /var/local/soledad-perf checkout ${SOLEDAD_PERF_BRANCH} +fi + +#----------------------------------------------------------------------------- +# write a configuration file for the perf test +#----------------------------------------------------------------------------- + +cd /var/local/soledad-perf + +cat > defaults.conf < /dev/null & +sleep 5 # wait a bit for some data points + +# run a sync and generate a graph +make trigger-sync +make trigger-stop diff --git a/scripts/docker/files/bin/run-server.sh b/scripts/docker/files/bin/run-server.sh new file mode 100755 index 00000000..feedee7e --- /dev/null +++ b/scripts/docker/files/bin/run-server.sh @@ -0,0 +1,89 @@ +#!/bin/sh + +# Start a soledad server inside a docker container. +# +# This script will: +# +# - eventually checkout a specific branch from a specific soledad remote. +# +# - create everything a soledad server needs to run (certificate, backend +# server database, tables, etc. +# +# - eventually preload the server database with a number of documents equal +# to SOLEDAD_PRELOAD_NUM, and with payload size equal to +# SOLEDAD_PRELOAD_SIZE. +# +# - run the soledad server. +# +# This script is meant to be copied to the docker container and run upon +# container start. + +CMD="/usr/local/soledad/setup-test-env.py" + +#--------------------------------------------------------------------------- +# eventually checkout a specific branch from a specific remote +#--------------------------------------------------------------------------- + +REPO="/var/local/soledad" + +if [ ! -z "${SOLEDAD_REMOTE}" ]; then + git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE} + git -C ${REPO} fetch origin +fi + +if [ ! -z "${SOLEDAD_BRANCH}" ]; then + git -C ${REPO} checkout ${SOLEDAD_BRANCH} +fi + +#--------------------------------------------------------------------------- +# setup environment for running soledad server +#--------------------------------------------------------------------------- + +${CMD} couch start +${CMD} user-db create +${CMD} token-db create +${CMD} token-db insert-token +${CMD} shared-db create +${CMD} cert create + +#--------------------------------------------------------------------------- +# write a configuration file for the perf test +#--------------------------------------------------------------------------- + +if [ "${SOLEDAD_PRELOAD_NUM}" -gt 0 ]; then + cd /var/local/soledad-perf + + cat > defaults.conf < 1: - i = 1 - print "There are many available hosts:" - for h in hostnames: - print " (%d) %s.%s" % (i, h, provider) - i += 1 - choice = raw_input("Choose a host to use (default: 1): ") - if choice != '': - host = hostnames[int(choice) - 1] - server_url = 'https://%s:%d/user-%s' % \ - (soledad_hosts[host]['hostname'], soledad_hosts[host]['port'], - auth[2]['id']) - # get provider ca certificate - ca_cert = requests.get('https://%s/ca.crt' % provider, verify=False).text - cert_file = os.path.join(basedir, 'ca.crt') - with open(cert_file, 'w') as f: - f.write(ca_cert) - return auth[2]['id'], server_url, cert_file, auth[2]['token'] - - -def _get_soledad_instance(uuid, passphrase, basedir, server_url, cert_file, - token): - # setup soledad info - logger.info('UUID is %s' % uuid) - logger.info('Server URL is %s' % server_url) - secrets_path = os.path.join( - basedir, '%s.secret' % uuid) - local_db_path = os.path.join( - basedir, '%s.db' % uuid) - # instantiate soledad - return Soledad( - uuid, - unicode(passphrase), - secrets_path=secrets_path, - local_db_path=local_db_path, - 
server_url=server_url, - cert_file=cert_file, - auth_token=token, - defer_encryption=True) - - -def _get_keymanager_instance(username, provider, soledad, token, - ca_cert_path=None, api_uri=None, api_version=None, - uid=None, gpgbinary=None): - return KeyManager( - "{username}@{provider}".format(username=username, provider=provider), - "http://uri", - soledad, - token=token, - ca_cert_path=ca_cert_path, - api_uri=api_uri, - api_version=api_version, - uid=uid, - gpgbinary=gpgbinary) - - -def _parse_args(): - # parse command line - parser = argparse.ArgumentParser() - parser.add_argument( - 'user@provider', action=ValidateUserHandle, help='the user handle') - parser.add_argument( - '--basedir', '-b', default=None, - help='soledad base directory') - parser.add_argument( - '--passphrase', '-p', default=None, - help='the user passphrase') - parser.add_argument( - '--get-all-docs', '-a', action='store_true', - help='get all documents from the local database') - parser.add_argument( - '--create-docs', '-c', default=0, type=int, - help='create a number of documents') - parser.add_argument( - '--sync', '-s', action='store_true', - help='synchronize with the server replica') - parser.add_argument( - '--repeat-sync', '-r', action='store_true', - help='repeat synchronization until no new data is received') - parser.add_argument( - '--export-public-key', help="export the public key to a file") - parser.add_argument( - '--export-private-key', help="export the private key to a file") - parser.add_argument( - '--export-incoming-messages', - help="export incoming messages to a directory") - parser.add_argument( - '--export-auth-data', - help="export authentication data to a file") - parser.add_argument( - '--use-auth-data', - help="use authentication data from a file") - return parser.parse_args() - - -def _get_passphrase(args): - passphrase = args.passphrase - if passphrase is None: - passphrase = getpass.getpass( - 'Password for %s@%s: ' % (args.username, args.provider)) - return passphrase - - -def _get_basedir(args): - basedir = args.basedir - if basedir is None: - basedir = tempfile.mkdtemp() - elif not os.path.isdir(basedir): - os.mkdir(basedir) - logger.info('Using %s as base directory.' % basedir) - return basedir - - -@inlineCallbacks -def _export_key(args, km, fname, private=False): - address = args.username + "@" + args.provider - pkey = yield km.get_key( - address, OpenPGPKey, private=private, fetch_remote=False) - with open(args.export_private_key, "w") as f: - f.write(pkey.key_data) - - -@inlineCallbacks -def _export_incoming_messages(soledad, directory): - yield soledad.create_index("by-incoming", "bool(incoming)") - docs = yield soledad.get_from_index("by-incoming", '1') - i = 1 - for doc in docs: - with open(os.path.join(directory, "message_%d.gpg" % i), "w") as f: - f.write(doc.content["_enc_json"]) - i += 1 - - -@inlineCallbacks -def _get_all_docs(soledad): - _, docs = yield soledad.get_all_docs() - for doc in docs: - print json.dumps(doc.content, indent=4) - - -# main program - -@inlineCallbacks -def _main(soledad, km, args): - try: - if args.create_docs: - for i in xrange(args.create_docs): - t = time.time() - logger.debug( - "Creating doc %d/%d..." 
% (i + 1, args.create_docs)) - content = { - 'datetime': time.strftime( - "%Y-%m-%d %H:%M:%S", time.gmtime(t)), - 'timestamp': t, - 'index': i, - 'total': args.create_docs, - } - yield soledad.create_doc(content) - if args.sync: - yield soledad.sync() - if args.repeat_sync: - old_gen = 0 - new_gen = yield soledad.sync() - while old_gen != new_gen: - old_gen = new_gen - new_gen = yield soledad.sync() - if args.get_all_docs: - yield _get_all_docs(soledad) - if args.export_private_key: - yield _export_key(args, km, args.export_private_key, private=True) - if args.export_public_key: - yield _export_key(args, km, args.expoert_public_key, private=False) - if args.export_incoming_messages: - yield _export_incoming_messages( - soledad, args.export_incoming_messages) - except Exception as e: - logger.error(e) - finally: - soledad.close() - reactor.callWhenRunning(reactor.stop) - - -if __name__ == '__main__': - args = _parse_args() - passphrase = _get_passphrase(args) - basedir = _get_basedir(args) - - if not args.use_auth_data: - # get auth data from server - uuid, server_url, cert_file, token = \ - _get_soledad_info( - args.username, args.provider, passphrase, basedir) - else: - # load auth data from file - with open(args.use_auth_data) as f: - auth_data = json.loads(f.read()) - uuid = auth_data['uuid'] - server_url = auth_data['server_url'] - cert_file = auth_data['cert_file'] - token = auth_data['token'] - - # export auth data to a file - if args.export_auth_data: - with open(args.export_auth_data, "w") as f: - f.write(json.dumps({ - 'uuid': uuid, - 'server_url': server_url, - 'cert_file': cert_file, - 'token': token, - })) - - soledad = _get_soledad_instance( - uuid, passphrase, basedir, server_url, cert_file, token) - km = _get_keymanager_instance( - args.username, - args.provider, - soledad, - token, - uid=uuid) - _main(soledad, km, args) - reactor.run() diff --git a/scripts/docker/files/conf/cert_default.conf b/scripts/docker/files/conf/cert_default.conf deleted file mode 100644 index 8043cea3..00000000 --- a/scripts/docker/files/conf/cert_default.conf +++ /dev/null @@ -1,15 +0,0 @@ -[ req ] -default_bits = 1024 -default_keyfile = keyfile.pem -distinguished_name = req_distinguished_name -prompt = no -output_password = mypass - -[ req_distinguished_name ] -C = GB -ST = Test State or Province -L = Test Locality -O = Organization Name -OU = Organizational Unit Name -CN = localhost -emailAddress = test@email.address diff --git a/scripts/docker/files/conf/couchdb_default.ini b/scripts/docker/files/conf/couchdb_default.ini deleted file mode 100644 index 5ab72d7b..00000000 --- a/scripts/docker/files/conf/couchdb_default.ini +++ /dev/null @@ -1,361 +0,0 @@ -; etc/couchdb/default.ini.tpl. Generated from default.ini.tpl.in by configure. - -; Upgrading CouchDB will overwrite this file. -[vendor] -name = The Apache Software Foundation -version = 1.6.0 - -[couchdb] -database_dir = BASEDIR -view_index_dir = BASEDIR -util_driver_dir = /usr/lib/x86_64-linux-gnu/couchdb/erlang/lib/couch-1.6.0/priv/lib -max_document_size = 4294967296 ; 4 GB -os_process_timeout = 5000 ; 5 seconds. for view and external servers. -max_dbs_open = 100 -delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned -uri_file = BASEDIR/couch.uri -; Method used to compress everything that is appended to database and view index files, except -; for attachments (see the attachments section). 
Available methods are: -; -; none - no compression -; snappy - use google snappy, a very fast compressor/decompressor -uuid = bc2f8b84ecb0b13a31cf7f6881a52194 - -; deflate_[N] - use zlib's deflate, N is the compression level which ranges from 1 (fastest, -; lowest compression ratio) to 9 (slowest, highest compression ratio) -file_compression = snappy -; Higher values may give better read performance due to less read operations -; and/or more OS page cache hits, but they can also increase overall response -; time for writes when there are many attachment write requests in parallel. -attachment_stream_buffer_size = 4096 - -plugin_dir = /usr/lib/x86_64-linux-gnu/couchdb/plugins - -[database_compaction] -; larger buffer sizes can originate smaller files -doc_buffer_size = 524288 ; value in bytes -checkpoint_after = 5242880 ; checkpoint after every N bytes were written - -[view_compaction] -; larger buffer sizes can originate smaller files -keyvalue_buffer_size = 2097152 ; value in bytes - -[httpd] -port = 5984 -bind_address = 127.0.0.1 -authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler} -default_handler = {couch_httpd_db, handle_request} -secure_rewrites = true -vhost_global_handlers = _utils, _uuids, _session, _oauth, _users -allow_jsonp = false -; Options for the MochiWeb HTTP server. -;server_options = [{backlog, 128}, {acceptor_pool_size, 16}] -; For more socket options, consult Erlang's module 'inet' man page. -;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}] -socket_options = [{recbuf, 262144}, {sndbuf, 262144}] -log_max_chunk_size = 1000000 -enable_cors = false -; CouchDB can optionally enforce a maximum uri length; -; max_uri_length = 8000 - -[ssl] -port = 6984 - -[log] -file = BASEDIR/couch.log -level = info -include_sasl = true - -[couch_httpd_auth] -authentication_db = _users -authentication_redirect = /_utils/session.html -require_valid_user = false -timeout = 600 ; number of seconds before automatic logout -auth_cache_size = 50 ; size is number of cache entries -allow_persistent_cookies = false ; set to true to allow persistent cookies -iterations = 10 ; iterations for password hashing -; min_iterations = 1 -; max_iterations = 1000000000 -; comma-separated list of public fields, 404 if empty -; public_fields = - -[cors] -credentials = false -; List of origins separated by a comma, * means accept all -; Origins must include the scheme: http://example.com -; You can’t set origins: * and credentials = true at the same time. -;origins = * -; List of accepted headers separated by a comma -; headers = -; List of accepted methods -; methods = - - -; Configuration for a vhost -;[cors:http://example.com] -; credentials = false -; List of origins separated by a comma -; Origins must include the scheme: http://example.com -; You can’t set origins: * and credentials = true at the same time. -;origins = -; List of accepted headers separated by a comma -; headers = -; List of accepted methods -; methods = - -[couch_httpd_oauth] -; If set to 'true', oauth token and consumer secrets will be looked up -; in the authentication database (_users). These secrets are stored in -; a top level property named "oauth" in user documents. 
Example: -; { -; "_id": "org.couchdb.user:joe", -; "type": "user", -; "name": "joe", -; "password_sha": "fe95df1ca59a9b567bdca5cbaf8412abd6e06121", -; "salt": "4e170ffeb6f34daecfd814dfb4001a73" -; "roles": ["foo", "bar"], -; "oauth": { -; "consumer_keys": { -; "consumerKey1": "key1Secret", -; "consumerKey2": "key2Secret" -; }, -; "tokens": { -; "token1": "token1Secret", -; "token2": "token2Secret" -; } -; } -; } -use_users_db = false - -[query_servers] -javascript = /usr/bin/couchjs /usr/share/couchdb/server/main.js -coffeescript = /usr/bin/couchjs /usr/share/couchdb/server/main-coffee.js - - -; Changing reduce_limit to false will disable reduce_limit. -; If you think you're hitting reduce_limit with a "good" reduce function, -; please let us know on the mailing list so we can fine tune the heuristic. -[query_server_config] -reduce_limit = true -os_process_limit = 25 - -[daemons] -index_server={couch_index_server, start_link, []} -external_manager={couch_external_manager, start_link, []} -query_servers={couch_query_servers, start_link, []} -vhosts={couch_httpd_vhost, start_link, []} -httpd={couch_httpd, start_link, []} -stats_aggregator={couch_stats_aggregator, start, []} -stats_collector={couch_stats_collector, start, []} -uuids={couch_uuids, start, []} -auth_cache={couch_auth_cache, start_link, []} -replicator_manager={couch_replicator_manager, start_link, []} -os_daemons={couch_os_daemons, start_link, []} -compaction_daemon={couch_compaction_daemon, start_link, []} - -[httpd_global_handlers] -/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>} -favicon.ico = {couch_httpd_misc_handlers, handle_favicon_req, "/usr/share/couchdb/www"} - -_utils = {couch_httpd_misc_handlers, handle_utils_dir_req, "/usr/share/couchdb/www"} -_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req} -_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req} -_config = {couch_httpd_misc_handlers, handle_config_req} -_replicate = {couch_replicator_httpd, handle_req} -_uuids = {couch_httpd_misc_handlers, handle_uuids_req} -_restart = {couch_httpd_misc_handlers, handle_restart_req} -_stats = {couch_httpd_stats_handlers, handle_stats_req} -_log = {couch_httpd_misc_handlers, handle_log_req} -_session = {couch_httpd_auth, handle_session_req} -_oauth = {couch_httpd_oauth, handle_oauth_req} -_db_updates = {couch_dbupdates_httpd, handle_req} -_plugins = {couch_plugins_httpd, handle_req} - -[httpd_db_handlers] -_all_docs = {couch_mrview_http, handle_all_docs_req} -_changes = {couch_httpd_db, handle_changes_req} -_compact = {couch_httpd_db, handle_compact_req} -_design = {couch_httpd_db, handle_design_req} -_temp_view = {couch_mrview_http, handle_temp_view_req} -_view_cleanup = {couch_mrview_http, handle_cleanup_req} - -; The external module takes an optional argument allowing you to narrow it to a -; single script. Otherwise the script name is inferred from the first path section -; after _external's own path. -; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>} -; _external = {couch_httpd_external, handle_external_req} - -[httpd_design_handlers] -_compact = {couch_mrview_http, handle_compact_req} -_info = {couch_mrview_http, handle_info_req} -_list = {couch_mrview_show, handle_view_list_req} -_rewrite = {couch_httpd_rewrite, handle_rewrite_req} -_show = {couch_mrview_show, handle_doc_show_req} -_update = {couch_mrview_show, handle_doc_update_req} -_view = {couch_mrview_http, handle_view_req} - -; enable external as an httpd handler, then link it with commands here. 
-; note, this api is still under consideration. -; [external] -; mykey = /path/to/mycommand - -; Here you can setup commands for CouchDB to manage -; while it is alive. It will attempt to keep each command -; alive if it exits. -; [os_daemons] -; some_daemon_name = /path/to/script -with args - - -[uuids] -; Known algorithms: -; random - 128 bits of random awesome -; All awesome, all the time. -; sequential - monotonically increasing ids with random increments -; First 26 hex characters are random. Last 6 increment in -; random amounts until an overflow occurs. On overflow, the -; random prefix is regenerated and the process starts over. -; utc_random - Time since Jan 1, 1970 UTC with microseconds -; First 14 characters are the time in hex. Last 18 are random. -; utc_id - Time since Jan 1, 1970 UTC with microseconds, plus utc_id_suffix string -; First 14 characters are the time in hex. uuids/utc_id_suffix string value is appended to these. -algorithm = sequential -; The utc_id_suffix value will be appended to uuids generated by the utc_id algorithm. -; Replicating instances should have unique utc_id_suffix values to ensure uniqueness of utc_id ids. -utc_id_suffix = -# Maximum number of UUIDs retrievable from /_uuids in a single request -max_count = 1000 - -[stats] -; rate is in milliseconds -rate = 1000 -; sample intervals are in seconds -samples = [0, 60, 300, 900] - -[attachments] -compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression -compressible_types = text/*, application/javascript, application/json, application/xml - -[replicator] -db = _replicator -; Maximum replicaton retry count can be a non-negative integer or "infinity". -max_replication_retry_count = 10 -; More worker processes can give higher network throughput but can also -; imply more disk and network IO. -worker_processes = 4 -; With lower batch sizes checkpoints are done more frequently. Lower batch sizes -; also reduce the total amount of used RAM memory. -worker_batch_size = 500 -; Maximum number of HTTP connections per replication. -http_connections = 20 -; HTTP connection timeout per replication. -; Even for very fast/reliable networks it might need to be increased if a remote -; database is too busy. -connection_timeout = 30000 -; If a request fails, the replicator will retry it up to N times. -retries_per_request = 10 -; Some socket options that might boost performance in some scenarios: -; {nodelay, boolean()} -; {sndbuf, integer()} -; {recbuf, integer()} -; {priority, integer()} -; See the `inet` Erlang module's man page for the full list of options. -socket_options = [{keepalive, true}, {nodelay, false}] -; Path to a file containing the user's certificate. -;cert_file = /full/path/to/server_cert.pem -; Path to file containing user's private PEM encoded key. -;key_file = /full/path/to/server_key.pem -; String containing the user's password. Only used if the private keyfile is password protected. -;password = somepassword -; Set to true to validate peer certificates. -verify_ssl_certificates = false -; File containing a list of peer trusted certificates (in the PEM format). -;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt -; Maximum peer certificate depth (must be set even if certificate validation is off). -ssl_certificate_max_depth = 3 - -[compaction_daemon] -; The delay, in seconds, between each check for which database and view indexes -; need to be compacted. 
-check_interval = 300 -; If a database or view index file is smaller then this value (in bytes), -; compaction will not happen. Very small files always have a very high -; fragmentation therefore it's not worth to compact them. -min_file_size = 131072 - -[compactions] -; List of compaction rules for the compaction daemon. -; The daemon compacts databases and their respective view groups when all the -; condition parameters are satisfied. Configuration can be per database or -; global, and it has the following format: -; -; database_name = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ] -; _default = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ] -; -; Possible parameters: -; -; * db_fragmentation - If the ratio (as an integer percentage), of the amount -; of old data (and its supporting metadata) over the database -; file size is equal to or greater then this value, this -; database compaction condition is satisfied. -; This value is computed as: -; -; (file_size - data_size) / file_size * 100 -; -; The data_size and file_size values can be obtained when -; querying a database's information URI (GET /dbname/). -; -; * view_fragmentation - If the ratio (as an integer percentage), of the amount -; of old data (and its supporting metadata) over the view -; index (view group) file size is equal to or greater then -; this value, then this view index compaction condition is -; satisfied. This value is computed as: -; -; (file_size - data_size) / file_size * 100 -; -; The data_size and file_size values can be obtained when -; querying a view group's information URI -; (GET /dbname/_design/groupname/_info). -; -; * from _and_ to - The period for which a database (and its view groups) compaction -; is allowed. The value for these parameters must obey the format: -; -; HH:MM - HH:MM (HH in [0..23], MM in [0..59]) -; -; * strict_window - If a compaction is still running after the end of the allowed -; period, it will be canceled if this parameter is set to 'true'. -; It defaults to 'false' and it's meaningful only if the *period* -; parameter is also specified. -; -; * parallel_view_compaction - If set to 'true', the database and its views are -; compacted in parallel. This is only useful on -; certain setups, like for example when the database -; and view index directories point to different -; disks. It defaults to 'false'. -; -; Before a compaction is triggered, an estimation of how much free disk space is -; needed is computed. This estimation corresponds to 2 times the data size of -; the database or view index. When there's not enough free disk space to compact -; a particular database or view index, a warning message is logged. -; -; Examples: -; -; 1) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}] -; The `foo` database is compacted if its fragmentation is 70% or more. -; Any view index of this database is compacted only if its fragmentation -; is 60% or more. -; -; 2) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}] -; Similar to the preceding example but a compaction (database or view index) -; is only triggered if the current time is between midnight and 4 AM. -; -; 3) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}] -; Similar to the preceding example - a compaction (database or view index) -; is only triggered if the current time is between midnight and 4 AM. 
diff --git a/scripts/docker/files/conf/soledad-server_default.conf b/scripts/docker/files/conf/soledad-server_default.conf
deleted file mode 100644
index 5e286374..00000000
--- a/scripts/docker/files/conf/soledad-server_default.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-[soledad-server]
-couch_url = http://localhost:5984
-create_cmd = sudo -u soledad-admin /usr/bin/create-user-db
-admin_netrc = /etc/couchdb/couchdb-soledad-admin.netrc
-batching = 0
diff --git a/scripts/docker/files/leap.list b/scripts/docker/files/leap.list
deleted file mode 100644
index 7eb474d8..00000000
--- a/scripts/docker/files/leap.list
+++ /dev/null
@@ -1,4 +0,0 @@
-# This file is meant to be copied into the `/etc/apt/sources.list.d` directory
-# inside a docker image to provide a source for leap-specific packages.
-
-deb http://deb.leap.se/0.8 jessie main
diff --git a/scripts/docker/files/run-perf-test.sh b/scripts/docker/files/run-perf-test.sh
deleted file mode 100755
index ebd54d23..00000000
--- a/scripts/docker/files/run-perf-test.sh
+++ /dev/null
@@ -1,124 +0,0 @@
-#!/bin/sh
-
-# Start a soledad-perf test using a remote server.
-#
-# The script does the following:
-#
-# - configure a remote repository for soledad repo if SOLEDAD_REMOTE is set.
-#
-# - checkout a specific branch if SOLEDAD_BRANCH is set.
-#
-# - run the soledad-perf local twisted server that runs the client. Note
-#   that the actual soledad server should be running on another docker
-#   container. This local server is only used to measure responsiveness of
-#   soledad client. The script waits for the server to come up before
-#   continuing, or else times out after TIMEOUT seconds.
-#
-# - trigger the creation of documents for sync.
-#
-# - start the measurement of server responsiveness and sync stages.
-#
-# - stop the test.
-#
-# This script is meant to be copied to the docker container and run upon
-# container start.
-
-CMD="/usr/local/soledad/test-env.py"
-REPO="/var/local/soledad"
-TIMEOUT=20
-
-#-----------------------------------------------------------------------------
-# configure a remote and checkout a branch
-#-----------------------------------------------------------------------------
-
-if [ ! -z "${SOLEDAD_REMOTE}" ]; then
-    git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE}
-    git -C ${REPO} fetch origin
-fi
-
-if [ ! -z "${SOLEDAD_BRANCH}" ]; then
-    git -C ${REPO} checkout ${SOLEDAD_BRANCH}
-fi
-
-if [ ! -z "${SOLEDAD_PERF_REMOTE}" ]; then
-    git -C /var/local/soledad-perf remote set-url origin ${SOLEDAD_PERF_REMOTE}
-    git -C /var/local/soledad-perf fetch origin
-fi
-
-if [ ! -z "${SOLEDAD_PERF_BRANCH}" ]; then
-    git -C /var/local/soledad-perf checkout ${SOLEDAD_PERF_BRANCH}
-fi
-
-#-----------------------------------------------------------------------------
-# write a configuration file for the perf test
-#-----------------------------------------------------------------------------
-
-cd /var/local/soledad-perf
-
-cat > defaults.conf < /dev/null &
-sleep 5 # wait a bit for some data points
-
-# run a sync and generate a graph
-make trigger-sync
-make trigger-stop
-make graph-image
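The removed runtime scripts all follow the same convention: behavior is driven entirely by environment variables, so a container (or a local checkout) can point the test at an arbitrary fork and branch. A hypothetical manual invocation of the old perf script could have looked like this (remote URL and branch names are placeholders, not values from the patch):

#!/bin/sh
# Point the test at a fork and branch, then run the perf script directly.
# These values are illustrative; any reachable git remote/branch pair would do.
export SOLEDAD_REMOTE="https://github.com/example/soledad.git"
export SOLEDAD_BRANCH="feature/sync-speedup"
export SOLEDAD_PERF_BRANCH="master"
sh scripts/docker/files/run-perf-test.sh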
diff --git a/scripts/docker/files/setup-env.sh b/scripts/docker/files/setup-env.sh
deleted file mode 100755
index d5aeab7d..00000000
--- a/scripts/docker/files/setup-env.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/bash
-
-# Clone soledad repository and install soledad dependencies needed to run
-# client and server in a test environment.
-#
-# In details, this script does the following:
-#
-# - clone a series of python package repositories into /var/local/soledad.
-# - install dependencies for those packages from the requirements files in
-#   each of the repositories, using python wheels when possible.
-# - install the python packages in development mode
-#
-# The cloned git repositories might have a remote configured and a branch
-# checked out on runtime, before a server, client or test instance is actually
-# run. Check the other scripts in this directory.
-#
-# This script is meant to be copied to the docker container and run after
-# system dependencies have been installed.
-
-BASEDIR="/var/local"
-BASEURL="https://github.com/leapcode"
-
-mkdir -p ${BASEDIR}
-
-# clone repositories
-repos="soledad leap_pycommon soledad-perf"
-
-for repo in ${repos}; do
-    repodir=${BASEDIR}/${repo}
-    if [ ! -d ${repodir} ]; then
-        git clone ${BASEURL}/${repo} ${repodir}
-        git -C ${repodir} fetch origin
-    fi
-done
-
-# use latest pip because the version available in debian jessie doesn't
-# support wheels
-pip install -U pip
-
-pip install psutil
-
-# install dependencies and packages
-install_script="pkg/pip_install_requirements.sh"
-opts="--use-leap-wheels"
-pkgs="leap_pycommon soledad/common soledad/client soledad/server"
-
-for pkg in ${pkgs}; do
-    pkgdir=${BASEDIR}/${pkg}
-    testing=""
-    if [ -f ${pkgdir}/pkg/requirements-testing.pip ]; then
-        testing="--testing"
-    fi
-    (cd ${pkgdir} && ${install_script} ${testing} ${opts})
-    (cd ${pkgdir} && python setup.py develop)
-done
diff --git a/scripts/docker/files/start-client-test.sh b/scripts/docker/files/start-client-test.sh
deleted file mode 100755
index 9dec3371..00000000
--- a/scripts/docker/files/start-client-test.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-# Run a Soledad client connection test.
-#
-# This script is meant to be copied to the docker container and run upon
-# container start.
-
-CMD="/usr/local/soledad/test-env.py"
-REPO="/var/local/soledad"
-
-if [ ! -z "${SOLEDAD_REMOTE}" ]; then
-    git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE}
-    git -C ${REPO} fetch origin
-fi
-
-if [ ! -z "${SOLEDAD_BRANCH}" ]; then
-    git -C ${REPO} checkout ${SOLEDAD_BRANCH}
-fi
-
-${CMD} soledad-client test --server-url ${SOLEDAD_SERVER_URL}
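Because setup-env.sh installs every package with python setup.py develop, the cloned working trees are importable in place. A quick sanity check of that environment, from inside the container, might look like the sketch below (illustrative only; the server URL is a hypothetical value and is not taken from the patch):

#!/bin/sh
# Verify the development installs are importable, then run the client connection test.
python -c "import leap.soledad.client, leap.soledad.server; print('soledad imports ok')"
export SOLEDAD_SERVER_URL="http://localhost:2424"   # hypothetical server address
bash scripts/docker/files/start-client-test.sh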
diff --git a/scripts/docker/files/start-server.sh b/scripts/docker/files/start-server.sh
deleted file mode 100755
index 0980d352..00000000
--- a/scripts/docker/files/start-server.sh
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/bin/sh
-
-# Start a soledad server inside a docker container.
-#
-# This script will:
-#
-# - eventually checkout a specific branch from a specific soledad remote.
-#
-# - create everything a soledad server needs to run (certificate, backend
-#   server database, tables, etc.
-#
-# - eventually preload the server database with a number of documents equal
-#   to SOLEDAD_PRELOAD_NUM, and with payload size equal to
-#   SOLEDAD_PRELOAD_SIZE.
-#
-# - run the soledad server.
-#
-# This script is meant to be copied to the docker container and run upon
-# container start.
-
-CMD="/usr/local/soledad/test-env.py"
-
-#---------------------------------------------------------------------------
-# eventually checkout a specific branch from a specific remote
-#---------------------------------------------------------------------------
-
-REPO="/var/local/soledad"
-
-if [ ! -z "${SOLEDAD_REMOTE}" ]; then
-    git -C ${REPO} remote set-url origin ${SOLEDAD_REMOTE}
-    git -C ${REPO} fetch origin
-fi
-
-if [ ! -z "${SOLEDAD_BRANCH}" ]; then
-    git -C ${REPO} checkout ${SOLEDAD_BRANCH}
-fi
-
-#---------------------------------------------------------------------------
-# setup environment for running soledad server
-#---------------------------------------------------------------------------
-
-${CMD} couch start
-${CMD} user-db create
-${CMD} token-db create
-${CMD} token-db insert-token
-${CMD} shared-db create
-${CMD} cert create
-
-#---------------------------------------------------------------------------
-# write a configuration file for the perf test
-#---------------------------------------------------------------------------
-
-if [ "${SOLEDAD_PRELOAD_NUM}" -gt 0 ]; then
-    cd /var/local/soledad-perf
-
-    cat > defaults.conf <