From f829832457237b7342e510e4112f66819be3ab3d Mon Sep 17 00:00:00 2001
From: drebs
Date: Mon, 30 May 2016 13:45:51 -0300
Subject: [test] add files to create docker image

---
 scripts/docker/Dockerfile                          |  32 ++
 scripts/docker/Makefile                            |  30 +
 scripts/docker/README.md                           |  30 +
 scripts/docker/TODO                                |   3 +
 scripts/docker/files/conf/cert_default.conf        |  15 +
 scripts/docker/files/conf/couchdb_default.ini      | 361 ++++++++++++
 .../docker/files/conf/soledad-server_default.conf  |   5 +
 scripts/docker/files/leap.list                     |   4 +
 scripts/docker/files/setup-env.sh                  |  44 ++
 scripts/docker/files/start-server.sh               |  25 +
 scripts/docker/files/test-env.py                   | 639 +++++++++++++++++++++
 scripts/docker/helper/get-container-ip.sh          |  18 +
 12 files changed, 1206 insertions(+)
 create mode 100644 scripts/docker/Dockerfile
 create mode 100644 scripts/docker/Makefile
 create mode 100644 scripts/docker/README.md
 create mode 100644 scripts/docker/TODO
 create mode 100644 scripts/docker/files/conf/cert_default.conf
 create mode 100644 scripts/docker/files/conf/couchdb_default.ini
 create mode 100644 scripts/docker/files/conf/soledad-server_default.conf
 create mode 100644 scripts/docker/files/leap.list
 create mode 100755 scripts/docker/files/setup-env.sh
 create mode 100755 scripts/docker/files/start-server.sh
 create mode 100755 scripts/docker/files/test-env.py
 create mode 100755 scripts/docker/helper/get-container-ip.sh

diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile
new file mode 100644
index 00000000..8d462db9
--- /dev/null
+++ b/scripts/docker/Dockerfile
@@ -0,0 +1,32 @@
+# start with a fresh debian image
+FROM debian
+
+# expose soledad server port in case we want to run a server container
+EXPOSE 2424
+
+# install dependencies from debian repos
+COPY files/leap.list /etc/apt/sources.list.d/
+
+RUN apt-get update
+RUN apt-get -y --force-yes install leap-archive-keyring
+
+RUN apt-get update
+RUN apt-get -y install git
+RUN apt-get -y install libpython2.7-dev
+RUN apt-get -y install libffi-dev
+RUN apt-get -y install libssl-dev
+RUN apt-get -y install libzmq3-dev
+RUN apt-get -y install python-pip
+RUN apt-get -y install couchdb
+
+# copy over files to help setup the environment and run soledad
+RUN mkdir -p /usr/local/soledad
+RUN mkdir -p /usr/local/soledad/conf
+
+COPY files/setup-env.sh /usr/local/soledad/
+COPY files/test-env.py /usr/local/soledad/
+COPY files/start-server.sh /usr/local/soledad/
+COPY files/conf/* /usr/local/soledad/conf/
+
+# clone repos and install dependencies from leap wheels using pip
+RUN /usr/local/soledad/setup-env.sh
diff --git a/scripts/docker/Makefile b/scripts/docker/Makefile
new file mode 100644
index 00000000..7a80fe02
--- /dev/null
+++ b/scripts/docker/Makefile
@@ -0,0 +1,30 @@
+# Makefile for building the soledad docker image and running test containers
+
+IMAGE_NAME = "leap/soledad:1.0"
+
+all: image
+
+image:
+	docker build -t $(IMAGE_NAME) .
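+
+# Note: the run-server and run-test rules below both reference
+# CONTAINER_ID_FILE, which is not set anywhere else in this Makefile. The
+# value here is an assumed default; override it on the command line if
+# needed, e.g. `make run-server CONTAINER_ID_FILE=/tmp/soledad.cid`.
+CONTAINER_ID_FILE = /tmp/soledad-container.cid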
+ +run-server: image + rm -f $(CONTAINER_ID_FILE) + docker run \ + --env="SOLEDAD_REMOTE=https://0xacab.org/leap/soledad.git" \ + --env="SOLEDAD_BRANCH=develop" \ + --cidfile=$(CONTAINER_ID_FILE) \ + --detach \ + $(IMAGE_NAME) \ + /usr/local/soledad/start-server.sh + +# TODO: the following rule does not work for now, we have to add a +# `start-test.sh` file +run-test: image + container_id=`cat $(CONTAINER_ID_FILE)`; \ + server_ip=`./helper/get-container-ip.sh $${container_id}`; \ + docker run \ + --env="SOLEDAD_REMOTE=https://0xacab.org/leap/soledad.git" \ + --env="SOLEDAD_BRANCH=develop" \ + --env="SOLEDAD_SERVER_IP=$${server_ip}" \ + $(IMAGE_NAME) \ + /usr/local/soledad/start-test.sh diff --git a/scripts/docker/README.md b/scripts/docker/README.md new file mode 100644 index 00000000..d15129fa --- /dev/null +++ b/scripts/docker/README.md @@ -0,0 +1,30 @@ +Soledad Docker Images +===================== + +The files in this directory help create a docker image that is usable for +running soledad server and client in an isolated docker context. This is +especially useful for testing purposes as you can limit/reserve a certain +amount of resources for the soledad process, and thus provide a baseline for +comparison of time and resource consumption between distinct runs. + +Check the `Dockerfile` for the rules for building the docker image. + +Check the `Makefile` for example usage of the files in this directory. + + +Environment variables for server script +--------------------------------------- + +If you want to run the image for testing you may pass the following +environment variables for the `files/start-server.sh` script for checking out +a specific branch on the soledad repository: + + SOLEDAD_REMOTE - a git url for a remote repository that is added at run time + to the local soledad git repository. + + SOLEDAD_BRANCH - the name of a branch to be checked out from the configured + remote repository. + +Example: + + docker run leap/soledad:1.0 /usr/local/soledad/start-server.sh diff --git a/scripts/docker/TODO b/scripts/docker/TODO new file mode 100644 index 00000000..75d45a8e --- /dev/null +++ b/scripts/docker/TODO @@ -0,0 +1,3 @@ +- limit resources of containers (mem and cpu) +- add a file to run tests inside container +- use server ip to run test diff --git a/scripts/docker/files/conf/cert_default.conf b/scripts/docker/files/conf/cert_default.conf new file mode 100644 index 00000000..8043cea3 --- /dev/null +++ b/scripts/docker/files/conf/cert_default.conf @@ -0,0 +1,15 @@ +[ req ] +default_bits = 1024 +default_keyfile = keyfile.pem +distinguished_name = req_distinguished_name +prompt = no +output_password = mypass + +[ req_distinguished_name ] +C = GB +ST = Test State or Province +L = Test Locality +O = Organization Name +OU = Organizational Unit Name +CN = localhost +emailAddress = test@email.address diff --git a/scripts/docker/files/conf/couchdb_default.ini b/scripts/docker/files/conf/couchdb_default.ini new file mode 100644 index 00000000..5ab72d7b --- /dev/null +++ b/scripts/docker/files/conf/couchdb_default.ini @@ -0,0 +1,361 @@ +; etc/couchdb/default.ini.tpl. Generated from default.ini.tpl.in by configure. + +; Upgrading CouchDB will overwrite this file. +[vendor] +name = The Apache Software Foundation +version = 1.6.0 + +[couchdb] +database_dir = BASEDIR +view_index_dir = BASEDIR +util_driver_dir = /usr/lib/x86_64-linux-gnu/couchdb/erlang/lib/couch-1.6.0/priv/lib +max_document_size = 4294967296 ; 4 GB +os_process_timeout = 5000 ; 5 seconds. for view and external servers. 
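+; Note: the literal BASEDIR strings in this template (database_dir and
+; view_index_dir above, uri_file and the log file below) are placeholders,
+; not valid paths; test-env.py replaces them with the actual couch base
+; directory when it generates the runtime configuration file.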
+max_dbs_open = 100 +delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned +uri_file = BASEDIR/couch.uri +; Method used to compress everything that is appended to database and view index files, except +; for attachments (see the attachments section). Available methods are: +; +; none - no compression +; snappy - use google snappy, a very fast compressor/decompressor +uuid = bc2f8b84ecb0b13a31cf7f6881a52194 + +; deflate_[N] - use zlib's deflate, N is the compression level which ranges from 1 (fastest, +; lowest compression ratio) to 9 (slowest, highest compression ratio) +file_compression = snappy +; Higher values may give better read performance due to less read operations +; and/or more OS page cache hits, but they can also increase overall response +; time for writes when there are many attachment write requests in parallel. +attachment_stream_buffer_size = 4096 + +plugin_dir = /usr/lib/x86_64-linux-gnu/couchdb/plugins + +[database_compaction] +; larger buffer sizes can originate smaller files +doc_buffer_size = 524288 ; value in bytes +checkpoint_after = 5242880 ; checkpoint after every N bytes were written + +[view_compaction] +; larger buffer sizes can originate smaller files +keyvalue_buffer_size = 2097152 ; value in bytes + +[httpd] +port = 5984 +bind_address = 127.0.0.1 +authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler} +default_handler = {couch_httpd_db, handle_request} +secure_rewrites = true +vhost_global_handlers = _utils, _uuids, _session, _oauth, _users +allow_jsonp = false +; Options for the MochiWeb HTTP server. +;server_options = [{backlog, 128}, {acceptor_pool_size, 16}] +; For more socket options, consult Erlang's module 'inet' man page. +;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}] +socket_options = [{recbuf, 262144}, {sndbuf, 262144}] +log_max_chunk_size = 1000000 +enable_cors = false +; CouchDB can optionally enforce a maximum uri length; +; max_uri_length = 8000 + +[ssl] +port = 6984 + +[log] +file = BASEDIR/couch.log +level = info +include_sasl = true + +[couch_httpd_auth] +authentication_db = _users +authentication_redirect = /_utils/session.html +require_valid_user = false +timeout = 600 ; number of seconds before automatic logout +auth_cache_size = 50 ; size is number of cache entries +allow_persistent_cookies = false ; set to true to allow persistent cookies +iterations = 10 ; iterations for password hashing +; min_iterations = 1 +; max_iterations = 1000000000 +; comma-separated list of public fields, 404 if empty +; public_fields = + +[cors] +credentials = false +; List of origins separated by a comma, * means accept all +; Origins must include the scheme: http://example.com +; You can’t set origins: * and credentials = true at the same time. +;origins = * +; List of accepted headers separated by a comma +; headers = +; List of accepted methods +; methods = + + +; Configuration for a vhost +;[cors:http://example.com] +; credentials = false +; List of origins separated by a comma +; Origins must include the scheme: http://example.com +; You can’t set origins: * and credentials = true at the same time. +;origins = +; List of accepted headers separated by a comma +; headers = +; List of accepted methods +; methods = + +[couch_httpd_oauth] +; If set to 'true', oauth token and consumer secrets will be looked up +; in the authentication database (_users). 
These secrets are stored in +; a top level property named "oauth" in user documents. Example: +; { +; "_id": "org.couchdb.user:joe", +; "type": "user", +; "name": "joe", +; "password_sha": "fe95df1ca59a9b567bdca5cbaf8412abd6e06121", +; "salt": "4e170ffeb6f34daecfd814dfb4001a73" +; "roles": ["foo", "bar"], +; "oauth": { +; "consumer_keys": { +; "consumerKey1": "key1Secret", +; "consumerKey2": "key2Secret" +; }, +; "tokens": { +; "token1": "token1Secret", +; "token2": "token2Secret" +; } +; } +; } +use_users_db = false + +[query_servers] +javascript = /usr/bin/couchjs /usr/share/couchdb/server/main.js +coffeescript = /usr/bin/couchjs /usr/share/couchdb/server/main-coffee.js + + +; Changing reduce_limit to false will disable reduce_limit. +; If you think you're hitting reduce_limit with a "good" reduce function, +; please let us know on the mailing list so we can fine tune the heuristic. +[query_server_config] +reduce_limit = true +os_process_limit = 25 + +[daemons] +index_server={couch_index_server, start_link, []} +external_manager={couch_external_manager, start_link, []} +query_servers={couch_query_servers, start_link, []} +vhosts={couch_httpd_vhost, start_link, []} +httpd={couch_httpd, start_link, []} +stats_aggregator={couch_stats_aggregator, start, []} +stats_collector={couch_stats_collector, start, []} +uuids={couch_uuids, start, []} +auth_cache={couch_auth_cache, start_link, []} +replicator_manager={couch_replicator_manager, start_link, []} +os_daemons={couch_os_daemons, start_link, []} +compaction_daemon={couch_compaction_daemon, start_link, []} + +[httpd_global_handlers] +/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>} +favicon.ico = {couch_httpd_misc_handlers, handle_favicon_req, "/usr/share/couchdb/www"} + +_utils = {couch_httpd_misc_handlers, handle_utils_dir_req, "/usr/share/couchdb/www"} +_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req} +_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req} +_config = {couch_httpd_misc_handlers, handle_config_req} +_replicate = {couch_replicator_httpd, handle_req} +_uuids = {couch_httpd_misc_handlers, handle_uuids_req} +_restart = {couch_httpd_misc_handlers, handle_restart_req} +_stats = {couch_httpd_stats_handlers, handle_stats_req} +_log = {couch_httpd_misc_handlers, handle_log_req} +_session = {couch_httpd_auth, handle_session_req} +_oauth = {couch_httpd_oauth, handle_oauth_req} +_db_updates = {couch_dbupdates_httpd, handle_req} +_plugins = {couch_plugins_httpd, handle_req} + +[httpd_db_handlers] +_all_docs = {couch_mrview_http, handle_all_docs_req} +_changes = {couch_httpd_db, handle_changes_req} +_compact = {couch_httpd_db, handle_compact_req} +_design = {couch_httpd_db, handle_design_req} +_temp_view = {couch_mrview_http, handle_temp_view_req} +_view_cleanup = {couch_mrview_http, handle_cleanup_req} + +; The external module takes an optional argument allowing you to narrow it to a +; single script. Otherwise the script name is inferred from the first path section +; after _external's own path. 
+; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>} +; _external = {couch_httpd_external, handle_external_req} + +[httpd_design_handlers] +_compact = {couch_mrview_http, handle_compact_req} +_info = {couch_mrview_http, handle_info_req} +_list = {couch_mrview_show, handle_view_list_req} +_rewrite = {couch_httpd_rewrite, handle_rewrite_req} +_show = {couch_mrview_show, handle_doc_show_req} +_update = {couch_mrview_show, handle_doc_update_req} +_view = {couch_mrview_http, handle_view_req} + +; enable external as an httpd handler, then link it with commands here. +; note, this api is still under consideration. +; [external] +; mykey = /path/to/mycommand + +; Here you can setup commands for CouchDB to manage +; while it is alive. It will attempt to keep each command +; alive if it exits. +; [os_daemons] +; some_daemon_name = /path/to/script -with args + + +[uuids] +; Known algorithms: +; random - 128 bits of random awesome +; All awesome, all the time. +; sequential - monotonically increasing ids with random increments +; First 26 hex characters are random. Last 6 increment in +; random amounts until an overflow occurs. On overflow, the +; random prefix is regenerated and the process starts over. +; utc_random - Time since Jan 1, 1970 UTC with microseconds +; First 14 characters are the time in hex. Last 18 are random. +; utc_id - Time since Jan 1, 1970 UTC with microseconds, plus utc_id_suffix string +; First 14 characters are the time in hex. uuids/utc_id_suffix string value is appended to these. +algorithm = sequential +; The utc_id_suffix value will be appended to uuids generated by the utc_id algorithm. +; Replicating instances should have unique utc_id_suffix values to ensure uniqueness of utc_id ids. +utc_id_suffix = +# Maximum number of UUIDs retrievable from /_uuids in a single request +max_count = 1000 + +[stats] +; rate is in milliseconds +rate = 1000 +; sample intervals are in seconds +samples = [0, 60, 300, 900] + +[attachments] +compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression +compressible_types = text/*, application/javascript, application/json, application/xml + +[replicator] +db = _replicator +; Maximum replicaton retry count can be a non-negative integer or "infinity". +max_replication_retry_count = 10 +; More worker processes can give higher network throughput but can also +; imply more disk and network IO. +worker_processes = 4 +; With lower batch sizes checkpoints are done more frequently. Lower batch sizes +; also reduce the total amount of used RAM memory. +worker_batch_size = 500 +; Maximum number of HTTP connections per replication. +http_connections = 20 +; HTTP connection timeout per replication. +; Even for very fast/reliable networks it might need to be increased if a remote +; database is too busy. +connection_timeout = 30000 +; If a request fails, the replicator will retry it up to N times. +retries_per_request = 10 +; Some socket options that might boost performance in some scenarios: +; {nodelay, boolean()} +; {sndbuf, integer()} +; {recbuf, integer()} +; {priority, integer()} +; See the `inet` Erlang module's man page for the full list of options. +socket_options = [{keepalive, true}, {nodelay, false}] +; Path to a file containing the user's certificate. +;cert_file = /full/path/to/server_cert.pem +; Path to file containing user's private PEM encoded key. +;key_file = /full/path/to/server_key.pem +; String containing the user's password. Only used if the private keyfile is password protected. 
+;password = somepassword +; Set to true to validate peer certificates. +verify_ssl_certificates = false +; File containing a list of peer trusted certificates (in the PEM format). +;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt +; Maximum peer certificate depth (must be set even if certificate validation is off). +ssl_certificate_max_depth = 3 + +[compaction_daemon] +; The delay, in seconds, between each check for which database and view indexes +; need to be compacted. +check_interval = 300 +; If a database or view index file is smaller then this value (in bytes), +; compaction will not happen. Very small files always have a very high +; fragmentation therefore it's not worth to compact them. +min_file_size = 131072 + +[compactions] +; List of compaction rules for the compaction daemon. +; The daemon compacts databases and their respective view groups when all the +; condition parameters are satisfied. Configuration can be per database or +; global, and it has the following format: +; +; database_name = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ] +; _default = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ] +; +; Possible parameters: +; +; * db_fragmentation - If the ratio (as an integer percentage), of the amount +; of old data (and its supporting metadata) over the database +; file size is equal to or greater then this value, this +; database compaction condition is satisfied. +; This value is computed as: +; +; (file_size - data_size) / file_size * 100 +; +; The data_size and file_size values can be obtained when +; querying a database's information URI (GET /dbname/). +; +; * view_fragmentation - If the ratio (as an integer percentage), of the amount +; of old data (and its supporting metadata) over the view +; index (view group) file size is equal to or greater then +; this value, then this view index compaction condition is +; satisfied. This value is computed as: +; +; (file_size - data_size) / file_size * 100 +; +; The data_size and file_size values can be obtained when +; querying a view group's information URI +; (GET /dbname/_design/groupname/_info). +; +; * from _and_ to - The period for which a database (and its view groups) compaction +; is allowed. The value for these parameters must obey the format: +; +; HH:MM - HH:MM (HH in [0..23], MM in [0..59]) +; +; * strict_window - If a compaction is still running after the end of the allowed +; period, it will be canceled if this parameter is set to 'true'. +; It defaults to 'false' and it's meaningful only if the *period* +; parameter is also specified. +; +; * parallel_view_compaction - If set to 'true', the database and its views are +; compacted in parallel. This is only useful on +; certain setups, like for example when the database +; and view index directories point to different +; disks. It defaults to 'false'. +; +; Before a compaction is triggered, an estimation of how much free disk space is +; needed is computed. This estimation corresponds to 2 times the data size of +; the database or view index. When there's not enough free disk space to compact +; a particular database or view index, a warning message is logged. +; +; Examples: +; +; 1) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}] +; The `foo` database is compacted if its fragmentation is 70% or more. +; Any view index of this database is compacted only if its fragmentation +; is 60% or more. 
+; +; 2) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}] +; Similar to the preceding example but a compaction (database or view index) +; is only triggered if the current time is between midnight and 4 AM. +; +; 3) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}] +; Similar to the preceding example - a compaction (database or view index) +; is only triggered if the current time is between midnight and 4 AM. If at +; 4 AM the database or one of its views is still compacting, the compaction +; process will be canceled. +; +; 4) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}, {parallel_view_compaction, true}] +; Similar to the preceding example, but a database and its views can be +; compacted in parallel. +; +;_default = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "23:00"}, {to, "04:00"}] diff --git a/scripts/docker/files/conf/soledad-server_default.conf b/scripts/docker/files/conf/soledad-server_default.conf new file mode 100644 index 00000000..5e286374 --- /dev/null +++ b/scripts/docker/files/conf/soledad-server_default.conf @@ -0,0 +1,5 @@ +[soledad-server] +couch_url = http://localhost:5984 +create_cmd = sudo -u soledad-admin /usr/bin/create-user-db +admin_netrc = /etc/couchdb/couchdb-soledad-admin.netrc +batching = 0 diff --git a/scripts/docker/files/leap.list b/scripts/docker/files/leap.list new file mode 100644 index 00000000..7eb474d8 --- /dev/null +++ b/scripts/docker/files/leap.list @@ -0,0 +1,4 @@ +# This file is meant to be copied into the `/etc/apt/sources.list.d` directory +# inside a docker image to provide a source for leap-specific packages. + +deb http://deb.leap.se/0.8 jessie main diff --git a/scripts/docker/files/setup-env.sh b/scripts/docker/files/setup-env.sh new file mode 100755 index 00000000..c98a6d08 --- /dev/null +++ b/scripts/docker/files/setup-env.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +# Clone soledad repository and install soledad dependencies needed to run +# client and server in a test environment. +# +# This script is meant to be copied to the docker container and run after +# system dependencies have been installed. + +BASEDIR="/var/local" +BASEURL="https://github.com/leapcode" + +mkdir -p ${BASEDIR} + +# clone repositories +repos="soledad leap_pycommon" + +for repo in ${repos}; do + repodir=${BASEDIR}/${repo} + if [ ! -d ${repodir} ]; then + git clone ${BASEURL}/${repo} ${repodir} + git -C ${repodir} fetch origin + fi +done + +# use latest pip because the version available in debian jessie doesn't +# support wheels +pip install -U pip + +pip install psutil + +# install dependencies and packages +install_script="pkg/pip_install_requirements.sh" +opts="--use-leap-wheels" +pkgs="leap_pycommon soledad/common soledad/client soledad/server" + +for pkg in ${pkgs}; do + pkgdir=${BASEDIR}/${pkg} + testing="" + if [ -f ${pkgdir}/pkg/requirements-testing.pip ]; then + testing="--testing" + fi + (cd ${pkgdir} && ${install_script} ${testing} ${opts}) + (cd ${pkgdir} && python setup.py develop) +done diff --git a/scripts/docker/files/start-server.sh b/scripts/docker/files/start-server.sh new file mode 100755 index 00000000..ea14aa5a --- /dev/null +++ b/scripts/docker/files/start-server.sh @@ -0,0 +1,25 @@ +#!/bin/sh + +# Start a soledad server inside a docker container. +# +# This script is meant to be copied to the docker container and run upon +# container start. 
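+#
+# Two optional environment variables, documented in README.md, are honored:
+# SOLEDAD_REMOTE, a git url added as a remote to the local soledad
+# repository at run time, and SOLEDAD_BRANCH, the branch checked out from
+# that repository before the server is started.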
+ +CMD="/usr/local/soledad/test-env.py" +REPO="/var/local/soledad" + +if [ ! -z "${SOLEDAD_REMOTE}" ]; then + git -C ${REPO} remote add test ${SOLEDAD_REMOTE} + git -C ${REPO} fetch test +fi + +if [ ! -z "${SOLEDAD_BRANCH}" ]; then + git -C ${REPO} checkout ${SOLEDAD_BRANCH} +fi + +${CMD} couch start +${CMD} user-db create +${CMD} token-db create +${CMD} token-db insert-token +${CMD} cert create +${CMD} soledad-server start --no-daemonize diff --git a/scripts/docker/files/test-env.py b/scripts/docker/files/test-env.py new file mode 100755 index 00000000..6ff0a4ba --- /dev/null +++ b/scripts/docker/files/test-env.py @@ -0,0 +1,639 @@ +#!/usr/bin/env python + + +""" +This script knows how to build a minimum environment for Soledad Server, which +includes the following: + + - Couch server startup + - Token and shared database initialization + - Soledad Server startup + +Options can be passed for configuring the different environments, so this may +be used by other programs to setup different environments for arbitrary tests. +Use the --help option to get information on usage. + +For some commands you will need an environment with Soledad python packages +available, thus you might want to explicitly call python and not rely in the +shebang line. +""" + + +import time +import os +import signal +import tempfile +import psutil +from argparse import ArgumentParser +from subprocess import call +from couchdb import Server +from couchdb.http import PreconditionFailed +from couchdb.http import ResourceConflict +from couchdb.http import ResourceNotFound +from hashlib import sha512 +from u1db.errors import DatabaseDoesNotExist + + +# +# Utilities +# + +def get_pid(pidfile): + if not os.path.isfile(pidfile): + return 0 + try: + with open(pidfile) as f: + return int(f.read()) + except IOError: + return 0 + + +def pid_is_running(pid): + try: + psutil.Process(pid) + return True + except psutil.NoSuchProcess: + return False + + +def pidfile_is_running(pidfile): + try: + pid = get_pid(pidfile) + psutil.Process(pid) + return pid + except psutil.NoSuchProcess: + return False + + +def status_from_pidfile(args, default_basedir): + basedir = _get_basedir(args, default_basedir) + pidfile = os.path.join(basedir, args.pidfile) + try: + pid = get_pid(pidfile) + psutil.Process(pid) + print "[+] running - pid: %d" % pid + except (IOError, psutil.NoSuchProcess): + print "[-] stopped" + + +def kill_all_executables(args): + basename = os.path.basename(args.executable) + pids = [int(pid) for pid in os.listdir('/proc') if pid.isdigit()] + for pid in pids: + try: + p = psutil.Process(pid) + if p.name() == basename: + print '[!] 
killing - pid: %d' % pid + os.kill(pid, signal.SIGKILL) + except: + pass + + +# +# Couch Server control +# + +COUCH_EXECUTABLE = '/usr/bin/couchdb' +ERLANG_EXECUTABLE = 'beam.smp' +COUCH_TEMPLATE = os.path.join( + os.path.dirname(os.path.realpath(__file__)), + './conf/couchdb_default.ini') +COUCH_TEMPLATE +COUCH_PIDFILE = 'couchdb.pid' +COUCH_LOGFILE = 'couchdb.log' +COUCH_PORT = 5984 +COUCH_HOST = '127.0.0.1' +COUCH_BASEDIR = '/tmp/couch_test' + + +def _get_basedir(args, default): + basedir = args.basedir + if not basedir: + basedir = default + if not os.path.isdir(basedir): + os.mkdir(basedir) + return basedir + + +def couch_server_start(args): + basedir = _get_basedir(args, COUCH_BASEDIR) + pidfile = os.path.join(basedir, args.pidfile) + logfile = os.path.join(basedir, args.logfile) + + # check if already running + pid = get_pid(pidfile) + if pid_is_running(pid): + print '[*] error: already running - pid: %d' % pid + exit(1) + if os.path.isfile(pidfile): + os.unlink(pidfile) + + # generate a config file from template if needed + config_file = args.config_file + if not config_file: + config_file = tempfile.mktemp(prefix='couch_config_', dir=basedir) + lines = [] + with open(args.template) as f: + lines = f.readlines() + lines = map(lambda l: l.replace('BASEDIR', basedir), lines) + with open(config_file, 'w') as f: + f.writelines(lines) + + # start couch server + try: + call([ + args.executable, + '-n', # reset configuration file chain (including system default) + '-a %s' % config_file, # add configuration FILE to chain + '-b', # spawn as a background process + '-p %s' % pidfile, # set the background PID FILE + '-o %s' % logfile, # redirect background stdout to FILE + '-e %s' % logfile]) # redirect background stderr to FILE + except Exception as e: + print '[*] error: could not start couch server - %s' % str(e) + exit(1) + + # couch may take a bit to store the pid in the pidfile, so we just wait + # until it does + pid = None + while not pid: + try: + pid = get_pid(pidfile) + break + except: + time.sleep(0.1) + + print '[+] running - pid: %d' % pid + + +def couch_server_stop(args): + basedir = _get_basedir(args, COUCH_BASEDIR) + pidfile = os.path.join(basedir, args.pidfile) + pid = get_pid(pidfile) + if not pid_is_running(pid): + print '[*] error: no running server found' + exit(1) + call([ + args.executable, + '-p %s' % pidfile, # set the background PID FILE + '-k']) # kill the background process, will respawn if needed + print '[-] stopped - pid: %d ' % pid + + +def couch_status_from_pidfile(args): + status_from_pidfile(args, COUCH_BASEDIR) + + +# +# User DB maintenance # +# + +def user_db_create(args): + from leap.soledad.common.couch import CouchDatabase + url = 'http://localhost:%d/user-%s' % (args.port, args.uuid) + try: + CouchDatabase.open_database( + url=url, create=False, replica_uid=None, ensure_ddocs=True) + print '[*] error: database "user-%s" already exists' % args.uuid + exit(1) + except DatabaseDoesNotExist: + CouchDatabase.open_database( + url=url, create=True, replica_uid=None, ensure_ddocs=True) + print '[+] database created: user-%s' % args.uuid + + +def user_db_delete(args): + s = _couch_get_server(args) + try: + dbname = 'user-%s' % args.uuid + s.delete(dbname) + print '[-] database deleted: %s' % dbname + except ResourceNotFound: + print '[*] error: database "%s" does not exist' % dbname + exit(1) + + +# +# Soledad Server control +# + +TWISTD_EXECUTABLE = 'twistd' # use whatever is available on path + +SOLEDAD_SERVER_BASEDIR = '/tmp/soledad_server_test' 
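+# SOLEDAD_SERVER_BASEDIR above and the constants below are defaults for the
+# command line options of the server-related subcommands; start-server.sh,
+# for instance, relies on them when running:
+#
+#   ./test-env.py cert create
+#   ./test-env.py soledad-server start --no-daemonize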
+SOLEDAD_SERVER_CONFIG_FILE = './conf/soledad_default.ini' +SOLEDAD_SERVER_PIDFILE = 'soledad.pid' +SOLEDAD_SERVER_LOGFILE = 'soledad.log' +SOLEDAD_SERVER_PRIVKEY = 'soledad_privkey.pem' +SOLEDAD_SERVER_CERTKEY = 'soledad_certkey.pem' +SOLEDAD_SERVER_PORT = 2424 +SOLEDAD_SERVER_AUTH_TOKEN = 'an-auth-token' +SOLEDAD_SERVER_URL = 'https://localhost:2424' + +SOLEDAD_CLIENT_PASS = '12345678' +SOLEDAD_CLIENT_BASEDIR = '/tmp/soledad_client_test' +SOLEDAD_CLIENT_UUID = '1234567890abcdef' + + +def soledad_server_start(args): + basedir = _get_basedir(args, SOLEDAD_SERVER_BASEDIR) + pidfile = os.path.join(basedir, args.pidfile) + logfile = os.path.join(basedir, args.logfile) + private_key = os.path.join(basedir, args.private_key) + cert_key = os.path.join(basedir, args.cert_key) + + pid = get_pid(pidfile) + if pid_is_running(pid): + pid = get_pid(pidfile) + print "[*] error: already running - pid: %d" % pid + exit(1) + + port = args.port + if args.tls: + port = 'ssl:%d:privateKey=%s:certKey=%s:sslmethod=SSLv23_METHOD' \ + % (args.port, private_key, cert_key) + params = [ + '--logfile=%s' % logfile, + '--pidfile=%s' % pidfile, + 'web', + '--wsgi=leap.soledad.server.application', + '--port=%s' % port + ] + if args.no_daemonize: + params.insert(0, '--nodaemon') + + call([args.executable] + params) + + pid = get_pid(pidfile) + print '[+] running - pid: %d' % pid + + +def soledad_server_stop(args): + basedir = _get_basedir(args, SOLEDAD_SERVER_BASEDIR) + pidfile = os.path.join(basedir, args.pidfile) + pid = get_pid(pidfile) + if not pid_is_running(pid): + print '[*] error: no running server found' + exit(1) + os.kill(pid, signal.SIGKILL) + print '[-] stopped - pid: %d' % pid + + +def soledad_server_status_from_pidfile(args): + status_from_pidfile(args, SOLEDAD_SERVER_BASEDIR) + + +# couch helpers + +def _couch_get_server(args): + url = 'http://%s:%d/' % (args.host, args.port) + return Server(url=url) + + +def _couch_create_db(args, dbname): + s = _couch_get_server(args) + # maybe create the database + try: + s.create(dbname) + print '[+] database created: %s' % dbname + except PreconditionFailed as e: + error_code, _ = e.message + if error_code == 'file_exists': + print '[*] error: "%s" database already exists' % dbname + exit(1) + return s + + +def _couch_delete_db(args, dbname): + s = _couch_get_server(args) + # maybe create the database + try: + s.delete(dbname) + print '[-] database deleted: %s' % dbname + except ResourceNotFound: + print '[*] error: "%s" database does not exist' % dbname + exit(1) + + +def _token_dbname(): + dbname = 'tokens_' + \ + str(int(time.time() / (30 * 24 * 3600))) + return dbname + + +def token_db_create(args): + dbname = _token_dbname() + _couch_create_db(args, dbname) + + +def token_db_insert_token(args): + s = _couch_get_server(args) + try: + dbname = _token_dbname() + db = s[dbname] + token = sha512(args.auth_token).hexdigest() + db[token] = { + 'type': 'Token', + 'user_id': args.uuid, + } + print '[+] token for uuid "%s" created in tokens database' % args.uuid + except ResourceConflict: + print '[*] error: token for uuid "%s" already exists in tokens database' \ + % args.uuid + exit(1) + + +def token_db_delete(args): + dbname = _token_dbname() + _couch_delete_db(args, dbname) + + +# +# Shared DB creation +# + +def shared_db_create(args): + _couch_create_db(args, 'shared') + + +def shared_db_delete(args): + _couch_delete_db(args, 'shared') + + +# +# Certificate creation +# + +CERT_CONFIG_FILE = os.path.join( + os.path.dirname(os.path.realpath(__file__)), + 
'./conf/cert_default.conf') + + +def cert_create(args): + private_key = os.path.join(args.basedir, args.private_key) + cert_key = os.path.join(args.basedir, args.cert_key) + call([ + 'openssl', + 'req', + '-x509', + '-sha256', + '-nodes', + '-days', '365', + '-newkey', 'rsa:2048', + '-config', args.config_file, + '-keyout', private_key, + '-out', cert_key]) + + +def cert_delete(args): + private_key = os.path.join(args.basedir, args.private_key) + cert_key = os.path.join(args.basedir, args.cert_key) + os.unlink(private_key) + os.unlink(cert_key) + + +# +# Soledad Client Control +# + +def soledad_client_test(args): + + # maybe infer missing parameters + basedir = args.basedir + if not basedir: + basedir = tempfile.mkdtemp() + server_url = args.server_url + if not server_url: + server_url = 'http://127.0.0.1:%d' % args.port + + # get a soledad instance + from client_side_db import _get_soledad_instance + _get_soledad_instance( + args.uuid, + unicode(args.passphrase), + basedir, + server_url, + args.cert_key, + args.auth_token) + + +# +# Command Line Interface +# + +class Command(object): + + def __init__(self, parser=ArgumentParser()): + self.commands = [] + self.parser = parser + self.subparsers = None + + def add_command(self, *args, **kwargs): + # pop out the func parameter to use later + func = None + if 'func' in kwargs.keys(): + func = kwargs.pop('func') + # eventually create a subparser + if not self.subparsers: + self.subparsers = self.parser.add_subparsers() + # create command and associate a function with it + command = Command(self.subparsers.add_parser(*args, **kwargs)) + if func: + command.parser.set_defaults(func=func) + self.commands.append(command) + return command + + def set_func(self, func): + self.parser.set_defaults(func=func) + + def add_argument(self, *args, **kwargs): + self.parser.add_argument(*args, **kwargs) + + def add_arguments(self, arglist): + for args, kwargs in arglist: + self.add_argument(*args, **kwargs) + + def parse_args(self): + return self.parser.parse_args() + + +# +# Command Line Interface +# + +def run_cli(): + cli = Command() + + # couch command with subcommands + cmd_couch = cli.add_command('couch', help="manage couch server") + + cmd_couch_start = cmd_couch.add_command('start', func=couch_server_start) + cmd_couch_start.add_arguments([ + (['--executable', '-e'], {'default': COUCH_EXECUTABLE}), + (['--basedir', '-b'], {}), + (['--config-file', '-c'], {}), + (['--template', '-t'], {'default': COUCH_TEMPLATE}), + (['--pidfile', '-p'], {'default': COUCH_PIDFILE}), + (['--logfile', '-l'], {'default': COUCH_LOGFILE}) + ]) + + cmd_couch_stop = cmd_couch.add_command('stop', func=couch_server_stop) + cmd_couch_stop.add_arguments([ + (['--executable', '-e'], {'default': COUCH_EXECUTABLE}), + (['--basedir', '-b'], {}), + (['--pidfile', '-p'], {'default': COUCH_PIDFILE}), + ]) + + cmd_couch_status = cmd_couch.add_command( + 'status', func=couch_status_from_pidfile) + cmd_couch_status.add_arguments([ + (['--basedir', '-b'], {}), + (['--pidfile', '-p'], {'default': COUCH_PIDFILE})]) + + cmd_couch_kill = cmd_couch.add_command('kill', func=kill_all_executables) + cmd_couch_kill.add_argument( + '--executable', '-e', default=ERLANG_EXECUTABLE) + + # user database maintenance + cmd_user_db = cli.add_command('user-db') + + cmd_user_db_create = cmd_user_db.add_command('create', func=user_db_create) + cmd_user_db_create.add_arguments([ + (['--host', '-H'], {'default': COUCH_HOST}), + (['--port', '-P'], {'type': int, 'default': COUCH_PORT}), + (['--uuid', '-u'], 
{'default': SOLEDAD_CLIENT_UUID}), + ]) + + cmd_user_db_create = cmd_user_db.add_command( + 'delete', func=user_db_delete) + cmd_user_db_create.add_arguments([ + (['--host', '-H'], {'default': COUCH_HOST}), + (['--port', '-P'], {'type': int, 'default': COUCH_PORT}), + (['--uuid', '-u'], {'default': SOLEDAD_CLIENT_UUID}) + ]) + + # soledad server command with subcommands + cmd_sol_server = cli.add_command( + 'soledad-server', help="manage soledad server") + + cmd_sol_server_start = cmd_sol_server.add_command( + 'start', func=soledad_server_start) + cmd_sol_server_start.add_arguments([ + (['--executable', '-e'], {'default': TWISTD_EXECUTABLE}), + (['--config-file', '-c'], {'default': SOLEDAD_SERVER_CONFIG_FILE}), + (['--pidfile', '-p'], {'default': SOLEDAD_SERVER_PIDFILE}), + (['--logfile', '-l'], {'default': SOLEDAD_SERVER_LOGFILE}), + (['--port', '-P'], {'type': int, 'default': SOLEDAD_SERVER_PORT}), + (['--tls', '-t'], {'action': 'store_true'}), + (['--private-key', '-K'], {'default': SOLEDAD_SERVER_PRIVKEY}), + (['--cert-key', '-C'], {'default': SOLEDAD_SERVER_CERTKEY}), + (['--no-daemonize', '-n'], {'action': 'store_true'}), + (['--basedir', '-b'], {'default': SOLEDAD_SERVER_BASEDIR}), + ]) + + cmd_sol_server_stop = cmd_sol_server.add_command( + 'stop', func=soledad_server_stop) + cmd_sol_server_stop.add_arguments([ + (['--basedir', '-b'], {'default': SOLEDAD_SERVER_BASEDIR}), + (['--pidfile', '-p'], {'default': SOLEDAD_SERVER_PIDFILE}), + ]) + + cmd_sol_server_status = cmd_sol_server.add_command( + 'status', func=soledad_server_status_from_pidfile) + cmd_sol_server_status.add_arguments([ + (['--basedir', '-b'], {'default': SOLEDAD_SERVER_BASEDIR}), + (['--pidfile', '-p'], {'default': SOLEDAD_SERVER_PIDFILE}), + ]) + + cmd_sol_server_kill = cmd_sol_server.add_command( + 'kill', func=kill_all_executables) + cmd_sol_server_kill.add_argument( + '--executable', '-e', default=TWISTD_EXECUTABLE) + + # token db maintenance + cmd_token_db = cli.add_command('token-db') + cmd_token_db_create = cmd_token_db.add_command( + 'create', func=token_db_create) + cmd_token_db_create.add_arguments([ + (['--host', '-H'], {'default': COUCH_HOST}), + (['--uuid', '-u'], {'default': SOLEDAD_CLIENT_UUID}), + (['--port', '-P'], {'type': int, 'default': COUCH_PORT}), + ]) + + cmd_token_db_insert_token = cmd_token_db.add_command( + 'insert-token', func=token_db_insert_token) + cmd_token_db_insert_token.add_arguments([ + (['--host', '-H'], {'default': COUCH_HOST}), + (['--uuid', '-u'], {'default': SOLEDAD_CLIENT_UUID}), + (['--port', '-P'], {'type': int, 'default': COUCH_PORT}), + (['--auth-token', '-a'], {'default': SOLEDAD_SERVER_AUTH_TOKEN}), + ]) + + cmd_token_db_delete = cmd_token_db.add_command( + 'delete', func=token_db_delete) + cmd_token_db_delete.add_arguments([ + (['--host', '-H'], {'default': COUCH_HOST}), + (['--uuid', '-u'], {'default': SOLEDAD_CLIENT_UUID}), + (['--port', '-P'], {'type': int, 'default': COUCH_PORT}), + ]) + + # shared db creation + cmd_shared_db = cli.add_command('shared-db') + + cmd_shared_db_create = cmd_shared_db.add_command( + 'create', func=shared_db_create) + cmd_shared_db_create.add_arguments([ + (['--host', '-H'], {'default': COUCH_HOST}), + (['--port', '-P'], {'type': int, 'default': COUCH_PORT}), + ]) + + cmd_shared_db_delete = cmd_shared_db.add_command( + 'delete', func=shared_db_delete) + cmd_shared_db_delete.add_arguments([ + (['--host', '-H'], {'default': COUCH_HOST}), + (['--port', '-P'], {'type': int, 'default': COUCH_PORT}), + ]) + + # certificate generation + 
cmd_cert = cli.add_command('cert', help="create tls certificates")
+
+    cmd_cert_create = cmd_cert.add_command('create', func=cert_create)
+    cmd_cert_create.add_arguments([
+        (['--basedir', '-b'], {'default': SOLEDAD_SERVER_BASEDIR}),
+        (['--config-file', '-c'], {'default': CERT_CONFIG_FILE}),
+        (['--private-key', '-K'], {'default': SOLEDAD_SERVER_PRIVKEY}),
+        (['--cert-key', '-C'], {'default': SOLEDAD_SERVER_CERTKEY}),
+    ])
+
+    cmd_cert_delete = cmd_cert.add_command('delete', func=cert_delete)
+    cmd_cert_delete.add_arguments([
+        (['--basedir', '-b'], {'default': SOLEDAD_SERVER_BASEDIR}),
+        (['--private-key', '-K'], {'default': SOLEDAD_SERVER_PRIVKEY}),
+        (['--cert-key', '-C'], {'default': SOLEDAD_SERVER_CERTKEY}),
+    ])
+
+    # soledad client command with subcommands
+    cmd_sol_client = cli.add_command(
+        'soledad-client', help="manage soledad client")
+
+    cmd_sol_client_test = cmd_sol_client.add_command(
+        'test', func=soledad_client_test)
+    cmd_sol_client_test.add_arguments([
+        (['--port', '-P'], {'type': int, 'default': SOLEDAD_SERVER_PORT}),
+        (['--tls', '-t'], {'action': 'store_true'}),
+        (['--uuid', '-u'], {'default': SOLEDAD_CLIENT_UUID}),
+        (['--passphrase', '-k'], {'default': SOLEDAD_CLIENT_PASS}),
+        (['--basedir', '-b'], {'default': SOLEDAD_CLIENT_BASEDIR}),
+        (['--server_url', '-s'], {'default': SOLEDAD_SERVER_URL}),
+        (['--cert-key', '-C'], {'default': os.path.join(
+            SOLEDAD_SERVER_BASEDIR,
+            SOLEDAD_SERVER_CERTKEY)}),
+        (['--auth-token', '-a'], {'default': SOLEDAD_SERVER_AUTH_TOKEN}),
+    ])
+
+    # parse and run cli
+    args = cli.parse_args()
+    args.func(args)
+
+
+if __name__ == '__main__':
+    run_cli()
diff --git a/scripts/docker/helper/get-container-ip.sh b/scripts/docker/helper/get-container-ip.sh
new file mode 100755
index 00000000..2b392350
--- /dev/null
+++ b/scripts/docker/helper/get-container-ip.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+# Print the IP of a container to stdout, given its id. Check the
+# documentation for the `docker inspect` command for more details:
+#
+#   https://docs.docker.com/engine/reference/commandline/inspect/
+
+if [ ${#} -ne 1 ]; then
+    echo "Usage: ${0} container_id"
+    exit 1
+fi
+
+container_id=${1}
+
+/usr/bin/docker \
+    inspect \
+    --format='{{.NetworkSettings.IPAddress}}' \
+    ${container_id}