summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authordrebs <drebs@leap.se>2016-04-30 22:33:11 -0300
committerdrebs <drebs@leap.se>2016-05-03 10:52:22 -0300
commit33ecd8375e80e452486b727c2ef54d67d330505c (patch)
tree61cf72d2fbe27589e2227221665819c6dc2065b6
parent856e302be8c9fbf1f098001d29c3ff1ac447cdcf (diff)
[test] add script to manage test envs
-rw-r--r--client/changes/next-changelog.rst1
l---------scripts/soledad_test_env/client_side_db.py1
-rw-r--r--scripts/soledad_test_env/conf/cert_default.conf15
-rw-r--r--scripts/soledad_test_env/conf/couchdb_default.ini361
-rw-r--r--scripts/soledad_test_env/conf/soledad-server_default.conf5
-rw-r--r--scripts/soledad_test_env/makefile19
-rwxr-xr-xscripts/soledad_test_env/soledad_test_env.py638
l---------scripts/soledad_test_env/util.py1
8 files changed, 1041 insertions, 0 deletions
diff --git a/client/changes/next-changelog.rst b/client/changes/next-changelog.rst
index 050d84be..93d90901 100644
--- a/client/changes/next-changelog.rst
+++ b/client/changes/next-changelog.rst
@@ -22,6 +22,7 @@ Bugfixes
Misc
~~~~
+- Add script for setting up a development environment.
- Refactor bootstrap to remove shared db lock.
- `#1236 <https://leap.se/code/issues/1236>`_: Description of the new feature corresponding with issue #1236.
- Some change without issue number.
diff --git a/scripts/soledad_test_env/client_side_db.py b/scripts/soledad_test_env/client_side_db.py
new file mode 120000
index 00000000..f0eaacd7
--- /dev/null
+++ b/scripts/soledad_test_env/client_side_db.py
@@ -0,0 +1 @@
+../db_access/client_side_db.py \ No newline at end of file
diff --git a/scripts/soledad_test_env/conf/cert_default.conf b/scripts/soledad_test_env/conf/cert_default.conf
new file mode 100644
index 00000000..8043cea3
--- /dev/null
+++ b/scripts/soledad_test_env/conf/cert_default.conf
@@ -0,0 +1,15 @@
+[ req ]
+default_bits = 1024
+default_keyfile = keyfile.pem
+distinguished_name = req_distinguished_name
+prompt = no
+output_password = mypass
+
+[ req_distinguished_name ]
+C = GB
+ST = Test State or Province
+L = Test Locality
+O = Organization Name
+OU = Organizational Unit Name
+CN = localhost
+emailAddress = test@email.address
diff --git a/scripts/soledad_test_env/conf/couchdb_default.ini b/scripts/soledad_test_env/conf/couchdb_default.ini
new file mode 100644
index 00000000..5ab72d7b
--- /dev/null
+++ b/scripts/soledad_test_env/conf/couchdb_default.ini
@@ -0,0 +1,361 @@
+; etc/couchdb/default.ini.tpl. Generated from default.ini.tpl.in by configure.
+
+; Upgrading CouchDB will overwrite this file.
+[vendor]
+name = The Apache Software Foundation
+version = 1.6.0
+
+[couchdb]
+database_dir = BASEDIR
+view_index_dir = BASEDIR
+util_driver_dir = /usr/lib/x86_64-linux-gnu/couchdb/erlang/lib/couch-1.6.0/priv/lib
+max_document_size = 4294967296 ; 4 GB
+os_process_timeout = 5000 ; 5 seconds. for view and external servers.
+max_dbs_open = 100
+delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned
+uri_file = BASEDIR/couch.uri
+; Method used to compress everything that is appended to database and view index files, except
+; for attachments (see the attachments section). Available methods are:
+;
+; none - no compression
+; snappy - use google snappy, a very fast compressor/decompressor
+uuid = bc2f8b84ecb0b13a31cf7f6881a52194
+
+; deflate_[N] - use zlib's deflate, N is the compression level which ranges from 1 (fastest,
+; lowest compression ratio) to 9 (slowest, highest compression ratio)
+file_compression = snappy
+; Higher values may give better read performance due to less read operations
+; and/or more OS page cache hits, but they can also increase overall response
+; time for writes when there are many attachment write requests in parallel.
+attachment_stream_buffer_size = 4096
+
+plugin_dir = /usr/lib/x86_64-linux-gnu/couchdb/plugins
+
+[database_compaction]
+; larger buffer sizes can originate smaller files
+doc_buffer_size = 524288 ; value in bytes
+checkpoint_after = 5242880 ; checkpoint after every N bytes were written
+
+[view_compaction]
+; larger buffer sizes can originate smaller files
+keyvalue_buffer_size = 2097152 ; value in bytes
+
+[httpd]
+port = 5984
+bind_address = 127.0.0.1
+authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
+default_handler = {couch_httpd_db, handle_request}
+secure_rewrites = true
+vhost_global_handlers = _utils, _uuids, _session, _oauth, _users
+allow_jsonp = false
+; Options for the MochiWeb HTTP server.
+;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
+; For more socket options, consult Erlang's module 'inet' man page.
+;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
+socket_options = [{recbuf, 262144}, {sndbuf, 262144}]
+log_max_chunk_size = 1000000
+enable_cors = false
+; CouchDB can optionally enforce a maximum uri length;
+; max_uri_length = 8000
+
+[ssl]
+port = 6984
+
+[log]
+file = BASEDIR/couch.log
+level = info
+include_sasl = true
+
+[couch_httpd_auth]
+authentication_db = _users
+authentication_redirect = /_utils/session.html
+require_valid_user = false
+timeout = 600 ; number of seconds before automatic logout
+auth_cache_size = 50 ; size is number of cache entries
+allow_persistent_cookies = false ; set to true to allow persistent cookies
+iterations = 10 ; iterations for password hashing
+; min_iterations = 1
+; max_iterations = 1000000000
+; comma-separated list of public fields, 404 if empty
+; public_fields =
+
+[cors]
+credentials = false
+; List of origins separated by a comma, * means accept all
+; Origins must include the scheme: http://example.com
+; You can’t set origins: * and credentials = true at the same time.
+;origins = *
+; List of accepted headers separated by a comma
+; headers =
+; List of accepted methods
+; methods =
+
+
+; Configuration for a vhost
+;[cors:http://example.com]
+; credentials = false
+; List of origins separated by a comma
+; Origins must include the scheme: http://example.com
+; You can’t set origins: * and credentials = true at the same time.
+;origins =
+; List of accepted headers separated by a comma
+; headers =
+; List of accepted methods
+; methods =
+
+[couch_httpd_oauth]
+; If set to 'true', oauth token and consumer secrets will be looked up
+; in the authentication database (_users). These secrets are stored in
+; a top level property named "oauth" in user documents. Example:
+; {
+; "_id": "org.couchdb.user:joe",
+; "type": "user",
+; "name": "joe",
+; "password_sha": "fe95df1ca59a9b567bdca5cbaf8412abd6e06121",
+; "salt": "4e170ffeb6f34daecfd814dfb4001a73"
+; "roles": ["foo", "bar"],
+; "oauth": {
+; "consumer_keys": {
+; "consumerKey1": "key1Secret",
+; "consumerKey2": "key2Secret"
+; },
+; "tokens": {
+; "token1": "token1Secret",
+; "token2": "token2Secret"
+; }
+; }
+; }
+use_users_db = false
+
+[query_servers]
+javascript = /usr/bin/couchjs /usr/share/couchdb/server/main.js
+coffeescript = /usr/bin/couchjs /usr/share/couchdb/server/main-coffee.js
+
+
+; Changing reduce_limit to false will disable reduce_limit.
+; If you think you're hitting reduce_limit with a "good" reduce function,
+; please let us know on the mailing list so we can fine tune the heuristic.
+[query_server_config]
+reduce_limit = true
+os_process_limit = 25
+
+[daemons]
+index_server={couch_index_server, start_link, []}
+external_manager={couch_external_manager, start_link, []}
+query_servers={couch_query_servers, start_link, []}
+vhosts={couch_httpd_vhost, start_link, []}
+httpd={couch_httpd, start_link, []}
+stats_aggregator={couch_stats_aggregator, start, []}
+stats_collector={couch_stats_collector, start, []}
+uuids={couch_uuids, start, []}
+auth_cache={couch_auth_cache, start_link, []}
+replicator_manager={couch_replicator_manager, start_link, []}
+os_daemons={couch_os_daemons, start_link, []}
+compaction_daemon={couch_compaction_daemon, start_link, []}
+
+[httpd_global_handlers]
+/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
+favicon.ico = {couch_httpd_misc_handlers, handle_favicon_req, "/usr/share/couchdb/www"}
+
+_utils = {couch_httpd_misc_handlers, handle_utils_dir_req, "/usr/share/couchdb/www"}
+_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req}
+_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
+_config = {couch_httpd_misc_handlers, handle_config_req}
+_replicate = {couch_replicator_httpd, handle_req}
+_uuids = {couch_httpd_misc_handlers, handle_uuids_req}
+_restart = {couch_httpd_misc_handlers, handle_restart_req}
+_stats = {couch_httpd_stats_handlers, handle_stats_req}
+_log = {couch_httpd_misc_handlers, handle_log_req}
+_session = {couch_httpd_auth, handle_session_req}
+_oauth = {couch_httpd_oauth, handle_oauth_req}
+_db_updates = {couch_dbupdates_httpd, handle_req}
+_plugins = {couch_plugins_httpd, handle_req}
+
+[httpd_db_handlers]
+_all_docs = {couch_mrview_http, handle_all_docs_req}
+_changes = {couch_httpd_db, handle_changes_req}
+_compact = {couch_httpd_db, handle_compact_req}
+_design = {couch_httpd_db, handle_design_req}
+_temp_view = {couch_mrview_http, handle_temp_view_req}
+_view_cleanup = {couch_mrview_http, handle_cleanup_req}
+
+; The external module takes an optional argument allowing you to narrow it to a
+; single script. Otherwise the script name is inferred from the first path section
+; after _external's own path.
+; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>}
+; _external = {couch_httpd_external, handle_external_req}
+
+[httpd_design_handlers]
+_compact = {couch_mrview_http, handle_compact_req}
+_info = {couch_mrview_http, handle_info_req}
+_list = {couch_mrview_show, handle_view_list_req}
+_rewrite = {couch_httpd_rewrite, handle_rewrite_req}
+_show = {couch_mrview_show, handle_doc_show_req}
+_update = {couch_mrview_show, handle_doc_update_req}
+_view = {couch_mrview_http, handle_view_req}
+
+; enable external as an httpd handler, then link it with commands here.
+; note, this api is still under consideration.
+; [external]
+; mykey = /path/to/mycommand
+
+; Here you can setup commands for CouchDB to manage
+; while it is alive. It will attempt to keep each command
+; alive if it exits.
+; [os_daemons]
+; some_daemon_name = /path/to/script -with args
+
+
+[uuids]
+; Known algorithms:
+; random - 128 bits of random awesome
+; All awesome, all the time.
+; sequential - monotonically increasing ids with random increments
+; First 26 hex characters are random. Last 6 increment in
+; random amounts until an overflow occurs. On overflow, the
+; random prefix is regenerated and the process starts over.
+; utc_random - Time since Jan 1, 1970 UTC with microseconds
+; First 14 characters are the time in hex. Last 18 are random.
+; utc_id - Time since Jan 1, 1970 UTC with microseconds, plus utc_id_suffix string
+; First 14 characters are the time in hex. uuids/utc_id_suffix string value is appended to these.
+algorithm = sequential
+; The utc_id_suffix value will be appended to uuids generated by the utc_id algorithm.
+; Replicating instances should have unique utc_id_suffix values to ensure uniqueness of utc_id ids.
+utc_id_suffix =
+; Maximum number of UUIDs retrievable from /_uuids in a single request
+max_count = 1000
+
+[stats]
+; rate is in milliseconds
+rate = 1000
+; sample intervals are in seconds
+samples = [0, 60, 300, 900]
+
+[attachments]
+compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
+compressible_types = text/*, application/javascript, application/json, application/xml
+
+[replicator]
+db = _replicator
+; Maximum replication retry count can be a non-negative integer or "infinity".
+max_replication_retry_count = 10
+; More worker processes can give higher network throughput but can also
+; imply more disk and network IO.
+worker_processes = 4
+; With lower batch sizes checkpoints are done more frequently. Lower batch sizes
+; also reduce the total amount of used RAM memory.
+worker_batch_size = 500
+; Maximum number of HTTP connections per replication.
+http_connections = 20
+; HTTP connection timeout per replication.
+; Even for very fast/reliable networks it might need to be increased if a remote
+; database is too busy.
+connection_timeout = 30000
+; If a request fails, the replicator will retry it up to N times.
+retries_per_request = 10
+; Some socket options that might boost performance in some scenarios:
+; {nodelay, boolean()}
+; {sndbuf, integer()}
+; {recbuf, integer()}
+; {priority, integer()}
+; See the `inet` Erlang module's man page for the full list of options.
+socket_options = [{keepalive, true}, {nodelay, false}]
+; Path to a file containing the user's certificate.
+;cert_file = /full/path/to/server_cert.pem
+; Path to file containing user's private PEM encoded key.
+;key_file = /full/path/to/server_key.pem
+; String containing the user's password. Only used if the private keyfile is password protected.
+;password = somepassword
+; Set to true to validate peer certificates.
+verify_ssl_certificates = false
+; File containing a list of peer trusted certificates (in the PEM format).
+;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
+; Maximum peer certificate depth (must be set even if certificate validation is off).
+ssl_certificate_max_depth = 3
+
+[compaction_daemon]
+; The delay, in seconds, between each check for which database and view indexes
+; need to be compacted.
+check_interval = 300
+; If a database or view index file is smaller than this value (in bytes),
+; compaction will not happen. Very small files always have a very high
+; fragmentation therefore it's not worth to compact them.
+min_file_size = 131072
+
+[compactions]
+; List of compaction rules for the compaction daemon.
+; The daemon compacts databases and their respective view groups when all the
+; condition parameters are satisfied. Configuration can be per database or
+; global, and it has the following format:
+;
+; database_name = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
+; _default = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
+;
+; Possible parameters:
+;
+; * db_fragmentation - If the ratio (as an integer percentage), of the amount
+; of old data (and its supporting metadata) over the database
+; file size is equal to or greater than this value, this
+; database compaction condition is satisfied.
+; This value is computed as:
+;
+; (file_size - data_size) / file_size * 100
+;
+; The data_size and file_size values can be obtained when
+; querying a database's information URI (GET /dbname/).
+;
+; * view_fragmentation - If the ratio (as an integer percentage), of the amount
+; of old data (and its supporting metadata) over the view
+; index (view group) file size is equal to or greater than
+; this value, then this view index compaction condition is
+; satisfied. This value is computed as:
+;
+; (file_size - data_size) / file_size * 100
+;
+; The data_size and file_size values can be obtained when
+; querying a view group's information URI
+; (GET /dbname/_design/groupname/_info).
+;
+; * from _and_ to - The period for which a database (and its view groups) compaction
+; is allowed. The value for these parameters must obey the format:
+;
+; HH:MM - HH:MM (HH in [0..23], MM in [0..59])
+;
+; * strict_window - If a compaction is still running after the end of the allowed
+; period, it will be canceled if this parameter is set to 'true'.
+; It defaults to 'false' and it's meaningful only if the *period*
+; parameter is also specified.
+;
+; * parallel_view_compaction - If set to 'true', the database and its views are
+; compacted in parallel. This is only useful on
+; certain setups, like for example when the database
+; and view index directories point to different
+; disks. It defaults to 'false'.
+;
+; Before a compaction is triggered, an estimation of how much free disk space is
+; needed is computed. This estimation corresponds to 2 times the data size of
+; the database or view index. When there's not enough free disk space to compact
+; a particular database or view index, a warning message is logged.
+;
+; Examples:
+;
+; 1) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}]
+; The `foo` database is compacted if its fragmentation is 70% or more.
+; Any view index of this database is compacted only if its fragmentation
+; is 60% or more.
+;
+; 2) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}]
+; Similar to the preceding example but a compaction (database or view index)
+; is only triggered if the current time is between midnight and 4 AM.
+;
+; 3) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}]
+; Similar to the preceding example - a compaction (database or view index)
+; is only triggered if the current time is between midnight and 4 AM. If at
+; 4 AM the database or one of its views is still compacting, the compaction
+; process will be canceled.
+;
+; 4) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}, {parallel_view_compaction, true}]
+; Similar to the preceding example, but a database and its views can be
+; compacted in parallel.
+;
+;_default = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "23:00"}, {to, "04:00"}]
diff --git a/scripts/soledad_test_env/conf/soledad-server_default.conf b/scripts/soledad_test_env/conf/soledad-server_default.conf
new file mode 100644
index 00000000..5e286374
--- /dev/null
+++ b/scripts/soledad_test_env/conf/soledad-server_default.conf
@@ -0,0 +1,5 @@
+[soledad-server]
+couch_url = http://localhost:5984
+create_cmd = sudo -u soledad-admin /usr/bin/create-user-db
+admin_netrc = /etc/couchdb/couchdb-soledad-admin.netrc
+batching = 0
diff --git a/scripts/soledad_test_env/makefile b/scripts/soledad_test_env/makefile
new file mode 100644
index 00000000..90465fb2
--- /dev/null
+++ b/scripts/soledad_test_env/makefile
@@ -0,0 +1,19 @@
+CLI := python ./soledad_test_env.py
+
+setup-env:
+ $(CLI) couch start
+ $(CLI) shared-db create
+ $(CLI) token-db create
+ $(CLI) token-db insert-token
+ $(CLI) user-db create
+ $(CLI) soledad-server start
+
+destroy-env:
+ $(CLI) soledad-server stop
+ $(CLI) user-db delete
+ $(CLI) token-db delete
+ $(CLI) shared-db delete
+ $(CLI) couch stop
+
+test:
+ $(CLI) soledad-client test
diff --git a/scripts/soledad_test_env/soledad_test_env.py b/scripts/soledad_test_env/soledad_test_env.py
new file mode 100755
index 00000000..807ee622
--- /dev/null
+++ b/scripts/soledad_test_env/soledad_test_env.py
@@ -0,0 +1,638 @@
+#!/usr/bin/env python
+
+
+"""
+This script knows how to build a minimum environment for Soledad Server, which
+includes the following:
+
+ - Couch server startup
+ - Token and shared database initialization
+ - Soledad Server startup
+
+Options can be passed for configuring the different environments, so this may
+be used by other programs to setup different environments for arbitrary tests.
+Use the --help option to get information on usage.
+
+For some commands you will need an environment with Soledad python packages
+available, thus you might want to explicitly call python and not rely on the
+shebang line.
+"""
+
+
+import time
+import os
+import signal
+import tempfile
+import psutil
+from argparse import ArgumentParser
+from subprocess import call
+from couchdb import Server
+from couchdb.http import PreconditionFailed
+from couchdb.http import ResourceConflict
+from couchdb.http import ResourceNotFound
+from hashlib import sha512
+from u1db.errors import DatabaseDoesNotExist
+
+
+#
+# Utilities
+#
+
def get_pid(pidfile):
    """Return the PID stored in `pidfile`, or 0 when unavailable.

    0 is returned when the file does not exist, cannot be read, or does
    not contain a parseable integer (empty or corrupt pidfile).
    """
    if not os.path.isfile(pidfile):
        return 0
    try:
        with open(pidfile) as f:
            return int(f.read())
    except (IOError, ValueError):
        # unreadable or corrupt pidfile: treat as "no process running"
        return 0
+
+
def pid_is_running(pid):
    """Tell whether a process with the given PID currently exists."""
    try:
        # building a Process object raises when the PID is gone
        psutil.Process(pid)
    except psutil.NoSuchProcess:
        return False
    else:
        return True
+
+
def pidfile_is_running(pidfile):
    """Return the PID recorded in `pidfile` if that process is alive.

    Returns False when the pidfile is missing or unreadable (pid 0), or
    when no process with the recorded PID exists.
    """
    pid = get_pid(pidfile)
    if not pid:
        # no usable pidfile: avoid probing pid 0 and returning a falsy
        # "running" value
        return False
    try:
        psutil.Process(pid)
        return pid
    except psutil.NoSuchProcess:
        return False
+
+
def status_from_pidfile(args, default_basedir):
    # Print whether the process tracked by `args.pidfile` (resolved
    # relative to the base directory) is currently running, and its PID.
    basedir = _get_basedir(args, default_basedir)
    pidfile = os.path.join(basedir, args.pidfile)
    try:
        # psutil.Process raises NoSuchProcess when the PID is dead
        pid = get_pid(pidfile)
        psutil.Process(pid)
        print "[+] running - pid: %d" % pid
    except (IOError, psutil.NoSuchProcess):
        print "[-] stopped"
+
+
def kill_all_executables(args):
    """SIGKILL every process whose executable name matches args.executable.

    Best-effort: processes that exit meanwhile or cannot be inspected or
    killed are skipped.
    """
    basename = os.path.basename(args.executable)
    # let psutil enumerate processes instead of scanning /proc by hand
    for proc in psutil.process_iter():
        try:
            if proc.name() == basename:
                print('[!] killing - pid: %d' % proc.pid)
                os.kill(proc.pid, signal.SIGKILL)
        except (psutil.NoSuchProcess, psutil.AccessDenied, OSError):
            # narrow handling instead of a bare except: pass
            continue
+
+
+#
+# Couch Server control
+#
+
# Couch server defaults; BASEDIR placeholders in the template config are
# replaced with the actual base directory at startup time.
COUCH_EXECUTABLE = '/usr/bin/couchdb'
ERLANG_EXECUTABLE = 'beam.smp'
COUCH_TEMPLATE = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    './conf/couchdb_default.ini')
COUCH_PIDFILE = 'couchdb.pid'
COUCH_LOGFILE = 'couchdb.log'
COUCH_PORT = 5984
COUCH_HOST = '127.0.0.1'
COUCH_BASEDIR = '/tmp/couch_test'
+
+
+def _get_basedir(args, default):
+ basedir = args.basedir
+ if not basedir:
+ basedir = default
+ if not os.path.isdir(basedir):
+ os.mkdir(basedir)
+ return basedir
+
+
def couch_server_start(args):
    """Start a couch server instance in the background.

    Refuses to start when a previous instance is still alive according to
    the pidfile. When no config file is passed, one is generated from the
    template with BASEDIR placeholders replaced by the actual base
    directory.
    """
    basedir = _get_basedir(args, COUCH_BASEDIR)
    pidfile = os.path.join(basedir, args.pidfile)
    logfile = os.path.join(basedir, args.logfile)

    # check if already running
    pid = get_pid(pidfile)
    if pid_is_running(pid):
        print('[*] error: already running - pid: %d' % pid)
        exit(1)
    if os.path.isfile(pidfile):
        # stale pidfile left behind by a dead instance
        os.unlink(pidfile)

    # generate a config file from template if needed
    config_file = args.config_file
    if not config_file:
        # NOTE(review): mktemp is race-prone; acceptable for a test-env
        # script, but mkstemp would be safer
        config_file = tempfile.mktemp(prefix='couch_config_', dir=basedir)
        with open(args.template) as f:
            lines = [l.replace('BASEDIR', basedir) for l in f.readlines()]
        with open(config_file, 'w') as f:
            f.writelines(lines)

    # start couch server
    try:
        call([
            args.executable,
            '-n',  # reset configuration file chain (including system default)
            '-a %s' % config_file,  # add configuration FILE to chain
            '-b',  # spawn as a background process
            '-p %s' % pidfile,  # set the background PID FILE
            '-o %s' % logfile,  # redirect background stdout to FILE
            '-e %s' % logfile])  # redirect background stderr to FILE
    except Exception as e:
        print('[*] error: could not start couch server - %s' % str(e))
        exit(1)

    # couch may take a bit to store the pid in the pidfile; poll until a
    # non-zero pid shows up (get_pid returns 0 while the file is absent,
    # so the previous try/break loop always exited on its first pass)
    pid = get_pid(pidfile)
    while not pid:
        time.sleep(0.1)
        pid = get_pid(pidfile)

    print('[+] running - pid: %d' % pid)
+
+
def couch_server_stop(args):
    # Stop the background couch server tracked by the pidfile; exits with
    # an error when no running instance is found.
    basedir = _get_basedir(args, COUCH_BASEDIR)
    pidfile = os.path.join(basedir, args.pidfile)
    pid = get_pid(pidfile)
    if not pid_is_running(pid):
        print '[*] error: no running server found'
        exit(1)
    call([
        args.executable,
        '-p %s' % pidfile,  # set the background PID FILE
        '-k'])  # kill the background process, will respawn if needed
    print '[-] stopped - pid: %d ' % pid
+
+
def couch_status_from_pidfile(args):
    # Report couch server status based on its pidfile.
    status_from_pidfile(args, COUCH_BASEDIR)
+
+
+#
+# User DB maintenance
+#
+
def user_db_create(args):
    # Create the per-user couch database ("user-<uuid>"), erroring out when
    # it already exists. The import is local because leap.soledad is only
    # required for this command.
    from leap.soledad.common.couch import CouchDatabase
    url = 'http://localhost:%d/user-%s' % (args.port, args.uuid)
    try:
        # probe with create=False; success means the db already exists
        CouchDatabase.open_database(
            url=url, create=False, replica_uid=None, ensure_ddocs=True)
        print '[*] error: database "user-%s" already exists' % args.uuid
        exit(1)
    except DatabaseDoesNotExist:
        # database is absent, so create it now
        CouchDatabase.open_database(
            url=url, create=True, replica_uid=None, ensure_ddocs=True)
        print '[+] database created: user-%s' % args.uuid
+
+
def user_db_delete(args):
    """Delete the per-user couch database ("user-<uuid>").

    Exits with an error when the database does not exist.
    """
    s = _couch_get_server(args)
    # compute the name outside the try block so the except clause can
    # safely reference it
    dbname = 'user-%s' % args.uuid
    try:
        s.delete(dbname)
        print('[-] database deleted: %s' % dbname)
    except ResourceNotFound:
        print('[*] error: database "%s" does not exist' % dbname)
        exit(1)
+
+
+#
+# Soledad Server control
+#
+
TWISTD_EXECUTABLE = 'twistd'  # use whatever is available on path

# Soledad Server defaults; file names are resolved relative to the
# server's base directory.
SOLEDAD_SERVER_BASEDIR = '/tmp/soledad_server_test'
SOLEDAD_SERVER_CONFIG_FILE = './conf/soledad_default.ini'
SOLEDAD_SERVER_PIDFILE = 'soledad.pid'
SOLEDAD_SERVER_LOGFILE = 'soledad.log'
SOLEDAD_SERVER_PRIVKEY = 'soledad_privkey.pem'
SOLEDAD_SERVER_CERTKEY = 'soledad_certkey.pem'
SOLEDAD_SERVER_PORT = 2424
SOLEDAD_SERVER_AUTH_TOKEN = 'an-auth-token'
SOLEDAD_SERVER_URL = 'https://localhost:2424'

# Soledad Client defaults used by the client test command.
SOLEDAD_CLIENT_PASS = '12345678'
SOLEDAD_CLIENT_BASEDIR = '/tmp/soledad_client_test'
SOLEDAD_CLIENT_UUID = '1234567890abcdef'
+
+
def soledad_server_start(args):
    """Start the Soledad server via twistd.

    Refuses to start when a previous instance is still alive according to
    the pidfile. When --tls is given, a twisted SSL endpoint string is
    built from the private key and certificate paths.
    """
    basedir = _get_basedir(args, SOLEDAD_SERVER_BASEDIR)
    pidfile = os.path.join(basedir, args.pidfile)
    logfile = os.path.join(basedir, args.logfile)
    private_key = os.path.join(basedir, args.private_key)
    cert_key = os.path.join(basedir, args.cert_key)

    # check if already running (pid was fetched once; the original
    # re-fetched it redundantly inside this branch)
    pid = get_pid(pidfile)
    if pid_is_running(pid):
        print('[*] error: already running - pid: %d' % pid)
        exit(1)

    port = args.port
    if args.tls:
        port = 'ssl:%d:privateKey=%s:certKey=%s:sslmethod=SSLv23_METHOD' \
            % (args.port, private_key, cert_key)
    params = [
        '--logfile=%s' % logfile,
        '--pidfile=%s' % pidfile,
        'web',
        '--wsgi=leap.soledad.server.application',
        '--port=%s' % port
    ]
    if args.no_daemonize:
        # run in the foreground instead of detaching
        params.insert(0, '--nodaemon')

    call([args.executable] + params)

    pid = get_pid(pidfile)
    print('[+] running - pid: %d' % pid)
+
+
def soledad_server_stop(args):
    # Stop a running Soledad server by SIGKILLing the PID recorded in the
    # pidfile; exits with an error when no running instance is found.
    basedir = _get_basedir(args, SOLEDAD_SERVER_BASEDIR)
    pidfile = os.path.join(basedir, args.pidfile)
    pid = get_pid(pidfile)
    if not pid_is_running(pid):
        print '[*] error: no running server found'
        exit(1)
    os.kill(pid, signal.SIGKILL)
    print '[-] stopped - pid: %d' % pid
+
+
def soledad_server_status_from_pidfile(args):
    # Report Soledad server status based on its pidfile.
    status_from_pidfile(args, SOLEDAD_SERVER_BASEDIR)
+
+
+# couch helpers
+
def _couch_get_server(args):
    """Build a couchdb.Server handle for the configured host and port."""
    server_url = 'http://%s:%d/' % (args.host, args.port)
    return Server(url=server_url)
+
+
def _couch_create_db(args, dbname):
    """Create a couch database, exiting when it already exists.

    Returns the couchdb.Server handle so callers can reuse the connection.
    Unexpected precondition failures are re-raised instead of being
    silently swallowed.
    """
    s = _couch_get_server(args)
    try:
        s.create(dbname)
        print('[+] database created: %s' % dbname)
    except PreconditionFailed as e:
        # e.args[0] is the (error, reason) tuple; e.message is deprecated
        error_code, _ = e.args[0]
        if error_code != 'file_exists':
            # unknown failure: surface it rather than swallow it
            raise
        print('[*] error: "%s" database already exists' % dbname)
        exit(1)
    return s
+
+
def _couch_delete_db(args, dbname):
    s = _couch_get_server(args)
    # delete the database, exiting with an error when it does not exist
    try:
        s.delete(dbname)
        print '[-] database deleted: %s' % dbname
    except ResourceNotFound:
        print '[*] error: "%s" database does not exist' % dbname
        exit(1)
+
+
+def _token_dbname():
+ dbname = 'tokens_' + \
+ str(int(time.time() / (30 * 24 * 3600)))
+ return dbname
+
+
def token_db_create(args):
    # Create the time-rotated tokens database.
    dbname = _token_dbname()
    _couch_create_db(args, dbname)
+
+
def token_db_insert_token(args):
    # Store the authentication token in the tokens database: the document
    # id is the sha512 hex digest of the token and it maps to the uuid.
    s = _couch_get_server(args)
    try:
        dbname = _token_dbname()
        db = s[dbname]
        token = sha512(args.auth_token).hexdigest()
        db[token] = {
            'type': 'Token',
            'user_id': args.uuid,
        }
        print '[+] token for uuid "%s" created in tokens database' % args.uuid
    except ResourceConflict:
        # a document with this token hash already exists
        print '[*] error: token for uuid "%s" already exists in tokens database' \
            % args.uuid
        exit(1)
+
+
def token_db_delete(args):
    # Delete the time-rotated tokens database.
    dbname = _token_dbname()
    _couch_delete_db(args, dbname)
+
+
+#
+# Shared DB creation
+#
+
def shared_db_create(args):
    # Create the "shared" couch database used by all users.
    _couch_create_db(args, 'shared')
+
+
def shared_db_delete(args):
    # Delete the "shared" couch database.
    _couch_delete_db(args, 'shared')
+
+
+#
+# Certificate creation
+#
+
# Default openssl request config used when generating self-signed certs.
CERT_CONFIG_FILE = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    './conf/cert_default.conf')
+
+
def cert_create(args):
    """Generate a self-signed certificate and private key with openssl."""
    private_key = os.path.join(args.basedir, args.private_key)
    cert_key = os.path.join(args.basedir, args.cert_key)
    openssl_cmd = [
        'openssl', 'req',
        '-x509',
        '-sha256',
        '-nodes',
        '-days', '365',
        '-newkey', 'rsa:2048',
        '-config', args.config_file,
        '-keyout', private_key,
        '-out', cert_key,
    ]
    call(openssl_cmd)
+
+
def cert_delete(args):
    """Remove the generated private key and certificate files."""
    for filename in (args.private_key, args.cert_key):
        os.unlink(os.path.join(args.basedir, filename))
+
+
+#
+# Soledad Client Control
+#
+
def soledad_client_test(args):
    # Instantiate a Soledad client against the test server, inferring a
    # temporary basedir and a default server url when not given.

    # maybe infer missing parameters
    basedir = args.basedir
    if not basedir:
        basedir = tempfile.mkdtemp()
    server_url = args.server_url
    if not server_url:
        server_url = 'http://127.0.0.1:%d' % args.port

    # get a soledad instance (local import: client_side_db is a symlinked
    # helper that requires the soledad client packages)
    from client_side_db import _get_soledad_instance
    _get_soledad_instance(
        args.uuid,
        unicode(args.passphrase),
        basedir,
        server_url,
        args.cert_key,
        args.auth_token)
+
+
+#
+# Command Line Interface
+#
+
class Command(object):
    """Thin wrapper around argparse easing nested subcommand setup.

    Each Command owns an ArgumentParser; subcommands are created through
    add_command(), which associates an optional handler function with the
    subparser via parser defaults (available later as args.func).
    """

    def __init__(self, parser=None):
        # NOTE: the original used parser=ArgumentParser() as the default,
        # a mutable default argument evaluated once — every Command()
        # created without a parser shared the same ArgumentParser.
        self.commands = []
        self.parser = parser if parser is not None else ArgumentParser()
        self.subparsers = None

    def add_command(self, *args, **kwargs):
        """Create a subcommand; the `func` kwarg becomes its handler."""
        # pop out the func parameter to use later
        func = kwargs.pop('func', None)
        # lazily create the subparsers group on first use
        if not self.subparsers:
            self.subparsers = self.parser.add_subparsers()
        # create command and associate a function with it
        command = Command(self.subparsers.add_parser(*args, **kwargs))
        if func:
            command.parser.set_defaults(func=func)
        self.commands.append(command)
        return command

    def set_func(self, func):
        """Set the handler function invoked for this command."""
        self.parser.set_defaults(func=func)

    def add_argument(self, *args, **kwargs):
        self.parser.add_argument(*args, **kwargs)

    def add_arguments(self, arglist):
        """Add many arguments at once from (args, kwargs) pairs."""
        for args, kwargs in arglist:
            self.add_argument(*args, **kwargs)

    def parse_args(self):
        return self.parser.parse_args()
+
+
+#
+# Command Line Interface
+#
+
+def run_cli():
+ cli = Command()
+
+ # couch command with subcommands
+ cmd_couch = cli.add_command('couch', help="manage couch server")
+
+ cmd_couch_start = cmd_couch.add_command('start', func=couch_server_start)
+ cmd_couch_start.add_arguments([
+ (['--executable', '-e'], {'default': COUCH_EXECUTABLE}),
+ (['--basedir', '-b'], {}),
+ (['--config-file', '-c'], {}),
+ (['--template', '-t'], {'default': COUCH_TEMPLATE}),
+ (['--pidfile', '-p'], {'default': COUCH_PIDFILE}),
+ (['--logfile', '-l'], {'default': COUCH_LOGFILE})
+ ])
+
+ cmd_couch_stop = cmd_couch.add_command('stop', func=couch_server_stop)
+ cmd_couch_stop.add_arguments([
+ (['--executable', '-e'], {'default': COUCH_EXECUTABLE}),
+ (['--basedir', '-b'], {}),
+ (['--pidfile', '-p'], {'default': COUCH_PIDFILE}),
+ ])
+
+ cmd_couch_status = cmd_couch.add_command(
+ 'status', func=couch_status_from_pidfile)
+ cmd_couch_status.add_arguments([
+ (['--basedir', '-b'], {}),
+ (['--pidfile', '-p'], {'default': COUCH_PIDFILE})])
+
+ cmd_couch_kill = cmd_couch.add_command('kill', func=kill_all_executables)
+ cmd_couch_kill.add_argument(
+ '--executable', '-e', default=ERLANG_EXECUTABLE)
+
+ # user database maintenance
+ cmd_user_db = cli.add_command('user-db')
+
+ cmd_user_db_create = cmd_user_db.add_command('create', func=user_db_create)
+ cmd_user_db_create.add_arguments([
+ (['--host', '-H'], {'default': COUCH_HOST}),
+ (['--port', '-P'], {'type': int, 'default': COUCH_PORT}),
+ (['--uuid', '-u'], {'default': SOLEDAD_CLIENT_UUID}),
+ ])
+
+ cmd_user_db_create = cmd_user_db.add_command(
+ 'delete', func=user_db_delete)
+ cmd_user_db_create.add_arguments([
+ (['--host', '-H'], {'default': COUCH_HOST}),
+ (['--port', '-P'], {'type': int, 'default': COUCH_PORT}),
+ (['--uuid', '-u'], {'default': SOLEDAD_CLIENT_UUID})
+ ])
+
+ # soledad server command with subcommands
+ cmd_sol_server = cli.add_command(
+ 'soledad-server', help="manage soledad server")
+
+ cmd_sol_server_start = cmd_sol_server.add_command(
+ 'start', func=soledad_server_start)
+ cmd_sol_server_start.add_arguments([
+ (['--executable', '-e'], {'default': TWISTD_EXECUTABLE}),
+ (['--config-file', '-c'], {'default': SOLEDAD_SERVER_CONFIG_FILE}),
+ (['--pidfile', '-p'], {'default': SOLEDAD_SERVER_PIDFILE}),
+ (['--logfile', '-l'], {'default': SOLEDAD_SERVER_LOGFILE}),
+ (['--port', '-P'], {'type': int, 'default': SOLEDAD_SERVER_PORT}),
+ (['--tls', '-t'], {'action': 'store_true'}),
+ (['--private-key', '-K'], {'default': SOLEDAD_SERVER_PRIVKEY}),
+ (['--cert-key', '-C'], {'default': SOLEDAD_SERVER_CERTKEY}),
+ (['--no-daemonize', '-n'], {'action': 'store_true'}),
+ (['--basedir', '-b'], {'default': SOLEDAD_SERVER_BASEDIR}),
+ ])
+
+ cmd_sol_server_stop = cmd_sol_server.add_command(
+ 'stop', func=soledad_server_stop)
+ cmd_sol_server_stop.add_arguments([
+ (['--basedir', '-b'], {'default': SOLEDAD_SERVER_BASEDIR}),
+ (['--pidfile', '-p'], {'default': SOLEDAD_SERVER_PIDFILE}),
+ ])
+
+ cmd_sol_server_status = cmd_sol_server.add_command(
+ 'status', func=soledad_server_status_from_pidfile)
+ cmd_sol_server_status.add_arguments([
+ (['--basedir', '-b'], {'default': SOLEDAD_SERVER_BASEDIR}),
+ (['--pidfile', '-p'], {'default': SOLEDAD_SERVER_PIDFILE}),
+ ])
+
+ cmd_sol_server_kill = cmd_sol_server.add_command(
+ 'kill', func=kill_all_executables)
+ cmd_sol_server_kill.add_argument(
+ '--executable', '-e', default=TWISTD_EXECUTABLE)
+
+ # token db maintenance
+ cmd_token_db = cli.add_command('token-db')
+ cmd_token_db_create = cmd_token_db.add_command(
+ 'create', func=token_db_create)
+ cmd_token_db_create.add_arguments([
+ (['--host', '-H'], {'default': COUCH_HOST}),
+ (['--uuid', '-u'], {'default': SOLEDAD_CLIENT_UUID}),
+ (['--port', '-P'], {'type': int, 'default': COUCH_PORT}),
+ ])
+
+ cmd_token_db_insert_token = cmd_token_db.add_command(
+ 'insert-token', func=token_db_insert_token)
+ cmd_token_db_insert_token.add_arguments([
+ (['--host', '-H'], {'default': COUCH_HOST}),
+ (['--uuid', '-u'], {'default': SOLEDAD_CLIENT_UUID}),
+ (['--port', '-P'], {'type': int, 'default': COUCH_PORT}),
+ (['--auth-token', '-a'], {'default': SOLEDAD_SERVER_AUTH_TOKEN}),
+ ])
+
+ cmd_token_db_delete = cmd_token_db.add_command(
+ 'delete', func=token_db_delete)
+ cmd_token_db_delete.add_arguments([
+ (['--host', '-H'], {'default': COUCH_HOST}),
+ (['--uuid', '-u'], {'default': SOLEDAD_CLIENT_UUID}),
+ (['--port', '-P'], {'type': int, 'default': COUCH_PORT}),
+ ])
+
+ # shared db creation
+ cmd_shared_db = cli.add_command('shared-db')
+
+ cmd_shared_db_create = cmd_shared_db.add_command(
+ 'create', func=shared_db_create)
+ cmd_shared_db_create.add_arguments([
+ (['--host', '-H'], {'default': COUCH_HOST}),
+ (['--port', '-P'], {'type': int, 'default': COUCH_PORT}),
+ ])
+
+ cmd_shared_db_delete = cmd_shared_db.add_command(
+ 'delete', func=shared_db_delete)
+ cmd_shared_db_delete.add_arguments([
+ (['--host', '-H'], {'default': COUCH_HOST}),
+ (['--port', '-P'], {'type': int, 'default': COUCH_PORT}),
+ ])
+
+ # certificate generation
+ cmd_cert = cli.add_command('cert', help="create tls certificates")
+
+ cmd_cert_create = cmd_cert.add_command('create', func=cert_create)
+ cmd_cert_create.add_arguments([
+ (['--basedir', '-b'], {'default': SOLEDAD_SERVER_BASEDIR}),
+ (['--config-file', '-c'], {'default': CERT_CONFIG_FILE}),
+ (['--private-key', '-K'], {'default': SOLEDAD_SERVER_PRIVKEY}),
+ (['--cert-key', '-C'], {'default': SOLEDAD_SERVER_CERTKEY}),
+ ])
+
+    cmd_cert_delete = cmd_cert.add_command('delete', func=cert_delete)
+    cmd_cert_delete.add_arguments([
+ (['--basedir', '-b'], {'default': SOLEDAD_SERVER_BASEDIR}),
+ (['--private-key', '-K'], {'default': SOLEDAD_SERVER_PRIVKEY}),
+ (['--cert-key', '-C'], {'default': SOLEDAD_SERVER_CERTKEY}),
+ ])
+
+ # soledad client command with subcommands
+ cmd_sol_client = cli.add_command(
+ 'soledad-client', help="manage soledad client")
+
+ cmd_sol_client_test = cmd_sol_client.add_command(
+ 'test', func=soledad_client_test)
+ cmd_sol_client_test.add_arguments([
+ (['--port', '-P'], {'type': int, 'default': SOLEDAD_SERVER_PORT}),
+ (['--tls', '-t'], {'action': 'store_true'}),
+ (['--uuid', '-u'], {'default': SOLEDAD_CLIENT_UUID}),
+ (['--passphrase', '-k'], {'default': SOLEDAD_CLIENT_PASS}),
+ (['--basedir', '-b'], {'default': SOLEDAD_CLIENT_BASEDIR}),
+ (['--server_url', '-s'], {'default': SOLEDAD_SERVER_URL}),
+ (['--cert-key', '-C'], {'default':
+ os.path.join(SOLEDAD_SERVER_BASEDIR, SOLEDAD_SERVER_CERTKEY)}),
+ (['--auth-token', '-a'], {'default': SOLEDAD_SERVER_AUTH_TOKEN}),
+ ])
+
+ # parse and run cli
+ args = cli.parse_args()
+ args.func(args)
+
+
+if __name__ == '__main__':
+ run_cli()
diff --git a/scripts/soledad_test_env/util.py b/scripts/soledad_test_env/util.py
new file mode 120000
index 00000000..d5266574
--- /dev/null
+++ b/scripts/soledad_test_env/util.py
@@ -0,0 +1 @@
+../db_access/util.py \ No newline at end of file