summaryrefslogtreecommitdiff
path: root/scripts/profiling/mail
diff options
context:
space:
mode:
authordrebs <drebs@leap.se>2015-06-04 11:21:40 -0300
committerdrebs <drebs@leap.se>2015-06-04 11:21:40 -0300
commit293c71080e9a21115d248e46d1a706c53cc8ee37 (patch)
tree3353672f1f770ef5f5c87ea7e3e27debe737a50b /scripts/profiling/mail
parentfa7708e256ba56cd1e9913993d68611b4ae95824 (diff)
parent9fb1c47ca7da06d6feef6846b812aec28128ed78 (diff)
Merge tag '0.7.0'
Tag version 0.7.0. Conflicts: CHANGELOG client/src/leap/soledad/client/__init__.py client/src/leap/soledad/client/sqlcipher.py client/src/leap/soledad/client/target.py server/pkg/soledad-server
Diffstat (limited to 'scripts/profiling/mail')
-rw-r--r--scripts/profiling/mail/__init__.py184
-rw-r--r--scripts/profiling/mail/couchdb.ini.template224
-rw-r--r--scripts/profiling/mail/couchdb_server.py42
-rw-r--r--scripts/profiling/mail/couchdb_wrapper.py84
-rw-r--r--scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.pub30
-rw-r--r--scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.sec57
-rw-r--r--scripts/profiling/mail/mail.py50
-rw-r--r--scripts/profiling/mail/mx.py80
-rw-r--r--scripts/profiling/mail/soledad_client.py40
-rw-r--r--scripts/profiling/mail/soledad_server.py48
-rw-r--r--scripts/profiling/mail/util.py8
11 files changed, 847 insertions, 0 deletions
diff --git a/scripts/profiling/mail/__init__.py b/scripts/profiling/mail/__init__.py
new file mode 100644
index 00000000..352faae6
--- /dev/null
+++ b/scripts/profiling/mail/__init__.py
@@ -0,0 +1,184 @@
+import threading
+import time
+import logging
+import argparse
+
+from twisted.internet import reactor
+
+from util import log
+from couchdb_server import get_couchdb_wrapper_and_u1db
+from mx import put_lots_of_messages
+from soledad_server import get_soledad_server
+from soledad_client import SoledadClient
+from mail import get_imap_server
+
+
+UUID = 'blah'
+AUTH_TOKEN = 'bleh'
+
+
+logging.basicConfig(level=logging.DEBUG)
+
+modules = [
+ 'gnupg',
+ 'leap.common',
+ 'leap.keymanager',
+ 'taskthread',
+]
+
+for module in modules:
+ logger = logging.getLogger(name=module)
+ logger.setLevel(logging.WARNING)
+
+
+class TestWatcher(threading.Thread):
+
+ def __init__(self, couchdb_wrapper, couchdb_u1db, soledad_server,
+ soledad_client, imap_service, number_of_msgs, lock):
+ threading.Thread.__init__(self)
+ self._couchdb_wrapper = couchdb_wrapper
+ self._couchdb_u1db = couchdb_u1db
+ self._soledad_server = soledad_server
+ self._soledad_client = soledad_client
+ self._imap_service = imap_service
+ self._number_of_msgs = number_of_msgs
+ self._lock = lock
+ self._mails_available_time = None
+ self._mails_available_time_lock = threading.Lock()
+ self._conditions = None
+
+ def run(self):
+ self._set_conditions()
+ while not self._test_finished():
+ time.sleep(5)
+ log("TestWatcher: Tests finished, cleaning up...",
+ line_break=False)
+ self._stop_reactor()
+ self._cleanup()
+ log("done.")
+ self._lock.release()
+
+ def _set_conditions(self):
+ self._conditions = []
+
+ # condition 1: number of received messages is equal to number of
+ # expected messages
+ def _condition1(*args):
+ msgcount = self._imap_service._inbox.getMessageCount()
+ cond = msgcount == self._number_of_msgs
+ log("[condition 1] received messages: %d (expected: %d) :: %s"
+ % (msgcount, self._number_of_msgs, cond))
+ if self.mails_available_time == None \
+ and cond:
+ with self._mails_available_time_lock:
+ self._mails_available_time = time.time()
+ return cond
+
+
+    # condition 2: number of documents in server is equal to number in client
+ def _condition2(client_docs, server_docs):
+ cond = client_docs == server_docs
+ log("[condition 2] number of documents: client %d; server %d :: %s"
+ % (client_docs, server_docs, cond))
+ return cond
+
+        # condition 3: number of documents bigger than 2 x number of msgs
+ def _condition3(client_docs, *args):
+ cond = client_docs > (2 * self._number_of_msgs)
+ log("[condition 3] documents (%d) > 2 * msgs (%d) :: %s"
+ % (client_docs, self._number_of_msgs, cond))
+ return cond
+
+ # condition 4: not syncing
+ def _condition4(*args):
+ cond = not self._soledad_client.instance.syncing
+ log("[condition 4] not syncing :: %s" % cond)
+ return cond
+
+ self._conditions.append(_condition1)
+ self._conditions.append(_condition2)
+ self._conditions.append(_condition3)
+ self._conditions.append(_condition4)
+
+ def _test_finished(self):
+ client_docs = self._get_soledad_client_number_of_docs()
+ server_docs = self._get_couchdb_number_of_docs()
+ return not bool(filter(lambda x: not x(client_docs, server_docs),
+ self._conditions))
+
+ def _stop_reactor(self):
+ reactor.stop()
+
+ def _cleanup(self):
+ self._imap_service.stop()
+ self._soledad_client.close()
+ self._soledad_server.stop()
+ self._couchdb_wrapper.stop()
+
+ def _get_soledad_client_number_of_docs(self):
+ c = self._soledad_client.instance._db._db_handle.cursor()
+ c.execute('SELECT COUNT(*) FROM document WHERE content IS NOT NULL')
+ row = c.fetchone()
+ return int(row[0])
+
+ def _get_couchdb_number_of_docs(self):
+ couchdb = self._couchdb_u1db._database
+ view = couchdb.view('_all_docs', include_docs=True)
+ return len(filter(
+ lambda r: '_attachments' in r.values()[1]
+ and 'u1db_content' in r.values()[1]['_attachments'],
+ view.rows))
+
+ @property
+ def mails_available_time(self):
+ with self._mails_available_time_lock:
+ return self._mails_available_time
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('number_of_msgs', help="The number of documents",
+ type=int)
+ parser.add_argument('report_file', help="The name of the report file",
+ type=str)
+ args = parser.parse_args()
+
+ # start a couchdb server
+ couchdb_wrapper, couchdb_u1db = get_couchdb_wrapper_and_u1db(
+ UUID, AUTH_TOKEN)
+
+ put_time = put_lots_of_messages(couchdb_u1db, args.number_of_msgs)
+
+ soledad_server = get_soledad_server(couchdb_wrapper.port)
+
+ soledad_client = SoledadClient(
+ uuid='blah',
+ server_url='http://127.0.0.1:%d' % soledad_server.port,
+ auth_token=AUTH_TOKEN)
+
+ imap_service = get_imap_server(
+ soledad_client.instance, UUID, 'snowden@bitmask.net', AUTH_TOKEN)
+
+ lock = threading.Lock()
+ lock.acquire()
+ test_watcher = TestWatcher(
+ couchdb_wrapper, couchdb_u1db, soledad_server, soledad_client,
+ imap_service, args.number_of_msgs, lock)
+ test_watcher.start()
+
+ # reactor.run() will block until TestWatcher stops the reactor.
+ start_time = time.time()
+ reactor.run()
+ log("Reactor stopped.")
+ end_time = time.time()
+ lock.acquire()
+ mails_available_time = test_watcher.mails_available_time - start_time
+ sync_time = end_time - start_time
+ log("Total syncing time: %f" % sync_time)
+ log("# number_of_msgs put_time mails_available_time sync_time")
+ result = "%d %f %f %f" \
+ % (args.number_of_msgs, put_time, mails_available_time,
+ sync_time)
+ log(result)
+ with open(args.report_file, 'a') as f:
+ f.write(result + "\n")
diff --git a/scripts/profiling/mail/couchdb.ini.template b/scripts/profiling/mail/couchdb.ini.template
new file mode 100644
index 00000000..1fc2205b
--- /dev/null
+++ b/scripts/profiling/mail/couchdb.ini.template
@@ -0,0 +1,224 @@
+; etc/couchdb/default.ini.tpl. Generated from default.ini.tpl.in by configure.
+
+; Upgrading CouchDB will overwrite this file.
+
+[couchdb]
+database_dir = %(tempdir)s/lib
+view_index_dir = %(tempdir)s/lib
+max_document_size = 4294967296 ; 4 GB
+os_process_timeout = 120000 ; 120 seconds. for view and external servers.
+max_dbs_open = 100
+delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned
+uri_file = %(tempdir)s/lib/couch.uri
+file_compression = snappy
+
+[database_compaction]
+; larger buffer sizes can originate smaller files
+doc_buffer_size = 524288 ; value in bytes
+checkpoint_after = 5242880 ; checkpoint after every N bytes were written
+
+[view_compaction]
+; larger buffer sizes can originate smaller files
+keyvalue_buffer_size = 2097152 ; value in bytes
+
+[httpd]
+port = 0
+bind_address = 127.0.0.1
+authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
+default_handler = {couch_httpd_db, handle_request}
+secure_rewrites = true
+vhost_global_handlers = _utils, _uuids, _session, _oauth, _users
+allow_jsonp = false
+; Options for the MochiWeb HTTP server.
+;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
+; For more socket options, consult Erlang's module 'inet' man page.
+;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
+log_max_chunk_size = 1000000
+
+[log]
+file = %(tempdir)s/log/couch.log
+level = info
+include_sasl = true
+
+[couch_httpd_auth]
+authentication_db = _users
+authentication_redirect = /_utils/session.html
+require_valid_user = false
+timeout = 600 ; number of seconds before automatic logout
+auth_cache_size = 50 ; size is number of cache entries
+allow_persistent_cookies = false ; set to true to allow persistent cookies
+
+[couch_httpd_oauth]
+; If set to 'true', oauth token and consumer secrets will be looked up
+; in the authentication database (_users). These secrets are stored in
+; a top level property named "oauth" in user documents. Example:
+; {
+; "_id": "org.couchdb.user:joe",
+; "type": "user",
+; "name": "joe",
+; "password_sha": "fe95df1ca59a9b567bdca5cbaf8412abd6e06121",
+; "salt": "4e170ffeb6f34daecfd814dfb4001a73"
+; "roles": ["foo", "bar"],
+; "oauth": {
+; "consumer_keys": {
+; "consumerKey1": "key1Secret",
+; "consumerKey2": "key2Secret"
+; },
+; "tokens": {
+; "token1": "token1Secret",
+; "token2": "token2Secret"
+; }
+; }
+; }
+use_users_db = false
+
+[query_servers]
+; javascript = %(tempdir)s/server/main.js
+javascript = /usr/bin/couchjs /usr/share/couchdb/server/main.js
+coffeescript = /usr/bin/couchjs /usr/share/couchdb/server/main-coffee.js
+
+
+; Changing reduce_limit to false will disable reduce_limit.
+; If you think you're hitting reduce_limit with a "good" reduce function,
+; please let us know on the mailing list so we can fine tune the heuristic.
+[query_server_config]
+reduce_limit = true
+os_process_limit = 25
+
+[daemons]
+view_manager={couch_view, start_link, []}
+external_manager={couch_external_manager, start_link, []}
+query_servers={couch_query_servers, start_link, []}
+vhosts={couch_httpd_vhost, start_link, []}
+httpd={couch_httpd, start_link, []}
+stats_aggregator={couch_stats_aggregator, start, []}
+stats_collector={couch_stats_collector, start, []}
+uuids={couch_uuids, start, []}
+auth_cache={couch_auth_cache, start_link, []}
+replication_manager={couch_replication_manager, start_link, []}
+os_daemons={couch_os_daemons, start_link, []}
+compaction_daemon={couch_compaction_daemon, start_link, []}
+
+[httpd_global_handlers]
+/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
+
+_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req}
+_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
+_config = {couch_httpd_misc_handlers, handle_config_req}
+_replicate = {couch_httpd_replicator, handle_req}
+_uuids = {couch_httpd_misc_handlers, handle_uuids_req}
+_restart = {couch_httpd_misc_handlers, handle_restart_req}
+_stats = {couch_httpd_stats_handlers, handle_stats_req}
+_log = {couch_httpd_misc_handlers, handle_log_req}
+_session = {couch_httpd_auth, handle_session_req}
+_oauth = {couch_httpd_oauth, handle_oauth_req}
+
+[httpd_db_handlers]
+_view_cleanup = {couch_httpd_db, handle_view_cleanup_req}
+_compact = {couch_httpd_db, handle_compact_req}
+_design = {couch_httpd_db, handle_design_req}
+_temp_view = {couch_httpd_view, handle_temp_view_req}
+_changes = {couch_httpd_db, handle_changes_req}
+
+; The external module takes an optional argument allowing you to narrow it to a
+; single script. Otherwise the script name is inferred from the first path section
+; after _external's own path.
+; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>}
+; _external = {couch_httpd_external, handle_external_req}
+
+[httpd_design_handlers]
+_view = {couch_httpd_view, handle_view_req}
+_show = {couch_httpd_show, handle_doc_show_req}
+_list = {couch_httpd_show, handle_view_list_req}
+_info = {couch_httpd_db, handle_design_info_req}
+_rewrite = {couch_httpd_rewrite, handle_rewrite_req}
+_update = {couch_httpd_show, handle_doc_update_req}
+
+; enable external as an httpd handler, then link it with commands here.
+; note, this api is still under consideration.
+; [external]
+; mykey = /path/to/mycommand
+
+; Here you can setup commands for CouchDB to manage
+; while it is alive. It will attempt to keep each command
+; alive if it exits.
+; [os_daemons]
+; some_daemon_name = /path/to/script -with args
+
+
+[uuids]
+; Known algorithms:
+; random - 128 bits of random awesome
+; All awesome, all the time.
+; sequential - monotonically increasing ids with random increments
+; First 26 hex characters are random. Last 6 increment in
+; random amounts until an overflow occurs. On overflow, the
+; random prefix is regenerated and the process starts over.
+; utc_random - Time since Jan 1, 1970 UTC with microseconds
+; First 14 characters are the time in hex. Last 18 are random.
+algorithm = sequential
+
+[stats]
+; rate is in milliseconds
+rate = 1000
+; sample intervals are in seconds
+samples = [0, 60, 300, 900]
+
+[attachments]
+compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
+compressible_types = text/*, application/javascript, application/json, application/xml
+
+[replicator]
+db = _replicator
+; Maximum replication retry count can be a non-negative integer or "infinity".
+max_replication_retry_count = 10
+; More worker processes can give higher network throughput but can also
+; imply more disk and network IO.
+worker_processes = 4
+; With lower batch sizes checkpoints are done more frequently. Lower batch sizes
+; also reduce the total amount of used RAM memory.
+worker_batch_size = 500
+; Maximum number of HTTP connections per replication.
+http_connections = 20
+; HTTP connection timeout per replication.
+; Even for very fast/reliable networks it might need to be increased if a remote
+; database is too busy.
+connection_timeout = 30000
+; If a request fails, the replicator will retry it up to N times.
+retries_per_request = 10
+; Some socket options that might boost performance in some scenarios:
+; {nodelay, boolean()}
+; {sndbuf, integer()}
+; {recbuf, integer()}
+; {priority, integer()}
+; See the `inet` Erlang module's man page for the full list of options.
+socket_options = [{keepalive, true}, {nodelay, false}]
+; Path to a file containing the user's certificate.
+;cert_file = /full/path/to/server_cert.pem
+; Path to file containing user's private PEM encoded key.
+;key_file = /full/path/to/server_key.pem
+; String containing the user's password. Only used if the private keyfile is password protected.
+;password = somepassword
+; Set to true to validate peer certificates.
+verify_ssl_certificates = false
+; File containing a list of peer trusted certificates (in the PEM format).
+;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
+; Maximum peer certificate depth (must be set even if certificate validation is off).
+ssl_certificate_max_depth = 3
+
+[compaction_daemon]
+; The delay, in seconds, between each check for which database and view indexes
+; need to be compacted.
+check_interval = 300
+; If a database or view index file is smaller than this value (in bytes),
+; compaction will not happen. Very small files always have a very high
+; fragmentation therefore it's not worth to compact them.
+min_file_size = 131072
+
+[compactions]
+; List of compaction rules for the compaction daemon.
+
+
+;[admins]
+;testuser = -hashed-f50a252c12615697c5ed24ec5cd56b05d66fe91e,b05471ba260132953930cf9f97f327f5
+; pass for above user is 'testpass'
diff --git a/scripts/profiling/mail/couchdb_server.py b/scripts/profiling/mail/couchdb_server.py
new file mode 100644
index 00000000..2cf0a3fd
--- /dev/null
+++ b/scripts/profiling/mail/couchdb_server.py
@@ -0,0 +1,42 @@
+import hashlib
+import couchdb
+
+from leap.soledad.common.couch import CouchDatabase
+
+from util import log
+from couchdb_wrapper import CouchDBWrapper
+
+
+def start_couchdb_wrapper():
+ log("Starting couchdb... ", line_break=False)
+ couchdb_wrapper = CouchDBWrapper()
+ couchdb_wrapper.start()
+ log("couchdb started on port %d." % couchdb_wrapper.port)
+ return couchdb_wrapper
+
+
+def get_u1db_database(dbname, port):
+ return CouchDatabase.open_database(
+ 'http://127.0.0.1:%d/%s' % (port, dbname),
+ True,
+ ensure_ddocs=True)
+
+
+def create_tokens_database(port, uuid, token_value):
+ tokens_database = couchdb.Server(
+ 'http://127.0.0.1:%d' % port).create('tokens')
+ token = couchdb.Document()
+ token['_id'] = hashlib.sha512(token_value).hexdigest()
+ token['user_id'] = uuid
+ token['type'] = 'Token'
+ tokens_database.save(token)
+
+
+def get_couchdb_wrapper_and_u1db(uuid, token_value):
+ couchdb_wrapper = start_couchdb_wrapper()
+
+ couchdb_u1db = get_u1db_database('user-%s' % uuid, couchdb_wrapper.port)
+ get_u1db_database('shared', couchdb_wrapper.port)
+ create_tokens_database(couchdb_wrapper.port, uuid, token_value)
+
+ return couchdb_wrapper, couchdb_u1db
diff --git a/scripts/profiling/mail/couchdb_wrapper.py b/scripts/profiling/mail/couchdb_wrapper.py
new file mode 100644
index 00000000..cad1205b
--- /dev/null
+++ b/scripts/profiling/mail/couchdb_wrapper.py
@@ -0,0 +1,84 @@
+import re
+import os
+import tempfile
+import subprocess
+import time
+import shutil
+
+
+from leap.common.files import mkdir_p
+
+
+class CouchDBWrapper(object):
+ """
+ Wrapper for external CouchDB instance.
+ """
+
+ def start(self):
+ """
+ Start a CouchDB instance for a test.
+ """
+ self.tempdir = tempfile.mkdtemp(suffix='.couch.test')
+
+ path = os.path.join(os.path.dirname(__file__),
+ 'couchdb.ini.template')
+ handle = open(path)
+ conf = handle.read() % {
+ 'tempdir': self.tempdir,
+ }
+ handle.close()
+
+ confPath = os.path.join(self.tempdir, 'test.ini')
+ handle = open(confPath, 'w')
+ handle.write(conf)
+ handle.close()
+
+ # create the dirs from the template
+ mkdir_p(os.path.join(self.tempdir, 'lib'))
+ mkdir_p(os.path.join(self.tempdir, 'log'))
+ args = ['couchdb', '-n', '-a', confPath]
+ null = open('/dev/null', 'w')
+
+ self.process = subprocess.Popen(
+ args, env=None, stdout=null.fileno(), stderr=null.fileno(),
+ close_fds=True)
+ # find port
+ logPath = os.path.join(self.tempdir, 'log', 'couch.log')
+ while not os.path.exists(logPath):
+ if self.process.poll() is not None:
+ got_stdout, got_stderr = "", ""
+ if self.process.stdout is not None:
+ got_stdout = self.process.stdout.read()
+
+ if self.process.stderr is not None:
+ got_stderr = self.process.stderr.read()
+ raise Exception("""
+couchdb exited with code %d.
+stdout:
+%s
+stderr:
+%s""" % (
+ self.process.returncode, got_stdout, got_stderr))
+ time.sleep(0.01)
+ while os.stat(logPath).st_size == 0:
+ time.sleep(0.01)
+ PORT_RE = re.compile(
+ 'Apache CouchDB has started on http://127.0.0.1:(?P<port>\d+)')
+
+ handle = open(logPath)
+ m = None
+ line = handle.readline()
+ while m is None:
+ m = PORT_RE.search(line)
+ line = handle.readline()
+ handle.close()
+ self.port = int(m.group('port'))
+
+ def stop(self):
+ """
+ Terminate the CouchDB instance.
+ """
+ self.process.terminate()
+ self.process.communicate()
+ shutil.rmtree(self.tempdir)
+
diff --git a/scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.pub b/scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.pub
new file mode 100644
index 00000000..fee53b6d
--- /dev/null
+++ b/scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.pub
@@ -0,0 +1,30 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1.4.12 (GNU/Linux)
+
+mQENBFQEwmABCADC4wYD3mFt8xJtl3gjxRPEGN+FcgvzxxECIhyjYCHszrJu3f65
+/nyruriYdQLGR4YmUdERIwsZ7AMkAM1NAXe7sMq/gRPCb4PwrE7pRKzPAmaLeJMQ
+DC9CSCP+2gUmzeKHS71GkddcUI1HFr1AX9lLVW2ScvmSzOllenyUoFKRvz2uGkLG
+r5pvKsxJUHl9enpHRZV/0X5Y6PCinb4+eN2/ZTdpAywOycU+L+zflA0SOTCtf+dg
+8k839T30piuBulDLNeOX84YcyXTW7XeCeRTg/ryoFaYhbOGt68BwnP9xlpU62LW0
+8vzSZ0mLm4Ttz2uaALEoLmsa91nyLi9pLtrRABEBAAG0IEVkIFNub3dkZW4gPHNu
+b3dkZW5AYml0bWFzay5uZXQ+iQE4BBMBAgAiBQJUBMJgAhsDBgsJCAcDAgYVCAIJ
+CgsEFgIDAQIeAQIXgAAKCRAbRQ5mX+Y1cx4RCACzEiHpmknl+HnB3bHGcr8VZvU9
+hIoclVR/OBjWQFUynr66XmaMHMOLAVoZkIPnezWQe3gDY7QlFCNCfz8SC2++4WtB
+aBzal9IREkVnQBdnWalxLRviNH+zoFQ0URunBAyH4QAJRUC5tWfNj4yI6BCFPwXL
+o0CCISIN+VMRAnwjABQD840/TbcMHDqmJyk/vpPYPFQqQudN3eB2hphKUkZMistP
+O9++ui6glso+MgsbIUdqgnblM3FSrbjfLKekC+MeunFr8qRjettdaVyFD4GLg2SH
+/JpsjZKYoZStatpdJcrNjUMsGtXLxaCPl+VldNuOKIsA85TZJomMiaBDqG9YuQEN
+BFQEwmABCACrYiPXyGWHvs/aFKM63y9l6Th/+SKfzeq+ksLUI6fJIQytGORiiYZC
+1LrhOTmir+dY3IygkFlldxehGt/OMUKLB774WhBDRI43rAhImwhNutTIuUTO7DsD
+y7u83oVQH6xGZW5afs5BEU56Oa8DdUUA5gLfnpqAJG2mLB12JhClxzOYXK/VB0wJ
+QsIWl+zyN7uLQr5xZOthzvP6p7MmsAjhzU1imwyEm8s91DLhwonuqadkMGKi2qHW
+xuwxnr9aHQmobzy68/vOiBFeumr0YarirUdEDiUIti4rqy+0oteTNeMtXWo5rTtx
+xeayw+TjjaOT2fZ6CAbq0I+lOW0aJrPFABEBAAGJAR8EGAECAAkFAlQEwmACGwwA
+CgkQG0UOZl/mNXM0SggAuXzaLafCZiWx28K6mPKdgDOwTMm2rD7ukf3JiswlIyIU
+/K19BENu82iHRSu4nb9amhHOLEhaf1Ep2JTf2Trmd+/SNh0kv3dSBNjCrvrMvtcA
+qVxGc3DtRufGeRoy8ow/sEg+BCcfxJgR1efHOSQfMELDz2v8vbLbkR3Ubm7YRtKr
+Ri2HWYrAXRrwFC07yqO2zptCND/LBtnMrp08AOSSLpRWVD/Ww6IE1v1UEN53aGsm
+D+L/1XkuP4L9cqG3E2NYfsOPiblqRiKSe1adVid/rLn94u+fpE4kuvxoGKn1FJ/m
+FqU8aPtxvPbsMkSoNOalxqJGpuWRTXTLb5I+Ed2Szw==
+=yRE/
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.sec b/scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.sec
new file mode 100644
index 00000000..64cb6c2a
--- /dev/null
+++ b/scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.sec
@@ -0,0 +1,57 @@
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+Version: GnuPG v1.4.12 (GNU/Linux)
+
+lQOYBFQEwmABCADC4wYD3mFt8xJtl3gjxRPEGN+FcgvzxxECIhyjYCHszrJu3f65
+/nyruriYdQLGR4YmUdERIwsZ7AMkAM1NAXe7sMq/gRPCb4PwrE7pRKzPAmaLeJMQ
+DC9CSCP+2gUmzeKHS71GkddcUI1HFr1AX9lLVW2ScvmSzOllenyUoFKRvz2uGkLG
+r5pvKsxJUHl9enpHRZV/0X5Y6PCinb4+eN2/ZTdpAywOycU+L+zflA0SOTCtf+dg
+8k839T30piuBulDLNeOX84YcyXTW7XeCeRTg/ryoFaYhbOGt68BwnP9xlpU62LW0
+8vzSZ0mLm4Ttz2uaALEoLmsa91nyLi9pLtrRABEBAAEAB/0cLb885/amczMC7ZfN
+dD17aS1ImkjoIqxu5ofFh6zgFLLwHOEr+4QDQKhYQvL3wHfBKqtUEwET6nA50HPe
+4otxdAqczgkRYBZvwjpWuDtUY0B4giKhe2GJ7+xkeRmtlq9eaLEhdwzwqCUFVmBe
+4n0Ey4FgX4d+lmpY5fEFfHjz4bZpoCrNZKtiGtOqdlKXm8PnU+ek+G7DFuavJ+g5
+B4fiqkLAYFX/IDFfaTSBYzNDPbSQR5n4Q4r9PdKazPXg7bnLuxAIY4i6KEXq2YpS
+T1vLanCnBd4BEDUODCPZdc/AtbE0U+XoKTBjTvk3UEGIRJSsju8A1vWOG7UCl+0d
+UMmRBADaiQYnp9QiwPDbpqxzlWN8Ms/+tAyRnBbhghcRqtrDSke6fSJAqXzVGVmF
+FSJPMFf4mBYbr1U3YlYOJrlrb3tVhVN+7PTZDIaaENbtcsUAu7hTr7Ko6r1+WONC
+yhtrtOR9sWHVbTZ09ZvyvjHnBqZVA2PuZLUn2wrimnIJbVNdlwQA5EwgoS8UuDob
+hs6tLg29bAEDZRBHXQcDuEwdAX0KCHW0oQ0UE7exbDXXfQJSD9X3fDeqI+BdI+qQ
+Yuauz+fJxKl+qHAcy5l5NT7qomEjHCzjGUnn4NJzkn6a3T4SrBdSMFY2hL/tJN0i
+v1hXVNatjCEotqqsor+C6bf+Sl4I59cEAK+tYLTo/d+KOWtW4XbVhcYHjTBKtJGH
+p2/wNb49ibYpkgOUqW2ebiCB0Lg6QEupomcaMOJGol3v8vwBKsuwQJhWJrAXC2sT
+Bck5mI+DbabyAbYFtZgNHbcdDy62ADg1xD2Je7IjUDcpYaGB3VFhpD2rSvWDeSjR
+3jTG3PPINfoBODK0IEVkIFNub3dkZW4gPHNub3dkZW5AYml0bWFzay5uZXQ+iQE4
+BBMBAgAiBQJUBMJgAhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRAbRQ5m
+X+Y1cx4RCACzEiHpmknl+HnB3bHGcr8VZvU9hIoclVR/OBjWQFUynr66XmaMHMOL
+AVoZkIPnezWQe3gDY7QlFCNCfz8SC2++4WtBaBzal9IREkVnQBdnWalxLRviNH+z
+oFQ0URunBAyH4QAJRUC5tWfNj4yI6BCFPwXLo0CCISIN+VMRAnwjABQD840/TbcM
+HDqmJyk/vpPYPFQqQudN3eB2hphKUkZMistPO9++ui6glso+MgsbIUdqgnblM3FS
+rbjfLKekC+MeunFr8qRjettdaVyFD4GLg2SH/JpsjZKYoZStatpdJcrNjUMsGtXL
+xaCPl+VldNuOKIsA85TZJomMiaBDqG9YnQOYBFQEwmABCACrYiPXyGWHvs/aFKM6
+3y9l6Th/+SKfzeq+ksLUI6fJIQytGORiiYZC1LrhOTmir+dY3IygkFlldxehGt/O
+MUKLB774WhBDRI43rAhImwhNutTIuUTO7DsDy7u83oVQH6xGZW5afs5BEU56Oa8D
+dUUA5gLfnpqAJG2mLB12JhClxzOYXK/VB0wJQsIWl+zyN7uLQr5xZOthzvP6p7Mm
+sAjhzU1imwyEm8s91DLhwonuqadkMGKi2qHWxuwxnr9aHQmobzy68/vOiBFeumr0
+YarirUdEDiUIti4rqy+0oteTNeMtXWo5rTtxxeayw+TjjaOT2fZ6CAbq0I+lOW0a
+JrPFABEBAAEAB/4kyb13Z4MRyy37OkRakgdu2QvhfoVF59Hso/yxxFCTHibGLkpx
+82LQTDEsQNgkGZ2vp7IBElM6MkDuemIRtOW7icdesJh+lAPyI9moWi0DYGgmCQzh
+3PgDBdPQBDT6IL5eYw3323HjKjeeCW1NsPnFqlnyDe3MtWUbDyuozZ1ztA+Rekhb
+UhEDK8ZccEKwpzrE2H5zBZLeY0OKKROGnwd1RBVXnHMgVRF7vbellYaR4h2odxOp
+X8Ho4Xbs1h2VRNIuZwtfXxTIuTIfujlIPXMtVY40dgnEGt9PosJNr9IfGpfE3JCu
+k9PTvq37aZkQbYj52nccwKdos+sLQgqAdHhZBADHg7B5jyRRObsCUXQ+jMHXxuqT
+5l1twwOovvLC7YZoC8NAl4Bi0rh1Zj0ZEJJLFGzeiH+15C4qFTY+ospWpGu6X6g5
+I8ZWya8m2NSEWyJZNI1zKIU0iXucLevVTx+ctnovUNnb89v52/+BKr4k2iRISAzT
+7RL63aFTgnLw9GKweQQA2+eU5jcQ6LobPY/fZZImnhwLDq/OaUV+7u1RfB04GA15
+HOGQV77np/QTM6b+ezKTFhG/HMCTqxf+HPHfzohBPF9zvboLvCkqaHBDiV9qYE96
+id/el3ZeWloLcEe62sMGbv0YYmsYWgJxL8BFGw5v1QpYbfQCnXLjyG+/9f6Ygq0D
+/0W9X/NxWUyAXOv5KRy+rpkpNVxvie4tduvyVUa/9XHF7D/DMaXqkIvVX8yZUIDR
+bjuIvGZkZ9QP8zf8NKkB98zbqZi6CbNrerjrDpb7Pj7uQd3GIcjW4UmENGA6t7U9
+IWen966PAXSzh3996tRHxwXexVIEdX5n4pO39ZiodEIOPzmJAR8EGAECAAkFAlQE
+wmACGwwACgkQG0UOZl/mNXM0SggAuXzaLafCZiWx28K6mPKdgDOwTMm2rD7ukf3J
+iswlIyIU/K19BENu82iHRSu4nb9amhHOLEhaf1Ep2JTf2Trmd+/SNh0kv3dSBNjC
+rvrMvtcAqVxGc3DtRufGeRoy8ow/sEg+BCcfxJgR1efHOSQfMELDz2v8vbLbkR3U
+bm7YRtKrRi2HWYrAXRrwFC07yqO2zptCND/LBtnMrp08AOSSLpRWVD/Ww6IE1v1U
+EN53aGsmD+L/1XkuP4L9cqG3E2NYfsOPiblqRiKSe1adVid/rLn94u+fpE4kuvxo
+GKn1FJ/mFqU8aPtxvPbsMkSoNOalxqJGpuWRTXTLb5I+Ed2Szw==
+=9xZX
+-----END PGP PRIVATE KEY BLOCK-----
diff --git a/scripts/profiling/mail/mail.py b/scripts/profiling/mail/mail.py
new file mode 100644
index 00000000..8504c762
--- /dev/null
+++ b/scripts/profiling/mail/mail.py
@@ -0,0 +1,50 @@
+import os
+import threading
+
+from twisted.internet import reactor
+
+from leap.mail.imap.service import imap
+from leap.keymanager import KeyManager
+
+from util import log
+
+
+class IMAPServerThread(threading.Thread):
+ def __init__(self, imap_service):
+ threading.Thread.__init__(self)
+ self._imap_service = imap_service
+
+ def run(self):
+ self._imap_service.start_loop()
+ reactor.run()
+
+ def stop(self):
+ self._imap_service.stop()
+ reactor.stop()
+
+
+def get_imap_server(soledad, uuid, address, token):
+ log("Starting imap... ", line_break=False)
+
+ keymanager = KeyManager(address, '', soledad, token=token, uid=uuid)
+ with open(
+ os.path.join(
+ os.path.dirname(__file__),
+ 'keys/5447A9AD50E3075ECCE432711B450E665FE63573.sec'), 'r') as f:
+ pubkey, privkey = keymanager.parse_openpgp_ascii_key(f.read())
+ keymanager.put_key(privkey)
+
+ imap_service, imap_port, imap_factory = imap.run_service(
+ soledad, keymanager, userid=address, offline=False)
+
+ imap_service.start_loop()
+ log("started.")
+ return imap_service
+
+ #imap_server = IMAPServerThread(imap_service)
+ #try:
+ # imap_server.start()
+ #except Exception as e:
+ # print str(e)
+
+ #return imap_server
diff --git a/scripts/profiling/mail/mx.py b/scripts/profiling/mail/mx.py
new file mode 100644
index 00000000..b6a1e5cf
--- /dev/null
+++ b/scripts/profiling/mail/mx.py
@@ -0,0 +1,80 @@
+import datetime
+import uuid
+import json
+import timeit
+
+
+from leap.keymanager import openpgp
+from leap.soledad.common.couch import CouchDocument
+from leap.soledad.common.crypto import (
+ EncryptionSchemes,
+ ENC_JSON_KEY,
+ ENC_SCHEME_KEY,
+)
+
+
+from util import log
+
+
+message = """To: Ed Snowden <snowden@bitmask.net>
+Date: %s
+From: Glenn Greenwald <greenwald@bitmask.net>
+
+hi!
+
+"""
+
+
+def get_message():
+ return message % datetime.datetime.now().strftime("%a %b %d %H:%M:%S:%f %Y")
+
+
+def get_enc_json(pubkey, message):
+ with openpgp.TempGPGWrapper(gpgbinary='/usr/bin/gpg') as gpg:
+ gpg.import_keys(pubkey)
+ key = gpg.list_keys().pop()
+ # We don't care about the actual address, so we use a
+ # dummy one, we just care about the import of the pubkey
+ openpgp_key = openpgp._build_key_from_gpg("dummy@mail.com",
+ key, pubkey)
+ enc_json = str(gpg.encrypt(
+ json.dumps(
+ {'incoming': True, 'content': message},
+ ensure_ascii=False),
+ openpgp_key.fingerprint,
+ symmetric=False))
+ return enc_json
+
+
+def get_new_doc(enc_json):
+ doc = CouchDocument(doc_id=str(uuid.uuid4()))
+ doc.content = {
+ 'incoming': True,
+ ENC_SCHEME_KEY: EncryptionSchemes.PUBKEY,
+ ENC_JSON_KEY: enc_json
+ }
+ return doc
+
+
+def get_pubkey():
+ with open('./keys/5447A9AD50E3075ECCE432711B450E665FE63573.pub') as f:
+ return f.read()
+
+
+def put_one_message(pubkey, db):
+ enc_json = get_enc_json(pubkey, get_message())
+ doc = get_new_doc(enc_json)
+ db.put_doc(doc)
+
+
+def put_lots_of_messages(db, number):
+ log("Populating database with %d encrypted messages... "
+ % number, line_break=False)
+ pubkey = get_pubkey()
+ def _put_one_message():
+ put_one_message(pubkey, db)
+ time = timeit.timeit(_put_one_message, number=number)
+ log("done.")
+ average_time = time / number
+ log("put_one_message average time: %f" % average_time)
+ return average_time
diff --git a/scripts/profiling/mail/soledad_client.py b/scripts/profiling/mail/soledad_client.py
new file mode 100644
index 00000000..5ac8ce39
--- /dev/null
+++ b/scripts/profiling/mail/soledad_client.py
@@ -0,0 +1,40 @@
+import tempfile
+import os
+import shutil
+
+from leap.soledad.client import Soledad
+
+
+class SoledadClient(object):
+
+ def __init__(self, uuid, server_url, auth_token):
+ self._uuid = uuid
+ self._server_url = server_url
+ self._auth_token = auth_token
+ self._tempdir = None
+ self._soledad = None
+
+ @property
+ def instance(self):
+ if self._soledad is None:
+ self._soledad = self._get_soledad_client()
+ return self._soledad
+
+ def _get_soledad_client(self):
+ self._tempdir = tempfile.mkdtemp()
+ return Soledad(
+ uuid=self._uuid,
+ passphrase=u'123',
+ secrets_path=os.path.join(self._tempdir, 'secrets.json'),
+ local_db_path=os.path.join(self._tempdir, 'soledad.db'),
+ server_url=self._server_url,
+ cert_file=None,
+ auth_token=self._auth_token,
+ secret_id=None,
+ defer_encryption=True)
+
+ def close(self):
+ if self._soledad is not None:
+ self._soledad.close()
+ if self._tempdir is not None:
+ shutil.rmtree(self._tempdir)
diff --git a/scripts/profiling/mail/soledad_server.py b/scripts/profiling/mail/soledad_server.py
new file mode 100644
index 00000000..ad014456
--- /dev/null
+++ b/scripts/profiling/mail/soledad_server.py
@@ -0,0 +1,48 @@
+import threading
+
+from wsgiref.simple_server import make_server
+
+from leap.soledad.common.couch import CouchServerState
+
+from leap.soledad.server import SoledadApp
+from leap.soledad.server.gzip_middleware import GzipMiddleware
+from leap.soledad.server.auth import SoledadTokenAuthMiddleware
+
+from util import log
+
+
+class SoledadServerThread(threading.Thread):
+ def __init__(self, server):
+ threading.Thread.__init__(self)
+ self._server = server
+
+ def run(self):
+ self._server.serve_forever()
+
+ def stop(self):
+ self._server.shutdown()
+
+ @property
+ def port(self):
+ return self._server.server_port
+
+
+def make_soledad_server_thread(couch_port):
+ state = CouchServerState(
+ 'http://127.0.0.1:%d' % couch_port,
+ 'shared',
+ 'tokens')
+ application = GzipMiddleware(
+ SoledadTokenAuthMiddleware(SoledadApp(state)))
+ server = make_server('', 0, application)
+ t = SoledadServerThread(server)
+ return t
+
+
+def get_soledad_server(couchdb_port):
+ log("Starting soledad server... ", line_break=False)
+ soledad_server = make_soledad_server_thread(couchdb_port)
+ soledad_server.start()
+ log("soledad server started on port %d." % soledad_server.port)
+ return soledad_server
+
diff --git a/scripts/profiling/mail/util.py b/scripts/profiling/mail/util.py
new file mode 100644
index 00000000..86118e88
--- /dev/null
+++ b/scripts/profiling/mail/util.py
@@ -0,0 +1,8 @@
+import sys
+
+
+def log(msg, line_break=True):
+ sys.stdout.write(msg)
+ if line_break:
+ sys.stdout.write("\n")
+ sys.stdout.flush()