summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--scripts/profiling/mail/__init__.py184
-rw-r--r--scripts/profiling/mail/couchdb.ini.template224
-rw-r--r--scripts/profiling/mail/couchdb_server.py42
-rw-r--r--scripts/profiling/mail/couchdb_wrapper.py84
-rw-r--r--scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.pub30
-rw-r--r--scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.sec57
-rw-r--r--scripts/profiling/mail/mail.py50
-rw-r--r--scripts/profiling/mail/mx.py80
-rw-r--r--scripts/profiling/mail/soledad_client.py40
-rw-r--r--scripts/profiling/mail/soledad_server.py48
-rw-r--r--scripts/profiling/mail/util.py8
-rw-r--r--scripts/profiling/storage/benchmark-storage.py104
-rw-r--r--scripts/profiling/storage/benchmark_storage_utils.py4
l---------scripts/profiling/storage/client_side_db.py1
-rwxr-xr-xscripts/profiling/storage/plot.py94
-rw-r--r--scripts/profiling/storage/profile-format.py29
-rwxr-xr-xscripts/profiling/storage/profile-storage.py107
l---------scripts/profiling/storage/util.py1
l---------scripts/profiling/sync/movingaverage.py1
-rw-r--r--scripts/profiling/sync/profile-decoupled.py24
20 files changed, 1212 insertions, 0 deletions
diff --git a/scripts/profiling/mail/__init__.py b/scripts/profiling/mail/__init__.py
new file mode 100644
index 00000000..352faae6
--- /dev/null
+++ b/scripts/profiling/mail/__init__.py
@@ -0,0 +1,184 @@
+import threading
+import time
+import logging
+import argparse
+
+from twisted.internet import reactor
+
+from util import log
+from couchdb_server import get_couchdb_wrapper_and_u1db
+from mx import put_lots_of_messages
+from soledad_server import get_soledad_server
+from soledad_client import SoledadClient
+from mail import get_imap_server
+
+
+UUID = 'blah'
+AUTH_TOKEN = 'bleh'
+
+
+logging.basicConfig(level=logging.DEBUG)
+
+modules = [
+ 'gnupg',
+ 'leap.common',
+ 'leap.keymanager',
+ 'taskthread',
+]
+
+for module in modules:
+ logger = logging.getLogger(name=module)
+ logger.setLevel(logging.WARNING)
+
+
+class TestWatcher(threading.Thread):
+
+ def __init__(self, couchdb_wrapper, couchdb_u1db, soledad_server,
+ soledad_client, imap_service, number_of_msgs, lock):
+ threading.Thread.__init__(self)
+ self._couchdb_wrapper = couchdb_wrapper
+ self._couchdb_u1db = couchdb_u1db
+ self._soledad_server = soledad_server
+ self._soledad_client = soledad_client
+ self._imap_service = imap_service
+ self._number_of_msgs = number_of_msgs
+ self._lock = lock
+ self._mails_available_time = None
+ self._mails_available_time_lock = threading.Lock()
+ self._conditions = None
+
+ def run(self):
+ self._set_conditions()
+ while not self._test_finished():
+ time.sleep(5)
+ log("TestWatcher: Tests finished, cleaning up...",
+ line_break=False)
+ self._stop_reactor()
+ self._cleanup()
+ log("done.")
+ self._lock.release()
+
+ def _set_conditions(self):
+ self._conditions = []
+
+ # condition 1: number of received messages is equal to number of
+ # expected messages
+ def _condition1(*args):
+ msgcount = self._imap_service._inbox.getMessageCount()
+ cond = msgcount == self._number_of_msgs
+ log("[condition 1] received messages: %d (expected: %d) :: %s"
+ % (msgcount, self._number_of_msgs, cond))
+ if self.mails_available_time == None \
+ and cond:
+ with self._mails_available_time_lock:
+ self._mails_available_time = time.time()
+ return cond
+
+
+ # condition 2: number of documents in server is equal to in client
+ def _condition2(client_docs, server_docs):
+ cond = client_docs == server_docs
+ log("[condition 2] number of documents: client %d; server %d :: %s"
+ % (client_docs, server_docs, cond))
+ return cond
+
+    # condition 3: number of documents bigger than 2 x number of msgs
+ def _condition3(client_docs, *args):
+ cond = client_docs > (2 * self._number_of_msgs)
+ log("[condition 3] documents (%d) > 2 * msgs (%d) :: %s"
+ % (client_docs, self._number_of_msgs, cond))
+ return cond
+
+ # condition 4: not syncing
+ def _condition4(*args):
+ cond = not self._soledad_client.instance.syncing
+ log("[condition 4] not syncing :: %s" % cond)
+ return cond
+
+ self._conditions.append(_condition1)
+ self._conditions.append(_condition2)
+ self._conditions.append(_condition3)
+ self._conditions.append(_condition4)
+
+ def _test_finished(self):
+ client_docs = self._get_soledad_client_number_of_docs()
+ server_docs = self._get_couchdb_number_of_docs()
+ return not bool(filter(lambda x: not x(client_docs, server_docs),
+ self._conditions))
+
+ def _stop_reactor(self):
+ reactor.stop()
+
+ def _cleanup(self):
+ self._imap_service.stop()
+ self._soledad_client.close()
+ self._soledad_server.stop()
+ self._couchdb_wrapper.stop()
+
+ def _get_soledad_client_number_of_docs(self):
+ c = self._soledad_client.instance._db._db_handle.cursor()
+ c.execute('SELECT COUNT(*) FROM document WHERE content IS NOT NULL')
+ row = c.fetchone()
+ return int(row[0])
+
+ def _get_couchdb_number_of_docs(self):
+ couchdb = self._couchdb_u1db._database
+ view = couchdb.view('_all_docs', include_docs=True)
+ return len(filter(
+ lambda r: '_attachments' in r.values()[1]
+ and 'u1db_content' in r.values()[1]['_attachments'],
+ view.rows))
+
+ @property
+ def mails_available_time(self):
+ with self._mails_available_time_lock:
+ return self._mails_available_time
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('number_of_msgs', help="The number of documents",
+ type=int)
+ parser.add_argument('report_file', help="The name of the report file",
+ type=str)
+ args = parser.parse_args()
+
+ # start a couchdb server
+ couchdb_wrapper, couchdb_u1db = get_couchdb_wrapper_and_u1db(
+ UUID, AUTH_TOKEN)
+
+ put_time = put_lots_of_messages(couchdb_u1db, args.number_of_msgs)
+
+ soledad_server = get_soledad_server(couchdb_wrapper.port)
+
+ soledad_client = SoledadClient(
+ uuid='blah',
+ server_url='http://127.0.0.1:%d' % soledad_server.port,
+ auth_token=AUTH_TOKEN)
+
+ imap_service = get_imap_server(
+ soledad_client.instance, UUID, 'snowden@bitmask.net', AUTH_TOKEN)
+
+ lock = threading.Lock()
+ lock.acquire()
+ test_watcher = TestWatcher(
+ couchdb_wrapper, couchdb_u1db, soledad_server, soledad_client,
+ imap_service, args.number_of_msgs, lock)
+ test_watcher.start()
+
+ # reactor.run() will block until TestWatcher stops the reactor.
+ start_time = time.time()
+ reactor.run()
+ log("Reactor stopped.")
+ end_time = time.time()
+ lock.acquire()
+ mails_available_time = test_watcher.mails_available_time - start_time
+ sync_time = end_time - start_time
+ log("Total syncing time: %f" % sync_time)
+ log("# number_of_msgs put_time mails_available_time sync_time")
+ result = "%d %f %f %f" \
+ % (args.number_of_msgs, put_time, mails_available_time,
+ sync_time)
+ log(result)
+ with open(args.report_file, 'a') as f:
+ f.write(result + "\n")
diff --git a/scripts/profiling/mail/couchdb.ini.template b/scripts/profiling/mail/couchdb.ini.template
new file mode 100644
index 00000000..1fc2205b
--- /dev/null
+++ b/scripts/profiling/mail/couchdb.ini.template
@@ -0,0 +1,224 @@
+; etc/couchdb/default.ini.tpl. Generated from default.ini.tpl.in by configure.
+
+; Upgrading CouchDB will overwrite this file.
+
+[couchdb]
+database_dir = %(tempdir)s/lib
+view_index_dir = %(tempdir)s/lib
+max_document_size = 4294967296 ; 4 GB
+os_process_timeout = 120000 ; 120 seconds. for view and external servers.
+max_dbs_open = 100
+delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned
+uri_file = %(tempdir)s/lib/couch.uri
+file_compression = snappy
+
+[database_compaction]
+; larger buffer sizes can originate smaller files
+doc_buffer_size = 524288 ; value in bytes
+checkpoint_after = 5242880 ; checkpoint after every N bytes were written
+
+[view_compaction]
+; larger buffer sizes can originate smaller files
+keyvalue_buffer_size = 2097152 ; value in bytes
+
+[httpd]
+port = 0
+bind_address = 127.0.0.1
+authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
+default_handler = {couch_httpd_db, handle_request}
+secure_rewrites = true
+vhost_global_handlers = _utils, _uuids, _session, _oauth, _users
+allow_jsonp = false
+; Options for the MochiWeb HTTP server.
+;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
+; For more socket options, consult Erlang's module 'inet' man page.
+;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
+log_max_chunk_size = 1000000
+
+[log]
+file = %(tempdir)s/log/couch.log
+level = info
+include_sasl = true
+
+[couch_httpd_auth]
+authentication_db = _users
+authentication_redirect = /_utils/session.html
+require_valid_user = false
+timeout = 600 ; number of seconds before automatic logout
+auth_cache_size = 50 ; size is number of cache entries
+allow_persistent_cookies = false ; set to true to allow persistent cookies
+
+[couch_httpd_oauth]
+; If set to 'true', oauth token and consumer secrets will be looked up
+; in the authentication database (_users). These secrets are stored in
+; a top level property named "oauth" in user documents. Example:
+; {
+; "_id": "org.couchdb.user:joe",
+; "type": "user",
+; "name": "joe",
+; "password_sha": "fe95df1ca59a9b567bdca5cbaf8412abd6e06121",
+; "salt": "4e170ffeb6f34daecfd814dfb4001a73"
+; "roles": ["foo", "bar"],
+; "oauth": {
+; "consumer_keys": {
+; "consumerKey1": "key1Secret",
+; "consumerKey2": "key2Secret"
+; },
+; "tokens": {
+; "token1": "token1Secret",
+; "token2": "token2Secret"
+; }
+; }
+; }
+use_users_db = false
+
+[query_servers]
+; javascript = %(tempdir)s/server/main.js
+javascript = /usr/bin/couchjs /usr/share/couchdb/server/main.js
+coffeescript = /usr/bin/couchjs /usr/share/couchdb/server/main-coffee.js
+
+
+; Changing reduce_limit to false will disable reduce_limit.
+; If you think you're hitting reduce_limit with a "good" reduce function,
+; please let us know on the mailing list so we can fine tune the heuristic.
+[query_server_config]
+reduce_limit = true
+os_process_limit = 25
+
+[daemons]
+view_manager={couch_view, start_link, []}
+external_manager={couch_external_manager, start_link, []}
+query_servers={couch_query_servers, start_link, []}
+vhosts={couch_httpd_vhost, start_link, []}
+httpd={couch_httpd, start_link, []}
+stats_aggregator={couch_stats_aggregator, start, []}
+stats_collector={couch_stats_collector, start, []}
+uuids={couch_uuids, start, []}
+auth_cache={couch_auth_cache, start_link, []}
+replication_manager={couch_replication_manager, start_link, []}
+os_daemons={couch_os_daemons, start_link, []}
+compaction_daemon={couch_compaction_daemon, start_link, []}
+
+[httpd_global_handlers]
+/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
+
+_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req}
+_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
+_config = {couch_httpd_misc_handlers, handle_config_req}
+_replicate = {couch_httpd_replicator, handle_req}
+_uuids = {couch_httpd_misc_handlers, handle_uuids_req}
+_restart = {couch_httpd_misc_handlers, handle_restart_req}
+_stats = {couch_httpd_stats_handlers, handle_stats_req}
+_log = {couch_httpd_misc_handlers, handle_log_req}
+_session = {couch_httpd_auth, handle_session_req}
+_oauth = {couch_httpd_oauth, handle_oauth_req}
+
+[httpd_db_handlers]
+_view_cleanup = {couch_httpd_db, handle_view_cleanup_req}
+_compact = {couch_httpd_db, handle_compact_req}
+_design = {couch_httpd_db, handle_design_req}
+_temp_view = {couch_httpd_view, handle_temp_view_req}
+_changes = {couch_httpd_db, handle_changes_req}
+
+; The external module takes an optional argument allowing you to narrow it to a
+; single script. Otherwise the script name is inferred from the first path section
+; after _external's own path.
+; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>}
+; _external = {couch_httpd_external, handle_external_req}
+
+[httpd_design_handlers]
+_view = {couch_httpd_view, handle_view_req}
+_show = {couch_httpd_show, handle_doc_show_req}
+_list = {couch_httpd_show, handle_view_list_req}
+_info = {couch_httpd_db, handle_design_info_req}
+_rewrite = {couch_httpd_rewrite, handle_rewrite_req}
+_update = {couch_httpd_show, handle_doc_update_req}
+
+; enable external as an httpd handler, then link it with commands here.
+; note, this api is still under consideration.
+; [external]
+; mykey = /path/to/mycommand
+
+; Here you can setup commands for CouchDB to manage
+; while it is alive. It will attempt to keep each command
+; alive if it exits.
+; [os_daemons]
+; some_daemon_name = /path/to/script -with args
+
+
+[uuids]
+; Known algorithms:
+; random - 128 bits of random awesome
+; All awesome, all the time.
+; sequential - monotonically increasing ids with random increments
+; First 26 hex characters are random. Last 6 increment in
+; random amounts until an overflow occurs. On overflow, the
+; random prefix is regenerated and the process starts over.
+; utc_random - Time since Jan 1, 1970 UTC with microseconds
+; First 14 characters are the time in hex. Last 18 are random.
+algorithm = sequential
+
+[stats]
+; rate is in milliseconds
+rate = 1000
+; sample intervals are in seconds
+samples = [0, 60, 300, 900]
+
+[attachments]
+compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
+compressible_types = text/*, application/javascript, application/json, application/xml
+
+[replicator]
+db = _replicator
+; Maximum replication retry count can be a non-negative integer or "infinity".
+max_replication_retry_count = 10
+; More worker processes can give higher network throughput but can also
+; imply more disk and network IO.
+worker_processes = 4
+; With lower batch sizes checkpoints are done more frequently. Lower batch sizes
+; also reduce the total amount of used RAM memory.
+worker_batch_size = 500
+; Maximum number of HTTP connections per replication.
+http_connections = 20
+; HTTP connection timeout per replication.
+; Even for very fast/reliable networks it might need to be increased if a remote
+; database is too busy.
+connection_timeout = 30000
+; If a request fails, the replicator will retry it up to N times.
+retries_per_request = 10
+; Some socket options that might boost performance in some scenarios:
+; {nodelay, boolean()}
+; {sndbuf, integer()}
+; {recbuf, integer()}
+; {priority, integer()}
+; See the `inet` Erlang module's man page for the full list of options.
+socket_options = [{keepalive, true}, {nodelay, false}]
+; Path to a file containing the user's certificate.
+;cert_file = /full/path/to/server_cert.pem
+; Path to file containing user's private PEM encoded key.
+;key_file = /full/path/to/server_key.pem
+; String containing the user's password. Only used if the private keyfile is password protected.
+;password = somepassword
+; Set to true to validate peer certificates.
+verify_ssl_certificates = false
+; File containing a list of peer trusted certificates (in the PEM format).
+;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
+; Maximum peer certificate depth (must be set even if certificate validation is off).
+ssl_certificate_max_depth = 3
+
+[compaction_daemon]
+; The delay, in seconds, between each check for which database and view indexes
+; need to be compacted.
+check_interval = 300
+; If a database or view index file is smaller than this value (in bytes),
+; compaction will not happen. Very small files always have a very high
+; fragmentation, so compacting them is not worthwhile.
+min_file_size = 131072
+
+[compactions]
+; List of compaction rules for the compaction daemon.
+
+
+;[admins]
+;testuser = -hashed-f50a252c12615697c5ed24ec5cd56b05d66fe91e,b05471ba260132953930cf9f97f327f5
+; pass for above user is 'testpass'
diff --git a/scripts/profiling/mail/couchdb_server.py b/scripts/profiling/mail/couchdb_server.py
new file mode 100644
index 00000000..2cf0a3fd
--- /dev/null
+++ b/scripts/profiling/mail/couchdb_server.py
@@ -0,0 +1,42 @@
+import hashlib
+import couchdb
+
+from leap.soledad.common.couch import CouchDatabase
+
+from util import log
+from couchdb_wrapper import CouchDBWrapper
+
+
+def start_couchdb_wrapper():
+ log("Starting couchdb... ", line_break=False)
+ couchdb_wrapper = CouchDBWrapper()
+ couchdb_wrapper.start()
+ log("couchdb started on port %d." % couchdb_wrapper.port)
+ return couchdb_wrapper
+
+
+def get_u1db_database(dbname, port):
+ return CouchDatabase.open_database(
+ 'http://127.0.0.1:%d/%s' % (port, dbname),
+ True,
+ ensure_ddocs=True)
+
+
+def create_tokens_database(port, uuid, token_value):
+ tokens_database = couchdb.Server(
+ 'http://127.0.0.1:%d' % port).create('tokens')
+ token = couchdb.Document()
+ token['_id'] = hashlib.sha512(token_value).hexdigest()
+ token['user_id'] = uuid
+ token['type'] = 'Token'
+ tokens_database.save(token)
+
+
+def get_couchdb_wrapper_and_u1db(uuid, token_value):
+ couchdb_wrapper = start_couchdb_wrapper()
+
+ couchdb_u1db = get_u1db_database('user-%s' % uuid, couchdb_wrapper.port)
+ get_u1db_database('shared', couchdb_wrapper.port)
+ create_tokens_database(couchdb_wrapper.port, uuid, token_value)
+
+ return couchdb_wrapper, couchdb_u1db
diff --git a/scripts/profiling/mail/couchdb_wrapper.py b/scripts/profiling/mail/couchdb_wrapper.py
new file mode 100644
index 00000000..cad1205b
--- /dev/null
+++ b/scripts/profiling/mail/couchdb_wrapper.py
@@ -0,0 +1,84 @@
+import re
+import os
+import tempfile
+import subprocess
+import time
+import shutil
+
+
+from leap.common.files import mkdir_p
+
+
+class CouchDBWrapper(object):
+ """
+ Wrapper for external CouchDB instance.
+ """
+
+ def start(self):
+ """
+ Start a CouchDB instance for a test.
+ """
+ self.tempdir = tempfile.mkdtemp(suffix='.couch.test')
+
+ path = os.path.join(os.path.dirname(__file__),
+ 'couchdb.ini.template')
+ handle = open(path)
+ conf = handle.read() % {
+ 'tempdir': self.tempdir,
+ }
+ handle.close()
+
+ confPath = os.path.join(self.tempdir, 'test.ini')
+ handle = open(confPath, 'w')
+ handle.write(conf)
+ handle.close()
+
+ # create the dirs from the template
+ mkdir_p(os.path.join(self.tempdir, 'lib'))
+ mkdir_p(os.path.join(self.tempdir, 'log'))
+ args = ['couchdb', '-n', '-a', confPath]
+ null = open('/dev/null', 'w')
+
+ self.process = subprocess.Popen(
+ args, env=None, stdout=null.fileno(), stderr=null.fileno(),
+ close_fds=True)
+ # find port
+ logPath = os.path.join(self.tempdir, 'log', 'couch.log')
+ while not os.path.exists(logPath):
+ if self.process.poll() is not None:
+ got_stdout, got_stderr = "", ""
+ if self.process.stdout is not None:
+ got_stdout = self.process.stdout.read()
+
+ if self.process.stderr is not None:
+ got_stderr = self.process.stderr.read()
+ raise Exception("""
+couchdb exited with code %d.
+stdout:
+%s
+stderr:
+%s""" % (
+ self.process.returncode, got_stdout, got_stderr))
+ time.sleep(0.01)
+ while os.stat(logPath).st_size == 0:
+ time.sleep(0.01)
+ PORT_RE = re.compile(
+ 'Apache CouchDB has started on http://127.0.0.1:(?P<port>\d+)')
+
+ handle = open(logPath)
+ m = None
+ line = handle.readline()
+ while m is None:
+ m = PORT_RE.search(line)
+ line = handle.readline()
+ handle.close()
+ self.port = int(m.group('port'))
+
+ def stop(self):
+ """
+ Terminate the CouchDB instance.
+ """
+ self.process.terminate()
+ self.process.communicate()
+ shutil.rmtree(self.tempdir)
+
diff --git a/scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.pub b/scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.pub
new file mode 100644
index 00000000..fee53b6d
--- /dev/null
+++ b/scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.pub
@@ -0,0 +1,30 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1.4.12 (GNU/Linux)
+
+mQENBFQEwmABCADC4wYD3mFt8xJtl3gjxRPEGN+FcgvzxxECIhyjYCHszrJu3f65
+/nyruriYdQLGR4YmUdERIwsZ7AMkAM1NAXe7sMq/gRPCb4PwrE7pRKzPAmaLeJMQ
+DC9CSCP+2gUmzeKHS71GkddcUI1HFr1AX9lLVW2ScvmSzOllenyUoFKRvz2uGkLG
+r5pvKsxJUHl9enpHRZV/0X5Y6PCinb4+eN2/ZTdpAywOycU+L+zflA0SOTCtf+dg
+8k839T30piuBulDLNeOX84YcyXTW7XeCeRTg/ryoFaYhbOGt68BwnP9xlpU62LW0
+8vzSZ0mLm4Ttz2uaALEoLmsa91nyLi9pLtrRABEBAAG0IEVkIFNub3dkZW4gPHNu
+b3dkZW5AYml0bWFzay5uZXQ+iQE4BBMBAgAiBQJUBMJgAhsDBgsJCAcDAgYVCAIJ
+CgsEFgIDAQIeAQIXgAAKCRAbRQ5mX+Y1cx4RCACzEiHpmknl+HnB3bHGcr8VZvU9
+hIoclVR/OBjWQFUynr66XmaMHMOLAVoZkIPnezWQe3gDY7QlFCNCfz8SC2++4WtB
+aBzal9IREkVnQBdnWalxLRviNH+zoFQ0URunBAyH4QAJRUC5tWfNj4yI6BCFPwXL
+o0CCISIN+VMRAnwjABQD840/TbcMHDqmJyk/vpPYPFQqQudN3eB2hphKUkZMistP
+O9++ui6glso+MgsbIUdqgnblM3FSrbjfLKekC+MeunFr8qRjettdaVyFD4GLg2SH
+/JpsjZKYoZStatpdJcrNjUMsGtXLxaCPl+VldNuOKIsA85TZJomMiaBDqG9YuQEN
+BFQEwmABCACrYiPXyGWHvs/aFKM63y9l6Th/+SKfzeq+ksLUI6fJIQytGORiiYZC
+1LrhOTmir+dY3IygkFlldxehGt/OMUKLB774WhBDRI43rAhImwhNutTIuUTO7DsD
+y7u83oVQH6xGZW5afs5BEU56Oa8DdUUA5gLfnpqAJG2mLB12JhClxzOYXK/VB0wJ
+QsIWl+zyN7uLQr5xZOthzvP6p7MmsAjhzU1imwyEm8s91DLhwonuqadkMGKi2qHW
+xuwxnr9aHQmobzy68/vOiBFeumr0YarirUdEDiUIti4rqy+0oteTNeMtXWo5rTtx
+xeayw+TjjaOT2fZ6CAbq0I+lOW0aJrPFABEBAAGJAR8EGAECAAkFAlQEwmACGwwA
+CgkQG0UOZl/mNXM0SggAuXzaLafCZiWx28K6mPKdgDOwTMm2rD7ukf3JiswlIyIU
+/K19BENu82iHRSu4nb9amhHOLEhaf1Ep2JTf2Trmd+/SNh0kv3dSBNjCrvrMvtcA
+qVxGc3DtRufGeRoy8ow/sEg+BCcfxJgR1efHOSQfMELDz2v8vbLbkR3Ubm7YRtKr
+Ri2HWYrAXRrwFC07yqO2zptCND/LBtnMrp08AOSSLpRWVD/Ww6IE1v1UEN53aGsm
+D+L/1XkuP4L9cqG3E2NYfsOPiblqRiKSe1adVid/rLn94u+fpE4kuvxoGKn1FJ/m
+FqU8aPtxvPbsMkSoNOalxqJGpuWRTXTLb5I+Ed2Szw==
+=yRE/
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.sec b/scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.sec
new file mode 100644
index 00000000..64cb6c2a
--- /dev/null
+++ b/scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.sec
@@ -0,0 +1,57 @@
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+Version: GnuPG v1.4.12 (GNU/Linux)
+
+lQOYBFQEwmABCADC4wYD3mFt8xJtl3gjxRPEGN+FcgvzxxECIhyjYCHszrJu3f65
+/nyruriYdQLGR4YmUdERIwsZ7AMkAM1NAXe7sMq/gRPCb4PwrE7pRKzPAmaLeJMQ
+DC9CSCP+2gUmzeKHS71GkddcUI1HFr1AX9lLVW2ScvmSzOllenyUoFKRvz2uGkLG
+r5pvKsxJUHl9enpHRZV/0X5Y6PCinb4+eN2/ZTdpAywOycU+L+zflA0SOTCtf+dg
+8k839T30piuBulDLNeOX84YcyXTW7XeCeRTg/ryoFaYhbOGt68BwnP9xlpU62LW0
+8vzSZ0mLm4Ttz2uaALEoLmsa91nyLi9pLtrRABEBAAEAB/0cLb885/amczMC7ZfN
+dD17aS1ImkjoIqxu5ofFh6zgFLLwHOEr+4QDQKhYQvL3wHfBKqtUEwET6nA50HPe
+4otxdAqczgkRYBZvwjpWuDtUY0B4giKhe2GJ7+xkeRmtlq9eaLEhdwzwqCUFVmBe
+4n0Ey4FgX4d+lmpY5fEFfHjz4bZpoCrNZKtiGtOqdlKXm8PnU+ek+G7DFuavJ+g5
+B4fiqkLAYFX/IDFfaTSBYzNDPbSQR5n4Q4r9PdKazPXg7bnLuxAIY4i6KEXq2YpS
+T1vLanCnBd4BEDUODCPZdc/AtbE0U+XoKTBjTvk3UEGIRJSsju8A1vWOG7UCl+0d
+UMmRBADaiQYnp9QiwPDbpqxzlWN8Ms/+tAyRnBbhghcRqtrDSke6fSJAqXzVGVmF
+FSJPMFf4mBYbr1U3YlYOJrlrb3tVhVN+7PTZDIaaENbtcsUAu7hTr7Ko6r1+WONC
+yhtrtOR9sWHVbTZ09ZvyvjHnBqZVA2PuZLUn2wrimnIJbVNdlwQA5EwgoS8UuDob
+hs6tLg29bAEDZRBHXQcDuEwdAX0KCHW0oQ0UE7exbDXXfQJSD9X3fDeqI+BdI+qQ
+Yuauz+fJxKl+qHAcy5l5NT7qomEjHCzjGUnn4NJzkn6a3T4SrBdSMFY2hL/tJN0i
+v1hXVNatjCEotqqsor+C6bf+Sl4I59cEAK+tYLTo/d+KOWtW4XbVhcYHjTBKtJGH
+p2/wNb49ibYpkgOUqW2ebiCB0Lg6QEupomcaMOJGol3v8vwBKsuwQJhWJrAXC2sT
+Bck5mI+DbabyAbYFtZgNHbcdDy62ADg1xD2Je7IjUDcpYaGB3VFhpD2rSvWDeSjR
+3jTG3PPINfoBODK0IEVkIFNub3dkZW4gPHNub3dkZW5AYml0bWFzay5uZXQ+iQE4
+BBMBAgAiBQJUBMJgAhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRAbRQ5m
+X+Y1cx4RCACzEiHpmknl+HnB3bHGcr8VZvU9hIoclVR/OBjWQFUynr66XmaMHMOL
+AVoZkIPnezWQe3gDY7QlFCNCfz8SC2++4WtBaBzal9IREkVnQBdnWalxLRviNH+z
+oFQ0URunBAyH4QAJRUC5tWfNj4yI6BCFPwXLo0CCISIN+VMRAnwjABQD840/TbcM
+HDqmJyk/vpPYPFQqQudN3eB2hphKUkZMistPO9++ui6glso+MgsbIUdqgnblM3FS
+rbjfLKekC+MeunFr8qRjettdaVyFD4GLg2SH/JpsjZKYoZStatpdJcrNjUMsGtXL
+xaCPl+VldNuOKIsA85TZJomMiaBDqG9YnQOYBFQEwmABCACrYiPXyGWHvs/aFKM6
+3y9l6Th/+SKfzeq+ksLUI6fJIQytGORiiYZC1LrhOTmir+dY3IygkFlldxehGt/O
+MUKLB774WhBDRI43rAhImwhNutTIuUTO7DsDy7u83oVQH6xGZW5afs5BEU56Oa8D
+dUUA5gLfnpqAJG2mLB12JhClxzOYXK/VB0wJQsIWl+zyN7uLQr5xZOthzvP6p7Mm
+sAjhzU1imwyEm8s91DLhwonuqadkMGKi2qHWxuwxnr9aHQmobzy68/vOiBFeumr0
+YarirUdEDiUIti4rqy+0oteTNeMtXWo5rTtxxeayw+TjjaOT2fZ6CAbq0I+lOW0a
+JrPFABEBAAEAB/4kyb13Z4MRyy37OkRakgdu2QvhfoVF59Hso/yxxFCTHibGLkpx
+82LQTDEsQNgkGZ2vp7IBElM6MkDuemIRtOW7icdesJh+lAPyI9moWi0DYGgmCQzh
+3PgDBdPQBDT6IL5eYw3323HjKjeeCW1NsPnFqlnyDe3MtWUbDyuozZ1ztA+Rekhb
+UhEDK8ZccEKwpzrE2H5zBZLeY0OKKROGnwd1RBVXnHMgVRF7vbellYaR4h2odxOp
+X8Ho4Xbs1h2VRNIuZwtfXxTIuTIfujlIPXMtVY40dgnEGt9PosJNr9IfGpfE3JCu
+k9PTvq37aZkQbYj52nccwKdos+sLQgqAdHhZBADHg7B5jyRRObsCUXQ+jMHXxuqT
+5l1twwOovvLC7YZoC8NAl4Bi0rh1Zj0ZEJJLFGzeiH+15C4qFTY+ospWpGu6X6g5
+I8ZWya8m2NSEWyJZNI1zKIU0iXucLevVTx+ctnovUNnb89v52/+BKr4k2iRISAzT
+7RL63aFTgnLw9GKweQQA2+eU5jcQ6LobPY/fZZImnhwLDq/OaUV+7u1RfB04GA15
+HOGQV77np/QTM6b+ezKTFhG/HMCTqxf+HPHfzohBPF9zvboLvCkqaHBDiV9qYE96
+id/el3ZeWloLcEe62sMGbv0YYmsYWgJxL8BFGw5v1QpYbfQCnXLjyG+/9f6Ygq0D
+/0W9X/NxWUyAXOv5KRy+rpkpNVxvie4tduvyVUa/9XHF7D/DMaXqkIvVX8yZUIDR
+bjuIvGZkZ9QP8zf8NKkB98zbqZi6CbNrerjrDpb7Pj7uQd3GIcjW4UmENGA6t7U9
+IWen966PAXSzh3996tRHxwXexVIEdX5n4pO39ZiodEIOPzmJAR8EGAECAAkFAlQE
+wmACGwwACgkQG0UOZl/mNXM0SggAuXzaLafCZiWx28K6mPKdgDOwTMm2rD7ukf3J
+iswlIyIU/K19BENu82iHRSu4nb9amhHOLEhaf1Ep2JTf2Trmd+/SNh0kv3dSBNjC
+rvrMvtcAqVxGc3DtRufGeRoy8ow/sEg+BCcfxJgR1efHOSQfMELDz2v8vbLbkR3U
+bm7YRtKrRi2HWYrAXRrwFC07yqO2zptCND/LBtnMrp08AOSSLpRWVD/Ww6IE1v1U
+EN53aGsmD+L/1XkuP4L9cqG3E2NYfsOPiblqRiKSe1adVid/rLn94u+fpE4kuvxo
+GKn1FJ/mFqU8aPtxvPbsMkSoNOalxqJGpuWRTXTLb5I+Ed2Szw==
+=9xZX
+-----END PGP PRIVATE KEY BLOCK-----
diff --git a/scripts/profiling/mail/mail.py b/scripts/profiling/mail/mail.py
new file mode 100644
index 00000000..8504c762
--- /dev/null
+++ b/scripts/profiling/mail/mail.py
@@ -0,0 +1,50 @@
+import os
+import threading
+
+from twisted.internet import reactor
+
+from leap.mail.imap.service import imap
+from leap.keymanager import KeyManager
+
+from util import log
+
+
+class IMAPServerThread(threading.Thread):
+ def __init__(self, imap_service):
+ threading.Thread.__init__(self)
+ self._imap_service = imap_service
+
+ def run(self):
+ self._imap_service.start_loop()
+ reactor.run()
+
+ def stop(self):
+ self._imap_service.stop()
+ reactor.stop()
+
+
+def get_imap_server(soledad, uuid, address, token):
+ log("Starting imap... ", line_break=False)
+
+ keymanager = KeyManager(address, '', soledad, token=token, uid=uuid)
+ with open(
+ os.path.join(
+ os.path.dirname(__file__),
+ 'keys/5447A9AD50E3075ECCE432711B450E665FE63573.sec'), 'r') as f:
+ pubkey, privkey = keymanager.parse_openpgp_ascii_key(f.read())
+ keymanager.put_key(privkey)
+
+ imap_service, imap_port, imap_factory = imap.run_service(
+ soledad, keymanager, userid=address, offline=False)
+
+ imap_service.start_loop()
+ log("started.")
+ return imap_service
+
+ #imap_server = IMAPServerThread(imap_service)
+ #try:
+ # imap_server.start()
+ #except Exception as e:
+ # print str(e)
+
+ #return imap_server
diff --git a/scripts/profiling/mail/mx.py b/scripts/profiling/mail/mx.py
new file mode 100644
index 00000000..b6a1e5cf
--- /dev/null
+++ b/scripts/profiling/mail/mx.py
@@ -0,0 +1,80 @@
+import datetime
+import uuid
+import json
+import timeit
+
+
+from leap.keymanager import openpgp
+from leap.soledad.common.couch import CouchDocument
+from leap.soledad.common.crypto import (
+ EncryptionSchemes,
+ ENC_JSON_KEY,
+ ENC_SCHEME_KEY,
+)
+
+
+from util import log
+
+
+message = """To: Ed Snowden <snowden@bitmask.net>
+Date: %s
+From: Glenn Greenwald <greenwald@bitmask.net>
+
+hi!
+
+"""
+
+
+def get_message():
+ return message % datetime.datetime.now().strftime("%a %b %d %H:%M:%S:%f %Y")
+
+
+def get_enc_json(pubkey, message):
+ with openpgp.TempGPGWrapper(gpgbinary='/usr/bin/gpg') as gpg:
+ gpg.import_keys(pubkey)
+ key = gpg.list_keys().pop()
+ # We don't care about the actual address, so we use a
+ # dummy one, we just care about the import of the pubkey
+ openpgp_key = openpgp._build_key_from_gpg("dummy@mail.com",
+ key, pubkey)
+ enc_json = str(gpg.encrypt(
+ json.dumps(
+ {'incoming': True, 'content': message},
+ ensure_ascii=False),
+ openpgp_key.fingerprint,
+ symmetric=False))
+ return enc_json
+
+
+def get_new_doc(enc_json):
+ doc = CouchDocument(doc_id=str(uuid.uuid4()))
+ doc.content = {
+ 'incoming': True,
+ ENC_SCHEME_KEY: EncryptionSchemes.PUBKEY,
+ ENC_JSON_KEY: enc_json
+ }
+ return doc
+
+
+def get_pubkey():
+ with open('./keys/5447A9AD50E3075ECCE432711B450E665FE63573.pub') as f:
+ return f.read()
+
+
+def put_one_message(pubkey, db):
+ enc_json = get_enc_json(pubkey, get_message())
+ doc = get_new_doc(enc_json)
+ db.put_doc(doc)
+
+
+def put_lots_of_messages(db, number):
+ log("Populating database with %d encrypted messages... "
+ % number, line_break=False)
+ pubkey = get_pubkey()
+ def _put_one_message():
+ put_one_message(pubkey, db)
+ time = timeit.timeit(_put_one_message, number=number)
+ log("done.")
+ average_time = time / number
+ log("put_one_message average time: %f" % average_time)
+ return average_time
diff --git a/scripts/profiling/mail/soledad_client.py b/scripts/profiling/mail/soledad_client.py
new file mode 100644
index 00000000..5ac8ce39
--- /dev/null
+++ b/scripts/profiling/mail/soledad_client.py
@@ -0,0 +1,40 @@
+import tempfile
+import os
+import shutil
+
+from leap.soledad.client import Soledad
+
+
+class SoledadClient(object):
+
+ def __init__(self, uuid, server_url, auth_token):
+ self._uuid = uuid
+ self._server_url = server_url
+ self._auth_token = auth_token
+ self._tempdir = None
+ self._soledad = None
+
+ @property
+ def instance(self):
+ if self._soledad is None:
+ self._soledad = self._get_soledad_client()
+ return self._soledad
+
+ def _get_soledad_client(self):
+ self._tempdir = tempfile.mkdtemp()
+ return Soledad(
+ uuid=self._uuid,
+ passphrase=u'123',
+ secrets_path=os.path.join(self._tempdir, 'secrets.json'),
+ local_db_path=os.path.join(self._tempdir, 'soledad.db'),
+ server_url=self._server_url,
+ cert_file=None,
+ auth_token=self._auth_token,
+ secret_id=None,
+ defer_encryption=True)
+
+ def close(self):
+ if self._soledad is not None:
+ self._soledad.close()
+ if self._tempdir is not None:
+ shutil.rmtree(self._tempdir)
diff --git a/scripts/profiling/mail/soledad_server.py b/scripts/profiling/mail/soledad_server.py
new file mode 100644
index 00000000..ad014456
--- /dev/null
+++ b/scripts/profiling/mail/soledad_server.py
@@ -0,0 +1,48 @@
+import threading
+
+from wsgiref.simple_server import make_server
+
+from leap.soledad.common.couch import CouchServerState
+
+from leap.soledad.server import SoledadApp
+from leap.soledad.server.gzip_middleware import GzipMiddleware
+from leap.soledad.server.auth import SoledadTokenAuthMiddleware
+
+from util import log
+
+
+class SoledadServerThread(threading.Thread):
+ def __init__(self, server):
+ threading.Thread.__init__(self)
+ self._server = server
+
+ def run(self):
+ self._server.serve_forever()
+
+ def stop(self):
+ self._server.shutdown()
+
+ @property
+ def port(self):
+ return self._server.server_port
+
+
+def make_soledad_server_thread(couch_port):
+ state = CouchServerState(
+ 'http://127.0.0.1:%d' % couch_port,
+ 'shared',
+ 'tokens')
+ application = GzipMiddleware(
+ SoledadTokenAuthMiddleware(SoledadApp(state)))
+ server = make_server('', 0, application)
+ t = SoledadServerThread(server)
+ return t
+
+
+def get_soledad_server(couchdb_port):
+ log("Starting soledad server... ", line_break=False)
+ soledad_server = make_soledad_server_thread(couchdb_port)
+ soledad_server.start()
+ log("soledad server started on port %d." % soledad_server.port)
+ return soledad_server
+
diff --git a/scripts/profiling/mail/util.py b/scripts/profiling/mail/util.py
new file mode 100644
index 00000000..86118e88
--- /dev/null
+++ b/scripts/profiling/mail/util.py
@@ -0,0 +1,8 @@
+import sys
+
+
+def log(msg, line_break=True):
+ sys.stdout.write(msg)
+ if line_break:
+ sys.stdout.write("\n")
+ sys.stdout.flush()
diff --git a/scripts/profiling/storage/benchmark-storage.py b/scripts/profiling/storage/benchmark-storage.py
new file mode 100644
index 00000000..79ee3270
--- /dev/null
+++ b/scripts/profiling/storage/benchmark-storage.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+
+# scenarios:
+# 1. soledad instantiation time.
+#    a. for a nonexistent db.
+# b. for existing db.
+# 2. soledad doc storage/retrieval.
+# a. 1 KB document.
+#    b. 10 KB.
+# c. 100 KB.
+# d. 1 MB.
+
+
+import logging
+import getpass
+import tempfile
+import argparse
+import shutil
+import timeit
+
+
+from util import ValidateUserHandle
+
+# benchmarking args
+REPEAT_NUMBER = 1000
+DOC_SIZE = 1024
+
+
+# create a logger
+logger = logging.getLogger(__name__)
+LOG_FORMAT = '%(asctime)s %(message)s'
+logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
+
+
def parse_args():
    """Parse command line arguments.

    Returns a (username, provider, passphrase, basedir, logfile) tuple,
    prompting interactively for the passphrase and creating a temporary
    base directory when those are not given on the command line.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'user@provider', action=ValidateUserHandle, help='the user handle')
    parser.add_argument(
        '-b', dest='basedir', required=False, default=None,
        help='soledad base directory')
    parser.add_argument(
        '-p', dest='passphrase', required=False, default=None,
        help='the user passphrase')
    parser.add_argument(
        # fixed typo in the default filename: was '/tmp/benchhmark-storage.log'
        '-l', dest='logfile', required=False,
        default='/tmp/benchmark-storage.log',
        help='the file to which write the benchmark logs')
    args = parser.parse_args()
    # prompt for the password if it was not given on the command line
    passphrase = args.passphrase
    if passphrase is None:
        passphrase = getpass.getpass(
            'Password for %s@%s: ' % (args.username, args.provider))
    # create a fresh temporary base directory if none was given
    basedir = args.basedir
    if basedir is None:
        basedir = tempfile.mkdtemp()
        logger.info('Using %s as base directory.' % basedir)

    return args.username, args.provider, passphrase, basedir, args.logfile
+
+
if __name__ == '__main__':
    username, provider, passphrase, basedir, logfile = parse_args()
    # (doc size in bytes, seconds) tuples collected for the final report
    create_results = []
    getall_results = []
    # benchmark doc sizes of DOC_SIZE*i bytes: 1 KB, then 200..1000 KB
    for i in [1, 200, 400, 600, 800, 1000]:
        # each size gets a fresh soledad db under its own temp dir
        tempdir = tempfile.mkdtemp(dir=basedir)
        setup_common = """
import os
#from benchmark_storage_utils import benchmark_fun
#from benchmark_storage_utils import get_soledad_instance
from client_side_db import get_soledad_instance
sol = get_soledad_instance('%s', '%s', '%s', '%s')
    """ % (username, provider, passphrase, tempdir)

        # creation setup: a doc holding DOC_SIZE*i bytes of hex-encoded data
        setup_create = setup_common + """
content = {'data': os.urandom(%d/2).encode('hex')}
""" % (DOC_SIZE * i)
        # note: 'time' below is a float of seconds (timeit result),
        # not the time module (which is not imported here)
        time = timeit.timeit(
            'sol.create_doc(content);',
            setup=setup_create, number=REPEAT_NUMBER)
        create_results.append((DOC_SIZE*i, time))
        print "CREATE: %d %f" % (DOC_SIZE*i, time)

        # retrieval setup: collect the ids of all docs created above
        setup_get = setup_common + """
doc_ids = [doc.doc_id for doc in sol.get_all_docs()[1]]
"""

        # retrieve every stored doc exactly once (number=1)
        time = timeit.timeit(
            "[sol.get_doc(doc_id) for doc_id in doc_ids]",
            setup=setup_get, number=1)
        getall_results.append((DOC_SIZE*i, time))
        print "GET_ALL: %d %f" % (DOC_SIZE*i, time)
        shutil.rmtree(tempdir)
    # report: two "size seconds" tables, creation first, then retrieval
    print "# size, time for creation of %d docs" % REPEAT_NUMBER
    for size, time in create_results:
        print size, time
    print "# size, time for retrieval of %d docs" % REPEAT_NUMBER
    for size, time in getall_results:
        print size, time
    shutil.rmtree(basedir)
+
diff --git a/scripts/profiling/storage/benchmark_storage_utils.py b/scripts/profiling/storage/benchmark_storage_utils.py
new file mode 100644
index 00000000..fa8bb658
--- /dev/null
+++ b/scripts/profiling/storage/benchmark_storage_utils.py
@@ -0,0 +1,4 @@
+from client_side_db import get_soledad_instance
+
def benchmark_fun(sol, content):
    """Store one document with the given content in the soledad instance."""
    sol.create_doc(content)
diff --git a/scripts/profiling/storage/client_side_db.py b/scripts/profiling/storage/client_side_db.py
new file mode 120000
index 00000000..9e49a7f0
--- /dev/null
+++ b/scripts/profiling/storage/client_side_db.py
@@ -0,0 +1 @@
+../../db_access/client_side_db.py \ No newline at end of file
diff --git a/scripts/profiling/storage/plot.py b/scripts/profiling/storage/plot.py
new file mode 100755
index 00000000..280b9375
--- /dev/null
+++ b/scripts/profiling/storage/plot.py
@@ -0,0 +1,94 @@
+#!/usr/bin/python
+
+
+# Create a plot of the results of running the ./benchmark-storage.py script.
+
+
+import argparse
+from matplotlib import pyplot as plt
+
+from sets import Set
+
+
def plot(filename, subtitle=''):
    """Plot per-KB doc creation/retrieval times from a benchmark data file.

    The file is expected in the format written by benchmark-storage.py:
    one header line, six "size time" creation lines, another header line,
    and six "size time" retrieval lines.
    """

    # config the plot
    plt.xlabel('doc size (KB)')
    plt.ylabel('operation time (s)')
    title = 'soledad 1000 docs creation/retrieval times'
    if subtitle != '':
        title += '- %s' % subtitle
    plt.title(title)

    # x collects the distinct doc sizes; ycreate/yget the raw timings
    x = Set()
    ycreate = []
    yget = []

    ys = []
    #ys.append((ycreate, 'creation time', 'r', '-'))
    #ys.append((yget, 'retrieval time', 'b', '-'))

    # read data from file
    with open(filename, 'r') as f:
        f.readline()  # skip the creation header line
        for i in xrange(6):
            size, y = f.readline().strip().split(' ')
            x.add(int(size))
            ycreate.append(float(y))

        f.readline()  # skip the retrieval header line
        for i in xrange(6):
            size, y = f.readline().strip().split(' ')
            x.add(int(size))
            yget.append(float(y))

    # get doc size in KB
    # NOTE(review): sorting x assumes the data file lists sizes in
    # ascending order, so that ycreate[i]/yget[i] still line up with
    # x[i] -- true for files produced by benchmark-storage.py; confirm
    # before feeding hand-edited data.
    x = list(x)
    x.sort()
    x = map(lambda val: val / 1024, x)

    # get normalized results per KB
    nycreate = []
    nyget = []
    for i in xrange(len(x)):
        nycreate.append(ycreate[i]/x[i])
        nyget.append(yget[i]/x[i])

    ys.append((nycreate, 'creation time per KB', 'r', '-.'))
    ys.append((nyget, 'retrieval time per KB', 'b', '-.'))

    # each ys entry is (values, label, color, linestyle)
    for y in ys:
        kwargs = {
            'linewidth': 1.0,
            'marker': '.',
            'color': y[2],
            'linestyle': y[3],
        }
        # normalize by doc size
        plt.plot(
            x,
            y[0],
            label=y[1], **kwargs)

    #plt.axes().get_xaxis().set_ticks(x)
    #plt.axes().get_xaxis().set_ticklabels(x)

    # annotate max and min values
    plt.xlim(0, 1100)
    #plt.ylim(0, 350)
    plt.grid()
    plt.legend()
    plt.show()
+
+
if __name__ == '__main__':
    # parse command line and hand the data file over to plot()
    argparser = argparse.ArgumentParser()
    argparser.add_argument(
        'datafile',
        help='the data file to plot')
    argparser.add_argument(
        '-s', dest='subtitle', required=False, default='',
        help='a subtitle for the plot')
    opts = argparser.parse_args()
    plot(opts.datafile, opts.subtitle)
diff --git a/scripts/profiling/storage/profile-format.py b/scripts/profiling/storage/profile-format.py
new file mode 100644
index 00000000..262a52ab
--- /dev/null
+++ b/scripts/profiling/storage/profile-format.py
@@ -0,0 +1,29 @@
+#!/usr/bin/python
+
+import argparse
+import pstats
+
+
def parse_args():
    """Return the list of stats file paths given via repeated -f options."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-f', dest='statsfiles', action='append', required=True,
        help='a stats file')
    return parser.parse_args().statsfiles
+
+
def format_stats(statsfiles):
    """Print each stats file twice: sorted by internal time, then by
    cumulative time, with directory prefixes stripped from paths."""
    for path in statsfiles:
        stats = pstats.Stats(path)
        stats.strip_dirs()
        for order in ('time', 'cumulative'):
            stats.sort_stats(order)
            stats.print_stats()
+
+
if __name__ == '__main__':
    # format every stats file named on the command line
    format_stats(parse_args())
diff --git a/scripts/profiling/storage/profile-storage.py b/scripts/profiling/storage/profile-storage.py
new file mode 100755
index 00000000..305e6d5a
--- /dev/null
+++ b/scripts/profiling/storage/profile-storage.py
@@ -0,0 +1,107 @@
+#!/usr/bin/python
+
+import os
+import logging
+import getpass
+import tempfile
+import argparse
+import cProfile
+import shutil
+import pstats
+import StringIO
+import datetime
+
+
+from client_side_db import get_soledad_instance
+from util import ValidateUserHandle
+
+# profiling args
+NUM_DOCS = 1
+DOC_SIZE = 1024**2
+
+
+# create a logger
+logger = logging.getLogger(__name__)
+LOG_FORMAT = '%(asctime)s %(message)s'
+logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
+
+
def parse_args():
    """Parse command line arguments.

    Returns a (username, provider, passphrase, basedir, logdir) tuple,
    prompting interactively for the passphrase and creating a temporary
    base directory when those are not given on the command line.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'user@provider', action=ValidateUserHandle, help='the user handle')
    parser.add_argument(
        '-b', dest='basedir', required=False, default=None,
        help='soledad base directory')
    parser.add_argument(
        '-p', dest='passphrase', required=False, default=None,
        help='the user passphrase')
    parser.add_argument(
        # fixed typo in help text: was 'direcroty'
        '-d', dest='logdir', required=False, default='/tmp/',
        help='the directory to which write the profile stats')
    args = parser.parse_args()
    # prompt for the password if it was not given on the command line
    passphrase = args.passphrase
    if passphrase is None:
        passphrase = getpass.getpass(
            'Password for %s@%s: ' % (args.username, args.provider))
    # create a fresh temporary base directory if none was given
    basedir = args.basedir
    if basedir is None:
        basedir = tempfile.mkdtemp()
        logger.info('Using %s as base directory.' % basedir)

    return args.username, args.provider, passphrase, basedir, args.logdir
+
+created_docs = []
+
def create_docs(sol, content):
    """Create NUM_DOCS docs with the given content, recording their ids
    in the module-level created_docs list."""
    for _ in xrange(NUM_DOCS):
        created_docs.append(sol.create_doc(content).doc_id)
+
def get_all_docs(sol):
    """Fetch every document whose id was recorded by create_docs()."""
    for known_id in created_docs:
        sol.get_doc(known_id)
+
+def do_profile(logdir, sol):
+ fname_prefix = os.path.join(
+ logdir,
+ "profile_%s" \
+ % datetime.datetime.now().strftime('%Y-%m-%d_%H-%m-%S'))
+
+ # profile create docs
+ content = {'data': os.urandom(DOC_SIZE/2).encode('hex')}
+ pr = cProfile.Profile()
+ pr.runcall(
+ create_docs,
+ sol, content)
+ s = StringIO.StringIO()
+ ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
+ ps.print_stats()
+ ps.dump_stats("%s_creation.stats" % fname_prefix)
+ print s.getvalue()
+
+ # profile get all docs
+ pr = cProfile.Profile()
+ pr.runcall(
+ get_all_docs,
+ sol)
+ s = StringIO.StringIO()
+ ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
+ ps.dump_stats("%s_retrieval.stats" % fname_prefix)
+ ps.print_stats()
+ print s.getvalue()
+
+
if __name__ == '__main__':
    username, provider, passphrase, basedir, logdir = parse_args()
    soledad = get_soledad_instance(username, provider, passphrase, basedir)
    do_profile(logdir, soledad)
    # the soledad db lives under basedir; wipe it when done
    shutil.rmtree(basedir)
+
diff --git a/scripts/profiling/storage/util.py b/scripts/profiling/storage/util.py
new file mode 120000
index 00000000..7f16d684
--- /dev/null
+++ b/scripts/profiling/storage/util.py
@@ -0,0 +1 @@
+../util.py \ No newline at end of file
diff --git a/scripts/profiling/sync/movingaverage.py b/scripts/profiling/sync/movingaverage.py
new file mode 120000
index 00000000..098b0a01
--- /dev/null
+++ b/scripts/profiling/sync/movingaverage.py
@@ -0,0 +1 @@
+../movingaverage.py \ No newline at end of file
diff --git a/scripts/profiling/sync/profile-decoupled.py b/scripts/profiling/sync/profile-decoupled.py
new file mode 100644
index 00000000..a844c3c6
--- /dev/null
+++ b/scripts/profiling/sync/profile-decoupled.py
@@ -0,0 +1,24 @@
+# test_name: soledad-sync
+# start_time: 2014-06-12 20:09:11.232317+00:00
+# elapsed_time total_cpu total_memory proc_cpu proc_memory
+0.000225 68.400000 46.100000 105.300000 0.527224 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.250167 0.000000 0.255160
+0.707006 76.200000 46.200000 90.000000 0.562369 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+1.413140 63.200000 46.100000 0.000000 0.360199 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+2.123962 0.000000 46.100000 0.000000 0.360199 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+2.833941 31.600000 46.100000 0.000000 0.360248 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+3.541532 5.300000 46.100000 0.000000 0.360298 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+4.253390 14.300000 46.100000 11.100000 0.360347 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+4.967365 5.000000 46.100000 0.000000 0.360347 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+5.680172 5.600000 46.100000 0.000000 0.360397 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+6.390501 10.500000 46.100000 0.000000 0.360397 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+7.101711 23.800000 46.000000 0.000000 0.360397 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+7.810529 30.000000 46.000000 0.000000 0.360397 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+8.517835 25.000000 46.100000 0.000000 0.361484 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+9.227455 5.300000 46.000000 9.500000 0.361484 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+9.936479 9.500000 46.000000 10.000000 0.361484 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+10.645015 52.400000 46.200000 0.000000 0.361484 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+11.355179 21.100000 46.000000 0.000000 0.361484 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+12.066252 36.800000 46.000000 0.000000 0.361484 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+12.777689 28.600000 46.000000 0.000000 0.361484 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+13.489886 0.000000 46.000000 0.000000 0.361484 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+# end_time: 2014-06-12 20:09:25.434677+00:00 \ No newline at end of file