author    drebs <drebs@leap.se>	2015-06-04 11:21:40 -0300
committer drebs <drebs@leap.se>	2015-06-04 11:21:40 -0300
commit    293c71080e9a21115d248e46d1a706c53cc8ee37 (patch)
tree      3353672f1f770ef5f5c87ea7e3e27debe737a50b /scripts
parent    fa7708e256ba56cd1e9913993d68611b4ae95824 (diff)
parent    9fb1c47ca7da06d6feef6846b812aec28128ed78 (diff)
Merge tag '0.7.0'
Tag version 0.7.0.

Conflicts:
	CHANGELOG
	client/src/leap/soledad/client/__init__.py
	client/src/leap/soledad/client/sqlcipher.py
	client/src/leap/soledad/client/target.py
	server/pkg/soledad-server
Diffstat (limited to 'scripts')
-rwxr-xr-x  scripts/build_debian_package.sh | 2
-rw-r--r--  scripts/db_access/client_side_db.py | 168
-rw-r--r--  scripts/db_access/reset_db.py | 132
l---------  scripts/db_access/util.py | 1
-rw-r--r--  scripts/ddocs/update_design_docs.py | 191
-rw-r--r--  scripts/profiling/mail/__init__.py | 184
-rw-r--r--  scripts/profiling/mail/couchdb.ini.template | 224
-rw-r--r--  scripts/profiling/mail/couchdb_server.py | 42
-rw-r--r--  scripts/profiling/mail/couchdb_wrapper.py | 84
-rw-r--r--  scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.pub | 30
-rw-r--r--  scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.sec | 57
-rw-r--r--  scripts/profiling/mail/mail.py | 50
-rw-r--r--  scripts/profiling/mail/mx.py | 80
-rw-r--r--  scripts/profiling/mail/soledad_client.py | 40
-rw-r--r--  scripts/profiling/mail/soledad_server.py | 48
-rw-r--r--  scripts/profiling/mail/util.py | 8
-rwxr-xr-x  scripts/profiling/spam.py | 123
-rw-r--r--  scripts/profiling/storage/benchmark-storage.py | 104
-rw-r--r--  scripts/profiling/storage/benchmark_storage_utils.py | 4
l---------  scripts/profiling/storage/client_side_db.py | 1
-rwxr-xr-x  scripts/profiling/storage/plot.py | 94
-rw-r--r--  scripts/profiling/storage/profile-format.py | 29
-rwxr-xr-x  scripts/profiling/storage/profile-storage.py | 107
l---------  scripts/profiling/storage/util.py | 1
l---------  scripts/profiling/sync/movingaverage.py | 1
-rw-r--r--  scripts/profiling/sync/profile-decoupled.py | 24
-rwxr-xr-x  scripts/run_tests.sh | 3
27 files changed, 1674 insertions(+), 158 deletions(-)
diff --git a/scripts/build_debian_package.sh b/scripts/build_debian_package.sh
index 1ec9b00a..b9fb93a9 100755
--- a/scripts/build_debian_package.sh
+++ b/scripts/build_debian_package.sh
@@ -26,7 +26,7 @@ export GIT_DIR=${workdir}/soledad/.git
export GIT_WORK_TREE=${workdir}/soledad
git remote add leapcode ${SOLEDAD_MAIN_REPO}
git fetch leapcode
-git checkout -b debian leapcode/debian
+git checkout -b debian/experimental leapcode/debian/experimental
git merge --no-edit ${branch}
(cd ${workdir}/soledad && debuild -uc -us)
echo "Packages generated in ${workdir}"
diff --git a/scripts/db_access/client_side_db.py b/scripts/db_access/client_side_db.py
index 6c456c41..1d8d32e2 100644
--- a/scripts/db_access/client_side_db.py
+++ b/scripts/db_access/client_side_db.py
@@ -2,23 +2,22 @@
# This script gives client-side access to one Soledad user database.
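+#
+# Example invocations (a sketch; flags as defined in _parse_args below,
+# placeholders in angle brackets):
+#
+#   ./client_side_db.py user@provider --sync
+#   ./client_side_db.py user@provider -p <passphrase> --get-all-docs
+#   ./client_side_db.py user@provider -c 'some content' -s
+#   ./client_side_db.py user@provider --export-incoming-messages <dir>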
-
-import sys
import os
import argparse
-import re
import tempfile
import getpass
import requests
-import json
import srp._pysrp as srp
import binascii
import logging
+import json
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks
-from leap.common.config import get_path_prefix
from leap.soledad.client import Soledad
-
+from leap.keymanager import KeyManager
+from leap.keymanager.openpgp import OpenPGPKey
from util import ValidateUserHandle
@@ -26,37 +25,37 @@ from util import ValidateUserHandle
# create a logger
logger = logging.getLogger(__name__)
LOG_FORMAT = '%(asctime)s %(message)s'
-logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
+logging.basicConfig(format=LOG_FORMAT, level=logging.DEBUG)
safe_unhexlify = lambda x: binascii.unhexlify(x) if (
len(x) % 2 == 0) else binascii.unhexlify('0' + x)
-def fail(reason):
+def _fail(reason):
logger.error('Fail: ' + reason)
exit(2)
-def get_api_info(provider):
+def _get_api_info(provider):
info = requests.get(
'https://'+provider+'/provider.json', verify=False).json()
return info['api_uri'], info['api_version']
-def login(username, passphrase, provider, api_uri, api_version):
+def _login(username, passphrase, provider, api_uri, api_version):
usr = srp.User(username, passphrase, srp.SHA256, srp.NG_1024)
auth = None
try:
- auth = authenticate(api_uri, api_version, usr).json()
+ auth = _authenticate(api_uri, api_version, usr).json()
except requests.exceptions.ConnectionError:
- fail('Could not connect to server.')
+ _fail('Could not connect to server.')
if 'errors' in auth:
- fail(str(auth['errors']))
+ _fail(str(auth['errors']))
return api_uri, api_version, auth
-def authenticate(api_uri, api_version, usr):
+def _authenticate(api_uri, api_version, usr):
api_url = "%s/%s" % (api_uri, api_version)
session = requests.session()
uname, A = usr.start_authentication()
@@ -64,16 +63,16 @@ def authenticate(api_uri, api_version, usr):
init = session.post(
api_url + '/sessions', data=params, verify=False).json()
if 'errors' in init:
- fail('test user not found')
+ _fail('test user not found')
M = usr.process_challenge(
safe_unhexlify(init['salt']), safe_unhexlify(init['B']))
return session.put(api_url + '/sessions/' + uname, verify=False,
data={'client_auth': binascii.hexlify(M)})
-def get_soledad_info(username, provider, passphrase, basedir):
- api_uri, api_version = get_api_info(provider)
- auth = login(username, passphrase, provider, api_uri, api_version)
+def _get_soledad_info(username, provider, passphrase, basedir):
+ api_uri, api_version = _get_api_info(provider)
+ auth = _login(username, passphrase, provider, api_uri, api_version)
# get soledad server url
service_url = '%s/%s/config/soledad-service.json' % \
(api_uri, api_version)
@@ -101,10 +100,9 @@ def get_soledad_info(username, provider, passphrase, basedir):
return auth[2]['id'], server_url, cert_file, auth[2]['token']
-def get_soledad_instance(username, provider, passphrase, basedir):
+def _get_soledad_instance(uuid, passphrase, basedir, server_url, cert_file,
+ token):
# setup soledad info
- uuid, server_url, cert_file, token = \
- get_soledad_info(username, provider, passphrase, basedir)
logger.info('UUID is %s' % uuid)
logger.info('Server URL is %s' % server_url)
secrets_path = os.path.join(
@@ -119,37 +117,135 @@ def get_soledad_instance(username, provider, passphrase, basedir):
local_db_path=local_db_path,
server_url=server_url,
cert_file=cert_file,
- auth_token=token)
-
-
-# main program
-
-if __name__ == '__main__':
-
+ auth_token=token,
+ defer_encryption=False)
+
+
+def _get_keymanager_instance(username, provider, soledad, token,
+ ca_cert_path=None, api_uri=None, api_version=None, uid=None,
+ gpgbinary=None):
+ return KeyManager(
+ "{username}@{provider}".format(username=username, provider=provider),
+ "http://uri",
+ soledad,
+ token=token,
+ ca_cert_path=ca_cert_path,
+ api_uri=api_uri,
+ api_version=api_version,
+ uid=uid,
+ gpgbinary=gpgbinary)
+
+
+def _parse_args():
# parse command line
parser = argparse.ArgumentParser()
parser.add_argument(
'user@provider', action=ValidateUserHandle, help='the user handle')
parser.add_argument(
- '-b', dest='basedir', required=False, default=None,
+ '--basedir', '-b', default=None,
help='soledad base directory')
parser.add_argument(
- '-p', dest='passphrase', required=False, default=None,
+ '--passphrase', '-p', default=None,
help='the user passphrase')
- args = parser.parse_args()
+ parser.add_argument(
+ '--get-all-docs', '-a', action='store_true',
+ help='get all documents from the local database')
+ parser.add_argument(
+ '--create-doc', '-c', default=None,
+        help='create a document with given content')
+ parser.add_argument(
+ '--sync', '-s', action='store_true',
+ help='synchronize with the server replica')
+ parser.add_argument(
+ '--export-public-key', help="export the public key to a file")
+ parser.add_argument(
+ '--export-private-key', help="export the private key to a file")
+ parser.add_argument(
+ '--export-incoming-messages',
+ help="export incoming messages to a directory")
+ return parser.parse_args()
- # get the password
+
+def _get_passphrase(args):
passphrase = args.passphrase
if passphrase is None:
passphrase = getpass.getpass(
'Password for %s@%s: ' % (args.username, args.provider))
+ return passphrase
+
- # get the basedir
+def _get_basedir(args):
basedir = args.basedir
if basedir is None:
basedir = tempfile.mkdtemp()
+ elif not os.path.isdir(basedir):
+ os.mkdir(basedir)
logger.info('Using %s as base directory.' % basedir)
+ return basedir
+
+
+@inlineCallbacks
+def _export_key(args, km, fname, private=False):
+ address = args.username + "@" + args.provider
+    pkey = yield km.get_key(
+        address, OpenPGPKey, private=private, fetch_remote=False)
+    with open(fname, "w") as f:
+        f.write(pkey.key_data)
+
+
+@inlineCallbacks
+def _export_incoming_messages(soledad, directory):
+ yield soledad.create_index("by-incoming", "bool(incoming)")
+ docs = yield soledad.get_from_index("by-incoming", '1')
+ i = 1
+ for doc in docs:
+ with open(os.path.join(directory, "message_%d.gpg" % i), "w") as f:
+ f.write(doc.content["_enc_json"])
+ i += 1
+
+
+@inlineCallbacks
+def _get_all_docs(soledad):
+ _, docs = yield soledad.get_all_docs()
+ for doc in docs:
+ print json.dumps(doc.content, indent=4)
- # get the soledad instance
- s = get_soledad_instance(
- args.username, args.provider, passphrase, basedir)
+
+# main program
+
+@inlineCallbacks
+def _main(soledad, km, args):
+ try:
+ if args.create_doc:
+ yield soledad.create_doc({'content': args.create_doc})
+ if args.sync:
+ yield soledad.sync()
+ if args.get_all_docs:
+ yield _get_all_docs(soledad)
+ if args.export_private_key:
+ yield _export_key(args, km, args.export_private_key, private=True)
+ if args.export_public_key:
+            yield _export_key(args, km, args.export_public_key, private=False)
+ if args.export_incoming_messages:
+ yield _export_incoming_messages(soledad, args.export_incoming_messages)
+    except Exception as e:
+        logger.error(str(e))
+ finally:
+ reactor.stop()
+
+
+if __name__ == '__main__':
+ args = _parse_args()
+ passphrase = _get_passphrase(args)
+ basedir = _get_basedir(args)
+ uuid, server_url, cert_file, token = \
+ _get_soledad_info(args.username, args.provider, passphrase, basedir)
+ soledad = _get_soledad_instance(
+ uuid, passphrase, basedir, server_url, cert_file, token)
+ km = _get_keymanager_instance(
+ args.username,
+ args.provider,
+ soledad,
+ token,
+ uid=uuid)
+ _main(soledad, km, args)
+ reactor.run()
diff --git a/scripts/db_access/reset_db.py b/scripts/db_access/reset_db.py
index 80871856..7c6d281b 100644
--- a/scripts/db_access/reset_db.py
+++ b/scripts/db_access/reset_db.py
@@ -5,20 +5,21 @@
# WARNING: running this script over a database will delete all documents but
# the one with id u1db_config (which contains db metadata) and design docs
# needed for couch backend.
+#
+# Run it like this to get some help:
+#
+# ./reset_db.py --help
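+#
+# Example invocations (a sketch; flags as defined in _parse_args below):
+#
+#   ./reset_db.py -u <uuid>   # reset the database of one user
+#   ./reset_db.py -s          # reset the shared database
+#   ./reset_db.py -a -y       # reset all databases, skipping confirmation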
-import sys
-from ConfigParser import ConfigParser
import threading
import logging
-from couchdb import Database as CouchDatabase
-
+import argparse
+import re
-if len(sys.argv) != 2:
- print 'Usage: %s <uuid>' % sys.argv[0]
- exit(1)
-uuid = sys.argv[1]
+from ConfigParser import ConfigParser
+from couchdb import Database as CouchDatabase
+from couchdb import Server as CouchServer
# create a logger
@@ -27,23 +28,6 @@ LOG_FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
-# get couch url
-cp = ConfigParser()
-cp.read('/etc/leap/soledad-server.conf')
-url = cp.get('soledad-server', 'couch_url')
-
-
-# confirm
-yes = raw_input("Are you sure you want to reset the database for user %s "
- "(type YES)? " % uuid)
-if yes != 'YES':
- print 'Bailing out...'
- exit(2)
-
-
-db = CouchDatabase('%s/user-%s' % (url, uuid))
-
-
class _DeleterThread(threading.Thread):
def __init__(self, db, doc_id, release_fun):
@@ -59,21 +43,95 @@ class _DeleterThread(threading.Thread):
self._release_fun()
-semaphore_pool = threading.BoundedSemaphore(value=20)
-
-
-threads = []
-for doc_id in db:
- if doc_id != 'u1db_config' and not doc_id.startswith('_design'):
+def get_confirmation(noconfirm, uuid, shared):
+ msg = "Are you sure you want to reset %s (type YES)? "
+ if shared:
+ msg = msg % "the shared database"
+ elif uuid:
+ msg = msg % ("the database for user %s" % uuid)
+ else:
+ msg = msg % "all databases"
+ if noconfirm is False:
+ yes = raw_input(msg)
+ if yes != 'YES':
+ print 'Bailing out...'
+ exit(2)
+
+
+def get_url(empty):
+ url = None
+ if empty is False:
+ # get couch url
+ cp = ConfigParser()
+ cp.read('/etc/leap/soledad-server.conf')
+ url = cp.get('soledad-server', 'couch_url')
+ else:
+ with open('/etc/couchdb/couchdb.netrc') as f:
+ netrc = f.read()
+ admin_password = re.match('^.* password (.*)$', netrc).groups()[0]
+ url = 'http://admin:%s@127.0.0.1:5984' % admin_password
+ return url
+
+
+def reset_all_dbs(url, empty):
+    server = CouchServer(url)
+ for dbname in server:
+ if dbname.startswith('user-') or dbname == 'shared':
+ reset_db(url, dbname, empty)
+
+
+def reset_db(url, dbname, empty):
+ db = CouchDatabase('%s/%s' % (url, dbname))
+ semaphore_pool = threading.BoundedSemaphore(value=20)
+
+ # launch threads for deleting docs
+ threads = []
+ for doc_id in db:
+ if empty is False:
+ if doc_id == 'u1db_config' or doc_id.startswith('_design'):
+ continue
semaphore_pool.acquire()
logger.info('[main] launching thread for doc: %s' % doc_id)
t = _DeleterThread(db, doc_id, semaphore_pool.release)
t.start()
threads.append(t)
-
-logger.info('[main] waiting for threads.')
-map(lambda thread: thread.join(), threads)
-
-
-logger.info('[main] done.')
+ # wait for threads to finish
+ logger.info('[main] waiting for threads.')
+ map(lambda thread: thread.join(), threads)
+ logger.info('[main] done.')
+
+
+def _parse_args():
+ parser = argparse.ArgumentParser()
+ group = parser.add_mutually_exclusive_group()
+    group.add_argument('-u', dest='uuid', default=None,
+ help='Reset database of given user.')
+ group.add_argument('-s', dest='shared', action='store_true', default=False,
+ help='Reset the shared database.')
+ group.add_argument('-a', dest='all', action='store_true', default=False,
+ help='Reset all user databases.')
+ parser.add_argument(
+ '-e', dest='empty', action='store_true', required=False, default=False,
+ help='Empty database (do not preserve minimal set of u1db documents).')
+ parser.add_argument(
+ '-y', dest='noconfirm', action='store_true', required=False,
+ default=False,
+ help='Do not ask for confirmation.')
+ return parser.parse_args(), parser
+
+
+if __name__ == '__main__':
+ args, parser = _parse_args()
+ if not (args.uuid or args.shared or args.all):
+ parser.print_help()
+ exit(1)
+
+ url = get_url(args.empty)
+ get_confirmation(args.noconfirm, args.uuid, args.shared)
+ if args.uuid:
+ reset_db(url, "user-%s" % args.uuid, args.empty)
+ elif args.shared:
+ reset_db(url, "shared", args.empty)
+ elif args.all:
+ reset_all_dbs(url, args.empty)
diff --git a/scripts/db_access/util.py b/scripts/db_access/util.py
new file mode 120000
index 00000000..368734f7
--- /dev/null
+++ b/scripts/db_access/util.py
@@ -0,0 +1 @@
+../profiling/util.py
\ No newline at end of file
diff --git a/scripts/ddocs/update_design_docs.py b/scripts/ddocs/update_design_docs.py
index e7b5a29c..2e2fa8f0 100644
--- a/scripts/ddocs/update_design_docs.py
+++ b/scripts/ddocs/update_design_docs.py
@@ -11,84 +11,83 @@ import re
import threading
import binascii
-
+from urlparse import urlparse
from getpass import getpass
from ConfigParser import ConfigParser
-from couchdb.client import Server
-from couchdb.http import Resource, Session
-from datetime import datetime
-from urlparse import urlparse
+from couchdb.client import Server
+from couchdb.http import Resource
+from couchdb.http import Session
+from couchdb.http import ResourceNotFound
from leap.soledad.common import ddocs
-# parse command line for the log file name
-logger_fname = "/tmp/update-design-docs_%s.log" % \
- str(datetime.now()).replace(' ', '_')
-parser = argparse.ArgumentParser()
-parser.add_argument('--log', action='store', default=logger_fname, type=str,
- required=False, help='the name of the log file', nargs=1)
-args = parser.parse_args()
+MAX_THREADS = 20
+DESIGN_DOCS = {
+ '_design/docs': json.loads(binascii.a2b_base64(ddocs.docs)),
+ '_design/syncs': json.loads(binascii.a2b_base64(ddocs.syncs)),
+ '_design/transactions': json.loads(
+ binascii.a2b_base64(ddocs.transactions)),
+}
-# configure the logger
+# create a logger
logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
-print "Logging to %s." % args.log
-logging.basicConfig(
- filename=args.log,
- format="%(asctime)-15s %(message)s")
+LOG_FORMAT = '%(asctime)s %(message)s'
+logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
-# configure threads
-max_threads = 20
-semaphore_pool = threading.BoundedSemaphore(value=max_threads)
-threads = []
+def _parse_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-u', dest='uuid', default=None, type=str,
+ help='the UUID of the user')
+ parser.add_argument('-t', dest='threads', default=MAX_THREADS, type=int,
+ help='the number of parallel threads')
+ return parser.parse_args()
-# get couch url
-cp = ConfigParser()
-cp.read('/etc/leap/soledad-server.conf')
-url = urlparse(cp.get('soledad-server', 'couch_url'))
-# get admin password
-netloc = re.sub('^.*@', '', url.netloc)
-url = url._replace(netloc=netloc)
-password = getpass("Admin password for %s: " % url.geturl())
-url = url._replace(netloc='admin:%s@%s' % (password, netloc))
+def _get_url():
+ # get couch url
+ cp = ConfigParser()
+ cp.read('/etc/leap/soledad-server.conf')
+ url = urlparse(cp.get('soledad-server', 'couch_url'))
+ # get admin password
+ netloc = re.sub('^.*@', '', url.netloc)
+ url = url._replace(netloc=netloc)
+ password = getpass("Admin password for %s: " % url.geturl())
+ return url._replace(netloc='admin:%s@%s' % (password, netloc))
-resource = Resource(url.geturl(), Session(retry_delays=[1,2,4,8], timeout=10))
-server = Server(url=resource)
-hidden_url = re.sub(
- 'http://(.*):.*@',
- 'http://\\1:xxxxx@',
- url.geturl())
+def _get_server(url):
+ resource = Resource(
+ url.geturl(), Session(retry_delays=[1, 2, 4, 8], timeout=10))
+ return Server(url=resource)
-print """
-==========
-ATTENTION!
-==========
-This script will modify Soledad's shared and user databases in:
+def _confirm(url):
+ hidden_url = re.sub(
+ 'http://(.*):.*@',
+ 'http://\\1:xxxxx@',
+ url.geturl())
- %s
+ print """
+ ==========
+ ATTENTION!
+ ==========
-This script does not make a backup of the couch db data, so make sure you
-have a copy or you may loose data.
-""" % hidden_url
-confirm = raw_input("Proceed (type uppercase YES)? ")
+ This script will modify Soledad's shared and user databases in:
-if confirm != "YES":
- exit(1)
+ %s
-# convert design doc content
+ This script does not make a backup of the couch db data, so make sure you
+    have a copy or you may lose data.
+ """ % hidden_url
+ confirm = raw_input("Proceed (type uppercase YES)? ")
+
+ if confirm != "YES":
+ exit(1)
-design_docs = {
- '_design/docs': json.loads(binascii.a2b_base64(ddocs.docs)),
- '_design/syncs': json.loads(binascii.a2b_base64(ddocs.syncs)),
- '_design/transactions': json.loads(binascii.a2b_base64(ddocs.transactions)),
-}
#
# Thread
@@ -106,42 +105,66 @@ class DBWorkerThread(threading.Thread):
def run(self):
- logger.info("(%d/%d) Updating db %s." % (self._db_idx, self._db_len,
- self._dbname))
+ logger.info(
+ "(%d/%d) Updating db %s."
+ % (self._db_idx, self._db_len, self._dbname))
- for doc_id in design_docs:
- doc = self._cdb[doc_id]
+ for doc_id in DESIGN_DOCS:
+ try:
+ doc = self._cdb[doc_id]
+ except ResourceNotFound:
+ doc = {'_id': doc_id}
for key in ['lists', 'views', 'updates']:
- if key in design_docs[doc_id]:
- doc[key] = design_docs[doc_id][key]
+ if key in DESIGN_DOCS[doc_id]:
+ doc[key] = DESIGN_DOCS[doc_id][key]
self._cdb.save(doc)
# release the semaphore
self._release_fun()
-db_idx = 0
-db_len = len(server)
-for dbname in server:
-
- db_idx += 1
-
- if not (dbname.startswith('user-') or dbname == 'shared') \
- or dbname == 'user-test-db':
- logger.info("(%d/%d) Skipping db %s." % (db_idx, db_len, dbname))
- continue
-
-
- # get access to couch db
- cdb = Server(url.geturl())[dbname]
-
- #---------------------------------------------------------------------
- # Start DB worker thread
- #---------------------------------------------------------------------
- semaphore_pool.acquire()
- thread = DBWorkerThread(server, dbname, db_idx, db_len, semaphore_pool.release)
+def _launch_update_design_docs_thread(
+ server, dbname, db_idx, db_len, semaphore_pool):
+ semaphore_pool.acquire() # wait for an available working slot
+ thread = DBWorkerThread(
+ server, dbname, db_idx, db_len, semaphore_pool.release)
thread.daemon = True
thread.start()
- threads.append(thread)
-
-map(lambda thread: thread.join(), threads)
+ return thread
+
+
+def _update_design_docs(args, server):
+
+ # find the actual databases to be updated
+ dbs = []
+ if args.uuid:
+ dbs.append('user-%s' % args.uuid)
+ else:
+ for dbname in server:
+ if dbname.startswith('user-') or dbname == 'shared':
+ dbs.append(dbname)
+ else:
+ logger.info("Skipping db %s." % dbname)
+
+ db_idx = 0
+ db_len = len(dbs)
+ semaphore_pool = threading.BoundedSemaphore(value=args.threads)
+ threads = []
+
+ # launch the update
+ for db in dbs:
+ db_idx += 1
+ threads.append(
+ _launch_update_design_docs_thread(
+ server, db, db_idx, db_len, semaphore_pool))
+
+ # wait for all threads to finish
+ map(lambda thread: thread.join(), threads)
+
+
+if __name__ == "__main__":
+ args = _parse_args()
+ url = _get_url()
+ _confirm(url)
+ server = _get_server(url)
+ _update_design_docs(args, server)
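+
+
+# Example invocations (a sketch; flags as defined in _parse_args above):
+#
+#   ./update_design_docs.py            # update all user dbs and 'shared'
+#   ./update_design_docs.py -u <uuid>  # update a single user database
+#   ./update_design_docs.py -t 10      # use at most 10 worker threads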
diff --git a/scripts/profiling/mail/__init__.py b/scripts/profiling/mail/__init__.py
new file mode 100644
index 00000000..352faae6
--- /dev/null
+++ b/scripts/profiling/mail/__init__.py
@@ -0,0 +1,184 @@
+import threading
+import time
+import logging
+import argparse
+
+from twisted.internet import reactor
+
+from util import log
+from couchdb_server import get_couchdb_wrapper_and_u1db
+from mx import put_lots_of_messages
+from soledad_server import get_soledad_server
+from soledad_client import SoledadClient
+from mail import get_imap_server
+
+
+UUID = 'blah'
+AUTH_TOKEN = 'bleh'
+
+
+logging.basicConfig(level=logging.DEBUG)
+
+modules = [
+ 'gnupg',
+ 'leap.common',
+ 'leap.keymanager',
+ 'taskthread',
+]
+
+for module in modules:
+ logger = logging.getLogger(name=module)
+ logger.setLevel(logging.WARNING)
+
+
+class TestWatcher(threading.Thread):
+
+ def __init__(self, couchdb_wrapper, couchdb_u1db, soledad_server,
+ soledad_client, imap_service, number_of_msgs, lock):
+ threading.Thread.__init__(self)
+ self._couchdb_wrapper = couchdb_wrapper
+ self._couchdb_u1db = couchdb_u1db
+ self._soledad_server = soledad_server
+ self._soledad_client = soledad_client
+ self._imap_service = imap_service
+ self._number_of_msgs = number_of_msgs
+ self._lock = lock
+ self._mails_available_time = None
+ self._mails_available_time_lock = threading.Lock()
+ self._conditions = None
+
+ def run(self):
+ self._set_conditions()
+ while not self._test_finished():
+ time.sleep(5)
+ log("TestWatcher: Tests finished, cleaning up...",
+ line_break=False)
+ self._stop_reactor()
+ self._cleanup()
+ log("done.")
+ self._lock.release()
+
+ def _set_conditions(self):
+ self._conditions = []
+
+ # condition 1: number of received messages is equal to number of
+ # expected messages
+ def _condition1(*args):
+ msgcount = self._imap_service._inbox.getMessageCount()
+ cond = msgcount == self._number_of_msgs
+ log("[condition 1] received messages: %d (expected: %d) :: %s"
+ % (msgcount, self._number_of_msgs, cond))
+            if self.mails_available_time is None and cond:
+ with self._mails_available_time_lock:
+ self._mails_available_time = time.time()
+ return cond
+
+        # condition 2: server and client have the same number of documents
+ def _condition2(client_docs, server_docs):
+ cond = client_docs == server_docs
+ log("[condition 2] number of documents: client %d; server %d :: %s"
+ % (client_docs, server_docs, cond))
+ return cond
+
+        # condition 3: number of documents bigger than 2 x number of msgs
+ def _condition3(client_docs, *args):
+ cond = client_docs > (2 * self._number_of_msgs)
+ log("[condition 3] documents (%d) > 2 * msgs (%d) :: %s"
+ % (client_docs, self._number_of_msgs, cond))
+ return cond
+
+ # condition 4: not syncing
+ def _condition4(*args):
+ cond = not self._soledad_client.instance.syncing
+ log("[condition 4] not syncing :: %s" % cond)
+ return cond
+
+ self._conditions.append(_condition1)
+ self._conditions.append(_condition2)
+ self._conditions.append(_condition3)
+ self._conditions.append(_condition4)
+
+ def _test_finished(self):
+ client_docs = self._get_soledad_client_number_of_docs()
+ server_docs = self._get_couchdb_number_of_docs()
+ return not bool(filter(lambda x: not x(client_docs, server_docs),
+ self._conditions))
+
+ def _stop_reactor(self):
+ reactor.stop()
+
+ def _cleanup(self):
+ self._imap_service.stop()
+ self._soledad_client.close()
+ self._soledad_server.stop()
+ self._couchdb_wrapper.stop()
+
+ def _get_soledad_client_number_of_docs(self):
+ c = self._soledad_client.instance._db._db_handle.cursor()
+ c.execute('SELECT COUNT(*) FROM document WHERE content IS NOT NULL')
+ row = c.fetchone()
+ return int(row[0])
+
+ def _get_couchdb_number_of_docs(self):
+ couchdb = self._couchdb_u1db._database
+ view = couchdb.view('_all_docs', include_docs=True)
+ return len(filter(
+ lambda r: '_attachments' in r.values()[1]
+ and 'u1db_content' in r.values()[1]['_attachments'],
+ view.rows))
+
+ @property
+ def mails_available_time(self):
+ with self._mails_available_time_lock:
+ return self._mails_available_time
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+    parser.add_argument('number_of_msgs', help="The number of messages",
+ type=int)
+ parser.add_argument('report_file', help="The name of the report file",
+ type=str)
+ args = parser.parse_args()
+
+ # start a couchdb server
+ couchdb_wrapper, couchdb_u1db = get_couchdb_wrapper_and_u1db(
+ UUID, AUTH_TOKEN)
+
+ put_time = put_lots_of_messages(couchdb_u1db, args.number_of_msgs)
+
+ soledad_server = get_soledad_server(couchdb_wrapper.port)
+
+ soledad_client = SoledadClient(
+ uuid='blah',
+ server_url='http://127.0.0.1:%d' % soledad_server.port,
+ auth_token=AUTH_TOKEN)
+
+ imap_service = get_imap_server(
+ soledad_client.instance, UUID, 'snowden@bitmask.net', AUTH_TOKEN)
+
+ lock = threading.Lock()
+ lock.acquire()
+ test_watcher = TestWatcher(
+ couchdb_wrapper, couchdb_u1db, soledad_server, soledad_client,
+ imap_service, args.number_of_msgs, lock)
+ test_watcher.start()
+
+ # reactor.run() will block until TestWatcher stops the reactor.
+ start_time = time.time()
+ reactor.run()
+ log("Reactor stopped.")
+ end_time = time.time()
+ lock.acquire()
+ mails_available_time = test_watcher.mails_available_time - start_time
+ sync_time = end_time - start_time
+ log("Total syncing time: %f" % sync_time)
+ log("# number_of_msgs put_time mails_available_time sync_time")
+ result = "%d %f %f %f" \
+ % (args.number_of_msgs, put_time, mails_available_time,
+ sync_time)
+ log(result)
+ with open(args.report_file, 'a') as f:
+ f.write(result + "\n")
diff --git a/scripts/profiling/mail/couchdb.ini.template b/scripts/profiling/mail/couchdb.ini.template
new file mode 100644
index 00000000..1fc2205b
--- /dev/null
+++ b/scripts/profiling/mail/couchdb.ini.template
@@ -0,0 +1,224 @@
+; etc/couchdb/default.ini.tpl. Generated from default.ini.tpl.in by configure.
+
+; Upgrading CouchDB will overwrite this file.
+
+[couchdb]
+database_dir = %(tempdir)s/lib
+view_index_dir = %(tempdir)s/lib
+max_document_size = 4294967296 ; 4 GB
+os_process_timeout = 120000 ; 120 seconds. for view and external servers.
+max_dbs_open = 100
+delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned
+uri_file = %(tempdir)s/lib/couch.uri
+file_compression = snappy
+
+[database_compaction]
+; larger buffer sizes can result in smaller files
+doc_buffer_size = 524288 ; value in bytes
+checkpoint_after = 5242880 ; checkpoint after every N bytes were written
+
+[view_compaction]
+; larger buffer sizes can result in smaller files
+keyvalue_buffer_size = 2097152 ; value in bytes
+
+[httpd]
+port = 0
+bind_address = 127.0.0.1
+authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
+default_handler = {couch_httpd_db, handle_request}
+secure_rewrites = true
+vhost_global_handlers = _utils, _uuids, _session, _oauth, _users
+allow_jsonp = false
+; Options for the MochiWeb HTTP server.
+;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
+; For more socket options, consult Erlang's module 'inet' man page.
+;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
+log_max_chunk_size = 1000000
+
+[log]
+file = %(tempdir)s/log/couch.log
+level = info
+include_sasl = true
+
+[couch_httpd_auth]
+authentication_db = _users
+authentication_redirect = /_utils/session.html
+require_valid_user = false
+timeout = 600 ; number of seconds before automatic logout
+auth_cache_size = 50 ; size is number of cache entries
+allow_persistent_cookies = false ; set to true to allow persistent cookies
+
+[couch_httpd_oauth]
+; If set to 'true', oauth token and consumer secrets will be looked up
+; in the authentication database (_users). These secrets are stored in
+; a top level property named "oauth" in user documents. Example:
+; {
+; "_id": "org.couchdb.user:joe",
+; "type": "user",
+; "name": "joe",
+; "password_sha": "fe95df1ca59a9b567bdca5cbaf8412abd6e06121",
+; "salt": "4e170ffeb6f34daecfd814dfb4001a73"
+; "roles": ["foo", "bar"],
+; "oauth": {
+; "consumer_keys": {
+; "consumerKey1": "key1Secret",
+; "consumerKey2": "key2Secret"
+; },
+; "tokens": {
+; "token1": "token1Secret",
+; "token2": "token2Secret"
+; }
+; }
+; }
+use_users_db = false
+
+[query_servers]
+; javascript = %(tempdir)s/server/main.js
+javascript = /usr/bin/couchjs /usr/share/couchdb/server/main.js
+coffeescript = /usr/bin/couchjs /usr/share/couchdb/server/main-coffee.js
+
+
+; Changing reduce_limit to false will disable reduce_limit.
+; If you think you're hitting reduce_limit with a "good" reduce function,
+; please let us know on the mailing list so we can fine tune the heuristic.
+[query_server_config]
+reduce_limit = true
+os_process_limit = 25
+
+[daemons]
+view_manager={couch_view, start_link, []}
+external_manager={couch_external_manager, start_link, []}
+query_servers={couch_query_servers, start_link, []}
+vhosts={couch_httpd_vhost, start_link, []}
+httpd={couch_httpd, start_link, []}
+stats_aggregator={couch_stats_aggregator, start, []}
+stats_collector={couch_stats_collector, start, []}
+uuids={couch_uuids, start, []}
+auth_cache={couch_auth_cache, start_link, []}
+replication_manager={couch_replication_manager, start_link, []}
+os_daemons={couch_os_daemons, start_link, []}
+compaction_daemon={couch_compaction_daemon, start_link, []}
+
+[httpd_global_handlers]
+/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
+
+_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req}
+_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
+_config = {couch_httpd_misc_handlers, handle_config_req}
+_replicate = {couch_httpd_replicator, handle_req}
+_uuids = {couch_httpd_misc_handlers, handle_uuids_req}
+_restart = {couch_httpd_misc_handlers, handle_restart_req}
+_stats = {couch_httpd_stats_handlers, handle_stats_req}
+_log = {couch_httpd_misc_handlers, handle_log_req}
+_session = {couch_httpd_auth, handle_session_req}
+_oauth = {couch_httpd_oauth, handle_oauth_req}
+
+[httpd_db_handlers]
+_view_cleanup = {couch_httpd_db, handle_view_cleanup_req}
+_compact = {couch_httpd_db, handle_compact_req}
+_design = {couch_httpd_db, handle_design_req}
+_temp_view = {couch_httpd_view, handle_temp_view_req}
+_changes = {couch_httpd_db, handle_changes_req}
+
+; The external module takes an optional argument allowing you to narrow it to a
+; single script. Otherwise the script name is inferred from the first path section
+; after _external's own path.
+; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>}
+; _external = {couch_httpd_external, handle_external_req}
+
+[httpd_design_handlers]
+_view = {couch_httpd_view, handle_view_req}
+_show = {couch_httpd_show, handle_doc_show_req}
+_list = {couch_httpd_show, handle_view_list_req}
+_info = {couch_httpd_db, handle_design_info_req}
+_rewrite = {couch_httpd_rewrite, handle_rewrite_req}
+_update = {couch_httpd_show, handle_doc_update_req}
+
+; enable external as an httpd handler, then link it with commands here.
+; note, this api is still under consideration.
+; [external]
+; mykey = /path/to/mycommand
+
+; Here you can setup commands for CouchDB to manage
+; while it is alive. It will attempt to keep each command
+; alive if it exits.
+; [os_daemons]
+; some_daemon_name = /path/to/script -with args
+
+
+[uuids]
+; Known algorithms:
+; random - 128 bits of random awesome
+; All awesome, all the time.
+; sequential - monotonically increasing ids with random increments
+; First 26 hex characters are random. Last 6 increment in
+; random amounts until an overflow occurs. On overflow, the
+; random prefix is regenerated and the process starts over.
+; utc_random - Time since Jan 1, 1970 UTC with microseconds
+; First 14 characters are the time in hex. Last 18 are random.
+algorithm = sequential
+
+[stats]
+; rate is in milliseconds
+rate = 1000
+; sample intervals are in seconds
+samples = [0, 60, 300, 900]
+
+[attachments]
+compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
+compressible_types = text/*, application/javascript, application/json, application/xml
+
+[replicator]
+db = _replicator
+; Maximum replication retry count can be a non-negative integer or "infinity".
+max_replication_retry_count = 10
+; More worker processes can give higher network throughput but can also
+; imply more disk and network IO.
+worker_processes = 4
+; With lower batch sizes checkpoints are done more frequently. Lower batch sizes
+; also reduce the total amount of RAM used.
+worker_batch_size = 500
+; Maximum number of HTTP connections per replication.
+http_connections = 20
+; HTTP connection timeout per replication.
+; Even for very fast/reliable networks it might need to be increased if a remote
+; database is too busy.
+connection_timeout = 30000
+; If a request fails, the replicator will retry it up to N times.
+retries_per_request = 10
+; Some socket options that might boost performance in some scenarios:
+; {nodelay, boolean()}
+; {sndbuf, integer()}
+; {recbuf, integer()}
+; {priority, integer()}
+; See the `inet` Erlang module's man page for the full list of options.
+socket_options = [{keepalive, true}, {nodelay, false}]
+; Path to a file containing the user's certificate.
+;cert_file = /full/path/to/server_cert.pem
+; Path to file containing user's private PEM encoded key.
+;key_file = /full/path/to/server_key.pem
+; String containing the user's password. Only used if the private keyfile is password protected.
+;password = somepassword
+; Set to true to validate peer certificates.
+verify_ssl_certificates = false
+; File containing a list of peer trusted certificates (in the PEM format).
+;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
+; Maximum peer certificate depth (must be set even if certificate validation is off).
+ssl_certificate_max_depth = 3
+
+[compaction_daemon]
+; The delay, in seconds, between each check for which database and view indexes
+; need to be compacted.
+check_interval = 300
+; If a database or view index file is smaller than this value (in bytes),
+; compaction will not happen. Very small files always have a very high
+; fragmentation, therefore it's not worth compacting them.
+min_file_size = 131072
+
+[compactions]
+; List of compaction rules for the compaction daemon.
+
+
+;[admins]
+;testuser = -hashed-f50a252c12615697c5ed24ec5cd56b05d66fe91e,b05471ba260132953930cf9f97f327f5
+; pass for above user is 'testpass'
diff --git a/scripts/profiling/mail/couchdb_server.py b/scripts/profiling/mail/couchdb_server.py
new file mode 100644
index 00000000..2cf0a3fd
--- /dev/null
+++ b/scripts/profiling/mail/couchdb_server.py
@@ -0,0 +1,42 @@
+import hashlib
+import couchdb
+
+from leap.soledad.common.couch import CouchDatabase
+
+from util import log
+from couchdb_wrapper import CouchDBWrapper
+
+
+def start_couchdb_wrapper():
+ log("Starting couchdb... ", line_break=False)
+ couchdb_wrapper = CouchDBWrapper()
+ couchdb_wrapper.start()
+ log("couchdb started on port %d." % couchdb_wrapper.port)
+ return couchdb_wrapper
+
+
+def get_u1db_database(dbname, port):
+ return CouchDatabase.open_database(
+ 'http://127.0.0.1:%d/%s' % (port, dbname),
+ True,
+ ensure_ddocs=True)
+
+
+def create_tokens_database(port, uuid, token_value):
+ tokens_database = couchdb.Server(
+ 'http://127.0.0.1:%d' % port).create('tokens')
+ token = couchdb.Document()
+ token['_id'] = hashlib.sha512(token_value).hexdigest()
+ token['user_id'] = uuid
+ token['type'] = 'Token'
+ tokens_database.save(token)
+
+
+def get_couchdb_wrapper_and_u1db(uuid, token_value):
+ couchdb_wrapper = start_couchdb_wrapper()
+
+ couchdb_u1db = get_u1db_database('user-%s' % uuid, couchdb_wrapper.port)
+ get_u1db_database('shared', couchdb_wrapper.port)
+ create_tokens_database(couchdb_wrapper.port, uuid, token_value)
+
+ return couchdb_wrapper, couchdb_u1db
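+
+
+# Example usage (a sketch mirroring scripts/profiling/mail/__init__.py; the
+# uuid and token values are arbitrary test strings):
+#
+#   wrapper, u1db = get_couchdb_wrapper_and_u1db('blah', 'bleh')
+#   # ... talk to couch on port wrapper.port, use the u1db database ...
+#   wrapper.stop()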
diff --git a/scripts/profiling/mail/couchdb_wrapper.py b/scripts/profiling/mail/couchdb_wrapper.py
new file mode 100644
index 00000000..cad1205b
--- /dev/null
+++ b/scripts/profiling/mail/couchdb_wrapper.py
@@ -0,0 +1,84 @@
+import re
+import os
+import tempfile
+import subprocess
+import time
+import shutil
+
+
+from leap.common.files import mkdir_p
+
+
+class CouchDBWrapper(object):
+ """
+ Wrapper for external CouchDB instance.
+ """
+
+ def start(self):
+ """
+ Start a CouchDB instance for a test.
+ """
+ self.tempdir = tempfile.mkdtemp(suffix='.couch.test')
+
+ path = os.path.join(os.path.dirname(__file__),
+ 'couchdb.ini.template')
+ handle = open(path)
+ conf = handle.read() % {
+ 'tempdir': self.tempdir,
+ }
+ handle.close()
+
+ confPath = os.path.join(self.tempdir, 'test.ini')
+ handle = open(confPath, 'w')
+ handle.write(conf)
+ handle.close()
+
+ # create the dirs from the template
+ mkdir_p(os.path.join(self.tempdir, 'lib'))
+ mkdir_p(os.path.join(self.tempdir, 'log'))
+ args = ['couchdb', '-n', '-a', confPath]
+ null = open('/dev/null', 'w')
+
+ self.process = subprocess.Popen(
+ args, env=None, stdout=null.fileno(), stderr=null.fileno(),
+ close_fds=True)
+ # find port
+ logPath = os.path.join(self.tempdir, 'log', 'couch.log')
+ while not os.path.exists(logPath):
+ if self.process.poll() is not None:
+ got_stdout, got_stderr = "", ""
+ if self.process.stdout is not None:
+ got_stdout = self.process.stdout.read()
+
+ if self.process.stderr is not None:
+ got_stderr = self.process.stderr.read()
+ raise Exception("""
+couchdb exited with code %d.
+stdout:
+%s
+stderr:
+%s""" % (
+ self.process.returncode, got_stdout, got_stderr))
+ time.sleep(0.01)
+ while os.stat(logPath).st_size == 0:
+ time.sleep(0.01)
+ PORT_RE = re.compile(
+            r'Apache CouchDB has started on http://127.0.0.1:(?P<port>\d+)')
+
+ handle = open(logPath)
+ m = None
+ line = handle.readline()
+ while m is None:
+ m = PORT_RE.search(line)
+ line = handle.readline()
+ handle.close()
+ self.port = int(m.group('port'))
+
+ def stop(self):
+ """
+ Terminate the CouchDB instance.
+ """
+ self.process.terminate()
+ self.process.communicate()
+ shutil.rmtree(self.tempdir)
+
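+# Example usage (a sketch; the port is chosen by couchdb and parsed from its
+# log file by start()):
+#
+#   wrapper = CouchDBWrapper()
+#   wrapper.start()
+#   print 'couchdb listening on port %d' % wrapper.port
+#   wrapper.stop()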
diff --git a/scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.pub b/scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.pub
new file mode 100644
index 00000000..fee53b6d
--- /dev/null
+++ b/scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.pub
@@ -0,0 +1,30 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1.4.12 (GNU/Linux)
+
+mQENBFQEwmABCADC4wYD3mFt8xJtl3gjxRPEGN+FcgvzxxECIhyjYCHszrJu3f65
+/nyruriYdQLGR4YmUdERIwsZ7AMkAM1NAXe7sMq/gRPCb4PwrE7pRKzPAmaLeJMQ
+DC9CSCP+2gUmzeKHS71GkddcUI1HFr1AX9lLVW2ScvmSzOllenyUoFKRvz2uGkLG
+r5pvKsxJUHl9enpHRZV/0X5Y6PCinb4+eN2/ZTdpAywOycU+L+zflA0SOTCtf+dg
+8k839T30piuBulDLNeOX84YcyXTW7XeCeRTg/ryoFaYhbOGt68BwnP9xlpU62LW0
+8vzSZ0mLm4Ttz2uaALEoLmsa91nyLi9pLtrRABEBAAG0IEVkIFNub3dkZW4gPHNu
+b3dkZW5AYml0bWFzay5uZXQ+iQE4BBMBAgAiBQJUBMJgAhsDBgsJCAcDAgYVCAIJ
+CgsEFgIDAQIeAQIXgAAKCRAbRQ5mX+Y1cx4RCACzEiHpmknl+HnB3bHGcr8VZvU9
+hIoclVR/OBjWQFUynr66XmaMHMOLAVoZkIPnezWQe3gDY7QlFCNCfz8SC2++4WtB
+aBzal9IREkVnQBdnWalxLRviNH+zoFQ0URunBAyH4QAJRUC5tWfNj4yI6BCFPwXL
+o0CCISIN+VMRAnwjABQD840/TbcMHDqmJyk/vpPYPFQqQudN3eB2hphKUkZMistP
+O9++ui6glso+MgsbIUdqgnblM3FSrbjfLKekC+MeunFr8qRjettdaVyFD4GLg2SH
+/JpsjZKYoZStatpdJcrNjUMsGtXLxaCPl+VldNuOKIsA85TZJomMiaBDqG9YuQEN
+BFQEwmABCACrYiPXyGWHvs/aFKM63y9l6Th/+SKfzeq+ksLUI6fJIQytGORiiYZC
+1LrhOTmir+dY3IygkFlldxehGt/OMUKLB774WhBDRI43rAhImwhNutTIuUTO7DsD
+y7u83oVQH6xGZW5afs5BEU56Oa8DdUUA5gLfnpqAJG2mLB12JhClxzOYXK/VB0wJ
+QsIWl+zyN7uLQr5xZOthzvP6p7MmsAjhzU1imwyEm8s91DLhwonuqadkMGKi2qHW
+xuwxnr9aHQmobzy68/vOiBFeumr0YarirUdEDiUIti4rqy+0oteTNeMtXWo5rTtx
+xeayw+TjjaOT2fZ6CAbq0I+lOW0aJrPFABEBAAGJAR8EGAECAAkFAlQEwmACGwwA
+CgkQG0UOZl/mNXM0SggAuXzaLafCZiWx28K6mPKdgDOwTMm2rD7ukf3JiswlIyIU
+/K19BENu82iHRSu4nb9amhHOLEhaf1Ep2JTf2Trmd+/SNh0kv3dSBNjCrvrMvtcA
+qVxGc3DtRufGeRoy8ow/sEg+BCcfxJgR1efHOSQfMELDz2v8vbLbkR3Ubm7YRtKr
+Ri2HWYrAXRrwFC07yqO2zptCND/LBtnMrp08AOSSLpRWVD/Ww6IE1v1UEN53aGsm
+D+L/1XkuP4L9cqG3E2NYfsOPiblqRiKSe1adVid/rLn94u+fpE4kuvxoGKn1FJ/m
+FqU8aPtxvPbsMkSoNOalxqJGpuWRTXTLb5I+Ed2Szw==
+=yRE/
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.sec b/scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.sec
new file mode 100644
index 00000000..64cb6c2a
--- /dev/null
+++ b/scripts/profiling/mail/keys/5447A9AD50E3075ECCE432711B450E665FE63573.sec
@@ -0,0 +1,57 @@
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+Version: GnuPG v1.4.12 (GNU/Linux)
+
+lQOYBFQEwmABCADC4wYD3mFt8xJtl3gjxRPEGN+FcgvzxxECIhyjYCHszrJu3f65
+/nyruriYdQLGR4YmUdERIwsZ7AMkAM1NAXe7sMq/gRPCb4PwrE7pRKzPAmaLeJMQ
+DC9CSCP+2gUmzeKHS71GkddcUI1HFr1AX9lLVW2ScvmSzOllenyUoFKRvz2uGkLG
+r5pvKsxJUHl9enpHRZV/0X5Y6PCinb4+eN2/ZTdpAywOycU+L+zflA0SOTCtf+dg
+8k839T30piuBulDLNeOX84YcyXTW7XeCeRTg/ryoFaYhbOGt68BwnP9xlpU62LW0
+8vzSZ0mLm4Ttz2uaALEoLmsa91nyLi9pLtrRABEBAAEAB/0cLb885/amczMC7ZfN
+dD17aS1ImkjoIqxu5ofFh6zgFLLwHOEr+4QDQKhYQvL3wHfBKqtUEwET6nA50HPe
+4otxdAqczgkRYBZvwjpWuDtUY0B4giKhe2GJ7+xkeRmtlq9eaLEhdwzwqCUFVmBe
+4n0Ey4FgX4d+lmpY5fEFfHjz4bZpoCrNZKtiGtOqdlKXm8PnU+ek+G7DFuavJ+g5
+B4fiqkLAYFX/IDFfaTSBYzNDPbSQR5n4Q4r9PdKazPXg7bnLuxAIY4i6KEXq2YpS
+T1vLanCnBd4BEDUODCPZdc/AtbE0U+XoKTBjTvk3UEGIRJSsju8A1vWOG7UCl+0d
+UMmRBADaiQYnp9QiwPDbpqxzlWN8Ms/+tAyRnBbhghcRqtrDSke6fSJAqXzVGVmF
+FSJPMFf4mBYbr1U3YlYOJrlrb3tVhVN+7PTZDIaaENbtcsUAu7hTr7Ko6r1+WONC
+yhtrtOR9sWHVbTZ09ZvyvjHnBqZVA2PuZLUn2wrimnIJbVNdlwQA5EwgoS8UuDob
+hs6tLg29bAEDZRBHXQcDuEwdAX0KCHW0oQ0UE7exbDXXfQJSD9X3fDeqI+BdI+qQ
+Yuauz+fJxKl+qHAcy5l5NT7qomEjHCzjGUnn4NJzkn6a3T4SrBdSMFY2hL/tJN0i
+v1hXVNatjCEotqqsor+C6bf+Sl4I59cEAK+tYLTo/d+KOWtW4XbVhcYHjTBKtJGH
+p2/wNb49ibYpkgOUqW2ebiCB0Lg6QEupomcaMOJGol3v8vwBKsuwQJhWJrAXC2sT
+Bck5mI+DbabyAbYFtZgNHbcdDy62ADg1xD2Je7IjUDcpYaGB3VFhpD2rSvWDeSjR
+3jTG3PPINfoBODK0IEVkIFNub3dkZW4gPHNub3dkZW5AYml0bWFzay5uZXQ+iQE4
+BBMBAgAiBQJUBMJgAhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRAbRQ5m
+X+Y1cx4RCACzEiHpmknl+HnB3bHGcr8VZvU9hIoclVR/OBjWQFUynr66XmaMHMOL
+AVoZkIPnezWQe3gDY7QlFCNCfz8SC2++4WtBaBzal9IREkVnQBdnWalxLRviNH+z
+oFQ0URunBAyH4QAJRUC5tWfNj4yI6BCFPwXLo0CCISIN+VMRAnwjABQD840/TbcM
+HDqmJyk/vpPYPFQqQudN3eB2hphKUkZMistPO9++ui6glso+MgsbIUdqgnblM3FS
+rbjfLKekC+MeunFr8qRjettdaVyFD4GLg2SH/JpsjZKYoZStatpdJcrNjUMsGtXL
+xaCPl+VldNuOKIsA85TZJomMiaBDqG9YnQOYBFQEwmABCACrYiPXyGWHvs/aFKM6
+3y9l6Th/+SKfzeq+ksLUI6fJIQytGORiiYZC1LrhOTmir+dY3IygkFlldxehGt/O
+MUKLB774WhBDRI43rAhImwhNutTIuUTO7DsDy7u83oVQH6xGZW5afs5BEU56Oa8D
+dUUA5gLfnpqAJG2mLB12JhClxzOYXK/VB0wJQsIWl+zyN7uLQr5xZOthzvP6p7Mm
+sAjhzU1imwyEm8s91DLhwonuqadkMGKi2qHWxuwxnr9aHQmobzy68/vOiBFeumr0
+YarirUdEDiUIti4rqy+0oteTNeMtXWo5rTtxxeayw+TjjaOT2fZ6CAbq0I+lOW0a
+JrPFABEBAAEAB/4kyb13Z4MRyy37OkRakgdu2QvhfoVF59Hso/yxxFCTHibGLkpx
+82LQTDEsQNgkGZ2vp7IBElM6MkDuemIRtOW7icdesJh+lAPyI9moWi0DYGgmCQzh
+3PgDBdPQBDT6IL5eYw3323HjKjeeCW1NsPnFqlnyDe3MtWUbDyuozZ1ztA+Rekhb
+UhEDK8ZccEKwpzrE2H5zBZLeY0OKKROGnwd1RBVXnHMgVRF7vbellYaR4h2odxOp
+X8Ho4Xbs1h2VRNIuZwtfXxTIuTIfujlIPXMtVY40dgnEGt9PosJNr9IfGpfE3JCu
+k9PTvq37aZkQbYj52nccwKdos+sLQgqAdHhZBADHg7B5jyRRObsCUXQ+jMHXxuqT
+5l1twwOovvLC7YZoC8NAl4Bi0rh1Zj0ZEJJLFGzeiH+15C4qFTY+ospWpGu6X6g5
+I8ZWya8m2NSEWyJZNI1zKIU0iXucLevVTx+ctnovUNnb89v52/+BKr4k2iRISAzT
+7RL63aFTgnLw9GKweQQA2+eU5jcQ6LobPY/fZZImnhwLDq/OaUV+7u1RfB04GA15
+HOGQV77np/QTM6b+ezKTFhG/HMCTqxf+HPHfzohBPF9zvboLvCkqaHBDiV9qYE96
+id/el3ZeWloLcEe62sMGbv0YYmsYWgJxL8BFGw5v1QpYbfQCnXLjyG+/9f6Ygq0D
+/0W9X/NxWUyAXOv5KRy+rpkpNVxvie4tduvyVUa/9XHF7D/DMaXqkIvVX8yZUIDR
+bjuIvGZkZ9QP8zf8NKkB98zbqZi6CbNrerjrDpb7Pj7uQd3GIcjW4UmENGA6t7U9
+IWen966PAXSzh3996tRHxwXexVIEdX5n4pO39ZiodEIOPzmJAR8EGAECAAkFAlQE
+wmACGwwACgkQG0UOZl/mNXM0SggAuXzaLafCZiWx28K6mPKdgDOwTMm2rD7ukf3J
+iswlIyIU/K19BENu82iHRSu4nb9amhHOLEhaf1Ep2JTf2Trmd+/SNh0kv3dSBNjC
+rvrMvtcAqVxGc3DtRufGeRoy8ow/sEg+BCcfxJgR1efHOSQfMELDz2v8vbLbkR3U
+bm7YRtKrRi2HWYrAXRrwFC07yqO2zptCND/LBtnMrp08AOSSLpRWVD/Ww6IE1v1U
+EN53aGsmD+L/1XkuP4L9cqG3E2NYfsOPiblqRiKSe1adVid/rLn94u+fpE4kuvxo
+GKn1FJ/mFqU8aPtxvPbsMkSoNOalxqJGpuWRTXTLb5I+Ed2Szw==
+=9xZX
+-----END PGP PRIVATE KEY BLOCK-----
diff --git a/scripts/profiling/mail/mail.py b/scripts/profiling/mail/mail.py
new file mode 100644
index 00000000..8504c762
--- /dev/null
+++ b/scripts/profiling/mail/mail.py
@@ -0,0 +1,50 @@
+import os
+import threading
+
+from twisted.internet import reactor
+
+from leap.mail.imap.service import imap
+from leap.keymanager import KeyManager
+
+from util import log
+
+
+class IMAPServerThread(threading.Thread):
+ def __init__(self, imap_service):
+ threading.Thread.__init__(self)
+ self._imap_service = imap_service
+
+ def run(self):
+ self._imap_service.start_loop()
+ reactor.run()
+
+ def stop(self):
+ self._imap_service.stop()
+ reactor.stop()
+
+
+def get_imap_server(soledad, uuid, address, token):
+ log("Starting imap... ", line_break=False)
+
+ keymanager = KeyManager(address, '', soledad, token=token, uid=uuid)
+ with open(
+ os.path.join(
+ os.path.dirname(__file__),
+ 'keys/5447A9AD50E3075ECCE432711B450E665FE63573.sec'), 'r') as f:
+ pubkey, privkey = keymanager.parse_openpgp_ascii_key(f.read())
+ keymanager.put_key(privkey)
+
+ imap_service, imap_port, imap_factory = imap.run_service(
+ soledad, keymanager, userid=address, offline=False)
+
+ imap_service.start_loop()
+ log("started.")
+ return imap_service
+
+ #imap_server = IMAPServerThread(imap_service)
+ #try:
+ # imap_server.start()
+ #except Exception as e:
+ # print str(e)
+
+ #return imap_server
diff --git a/scripts/profiling/mail/mx.py b/scripts/profiling/mail/mx.py
new file mode 100644
index 00000000..b6a1e5cf
--- /dev/null
+++ b/scripts/profiling/mail/mx.py
@@ -0,0 +1,80 @@
+import datetime
+import uuid
+import json
+import timeit
+
+
+from leap.keymanager import openpgp
+from leap.soledad.common.couch import CouchDocument
+from leap.soledad.common.crypto import (
+ EncryptionSchemes,
+ ENC_JSON_KEY,
+ ENC_SCHEME_KEY,
+)
+
+
+from util import log
+
+
+message = """To: Ed Snowden <snowden@bitmask.net>
+Date: %s
+From: Glenn Greenwald <greenwald@bitmask.net>
+
+hi!
+
+"""
+
+
+def get_message():
+ return message % datetime.datetime.now().strftime("%a %b %d %H:%M:%S:%f %Y")
+
+
+def get_enc_json(pubkey, message):
+ with openpgp.TempGPGWrapper(gpgbinary='/usr/bin/gpg') as gpg:
+ gpg.import_keys(pubkey)
+ key = gpg.list_keys().pop()
+ # We don't care about the actual address, so we use a
+ # dummy one, we just care about the import of the pubkey
+ openpgp_key = openpgp._build_key_from_gpg("dummy@mail.com",
+ key, pubkey)
+ enc_json = str(gpg.encrypt(
+ json.dumps(
+ {'incoming': True, 'content': message},
+ ensure_ascii=False),
+ openpgp_key.fingerprint,
+ symmetric=False))
+ return enc_json
+
+
+def get_new_doc(enc_json):
+ doc = CouchDocument(doc_id=str(uuid.uuid4()))
+ doc.content = {
+ 'incoming': True,
+ ENC_SCHEME_KEY: EncryptionSchemes.PUBKEY,
+ ENC_JSON_KEY: enc_json
+ }
+ return doc
+
+
+def get_pubkey():
+ with open('./keys/5447A9AD50E3075ECCE432711B450E665FE63573.pub') as f:
+ return f.read()
+
+
+def put_one_message(pubkey, db):
+ enc_json = get_enc_json(pubkey, get_message())
+ doc = get_new_doc(enc_json)
+ db.put_doc(doc)
+
+
+def put_lots_of_messages(db, number):
+ log("Populating database with %d encrypted messages... "
+ % number, line_break=False)
+ pubkey = get_pubkey()
+ def _put_one_message():
+ put_one_message(pubkey, db)
+ time = timeit.timeit(_put_one_message, number=number)
+ log("done.")
+ average_time = time / number
+ log("put_one_message average time: %f" % average_time)
+ return average_time
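+
+
+# Example usage (a sketch; scripts/profiling/mail/__init__.py calls this with
+# the couch u1db database and the message count given on the command line):
+#
+#   average_put_time = put_lots_of_messages(db, 100)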
diff --git a/scripts/profiling/mail/soledad_client.py b/scripts/profiling/mail/soledad_client.py
new file mode 100644
index 00000000..5ac8ce39
--- /dev/null
+++ b/scripts/profiling/mail/soledad_client.py
@@ -0,0 +1,40 @@
+import tempfile
+import os
+import shutil
+
+from leap.soledad.client import Soledad
+
+
+class SoledadClient(object):
+
+ def __init__(self, uuid, server_url, auth_token):
+ self._uuid = uuid
+ self._server_url = server_url
+ self._auth_token = auth_token
+ self._tempdir = None
+ self._soledad = None
+
+ @property
+ def instance(self):
+ if self._soledad is None:
+ self._soledad = self._get_soledad_client()
+ return self._soledad
+
+ def _get_soledad_client(self):
+ self._tempdir = tempfile.mkdtemp()
+ return Soledad(
+ uuid=self._uuid,
+ passphrase=u'123',
+ secrets_path=os.path.join(self._tempdir, 'secrets.json'),
+ local_db_path=os.path.join(self._tempdir, 'soledad.db'),
+ server_url=self._server_url,
+ cert_file=None,
+ auth_token=self._auth_token,
+ secret_id=None,
+ defer_encryption=True)
+
+ def close(self):
+ if self._soledad is not None:
+ self._soledad.close()
+ if self._tempdir is not None:
+ shutil.rmtree(self._tempdir)
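+
+
+# Example usage (a sketch mirroring scripts/profiling/mail/__init__.py; the
+# server url and token below are placeholders):
+#
+#   client = SoledadClient(
+#       uuid='blah', server_url='http://127.0.0.1:2424', auth_token='bleh')
+#   soledad = client.instance  # lazily instantiates Soledad
+#   client.close()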
diff --git a/scripts/profiling/mail/soledad_server.py b/scripts/profiling/mail/soledad_server.py
new file mode 100644
index 00000000..ad014456
--- /dev/null
+++ b/scripts/profiling/mail/soledad_server.py
@@ -0,0 +1,48 @@
+import threading
+
+from wsgiref.simple_server import make_server
+
+from leap.soledad.common.couch import CouchServerState
+
+from leap.soledad.server import SoledadApp
+from leap.soledad.server.gzip_middleware import GzipMiddleware
+from leap.soledad.server.auth import SoledadTokenAuthMiddleware
+
+from util import log
+
+
+class SoledadServerThread(threading.Thread):
+ def __init__(self, server):
+ threading.Thread.__init__(self)
+ self._server = server
+
+ def run(self):
+ self._server.serve_forever()
+
+ def stop(self):
+ self._server.shutdown()
+
+ @property
+ def port(self):
+ return self._server.server_port
+
+
+def make_soledad_server_thread(couch_port):
+ state = CouchServerState(
+ 'http://127.0.0.1:%d' % couch_port,
+ 'shared',
+ 'tokens')
+ application = GzipMiddleware(
+ SoledadTokenAuthMiddleware(SoledadApp(state)))
+ server = make_server('', 0, application)
+ t = SoledadServerThread(server)
+ return t
+
+
+def get_soledad_server(couchdb_port):
+ log("Starting soledad server... ", line_break=False)
+ soledad_server = make_soledad_server_thread(couchdb_port)
+ soledad_server.start()
+ log("soledad server started on port %d." % soledad_server.port)
+ return soledad_server
+
diff --git a/scripts/profiling/mail/util.py b/scripts/profiling/mail/util.py
new file mode 100644
index 00000000..86118e88
--- /dev/null
+++ b/scripts/profiling/mail/util.py
@@ -0,0 +1,8 @@
+import sys
+
+
+def log(msg, line_break=True):
+ sys.stdout.write(msg)
+ if line_break:
+ sys.stdout.write("\n")
+ sys.stdout.flush()
diff --git a/scripts/profiling/spam.py b/scripts/profiling/spam.py
new file mode 100755
index 00000000..091a8c48
--- /dev/null
+++ b/scripts/profiling/spam.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+
+# Send a lot of messages in parallel.
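+#
+# Example invocation (a sketch; arguments as defined in _parse_args below,
+# server and address are placeholders):
+#
+#   ./spam.py someone@example.org 100 -s smtp.example.org -t 10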
+
+
+import string
+import smtplib
+import threading
+import logging
+
+from argparse import ArgumentParser
+
+
+SMTP_HOST = 'chipmonk.cdev.bitmask.net'
+NUMBER_OF_THREADS = 20
+
+
+logger = logging.getLogger(__name__)
+LOG_FORMAT = '%(asctime)s %(message)s'
+logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
+
+
+def _send_email(host, subject, to_addr, from_addr, body_text):
+ """
+ Send an email
+ """
+ body = string.join((
+ "From: %s" % from_addr,
+ "To: %s" % to_addr,
+ "Subject: %s" % subject,
+ "",
+ body_text
+ ), "\r\n")
+ server = smtplib.SMTP(host)
+ server.sendmail(from_addr, [to_addr], body)
+ server.quit()
+
+
+def _parse_args():
+ parser = ArgumentParser()
+ parser.add_argument(
+ 'target_address',
+ help='The target email address to spam')
+ parser.add_argument(
+ 'number_of_messages', type=int,
+        help='The number of messages to send')
+ parser.add_argument(
+ '-s', dest='server', default=SMTP_HOST,
+ help='The SMTP server to use')
+ parser.add_argument(
+        '-t', dest='threads', default=NUMBER_OF_THREADS, type=int,
+ help='The maximum number of parallel threads to launch')
+ return parser.parse_args()
+
+
+class EmailSenderThread(threading.Thread):
+
+ def __init__(self, host, subject, to_addr, from_addr, body_text,
+ finished_fun):
+ threading.Thread.__init__(self)
+ self._host = host
+ self._subject = subject
+ self._to_addr = to_addr
+ self._from_addr = from_addr
+ self._body_text = body_text
+ self._finished_fun = finished_fun
+
+ def run(self):
+ _send_email(
+ self._host, self._subject, self._to_addr, self._from_addr,
+ self._body_text)
+ self._finished_fun()
+
+
+def _launch_email_thread(host, subject, to_addr, from_addr, body_text,
+ finished_fun):
+ thread = EmailSenderThread(
+ host, subject, to_addr, from_addr, body_text, finished_fun)
+ thread.start()
+ return thread
+
+
+class FinishedThreads(object):
+
+ def __init__(self):
+ self._finished = 0
+ self._lock = threading.Lock()
+
+ def signal(self):
+ with self._lock:
+ self._finished = self._finished + 1
+ logger.info('Number of messages sent: %d.' % self._finished)
+
+
+def _send_messages(args):
+ host = args.server
+ subject = "Message from Soledad script"
+ to_addr = args.target_address
+ from_addr = args.target_address
+ body_text = "Test message"
+
+ semaphore = threading.Semaphore(args.threads)
+ threads = []
+ finished_threads = FinishedThreads()
+
+ def _finished_fun():
+ semaphore.release()
+ finished_threads.signal()
+
+ for i in xrange(args.number_of_messages):
+ semaphore.acquire()
+ threads.append(
+ _launch_email_thread(
+ host, subject, to_addr, from_addr, body_text,
+ _finished_fun))
+
+ for t in threads:
+ t.join()
+
+
+if __name__ == "__main__":
+ args = _parse_args()
+ _send_messages(args)
diff --git a/scripts/profiling/storage/benchmark-storage.py b/scripts/profiling/storage/benchmark-storage.py
new file mode 100644
index 00000000..79ee3270
--- /dev/null
+++ b/scripts/profiling/storage/benchmark-storage.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+
+# scenarios:
+# 1. soledad instantiation time.
+# a. for a nonexistent db.
+# b. for an existing db.
+# 2. soledad doc storage/retrieval.
+# a. 1 KB document.
+# b. 10 KB.
+# c. 100 KB.
+# d. 1 MB.
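+#
+# Example invocation (a sketch; arguments as defined in parse_args below):
+#
+#   ./benchmark-storage.py user@provider -p <passphrase> -l /tmp/results.log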
+
+
+import logging
+import getpass
+import tempfile
+import argparse
+import shutil
+import timeit
+
+
+from util import ValidateUserHandle
+
+# benchmarking args
+REPEAT_NUMBER = 1000
+DOC_SIZE = 1024
+
+
+# create a logger
+logger = logging.getLogger(__name__)
+LOG_FORMAT = '%(asctime)s %(message)s'
+logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
+
+
+def parse_args():
+ # parse command line
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ 'user@provider', action=ValidateUserHandle, help='the user handle')
+ parser.add_argument(
+ '-b', dest='basedir', required=False, default=None,
+ help='soledad base directory')
+ parser.add_argument(
+ '-p', dest='passphrase', required=False, default=None,
+ help='the user passphrase')
+ parser.add_argument(
+        '-l', dest='logfile', required=False,
+        default='/tmp/benchmark-storage.log',
+        help='the file to which the benchmark logs are written')
+ args = parser.parse_args()
+ # get the password
+ passphrase = args.passphrase
+ if passphrase is None:
+ passphrase = getpass.getpass(
+ 'Password for %s@%s: ' % (args.username, args.provider))
+ # get the basedir
+ basedir = args.basedir
+ if basedir is None:
+ basedir = tempfile.mkdtemp()
+ logger.info('Using %s as base directory.' % basedir)
+
+ return args.username, args.provider, passphrase, basedir, args.logfile
+
+
+if __name__ == '__main__':
+ username, provider, passphrase, basedir, logfile = parse_args()
+ create_results = []
+ getall_results = []
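+ # Sweep document sizes from DOC_SIZE (1 KB) up to roughly 1 MB
+ # (DOC_SIZE * i bytes), using a fresh temporary directory per size.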
+ for i in [1, 200, 400, 600, 800, 1000]:
+ tempdir = tempfile.mkdtemp(dir=basedir)
+ setup_common = """
+import os
+#from benchmark_storage_utils import benchmark_fun
+#from benchmark_storage_utils import get_soledad_instance
+from client_side_db import get_soledad_instance
+sol = get_soledad_instance('%s', '%s', '%s', '%s')
+ """ % (username, provider, passphrase, tempdir)
+
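+ # timeit runs the setup string once and the statement REPEAT_NUMBER
+ # times, so Soledad instantiation stays out of the measured time.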
+ setup_create = setup_common + """
+content = {'data': os.urandom(%d/2).encode('hex')}
+""" % (DOC_SIZE * i)
+ time = timeit.timeit(
+ 'sol.create_doc(content)',
+ setup=setup_create, number=REPEAT_NUMBER)
+ create_results.append((DOC_SIZE*i, time))
+ print "CREATE: %d %f" % (DOC_SIZE*i, time)
+
+ setup_get = setup_common + """
+doc_ids = [doc.doc_id for doc in sol.get_all_docs()[1]]
+"""
+
+ time = timeit.timeit(
+ "[sol.get_doc(doc_id) for doc_id in doc_ids]",
+ setup=setup_get, number=1)
+ getall_results.append((DOC_SIZE*i, time))
+ print "GET_ALL: %d %f" % (DOC_SIZE*i, time)
+ shutil.rmtree(tempdir)
+ print "# size, time for creation of %d docs" % REPEAT_NUMBER
+ for size, time in create_results:
+ print size, time
+ print "# size, time for retrieval of %d docs" % REPEAT_NUMBER
+ for size, time in getall_results:
+ print size, time
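+ # note: this removes basedir even when it was passed in with -b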
+ shutil.rmtree(basedir)
+
diff --git a/scripts/profiling/storage/benchmark_storage_utils.py b/scripts/profiling/storage/benchmark_storage_utils.py
new file mode 100644
index 00000000..fa8bb658
--- /dev/null
+++ b/scripts/profiling/storage/benchmark_storage_utils.py
@@ -0,0 +1,4 @@
+from client_side_db import get_soledad_instance
+
+def benchmark_fun(sol, content):
+ sol.create_doc(content)
diff --git a/scripts/profiling/storage/client_side_db.py b/scripts/profiling/storage/client_side_db.py
new file mode 120000
index 00000000..9e49a7f0
--- /dev/null
+++ b/scripts/profiling/storage/client_side_db.py
@@ -0,0 +1 @@
+../../db_access/client_side_db.py \ No newline at end of file
diff --git a/scripts/profiling/storage/plot.py b/scripts/profiling/storage/plot.py
new file mode 100755
index 00000000..280b9375
--- /dev/null
+++ b/scripts/profiling/storage/plot.py
@@ -0,0 +1,94 @@
+#!/usr/bin/python
+
+
+# Create a plot of the results of running the ./benchmark-storage.py script.
+
+
+import argparse
+from matplotlib import pyplot as plt
+
+
+def plot(filename, subtitle=''):
+
+ # config the plot
+ plt.xlabel('doc size (KB)')
+ plt.ylabel('operation time (s)')
+ title = 'soledad 1000 docs creation/retrieval times'
+ if subtitle != '':
+ title += ' - %s' % subtitle
+ plt.title(title)
+
+ x = set()
+ ycreate = []
+ yget = []
+
+ ys = []
+ #ys.append((ycreate, 'creation time', 'r', '-'))
+ #ys.append((yget, 'retrieval time', 'b', '-'))
+
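+ # The data file holds the two summary blocks printed at the end of
+ # benchmark-storage.py: a header line followed by six "size time" rows
+ # for creation, then the same for retrieval.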
+ # read data from file
+ with open(filename, 'r') as f:
+ f.readline()
+ for i in xrange(6):
+ size, y = f.readline().strip().split(' ')
+ x.add(int(size))
+ ycreate.append(float(y))
+
+ f.readline()
+ for i in xrange(6):
+ size, y = f.readline().strip().split(' ')
+ x.add(int(size))
+ yget.append(float(y))
+
+ # get doc size in KB
+ x = list(x)
+ x.sort()
+ x = map(lambda val: val / 1024, x)
+
+ # get normalized results per KB
+ nycreate = []
+ nyget = []
+ for i in xrange(len(x)):
+ nycreate.append(ycreate[i]/x[i])
+ nyget.append(yget[i]/x[i])
+
+ ys.append((nycreate, 'creation time per KB', 'r', '-.'))
+ ys.append((nyget, 'retrieval time per KB', 'b', '-.'))
+
+ for y in ys:
+ kwargs = {
+ 'linewidth': 1.0,
+ 'marker': '.',
+ 'color': y[2],
+ 'linestyle': y[3],
+ }
+ # normalize by doc size
+ plt.plot(
+ x,
+ y[0],
+ label=y[1], **kwargs)
+
+ #plt.axes().get_xaxis().set_ticks(x)
+ #plt.axes().get_xaxis().set_ticklabels(x)
+
+ # annotate max and min values
+ plt.xlim(0, 1100)
+ #plt.ylim(0, 350)
+ plt.grid()
+ plt.legend()
+ plt.show()
+
+
+if __name__ == '__main__':
+ # parse command line
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ 'datafile',
+ help='the data file to plot')
+ parser.add_argument(
+ '-s', dest='subtitle', required=False, default='',
+ help='a subtitle for the plot')
+ args = parser.parse_args()
+ plot(args.datafile, args.subtitle)
diff --git a/scripts/profiling/storage/profile-format.py b/scripts/profiling/storage/profile-format.py
new file mode 100644
index 00000000..262a52ab
--- /dev/null
+++ b/scripts/profiling/storage/profile-format.py
@@ -0,0 +1,29 @@
+#!/usr/bin/python
+
+import argparse
+import pstats
+
+
+def parse_args():
+ # parse command line
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '-f', dest='statsfiles', action='append', required=True,
+ help='a stats file')
+ args = parser.parse_args()
+ return args.statsfiles
+
+
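+# For each stats file, print two rankings: by internal time ('time', the
+# cost of each function body itself) and by cumulative time ('cumulative',
+# which includes callees).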
+def format_stats(statsfiles):
+ for f in statsfiles:
+ ps = pstats.Stats(f)
+ ps.strip_dirs()
+ ps.sort_stats('time')
+ ps.print_stats()
+ ps.sort_stats('cumulative')
+ ps.print_stats()
+
+
+if __name__ == '__main__':
+ statsfiles = parse_args()
+ format_stats(statsfiles)
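+
+# Example (hypothetical file names, as produced by profile-storage.py):
+#   ./profile-format.py -f profile_2014-06-12_20-09-11_creation.stats \
+#       -f profile_2014-06-12_20-09-11_retrieval.stats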
diff --git a/scripts/profiling/storage/profile-storage.py b/scripts/profiling/storage/profile-storage.py
new file mode 100755
index 00000000..305e6d5a
--- /dev/null
+++ b/scripts/profiling/storage/profile-storage.py
@@ -0,0 +1,107 @@
+#!/usr/bin/python
+
+import os
+import logging
+import getpass
+import tempfile
+import argparse
+import cProfile
+import shutil
+import pstats
+import StringIO
+import datetime
+
+
+from client_side_db import get_soledad_instance
+from util import ValidateUserHandle
+
+# profiling args
+NUM_DOCS = 1
+DOC_SIZE = 1024**2
+
+
+# create a logger
+logger = logging.getLogger(__name__)
+LOG_FORMAT = '%(asctime)s %(message)s'
+logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
+
+
+def parse_args():
+ # parse command line
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ 'user@provider', action=ValidateUserHandle, help='the user handle')
+ parser.add_argument(
+ '-b', dest='basedir', required=False, default=None,
+ help='soledad base directory')
+ parser.add_argument(
+ '-p', dest='passphrase', required=False, default=None,
+ help='the user passphrase')
+ parser.add_argument(
+ '-d', dest='logdir', required=False, default='/tmp/',
+ help='the directory to write the profile stats to')
+ args = parser.parse_args()
+ # get the password
+ passphrase = args.passphrase
+ if passphrase is None:
+ passphrase = getpass.getpass(
+ 'Password for %s@%s: ' % (args.username, args.provider))
+ # get the basedir
+ basedir = args.basedir
+ if basedir is None:
+ basedir = tempfile.mkdtemp()
+ logger.info('Using %s as base directory.' % basedir)
+
+ return args.username, args.provider, passphrase, basedir, args.logdir
+
+created_docs = []
+
+def create_docs(sol, content):
+ for i in xrange(NUM_DOCS):
+ doc = sol.create_doc(content)
+ created_docs.append(doc.doc_id)
+
+def get_all_docs(sol):
+ for doc_id in created_docs:
+ sol.get_doc(doc_id)
+
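+# Profile each operation with cProfile.runcall, print the stats sorted by
+# cumulative time, and dump them to .stats files that profile-format.py
+# can render later.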
+def do_profile(logdir, sol):
+ fname_prefix = os.path.join(
+ logdir,
+ "profile_%s" \
+ % datetime.datetime.now().strftime('%Y-%m-%d_%H-%m-%S'))
+
+ # profile create docs
+ content = {'data': os.urandom(DOC_SIZE/2).encode('hex')}
+ pr = cProfile.Profile()
+ pr.runcall(
+ create_docs,
+ sol, content)
+ s = StringIO.StringIO()
+ ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
+ ps.print_stats()
+ ps.dump_stats("%s_creation.stats" % fname_prefix)
+ print s.getvalue()
+
+ # profile get all docs
+ pr = cProfile.Profile()
+ pr.runcall(
+ get_all_docs,
+ sol)
+ s = StringIO.StringIO()
+ ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
+ ps.dump_stats("%s_retrieval.stats" % fname_prefix)
+ ps.print_stats()
+ print s.getvalue()
+
+
+if __name__ == '__main__':
+ username, provider, passphrase, basedir, logdir = parse_args()
+ sol = get_soledad_instance(
+ username,
+ provider,
+ passphrase,
+ basedir)
+ do_profile(logdir, sol)
+ shutil.rmtree(basedir)
+
diff --git a/scripts/profiling/storage/util.py b/scripts/profiling/storage/util.py
new file mode 120000
index 00000000..7f16d684
--- /dev/null
+++ b/scripts/profiling/storage/util.py
@@ -0,0 +1 @@
+../util.py \ No newline at end of file
diff --git a/scripts/profiling/sync/movingaverage.py b/scripts/profiling/sync/movingaverage.py
new file mode 120000
index 00000000..098b0a01
--- /dev/null
+++ b/scripts/profiling/sync/movingaverage.py
@@ -0,0 +1 @@
+../movingaverage.py \ No newline at end of file
diff --git a/scripts/profiling/sync/profile-decoupled.py b/scripts/profiling/sync/profile-decoupled.py
new file mode 100644
index 00000000..a844c3c6
--- /dev/null
+++ b/scripts/profiling/sync/profile-decoupled.py
@@ -0,0 +1,24 @@
+# test_name: soledad-sync
+# start_time: 2014-06-12 20:09:11.232317+00:00
+# elapsed_time total_cpu total_memory proc_cpu proc_memory
+0.000225 68.400000 46.100000 105.300000 0.527224 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.250167 0.000000 0.255160
+0.707006 76.200000 46.200000 90.000000 0.562369 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+1.413140 63.200000 46.100000 0.000000 0.360199 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+2.123962 0.000000 46.100000 0.000000 0.360199 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+2.833941 31.600000 46.100000 0.000000 0.360248 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+3.541532 5.300000 46.100000 0.000000 0.360298 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+4.253390 14.300000 46.100000 11.100000 0.360347 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+4.967365 5.000000 46.100000 0.000000 0.360347 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+5.680172 5.600000 46.100000 0.000000 0.360397 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+6.390501 10.500000 46.100000 0.000000 0.360397 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+7.101711 23.800000 46.000000 0.000000 0.360397 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+7.810529 30.000000 46.000000 0.000000 0.360397 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+8.517835 25.000000 46.100000 0.000000 0.361484 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+9.227455 5.300000 46.000000 9.500000 0.361484 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+9.936479 9.500000 46.000000 10.000000 0.361484 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+10.645015 52.400000 46.200000 0.000000 0.361484 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+11.355179 21.100000 46.000000 0.000000 0.361484 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+12.066252 36.800000 46.000000 0.000000 0.361484 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+12.777689 28.600000 46.000000 0.000000 0.361484 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+13.489886 0.000000 46.000000 0.000000 0.361484 0.000000 0.255308 0.000000 0.250167 0.000000 0.250167 0.000000 0.255308 0.000000 0.255160
+# end_time: 2014-06-12 20:09:25.434677+00:00 \ No newline at end of file
diff --git a/scripts/run_tests.sh b/scripts/run_tests.sh
new file mode 100755
index 00000000..e36466f8
--- /dev/null
+++ b/scripts/run_tests.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
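+# Run the test suite of the 'common' package; assumes the repository
+# root as the working directory.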
+cd common
+python setup.py test