author    drebs <drebs@leap.se>  2016-11-17 22:35:21 -0200
committer drebs <drebs@leap.se>  2016-12-12 09:15:21 -0200
commit    378a07113a713a7c25f0fb8510d18ecdae2198bd
tree      e1a0d7f7e6c1b252ea412b32d5f7e048e5db4326 /testing/tests/benchmarks
parent    87259d4210e3488b00876d7ec83a8cc21e341712
[test] rename benchmark tests directory and tag
Diffstat (limited to 'testing/tests/benchmarks')
-rw-r--r--  testing/tests/benchmarks/assets/cert_default.conf | 15
-rw-r--r--  testing/tests/benchmarks/conftest.py | 57
-rw-r--r--  testing/tests/benchmarks/pytest.ini | 2
-rw-r--r--  testing/tests/benchmarks/test_crypto.py | 99
-rw-r--r--  testing/tests/benchmarks/test_misc.py | 8
-rw-r--r--  testing/tests/benchmarks/test_sqlcipher.py | 40
-rw-r--r--  testing/tests/benchmarks/test_sync.py | 64
7 files changed, 285 insertions(+), 0 deletions(-)
diff --git a/testing/tests/benchmarks/assets/cert_default.conf b/testing/tests/benchmarks/assets/cert_default.conf
new file mode 100644
index 00000000..8043cea3
--- /dev/null
+++ b/testing/tests/benchmarks/assets/cert_default.conf
@@ -0,0 +1,15 @@
+[ req ]
+default_bits = 1024
+default_keyfile = keyfile.pem
+distinguished_name = req_distinguished_name
+prompt = no
+output_password = mypass
+
+[ req_distinguished_name ]
+C = GB
+ST = Test State or Province
+L = Test Locality
+O = Organization Name
+OU = Organizational Unit Name
+CN = localhost
+emailAddress = test@email.address
diff --git a/testing/tests/benchmarks/conftest.py b/testing/tests/benchmarks/conftest.py
new file mode 100644
index 00000000..a9cc3464
--- /dev/null
+++ b/testing/tests/benchmarks/conftest.py
@@ -0,0 +1,57 @@
+import pytest
+import random
+import base64
+
+from twisted.internet import threads, reactor
+
+
+# We have to manually set up the events server in order to be able to signal
+# events. This is usually done by the enclosing application that uses the
+# soledad client (i.e. the bitmask client).
+from leap.common.events import server
+server.ensure_server()
+
+
+def pytest_addoption(parser):
+ parser.addoption(
+ "--num-docs", type="int", default=100,
+ help="the number of documents to use in performance tests")
+
+
+@pytest.fixture()
+def payload():
+ def generate(size):
+ random.seed(1337) # same seed to avoid different bench results
+ payload_bytes = bytearray(random.getrandbits(8) for _ in xrange(size))
+ # encode as base64 to avoid ascii encode/decode errors
+ return base64.b64encode(payload_bytes)[:size] # remove b64 overhead
+ return generate
+
+
+@pytest.fixture()
+def txbenchmark(benchmark):
+ def blockOnThread(*args, **kwargs):
+ return threads.deferToThread(
+ benchmark, threads.blockingCallFromThread,
+ reactor, *args, **kwargs)
+ return blockOnThread
+
+
+@pytest.fixture()
+def txbenchmark_with_setup(benchmark):
+ def blockOnThreadWithSetup(setup, f):
+ def blocking_runner(*args, **kwargs):
+ return threads.blockingCallFromThread(reactor, f, *args, **kwargs)
+
+ def blocking_setup():
+ args = threads.blockingCallFromThread(reactor, setup)
+ try:
+ return tuple(arg for arg in args), {}
+ except TypeError:
+ return ((args,), {}) if args else None
+
+ def bench():
+ return benchmark.pedantic(blocking_runner, setup=blocking_setup,
+ rounds=4, warmup_rounds=1)
+ return threads.deferToThread(bench)
+ return blockOnThreadWithSetup
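
For reference, a minimal sketch of how a test could consume the payload and
txbenchmark fixtures above. The test name and document content are
illustrative only, and the soledad_client fixture is assumed to be provided
by the surrounding test suite's conftest:

    import pytest

    @pytest.inlineCallbacks
    @pytest.mark.benchmark(group="example_create_doc")
    def test_example_create_doc(soledad_client, txbenchmark, payload):
        # payload(1000) yields 1000 bytes of deterministic base64 text;
        # txbenchmark runs the call in the reactor thread and times it with
        # pytest-benchmark.
        client = soledad_client()
        yield txbenchmark(client.create_doc, {'content': payload(1000)})
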
diff --git a/testing/tests/benchmarks/pytest.ini b/testing/tests/benchmarks/pytest.ini
new file mode 100644
index 00000000..7a0508ce
--- /dev/null
+++ b/testing/tests/benchmarks/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
+twisted = yes
diff --git a/testing/tests/benchmarks/test_crypto.py b/testing/tests/benchmarks/test_crypto.py
new file mode 100644
index 00000000..ab586bea
--- /dev/null
+++ b/testing/tests/benchmarks/test_crypto.py
@@ -0,0 +1,99 @@
+"""
+Benchmarks for crypto operations.
+If you don't want to stress your local machine too much, you can set the
+SIZE_LIMIT environment variable.
+
+For instance, to keep the maximum payload at 1MB:
+
+SIZE_LIMIT=1E6 py.test -s tests/benchmarks/test_crypto.py
+"""
+import pytest
+import os
+import json
+from uuid import uuid4
+
+from leap.soledad.common.document import SoledadDocument
+from leap.soledad.client import _crypto
+
+LIMIT = int(float(os.environ.get('SIZE_LIMIT', 50 * 1000 * 1000)))
+
+
+pytestmark = pytest.mark.benchmark
+
+
+def create_doc_encryption(size):
+ @pytest.mark.benchmark(group="test_crypto_encrypt_doc")
+ def test_doc_encryption(soledad_client, benchmark, payload):
+ crypto = soledad_client()._crypto
+
+ DOC_CONTENT = {'payload': payload(size)}
+ doc = SoledadDocument(
+ doc_id=uuid4().hex, rev='rev',
+ json=json.dumps(DOC_CONTENT))
+
+ benchmark(crypto.encrypt_doc, doc)
+ return test_doc_encryption
+
+
+# TODO: this benchmark is misleading, because it still includes the json
+# serialization in the measured time.
+
+def create_doc_decryption(size):
+ @pytest.inlineCallbacks
+ @pytest.mark.benchmark(group="test_crypto_decrypt_doc")
+ def test_doc_decryption(soledad_client, benchmark, payload):
+ crypto = soledad_client()._crypto
+
+ DOC_CONTENT = {'payload': payload(size)}
+ doc = SoledadDocument(
+ doc_id=uuid4().hex, rev='rev',
+ json=json.dumps(DOC_CONTENT))
+
+ encrypted_doc = yield crypto.encrypt_doc(doc)
+ doc.set_json(encrypted_doc)
+
+ benchmark(crypto.decrypt_doc, doc)
+ return test_doc_decryption
+
+
+def create_raw_encryption(size):
+ @pytest.mark.benchmark(group="test_crypto_raw_encrypt")
+ def test_raw_encrypt(benchmark, payload):
+ key = payload(32)
+ benchmark(_crypto.encrypt_sym, payload(size), key)
+ return test_raw_encrypt
+
+
+def create_raw_decryption(size):
+ @pytest.mark.benchmark(group="test_crypto_raw_decrypt")
+ def test_raw_decrypt(benchmark, payload):
+ key = payload(32)
+ iv, ciphertext = _crypto.encrypt_sym(payload(size), key)
+ benchmark(_crypto.decrypt_sym, ciphertext, key, iv)
+ return test_raw_decrypt
+
+
+# Create the TESTS in the global namespace, they'll be picked by the benchmark
+# plugin.
+
+encryption_tests = [
+ ('10k', 1E4),
+ ('100k', 1E5),
+ ('500k', 5E5),
+ ('1M', 1E6),
+ ('10M', 1E7),
+ ('50M', 5E7),
+]
+
+for name, size in encryption_tests:
+ if size < LIMIT:
+ sz = int(size)
+ globals()['test_encrypt_doc_' + name] = create_doc_encryption(sz)
+ globals()['test_decrypt_doc_' + name] = create_doc_decryption(sz)
+
+
+for name, size in encryption_tests:
+ if size < LIMIT:
+ sz = int(size)
+ globals()['test_encrypt_raw_' + name] = create_raw_encryption(sz)
+ globals()['test_decrypt_raw_' + name] = create_raw_decryption(sz)
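
Note that the size filter above uses a strict comparison, so exporting
SIZE_LIMIT=1E6 drops the 1M variants along with the larger ones, leaving only
the 10k, 100k and 500k tests. A quick, illustrative way to check which
encryption tests end up generated, assuming the leap.soledad packages are
importable and this is run from the benchmarks directory:

    import os
    os.environ['SIZE_LIMIT'] = '1E6'  # must be set before the module is imported

    import test_crypto

    # only the variants strictly below the limit are created
    print(sorted(name for name in dir(test_crypto)
                 if name.startswith('test_encrypt_doc_')))
    # ['test_encrypt_doc_100k', 'test_encrypt_doc_10k', 'test_encrypt_doc_500k']
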
diff --git a/testing/tests/benchmarks/test_misc.py b/testing/tests/benchmarks/test_misc.py
new file mode 100644
index 00000000..2f32ad7c
--- /dev/null
+++ b/testing/tests/benchmarks/test_misc.py
@@ -0,0 +1,8 @@
+import pytest
+
+pytestmark = pytest.mark.benchmark
+
+
+@pytest.mark.benchmark(group="test_instance")
+def test_initialization(soledad_client, benchmark):
+ benchmark(soledad_client)
diff --git a/testing/tests/benchmarks/test_sqlcipher.py b/testing/tests/benchmarks/test_sqlcipher.py
new file mode 100644
index 00000000..7f8842bd
--- /dev/null
+++ b/testing/tests/benchmarks/test_sqlcipher.py
@@ -0,0 +1,40 @@
+'''
+Tests SoledadClient/SQLCipher interaction
+'''
+import pytest
+
+from twisted.internet.defer import gatherResults
+
+pytestmark = pytest.mark.benchmark
+
+
+def load_up(client, amount, payload, defer=True):
+ results = [client.create_doc({'content': payload}) for _ in xrange(amount)]
+ if defer:
+ return gatherResults(results)
+
+
+def build_test_sqlcipher_async_create(amount, size):
+ @pytest.inlineCallbacks
+ @pytest.mark.benchmark(group="test_sqlcipher_async_create")
+ def test(soledad_client, txbenchmark, payload):
+ client = soledad_client()
+ yield txbenchmark(load_up, client, amount, payload(size))
+ return test
+
+
+def build_test_sqlcipher_create(amount, size):
+ @pytest.mark.benchmark(group="test_sqlcipher_create")
+ def test(soledad_client, benchmark, payload):
+ client = soledad_client()._dbsyncer
+ benchmark(load_up, client, amount, payload(size), defer=False)
+ return test
+
+
+test_async_create_20_500k = build_test_sqlcipher_async_create(20, 500 * 1000)
+test_async_create_100_100k = build_test_sqlcipher_async_create(100, 100 * 1000)
+test_async_create_1000_10k = build_test_sqlcipher_async_create(1000, 10 * 1000)
+# synchronous
+test_create_20_500k = build_test_sqlcipher_create(20, 500 * 1000)
+test_create_100_100k = build_test_sqlcipher_create(100, 100 * 1000)
+test_create_1000_10k = build_test_sqlcipher_create(1000, 10 * 1000)
diff --git a/testing/tests/benchmarks/test_sync.py b/testing/tests/benchmarks/test_sync.py
new file mode 100644
index 00000000..88afe9f8
--- /dev/null
+++ b/testing/tests/benchmarks/test_sync.py
@@ -0,0 +1,64 @@
+import pytest
+
+pytestmark = pytest.mark.benchmark
+
+
+@pytest.inlineCallbacks
+def load_up(client, amount, payload):
+ # create a bunch of local documents
+ for i in xrange(amount):
+ yield client.create_doc({'content': payload})
+
+
+def create_upload(uploads, size):
+ @pytest.inlineCallbacks
+ @pytest.mark.benchmark(group="test_upload")
+ def test(soledad_client, txbenchmark_with_setup, payload):
+ client = soledad_client()
+
+ def setup():
+ return load_up(client, uploads, payload(size))
+
+ yield txbenchmark_with_setup(setup, client.sync)
+ return test
+
+
+test_upload_20_500k = create_upload(20, 500 * 1000)
+test_upload_100_100k = create_upload(100, 100 * 1000)
+test_upload_1000_10k = create_upload(1000, 10 * 1000)
+
+
+def create_download(downloads, size):
+ @pytest.inlineCallbacks
+ @pytest.mark.benchmark(group="test_download")
+ def test(soledad_client, txbenchmark_with_setup, payload):
+ client = soledad_client()
+
+ yield load_up(client, downloads, payload(size))
+ yield client.sync()
+ # We could create them directly on couch, but sending them
+ # ensures we are dealing with properly encrypted docs
+
+ def setup():
+ return soledad_client()
+
+ def sync(clean_client):
+ return clean_client.sync()
+ yield txbenchmark_with_setup(setup, sync)
+ return test
+
+
+test_download_20_500k = create_download(20, 500 * 1000)
+test_download_100_100k = create_download(100, 100 * 1000)
+test_download_1000_10k = create_download(1000, 10 * 1000)
+
+
+@pytest.inlineCallbacks
+@pytest.mark.benchmark(group="test_nothing_to_sync")
+def test_nothing_to_sync(soledad_client, txbenchmark_with_setup):
+ def setup():
+ return soledad_client()
+
+ def sync(clean_client):
+ return clean_client.sync()
+ yield txbenchmark_with_setup(setup, sync)
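
In the sync benchmarks above, the setup callable handed to
txbenchmark_with_setup runs before each round: for the download tests it
returns another client instance (named clean_client in the code), so each
round syncs the previously uploaded documents into a fresh local database
while only the sync call itself is timed. Adding another amount/size
combination is just another module-level assignment, e.g. (illustrative, not
part of this commit):

    test_download_500_10k = create_download(500, 10 * 1000)
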