author    drebs <drebs@riseup.net>  2017-09-17 12:08:25 -0300
committer drebs <drebs@riseup.net>  2017-09-17 15:50:55 -0300
commit    cfff46ff9becdbe5cf48816870e625ed253ecc57
tree      8d239e4499f559d86ed17ea3632008303b25d485
parent    f29abe28bd778838626d12fcabe3980a8ce4fa8c
[refactor] move tests to root of repository
The tests entrypoint was in a testing/ subfolder in the root of the repository. This was done mainly because we had some common files for tests that we didn't want to ship (files in testing/test_soledad, which is itself a python package). This sometimes causes errors when loading tests (it seems setuptools gets confused by having one python package in a subdirectory of another). This commit moves the tests entrypoint to the root of the repository. Closes: #8952
Diffstat (limited to 'testing/tests/benchmarks')
-rw-r--r--  testing/tests/benchmarks/README.md                      |  51
-rw-r--r--  testing/tests/benchmarks/assets/cert_default.conf       |  15
-rw-r--r--  testing/tests/benchmarks/conftest.py                    | 154
-rw-r--r--  testing/tests/benchmarks/pytest.ini                     |   2
-rw-r--r--  testing/tests/benchmarks/test_crypto.py                 | 109
-rw-r--r--  testing/tests/benchmarks/test_legacy_vs_blobs.py        | 305
-rw-r--r--  testing/tests/benchmarks/test_misc.py                   |   9
-rw-r--r--  testing/tests/benchmarks/test_resources.py              |  50
-rw-r--r--  testing/tests/benchmarks/test_sqlcipher.py              |  47
-rw-r--r--  testing/tests/benchmarks/test_sqlite_blobs_backend.py   |  82
-rw-r--r--  testing/tests/benchmarks/test_sync.py                   |  92
11 files changed, 0 insertions(+), 916 deletions(-)
diff --git a/testing/tests/benchmarks/README.md b/testing/tests/benchmarks/README.md
deleted file mode 100644
index b2465a78..00000000
--- a/testing/tests/benchmarks/README.md
+++ /dev/null
@@ -1,51 +0,0 @@
-Benchmark tests
-===============
-
-This folder contains benchmark tests for Soledad. It aims to provide a fair
-account of the time and resources taken to perform some actions.
-
-These benchmarks are built on top of `pytest-benchmark`, a `pytest` plugin
-that provides means for running test functions multiple times and generating
-reports. The results are printed to the screen and also posted to
-elasticsearch.
-
-`pytest-benchmark` runs tests multiple times so it can provide meaningful
-statistics for the time taken for a typical run of a test function. The number
-of times that the test is run can be manually or automatically configured. When
-automatically configured, the number of runs is decided by taking into account
-multiple `pytest-benchmark` configuration parameters. See the following page
-for more details on how `pytest-benchmark` works:
-
- https://pytest-benchmark.readthedocs.io/en/stable/calibration.html
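
A minimal sketch of pinning the run counts manually instead of relying on
automatic calibration (illustrative only, not a test from this folder; it uses
`pytest-benchmark`'s pedantic mode):

    def test_manually_calibrated(benchmark):
        # pedantic mode skips calibration and uses the given counts
        result = benchmark.pedantic(
            sum, args=([1, 2, 3],),
            rounds=4, warmup_rounds=1, iterations=10)
        assert result == 6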
-
-Some graphs and analysis resulting from these tests can be seen at:
-
- https://benchmarks.leap.se/
-
-
-Resource consumption
---------------------
-
-For each test, CPU and memory usage statistics are also collected by querying
-`cpu_percent()` and `memory_percent()` from `psutil.Process` for the current
-test process. Some notes about the current resource consumption estimation
-process (a rough sketch in code follows these notes):
-
-* Currently, resources are measured for the whole set of rounds that a test
-  function is run. That means that the CPU and memory percentages include the
-  `pytest` and `pytest-benchmark` machinery overhead. Even so, for now this
-  should provide a fair approximation of per-run test function resource usage.
-
-* CPU usage is measured before and after the run of the benchmark function,
-  and yields the percentage of CPU time that the current process occupied
-  between the two calls.
-
-* Memory is sampled during the benchmark run by a separate thread. Sampling
- interval might have to be configured on a per-test basis, as different tests
- take different times to execute (from milliseconds to tens of seconds). For
- now, an interval of 0.1s seems to cover all tests.
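
A rough sketch of this measurement scheme, using only `psutil` and the
standard library (illustrative; see `conftest.py` below for the actual
implementation):

    import os
    import threading
    import time

    import psutil

    def measure(fn, interval=0.1):
        proc = psutil.Process(os.getpid())
        samples = []
        running = [True]

        def sample_memory():
            # sample RSS memory percentage while fn runs
            while running[0]:
                samples.append(proc.memory_percent(memtype='rss'))
                time.sleep(interval)

        thread = threading.Thread(target=sample_memory)
        proc.cpu_percent()  # first call resets the CPU counter
        thread.start()
        fn()
        running[0] = False
        thread.join()
        # second call reports CPU usage since the first call
        return proc.cpu_percent(), samples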
-
-
-Benchmarks website
-------------------
-
-To update the benchmarks website, see the documentation in
-``../../../docs/misc/benchmarks-website.rst``.
diff --git a/testing/tests/benchmarks/assets/cert_default.conf b/testing/tests/benchmarks/assets/cert_default.conf
deleted file mode 100644
index 8043cea3..00000000
--- a/testing/tests/benchmarks/assets/cert_default.conf
+++ /dev/null
@@ -1,15 +0,0 @@
-[ req ]
-default_bits = 1024
-default_keyfile = keyfile.pem
-distinguished_name = req_distinguished_name
-prompt = no
-output_password = mypass
-
-[ req_distinguished_name ]
-C = GB
-ST = Test State or Province
-L = Test Locality
-O = Organization Name
-OU = Organizational Unit Name
-CN = localhost
-emailAddress = test@email.address
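
(A request config like this is normally consumed by `openssl req -config` to
generate a self-signed localhost certificate, presumably for the test server
used by these benchmarks; the exact invocation is not part of this diff.)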
diff --git a/testing/tests/benchmarks/conftest.py b/testing/tests/benchmarks/conftest.py
deleted file mode 100644
index 80eccb08..00000000
--- a/testing/tests/benchmarks/conftest.py
+++ /dev/null
@@ -1,154 +0,0 @@
-import functools
-import numpy
-import os
-import psutil
-import pytest
-import threading
-import time
-
-from twisted.internet import threads, reactor
-
-
-#
-# pytest customizations
-#
-
-# mark benchmark tests using their group names (thanks ionelmc! :)
-def pytest_collection_modifyitems(items, config):
- for item in items:
- bench = item.get_marker("benchmark")
- if bench and bench.kwargs.get('group'):
- group = bench.kwargs['group']
- marker = getattr(pytest.mark, 'benchmark_' + group)
- item.add_marker(marker)
-
- subdir = config.getoption('subdir')
- if subdir == 'benchmarks':
-        # we have to manually set up the events server in order to be able
-        # to signal events. This is usually done by the enclosing application
-        # using soledad client (i.e. bitmask client).
- from leap.common.events import server
- server.ensure_server()
-
-
-#
-# benchmark fixtures
-#
-
-@pytest.fixture()
-def txbenchmark(monitored_benchmark):
- def blockOnThread(*args, **kwargs):
- return threads.deferToThread(
- monitored_benchmark, threads.blockingCallFromThread,
- reactor, *args, **kwargs)
- return blockOnThread
-
-
-@pytest.fixture()
-def txbenchmark_with_setup(monitored_benchmark_with_setup):
- def blockOnThreadWithSetup(setup, f, *args, **kwargs):
- def blocking_runner(*args, **kwargs):
- return threads.blockingCallFromThread(reactor, f, *args, **kwargs)
-
-        def blocking_setup():
-            args = threads.blockingCallFromThread(reactor, setup)
-            try:
-                # setup returned an iterable of positional args: pass through
-                return tuple(arg for arg in args), {}
-            except TypeError:
-                # setup returned a single value (or None): wrap accordingly
-                return ((args,), {}) if args else None
-
- def bench():
- return monitored_benchmark_with_setup(
- blocking_runner, setup=blocking_setup,
- rounds=4, warmup_rounds=1, iterations=1,
- args=args, kwargs=kwargs)
- return threads.deferToThread(bench)
- return blockOnThreadWithSetup
-
-
-#
-# resource monitoring
-#
-
-class ResourceWatcher(threading.Thread):
-
- sampling_interval = 0.1
-
- def __init__(self, watch_memory):
- threading.Thread.__init__(self)
- self.process = psutil.Process(os.getpid())
- self.running = False
- # monitored resources
- self.cpu_percent = None
- self.watch_memory = watch_memory
- self.memory_samples = []
- self.memory_percent = None
-
- def run(self):
- self.running = True
-        # first call resets the CPU counter (result is discarded)
-        self.process.cpu_percent()
-        # sample frequently when watching memory; otherwise just idle until
-        # stop() is called
-        sleep = self.sampling_interval if self.watch_memory else 1
- while self.running:
- if self.watch_memory:
- sample = self.process.memory_percent(memtype='rss')
- self.memory_samples.append(sample)
- time.sleep(sleep)
-
- def stop(self):
- self.running = False
- self.join()
- # save cpu usage info
- self.cpu_percent = self.process.cpu_percent()
- # save memory usage info
- if self.watch_memory:
- memory_percent = {
- 'sampling_interval': self.sampling_interval,
- 'samples': self.memory_samples,
- 'stats': {},
- }
- for stat in 'max', 'min', 'mean', 'std':
- fun = getattr(numpy, stat)
- memory_percent['stats'][stat] = fun(self.memory_samples)
- self.memory_percent = memory_percent
-
-
-def _monitored_benchmark(benchmark_fixture, benchmark_function, request,
- *args, **kwargs):
- # setup resource monitoring
- watch_memory = _watch_memory(request)
- watcher = ResourceWatcher(watch_memory)
- watcher.start()
- # run benchmarking function
- benchmark_function(*args, **kwargs)
- # store results
- watcher.stop()
- benchmark_fixture.extra_info.update({
- 'cpu_percent': watcher.cpu_percent
- })
- if watch_memory:
- benchmark_fixture.extra_info.update({
- 'memory_percent': watcher.memory_percent,
- })
- # add docstring info
- if request.scope == 'function':
- fun = request.function
- doc = fun.__doc__ or ''
- benchmark_fixture.extra_info.update({'doc': doc.strip()})
-
-
-def _watch_memory(request):
- return request.config.getoption('--watch-memory')
-
-
-@pytest.fixture
-def monitored_benchmark(benchmark, request):
- return functools.partial(
- _monitored_benchmark, benchmark, benchmark, request)
-
-
-@pytest.fixture
-def monitored_benchmark_with_setup(benchmark, request, *args, **kwargs):
- return functools.partial(
- _monitored_benchmark, benchmark, benchmark.pedantic, request,
- *args, **kwargs)
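
For context, a benchmark test consumes the fixtures defined above roughly as
follows (a hypothetical sketch, not a file from this commit):

    import pytest

    @pytest.mark.benchmark(group="example_group")
    def test_example(monitored_benchmark):
        """
        This docstring ends up in extra_info['doc'].
        """
        # runs under pytest-benchmark while ResourceWatcher collects
        # cpu_percent (and memory samples, if --watch-memory is given)
        monitored_benchmark(sum, [1, 2, 3])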
diff --git a/testing/tests/benchmarks/pytest.ini b/testing/tests/benchmarks/pytest.ini
deleted file mode 100644
index 7a0508ce..00000000
--- a/testing/tests/benchmarks/pytest.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-[pytest]
-twisted = yes
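
The `twisted = yes` option presumably enables the pytest/Twisted integration
that lets `@pytest.inlineCallbacks` tests (used throughout this folder) run
against a reactor. A minimal test of that shape (illustrative):

    import pytest
    from twisted.internet import reactor, task

    @pytest.inlineCallbacks
    def test_deferred_sleep():
        # yield a Deferred; the plugin waits for it on the reactor
        yield task.deferLater(reactor, 0.01, lambda: None)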
diff --git a/testing/tests/benchmarks/test_crypto.py b/testing/tests/benchmarks/test_crypto.py
deleted file mode 100644
index 3be447a5..00000000
--- a/testing/tests/benchmarks/test_crypto.py
+++ /dev/null
@@ -1,109 +0,0 @@
-"""
-Benchmarks for crypto operations.
-If you don't want to stress your local machine too much, you can set the
-SIZE_LIMIT environment variable.
-
-For instance, to keep the maximum payload at 1MB:
-
-SIZE_LIMIT=1E6 py.test -s tests/benchmarks/test_crypto.py
-"""
-import pytest
-import os
-import json
-from uuid import uuid4
-
-from leap.soledad.common.document import SoledadDocument
-from leap.soledad.client import _crypto
-
-LIMIT = int(float(os.environ.get('SIZE_LIMIT', 50 * 1000 * 1000)))
-
-
-def create_doc_encryption(size):
- @pytest.mark.benchmark(group="test_crypto_encrypt_doc")
- @pytest.inlineCallbacks
- def test_doc_encryption(soledad_client, txbenchmark, payload):
- """
- Encrypt a document of a given size.
- """
- crypto = soledad_client()._crypto
-
- DOC_CONTENT = {'payload': payload(size)}
- doc = SoledadDocument(
- doc_id=uuid4().hex, rev='rev',
- json=json.dumps(DOC_CONTENT))
-
- yield txbenchmark(crypto.encrypt_doc, doc)
- return test_doc_encryption
-
-
-# TODO this test is misleading, because it still includes the JSON
-# serialization in the measured time.
-
-def create_doc_decryption(size):
- @pytest.inlineCallbacks
- @pytest.mark.benchmark(group="test_crypto_decrypt_doc")
- def test_doc_decryption(soledad_client, txbenchmark, payload):
- """
- Decrypt a document of a given size.
- """
- crypto = soledad_client()._crypto
-
- DOC_CONTENT = {'payload': payload(size)}
- doc = SoledadDocument(
- doc_id=uuid4().hex, rev='rev',
- json=json.dumps(DOC_CONTENT))
-
- encrypted_doc = yield crypto.encrypt_doc(doc)
- doc.set_json(encrypted_doc)
-
- yield txbenchmark(crypto.decrypt_doc, doc)
- return test_doc_decryption
-
-
-def create_raw_encryption(size):
- @pytest.mark.benchmark(group="test_crypto_raw_encrypt")
- def test_raw_encrypt(monitored_benchmark, payload):
- """
- Encrypt raw payload using default mode from crypto module.
- """
- key = payload(32)
- monitored_benchmark(_crypto.encrypt_sym, payload(size), key)
- return test_raw_encrypt
-
-
-def create_raw_decryption(size):
- @pytest.mark.benchmark(group="test_crypto_raw_decrypt")
- def test_raw_decrypt(monitored_benchmark, payload):
- """
- Decrypt raw payload using default mode from crypto module.
- """
- key = payload(32)
- iv, ciphertext = _crypto.encrypt_sym(payload(size), key)
- monitored_benchmark(_crypto.decrypt_sym, ciphertext, key, iv)
- return test_raw_decrypt
-
-
-# Create the TESTS in the global namespace; they'll be picked up by the
-# benchmark plugin.
-
-encryption_tests = [
- ('10k', 1E4),
- ('100k', 1E5),
- ('500k', 5E5),
- ('1M', 1E6),
- ('10M', 1E7),
- ('50M', 5E7),
-]
-
-for name, size in encryption_tests:
- if size < LIMIT:
- sz = int(size)
- globals()['test_encrypt_doc_' + name] = create_doc_encryption(sz)
- globals()['test_decrypt_doc_' + name] = create_doc_decryption(sz)
-
-
-for name, size in encryption_tests:
- if size < LIMIT:
- sz = int(size)
- globals()['test_encrypt_raw_' + name] = create_raw_encryption(sz)
- globals()['test_decrypt_raw_' + name] = create_raw_decryption(sz)
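
For reference, the raw symmetric primitives benchmarked above round-trip like
this (a sketch using only the calls that appear in this file; that
`decrypt_sym` returns the original payload is an assumption):

    from leap.soledad.client import _crypto

    key = 'k' * 32                       # 32-byte symmetric key
    iv, ciphertext = _crypto.encrypt_sym('some payload', key)
    plaintext = _crypto.decrypt_sym(ciphertext, key, iv)  # assumed round-trip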
diff --git a/testing/tests/benchmarks/test_legacy_vs_blobs.py b/testing/tests/benchmarks/test_legacy_vs_blobs.py
deleted file mode 100644
index 47d6482c..00000000
--- a/testing/tests/benchmarks/test_legacy_vs_blobs.py
+++ /dev/null
@@ -1,305 +0,0 @@
-# "Legacy" versus "Incoming blobs" pipeline comparison
-# ====================================================
-#
-# This benchmarking aims to compare the legacy and the new incoming mail
-# pipelines, to assess the performance improvements brought by the
-# introduction of blobs.
-#
-# We use the following sizes in these tests:
-#
-# - headers: 4 KB
-# - metadata: 0.1 KB
-# - flags: 0.5 KB
-# - content: variable
-#
-# "Legacy" incoming mail pipeline:
-#
-# - email arrives at MX.
-# - MX encrypts to public key and puts into couch.
-# - pubkey encrypted doc is synced to soledad client as "incoming".
-# - bitmask mail processes "incoming" and generates 3 metadocs + 1 payload
-# doc per message.
-# - soledad client syncs 4 documents back to server.
-#
-# "Incoming blobs" mail pipeline:
-#
-# - email arrives at MX.
-# - MX encrypts to public key and puts into soledad server.
-# - soledad server writes a blob to filesystem.
-# - soledad client gets the incoming blob from server and generates 3
-# metadocs + 1 blob.
-# - soledad client syncs 3 meta documents and 1 blob back to server.
-#
-# Some notes about the tests in this file:
-#
-# - This is a simulation of the legacy and new incoming mail pipelines.
-# There is no actual mail processing operation done (i.e. no pubkey crypto,
-# no mail parsing), only usual soledad document manipulation and sync (with
-#   local network and crypto).
-#
-# - Each test simulates a whole incoming mail pipeline, including get new
-# incoming messages from server, create new documents that represent the
-# parsed message, and synchronize those back to the server.
-#
-# - These tests are disabled by default because it doesn't make much sense to
-#   have them run automatically for all commits in the repository. Instead,
-#   we will run them manually for specific releases and store results and
-#   analysis in a subfolder.
-
-import base64
-import pytest
-import random
-import treq
-import uuid
-
-from io import BytesIO
-
-from twisted.internet.defer import gatherResults
-from twisted.internet.defer import returnValue
-from twisted.internet.defer import DeferredSemaphore
-
-from leap.soledad.common.blobs import Flags
-from leap.soledad.client._db.blobs import BlobDoc
-
-
-def payload(size):
- random.seed(1337) # same seed to avoid different bench results
- payload_bytes = bytearray(random.getrandbits(8) for _ in xrange(size))
- # encode as base64 to avoid ascii encode/decode errors
- return base64.b64encode(payload_bytes)[:size] # remove b64 overhead
-
-
-PARTS = {
- 'headers': payload(4000),
- 'metadata': payload(100),
- 'flags': payload(500),
-}
-
-
-#
-# "Legacy" incoming mail pipeline.
-#
-
-@pytest.inlineCallbacks
-def load_up_legacy(client, amount, content):
-    # make sure there are no documents from previous runs
- yield client.sync()
- _, docs = yield client.get_all_docs()
- deferreds = []
- for doc in docs:
- d = client.delete_doc(doc)
- deferreds.append(d)
- yield gatherResults(deferreds)
- yield client.sync()
-
- # create a bunch of local documents representing email messages
- deferreds = []
- for i in xrange(amount):
- deferreds.append(client.create_doc(content))
- yield gatherResults(deferreds)
- yield client.sync()
-
-
-@pytest.inlineCallbacks
-def process_incoming_docs(client, docs):
- deferreds = []
- for doc in docs:
-
-        # create fake documents that represent the message parts
- for name in PARTS.keys():
- d = client.create_doc({name: doc.content[name]})
- deferreds.append(d)
-
- # create one document with content
- key = 'content'
- d = client.create_doc({key: doc.content[key]})
- deferreds.append(d)
-
- # delete the old incoming document
- d = client.delete_doc(doc)
- deferreds.append(d)
-
-    # wait for all operations to succeed
- yield gatherResults(deferreds)
-
-
-def create_legacy_test(amount, size):
- group = 'test_legacy_vs_blobs_%d_%dk' % (amount, (size / 1000))
-
- @pytest.inlineCallbacks
- @pytest.mark.skip(reason="avoid running for all commits")
- @pytest.mark.benchmark(group=group)
- def test(soledad_client, txbenchmark_with_setup):
- client = soledad_client()
-
-        # set up the content of initial documents representing incoming emails
- content = {'content': payload(size), 'incoming': True}
- for name, data in PARTS.items():
- content[name] = data
-
- @pytest.inlineCallbacks
- def setup():
- yield load_up_legacy(client, amount, content)
- clean_client = soledad_client(force_fresh_db=True)
- yield clean_client.create_index('incoming', 'bool(incoming)')
- returnValue(clean_client)
-
- @pytest.inlineCallbacks
- def legacy_pipeline(client):
- yield client.sync()
- docs = yield client.get_from_index('incoming', '1')
- yield process_incoming_docs(client, docs)
- yield client.sync()
-
- yield txbenchmark_with_setup(setup, legacy_pipeline)
- return test
-
-
-# ATTENTION: update the documentation in ../docs/benchmarks.rst if you change
-# the number of docs or the doc sizes for the tests below.
-test_legacy_10_1000k = create_legacy_test(10, 1000 * 1000)
-test_legacy_100_100k = create_legacy_test(100, 100 * 1000)
-test_legacy_1000_10k = create_legacy_test(1000, 10 * 1000)
-
-
-#
-# "Incoming blobs" mail pipeline:
-#
-
-# used to limit the amount of concurrent accesses to the blob manager
-semaphore = DeferredSemaphore(2)
-
-
-# deliver data to a user by using the incoming api at a given url.
-def deliver_using_incoming_api(url, user_uuid, token, data):
- auth = 'Token %s' % base64.b64encode('%s:%s' % (user_uuid, token))
- uri = "%s/incoming/%s/%s?namespace=MX" % (url, user_uuid, uuid.uuid4().hex)
- return treq.put(uri, headers={'Authorization': auth}, data=BytesIO(data))
-
-
-# deliver data to a user by faking an incoming message using blobs
-@pytest.inlineCallbacks
-def deliver_using_blobs(client, fd):
- # put
- blob_id = uuid.uuid4().hex
- doc = BlobDoc(fd, blob_id=blob_id)
-    fd.seek(0, 2)
-    size = fd.tell()  # actual payload size (sys.getsizeof measured the object)
-    fd.seek(0)
- yield client.blobmanager.put(doc, size, namespace='MX')
- # and flag
- flags = [Flags.PENDING]
- yield client.blobmanager.set_flags(blob_id, flags, namespace='MX')
-
-
-def reclaim_free_space(client):
- return client.blobmanager.local.dbpool.runQuery("VACUUM")
-
-
-@pytest.inlineCallbacks
-def load_up_blobs(client, amount, data):
-    # make sure there are no documents from previous runs
- yield client.sync()
- _, docs = yield client.get_all_docs()
- deferreds = []
- for doc in docs:
- d = client.delete_doc(doc)
- deferreds.append(d)
- yield gatherResults(deferreds)
- yield client.sync()
-
- # delete all payload from blobs db and server
- for namespace in ['MX', 'payload']:
- ids = yield client.blobmanager.remote_list(namespace=namespace)
- deferreds = []
- for blob_id in ids:
- d = semaphore.run(
- client.blobmanager.delete, blob_id, namespace=namespace)
- deferreds.append(d)
- yield gatherResults(deferreds)
-
- # create a bunch of incoming blobs
- deferreds = []
- for i in xrange(amount):
-        # choose the delivery method based on whether the test is local or
-        # remote
- if '127.0.0.1' in client.server_url:
- fun = deliver_using_incoming_api
- args = (client.server_url, client.uuid, client.token, data)
- else:
- fun = deliver_using_blobs
- args = (client, BytesIO(data))
- d = semaphore.run(fun, *args)
- deferreds.append(d)
- yield gatherResults(deferreds)
-
- # empty local blobs db
- yield client.blobmanager.local.dbpool.runQuery(
- "DELETE FROM blobs WHERE 1;")
- yield reclaim_free_space(client)
-
-
-@pytest.inlineCallbacks
-def process_incoming_blobs(client, pending):
- # process items
- deferreds = []
- for item in pending:
- d = process_one_incoming_blob(client, item)
- deferreds.append(d)
- yield gatherResults(deferreds)
-
-
-@pytest.inlineCallbacks
-def process_one_incoming_blob(client, item):
- fd = yield semaphore.run(
- client.blobmanager.get, item, namespace='MX')
-
- # create metadata docs
- deferreds = []
- for name, data in PARTS.items():
- d = client.create_doc({name: data})
- deferreds.append(d)
-
- # put the incoming blob as it would be done after mail processing
- doc = BlobDoc(fd, blob_id=uuid.uuid4().hex)
-    fd.seek(0, 2)
-    size = fd.tell()  # actual payload size (sys.getsizeof measured the object)
-    fd.seek(0)
- d = semaphore.run(
- client.blobmanager.put, doc, size, namespace='payload')
- deferreds.append(d)
- yield gatherResults(deferreds)
-
- # delete incoming blob
- yield semaphore.run(
- client.blobmanager.delete, item, namespace='MX')
-
-
-def create_blobs_test(amount, size):
- group = 'test_legacy_vs_blobs_%d_%dk' % (amount, (size / 1000))
-
- @pytest.inlineCallbacks
- @pytest.mark.skip(reason="avoid running for all commits")
- @pytest.mark.benchmark(group=group)
- def test(soledad_client, txbenchmark_with_setup):
- client = soledad_client()
- blob_payload = payload(size)
-
- @pytest.inlineCallbacks
- def setup():
- yield load_up_blobs(client, amount, blob_payload)
- returnValue(soledad_client(force_fresh_db=True))
-
- @pytest.inlineCallbacks
- def blobs_pipeline(client):
- pending = yield client.blobmanager.remote_list(
- namespace='MX', filter_flags=Flags.PENDING)
- yield process_incoming_blobs(client, pending)
- # reclaim_free_space(client)
- yield client.sync()
- yield client.blobmanager.send_missing(namespace='payload')
-
- yield txbenchmark_with_setup(setup, blobs_pipeline)
- return test
-
-
-# ATTENTION: update the documentation in ../docs/benchmarks.rst if you change
-# the number of docs or the doc sizes for the tests below.
-test_blobs_10_1000k = create_blobs_test(10, 1000 * 1000)
-test_blobs_100_100k = create_blobs_test(100, 100 * 1000)
-test_blobs_1000_10k = create_blobs_test(1000, 10 * 1000)
diff --git a/testing/tests/benchmarks/test_misc.py b/testing/tests/benchmarks/test_misc.py
deleted file mode 100644
index 8b2178b9..00000000
--- a/testing/tests/benchmarks/test_misc.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import pytest
-
-
-@pytest.mark.benchmark(group="test_instance")
-def test_initialization(soledad_client, monitored_benchmark):
- """
- Soledad client object initialization.
- """
- monitored_benchmark(soledad_client)
diff --git a/testing/tests/benchmarks/test_resources.py b/testing/tests/benchmarks/test_resources.py
deleted file mode 100644
index 173edbd1..00000000
--- a/testing/tests/benchmarks/test_resources.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import pytest
-import random
-import time
-
-from decimal import Decimal
-
-
-def bellardBig(n):
- # http://en.wikipedia.org/wiki/Bellard%27s_formula
- pi = Decimal(0)
- k = 0
- while k < n:
- pi += (Decimal(-1) ** k / (1024 ** k)) * (
- Decimal(256) / (10 * k + 1) +
- Decimal(1) / (10 * k + 9) -
- Decimal(64) / (10 * k + 3) -
- Decimal(32) / (4 * k + 1) -
- Decimal(4) / (10 * k + 5) -
- Decimal(4) / (10 * k + 7) -
- Decimal(1) / (4 * k + 3))
- k += 1
-    pi = pi / (2 ** 6)
- return pi
-
-
-@pytest.mark.skip(reason='not a real use case, used only for instrumentation')
-def test_cpu_intensive(monitored_benchmark):
-
- def _cpu_intensive():
- sleep = [random.uniform(0.5, 1.5) for _ in xrange(3)]
- while sleep:
- t = sleep.pop()
- time.sleep(t)
- bellardBig(int((10 ** 3) * t))
-
- monitored_benchmark(_cpu_intensive)
-
-
-@pytest.mark.skip(reason='not a real use case, used only for instrumentation')
-def test_memory_intensive(monitored_benchmark):
-
- def _memory_intensive():
- sleep = [random.uniform(0.5, 1.5) for _ in xrange(3)]
- bigdata = ""
- while sleep:
- t = sleep.pop()
- bigdata += "b" * 10 * int(10E6)
- time.sleep(t)
-
- monitored_benchmark(_memory_intensive)
diff --git a/testing/tests/benchmarks/test_sqlcipher.py b/testing/tests/benchmarks/test_sqlcipher.py
deleted file mode 100644
index 9108084c..00000000
--- a/testing/tests/benchmarks/test_sqlcipher.py
+++ /dev/null
@@ -1,47 +0,0 @@
-'''
-Tests SoledadClient/SQLCipher interaction
-'''
-import pytest
-
-from twisted.internet.defer import gatherResults
-
-
-def load_up(client, amount, payload, defer=True):
- results = [client.create_doc({'content': payload}) for _ in xrange(amount)]
- if defer:
- return gatherResults(results)
-
-
-def build_test_sqlcipher_async_create(amount, size):
- @pytest.inlineCallbacks
- @pytest.mark.benchmark(group="test_sqlcipher_async_create")
- def test(soledad_client, txbenchmark_with_setup, payload):
- """
- Create many documents of a given size concurrently.
- """
- client = soledad_client()
- yield txbenchmark_with_setup(
- lambda: None, load_up, client, amount, payload(size))
- return test
-
-
-def build_test_sqlcipher_create(amount, size):
- @pytest.mark.skip(reason="this test is lengthy and not a real use case")
- @pytest.mark.benchmark(group="test_sqlcipher_create")
- def test(soledad_client, monitored_benchmark, payload):
- """
- Create many documents of a given size serially.
- """
- client = soledad_client()._dbsyncer
- monitored_benchmark(
- load_up, client, amount, payload(size), defer=False)
- return test
-
-
-test_async_create_10_1000k = build_test_sqlcipher_async_create(10, 1000 * 1000)
-test_async_create_100_100k = build_test_sqlcipher_async_create(100, 100 * 1000)
-test_async_create_1000_10k = build_test_sqlcipher_async_create(1000, 10 * 1000)
-# synchronous
-test_create_10_1000k = build_test_sqlcipher_create(10, 1000 * 1000)
-test_create_100_100k = build_test_sqlcipher_create(100, 100 * 1000)
-test_create_1000_10k = build_test_sqlcipher_create(1000, 10 * 1000)
diff --git a/testing/tests/benchmarks/test_sqlite_blobs_backend.py b/testing/tests/benchmarks/test_sqlite_blobs_backend.py
deleted file mode 100644
index e02cacad..00000000
--- a/testing/tests/benchmarks/test_sqlite_blobs_backend.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import pytest
-import os
-
-from uuid import uuid4
-from io import BytesIO
-
-from twisted.internet.defer import gatherResults
-from twisted.internet.defer import DeferredSemaphore
-
-from leap.soledad.client._db.blobs import SQLiteBlobBackend
-
-
-semaphore = DeferredSemaphore(2)
-
-
-#
-# put
-#
-
-def put(backend, amount, data):
- deferreds = []
- for _ in xrange(amount):
- blob_id = uuid4().hex
- fd = BytesIO(data)
- size = len(data)
- d = semaphore.run(backend.put, blob_id, fd, size)
- deferreds.append(d)
- return gatherResults(deferreds)
-
-
-def create_put_test(amount, size):
-
- @pytest.inlineCallbacks
- @pytest.mark.sqlite_blobs_backend_put
- def test(txbenchmark, payload, tmpdir):
- dbpath = os.path.join(tmpdir.strpath, 'blobs.db')
- backend = SQLiteBlobBackend(dbpath, key='123')
- data = payload(size)
- yield txbenchmark(put, backend, amount, data)
-
- return test
-
-
-test_sqlite_blobs_backend_put_1_10000k = create_put_test(1, 10000 * 1000)
-test_sqlite_blobs_backend_put_10_1000k = create_put_test(10, 1000 * 1000)
-test_sqlite_blobs_backend_put_100_100k = create_put_test(100, 100 * 1000)
-test_sqlite_blobs_backend_put_1000_10k = create_put_test(1000, 10 * 1000)
-
-
-#
-# get
-#
-
-@pytest.inlineCallbacks
-def get(backend):
- local = yield backend.list()
- deferreds = []
- for blob_id in local:
- d = backend.get(blob_id)
- deferreds.append(d)
- yield gatherResults(deferreds)
-
-
-def create_get_test(amount, size):
-
- @pytest.inlineCallbacks
- @pytest.mark.sqlite_blobs_backend_get
- def test(txbenchmark, payload, tmpdir):
- dbpath = os.path.join(tmpdir.strpath, 'blobs.db')
- backend = SQLiteBlobBackend(dbpath, key='123')
- data = payload(size)
-
- yield put(backend, amount, data)
- yield txbenchmark(get, backend)
-
- return test
-
-
-test_sqlite_blobs_backend_get_1_10000k = create_get_test(1, 10000 * 1000)
-test_sqlite_blobs_backend_get_10_1000k = create_get_test(10, 1000 * 1000)
-test_sqlite_blobs_backend_get_100_100k = create_get_test(100, 100 * 1000)
-test_sqlite_blobs_backend_get_1000_10k = create_get_test(1000, 10 * 1000)
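
Putting the two halves together, a single put/get round trip against the
backend looks roughly like this (a sketch reusing only calls that appear in
this file; that `get()` returns a file-like object is an assumption):

    from io import BytesIO
    from uuid import uuid4

    from twisted.internet.defer import inlineCallbacks

    from leap.soledad.client._db.blobs import SQLiteBlobBackend

    @inlineCallbacks
    def round_trip(dbpath):
        # put one blob, then read it back, using the calls shown above
        backend = SQLiteBlobBackend(dbpath, key='123')
        blob_id = uuid4().hex
        data = 'some blob payload'
        yield backend.put(blob_id, BytesIO(data), len(data))
        fd = yield backend.get(blob_id)  # assumed file-like
        print(fd.read())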
diff --git a/testing/tests/benchmarks/test_sync.py b/testing/tests/benchmarks/test_sync.py
deleted file mode 100644
index 45506d77..00000000
--- a/testing/tests/benchmarks/test_sync.py
+++ /dev/null
@@ -1,92 +0,0 @@
-import pytest
-from twisted.internet.defer import gatherResults
-
-
-@pytest.inlineCallbacks
-def load_up(client, amount, payload):
- # create a bunch of local documents
- deferreds = []
- for i in xrange(amount):
- deferreds.append(client.create_doc({'content': payload}))
- yield gatherResults(deferreds)
-
-
-# Each test created with this function will:
-#
-# - get a fresh client.
-# - iterate:
-# - setup: create N docs of a certain size
-# - benchmark: sync() -- uploads N docs.
-def create_upload(uploads, size):
- @pytest.inlineCallbacks
- @pytest.mark.benchmark(group="test_upload")
- def test(soledad_client, txbenchmark_with_setup, payload):
- """
- Upload many documents of a given size.
- """
- client = soledad_client()
-
- def setup():
- return load_up(client, uploads, payload(size))
-
- yield txbenchmark_with_setup(setup, client.sync)
- return test
-
-
-# ATTENTION: update the documentation in ../docs/benchmarks.rst if you change
-# the number of docs or the doc sizes for the tests below.
-test_upload_10_1000k = create_upload(10, 1000 * 1000)
-test_upload_100_100k = create_upload(100, 100 * 1000)
-test_upload_1000_10k = create_upload(1000, 10 * 1000)
-
-
-# Each test created with this function will:
-#
-# - get a fresh client.
-# - create N docs of a certain size
-# - sync (uploads those docs)
-# - iterate:
-# - setup: get a fresh client with empty local db
-# - benchmark: sync() -- downloads N docs.
-def create_download(downloads, size):
- @pytest.inlineCallbacks
- @pytest.mark.benchmark(group="test_download")
- def test(soledad_client, txbenchmark_with_setup, payload):
- """
- Download many documents of the same size.
- """
- client = soledad_client()
-
- yield load_up(client, downloads, payload(size))
- yield client.sync()
-        # We could create the documents directly on couch, but sending them
-        # through sync ensures we are dealing with properly encrypted docs.
-
- def setup():
- return soledad_client(force_fresh_db=True)
-
- def sync(clean_client):
- return clean_client.sync()
- yield txbenchmark_with_setup(setup, sync)
- return test
-
-
-# ATTENTION: update the documentation in ../docs/benchmarks.rst if you change
-# the number of docs or the doc sizes for the tests below.
-test_download_10_1000k = create_download(10, 1000 * 1000)
-test_download_100_100k = create_download(100, 100 * 1000)
-test_download_1000_10k = create_download(1000, 10 * 1000)
-
-
-@pytest.inlineCallbacks
-@pytest.mark.benchmark(group="test_nothing_to_sync")
-def test_nothing_to_sync(soledad_client, txbenchmark_with_setup):
- """
- Sync two replicas that are already in sync.
- """
- def setup():
- return soledad_client()
-
- def sync(clean_client):
- return clean_client.sync()
- yield txbenchmark_with_setup(setup, sync)