-rw-r--r--  testing/tests/benchmarks/conftest.py        87
-rw-r--r--  testing/tests/benchmarks/test_crypto.py      8
-rw-r--r--  testing/tests/benchmarks/test_misc.py        4
-rw-r--r--  testing/tests/benchmarks/test_sqlcipher.py   5
4 files changed, 60 insertions, 44 deletions
diff --git a/testing/tests/benchmarks/conftest.py b/testing/tests/benchmarks/conftest.py
index ec7d7fca..cfad458a 100644
--- a/testing/tests/benchmarks/conftest.py
+++ b/testing/tests/benchmarks/conftest.py
@@ -1,4 +1,5 @@
 import base64
+import functools
 import numpy
 import os
 import psutil
@@ -52,16 +53,16 @@ def payload():


 @pytest.fixture()
-def txbenchmark(benchmark):
+def txbenchmark(monitored_benchmark):
     def blockOnThread(*args, **kwargs):
         return threads.deferToThread(
-            benchmark, threads.blockingCallFromThread,
+            monitored_benchmark, threads.blockingCallFromThread,
             reactor, *args, **kwargs)
     return blockOnThread


 @pytest.fixture()
-def txbenchmark_with_setup(benchmark):
+def txbenchmark_with_setup(monitored_benchmark_with_setup):
     def blockOnThreadWithSetup(setup, f):
         def blocking_runner(*args, **kwargs):
             return threads.blockingCallFromThread(reactor, f, *args, **kwargs)
@@ -74,8 +75,9 @@ def txbenchmark_with_setup(benchmark):
             return ((args,), {}) if args else None

         def bench():
-            return benchmark.pedantic(blocking_runner, setup=blocking_setup,
-                                      rounds=4, warmup_rounds=1)
+            return monitored_benchmark_with_setup(
+                blocking_runner, setup=blocking_setup,
+                rounds=4, warmup_rounds=1)
         return threads.deferToThread(bench)
     return blockOnThreadWithSetup
@@ -84,52 +86,65 @@ def txbenchmark_with_setup(benchmark):
 # resource monitoring
 #

-class MemoryWatcher(threading.Thread):
+class ResourceWatcher(threading.Thread):

-    def __init__(self, process, interval):
+    sampling_interval = 1
+
+    def __init__(self):
         threading.Thread.__init__(self)
-        self.process = process
-        self.interval = interval
-        self.samples = []
+        self.process = psutil.Process(os.getpid())
         self.running = False
+        # monitored resources
+        self.cpu_percent = None
+        self.memory_samples = []
+        self.memory_percent = None

     def run(self):
         self.running = True
+        self.process.cpu_percent()
         while self.running:
-            memory = self.process.memory_percent(memtype='rss')
-            self.samples.append(memory)
-            time.sleep(self.interval)
+            sample = self.process.memory_percent(memtype='rss')
+            self.memory_samples.append(sample)
+            time.sleep(self.sampling_interval)

     def stop(self):
         self.running = False
         self.join()
-
-    def info(self):
-        info = {
-            'interval': self.interval,
-            'samples': self.samples,
+        # save cpu usage info
+        self.cpu_percent = self.process.cpu_percent()
+        # save memory usage info
+        memory_percent = {
+            'sampling_interval': self.sampling_interval,
+            'samples': self.memory_samples,
             'stats': {},
         }
         for stat in 'max', 'min', 'mean', 'std':
             fun = getattr(numpy, stat)
-            info['stats'][stat] = fun(self.samples)
-        return info
+            memory_percent['stats'][stat] = fun(self.memory_samples)
+        self.memory_percent = memory_percent
+
+
+def _monitored_benchmark(benchmark_fixture, benchmark_function,
+                         *args, **kwargs):
+    # setup resource monitoring
+    watcher = ResourceWatcher()
+    watcher.start()
+    # run benchmarking function
+    benchmark_function(*args, **kwargs)
+    # store results
+    watcher.stop()
+    benchmark_fixture.extra_info.update({
+        'cpu_percent': watcher.cpu_percent,
+        'memory_percent': watcher.memory_percent,
+    })
+
+
+@pytest.fixture
+def monitored_benchmark(benchmark):
+    return functools.partial(_monitored_benchmark, benchmark, benchmark)


 @pytest.fixture
-def monitored_benchmark(benchmark, request):
-
-    def _monitored_benchmark(fun, *args, **kwargs):
-        process = psutil.Process(os.getpid())
-        memwatch = MemoryWatcher(process, 1)
-        memwatch.start()
-        process.cpu_percent()
-        benchmark.pedantic(
-            fun, args=args, kwargs=kwargs,
-            rounds=1, iterations=1, warmup_rounds=0)
-        memwatch.stop()
-        # store results
-        benchmark.extra_info['cpu_percent'] = process.cpu_percent()
-        benchmark.extra_info['memory_percent'] = memwatch.info()
-
-    return _monitored_benchmark
+def monitored_benchmark_with_setup(benchmark):
+    return functools.partial(
+        _monitored_benchmark, benchmark, benchmark.pedantic)
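
After this change, tests no longer call pytest-benchmark's `benchmark` fixture directly: `monitored_benchmark` runs the callable under the regular `benchmark` timer while a `ResourceWatcher` thread samples the process, and `monitored_benchmark_with_setup` does the same around `benchmark.pedantic`. The sampled numbers land in `extra_info`, so they travel with the normal benchmark report. A hedged sketch of reading them back, assuming the suite is run with pytest-benchmark's `--benchmark-json=results.json` option (the file name is illustrative; the keys are the ones set in `_monitored_benchmark` above):

# inspect_results.py - sketch: pytest-benchmark serializes each
# benchmark's extra_info into its JSON report, so the monitored
# values can be read back directly.
import json

with open('results.json') as f:
    report = json.load(f)

for bench in report['benchmarks']:
    extra = bench['extra_info']
    print(bench['name'])
    print('  cpu_percent:', extra['cpu_percent'])
    # max/min/mean/std of the sampled RSS percentages
    print('  memory stats:', extra['memory_percent']['stats'])
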
diff --git a/testing/tests/benchmarks/test_crypto.py b/testing/tests/benchmarks/test_crypto.py
index 8ee9b899..a53e1783 100644
--- a/testing/tests/benchmarks/test_crypto.py
+++ b/testing/tests/benchmarks/test_crypto.py
@@ -56,18 +56,18 @@ def create_doc_decryption(size):


 def create_raw_encryption(size):
     @pytest.mark.benchmark(group="test_crypto_raw_encrypt")
-    def test_raw_encrypt(benchmark, payload):
+    def test_raw_encrypt(monitored_benchmark, payload):
         key = payload(32)
-        benchmark(_crypto.encrypt_sym, payload(size), key)
+        monitored_benchmark(_crypto.encrypt_sym, payload(size), key)
     return test_raw_encrypt


 def create_raw_decryption(size):
     @pytest.mark.benchmark(group="test_crypto_raw_decrypt")
-    def test_raw_decrypt(benchmark, payload):
+    def test_raw_decrypt(monitored_benchmark, payload):
         key = payload(32)
         iv, ciphertext = _crypto.encrypt_sym(payload(size), key)
-        benchmark(_crypto.decrypt_sym, ciphertext, key, iv)
+        monitored_benchmark(_crypto.decrypt_sym, ciphertext, key, iv)
     return test_raw_decrypt
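
The `create_raw_*` factories above return test functions rather than running anything themselves; they only become collectible tests once bound to module-level names. A sketch of that registration idiom, with illustrative payload sizes (the actual size list lives elsewhere in this module and is not part of the diff):

# Hypothetical registration loop; the sizes are placeholders.
for size in [10 * 1000, 100 * 1000, 500 * 1000]:
    sz = int(size / 1000)
    globals()['test_raw_encrypt_%dk' % sz] = create_raw_encryption(size)
    globals()['test_raw_decrypt_%dk' % sz] = create_raw_decryption(size)
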
diff --git a/testing/tests/benchmarks/test_misc.py b/testing/tests/benchmarks/test_misc.py
index ead48adf..4a7412a5 100644
--- a/testing/tests/benchmarks/test_misc.py
+++ b/testing/tests/benchmarks/test_misc.py
@@ -2,5 +2,5 @@ import pytest


 @pytest.mark.benchmark(group="test_instance")
-def test_initialization(soledad_client, benchmark):
-    benchmark(soledad_client)
+def test_initialization(soledad_client, monitored_benchmark):
+    monitored_benchmark(soledad_client)
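
The one-line swap above changes more than the fixture name: per the refactored conftest.py, `monitored_benchmark(soledad_client)` now expands to roughly the following sequence (a sketch; error handling omitted):

# Equivalent inline form of monitored_benchmark(soledad_client),
# per _monitored_benchmark in conftest.py (sketch):
watcher = ResourceWatcher()      # background thread sampling RSS via psutil
watcher.start()
benchmark(soledad_client)        # pytest-benchmark times the call as before
watcher.stop()                   # joins the thread, aggregates the samples
benchmark.extra_info.update({
    'cpu_percent': watcher.cpu_percent,
    'memory_percent': watcher.memory_percent,
})
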
diff --git a/testing/tests/benchmarks/test_sqlcipher.py b/testing/tests/benchmarks/test_sqlcipher.py
index 0cdda7e6..d300d931 100644
--- a/testing/tests/benchmarks/test_sqlcipher.py
+++ b/testing/tests/benchmarks/test_sqlcipher.py
@@ -24,9 +24,10 @@ def build_test_sqlcipher_async_create(amount, size):


 def build_test_sqlcipher_create(amount, size):
     @pytest.mark.synchronous
     @pytest.mark.benchmark(group="test_sqlcipher_create")
-    def test(soledad_client, benchmark, payload):
+    def test(soledad_client, monitored_benchmark, payload):
         client = soledad_client()._dbsyncer
-        benchmark(load_up, client, amount, payload(size), defer=False)
+        monitored_benchmark(
+            load_up, client, amount, payload(size), defer=False)
     return test
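
Since `ResourceWatcher` depends only on `threading`, `psutil`, `numpy`, and `time`, its sampling behavior can be sanity-checked outside pytest. A minimal standalone sketch (the workload is illustrative; with `sampling_interval = 1` the work must run for at least a second to collect more than one memory sample):

# Standalone exercise of ResourceWatcher from conftest.py (sketch).
from conftest import ResourceWatcher

def churn():
    # illustrative workload: allocate and discard some lists
    for _ in range(50):
        _ = [object() for _ in range(100000)]

watcher = ResourceWatcher()
watcher.start()
churn()
watcher.stop()
print(watcher.cpu_percent)              # process CPU% between start and stop
print(watcher.memory_percent['stats'])  # max/min/mean/std of sampled RSS %
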