Diffstat (limited to 'testing')
-rw-r--r--  testing/tests/benchmarks/conftest.py  28
-rw-r--r--  testing/tests/benchmarks/test_cpu.py  38
-rw-r--r--  testing/tests/conftest.py             12
-rw-r--r--  testing/tox.ini                         2
4 files changed, 80 insertions, 0 deletions
diff --git a/testing/tests/benchmarks/conftest.py b/testing/tests/benchmarks/conftest.py
index bec5d7ab..543f06b8 100644
--- a/testing/tests/benchmarks/conftest.py
+++ b/testing/tests/benchmarks/conftest.py
@@ -1,4 +1,6 @@
import base64
+import os
+import psutil
import pytest
import random
@@ -73,3 +75,29 @@ def txbenchmark_with_setup(benchmark):
                rounds=4, warmup_rounds=1)
        return threads.deferToThread(bench)
    return blockOnThreadWithSetup
+
+
+#
+# resource monitoring
+#
+
+@pytest.fixture
+def monitored_benchmark(benchmark, request):
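+    """Benchmark fun once, returning the CPU usage of the current process."""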
+
+    def _monitored_benchmark(fun, *args, **kwargs):
+        process = psutil.Process(os.getpid())
+        # the first cpu_percent() call primes psutil's counter; the
+        # second call below reports the usage measured since this one
+        process.cpu_percent()
+        benchmark.pedantic(
+            fun, args=args, kwargs=kwargs,
+            rounds=1, iterations=1, warmup_rounds=0)
+        percent = process.cpu_percent()
+        # store the value on the benchmark that just ran (the last one
+        # added to the session), so the json output can be updated later
+        bs = request.config._benchmarksession
+        bs.benchmarks[-1].stats.cpu_percent = percent
+        return percent
+
+    return _monitored_benchmark
diff --git a/testing/tests/benchmarks/test_cpu.py b/testing/tests/benchmarks/test_cpu.py
new file mode 100644
index 00000000..fba6a6d4
--- /dev/null
+++ b/testing/tests/benchmarks/test_cpu.py
@@ -0,0 +1,38 @@
+import pytest
+import random
+import time
+
+from decimal import Decimal
+
+
+def bellardBig(n):
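+    # CPU-bound workload: sums the first n terms of Bellard's series for pi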
+    # http://en.wikipedia.org/wiki/Bellard%27s_formula
+    pi = Decimal(0)
+    k = 0
+    while k < n:
+        pi += (Decimal(-1) ** k / (1024 ** k)) * (
+            Decimal(256) / (10 * k + 1) +
+            Decimal(1) / (10 * k + 9) -
+            Decimal(64) / (10 * k + 3) -
+            Decimal(32) / (4 * k + 1) -
+            Decimal(4) / (10 * k + 5) -
+            Decimal(4) / (10 * k + 7) -
+            Decimal(1) / (4 * k + 3))
+        k += 1
+    pi = pi / (2 ** 6)
+    return pi
+
+
+def test_long_operation(monitored_benchmark):
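+    """Keep the CPU busy for a few seconds and record its usage."""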
+
+    def _long_operation():
+        sleep = [random.uniform(0.5, 1.5) for _ in range(3)]
+        while sleep:
+            t = sleep.pop()
+            time.sleep(t)
+            bellardBig(int((10 ** 3) * t))
+
+    results = monitored_benchmark(_long_operation)
+    print(results)
diff --git a/testing/tests/conftest.py b/testing/tests/conftest.py
index 2459307a..bece7609 100644
--- a/testing/tests/conftest.py
+++ b/testing/tests/conftest.py
@@ -235,3 +235,15 @@ if 'pytest_benchmark' in sys.modules:
        """
        hostname = os.environ.get('HOST_HOSTNAME', socket.gethostname())
        machine_info['host'] = hostname
+
+    def pytest_benchmark_update_json(config, benchmarks, output_json):
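+        """
+        Add cpu_percent values saved by monitored_benchmark to the json.
+        """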
+        json_benchmarks = output_json['benchmarks']
+        for benchmark in benchmarks:
+            # find the json output that corresponds to this benchmark
+            name = benchmark['name']
+            output = next(d for d in json_benchmarks if d['name'] == name)
+            # update output stats with previously saved values
+            output['stats']['cpu_percent'] = benchmark.stats.cpu_percent
diff --git a/testing/tox.ini b/testing/tox.ini
index 7d38a16c..f2119a15 100644
--- a/testing/tox.ini
+++ b/testing/tox.ini
@@ -63,6 +63,8 @@ install_command = pip3 install {opts} {packages}
[testenv:benchmark]
deps =
    {[testenv]deps}
+    psutil
+    numpy
    #pytest-benchmark>=3.1.0a2
    git+https://github.com/ionelmc/pytest-benchmark.git@master
    elasticsearch