author     drebs <drebs@leap.se>  2017-07-07 18:06:30 -0300
committer  drebs <drebs@leap.se>  2017-07-08 07:48:47 -0300
commit     16d8677ad67ceb0cdf78f3b0a7d6b4e4dae4b624 (patch)
tree       aa8714186ff0f58d89ca77e010bc49487a6b27f5 /testing/tests/benchmarks
parent     b289e6caa81fd3c6c8b6a6070feee6b1b0965122 (diff)
[benchmarks] add --watch-resources option
This commit adds the --watch-resources command-line option for benchmark tests, allowing the benchmark test suite to be run with or without resource monitoring instrumentation code. This is needed because resource consumption monitoring impacts the mean and standard deviation of the time taken to run benchmarked tests.
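To make the motivation concrete, below is a minimal, self-contained sketch of what resource monitoring running alongside a benchmarked call can look like. This is an illustration only, not the project's _monitored_benchmark instrumentation; the psutil-based sampler, the run_with_monitoring name and the interval parameter are assumptions made for the example. The background sampling consumes CPU and periodically wakes the interpreter, which is exactly the overhead that shifts the mean and standard deviation of the measured times.

# Illustration only -- not the project's _monitored_benchmark implementation.
# Samples CPU and memory in a background thread while fn() runs; the sampling
# itself costs CPU time, which is what perturbs the benchmark timings.
import threading
import time

import psutil


def run_with_monitoring(fn, interval=0.1):
    samples = []
    stop = threading.Event()

    def sample():
        while not stop.is_set():
            samples.append((psutil.cpu_percent(interval=None),
                            psutil.virtual_memory().percent))
            time.sleep(interval)

    monitor = threading.Thread(target=sample)
    monitor.start()
    try:
        start = time.time()
        result = fn()
        elapsed = time.time() - start
    finally:
        stop.set()
        monitor.join()
    return result, elapsed, samples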
Diffstat (limited to 'testing/tests/benchmarks')
-rw-r--r--  testing/tests/benchmarks/conftest.py  21
1 file changed, 16 insertions, 5 deletions
diff --git a/testing/tests/benchmarks/conftest.py b/testing/tests/benchmarks/conftest.py
index 4dbc4377..0d171ef4 100644
--- a/testing/tests/benchmarks/conftest.py
+++ b/testing/tests/benchmarks/conftest.py
@@ -24,8 +24,10 @@ server.ensure_server()
 
 def pytest_addoption(parser):
     parser.addoption(
-        "--num-docs", type="int", default=100,
-        help="the number of documents to use in performance tests")
+        "--watch-resources", default=False, action="store_true",
+        help="whether to monitor CPU and memory percentages during test run. "
+             "**Warning**: enabling this will impact the time taken by the "
+             "benchmarked code, so use with caution!")
 
 
 # mark benchmark tests using their group names (thanks ionelmc! :)
@@ -139,12 +141,21 @@ def _monitored_benchmark(benchmark_fixture, benchmark_function,
     })
 
 
+def _watch_resources(request):
+    return request.config.getoption('--watch-resources')
+
+
 @pytest.fixture
-def monitored_benchmark(benchmark):
-    return functools.partial(_monitored_benchmark, benchmark, benchmark)
+def monitored_benchmark(benchmark, request):
+    if not _watch_resources(request):
+        return benchmark
+    return functools.partial(
+        _monitored_benchmark, benchmark, benchmark)
 
 
 @pytest.fixture
-def monitored_benchmark_with_setup(benchmark):
+def monitored_benchmark_with_setup(benchmark, request):
+    if not _watch_resources(request):
+        return benchmark.pedantic
     return functools.partial(
         _monitored_benchmark, benchmark, benchmark.pedantic)
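For context, here is a hypothetical benchmark test (the test name and workload are illustrative, not taken from this repository) showing that the fixture keeps the same calling convention in both modes: with --watch-resources it resolves to the monitored wrapper, and without it it is the plain pytest-benchmark fixture, so the test body does not change.

# Hypothetical usage sketch -- names and workload are illustrative.
import pytest


@pytest.mark.benchmark(group='example')
def test_example(monitored_benchmark):
    def work():
        # stand-in for the code under measurement
        return sum(range(100000))

    # Same call in both modes: either benchmark(work) or the partial
    # application _monitored_benchmark(benchmark, benchmark, work).
    monitored_benchmark(work)

The same holds for monitored_benchmark_with_setup, which falls back to benchmark.pedantic when monitoring is disabled, so tests that pass a setup callable keep working unchanged.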