author | drebs <drebs@leap.se> | 2017-07-09 11:14:29 -0300
---|---|---
committer | drebs <drebs@leap.se> | 2017-07-09 11:14:29 -0300
commit | 0ab7340e571ea7472018a67e6a0d5dad614eccb4 (patch) |
tree | 456d1f6dca9e507bd834259195d8448aa682a9f1 /testing/tests |
parent | 2bc92608cb41acb852a77270326c2a51fecc82bb (diff) |
[benchmarks] separate memory sampling from cpu measurement
Diffstat (limited to 'testing/tests')
-rw-r--r-- | testing/tests/benchmarks/conftest.py | 59
-rw-r--r-- | testing/tests/conftest.py | 8
2 files changed, 36 insertions, 31 deletions
```diff
diff --git a/testing/tests/benchmarks/conftest.py b/testing/tests/benchmarks/conftest.py
index ac29f17f..3be17083 100644
--- a/testing/tests/benchmarks/conftest.py
+++ b/testing/tests/benchmarks/conftest.py
@@ -84,22 +84,26 @@ class ResourceWatcher(threading.Thread):
 
     sampling_interval = 0.1
 
-    def __init__(self):
+    def __init__(self, watch_memory):
         threading.Thread.__init__(self)
         self.process = psutil.Process(os.getpid())
         self.running = False
         # monitored resources
         self.cpu_percent = None
+        self.watch_memory = watch_memory
         self.memory_samples = []
         self.memory_percent = None
 
     def run(self):
         self.running = True
         self.process.cpu_percent()
+        # decide how long to sleep based on need to sample memory
+        sleep = self.sampling_interval if not self.watch_memory else 1
         while self.running:
-            sample = self.process.memory_percent(memtype='rss')
-            self.memory_samples.append(sample)
-            time.sleep(self.sampling_interval)
+            if self.watch_memory:
+                sample = self.process.memory_percent(memtype='rss')
+                self.memory_samples.append(sample)
+            time.sleep(sleep)
 
     def stop(self):
         self.running = False
@@ -107,47 +111,48 @@ class ResourceWatcher(threading.Thread):
         # save cpu usage info
         self.cpu_percent = self.process.cpu_percent()
         # save memory usage info
-        memory_percent = {
-            'sampling_interval': self.sampling_interval,
-            'samples': self.memory_samples,
-            'stats': {},
-        }
-        for stat in 'max', 'min', 'mean', 'std':
-            fun = getattr(numpy, stat)
-            memory_percent['stats'][stat] = fun(self.memory_samples)
-        self.memory_percent = memory_percent
-
-
-def _monitored_benchmark(benchmark_fixture, benchmark_function,
+        if self.watch_memory:
+            memory_percent = {
+                'sampling_interval': self.sampling_interval,
+                'samples': self.memory_samples,
+                'stats': {},
+            }
+            for stat in 'max', 'min', 'mean', 'std':
+                fun = getattr(numpy, stat)
+                memory_percent['stats'][stat] = fun(self.memory_samples)
+            self.memory_percent = memory_percent
+
+
+def _monitored_benchmark(benchmark_fixture, benchmark_function, request,
                          *args, **kwargs):
     # setup resource monitoring
-    watcher = ResourceWatcher()
+    watch_memory = _watch_memory(request)
+    watcher = ResourceWatcher(watch_memory)
     watcher.start()
     # run benchmarking function
     benchmark_function(*args, **kwargs)
     # store results
     watcher.stop()
     benchmark_fixture.extra_info.update({
-        'cpu_percent': watcher.cpu_percent,
-        'memory_percent': watcher.memory_percent,
+        'cpu_percent': watcher.cpu_percent
     })
+    if watch_memory:
+        benchmark_fixture.extra_info.update({
+            'memory_percent': watcher.memory_percent,
+        })
 
 
-def _watch_resources(request):
-    return request.config.getoption('--watch-resources')
+def _watch_memory(request):
+    return request.config.getoption('--watch-memory')
 
 
 @pytest.fixture
 def monitored_benchmark(benchmark, request):
-    if not _watch_resources(request):
-        return benchmark
     return functools.partial(
-        _monitored_benchmark, benchmark, benchmark)
+        _monitored_benchmark, benchmark, benchmark, request)
 
 
 @pytest.fixture
 def monitored_benchmark_with_setup(benchmark, request):
-    if not _watch_resources(request):
-        return benchmark.pedantic
     return functools.partial(
-        _monitored_benchmark, benchmark, benchmark.pedantic)
+        _monitored_benchmark, benchmark, benchmark.pedantic, request)
diff --git a/testing/tests/conftest.py b/testing/tests/conftest.py
index 994b1610..16ab699d 100644
--- a/testing/tests/conftest.py
+++ b/testing/tests/conftest.py
@@ -44,10 +44,10 @@ def pytest_addoption(parser):
     # the following option is only used in benchmarks, but has to be defined
     # here due to how pytest discovers plugins during startup.
     parser.addoption(
-        "--watch-resources", default=False, action="store_true",
-        help="whether to monitor CPU and memory percentages during test run. "
-             "**Warning**: enabling this will impact the time taken by the "
-             "benchmarked code, so use with caution!")
+        "--watch-memory", default=False, action="store_true",
+        help="whether to monitor memory percentages during test run. "
+             "**Warning**: enabling this will impact the time taken and the "
+             "CPU used by the benchmarked code, so use with caution!")
 
 
 @pytest.fixture
```
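For reference, a minimal sketch of how the changed fixture could be exercised from a benchmark test. Only the `monitored_benchmark` fixture and the `--watch-memory` option come from the diff above; the test and the `_some_workload` helper are hypothetical stand-ins.

```python
# Hypothetical benchmark module (not part of this commit), relying on the
# monitored_benchmark fixture from testing/tests/benchmarks/conftest.py.


def _some_workload():
    # stand-in for the code whose CPU (and optionally memory) usage is measured
    return sum(i * i for i in range(100000))


def test_some_workload(monitored_benchmark):
    # cpu_percent is always stored in the benchmark's extra_info;
    # memory_percent is stored only when the suite runs with --watch-memory
    monitored_benchmark(_some_workload)
```

Running the suite with something like `pytest testing/tests/benchmarks --watch-memory` would additionally record the memory samples and their max/min/mean/std in `extra_info`; without the flag the watcher thread still measures CPU but skips memory sampling.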