From cfff46ff9becdbe5cf48816870e625ed253ecc57 Mon Sep 17 00:00:00 2001
From: drebs
Date: Sun, 17 Sep 2017 12:08:25 -0300
Subject: [refactor] move tests to root of repository

The tests entrypoint was in a testing/ subfolder in the root of the
repository. This was done mainly because we had some common files for tests
that we didn't want to ship (the files in testing/test_soledad, which is
itself a python package). This sometimes causes errors when loading tests
(it seems setuptools gets confused when one python package lives in a
subdirectory of another). This commit moves the tests entrypoint to the
root of the repository.

Closes: #8952
---
 testing/tests/benchmarks/conftest.py | 154 -----------------------------------
 1 file changed, 154 deletions(-)
 delete mode 100644 testing/tests/benchmarks/conftest.py

diff --git a/testing/tests/benchmarks/conftest.py b/testing/tests/benchmarks/conftest.py
deleted file mode 100644
index 80eccb08..00000000
--- a/testing/tests/benchmarks/conftest.py
+++ /dev/null
@@ -1,154 +0,0 @@
-import functools
-import numpy
-import os
-import psutil
-import pytest
-import threading
-import time
-
-from twisted.internet import threads, reactor
-
-
-#
-# pytest customizations
-#
-
-# mark benchmark tests using their group names (thanks ionelmc! :)
-def pytest_collection_modifyitems(items, config):
-    for item in items:
-        bench = item.get_marker("benchmark")
-        if bench and bench.kwargs.get('group'):
-            group = bench.kwargs['group']
-            marker = getattr(pytest.mark, 'benchmark_' + group)
-            item.add_marker(marker)
-
-    subdir = config.getoption('subdir')
-    if subdir == 'benchmarks':
-        # we have to manually setup the events server in order to be able to
-        # signal events. This is usually done by the enclosing application
-        # using soledad client (i.e. bitmask client).
-        from leap.common.events import server
-        server.ensure_server()
-
-
-#
-# benchmark fixtures
-#
-
-@pytest.fixture()
-def txbenchmark(monitored_benchmark):
-    def blockOnThread(*args, **kwargs):
-        return threads.deferToThread(
-            monitored_benchmark, threads.blockingCallFromThread,
-            reactor, *args, **kwargs)
-    return blockOnThread
-
-
-@pytest.fixture()
-def txbenchmark_with_setup(monitored_benchmark_with_setup):
-    def blockOnThreadWithSetup(setup, f, *args, **kwargs):
-        def blocking_runner(*args, **kwargs):
-            return threads.blockingCallFromThread(reactor, f, *args, **kwargs)
-
-        def blocking_setup():
-            args = threads.blockingCallFromThread(reactor, setup)
-            try:
-                return tuple(arg for arg in args), {}
-            except TypeError:
-                return ((args,), {}) if args else None
-
-        def bench():
-            return monitored_benchmark_with_setup(
-                blocking_runner, setup=blocking_setup,
-                rounds=4, warmup_rounds=1, iterations=1,
-                args=args, kwargs=kwargs)
-        return threads.deferToThread(bench)
-    return blockOnThreadWithSetup
-
-
-#
-# resource monitoring
-#
-
-class ResourceWatcher(threading.Thread):
-
-    sampling_interval = 0.1
-
-    def __init__(self, watch_memory):
-        threading.Thread.__init__(self)
-        self.process = psutil.Process(os.getpid())
-        self.running = False
-        # monitored resources
-        self.cpu_percent = None
-        self.watch_memory = watch_memory
-        self.memory_samples = []
-        self.memory_percent = None
-
-    def run(self):
-        self.running = True
-        self.process.cpu_percent()
-        # decide how long to sleep based on need to sample memory
-        sleep = self.sampling_interval if not self.watch_memory else 1
-        while self.running:
-            if self.watch_memory:
-                sample = self.process.memory_percent(memtype='rss')
-                self.memory_samples.append(sample)
-            time.sleep(sleep)
-
-    def stop(self):
-        self.running = False
-        self.join()
-        # save cpu usage info
-        self.cpu_percent = self.process.cpu_percent()
-        # save memory usage info
-        if self.watch_memory:
-            memory_percent = {
-                'sampling_interval': self.sampling_interval,
-                'samples': self.memory_samples,
-                'stats': {},
-            }
-            for stat in 'max', 'min', 'mean', 'std':
-                fun = getattr(numpy, stat)
-                memory_percent['stats'][stat] = fun(self.memory_samples)
-            self.memory_percent = memory_percent
-
-
-def _monitored_benchmark(benchmark_fixture, benchmark_function, request,
-                          *args, **kwargs):
-    # setup resource monitoring
-    watch_memory = _watch_memory(request)
-    watcher = ResourceWatcher(watch_memory)
-    watcher.start()
-    # run benchmarking function
-    benchmark_function(*args, **kwargs)
-    # store results
-    watcher.stop()
-    benchmark_fixture.extra_info.update({
-        'cpu_percent': watcher.cpu_percent
-    })
-    if watch_memory:
-        benchmark_fixture.extra_info.update({
-            'memory_percent': watcher.memory_percent,
-        })
-    # add docstring info
-    if request.scope == 'function':
-        fun = request.function
-        doc = fun.__doc__ or ''
-        benchmark_fixture.extra_info.update({'doc': doc.strip()})
-
-
-def _watch_memory(request):
-    return request.config.getoption('--watch-memory')
-
-
-@pytest.fixture
-def monitored_benchmark(benchmark, request):
-    return functools.partial(
-        _monitored_benchmark, benchmark, benchmark, request)
-
-
-@pytest.fixture
-def monitored_benchmark_with_setup(benchmark, request, *args, **kwargs):
-    return functools.partial(
-        _monitored_benchmark, benchmark, benchmark.pedantic, request,
-        *args, **kwargs)
--
cgit v1.2.3
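
The packaging concern the commit message refers to (test helper code living in
a python package that should not be shipped) is commonly handled by excluding
the test packages explicitly when declaring packages. The snippet below is
only a generic sketch of that pattern; the package names and exclude patterns
are placeholders for illustration and are not taken from the soledad setup.py.

    # Generic sketch: keep top-level test packages out of the distribution.
    # 'tests' and 'test_soledad' are placeholder names, not verified against
    # the actual repository layout.
    from setuptools import find_packages

    packages = find_packages(exclude=['tests', 'tests.*',
                                      'test_soledad', 'test_soledad.*'])
    print(packages)

With the tests entrypoint at the repository root, an exclude list like this is
usually enough to keep the helpers importable for the test run while leaving
them out of the built distribution.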