From aa9a13549eb30efa1f0528257e40fd67ebdfbee9 Mon Sep 17 00:00:00 2001
From: drebs
Date: Wed, 26 Apr 2017 18:01:46 +0200
Subject: [test] add memory measurement

---
 testing/tests/benchmarks/conftest.py       | 45 +++++++++++++++++++++++++---
 testing/tests/benchmarks/test_cpu.py       | 36 -----------------------
 testing/tests/benchmarks/test_resources.py | 47 ++++++++++++++++++++++++++++++
 testing/tests/conftest.py                  |  9 ------
 4 files changed, 88 insertions(+), 49 deletions(-)
 delete mode 100644 testing/tests/benchmarks/test_cpu.py
 create mode 100644 testing/tests/benchmarks/test_resources.py

diff --git a/testing/tests/benchmarks/conftest.py b/testing/tests/benchmarks/conftest.py
index 543f06b8..ec7d7fca 100644
--- a/testing/tests/benchmarks/conftest.py
+++ b/testing/tests/benchmarks/conftest.py
@@ -1,8 +1,11 @@
 import base64
+import numpy
 import os
 import psutil
 import pytest
 import random
+import threading
+import time
 
 from twisted.internet import threads, reactor
 
@@ -81,18 +84,52 @@ def txbenchmark_with_setup(benchmark):
 # resource monitoring
 #
 
+class MemoryWatcher(threading.Thread):
+
+    def __init__(self, process, interval):
+        threading.Thread.__init__(self)
+        self.process = process
+        self.interval = interval
+        self.samples = []
+        self.running = False
+
+    def run(self):
+        self.running = True
+        while self.running:
+            memory = self.process.memory_percent(memtype='rss')
+            self.samples.append(memory)
+            time.sleep(self.interval)
+
+    def stop(self):
+        self.running = False
+        self.join()
+
+    def info(self):
+        info = {
+            'interval': self.interval,
+            'samples': self.samples,
+            'stats': {},
+        }
+        for stat in 'max', 'min', 'mean', 'std':
+            fun = getattr(numpy, stat)
+            info['stats'][stat] = fun(self.samples)
+        return info
+
+
 @pytest.fixture
 def monitored_benchmark(benchmark, request):
 
     def _monitored_benchmark(fun, *args, **kwargs):
         process = psutil.Process(os.getpid())
+        memwatch = MemoryWatcher(process, 1)
+        memwatch.start()
         process.cpu_percent()
         benchmark.pedantic(
             fun, args=args, kwargs=kwargs,
             rounds=1, iterations=1, warmup_rounds=0)
-        percent = process.cpu_percent()
-        # store value in benchmark session, so json output can be updated
-        bs = request.config._benchmarksession
-        bs.benchmarks[0].stats.cpu_percent = percent
+        memwatch.stop()
+        # store results
+        benchmark.extra_info['cpu_percent'] = process.cpu_percent()
+        benchmark.extra_info['memory_percent'] = memwatch.info()
 
     return _monitored_benchmark
diff --git a/testing/tests/benchmarks/test_cpu.py b/testing/tests/benchmarks/test_cpu.py
deleted file mode 100644
index fba6a6d4..00000000
--- a/testing/tests/benchmarks/test_cpu.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import pytest
-import random
-import time
-
-from decimal import Decimal
-
-
-def bellardBig(n):
-    # http://en.wikipedia.org/wiki/Bellard%27s_formula
-    pi = Decimal(0)
-    k = 0
-    while k < n:
-        pi += (Decimal(-1) ** k / (1024 ** k)) * (
-            Decimal(256) / (10 * k + 1) +
-            Decimal(1) / (10 * k + 9) -
-            Decimal(64) / (10 * k + 3) -
-            Decimal(32) / (4 * k + 1) -
-            Decimal(4) / (10 * k + 5) -
-            Decimal(4) / (10 * k + 7) -
-            Decimal(1) / (4 * k + 3))
-        k += 1
-    pi = pi * 1 / (2 ** 6)
-    return pi
-
-
-def test_long_operation(monitored_benchmark):
-
-    def _long_operation():
-        sleep = [random.uniform(0.5, 1.5) for _ in xrange(3)]
-        while sleep:
-            t = sleep.pop()
-            time.sleep(t)
-            bellardBig(int((10 ** 3) * t))
-
-    results = monitored_benchmark(_long_operation)
-    print results
diff --git a/testing/tests/benchmarks/test_resources.py b/testing/tests/benchmarks/test_resources.py
new file mode 100644
index 00000000..39169012
--- /dev/null
+++ b/testing/tests/benchmarks/test_resources.py
@@ -0,0 +1,47 @@
+import random
+import time
+
+from decimal import Decimal
+
+
+def bellardBig(n):
+    # http://en.wikipedia.org/wiki/Bellard%27s_formula
+    pi = Decimal(0)
+    k = 0
+    while k < n:
+        pi += (Decimal(-1) ** k / (1024 ** k)) * (
+            Decimal(256) / (10 * k + 1) +
+            Decimal(1) / (10 * k + 9) -
+            Decimal(64) / (10 * k + 3) -
+            Decimal(32) / (4 * k + 1) -
+            Decimal(4) / (10 * k + 5) -
+            Decimal(4) / (10 * k + 7) -
+            Decimal(1) / (4 * k + 3))
+        k += 1
+    pi = pi * 1 / (2 ** 6)
+    return pi
+
+
+def test_cpu_intensive(monitored_benchmark):
+
+    def _cpu_intensive():
+        sleep = [random.uniform(0.5, 1.5) for _ in xrange(3)]
+        while sleep:
+            t = sleep.pop()
+            time.sleep(t)
+            bellardBig(int((10 ** 3) * t))
+
+    monitored_benchmark(_cpu_intensive)
+
+
+def test_memory_intensive(monitored_benchmark):
+
+    def _memory_intensive():
+        sleep = [random.uniform(0.5, 1.5) for _ in xrange(3)]
+        bigdata = ""
+        while sleep:
+            t = sleep.pop()
+            bigdata += "b" * 10 * int(10E6)
+            time.sleep(t)
+
+    monitored_benchmark(_memory_intensive)
diff --git a/testing/tests/conftest.py b/testing/tests/conftest.py
index bece7609..2459307a 100644
--- a/testing/tests/conftest.py
+++ b/testing/tests/conftest.py
@@ -235,12 +235,3 @@ if 'pytest_benchmark' in sys.modules:
         """
         hostname = os.environ.get('HOST_HOSTNAME', socket.gethostname())
         machine_info['host'] = hostname
-
-    def pytest_benchmark_update_json(config, benchmarks, output_json):
-        json_benchmarks = output_json['benchmarks']
-        for benchmark in benchmarks:
-            # find the json output that corresponds to this benchmark
-            name = benchmark['name']
-            output = filter(lambda d: d['name'] == name, json_benchmarks).pop()
-            # update output stats with previously saved values
-            output['stats']['cpu_percent'] = benchmark.stats.cpu_percent
-- 
cgit v1.2.3
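
Note on the design change: the patch drops the pytest_benchmark_update_json
hook in favor of benchmark.extra_info, which pytest-benchmark serializes
into its JSON report on its own (e.g. when run with
pytest --benchmark-json=out.json), so no post-processing is needed. A
minimal sketch of reading the recorded resource stats back, assuming the
report was written to out.json (the file name is only an example):

    import json

    # load the report produced by: pytest --benchmark-json=out.json
    with open('out.json') as f:
        report = json.load(f)

    for bench in report['benchmarks']:
        extra = bench['extra_info']
        print bench['name'], extra['cpu_percent']
        # max/min/mean/std of the sampled RSS percentage
        print extra['memory_percent']['stats']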
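
MemoryWatcher itself samples the process's RSS as a percentage of total
system memory every `interval` seconds on a background thread; stop()
joins the thread, and info() reduces the samples with numpy. A standalone
usage sketch, assuming the class above is importable and picking an
arbitrary 0.1-second interval:

    import os
    import psutil
    import time

    # watch our own process while it holds some extra memory
    process = psutil.Process(os.getpid())
    watcher = MemoryWatcher(process, 0.1)
    watcher.start()
    bigdata = "x" * int(10E6)  # allocate ~10 MB so the samples move
    time.sleep(0.5)            # give the watcher a few sampling rounds
    watcher.stop()
    print watcher.info()['stats']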
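
As for the CPU load used by the tests: each term of Bellard's formula
contributes roughly three more correct decimal digits of pi, so bellardBig
converges very quickly and the argument n mainly controls how long the
Decimal arithmetic keeps the CPU busy. A quick sanity check, assuming
bellardBig from test_resources.py is in scope and using Decimal's default
28-digit context:

    # six terms already match pi well past these digits
    print str(bellardBig(6))[:15]  # expect: 3.1415926535897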