 testing/tests/benchmarks/conftest.py                        | 45 +++++++++++++++++++++++++++++++++++++++++----
 testing/tests/benchmarks/{test_cpu.py => test_resources.py} | 21 ++++++++++++++++-----
 testing/tests/conftest.py                                   |  9 ---------
 3 files changed, 57 insertions(+), 18 deletions(-)
diff --git a/testing/tests/benchmarks/conftest.py b/testing/tests/benchmarks/conftest.py
index 543f06b8..ec7d7fca 100644
--- a/testing/tests/benchmarks/conftest.py
+++ b/testing/tests/benchmarks/conftest.py
@@ -1,8 +1,11 @@
 import base64
+import numpy
 import os
 import psutil
 import pytest
 import random
+import threading
+import time
 
 from twisted.internet import threads, reactor
 
@@ -81,18 +84,52 @@ def txbenchmark_with_setup(benchmark):
 # resource monitoring
 #
 
+class MemoryWatcher(threading.Thread):
+
+    def __init__(self, process, interval):
+        threading.Thread.__init__(self)
+        self.process = process
+        self.interval = interval
+        self.samples = []
+        self.running = False
+
+    def run(self):
+        self.running = True
+        while self.running:
+            memory = self.process.memory_percent(memtype='rss')
+            self.samples.append(memory)
+            time.sleep(self.interval)
+
+    def stop(self):
+        self.running = False
+        self.join()
+
+    def info(self):
+        info = {
+            'interval': self.interval,
+            'samples': self.samples,
+            'stats': {},
+        }
+        for stat in 'max', 'min', 'mean', 'std':
+            fun = getattr(numpy, stat)
+            info['stats'][stat] = fun(self.samples)
+        return info
+
+
 @pytest.fixture
 def monitored_benchmark(benchmark, request):
 
     def _monitored_benchmark(fun, *args, **kwargs):
         process = psutil.Process(os.getpid())
+        memwatch = MemoryWatcher(process, 1)
+        memwatch.start()
         process.cpu_percent()
         benchmark.pedantic(
             fun, args=args, kwargs=kwargs,
             rounds=1, iterations=1, warmup_rounds=0)
-        percent = process.cpu_percent()
-        # store value in benchmark session, so json output can be updated
-        bs = request.config._benchmarksession
-        bs.benchmarks[0].stats.cpu_percent = percent
+        memwatch.stop()
+        # store results
+        benchmark.extra_info['cpu_percent'] = process.cpu_percent()
+        benchmark.extra_info['memory_percent'] = memwatch.info()
 
     return _monitored_benchmark
diff --git a/testing/tests/benchmarks/test_cpu.py b/testing/tests/benchmarks/test_resources.py
index fba6a6d4..39169012 100644
--- a/testing/tests/benchmarks/test_cpu.py
+++ b/testing/tests/benchmarks/test_resources.py
@@ -1,4 +1,3 @@
-import pytest
 import random
 import time
 
@@ -23,14 +22,26 @@ def bellardBig(n):
     return pi
 
 
-def test_long_operation(monitored_benchmark):
+def test_cpu_intensive(monitored_benchmark):
 
-    def _long_operation():
+    def _cpu_intensive():
         sleep = [random.uniform(0.5, 1.5) for _ in xrange(3)]
         while sleep:
             t = sleep.pop()
             time.sleep(t)
             bellardBig(int((10 ** 3) * t))
 
-    results = monitored_benchmark(_long_operation)
-    print results
+    monitored_benchmark(_cpu_intensive)
+
+
+def test_memory_intensive(monitored_benchmark):
+
+    def _memory_intensive():
+        sleep = [random.uniform(0.5, 1.5) for _ in xrange(3)]
+        bigdata = ""
+        while sleep:
+            t = sleep.pop()
+            bigdata += "b" * 10 * int(10E6)
+            time.sleep(t)
+
+    monitored_benchmark(_memory_intensive)
diff --git a/testing/tests/conftest.py b/testing/tests/conftest.py
index bece7609..2459307a 100644
--- a/testing/tests/conftest.py
+++ b/testing/tests/conftest.py
@@ -235,12 +235,3 @@ if 'pytest_benchmark' in sys.modules:
         """
         hostname = os.environ.get('HOST_HOSTNAME', socket.gethostname())
         machine_info['host'] = hostname
-
-    def pytest_benchmark_update_json(config, benchmarks, output_json):
-        json_benchmarks = output_json['benchmarks']
-        for benchmark in benchmarks:
-            # find the json output that corresponds to this benchmark
-            name = benchmark['name']
-            output = filter(lambda d: d['name'] == name, json_benchmarks).pop()
-            # update output stats with previously saved values
-            output['stats']['cpu_percent'] = benchmark.stats.cpu_percent
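For illustration, a minimal sketch (not part of this commit) of how the MemoryWatcher added above is meant to be driven, assuming the class is importable (in this commit it lives in testing/tests/benchmarks/conftest.py) and using a hypothetical run_workload() as a stand-in for the benchmarked function:

    import os

    import psutil

    # Sample this process's RSS memory percentage once per second on a
    # background thread.
    watcher = MemoryWatcher(psutil.Process(os.getpid()), 1)
    watcher.start()

    run_workload()  # hypothetical stand-in for the code under measurement

    # stop() clears the running flag and joins the sampling thread.
    watcher.stop()

    # info() returns the interval, the raw samples, and numpy-computed
    # max/min/mean/std aggregates of the sampled memory percentages.
    print(watcher.info()['stats'])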

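Since the commit drops the pytest_benchmark_update_json hook and stores the measurements in benchmark.extra_info instead, the values end up under each benchmark's extra_info key in pytest-benchmark's JSON output. A hedged sketch of reading them back, assuming the suite was run with pytest --benchmark-json=out.json (the file name is just an example):

    import json

    with open('out.json') as f:
        output = json.load(f)

    # Each entry in 'benchmarks' carries the fixture's stored values under
    # 'extra_info': the CPU percentage and the MemoryWatcher summary.
    for bench in output['benchmarks']:
        extra = bench.get('extra_info', {})
        memory = extra.get('memory_percent', {})
        print('%s: cpu_percent=%s memory_stats=%s' % (
            bench['name'], extra.get('cpu_percent'), memory.get('stats')))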