path: root/testing/tests/benchmarks/conftest.py

import base64
import functools
import numpy
import os
import psutil
import pytest
import random
import threading
import time

from twisted.internet import threads, reactor


# We have to manually set up the events server in order to be able to signal
# events. This is usually done by the enclosing application using the Soledad
# client (i.e. the Bitmask client).
from leap.common.events import server
server.ensure_server()


#
# pytest customizations
#

def pytest_addoption(parser):
    parser.addoption(
        "--num-docs", type="int", default=100,
        help="the number of documents to use in performance tests")


# mark benchmark tests using their group names (thanks ionelmc! :)
def pytest_collection_modifyitems(items):
    for item in items:
        bench = item.get_marker("benchmark")
        if bench and bench.kwargs.get('group'):
            group = bench.kwargs['group']
            marker = getattr(pytest.mark, 'benchmark_' + group)
            item.add_marker(marker)
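
# For example, a test marked with ``@pytest.mark.benchmark(group="sqlcipher")``
# (hypothetical group name) also gets a ``benchmark_sqlcipher`` marker, so that
# group can be run on its own with ``pytest -m benchmark_sqlcipher``.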


#
# benchmark fixtures
#

@pytest.fixture()
def payload():
    def generate(size):
        random.seed(1337)  # fixed seed so every run generates the same payload
        payload_bytes = bytearray(random.getrandbits(8) for _ in xrange(size))
        # encode as base64 to avoid ascii encode/decode errors
        return base64.b64encode(payload_bytes)[:size]  # remove b64 overhead
    return generate
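
# Hypothetical usage sketch: the fixture returns a callable, so each test
# picks the payload size it needs, e.g.:
#
#     data = payload(10 * 1000)  # 10 KB of reproducible pseudo-random data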


@pytest.fixture()
def txbenchmark(monitored_benchmark):
    def blockOnThread(*args, **kwargs):
        return threads.deferToThread(
            monitored_benchmark, threads.blockingCallFromThread,
            reactor, *args, **kwargs)
    return blockOnThread
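
# Hypothetical usage sketch (assuming pytest-twisted provides the
# ``pytest.inlineCallbacks`` decorator and ``soledad_client`` is a fixture
# defined elsewhere): the test yields the deferred returned by the fixture:
#
#     @pytest.inlineCallbacks
#     def test_sync(txbenchmark, soledad_client):
#         yield txbenchmark(soledad_client.sync)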


@pytest.fixture()
def txbenchmark_with_setup(monitored_benchmark_with_setup):
    def blockOnThreadWithSetup(setup, f):
        def blocking_runner(*args, **kwargs):
            return threads.blockingCallFromThread(reactor, f, *args, **kwargs)

        def blocking_setup():
            args = threads.blockingCallFromThread(reactor, setup)
            # pytest-benchmark's pedantic mode expects setup() to return an
            # (args, kwargs) pair; wrap a single non-iterable return value.
            try:
                return tuple(arg for arg in args), {}
            except TypeError:
                return ((args,), {}) if args else None

        def bench():
            return monitored_benchmark_with_setup(
                blocking_runner, setup=blocking_setup,
                rounds=4, warmup_rounds=1)
        return threads.deferToThread(bench)
    return blockOnThreadWithSetup
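
# Hypothetical usage sketch (fixture and method names are illustrative):
# ``setup`` runs in the reactor thread before each round and its return value
# is unpacked as positional arguments for ``f``:
#
#     @pytest.inlineCallbacks
#     def test_create_doc(txbenchmark_with_setup, soledad_client, payload):
#         def setup():
#             return ({'content': payload(1000)},)
#         def create(doc):
#             return soledad_client.create_doc(doc)
#         yield txbenchmark_with_setup(setup, create)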


#
# resource monitoring
#

class ResourceWatcher(threading.Thread):

    sampling_interval = 1  # seconds between memory samples

    def __init__(self):
        threading.Thread.__init__(self)
        self.process = psutil.Process(os.getpid())
        self.running = False
        # monitored resources
        self.cpu_percent = None
        self.memory_samples = []
        self.memory_percent = None

    def run(self):
        self.running = True
        # the first cpu_percent() call returns a meaningless value; it only
        # establishes the baseline for the measurement taken in stop()
        self.process.cpu_percent()
        while self.running:
            sample = self.process.memory_percent(memtype='rss')
            self.memory_samples.append(sample)
            time.sleep(self.sampling_interval)

    def stop(self):
        self.running = False
        self.join()
        # save cpu usage info: this reports the average CPU utilization since
        # the baseline call made in run()
        self.cpu_percent = self.process.cpu_percent()
        # save memory usage info
        memory_percent = {
            'sampling_interval': self.sampling_interval,
            'samples': self.memory_samples,
            'stats': {},
        }
        for stat in 'max', 'min', 'mean', 'std':
            fun = getattr(numpy, stat)
            memory_percent['stats'][stat] = fun(self.memory_samples)
        self.memory_percent = memory_percent
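
# Illustrative standalone use (the fixtures below drive the watcher through
# _monitored_benchmark instead):
#
#     watcher = ResourceWatcher()
#     watcher.start()
#     run_workload()   # hypothetical function being measured
#     watcher.stop()
#     print watcher.cpu_percent, watcher.memory_percent['stats']['mean']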


def _monitored_benchmark(benchmark_fixture, benchmark_function,
                         *args, **kwargs):
    # setup resource monitoring
    watcher = ResourceWatcher()
    watcher.start()
    # run benchmarking function
    benchmark_function(*args, **kwargs)
    # store results
    watcher.stop()
    benchmark_fixture.extra_info.update({
        'cpu_percent': watcher.cpu_percent,
        'memory_percent': watcher.memory_percent,
    })


@pytest.fixture
def monitored_benchmark(benchmark):
    return functools.partial(_monitored_benchmark, benchmark, benchmark)
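
# Hypothetical usage sketch: a plain (non-Twisted) test can call the partial
# like the regular pytest-benchmark fixture; CPU and memory figures end up in
# the report's extra_info:
#
#     def test_decrypt(monitored_benchmark, crypto, doc):
#         monitored_benchmark(crypto.decrypt_doc, doc)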


@pytest.fixture
def monitored_benchmark_with_setup(benchmark):
    return functools.partial(
        _monitored_benchmark, benchmark, benchmark.pedantic)