import base64
import os
import psutil
import pytest
import random

from twisted.internet import threads, reactor

# we have to manually set up the events server in order to be able to signal
# events. This is usually done by the enclosing application using soledad
# client (i.e. bitmask client).
from leap.common.events import server
server.ensure_server()


#
# pytest customizations
#

def pytest_addoption(parser):
    parser.addoption(
        "--num-docs", type="int", default=100,
        help="the number of documents to use in performance tests")


# mark benchmark tests using their group names (thanks ionelmc! :)
def pytest_collection_modifyitems(items):
    for item in items:
        bench = item.get_marker("benchmark")
        if bench and bench.kwargs.get('group'):
            group = bench.kwargs['group']
            marker = getattr(pytest.mark, 'benchmark_' + group)
            item.add_marker(marker)
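
# Hedged usage sketch: with the extra marker added above, benchmarks can be
# filtered by group from the command line. Assuming a test declares a group:
#
#     @pytest.mark.benchmark(group="create_doc")
#     def test_create_doc(benchmark):
#         benchmark(some_callable)
#
# it can then be selected with `pytest -m benchmark_create_doc`. The group
# name and `some_callable` are illustrative, not part of this file.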


#
# benchmark fixtures
#

@pytest.fixture()
def payload():
    def generate(size):
        random.seed(1337)  # same seed to avoid different bench results
        payload_bytes = bytearray(random.getrandbits(8) for _ in xrange(size))
        # encode as base64 to avoid ascii encode/decode errors
        return base64.b64encode(payload_bytes)[:size]  # remove b64 overhead
    return generate
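
# Hedged usage sketch: a benchmark test can request the `payload` fixture and
# generate deterministic document content of a given size, for example:
#
#     def test_upload_1m(benchmark, payload):
#         content = payload(1000 * 1000)  # ~1M of reproducible base64 data
#         benchmark(upload, content)
#
# The test name and the `upload` callable are assumptions, not part of this
# file.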


@pytest.fixture()
def txbenchmark(benchmark):
    def blockOnThread(*args, **kwargs):
        return threads.deferToThread(
            benchmark, threads.blockingCallFromThread,
            reactor, *args, **kwargs)
    return blockOnThread
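
# Hedged usage sketch: `txbenchmark` times a Deferred-returning callable by
# running the benchmark loop in a worker thread while the measured call runs
# in the reactor thread (via blockingCallFromThread). Assuming pytest-twisted
# style tests, usage could look like:
#
#     @pytest.inlineCallbacks
#     def test_create_doc(txbenchmark, soledad_client):
#         yield txbenchmark(soledad_client.create_doc, {'payload': 'content'})
#
# `soledad_client` and its `create_doc` signature are assumptions here.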


@pytest.fixture()
def txbenchmark_with_setup(benchmark):
    def blockOnThreadWithSetup(setup, f):
        def blocking_runner(*args, **kwargs):
            return threads.blockingCallFromThread(reactor, f, *args, **kwargs)

        def blocking_setup():
            args = threads.blockingCallFromThread(reactor, setup)
            try:
                return tuple(arg for arg in args), {}
            except TypeError:
                return ((args,), {}) if args else None

        def bench():
            return benchmark.pedantic(blocking_runner, setup=blocking_setup,
                                      rounds=4, warmup_rounds=1)
        return threads.deferToThread(bench)
    return blockOnThreadWithSetup
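
# Hedged usage sketch: `txbenchmark_with_setup` runs `setup` in the reactor
# thread before each benchmark round and passes its return value(s) as
# arguments to the measured callable `f`, which also runs through the reactor:
#
#     @pytest.inlineCallbacks
#     def test_sync(txbenchmark_with_setup, soledad_client):
#         def setup():
#             return load_documents(soledad_client)  # illustrative helper
#         yield txbenchmark_with_setup(setup, soledad_client.sync)
#
# `soledad_client`, `load_documents` and `sync` are assumptions here.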


#
# resource monitoring
#

@pytest.fixture
def monitored_benchmark(benchmark, request):
    def _monitored_benchmark(fun, *args, **kwargs):
        process = psutil.Process(os.getpid())
        process.cpu_percent()
        benchmark.pedantic(
            fun, args=args, kwargs=kwargs,
            rounds=1, iterations=1, warmup_rounds=0)
        percent = process.cpu_percent()
        # store value in benchmark session, so json output can be updated
        bs = request.config._benchmarksession
        bs.benchmarks[0].stats.cpu_percent = percent
    return _monitored_benchmark
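
# Hedged usage sketch: unlike the fixtures above, `monitored_benchmark` runs a
# single round/iteration and additionally records the CPU usage of the test
# process, attaching it to the stats of the first benchmark in the session so
# it can be exported with --benchmark-json:
#
#     def test_write_many_docs(monitored_benchmark):
#         monitored_benchmark(write_many_docs)  # placeholder callable
#
# The test name and `write_many_docs` are illustrative, not part of this file.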