path: root/tests/benchmarks/test_blobs_fs_backend.py
import pytest
from io import BytesIO
from leap.soledad.server._blobs import FilesystemBlobsBackend
from twisted.internet import defer
from twisted.web.client import FileBodyProducer
from twisted.internet._producer_helpers import _PullToPush
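# NOTE: the ``txbenchmark`` and ``payload`` arguments used below are pytest
# fixtures assumed to be provided by the benchmark suite's conftest.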


def create_write_test(amount, size):
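    """
    Build a benchmark that writes ``amount`` blobs of ``size`` bytes each.
    """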

    @pytest.inlineCallbacks
    @pytest.mark.benchmark(group='test_blobs_fs_backend_write')
    def test(txbenchmark, payload, tmpdir):
        """
        Write many blobs of the same size to the filesystem backend.
        """
        backend = FilesystemBlobsBackend(blobs_path=tmpdir.strpath)
        data = payload(size)
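        # cap concurrency at 100 writes so the benchmark does not open an
        # unbounded number of files at once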
        semaphore = defer.DeferredSemaphore(100)
        deferreds = []
        for i in xrange(amount):
            producer = FileBodyProducer(BytesIO(data))
            d = semaphore.run(backend.write_blob, 'user', str(i), producer)
            deferreds.append(d)
        yield txbenchmark(defer.gatherResults, deferreds)

    return test


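# Concrete write benchmarks: <number of blobs> x <blob size in bytes>.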
test_blobs_fs_backend_write_10_10000k = create_write_test(10, 10000 * 1000)
test_blobs_fs_backend_write_100_1000k = create_write_test(100, 1000 * 1000)
test_blobs_fs_backend_write_1000_100k = create_write_test(1000, 100 * 1000)
test_blobs_fs_backend_write_10000_10k = create_write_test(10000, 10 * 1000)


class DevNull(object):
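    """
    A consumer that discards everything written to it, so reads can be
    timed without the cost of a real destination.
    """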

    def write(self, data):
        pass

    def registerProducer(self, producer, streaming):
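        # wrap the registered pull producer so it is actively driven
        # (pull -> push) and its data flows into this no-op consumer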
        producer = _PullToPush(producer, self)
        producer.startStreaming()

    def unregisterProducer(self):
        pass

    def finish(self):
        pass


def create_read_test(amount, size):
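    """
    Build a benchmark that reads back ``amount`` blobs of ``size`` bytes each.
    """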

    @pytest.inlineCallbacks
    @pytest.mark.benchmark(group='test_blobs_fs_backend_read')
    def test(txbenchmark, payload, tmpdir):
        """
        Read many blobs of the same size from the filesystem backend.
        """
        backend = FilesystemBlobsBackend(blobs_path=tmpdir.strpath)
        data = payload(size)

        # first write blobs to the backend...
        semaphore = defer.DeferredSemaphore(100)
        deferreds = []
        for i in xrange(amount):
            producer = FileBodyProducer(BytesIO(data))
            d = semaphore.run(backend.write_blob, 'user', str(i), producer)
            deferreds.append(d)
        yield defer.gatherResults(deferreds)

        # ... then measure the read operation
        deferreds = []
        for i in xrange(amount):
            consumer = DevNull()
            d = semaphore.run(backend.read_blob, 'user', str(i), consumer)
            deferreds.append(d)
        yield txbenchmark(defer.gatherResults, deferreds)

    return test


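# Concrete read benchmarks: <number of blobs> x <blob size in bytes>.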
test_blobs_fs_backend_read_10_10000k = create_read_test(10, 10000 * 1000)
test_blobs_fs_backend_read_100_1000k = create_read_test(100, 1000 * 1000)
test_blobs_fs_backend_read_1000_100k = create_read_test(1000, 100 * 1000)
test_blobs_fs_backend_read_10000_10k = create_read_test(10000, 10 * 1000)