author     drebs <drebs@leap.se>   2013-01-28 11:02:25 -0200
committer  drebs <drebs@leap.se>   2013-01-28 11:02:25 -0200
commit     0a4548d55b22b21d5d54e88a0cd1e5b118b867f7 (patch)
tree       2ce3ece01fd693852314ed259f6e4c4085502ac1 /tests
parent     72e5b36d7967290db12ceeaa6fb1061924df3668 (diff)
CouchDB tests now run with their own couch instance on a random dir.
Diffstat (limited to 'tests')
 -rw-r--r--  tests/couchdb.ini.template | 222
 -rw-r--r--  tests/test_couch.py        | 116
 2 files changed, 326 insertions(+), 12 deletions(-)
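
In outline, the new test setup does three things: render the ini template
into a fresh random directory, start a private couchdb on port 0, and read
the OS-assigned port back out of the log. A minimal sketch of that pattern
(error handling from the real wrapper is omitted, and a couchdb 1.x binary
is assumed to be on PATH):

    import os
    import re
    import subprocess
    import tempfile
    import time

    # 1. render the config template into a fresh random temp dir
    tempdir = tempfile.mkdtemp(suffix='.couch.test')
    conf = open('tests/couchdb.ini.template').read() % {'tempdir': tempdir}
    conf_path = os.path.join(tempdir, 'test.ini')
    open(conf_path, 'w').write(conf)
    os.mkdir(os.path.join(tempdir, 'lib'))
    os.mkdir(os.path.join(tempdir, 'log'))

    # 2. start couchdb with only that config (-n resets the config
    #    chain, -a adds our file)
    proc = subprocess.Popen(['couchdb', '-n', '-a', conf_path])

    # 3. recover the randomly assigned port from couch.log
    log_path = os.path.join(tempdir, 'log', 'couch.log')
    while not os.path.exists(log_path) or os.stat(log_path).st_size == 0:
        time.sleep(0.01)
    m = re.search(r'http://127\.0\.0\.1:(\d+)', open(log_path).read())
    port = int(m.group(1))
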
diff --git a/tests/couchdb.ini.template b/tests/couchdb.ini.template
new file mode 100644
index 00000000..7d0316f0
--- /dev/null
+++ b/tests/couchdb.ini.template
@@ -0,0 +1,222 @@
+; etc/couchdb/default.ini.tpl. Generated from default.ini.tpl.in by configure.
+
+; Upgrading CouchDB will overwrite this file.
+
+[couchdb]
+database_dir = %(tempdir)s/lib
+view_index_dir = %(tempdir)s/lib
+max_document_size = 4294967296 ; 4 GB
+os_process_timeout = 5000 ; 5 seconds. for view and external servers.
+max_dbs_open = 100
+delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned
+uri_file = %(tempdir)s/lib/couch.uri
+file_compression = snappy
+
+[database_compaction]
+; larger buffer sizes can result in smaller files
+doc_buffer_size = 524288 ; value in bytes
+checkpoint_after = 5242880 ; checkpoint after every N bytes were written
+
+[view_compaction]
+; larger buffer sizes can result in smaller files
+keyvalue_buffer_size = 2097152 ; value in bytes
+
+[httpd]
+port = 0
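+; note: port 0 asks the kernel for a free ephemeral port, so concurrent
+; test instances never collide; test_couch.py below recovers the actual
+; port by parsing couch.log.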
+bind_address = 127.0.0.1
+authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
+default_handler = {couch_httpd_db, handle_request}
+secure_rewrites = true
+vhost_global_handlers = _utils, _uuids, _session, _oauth, _users
+allow_jsonp = false
+; Options for the MochiWeb HTTP server.
+;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
+; For more socket options, consult Erlang's module 'inet' man page.
+;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
+log_max_chunk_size = 1000000
+
+[log]
+file = %(tempdir)s/log/couch.log
+level = info
+include_sasl = true
+
+[couch_httpd_auth]
+authentication_db = _users
+authentication_redirect = /_utils/session.html
+require_valid_user = false
+timeout = 600 ; number of seconds before automatic logout
+auth_cache_size = 50 ; size is number of cache entries
+allow_persistent_cookies = false ; set to true to allow persistent cookies
+
+[couch_httpd_oauth]
+; If set to 'true', oauth token and consumer secrets will be looked up
+; in the authentication database (_users). These secrets are stored in
+; a top level property named "oauth" in user documents. Example:
+; {
+; "_id": "org.couchdb.user:joe",
+; "type": "user",
+; "name": "joe",
+; "password_sha": "fe95df1ca59a9b567bdca5cbaf8412abd6e06121",
+; "salt": "4e170ffeb6f34daecfd814dfb4001a73"
+; "roles": ["foo", "bar"],
+; "oauth": {
+; "consumer_keys": {
+; "consumerKey1": "key1Secret",
+; "consumerKey2": "key2Secret"
+; },
+; "tokens": {
+; "token1": "token1Secret",
+; "token2": "token2Secret"
+; }
+; }
+; }
+use_users_db = false
+
+[query_servers]
+; javascript = %(tempdir)s/server/main.js
+
+
+; Setting reduce_limit to false disables the check that a reduce
+; function's output is smaller than its input.
+; If you think you're hitting reduce_limit with a "good" reduce function,
+; please let us know on the mailing list so we can fine tune the heuristic.
+[query_server_config]
+reduce_limit = true
+os_process_limit = 25
+
+[daemons]
+view_manager={couch_view, start_link, []}
+external_manager={couch_external_manager, start_link, []}
+query_servers={couch_query_servers, start_link, []}
+vhosts={couch_httpd_vhost, start_link, []}
+httpd={couch_httpd, start_link, []}
+stats_aggregator={couch_stats_aggregator, start, []}
+stats_collector={couch_stats_collector, start, []}
+uuids={couch_uuids, start, []}
+auth_cache={couch_auth_cache, start_link, []}
+replication_manager={couch_replication_manager, start_link, []}
+os_daemons={couch_os_daemons, start_link, []}
+compaction_daemon={couch_compaction_daemon, start_link, []}
+
+[httpd_global_handlers]
+/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
+
+_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req}
+_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
+_config = {couch_httpd_misc_handlers, handle_config_req}
+_replicate = {couch_httpd_replicator, handle_req}
+_uuids = {couch_httpd_misc_handlers, handle_uuids_req}
+_restart = {couch_httpd_misc_handlers, handle_restart_req}
+_stats = {couch_httpd_stats_handlers, handle_stats_req}
+_log = {couch_httpd_misc_handlers, handle_log_req}
+_session = {couch_httpd_auth, handle_session_req}
+_oauth = {couch_httpd_oauth, handle_oauth_req}
+
+[httpd_db_handlers]
+_view_cleanup = {couch_httpd_db, handle_view_cleanup_req}
+_compact = {couch_httpd_db, handle_compact_req}
+_design = {couch_httpd_db, handle_design_req}
+_temp_view = {couch_httpd_view, handle_temp_view_req}
+_changes = {couch_httpd_db, handle_changes_req}
+
+; The external module takes an optional argument allowing you to narrow it to a
+; single script. Otherwise the script name is inferred from the first path section
+; after _external's own path.
+; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>}
+; _external = {couch_httpd_external, handle_external_req}
+
+[httpd_design_handlers]
+_view = {couch_httpd_view, handle_view_req}
+_show = {couch_httpd_show, handle_doc_show_req}
+_list = {couch_httpd_show, handle_view_list_req}
+_info = {couch_httpd_db, handle_design_info_req}
+_rewrite = {couch_httpd_rewrite, handle_rewrite_req}
+_update = {couch_httpd_show, handle_doc_update_req}
+
+; enable external as an httpd handler, then link it with commands here.
+; note, this api is still under consideration.
+; [external]
+; mykey = /path/to/mycommand
+
+; Here you can set up commands for CouchDB to manage
+; while it is alive. If a command exits, CouchDB will
+; attempt to restart it.
+; [os_daemons]
+; some_daemon_name = /path/to/script -with args
+
+
+[uuids]
+; Known algorithms:
+; random - 128 bits of random awesome
+; All awesome, all the time.
+; sequential - monotonically increasing ids with random increments
+; First 26 hex characters are random. Last 6 increment in
+; random amounts until an overflow occurs. On overflow, the
+; random prefix is regenerated and the process starts over.
+; utc_random - Time since Jan 1, 1970 UTC with microseconds
+; First 14 characters are the time in hex. Last 18 are random.
+algorithm = sequential
+
+[stats]
+; rate is in milliseconds
+rate = 1000
+; sample intervals are in seconds
+samples = [0, 60, 300, 900]
+
+[attachments]
+compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
+compressible_types = text/*, application/javascript, application/json, application/xml
+
+[replicator]
+db = _replicator
+; Maximum replication retry count can be a non-negative integer or "infinity".
+max_replication_retry_count = 10
+; More worker processes can give higher network throughput but can also
+; imply more disk and network IO.
+worker_processes = 4
+; With lower batch sizes checkpoints are done more frequently. Lower batch sizes
+; also reduce the total amount of RAM used.
+worker_batch_size = 500
+; Maximum number of HTTP connections per replication.
+http_connections = 20
+; HTTP connection timeout per replication.
+; Even for very fast/reliable networks it might need to be increased if a remote
+; database is too busy.
+connection_timeout = 30000
+; If a request fails, the replicator will retry it up to N times.
+retries_per_request = 10
+; Some socket options that might boost performance in some scenarios:
+; {nodelay, boolean()}
+; {sndbuf, integer()}
+; {recbuf, integer()}
+; {priority, integer()}
+; See the `inet` Erlang module's man page for the full list of options.
+socket_options = [{keepalive, true}, {nodelay, false}]
+; Path to a file containing the user's certificate.
+;cert_file = /full/path/to/server_cert.pem
+; Path to file containing user's private PEM encoded key.
+;key_file = /full/path/to/server_key.pem
+; String containing the user's password. Only used if the private keyfile is password protected.
+;password = somepassword
+; Set to true to validate peer certificates.
+verify_ssl_certificates = false
+; File containing a list of peer trusted certificates (in the PEM format).
+;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
+; Maximum peer certificate depth (must be set even if certificate validation is off).
+ssl_certificate_max_depth = 3
+
+[compaction_daemon]
+; The delay, in seconds, between each check for which database and view indexes
+; need to be compacted.
+check_interval = 300
+; If a database or view index file is smaller than this value (in bytes),
+; compaction will not happen. Very small files always have very high
+; fragmentation, so they are not worth compacting.
+min_file_size = 131072
+
+[compactions]
+; List of compaction rules for the compaction daemon.
+
+
+;[admins]
+;testuser = -hashed-f50a252c12615697c5ed24ec5cd56b05d66fe91e,b05471ba260132953930cf9f97f327f5
+; pass for above user is 'testpass'
\ No newline at end of file
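
The %(tempdir)s placeholders above are ordinary Python string-interpolation
keys; nothing in CouchDB itself expands them. The test wrapper below fills
them in with a freshly created directory before handing the file to couchdb,
roughly like this (the path shown is just an example):

    import tempfile

    tempdir = tempfile.mkdtemp(suffix='.couch.test')  # e.g. /tmp/tmpXYZ.couch.test
    template = open('tests/couchdb.ini.template').read()
    conf = template % {'tempdir': tempdir}  # replaces every %(tempdir)s
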
diff --git a/tests/test_couch.py b/tests/test_couch.py
index 6c3d7daf..b5d6378c 100644
--- a/tests/test_couch.py
+++ b/tests/test_couch.py
@@ -16,13 +16,102 @@ except ImportError:
#-----------------------------------------------------------------------------
+# A wrapper for running couchdb locally.
+#-----------------------------------------------------------------------------
+
+import re
+import os
+import shutil
+import tempfile
+import subprocess
+import time
+import unittest
+
+
+class CouchDBWrapper(object):
+    """
+    Wrapper for an external CouchDB instance which is started and stopped
+    for testing.
+    """
+
+    def start(self):
+        self.tempdir = tempfile.mkdtemp(suffix='.couch.test')
+
+        # render the config template, pointing couchdb at the temp dir
+        path = os.path.join(os.path.dirname(__file__),
+                            'couchdb.ini.template')
+        with open(path) as handle:
+            conf = handle.read() % {
+                'tempdir': self.tempdir,
+            }
+
+        confPath = os.path.join(self.tempdir, 'test.ini')
+        with open(confPath, 'w') as handle:
+            handle.write(conf)
+
+        # create the dirs from the template
+        os.mkdir(os.path.join(self.tempdir, 'lib'))
+        os.mkdir(os.path.join(self.tempdir, 'log'))
+        # -n resets the config chain; -a adds our generated config file
+        args = ['couchdb', '-n', '-a', confPath]
+        self.process = subprocess.Popen(
+            args, env=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        # couchdb binds port 0 (see the ini template), so the OS assigns
+        # a free port; poll the log file until it shows up there
+        logPath = os.path.join(self.tempdir, 'log', 'couch.log')
+        while not os.path.exists(logPath):
+            if self.process.poll() is not None:
+                raise Exception("""
+couchdb exited with code %d.
+stdout:
+%s
+stderr:
+%s""" % (
+                    self.process.returncode, self.process.stdout.read(),
+                    self.process.stderr.read()))
+            time.sleep(0.01)
+        while os.stat(logPath).st_size == 0:
+            time.sleep(0.01)
+        PORT_RE = re.compile(
+            r'Apache CouchDB has started on http://127\.0\.0\.1:(?P<port>\d+)')
+
+        with open(logPath) as handle:
+            line = handle.read()
+        m = PORT_RE.search(line)
+        if not m:
+            self.stop()
+            raise Exception("Cannot find port in line %s" % line)
+        self.port = int(m.group('port'))
+
+    def stop(self):
+        # stop the couchdb process and wait for it to exit before
+        # removing its temporary directory
+        self.process.terminate()
+        self.process.wait()
+        shutil.rmtree(self.tempdir)
+
+
+class CouchDBTestCase(unittest.TestCase):
+    """
+    TestCase base class for tests against a real CouchDB server.
+    """
+
+    def setUp(self):
+        self.wrapper = CouchDBWrapper()
+        self.wrapper.start()
+        #self.db = self.wrapper.db
+        super(CouchDBTestCase, self).setUp()
+
+    def tearDown(self):
+        self.wrapper.stop()
+        super(CouchDBTestCase, self).tearDown()
+
+
+#-----------------------------------------------------------------------------
# The following tests come from `u1db.tests.test_common_backend`.
#-----------------------------------------------------------------------------
-class TestCouchBackendImpl(tests.TestCase):
+class TestCouchBackendImpl(CouchDBTestCase):
    def test__allocate_doc_id(self):
-        db = couch.CouchDatabase('http://localhost:5984', 'u1db_tests')
+        db = couch.CouchDatabase('http://localhost:'+str(self.wrapper.port),
+                                 'u1db_tests')
        doc_id1 = db._allocate_doc_id()
        self.assertTrue(doc_id1.startswith('D-'))
        self.assertEqual(34, len(doc_id1))
@@ -35,12 +124,14 @@ class TestCouchBackendImpl(tests.TestCase):
#-----------------------------------------------------------------------------
def make_couch_database_for_test(test, replica_uid):
-    return couch.CouchDatabase('http://localhost:5984', replica_uid,
+    port = str(test.wrapper.port)
+    return couch.CouchDatabase('http://localhost:'+port, replica_uid,
                               replica_uid=replica_uid or 'test')


def copy_couch_database_for_test(test, db):
-    new_db = couch.CouchDatabase('http://localhost:5984',
+    port = str(test.wrapper.port)
+    new_db = couch.CouchDatabase('http://localhost:'+port,
                                 db._replica_uid + '_copy',
                                 replica_uid=db._replica_uid or 'test')
    gen, docs = db.get_all_docs(include_deleted=True)
@@ -61,7 +152,7 @@ COUCH_SCENARIOS = [
]
-class CouchTests(test_backends.AllDatabaseTests):
+class CouchTests(test_backends.AllDatabaseTests, CouchDBTestCase):
    scenarios = COUCH_SCENARIOS
@@ -70,7 +161,7 @@ class CouchTests(test_backends.AllDatabaseTests):
        super(CouchTests, self).tearDown()
-class CouchDatabaseTests(test_backends.LocalDatabaseTests):
+class CouchDatabaseTests(test_backends.LocalDatabaseTests, CouchDBTestCase):
    scenarios = COUCH_SCENARIOS
@@ -80,7 +171,7 @@ class CouchDatabaseTests(test_backends.LocalDatabaseTests):
class CouchValidateGenNTransIdTests(
-        test_backends.LocalDatabaseValidateGenNTransIdTests):
+        test_backends.LocalDatabaseValidateGenNTransIdTests, CouchDBTestCase):
    scenarios = COUCH_SCENARIOS
@@ -90,7 +181,7 @@ class CouchValidateGenNTransIdTests(
class CouchValidateSourceGenTests(
-        test_backends.LocalDatabaseValidateSourceGenTests):
+        test_backends.LocalDatabaseValidateSourceGenTests, CouchDBTestCase):
    scenarios = COUCH_SCENARIOS
@@ -100,7 +191,7 @@ class CouchValidateSourceGenTests(
class CouchWithConflictsTests(
-        test_backends.LocalDatabaseWithConflictsTests):
+        test_backends.LocalDatabaseWithConflictsTests, CouchDBTestCase):
    scenarios = COUCH_SCENARIOS
@@ -113,7 +204,7 @@ class CouchWithConflictsTests(
# the server, so indexing makes no sense. Thus, we ignore index testing for
# now.
-class CouchIndexTests(test_backends.DatabaseIndexTests):
+class CouchIndexTests(test_backends.DatabaseIndexTests, CouchDBTestCase):
    scenarios = COUCH_SCENARIOS
@@ -134,7 +225,8 @@ simple_doc = tests.simple_doc
nested_doc = tests.nested_doc
-class CouchDatabaseSyncTargetTests(test_sync.DatabaseSyncTargetTests):
+class CouchDatabaseSyncTargetTests(test_sync.DatabaseSyncTargetTests,
+                                   CouchDBTestCase):
    scenarios = (tests.multiply_scenarios(COUCH_SCENARIOS, target_scenarios))
@@ -173,7 +265,7 @@ for name, scenario in COUCH_SCENARIOS:
    scenario = dict(scenario)
-class CouchDatabaseSyncTests(test_sync.DatabaseSyncTests):
+class CouchDatabaseSyncTests(test_sync.DatabaseSyncTests, CouchDBTestCase):
    scenarios = sync_scenarios
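
A new test against the per-test instance then follows the same pattern as
TestCouchBackendImpl above; a hypothetical minimal example:

    class MyCouchTest(CouchDBTestCase):

        def test_doc_ids_are_unique(self):
            db = couch.CouchDatabase('http://localhost:' +
                                     str(self.wrapper.port), 'u1db_tests')
            self.assertNotEqual(db._allocate_doc_id(),
                                db._allocate_doc_id())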