author    drebs <drebs@leap.se>  2016-11-10 23:50:30 -0200
committer drebs <drebs@leap.se>  2016-11-10 23:50:30 -0200
commit    c1950b41e0995b0213227bd0ce2c633f312037dc (patch)
tree      7c1fde54442fefd3553d33b3fe5a2ec454e0196b /scripts/docker/files/bin/conf/couchdb_default.ini
parent    507e284773d9c4954225635741f275c5d327e2a9 (diff)
parent    6b23b3f3215f2443aa3e790559b63a41b3040072 (diff)
Merge tag '0.8.1'
0.8.1
Diffstat (limited to 'scripts/docker/files/bin/conf/couchdb_default.ini')
-rw-r--r--  scripts/docker/files/bin/conf/couchdb_default.ini | 361
1 file changed, 361 insertions(+), 0 deletions(-)
diff --git a/scripts/docker/files/bin/conf/couchdb_default.ini b/scripts/docker/files/bin/conf/couchdb_default.ini
new file mode 100644
index 00000000..5ab72d7b
--- /dev/null
+++ b/scripts/docker/files/bin/conf/couchdb_default.ini
@@ -0,0 +1,361 @@
+; etc/couchdb/default.ini.tpl. Generated from default.ini.tpl.in by configure.
+
+; Upgrading CouchDB will overwrite this file.
+[vendor]
+name = The Apache Software Foundation
+version = 1.6.0
+
+[couchdb]
+database_dir = BASEDIR
+view_index_dir = BASEDIR
+util_driver_dir = /usr/lib/x86_64-linux-gnu/couchdb/erlang/lib/couch-1.6.0/priv/lib
+max_document_size = 4294967296 ; 4 GB
+os_process_timeout = 5000 ; 5 seconds. for view and external servers.
+max_dbs_open = 100
+delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned
+uri_file = BASEDIR/couch.uri
+; Method used to compress everything that is appended to database and view index files, except
+; for attachments (see the attachments section). Available methods are:
+;
+; none - no compression
+; snappy - use google snappy, a very fast compressor/decompressor
+; deflate_[N] - use zlib's deflate, N is the compression level which ranges from 1 (fastest,
+; lowest compression ratio) to 9 (slowest, highest compression ratio)
+file_compression = snappy
+
+uuid = bc2f8b84ecb0b13a31cf7f6881a52194
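+; For example, a deployment that favors smaller files over write speed might
+; instead use (illustrative alternative, not this deployment's setting):
+; file_compression = deflate_6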
+; Higher values may give better read performance due to less read operations
+; and/or more OS page cache hits, but they can also increase overall response
+; time for writes when there are many attachment write requests in parallel.
+attachment_stream_buffer_size = 4096
+
+plugin_dir = /usr/lib/x86_64-linux-gnu/couchdb/plugins
+
+[database_compaction]
+; larger buffer sizes can result in smaller files
+doc_buffer_size = 524288 ; value in bytes
+checkpoint_after = 5242880 ; checkpoint after every N bytes were written
+
+[view_compaction]
+; larger buffer sizes can result in smaller files
+keyvalue_buffer_size = 2097152 ; value in bytes
+
+[httpd]
+port = 5984
+bind_address = 127.0.0.1
+authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
+default_handler = {couch_httpd_db, handle_request}
+secure_rewrites = true
+vhost_global_handlers = _utils, _uuids, _session, _oauth, _users
+allow_jsonp = false
+; Options for the MochiWeb HTTP server.
+;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
+; For more socket options, consult Erlang's module 'inet' man page.
+;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
+socket_options = [{recbuf, 262144}, {sndbuf, 262144}]
+log_max_chunk_size = 1000000
+enable_cors = false
+; CouchDB can optionally enforce a maximum uri length;
+; max_uri_length = 8000
+
+[ssl]
+port = 6984
+
+[log]
+file = BASEDIR/couch.log
+level = info
+include_sasl = true
+
+[couch_httpd_auth]
+authentication_db = _users
+authentication_redirect = /_utils/session.html
+require_valid_user = false
+timeout = 600 ; number of seconds before automatic logout
+auth_cache_size = 50 ; size is number of cache entries
+allow_persistent_cookies = false ; set to true to allow persistent cookies
+iterations = 10 ; iterations for password hashing
+; min_iterations = 1
+; max_iterations = 1000000000
+; comma-separated list of public fields, 404 if empty
+; public_fields =
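+; e.g. (illustrative field names): public_fields = name, roles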
+
+[cors]
+credentials = false
+; List of origins separated by a comma, * means accept all
+; Origins must include the scheme: http://example.com
+; You can't set origins = * and credentials = true at the same time.
+;origins = *
+; List of accepted headers separated by a comma
+; headers =
+; List of accepted methods
+; methods =
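+;
+; A minimal sketch of a CORS setup (values are illustrative, not part of
+; this deployment):
+; origins = https://app.example.com
+; headers = Accept, Authorization, Content-Type, Origin
+; methods = GET, PUT, POST, HEAD, DELETE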
+
+
+; Configuration for a vhost
+;[cors:http://example.com]
+; credentials = false
+; List of origins separated by a comma
+; Origins must include the scheme: http://example.com
+; You can't set origins = * and credentials = true at the same time.
+;origins =
+; List of accepted headers separated by a comma
+; headers =
+; List of accepted methods
+; methods =
+
+[couch_httpd_oauth]
+; If set to 'true', oauth token and consumer secrets will be looked up
+; in the authentication database (_users). These secrets are stored in
+; a top level property named "oauth" in user documents. Example:
+; {
+; "_id": "org.couchdb.user:joe",
+; "type": "user",
+; "name": "joe",
+; "password_sha": "fe95df1ca59a9b567bdca5cbaf8412abd6e06121",
+; "salt": "4e170ffeb6f34daecfd814dfb4001a73"
+; "roles": ["foo", "bar"],
+; "oauth": {
+; "consumer_keys": {
+; "consumerKey1": "key1Secret",
+; "consumerKey2": "key2Secret"
+; },
+; "tokens": {
+; "token1": "token1Secret",
+; "token2": "token2Secret"
+; }
+; }
+; }
+use_users_db = false
+
+[query_servers]
+javascript = /usr/bin/couchjs /usr/share/couchdb/server/main.js
+coffeescript = /usr/bin/couchjs /usr/share/couchdb/server/main-coffee.js
+
+
+; Changing reduce_limit to false will disable reduce_limit.
+; If you think you're hitting reduce_limit with a "good" reduce function,
+; please let us know on the mailing list so we can fine tune the heuristic.
+[query_server_config]
+reduce_limit = true
+os_process_limit = 25
+
+[daemons]
+index_server={couch_index_server, start_link, []}
+external_manager={couch_external_manager, start_link, []}
+query_servers={couch_query_servers, start_link, []}
+vhosts={couch_httpd_vhost, start_link, []}
+httpd={couch_httpd, start_link, []}
+stats_aggregator={couch_stats_aggregator, start, []}
+stats_collector={couch_stats_collector, start, []}
+uuids={couch_uuids, start, []}
+auth_cache={couch_auth_cache, start_link, []}
+replicator_manager={couch_replicator_manager, start_link, []}
+os_daemons={couch_os_daemons, start_link, []}
+compaction_daemon={couch_compaction_daemon, start_link, []}
+
+[httpd_global_handlers]
+/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
+favicon.ico = {couch_httpd_misc_handlers, handle_favicon_req, "/usr/share/couchdb/www"}
+
+_utils = {couch_httpd_misc_handlers, handle_utils_dir_req, "/usr/share/couchdb/www"}
+_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req}
+_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
+_config = {couch_httpd_misc_handlers, handle_config_req}
+_replicate = {couch_replicator_httpd, handle_req}
+_uuids = {couch_httpd_misc_handlers, handle_uuids_req}
+_restart = {couch_httpd_misc_handlers, handle_restart_req}
+_stats = {couch_httpd_stats_handlers, handle_stats_req}
+_log = {couch_httpd_misc_handlers, handle_log_req}
+_session = {couch_httpd_auth, handle_session_req}
+_oauth = {couch_httpd_oauth, handle_oauth_req}
+_db_updates = {couch_dbupdates_httpd, handle_req}
+_plugins = {couch_plugins_httpd, handle_req}
+
+[httpd_db_handlers]
+_all_docs = {couch_mrview_http, handle_all_docs_req}
+_changes = {couch_httpd_db, handle_changes_req}
+_compact = {couch_httpd_db, handle_compact_req}
+_design = {couch_httpd_db, handle_design_req}
+_temp_view = {couch_mrview_http, handle_temp_view_req}
+_view_cleanup = {couch_mrview_http, handle_cleanup_req}
+
+; The external module takes an optional argument allowing you to narrow it to a
+; single script. Otherwise the script name is inferred from the first path section
+; after _external's own path.
+; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>}
+; _external = {couch_httpd_external, handle_external_req}
+
+[httpd_design_handlers]
+_compact = {couch_mrview_http, handle_compact_req}
+_info = {couch_mrview_http, handle_info_req}
+_list = {couch_mrview_show, handle_view_list_req}
+_rewrite = {couch_httpd_rewrite, handle_rewrite_req}
+_show = {couch_mrview_show, handle_doc_show_req}
+_update = {couch_mrview_show, handle_doc_update_req}
+_view = {couch_mrview_http, handle_view_req}
+
+; enable external as an httpd handler, then link it with commands here.
+; note, this api is still under consideration.
+; [external]
+; mykey = /path/to/mycommand
+
+; Here you can set up commands for CouchDB to manage
+; while it is alive. It will attempt to restart each command
+; if it exits.
+; [os_daemons]
+; some_daemon_name = /path/to/script -with args
+
+
+[uuids]
+; Known algorithms:
+; random - 128 bits of random awesome
+; All awesome, all the time.
+; sequential - monotonically increasing ids with random increments
+; First 26 hex characters are random. Last 6 increment in
+; random amounts until an overflow occurs. On overflow, the
+; random prefix is regenerated and the process starts over.
+; utc_random - Time since Jan 1, 1970 UTC with microseconds
+; First 14 characters are the time in hex. Last 18 are random.
+; utc_id - Time since Jan 1, 1970 UTC with microseconds, plus utc_id_suffix string
+; First 14 characters are the time in hex. uuids/utc_id_suffix string value is appended to these.
+algorithm = sequential
+; The utc_id_suffix value will be appended to uuids generated by the utc_id algorithm.
+; Replicating instances should have unique utc_id_suffix values to ensure uniqueness of utc_id ids.
+utc_id_suffix =
+; Maximum number of UUIDs retrievable from /_uuids in a single request
+max_count = 1000
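+; e.g. GET /_uuids?count=10 returns ten UUIDs; requests asking for more than
+; max_count are rejected.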
+
+[stats]
+; rate is in milliseconds
+rate = 1000
+; sample intervals are in seconds
+samples = [0, 60, 300, 900]
+
+[attachments]
+compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
+compressible_types = text/*, application/javascript, application/json, application/xml
+
+[replicator]
+db = _replicator
+; Maximum replication retry count can be a non-negative integer or "infinity".
+max_replication_retry_count = 10
+; More worker processes can give higher network throughput but can also
+; imply more disk and network IO.
+worker_processes = 4
+; With lower batch sizes checkpoints are done more frequently. Lower batch sizes
+; also reduce the total amount of RAM used.
+worker_batch_size = 500
+; Maximum number of HTTP connections per replication.
+http_connections = 20
+; HTTP connection timeout per replication.
+; Even for very fast/reliable networks it might need to be increased if a remote
+; database is too busy.
+connection_timeout = 30000
+; If a request fails, the replicator will retry it up to N times.
+retries_per_request = 10
+; Some socket options that might boost performance in some scenarios:
+; {nodelay, boolean()}
+; {sndbuf, integer()}
+; {recbuf, integer()}
+; {priority, integer()}
+; See the `inet` Erlang module's man page for the full list of options.
+socket_options = [{keepalive, true}, {nodelay, false}]
+; Path to a file containing the user's certificate.
+;cert_file = /full/path/to/server_cert.pem
+; Path to file containing user's private PEM encoded key.
+;key_file = /full/path/to/server_key.pem
+; String containing the user's password. Only used if the private keyfile is password protected.
+;password = somepassword
+; Set to true to validate peer certificates.
+verify_ssl_certificates = false
+; File containing a list of peer trusted certificates (in the PEM format).
+;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
+; Maximum peer certificate depth (must be set even if certificate validation is off).
+ssl_certificate_max_depth = 3
+
+[compaction_daemon]
+; The delay, in seconds, between each check for which database and view indexes
+; need to be compacted.
+check_interval = 300
+; If a database or view index file is smaller than this value (in bytes),
+; compaction will not happen. Very small files always have a very high
+; fragmentation, so compacting them is not worthwhile.
+min_file_size = 131072
+
+[compactions]
+; List of compaction rules for the compaction daemon.
+; The daemon compacts databases and their respective view groups when all the
+; condition parameters are satisfied. Configuration can be per database or
+; global, and it has the following format:
+;
+; database_name = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
+; _default = [ {ParamName, ParamValue}, {ParamName, ParamValue}, ... ]
+;
+; Possible parameters:
+;
+; * db_fragmentation - If the ratio (as an integer percentage) of the amount
+;   of old data (and its supporting metadata) over the database
+;   file size is equal to or greater than this value, this
+;   database compaction condition is satisfied (a worked example
+;   follows this parameter list).
+; This value is computed as:
+;
+; (file_size - data_size) / file_size * 100
+;
+; The data_size and file_size values can be obtained when
+; querying a database's information URI (GET /dbname/).
+;
+; * view_fragmentation - If the ratio (as an integer percentage) of the amount
+;   of old data (and its supporting metadata) over the view
+;   index (view group) file size is equal to or greater than
+;   this value, then this view index compaction condition is
+;   satisfied. This value is computed as:
+;
+; (file_size - data_size) / file_size * 100
+;
+; The data_size and file_size values can be obtained when
+; querying a view group's information URI
+; (GET /dbname/_design/groupname/_info).
+;
+; * from _and_ to - The period during which compaction of a database (and its
+;   view groups) is allowed. The value for these parameters must obey the format:
+;
+; HH:MM - HH:MM (HH in [0..23], MM in [0..59])
+;
+; * strict_window - If a compaction is still running after the end of the allowed
+;   period, it will be canceled if this parameter is set to 'true'.
+;   It defaults to 'false' and is meaningful only if the *from* and
+;   *to* period parameters are also specified.
+;
+; * parallel_view_compaction - If set to 'true', the database and its views are
+; compacted in parallel. This is only useful on
+; certain setups, like for example when the database
+; and view index directories point to different
+; disks. It defaults to 'false'.
+;
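+; As a worked example of the fragmentation formula above (numbers are
+; illustrative): a database whose GET /dbname/ info reports
+; file_size = 1048576 and data_size = 262144 has
+; (1048576 - 262144) / 1048576 * 100 = 75, i.e. 75% fragmentation, so a
+; db_fragmentation threshold of "70%" is satisfied.
+;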
+; Before a compaction is triggered, an estimation of how much free disk space is
+; needed is computed. This estimation corresponds to 2 times the data size of
+; the database or view index. When there's not enough free disk space to compact
+; a particular database or view index, a warning message is logged.
+;
+; Examples:
+;
+; 1) foo = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}]
+;    The `foo` database is compacted if its fragmentation is 70% or more.
+; Any view index of this database is compacted only if its fragmentation
+; is 60% or more.
+;
+; 2) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}]
+; Similar to the preceding example but a compaction (database or view index)
+; is only triggered if the current time is between midnight and 4 AM.
+;
+; 3) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}]
+; Similar to the preceding example - a compaction (database or view index)
+; is only triggered if the current time is between midnight and 4 AM. If at
+; 4 AM the database or one of its views is still compacting, the compaction
+; process will be canceled.
+;
+; 4) [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "00:00"}, {to, "04:00"}, {strict_window, true}, {parallel_view_compaction, true}]
+; Similar to the preceding example, but a database and its views can be
+; compacted in parallel.
+;
+;_default = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "23:00"}, {to, "04:00"}]
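+;
+; A hypothetical per-database override (the database name `mydb` is
+; illustrative):
+;mydb = [{db_fragmentation, "60%"}, {view_fragmentation, "50%"}]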