; etc/couchdb/default.ini.tpl. Generated from default.ini.tpl.in by configure.
; Upgrading CouchDB will overwrite this file.

[couchdb]
database_dir = %(tempdir)s/lib
view_index_dir = %(tempdir)s/lib
max_document_size = 4294967296 ; 4 GB
os_process_timeout = 5000 ; 5 seconds, for view and external servers.
max_dbs_open = 100
delayed_commits = true ; set this to false to ensure an fsync before 201 Created is returned
uri_file = %(tempdir)s/lib/couch.uri
file_compression = snappy

[database_compaction]
; larger buffer sizes can result in smaller compacted files
doc_buffer_size = 524288 ; value in bytes
checkpoint_after = 5242880 ; checkpoint after every N bytes were written

[view_compaction]
; larger buffer sizes can result in smaller compacted files
keyvalue_buffer_size = 2097152 ; value in bytes

[httpd]
port = 0
bind_address = 127.0.0.1
authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
default_handler = {couch_httpd_db, handle_request}
secure_rewrites = true
vhost_global_handlers = _utils, _uuids, _session, _oauth, _users
allow_jsonp = false
; Options for the MochiWeb HTTP server.
;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
; For more socket options, consult Erlang's module 'inet' man page.
;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
log_max_chunk_size = 1000000

[log]
file = %(tempdir)s/log/couch.log
level = info
include_sasl = true

[couch_httpd_auth]
authentication_db = _users
authentication_redirect = /_utils/session.html
require_valid_user = false
timeout = 600 ; number of seconds before automatic logout
auth_cache_size = 50 ; size is number of cache entries
allow_persistent_cookies = false ; set to true to allow persistent cookies

[couch_httpd_oauth]
; If set to 'true', oauth token and consumer secrets will be looked up
; in the authentication database (_users). These secrets are stored in
; a top level property named "oauth" in user documents. Example:
; {
;     "_id": "org.couchdb.user:joe",
;     "type": "user",
;     "name": "joe",
;     "password_sha": "fe95df1ca59a9b567bdca5cbaf8412abd6e06121",
;     "salt": "4e170ffeb6f34daecfd814dfb4001a73",
;     "roles": ["foo", "bar"],
;     "oauth": {
;         "consumer_keys": {
;             "consumerKey1": "key1Secret",
;             "consumerKey2": "key2Secret"
;         },
;         "tokens": {
;             "token1": "token1Secret",
;             "token2": "token2Secret"
;         }
;     }
; }
use_users_db = false

[query_servers]
; javascript = %(tempdir)s/server/main.js
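; Each entry in this section maps a language name to the OS command CouchDB
; spawns to evaluate design documents written in that language. As a
; hypothetical example, with the third-party couchdb-python package installed
; (it provides a `couchpy` view server; the path below is illustrative and
; depends on your installation), you could add:
;python = /usr/bin/couchpy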
; Setting reduce_limit to false disables the check on reduce output size.
; If you think you're hitting reduce_limit with a "good" reduce function,
; please let us know on the mailing list so we can fine tune the heuristic.
[query_server_config]
reduce_limit = true
os_process_limit = 25

[daemons]
view_manager={couch_view, start_link, []}
external_manager={couch_external_manager, start_link, []}
query_servers={couch_query_servers, start_link, []}
vhosts={couch_httpd_vhost, start_link, []}
httpd={couch_httpd, start_link, []}
stats_aggregator={couch_stats_aggregator, start, []}
stats_collector={couch_stats_collector, start, []}
uuids={couch_uuids, start, []}
auth_cache={couch_auth_cache, start_link, []}
replication_manager={couch_replication_manager, start_link, []}
os_daemons={couch_os_daemons, start_link, []}
compaction_daemon={couch_compaction_daemon, start_link, []}

[httpd_global_handlers]
/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req}
_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
_config = {couch_httpd_misc_handlers, handle_config_req}
_replicate = {couch_httpd_replicator, handle_req}
_uuids = {couch_httpd_misc_handlers, handle_uuids_req}
_restart = {couch_httpd_misc_handlers, handle_restart_req}
_stats = {couch_httpd_stats_handlers, handle_stats_req}
_log = {couch_httpd_misc_handlers, handle_log_req}
_session = {couch_httpd_auth, handle_session_req}
_oauth = {couch_httpd_oauth, handle_oauth_req}

[httpd_db_handlers]
_view_cleanup = {couch_httpd_db, handle_view_cleanup_req}
_compact = {couch_httpd_db, handle_compact_req}
_design = {couch_httpd_db, handle_design_req}
_temp_view = {couch_httpd_view, handle_temp_view_req}
_changes = {couch_httpd_db, handle_changes_req}

; The external module takes an optional argument allowing you to narrow it to a
; single script. Otherwise the script name is inferred from the first path section
; after _external's own path.
; _mypath = {couch_httpd_external, handle_external_req, <<"mykey">>}
; _external = {couch_httpd_external, handle_external_req}

[httpd_design_handlers]
_view = {couch_httpd_view, handle_view_req}
_show = {couch_httpd_show, handle_doc_show_req}
_list = {couch_httpd_show, handle_view_list_req}
_info = {couch_httpd_db, handle_design_info_req}
_rewrite = {couch_httpd_rewrite, handle_rewrite_req}
_update = {couch_httpd_show, handle_doc_update_req}

; Enable external as an httpd handler, then link it with commands here.
; Note: this API is still under consideration.
; [external]
; mykey = /path/to/mycommand

; Here you can set up commands for CouchDB to manage
; while it is alive. It will attempt to keep each command
; alive if it exits.
; [os_daemons]
; some_daemon_name = /path/to/script -with args

[uuids]
; Known algorithms:
;   random - 128 bits of random awesome
;       All awesome, all the time.
;   sequential - monotonically increasing ids with random increments
;       First 26 hex characters are random. Last 6 increment in
;       random amounts until an overflow occurs. On overflow, the
;       random prefix is regenerated and the process starts over.
;   utc_random - Time since Jan 1, 1970 UTC with microseconds
;       First 14 characters are the time in hex. Last 18 are random.
algorithm = sequential
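; For illustration, hypothetical ids produced by each algorithm (values are
; made up, but follow the formats described above):
;   random:     1c172386078b8a9e3907e5968d1f2ccb
;   sequential: 3f04d83559b4f39a3e3806a45b001e4f, then 3f04d83559b4f39a3e3806a45b00486a
;   utc_random: 0563eb8035c99c3db9274d7dfa000e41 (14 hex time chars + 18 random)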
[stats]
; rate is in milliseconds
rate = 1000
; sample intervals are in seconds
samples = [0, 60, 300, 900]

[attachments]
compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
compressible_types = text/*, application/javascript, application/json, application/xml

[replicator]
db = _replicator
; Maximum replication retry count can be a non-negative integer or "infinity".
max_replication_retry_count = 10
; More worker processes can give higher network throughput but can also
; imply more disk and network IO.
worker_processes = 4
; With lower batch sizes checkpoints are done more frequently. Lower batch
; sizes also reduce the total amount of RAM used.
worker_batch_size = 500
; Maximum number of HTTP connections per replication.
http_connections = 20
; HTTP connection timeout per replication.
; Even for very fast/reliable networks it might need to be increased if a remote
; database is too busy.
connection_timeout = 30000
; If a request fails, the replicator will retry it up to N times.
retries_per_request = 10
; Some socket options that might boost performance in some scenarios:
;   {nodelay, boolean()}
;   {sndbuf, integer()}
;   {recbuf, integer()}
;   {priority, integer()}
; See the `inet` Erlang module's man page for the full list of options.
socket_options = [{keepalive, true}, {nodelay, false}]
; Path to a file containing the user's certificate.
;cert_file = /full/path/to/server_cert.pem
; Path to a file containing the user's private PEM-encoded key.
;key_file = /full/path/to/server_key.pem
; String containing the user's password. Only used if the private key file is password protected.
;password = somepassword
; Set to true to validate peer certificates.
verify_ssl_certificates = false
; File containing a list of peer trusted certificates (in PEM format).
;ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
; Maximum peer certificate depth (must be set even if certificate validation is off).
ssl_certificate_max_depth = 3

[compaction_daemon]
; The delay, in seconds, between each check for which database and view indexes
; need to be compacted.
check_interval = 300
; If a database or view index file is smaller than this value (in bytes),
; compaction will not happen. Very small files always have a very high
; fragmentation, therefore it's not worth compacting them.
min_file_size = 131072

[compactions]
; List of compaction rules for the compaction daemon.

;[admins]
;testuser = -hashed-f50a252c12615697c5ed24ec5cd56b05d66fe91e,b05471ba260132953930cf9f97f327f5
; the password for the user above is 'testpass'
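; Under the 1.x admin password scheme, the -hashed- value is the hex-encoded
; SHA-1 of the password concatenated with the salt, followed by a comma and
; the salt itself. A plaintext password written here instead (for example
; "testuser = testpass") is replaced with its -hashed- equivalent the next
; time CouchDB starts.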