author    Micah <micah@leap.se>    2016-05-24 10:19:27 -0400
committer Micah <micah@leap.se>    2016-05-24 10:19:27 -0400
commit    8f1fd7c7e042539f3095541b8859276e4dad6629 (patch)
tree      864440ba7ce27d8d1fc8d7a504d554bb2fcd2817 /puppet/modules/couchdb/templates
parent    9721c9eeb21f027456b149764d300a11e301c8ee (diff)
parent    af6fdd31fb961fc1b7f408f51001e7a6d192a58a (diff)
Merge commit 'af6fdd31fb961fc1b7f408f51001e7a6d192a58a' as 'puppet/modules/couchdb'
Diffstat (limited to 'puppet/modules/couchdb/templates')
-rw-r--r--  puppet/modules/couchdb/templates/admin.ini.erb          |   9
-rw-r--r--  puppet/modules/couchdb/templates/bigcouch/default.ini   | 172
-rw-r--r--  puppet/modules/couchdb/templates/bigcouch/vm.args       |  32
-rw-r--r--  puppet/modules/couchdb/templates/couchdb-backup.py.erb  |  32
4 files changed, 245 insertions, 0 deletions
diff --git a/puppet/modules/couchdb/templates/admin.ini.erb b/puppet/modules/couchdb/templates/admin.ini.erb
new file mode 100644
index 00000000..479f8bfc
--- /dev/null
+++ b/puppet/modules/couchdb/templates/admin.ini.erb
@@ -0,0 +1,9 @@
+<%- require 'digest' -%>
+[admins]
+admin = <%= @admin_hash %>
+
+[couchdb]
+<%- # The uuid uniquely identifies this CouchDB instance. If not set, CouchDB will pick a random one,
+    # but we want a stable value so that this config file doesn't change on every Puppet run.
+    # An MD5 of hostname and ipaddress seems reasonable, but it could be based on anything. -%>
+uuid = <%= Digest::MD5.hexdigest(Facter.value("hostname") + Facter.value("ipaddress")) %>
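
The rendered uuid is deterministic: it is simply the MD5 hex digest of the node's hostname concatenated with its IP address, so repeated Puppet runs produce the same value. A rough Python sketch of the same computation (the socket calls stand in for the Facter lookups and are an assumption, not part of the module):

import hashlib
import socket

hostname = socket.gethostname()              # stand-in for Facter.value("hostname")
ipaddress = socket.gethostbyname(hostname)   # stand-in for Facter.value("ipaddress")

# Same recipe as the ERB expression above: MD5 over hostname + ipaddress, rendered as hex.
print("uuid = " + hashlib.md5((hostname + ipaddress).encode()).hexdigest())
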
diff --git a/puppet/modules/couchdb/templates/bigcouch/default.ini b/puppet/modules/couchdb/templates/bigcouch/default.ini
new file mode 100644
index 00000000..a315ddab
--- /dev/null
+++ b/puppet/modules/couchdb/templates/bigcouch/default.ini
@@ -0,0 +1,172 @@
+[couchdb]
+database_dir = /opt/bigcouch/var/lib
+view_index_dir = /opt/bigcouch/var/lib
+max_document_size = 67108864
+os_process_timeout = 5000
+max_dbs_open = 500
+delayed_commits = false
+
+[cluster]
+; Default number of shards for a new database
+q = 8
+; Default number of copies of each shard
+n = 3
+
+[chttpd]
+port = 5984
+docroot = /opt/bigcouch/share/www
+
+; Options for the MochiWeb HTTP server.
+;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
+
+; For more socket options, consult Erlang's module 'inet' man page.
+;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
+
+bind_address = <%= scope.lookupvar('couchdb::chttpd_bind_address') %>
+
+[chttps]
+port = 6984
+
+; cert_file = /full/path/to/server_cert.pem
+; key_file = /full/path/to/server_key.pem
+; password = somepassword
+; also remember to enable the chttps daemon in [daemons] section.
+
+; set to true to validate peer certificates
+verify_ssl_certificates = false
+
+; Path to file containing PEM encoded CA certificates (trusted
+; certificates used for verifying a peer certificate). May be omitted if
+; you do not want to verify the peer.
+;cacert_file = /full/path/to/cacertf
+
+; The verification fun (optional). If not specified, the default
+; verification fun will be used.
+;verify_fun = {Module, VerifyFun}
+ssl_certificate_max_depth = 1
+
+[httpd]
+port = 5986
+bind_address = 127.0.0.1
+authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
+default_handler = {couch_httpd_db, handle_request}
+secure_rewrites = true
+vhost_global_handlers = _utils, _uuids, _session, _oauth, _users
+allow_jsonp = false
+log_max_chunk_size = 1000000
+
+[ssl]
+port = 6984
+
+[log]
+file = /opt/bigcouch/var/log/bigcouch.log
+level = info
+include_sasl = true
+
+[couch_httpd_auth]
+authentication_db = _users
+authentication_redirect = /_utils/session.html
+require_valid_user = false
+timeout = 43200 ; number of seconds before automatic logout (defaults to 12 hours)
+auth_cache_size = 50 ; size is number of cache entries
+
+[query_servers]
+javascript = /opt/bigcouch/bin/couchjs /opt/bigcouch/share/couchjs/main.js
+
+[query_server_config]
+reduce_limit = true
+os_process_soft_limit = 100
+
+[daemons]
+view_manager={couch_view, start_link, []}
+external_manager={couch_external_manager, start_link, []}
+query_servers={couch_proc_manager, start_link, []}
+httpd={couch_httpd, start_link, []}
+stats_aggregator={couch_stats_aggregator, start, []}
+stats_collector={couch_stats_collector, start, []}
+uuids={couch_uuids, start, []}
+auth_cache={couch_auth_cache, start_link, []}
+replication_manager={couch_replication_manager, start_link, []}
+vhosts={couch_httpd_vhost, start_link, []}
+os_daemons={couch_os_daemons, start_link, []}
+; Uncomment next line to enable SSL daemon
+; chttpsd = {chttpd, start_link, [https]}
+
+[httpd_global_handlers]
+/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
+favicon.ico = {couch_httpd_misc_handlers, handle_favicon_req, "/opt/bigcouch/share/www"}
+
+_utils = {couch_httpd_misc_handlers, handle_utils_dir_req, "/opt/bigcouch/share/www"}
+_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req}
+_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
+_config = {couch_httpd_misc_handlers, handle_config_req}
+_replicate = {couch_httpd_misc_handlers, handle_replicate_req}
+_uuids = {couch_httpd_misc_handlers, handle_uuids_req}
+_restart = {couch_httpd_misc_handlers, handle_restart_req}
+_stats = {couch_httpd_stats_handlers, handle_stats_req}
+_log = {couch_httpd_misc_handlers, handle_log_req}
+_session = {couch_httpd_auth, handle_session_req}
+_oauth = {couch_httpd_oauth, handle_oauth_req}
+_system = {chttpd_misc, handle_system_req}
+
+[httpd_db_handlers]
+_view_cleanup = {couch_httpd_db, handle_view_cleanup_req}
+_compact = {couch_httpd_db, handle_compact_req}
+_design = {couch_httpd_db, handle_design_req}
+_temp_view = {couch_httpd_view, handle_temp_view_req}
+_changes = {couch_httpd_db, handle_changes_req}
+
+[httpd_design_handlers]
+_view = {couch_httpd_view, handle_view_req}
+_show = {couch_httpd_show, handle_doc_show_req}
+_list = {couch_httpd_show, handle_view_list_req}
+_info = {couch_httpd_db, handle_design_info_req}
+_rewrite = {couch_httpd_rewrite, handle_rewrite_req}
+_update = {couch_httpd_show, handle_doc_update_req}
+
+; enable external as an httpd handler, then link it with commands here.
+; note, this api is still under consideration.
+; [external]
+; mykey = /path/to/mycommand
+
+; Here you can set up commands for CouchDB to manage
+; while it is alive. It will attempt to keep each command
+; alive if it exits.
+; [os_daemons]
+; some_daemon_name = /path/to/script -with args
+
+
+[uuids]
+; Known algorithms:
+; random - 128 bits of random awesome
+; All awesome, all the time.
+; sequential - monotonically increasing ids with random increments
+; First 26 hex characters are random. Last 6 increment in
+; random amounts until an overflow occurs. On overflow, the
+; random prefix is regenerated and the process starts over.
+; utc_random - Time since Jan 1, 1970 UTC with microseconds
+; First 14 characters are the time in hex. Last 18 are random.
+algorithm = sequential
+
+[stats]
+; rate is in milliseconds
+rate = 1000
+; sample intervals are in seconds
+samples = [0, 60, 300, 900]
+
+[attachments]
+compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
+compressible_types = text/*, application/javascript, application/json, application/xml
+
+[replicator]
+db = _replicator
+; Maximum replication retry count can be a non-negative integer or "infinity".
+max_replication_retry_count = 10
+max_http_sessions = 20
+max_http_pipeline_size = 50
+; set to true to validate peer certificates
+verify_ssl_certificates = false
+; file containing a list of peer trusted certificates (PEM format)
+; ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
+; maximum peer certificate depth (must be set even if certificate validation is off)
+ssl_certificate_max_depth = 3
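
Because the [httpd_global_handlers] section above wires up the _config handler on the node-local port (5986, bound to 127.0.0.1 per [httpd]), the rendered settings can be read back from a running node. A minimal sketch, assuming the node is up and the request is accepted as-is (with an admin defined in admin.ini the endpoint may require credentials, which are omitted here):

import json
import urllib2

NODE_URL = "http://127.0.0.1:5986"

for section, key in [("cluster", "q"), ("cluster", "n"), ("couchdb", "max_dbs_open")]:
    # GET /_config/<section>/<key> returns the configured value as a JSON-encoded string.
    value = json.load(urllib2.urlopen("%s/_config/%s/%s" % (NODE_URL, section, key)))
    print("%s/%s = %s" % (section, key, value))
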
diff --git a/puppet/modules/couchdb/templates/bigcouch/vm.args b/puppet/modules/couchdb/templates/bigcouch/vm.args
new file mode 100644
index 00000000..4618a52c
--- /dev/null
+++ b/puppet/modules/couchdb/templates/bigcouch/vm.args
@@ -0,0 +1,32 @@
+# Each node in the system must have a unique name. A name can be short
+# (specified using -sname) or it can be fully qualified (-name). There can be
+# no communication between nodes running with the -sname flag and those running
+# with the -name flag.
+-name bigcouch
+
+# All nodes must share the same magic cookie for distributed Erlang to work.
+# Comment out this line if you synchronized the cookies by other means (using
+# the ~/.erlang.cookie file, for example).
+-setcookie <%= scope.lookupvar('couchdb::bigcouch_cookie') %>
+
+# Tell SASL not to log progress reports
+-sasl errlog_type error
+
+# Use kernel poll functionality if supported by emulator
++K true
+
+# Start a pool of asynchronous IO threads
++A 16
+
+# Comment this line out to enable the interactive Erlang shell on startup
++Bd -noinput
+
+# Read the config files explicitly;
+# otherwise /etc/couchdb/local.d/admin.ini would mysteriously not be read.
+-couch_ini /etc/couchdb/default.ini /etc/couchdb/local.ini /etc/couchdb/local.d/admin.ini /etc/couchdb/default.ini /etc/couchdb/local.ini /etc/couchdb/local.d/admin.ini
+#
+
+# make firewalling easier, see
+# http://stackoverflow.com/questions/8459949/bigcouch-cluster-connection-issue#comment10467603_8463814
+
+-kernel inet_dist_listen_min <%= scope.lookupvar('couchdb::ednp_port') %> inet_dist_use_interface "{127,0,0,1}"
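
The last line pins the Erlang distribution listener to a fixed port and the loopback interface so firewall rules stay simple. A small sketch for verifying which port the node actually registered, assuming epmd is on the PATH and the node is running ("bigcouch" matches the -name flag above):

import re
import subprocess

# "epmd -names" lists every locally registered Erlang node together with its distribution port.
output = subprocess.check_output(["epmd", "-names"]).decode()
match = re.search(r"name bigcouch at port (\d+)", output)
if match:
    print("bigcouch distribution port: " + match.group(1))
else:
    print("bigcouch is not registered with epmd")
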
diff --git a/puppet/modules/couchdb/templates/couchdb-backup.py.erb b/puppet/modules/couchdb/templates/couchdb-backup.py.erb
new file mode 100644
index 00000000..c49df65b
--- /dev/null
+++ b/puppet/modules/couchdb/templates/couchdb-backup.py.erb
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+# file managed by puppet
+
+import os
+import gzip
+import tarfile
+import datetime
+import urllib2
+import simplejson
+import couchdb.tools.dump
+from os.path import join
+
+DB_URL="http://127.0.0.1:5984"
+DUMP_DIR="<%= backupdir %>"
+TODAY=datetime.datetime.today().strftime("%A").lower()
+
+ftar = os.path.join(DUMP_DIR,"%s.tar" % TODAY)
+tmp_ftar = os.path.join(DUMP_DIR,"_%s.tar" % TODAY)
+tar = tarfile.open(tmp_ftar, "w")
+
+databases = simplejson.load(urllib2.urlopen("%s/_all_dbs" % DB_URL))
+
+for db in databases:
+ db_file = os.path.join(DUMP_DIR,"%s.gz" % db)
+ f = gzip.open(db_file, 'wb')
+ couchdb.tools.dump.dump_db(os.path.join(DB_URL,db), output=f)
+ f.close()
+ tar.add(db_file,"%s.gz" % db)
+ os.remove(db_file)
+
+tar.close()
+os.rename(tmp_ftar,ftar)
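
For completeness, a companion sketch (not part of the module) that pulls a single database dump back out of the tarball the script produces, so it can be handed to a restore tool such as couchdb-load. The database name "sessions" and the directory /var/backups/couchdb are hypothetical stand-ins for a real database and the rendered backupdir:

import gzip
import tarfile

DUMP_DIR = "/var/backups/couchdb"   # hypothetical rendering of <%= backupdir %>
DB = "sessions"                     # hypothetical database name

# The backup script stores each database as "<db>.gz" inside "<weekday>.tar".
tar = tarfile.open("%s/monday.tar" % DUMP_DIR, "r")
tar.extract("%s.gz" % DB, path="/tmp")
tar.close()

gz = gzip.open("/tmp/%s.gz" % DB, "rb")
out = open("/tmp/%s.dump" % DB, "wb")
out.write(gz.read())
gz.close()
out.close()
print("wrote /tmp/%s.dump" % DB)
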