-rw-r--r--  platform.rb  4
-rw-r--r--  provider_base/common.json  4
-rw-r--r--  provider_base/lib/macros.rb  14
-rw-r--r--  provider_base/lib/macros/core.rb  86
-rw-r--r--  provider_base/lib/macros/files.rb  79
-rw-r--r--  provider_base/lib/macros/haproxy.rb  69
-rw-r--r--  provider_base/lib/macros/hosts.rb  63
-rw-r--r--  provider_base/lib/macros/nodes.rb  88
-rw-r--r--  provider_base/lib/macros/secrets.rb  39
-rw-r--r--  provider_base/lib/macros/stunnel.rb  95
-rw-r--r--  provider_base/services/_couchdb_master.json  8
-rw-r--r--  provider_base/services/_couchdb_mirror.json  21
-rw-r--r--  provider_base/services/_couchdb_multimaster.json  24
-rw-r--r--  provider_base/services/couchdb.json  24
-rw-r--r--  provider_base/services/couchdb.rb  18
-rw-r--r--  provider_base/services/mx.json  9
-rw-r--r--  provider_base/services/webapp.json  9
-rw-r--r--  puppet/modules/site_couchdb/manifests/add_users.pp  12
-rw-r--r--  puppet/modules/site_couchdb/manifests/bigcouch.pp  34
-rw-r--r--  puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp  2
-rw-r--r--  puppet/modules/site_couchdb/manifests/create_dbs.pp  21
-rw-r--r--  puppet/modules/site_couchdb/manifests/init.pp  84
-rw-r--r--  puppet/modules/site_couchdb/manifests/master.pp  9
-rw-r--r--  puppet/modules/site_couchdb/manifests/mirror.pp  74
-rw-r--r--  puppet/modules/site_couchdb/manifests/setup.pp  46
-rw-r--r--  puppet/modules/site_couchdb/manifests/stunnel.pp  112
-rw-r--r--  puppet/modules/site_haproxy/manifests/init.pp  42
-rw-r--r--  puppet/modules/site_haproxy/templates/couch.erb  32
-rw-r--r--  puppet/modules/site_haproxy/templates/haproxy.cfg.erb  11
-rw-r--r--  puppet/modules/site_haproxy/templates/haproxy_couchdb.cfg.erb  23
-rw-r--r--  puppet/modules/site_mx/manifests/couchdb.pp  23
-rw-r--r--  puppet/modules/site_mx/manifests/init.pp  2
-rw-r--r--  puppet/modules/site_shorewall/manifests/couchdb.pp  24
-rw-r--r--  puppet/modules/site_shorewall/manifests/couchdb/bigcouch.pp  51
-rw-r--r--  puppet/modules/site_shorewall/manifests/couchdb/dnat.pp  21
-rw-r--r--  puppet/modules/site_shorewall/manifests/stunnel/client.pp  40
-rw-r--r--  puppet/modules/site_shorewall/manifests/stunnel/server.pp  22
-rw-r--r--  puppet/modules/site_stunnel/manifests/client.pp  52
-rw-r--r--  puppet/modules/site_stunnel/manifests/clients.pp  52
-rw-r--r--  puppet/modules/site_stunnel/manifests/init.pp  15
-rw-r--r--  puppet/modules/site_stunnel/manifests/servers.pp  50
-rw-r--r--  puppet/modules/site_webapp/manifests/couchdb.pp  14
-rw-r--r--  puppet/modules/tapicero/manifests/init.pp  2
-rw-r--r--  puppet/modules/tapicero/templates/tapicero.yaml.erb  3
44 files changed, 1110 insertions, 417 deletions
diff --git a/platform.rb b/platform.rb
index cd0cbde0..872a34cb 100644
--- a/platform.rb
+++ b/platform.rb
@@ -4,8 +4,8 @@
#
Leap::Platform.define do
- self.version = "0.5.2"
- self.compatible_cli = "1.5.5".."1.99"
+ self.version = "0.5.3"
+ self.compatible_cli = "1.5.8".."1.99"
#
# the facter facts that should be gathered
diff --git a/provider_base/common.json b/provider_base/common.json
index 565633c0..dcd018d8 100644
--- a/provider_base/common.json
+++ b/provider_base/common.json
@@ -42,5 +42,9 @@
"enabled": true,
"mail": {
"smarthost": "= nodes_like_me[:services => :mx].exclude(self).field('domain.full')"
+ },
+ "stunnel": {
+ "clients": {},
+ "servers": {}
}
}
diff --git a/provider_base/lib/macros.rb b/provider_base/lib/macros.rb
new file mode 100644
index 00000000..854b92b5
--- /dev/null
+++ b/provider_base/lib/macros.rb
@@ -0,0 +1,14 @@
+#
+# MACROS
+#
+# The methods in these files are available in the context of a .json configuration file.
+# (The module LeapCli::Macro is included in Config::Object)
+#
+
+require_relative 'macros/core'
+require_relative 'macros/files'
+require_relative 'macros/haproxy'
+require_relative 'macros/hosts'
+require_relative 'macros/nodes'
+require_relative 'macros/secrets'
+require_relative 'macros/stunnel'
diff --git a/provider_base/lib/macros/core.rb b/provider_base/lib/macros/core.rb
new file mode 100644
index 00000000..d4d9171f
--- /dev/null
+++ b/provider_base/lib/macros/core.rb
@@ -0,0 +1,86 @@
+# encoding: utf-8
+
+module LeapCli
+ module Macro
+
+ #
+ # return a fingerprint for a x509 certificate
+ #
+ def fingerprint(filename)
+ "SHA256: " + X509.fingerprint("SHA256", Path.named_path(filename))
+ end
+
+ #
+ # Creates a hash from the ssh key info in users directory, for use in
+ # updating authorized_keys file. Additionally, the 'monitor' public key is
+ # included, which is used by the monitor nodes to run particular commands
+ # remotely.
+ #
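+ # An illustrative result (user name and key invented):
+ #
+ #   {"alice"   => {"type" => "ssh-rsa", "key" => "AAAAB3Nz..."},
+ #    "monitor" => {"type" => "ssh-rsa", "key" => "AAAAB3Nz..."}}
+ #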
+ def authorized_keys
+ hash = {}
+ keys = Dir.glob(Path.named_path([:user_ssh, '*']))
+ keys.sort.each do |keyfile|
+ ssh_type, ssh_key = File.read(keyfile, :encoding => 'UTF-8').strip.split(" ")
+ name = File.basename(File.dirname(keyfile))
+ hash[name] = {
+ "type" => ssh_type,
+ "key" => ssh_key
+ }
+ end
+ ssh_type, ssh_key = File.read(Path.named_path(:monitor_pub_key), :encoding => 'UTF-8').strip.split(" ")
+ hash[Leap::Platform.monitor_username] = {
+ "type" => ssh_type,
+ "key" => ssh_key
+ }
+ hash
+ end
+
+ def assert(assertion)
+ if instance_eval(assertion)
+ true
+ else
+ raise AssertionFailed.new(assertion)
+ end
+ end
+
+ #
+ # applies a JSON partial to this node
+ #
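+ # usage example (from services/couchdb.rb):
+ #
+ #   apply_partial 'services/_couchdb_mirror.json'
+ #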
+ def apply_partial(partial_path)
+ manager.partials(partial_path).each do |partial_data|
+ self.deep_merge!(partial_data)
+ end
+ end
+
+ #
+ # If at first you don't succeed, then it is time to give up.
+ #
+ # try{} returns nil if anything in the block throws an exception.
+ #
+ # You can wrap something that might fail in `try`, like so.
+ #
+ # "= try{ nodes[:services => 'tor'].first.ip_address } "
+ #
+ def try(&block)
+ yield
+ rescue NoMethodError
+ nil
+ end
+
+ protected
+
+ #
+ # returns a node list, if argument is not already one
+ #
+ def listify(node_list)
+ if node_list.is_a? Config::ObjectList
+ node_list
+ elsif node_list.is_a? Config::Object
+ Config::ObjectList.new(node_list)
+ else
+ raise ArgumentError, 'argument must be a node or node list, not a `%s`' % node_list.class, caller
+ end
+ end
+
+ end
+end
diff --git a/provider_base/lib/macros/files.rb b/provider_base/lib/macros/files.rb
new file mode 100644
index 00000000..0a491325
--- /dev/null
+++ b/provider_base/lib/macros/files.rb
@@ -0,0 +1,79 @@
+# encoding: utf-8
+
+##
+## FILES
+##
+
+module LeapCli
+ module Macro
+
+ #
+ # inserts the contents of a file
+ #
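+ # usage example (from webapp.json):
+ #
+ #   "provider": "= file :provider_json_template"
+ #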
+ def file(filename, options={})
+ if filename.is_a? Symbol
+ filename = [filename, @node.name]
+ end
+ filepath = Path.find_file(filename)
+ if filepath
+ if filepath =~ /\.erb$/
+ ERB.new(File.read(filepath, :encoding => 'UTF-8'), nil, '%<>').result(binding)
+ else
+ File.read(filepath, :encoding => 'UTF-8')
+ end
+ else
+ raise FileMissing.new(Path.named_path(filename), options)
+ ""
+ end
+ end
+
+ #
+ # like #file, but allow missing files
+ #
+ def try_file(filename)
+ return file(filename)
+ rescue FileMissing
+ return nil
+ end
+
+ #
+ # returns what the file path will be, once the file is rsynced to the server.
+ # an internal list of discovered file paths is saved, in order to rsync these files when needed.
+ #
+ # notes:
+ #
+ # * argument 'path' is relative to Path.provider/files or Path.provider_base/files
+ # * the path returned by this method is absolute
+ # * the path stored for use later by rsync is relative to Path.provider
+ # * if the path does not exist locally, but exists in provider_base, then the default file from
+ # provider_base is copied locally. this is required for rsync to work correctly.
+ #
+ def file_path(path)
+ if path.is_a? Symbol
+ path = [path, @node.name]
+ end
+ actual_path = Path.find_file(path)
+ if actual_path.nil?
+ Util::log 2, :skipping, "file_path(\"#{path}\") because there is no such file."
+ nil
+ else
+ if actual_path =~ /^#{Regexp.escape(Path.provider_base)}/
+ # if the file is under Path.provider_base, we must copy the default file
+ # to Path.provider in order for rsync to be able to sync the file.
+ local_provider_path = actual_path.sub(/^#{Regexp.escape(Path.provider_base)}/, Path.provider)
+ FileUtils.mkdir_p File.dirname(local_provider_path), :mode => 0700
+ FileUtils.install actual_path, local_provider_path, :mode => 0600
+ Util.log :created, Path.relative_path(local_provider_path)
+ actual_path = local_provider_path
+ end
+ if File.directory?(actual_path) && actual_path !~ /\/$/
+ actual_path += '/' # ensure directories end with /, important for building rsync command
+ end
+ relative_path = Path.relative_path(actual_path)
+ @node.file_paths << relative_path
+ @node.manager.provider.hiera_sync_destination + '/' + relative_path
+ end
+ end
+
+ end
+end \ No newline at end of file
diff --git a/provider_base/lib/macros/haproxy.rb b/provider_base/lib/macros/haproxy.rb
new file mode 100644
index 00000000..c0f9ede5
--- /dev/null
+++ b/provider_base/lib/macros/haproxy.rb
@@ -0,0 +1,69 @@
+# encoding: utf-8
+
+##
+## HAPROXY
+##
+
+module LeapCli
+ module Macro
+
+ #
+ # creates a hash suitable for configuring haproxy. the key is the node name of the server we are proxying to.
+ #
+ # * node_list - a list of nodes to use as the haproxy backends.
+ # * stunnel_clients - contains the mappings to local ports for each server node.
+ # * non_stunnel_port - in case self is included in node_list, the port to connect to.
+ #
+ # a weight of 100 is used for nodes in the same location,
+ # 10 otherwise.
+ #
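+ # An illustrative result (node name invented):
+ #
+ #   {"couch1" => {"backup" => false, "host" => "localhost",
+ #                 "port" => 4000, "weight" => 100, "writable" => true}}
+ #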
+ def haproxy_servers(node_list, stunnel_clients, non_stunnel_port=nil)
+ default_weight = 10
+ local_weight = 100
+
+ # record the hosts_file
+ hostnames(node_list)
+
+ # create a simple map for node name -> local stunnel accept port
+ accept_ports = stunnel_clients.inject({}) do |hsh, stunnel_entry|
+ name = stunnel_entry.first.sub /_[0-9]+$/, ''
+ hsh[name] = stunnel_entry.last['accept_port']
+ hsh
+ end
+
+ # if one of the nodes in the node list is ourself, then there will not be a stunnel to it,
+ # but we need to include it anyway in the haproxy config.
+ if node_list[self.name] && non_stunnel_port
+ accept_ports[self.name] = non_stunnel_port
+ end
+
+ # create the first pass of the servers hash
+ servers = node_list.values.inject(Config::ObjectList.new) do |hsh, node|
+ weight = default_weight
+ try {
+ weight = local_weight if self.location.name == node.location.name
+ }
+ hsh[node.name] = Config::Object[
+ 'backup', false,
+ 'host', 'localhost',
+ 'port', accept_ports[node.name] || 0,
+ 'weight', weight
+ ]
+ if node.services.include?('couchdb')
+ hsh[node.name]['writable'] = node.couch.mode != 'mirror'
+ end
+ hsh
+ end
+
+ # if there are some local servers, make the others backup
+ if servers.detect{|k,v| v.weight == local_weight}
+ servers.each do |k,server|
+ server['backup'] = server['weight'] == default_weight
+ end
+ end
+
+ return servers
+ end
+
+ end
+end \ No newline at end of file
diff --git a/provider_base/lib/macros/hosts.rb b/provider_base/lib/macros/hosts.rb
new file mode 100644
index 00000000..8a4058a5
--- /dev/null
+++ b/provider_base/lib/macros/hosts.rb
@@ -0,0 +1,63 @@
+# encoding: utf-8
+
+module LeapCli
+ module Macro
+
+ ##
+ ## HOSTS
+ ##
+
+ #
+ # records the list of hosts that are encountered for this node
+ #
+ def hostnames(nodes)
+ @referenced_nodes ||= Config::ObjectList.new
+ nodes = listify(nodes)
+ nodes.each_node do |node|
+ @referenced_nodes[node.name] ||= node
+ end
+ return nodes.values.collect {|node| node.domain.name}
+ end
+
+ #
+ # Generates entries needed for updating /etc/hosts on a node (as a hash).
+ #
+ # Argument `nodes` can be nil or a list of nodes. If nil, only include the
+ # IPs of the other nodes this @node has encountered (plus all mx nodes).
+ #
+ # Also, for virtual machines, we use the local address if this @node is in
+ # the same location as the node in question.
+ #
+ # We include the ssh public key for each host, so that the hash can also
+ # be used to generate the /etc/ssh/known_hosts
+ #
+ def hosts_file(nodes=nil)
+ if nodes.nil?
+ if @referenced_nodes && @referenced_nodes.any?
+ nodes = @referenced_nodes
+ nodes = nodes.merge(nodes_like_me[:services => 'mx']) # all nodes always need to communicate with mx nodes.
+ end
+ end
+ return {} unless nodes
+ hosts = {}
+ my_location = @node['location'] ? @node['location']['name'] : nil
+ nodes.each_node do |node|
+ hosts[node.name] = {'ip_address' => node.ip_address, 'domain_internal' => node.domain.internal, 'domain_full' => node.domain.full}
+ node_location = node['location'] ? node['location']['name'] : nil
+ if my_location == node_location
+ if facts = @node.manager.facts[node.name]
+ if facts['ec2_public_ipv4']
+ hosts[node.name]['ip_address'] = facts['ec2_public_ipv4']
+ end
+ end
+ end
+ host_pub_key = Util::read_file([:node_ssh_pub_key,node.name])
+ if host_pub_key
+ hosts[node.name]['host_pub_key'] = host_pub_key
+ end
+ end
+ hosts
+ end
+
+ end
+end \ No newline at end of file
diff --git a/provider_base/lib/macros/nodes.rb b/provider_base/lib/macros/nodes.rb
new file mode 100644
index 00000000..0c6668a0
--- /dev/null
+++ b/provider_base/lib/macros/nodes.rb
@@ -0,0 +1,88 @@
+# encoding: utf-8
+
+##
+## node related macros
+##
+
+module LeapCli
+ module Macro
+
+ #
+ # the list of all the nodes
+ #
+ def nodes
+ global.nodes
+ end
+
+ #
+ # grab an environment appropriate provider
+ #
+ def provider
+ global.env(@node.environment).provider
+ end
+
+ #
+ # returns a list of nodes that match the same environment
+ #
+ # if @node.environment is not set, we return other nodes
+ # where environment is not set.
+ #
+ def nodes_like_me
+ nodes[:environment => @node.environment]
+ end
+
+ #
+ # returns a list of nodes that match the location name
+ # and environment of @node.
+ #
+ def nodes_near_me
+ if @node['location'] && @node['location']['name']
+ nodes_like_me['location.name' => @node.location.name]
+ else
+ nodes_like_me['location' => nil]
+ end
+ end
+
+ #
+ # picks a node out from the node list in such a way that:
+ #
+ # (1) the record of which node each caller picked is saved in secrets.json
+ # (2) when other nodes call this macro with the same node list, they are guaranteed to get a different node
+ # (3) once all the nodes in the node list have been picked, remaining callers are assigned nodes randomly.
+ #
+ # if the node_list is empty, an exception is raised.
+ # if node_list size is 1, then that node is returned and nothing is
+ # memorized via the secrets.json file.
+ #
+ # `label` is needed to distinguish between pools of nodes for different purposes.
+ #
+ # TODO: more evenly balance after all the nodes have been picked.
+ #
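+ # usage example (from _couchdb_mirror.json):
+ #
+ #   "= pick_node(:couch_master, nodes_like_me['services' => 'couchdb']['couch.master' => true])"
+ #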
+ def pick_node(label, node_list)
+ if node_list.any?
+ if node_list.size == 1
+ return node_list.values.first
+ else
+ secrets_key = "pick_node(:#{label},#{node_list.keys.sort.join(',')})"
+ secrets_value = @manager.secrets.retrieve(secrets_key, @node.environment) || {}
+ secrets_value[@node.name] ||= begin
+ node_to_pick = nil
+ node_list.each_node do |node|
+ next if secrets_value.values.include?(node.name)
+ node_to_pick = node.name
+ end
+ node_to_pick ||= secrets_value.values.shuffle.first # all picked already, so pick a random one.
+ node_to_pick
+ end
+ picked_node_name = secrets_value[@node.name]
+ @manager.secrets.set(secrets_key, secrets_value, @node.environment)
+ return node_list[picked_node_name]
+ end
+ else
+ raise ArgumentError.new('pick_node(node_list): node_list cannot be empty')
+ end
+ end
+
+ end
+end \ No newline at end of file
diff --git a/provider_base/lib/macros/secrets.rb b/provider_base/lib/macros/secrets.rb
new file mode 100644
index 00000000..51bf3971
--- /dev/null
+++ b/provider_base/lib/macros/secrets.rb
@@ -0,0 +1,39 @@
+# encoding: utf-8
+
+require 'base32'
+
+module LeapCli
+ module Macro
+
+ #
+ # inserts a named secret, generating it if needed.
+ #
+ # manager.export_secrets should be called later to capture any newly generated secrets.
+ #
+ # +length+ is the character length of the generated password.
+ #
+ def secret(name, length=32)
+ @manager.secrets.set(name, Util::Secret.generate(length), @node[:environment])
+ end
+
+ # inserts a base32 encoded secret
+ def base32_secret(name, length=20)
+ @manager.secrets.set(name, Base32.encode(Util::Secret.generate(length)), @node[:environment])
+ end
+
+ # picks a random port (e.g. for obfsproxy) from the given range and memorizes it as a secret
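+ # usage (hypothetical name and range): "= rand_range :obfs_port, 18000..32000"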
+ def rand_range(name, range)
+ @manager.secrets.set(name, rand(range), @node[:environment])
+ end
+
+ #
+ # inserts a hexadecimal secret string, generating it if needed.
+ #
+ # +bit_length+ is the number of bits in the secret (i.e. the length of the resulting hex string will be bit_length/4)
+ #
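+ # usage example (from couchdb.json):
+ #
+ #   "salt": "= hex_secret :couch_webapp_password_salt, 128"
+ #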
+ def hex_secret(name, bit_length=128)
+ @manager.secrets.set(name, Util::Secret.generate_hex(bit_length), @node[:environment])
+ end
+
+ end
+end \ No newline at end of file
diff --git a/provider_base/lib/macros/stunnel.rb b/provider_base/lib/macros/stunnel.rb
new file mode 100644
index 00000000..f16308c7
--- /dev/null
+++ b/provider_base/lib/macros/stunnel.rb
@@ -0,0 +1,95 @@
+##
+## STUNNEL
+##
+
+#
+# About stunnel
+# --------------------------
+#
+# The network looks like this:
+#
+# From the client's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# consumer app -> localhost:accept_port -> connect:connect_port -> ??
+#
+# From the server's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# ?? -> *:accept_port -> localhost:connect_port -> service
+#
+
+module LeapCli
+ module Macro
+
+ #
+ # stunnel configuration for the client side.
+ #
+ # +node_list+ is a ObjectList of nodes running stunnel servers.
+ #
+ # +port+ is the real port of the ultimate service running on the servers
+ # that the client wants to connect to.
+ #
+ # * accept_port is the port on localhost to which local clients
+ # can connect. it is auto generated serially.
+ #
+ # * connect_port is the port on the stunnel server to connect to.
+ # it is auto generated from the +port+ argument.
+ #
+ # generates an entry appropriate to be passed directly to
+ # create_resources(stunnel::service, hiera('..'), defaults)
+ #
+ # local ports are automatically generated, starting at 4000
+ # and incrementing in sorted order (by node name).
+ #
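+ # An illustrative call and result (node name invented):
+ #
+ #   stunnel_client(nodes[:services => :couchdb], 5984)
+ #   # => {"couch1_5984" => {"accept_port" => 4000,
+ #   #      "connect" => "couch1.bitmask.i",
+ #   #      "connect_port" => 15984,
+ #   #      "original_port" => 5984}}
+ #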
+ def stunnel_client(node_list, port, options={})
+ @next_stunnel_port ||= 4000
+ node_list = listify(node_list)
+ hostnames(node_list) # record the hosts
+ result = Config::ObjectList.new
+ node_list.each_node do |node|
+ if node.name != self.name || options[:include_self]
+ result["#{node.name}_#{port}"] = Config::Object[
+ 'accept_port', @next_stunnel_port,
+ 'connect', node.domain.internal,
+ 'connect_port', stunnel_port(port),
+ 'original_port', port
+ ]
+ @next_stunnel_port += 1
+ end
+ end
+ result
+ end
+
+ #
+ # generates a stunnel server entry.
+ #
+ # +port+ is the real port of the targeted service.
+ #
+ # * `accept_port` is the publicly bound port
+ # * `connect_port` is the port that the local service is running on.
+ #
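+ # e.g. stunnel_server(5984) # => {"accept_port" => 15984, "connect_port" => 5984}
+ #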
+ def stunnel_server(port)
+ {
+ "accept_port" => stunnel_port(port),
+ "connect_port" => port
+ }
+ end
+
+ private
+
+ #
+ # maps a real port to a stunnel port (used as the connect_port in the client config
+ # and the accept_port in the server config)
+ #
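+ # e.g. stunnel_port(5984) => 15984; stunnel_port(60000) => 50000
+ #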
+ def stunnel_port(port)
+ port = port.to_i
+ if port < 50000
+ return port + 10000
+ else
+ return port - 10000
+ end
+ end
+
+ end
+end \ No newline at end of file
diff --git a/provider_base/services/_couchdb_master.json b/provider_base/services/_couchdb_master.json
new file mode 100644
index 00000000..20c6f99b
--- /dev/null
+++ b/provider_base/services/_couchdb_master.json
@@ -0,0 +1,8 @@
+//
+// Applied to master couchdb node when there is a single master
+//
+{
+ "couch": {
+ "mode": "master"
+ }
+} \ No newline at end of file
diff --git a/provider_base/services/_couchdb_mirror.json b/provider_base/services/_couchdb_mirror.json
new file mode 100644
index 00000000..6a3402bd
--- /dev/null
+++ b/provider_base/services/_couchdb_mirror.json
@@ -0,0 +1,21 @@
+//
+// Applied to all non-master couchdb nodes
+//
+{
+ "stunnel": {
+ "clients": {
+ "couch_client": "= stunnel_client(nodes[couch.replication.masters.keys], couch.port)"
+ }
+ },
+ "couch": {
+ "mode": "mirror",
+ "replication": {
+ // for now, pick the first nearby master, or failing that, the first master.
+ // in the future, maybe use haproxy to balance among all the masters
+ "masters": "= try{pick_node(:couch_master,nodes_near_me['services' => 'couchdb']['couch.master' => true]).pick_fields('domain.internal', 'couch.port')} || try{pick_node(:couch_master,nodes_like_me['services' => 'couchdb']['couch.master' => true]).pick_fields('domain.internal', 'couch.port')}",
+ "username": "replication",
+ "password": "= secret :couch_replication_password",
+ "role": "replication"
+ }
+ }
+}
diff --git a/provider_base/services/_couchdb_multimaster.json b/provider_base/services/_couchdb_multimaster.json
new file mode 100644
index 00000000..8c433188
--- /dev/null
+++ b/provider_base/services/_couchdb_multimaster.json
@@ -0,0 +1,24 @@
+//
+// Only applied to master couchdb nodes when there are multiple masters
+//
+{
+ "stunnel": {
+ "servers": {
+ "epmd_server": "= stunnel_server(couch.bigcouch.epmd_port)",
+ "ednp_server": "= stunnel_server(couch.bigcouch.ednp_port)"
+ },
+ "clients": {
+ "epmd_clients": "= stunnel_client(nodes_like_me[:services => :couchdb], couch.bigcouch.epmd_port)",
+ "ednp_clients": "= stunnel_client(nodes_like_me[:services => :couchdb], couch.bigcouch.ednp_port)"
+ }
+ },
+ "couch": {
+ "mode": "multimaster",
+ "bigcouch": {
+ "epmd_port": 4369,
+ "ednp_port": 9002,
+ "cookie": "= secret :bigcouch_cookie",
+ "neighbors": "= nodes_like_me['services' => 'couchdb']['couch.master' => true].exclude(self).field('domain.full')"
+ }
+ }
+}
diff --git a/provider_base/services/couchdb.json b/provider_base/services/couchdb.json
index 5f1b5381..8b1386f8 100644
--- a/provider_base/services/couchdb.json
+++ b/provider_base/services/couchdb.json
@@ -3,20 +3,13 @@
"use": true
},
"stunnel": {
- "couch_server": "= stunnel_server(couch.port)",
- "epmd_server": "= stunnel_server(couch.bigcouch.epmd_port)",
- "epmd_clients": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.bigcouch.epmd_port)",
- "ednp_server": "= stunnel_server(couch.bigcouch.ednp_port)",
- "ednp_clients": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.bigcouch.ednp_port)"
+ "servers": {
+ "couch_server": "= stunnel_server(couch.port)"
+ }
},
"couch": {
+ "master": false,
"port": 5984,
- "bigcouch": {
- "epmd_port": 4369,
- "ednp_port": 9002,
- "cookie": "= secret :bigcouch_cookie",
- "neighbors": "= nodes_like_me[:services => :couchdb].exclude(self).field('domain.full')"
- },
"users": {
"admin": {
"username": "admin",
@@ -47,10 +40,15 @@
"username": "webapp",
"password": "= secret :couch_webapp_password",
"salt": "= hex_secret :couch_webapp_password_salt, 128"
+ },
+ "replication": {
+ "username": "replication",
+ "password": "= secret :couch_replication_password",
+ "salt": "= hex_secret :couch_replication_password_salt, 128"
}
},
- "webapp": {
- "nagios_test_pw": "= secret :nagios_test_password"
+ "webapp": {
+ "nagios_test_pw": "= secret :nagios_test_password"
}
}
}
diff --git a/provider_base/services/couchdb.rb b/provider_base/services/couchdb.rb
new file mode 100644
index 00000000..c63e3a00
--- /dev/null
+++ b/provider_base/services/couchdb.rb
@@ -0,0 +1,18 @@
+#
+# custom logic for couchdb json resolution
+#
+
+unless nodes_like_me['services' => 'couchdb']['couch.master' => true].any?
+ #raise 'node `%s`, environment `%s`: there must be at least one node with couch.master set to `true` for this environment.' % [@node.name, @node.environment]
+end
+
+if couch.master
+ if nodes_like_me['services' => 'couchdb']['couch.master' => true].size > 1
+ apply_partial 'services/_couchdb_multimaster.json'
+ else
+ apply_partial 'services/_couchdb_master.json'
+ end
+else
+ apply_partial 'services/_couchdb_mirror.json'
+end
+
diff --git a/provider_base/services/mx.json b/provider_base/services/mx.json
index 30a19d9a..1f0e613e 100644
--- a/provider_base/services/mx.json
+++ b/provider_base/services/mx.json
@@ -1,9 +1,14 @@
{
"stunnel": {
- "couch_client": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.port)"
+ "clients": {
+ "couch_client": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.port)"
+ }
},
"haproxy": {
- "servers": "= haproxy_servers(nodes_like_me[:services => :couchdb], stunnel.couch_client)"
+ "couch": {
+ "listen_port": 4096,
+ "servers": "= haproxy_servers(nodes_like_me[:services => :couchdb], stunnel.clients.couch_client)"
+ }
},
"couchdb_leap_mx_user": {
"username": "= global.services[:couchdb].couch.users[:leap_mx].username",
diff --git a/provider_base/services/webapp.json b/provider_base/services/webapp.json
index d268a020..1b550af9 100644
--- a/provider_base/services/webapp.json
+++ b/provider_base/services/webapp.json
@@ -32,10 +32,15 @@
]
},
"stunnel": {
- "couch_client": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.port)"
+ "clients": {
+ "couch_client": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.port)"
+ }
},
"haproxy": {
- "servers": "= haproxy_servers(nodes_like_me[:services => :couchdb], stunnel.couch_client, global.services[:couchdb].couch.port)"
+ "couch": {
+ "listen_port": 4096,
+ "servers": "= haproxy_servers(nodes_like_me[:services => :couchdb], stunnel.clients.couch_client, global.services[:couchdb].couch.port)"
+ }
},
"definition_files": {
"provider": "= file :provider_json_template",
diff --git a/puppet/modules/site_couchdb/manifests/add_users.pp b/puppet/modules/site_couchdb/manifests/add_users.pp
index f9ea7349..2f734ed4 100644
--- a/puppet/modules/site_couchdb/manifests/add_users.pp
+++ b/puppet/modules/site_couchdb/manifests/add_users.pp
@@ -1,5 +1,8 @@
class site_couchdb::add_users {
+ Class['site_couchdb::create_dbs']
+ -> Class['site_couchdb::add_users']
+
# Couchdb users
## leap_mx couchdb user
@@ -51,4 +54,13 @@ class site_couchdb::add_users {
require => Couchdb::Query::Setup['localhost']
}
+ ## replication couchdb user
+ ## read/write: all databases for replication
+ couchdb::add_user { $site_couchdb::couchdb_replication_user:
+ roles => '["replication"]',
+ pw => $site_couchdb::couchdb_replication_pw,
+ salt => $site_couchdb::couchdb_replication_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
}
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch.pp b/puppet/modules/site_couchdb/manifests/bigcouch.pp
new file mode 100644
index 00000000..f0aab734
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/bigcouch.pp
@@ -0,0 +1,34 @@
+class site_couchdb::bigcouch {
+
+ $config = $couchdb_config['bigcouch']
+ $cookie = $config['cookie']
+ $ednp_port = $config['ednp_port']
+
+ class { 'couchdb':
+ admin_pw => $couchdb_admin_pw,
+ admin_salt => $couchdb_admin_salt,
+ bigcouch => true,
+ bigcouch_cookie => $cookie,
+ ednp_port => $ednp_port,
+ chttpd_bind_address => '127.0.0.1'
+ }
+
+ #
+ # stunnel must be running correctly before the bigcouch dbs can be set up.
+ #
+ Class['site_config::default']
+ -> Class['couchdb::bigcouch::package::cloudant']
+ -> Service['shorewall']
+ -> Service['stunnel']
+ -> Class['site_couchdb::setup']
+ -> Class['site_couchdb::bigcouch::add_nodes']
+ -> Class['site_couchdb::bigcouch::settle_cluster']
+
+ include site_couchdb::bigcouch::add_nodes
+ include site_couchdb::bigcouch::settle_cluster
+ include site_couchdb::bigcouch::compaction
+
+ file { '/var/log/bigcouch':
+ ensure => directory
+ }
+}
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp b/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp
index 97e85785..c8c43275 100644
--- a/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp
+++ b/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp
@@ -1,6 +1,6 @@
class site_couchdb::bigcouch::add_nodes {
# loop through neighbors array and add nodes
- $nodes = $::site_couchdb::bigcouch_config['neighbors']
+ $nodes = $::site_couchdb::bigcouch::config['neighbors']
couchdb::bigcouch::add_node { $nodes:
require => Couchdb::Query::Setup['localhost']
diff --git a/puppet/modules/site_couchdb/manifests/create_dbs.pp b/puppet/modules/site_couchdb/manifests/create_dbs.pp
index 41500d3a..4322f773 100644
--- a/puppet/modules/site_couchdb/manifests/create_dbs.pp
+++ b/puppet/modules/site_couchdb/manifests/create_dbs.pp
@@ -1,11 +1,14 @@
class site_couchdb::create_dbs {
+ Class['site_couchdb::setup']
+ -> Class['site_couchdb::create_dbs']
+
# Couchdb databases
### customer database
### r/w: webapp,
couchdb::create_db { 'customers':
- members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [] }",
+ members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [\"replication\"] }",
require => Couchdb::Query::Setup['localhost']
}
@@ -13,35 +16,35 @@ class site_couchdb::create_dbs {
## r: nickserver, leap_mx - needs to be restrict with design document
## r/w: webapp
couchdb::create_db { 'identities':
- members => "{ \"names\": [], \"roles\": [\"identities\"] }",
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"identities\"] }",
require => Couchdb::Query::Setup['localhost']
}
## keycache database
## r/w: nickserver
couchdb::create_db { 'keycache':
- members => "{ \"names\": [], \"roles\": [\"keycache\"] }",
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"keycache\"] }",
require => Couchdb::Query::Setup['localhost']
}
## sessions database
## r/w: webapp
couchdb::create_db { 'sessions':
- members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [] }",
+ members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [\"replication\"] }",
require => Couchdb::Query::Setup['localhost']
}
## shared database
## r/w: soledad
couchdb::create_db { 'shared':
- members => "{ \"names\": [\"$site_couchdb::couchdb_soledad_user\"], \"roles\": [] }",
+ members => "{ \"names\": [\"$site_couchdb::couchdb_soledad_user\"], \"roles\": [\"replication\"] }",
require => Couchdb::Query::Setup['localhost']
}
## tickets database
## r/w: webapp
couchdb::create_db { 'tickets':
- members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [] }",
+ members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [\"replication\"] }",
require => Couchdb::Query::Setup['localhost']
}
@@ -49,14 +52,14 @@ class site_couchdb::create_dbs {
## r: soledad - needs to be restricted with a design document
## r/w: webapp
couchdb::create_db { 'tokens':
- members => "{ \"names\": [], \"roles\": [\"tokens\"] }",
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"tokens\"] }",
require => Couchdb::Query::Setup['localhost']
}
## users database
## r/w: webapp
couchdb::create_db { 'users':
- members => "{ \"names\": [], \"roles\": [\"users\"] }",
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"users\"] }",
require => Couchdb::Query::Setup['localhost']
}
@@ -64,7 +67,7 @@ class site_couchdb::create_dbs {
## store messages to the clients such as payment reminders
## r/w: webapp
couchdb::create_db { 'messages':
- members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [] }",
+ members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [\"replication\"] }",
require => Couchdb::Query::Setup['localhost']
}
}
diff --git a/puppet/modules/site_couchdb/manifests/init.pp b/puppet/modules/site_couchdb/manifests/init.pp
index 3614661d..6f7e974e 100644
--- a/puppet/modules/site_couchdb/manifests/init.pp
+++ b/puppet/modules/site_couchdb/manifests/init.pp
@@ -34,85 +34,35 @@ class site_couchdb {
$couchdb_webapp_pw = $couchdb_webapp['password']
$couchdb_webapp_salt = $couchdb_webapp['salt']
- $couchdb_backup = $couchdb_config['backup']
-
- $bigcouch_config = $couchdb_config['bigcouch']
- $bigcouch_cookie = $bigcouch_config['cookie']
+ $couchdb_replication = $couchdb_users['replication']
+ $couchdb_replication_user= $couchdb_replication['username']
+ $couchdb_replication_pw = $couchdb_replication['password']
+ $couchdb_replication_salt= $couchdb_replication['salt']
- $ednp_port = $bigcouch_config['ednp_port']
-
- class { 'couchdb':
- bigcouch => true,
- admin_pw => $couchdb_admin_pw,
- admin_salt => $couchdb_admin_salt,
- bigcouch_cookie => $bigcouch_cookie,
- ednp_port => $ednp_port,
- chttpd_bind_address => '127.0.0.1'
- }
+ $couchdb_backup = $couchdb_config['backup']
+ $couchdb_mode = $couchdb_config['mode']
- # ensure that we don't have leftovers from previous installations
- # where we installed the cloudant bigcouch package
- # https://leap.se/code/issues/4971
- class { 'couchdb::bigcouch::package::cloudant':
- ensure => absent
- }
+ if $couchdb_mode == "multimaster" { include site_couchdb::bigcouch }
+ if $couchdb_mode == "master" { include site_couchdb::master }
+ if $couchdb_mode == "mirror" { include site_couchdb::mirror }
Class['site_config::default']
- -> Class['couchdb::bigcouch::package::cloudant']
-> Service['shorewall']
- -> Class['site_couchdb::stunnel']
- -> Service['couchdb']
- -> File['/root/.netrc']
- -> Class['site_couchdb::bigcouch::add_nodes']
- -> Class['site_couchdb::bigcouch::settle_cluster']
- -> Class['site_couchdb::create_dbs']
- -> Class['site_couchdb::add_users']
-
- # /etc/couchdb/couchdb.netrc is deployed by couchdb::query::setup
- # we symlink this to /root/.netrc for couchdb_scripts (eg. backup)
- # and makes life easier for the admin (i.e. using curl/wget without
- # passing credentials)
- file {
- '/root/.netrc':
- ensure => link,
- target => '/etc/couchdb/couchdb.netrc';
-
- '/srv/leap/couchdb':
- ensure => directory
- }
-
- couchdb::query::setup { 'localhost':
- user => $couchdb_admin_user,
- pw => $couchdb_admin_pw,
- }
-
- vcsrepo { '/srv/leap/couchdb/scripts':
- ensure => present,
- provider => git,
- source => 'https://leap.se/git/couchdb_scripts',
- revision => 'origin/master',
- require => File['/srv/leap/couchdb']
- }
-
- include site_couchdb::stunnel
- include site_couchdb::bigcouch::add_nodes
- include site_couchdb::bigcouch::settle_cluster
+ -> Service['stunnel']
+ -> Class['couchdb']
+ -> Class['site_couchdb::setup']
+
+ include site_stunnel
+
+ include site_couchdb::setup
include site_couchdb::create_dbs
include site_couchdb::add_users
include site_couchdb::designs
include site_couchdb::logrotate
- include site_couchdb::bigcouch::compaction
- if $couchdb_backup { include site_couchdb::backup }
-
- include site_shorewall::couchdb
- include site_shorewall::couchdb::bigcouch
+ if $couchdb_backup { include site_couchdb::backup }
include site_check_mk::agent::couchdb
include site_check_mk::agent::tapicero
- file { '/var/log/bigcouch':
- ensure => directory
- }
-
}
diff --git a/puppet/modules/site_couchdb/manifests/master.pp b/puppet/modules/site_couchdb/manifests/master.pp
new file mode 100644
index 00000000..a0a6633d
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/master.pp
@@ -0,0 +1,9 @@
+class site_couchdb::master {
+
+ class { 'couchdb':
+ admin_pw => $site_couchdb::couchdb_admin_pw,
+ admin_salt => $site_couchdb::couchdb_admin_salt,
+ chttpd_bind_address => '127.0.0.1'
+ }
+
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/manifests/mirror.pp b/puppet/modules/site_couchdb/manifests/mirror.pp
new file mode 100644
index 00000000..1cbd9bcc
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/mirror.pp
@@ -0,0 +1,74 @@
+class site_couchdb::mirror {
+
+ class { 'couchdb':
+ admin_pw => $site_couchdb::couchdb_admin_pw,
+ admin_salt => $site_couchdb::couchdb_admin_salt,
+ chttpd_bind_address => '127.0.0.1'
+ }
+
+ $masters = $site_couchdb::couchdb_config['replication']['masters']
+ $master_node_names = keys($site_couchdb::couchdb_config['replication']['masters'])
+ $master_node = $masters[$master_node_names[0]]
+ $user = $site_couchdb::couchdb_replication_user
+ $password = $site_couchdb::couchdb_replication_pw
+ $from_host = $master_node['domain_internal']
+ $from_port = $master_node['couch_port']
+ $from = "http://${user}:${password}@${from_host}:${from_port}"
+
+ notice("mirror from: ${from}")
+
+ ### customer database
+ couchdb::mirror_db { 'customers':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## identities database
+ couchdb::mirror_db { 'identities':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## keycache database
+ couchdb::mirror_db { 'keycache':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## sessions database
+ couchdb::mirror_db { 'sessions':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## shared database
+ couchdb::mirror_db { 'shared':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tickets database
+ couchdb::mirror_db { 'tickets':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tokens database
+ couchdb::mirror_db { 'tokens':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## users database
+ couchdb::mirror_db { 'users':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## messages db
+ couchdb::mirror_db { 'messages':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/setup.pp b/puppet/modules/site_couchdb/manifests/setup.pp
new file mode 100644
index 00000000..69bd1c6a
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/setup.pp
@@ -0,0 +1,46 @@
+#
+# An initial setup class. All the other classes depend on this
+#
+class site_couchdb::setup {
+
+ # ensure that we don't have leftovers from previous installations
+ # where we installed the cloudant bigcouch package
+ # https://leap.se/code/issues/4971
+ class { 'couchdb::bigcouch::package::cloudant':
+ ensure => absent
+ }
+
+ $user = $site_couchdb::couchdb_admin_user
+
+ # /etc/couchdb/couchdb-admin.netrc is deployed by couchdb::query::setup
+ # we symlink it to couchdb.netrc for puppet commands,
+ # and symlink that to /root/.netrc for couchdb_scripts (eg. backup),
+ # which also makes life easier for the admin (i.e. using curl/wget without
+ # passing credentials)
+ file {
+ '/etc/couchdb/couchdb.netrc':
+ ensure => link,
+ target => "/etc/couchdb/couchdb-${user}.netrc";
+
+ '/root/.netrc':
+ ensure => link,
+ target => '/etc/couchdb/couchdb.netrc';
+
+ '/srv/leap/couchdb':
+ ensure => directory
+ }
+
+ couchdb::query::setup { 'localhost':
+ user => $user,
+ pw => $site_couchdb::couchdb_admin_pw,
+ }
+
+ vcsrepo { '/srv/leap/couchdb/scripts':
+ ensure => present,
+ provider => git,
+ source => 'https://leap.se/git/couchdb_scripts',
+ revision => 'origin/master',
+ require => File['/srv/leap/couchdb']
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/stunnel.pp b/puppet/modules/site_couchdb/manifests/stunnel.pp
deleted file mode 100644
index 91f1e3aa..00000000
--- a/puppet/modules/site_couchdb/manifests/stunnel.pp
+++ /dev/null
@@ -1,112 +0,0 @@
-class site_couchdb::stunnel {
-
- $stunnel = hiera('stunnel')
-
- $couch_server = $stunnel['couch_server']
- $couch_server_accept = $couch_server['accept']
- $couch_server_connect = $couch_server['connect']
-
- # Erlang Port Mapper Daemon (epmd) stunnel server/clients
- $epmd_server = $stunnel['epmd_server']
- $epmd_server_accept = $epmd_server['accept']
- $epmd_server_connect = $epmd_server['connect']
- $epmd_clients = $stunnel['epmd_clients']
-
- # Erlang Distributed Node Protocol (ednp) stunnel server/clients
- $ednp_server = $stunnel['ednp_server']
- $ednp_server_accept = $ednp_server['accept']
- $ednp_server_connect = $ednp_server['connect']
- $ednp_clients = $stunnel['ednp_clients']
-
-
-
- include site_config::x509::cert
- include site_config::x509::key
- include site_config::x509::ca
-
- include x509::variables
- $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
- $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
- $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
-
- # setup a stunnel server for the webapp to connect to couchdb
- stunnel::service { 'couch_server':
- accept => $couch_server_accept,
- connect => $couch_server_connect,
- client => false,
- cafile => $ca_path,
- key => $key_path,
- cert => $cert_path,
- verify => '2',
- pid => '/var/run/stunnel4/couchserver.pid',
- rndfile => '/var/lib/stunnel4/.rnd',
- debuglevel => '4',
- require => [
- Class['Site_config::X509::Key'],
- Class['Site_config::X509::Cert'],
- Class['Site_config::X509::Ca'] ];
- }
-
-
- # setup stunnel server for Erlang Port Mapper Daemon (epmd), necessary for
- # bigcouch clustering between each bigcouchdb node
- stunnel::service { 'epmd_server':
- accept => $epmd_server_accept,
- connect => $epmd_server_connect,
- client => false,
- cafile => $ca_path,
- key => $key_path,
- cert => $cert_path,
- verify => '2',
- pid => '/var/run/stunnel4/epmd_server.pid',
- rndfile => '/var/lib/stunnel4/.rnd',
- debuglevel => '4',
- require => [
- Class['Site_config::X509::Key'],
- Class['Site_config::X509::Cert'],
- Class['Site_config::X509::Ca'] ];
- }
-
- # setup stunnel clients for Erlang Port Mapper Daemon (epmd) to connect
- # to the above epmd stunnel server.
- $epmd_client_defaults = {
- 'client' => true,
- 'cafile' => $ca_path,
- 'key' => $key_path,
- 'cert' => $cert_path,
- }
-
- create_resources(site_stunnel::clients, $epmd_clients, $epmd_client_defaults)
-
- # setup stunnel server for Erlang Distributed Node Protocol (ednp), necessary
- # for bigcouch clustering between each bigcouchdb node
- stunnel::service { 'ednp_server':
- accept => $ednp_server_accept,
- connect => $ednp_server_connect,
- client => false,
- cafile => $ca_path,
- key => $key_path,
- cert => $cert_path,
- verify => '2',
- pid => '/var/run/stunnel4/ednp_server.pid',
- rndfile => '/var/lib/stunnel4/.rnd',
- debuglevel => '4',
- require => [
- Class['Site_config::X509::Key'],
- Class['Site_config::X509::Cert'],
- Class['Site_config::X509::Ca'] ];
- }
-
- # setup stunnel clients for Erlang Distributed Node Protocol (ednp) to connect
- # to the above ednp stunnel server.
- $ednp_client_defaults = {
- 'client' => true,
- 'cafile' => $ca_path,
- 'key' => $key_path,
- 'cert' => $cert_path,
- }
-
- create_resources(site_stunnel::clients, $ednp_clients, $ednp_client_defaults)
-
- include site_check_mk::agent::stunnel
-}
diff --git a/puppet/modules/site_haproxy/manifests/init.pp b/puppet/modules/site_haproxy/manifests/init.pp
index 6bcf3f5c..b28ce80e 100644
--- a/puppet/modules/site_haproxy/manifests/init.pp
+++ b/puppet/modules/site_haproxy/manifests/init.pp
@@ -2,25 +2,25 @@ class site_haproxy {
$haproxy = hiera('haproxy')
class { 'haproxy':
- enable => true,
- manage_service => true,
- global_options => {
- 'log' => '127.0.0.1 local0',
- 'maxconn' => '4096',
- 'stats' => 'socket /var/run/haproxy.sock user haproxy group haproxy',
- 'chroot' => '/usr/share/haproxy',
- 'user' => 'haproxy',
- 'group' => 'haproxy',
- 'daemon' => ''
- },
- defaults_options => {
- 'log' => 'global',
- 'retries' => '3',
- 'option' => 'redispatch',
- 'timeout connect' => '4000',
- 'timeout client' => '20000',
- 'timeout server' => '20000'
- }
+ enable => true,
+ manage_service => true,
+ global_options => {
+ 'log' => '127.0.0.1 local0',
+ 'maxconn' => '4096',
+ 'stats' => 'socket /var/run/haproxy.sock user haproxy group haproxy',
+ 'chroot' => '/usr/share/haproxy',
+ 'user' => 'haproxy',
+ 'group' => 'haproxy',
+ 'daemon' => ''
+ },
+ defaults_options => {
+ 'log' => 'global',
+ 'retries' => '3',
+ 'option' => 'redispatch',
+ 'timeout connect' => '4000',
+ 'timeout client' => '20000',
+ 'timeout server' => '20000'
+ }
}
# monitor haproxy
@@ -34,8 +34,8 @@ class site_haproxy {
concat::fragment { 'leap_haproxy_webapp_couchdb':
target => '/etc/haproxy/haproxy.cfg',
order => '20',
- content => template('site_haproxy/haproxy_couchdb.cfg.erb'),
+ content => template('site_haproxy/haproxy.cfg.erb'),
}
-
+
include site_check_mk::agent::haproxy
}
diff --git a/puppet/modules/site_haproxy/templates/couch.erb b/puppet/modules/site_haproxy/templates/couch.erb
new file mode 100644
index 00000000..baa31486
--- /dev/null
+++ b/puppet/modules/site_haproxy/templates/couch.erb
@@ -0,0 +1,32 @@
+frontend couch
+ bind localhost:<%= @listen_port %>
+ mode http
+ option httplog
+ option dontlognull
+ option http-server-close # use client keep-alive, but close server connection.
+ use_backend couch_write if METH_POST
+ default_backend couch_read
+
+backend couch_write
+ mode http
+ balance roundrobin
+ option httpchk GET / # health check using simple get to root
+ option allbackups # balance among all backups, not just one.
+ default-server inter 3000 fastinter 1000 downinter 1000 rise 2 fall 1
+<%- @servers.sort.each do |name,server| -%>
+<%- next unless server['writable'] -%>
+ # <%=name%>
+ server couchdb_<%=server['port']%> <%=server['host']%>:<%=server['port']%> <%='backup' if server['backup']%> weight <%=server['weight']%> check
+<%- end -%>
+
+backend couch_read
+ mode http
+ balance roundrobin
+ option httpchk GET / # health check using simple get to root
+ option allbackups # balance among all backups, not just one.
+ default-server inter 3000 fastinter 1000 downinter 1000 rise 2 fall 1
+<%- @servers.sort.each do |name,server| -%>
+ # <%=name%>
+ server couchdb_<%=server['port']%> <%=server['host']%>:<%=server['port']%> <%='backup' if server['backup']%> weight <%=server['weight']%> check
+<%- end -%>
+
diff --git a/puppet/modules/site_haproxy/templates/haproxy.cfg.erb b/puppet/modules/site_haproxy/templates/haproxy.cfg.erb
new file mode 100644
index 00000000..8311b1a5
--- /dev/null
+++ b/puppet/modules/site_haproxy/templates/haproxy.cfg.erb
@@ -0,0 +1,11 @@
+<%- @haproxy.each do |frontend, options| -%>
+<%- if options['servers'] -%>
+
+##
+## <%= frontend %>
+##
+
+<%= scope.function_templatewlv(["site_haproxy/#{frontend}.erb", options]) %>
+<%- end -%>
+<%- end -%>
+
diff --git a/puppet/modules/site_haproxy/templates/haproxy_couchdb.cfg.erb b/puppet/modules/site_haproxy/templates/haproxy_couchdb.cfg.erb
deleted file mode 100644
index 1fa01b96..00000000
--- a/puppet/modules/site_haproxy/templates/haproxy_couchdb.cfg.erb
+++ /dev/null
@@ -1,23 +0,0 @@
-
-listen bigcouch-in
- mode http
- balance roundrobin
- option httplog
- option dontlognull
- option httpchk GET / # health check using simple get to root
- option http-server-close # use client keep-alive, but close server connection.
- option allbackups # balance among all backups, not just one.
-
- bind localhost:4096
-
- default-server inter 3000 fastinter 1000 downinter 1000 rise 2 fall 1
-
-<%- if @haproxy['servers'] -%>
-<%- @haproxy['servers'].sort.each do |name,server| -%>
-<%- backup = server['backup'] ? 'backup' : '' -%>
- # <%=name%>
- server couchdb_<%=server['port']%> <%=server['host']%>:<%=server['port']%> <%=backup%> weight <%=server['weight']%> check
-
-<%- end -%>
-<%- end -%>
-
diff --git a/puppet/modules/site_mx/manifests/couchdb.pp b/puppet/modules/site_mx/manifests/couchdb.pp
deleted file mode 100644
index b1f3bd02..00000000
--- a/puppet/modules/site_mx/manifests/couchdb.pp
+++ /dev/null
@@ -1,23 +0,0 @@
-class site_mx::couchdb {
-
- $stunnel = hiera('stunnel')
- $couch_client = $stunnel['couch_client']
- $couch_client_connect = $couch_client['connect']
-
- include x509::variables
- $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
- $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
- $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
-
- include site_stunnel
-
- $couchdb_stunnel_client_defaults = {
- 'connect_port' => $couch_client_connect,
- 'client' => true,
- 'cafile' => $ca_path,
- 'key' => $key_path,
- 'cert' => $cert_path,
- }
-
- create_resources(site_stunnel::clients, $couch_client, $couchdb_stunnel_client_defaults)
-}
diff --git a/puppet/modules/site_mx/manifests/init.pp b/puppet/modules/site_mx/manifests/init.pp
index c3d38a46..91014ed6 100644
--- a/puppet/modules/site_mx/manifests/init.pp
+++ b/puppet/modules/site_mx/manifests/init.pp
@@ -8,12 +8,12 @@ class site_mx {
include site_config::x509::client_ca::ca
include site_config::x509::client_ca::key
+ include site_stunnel
include site_postfix::mx
include site_haproxy
include site_shorewall::mx
include site_shorewall::service::smtp
- include site_mx::couchdb
include leap_mx
include site_check_mk::agent::mx
}
diff --git a/puppet/modules/site_shorewall/manifests/couchdb.pp b/puppet/modules/site_shorewall/manifests/couchdb.pp
deleted file mode 100644
index 73bed62b..00000000
--- a/puppet/modules/site_shorewall/manifests/couchdb.pp
+++ /dev/null
@@ -1,24 +0,0 @@
-class site_shorewall::couchdb {
-
- include site_shorewall::defaults
-
- $stunnel = hiera('stunnel')
- $couch_server = $stunnel['couch_server']
- $couch_stunnel_port = $couch_server['accept']
-
- # define macro for incoming services
- file { '/etc/shorewall/macro.leap_couchdb':
- content => "PARAM - - tcp ${couch_stunnel_port}",
- notify => Service['shorewall'],
- require => Package['shorewall']
- }
-
- shorewall::rule {
- 'net2fw-couchdb':
- source => 'net',
- destination => '$FW',
- action => 'leap_couchdb(ACCEPT)',
- order => 200;
- }
-
-}
diff --git a/puppet/modules/site_shorewall/manifests/couchdb/bigcouch.pp b/puppet/modules/site_shorewall/manifests/couchdb/bigcouch.pp
deleted file mode 100644
index 20740650..00000000
--- a/puppet/modules/site_shorewall/manifests/couchdb/bigcouch.pp
+++ /dev/null
@@ -1,51 +0,0 @@
-class site_shorewall::couchdb::bigcouch {
-
- include site_shorewall::defaults
-
- $stunnel = hiera('stunnel')
-
- # Erlang Port Mapper Daemon (epmd) stunnel server/clients
- $epmd_clients = $stunnel['epmd_clients']
- $epmd_server = $stunnel['epmd_server']
- $epmd_server_port = $epmd_server['accept']
- $epmd_server_connect = $epmd_server['connect']
-
- # Erlang Distributed Node Protocol (ednp) stunnel server/clients
- $ednp_clients = $stunnel['ednp_clients']
- $ednp_server = $stunnel['ednp_server']
- $ednp_server_port = $ednp_server['accept']
- $ednp_server_connect = $ednp_server['connect']
-
- # define macro for incoming services
- file { '/etc/shorewall/macro.leap_bigcouch':
- content => "PARAM - - tcp ${epmd_server_port},${ednp_server_port}",
- notify => Service['shorewall'],
- require => Package['shorewall']
- }
-
- shorewall::rule {
- 'net2fw-bigcouch':
- source => 'net',
- destination => '$FW',
- action => 'leap_bigcouch(ACCEPT)',
- order => 300;
- }
-
- # setup DNAT rules for each epmd
- $epmd_shorewall_dnat_defaults = {
- 'source' => '$FW',
- 'proto' => 'tcp',
- 'destinationport' => regsubst($epmd_server_connect, '^([0-9.]+:)([0-9]+)$', '\2')
- }
- create_resources(site_shorewall::couchdb::dnat, $epmd_clients, $epmd_shorewall_dnat_defaults)
-
- # setup DNAT rules for each ednp
- $ednp_shorewall_dnat_defaults = {
- 'source' => '$FW',
- 'proto' => 'tcp',
- 'destinationport' => regsubst($ednp_server_connect, '^([0-9.]+:)([0-9]+)$', '\2')
- }
- create_resources(site_shorewall::couchdb::dnat, $ednp_clients, $ednp_shorewall_dnat_defaults)
-
-}
-
diff --git a/puppet/modules/site_shorewall/manifests/couchdb/dnat.pp b/puppet/modules/site_shorewall/manifests/couchdb/dnat.pp
deleted file mode 100644
index f1bc9acf..00000000
--- a/puppet/modules/site_shorewall/manifests/couchdb/dnat.pp
+++ /dev/null
@@ -1,21 +0,0 @@
-define site_shorewall::couchdb::dnat (
- $source,
- $connect,
- $connect_port,
- $accept_port,
- $proto,
- $destinationport )
-{
-
-
- shorewall::rule {
- "dnat_${name}_${destinationport}":
- action => 'DNAT',
- source => $source,
- destination => "\$FW:127.0.0.1:${accept_port}",
- proto => $proto,
- destinationport => $destinationport,
- originaldest => $connect,
- order => 200
- }
-}
diff --git a/puppet/modules/site_shorewall/manifests/stunnel/client.pp b/puppet/modules/site_shorewall/manifests/stunnel/client.pp
new file mode 100644
index 00000000..9a89a244
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/stunnel/client.pp
@@ -0,0 +1,40 @@
+#
+# Adds some firewall magic to the stunnel.
+#
+# Using DNAT, this firewall rule allows a locally running program
+# to connect to the normal remote IP and remote port of the
+# service on another machine, and have this connection transparently
+# routed through the locally running stunnel client.
+#
+# The network looks like this:
+#
+# From the client's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# consumer app -> localhost:accept_port -> connect:connect_port -> localhost:original_port
+#
+# From the server's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# ?? -> *:accept_port -> localhost:connect_port -> service
+#
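+# An illustrative declaration (values invented, matching what the
+# stunnel_client macro generates):
+#
+#   site_shorewall::stunnel::client { 'couch1_5984':
+#     accept_port   => 4000,
+#     connect       => 'couch1.bitmask.i',
+#     connect_port  => 15984,
+#     original_port => 5984,
+#   }
+#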
+
+define site_shorewall::stunnel::client(
+ $accept_port,
+ $connect,
+ $connect_port,
+ $original_port) {
+
+ include site_shorewall::defaults
+
+ shorewall::rule {
+ "stunnel_dnat_${name}":
+ action => 'DNAT',
+ source => '$FW',
+ destination => "\$FW:127.0.0.1:${accept_port}",
+ proto => 'tcp',
+ destinationport => $original_port,
+ originaldest => $connect,
+ order => 200
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/stunnel/server.pp b/puppet/modules/site_shorewall/manifests/stunnel/server.pp
new file mode 100644
index 00000000..db3ecd3e
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/stunnel/server.pp
@@ -0,0 +1,22 @@
+#
+# Allow all incoming connections to stunnel server port
+#
+
+define site_shorewall::stunnel::server($port) {
+
+ include site_shorewall::defaults
+
+ file { "/etc/shorewall/macro.stunnel_server_${name}":
+ content => "PARAM - - tcp ${port}",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+ shorewall::rule {
+ "net2fw-stunnel-server-${name}":
+ source => 'net',
+ destination => '$FW',
+ action => "stunnel_server_${name}(ACCEPT)",
+ order => 200;
+ }
+
+} \ No newline at end of file
diff --git a/puppet/modules/site_stunnel/manifests/client.pp b/puppet/modules/site_stunnel/manifests/client.pp
new file mode 100644
index 00000000..12d664b4
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/client.pp
@@ -0,0 +1,52 @@
+#
+# Sets up stunnel and firewall configuration for
+# a single stunnel client
+#
+# As a client, we accept connections on localhost,
+# and connect to a remote $connect:$connect_port
+#
+
+define site_stunnel::client (
+ $accept_port,
+ $connect_port,
+ $connect,
+ $original_port,
+ $verify = '2',
+ $pid = $name,
+ $rndfile = '/var/lib/stunnel4/.rnd',
+ $debuglevel = '4' ) {
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include x509::variables
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+ stunnel::service { $name:
+ accept => "127.0.0.1:${accept_port}",
+ connect => "${connect}:${connect_port}",
+ client => true,
+ cafile => $ca_path,
+ key => $key_path,
+ cert => $cert_path,
+ verify => $verify,
+ pid => "/var/run/stunnel4/${pid}.pid",
+ rndfile => $rndfile,
+ debuglevel => $debuglevel,
+ subscribe => [
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ];
+ }
+
+ site_shorewall::stunnel::client { $name:
+ accept_port => $accept_port,
+ connect => $connect,
+ connect_port => $connect_port,
+ original_port => $original_port
+ }
+
+ include site_check_mk::agent::stunnel
+}
diff --git a/puppet/modules/site_stunnel/manifests/clients.pp b/puppet/modules/site_stunnel/manifests/clients.pp
index b75c9ac3..c0958b5f 100644
--- a/puppet/modules/site_stunnel/manifests/clients.pp
+++ b/puppet/modules/site_stunnel/manifests/clients.pp
@@ -1,33 +1,23 @@
-define site_stunnel::clients (
- $accept_port,
- $connect_port,
- $connect,
- $cafile,
- $key,
- $cert,
- $client = true,
- $verify = '2',
- $pid = $name,
- $rndfile = '/var/lib/stunnel4/.rnd',
- $debuglevel = '4' ) {
+#
+# example hiera yaml:
+#
+# stunnel:
+# clients:
+# ednp_clients:
+# thrips_9002:
+# accept_port: 4001
+# connect: thrips.demo.bitmask.i
+# connect_port: 19002
+# epmd_clients:
+# thrips_4369:
+# accept_port: 4000
+# connect: thrips.demo.bitmask.i
+# connect_port: 14369
+#
+# In the above example, this resource definition is called twice, with $name
+# 'ednp_clients' and 'epmd_clients'
+#
- stunnel::service { $name:
- accept => "127.0.0.1:${accept_port}",
- connect => "${connect}:${connect_port}",
- client => $client,
- cafile => $cafile,
- key => $key,
- cert => $cert,
- verify => $verify,
- pid => "/var/run/stunnel4/${pid}.pid",
- rndfile => $rndfile,
- debuglevel => $debuglevel,
- subscribe => [
- Class['Site_config::X509::Key'],
- Class['Site_config::X509::Cert'],
- Class['Site_config::X509::Ca'] ];
-
- }
-
- include site_check_mk::agent::stunnel
+define site_stunnel::clients {
+ create_resources(site_stunnel::client, $site_stunnel::clients[$name])
}
diff --git a/puppet/modules/site_stunnel/manifests/init.pp b/puppet/modules/site_stunnel/manifests/init.pp
index c7d6acc6..b292f1cd 100644
--- a/puppet/modules/site_stunnel/manifests/init.pp
+++ b/puppet/modules/site_stunnel/manifests/init.pp
@@ -1,3 +1,8 @@
+#
+# If you need something to happen after stunnel is started,
+# you can depend on Service['stunnel'] or Class['site_stunnel']
+#
+
class site_stunnel {
# include the generic stunnel module
@@ -13,5 +18,15 @@ class site_stunnel {
ensure => absent;
}
}
+
+ $stunnel = hiera('stunnel')
+
+ # add server stunnels
+ create_resources(site_stunnel::servers, $stunnel['servers'])
+
+ # add client stunnels
+ $clients = $stunnel['clients']
+ $client_sections = keys($clients)
+ site_stunnel::clients { $client_sections: }
}
diff --git a/puppet/modules/site_stunnel/manifests/servers.pp b/puppet/modules/site_stunnel/manifests/servers.pp
new file mode 100644
index 00000000..b1da5c59
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/servers.pp
@@ -0,0 +1,50 @@
+#
+# example hiera yaml:
+#
+# stunnel:
+# servers:
+# couch_server:
+# accept_port: 15984
+# connect_port: 5984
+#
+
+define site_stunnel::servers (
+ $accept_port,
+ $connect_port,
+ $verify = '2',
+ $pid = $name,
+ $rndfile = '/var/lib/stunnel4/.rnd',
+ $debuglevel = '4' ) {
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include x509::variables
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+ stunnel::service { $name:
+ accept => $accept_port,
+ connect => "127.0.0.1:${connect_port}",
+ client => false,
+ cafile => $ca_path,
+ key => $key_path,
+ cert => $cert_path,
+ verify => $verify,
+ pid => "/var/run/stunnel4/${pid}.pid",
+ rndfile => $rndfile,
+ debuglevel => $debuglevel,
+ require => [
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ];
+ }
+
+ # allow incoming connections on $accept_port
+ site_shorewall::stunnel::server { $name:
+ port => $accept_port
+ }
+
+ include site_check_mk::agent::stunnel
+}
diff --git a/puppet/modules/site_webapp/manifests/couchdb.pp b/puppet/modules/site_webapp/manifests/couchdb.pp
index ff743fba..3ae4d266 100644
--- a/puppet/modules/site_webapp/manifests/couchdb.pp
+++ b/puppet/modules/site_webapp/manifests/couchdb.pp
@@ -7,10 +7,6 @@ class site_webapp::couchdb {
$couchdb_webapp_user = $webapp['couchdb_webapp_user']['username']
$couchdb_webapp_password = $webapp['couchdb_webapp_user']['password']
- $stunnel = hiera('stunnel')
- $couch_client = $stunnel['couch_client']
- $couch_client_connect = $couch_client['connect']
-
include x509::variables
file {
@@ -37,14 +33,4 @@ class site_webapp::couchdb {
}
include site_stunnel
-
- $couchdb_stunnel_client_defaults = {
- 'connect_port' => $couch_client_connect,
- 'client' => true,
- 'cafile' => "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt",
- 'key' => "${x509::variables::keys}/${site_config::params::cert_name}.key",
- 'cert' => "${x509::variables::certs}/${site_config::params::cert_name}.crt",
- }
-
- create_resources(site_stunnel::clients, $couch_client, $couchdb_stunnel_client_defaults)
}
diff --git a/puppet/modules/tapicero/manifests/init.pp b/puppet/modules/tapicero/manifests/init.pp
index af1a96ac..fd8c1344 100644
--- a/puppet/modules/tapicero/manifests/init.pp
+++ b/puppet/modules/tapicero/manifests/init.pp
@@ -12,6 +12,8 @@ class tapicero {
$couchdb_soledad_user = $couchdb_users['soledad']['username']
$couchdb_leap_mx_user = $couchdb_users['leap_mx']['username']
+ $couchdb_mode = $couchdb['mode']
+ $couchdb_replication = $couchdb['replication']
Class['site_config::default'] -> Class['tapicero']
diff --git a/puppet/modules/tapicero/templates/tapicero.yaml.erb b/puppet/modules/tapicero/templates/tapicero.yaml.erb
index 8e19b22f..182a6aa6 100644
--- a/puppet/modules/tapicero/templates/tapicero.yaml.erb
+++ b/puppet/modules/tapicero/templates/tapicero.yaml.erb
@@ -24,6 +24,8 @@ log_level: info
options:
# prefix for per user databases:
db_prefix: "user-"
+ mode: <%= @couchdb_mode %>
+ replication: <%= @couchdb_replication %>
# security settings to be used for the per user databases
security:
@@ -40,3 +42,4 @@ options:
- <%= @couchdb_leap_mx_user %>
roles: []
+