-rwxr-xr-x  bin/puppet_command | 55
-rwxr-xr-x  bin/run_tests | 35
-rw-r--r--  platform.rb | 2
-rw-r--r--  provider_base/common.json | 10
-rw-r--r--  provider_base/lib/macros.rb | 14
-rw-r--r--  provider_base/lib/macros/core.rb | 91
-rw-r--r--  provider_base/lib/macros/files.rb | 79
-rw-r--r--  provider_base/lib/macros/haproxy.rb | 73
-rw-r--r--  provider_base/lib/macros/hosts.rb | 63
-rw-r--r--  provider_base/lib/macros/nodes.rb | 88
-rw-r--r--  provider_base/lib/macros/secrets.rb | 39
-rw-r--r--  provider_base/lib/macros/stunnel.rb | 95
-rw-r--r--  provider_base/services/_couchdb_master.json | 8
-rw-r--r--  provider_base/services/_couchdb_mirror.json | 21
-rw-r--r--  provider_base/services/_couchdb_multimaster.json | 24
-rw-r--r--  provider_base/services/couchdb.json | 24
-rw-r--r--  provider_base/services/couchdb.rb | 60
-rw-r--r--  provider_base/services/monitor.json | 6
-rw-r--r--  provider_base/services/mx.json | 15
-rw-r--r--  provider_base/services/obfsproxy.json | 9
-rw-r--r--  provider_base/services/openvpn.json | 7
-rw-r--r--  provider_base/services/webapp.json | 23
-rw-r--r--  puppet/manifests/site.pp | 5
m---------  puppet/modules/couchdb | 0
-rwxr-xr-x  puppet/modules/obfsproxy/files/obfsproxy_init | 93
-rw-r--r--  puppet/modules/obfsproxy/files/obfsproxy_logrotate | 14
-rw-r--r--  puppet/modules/obfsproxy/manifests/init.pp | 86
-rw-r--r--  puppet/modules/obfsproxy/templates/etc_conf.erb | 11
-rw-r--r--  puppet/modules/site_apt/manifests/preferences/obfsproxy.pp | 9
-rw-r--r--  puppet/modules/site_couchdb/manifests/add_users.pp | 12
-rw-r--r--  puppet/modules/site_couchdb/manifests/bigcouch.pp | 34
-rw-r--r--  puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp | 2
-rw-r--r--  puppet/modules/site_couchdb/manifests/create_dbs.pp | 21
-rw-r--r--  puppet/modules/site_couchdb/manifests/init.pp | 154
-rw-r--r--  puppet/modules/site_couchdb/manifests/master.pp | 9
-rw-r--r--  puppet/modules/site_couchdb/manifests/mirror.pp | 77
-rw-r--r--  puppet/modules/site_couchdb/manifests/setup.pp | 46
-rw-r--r--  puppet/modules/site_couchdb/manifests/stunnel.pp | 112
-rw-r--r--  puppet/modules/site_haproxy/manifests/init.pp | 42
-rw-r--r--  puppet/modules/site_haproxy/templates/couch.erb | 32
-rw-r--r--  puppet/modules/site_haproxy/templates/haproxy.cfg.erb | 11
-rw-r--r--  puppet/modules/site_haproxy/templates/haproxy_couchdb.cfg.erb | 23
-rw-r--r--  puppet/modules/site_mx/manifests/couchdb.pp | 23
-rw-r--r--  puppet/modules/site_mx/manifests/init.pp | 2
-rw-r--r--  puppet/modules/site_obfsproxy/README | 0
-rw-r--r--  puppet/modules/site_obfsproxy/manifests/init.pp | 39
-rw-r--r--  puppet/modules/site_shorewall/manifests/couchdb.pp | 24
-rw-r--r--  puppet/modules/site_shorewall/manifests/couchdb/bigcouch.pp | 51
-rw-r--r--  puppet/modules/site_shorewall/manifests/couchdb/dnat.pp | 21
-rw-r--r--  puppet/modules/site_shorewall/manifests/obfsproxy.pp | 24
-rw-r--r--  puppet/modules/site_shorewall/manifests/stunnel/client.pp | 40
-rw-r--r--  puppet/modules/site_shorewall/manifests/stunnel/server.pp | 22
-rw-r--r--  puppet/modules/site_stunnel/manifests/client.pp | 52
-rw-r--r--  puppet/modules/site_stunnel/manifests/clients.pp | 52
-rw-r--r--  puppet/modules/site_stunnel/manifests/init.pp | 15
-rw-r--r--  puppet/modules/site_stunnel/manifests/servers.pp | 50
-rw-r--r--  puppet/modules/site_webapp/manifests/couchdb.pp | 14
-rw-r--r--  puppet/modules/site_webapp/manifests/init.pp | 4
-rw-r--r--  puppet/modules/site_webapp/templates/config.yml.erb | 7
-rw-r--r--  puppet/modules/tapicero/manifests/init.pp | 2
-rw-r--r--  puppet/modules/tapicero/templates/tapicero.yaml.erb | 12
-rw-r--r--  tests/white-box/couchdb.rb | 66
-rw-r--r--  tests/white-box/network.rb | 33
-rw-r--r--  tests/white-box/webapp.rb | 24
64 files changed, 1676 insertions(+), 535 deletions(-)
diff --git a/bin/puppet_command b/bin/puppet_command
index a6cd5a69..a9d39066 100755
--- a/bin/puppet_command
+++ b/bin/puppet_command
@@ -7,12 +7,15 @@
# (exit codes, lockfile, multiple manifests, etc)
#
+require 'pty'
+require 'yaml'
+
PUPPET_BIN = '/usr/bin/puppet'
PUPPET_DIRECTORY = '/srv/leap'
PUPPET_PARAMETERS = '--color=false --detailed-exitcodes --libdir=puppet/lib --confdir=puppet'
SITE_MANIFEST = 'puppet/manifests/site.pp'
-SETUP_MANIFEST = 'puppet/manifests/setup.pp'
DEFAULT_TAGS = 'leap_base,leap_service'
+HIERA_FILE = '/etc/leap/hiera.yaml'
def main
process_command_line_arguments
@@ -54,21 +57,37 @@ def apply
end
def set_hostname
- exit_code = puppet_apply(:manifest => SETUP_MANIFEST, :tags => '') do |line|
- # todo: replace setup.pp with https://github.com/lutter/ruby-augeas
- # or try this: http://www.puppetcookbook.com/posts/override-a-facter-fact.html
- if (line !~ /Finished catalog run/ || @verbosity > 2) &&
- (line !~ /dnsdomainname: Name or service not known/) &&
- (line !~ /warning: Could not retrieve fact fqdn/)
- puts line
+ unless File.exists?(HIERA_FILE)
+ puts("ERROR: Cannot set hostname without #{HIERA_FILE}")
+ exit(1)
+ end
+ hostname = YAML.load_file(HIERA_FILE)['name']
+ if hostname.nil? || hostname.empty?
+ puts('ERROR: NAME argument required')
+ exit(1)
+ end
+ current_hostname_file = File.read('/etc/hostname') rescue nil
+ current_hostname = `/bin/hostname`.strip
+
+ # set /etc/hostname
+ if current_hostname_file != hostname
+ File.open('/etc/hostname', 'w', 0611, :encoding => 'ascii') do |f|
+ f.write hostname
+ end
+ if File.read('/etc/hostname') == hostname
+ puts "Set /etc/hostname to #{hostname}"
+ else
+ puts "ERROR: failed to update /etc/hostname"
end
end
- if exit_code == 2
- puts "Hostname updated."
- elsif exit_code == 4 || exit_code == 6
- puts "ERROR: could not update hostname."
- elsif exit_code == 0 && @verbosity > 1
- puts "No change to hostname."
+
+ # call /bin/hostname
+ if current_hostname != hostname
+ if run("/bin/hostname #{hostname}") == 0
+ puts "Set hostname to #{hostname}"
+ else
+ puts "ERROR: failed to call `/bin/hostname #{hostname}`"
+ end
end
end
@@ -157,24 +176,18 @@ end
## this only works under ruby 1.9
##
-require "pty"
-
def run(cmd)
puts cmd if @verbosity >= 3
PTY.spawn("#{cmd}") do |output, input, pid|
begin
while line = output.gets do
yield line
- #$stdout.puts line
- #$stdout.flush
end
rescue Errno::EIO
end
Process.wait(pid) # only works in ruby 1.9, required to capture the exit status.
end
- status = $?.exitstatus
- #yield status if block_given?
- return status
+ return $?.exitstatus
rescue PTY::ChildExited
end
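For context, a sketch of how run() is typically driven (hypothetical caller, not part of this diff): each output line is streamed through the block, and the caller branches on Puppet's --detailed-exitcodes status, as the deleted set_hostname code above did.

    # Hypothetical caller of run(); exit codes follow --detailed-exitcodes:
    # 0 = no changes, 2 = changes applied, 4/6 = failures occurred.
    exit_code = run("#{PUPPET_BIN} apply #{PUPPET_PARAMETERS} #{SITE_MANIFEST}") do |line|
      puts line if @verbosity > 0
    end
    case exit_code
    when 0 then puts "No changes."
    when 2 then puts "Changes applied."
    else        puts "ERROR: puppet run failed (exit code #{exit_code})."
    end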
diff --git a/bin/run_tests b/bin/run_tests
index 526aa83a..2ee027f4 100755
--- a/bin/run_tests
+++ b/bin/run_tests
@@ -127,11 +127,22 @@ class LeapTest < MiniTest::Unit::TestCase
if params
uri.query = URI.encode_www_form(params)
end
- response = Net::HTTP.get_response(uri)
- if response.is_a?(Net::HTTPSuccess)
- yield response.body, response, nil
- else
- yield nil, response, nil
+ http = Net::HTTP.new uri.host, uri.port
+ if uri.scheme == 'https'
+ http.verify_mode = OpenSSL::SSL::VERIFY_NONE
+ http.use_ssl = true
+ end
+ http.start do |agent|
+ request = Net::HTTP::Get.new uri.request_uri
+ if uri.user
+ request.basic_auth uri.user, uri.password
+ end
+ response = agent.request(request)
+ if response.is_a?(Net::HTTPSuccess)
+ yield response.body, response, nil
+ else
+ yield nil, response, nil
+ end
end
rescue => exc
yield nil, nil, exc
@@ -151,6 +162,20 @@ class LeapTest < MiniTest::Unit::TestCase
end
#
+ # only a warning for now, should be a failure in the future
+ #
+ def assert_auth_fail(url, params)
+ uri = URI(url)
+ get(url, params) do |body, response, error|
+ unless response.code.to_s == "401"
+ warn "Expected a '401 Unauthorized' response, but got #{response.code} instead (GET #{uri.request_uri} with username '#{uri.user}')."
+ return false
+ end
+ end
+ true
+ end
+
+ #
# test if a socket can be connected to
#
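A hypothetical white-box test using the new helper (URL, credentials, and database name are invented for illustration):

    def test_couch_rejects_bad_credentials
      # assert_auth_fail only warns (for now) unless the response is 401.
      assert_auth_fail("http://baduser:wrongpass@localhost:5984/users", {})
    end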
diff --git a/platform.rb b/platform.rb
index 01797282..270dd25a 100644
--- a/platform.rb
+++ b/platform.rb
@@ -5,7 +5,7 @@
Leap::Platform.define do
self.version = "0.5.4.1"
- self.compatible_cli = "1.5.5".."1.5.7"
+ self.compatible_cli = "1.5.8".."1.99"
#
# the facter facts that should be gathered
diff --git a/provider_base/common.json b/provider_base/common.json
index a4d9c5f2..87af2152 100644
--- a/provider_base/common.json
+++ b/provider_base/common.json
@@ -25,9 +25,13 @@
"hosts": "=> hosts_file",
"x509": {
"use": true,
+ "use_commercial": false,
"cert": "= x509.use ? file(:node_x509_cert, :missing => 'x509 certificate for node $node. Run `leap cert update`') : nil",
"key": "= x509.use ? file(:node_x509_key, :missing => 'x509 key for node $node. Run `leap cert update`') : nil",
- "ca_cert": "= try_file :ca_cert"
+ "ca_cert": "= try_file :ca_cert",
+ "commercial_cert": "= x509.use_commercial ? file([:commercial_cert, try{webapp.domain}||domain.full_suffix], :missing => 'commercial x509 certificate for node $node. Add file $file, or run `leap cert csr` to generate a temporary self-signed cert and CSR you can use to purchase a real cert.') : nil",
+ "commercial_key": "= x509.use_commercial ? file([:commercial_key, try{webapp.domain}||domain.full_suffix], :missing => 'commercial x509 certificate for node $node. Add file $file, or run `leap cert csr` to generate a temporary self-signed cert and CSR you can use to purchase a real cert.') : nil",
+ "commercial_ca_cert": "= x509.use_commercial ? try_file(:commercial_ca_cert) : nil"
},
"service_type": "internal_service",
"development": {
@@ -38,5 +42,9 @@
"enabled": true,
"mail": {
"smarthost": "= nodes_like_me[:services => :mx].exclude(self).field('domain.full')"
+ },
+ "stunnel": {
+ "clients": {},
+ "servers": {}
}
}
diff --git a/provider_base/lib/macros.rb b/provider_base/lib/macros.rb
new file mode 100644
index 00000000..854b92b5
--- /dev/null
+++ b/provider_base/lib/macros.rb
@@ -0,0 +1,14 @@
+#
+# MACROS
+#
+# The methods in these files are available in the context of a .json configuration file.
+# (The module LeapCli::Macro is included in Config::Object)
+#
+
+require_relative 'macros/core'
+require_relative 'macros/files'
+require_relative 'macros/haproxy'
+require_relative 'macros/hosts'
+require_relative 'macros/nodes'
+require_relative 'macros/secrets'
+require_relative 'macros/stunnel'
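These macros are reached from JSON property values: any string beginning with `=` is evaluated as Ruby with LeapCli::Macro in scope, the convention used by every JSON file in this diff. Roughly:

    # A JSON value such as
    #   "smarthost": "= nodes_like_me[:services => :mx].exclude(self).field('domain.full')"
    # resolves, in the node's context, to the return value of:
    nodes_like_me[:services => :mx].exclude(self).field('domain.full')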
diff --git a/provider_base/lib/macros/core.rb b/provider_base/lib/macros/core.rb
new file mode 100644
index 00000000..2ab2e71b
--- /dev/null
+++ b/provider_base/lib/macros/core.rb
@@ -0,0 +1,91 @@
+# encoding: utf-8
+
+module LeapCli
+ module Macro
+
+ #
+ # return a fingerprint for a x509 certificate
+ #
+ def fingerprint(filename)
+ "SHA256: " + X509.fingerprint("SHA256", Path.named_path(filename))
+ end
+
+ #
+ # Creates a hash from the ssh key info in the users directory, for use in
+ # updating authorized_keys file. Additionally, the 'monitor' public key is
+ # included, which is used by the monitor nodes to run particular commands
+ # remotely.
+ #
+ def authorized_keys
+ hash = {}
+ keys = Dir.glob(Path.named_path([:user_ssh, '*']))
+ keys.sort.each do |keyfile|
+ ssh_type, ssh_key = File.read(keyfile, :encoding => 'UTF-8').strip.split(" ")
+ name = File.basename(File.dirname(keyfile))
+ hash[name] = {
+ "type" => ssh_type,
+ "key" => ssh_key
+ }
+ end
+ ssh_type, ssh_key = File.read(Path.named_path(:monitor_pub_key), :encoding => 'UTF-8').strip.split(" ")
+ hash[Leap::Platform.monitor_username] = {
+ "type" => ssh_type,
+ "key" => ssh_key
+ }
+ hash
+ end
+
+ def assert(assertion)
+ if instance_eval(assertion)
+ true
+ else
+ raise AssertionFailed.new(assertion), assertion, caller
+ end
+ end
+
+ def error(msg)
+ raise ConfigError.new(@node, msg), msg, caller
+ end
+
+ #
+ # applies a JSON partial to this node
+ #
+ def apply_partial(partial_path)
+ manager.partials(partial_path).each do |partial_data|
+ self.deep_merge!(partial_data)
+ end
+ end
+
+ #
+ # If at first you don't succeed, then it is time to give up.
+ #
+ # try{} returns nil if anything in the block throws an exception.
+ #
+ # You can wrap something that might fail in `try`, like so.
+ #
+ # "= try{ nodes[:services => 'tor'].first.ip_address } "
+ #
+ def try(&block)
+ yield
+ rescue NoMethodError
+ rescue ArgumentError
+ nil
+ end
+
+ protected
+
+ #
+ # returns a node list, if argument is not already one
+ #
+ def listify(node_list)
+ if node_list.is_a? Config::ObjectList
+ node_list
+ elsif node_list.is_a? Config::Object
+ Config::ObjectList.new(node_list)
+ else
+ raise ArgumentError, 'argument must be a node or node list, not a `%s`' % node_list.class, caller
+ end
+ end
+
+ end
+end
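Usage sketches for the helpers above (service names invented):

    # try{}: with no tor nodes, .first returns nil, nil.ip_address raises
    # NoMethodError, and try{} swallows it, yielding nil instead of an error.
    tor_ip = try { nodes[:services => 'tor'].first.ip_address }

    # assert: the string is instance_eval'd; AssertionFailed is raised if falsy.
    assert "nodes[:services => 'couchdb'].any?"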
diff --git a/provider_base/lib/macros/files.rb b/provider_base/lib/macros/files.rb
new file mode 100644
index 00000000..0a491325
--- /dev/null
+++ b/provider_base/lib/macros/files.rb
@@ -0,0 +1,79 @@
+# encoding: utf-8
+
+##
+## FILES
+##
+
+module LeapCli
+ module Macro
+
+ #
+ # inserts the contents of a file
+ #
+ def file(filename, options={})
+ if filename.is_a? Symbol
+ filename = [filename, @node.name]
+ end
+ filepath = Path.find_file(filename)
+ if filepath
+ if filepath =~ /\.erb$/
+ ERB.new(File.read(filepath, :encoding => 'UTF-8'), nil, '%<>').result(binding)
+ else
+ File.read(filepath, :encoding => 'UTF-8')
+ end
+ else
+ raise FileMissing.new(Path.named_path(filename), options)
+ ""
+ end
+ end
+
+ #
+ # like #file, but allow missing files
+ #
+ def try_file(filename)
+ return file(filename)
+ rescue FileMissing
+ return nil
+ end
+
+ #
+ # returns what the file path will be, once the file is rsynced to the server.
+ # an internal list of discovered file paths is saved, in order to rsync these files when needed.
+ #
+ # notes:
+ #
+ # * argument 'path' is relative to Path.provider/files or Path.provider_base/files
+ # * the path returned by this method is absolute
+ # * the path stored for use later by rsync is relative to Path.provider
+ # * if the path does not exist locally, but exists in provider_base, then the default file from
+ # provider_base is copied locally. this is required for rsync to work correctly.
+ #
+ def file_path(path)
+ if path.is_a? Symbol
+ path = [path, @node.name]
+ end
+ actual_path = Path.find_file(path)
+ if actual_path.nil?
+ Util::log 2, :skipping, "file_path(\"#{path}\") because there is no such file."
+ nil
+ else
+ if actual_path =~ /^#{Regexp.escape(Path.provider_base)}/
+ # if file is under Path.provider_base, we must copy the default file
+ # to Path.provider in order for rsync to be able to sync the file.
+ local_provider_path = actual_path.sub(/^#{Regexp.escape(Path.provider_base)}/, Path.provider)
+ FileUtils.mkdir_p File.dirname(local_provider_path), :mode => 0700
+ FileUtils.install actual_path, local_provider_path, :mode => 0600
+ Util.log :created, Path.relative_path(local_provider_path)
+ actual_path = local_provider_path
+ end
+ if File.directory?(actual_path) && actual_path !~ /\/$/
+ actual_path += '/' # ensure directories end with /, important for building rsync command
+ end
+ relative_path = Path.relative_path(actual_path)
+ @node.file_paths << relative_path
+ @node.manager.provider.hiera_sync_destination + '/' + relative_path
+ end
+ end
+
+ end
+end
\ No newline at end of file
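Usage sketches for the three helpers (named paths and file names invented):

    # file: a Symbol expands to a per-node named path, so on node "couch1"
    # file(:node_x509_cert) reads the file registered for [:node_x509_cert, "couch1"];
    # *.erb sources are rendered, and a missing file raises FileMissing.
    cert = file(:node_x509_cert)
    ca   = try_file(:ca_cert)        # same lookup, but returns nil when missing

    # file_path: records the local file for rsync and returns the path it
    # will have on the server after deployment.
    remote = file_path('branding/head.scss')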
diff --git a/provider_base/lib/macros/haproxy.rb b/provider_base/lib/macros/haproxy.rb
new file mode 100644
index 00000000..602ae726
--- /dev/null
+++ b/provider_base/lib/macros/haproxy.rb
@@ -0,0 +1,73 @@
+# encoding: utf-8
+
+##
+## HAPROXY
+##
+
+module LeapCli
+ module Macro
+
+ #
+ # creates a hash suitable for configuring haproxy. the key is the node name of the server we are proxying to.
+ #
+ # * node_list - a hash of nodes for the haproxy servers
+ # * stunnel_clients - contains the mappings to local ports for each server node.
+ # * non_stunnel_port - in case self is included in node_list, the port to connect to.
+ #
+ # A weight of 100 is used for nodes in the same location,
+ # 10 otherwise.
+ #
+ def haproxy_servers(node_list, stunnel_clients, non_stunnel_port=nil)
+ default_weight = 10
+ local_weight = 100
+
+ # record the hosts_file
+ hostnames(node_list)
+
+ # create a simple map for node name -> local stunnel accept port
+ accept_ports = stunnel_clients.inject({}) do |hsh, stunnel_entry|
+ name = stunnel_entry.first.sub /_[0-9]+$/, ''
+ hsh[name] = stunnel_entry.last['accept_port']
+ hsh
+ end
+
+ # if one of the nodes in the node list is ourself, then there will not be a stunnel to it,
+ # but we need to include it anyway in the haproxy config.
+ if node_list[self.name] && non_stunnel_port
+ accept_ports[self.name] = non_stunnel_port
+ end
+
+ # create the first pass of the servers hash
+ servers = node_list.values.inject(Config::ObjectList.new) do |hsh, node|
+ # make sure we have a port to talk to
+ unless accept_ports[node.name]
+ error "haproxy needs a local port to talk to when connecting to #{node.name}"
+ end
+ weight = default_weight
+ try {
+ weight = local_weight if self.location.name == node.location.name
+ }
+ hsh[node.name] = Config::Object[
+ 'backup', false,
+ 'host', 'localhost',
+ 'port', accept_ports[node.name],
+ 'weight', weight
+ ]
+ if node.services.include?('couchdb')
+ hsh[node.name]['writable'] = node.couch.mode != 'mirror'
+ end
+ hsh
+ end
+
+ # if there are some local servers, make the others backup
+ if servers.detect{|k,v| v.weight == local_weight}
+ servers.each do |k,server|
+ server['backup'] = server['weight'] == default_weight
+ end
+ end
+
+ return servers
+ end
+
+ end
+end
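An illustrative return value (node names, ports, and locations invented): two couchdb backends, the first sharing self's location, after local-weight promotion and backup marking:

    # haproxy_servers(nodes_like_me[:services => :couchdb],
    #                 stunnel.clients.couch_client, couch.port) =>
    {
      "couch1" => { "backup" => false, "host" => "localhost",
                    "port" => 4000, "weight" => 100, "writable" => true },
      "couch2" => { "backup" => true,  "host" => "localhost",
                    "port" => 4001, "weight" => 10,  "writable" => true }
    }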
diff --git a/provider_base/lib/macros/hosts.rb b/provider_base/lib/macros/hosts.rb
new file mode 100644
index 00000000..8a4058a5
--- /dev/null
+++ b/provider_base/lib/macros/hosts.rb
@@ -0,0 +1,63 @@
+# encoding: utf-8
+
+module LeapCli
+ module Macro
+
+ ##
+ ## HOSTS
+ ##
+
+ #
+ # records the list of hosts that are encountered for this node
+ #
+ def hostnames(nodes)
+ @referenced_nodes ||= Config::ObjectList.new
+ nodes = listify(nodes)
+ nodes.each_node do |node|
+ @referenced_nodes[node.name] ||= node
+ end
+ return nodes.values.collect {|node| node.domain.name}
+ end
+
+ #
+ # Generates entries needed for updating /etc/hosts on a node (as a hash).
+ #
+ # Argument `nodes` can be nil or a list of nodes. If nil, only include the
+ # IPs of the other nodes this @node has encountered (plus all mx nodes).
+ #
+ # Also, for virtual machines, we use the local address if this @node is in
+ # the same location as the node in question.
+ #
+ # We include the ssh public key for each host, so that the hash can also
+ # be used to generate the /etc/ssh/known_hosts file.
+ #
+ def hosts_file(nodes=nil)
+ if nodes.nil?
+ if @referenced_nodes && @referenced_nodes.any?
+ nodes = @referenced_nodes
+ nodes = nodes.merge(nodes_like_me[:services => 'mx']) # all nodes always need to communicate with mx nodes.
+ end
+ end
+ return {} unless nodes
+ hosts = {}
+ my_location = @node['location'] ? @node['location']['name'] : nil
+ nodes.each_node do |node|
+ hosts[node.name] = {'ip_address' => node.ip_address, 'domain_internal' => node.domain.internal, 'domain_full' => node.domain.full}
+ node_location = node['location'] ? node['location']['name'] : nil
+ if my_location == node_location
+ if facts = @node.manager.facts[node.name]
+ if facts['ec2_public_ipv4']
+ hosts[node.name]['ip_address'] = facts['ec2_public_ipv4']
+ end
+ end
+ end
+ host_pub_key = Util::read_file([:node_ssh_pub_key,node.name])
+ if host_pub_key
+ hosts[node.name]['host_pub_key'] = host_pub_key
+ end
+ end
+ hosts
+ end
+
+ end
+end
\ No newline at end of file
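An illustrative hosts_file entry (all values invented); the same hash drives both /etc/hosts and /etc/ssh/known_hosts generation:

    {
      "couch1" => {
        "ip_address"      => "198.51.100.7",     # swapped for ec2_public_ipv4 when co-located
        "domain_internal" => "couch1.example.i",
        "domain_full"     => "couch1.example.org",
        "host_pub_key"    => "ssh-rsa AAAA..."   # present only if a key is on file
      }
    }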
diff --git a/provider_base/lib/macros/nodes.rb b/provider_base/lib/macros/nodes.rb
new file mode 100644
index 00000000..0c6668a0
--- /dev/null
+++ b/provider_base/lib/macros/nodes.rb
@@ -0,0 +1,88 @@
+# encoding: utf-8
+
+##
+## node related macros
+##
+
+module LeapCli
+ module Macro
+
+ #
+ # the list of all the nodes
+ #
+ def nodes
+ global.nodes
+ end
+
+ #
+ # grab an environment-appropriate provider
+ #
+ def provider
+ global.env(@node.environment).provider
+ end
+
+ #
+ # returns a list of nodes that match the same environment
+ #
+ # if @node.environment is not set, we return other nodes
+ # where environment is not set.
+ #
+ def nodes_like_me
+ nodes[:environment => @node.environment]
+ end
+
+ #
+ # returns a list of nodes that match the location name
+ # and environment of @node.
+ #
+ def nodes_near_me
+ if @node['location'] && @node['location']['name']
+ nodes_like_me['location.name' => @node.location.name]
+ else
+ nodes_like_me['location' => nil]
+ end
+ end
+
+ #
+ #
+ # picks a node out from the node list in such a way that:
+ #
+ # (1) which nodes picked which nodes is saved in secrets.json
+ # (2) when other nodes call this macro with the same node list, they are guaranteed to get a different node
+ # (3) if all the nodes in the pick_node list have been picked, remaining nodes are distributed randomly.
+ #
+ # if the node_list is empty, an exception is raised.
+ # if node_list size is 1, then that node is returned and nothing is
+ # memorized via the secrets.json file.
+ #
+ # `label` is needed to distinguish between pools of nodes for different purposes.
+ #
+ # TODO: more evenly balance after all the nodes have been picked.
+ #
+ def pick_node(label, node_list)
+ if node_list.any?
+ if node_list.size == 1
+ return node_list.values.first
+ else
+ secrets_key = "pick_node(:#{label},#{node_list.keys.sort.join(',')})"
+ secrets_value = @manager.secrets.retrieve(secrets_key, @node.environment) || {}
+ secrets_value[@node.name] ||= begin
+ node_to_pick = nil
+ node_list.each_node do |node|
+ next if secrets_value.values.include?(node.name)
+ node_to_pick = node.name
+ end
+ node_to_pick ||= secrets_value.values.shuffle.first # all picked already, so pick a random one.
+ node_to_pick
+ end
+ picked_node_name = secrets_value[@node.name]
+ @manager.secrets.set(secrets_key, secrets_value, @node.environment)
+ return node_list[picked_node_name]
+ end
+ else
+ raise ArgumentError.new('pick_node(node_list): node_list cannot be empty')
+ end
+ end
+
+ end
+end
\ No newline at end of file
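Usage sketch, matching the call sites added elsewhere in this diff (_couchdb_mirror.json, obfsproxy.json):

    # Each calling node is assigned one master, memorized in secrets.json under
    # a key like "pick_node(:couch_master,couch1,couch2)", so re-evaluation is
    # stable while sibling nodes are steered toward not-yet-picked masters.
    master = pick_node(:couch_master,
                       nodes_near_me['services' => 'couchdb']['couch.master' => true])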
diff --git a/provider_base/lib/macros/secrets.rb b/provider_base/lib/macros/secrets.rb
new file mode 100644
index 00000000..51bf3971
--- /dev/null
+++ b/provider_base/lib/macros/secrets.rb
@@ -0,0 +1,39 @@
+# encoding: utf-8
+
+require 'base32'
+
+module LeapCli
+ module Macro
+
+ #
+ # inserts a named secret, generating it if needed.
+ #
+ # manager.export_secrets should be called later to capture any newly generated secrets.
+ #
+ # +length+ is the character length of the generated password.
+ #
+ def secret(name, length=32)
+ @manager.secrets.set(name, Util::Secret.generate(length), @node[:environment])
+ end
+
+ # inserts a base32 encoded secret
+ def base32_secret(name, length=20)
+ @manager.secrets.set(name, Base32.encode(Util::Secret.generate(length)), @node[:environment])
+ end
+
+ # Picks a random obfsproxy port from given range
+ def rand_range(name, range)
+ @manager.secrets.set(name, rand(range), @node[:environment])
+ end
+
+ #
+ # inserts a hexadecimal secret string, generating it if needed.
+ #
+ # +bit_length+ is the number of bits in the secret (i.e. the resulting hex string is bit_length/4 characters long)
+ #
+ def hex_secret(name, bit_length=128)
+ @manager.secrets.set(name, Util::Secret.generate_hex(bit_length), @node[:environment])
+ end
+
+ end
+end
\ No newline at end of file
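Usage sketch, as these macros appear in JSON values elsewhere in this diff:

    #   "password": "= secret :couch_replication_password"           # 32 chars
    #   "salt":     "= hex_secret :couch_webapp_password_salt, 128"  # 128 bits -> 32 hex chars
    #   "port":     "= rand_range('scramblesuit_port_'+name, 18000..32000)"
    # Values are generated once per environment and reused on later runs;
    # manager.export_secrets must be called afterwards to persist new ones.
    pw = secret(:couch_replication_password)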
diff --git a/provider_base/lib/macros/stunnel.rb b/provider_base/lib/macros/stunnel.rb
new file mode 100644
index 00000000..f16308c7
--- /dev/null
+++ b/provider_base/lib/macros/stunnel.rb
@@ -0,0 +1,95 @@
+##
+## STUNNEL
+##
+
+#
+# About stunnel
+# --------------------------
+#
+# The network looks like this:
+#
+# From the client's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# consumer app -> localhost:accept_port -> connect:connect_port -> ??
+#
+# From the server's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# ?? -> *:accept_port -> localhost:connect_port -> service
+#
+
+module LeapCli
+ module Macro
+
+ #
+ # stunnel configuration for the client side.
+ #
+ # +node_list+ is an ObjectList of nodes running stunnel servers.
+ #
+ # +port+ is the real port of the ultimate service running on the servers
+ # that the client wants to connect to.
+ #
+ # * accept_port is the port on localhost to which local clients
+ # can connect. it is auto-generated serially.
+ #
+ # * connect_port is the port on the stunnel server to connect to.
+ # it is auto-generated from the +port+ argument.
+ #
+ # generates an entry appropriate to be passed directly to
+ # create_resources(stunnel::service, hiera('..'), defaults)
+ #
+ # local ports are automatically generated, starting at 4000
+ # and incrementing in sorted order (by node name).
+ #
+ def stunnel_client(node_list, port, options={})
+ @next_stunnel_port ||= 4000
+ node_list = listify(node_list)
+ hostnames(node_list) # record the hosts
+ result = Config::ObjectList.new
+ node_list.each_node do |node|
+ if node.name != self.name || options[:include_self]
+ result["#{node.name}_#{port}"] = Config::Object[
+ 'accept_port', @next_stunnel_port,
+ 'connect', node.domain.internal,
+ 'connect_port', stunnel_port(port),
+ 'original_port', port
+ ]
+ @next_stunnel_port += 1
+ end
+ end
+ result
+ end
+
+ #
+ # generates a stunnel server entry.
+ #
+ # +port+ is the real port of the targeted service.
+ #
+ # * `accept_port` is the publicly bound port
+ # * `connect_port` is the port that the local service is running on.
+ #
+ def stunnel_server(port)
+ {
+ "accept_port" => stunnel_port(port),
+ "connect_port" => port
+ }
+ end
+
+ private
+
+ #
+ # maps a real port to a stunnel port (used as the connect_port in the client config
+ # and the accept_port in the server config)
+ #
+ def stunnel_port(port)
+ port = port.to_i
+ if port < 50000
+ return port + 10000
+ else
+ return port - 10000
+ end
+ end
+
+ end
+end
\ No newline at end of file
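A worked example of the mapping, using couchdb's port 5984 from couchdb.json:

    stunnel_port(5984)    # => 15984  (port < 50000, so +10000)
    stunnel_port(60000)   # => 50000  (port >= 50000, so -10000)

    # Hence stunnel_server(5984) yields
    #   { "accept_port" => 15984, "connect_port" => 5984 }
    # while each client entry accepts on a serially allocated localhost port
    # (4000, 4001, ...) and connects out to the server's 15984.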
diff --git a/provider_base/services/_couchdb_master.json b/provider_base/services/_couchdb_master.json
new file mode 100644
index 00000000..20c6f99b
--- /dev/null
+++ b/provider_base/services/_couchdb_master.json
@@ -0,0 +1,8 @@
+//
+// Applied to master couchdb node when there is a single master
+//
+{
+ "couch": {
+ "mode": "master"
+ }
+}
\ No newline at end of file
diff --git a/provider_base/services/_couchdb_mirror.json b/provider_base/services/_couchdb_mirror.json
new file mode 100644
index 00000000..6a3402bd
--- /dev/null
+++ b/provider_base/services/_couchdb_mirror.json
@@ -0,0 +1,21 @@
+//
+// Applied to all non-master couchdb nodes
+//
+{
+ "stunnel": {
+ "clients": {
+ "couch_client": "= stunnel_client(nodes[couch.replication.masters.keys], couch.port)"
+ }
+ },
+ "couch": {
+ "mode": "mirror",
+ "replication": {
+ // for now, pick the first close one, or the first one.
+ // in the future, maybe use haproxy to balance among all the masters
+ "masters": "= try{pick_node(:couch_master,nodes_near_me['services' => 'couchdb']['couch.master' => true]).pick_fields('domain.internal', 'couch.port')} || try{pick_node(:couch_master,nodes_like_me['services' => 'couchdb']['couch.master' => true]).pick_fields('domain.internal', 'couch.port')}",
+ "username": "replication",
+ "password": "= secret :couch_replication_password",
+ "role": "replication"
+ }
+ }
+}
diff --git a/provider_base/services/_couchdb_multimaster.json b/provider_base/services/_couchdb_multimaster.json
new file mode 100644
index 00000000..8c433188
--- /dev/null
+++ b/provider_base/services/_couchdb_multimaster.json
@@ -0,0 +1,24 @@
+//
+// Only applied to master couchdb nodes when there are multiple masters
+//
+{
+ "stunnel": {
+ "servers": {
+ "epmd_server": "= stunnel_server(couch.bigcouch.epmd_port)",
+ "ednp_server": "= stunnel_server(couch.bigcouch.ednp_port)"
+ },
+ "clients": {
+ "epmd_clients": "= stunnel_client(nodes_like_me[:services => :couchdb], couch.bigcouch.epmd_port)",
+ "ednp_clients": "= stunnel_client(nodes_like_me[:services => :couchdb], couch.bigcouch.ednp_port)"
+ }
+ },
+ "couch": {
+ "mode": "multimaster",
+ "bigcouch": {
+ "epmd_port": 4369,
+ "ednp_port": 9002,
+ "cookie": "= secret :bigcouch_cookie",
+ "neighbors": "= nodes_like_me['services' => 'couchdb']['couch.master' => true].exclude(self).field('domain.full')"
+ }
+ }
+}
diff --git a/provider_base/services/couchdb.json b/provider_base/services/couchdb.json
index 5f1b5381..8b1386f8 100644
--- a/provider_base/services/couchdb.json
+++ b/provider_base/services/couchdb.json
@@ -3,20 +3,13 @@
"use": true
},
"stunnel": {
- "couch_server": "= stunnel_server(couch.port)",
- "epmd_server": "= stunnel_server(couch.bigcouch.epmd_port)",
- "epmd_clients": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.bigcouch.epmd_port)",
- "ednp_server": "= stunnel_server(couch.bigcouch.ednp_port)",
- "ednp_clients": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.bigcouch.ednp_port)"
+ "servers": {
+ "couch_server": "= stunnel_server(couch.port)"
+ }
},
"couch": {
+ "master": false,
"port": 5984,
- "bigcouch": {
- "epmd_port": 4369,
- "ednp_port": 9002,
- "cookie": "= secret :bigcouch_cookie",
- "neighbors": "= nodes_like_me[:services => :couchdb].exclude(self).field('domain.full')"
- },
"users": {
"admin": {
"username": "admin",
@@ -47,10 +40,15 @@
"username": "webapp",
"password": "= secret :couch_webapp_password",
"salt": "= hex_secret :couch_webapp_password_salt, 128"
+ },
+ "replication": {
+ "username": "replication",
+ "password": "= secret :couch_replication_password",
+ "salt": "= hex_secret :couch_replication_password_salt, 128"
}
},
- "webapp": {
- "nagios_test_pw": "= secret :nagios_test_password"
+ "webapp": {
+ "nagios_test_pw": "= secret :nagios_test_password"
}
}
}
diff --git a/provider_base/services/couchdb.rb b/provider_base/services/couchdb.rb
new file mode 100644
index 00000000..3bee3a67
--- /dev/null
+++ b/provider_base/services/couchdb.rb
@@ -0,0 +1,60 @@
+#######################################################################
+###
+### NOTE!
+###
+### Currently, mirrors do not work! The only thing that works is all
+### nodes multimaster or a single master.
+###
+#######################################################################
+#
+# custom logic for couchdb json resolution
+# ============================================
+#
+# There are three modes for a node:
+#
+# Multimaster
+# -----------
+#
+# Multimaster uses bigcouch (soon to use couchdb in replication mode
+# similar to bigcouch).
+#
+# Use "multimaster" mode when:
+#
+# * multiple nodes are marked couch.master
+# * OR no nodes are marked couch.master
+#
+# Master
+# ------
+#
+# Master uses plain couchdb that is readable and writable.
+#
+# Use "master" mode when:
+#
+# * Exactly one node, this one, is marked as master.
+#
+# Mirror
+# ------
+#
+# Mirror creates a read-only copy of the database. It uses plain couchdb
+# with legacy couchdb replication (http based).
+#
+# This does not currently work, because http replication can't handle
+# the number of user databases.
+#
+# Use "mirror" mode when:
+#
+# * some nodes are marked couch.master
+# * AND this node is not a master
+#
+
+master_count = nodes_like_me['services' => 'couchdb']['couch.master' => true].size
+
+if master_count == 0
+ apply_partial 'services/_couchdb_multimaster.json'
+elsif couch.master && master_count > 1
+ apply_partial 'services/_couchdb_multimaster.json'
+elsif couch.master && master_count == 1
+ apply_partial 'services/_couchdb_master.json'
+else
+ apply_partial 'services/_couchdb_mirror.json'
+end
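The resulting mode assignment, summarized by scenario:

    # nodes marked couch.master | this node marked? | partial applied
    #   0                       |  n/a              | _couchdb_multimaster.json
    #   2 or more               |  yes              | _couchdb_multimaster.json
    #   exactly 1               |  yes              | _couchdb_master.json
    #   1 or more               |  no               | _couchdb_mirror.json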
diff --git a/provider_base/services/monitor.json b/provider_base/services/monitor.json
index 03f6c6d1..c24724bf 100644
--- a/provider_base/services/monitor.json
+++ b/provider_base/services/monitor.json
@@ -12,11 +12,9 @@
},
"x509": {
"use": true,
+ "use_commercial": true,
"ca_cert": "= file :ca_cert, :missing => 'provider CA. Run `leap cert ca`'",
"client_ca_cert": "= file :client_ca_cert, :missing => 'Certificate Authority. Run `leap cert ca`'",
- "client_ca_key": "= file :client_ca_key, :missing => 'Certificate Authority. Run `leap cert ca`'",
- "commercial_cert": "= file [:commercial_cert, domain.full_suffix]",
- "commercial_key": "= file [:commercial_key, domain.full_suffix]",
- "commercial_ca_cert": "= try_file :commercial_ca_cert"
+ "client_ca_key": "= file :client_ca_key, :missing => 'Certificate Authority. Run `leap cert ca`'"
}
}
diff --git a/provider_base/services/mx.json b/provider_base/services/mx.json
index 731dee9a..11293ae8 100644
--- a/provider_base/services/mx.json
+++ b/provider_base/services/mx.json
@@ -1,9 +1,14 @@
{
"stunnel": {
- "couch_client": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.port)"
+ "clients": {
+ "couch_client": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.port)"
+ }
},
"haproxy": {
- "servers": "= haproxy_servers(nodes_like_me[:services => :couchdb], stunnel.couch_client)"
+ "couch": {
+ "listen_port": 4096,
+ "servers": "= haproxy_servers(nodes_like_me[:services => :couchdb], stunnel.clients.couch_client, global.services[:couchdb].couch.port)"
+ }
},
"couchdb_leap_mx_user": {
"username": "= global.services[:couchdb].couch.users[:leap_mx].username",
@@ -13,12 +18,10 @@
"mynetworks": "= nodes['environment' => '!local'].map{|name, n| [n.ip_address, (global.facts[name]||{})['ec2_public_ipv4']]}.flatten.compact.uniq",
"x509": {
"use": true,
+ "use_commercial": true,
"ca_cert": "= file :ca_cert, :missing => 'provider CA. Run `leap cert ca`'",
"client_ca_cert": "= file :client_ca_cert, :missing => 'Certificate Authority. Run `leap cert ca`'",
- "client_ca_key": "= file :client_ca_key, :missing => 'Certificate Authority. Run `leap cert ca`'",
- "commercial_cert": "= file [:commercial_cert, domain.full_suffix]",
- "commercial_key": "= file [:commercial_key, domain.full_suffix]",
- "commercial_ca_cert": "= try_file :commercial_ca_cert"
+ "client_ca_key": "= file :client_ca_key, :missing => 'Certificate Authority. Run `leap cert ca`'"
},
"service_type": "user_service"
}
diff --git a/provider_base/services/obfsproxy.json b/provider_base/services/obfsproxy.json
new file mode 100644
index 00000000..979d0ef9
--- /dev/null
+++ b/provider_base/services/obfsproxy.json
@@ -0,0 +1,9 @@
+{
+ "obfsproxy": {
+ "scramblesuit": {
+ "password": "= base32_secret('scramblesuit_password_'+name)",
+ "port" : "= rand_range('scramblesuit_port_'+name, 18000..32000)"
+ },
+ "gateway_address": "= try{pick_node(:obfs_gateway,nodes_near_me['services' => 'openvpn']).pick_fields('openvpn.gateway_address')} || try{pick_node(:obfs_gateway,nodes_like_me['services' => 'openvpn']).pick_fields('openvpn.gateway_address')}"
+ }
+}
diff --git a/provider_base/services/openvpn.json b/provider_base/services/openvpn.json
index 090afcd6..1906244c 100644
--- a/provider_base/services/openvpn.json
+++ b/provider_base/services/openvpn.json
@@ -26,5 +26,12 @@
"keepalive": "10 30",
"tun-ipv6": true
}
+ },
+ "obfsproxy": {
+ "scramblesuit": {
+ "password": "= base32_secret('scramblesuit_password_'+name)",
+ "port" : "= rand_range('scramblesuit_port_'+name, 18000..32000)"
+ },
+ "gateway_address": "= openvpn.gateway_address"
}
}
diff --git a/provider_base/services/webapp.json b/provider_base/services/webapp.json
index bbb52094..3af0dade 100644
--- a/provider_base/services/webapp.json
+++ b/provider_base/services/webapp.json
@@ -13,6 +13,7 @@
"allow_limited_certs": "= provider.service.allow_limited_bandwidth",
"allow_unlimited_certs": "= provider.service.allow_unlimited_bandwidth",
"allow_anonymous_certs": "= provider.service.allow_anonymous",
+ "allow_registration": "= provider.service.allow_registration",
"default_service_level": "= provider.service.default_service_level",
"service_levels": "= provider.service.levels",
"secret_token": "= secret :webapp_secret_token",
@@ -26,13 +27,21 @@
"nagios_test_user": {
"username": "nagios_test",
"password": "= secret :nagios_test_password"
- }
+ },
+ "engines": [
+ "support"
+ ]
},
"stunnel": {
- "couch_client": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.port)"
+ "clients": {
+ "couch_client": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.port)"
+ }
},
"haproxy": {
- "servers": "= haproxy_servers(nodes_like_me[:services => :couchdb], stunnel.couch_client, global.services[:couchdb].couch.port)"
+ "couch": {
+ "listen_port": 4096,
+ "servers": "= haproxy_servers(nodes_like_me[:services => :couchdb], stunnel.clients.couch_client, global.services[:couchdb].couch.port)"
+ }
},
"definition_files": {
"provider": "= file :provider_json_template",
@@ -59,11 +68,9 @@
},
"x509": {
"use": true,
+ "use_commercial": true,
"ca_cert": "= file :ca_cert, :missing => 'provider CA. Run `leap cert ca`'",
- "client_ca_cert": "= file :client_ca_cert, :missing => 'Certificate Authority. Run `leap cert ca`'",
- "client_ca_key": "= file :client_ca_key, :missing => 'Certificate Authority. Run `leap cert ca`'",
- "commercial_cert": "= file [:commercial_cert, webapp.domain]",
- "commercial_key": "= file [:commercial_key, webapp.domain]",
- "commercial_ca_cert": "= try_file :commercial_ca_cert"
+ "client_ca_cert": "= file :client_ca_cert, :missing => 'Certificate Authority. Run `leap cert ca`.'",
+ "client_ca_key": "= file :client_ca_key, :missing => 'Certificate Authority. Run `leap cert ca`.'"
}
}
diff --git a/puppet/manifests/site.pp b/puppet/manifests/site.pp
index 9afa5dfd..57942d99 100644
--- a/puppet/manifests/site.pp
+++ b/puppet/manifests/site.pp
@@ -10,6 +10,7 @@ notice("Services for ${fqdn}: ${services_str}")
if member($services, 'openvpn') {
include site_openvpn
+ include site_obfsproxy
}
if member($services, 'couchdb') {
@@ -42,4 +43,8 @@ if member($services, 'static') {
include site_static
}
+if member($services, 'obfsproxy') {
+ include site_obfsproxy
+}
+
include site_config::packages::uninstall
diff --git a/puppet/modules/couchdb b/puppet/modules/couchdb
-Subproject c8f5443e0998d3d3d43505ff5a6fdf8c438d6c2
+Subproject f01b3586215bdc10f0067fa0f6d940be8e88bce
diff --git a/puppet/modules/obfsproxy/files/obfsproxy_init b/puppet/modules/obfsproxy/files/obfsproxy_init
new file mode 100755
index 00000000..01c8013a
--- /dev/null
+++ b/puppet/modules/obfsproxy/files/obfsproxy_init
@@ -0,0 +1,93 @@
+#!/bin/sh
+
+### BEGIN INIT INFO
+# Provides: obfsproxy daemon
+# Required-Start: $remote_fs $syslog
+# Required-Stop: $remote_fs $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: obfsproxy daemon
+# Description: obfsproxy daemon
+### END INIT INFO
+
+. /lib/lsb/init-functions
+
+DAEMON=/usr/bin/obfsproxy
+NAME=obfsproxy
+DESC="obfsproxy daemon"
+USER=obfsproxy
+DATDIR=/etc/obfsproxy
+PIDFILE=/var/run/obfsproxy.pid
+CONF=$DATDIR/obfsproxy.conf
+LOGFILE=/var/log/obfsproxy.log
+
+# If the daemon is not there, then exit.
+test -x $DAEMON || exit 0
+
+if [ -f $CONF ] ; then
+ . $CONF
+else
+ echo "Obfsproxy configuration file is missing, aborting..."
+ exit 2
+fi
+
+DAEMONARGS=" --log-min-severity=$LOG --log-file=$LOGFILE --data-dir=$DATDIR \
+ $TRANSPORT $PARAM --dest=$DEST_IP:$DEST_PORT server $BINDADDR:$PORT"
+
+start_obfsproxy() {
+ start-stop-daemon --start --quiet --oknodo -m --pidfile $PIDFILE \
+ -b -c $USER --startas $DAEMON --$DAEMONARGS
+}
+
+stop_obfsproxy() {
+ start-stop-daemon --stop --quiet --oknodo --pidfile $PIDFILE
+}
+
+status_obfsproxy() {
+ status_of_proc -p $PIDFILE $DAEMON $NAME
+}
+
+case $1 in
+ start)
+ if [ -e $PIDFILE ]; then
+ status_obfsproxy
+ if [ $? = "0" ]; then
+ exit
+ fi
+ fi
+ log_begin_msg "Starting $DESC"
+ start_obfsproxy
+ log_end_msg $?
+ ;;
+ stop)
+ if [ -e $PIDFILE ]; then
+ status_obfsproxy
+ if [ $? = "0" ]; then
+ log_begin_msg "Stopping $DESC"
+ stop_obfsproxy
+ rm -f $PIDFILE
+ log_end_msg $?
+ fi
+ else
+ status_obfsproxy
+ fi
+ ;;
+ restart)
+ $0 stop && sleep 2 && $0 start
+ ;;
+ status)
+ status_obfsproxy
+ ;;
+ reload)
+ if [ -e $PIDFILE ]; then
+ start-stop-daemon --stop --signal USR1 --quiet --pidfile $PIDFILE --name $NAME
+ log_success_msg "$DESC reloaded successfully"
+ else
+ log_failure_msg "$PIDFILE does not exist"
+ fi
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|restart|reload|status}"
+ exit 2
+ ;;
+esac
diff --git a/puppet/modules/obfsproxy/files/obfsproxy_logrotate b/puppet/modules/obfsproxy/files/obfsproxy_logrotate
new file mode 100644
index 00000000..e5679d0c
--- /dev/null
+++ b/puppet/modules/obfsproxy/files/obfsproxy_logrotate
@@ -0,0 +1,14 @@
+/var/log/obfsproxy.log {
+ daily
+ missingok
+ rotate 3
+ compress
+ delaycompress
+ notifempty
+ create 600 obfsproxy obfsproxy
+ postrotate
+ if [ -f /var/run/obfsproxy.pid ]; then
+ /etc/init.d/obfsproxy restart > /dev/null
+ fi
+ endscript
+}
diff --git a/puppet/modules/obfsproxy/manifests/init.pp b/puppet/modules/obfsproxy/manifests/init.pp
new file mode 100644
index 00000000..61714fdf
--- /dev/null
+++ b/puppet/modules/obfsproxy/manifests/init.pp
@@ -0,0 +1,86 @@
+class obfsproxy (
+ $transport,
+ $bind_address,
+ $port,
+ $param,
+ $dest_ip,
+ $dest_port,
+ $log_level = 'info'
+){
+
+ $user = 'obfsproxy'
+ $conf = '/etc/obfsproxy/obfsproxy.conf'
+
+ user { $user:
+ ensure => present,
+ system => true,
+ gid => $user,
+ }
+
+ group { $user:
+ ensure => present,
+ system => true,
+ }
+
+ file { '/etc/init.d/obfsproxy':
+ path => '/etc/init.d/obfsproxy',
+ ensure => present,
+ source => 'puppet:///modules/obfsproxy/obfsproxy_init',
+ owner => 'root',
+ group => 'root',
+ mode => '0750',
+ require => File[$conf],
+ }
+
+ file { $conf :
+ path => $conf,
+ ensure => present,
+ owner => 'root',
+ group => 'root',
+ mode => '0600',
+ content => template('obfsproxy/etc_conf.erb'),
+ }
+
+ file { '/etc/obfsproxy':
+ ensure => directory,
+ owner => $user,
+ group => $user,
+ mode => '0700',
+ require => User[$user],
+ }
+
+ file { '/var/log/obfsproxy.log':
+ ensure => present,
+ owner => $user,
+ group => $user,
+ mode => '0640',
+ require => User[$user],
+ }
+
+ file { '/etc/logrotate.d/obfsproxy':
+ ensure => present,
+ source => 'puppet:///modules/obfsproxy/obfsproxy_logrotate',
+ owner => 'root',
+ group => 'root',
+ mode => '0644',
+ require => File['/var/log/obfsproxy.log'],
+ }
+
+ package { 'obfsproxy':
+ ensure => present,
+ require => Class['site_apt::preferences::obfsproxy'],
+ }
+
+ service { 'obfsproxy':
+ ensure => running,
+ subscribe => File[$conf],
+ require => [
+ Package['obfsproxy'],
+ File['/etc/init.d/obfsproxy'],
+ User[$user],
+ Group[$user]]
+ }
+
+
+}
+
diff --git a/puppet/modules/obfsproxy/templates/etc_conf.erb b/puppet/modules/obfsproxy/templates/etc_conf.erb
new file mode 100644
index 00000000..8959ef78
--- /dev/null
+++ b/puppet/modules/obfsproxy/templates/etc_conf.erb
@@ -0,0 +1,11 @@
+TRANSPORT=<%= @transport %>
+PORT=<%= @port %>
+DEST_IP=<%= @dest_ip %>
+DEST_PORT=<%= @dest_port %>
+<% if @transport == "scramblesuit" -%>
+PARAM=--password=<%= @param %>
+<% else -%>
+PARAM=<%= @param %>
+<% end -%>
+LOG=<%= @log_level %>
+BINDADDR=<%= @bind_address %>
diff --git a/puppet/modules/site_apt/manifests/preferences/obfsproxy.pp b/puppet/modules/site_apt/manifests/preferences/obfsproxy.pp
new file mode 100644
index 00000000..75b01956
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/preferences/obfsproxy.pp
@@ -0,0 +1,9 @@
+class site_apt::preferences::obfsproxy {
+
+ apt::preferences_snippet { 'obfsproxy':
+ package => 'obfsproxy',
+ release => 'wheezy-backports',
+ priority => 999;
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/add_users.pp b/puppet/modules/site_couchdb/manifests/add_users.pp
index f9ea7349..2f734ed4 100644
--- a/puppet/modules/site_couchdb/manifests/add_users.pp
+++ b/puppet/modules/site_couchdb/manifests/add_users.pp
@@ -1,5 +1,8 @@
class site_couchdb::add_users {
+ Class['site_couchdb::create_dbs']
+ -> Class['site_couchdb::add_users']
+
# Couchdb users
## leap_mx couchdb user
@@ -51,4 +54,13 @@ class site_couchdb::add_users {
require => Couchdb::Query::Setup['localhost']
}
+ ## replication couchdb user
+ ## read/write: all databases for replication
+ couchdb::add_user { $site_couchdb::couchdb_replication_user:
+ roles => '["replication"]',
+ pw => $site_couchdb::couchdb_replication_pw,
+ salt => $site_couchdb::couchdb_replication_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
}
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch.pp b/puppet/modules/site_couchdb/manifests/bigcouch.pp
new file mode 100644
index 00000000..f0aab734
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/bigcouch.pp
@@ -0,0 +1,34 @@
+class site_couchdb::bigcouch {
+
+ $config = $couchdb_config['bigcouch']
+ $cookie = $config['cookie']
+ $ednp_port = $config['ednp_port']
+
+ class { 'couchdb':
+ admin_pw => $couchdb_admin_pw,
+ admin_salt => $couchdb_admin_salt,
+ bigcouch => true,
+ bigcouch_cookie => $cookie,
+ ednp_port => $ednp_port,
+ chttpd_bind_address => '127.0.0.1'
+ }
+
+ #
+ # stunnel must be running correctly before bigcouch dbs can be set up.
+ #
+ Class['site_config::default']
+ -> Class['couchdb::bigcouch::package::cloudant']
+ -> Service['shorewall']
+ -> Service['stunnel']
+ -> Class['site_couchdb::setup']
+ -> Class['site_couchdb::bigcouch::add_nodes']
+ -> Class['site_couchdb::bigcouch::settle_cluster']
+
+ include site_couchdb::bigcouch::add_nodes
+ include site_couchdb::bigcouch::settle_cluster
+ include site_couchdb::bigcouch::compaction
+
+ file { '/var/log/bigcouch':
+ ensure => directory
+ }
+}
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp b/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp
index 97e85785..c8c43275 100644
--- a/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp
+++ b/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp
@@ -1,6 +1,6 @@
class site_couchdb::bigcouch::add_nodes {
# loop through neighbors array and add nodes
- $nodes = $::site_couchdb::bigcouch_config['neighbors']
+ $nodes = $::site_couchdb::bigcouch::config['neighbors']
couchdb::bigcouch::add_node { $nodes:
require => Couchdb::Query::Setup['localhost']
diff --git a/puppet/modules/site_couchdb/manifests/create_dbs.pp b/puppet/modules/site_couchdb/manifests/create_dbs.pp
index 41500d3a..4322f773 100644
--- a/puppet/modules/site_couchdb/manifests/create_dbs.pp
+++ b/puppet/modules/site_couchdb/manifests/create_dbs.pp
@@ -1,11 +1,14 @@
class site_couchdb::create_dbs {
+ Class['site_couchdb::setup']
+ -> Class['site_couchdb::create_dbs']
+
# Couchdb databases
### customer database
### r/w: webapp,
couchdb::create_db { 'customers':
- members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [] }",
+ members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [\"replication\"] }",
require => Couchdb::Query::Setup['localhost']
}
@@ -13,35 +16,35 @@ class site_couchdb::create_dbs {
## r: nickserver, leap_mx - needs to be restricted with a design document
## r/w: webapp
couchdb::create_db { 'identities':
- members => "{ \"names\": [], \"roles\": [\"identities\"] }",
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"identities\"] }",
require => Couchdb::Query::Setup['localhost']
}
## keycache database
## r/w: nickserver
couchdb::create_db { 'keycache':
- members => "{ \"names\": [], \"roles\": [\"keycache\"] }",
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"keycache\"] }",
require => Couchdb::Query::Setup['localhost']
}
## sessions database
## r/w: webapp
couchdb::create_db { 'sessions':
- members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [] }",
+ members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [\"replication\"] }",
require => Couchdb::Query::Setup['localhost']
}
## shared database
## r/w: soledad
couchdb::create_db { 'shared':
- members => "{ \"names\": [\"$site_couchdb::couchdb_soledad_user\"], \"roles\": [] }",
+ members => "{ \"names\": [\"$site_couchdb::couchdb_soledad_user\"], \"roles\": [\"replication\"] }",
require => Couchdb::Query::Setup['localhost']
}
## tickets database
## r/w: webapp
couchdb::create_db { 'tickets':
- members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [] }",
+ members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [\"replication\"] }",
require => Couchdb::Query::Setup['localhost']
}
@@ -49,14 +52,14 @@ class site_couchdb::create_dbs {
## r: soledad - needs to be restricted with a design document
## r/w: webapp
couchdb::create_db { 'tokens':
- members => "{ \"names\": [], \"roles\": [\"tokens\"] }",
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"tokens\"] }",
require => Couchdb::Query::Setup['localhost']
}
## users database
## r/w: webapp
couchdb::create_db { 'users':
- members => "{ \"names\": [], \"roles\": [\"users\"] }",
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"users\"] }",
require => Couchdb::Query::Setup['localhost']
}
@@ -64,7 +67,7 @@ class site_couchdb::create_dbs {
## store messages to the clients such as payment reminders
## r/w: webapp
couchdb::create_db { 'messages':
- members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [] }",
+ members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [\"replication\"] }",
require => Couchdb::Query::Setup['localhost']
}
}
diff --git a/puppet/modules/site_couchdb/manifests/init.pp b/puppet/modules/site_couchdb/manifests/init.pp
index 3614661d..5a4fb936 100644
--- a/puppet/modules/site_couchdb/manifests/init.pp
+++ b/puppet/modules/site_couchdb/manifests/init.pp
@@ -1,118 +1,68 @@
class site_couchdb {
tag 'leap_service'
- $couchdb_config = hiera('couch')
- $couchdb_users = $couchdb_config['users']
-
- $couchdb_admin = $couchdb_users['admin']
- $couchdb_admin_user = $couchdb_admin['username']
- $couchdb_admin_pw = $couchdb_admin['password']
- $couchdb_admin_salt = $couchdb_admin['salt']
-
- $couchdb_leap_mx = $couchdb_users['leap_mx']
- $couchdb_leap_mx_user = $couchdb_leap_mx['username']
- $couchdb_leap_mx_pw = $couchdb_leap_mx['password']
- $couchdb_leap_mx_salt = $couchdb_leap_mx['salt']
-
- $couchdb_nickserver = $couchdb_users['nickserver']
- $couchdb_nickserver_user = $couchdb_nickserver['username']
- $couchdb_nickserver_pw = $couchdb_nickserver['password']
- $couchdb_nickserver_salt = $couchdb_nickserver['salt']
-
- $couchdb_soledad = $couchdb_users['soledad']
- $couchdb_soledad_user = $couchdb_soledad['username']
- $couchdb_soledad_pw = $couchdb_soledad['password']
- $couchdb_soledad_salt = $couchdb_soledad['salt']
-
- $couchdb_tapicero = $couchdb_users['tapicero']
- $couchdb_tapicero_user = $couchdb_tapicero['username']
- $couchdb_tapicero_pw = $couchdb_tapicero['password']
- $couchdb_tapicero_salt = $couchdb_tapicero['salt']
-
- $couchdb_webapp = $couchdb_users['webapp']
- $couchdb_webapp_user = $couchdb_webapp['username']
- $couchdb_webapp_pw = $couchdb_webapp['password']
- $couchdb_webapp_salt = $couchdb_webapp['salt']
-
- $couchdb_backup = $couchdb_config['backup']
-
- $bigcouch_config = $couchdb_config['bigcouch']
- $bigcouch_cookie = $bigcouch_config['cookie']
-
- $ednp_port = $bigcouch_config['ednp_port']
-
- class { 'couchdb':
- bigcouch => true,
- admin_pw => $couchdb_admin_pw,
- admin_salt => $couchdb_admin_salt,
- bigcouch_cookie => $bigcouch_cookie,
- ednp_port => $ednp_port,
- chttpd_bind_address => '127.0.0.1'
- }
-
- # ensure that we don't have leftovers from previous installations
- # where we installed the cloudant bigcouch package
- # https://leap.se/code/issues/4971
- class { 'couchdb::bigcouch::package::cloudant':
- ensure => absent
- }
+ $couchdb_config = hiera('couch')
+ $couchdb_users = $couchdb_config['users']
+
+ $couchdb_admin = $couchdb_users['admin']
+ $couchdb_admin_user = $couchdb_admin['username']
+ $couchdb_admin_pw = $couchdb_admin['password']
+ $couchdb_admin_salt = $couchdb_admin['salt']
+
+ $couchdb_leap_mx = $couchdb_users['leap_mx']
+ $couchdb_leap_mx_user = $couchdb_leap_mx['username']
+ $couchdb_leap_mx_pw = $couchdb_leap_mx['password']
+ $couchdb_leap_mx_salt = $couchdb_leap_mx['salt']
+
+ $couchdb_nickserver = $couchdb_users['nickserver']
+ $couchdb_nickserver_user = $couchdb_nickserver['username']
+ $couchdb_nickserver_pw = $couchdb_nickserver['password']
+ $couchdb_nickserver_salt = $couchdb_nickserver['salt']
+
+ $couchdb_soledad = $couchdb_users['soledad']
+ $couchdb_soledad_user = $couchdb_soledad['username']
+ $couchdb_soledad_pw = $couchdb_soledad['password']
+ $couchdb_soledad_salt = $couchdb_soledad['salt']
+
+ $couchdb_tapicero = $couchdb_users['tapicero']
+ $couchdb_tapicero_user = $couchdb_tapicero['username']
+ $couchdb_tapicero_pw = $couchdb_tapicero['password']
+ $couchdb_tapicero_salt = $couchdb_tapicero['salt']
+
+ $couchdb_webapp = $couchdb_users['webapp']
+ $couchdb_webapp_user = $couchdb_webapp['username']
+ $couchdb_webapp_pw = $couchdb_webapp['password']
+ $couchdb_webapp_salt = $couchdb_webapp['salt']
+
+ $couchdb_replication = $couchdb_users['replication']
+ $couchdb_replication_user = $couchdb_replication['username']
+ $couchdb_replication_pw = $couchdb_replication['password']
+ $couchdb_replication_salt = $couchdb_replication['salt']
+
+ $couchdb_backup = $couchdb_config['backup']
+ $couchdb_mode = $couchdb_config['mode']
+
+ if $couchdb_mode == "multimaster" { include site_couchdb::bigcouch }
+ if $couchdb_mode == "master" { include site_couchdb::master }
+ if $couchdb_mode == "mirror" { include site_couchdb::mirror }
Class['site_config::default']
- -> Class['couchdb::bigcouch::package::cloudant']
-> Service['shorewall']
- -> Class['site_couchdb::stunnel']
- -> Service['couchdb']
- -> File['/root/.netrc']
- -> Class['site_couchdb::bigcouch::add_nodes']
- -> Class['site_couchdb::bigcouch::settle_cluster']
- -> Class['site_couchdb::create_dbs']
- -> Class['site_couchdb::add_users']
-
- # /etc/couchdb/couchdb.netrc is deployed by couchdb::query::setup
- # we symlink this to /root/.netrc for couchdb_scripts (eg. backup)
- # and makes life easier for the admin (i.e. using curl/wget without
- # passing credentials)
- file {
- '/root/.netrc':
- ensure => link,
- target => '/etc/couchdb/couchdb.netrc';
-
- '/srv/leap/couchdb':
- ensure => directory
- }
-
- couchdb::query::setup { 'localhost':
- user => $couchdb_admin_user,
- pw => $couchdb_admin_pw,
- }
-
- vcsrepo { '/srv/leap/couchdb/scripts':
- ensure => present,
- provider => git,
- source => 'https://leap.se/git/couchdb_scripts',
- revision => 'origin/master',
- require => File['/srv/leap/couchdb']
- }
-
- include site_couchdb::stunnel
- include site_couchdb::bigcouch::add_nodes
- include site_couchdb::bigcouch::settle_cluster
+ -> Service['stunnel']
+ -> Class['couchdb']
+ -> Class['site_couchdb::setup']
+
+ include site_stunnel
+
+ include site_couchdb::setup
include site_couchdb::create_dbs
include site_couchdb::add_users
include site_couchdb::designs
include site_couchdb::logrotate
- include site_couchdb::bigcouch::compaction
- if $couchdb_backup { include site_couchdb::backup }
-
- include site_shorewall::couchdb
- include site_shorewall::couchdb::bigcouch
+ if $couchdb_backup { include site_couchdb::backup }
include site_check_mk::agent::couchdb
include site_check_mk::agent::tapicero
- file { '/var/log/bigcouch':
- ensure => directory
- }
-
}
diff --git a/puppet/modules/site_couchdb/manifests/master.pp b/puppet/modules/site_couchdb/manifests/master.pp
new file mode 100644
index 00000000..a0a6633d
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/master.pp
@@ -0,0 +1,9 @@
+class site_couchdb::master {
+
+ class { 'couchdb':
+ admin_pw => $site_couchdb::couchdb_admin_pw,
+ admin_salt => $site_couchdb::couchdb_admin_salt,
+ chttpd_bind_address => '127.0.0.1'
+ }
+
+}
\ No newline at end of file
diff --git a/puppet/modules/site_couchdb/manifests/mirror.pp b/puppet/modules/site_couchdb/manifests/mirror.pp
new file mode 100644
index 00000000..abe35c4c
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/mirror.pp
@@ -0,0 +1,77 @@
+class site_couchdb::mirror {
+
+ Class['site_couchdb::add_users']
+ -> Class['site_couchdb::mirror']
+
+ class { 'couchdb':
+ admin_pw => $site_couchdb::couchdb_admin_pw,
+ admin_salt => $site_couchdb::couchdb_admin_salt,
+ chttpd_bind_address => '127.0.0.1'
+ }
+
+ $masters = $site_couchdb::couchdb_config['replication']['masters']
+ $master_node_names = keys($site_couchdb::couchdb_config['replication']['masters'])
+ $master_node = $masters[$master_node_names[0]]
+ $user = $site_couchdb::couchdb_replication_user
+ $password = $site_couchdb::couchdb_replication_pw
+ $from_host = $master_node['domain_internal']
+ $from_port = $master_node['couch_port']
+ $from = "http://${user}:${password}@${from_host}:${from_port}"
+
+ notice("mirror from: ${from}")
+
+ ### customer database
+ couchdb::mirror_db { 'customers':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## identities database
+ couchdb::mirror_db { 'identities':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## keycache database
+ couchdb::mirror_db { 'keycache':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## sessions database
+ couchdb::mirror_db { 'sessions':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## shared database
+ couchdb::mirror_db { 'shared':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tickets database
+ couchdb::mirror_db { 'tickets':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tokens database
+ couchdb::mirror_db { 'tokens':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## users database
+ couchdb::mirror_db { 'users':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## messages db
+ couchdb::mirror_db { 'messages':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+}
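
The nine mirror_db declarations above differ only in their titles, so an equivalent, more compact form is possible with Puppet's array titles (a sketch, not part of the commit):

    # mirror every replicated database from the chosen master
    $mirrored_dbs = [ 'customers', 'identities', 'keycache', 'sessions',
                      'shared', 'tickets', 'tokens', 'users', 'messages' ]
    couchdb::mirror_db { $mirrored_dbs:
      from    => $from,
      require => Couchdb::Query::Setup['localhost'],
    }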
diff --git a/puppet/modules/site_couchdb/manifests/setup.pp b/puppet/modules/site_couchdb/manifests/setup.pp
new file mode 100644
index 00000000..69bd1c6a
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/setup.pp
@@ -0,0 +1,46 @@
+#
+# An initial setup class. All the other classes depend on this
+#
+class site_couchdb::setup {
+
+ # ensure that we don't have leftovers from previous installations
+ # where we installed the cloudant bigcouch package
+ # https://leap.se/code/issues/4971
+ class { 'couchdb::bigcouch::package::cloudant':
+ ensure => absent
+ }
+
+ $user = $site_couchdb::couchdb_admin_user
+
+ # /etc/couchdb/couchdb-admin.netrc is deployed by couchdb::query::setup.
+ # we symlink it to /etc/couchdb/couchdb.netrc for puppet commands, and
+ # symlink that to /root/.netrc for couchdb_scripts (e.g. backup), which
+ # also makes life easier for the admin (i.e. using curl/wget without
+ # passing credentials)
+ file {
+ '/etc/couchdb/couchdb.netrc':
+ ensure => link,
+ target => "/etc/couchdb/couchdb-${user}.netrc";
+
+ '/root/.netrc':
+ ensure => link,
+ target => '/etc/couchdb/couchdb.netrc';
+
+ '/srv/leap/couchdb':
+ ensure => directory
+ }
+
+ couchdb::query::setup { 'localhost':
+ user => $user,
+ pw => $site_couchdb::couchdb_admin_pw,
+ }
+
+ vcsrepo { '/srv/leap/couchdb/scripts':
+ ensure => present,
+ provider => git,
+ source => 'https://leap.se/git/couchdb_scripts',
+ revision => 'origin/master',
+ require => File['/srv/leap/couchdb']
+ }
+
+}
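
With the symlink chain in place (/root/.netrc -> /etc/couchdb/couchdb.netrc -> /etc/couchdb/couchdb-<user>.netrc), anything running as root can authenticate against couchdb without explicit credentials. A hypothetical consumer, assuming a backup script by this name exists in couchdb_scripts:

    # nightly backup relying on /root/.netrc for credentials
    # (the script name is an assumption, not part of this commit)
    cron { 'couchdb_backup':
      command => '/srv/leap/couchdb/scripts/backup_couchdb.sh',
      hour    => 3,
      minute  => 0,
      require => [ File['/root/.netrc'],
                   Vcsrepo['/srv/leap/couchdb/scripts'] ],
    }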
diff --git a/puppet/modules/site_couchdb/manifests/stunnel.pp b/puppet/modules/site_couchdb/manifests/stunnel.pp
deleted file mode 100644
index 91f1e3aa..00000000
--- a/puppet/modules/site_couchdb/manifests/stunnel.pp
+++ /dev/null
@@ -1,112 +0,0 @@
-class site_couchdb::stunnel {
-
- $stunnel = hiera('stunnel')
-
- $couch_server = $stunnel['couch_server']
- $couch_server_accept = $couch_server['accept']
- $couch_server_connect = $couch_server['connect']
-
- # Erlang Port Mapper Daemon (epmd) stunnel server/clients
- $epmd_server = $stunnel['epmd_server']
- $epmd_server_accept = $epmd_server['accept']
- $epmd_server_connect = $epmd_server['connect']
- $epmd_clients = $stunnel['epmd_clients']
-
- # Erlang Distributed Node Protocol (ednp) stunnel server/clients
- $ednp_server = $stunnel['ednp_server']
- $ednp_server_accept = $ednp_server['accept']
- $ednp_server_connect = $ednp_server['connect']
- $ednp_clients = $stunnel['ednp_clients']
-
-
-
- include site_config::x509::cert
- include site_config::x509::key
- include site_config::x509::ca
-
- include x509::variables
- $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
- $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
- $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
-
- # setup a stunnel server for the webapp to connect to couchdb
- stunnel::service { 'couch_server':
- accept => $couch_server_accept,
- connect => $couch_server_connect,
- client => false,
- cafile => $ca_path,
- key => $key_path,
- cert => $cert_path,
- verify => '2',
- pid => '/var/run/stunnel4/couchserver.pid',
- rndfile => '/var/lib/stunnel4/.rnd',
- debuglevel => '4',
- require => [
- Class['Site_config::X509::Key'],
- Class['Site_config::X509::Cert'],
- Class['Site_config::X509::Ca'] ];
- }
-
-
- # setup stunnel server for Erlang Port Mapper Daemon (epmd), necessary for
- # bigcouch clustering between each bigcouchdb node
- stunnel::service { 'epmd_server':
- accept => $epmd_server_accept,
- connect => $epmd_server_connect,
- client => false,
- cafile => $ca_path,
- key => $key_path,
- cert => $cert_path,
- verify => '2',
- pid => '/var/run/stunnel4/epmd_server.pid',
- rndfile => '/var/lib/stunnel4/.rnd',
- debuglevel => '4',
- require => [
- Class['Site_config::X509::Key'],
- Class['Site_config::X509::Cert'],
- Class['Site_config::X509::Ca'] ];
- }
-
- # setup stunnel clients for Erlang Port Mapper Daemon (epmd) to connect
- # to the above epmd stunnel server.
- $epmd_client_defaults = {
- 'client' => true,
- 'cafile' => $ca_path,
- 'key' => $key_path,
- 'cert' => $cert_path,
- }
-
- create_resources(site_stunnel::clients, $epmd_clients, $epmd_client_defaults)
-
- # setup stunnel server for Erlang Distributed Node Protocol (ednp), necessary
- # for bigcouch clustering between each bigcouchdb node
- stunnel::service { 'ednp_server':
- accept => $ednp_server_accept,
- connect => $ednp_server_connect,
- client => false,
- cafile => $ca_path,
- key => $key_path,
- cert => $cert_path,
- verify => '2',
- pid => '/var/run/stunnel4/ednp_server.pid',
- rndfile => '/var/lib/stunnel4/.rnd',
- debuglevel => '4',
- require => [
- Class['Site_config::X509::Key'],
- Class['Site_config::X509::Cert'],
- Class['Site_config::X509::Ca'] ];
- }
-
- # setup stunnel clients for Erlang Distributed Node Protocol (ednp) to connect
- # to the above ednp stunnel server.
- $ednp_client_defaults = {
- 'client' => true,
- 'cafile' => $ca_path,
- 'key' => $key_path,
- 'cert' => $cert_path,
- }
-
- create_resources(site_stunnel::clients, $ednp_clients, $ednp_client_defaults)
-
- include site_check_mk::agent::stunnel
-}
diff --git a/puppet/modules/site_haproxy/manifests/init.pp b/puppet/modules/site_haproxy/manifests/init.pp
index 6bcf3f5c..b28ce80e 100644
--- a/puppet/modules/site_haproxy/manifests/init.pp
+++ b/puppet/modules/site_haproxy/manifests/init.pp
@@ -2,25 +2,25 @@ class site_haproxy {
$haproxy = hiera('haproxy')
class { 'haproxy':
- enable => true,
- manage_service => true,
- global_options => {
- 'log' => '127.0.0.1 local0',
- 'maxconn' => '4096',
- 'stats' => 'socket /var/run/haproxy.sock user haproxy group haproxy',
- 'chroot' => '/usr/share/haproxy',
- 'user' => 'haproxy',
- 'group' => 'haproxy',
- 'daemon' => ''
- },
- defaults_options => {
- 'log' => 'global',
- 'retries' => '3',
- 'option' => 'redispatch',
- 'timeout connect' => '4000',
- 'timeout client' => '20000',
- 'timeout server' => '20000'
- }
+ enable => true,
+ manage_service => true,
+ global_options => {
+ 'log' => '127.0.0.1 local0',
+ 'maxconn' => '4096',
+ 'stats' => 'socket /var/run/haproxy.sock user haproxy group haproxy',
+ 'chroot' => '/usr/share/haproxy',
+ 'user' => 'haproxy',
+ 'group' => 'haproxy',
+ 'daemon' => ''
+ },
+ defaults_options => {
+ 'log' => 'global',
+ 'retries' => '3',
+ 'option' => 'redispatch',
+ 'timeout connect' => '4000',
+ 'timeout client' => '20000',
+ 'timeout server' => '20000'
+ }
}
# monitor haproxy
@@ -34,8 +34,8 @@ class site_haproxy {
concat::fragment { 'leap_haproxy_webapp_couchdb':
target => '/etc/haproxy/haproxy.cfg',
order => '20',
- content => template('site_haproxy/haproxy_couchdb.cfg.erb'),
+ content => template('site_haproxy/haproxy.cfg.erb'),
}
-
+
include site_check_mk::agent::haproxy
}
diff --git a/puppet/modules/site_haproxy/templates/couch.erb b/puppet/modules/site_haproxy/templates/couch.erb
new file mode 100644
index 00000000..f42e8368
--- /dev/null
+++ b/puppet/modules/site_haproxy/templates/couch.erb
@@ -0,0 +1,32 @@
+frontend couch
+ bind localhost:<%= @listen_port %>
+ mode http
+ option httplog
+ option dontlognull
+ option http-server-close # use client keep-alive, but close server connection.
+ use_backend couch_read if METH_GET
+ default_backend couch_write
+
+backend couch_write
+ mode http
+ balance roundrobin
+ option httpchk GET / # health check using simple get to root
+ option allbackups # balance among all backups, not just one.
+ default-server inter 3000 fastinter 1000 downinter 1000 rise 2 fall 1
+<%- @servers.sort.each do |name,server| -%>
+<%- next unless server['writable'] -%>
+ # <%=name%>
+ server couchdb_<%=server['port']%> <%=server['host']%>:<%=server['port']%> <%='backup' if server['backup']%> weight <%=server['weight']%> check
+<%- end -%>
+
+backend couch_read
+ mode http
+ balance roundrobin
+ option httpchk GET / # health check using simple get to root
+ option allbackups # balance among all backups, not just one.
+ default-server inter 3000 fastinter 1000 downinter 1000 rise 2 fall 1
+<%- @servers.sort.each do |name,server| -%>
+ # <%=name%>
+ server couchdb_<%=server['port']%> <%=server['host']%>:<%=server['port']%> <%='backup' if server['backup']%> weight <%=server['weight']%> check
+<%- end -%>
+
diff --git a/puppet/modules/site_haproxy/templates/haproxy.cfg.erb b/puppet/modules/site_haproxy/templates/haproxy.cfg.erb
new file mode 100644
index 00000000..8311b1a5
--- /dev/null
+++ b/puppet/modules/site_haproxy/templates/haproxy.cfg.erb
@@ -0,0 +1,11 @@
+<%- @haproxy.each do |frontend, options| -%>
+<%- if options['servers'] -%>
+
+##
+## <%= frontend %>
+##
+
+<%= scope.function_templatewlv(["site_haproxy/#{frontend}.erb", options]) %>
+<%- end -%>
+<%- end -%>
+
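
haproxy.cfg.erb renders one section per frontend listed in the 'haproxy' hiera hash, handing each options sub-hash to site_haproxy/<frontend>.erb via templatewlv. The shape couch.erb expects would therefore be roughly as follows (keys inferred from the two templates; values illustrative only):

    haproxy:
      couch:
        listen_port: 4096
        servers:
          couch1:
            host: localhost
            port: 4000
            weight: 10
            writable: true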
diff --git a/puppet/modules/site_haproxy/templates/haproxy_couchdb.cfg.erb b/puppet/modules/site_haproxy/templates/haproxy_couchdb.cfg.erb
deleted file mode 100644
index 1fa01b96..00000000
--- a/puppet/modules/site_haproxy/templates/haproxy_couchdb.cfg.erb
+++ /dev/null
@@ -1,23 +0,0 @@
-
-listen bigcouch-in
- mode http
- balance roundrobin
- option httplog
- option dontlognull
- option httpchk GET / # health check using simple get to root
- option http-server-close # use client keep-alive, but close server connection.
- option allbackups # balance among all backups, not just one.
-
- bind localhost:4096
-
- default-server inter 3000 fastinter 1000 downinter 1000 rise 2 fall 1
-
-<%- if @haproxy['servers'] -%>
-<%- @haproxy['servers'].sort.each do |name,server| -%>
-<%- backup = server['backup'] ? 'backup' : '' -%>
- # <%=name%>
- server couchdb_<%=server['port']%> <%=server['host']%>:<%=server['port']%> <%=backup%> weight <%=server['weight']%> check
-
-<%- end -%>
-<%- end -%>
-
diff --git a/puppet/modules/site_mx/manifests/couchdb.pp b/puppet/modules/site_mx/manifests/couchdb.pp
deleted file mode 100644
index b1f3bd02..00000000
--- a/puppet/modules/site_mx/manifests/couchdb.pp
+++ /dev/null
@@ -1,23 +0,0 @@
-class site_mx::couchdb {
-
- $stunnel = hiera('stunnel')
- $couch_client = $stunnel['couch_client']
- $couch_client_connect = $couch_client['connect']
-
- include x509::variables
- $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
- $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
- $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
-
- include site_stunnel
-
- $couchdb_stunnel_client_defaults = {
- 'connect_port' => $couch_client_connect,
- 'client' => true,
- 'cafile' => $ca_path,
- 'key' => $key_path,
- 'cert' => $cert_path,
- }
-
- create_resources(site_stunnel::clients, $couch_client, $couchdb_stunnel_client_defaults)
-}
diff --git a/puppet/modules/site_mx/manifests/init.pp b/puppet/modules/site_mx/manifests/init.pp
index c3d38a46..91014ed6 100644
--- a/puppet/modules/site_mx/manifests/init.pp
+++ b/puppet/modules/site_mx/manifests/init.pp
@@ -8,12 +8,12 @@ class site_mx {
include site_config::x509::client_ca::ca
include site_config::x509::client_ca::key
+ include site_stunnel
include site_postfix::mx
include site_haproxy
include site_shorewall::mx
include site_shorewall::service::smtp
- include site_mx::couchdb
include leap_mx
include site_check_mk::agent::mx
}
diff --git a/puppet/modules/site_obfsproxy/README b/puppet/modules/site_obfsproxy/README
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/puppet/modules/site_obfsproxy/README
diff --git a/puppet/modules/site_obfsproxy/manifests/init.pp b/puppet/modules/site_obfsproxy/manifests/init.pp
new file mode 100644
index 00000000..40b7fba8
--- /dev/null
+++ b/puppet/modules/site_obfsproxy/manifests/init.pp
@@ -0,0 +1,39 @@
+class site_obfsproxy {
+ tag 'leap_service'
+ Class['site_config::default'] -> Class['site_obfsproxy']
+
+ $transport = 'scramblesuit'
+
+ $obfsproxy = hiera('obfsproxy')
+ $scramblesuit = $obfsproxy['scramblesuit']
+ $scram_pass = $scramblesuit['password']
+ $scram_port = $scramblesuit['port']
+ $dest_ip = $obfsproxy['gateway_address']
+ $dest_port = '443'
+
+ if $::services =~ /\bopenvpn\b/ {
+ $openvpn = hiera('openvpn')
+ $bind_address = $openvpn['gateway_address']
+ }
+ elsif $::services =~ /\bobfsproxy\b/ {
+ $bind_address = hiera('ip_address')
+ }
+
+ include site_apt::preferences::twisted
+ include site_apt::preferences::obfsproxy
+
+ class { 'obfsproxy':
+ transport => $transport,
+ bind_address => $bind_address,
+ port => $scram_port,
+ param => $scram_pass,
+ dest_ip => $dest_ip,
+ dest_port => $dest_port,
+ }
+
+ include site_shorewall::obfsproxy
+
+}
+
+
+
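
The hiera data consumed by site_obfsproxy, with keys taken from the lookups in the manifest above and illustrative values (a sketch, not taken from a real provider):

    obfsproxy:
      scramblesuit:
        password: EXAMPLEPASSWORD
        port: 4430
      gateway_address: 1.2.3.4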
diff --git a/puppet/modules/site_shorewall/manifests/couchdb.pp b/puppet/modules/site_shorewall/manifests/couchdb.pp
deleted file mode 100644
index 73bed62b..00000000
--- a/puppet/modules/site_shorewall/manifests/couchdb.pp
+++ /dev/null
@@ -1,24 +0,0 @@
-class site_shorewall::couchdb {
-
- include site_shorewall::defaults
-
- $stunnel = hiera('stunnel')
- $couch_server = $stunnel['couch_server']
- $couch_stunnel_port = $couch_server['accept']
-
- # define macro for incoming services
- file { '/etc/shorewall/macro.leap_couchdb':
- content => "PARAM - - tcp ${couch_stunnel_port}",
- notify => Service['shorewall'],
- require => Package['shorewall']
- }
-
- shorewall::rule {
- 'net2fw-couchdb':
- source => 'net',
- destination => '$FW',
- action => 'leap_couchdb(ACCEPT)',
- order => 200;
- }
-
-}
diff --git a/puppet/modules/site_shorewall/manifests/couchdb/bigcouch.pp b/puppet/modules/site_shorewall/manifests/couchdb/bigcouch.pp
deleted file mode 100644
index 20740650..00000000
--- a/puppet/modules/site_shorewall/manifests/couchdb/bigcouch.pp
+++ /dev/null
@@ -1,51 +0,0 @@
-class site_shorewall::couchdb::bigcouch {
-
- include site_shorewall::defaults
-
- $stunnel = hiera('stunnel')
-
- # Erlang Port Mapper Daemon (epmd) stunnel server/clients
- $epmd_clients = $stunnel['epmd_clients']
- $epmd_server = $stunnel['epmd_server']
- $epmd_server_port = $epmd_server['accept']
- $epmd_server_connect = $epmd_server['connect']
-
- # Erlang Distributed Node Protocol (ednp) stunnel server/clients
- $ednp_clients = $stunnel['ednp_clients']
- $ednp_server = $stunnel['ednp_server']
- $ednp_server_port = $ednp_server['accept']
- $ednp_server_connect = $ednp_server['connect']
-
- # define macro for incoming services
- file { '/etc/shorewall/macro.leap_bigcouch':
- content => "PARAM - - tcp ${epmd_server_port},${ednp_server_port}",
- notify => Service['shorewall'],
- require => Package['shorewall']
- }
-
- shorewall::rule {
- 'net2fw-bigcouch':
- source => 'net',
- destination => '$FW',
- action => 'leap_bigcouch(ACCEPT)',
- order => 300;
- }
-
- # setup DNAT rules for each epmd
- $epmd_shorewall_dnat_defaults = {
- 'source' => '$FW',
- 'proto' => 'tcp',
- 'destinationport' => regsubst($epmd_server_connect, '^([0-9.]+:)([0-9]+)$', '\2')
- }
- create_resources(site_shorewall::couchdb::dnat, $epmd_clients, $epmd_shorewall_dnat_defaults)
-
- # setup DNAT rules for each ednp
- $ednp_shorewall_dnat_defaults = {
- 'source' => '$FW',
- 'proto' => 'tcp',
- 'destinationport' => regsubst($ednp_server_connect, '^([0-9.]+:)([0-9]+)$', '\2')
- }
- create_resources(site_shorewall::couchdb::dnat, $ednp_clients, $ednp_shorewall_dnat_defaults)
-
-}
-
diff --git a/puppet/modules/site_shorewall/manifests/couchdb/dnat.pp b/puppet/modules/site_shorewall/manifests/couchdb/dnat.pp
deleted file mode 100644
index f1bc9acf..00000000
--- a/puppet/modules/site_shorewall/manifests/couchdb/dnat.pp
+++ /dev/null
@@ -1,21 +0,0 @@
-define site_shorewall::couchdb::dnat (
- $source,
- $connect,
- $connect_port,
- $accept_port,
- $proto,
- $destinationport )
-{
-
-
- shorewall::rule {
- "dnat_${name}_${destinationport}":
- action => 'DNAT',
- source => $source,
- destination => "\$FW:127.0.0.1:${accept_port}",
- proto => $proto,
- destinationport => $destinationport,
- originaldest => $connect,
- order => 200
- }
-}
diff --git a/puppet/modules/site_shorewall/manifests/obfsproxy.pp b/puppet/modules/site_shorewall/manifests/obfsproxy.pp
new file mode 100644
index 00000000..68fb9b9f
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/obfsproxy.pp
@@ -0,0 +1,24 @@
+class site_shorewall::obfsproxy {
+
+ include site_shorewall::defaults
+
+ $obfsproxy = hiera('obfsproxy')
+ $scramblesuit = $obfsproxy['scramblesuit']
+ $scram_port = $scramblesuit['port']
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_obfsproxy':
+ content => "PARAM - - tcp $scram_port ",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+ shorewall::rule {
+ 'net2fw-obfs':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_obfsproxy(ACCEPT)',
+ order => 200;
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/stunnel/client.pp b/puppet/modules/site_shorewall/manifests/stunnel/client.pp
new file mode 100644
index 00000000..9a89a244
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/stunnel/client.pp
@@ -0,0 +1,40 @@
+#
+# Adds some firewall magic to the stunnel.
+#
+# Using DNAT, this firewall rule allows a locally running program
+# to connect to the normal remote IP and remote port of the
+# service on another machine, and have this connection magically
+# routed through the locally running stunnel client.
+#
+# The network looks like this:
+#
+# From the client's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# consumer app -> localhost:accept_port -> connect:connect_port -> localhost:original_port
+#
+# From the server's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# ?? -> *:accept_port -> localhost:connect_port -> service
+#
+
+define site_shorewall::stunnel::client(
+ $accept_port,
+ $connect,
+ $connect_port,
+ $original_port) {
+
+ include site_shorewall::defaults
+
+ shorewall::rule {
+ "stunnel_dnat_${name}":
+ action => 'DNAT',
+ source => '$FW',
+ destination => "\$FW:127.0.0.1:${accept_port}",
+ proto => 'tcp',
+ destinationport => $original_port,
+ originaldest => $connect,
+ order => 200
+ }
+}
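
A worked instance of the DNAT described above, borrowing the couch client numbers from the webapp test example later in this commit (original_port 5984 is couchdb's normal port and is an assumption here):

    # reroute couch1.bitmask.i:5984 to the local stunnel client on :4000
    site_shorewall::stunnel::client { 'couch1_5984':
      accept_port   => 4000,
      connect       => 'couch1.bitmask.i',
      connect_port  => 15984,
      original_port => 5984,
    }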
diff --git a/puppet/modules/site_shorewall/manifests/stunnel/server.pp b/puppet/modules/site_shorewall/manifests/stunnel/server.pp
new file mode 100644
index 00000000..798cd631
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/stunnel/server.pp
@@ -0,0 +1,22 @@
+#
+# Allow all incoming connections to stunnel server port
+#
+
+define site_shorewall::stunnel::server($port) {
+
+ include site_shorewall::defaults
+
+ file { "/etc/shorewall/macro.stunnel_server_${name}":
+ content => "PARAM - - tcp ${port}",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+ shorewall::rule {
+ "net2fw-stunnel-server-${name}":
+ source => 'net',
+ destination => '$FW',
+ action => "stunnel_server_${name}(ACCEPT)",
+ order => 200;
+ }
+
+} \ No newline at end of file
diff --git a/puppet/modules/site_stunnel/manifests/client.pp b/puppet/modules/site_stunnel/manifests/client.pp
new file mode 100644
index 00000000..12d664b4
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/client.pp
@@ -0,0 +1,52 @@
+#
+# Sets up stunnel and firewall configuration for
+# a single stunnel client
+#
+# As a client, we accept connections on localhost,
+# and connect to a remote $connect:$connect_port
+#
+
+define site_stunnel::client (
+ $accept_port,
+ $connect_port,
+ $connect,
+ $original_port,
+ $verify = '2',
+ $pid = $name,
+ $rndfile = '/var/lib/stunnel4/.rnd',
+ $debuglevel = '4' ) {
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include x509::variables
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+ stunnel::service { $name:
+ accept => "127.0.0.1:${accept_port}",
+ connect => "${connect}:${connect_port}",
+ client => true,
+ cafile => $ca_path,
+ key => $key_path,
+ cert => $cert_path,
+ verify => $verify,
+ pid => "/var/run/stunnel4/${pid}.pid",
+ rndfile => $rndfile,
+ debuglevel => $debuglevel,
+ subscribe => [
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ];
+ }
+
+ site_shorewall::stunnel::client { $name:
+ accept_port => $accept_port,
+ connect => $connect,
+ connect_port => $connect_port,
+ original_port => $original_port
+ }
+
+ include site_check_mk::agent::stunnel
+}
diff --git a/puppet/modules/site_stunnel/manifests/clients.pp b/puppet/modules/site_stunnel/manifests/clients.pp
index b75c9ac3..c0958b5f 100644
--- a/puppet/modules/site_stunnel/manifests/clients.pp
+++ b/puppet/modules/site_stunnel/manifests/clients.pp
@@ -1,33 +1,23 @@
-define site_stunnel::clients (
- $accept_port,
- $connect_port,
- $connect,
- $cafile,
- $key,
- $cert,
- $client = true,
- $verify = '2',
- $pid = $name,
- $rndfile = '/var/lib/stunnel4/.rnd',
- $debuglevel = '4' ) {
+#
+# example hiera yaml:
+#
+# stunnel:
+# clients:
+# ednp_clients:
+# thrips_9002:
+# accept_port: 4001
+# connect: thrips.demo.bitmask.i
+# connect_port: 19002
+# epmd_clients:
+# thrips_4369:
+# accept_port: 4000
+# connect: thrips.demo.bitmask.i
+# connect_port: 14369
+#
+# In the above example, this resource definition is called twice, with $name
+# 'ednp_clients' and 'epmd_clients'
+#
- stunnel::service { $name:
- accept => "127.0.0.1:${accept_port}",
- connect => "${connect}:${connect_port}",
- client => $client,
- cafile => $cafile,
- key => $key,
- cert => $cert,
- verify => $verify,
- pid => "/var/run/stunnel4/${pid}.pid",
- rndfile => $rndfile,
- debuglevel => $debuglevel,
- subscribe => [
- Class['Site_config::X509::Key'],
- Class['Site_config::X509::Cert'],
- Class['Site_config::X509::Ca'] ];
-
- }
-
- include site_check_mk::agent::stunnel
+define site_stunnel::clients {
+ create_resources(site_stunnel::client, $site_stunnel::clients[$name])
}
diff --git a/puppet/modules/site_stunnel/manifests/init.pp b/puppet/modules/site_stunnel/manifests/init.pp
index c7d6acc6..b292f1cd 100644
--- a/puppet/modules/site_stunnel/manifests/init.pp
+++ b/puppet/modules/site_stunnel/manifests/init.pp
@@ -1,3 +1,8 @@
+#
+# If you need something to happen after stunnel is started,
+# you can depend on Service['stunnel'] or Class['site_stunnel']
+#
+
class site_stunnel {
# include the generic stunnel module
@@ -13,5 +18,15 @@ class site_stunnel {
ensure => absent;
}
}
+
+ $stunnel = hiera('stunnel')
+
+ # add server stunnels
+ create_resources(site_stunnel::servers, $stunnel['servers'])
+
+ # add client stunnels
+ $clients = $stunnel['clients']
+ $client_sections = keys($clients)
+ site_stunnel::clients { $client_sections: }
}
diff --git a/puppet/modules/site_stunnel/manifests/servers.pp b/puppet/modules/site_stunnel/manifests/servers.pp
new file mode 100644
index 00000000..b1da5c59
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/servers.pp
@@ -0,0 +1,50 @@
+#
+# example hiera yaml:
+#
+# stunnel:
+# servers:
+# couch_server:
+# accept_port: 15984
+# connect_port: 5984
+#
+
+define site_stunnel::servers (
+ $accept_port,
+ $connect_port,
+ $verify = '2',
+ $pid = $name,
+ $rndfile = '/var/lib/stunnel4/.rnd',
+ $debuglevel = '4' ) {
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include x509::variables
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+ stunnel::service { $name:
+ accept => $accept_port,
+ connect => "127.0.0.1:${connect_port}",
+ client => false,
+ cafile => $ca_path,
+ key => $key_path,
+ cert => $cert_path,
+ verify => $verify,
+ pid => "/var/run/stunnel4/${pid}.pid",
+ rndfile => '/var/lib/stunnel4/.rnd',
+ debuglevel => $debuglevel,
+ require => [
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ];
+ }
+
+ # allow incoming connections on $accept_port
+ site_shorewall::stunnel::server { $name:
+ port => $accept_port
+ }
+
+ include site_check_mk::agent::stunnel
+}
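
The create_resources() call in site_stunnel expands the example hiera entry above into the equivalent of this declaration (sketch):

    site_stunnel::servers { 'couch_server':
      accept_port  => 15984,
      connect_port => 5984,
    }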
diff --git a/puppet/modules/site_webapp/manifests/couchdb.pp b/puppet/modules/site_webapp/manifests/couchdb.pp
index ff743fba..3ae4d266 100644
--- a/puppet/modules/site_webapp/manifests/couchdb.pp
+++ b/puppet/modules/site_webapp/manifests/couchdb.pp
@@ -7,10 +7,6 @@ class site_webapp::couchdb {
$couchdb_webapp_user = $webapp['couchdb_webapp_user']['username']
$couchdb_webapp_password = $webapp['couchdb_webapp_user']['password']
- $stunnel = hiera('stunnel')
- $couch_client = $stunnel['couch_client']
- $couch_client_connect = $couch_client['connect']
-
include x509::variables
file {
@@ -37,14 +33,4 @@ class site_webapp::couchdb {
}
include site_stunnel
-
- $couchdb_stunnel_client_defaults = {
- 'connect_port' => $couch_client_connect,
- 'client' => true,
- 'cafile' => "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt",
- 'key' => "${x509::variables::keys}/${site_config::params::cert_name}.key",
- 'cert' => "${x509::variables::certs}/${site_config::params::cert_name}.crt",
- }
-
- create_resources(site_stunnel::clients, $couch_client, $couchdb_stunnel_client_defaults)
}
diff --git a/puppet/modules/site_webapp/manifests/init.pp b/puppet/modules/site_webapp/manifests/init.pp
index 7fdd0c3f..17b010f3 100644
--- a/puppet/modules/site_webapp/manifests/init.pp
+++ b/puppet/modules/site_webapp/manifests/init.pp
@@ -53,8 +53,8 @@ class site_webapp {
exec { 'bundler_update':
cwd => '/srv/leap/webapp',
- command => '/bin/bash -c "/usr/bin/bundle check || /usr/bin/bundle install --path vendor/bundle --without test development"',
- unless => '/usr/bin/bundle check',
+ command => '/bin/bash -c "/usr/bin/bundle check --path vendor/bundle || /usr/bin/bundle install --path vendor/bundle --without test development"',
+ unless => '/usr/bin/bundle check --path vendor/bundle',
user => 'leap-webapp',
timeout => 600,
require => [
diff --git a/puppet/modules/site_webapp/templates/config.yml.erb b/puppet/modules/site_webapp/templates/config.yml.erb
index 6461c5e8..9205438b 100644
--- a/puppet/modules/site_webapp/templates/config.yml.erb
+++ b/puppet/modules/site_webapp/templates/config.yml.erb
@@ -18,3 +18,10 @@ production:
minimum_client_version: "<%= @webapp['client_version']['min'] %>"
default_service_level: "<%= @webapp['default_service_level'] %>"
service_levels: <%= @webapp['service_levels'].to_json %>
+ allow_registration: <%= @webapp['allow_registration'].inspect %>
+<%- if @webapp['engines'] && @webapp['engines'].any? -%>
+ engines:
+<%- @webapp['engines'].each do |engine| -%>
+ - <%= engine %>
+<%- end -%>
+<%- end -%>
diff --git a/puppet/modules/tapicero/manifests/init.pp b/puppet/modules/tapicero/manifests/init.pp
index f2e723f5..2bf72004 100644
--- a/puppet/modules/tapicero/manifests/init.pp
+++ b/puppet/modules/tapicero/manifests/init.pp
@@ -12,6 +12,8 @@ class tapicero {
$couchdb_soledad_user = $couchdb_users['soledad']['username']
$couchdb_leap_mx_user = $couchdb_users['leap_mx']['username']
+ $couchdb_mode = $couchdb['mode']
+ $couchdb_replication = $couchdb['replication']
Class['site_config::default'] -> Class['tapicero']
diff --git a/puppet/modules/tapicero/templates/tapicero.yaml.erb b/puppet/modules/tapicero/templates/tapicero.yaml.erb
index 8e19b22f..510450ad 100644
--- a/puppet/modules/tapicero/templates/tapicero.yaml.erb
+++ b/puppet/modules/tapicero/templates/tapicero.yaml.erb
@@ -1,3 +1,5 @@
+<%- require 'json' -%>
+
#
# Default configuration options for Tapicero
#
@@ -24,6 +26,10 @@ log_level: info
options:
# prefix for per user databases:
db_prefix: "user-"
+ mode: <%= @couchdb_mode %>
+<%- if @couchdb_replication %>
+ replication: <%= @couchdb_replication.to_json %>
+<%- end -%>
# security settings to be used for the per user databases
security:
@@ -34,9 +40,11 @@ options:
# explicit about this
- <%= @couchdb_admin_user %>
roles: []
- readers:
+ members:
names:
- <%= @couchdb_soledad_user %>
- <%= @couchdb_leap_mx_user %>
- roles: []
+ roles:
+ - replication
+
diff --git a/tests/white-box/couchdb.rb b/tests/white-box/couchdb.rb
index 9d5da94f..6d3a7452 100644
--- a/tests/white-box/couchdb.rb
+++ b/tests/white-box/couchdb.rb
@@ -10,8 +10,10 @@ class CouchDB < LeapTest
def test_00_Are_daemons_running?
assert_running 'tapicero'
- assert_running 'bin/beam'
- assert_running 'bin/epmd'
+ if multimaster?
+ assert_running 'bin/beam'
+ assert_running 'bin/epmd'
+ end
pass
end
@@ -29,6 +31,7 @@ class CouchDB < LeapTest
# compare the configured nodes to the nodes that are actually listed in bigcouch
#
def test_02_Is_cluster_membership_ok?
+ return unless multimaster?
url = couchdb_backend_url("/nodes/_all_docs")
neighbors = assert_property('couch.bigcouch.neighbors')
neighbors << assert_property('domain.full')
@@ -48,7 +51,8 @@ class CouchDB < LeapTest
# this seems backward to me, so it might be the other way around.
#
def test_03_Are_configured_nodes_online?
- url = couchdb_url("/_membership")
+ return unless multimaster?
+ url = couchdb_url("/_membership", :user => 'admin')
assert_get(url) do |body|
response = JSON.parse(body)
nodes_configured_but_not_available = response['cluster_nodes'] - response['all_nodes']
@@ -66,11 +70,11 @@ class CouchDB < LeapTest
end
def test_04_Do_ACL_users_exist?
- acl_users = ['_design/_auth', 'leap_mx', 'nickserver', 'soledad', 'tapicero', 'webapp']
- url = couchdb_backend_url("/_users/_all_docs")
+ acl_users = ['_design/_auth', 'leap_mx', 'nickserver', 'soledad', 'tapicero', 'webapp', 'replication']
+ url = couchdb_backend_url("/_users/_all_docs", :user => 'admin')
assert_get(url) do |body|
response = JSON.parse(body)
- assert_equal 6, response['total_rows']
+ assert_equal acl_users.count, response['total_rows']
actual_users = response['rows'].map{|row| row['id'].sub(/^org.couchdb.user:/, '') }
assert_equal acl_users.sort, actual_users.sort
end
@@ -80,7 +84,8 @@ class CouchDB < LeapTest
def test_05_Do_required_databases_exist?
dbs_that_should_exist = ["customers","identities","keycache","sessions","shared","tickets","tokens","users"]
dbs_that_should_exist.each do |db_name|
- assert_get(couchdb_url("/"+db_name)) do |body|
+ url = couchdb_url("/"+db_name, :user => 'admin')
+ assert_get(url) do |body|
assert response = JSON.parse(body)
assert_equal db_name, response['db_name']
end
@@ -88,22 +93,55 @@ class CouchDB < LeapTest
pass
end
+ #
+ # for now, this just prints warnings, since we are failing these tests.
+ #
+ def test_06_Is_ACL_enforced?
+ ok = assert_auth_fail(
+ couchdb_url('/users/_all_docs', :user => 'leap_mx'),
+ {:limit => 1}
+ )
+ ok = assert_auth_fail(
+ couchdb_url('/users/_all_docs', :user => 'leap_mx'),
+ {:limit => 1}
+ ) && ok
+ pass if ok
+ end
+
+ def test_07_What?
+ pass
+ end
+
private
- def couchdb_url(path="", port=nil)
+ def couchdb_url(path="", options=nil)
+ options||={}
@port ||= begin
assert_property 'couch.port'
$node['couch']['port']
end
- @password ||= begin
- assert_property 'couch.users.admin.password'
- $node['couch']['users']['admin']['password']
+ url = 'http://'
+ if options[:user]
+ assert_property 'couch.users.' + options[:user]
+ password = $node['couch']['users'][options[:user]]['password']
+ url += "%s:%s@" % [options[:user], password]
end
- "http://admin:#{@password}@localhost:#{port || @port}#{path}"
+ url += "localhost:#{options[:port] || @port}#{path}"
+ url
+ end
+
+ def couchdb_backend_url(path="", options={})
+ # TODO: admin port is hardcoded for now but should be configurable.
+ options = {port: multimaster? && "5986"}.merge options
+ couchdb_url(path, options)
+ end
+
+ def multimaster?
+ mode == "multimaster"
end
- def couchdb_backend_url(path="")
- couchdb_url(path, "5986") # TODO: admin port is hardcoded for now but should be configurable.
+ def mode
+ assert_property('couch.mode')
end
end
diff --git a/tests/white-box/network.rb b/tests/white-box/network.rb
index e0b0339d..118861a7 100644
--- a/tests/white-box/network.rb
+++ b/tests/white-box/network.rb
@@ -28,29 +28,26 @@ class Network < LeapTest
def test_02_Is_stunnel_running?
if $node['stunnel']
good_stunnel_pids = []
- $node['stunnel'].each do |stunnel_type, stunnel_configs|
- if stunnel_type =~ /_clients?$/
- stunnel_configs.each do |stunnel_name, stunnel_conf|
- config_file_name = "/etc/stunnel/#{stunnel_name}.conf"
- processes = pgrep(config_file_name)
- assert_equal 6, processes.length, "There should be six stunnel processes running for `#{config_file_name}`"
- good_stunnel_pids += processes.map{|ps| ps[:pid]}
- assert port = stunnel_conf['accept_port'], 'Field `accept_port` must be present in `stunnel` property.'
- assert_tcp_socket('localhost', port)
- end
- elsif stunnel_type =~ /_server$/
- config_file_name = "/etc/stunnel/#{stunnel_type}.conf"
+ $node['stunnel']['clients'].each do |stunnel_type, stunnel_configs|
+ stunnel_configs.each do |stunnel_name, stunnel_conf|
+ config_file_name = "/etc/stunnel/#{stunnel_name}.conf"
processes = pgrep(config_file_name)
assert_equal 6, processes.length, "There should be six stunnel processes running for `#{config_file_name}`"
good_stunnel_pids += processes.map{|ps| ps[:pid]}
- assert accept = stunnel_configs['accept'], "Field `accept` must be present in property `stunnel.#{stunnel_type}`"
- assert_tcp_socket('localhost', accept)
- assert connect = stunnel_configs['connect'], "Field `connect` must be present in property `stunnel.#{stunnel_type}`"
- assert_tcp_socket(*connect.split(':'))
- else
- skip "Unknown stunnel type `#{stunnel_type}`"
+ assert port = stunnel_conf['accept_port'], 'Field `accept_port` must be present in `stunnel` property.'
+ assert_tcp_socket('localhost', port)
end
end
+ $node['stunnel']['servers'].each do |stunnel_name, stunnel_conf|
+ config_file_name = "/etc/stunnel/#{stunnel_name}.conf"
+ processes = pgrep(config_file_name)
+ assert_equal 6, processes.length, "There should be six stunnel processes running for `#{config_file_name}`"
+ good_stunnel_pids += processes.map{|ps| ps[:pid]}
+ assert accept_port = stunnel_conf['accept_port'], "Field `accept_port` must be present in property `stunnel.servers.#{stunnel_name}`"
+ assert_tcp_socket('localhost', accept_port)
+ assert connect_port = stunnel_conf['connect_port'], "Field `connect_port` must be present in property `stunnel.servers.#{stunnel_name}`"
+ assert_tcp_socket('localhost', connect_port)
+ end
all_stunnel_pids = pgrep('/usr/bin/stunnel').collect{|process| process[:pid]}.uniq
assert_equal good_stunnel_pids.sort, all_stunnel_pids.sort, "There should not be any extra stunnel processes that are not configured in /etc/stunnel"
pass
diff --git a/tests/white-box/webapp.rb b/tests/white-box/webapp.rb
index 142ac2de..7df57fd7 100644
--- a/tests/white-box/webapp.rb
+++ b/tests/white-box/webapp.rb
@@ -14,15 +14,16 @@ class Webapp < LeapTest
# example properties:
#
# stunnel:
- # couch_client:
- # couch1_5984:
- # accept_port: 4000
- # connect: couch1.bitmask.i
- # connect_port: 15984
+ # clients:
+ # couch_client:
+ # couch1_5984:
+ # accept_port: 4000
+ # connect: couch1.bitmask.i
+ # connect_port: 15984
#
def test_01_Can_contact_couchdb?
- assert_property('stunnel.couch_client')
- $node['stunnel']['couch_client'].values.each do |stunnel_conf|
+ assert_property('stunnel.clients.couch_client')
+ $node['stunnel']['clients']['couch_client'].values.each do |stunnel_conf|
assert port = stunnel_conf['accept_port'], 'Field `accept_port` must be present in `stunnel` property.'
local_stunnel_url = "http://localhost:#{port}"
remote_ip_address = TCPSocket.gethostbyname(stunnel_conf['connect']).last
@@ -60,4 +61,13 @@ class Webapp < LeapTest
pass
end
+ #
+ # this is technically a black-box test. so, move this when we have support
+ # for black box tests.
+ #
+ def test_04_Can_access_webapp?
+ assert_get('https://' + $node['webapp']['domain'] + '/')
+ pass
+ end
+
end