Diffstat (limited to 'provider_base')
-rw-r--r--  provider_base/common.json                          10
-rw-r--r--  provider_base/lib/macros.rb                        14
-rw-r--r--  provider_base/lib/macros/core.rb                   91
-rw-r--r--  provider_base/lib/macros/files.rb                  79
-rw-r--r--  provider_base/lib/macros/haproxy.rb                73
-rw-r--r--  provider_base/lib/macros/hosts.rb                  63
-rw-r--r--  provider_base/lib/macros/nodes.rb                  88
-rw-r--r--  provider_base/lib/macros/secrets.rb                39
-rw-r--r--  provider_base/lib/macros/stunnel.rb                95
-rw-r--r--  provider_base/services/_couchdb_master.json         8
-rw-r--r--  provider_base/services/_couchdb_mirror.json        21
-rw-r--r--  provider_base/services/_couchdb_multimaster.json   24
-rw-r--r--  provider_base/services/couchdb.json                24
-rw-r--r--  provider_base/services/couchdb.rb                  60
-rw-r--r--  provider_base/services/monitor.json                 6
-rw-r--r--  provider_base/services/mx.json                     15
-rw-r--r--  provider_base/services/obfsproxy.json               9
-rw-r--r--  provider_base/services/openvpn.json                 7
-rw-r--r--  provider_base/services/webapp.json                 23
19 files changed, 717 insertions(+), 32 deletions(-)
diff --git a/provider_base/common.json b/provider_base/common.json
index a4d9c5f2..87af2152 100644
--- a/provider_base/common.json
+++ b/provider_base/common.json
@@ -25,9 +25,13 @@
"hosts": "=> hosts_file",
"x509": {
"use": true,
+ "use_commercial": false,
"cert": "= x509.use ? file(:node_x509_cert, :missing => 'x509 certificate for node $node. Run `leap cert update`') : nil",
"key": "= x509.use ? file(:node_x509_key, :missing => 'x509 key for node $node. Run `leap cert update`') : nil",
- "ca_cert": "= try_file :ca_cert"
+ "ca_cert": "= try_file :ca_cert",
+ "commercial_cert": "= x509.use_commercial ? file([:commercial_cert, try{webapp.domain}||domain.full_suffix], :missing => 'commercial x509 certificate for node $node. Add file $file, or run `leap cert csr` to generate a temporary self-signed cert and CSR you can use to purchase a real cert.') : nil",
+ "commercial_key": "= x509.use_commercial ? file([:commercial_key, try{webapp.domain}||domain.full_suffix], :missing => 'commercial x509 certificate for node $node. Add file $file, or run `leap cert csr` to generate a temporary self-signed cert and CSR you can use to purchase a real cert.') : nil",
+ "commercial_ca_cert": "= x509.use_commercial ? try_file(:commercial_ca_cert) : nil"
},
"service_type": "internal_service",
"development": {
@@ -38,5 +42,9 @@
"enabled": true,
"mail": {
"smarthost": "= nodes_like_me[:services => :mx].exclude(self).field('domain.full')"
+ },
+ "stunnel": {
+ "clients": {},
+ "servers": {}
}
}
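
Note on this common.json hunk: use_commercial defaults to false here and is switched on per service further down in this diff (monitor.json, mx.json, webapp.json). A node that needs a commercial cert only has to flip the flag; the cert, key, and CA paths then resolve through the expressions above, keyed on webapp.domain when present and domain.full_suffix otherwise. A minimal per-node override might look like this (a sketch only; the domain value is hypothetical):

    {
      "x509": {
        "use_commercial": true
      },
      "webapp": {
        "domain": "example.org"
      }
    }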
diff --git a/provider_base/lib/macros.rb b/provider_base/lib/macros.rb
new file mode 100644
index 00000000..854b92b5
--- /dev/null
+++ b/provider_base/lib/macros.rb
@@ -0,0 +1,14 @@
+#
+# MACROS
+#
+# The methods in these files are available in the context of a .json configuration file.
+# (The module LeapCli::Macro is included in Config::Object)
+#
+
+require_relative 'macros/core'
+require_relative 'macros/files'
+require_relative 'macros/haproxy'
+require_relative 'macros/hosts'
+require_relative 'macros/nodes'
+require_relative 'macros/secrets'
+require_relative 'macros/stunnel'
diff --git a/provider_base/lib/macros/core.rb b/provider_base/lib/macros/core.rb
new file mode 100644
index 00000000..2ab2e71b
--- /dev/null
+++ b/provider_base/lib/macros/core.rb
@@ -0,0 +1,91 @@
+# encoding: utf-8
+
+module LeapCli
+ module Macro
+
+ #
+  # return a fingerprint for an x509 certificate
+ #
+ def fingerprint(filename)
+ "SHA256: " + X509.fingerprint("SHA256", Path.named_path(filename))
+ end
+
+ #
+  # Creates a hash from the ssh key info in the users directory, for use in
+  # updating the authorized_keys file. Additionally, the 'monitor' public key is
+ # included, which is used by the monitor nodes to run particular commands
+ # remotely.
+ #
+ def authorized_keys
+ hash = {}
+ keys = Dir.glob(Path.named_path([:user_ssh, '*']))
+ keys.sort.each do |keyfile|
+ ssh_type, ssh_key = File.read(keyfile, :encoding => 'UTF-8').strip.split(" ")
+ name = File.basename(File.dirname(keyfile))
+ hash[name] = {
+ "type" => ssh_type,
+ "key" => ssh_key
+ }
+ end
+ ssh_type, ssh_key = File.read(Path.named_path(:monitor_pub_key), :encoding => 'UTF-8').strip.split(" ")
+ hash[Leap::Platform.monitor_username] = {
+ "type" => ssh_type,
+ "key" => ssh_key
+ }
+ hash
+ end
+
+ def assert(assertion)
+ if instance_eval(assertion)
+ true
+ else
+ raise AssertionFailed.new(assertion), assertion, caller
+ end
+ end
+
+ def error(msg)
+ raise ConfigError.new(@node, msg), msg, caller
+ end
+
+ #
+ # applies a JSON partial to this node
+ #
+ def apply_partial(partial_path)
+ manager.partials(partial_path).each do |partial_data|
+ self.deep_merge!(partial_data)
+ end
+ end
+
+ #
+ # If at first you don't succeed, then it is time to give up.
+ #
+ # try{} returns nil if anything in the block throws an exception.
+ #
+ # You can wrap something that might fail in `try`, like so.
+ #
+ # "= try{ nodes[:services => 'tor'].first.ip_address } "
+ #
+ def try(&block)
+ yield
+ rescue NoMethodError
+ rescue ArgumentError
+ nil
+ end
+
+ protected
+
+ #
+ # returns a node list, if argument is not already one
+ #
+ def listify(node_list)
+ if node_list.is_a? Config::ObjectList
+ node_list
+ elsif node_list.is_a? Config::Object
+ Config::ObjectList.new(node_list)
+ else
+ raise ArgumentError, 'argument must be a node or node list, not a `%s`' % node_list.class, caller
+ end
+ end
+
+ end
+end
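
As the header in macros.rb notes, these methods are available inside .json config values: any string value beginning with "=" is evaluated as Ruby with the macros in scope. A minimal sketch reusing the try{} example from the comment above (the surrounding property names are hypothetical):

    {
      "tor": {
        "gateway_ip": "= try{ nodes[:services => 'tor'].first.ip_address }"
      }
    }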
diff --git a/provider_base/lib/macros/files.rb b/provider_base/lib/macros/files.rb
new file mode 100644
index 00000000..0a491325
--- /dev/null
+++ b/provider_base/lib/macros/files.rb
@@ -0,0 +1,79 @@
+# encoding: utf-8
+
+##
+## FILES
+##
+
+module LeapCli
+ module Macro
+
+ #
+ # inserts the contents of a file
+ #
+ def file(filename, options={})
+ if filename.is_a? Symbol
+ filename = [filename, @node.name]
+ end
+ filepath = Path.find_file(filename)
+ if filepath
+ if filepath =~ /\.erb$/
+ ERB.new(File.read(filepath, :encoding => 'UTF-8'), nil, '%<>').result(binding)
+ else
+ File.read(filepath, :encoding => 'UTF-8')
+ end
+ else
+ raise FileMissing.new(Path.named_path(filename), options)
+ ""
+ end
+ end
+
+ #
+ # like #file, but allow missing files
+ #
+ def try_file(filename)
+ return file(filename)
+ rescue FileMissing
+ return nil
+ end
+
+ #
+ # returns what the file path will be, once the file is rsynced to the server.
+ # an internal list of discovered file paths is saved, in order to rsync these files when needed.
+ #
+ # notes:
+ #
+ # * argument 'path' is relative to Path.provider/files or Path.provider_base/files
+ # * the path returned by this method is absolute
+ # * the path stored for use later by rsync is relative to Path.provider
+ # * if the path does not exist locally, but exists in provider_base, then the default file from
+ # provider_base is copied locally. this is required for rsync to work correctly.
+ #
+ def file_path(path)
+ if path.is_a? Symbol
+ path = [path, @node.name]
+ end
+ actual_path = Path.find_file(path)
+ if actual_path.nil?
+ Util::log 2, :skipping, "file_path(\"#{path}\") because there is no such file."
+ nil
+ else
+ if actual_path =~ /^#{Regexp.escape(Path.provider_base)}/
+      # if the file is under Path.provider_base, we must copy the default file
+      # to Path.provider in order for rsync to be able to sync the file.
+ local_provider_path = actual_path.sub(/^#{Regexp.escape(Path.provider_base)}/, Path.provider)
+ FileUtils.mkdir_p File.dirname(local_provider_path), :mode => 0700
+ FileUtils.install actual_path, local_provider_path, :mode => 0600
+ Util.log :created, Path.relative_path(local_provider_path)
+ actual_path = local_provider_path
+ end
+ if File.directory?(actual_path) && actual_path !~ /\/$/
+ actual_path += '/' # ensure directories end with /, important for building rsync command
+ end
+ relative_path = Path.relative_path(actual_path)
+ @node.file_paths << relative_path
+ @node.manager.provider.hiera_sync_destination + '/' + relative_path
+ end
+ end
+
+ end
+end \ No newline at end of file
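
The difference between file() and try_file() is visible in the common.json hunk above: file() raises FileMissing (with an optional :missing hint) when the named file cannot be found, while try_file() swallows that error and returns nil. For example (adapted from common.json, with the x509.use ternary dropped):

    {
      "x509": {
        "cert": "= file(:node_x509_cert, :missing => 'x509 certificate for node $node. Run `leap cert update`')",
        "ca_cert": "= try_file :ca_cert"
      }
    }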
diff --git a/provider_base/lib/macros/haproxy.rb b/provider_base/lib/macros/haproxy.rb
new file mode 100644
index 00000000..602ae726
--- /dev/null
+++ b/provider_base/lib/macros/haproxy.rb
@@ -0,0 +1,73 @@
+# encoding: utf-8
+
+##
+## HAPROXY
+##
+
+module LeapCli
+ module Macro
+
+ #
+ # creates a hash suitable for configuring haproxy. the key is the node name of the server we are proxying to.
+ #
+ # * node_list - a hash of nodes for the haproxy servers
+  # * stunnel_clients - contains the mappings to local ports for each server node.
+ # * non_stunnel_port - in case self is included in node_list, the port to connect to.
+ #
+  # A weight of 100 is used for nodes in the same location,
+  # 10 otherwise.
+ #
+ def haproxy_servers(node_list, stunnel_clients, non_stunnel_port=nil)
+ default_weight = 10
+ local_weight = 100
+
+ # record the hosts_file
+ hostnames(node_list)
+
+ # create a simple map for node name -> local stunnel accept port
+ accept_ports = stunnel_clients.inject({}) do |hsh, stunnel_entry|
+ name = stunnel_entry.first.sub /_[0-9]+$/, ''
+ hsh[name] = stunnel_entry.last['accept_port']
+ hsh
+ end
+
+    # if one of the nodes in the node list is ourself, then there will not be a stunnel to it,
+ # but we need to include it anyway in the haproxy config.
+ if node_list[self.name] && non_stunnel_port
+ accept_ports[self.name] = non_stunnel_port
+ end
+
+ # create the first pass of the servers hash
+ servers = node_list.values.inject(Config::ObjectList.new) do |hsh, node|
+ # make sure we have a port to talk to
+ unless accept_ports[node.name]
+ error "haproxy needs a local port to talk to when connecting to #{node.name}"
+ end
+ weight = default_weight
+ try {
+ weight = local_weight if self.location.name == node.location.name
+ }
+ hsh[node.name] = Config::Object[
+ 'backup', false,
+ 'host', 'localhost',
+ 'port', accept_ports[node.name],
+ 'weight', weight
+ ]
+ if node.services.include?('couchdb')
+ hsh[node.name]['writable'] = node.couch.mode != 'mirror'
+ end
+ hsh
+ end
+
+ # if there are some local servers, make the others backup
+ if servers.detect{|k,v| v.weight == local_weight}
+ servers.each do |k,server|
+ server['backup'] = server['weight'] == default_weight
+ end
+ end
+
+ return servers
+ end
+
+ end
+end
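
For illustration, the hash returned by haproxy_servers() for two couchdb backends might resolve to something like the following (node names and ports are hypothetical; weight 100 marks a backend in the same location, weight 10 a remote one, and remote backends become backups whenever a local one exists):

    {
      "couch1": { "backup": false, "host": "localhost", "port": 4000, "weight": 100, "writable": true },
      "couch2": { "backup": true,  "host": "localhost", "port": 4001, "weight": 10,  "writable": true }
    }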
diff --git a/provider_base/lib/macros/hosts.rb b/provider_base/lib/macros/hosts.rb
new file mode 100644
index 00000000..8a4058a5
--- /dev/null
+++ b/provider_base/lib/macros/hosts.rb
@@ -0,0 +1,63 @@
+# encoding: utf-8
+
+module LeapCli
+ module Macro
+
+ ##
+ ## HOSTS
+ ##
+
+ #
+ # records the list of hosts that are encountered for this node
+ #
+ def hostnames(nodes)
+ @referenced_nodes ||= Config::ObjectList.new
+ nodes = listify(nodes)
+ nodes.each_node do |node|
+ @referenced_nodes[node.name] ||= node
+ end
+ return nodes.values.collect {|node| node.domain.name}
+ end
+
+ #
+ # Generates entries needed for updating /etc/hosts on a node (as a hash).
+ #
+ # Argument `nodes` can be nil or a list of nodes. If nil, only include the
+  # IPs of the other nodes this @node has encountered (plus all mx nodes).
+ #
+ # Also, for virtual machines, we use the local address if this @node is in
+ # the same location as the node in question.
+ #
+ # We include the ssh public key for each host, so that the hash can also
+  # be used to generate the /etc/ssh/known_hosts file.
+ #
+ def hosts_file(nodes=nil)
+ if nodes.nil?
+ if @referenced_nodes && @referenced_nodes.any?
+ nodes = @referenced_nodes
+ nodes = nodes.merge(nodes_like_me[:services => 'mx']) # all nodes always need to communicate with mx nodes.
+ end
+ end
+ return {} unless nodes
+ hosts = {}
+ my_location = @node['location'] ? @node['location']['name'] : nil
+ nodes.each_node do |node|
+ hosts[node.name] = {'ip_address' => node.ip_address, 'domain_internal' => node.domain.internal, 'domain_full' => node.domain.full}
+ node_location = node['location'] ? node['location']['name'] : nil
+ if my_location == node_location
+ if facts = @node.manager.facts[node.name]
+ if facts['ec2_public_ipv4']
+ hosts[node.name]['ip_address'] = facts['ec2_public_ipv4']
+ end
+ end
+ end
+ host_pub_key = Util::read_file([:node_ssh_pub_key,node.name])
+ if host_pub_key
+ hosts[node.name]['host_pub_key'] = host_pub_key
+ end
+ end
+ hosts
+ end
+
+ end
+end \ No newline at end of file
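
A single entry in the hash produced by hosts_file() looks roughly like this (all values hypothetical), which is enough to render both /etc/hosts and /etc/ssh/known_hosts:

    {
      "couch1": {
        "ip_address": "10.0.1.7",
        "domain_internal": "couch1.example.i",
        "domain_full": "couch1.example.org",
        "host_pub_key": "ssh-rsa AAAA..."
      }
    }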
diff --git a/provider_base/lib/macros/nodes.rb b/provider_base/lib/macros/nodes.rb
new file mode 100644
index 00000000..0c6668a0
--- /dev/null
+++ b/provider_base/lib/macros/nodes.rb
@@ -0,0 +1,88 @@
+# encoding: utf-8
+
+##
+## node related macros
+##
+
+module LeapCli
+ module Macro
+
+ #
+ # the list of all the nodes
+ #
+ def nodes
+ global.nodes
+ end
+
+ #
+  # returns the provider appropriate to this node's environment
+ #
+ def provider
+ global.env(@node.environment).provider
+ end
+
+ #
+ # returns a list of nodes that match the same environment
+ #
+ # if @node.environment is not set, we return other nodes
+ # where environment is not set.
+ #
+ def nodes_like_me
+ nodes[:environment => @node.environment]
+ end
+
+ #
+ # returns a list of nodes that match the location name
+ # and environment of @node.
+ #
+ def nodes_near_me
+ if @node['location'] && @node['location']['name']
+ nodes_like_me['location.name' => @node.location.name]
+ else
+ nodes_like_me['location' => nil]
+ end
+ end
+
+ #
+ #
+ # picks a node out from the node list in such a way that:
+ #
+  # (1) the record of which node picked which node is saved in secrets.json
+ # (2) when other nodes call this macro with the same node list, they are guaranteed to get a different node
+  # (3) if all the nodes in node_list have been picked, remaining picks are distributed randomly.
+ #
+ # if the node_list is empty, an exception is raised.
+ # if node_list size is 1, then that node is returned and nothing is
+ # memorized via the secrets.json file.
+ #
+ # `label` is needed to distinguish between pools of nodes for different purposes.
+ #
+ # TODO: more evenly balance after all the nodes have been picked.
+ #
+ def pick_node(label, node_list)
+ if node_list.any?
+ if node_list.size == 1
+ return node_list.values.first
+ else
+ secrets_key = "pick_node(:#{label},#{node_list.keys.sort.join(',')})"
+ secrets_value = @manager.secrets.retrieve(secrets_key, @node.environment) || {}
+ secrets_value[@node.name] ||= begin
+ node_to_pick = nil
+ node_list.each_node do |node|
+ next if secrets_value.values.include?(node.name)
+ node_to_pick = node.name
+ end
+ node_to_pick ||= secrets_value.values.shuffle.first # all picked already, so pick a random one.
+ node_to_pick
+ end
+ picked_node_name = secrets_value[@node.name]
+ @manager.secrets.set(secrets_key, secrets_value, @node.environment)
+ return node_list[picked_node_name]
+ end
+ else
+ raise ArgumentError.new('pick_node(node_list): node_list cannot be empty')
+ end
+ end
+
+ end
+end \ No newline at end of file
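
pick_node() is exercised later in this diff by obfsproxy.json and _couchdb_mirror.json; the pattern is to prefer a node in the same location and fall back to any node in the same environment:

    "gateway_address": "= try{pick_node(:obfs_gateway,nodes_near_me['services' => 'openvpn']).pick_fields('openvpn.gateway_address')} || try{pick_node(:obfs_gateway,nodes_like_me['services' => 'openvpn']).pick_fields('openvpn.gateway_address')}"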
diff --git a/provider_base/lib/macros/secrets.rb b/provider_base/lib/macros/secrets.rb
new file mode 100644
index 00000000..51bf3971
--- /dev/null
+++ b/provider_base/lib/macros/secrets.rb
@@ -0,0 +1,39 @@
+# encoding: utf-8
+
+require 'base32'
+
+module LeapCli
+ module Macro
+
+ #
+ # inserts a named secret, generating it if needed.
+ #
+ # manager.export_secrets should be called later to capture any newly generated secrets.
+ #
+ # +length+ is the character length of the generated password.
+ #
+ def secret(name, length=32)
+ @manager.secrets.set(name, Util::Secret.generate(length), @node[:environment])
+ end
+
+ # inserts a base32 encoded secret
+ def base32_secret(name, length=20)
+ @manager.secrets.set(name, Base32.encode(Util::Secret.generate(length)), @node[:environment])
+ end
+
+  # Picks a random obfsproxy port from the given range
+ def rand_range(name, range)
+ @manager.secrets.set(name, rand(range), @node[:environment])
+ end
+
+ #
+  # inserts a hexadecimal secret string, generating it if needed.
+  #
+  # +bit_length+ is the number of bits in the secret (i.e. the length of the resulting hex string will be bit_length/4)
+ #
+ def hex_secret(name, bit_length=128)
+ @manager.secrets.set(name, Util::Secret.generate_hex(bit_length), @node[:environment])
+ end
+
+ end
+end \ No newline at end of file
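
Typical usage of these helpers, taken from couchdb.json and obfsproxy.json elsewhere in this diff (combined into one object here just for illustration):

    {
      "replication": {
        "password": "= secret :couch_replication_password",
        "salt": "= hex_secret :couch_replication_password_salt, 128"
      },
      "scramblesuit": {
        "password": "= base32_secret('scramblesuit_password_'+name)",
        "port": "= rand_range('scramblesuit_port_'+name, 18000..32000)"
      }
    }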
diff --git a/provider_base/lib/macros/stunnel.rb b/provider_base/lib/macros/stunnel.rb
new file mode 100644
index 00000000..f16308c7
--- /dev/null
+++ b/provider_base/lib/macros/stunnel.rb
@@ -0,0 +1,95 @@
+##
+## STUNNEL
+##
+
+#
+# About stunnel
+# --------------------------
+#
+# The network looks like this:
+#
+# From the client's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# consumer app -> localhost:accept_port -> connect:connect_port -> ??
+#
+# From the server's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# ?? -> *:accept_port -> localhost:connect_port -> service
+#
+
+module LeapCli
+ module Macro
+
+ #
+ # stunnel configuration for the client side.
+ #
+ # +node_list+ is a ObjectList of nodes running stunnel servers.
+ #
+ # +port+ is the real port of the ultimate service running on the servers
+ # that the client wants to connect to.
+ #
+ # * accept_port is the port on localhost to which local clients
+ # can connect. it is auto generated serially.
+ #
+ # * connect_port is the port on the stunnel server to connect to.
+ # it is auto generated from the +port+ argument.
+ #
+ # generates an entry appropriate to be passed directly to
+ # create_resources(stunnel::service, hiera('..'), defaults)
+ #
+ # local ports are automatically generated, starting at 4000
+ # and incrementing in sorted order (by node name).
+ #
+ def stunnel_client(node_list, port, options={})
+ @next_stunnel_port ||= 4000
+ node_list = listify(node_list)
+ hostnames(node_list) # record the hosts
+ result = Config::ObjectList.new
+ node_list.each_node do |node|
+ if node.name != self.name || options[:include_self]
+ result["#{node.name}_#{port}"] = Config::Object[
+ 'accept_port', @next_stunnel_port,
+ 'connect', node.domain.internal,
+ 'connect_port', stunnel_port(port),
+ 'original_port', port
+ ]
+ @next_stunnel_port += 1
+ end
+ end
+ result
+ end
+
+ #
+ # generates a stunnel server entry.
+ #
+  # +port+ is the real port of the targeted service.
+ #
+ # * `accept_port` is the publicly bound port
+ # * `connect_port` is the port that the local service is running on.
+ #
+ def stunnel_server(port)
+ {
+ "accept_port" => stunnel_port(port),
+ "connect_port" => port
+ }
+ end
+
+ private
+
+ #
+ # maps a real port to a stunnel port (used as the connect_port in the client config
+ # and the accept_port in the server config)
+ #
+ def stunnel_port(port)
+ port = port.to_i
+ if port < 50000
+ return port + 10000
+ else
+ return port - 10000
+ end
+ end
+
+ end
+end \ No newline at end of file
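
Concretely, with couch.port 5984, stunnel_port() yields 15984 (ports below 50000 are shifted up by 10000), so the entries generated for a hypothetical server node "couch1" would resolve to roughly:

    {
      "clients": {
        "couch1_5984": {
          "accept_port": 4000,
          "connect": "couch1.example.i",
          "connect_port": 15984,
          "original_port": 5984
        }
      },
      "servers": {
        "couch_server": { "accept_port": 15984, "connect_port": 5984 }
      }
    }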
diff --git a/provider_base/services/_couchdb_master.json b/provider_base/services/_couchdb_master.json
new file mode 100644
index 00000000..20c6f99b
--- /dev/null
+++ b/provider_base/services/_couchdb_master.json
@@ -0,0 +1,8 @@
+//
+// Applied to the master couchdb node when there is a single master
+//
+{
+ "couch": {
+ "mode": "master"
+ }
+} \ No newline at end of file
diff --git a/provider_base/services/_couchdb_mirror.json b/provider_base/services/_couchdb_mirror.json
new file mode 100644
index 00000000..6a3402bd
--- /dev/null
+++ b/provider_base/services/_couchdb_mirror.json
@@ -0,0 +1,21 @@
+//
+// Applied to all non-master couchdb nodes
+//
+{
+ "stunnel": {
+ "clients": {
+ "couch_client": "= stunnel_client(nodes[couch.replication.masters.keys], couch.port)"
+ }
+ },
+ "couch": {
+ "mode": "mirror",
+ "replication": {
+ // for now, pick the first close one, or the first one.
+ // in the future, maybe use haproxy to balance among all the masters
+ "masters": "= try{pick_node(:couch_master,nodes_near_me['services' => 'couchdb']['couch.master' => true]).pick_fields('domain.internal', 'couch.port')} || try{pick_node(:couch_master,nodes_like_me['services' => 'couchdb']['couch.master' => true]).pick_fields('domain.internal', 'couch.port')}",
+ "username": "replication",
+ "password": "= secret :couch_replication_password",
+ "role": "replication"
+ }
+ }
+}
diff --git a/provider_base/services/_couchdb_multimaster.json b/provider_base/services/_couchdb_multimaster.json
new file mode 100644
index 00000000..8c433188
--- /dev/null
+++ b/provider_base/services/_couchdb_multimaster.json
@@ -0,0 +1,24 @@
+//
+// Only applied to master couchdb nodes when there are multiple masters
+//
+{
+ "stunnel": {
+ "servers": {
+ "epmd_server": "= stunnel_server(couch.bigcouch.epmd_port)",
+ "ednp_server": "= stunnel_server(couch.bigcouch.ednp_port)"
+ },
+ "clients": {
+ "epmd_clients": "= stunnel_client(nodes_like_me[:services => :couchdb], couch.bigcouch.epmd_port)",
+ "ednp_clients": "= stunnel_client(nodes_like_me[:services => :couchdb], couch.bigcouch.ednp_port)"
+ }
+ },
+ "couch": {
+ "mode": "multimaster",
+ "bigcouch": {
+ "epmd_port": 4369,
+ "ednp_port": 9002,
+ "cookie": "= secret :bigcouch_cookie",
+ "neighbors": "= nodes_like_me['services' => 'couchdb']['couch.master' => true].exclude(self).field('domain.full')"
+ }
+ }
+}
diff --git a/provider_base/services/couchdb.json b/provider_base/services/couchdb.json
index 5f1b5381..8b1386f8 100644
--- a/provider_base/services/couchdb.json
+++ b/provider_base/services/couchdb.json
@@ -3,20 +3,13 @@
"use": true
},
"stunnel": {
- "couch_server": "= stunnel_server(couch.port)",
- "epmd_server": "= stunnel_server(couch.bigcouch.epmd_port)",
- "epmd_clients": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.bigcouch.epmd_port)",
- "ednp_server": "= stunnel_server(couch.bigcouch.ednp_port)",
- "ednp_clients": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.bigcouch.ednp_port)"
+ "servers": {
+ "couch_server": "= stunnel_server(couch.port)"
+ }
},
"couch": {
+ "master": false,
"port": 5984,
- "bigcouch": {
- "epmd_port": 4369,
- "ednp_port": 9002,
- "cookie": "= secret :bigcouch_cookie",
- "neighbors": "= nodes_like_me[:services => :couchdb].exclude(self).field('domain.full')"
- },
"users": {
"admin": {
"username": "admin",
@@ -47,10 +40,15 @@
"username": "webapp",
"password": "= secret :couch_webapp_password",
"salt": "= hex_secret :couch_webapp_password_salt, 128"
+ },
+ "replication": {
+ "username": "replication",
+ "password": "= secret :couch_replication_password",
+ "salt": "= hex_secret :couch_replication_password_salt, 128"
}
},
- "webapp": {
- "nagios_test_pw": "= secret :nagios_test_password"
+ "webapp": {
+ "nagios_test_pw": "= secret :nagios_test_password"
}
}
}
diff --git a/provider_base/services/couchdb.rb b/provider_base/services/couchdb.rb
new file mode 100644
index 00000000..3bee3a67
--- /dev/null
+++ b/provider_base/services/couchdb.rb
@@ -0,0 +1,60 @@
+#######################################################################
+###
+### NOTE!
+###
+### Currently, mirrors do not work! The only configurations that work are
+### all nodes multimaster, or a single master.
+###
+#######################################################################
+#
+# custom logic for couchdb json resolution
+# ============================================
+#
+# There are three modes for a node:
+#
+# Multimaster
+# -----------
+#
+# Multimaster uses bigcouch (soon to use couchdb in replication mode
+# similar to bigcouch).
+#
+# Use "multimaster" mode when:
+#
+# * multiple nodes are marked couch.master
+# * OR no nodes are marked couch.master
+#
+# Master
+# ------
+#
+# Master uses plain couchdb that is readable and writable.
+#
+# Use "master" mode when:
+#
+# * Exactly one node, this one, is marked as master.
+#
+# Mirror
+# ------
+#
+# Mirror creates a read-only copy of the database. It uses plain couchdb
+# with legacy couchdb replication (http based).
+#
+# This does not currently work, because http replication can't handle
+# the number of user databases.
+#
+# Use "mirror" mode when:
+#
+# * some nodes are marked couch.master
+# * AND this node is not a master
+#
+
+master_count = nodes_like_me['services' => 'couchdb']['couch.master' => true].size
+
+if master_count == 0
+ apply_partial 'services/_couchdb_multimaster.json'
+elsif couch.master && master_count > 1
+ apply_partial 'services/_couchdb_multimaster.json'
+elsif couch.master && master_count == 1
+ apply_partial 'services/_couchdb_master.json'
+else
+ apply_partial 'services/_couchdb_mirror.json'
+end
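
Given the logic above, the single-master layout is selected by marking exactly one couchdb node with couch.master (it defaults to false in couchdb.json); every other couchdb node then receives the mirror partial (which, per the NOTE above, does not work yet). A minimal node-file sketch (the filename is hypothetical):

    // nodes/couchmaster.json
    {
      "services": ["couchdb"],
      "couch": {
        "master": true
      }
    }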
diff --git a/provider_base/services/monitor.json b/provider_base/services/monitor.json
index 03f6c6d1..c24724bf 100644
--- a/provider_base/services/monitor.json
+++ b/provider_base/services/monitor.json
@@ -12,11 +12,9 @@
},
"x509": {
"use": true,
+ "use_commercial": true,
"ca_cert": "= file :ca_cert, :missing => 'provider CA. Run `leap cert ca`'",
"client_ca_cert": "= file :client_ca_cert, :missing => 'Certificate Authority. Run `leap cert ca`'",
- "client_ca_key": "= file :client_ca_key, :missing => 'Certificate Authority. Run `leap cert ca`'",
- "commercial_cert": "= file [:commercial_cert, domain.full_suffix]",
- "commercial_key": "= file [:commercial_key, domain.full_suffix]",
- "commercial_ca_cert": "= try_file :commercial_ca_cert"
+ "client_ca_key": "= file :client_ca_key, :missing => 'Certificate Authority. Run `leap cert ca`'"
}
}
diff --git a/provider_base/services/mx.json b/provider_base/services/mx.json
index 731dee9a..11293ae8 100644
--- a/provider_base/services/mx.json
+++ b/provider_base/services/mx.json
@@ -1,9 +1,14 @@
{
"stunnel": {
- "couch_client": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.port)"
+ "clients": {
+ "couch_client": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.port)"
+ }
},
"haproxy": {
- "servers": "= haproxy_servers(nodes_like_me[:services => :couchdb], stunnel.couch_client)"
+ "couch": {
+ "listen_port": 4096,
+ "servers": "= haproxy_servers(nodes_like_me[:services => :couchdb], stunnel.clients.couch_client, global.services[:couchdb].couch.port)"
+ }
},
"couchdb_leap_mx_user": {
"username": "= global.services[:couchdb].couch.users[:leap_mx].username",
@@ -13,12 +18,10 @@
"mynetworks": "= nodes['environment' => '!local'].map{|name, n| [n.ip_address, (global.facts[name]||{})['ec2_public_ipv4']]}.flatten.compact.uniq",
"x509": {
"use": true,
+ "use_commercial": true,
"ca_cert": "= file :ca_cert, :missing => 'provider CA. Run `leap cert ca`'",
"client_ca_cert": "= file :client_ca_cert, :missing => 'Certificate Authority. Run `leap cert ca`'",
- "client_ca_key": "= file :client_ca_key, :missing => 'Certificate Authority. Run `leap cert ca`'",
- "commercial_cert": "= file [:commercial_cert, domain.full_suffix]",
- "commercial_key": "= file [:commercial_key, domain.full_suffix]",
- "commercial_ca_cert": "= try_file :commercial_ca_cert"
+ "client_ca_key": "= file :client_ca_key, :missing => 'Certificate Authority. Run `leap cert ca`'"
},
"service_type": "user_service"
}
diff --git a/provider_base/services/obfsproxy.json b/provider_base/services/obfsproxy.json
new file mode 100644
index 00000000..979d0ef9
--- /dev/null
+++ b/provider_base/services/obfsproxy.json
@@ -0,0 +1,9 @@
+{
+ "obfsproxy": {
+ "scramblesuit": {
+ "password": "= base32_secret('scramblesuit_password_'+name)",
+ "port" : "= rand_range('scramblesuit_port_'+name, 18000..32000)"
+ },
+ "gateway_address": "= try{pick_node(:obfs_gateway,nodes_near_me['services' => 'openvpn']).pick_fields('openvpn.gateway_address')} || try{pick_node(:obfs_gateway,nodes_like_me['services' => 'openvpn']).pick_fields('openvpn.gateway_address')}"
+ }
+}
diff --git a/provider_base/services/openvpn.json b/provider_base/services/openvpn.json
index 090afcd6..1906244c 100644
--- a/provider_base/services/openvpn.json
+++ b/provider_base/services/openvpn.json
@@ -26,5 +26,12 @@
"keepalive": "10 30",
"tun-ipv6": true
}
+ },
+ "obfsproxy": {
+ "scramblesuit": {
+ "password": "= base32_secret('scramblesuit_password_'+name)",
+ "port" : "= rand_range('scramblesuit_port_'+name, 18000..32000)"
+ },
+ "gateway_address": "= openvpn.gateway_address"
}
}
diff --git a/provider_base/services/webapp.json b/provider_base/services/webapp.json
index bbb52094..3af0dade 100644
--- a/provider_base/services/webapp.json
+++ b/provider_base/services/webapp.json
@@ -13,6 +13,7 @@
"allow_limited_certs": "= provider.service.allow_limited_bandwidth",
"allow_unlimited_certs": "= provider.service.allow_unlimited_bandwidth",
"allow_anonymous_certs": "= provider.service.allow_anonymous",
+ "allow_registration": "= provider.service.allow_registration",
"default_service_level": "= provider.service.default_service_level",
"service_levels": "= provider.service.levels",
"secret_token": "= secret :webapp_secret_token",
@@ -26,13 +27,21 @@
"nagios_test_user": {
"username": "nagios_test",
"password": "= secret :nagios_test_password"
- }
+ },
+ "engines": [
+ "support"
+ ]
},
"stunnel": {
- "couch_client": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.port)"
+ "clients": {
+ "couch_client": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.port)"
+ }
},
"haproxy": {
- "servers": "= haproxy_servers(nodes_like_me[:services => :couchdb], stunnel.couch_client, global.services[:couchdb].couch.port)"
+ "couch": {
+ "listen_port": 4096,
+ "servers": "= haproxy_servers(nodes_like_me[:services => :couchdb], stunnel.clients.couch_client, global.services[:couchdb].couch.port)"
+ }
},
"definition_files": {
"provider": "= file :provider_json_template",
@@ -59,11 +68,9 @@
},
"x509": {
"use": true,
+ "use_commercial": true,
"ca_cert": "= file :ca_cert, :missing => 'provider CA. Run `leap cert ca`'",
- "client_ca_cert": "= file :client_ca_cert, :missing => 'Certificate Authority. Run `leap cert ca`'",
- "client_ca_key": "= file :client_ca_key, :missing => 'Certificate Authority. Run `leap cert ca`'",
- "commercial_cert": "= file [:commercial_cert, webapp.domain]",
- "commercial_key": "= file [:commercial_key, webapp.domain]",
- "commercial_ca_cert": "= try_file :commercial_ca_cert"
+ "client_ca_cert": "= file :client_ca_cert, :missing => 'Certificate Authority. Run `leap cert ca`.'",
+ "client_ca_key": "= file :client_ca_key, :missing => 'Certificate Authority. Run `leap cert ca`.'"
}
}