author    Micah Anderson <micah@riseup.net>  2013-02-19 15:18:30 -0500
committer Micah Anderson <micah@riseup.net>  2013-02-19 15:18:30 -0500
commit    4dcc5f884cd22d0673f6493799ace2f03a9e66fe (patch)
tree      3f3f5c217c40f3037c1b2a9cd8da3fe91fdd8389 /puppet
parent    253b765620961bbc9d96e8f3653b0b9693d29811 (diff)
parent    2e5eec3856b58aaff0a2049599a6455e6ff91122 (diff)
Merge remote-tracking branch 'origin/release/v0.2.0' (tag: 0.2.0)
Diffstat (limited to 'puppet')
-rw-r--r--  puppet/hiera.yaml | 15
-rw-r--r--  puppet/lib/puppet/parser/functions/create_resources_hash_from.rb | 116
-rw-r--r--  puppet/manifests/site.pp | 40
m---------  puppet/modules/apache | 0
m---------  puppet/modules/apt | 0
m---------  puppet/modules/augeas | 0
m---------  puppet/modules/bundler | 0
m---------  puppet/modules/common | 0
m---------  puppet/modules/concat | 0
m---------  puppet/modules/couchdb | 0
m---------  puppet/modules/git | 0
m---------  puppet/modules/lsb | 0
m---------  puppet/modules/nagios | 0
m---------  puppet/modules/ntp | 9
m---------  puppet/modules/openvpn | 8
m---------  puppet/modules/passenger | 0
m---------  puppet/modules/resolvconf | 0
m---------  puppet/modules/ruby | 0
m---------  puppet/modules/rubygems | 0
m---------  puppet/modules/shorewall | 0
l---------  puppet/modules/site-apache | 1
-rw-r--r--  puppet/modules/site_apache/files/vhosts.d/couchdb_proxy.conf | 10
-rw-r--r--  puppet/modules/site_apache/templates/vhosts.d/api.conf.erb | 39
-rw-r--r--  puppet/modules/site_apache/templates/vhosts.d/leap_webapp.conf.erb | 47
-rw-r--r--  puppet/modules/site_apt/manifests/dist_upgrade.pp | 15
-rw-r--r--  puppet/modules/site_apt/manifests/init.pp | 15
-rw-r--r--  puppet/modules/site_apt/templates/fallback.list | 3
-rw-r--r--  puppet/modules/site_ca_daemon/manifests/apache.pp | 62
-rw-r--r--  puppet/modules/site_ca_daemon/manifests/couchdb.pp | 16
-rw-r--r--  puppet/modules/site_ca_daemon/manifests/init.pp | 103
-rw-r--r--  puppet/modules/site_ca_daemon/templates/leap_ca.yaml.erb | 31
-rw-r--r--  puppet/modules/site_config/lib/facter/ip_interface.rb | 13
-rw-r--r--  puppet/modules/site_config/manifests/caching_resolver.pp | 41
-rw-r--r--  puppet/modules/site_config/manifests/default.pp | 36
-rw-r--r--  puppet/modules/site_config/manifests/hosts.pp | 22
-rw-r--r--  puppet/modules/site_config/manifests/resolvconf.pp | 24
-rw-r--r--  puppet/modules/site_config/manifests/slow.pp | 6
-rw-r--r--  puppet/modules/site_config/manifests/sshd.pp | 9
-rw-r--r--  puppet/modules/site_config/templates/hosts | 15
-rwxr-xr-x  puppet/modules/site_couchdb/files/couchdb | 160
-rwxr-xr-x  puppet/modules/site_couchdb/files/leap_ca_daemon | 157
-rw-r--r--  puppet/modules/site_couchdb/files/local.ini | 89
-rw-r--r--  puppet/modules/site_couchdb/manifests/apache_ssl_proxy.pp | 25
-rw-r--r--  puppet/modules/site_couchdb/manifests/configure.pp | 27
-rw-r--r--  puppet/modules/site_couchdb/manifests/init.pp | 64
-rw-r--r--  puppet/modules/site_nagios/files/configs/Debian/nagios.cfg | 1273
-rw-r--r--  puppet/modules/site_nagios/manifests/add_host.pp | 31
-rw-r--r--  puppet/modules/site_nagios/manifests/add_service.pp | 21
-rw-r--r--  puppet/modules/site_nagios/manifests/init.pp | 4
-rw-r--r--  puppet/modules/site_nagios/manifests/server.pp | 38
-rw-r--r--  puppet/modules/site_nagios/manifests/server/purge.pp | 7
-rw-r--r--  puppet/modules/site_openvpn/manifests/init.pp | 108
-rw-r--r--  puppet/modules/site_openvpn/manifests/keys.pp | 51
-rw-r--r--  puppet/modules/site_openvpn/manifests/resolver.pp | 36
-rw-r--r--  puppet/modules/site_openvpn/manifests/server_config.pp | 166
-rw-r--r--  puppet/modules/site_shorewall/manifests/couchdb.pp | 23
-rw-r--r--  puppet/modules/site_shorewall/manifests/defaults.pp | 54
-rw-r--r--  puppet/modules/site_shorewall/manifests/dnat_rule.pp | 25
-rw-r--r--  puppet/modules/site_shorewall/manifests/eip.pp | 75
-rw-r--r--  puppet/modules/site_shorewall/manifests/ip_forward.pp | 10
-rw-r--r--  puppet/modules/site_shorewall/manifests/monitor.pp | 8
-rw-r--r--  puppet/modules/site_shorewall/manifests/service/http.pp | 13
-rw-r--r--  puppet/modules/site_shorewall/manifests/service/https.pp | 12
-rw-r--r--  puppet/modules/site_shorewall/manifests/service/webapp_api.pp | 22
-rw-r--r--  puppet/modules/site_shorewall/manifests/sshd.pp | 24
-rw-r--r--  puppet/modules/site_shorewall/manifests/tor.pp | 25
-rw-r--r--  puppet/modules/site_shorewall/manifests/webapp.pp | 6
-rw-r--r--  puppet/modules/site_sshd/manifests/init.pp | 1
-rw-r--r--  puppet/modules/site_sshd/manifests/ssh_key.pp | 3
-rw-r--r--  puppet/modules/site_tor/manifests/disable_exit.pp | 7
-rw-r--r--  puppet/modules/site_tor/manifests/init.pp | 28
-rw-r--r--  puppet/modules/site_webapp/manifests/apache.pp | 65
-rw-r--r--  puppet/modules/site_webapp/manifests/client_ca.pp | 25
-rw-r--r--  puppet/modules/site_webapp/manifests/couchdb.pp | 16
-rw-r--r--  puppet/modules/site_webapp/manifests/init.pp | 117
-rw-r--r--  puppet/modules/site_webapp/templates/config.yml.erb | 5
-rw-r--r--  puppet/modules/site_webapp/templates/couchdb.yml.erb | 8
m---------  puppet/modules/sshd | 0
m---------  puppet/modules/stdlib | 0
m---------  puppet/modules/tor | 0
-rw-r--r--  puppet/modules/try/README.md | 13
-rw-r--r--  puppet/modules/try/manifests/file.pp | 51
-rw-r--r--  puppet/modules/try/manifests/init.pp | 3
m---------  puppet/modules/unbound | 0
m---------  puppet/modules/vcsrepo | 8
m---------  puppet/modules/x509 | 0
86 files changed, 3598 insertions, 2 deletions
diff --git a/puppet/hiera.yaml b/puppet/hiera.yaml
new file mode 100644
index 00000000..93448e23
--- /dev/null
+++ b/puppet/hiera.yaml
@@ -0,0 +1,15 @@
+---
+:backends:
+ - yaml
+ - puppet
+
+:logger: console
+
+:yaml:
+ :datadir: /etc/leap
+
+:hierarchy:
+ - hiera
+
+:puppet:
+ :datasource: data
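
For orientation, this hiera.yaml is what points the hiera lookups used by the
manifests below at the per-node YAML data under /etc/leap. A minimal sketch of
such a lookup, assuming a hypothetical data file that defines 'services' and
'domain' (key names and values here are illustrative only):

    # assumes /etc/leap/hiera.yaml contains, e.g.:
    #   services: ['webapp', 'couchdb']
    #   domain:   { 'full_suffix': 'example.org' }
    $services    = hiera_array('services')   # -> ['webapp', 'couchdb']
    $domain_hash = hiera('domain')           # -> hash served by the yaml backend
    notice("this node provides: ${services}")
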
diff --git a/puppet/lib/puppet/parser/functions/create_resources_hash_from.rb b/puppet/lib/puppet/parser/functions/create_resources_hash_from.rb
new file mode 100644
index 00000000..47d0df9c
--- /dev/null
+++ b/puppet/lib/puppet/parser/functions/create_resources_hash_from.rb
@@ -0,0 +1,116 @@
+#
+# create_resources_hash_from.rb
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Puppet::Parser::Functions
+ newfunction(:create_resources_hash_from, :type => :rvalue, :doc => <<-EOS
+Given:
+ A formatted string (to use as the resource name)
+ An array to loop through (because puppet cannot loop)
+ A hash defining the parameters for a resource
+ And optionally a hash of parameter names to add to the resource and an
+ associated formatted string that should be configured with the current
+ element of the loop array
+
+This function will return a hash of hashes that can be used with the
+create_resources function.
+
+*Examples:*
+ $allowed_hosts = ['10.0.0.0/8', '192.168.0.0/24']
+ $resource_name = "100 allow %s to apache on ports 80"
+ $my_resource_hash = {
+ 'proto' => 'tcp',
+ 'action' => 'accept',
+ 'dport' => 80
+ }
+ $dynamic_parameters = {
+ 'source' => '%s'
+ }
+
+ $created_resource_hash = create_resources_hash_from($resource_name, $allowed_hosts, $my_resource_hash, $dynamic_parameters)
+
+$created_resource_hash would equal:
+ {
+ '100 allow 10.0.0.0/8 to apache on ports 80' => {
+ 'proto' => 'tcp',
+ 'action' => 'accept',
+ 'dport' => 80,
+ 'source' => '10.0.0.0/8'
+ },
+ '100 allow 192.168.0.0/24 to apache on ports 80' => {
+ 'proto' => 'tcp',
+ 'action' => 'accept',
+ 'dport' => 80,
+ 'source' => '192.168.0.0/24'
+ }
+ }
+
+$created_resource_hash could then be used with create_resources
+
+ create_resources(firewall, $created_resource_hash)
+
+to create a bunch of resources in a way that would otherwise only be possible
+with a loop of some description.
+ EOS
+ ) do |arguments|
+
+ raise Puppet::ParseError, "create_resources_hash_from(): Wrong number of arguments " +
+ "given (#{arguments.size} for 3 or 4)" if arguments.size < 3 or arguments.size > 4
+
+ formatted_string = arguments[0]
+
+ unless formatted_string.is_a?(String)
+ raise(Puppet::ParseError, 'create_resources_hash_from(): first argument must be a string')
+ end
+
+ loop_array = arguments[1]
+
+ unless loop_array.is_a?(Array)
+ raise(Puppet::ParseError, 'create_resources_hash_from(): second argument must be an array')
+ end
+
+ resource_hash = arguments[2]
+ unless resource_hash.is_a?(Hash)
+ raise(Puppet::ParseError, 'create_resources_hash_from(): third argument must be a hash')
+ end
+
+ if arguments.size == 4
+ dynamic_parameters = arguments[3]
+ unless dynamic_parameters.is_a?(Hash)
+ raise(Puppet::ParseError, 'create_resources_hash_from(): fourth argument must be a hash')
+ end
+ end
+
+ result = {}
+
+ loop_array.each do |i|
+ my_resource_hash = resource_hash.clone
+ if dynamic_parameters
+ dynamic_parameters.each do |param, value|
+ if my_resource_hash.member?(param)
+ raise(Puppet::ParseError, "create_resources_hash_from(): dynamic_parameter '#{param}' already exists in resource hash")
+ end
+ my_resource_hash[param] = sprintf(value,[i])
+ end
+ end
+ result[sprintf(formatted_string,[i])] = my_resource_hash
+ end
+
+ result
+ end
+end
+
+# vim: set ts=2 sw=2 et :
+# encoding: utf-8
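
In manifest terms, the function above is meant to be paired with create_resources,
as the docstring shows. A compact, hypothetical sketch of that pairing (the
firewall type comes from puppetlabs-firewall and is not part of this commit):

    $allowed = ['10.0.0.0/8', '192.168.0.0/24']
    $base    = { 'proto' => 'tcp', 'action' => 'accept', 'dport' => 80 }
    $extra   = { 'source' => '%s' }
    # build one resource title per allowed network, since puppet 2.7 cannot loop
    $rules   = create_resources_hash_from('100 allow %s to apache on port 80', $allowed, $base, $extra)
    create_resources(firewall, $rules)
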
diff --git a/puppet/manifests/site.pp b/puppet/manifests/site.pp
index 3a136015..1ec806d9 100644
--- a/puppet/manifests/site.pp
+++ b/puppet/manifests/site.pp
@@ -1,3 +1,39 @@
-node "default" {
- notify {'Hello World':}
+# set a default exec path
+Exec { path => '/usr/bin:/usr/sbin/:/bin:/sbin:/usr/local/bin:/usr/local/sbin' }
+
+stage { 'initial':
+ before => Stage['main'],
+}
+
+import 'common'
+include site_config::default
+include site_config::slow
+
+# parse services for host
+$services=hiera_array('services')
+notice("Services for ${fqdn}: ${services}")
+
+# configure eip
+if 'openvpn' in $services {
+ include site_openvpn
+}
+
+if 'couchdb' in $services {
+ include site_couchdb
+}
+
+if 'webapp' in $services {
+ include site_webapp
+}
+
+if 'ca' in $services {
+ include site_ca_daemon
+}
+
+if 'monitor' in $services {
+ include site_nagios
+}
+
+if 'tor' in $services {
+ include site_tor
}
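
The 'initial' run stage declared above is how early, one-shot work is ordered in
front of everything else; a class opts in by declaring itself with stage => initial,
as site_config::hosts and site_apt::dist_upgrade do further down. A minimal sketch
of the pattern ('site_example::bootstrap' is a hypothetical class name):

    stage { 'initial':
      before => Stage['main'],
    }

    # runs before any class left in the default 'main' stage
    class { 'site_example::bootstrap':
      stage => initial,
    }
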
diff --git a/puppet/modules/apache b/puppet/modules/apache
new file mode 160000
+Subproject 077d4d1508b9ff3355f73ff8597991043b3ba5d
diff --git a/puppet/modules/apt b/puppet/modules/apt
new file mode 160000
+Subproject f16a0727dce187d07389388da8b816f7b520205
diff --git a/puppet/modules/augeas b/puppet/modules/augeas
new file mode 160000
+Subproject 4d8c8ba362cc57c12451e581f27feea97797e8c
diff --git a/puppet/modules/bundler b/puppet/modules/bundler
new file mode 160000
+Subproject b4a4a8434616247156e59b860b47cc6256ead8d
diff --git a/puppet/modules/common b/puppet/modules/common
new file mode 160000
+Subproject 0961ad453b8befb4ea61bbd19f6ecea32b9619c
diff --git a/puppet/modules/concat b/puppet/modules/concat
new file mode 160000
+Subproject abce1280e07b544d8455f1572dd870bbd2f1489
diff --git a/puppet/modules/couchdb b/puppet/modules/couchdb
new file mode 160000
+Subproject dcb8a082ac842b0660819ea61f9448c4e373746
diff --git a/puppet/modules/git b/puppet/modules/git
new file mode 160000
+Subproject 497a1034489e0dc3cab5dab2fb0a85778576973
diff --git a/puppet/modules/lsb b/puppet/modules/lsb
new file mode 160000
+Subproject 3742c1a00c5602154a81834443ec5b0ca32c4ca
diff --git a/puppet/modules/nagios b/puppet/modules/nagios
new file mode 160000
+Subproject 57a1140b437a8cfb9cfd5d94a5759b1e3ed86d4
diff --git a/puppet/modules/ntp b/puppet/modules/ntp
new file mode 160000
+Subproject 27f2bc72110b1001233eb0907aa07e06cdf3319
diff --git a/puppet/modules/openvpn b/puppet/modules/openvpn
new file mode 160000
+Subproject 25f1fe8d813f6128068d890a40f5e24be78fb47
diff --git a/puppet/modules/passenger b/puppet/modules/passenger
new file mode 160000
+Subproject d1b46de84acf4d9e3582b64e019935fb1125f9b
diff --git a/puppet/modules/resolvconf b/puppet/modules/resolvconf
new file mode 160000
+Subproject c7eca077fdda063edc96d3bea02c4774569e4b1
diff --git a/puppet/modules/ruby b/puppet/modules/ruby
new file mode 160000
+Subproject e4de25d78eefc7df70a35dee22a3e0dc1b7e1d0
diff --git a/puppet/modules/rubygems b/puppet/modules/rubygems
new file mode 160000
+Subproject 1e5ed3dbef9381bb9d5e2a7b4957bb3f5288d6a
diff --git a/puppet/modules/shorewall b/puppet/modules/shorewall
new file mode 160000
+Subproject e4a54e30bf2ad7fa45c73cc544e1da4524a287a
diff --git a/puppet/modules/site-apache b/puppet/modules/site-apache
new file mode 120000
index 00000000..f0517fa5
--- /dev/null
+++ b/puppet/modules/site-apache
@@ -0,0 +1 @@
+site_apache
\ No newline at end of file
diff --git a/puppet/modules/site_apache/files/vhosts.d/couchdb_proxy.conf b/puppet/modules/site_apache/files/vhosts.d/couchdb_proxy.conf
new file mode 100644
index 00000000..0dff2cd6
--- /dev/null
+++ b/puppet/modules/site_apache/files/vhosts.d/couchdb_proxy.conf
@@ -0,0 +1,10 @@
+Listen 0.0.0.0:6984
+
+<VirtualHost *:6984>
+ SSLEngine On
+ SSLProxyEngine On
+ SSLCertificateKeyFile /etc/x509/keys/leap_couchdb.key
+ SSLCertificateFile /etc/x509/certs/leap_couchdb.crt
+ ProxyPass / http://127.0.0.1:5984/
+ ProxyPassReverse / http://127.0.0.1:5984/
+</VirtualHost>
diff --git a/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb b/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb
new file mode 100644
index 00000000..cdfcbd68
--- /dev/null
+++ b/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb
@@ -0,0 +1,39 @@
+<VirtualHost *:80>
+ ServerName <%= api_domain %>
+ RewriteEngine On
+ RewriteRule ^.*$ https://<%= api_domain -%>:<%= api_port -%>%{REQUEST_URI} [R=permanent,L]
+</VirtualHost>
+
+Listen 0.0.0.0:<%= api_port %>
+
+<VirtualHost *:<%= api_port -%>>
+ ServerName <%= api_domain %>
+
+ SSLEngine on
+ SSLProtocol -all +SSLv3 +TLSv1
+ SSLCipherSuite HIGH:MEDIUM:!aNULL:!SSLv2:!MD5:@STRENGTH
+ SSLHonorCipherOrder on
+
+ SSLCACertificatePath /etc/ssl/certs
+ SSLCertificateChainFile /etc/ssl/certs/leap_api.pem
+ SSLCertificateKeyFile /etc/x509/keys/leap_api.key
+ SSLCertificateFile /etc/x509/certs/leap_api.crt
+
+ RequestHeader set X_FORWARDED_PROTO 'https'
+
+ DocumentRoot /srv/leap-webapp/public
+ Alias /1 /srv/leap-webapp/public
+
+ # Check for maintenance file and redirect all requests
+ RewriteEngine On
+ RewriteCond %{DOCUMENT_ROOT}/system/maintenance.html -f
+ RewriteCond %{SCRIPT_FILENAME} !maintenance.html
+ RewriteCond %{REQUEST_URI} !/images/maintenance.jpg
+ RewriteRule ^.*$ %{DOCUMENT_ROOT}/system/maintenance.html [L]
+
+ # http://www.modrails.com/documentation/Users%20guide%20Apache.html#_passengerallowencodedslashes_lt_on_off_gt
+ AllowEncodedSlashes on
+ PassengerAllowEncodedSlashes on
+ PassengerFriendlyErrorPages off
+ SetEnv TMPDIR /var/tmp
+</VirtualHost>
diff --git a/puppet/modules/site_apache/templates/vhosts.d/leap_webapp.conf.erb b/puppet/modules/site_apache/templates/vhosts.d/leap_webapp.conf.erb
new file mode 100644
index 00000000..4928cdd6
--- /dev/null
+++ b/puppet/modules/site_apache/templates/vhosts.d/leap_webapp.conf.erb
@@ -0,0 +1,47 @@
+<VirtualHost *:80>
+ ServerName <%= domain %>
+ ServerAlias www.<%= domain %>
+ RewriteEngine On
+ RewriteRule ^.*$ https://<%= domain -%>%{REQUEST_URI} [R=permanent,L]
+</VirtualHost>
+
+<VirtualHost *:443>
+ ServerName <%= domain %>
+ ServerAlias www.<%= domain %>
+
+ SSLEngine on
+ SSLProtocol -all +SSLv3 +TLSv1
+ SSLCipherSuite HIGH:MEDIUM:!aNULL:!SSLv2:!MD5:@STRENGTH
+ SSLHonorCipherOrder on
+
+ SSLCACertificatePath /etc/ssl/certs
+ SSLCertificateChainFile /etc/ssl/certs/leap_webapp.pem
+ SSLCertificateKeyFile /etc/x509/keys/leap_webapp.key
+ SSLCertificateFile /etc/x509/certs/leap_webapp.crt
+
+ RequestHeader set X_FORWARDED_PROTO 'https'
+
+ DocumentRoot /srv/leap-webapp/public
+ Alias /1 /srv/leap-webapp/public
+
+ RewriteEngine On
+ # Check for maintenance file and redirect all requests
+ RewriteCond %{DOCUMENT_ROOT}/system/maintenance.html -f
+ RewriteCond %{SCRIPT_FILENAME} !maintenance.html
+ RewriteCond %{REQUEST_URI} !/images/maintenance.jpg
+ RewriteRule ^.*$ %{DOCUMENT_ROOT}/system/maintenance.html [L]
+
+ # http://www.modrails.com/documentation/Users%20guide%20Apache.html#_passengerallowencodedslashes_lt_on_off_gt
+ AllowEncodedSlashes on
+ PassengerAllowEncodedSlashes on
+ PassengerFriendlyErrorPages off
+ SetEnv TMPDIR /var/tmp
+
+ <% if (defined? @services) and (services.is_a? Array) and (@services.include? 'monitor') -%>
+ <DirectoryMatch (/usr/share/nagios3/htdocs|/usr/lib/cgi-bin/nagios3|/etc/nagios3/stylesheets)>
+ PassengerEnabled off
+ AllowOverride all
+ </DirectoryMatch>
+ <% end -%>
+</VirtualHost>
+
diff --git a/puppet/modules/site_apt/manifests/dist_upgrade.pp b/puppet/modules/site_apt/manifests/dist_upgrade.pp
new file mode 100644
index 00000000..f129dd73
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/dist_upgrade.pp
@@ -0,0 +1,15 @@
+class site_apt::dist_upgrade {
+
+ if $::apt_running == 'true' {
+ fail ('apt-get is running in background - Please wait until it finishes. Exiting.')
+ } else {
+ exec{'initial_apt_update':
+ command => '/usr/bin/apt-get update && /usr/bin/apt-get autoclean',
+ refreshonly => false,
+ }
+ exec{'initial_apt_dist_upgrade':
+ command => "/usr/bin/apt-get -q -y -o 'DPkg::Options::=--force-confold' dist-upgrade",
+ refreshonly => false,
+ }
+ }
+}
diff --git a/puppet/modules/site_apt/manifests/init.pp b/puppet/modules/site_apt/manifests/init.pp
new file mode 100644
index 00000000..80c6fbde
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/init.pp
@@ -0,0 +1,15 @@
+class site_apt {
+
+ include ::apt
+
+ apt::apt_conf { '90disable-pdiffs':
+ content => 'Acquire::PDiffs "false";';
+ }
+
+ include ::apt::unattended_upgrades
+
+ apt::sources_list { 'fallback.list.disabled':
+ content => template('site_apt/fallback.list');
+ }
+
+}
diff --git a/puppet/modules/site_apt/templates/fallback.list b/puppet/modules/site_apt/templates/fallback.list
new file mode 100644
index 00000000..41334b0b
--- /dev/null
+++ b/puppet/modules/site_apt/templates/fallback.list
@@ -0,0 +1,3 @@
+# basic
+deb http://ftp.debian.org/debian/ <%= lsbdistcodename %> main contrib non-free
+
diff --git a/puppet/modules/site_ca_daemon/manifests/apache.pp b/puppet/modules/site_ca_daemon/manifests/apache.pp
new file mode 100644
index 00000000..ab6b08fd
--- /dev/null
+++ b/puppet/modules/site_ca_daemon/manifests/apache.pp
@@ -0,0 +1,62 @@
+class site_ca_daemon::apache {
+
+ $api_domain = hiera('api_domain')
+ $x509 = hiera('x509')
+ $commercial_key = $x509['commercial_key']
+ $commercial_cert = $x509['commercial_cert']
+ $commercial_root = $x509['commercial_ca_cert']
+ $api_key = $x509['key']
+ $api_cert = $x509['cert']
+ $api_root = $x509['ca_cert']
+
+ $apache_no_default_site = true
+ include apache::ssl
+
+ apache::module {
+ 'alias': ensure => present;
+ 'rewrite': ensure => present;
+ 'headers': ensure => present;
+ }
+
+ class { 'passenger': use_munin => false }
+
+ apache::vhost::file {
+ 'leap_ca_daemon':
+ content => template('site_apache/vhosts.d/leap_ca_daemon.conf.erb')
+ }
+
+ apache::vhost::file {
+ 'api':
+ content => template('site_apache/vhosts.d/api.conf.erb')
+ }
+
+ x509::key {
+ 'leap_ca_daemon':
+ content => $commercial_key,
+ notify => Service[apache];
+
+ 'leap_api':
+ content => $api_key,
+ notify => Service[apache];
+ }
+
+ x509::cert {
+ 'leap_ca_daemon':
+ content => $commercial_cert,
+ notify => Service[apache];
+
+ 'leap_api':
+ content => $api_cert,
+ notify => Service[apache];
+ }
+
+ x509::ca {
+ 'leap_ca_daemon':
+ content => $commercial_root,
+ notify => Service[apache];
+
+ 'leap_api':
+ content => $api_root,
+ notify => Service[apache];
+ }
+}
diff --git a/puppet/modules/site_ca_daemon/manifests/couchdb.pp b/puppet/modules/site_ca_daemon/manifests/couchdb.pp
new file mode 100644
index 00000000..f446a05b
--- /dev/null
+++ b/puppet/modules/site_ca_daemon/manifests/couchdb.pp
@@ -0,0 +1,16 @@
+class site_ca_daemon::couchdb {
+
+ $ca = hiera('ca_daemon')
+ $couchdb_host = $ca['couchdb_hosts']
+ $couchdb_user = $ca['couchdb_user']['username']
+ $couchdb_password = $ca['couchdb_user']['password']
+
+ file {
+ '/etc/leap/leap_ca.yaml':
+ content => template('site_ca_daemon/leap_ca.yaml.erb'),
+ owner => leap_ca_daemon,
+ group => leap_ca_daemon,
+ mode => '0600';
+ }
+
+}
diff --git a/puppet/modules/site_ca_daemon/manifests/init.pp b/puppet/modules/site_ca_daemon/manifests/init.pp
new file mode 100644
index 00000000..8ba9c506
--- /dev/null
+++ b/puppet/modules/site_ca_daemon/manifests/init.pp
@@ -0,0 +1,103 @@
+class site_ca_daemon {
+ tag 'leap_service'
+ #$definition_files = hiera('definition_files')
+ #$provider = $definition_files['provider']
+ #$eip_service = $definition_files['eip_service']
+ $x509 = hiera('x509')
+
+ Class[Ruby] -> Class[rubygems] -> Class[bundler::install]
+
+ class { 'ruby': ruby_version => '1.9.3' }
+
+ class { 'bundler::install': install_method => 'package' }
+
+ include rubygems
+ #include site_ca_daemon::apache
+ include site_ca_daemon::couchdb
+
+ group { 'leap_ca_daemon':
+ ensure => present,
+ allowdupe => false;
+ }
+
+ user { 'leap_ca_daemon':
+ ensure => present,
+ allowdupe => false,
+ gid => 'leap_ca_daemon',
+ home => '/srv/leap_ca_daemon',
+ require => [ Group['leap_ca_daemon'] ];
+ }
+
+
+ x509::key {
+ 'leap_ca_daemon':
+ content => $x509['ca_key'];
+ #notify => Service['leap_ca_daemon']; <== no service yet for leap_ca_daemon
+ }
+
+ x509::cert {
+ 'leap_ca_daemon':
+ content => $x509['ca_cert'];
+ #notify => Service['leap_ca_daemon']; <== no service yet for leap_ca_daemon
+ }
+
+ #
+ # Does CA need a server key/cert? I think not now.
+ #
+ # x509::key {
+ # 'server':
+ # content => $x509['key'];
+ # }
+ #
+ # x509::cert {
+ # 'server':
+ # content => $x509['cert'];
+ # }
+
+ # x509::ca {
+ # 'leap_ca_daemon':
+ # content => $x509['ca_cert'];
+ # }
+
+
+ file { '/srv/leap_ca_daemon':
+ ensure => directory,
+ owner => 'leap_ca_daemon',
+ group => 'leap_ca_daemon',
+ require => User['leap_ca_daemon'];
+ }
+
+ vcsrepo { '/srv/leap_ca_daemon':
+ ensure => present,
+ revision => 'origin/master',
+ provider => git,
+ source => 'git://code.leap.se/leap_ca',
+ owner => 'leap_ca_daemon',
+ group => 'leap_ca_daemon',
+ require => [ User['leap_ca_daemon'], Group['leap_ca_daemon'] ],
+ notify => Exec['bundler_update']
+ }
+
+ exec { 'bundler_update':
+ cwd => '/srv/leap_ca_daemon',
+ command => '/bin/bash -c "/usr/bin/bundle check || /usr/bin/bundle install"',
+ unless => '/usr/bin/bundle check',
+ timeout => 600,
+ require => [ Class['bundler::install'], Vcsrepo['/srv/leap_ca_daemon'] ];
+ }
+
+ file { '/usr/local/bin/leap_ca_daemon':
+ ensure => link,
+ target => '/srv/leap_ca_daemon/bin/leap_ca_daemon',
+ }
+
+ file { '/etc/cron.hourly/leap_ca':
+ ensure => present,
+ content => "#!/bin/sh\n/srv/leap_ca_daemon/bin/leap_ca_daemon --run-once > /dev/null",
+ owner => 'root',
+ group => 0,
+ mode => '0755',
+ }
+
+
+}
diff --git a/puppet/modules/site_ca_daemon/templates/leap_ca.yaml.erb b/puppet/modules/site_ca_daemon/templates/leap_ca.yaml.erb
new file mode 100644
index 00000000..e0b95278
--- /dev/null
+++ b/puppet/modules/site_ca_daemon/templates/leap_ca.yaml.erb
@@ -0,0 +1,31 @@
+#
+# Default configuration options for LEAP Certificate Authority Daemon
+#
+
+#
+# Certificate Authority
+#
+ca_key_path: "/etc/x509/keys/leap_ca_daemon.key"
+ca_key_password: nil
+ca_cert_path: "/etc/x509/certs/leap_ca_daemon.crt"
+
+#
+# Certificate pool
+#
+max_pool_size: 100
+client_cert_lifespan: 2
+client_cert_bit_size: 2024
+client_cert_hash: "SHA256"
+
+#
+# Database
+#
+db_name: "client_certificates"
+couch_connection:
+ protocol: "https"
+ host: <%= couchdb_host %>
+ port: 6984
+ username: <%= couchdb_user %>
+ password: <%= couchdb_password %>
+ prefix: ""
+ suffix: ""
diff --git a/puppet/modules/site_config/lib/facter/ip_interface.rb b/puppet/modules/site_config/lib/facter/ip_interface.rb
new file mode 100644
index 00000000..45764bfc
--- /dev/null
+++ b/puppet/modules/site_config/lib/facter/ip_interface.rb
@@ -0,0 +1,13 @@
+require 'facter/util/ip'
+
+Facter::Util::IP.get_interfaces.each do |interface|
+ ip = Facter.value("ipaddress_#{interface}")
+ if ip != nil
+ Facter.add("interface_" + ip ) do
+ setcode do
+ interface
+ end
+ end
+ end
+end
+
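
Each fact created here is named interface_<ip> and holds the name of the interface
carrying that address, so a manifest can map a configured IP back to its interface.
A hedged sketch of that lookup, assuming stdlib's getvar() can fetch a fact by its
literal string name and using a hypothetical 'gateway_address' hiera key:

    $gateway_address   = hiera('gateway_address')
    # e.g. the fact interface_10.0.1.2 => 'eth0'
    $gateway_interface = getvar("interface_${gateway_address}")
    notice("address ${gateway_address} is bound to ${gateway_interface}")
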
diff --git a/puppet/modules/site_config/manifests/caching_resolver.pp b/puppet/modules/site_config/manifests/caching_resolver.pp
new file mode 100644
index 00000000..922c394f
--- /dev/null
+++ b/puppet/modules/site_config/manifests/caching_resolver.pp
@@ -0,0 +1,41 @@
+class site_config::caching_resolver {
+
+ # Set up a conf.d directory to place additional unbound configuration files.
+ # There must be at least one file in the directory, or unbound will not start,
+ # so create an empty placeholder to ensure this.
+
+ # Note: the version of unbound we are working with does not accept a wildcard
+ # for an include directive, so we cannot use the conf.d directory yet. Once we
+ # can use the newer unbound, we will add 'include: /etc/unbound.d/*' to the
+ # configuration file.
+
+ file {
+ '/etc/unbound/conf.d':
+ ensure => directory,
+ owner => root, group => root, mode => '0755',
+ require => Package['unbound'];
+
+ '/etc/unbound/conf.d/placeholder':
+ ensure => present,
+ content => '',
+ owner => root, group => root, mode => '0644';
+ }
+
+ class { 'unbound':
+ root_hints => false,
+ anchor => false,
+ ssl => false,
+ require => File['/etc/unbound/conf.d/placeholder'],
+ settings => {
+ server => {
+ verbosity => '1',
+ interface => [ '127.0.0.1', '::1' ],
+ port => '53',
+ hide-identity => 'yes',
+ hide-version => 'yes',
+ harden-glue => 'yes',
+ access-control => [ '127.0.0.0/8 allow', '::1 allow' ]
+ }
+ }
+ }
+}
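
The placeholder above only keeps the directory non-empty; the point of conf.d is
that, once the wildcard include mentioned in the comments becomes available, other
classes can drop extra unbound snippets alongside it. A hypothetical drop-in sketch
(file name and contents are illustrative; whether the unbound module exposes
Service['unbound'] is an assumption):

    file { '/etc/unbound/conf.d/forward_example':
      ensure  => present,
      owner   => root,
      group   => root,
      mode    => '0644',
      content => "forward-zone:\n  name: \"example.org\"\n  forward-addr: 10.0.1.1\n",
      require => File['/etc/unbound/conf.d'],
      notify  => Service['unbound'],
    }
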
diff --git a/puppet/modules/site_config/manifests/default.pp b/puppet/modules/site_config/manifests/default.pp
new file mode 100644
index 00000000..2191e9a1
--- /dev/null
+++ b/puppet/modules/site_config/manifests/default.pp
@@ -0,0 +1,36 @@
+class site_config::default {
+ tag 'leap_base'
+
+ $domain_hash = hiera('domain')
+
+ include concat::setup
+
+ # default class, used by all hosts
+
+ include lsb, git
+
+ # configure apt
+ include site_apt
+
+
+ # configure ssh and include ssh-keys
+ include site_config::sshd
+
+ # configure /etc/resolv.conf
+ include site_config::resolvconf
+
+ # configure caching, local resolver
+ include site_config::caching_resolver
+
+ # configure /etc/hosts
+ class { 'site_config::hosts':
+ stage => initial,
+ }
+
+ package { [ 'etckeeper' ]:
+ ensure => installed,
+ }
+
+ # include basic shorewall config
+ include site_shorewall::defaults
+}
diff --git a/puppet/modules/site_config/manifests/hosts.pp b/puppet/modules/site_config/manifests/hosts.pp
new file mode 100644
index 00000000..6c00f3b6
--- /dev/null
+++ b/puppet/modules/site_config/manifests/hosts.pp
@@ -0,0 +1,22 @@
+class site_config::hosts() {
+
+ $hosts = hiera('hosts','')
+ $hostname = hiera('name')
+
+ $domain_public = $site_config::default::domain_hash['full_suffix']
+
+ file { "/etc/hostname":
+ ensure => present,
+ content => $hostname
+ }
+
+ exec { "/bin/hostname $hostname":
+ subscribe => [ File['/etc/hostname'], File['/etc/hosts'] ],
+ refreshonly => true;
+ }
+
+ file { '/etc/hosts':
+ content => template('site_config/hosts'),
+ mode => '0644', owner => root, group => root;
+ }
+}
diff --git a/puppet/modules/site_config/manifests/resolvconf.pp b/puppet/modules/site_config/manifests/resolvconf.pp
new file mode 100644
index 00000000..d73f0b78
--- /dev/null
+++ b/puppet/modules/site_config/manifests/resolvconf.pp
@@ -0,0 +1,24 @@
+class site_config::resolvconf {
+
+ # bind9 purging can be taken out after some time
+ package { 'bind9':
+ ensure => absent,
+ }
+ file { '/etc/default/bind9':
+ ensure => absent;
+ }
+ file { '/etc/bind/named.conf.options':
+ ensure => absent;
+ }
+
+ $domain_public = $site_config::default::domain_hash['full_suffix']
+
+ # 127.0.0.1: caching-only local bind
+ # 87.118.100.175: http://server.privacyfoundation.de
+ # 62.141.58.13: http://www.privacyfoundation.ch/de/service/server.html
+ class { '::resolvconf':
+ domain => $domain_public,
+ search => $domain_public,
+ nameservers => [ '127.0.0.1', '87.118.100.175', '62.141.58.13' ]
+ }
+}
diff --git a/puppet/modules/site_config/manifests/slow.pp b/puppet/modules/site_config/manifests/slow.pp
new file mode 100644
index 00000000..18b22a9c
--- /dev/null
+++ b/puppet/modules/site_config/manifests/slow.pp
@@ -0,0 +1,6 @@
+class site_config::slow {
+ tag 'leap_slow'
+ class { 'site_apt::dist_upgrade':
+ stage => initial,
+ }
+}
diff --git a/puppet/modules/site_config/manifests/sshd.pp b/puppet/modules/site_config/manifests/sshd.pp
new file mode 100644
index 00000000..944dbce2
--- /dev/null
+++ b/puppet/modules/site_config/manifests/sshd.pp
@@ -0,0 +1,9 @@
+class site_config::sshd {
+ # configure sshd
+ include sshd
+ include site_sshd
+ # no need for configuring authorized_keys as leap_cli cares for that
+ #$ssh_pubkeys=hiera_hash('ssh_pubkeys')
+ #notice($ssh_pubkeys)
+ #create_resources('site_sshd::ssh_key', $ssh_pubkeys)
+}
diff --git a/puppet/modules/site_config/templates/hosts b/puppet/modules/site_config/templates/hosts
new file mode 100644
index 00000000..00cc6a79
--- /dev/null
+++ b/puppet/modules/site_config/templates/hosts
@@ -0,0 +1,15 @@
+# This file is managed by puppet, any changes will be overwritten!
+
+127.0.0.1 localhost
+127.0.1.1 <%= hostname %>.<%= @domain_public %> <%= hostname %>
+
+<%- if hosts.to_s != '' then -%>
+<%= hosts %>
+<% end -%>
+
+# The following lines are desirable for IPv6 capable hosts
+::1 ip6-localhost ip6-loopback
+fe00::0 ip6-localnet
+ff00::0 ip6-mcastprefix
+ff02::1 ip6-allnodes
+ff02::2 ip6-allrouters
diff --git a/puppet/modules/site_couchdb/files/couchdb b/puppet/modules/site_couchdb/files/couchdb
new file mode 100755
index 00000000..ccdfe716
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/couchdb
@@ -0,0 +1,160 @@
+#!/bin/sh -e
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+### BEGIN INIT INFO
+# Provides: couchdb
+# Required-Start: $local_fs $remote_fs
+# Required-Stop: $local_fs $remote_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Apache CouchDB init script
+# Description: Apache CouchDB init script for the database server.
+### END INIT INFO
+
+SCRIPT_OK=0
+SCRIPT_ERROR=1
+
+DESCRIPTION="database server"
+NAME=couchdb
+SCRIPT_NAME=`basename $0`
+COUCHDB=/usr/bin/couchdb
+CONFIGURATION_FILE=/etc/default/couchdb
+RUN_DIR=/var/run/couchdb
+LSB_LIBRARY=/lib/lsb/init-functions
+
+if test ! -x $COUCHDB; then
+ exit $SCRIPT_ERROR
+fi
+
+if test -r $CONFIGURATION_FILE; then
+ . $CONFIGURATION_FILE
+fi
+
+log_daemon_msg () {
+ # Dummy function to be replaced by LSB library.
+
+ echo $@
+}
+
+log_end_msg () {
+ # Dummy function to be replaced by LSB library.
+
+ if test "$1" != "0"; then
+ echo "Error with $DESCRIPTION: $NAME"
+ fi
+ return $1
+}
+
+if test -r $LSB_LIBRARY; then
+ . $LSB_LIBRARY
+fi
+
+run_command () {
+ command="$1"
+ if test -n "$COUCHDB_OPTIONS"; then
+ command="$command $COUCHDB_OPTIONS"
+ fi
+ if test -n "$COUCHDB_USER"; then
+ if su $COUCHDB_USER -c "$command"; then
+ return $SCRIPT_OK
+ else
+ return $SCRIPT_ERROR
+ fi
+ else
+ if $command; then
+ return $SCRIPT_OK
+ else
+ return $SCRIPT_ERROR
+ fi
+ fi
+}
+
+start_couchdb () {
+ # Start Apache CouchDB as a background process.
+
+ mkdir -p "$RUN_DIR"
+ chown -R "$COUCHDB_USER" "$RUN_DIR"
+ command="$COUCHDB -b"
+ if test -n "$COUCHDB_STDOUT_FILE"; then
+ command="$command -o $COUCHDB_STDOUT_FILE"
+ fi
+ if test -n "$COUCHDB_STDERR_FILE"; then
+ command="$command -e $COUCHDB_STDERR_FILE"
+ fi
+ if test -n "$COUCHDB_RESPAWN_TIMEOUT"; then
+ command="$command -r $COUCHDB_RESPAWN_TIMEOUT"
+ fi
+ run_command "$command" > /dev/null
+}
+
+stop_couchdb () {
+ # Stop the running Apache CouchDB process.
+
+ run_command "$COUCHDB -d" > /dev/null
+ pkill -u couchdb
+ # always return true even if no remaining couchdb procs got killed
+ /bin/true
+}
+
+display_status () {
+ # Display the status of the running Apache CouchDB process.
+
+ run_command "$COUCHDB -s"
+}
+
+parse_script_option_list () {
+ # Parse arguments passed to the script and take appropriate action.
+
+ case "$1" in
+ start)
+ log_daemon_msg "Starting $DESCRIPTION" $NAME
+ if start_couchdb; then
+ log_end_msg $SCRIPT_OK
+ else
+ log_end_msg $SCRIPT_ERROR
+ fi
+ ;;
+ stop)
+ log_daemon_msg "Stopping $DESCRIPTION" $NAME
+ if stop_couchdb; then
+ log_end_msg $SCRIPT_OK
+ else
+ log_end_msg $SCRIPT_ERROR
+ fi
+ ;;
+ restart|force-reload)
+ log_daemon_msg "Restarting $DESCRIPTION" $NAME
+ if stop_couchdb; then
+ if start_couchdb; then
+ log_end_msg $SCRIPT_OK
+ else
+ log_end_msg $SCRIPT_ERROR
+ fi
+ else
+ log_end_msg $SCRIPT_ERROR
+ fi
+ ;;
+ status)
+ display_status
+ ;;
+ *)
+ cat << EOF >&2
+Usage: $SCRIPT_NAME {start|stop|restart|force-reload|status}
+EOF
+ exit $SCRIPT_ERROR
+ ;;
+ esac
+}
+
+parse_script_option_list $@
diff --git a/puppet/modules/site_couchdb/files/leap_ca_daemon b/puppet/modules/site_couchdb/files/leap_ca_daemon
new file mode 100755
index 00000000..9a1a0bc7
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/leap_ca_daemon
@@ -0,0 +1,157 @@
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides: leap_ca_daemon
+# Required-Start: $remote_fs $syslog
+# Required-Stop: $remote_fs $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: leap_ca_daemon initscript
+# Description: Controls leap_ca_daemon (see https://github.com/leapcode/leap_ca
+# for more information).
+### END INIT INFO
+
+# Author: varac <varac@leap.se>
+#
+
+# Do NOT "set -e"
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="leap_ca_daemon initscript"
+NAME=leap_ca_daemon
+DAEMON=/usr/local/bin/$NAME
+DAEMON_ARGS="run "
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# Load the VERBOSE setting and other rcS variables
+. /lib/init/vars.sh
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+ # Return
+ # 0 if daemon has been started
+ # 1 if daemon was already running
+ # 2 if daemon could not be started
+ start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
+ || return 1
+ start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- \
+ $DAEMON_ARGS \
+ || return 2
+ # Add code here, if necessary, that waits for the process to be ready
+ # to handle requests from services started subsequently which depend
+ # on this one. As a last resort, sleep for some time.
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+ # Return
+ # 0 if daemon has been stopped
+ # 1 if daemon was already stopped
+ # 2 if daemon could not be stopped
+ # other if a failure occurred
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
+ RETVAL="$?"
+ [ "$RETVAL" = 2 ] && return 2
+ # Wait for children to finish too if this is a daemon that forks
+ # and if the daemon is only ever run from this initscript.
+ # If the above conditions are not satisfied then add some other code
+ # that waits for the process to drop all resources that could be
+ # needed by services started subsequently. A last resort is to
+ # sleep for some time.
+ start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
+ [ "$?" = 2 ] && return 2
+ # Many daemons don't delete their pidfiles when they exit.
+ rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+#
+# Function that sends a SIGHUP to the daemon/service
+#
+do_reload() {
+ #
+ # If the daemon can reload its configuration without
+ # restarting (for example, when it is sent a SIGHUP),
+ # then implement that here.
+ #
+ start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name $NAME
+ return 0
+}
+
+case "$1" in
+ start)
+ [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case "$?" in
+ 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+ 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+ esac
+ ;;
+ stop)
+ [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+ 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+ esac
+ ;;
+ status)
+ status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
+ ;;
+ #reload|force-reload)
+ #
+ # If do_reload() is not implemented then leave this commented out
+ # and leave 'force-reload' as an alias for 'restart'.
+ #
+ #log_daemon_msg "Reloading $DESC" "$NAME"
+ #do_reload
+ #log_end_msg $?
+ #;;
+ restart|force-reload)
+ #
+ # If the "reload" option is implemented then remove the
+ # 'force-reload' alias
+ #
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1)
+ do_start
+ case "$?" in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *)
+ # Failed to stop
+ log_end_msg 1
+ ;;
+ esac
+ ;;
+ *)
+ #echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload}" >&2
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
+
+:
diff --git a/puppet/modules/site_couchdb/files/local.ini b/puppet/modules/site_couchdb/files/local.ini
new file mode 100644
index 00000000..b3376cbb
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/local.ini
@@ -0,0 +1,89 @@
+; CouchDB Configuration Settings
+
+; Custom settings should be made in this file. They will override settings
+; in default.ini, but unlike changes made to default.ini, this file won't be
+; overwritten on server upgrade.
+
+[couchdb]
+;max_document_size = 4294967296 ; bytes
+
+[httpd]
+;port = 5984
+;bind_address = 127.0.0.1
+; Options for the MochiWeb HTTP server.
+;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
+; For more socket options, consult Erlang's module 'inet' man page.
+;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
+
+; Uncomment next line to trigger basic-auth popup on unauthorized requests.
+;WWW-Authenticate = Basic realm="administrator"
+
+; Uncomment next line to set the configuration modification whitelist. Only
+; whitelisted values may be changed via the /_config URLs. To allow the admin
+; to change this value over HTTP, remember to include {httpd,config_whitelist}
+; itself. Excluding it from the list would require editing this file to update
+; the whitelist.
+;config_whitelist = [{httpd,config_whitelist}, {log,level}, {etc,etc}]
+
+[httpd_global_handlers]
+;_google = {couch_httpd_proxy, handle_proxy_req, <<"http://www.google.com">>}
+
+# enable futon
+_utils = {couch_httpd_misc_handlers, handle_utils_dir_req, "/usr/share/couchdb/www"}
+# disable futon
+#_utils = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome, Futon is disabled!">>}
+
+[couch_httpd_auth]
+; If you set this to true, you should also uncomment the WWW-Authenticate line
+; above. If you don't configure a WWW-Authenticate header, CouchDB will send
+; Basic realm="server" in order to prevent you getting logged out.
+; require_valid_user = false
+
+[log]
+;level = debug
+
+[os_daemons]
+; For any commands listed here, CouchDB will attempt to ensure that
+; the process remains alive while CouchDB runs as well as shut them
+; down when CouchDB exits.
+;foo = /path/to/command -with args
+
+[daemons]
+; enable SSL support by uncommenting the following line and supply the PEM's below.
+; the default ssl port CouchDB listens on is 6984
+;httpsd = {couch_httpd, start_link, [https]}
+
+[ssl]
+;cert_file = /etc/couchdb/server_cert.pem
+;key_file = /etc/couchdb/server_key.pem
+;password = somepassword
+; set to true to validate peer certificates
+;verify_ssl_certificates = false
+; Path to file containing PEM encoded CA certificates (trusted
+; certificates used for verifying a peer certificate). May be omitted if
+; you do not want to verify the peer.
+;cacert_file = /full/path/to/cacertf
+; The verification fun (optional); if not specified, the default
+; verification fun will be used.
+;verify_fun = {Module, VerifyFun}
+;ssl_certificate_max_depth = 1
+; To enable Virtual Hosts in CouchDB, add a vhost = path directive. All requests to
+; the Virtual Host will be redirected to the path. In the example below all requests
+; to http://example.com/ are redirected to /database.
+; If you run CouchDB on a specific port, include the port number in the vhost:
+; example.com:5984 = /database
+
+[vhosts]
+;example.com = /database/
+
+[update_notification]
+;unique notifier name=/full/path/to/exe -with "cmd line arg"
+
+; To create an admin account uncomment the '[admins]' section below and add a
+; line in the format 'username = password'. When you next start CouchDB, it
+; will change the password to a hash (so that your passwords don't linger
+; around in plain-text files). You can add more admin accounts with more
+; 'username = password' lines. Don't forget to restart CouchDB after
+; changing this.
+;[admins]
+;admin = mysecretpassword
diff --git a/puppet/modules/site_couchdb/manifests/apache_ssl_proxy.pp b/puppet/modules/site_couchdb/manifests/apache_ssl_proxy.pp
new file mode 100644
index 00000000..7739473e
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/apache_ssl_proxy.pp
@@ -0,0 +1,25 @@
+define site_couchdb::apache_ssl_proxy ($key, $cert) {
+
+ $apache_no_default_site = true
+ include apache
+ apache::module {
+ 'proxy': ensure => present;
+ 'proxy_http': ensure => present;
+ 'rewrite': ensure => present;
+ 'ssl': ensure => present;
+ }
+ apache::vhost::file { 'couchdb_proxy': }
+
+ x509::key {
+ 'leap_couchdb':
+ content => $key,
+ notify => Service[apache];
+ }
+
+ x509::cert {
+ 'leap_couchdb':
+ content => $cert,
+ notify => Service[apache];
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/configure.pp b/puppet/modules/site_couchdb/manifests/configure.pp
new file mode 100644
index 00000000..333511b5
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/configure.pp
@@ -0,0 +1,27 @@
+class site_couchdb::configure {
+
+ file { '/etc/init.d/couchdb':
+ source => 'puppet:///modules/site_couchdb/couchdb',
+ mode => '0755',
+ owner => 'root',
+ group => 'root',
+ }
+
+ file { '/etc/couchdb/local.d/admin.ini':
+ content => "[admins]
+admin = $site_couchdb::couchdb_admin_pw
+",
+ mode => '0600',
+ owner => 'couchdb',
+ group => 'couchdb',
+ notify => Service[couchdb]
+ }
+
+
+ exec { '/etc/init.d/couchdb restart; sleep 6':
+ path => ['/bin', '/usr/bin',],
+ subscribe => File['/etc/couchdb/local.d/admin.ini',
+ '/etc/couchdb/local.ini'],
+ refreshonly => true
+ }
+}
diff --git a/puppet/modules/site_couchdb/manifests/init.pp b/puppet/modules/site_couchdb/manifests/init.pp
new file mode 100644
index 00000000..9ecde5e6
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/init.pp
@@ -0,0 +1,64 @@
+class site_couchdb {
+ tag 'leap_service'
+ include couchdb
+
+ $x509 = hiera('x509')
+ $key = $x509['key']
+ $cert = $x509['cert']
+ $couchdb_config = hiera('couch')
+ $couchdb_users = $couchdb_config['users']
+ $couchdb_admin = $couchdb_users['admin']
+ $couchdb_admin_user = $couchdb_admin['username']
+ $couchdb_admin_pw = $couchdb_admin['password']
+ $couchdb_webapp = $couchdb_users['webapp']
+ $couchdb_webapp_user = $couchdb_webapp['username']
+ $couchdb_webapp_pw = $couchdb_webapp['password']
+ $couchdb_ca_daemon = $couchdb_users['ca_daemon']
+ $couchdb_ca_daemon_user = $couchdb_ca_daemon['username']
+ $couchdb_ca_daemon_pw = $couchdb_ca_daemon['password']
+
+ Package ['couchdb']
+ -> File['/etc/init.d/couchdb']
+ -> File['/etc/couchdb/local.ini']
+ -> File['/etc/couchdb/local.d/admin.ini']
+ -> File['/etc/couchdb/couchdb.netrc']
+ -> Couchdb::Create_db['users']
+ -> Couchdb::Create_db['client_certificates']
+ -> Couchdb::Add_user[$couchdb_webapp_user]
+ -> Couchdb::Add_user[$couchdb_ca_daemon_user]
+ -> Site_couchdb::Apache_ssl_proxy['apache_ssl_proxy']
+
+ include site_couchdb::configure
+ include couchdb::deploy_config
+
+ site_couchdb::apache_ssl_proxy { 'apache_ssl_proxy':
+ key => $key,
+ cert => $cert
+ }
+
+ couchdb::query::setup { 'localhost':
+ user => $couchdb_admin_user,
+ pw => $couchdb_admin_pw
+ }
+
+ # Populate couchdb
+ couchdb::add_user { $couchdb_webapp_user:
+ roles => '["certs"]',
+ pw => $couchdb_webapp_pw
+ }
+
+ couchdb::add_user { $couchdb_ca_daemon_user:
+ roles => '["certs"]',
+ pw => $couchdb_ca_daemon_pw
+ }
+
+ couchdb::create_db { 'users':
+ readers => "{ \"names\": [\"$couchdb_webapp_user\"], \"roles\": [] }"
+ }
+
+ couchdb::create_db { 'client_certificates':
+ readers => "{ \"names\": [], \"roles\": [\"certs\"] }"
+ }
+
+ include site_shorewall::couchdb
+}
diff --git a/puppet/modules/site_nagios/files/configs/Debian/nagios.cfg b/puppet/modules/site_nagios/files/configs/Debian/nagios.cfg
new file mode 100644
index 00000000..753d1610
--- /dev/null
+++ b/puppet/modules/site_nagios/files/configs/Debian/nagios.cfg
@@ -0,0 +1,1273 @@
+##############################################################################
+#
+# NAGIOS.CFG - Sample Main Config File for Nagios
+#
+#
+##############################################################################
+
+
+# LOG FILE
+# This is the main log file where service and host events are logged
+# for historical purposes. This should be the first option specified
+# in the config file!!!
+
+log_file=/var/log/nagios3/nagios.log
+
+
+
+# OBJECT CONFIGURATION FILE(S)
+# These are the object configuration files in which you define hosts,
+# host groups, contacts, contact groups, services, etc.
+# You can split your object definitions across several config files
+# if you wish (as shown below), or keep them all in a single config file.
+#cfg_file=/etc/nagios3/commands.cfg
+
+# Puppet-managed configuration files
+cfg_dir=/etc/nagios3/conf.d
+
+# Debian also defaults to using the check commands defined by the debian
+# nagios-plugins package
+cfg_dir=/etc/nagios-plugins/config
+
+
+
+# OBJECT CACHE FILE
+# This option determines where object definitions are cached when
+# Nagios starts/restarts. The CGIs read object definitions from
+# this cache file (rather than looking at the object config files
+# directly) in order to prevent inconsistencies that can occur
+# when the config files are modified after Nagios starts.
+
+object_cache_file=/var/cache/nagios3/objects.cache
+
+
+
+# PRE-CACHED OBJECT FILE
+# This option determines the location of the precached object file.
+# If you run Nagios with the -p command line option, it will preprocess
+# your object configuration file(s) and write the cached config to this
+# file. You can then start Nagios with the -u option to have it read
+# object definitions from this precached file, rather than the standard
+# object configuration files (see the cfg_file and cfg_dir options above).
+# Using a precached object file can speed up the time needed to (re)start
+# the Nagios process if you've got a large and/or complex configuration.
+# Read the documentation section on optimizing Nagios to find out more
+# about how this feature works.
+
+precached_object_file=/var/lib/nagios3/objects.precache
+
+
+
+# RESOURCE FILE
+# This is an optional resource file that contains $USERx$ macro
+# definitions. Multiple resource files can be specified by using
+# multiple resource_file definitions. The CGIs will not attempt to
+# read the contents of resource files, so information that is
+# considered to be sensitive (usernames, passwords, etc) can be
+# defined as macros in this file and restrictive permissions (600)
+# can be placed on this file.
+
+resource_file=/etc/nagios3/private/resource.cfg
+
+
+
+# STATUS FILE
+# This is where the current status of all monitored services and
+# hosts is stored. Its contents are read and processed by the CGIs.
+# The contents of the status file are deleted every time Nagios
+# restarts.
+
+status_file=/var/cache/nagios3/status.dat
+
+
+
+# STATUS FILE UPDATE INTERVAL
+# This option determines the frequency (in seconds) that
+# Nagios will periodically dump program, host, and
+# service status data.
+
+status_update_interval=10
+
+
+
+# NAGIOS USER
+# This determines the effective user that Nagios should run as.
+# You can either supply a username or a UID.
+
+nagios_user=nagios
+
+
+
+# NAGIOS GROUP
+# This determines the effective group that Nagios should run as.
+# You can either supply a group name or a GID.
+
+nagios_group=nagios
+
+
+
+# EXTERNAL COMMAND OPTION
+# This option allows you to specify whether or not Nagios should check
+# for external commands (in the command file defined below). By default
+# Nagios will *not* check for external commands, just to be on the
+# cautious side. If you want to be able to use the CGI command interface
+# you will have to enable this.
+# Values: 0 = disable commands, 1 = enable commands
+
+check_external_commands=1
+
+
+
+# EXTERNAL COMMAND CHECK INTERVAL
+# This is the interval at which Nagios should check for external commands.
+# This value works off the interval_length you specify later. If you leave
+# that at its default value of 60 (seconds), a value of 1 here will cause
+# Nagios to check for external commands every minute. If you specify a
+# number followed by an "s" (i.e. 15s), this will be interpreted to mean
+# actual seconds rather than a multiple of the interval_length variable.
+# Note: In addition to reading the external command file at regularly
+# scheduled intervals, Nagios will also check for external commands after
+# event handlers are executed.
+# NOTE: Setting this value to -1 causes Nagios to check the external
+# command file as often as possible.
+
+#command_check_interval=15s
+command_check_interval=-1
+
+
+
+# EXTERNAL COMMAND FILE
+# This is the file that Nagios checks for external command requests.
+# It is also where the command CGI will write commands that are submitted
+# by users, so it must be writeable by the user that the web server
+# is running as (usually 'nobody'). Permissions should be set at the
+# directory level instead of on the file, as the file is deleted every
+# time its contents are processed.
+# Debian Users: In case you didn't read README.Debian yet, _NOW_ is the
+# time to do it.
+
+command_file=/var/lib/nagios3/rw/nagios.cmd
+
+
+
+# EXTERNAL COMMAND BUFFER SLOTS
+# This setting is used to tweak the number of items or "slots" that
+# the Nagios daemon should allocate to the buffer that holds incoming
+# external commands before they are processed. As external commands
+# are processed by the daemon, they are removed from the buffer.
+
+external_command_buffer_slots=4096
+
+
+
+# LOCK FILE
+# This is the lockfile that Nagios will use to store its PID number
+# in when it is running in daemon mode.
+
+lock_file=/var/run/nagios3/nagios3.pid
+
+
+
+# TEMP FILE
+# This is a temporary file that is used as scratch space when Nagios
+# updates the status log, cleans the comment file, etc. This file
+# is created, used, and deleted throughout the time that Nagios is
+# running.
+
+temp_file=/var/cache/nagios3/nagios.tmp
+
+
+
+# TEMP PATH
+# This is path where Nagios can create temp files for service and
+# host check results, etc.
+
+temp_path=/tmp
+
+
+
+# EVENT BROKER OPTIONS
+# Controls what (if any) data gets sent to the event broker.
+# Values: 0 = Broker nothing
+# -1 = Broker everything
+# <other> = See documentation
+
+event_broker_options=-1
+
+
+
+# EVENT BROKER MODULE(S)
+# This directive is used to specify an event broker module that should
+# by loaded by Nagios at startup. Use multiple directives if you want
+# to load more than one module. Arguments that should be passed to
+# the module at startup are separated from the module path by a space.
+#
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+#
+# Do NOT overwrite modules while they are being used by Nagios or Nagios
+# will crash in a fiery display of SEGFAULT glory. This is a bug/limitation
+# either in dlopen(), the kernel, and/or the filesystem. And maybe Nagios...
+#
+# The correct/safe way of updating a module is by using one of these methods:
+# 1. Shutdown Nagios, replace the module file, restart Nagios
+# 2. Delete the original module file, move the new module file into place, restart Nagios
+#
+# Example:
+#
+# broker_module=<modulepath> [moduleargs]
+
+#broker_module=/somewhere/module1.o
+#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
+
+
+
+# LOG ROTATION METHOD
+# This is the log rotation method that Nagios should use to rotate
+# the main log file. Values are as follows..
+# n = None - don't rotate the log
+# h = Hourly rotation (top of the hour)
+# d = Daily rotation (midnight every day)
+# w = Weekly rotation (midnight on Saturday evening)
+# m = Monthly rotation (midnight last day of month)
+
+log_rotation_method=d
+
+
+
+# LOG ARCHIVE PATH
+# This is the directory where archived (rotated) log files should be
+# placed (assuming you've chosen to do log rotation).
+
+log_archive_path=/var/log/nagios3/archives
+
+
+
+# LOGGING OPTIONS
+# If you want messages logged to the syslog facility, as well as the
+# Nagios log file, set this option to 1. If not, set it to 0.
+
+use_syslog=1
+
+
+
+# NOTIFICATION LOGGING OPTION
+# If you don't want notifications to be logged, set this value to 0.
+# If notifications should be logged, set the value to 1.
+
+log_notifications=1
+
+
+
+# SERVICE RETRY LOGGING OPTION
+# If you don't want service check retries to be logged, set this value
+# to 0. If retries should be logged, set the value to 1.
+
+log_service_retries=1
+
+
+
+# HOST RETRY LOGGING OPTION
+# If you don't want host check retries to be logged, set this value to
+# 0. If retries should be logged, set the value to 1.
+
+log_host_retries=1
+
+
+
+# EVENT HANDLER LOGGING OPTION
+# If you don't want host and service event handlers to be logged, set
+# this value to 0. If event handlers should be logged, set the value
+# to 1.
+
+log_event_handlers=1
+
+
+
+# INITIAL STATES LOGGING OPTION
+# If you want Nagios to log all initial host and service states to
+# the main log file (the first time the service or host is checked)
+# you can enable this option by setting this value to 1. If you
+# are not using an external application that does long term state
+# statistics reporting, you do not need to enable this option. In
+# this case, set the value to 0.
+
+log_initial_states=0
+
+
+
+# EXTERNAL COMMANDS LOGGING OPTION
+# If you don't want Nagios to log external commands, set this value
+# to 0. If external commands should be logged, set this value to 1.
+# Note: This option does not include logging of passive service
+# checks - see the option below for controlling whether or not
+# passive checks are logged.
+
+log_external_commands=1
+
+
+
+# PASSIVE CHECKS LOGGING OPTION
+# If you don't want Nagios to log passive host and service checks, set
+# this value to 0. If passive checks should be logged, set
+# this value to 1.
+
+log_passive_checks=1
+
+
+
+# GLOBAL HOST AND SERVICE EVENT HANDLERS
+# These options allow you to specify a host and service event handler
+# command that is to be run for every host or service state change.
+# The global event handler is executed immediately prior to the event
+# handler that you have optionally specified in each host or
+# service definition. The command argument is the short name of a
+# command definition that you define in your host configuration file.
+# Read the HTML docs for more information.
+
+#global_host_event_handler=somecommand
+#global_service_event_handler=somecommand
+
+
+
+# SERVICE INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" service checks when it starts monitoring. The
+# default is to use smart delay calculation, which will try to
+# space all service checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)! This is not a
+# good thing for production, but is useful when testing the
+# parallelization functionality.
+# n = None - don't use any delay between checks
+# d = Use a "dumb" delay of 1 second between checks
+# s = Use "smart" inter-check delay calculation
+# x.xx = Use an inter-check delay of x.xx seconds
+
+service_inter_check_delay_method=s
+
+
+
+# MAXIMUM SERVICE CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all services should
+# be completed. Default is 30 minutes.
+
+max_service_check_spread=30
+
+
+
+# SERVICE CHECK INTERLEAVE FACTOR
+# This variable determines how service checks are interleaved.
+# Interleaving the service checks allows for a more even
+# distribution of service checks and reduced load on remote
+# hosts. Setting this value to 1 is equivalent to how versions
+# of Nagios previous to 0.0.5 did service checks. Set this
+# value to s (smart) for automatic calculation of the interleave
+# factor unless you have a specific reason to change it.
+# s = Use "smart" interleave factor calculation
+# x = Use an interleave factor of x, where x is a
+# number greater than or equal to 1.
+
+service_interleave_factor=s
+
+
+
+# HOST INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" host checks when it starts monitoring. The
+# default is to use smart delay calculation, which will try to
+# space all host checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)!
+# n = None - don't use any delay between checks
+# d = Use a "dumb" delay of 1 second between checks
+# s = Use "smart" inter-check delay calculation
+# x.xx = Use an inter-check delay of x.xx seconds
+
+host_inter_check_delay_method=s
+
+
+
+# MAXIMUM HOST CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all hosts should
+# be completed. Default is 30 minutes.
+
+max_host_check_spread=30
+
+
+
+# MAXIMUM CONCURRENT SERVICE CHECKS
+# This option allows you to specify the maximum number of
+# service checks that can be run in parallel at any given time.
+# Specifying a value of 1 for this variable essentially prevents
+# any service checks from being parallelized. A value of 0
+# will not restrict the number of concurrent checks that are
+# being executed.
+
+max_concurrent_checks=0
+
+
+
+# HOST AND SERVICE CHECK REAPER FREQUENCY
+# This is the frequency (in seconds!) that Nagios will process
+# the results of host and service checks.
+
+check_result_reaper_frequency=10
+
+
+
+
+# MAX CHECK RESULT REAPER TIME
+# This is the max amount of time (in seconds) that a single
+# check result reaper event will be allowed to run before
+# returning control back to Nagios so it can perform other
+# duties.
+
+max_check_result_reaper_time=30
+
+
+
+
+# CHECK RESULT PATH
+# This is directory where Nagios stores the results of host and
+# service checks that have not yet been processed.
+#
+# Note: Make sure that only one instance of Nagios has access
+# to this directory!
+
+check_result_path=/var/lib/nagios3/spool/checkresults
+
+
+
+
+# MAX CHECK RESULT FILE AGE
+# This option determines the maximum age (in seconds) which check
+# result files are considered to be valid. Files older than this
+# threshold will be mercilessly deleted without further processing.
+
+max_check_result_file_age=3600
+
+
+
+
+# CACHED HOST CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous host check is considered current.
+# Cached host states (from host checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance in regards to the host check logic.
+# Too high of a value for this option may result in inaccurate host
+# states being used by Nagios, while a lower value may result in a
+# performance hit for host checks. Use a value of 0 to disable host
+# check caching.
+
+cached_host_check_horizon=15
+
+
+
+# CACHED SERVICE CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous service check is considered current.
+# Cached service states (from service checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance in regards to predictive dependency checks.
+# Use a value of 0 to disable service check caching.
+
+cached_service_check_horizon=15
+
+
+
+# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of hosts when it predicts that future dependency logic tests
+# may be needed. These predictive checks can help ensure that your
+# host dependency logic works well.
+# Values:
+# 0 = Disable predictive checks
+# 1 = Enable predictive checks (default)
+
+enable_predictive_host_dependency_checks=1
+
+
+
+# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of services when it predicts that future dependency logic tests
+# may be needed. These predictive checks can help ensure that your
+# service dependency logic works well.
+# Values:
+# 0 = Disable predictive checks
+# 1 = Enable predictive checks (default)
+
+enable_predictive_service_dependency_checks=1
+
+
+
+# SOFT STATE DEPENDENCIES
+# This option determines whether or not Nagios will use soft state
+# information when checking host and service dependencies. Normally
+# Nagios will only use the latest hard host or service state when
+# checking dependencies. If you want it to use the latest state (regardless
+# of whether it's a soft or hard state type), enable this option.
+# Values:
+# 0 = Don't use soft state dependencies (default)
+# 1 = Use soft state dependencies
+
+soft_state_dependencies=0
+
+
+
+# TIME CHANGE ADJUSTMENT THRESHOLDS
+# These options determine when Nagios will react to detected changes
+# in system time (either forward or backwards).
+
+#time_change_threshold=900
+
+
+
+# AUTO-RESCHEDULING OPTION
+# This option determines whether or not Nagios will attempt to
+# automatically reschedule active host and service checks to
+# "smooth" them out over time. This can help balance the load on
+# the monitoring server.
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_reschedule_checks=0
+
+
+
+# AUTO-RESCHEDULING INTERVAL
+# This option determines how often (in seconds) Nagios will
+# attempt to automatically reschedule checks. This option only
+# has an effect if the auto_reschedule_checks option is enabled.
+# Default is 30 seconds.
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_interval=30
+
+
+
+# AUTO-RESCHEDULING WINDOW
+# This option determines the "window" of time (in seconds) that
+# Nagios will look at when automatically rescheduling checks.
+# Only host and service checks that occur in the next X seconds
+# (determined by this variable) will be rescheduled. This option
+# only has an effect if the auto_reschedule_checks option is
+# enabled. Default is 180 seconds (3 minutes).
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_window=180
+
+
+
+# SLEEP TIME
+# This is the number of seconds to sleep between checking for system
+# events and service checks that need to be run.
+
+sleep_time=0.25
+
+
+
+# TIMEOUT VALUES
+# These options control how much time Nagios will allow various
+# types of commands to execute before killing them off. Options
+# are available for controlling maximum time allotted for
+# service checks, host checks, event handlers, notifications, the
+# ocsp command, and performance data commands. All values are in
+# seconds.
+
+service_check_timeout=60
+host_check_timeout=30
+event_handler_timeout=30
+notification_timeout=30
+ocsp_timeout=5
+perfdata_timeout=5
+
+
+
+# RETAIN STATE INFORMATION
+# This setting determines whether or not Nagios will save state
+# information for services and hosts before it shuts down. Upon
+# startup Nagios will reload all saved service and host state
+# information before starting to monitor. This is useful for
+# maintaining long-term data on state statistics, etc, but will
+# slow Nagios down a bit when it (re)starts. Since it's only
+# a one-time penalty, I think it's well worth the additional
+# startup delay.
+
+retain_state_information=1
+
+
+
+# STATE RETENTION FILE
+# This is the file that Nagios should use to store host and
+# service state information before it shuts down. The state
+# information in this file is also read immediately prior to
+# starting to monitor the network when Nagios is restarted.
+# This file is used only if the retain_state_information
+# variable is set to 1.
+
+state_retention_file=/var/lib/nagios3/retention.dat
+
+
+
+# RETENTION DATA UPDATE INTERVAL
+# This setting determines how often (in minutes) that Nagios
+# will automatically save retention data during normal operation.
+# If you set this value to 0, Nagios will not save retention
+# data at regular intervals, but it will still save retention
+# data before shutting down or restarting. If you have disabled
+# state retention, this option has no effect.
+
+retention_update_interval=60
+
+
+
+# USE RETAINED PROGRAM STATE
+# This setting determines whether or not Nagios will set
+# program status variables based on the values saved in the
+# retention file. If you want to use retained program status
+# information, set this value to 1. If not, set this value
+# to 0.
+
+use_retained_program_state=1
+
+
+
+# USE RETAINED SCHEDULING INFO
+# This setting determines whether or not Nagios will retain
+# the scheduling info (next check time) for hosts and services
+# based on the values saved in the retention file. If you
+# want to use retained scheduling info, set this
+# value to 1. If not, set this value to 0.
+
+use_retained_scheduling_info=1
+
+
+
+# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
+# The following variables are used to specify specific host and
+# service attributes that should *not* be retained by Nagios during
+# program restarts.
+#
+# The values of the masks are sums (bitwise ORs) of the values specified
+# by the "MODATTR_" definitions found in include/common.h.
+# For example, if you do not want the current enabled/disabled state
+# of flap detection and event handlers for hosts to be retained, you
+# would use a value of 24 for the host attribute mask...
+# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
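+#
+# As a further illustration (assuming MODATTR_ACTIVE_CHECKS_ENABLED is
+# defined as 2 in include/common.h), also dropping the retained
+# enabled/disabled state of active checks would give 2 + 8 + 16 = 26:
+#
+#retained_host_attribute_mask=26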
+
+# This mask determines what host attributes are not retained
+retained_host_attribute_mask=0
+
+# This mask determines what service attributes are not retained
+retained_service_attribute_mask=0
+
+# These two masks determine what process attributes are not retained.
+# There are two masks, because some process attributes have host and service
+# options. For example, you can disable active host checks, but leave active
+# service checks enabled.
+retained_process_host_attribute_mask=0
+retained_process_service_attribute_mask=0
+
+# These two masks determine what contact attributes are not retained.
+# There are two masks, because some contact attributes have host and
+# service options. For example, you can disable host notifications for
+# a contact, but leave service notifications enabled for them.
+retained_contact_host_attribute_mask=0
+retained_contact_service_attribute_mask=0
+
+
+
+# INTERVAL LENGTH
+# This is the seconds per unit interval as used in the
+# host/contact/service configuration files. Setting this to 60 means
+# that each interval is one minute long (60 seconds). Other settings
+# have not been tested much, so your mileage is likely to vary...
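+#
+# For example, with interval_length=60 a service whose check_interval is 5
+# will be scheduled roughly every 5 x 60 = 300 seconds.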
+
+interval_length=60
+
+
+
+# AGGRESSIVE HOST CHECKING OPTION
+# If you don't want to turn on aggressive host checking features, set
+# this value to 0 (the default). Otherwise set this value to 1 to
+# enable the aggressive check option. Read the docs for more info
+# on what aggressive host checking is, or check out the source code in
+# base/checks.c
+
+use_aggressive_host_checking=0
+
+
+
+# SERVICE CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# service checks when it initially starts. If this option is
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in. Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of service checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_service_checks=1
+
+
+
+# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# service check results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_service_checks=1
+
+
+
+# HOST CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# host checks when it initially starts. If this option is
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in. Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of host checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_host_checks=1
+
+
+
+# PASSIVE HOST CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# host check results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_host_checks=1
+
+
+
+# NOTIFICATIONS OPTION
+# This determines whether or not Nagios will send out any host or
+# service notifications when it is initially (re)started.
+# Values: 1 = enable notifications, 0 = disable notifications
+
+enable_notifications=1
+
+
+
+# EVENT HANDLER USE OPTION
+# This determines whether or not Nagios will run any host or
+# service event handlers when it is initially (re)started. Unless
+# you're implementing redundant hosts, leave this option enabled.
+# Values: 1 = enable event handlers, 0 = disable event handlers
+
+enable_event_handlers=1
+
+
+
+# PROCESS PERFORMANCE DATA OPTION
+# This determines whether or not Nagios will process performance
+# data returned from service and host checks. If this option is
+# enabled, host performance data will be processed using the
+# host_perfdata_command (defined below) and service performance
+# data will be processed using the service_perfdata_command (also
+# defined below). Read the HTML docs for more information on
+# performance data.
+# Values: 1 = process performance data, 0 = do not process performance data
+
+process_performance_data=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
+# These commands are run after every host and service check is
+# performed. These commands are executed only if the
+# process_performance_data option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on performance data.
+
+#host_perfdata_command=process-host-perfdata
+#service_perfdata_command=process-service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILES
+# These files are used to store host and service performance data.
+# Performance data is only written to these files if the
+# process_performance_data option (above) is set to 1.
+
+#host_perfdata_file=/tmp/host-perfdata
+#service_perfdata_file=/tmp/service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
+# These options determine what data is written (and how) to the
+# performance data files. The templates may contain macros, special
+# characters (\t for tab, \r for carriage return, \n for newline)
+# and plain text. A newline is automatically added after each write
+# to the performance data file. Some examples of what you can do are
+# shown below.
+
+#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
+#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE MODES
+# This option determines whether or not the host and service
+# performance data files are opened in write ("w") or append ("a")
+# mode. If you want to use named pipes, you should use the special
+# pipe ("p") mode, which avoids blocking at startup; otherwise you will
+# likely want the default append ("a") mode.
+
+#host_perfdata_file_mode=a
+#service_perfdata_file_mode=a
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
+# These options determine how often (in seconds) the host and service
+# performance data files are processed using the commands defined
+# below. A value of 0 indicates the files should not be periodically
+# processed.
+
+#host_perfdata_file_processing_interval=0
+#service_perfdata_file_processing_interval=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
+# These commands are used to periodically process the host and
+# service performance data files. The interval at which the
+# processing occurs is determined by the options above.
+
+#host_perfdata_file_processing_command=process-host-perfdata-file
+#service_perfdata_file_processing_command=process-service-perfdata-file
+
+
+
+# OBSESS OVER SERVICE CHECKS OPTION
+# This determines whether or not Nagios will obsess over service
+# checks and run the ocsp_command defined below. Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option. Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over services, 0 = do not obsess (default)
+
+obsess_over_services=0
+
+
+
+# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
+# This is the command that is run for every service check that is
+# processed by Nagios. This command is executed only if the
+# obsess_over_services option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ocsp_command=somecommand
+
+
+
+# OBSESS OVER HOST CHECKS OPTION
+# This determines whether or not Nagios will obsess over host
+# checks and run the ochp_command defined below. Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option. Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over hosts, 0 = do not obsess (default)
+
+obsess_over_hosts=0
+
+
+
+# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
+# This is the command that is run for every host check that is
+# processed by Nagios. This command is executed only if the
+# obsess_over_hosts option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ochp_command=somecommand
+
+
+
+# TRANSLATE PASSIVE HOST CHECKS OPTION
+# This determines whether or not Nagios will translate
+# DOWN/UNREACHABLE passive host check results into their proper
+# state for this instance of Nagios. This option is useful
+# if you have distributed or failover monitoring setup. In
+# these cases your other Nagios servers probably have a different
+# "view" of the network, with regards to the parent/child relationship
+# of hosts. If a distributed monitoring server thinks a host
+# is DOWN, it may actually be UNREACHABLE from the point of view of
+# this Nagios instance. Enabling this option will tell Nagios
+# to translate any DOWN or UNREACHABLE host states it receives
+# passively into the correct state from the view of this server.
+# Values: 1 = perform translation, 0 = do not translate (default)
+
+translate_passive_host_checks=0
+
+
+
+# PASSIVE HOST CHECKS ARE SOFT OPTION
+# This determines whether or not Nagios will treat passive host
+# checks as being HARD or SOFT. By default, a passive host check
+# result will put a host into a HARD state type. This can be changed
+# by enabling this option.
+# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
+
+passive_host_checks_are_soft=0
+
+
+
+# ORPHANED HOST/SERVICE CHECK OPTIONS
+# These options determine whether or not Nagios will periodically
+# check for orphaned host and service checks. Since service checks are
+# not rescheduled until the results of their previous execution
+# instance are processed, there exists a possibility that some
+# checks may never get rescheduled. A similar situation exists for
+# host checks, although the exact scheduling details differ a bit
+# from service checks. Orphaned checks seem to be a rare
+# problem and should not happen under normal circumstances.
+# If you have problems with service checks never getting
+# rescheduled, make sure you have orphaned service checks enabled.
+# Values: 1 = enable checks, 0 = disable checks
+
+check_for_orphaned_services=1
+check_for_orphaned_hosts=1
+
+
+
+# SERVICE FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of service results. Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enable freshness checking, 0 = disable freshness checking
+
+check_service_freshness=1
+
+
+
+# SERVICE FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of service check results. If you have
+# disabled service freshness checking, this option has no effect.
+
+service_freshness_check_interval=60
+
+
+
+# HOST FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of host results. Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enable freshness checking, 0 = disable freshness checking
+
+check_host_freshness=0
+
+
+
+# HOST FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of host check results. If you have
+# disabled host freshness checking, this option has no effect.
+
+host_freshness_check_interval=60
+
+
+
+
+# ADDITIONAL FRESHNESS THRESHOLD LATENCY
+# This setting determines the number of seconds that Nagios
+# will add to any host and service freshness thresholds that
+# it calculates (those not explicitly specified by the user).
+
+additional_freshness_latency=15
+
+
+
+
+# FLAP DETECTION OPTION
+# This option determines whether or not Nagios will try
+# to detect hosts and services that are "flapping".
+# Flapping occurs when a host or service changes between
+# states too frequently. When Nagios detects that a
+# host or service is flapping, it will temporarily suppress
+# notifications for that host/service until it stops
+# flapping. Flap detection is very experimental, so read
+# the HTML documentation before enabling this feature!
+# Values: 1 = enable flap detection
+# 0 = disable flap detection (default)
+
+enable_flap_detection=1
+
+
+
+# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
+# Read the HTML documentation on flap detection for
+# an explanation of what this option does. This option
+# has no effect if flap detection is disabled.
+
+low_service_flap_threshold=5.0
+high_service_flap_threshold=20.0
+low_host_flap_threshold=5.0
+high_host_flap_threshold=20.0
+
+
+
+# DATE FORMAT OPTION
+# This option determines how short dates are displayed. Valid options
+# include:
+# us (MM-DD-YYYY HH:MM:SS)
+# euro (DD-MM-YYYY HH:MM:SS)
+# iso8601 (YYYY-MM-DD HH:MM:SS)
+# strict-iso8601 (YYYY-MM-DDTHH:MM:SS)
+#
+
+date_format=iso8601
+
+
+
+
+# TIMEZONE OFFSET
+# This option is used to override the default timezone that this
+# instance of Nagios runs in. If not specified, Nagios will use
+# the system configured timezone.
+#
+# NOTE: In order to display the correct timezone in the CGIs, you
+# will also need to alter the Apache directives for the CGI path
+# to include your timezone. Example:
+#
+# <Directory "/usr/local/nagios/sbin/">
+# SetEnv TZ "Australia/Brisbane"
+# ...
+# </Directory>
+
+#use_timezone=US/Mountain
+#use_timezone=Australia/Brisbane
+
+
+
+
+# P1.PL FILE LOCATION
+# This value determines where the p1.pl perl script (used by the
+# embedded Perl interpreter) is located. If you didn't compile
+# Nagios with embedded Perl support, this option has no effect.
+
+p1_file=/usr/lib/nagios3/p1.pl
+
+
+
+# EMBEDDED PERL INTERPRETER OPTION
+# This option determines whether or not the embedded Perl interpreter
+# will be enabled during runtime. This option has no effect if Nagios
+# has not been compiled with support for embedded Perl.
+# Values: 0 = disable interpreter, 1 = enable interpreter
+
+enable_embedded_perl=1
+
+
+
+# EMBEDDED PERL USAGE OPTION
+# This option determines whether or not Nagios will process Perl plugins
+# and scripts with the embedded Perl interpreter if the plugins/scripts
+# do not explicitly indicate whether or not it is okay to do so. Read
+# the HTML documentation on the embedded Perl interpreter for more
+# information on how this option works.
+
+use_embedded_perl_implicitly=1
+
+
+
+# ILLEGAL OBJECT NAME CHARACTERS
+# This option allows you to specify illegal characters that cannot
+# be used in host names, service descriptions, or names of other
+# object types.
+
+illegal_object_name_chars=`~!$%^&*|'"<>?,()=
+
+
+
+# ILLEGAL MACRO OUTPUT CHARACTERS
+# This option allows you to specify illegal characters that are
+# stripped from macros before being used in notifications, event
+# handlers, etc. This DOES NOT affect macros used in service or
+# host check commands.
+# The following macros are stripped of the characters you specify:
+# $HOSTOUTPUT$
+# $HOSTPERFDATA$
+# $HOSTACKAUTHOR$
+# $HOSTACKCOMMENT$
+# $SERVICEOUTPUT$
+# $SERVICEPERFDATA$
+# $SERVICEACKAUTHOR$
+# $SERVICEACKCOMMENT$
+
+illegal_macro_output_chars=`~$&|'"<>
+
+
+
+# REGULAR EXPRESSION MATCHING
+# This option controls whether or not regular expression matching
+# takes place in the object config files. Regular expression
+# matching is used to match host, hostgroup, service, and service
+# group names/descriptions in some fields of various object types.
+# Values: 1 = enable regexp matching, 0 = disable regexp matching
+
+use_regexp_matching=0
+
+
+
+# "TRUE" REGULAR EXPRESSION MATCHING
+# This option controls whether or not "true" regular expression
+# matching takes place in the object config files. This option
+# only has an effect if regular expression matching is enabled
+# (see above). If this option is DISABLED, regular expression
+# matching only occurs if a string contains wildcard characters
+# (* and ?). If the option is ENABLED, regexp matching occurs
+# all the time (which can be annoying).
+# Values: 1 = enable true matching, 0 = disable true matching
+
+use_true_regexp_matching=0
+
+
+
+# ADMINISTRATOR EMAIL/PAGER ADDRESSES
+# The email and pager address of a global administrator (likely you).
+# Nagios never uses these values itself, but you can access them by
+# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
+# commands.
+
+admin_email=root@localhost
+admin_pager=pageroot@localhost
+
+
+
+# DAEMON CORE DUMP OPTION
+# This option determines whether or not Nagios is allowed to create
+# a core dump when it runs as a daemon. Note that it is generally
+# considered bad form to allow this, but it may be useful for
+# debugging purposes. Enabling this option doesn't guarantee that
+# a core file will be produced, but that's just life...
+# Values: 1 - Allow core dumps
+# 0 - Do not allow core dumps (default)
+
+daemon_dumps_core=0
+
+
+
+# LARGE INSTALLATION TWEAKS OPTION
+# This option determines whether or not Nagios will take some shortcuts
+# which can save on memory and CPU usage in large Nagios installations.
+# Read the documentation for more information on the benefits/tradeoffs
+# of enabling this option.
+# Values: 1 - Enable tweaks
+# 0 - Disable tweaks (default)
+
+use_large_installation_tweaks=0
+
+
+
+# ENABLE ENVIRONMENT MACROS
+# This option determines whether or not Nagios will make all standard
+# macros available as environment variables when host/service checks
+# and system commands (event handlers, notifications, etc.) are
+# executed. Enabling this option can cause performance issues in
+# large installations, as it will consume a bit more memory and (more
+# importantly) consume more CPU.
+# Values: 1 - Enable environment variable macros (default)
+# 0 - Disable environment variable macros
+
+enable_environment_macros=1
+
+
+
+# CHILD PROCESS MEMORY OPTION
+# This option determines whether or not Nagios will free memory in
+# child processes (processes used to execute system commands and host/
+# service checks). If you specify a value here, it will override
+# program defaults.
+# Value: 1 - Free memory in child processes
+# 0 - Do not free memory in child processes
+
+#free_child_process_memory=1
+
+
+
+# CHILD PROCESS FORKING BEHAVIOR
+# This option determines how Nagios will fork child processes
+# (used to execute system commands and host/service checks). Normally
+# child processes are fork()ed twice, which provides a very high level
+# of isolation from problems. Fork()ing once is probably enough and will
+# save a great deal on CPU usage (in large installs), so you might
+# want to consider using this. If you specify a value here, it will
+# override program defaults.
+# Value: 1 - Child processes fork() twice
+# 0 - Child processes fork() just once
+
+#child_processes_fork_twice=1
+
+
+
+# DEBUG LEVEL
+# This option determines how much (if any) debugging information will
+# be written to the debug file. OR values together to log multiple
+# types of information.
+# Values:
+# -1 = Everything
+# 0 = Nothing
+# 1 = Functions
+# 2 = Configuration
+# 4 = Process information
+# 8 = Scheduled events
+# 16 = Host/service checks
+# 32 = Notifications
+# 64 = Event broker
+# 128 = External commands
+# 256 = Commands
+# 512 = Scheduled downtime
+# 1024 = Comments
+# 2048 = Macros
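+#
+# For example, to log host/service checks (16) together with notifications
+# (32), you could set this value to 48 (16 + 32). Illustrative only.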
+
+debug_level=0
+
+
+
+# DEBUG VERBOSITY
+# This option determines how verbose the debug log output will be.
+# Values: 0 = Brief output
+# 1 = More detailed
+# 2 = Very detailed
+
+debug_verbosity=1
+
+
+
+# DEBUG FILE
+# This option determines where Nagios should write debugging information.
+
+debug_file=/var/lib/nagios3/nagios.debug
+
+
+
+# MAX DEBUG FILE SIZE
+# This option determines the maximum size (in bytes) of the debug file. If
+# the file grows larger than this size, it will be renamed with a .old
+# extension. If a file already exists with a .old extension it will
+# automatically be deleted. This helps ensure your disk space usage doesn't
+# get out of control when debugging Nagios.
+
+max_debug_file_size=1000000
+
+
diff --git a/puppet/modules/site_nagios/manifests/add_host.pp b/puppet/modules/site_nagios/manifests/add_host.pp
new file mode 100644
index 00000000..498552b5
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/add_host.pp
@@ -0,0 +1,31 @@
+define site_nagios::add_host {
+ $nagios_host = $name
+ $nagios_hostname = $name['domain_internal']
+ $nagios_ip = $name['ip_address']
+ $nagios_services = $name['services']
+ $nagios_openvpn_gw = $name['openvpn_gateway_address']
+
+ # Add Nagios host
+ nagios_host { $nagios_hostname:
+ address => $nagios_ip,
+ use => 'generic-host',
+ }
+
+ # Add Nagios service
+
+  # First, we need to turn the service array into a hash, using a "hash template"
+ # see https://github.com/ashak/puppet-resource-looping
+ $nagios_service_hashpart = {
+ 'hostname' => $nagios_hostname,
+ 'ip_address' => $nagios_ip,
+ 'openvpn_gw' => $nagios_openvpn_gw,
+ }
+ $dynamic_parameters = {
+ 'service' => '%s'
+ }
+ $nagios_servicename = "${nagios_hostname}_%s"
+
+ $nagios_service_hash = create_resources_hash_from($nagios_servicename, $nagios_services, $nagios_service_hashpart, $dynamic_parameters)
+
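+  # Illustrative sketch only (hypothetical host and values): for a host whose
+  # internal domain is 'vpn1.example.org' with services ['webapp'], the
+  # resulting $nagios_service_hash looks roughly like:
+  #   { 'vpn1.example.org_webapp' => {
+  #       'hostname'   => 'vpn1.example.org',
+  #       'ip_address' => '1.2.3.4',
+  #       'openvpn_gw' => '',
+  #       'service'    => 'webapp' } }
+  # which create_resources() below turns into site_nagios::add_service resources.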
+ create_resources ( site_nagios::add_service, $nagios_service_hash )
+}
diff --git a/puppet/modules/site_nagios/manifests/add_service.pp b/puppet/modules/site_nagios/manifests/add_service.pp
new file mode 100644
index 00000000..6ef3cbf5
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/add_service.pp
@@ -0,0 +1,21 @@
+define site_nagios::add_service (
+ $hostname, $ip_address, $openvpn_gw = '', $service) {
+
+ case $service {
+ 'webapp': {
+ $check_command = 'check_https_cert'
+ $service_description = 'Website Certificate'
+ }
+ default: {
+ #notice ("No Nagios service check for service \"$service\"")
+ }
+ }
+
+ if ( $check_command != '' ) {
+ nagios_service { $name:
+ use => 'generic-service',
+ check_command => $check_command,
+ service_description => $service_description,
+ host_name => $hostname }
+ }
+}
diff --git a/puppet/modules/site_nagios/manifests/init.pp b/puppet/modules/site_nagios/manifests/init.pp
new file mode 100644
index 00000000..cab32905
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/init.pp
@@ -0,0 +1,4 @@
+class site_nagios {
+ tag 'leap_service'
+ include site_nagios::server
+}
diff --git a/puppet/modules/site_nagios/manifests/server.pp b/puppet/modules/site_nagios/manifests/server.pp
new file mode 100644
index 00000000..c98a8a1f
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server.pp
@@ -0,0 +1,38 @@
+class site_nagios::server inherits nagios::base {
+
+ # First, purge old nagios config (see #1467)
+ class { 'site_nagios::server::purge':
+ stage => initial
+ }
+
+ $nagios_hiera=hiera('nagios')
+ $nagiosadmin_pw = htpasswd_sha1($nagios_hiera['nagiosadmin_pw'])
+ $hosts = $nagios_hiera['hosts']
+
+ include nagios::defaults
+ include nagios::base
+ #Class ['nagios'] -> Class ['nagios::defaults']
+ class {'nagios::apache':
+ allow_external_cmd => true,
+ stored_config => false,
+ #before => Class ['nagios::defaults']
+ }
+
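+  # Override the htpasswd file shipped by the nagios module with a single
+  # 'nagiosadmin' entry, using the SHA1-hashed password from hiera.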
+ File ['nagios_htpasswd'] {
+ source => undef,
+ content => "nagiosadmin:$nagiosadmin_pw",
+ mode => '0640',
+ }
+
+
+ # deploy serverside plugins
+ file { '/usr/lib/nagios/plugins/check_openvpn_server.pl':
+ source => 'puppet:///modules/nagios/plugins/check_openvpn_server.pl',
+ mode => '0755',
+ owner => 'nagios',
+ group => 'nagios',
+ }
+
+ site_nagios::add_host {$hosts:}
+ include site_shorewall::monitor
+}
diff --git a/puppet/modules/site_nagios/manifests/server/purge.pp b/puppet/modules/site_nagios/manifests/server/purge.pp
new file mode 100644
index 00000000..39735cd3
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/purge.pp
@@ -0,0 +1,7 @@
+class site_nagios::server::purge {
+ exec {'purge_conf.d':
+ command => '/bin/rm -rf /etc/nagios3/conf.d/*',
+ onlyif => 'test -e /etc/nagios3/conf.d'
+ }
+
+}
diff --git a/puppet/modules/site_openvpn/manifests/init.pp b/puppet/modules/site_openvpn/manifests/init.pp
new file mode 100644
index 00000000..e3d2a9af
--- /dev/null
+++ b/puppet/modules/site_openvpn/manifests/init.pp
@@ -0,0 +1,108 @@
+class site_openvpn {
+ tag 'leap_service'
+ # parse hiera config
+ $ip_address = hiera('ip_address')
+ $interface = getvar("interface_${ip_address}")
+ #$gateway_address = hiera('gateway_address')
+ $openvpn_config = hiera('openvpn')
+ $openvpn_gateway_address = $openvpn_config['gateway_address']
+ $openvpn_tcp_network_prefix = '10.1.0'
+ $openvpn_tcp_netmask = '255.255.248.0'
+ $openvpn_tcp_cidr = '21'
+ $openvpn_udp_network_prefix = '10.2.0'
+ $openvpn_udp_netmask = '255.255.248.0'
+ $openvpn_udp_cidr = '21'
+ $x509_config = hiera('x509')
+
+ # deploy ca + server keys
+ include site_openvpn::keys
+
+ # create 2 openvpn config files, one for tcp, one for udp
+ site_openvpn::server_config { 'tcp_config':
+ port => '1194',
+ proto => 'tcp',
+ local => $openvpn_gateway_address,
+ server => "${openvpn_tcp_network_prefix}.0 ${openvpn_tcp_netmask}",
+ push => "\"dhcp-option DNS ${openvpn_tcp_network_prefix}.1\"",
+ management => '127.0.0.1 1000'
+ }
+ site_openvpn::server_config { 'udp_config':
+ port => '1194',
+ proto => 'udp',
+ server => "${openvpn_udp_network_prefix}.0 ${openvpn_udp_netmask}",
+ push => "\"dhcp-option DNS ${openvpn_udp_network_prefix}.1\"",
+ local => $openvpn_gateway_address,
+ management => '127.0.0.1 1001'
+ }
+
+ # add second IP on given interface
+ file { '/usr/local/bin/leap_add_second_ip.sh':
+ content => "#!/bin/sh
+ip addr show dev $interface | grep -q ${openvpn_gateway_address}/24 || ip addr add ${openvpn_gateway_address}/24 dev $interface
+/bin/echo 1 > /proc/sys/net/ipv4/ip_forward
+",
+ mode => '0755',
+ }
+
+ exec { '/usr/local/bin/leap_add_second_ip.sh':
+ subscribe => File['/usr/local/bin/leap_add_second_ip.sh'],
+ }
+
+ cron { 'leap_add_second_ip.sh':
+ command => "/usr/local/bin/leap_add_second_ip.sh",
+ user => 'root',
+ special => 'reboot',
+ }
+
+ # setup the resolver to listen on the vpn IP
+ include site_openvpn::resolver
+
+ include site_shorewall::eip
+
+ package {
+ 'openvpn':
+ ensure => installed;
+ }
+ service {
+ 'openvpn':
+ ensure => running,
+ hasrestart => true,
+ hasstatus => true,
+ require => Exec['concat_/etc/default/openvpn'];
+ }
+
+ file {
+ '/etc/openvpn':
+ ensure => directory,
+ require => Package['openvpn'];
+ }
+
+ file {
+ '/etc/openvpn/keys':
+ ensure => directory,
+ require => Package['openvpn'];
+ }
+
+ concat {
+ '/etc/default/openvpn':
+ owner => root,
+ group => root,
+ mode => 644,
+ warn => true,
+ notify => Service['openvpn'];
+ }
+
+ concat::fragment {
+ 'openvpn.default.header':
+ content => template('openvpn/etc-default-openvpn.erb'),
+ target => '/etc/default/openvpn',
+ order => 01;
+ }
+
+ concat::fragment {
+ "openvpn.default.autostart.${name}":
+ content => 'AUTOSTART=all',
+ target => '/etc/default/openvpn',
+ order => 10;
+ }
+}
diff --git a/puppet/modules/site_openvpn/manifests/keys.pp b/puppet/modules/site_openvpn/manifests/keys.pp
new file mode 100644
index 00000000..f3c5b423
--- /dev/null
+++ b/puppet/modules/site_openvpn/manifests/keys.pp
@@ -0,0 +1,51 @@
+class site_openvpn::keys {
+
+ x509::key {
+ 'leap_openvpn':
+ content => $site_openvpn::x509_config['key'],
+ notify => Service[openvpn];
+ }
+
+ x509::cert {
+ 'leap_openvpn':
+ content => $site_openvpn::x509_config['cert'],
+ notify => Service[openvpn];
+ }
+
+ x509::ca {
+ 'leap_ca':
+ content => $site_openvpn::x509_config['ca_cert'],
+ notify => Service[openvpn];
+ }
+
+ file { '/etc/openvpn/keys/dh.pem':
+ content => $site_openvpn::x509_config['dh'],
+ mode => '0644',
+ }
+
+ #
+ # CA bundle -- we want to have the possibility of allowing multiple CAs.
+ # For now, the reason is to transition to using client CA. In the future,
+ # we will want to be able to smoothly phase out one CA and phase in another.
+ # I tried "--capath" for this, but it did not work.
+ #
+
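+  # The concat resource and fragments below simply append both PEM
+  # certificates, one after the other, into /etc/openvpn/ca_bundle.pem;
+  # OpenVPN's "ca" option accepts such a concatenated multi-CA file.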
+ concat {
+ '/etc/openvpn/ca_bundle.pem':
+ owner => root,
+ group => root,
+ mode => 644,
+ warn => true,
+ notify => Service['openvpn'];
+ }
+
+ concat::fragment {
+ 'client_ca_cert':
+ content => $site_openvpn::x509_config['client_ca_cert'],
+ target => '/etc/openvpn/ca_bundle.pem';
+ 'ca_cert':
+ content => $site_openvpn::x509_config['ca_cert'],
+ target => '/etc/openvpn/ca_bundle.pem';
+ }
+
+}
diff --git a/puppet/modules/site_openvpn/manifests/resolver.pp b/puppet/modules/site_openvpn/manifests/resolver.pp
new file mode 100644
index 00000000..d3963c95
--- /dev/null
+++ b/puppet/modules/site_openvpn/manifests/resolver.pp
@@ -0,0 +1,36 @@
+class site_openvpn::resolver {
+
+  # this is an unfortunate way to get around the fact that the version of
+  # unbound we are working with does not accept a wildcard include directive
+  # (/etc/unbound/conf.d/*). When it does, these line definitions should
+  # go away, and the caching_resolver should instead be configured to
+  # include: /etc/unbound/conf.d/*
+
+ line {
+ 'add_tcp_resolver':
+ ensure => present,
+ file => '/etc/unbound/unbound.conf',
+ line => 'server: include: /etc/unbound/conf.d/vpn_tcp_resolver',
+ notify => Service['unbound'];
+
+ 'add_udp_resolver':
+ ensure => present,
+ file => '/etc/unbound/unbound.conf',
+ line => 'server: include: /etc/unbound/conf.d/vpn_udp_resolver',
+ notify => Service['unbound'];
+ }
+
+ file {
+ '/etc/unbound/conf.d/vpn_udp_resolver':
+ content => "interface: ${site_openvpn::openvpn_udp_network_prefix}.1\naccess-control: ${site_openvpn::openvpn_udp_network_prefix}.0/${site_openvpn::openvpn_udp_cidr} allow\n",
+ owner => root, group => root, mode => '0644',
+ require => Service['openvpn'],
+ notify => Service['unbound'];
+
+ '/etc/unbound/conf.d/vpn_tcp_resolver':
+ content => "interface: ${site_openvpn::openvpn_tcp_network_prefix}.1\naccess-control: ${site_openvpn::openvpn_tcp_network_prefix}.0/${site_openvpn::openvpn_tcp_cidr} allow\n",
+ owner => root, group => root, mode => '0644',
+ require => Service['openvpn'],
+ notify => Service['unbound'];
+ }
+}
diff --git a/puppet/modules/site_openvpn/manifests/server_config.pp b/puppet/modules/site_openvpn/manifests/server_config.pp
new file mode 100644
index 00000000..de273b46
--- /dev/null
+++ b/puppet/modules/site_openvpn/manifests/server_config.pp
@@ -0,0 +1,166 @@
+#
+# Cipher discussion
+# ================================
+#
+# We want to specify explicit values for the crypto options to prevent a MiTM from forcing
+# a weaker cipher. These should be set in both the server and the client ('auth' and 'cipher'
+# MUST be the same on both ends or no data will get transmitted).
+#
+# tls-cipher DHE-RSA-AES128-SHA
+#
+# dkg: For the TLS control channel, we want to make sure we choose a
+# key exchange mechanism that has PFS (meaning probably some form of ephemeral
+# Diffie-Hellman key exchange), and that uses a standard, well-tested cipher
+# (I recommend AES, and 128 bits is probably fine, since there are some known
+# weaknesses in the 192- and 256-bit key schedules). That leaves us with the
+# choice of public key algorithms: /usr/sbin/openvpn --show-tls | grep DHE |
+# grep AES128 | grep GCM.
+#
+# elijah:
+# I could not get any of these working:
+# * openvpn --show-tls | grep GCM
+# * openvpn --show-tls | grep DHE | grep AES128 | grep SHA256
+# so, i went with this:
+# * openvpn --show-tls | grep DHE | grep AES128 | grep -v SHA256 | grep -v GCM
+# Also, i couldn't get any of the elliptic curve algorithms to work. Not sure how
+# our cert generation interacts with the tls-cipher algorithms.
+#
+# note: in my tests, DHE-RSA-AES256-SHA is the one it negotiates if no value is set.
+#
+# auth SHA1
+#
+# dkg: For HMAC digest to authenticate packets, we just want SHA256. OpenVPN lists
+# a number of “digest” with names like “RSA-SHA256”, but this are legacy and
+# should be avoided.
+#
+# elijah: i am not so sure that the digest algo matters for 'auth' option, because
+# i think an attacker would have to forge the digest in real time, which is still far from
+# a possibility for SHA1. So, i am leaving the default for now (SHA1).
+#
+# cipher AES-128-CBC
+#
+# dkg: For the choice of cipher, we need to select an algorithm and a
+# cipher mode. OpenVPN defaults to Blowfish, which is a fine algorithm — but
+# our control channel is already relying on AES not being broken; if the
+# control channel is cracked, then the key material for the tunnel is exposed,
+# and the choice of algorithm is moot. So it makes more sense to me to rely on
+# the same cipher here: AES128. As for the cipher mode, OFB seems cleaner to
+# me, but CBC is more well-tested, and the OpenVPN man page (at least as of
+# version 2.2.1) says “CBC is recommended and CFB and OFB should be considered
+# advanced modes.”
+#
+# note: the default is BF-CBC (blowfish)
+#
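+# With the options defined below, the relevant lines in the rendered server
+# config end up as (sketch):
+#
+#   tls-cipher DHE-RSA-AES128-SHA
+#   auth SHA1
+#   cipher AES-128-CBC
+#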
+
+define site_openvpn::server_config ($port, $proto, $local, $server, $push, $management ) {
+
+ $openvpn_configname = $name
+
+ concat {
+ "/etc/openvpn/$openvpn_configname.conf":
+ owner => root,
+ group => root,
+ mode => 644,
+ warn => true,
+ require => File['/etc/openvpn'],
+ notify => Service['openvpn'];
+ }
+
+ openvpn::option {
+ "ca $openvpn_configname":
+ key => 'ca',
+ value => '/etc/openvpn/ca_bundle.pem',
+ server => $openvpn_configname;
+ "cert $openvpn_configname":
+ key => 'cert',
+ value => '/etc/x509/certs/leap_openvpn.crt',
+ server => $openvpn_configname;
+ "key $openvpn_configname":
+ key => 'key',
+ value => '/etc/x509/keys/leap_openvpn.key',
+ server => $openvpn_configname;
+ "dh $openvpn_configname":
+ key => 'dh',
+ value => '/etc/openvpn/keys/dh.pem',
+ server => $openvpn_configname;
+ "tls-cipher $openvpn_configname":
+ key => 'tls-cipher',
+ value => 'DHE-RSA-AES128-SHA',
+ server => $openvpn_configname;
+ "auth $openvpn_configname":
+ key => 'auth',
+ value => 'SHA1',
+ server => $openvpn_configname;
+ "cipher $openvpn_configname":
+ key => 'cipher',
+ value => 'AES-128-CBC',
+ server => $openvpn_configname;
+ "dev $openvpn_configname":
+ key => 'dev',
+ value => 'tun',
+ server => $openvpn_configname;
+ "duplicate-cn $openvpn_configname":
+ key => 'duplicate-cn',
+ server => $openvpn_configname;
+ "keepalive $openvpn_configname":
+ key => 'keepalive',
+ value => '5 20',
+ server => $openvpn_configname;
+ "local $openvpn_configname":
+ key => 'local',
+ value => $local,
+ server => $openvpn_configname;
+ "mute $openvpn_configname":
+ key => 'mute',
+ value => '5',
+ server => $openvpn_configname;
+ "mute-replay-warnings $openvpn_configname":
+ key => 'mute-replay-warnings',
+ server => $openvpn_configname;
+ "management $openvpn_configname":
+ key => 'management',
+ value => $management,
+ server => $openvpn_configname;
+ "proto $openvpn_configname":
+ key => 'proto',
+ value => $proto,
+ server => $openvpn_configname;
+ "push1 $openvpn_configname":
+ key => 'push',
+ value => $push,
+ server => $openvpn_configname;
+ "push2 $openvpn_configname":
+ key => 'push',
+ value => '"redirect-gateway def1"',
+ server => $openvpn_configname;
+ "script-security $openvpn_configname":
+ key => 'script-security',
+ value => '2',
+ server => $openvpn_configname;
+ "server $openvpn_configname":
+ key => 'server',
+ value => $server,
+ server => $openvpn_configname;
+ "status $openvpn_configname":
+ key => 'status',
+ value => '/var/run/openvpn-status 10',
+ server => $openvpn_configname;
+ "status-version $openvpn_configname":
+ key => 'status-version',
+ value => '3',
+ server => $openvpn_configname;
+ "topology $openvpn_configname":
+ key => 'topology',
+ value => 'subnet',
+ server => $openvpn_configname;
+ # no need for server-up.sh right now
+ #"up $openvpn_configname":
+ # key => 'up',
+ # value => '/etc/openvpn/server-up.sh',
+ # server => $openvpn_configname;
+ "verb $openvpn_configname":
+ key => 'verb',
+ value => '3',
+ server => $openvpn_configname;
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/couchdb.pp b/puppet/modules/site_shorewall/manifests/couchdb.pp
new file mode 100644
index 00000000..9fa59569
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/couchdb.pp
@@ -0,0 +1,23 @@
+class site_shorewall::couchdb {
+
+ include site_shorewall::defaults
+
+ $couchdb_port = '6984'
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_couchdb':
+ content => "PARAM - - tcp $couchdb_port",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+
+ shorewall::rule {
+ 'net2fw-couchdb':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_couchdb(ACCEPT)',
+ order => 200;
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/defaults.pp b/puppet/modules/site_shorewall/manifests/defaults.pp
new file mode 100644
index 00000000..d5639a90
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/defaults.pp
@@ -0,0 +1,54 @@
+class site_shorewall::defaults {
+ include shorewall
+
+ # be safe for development
+ #if ( $::virtual == 'virtualbox') { $shorewall_startup='0' }
+
+ $ip_address = hiera('ip_address')
+ # a special case for vagrant interfaces
+ $interface = $::virtual ? {
+ virtualbox => [ 'eth0', 'eth1' ],
+ default => getvar("interface_${ip_address}")
+ }
+
+
+ # If you want logging:
+ shorewall::params {
+ 'LOG': value => 'debug';
+ }
+
+ shorewall::zone {'net': type => 'ipv4'; }
+
+
+ # define interfaces
+ shorewall::interface { $interface:
+ zone => 'net',
+ options => 'tcpflags,blacklist,nosmurfs';
+ }
+
+ shorewall::routestopped { $interface: }
+
+ shorewall::policy {
+ 'fw-to-all':
+ sourcezone => 'fw',
+ destinationzone => 'all',
+ policy => 'ACCEPT',
+ order => 100;
+ 'all-to-all':
+ sourcezone => 'all',
+ destinationzone => 'all',
+ policy => 'DROP',
+ order => 200;
+ }
+
+ shorewall::rule {
+ # ping party
+ 'all2all-ping':
+ source => 'all',
+ destination => 'all',
+ action => 'Ping(ACCEPT)',
+ order => 200;
+ }
+
+ include site_shorewall::sshd
+}
diff --git a/puppet/modules/site_shorewall/manifests/dnat_rule.pp b/puppet/modules/site_shorewall/manifests/dnat_rule.pp
new file mode 100644
index 00000000..68f480d8
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/dnat_rule.pp
@@ -0,0 +1,25 @@
+define site_shorewall::dnat_rule {
+
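+  # For example (illustrative), site_shorewall::dnat_rule { '80': } redirects
+  # incoming tcp and udp traffic arriving on port 80 to the openvpn gateway
+  # address on port 1194. Port 1194 itself is skipped because openvpn already
+  # listens there directly.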
+ $port = $name
+ if $port != 1194 {
+ shorewall::rule {
+ "dnat_tcp_port_$port":
+ action => 'DNAT',
+ source => 'net',
+ destination => "\$FW:${site_openvpn::openvpn_gateway_address}:1194",
+ proto => 'tcp',
+ destinationport => $port,
+ order => 100;
+ }
+
+ shorewall::rule {
+ "dnat_udp_port_$port":
+ action => 'DNAT',
+ source => 'net',
+ destination => "\$FW:${site_openvpn::openvpn_gateway_address}:1194",
+ proto => 'udp',
+ destinationport => $port,
+ order => 100;
+ }
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/eip.pp b/puppet/modules/site_shorewall/manifests/eip.pp
new file mode 100644
index 00000000..4e5a5d48
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/eip.pp
@@ -0,0 +1,75 @@
+class site_shorewall::eip {
+
+ include site_shorewall::defaults
+ include site_shorewall::ip_forward
+
+ $openvpn_config = hiera('openvpn')
+ $openvpn_ports = $openvpn_config['ports']
+ $openvpn_gateway_address = $site_openvpn::openvpn_gateway_address
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_eip':
+ content => "PARAM - - tcp 1194
+PARAM - - udp 1194
+",
+ notify => Service['shorewall']
+ }
+
+
+ shorewall::interface {
+ 'tun0':
+ zone => 'eip',
+ options => 'tcpflags,blacklist,nosmurfs';
+ 'tun1':
+ zone => 'eip',
+ options => 'tcpflags,blacklist,nosmurfs'
+ }
+
+
+ shorewall::zone {'eip':
+ type => 'ipv4'; }
+
+ case $::virtual {
+ 'virtualbox': {
+ shorewall::masq {
+ 'eth0_tcp':
+ interface => 'eth0',
+ source => "${site_openvpn::openvpn_tcp_network_prefix}.0/${site_openvpn::openvpn_tcp_cidr}";
+ 'eth0_udp':
+ interface => 'eth0',
+ source => "${site_openvpn::openvpn_udp_network_prefix}.0/${site_openvpn::openvpn_udp_cidr}"; }
+ }
+ default: {
+ $interface = $site_shorewall::defaults::interface
+ shorewall::masq {
+ "${interface}_tcp":
+ interface => $interface,
+ source => "${site_openvpn::openvpn_tcp_network_prefix}.0/${site_openvpn::openvpn_tcp_cidr}";
+
+ "${interface}_udp":
+ interface => $interface,
+ source => "${site_openvpn::openvpn_udp_network_prefix}.0/${site_openvpn::openvpn_udp_cidr}"; }
+ }
+ }
+
+ shorewall::policy {
+ 'eip-to-all':
+ sourcezone => 'eip',
+ destinationzone => 'all',
+ policy => 'ACCEPT',
+ order => 100;
+ }
+
+ shorewall::rule {
+ 'net2fw-openvpn':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_eip(ACCEPT)',
+ order => 200;
+ }
+
+ # create dnat rule for each port
+ #create_resources('site_shorewall::dnat_rule', $openvpn_ports)
+ site_shorewall::dnat_rule { $openvpn_ports: }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/ip_forward.pp b/puppet/modules/site_shorewall/manifests/ip_forward.pp
new file mode 100644
index 00000000..d53ee8a5
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/ip_forward.pp
@@ -0,0 +1,10 @@
+class site_shorewall::ip_forward {
+ include augeas
+ augeas { 'enable_ip_forwarding':
+ changes => 'set /files/etc/shorewall/shorewall.conf/IP_FORWARDING Yes',
+ lens => 'Shellvars.lns',
+ incl => '/etc/shorewall/shorewall.conf',
+ notify => Service[shorewall],
+ require => [ Class[augeas], Package[shorewall] ];
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/monitor.pp b/puppet/modules/site_shorewall/manifests/monitor.pp
new file mode 100644
index 00000000..f4ed4f7c
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/monitor.pp
@@ -0,0 +1,8 @@
+class site_shorewall::monitor {
+
+ include site_shorewall::defaults
+ include site_shorewall::service::http
+ include site_shorewall::service::https
+
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/service/http.pp b/puppet/modules/site_shorewall/manifests/service/http.pp
new file mode 100644
index 00000000..74b874d5
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/service/http.pp
@@ -0,0 +1,13 @@
+class site_shorewall::service::http {
+
+ include site_shorewall::defaults
+
+ shorewall::rule {
+ 'net2fw-http':
+ source => 'net',
+ destination => '$FW',
+ action => 'HTTP(ACCEPT)',
+ order => 200;
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/service/https.pp b/puppet/modules/site_shorewall/manifests/service/https.pp
new file mode 100644
index 00000000..4a8b119c
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/service/https.pp
@@ -0,0 +1,12 @@
+class site_shorewall::service::https {
+
+ include site_shorewall::defaults
+
+ shorewall::rule {
+ 'net2fw-https':
+ source => 'net',
+ destination => '$FW',
+ action => 'HTTPS(ACCEPT)',
+ order => 200;
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/service/webapp_api.pp b/puppet/modules/site_shorewall/manifests/service/webapp_api.pp
new file mode 100644
index 00000000..0c6c824d
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/service/webapp_api.pp
@@ -0,0 +1,22 @@
+class site_shorewall::service::webapp_api {
+
+ $api = hiera('api')
+ $api_port = $api['port']
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_webapp_api':
+ content => "PARAM - - tcp $api_port ",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+
+ shorewall::rule {
+ 'net2fw-webapp_api':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_webapp_api(ACCEPT)',
+ order => 200;
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/sshd.pp b/puppet/modules/site_shorewall/manifests/sshd.pp
new file mode 100644
index 00000000..a8e09e42
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/sshd.pp
@@ -0,0 +1,24 @@
+class site_shorewall::sshd {
+
+ $ssh_config = hiera('ssh')
+ $ssh_port = $ssh_config['port']
+
+ include shorewall
+
+ # define macro for incoming sshd
+ file { '/etc/shorewall/macro.leap_sshd':
+ content => "PARAM - - tcp $ssh_port",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+
+ shorewall::rule {
+ # outside to server
+ 'net2fw-ssh':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_sshd(ACCEPT)',
+ order => 200;
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/tor.pp b/puppet/modules/site_shorewall/manifests/tor.pp
new file mode 100644
index 00000000..f35af985
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/tor.pp
@@ -0,0 +1,25 @@
+class site_shorewall::tor {
+
+ include site_shorewall::defaults
+ include site_shorewall::ip_forward
+
+ $tor_port = '9001'
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_tor':
+ content => "PARAM - - tcp $tor_port ",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+
+ shorewall::rule {
+ 'net2fw-tor':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_tor(ACCEPT)',
+ order => 200;
+ }
+
+ include site_shorewall::service::http
+}
diff --git a/puppet/modules/site_shorewall/manifests/webapp.pp b/puppet/modules/site_shorewall/manifests/webapp.pp
new file mode 100644
index 00000000..d12bbc8f
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/webapp.pp
@@ -0,0 +1,6 @@
+class site_shorewall::webapp {
+
+ include site_shorewall::defaults
+ include site_shorewall::service::https
+ include site_shorewall::service::webapp_api
+}
diff --git a/puppet/modules/site_sshd/manifests/init.pp b/puppet/modules/site_sshd/manifests/init.pp
new file mode 100644
index 00000000..630e9bdf
--- /dev/null
+++ b/puppet/modules/site_sshd/manifests/init.pp
@@ -0,0 +1 @@
+class site_sshd {}
diff --git a/puppet/modules/site_sshd/manifests/ssh_key.pp b/puppet/modules/site_sshd/manifests/ssh_key.pp
new file mode 100644
index 00000000..b47b2ebd
--- /dev/null
+++ b/puppet/modules/site_sshd/manifests/ssh_key.pp
@@ -0,0 +1,3 @@
+define site_sshd::ssh_key($key) {
+ # ... todo: deploy ssh_key
+}
diff --git a/puppet/modules/site_tor/manifests/disable_exit.pp b/puppet/modules/site_tor/manifests/disable_exit.pp
new file mode 100644
index 00000000..73016646
--- /dev/null
+++ b/puppet/modules/site_tor/manifests/disable_exit.pp
@@ -0,0 +1,7 @@
+class site_tor::disable_exit {
+ tor::daemon::exit_policy {
+ 'no_exit_at_all':
+ reject => '*:*';
+ }
+}
+
diff --git a/puppet/modules/site_tor/manifests/init.pp b/puppet/modules/site_tor/manifests/init.pp
new file mode 100644
index 00000000..ceb6fb13
--- /dev/null
+++ b/puppet/modules/site_tor/manifests/init.pp
@@ -0,0 +1,28 @@
+class site_tor {
+ tag 'leap_service'
+
+ $tor = hiera('tor')
+ $bandwidth_rate = $tor['bandwidth_rate']
+ $tor_type = $tor['type']
+ $nickname = $tor['nickname']
+ $contact_email = $tor['contacts']
+
+ $address = hiera('ip_address')
+
+ class { 'tor::daemon': }
+ tor::daemon::relay { $nickname:
+ port => 9001,
+ address => $address,
+ contact_info => $contact_email,
+ bandwidth_rate => $bandwidth_rate,
+ }
+
+ tor::daemon::directory { $::hostname: port => 80 }
+
+ include site_shorewall::tor
+
+ if ( $tor_type != 'exit' ) {
+ include site_tor::disable_exit
+ }
+
+}
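
site_tor expects a `tor` hash in hiera containing the keys read above. A minimal sketch of the data it assumes (values are illustrative, not taken from the source):

    tor:
      type: relay
      nickname: examplenode
      contacts: 'ops@example.net'
      bandwidth_rate: 6550

Whenever `type` is anything other than `exit`, the class also pulls in site_tor::disable_exit, which rejects all exit traffic via the tor::daemon::exit_policy resource shown earlier in this diff.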
diff --git a/puppet/modules/site_webapp/manifests/apache.pp b/puppet/modules/site_webapp/manifests/apache.pp
new file mode 100644
index 00000000..554b9147
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/apache.pp
@@ -0,0 +1,65 @@
+class site_webapp::apache {
+
+ $web_api = hiera('api')
+ $api_domain = $web_api['domain']
+ $api_port = $web_api['port']
+
+ $x509 = hiera('x509')
+ $commercial_key = $x509['commercial_key']
+ $commercial_cert = $x509['commercial_cert']
+ $commercial_root = $x509['commercial_ca_cert']
+ $api_key = $x509['key']
+ $api_cert = $x509['cert']
+ $api_root = $x509['ca_cert']
+
+ $apache_no_default_site = true
+ include apache::ssl
+
+ apache::module {
+ 'alias': ensure => present;
+ 'rewrite': ensure => present;
+ 'headers': ensure => present;
+ }
+
+ class { 'passenger': use_munin => false }
+
+ apache::vhost::file {
+ 'leap_webapp':
+ content => template('site_apache/vhosts.d/leap_webapp.conf.erb')
+ }
+
+ apache::vhost::file {
+ 'api':
+ content => template('site_apache/vhosts.d/api.conf.erb')
+ }
+
+ x509::key {
+ 'leap_webapp':
+ content => $commercial_key,
+ notify => Service[apache];
+
+ 'leap_api':
+ content => $api_key,
+ notify => Service[apache];
+ }
+
+ x509::cert {
+ 'leap_webapp':
+ content => $commercial_cert,
+ notify => Service[apache];
+
+ 'leap_api':
+ content => $api_cert,
+ notify => Service[apache];
+ }
+
+ x509::ca {
+ 'leap_webapp':
+ content => $commercial_root,
+ notify => Service[apache];
+
+ 'leap_api':
+ content => $api_root,
+ notify => Service[apache];
+ }
+}
diff --git a/puppet/modules/site_webapp/manifests/client_ca.pp b/puppet/modules/site_webapp/manifests/client_ca.pp
new file mode 100644
index 00000000..0d9b15d6
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/client_ca.pp
@@ -0,0 +1,25 @@
+##
+## This is for the special CA that the webapp uses exclusively to
+## generate client certificates.
+##
+
+class site_webapp::client_ca {
+ include x509::variables
+
+ $x509 = hiera('x509')
+ $cert_path = "${x509::variables::certs}/leap_client_ca.crt"
+ $key_path = "${x509::variables::keys}/leap_client_ca.key"
+
+ x509::key {
+ 'leap_client_ca':
+ source => $x509['client_ca_key'],
+ group => 'leap-webapp',
+ notify => Service[apache];
+ }
+
+ x509::cert {
+ 'leap_client_ca':
+ source => $x509['client_ca_cert'],
+ notify => Service[apache];
+ }
+}
diff --git a/puppet/modules/site_webapp/manifests/couchdb.pp b/puppet/modules/site_webapp/manifests/couchdb.pp
new file mode 100644
index 00000000..6cac666f
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/couchdb.pp
@@ -0,0 +1,16 @@
+class site_webapp::couchdb {
+
+ $webapp = hiera('webapp')
+ $couchdb_host = $webapp['couchdb_hosts']
+ $couchdb_user = $webapp['couchdb_user']['username']
+ $couchdb_password = $webapp['couchdb_user']['password']
+
+ file {
+ '/srv/leap-webapp/config/couchdb.yml':
+ content => template('site_webapp/couchdb.yml.erb'),
+ owner => 'leap-webapp',
+ group => 'leap-webapp',
+ mode => '0600';
+ }
+
+}
diff --git a/puppet/modules/site_webapp/manifests/init.pp b/puppet/modules/site_webapp/manifests/init.pp
new file mode 100644
index 00000000..e8134521
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/init.pp
@@ -0,0 +1,117 @@
+class site_webapp {
+ tag 'leap_service'
+ $definition_files = hiera('definition_files')
+ $provider = $definition_files['provider']
+ $eip_service = $definition_files['eip_service']
+ $node_domain = hiera('domain')
+ $provider_domain = $node_domain['full_suffix']
+ $webapp = hiera('webapp')
+
+ Class['ruby'] -> Class['rubygems'] -> Class['bundler::install']
+
+ class { 'ruby': ruby_version => '1.9.3' }
+
+ class { 'bundler::install': install_method => 'package' }
+
+ include rubygems
+ include site_webapp::apache
+ include site_webapp::couchdb
+ include site_webapp::client_ca
+
+ group { 'leap-webapp':
+ ensure => present,
+ allowdupe => false;
+ }
+
+ user { 'leap-webapp':
+ ensure => present,
+ allowdupe => false,
+ gid => 'leap-webapp',
+ groups => 'ssl-cert',
+ home => '/srv/leap-webapp',
+ require => [ Group['leap-webapp'] ];
+ }
+
+ file { '/srv/leap-webapp':
+ ensure => directory,
+ owner => 'leap-webapp',
+ group => 'leap-webapp',
+ require => User['leap-webapp'];
+ }
+
+ vcsrepo { '/srv/leap-webapp':
+ ensure => present,
+ revision => 'origin/master',
+ provider => git,
+ source => 'git://code.leap.se/leap_web',
+ owner => 'leap-webapp',
+ group => 'leap-webapp',
+ require => [ User['leap-webapp'], Group['leap-webapp'] ],
+ notify => Exec['bundler_update']
+ }
+
+ exec { 'bundler_update':
+ cwd => '/srv/leap-webapp',
+ command => '/bin/bash -c "/usr/bin/bundle check || /usr/bin/bundle install --path vendor/bundle"',
+ unless => '/usr/bin/bundle check',
+ user => 'leap-webapp',
+ timeout => 600,
+ require => [ Class['bundler::install'], Vcsrepo['/srv/leap-webapp'] ],
+ notify => Service['apache'];
+ }
+
+ exec { 'compile_assets':
+ cwd => '/srv/leap-webapp',
+ command => '/bin/bash -c "/usr/bin/bundle exec rake assets:precompile"',
+ user => 'leap-webapp',
+ require => Exec['bundler_update'],
+ notify => Service['apache'];
+ }
+
+ file {
+ '/srv/leap-webapp/public/provider.json':
+ content => $provider,
+ owner => 'leap-webapp', group => 'leap-webapp', mode => '0644';
+
+ '/srv/leap-webapp/public/ca.crt':
+ ensure => link,
+ target => '/usr/local/share/ca-certificates/leap_api.crt';
+
+ '/srv/leap-webapp/public/config':
+ ensure => directory,
+ owner => 'leap-webapp', group => 'leap-webapp', mode => '0755';
+
+ '/srv/leap-webapp/public/config/eip-service.json':
+ content => $eip_service,
+ owner => 'leap-webapp', group => 'leap-webapp', mode => '0644';
+ }
+
+ try::file {
+ '/srv/leap-webapp/public/favicon.ico':
+ ensure => 'link',
+ target => $webapp['favicon'];
+
+ '/srv/leap-webapp/app/assets/stylesheets/tail.scss':
+ ensure => 'link',
+ target => $webapp['tail_scss'];
+
+ '/srv/leap-webapp/app/assets/stylesheets/head.scss':
+ ensure => 'link',
+ target => $webapp['head_scss'];
+
+ '/srv/leap-webapp/public/img':
+ ensure => 'link',
+ target => $webapp['img_dir'];
+ }
+
+ file {
+ '/srv/leap-webapp/config/config.yml':
+ content => template('site_webapp/config.yml.erb'),
+ owner => 'leap-webapp',
+ group => 'leap-webapp',
+ mode => '0600';
+ }
+
+ include site_shorewall::webapp
+
+}
diff --git a/puppet/modules/site_webapp/templates/config.yml.erb b/puppet/modules/site_webapp/templates/config.yml.erb
new file mode 100644
index 00000000..9cf85f0c
--- /dev/null
+++ b/puppet/modules/site_webapp/templates/config.yml.erb
@@ -0,0 +1,5 @@
+production:
+ admins: [admin]
+ domain: <%= @provider_domain %>
+ client_ca_key: <%= scope.lookupvar('site_webapp::client_ca::key_path') %>
+ client_ca_cert: <%= scope.lookupvar('site_webapp::client_ca::cert_path') %>
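
The `client_ca_key` and `client_ca_cert` entries resolve to the `$key_path` and `$cert_path` variables set in site_webapp::client_ca, so the webapp reads exactly the files that the x509::key and x509::cert resources deploy. Assuming placeholder locations for the x509 module's key and certificate directories (the real prefixes come from x509::variables), the rendered file would look roughly like:

    production:
      admins: [admin]
      domain: example.org
      client_ca_key: /path/to/keys/leap_client_ca.key
      client_ca_cert: /path/to/certs/leap_client_ca.crt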
diff --git a/puppet/modules/site_webapp/templates/couchdb.yml.erb b/puppet/modules/site_webapp/templates/couchdb.yml.erb
new file mode 100644
index 00000000..ee521713
--- /dev/null
+++ b/puppet/modules/site_webapp/templates/couchdb.yml.erb
@@ -0,0 +1,8 @@
+production:
+ prefix: ""
+ protocol: 'https'
+ host: <%= @couchdb_host %>
+ port: 6984
+ username: <%= @couchdb_user %>
+ password: <%= @couchdb_password %>
+
diff --git a/puppet/modules/sshd b/puppet/modules/sshd
new file mode 160000
+Subproject bd2e283ab59430a7b3194804f1c8da7a9b58f8f
diff --git a/puppet/modules/stdlib b/puppet/modules/stdlib
new file mode 160000
+Subproject 2df66c041109ecca1099bf3977657572cc32ad2
diff --git a/puppet/modules/tor b/puppet/modules/tor
new file mode 160000
+Subproject a780e84001177f10a86a7bf824589c0553f513a
diff --git a/puppet/modules/try/README.md b/puppet/modules/try/README.md
new file mode 100644
index 00000000..3888661e
--- /dev/null
+++ b/puppet/modules/try/README.md
@@ -0,0 +1,13 @@
+This module provides a "try" wrapper around common resource types.
+
+For example:
+
+ try::file {
+ '/path/to/file':
+ ensure => 'link',
+ target => $target;
+ }
+
+This behaves just like `file`, but is silently skipped if `$target` is undefined or the target file does not exist.
+
+So far, only the `file` type with symlink targets is supported.
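
For instance, site_webapp (earlier in this diff) relies on try::file for optional branding assets that a provider may or may not define:

    try::file {
      '/srv/leap-webapp/public/favicon.ico':
        ensure => 'link',
        target => $webapp['favicon'];
    }

If `$webapp['favicon']` is unset or points at a missing file, the symlink is not created and, when the path lives inside a git checkout, the original file is restored instead.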
diff --git a/puppet/modules/try/manifests/file.pp b/puppet/modules/try/manifests/file.pp
new file mode 100644
index 00000000..406c0b7a
--- /dev/null
+++ b/puppet/modules/try/manifests/file.pp
@@ -0,0 +1,51 @@
+#
+# Like the built-in "file" type, but gracefully skipped if the target does not exist or is undefined.
+#
+# /bin/true and /usr/bin/test are hardcoded to their Debian paths.
+#
+
+define try::file (
+ $ensure = undef,
+ $target = undef,
+ $restore = true) {
+
+ if $target != undef {
+ exec { "check_${name}":
+ command => "/bin/true",
+ onlyif => "/usr/bin/test -e '${target}'",
+ loglevel => info;
+ }
+ file { "$name":
+ ensure => $ensure,
+ target => $target,
+ require => Exec["check_${name}"],
+ loglevel => info;
+ }
+ }
+
+ #
+ # if the target does not exist (or is undef), and the file happens to be in a git repo,
+ # then restore the file to its original state.
+ #
+ if $target == undef or $restore {
+ $file_basename = basename($name)
+ $file_dirname = dirname($name)
+ $command = "git rev-parse && unlink '${name}'; git checkout -- '${file_basename}' && chown --reference='${file_dirname}' '${name}'; true"
+ debug($command)
+
+ if $target == undef {
+ exec { "restore_${name}":
+ command => $command,
+ cwd => $file_dirname,
+ loglevel => info;
+ }
+ } else {
+ exec { "restore_${name}":
+ unless => "/usr/bin/test -e '${target}'",
+ command => $command,
+ cwd => $file_dirname,
+ loglevel => info;
+ }
+ }
+ }
+}
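
The restore exec is a single shell pipeline: `git rev-parse` checks that the file's directory is inside a git working tree, `unlink` removes the symlink, `git checkout -- <basename>` brings back the committed copy, `chown --reference` matches its ownership to the parent directory, and the trailing `true` keeps the exec from reporting failure when any of those steps does not apply.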
diff --git a/puppet/modules/try/manifests/init.pp b/puppet/modules/try/manifests/init.pp
new file mode 100644
index 00000000..1d2108c9
--- /dev/null
+++ b/puppet/modules/try/manifests/init.pp
@@ -0,0 +1,3 @@
+class try {
+
+}
diff --git a/puppet/modules/unbound b/puppet/modules/unbound
new file mode 160000
+Subproject ca7eb732064ce29fc83d4c32a4df7d9512d4580
diff --git a/puppet/modules/vcsrepo b/puppet/modules/vcsrepo
new file mode 160000
+Subproject 04851c28b12973c679fc9f234fd0f5a193df9d7
diff --git a/puppet/modules/x509 b/puppet/modules/x509
new file mode 160000
+Subproject 19254a38c1c372ae7912ea9f15500b9b1cbffe8