author     Micah Anderson <micah@leap.se>  2014-12-23 16:47:35 -0500
committer  Micah Anderson <micah@leap.se>  2014-12-23 16:47:35 -0500
commit     574a0554a95ba74867ebd0ca4a93195bfa104c14 (patch)
tree       fd07b5b2ec8b32e82aa665dad117ee6e51791884 /puppet
parent     126faf8606f4911ccc3c1f55a9e0f381a46d536a (diff)
parent     fc9a8af17d927085486052a53233401c42b0caab (diff)
Merge branch 'develop'
Conflicts:
    platform.rb

Change-Id: Ic2e08e594d29a585691341c8667ac0b64933a505
Diffstat (limited to 'puppet')
-rw-r--r--   puppet/manifests/site.pp | 5
m---------   puppet/modules/check_mk | 0
m---------   puppet/modules/couchdb | 0
-rwxr-xr-x   puppet/modules/obfsproxy/files/obfsproxy_init | 93
-rw-r--r--   puppet/modules/obfsproxy/files/obfsproxy_logrotate | 14
-rw-r--r--   puppet/modules/obfsproxy/manifests/init.pp | 86
-rw-r--r--   puppet/modules/obfsproxy/templates/etc_conf.erb | 11
m---------   puppet/modules/rsyslog | 0
-rw-r--r--   puppet/modules/site_apache/templates/vhosts.d/api.conf.erb | 2
-rw-r--r--   puppet/modules/site_apache/templates/vhosts.d/common.conf.erb | 2
-rw-r--r--   puppet/modules/site_apache/templates/vhosts.d/hidden_service.conf.erb | 33
-rw-r--r--   puppet/modules/site_apt/files/Debian/50unattended-upgrades | 16
-rw-r--r--   puppet/modules/site_apt/manifests/init.pp | 4
-rw-r--r--   puppet/modules/site_apt/manifests/leap_repo.pp | 5
-rw-r--r--   puppet/modules/site_apt/manifests/preferences/obfsproxy.pp | 9
-rw-r--r--   puppet/modules/site_apt/manifests/preferences/rsyslog.pp | 14
-rw-r--r--   puppet/modules/site_apt/manifests/unattended_upgrades.pp | 10
-rw-r--r--   puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg | 8
-rw-r--r--   puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg | 3
-rw-r--r--   puppet/modules/site_check_mk/files/agent/logwatch/syslog/openvpn.cfg | 8
-rw-r--r--   puppet/modules/site_check_mk/files/agent/logwatch/syslog/tapicero.cfg | 2
-rw-r--r--   puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg | 8
-rw-r--r--   puppet/modules/site_check_mk/files/extra_host_conf.mk | 6
-rw-r--r--   puppet/modules/site_check_mk/files/extra_service_conf.mk | 13
-rw-r--r--   puppet/modules/site_check_mk/files/ignored_services.mk | 3
-rw-r--r--   puppet/modules/site_check_mk/manifests/agent/couchdb.pp | 2
-rw-r--r--   puppet/modules/site_check_mk/manifests/agent/tapicero.pp | 6
-rw-r--r--   puppet/modules/site_check_mk/manifests/agent/webapp.pp | 19
-rw-r--r--   puppet/modules/site_check_mk/manifests/server.pp | 34
-rw-r--r--   puppet/modules/site_check_mk/templates/host_contactgroups.mk | 17
-rw-r--r--   puppet/modules/site_check_mk/templates/hostgroups.mk | 17
-rw-r--r--   puppet/modules/site_config/manifests/default.pp | 6
-rw-r--r--   puppet/modules/site_config/templates/ipv4firewall_up.rules.erb | 12
-rw-r--r--   puppet/modules/site_couchdb/files/runit_config | 6
-rw-r--r--   puppet/modules/site_couchdb/manifests/add_users.pp | 12
-rw-r--r--   puppet/modules/site_couchdb/manifests/bigcouch.pp | 44
-rw-r--r--   puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp | 2
-rw-r--r--   puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp | 4
-rw-r--r--   puppet/modules/site_couchdb/manifests/create_dbs.pp | 21
-rw-r--r--   puppet/modules/site_couchdb/manifests/init.pp | 154
-rw-r--r--   puppet/modules/site_couchdb/manifests/master.pp | 9
-rw-r--r--   puppet/modules/site_couchdb/manifests/mirror.pp | 77
-rw-r--r--   puppet/modules/site_couchdb/manifests/setup.pp | 46
-rw-r--r--   puppet/modules/site_couchdb/manifests/stunnel.pp | 112
-rw-r--r--   puppet/modules/site_haproxy/manifests/init.pp | 42
-rw-r--r--   puppet/modules/site_haproxy/templates/couch.erb | 32
-rw-r--r--   puppet/modules/site_haproxy/templates/haproxy.cfg.erb | 11
-rw-r--r--   puppet/modules/site_haproxy/templates/haproxy_couchdb.cfg.erb | 23
-rw-r--r--   puppet/modules/site_mx/manifests/couchdb.pp | 23
-rw-r--r--   puppet/modules/site_mx/manifests/init.pp | 2
-rwxr-xr-x   puppet/modules/site_nagios/files/plugins/check_last_regex_in_log | 85
-rw-r--r--   puppet/modules/site_nagios/manifests/add_host_services.pp | 6
-rw-r--r--   puppet/modules/site_nagios/manifests/add_service.pp | 13
-rw-r--r--   puppet/modules/site_nagios/manifests/plugins.pp | 16
-rw-r--r--   puppet/modules/site_nagios/manifests/server.pp | 21
-rw-r--r--   puppet/modules/site_nagios/manifests/server/add_contacts.pp | 16
-rw-r--r--   puppet/modules/site_nagios/manifests/server/contactgroup.pp | 6
-rw-r--r--   puppet/modules/site_nagios/manifests/server/hostgroup.pp | 3
-rw-r--r--   puppet/modules/site_nagios/manifests/server/icli.pp | 26
-rw-r--r--   puppet/modules/site_nagios/templates/icli_aliases.erb | 7
-rw-r--r--   puppet/modules/site_obfsproxy/README | 0
-rw-r--r--   puppet/modules/site_obfsproxy/manifests/init.pp | 39
-rw-r--r--   puppet/modules/site_openvpn/manifests/init.pp | 16
-rw-r--r--   puppet/modules/site_openvpn/manifests/server_config.pp | 12
-rw-r--r--   puppet/modules/site_postfix/manifests/mx.pp | 4
-rw-r--r--   puppet/modules/site_postfix/manifests/mx/smtp_tls.pp | 1
-rw-r--r--   puppet/modules/site_shorewall/manifests/couchdb.pp | 24
-rw-r--r--   puppet/modules/site_shorewall/manifests/couchdb/bigcouch.pp | 51
-rw-r--r--   puppet/modules/site_shorewall/manifests/couchdb/dnat.pp | 21
-rw-r--r--   puppet/modules/site_shorewall/manifests/dnat_rule.pp | 12
-rw-r--r--   puppet/modules/site_shorewall/manifests/obfsproxy.pp | 24
-rw-r--r--   puppet/modules/site_shorewall/manifests/stunnel/client.pp | 40
-rw-r--r--   puppet/modules/site_shorewall/manifests/stunnel/server.pp | 22
-rw-r--r--   puppet/modules/site_sshd/manifests/init.pp | 2
-rw-r--r--   puppet/modules/site_sshd/templates/ssh_config.erb | 17
-rw-r--r--   puppet/modules/site_static/manifests/init.pp | 7
-rw-r--r--   puppet/modules/site_stunnel/manifests/client.pp | 49
-rw-r--r--   puppet/modules/site_stunnel/manifests/clients.pp | 52
-rw-r--r--   puppet/modules/site_stunnel/manifests/init.pp | 17
-rw-r--r--   puppet/modules/site_stunnel/manifests/override_service.pp | 13
-rw-r--r--   puppet/modules/site_stunnel/manifests/servers.pp | 47
-rw-r--r--   puppet/modules/site_tor/manifests/init.pp | 30
-rw-r--r--   puppet/modules/site_webapp/manifests/couchdb.pp | 14
-rw-r--r--   puppet/modules/site_webapp/manifests/hidden_service.pp | 43
-rw-r--r--   puppet/modules/site_webapp/manifests/init.pp | 19
-rw-r--r--   puppet/modules/site_webapp/templates/config.yml.erb | 8
m---------   puppet/modules/sshd | 0
m---------   puppet/modules/stunnel | 0
-rw-r--r--   puppet/modules/tapicero/manifests/init.pp | 4
-rw-r--r--   puppet/modules/tapicero/templates/tapicero.yaml.erb | 12
90 files changed, 1366 insertions, 521 deletions
diff --git a/puppet/manifests/site.pp b/puppet/manifests/site.pp
index 9afa5dfd..57942d99 100644
--- a/puppet/manifests/site.pp
+++ b/puppet/manifests/site.pp
@@ -10,6 +10,7 @@ notice("Services for ${fqdn}: ${services_str}")
if member($services, 'openvpn') {
include site_openvpn
+ include site_obfsproxy
}
if member($services, 'couchdb') {
@@ -42,4 +43,8 @@ if member($services, 'static') {
include site_static
}
+if member($services, 'obfsproxy') {
+ include site_obfsproxy
+}
+
include site_config::packages::uninstall
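
Note that a node whose services list contains both 'openvpn' and 'obfsproxy' reaches "include site_obfsproxy" twice; this is harmless because Puppet's include function is idempotent. A minimal sketch of how the two conditionals above compose (the $services value is hypothetical, not taken from real node data):

    $services = ['openvpn', 'obfsproxy']   # hypothetical node services list

    if member($services, 'openvpn') {
      include site_openvpn
      include site_obfsproxy               # obfsproxy bridge in front of openvpn
    }

    if member($services, 'obfsproxy') {
      include site_obfsproxy               # no duplicate-declaration error: include is idempotent
    }
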
diff --git a/puppet/modules/check_mk b/puppet/modules/check_mk
-Subproject 5c11597a055858b5ddc1ce8f7f8db249f5f1b33
+Subproject 205859d87884ac4ceee6d1365548e7dc55640bf
diff --git a/puppet/modules/couchdb b/puppet/modules/couchdb
-Subproject c8f5443e0998d3d3d43505ff5a6fdf8c438d6c2
+Subproject 4c0d5673df02fe42e1bbadfee7d4ea1ca1f88e9
diff --git a/puppet/modules/obfsproxy/files/obfsproxy_init b/puppet/modules/obfsproxy/files/obfsproxy_init
new file mode 100755
index 00000000..01c8013a
--- /dev/null
+++ b/puppet/modules/obfsproxy/files/obfsproxy_init
@@ -0,0 +1,93 @@
+#!/bin/sh
+
+### BEGIN INIT INFO
+# Provides: obfsproxy daemon
+# Required-Start: $remote_fs $syslog
+# Required-Stop: $remote_fs $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: obfsproxy daemon
+# Description: obfsproxy daemon
+### END INIT INFO
+
+. /lib/lsb/init-functions
+
+DAEMON=/usr/bin/obfsproxy
+NAME=obfsproxy
+DESC="obfsproxy daemon"
+USER=obfsproxy
+DATDIR=/etc/obfsproxy
+PIDFILE=/var/run/obfsproxy.pid
+CONF=$DATDIR/obfsproxy.conf
+LOGFILE=/var/log/obfsproxy.log
+
+# If the daemon is not there, then exit.
+test -x $DAEMON || exit 0
+
+if [ -f $CONF ] ; then
+ . $CONF
+else
+ echo "Obfsproxy configuration file is missing, aborting..."
+ exit 2
+fi
+
+DAEMONARGS=" --log-min-severity=$LOG --log-file=$LOGFILE --data-dir=$DATDIR \
+ $TRANSPORT $PARAM --dest=$DEST_IP:$DEST_PORT server $BINDADDR:$PORT"
+
+start_obfsproxy() {
+ start-stop-daemon --start --quiet --oknodo -m --pidfile $PIDFILE \
+ -b -c $USER --startas $DAEMON --$DAEMONARGS
+}
+
+stop_obfsproxy() {
+ start-stop-daemon --stop --quiet --oknodo --pidfile $PIDFILE
+}
+
+status_obfsproxy() {
+ status_of_proc -p $PIDFILE $DAEMON $NAME
+}
+
+case $1 in
+ start)
+ if [ -e $PIDFILE ]; then
+ status_obfsproxy
+ if [ $? = "0" ]; then
+ exit
+ fi
+ fi
+ log_begin_msg "Starting $DESC"
+ start_obfsproxy
+ log_end_msg $?
+ ;;
+ stop)
+ if [ -e $PIDFILE ]; then
+ status_obfsproxy
+ if [ $? = "0" ]; then
+ log_begin_msg "Stopping $DESC"
+ stop_obfsproxy
+ rm -f $PIDFILE
+ log_end_msg $?
+ fi
+ else
+ status_obfsproxy
+ fi
+ ;;
+ restart)
+ $0 stop && sleep 2 && $0 start
+ ;;
+ status)
+ status_obfsproxy
+ ;;
+ reload)
+ if [ -e $PIDFILE ]; then
+ start-stop-daemon --stop --signal USR1 --quiet --pidfile $PIDFILE --name $NAME
+ log_success_msg "$DESC reloaded successfully"
+ else
+ log_failure_msg "$PIDFILE does not exist"
+ fi
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|restart|reload|status}"
+ exit 2
+ ;;
+esac
diff --git a/puppet/modules/obfsproxy/files/obfsproxy_logrotate b/puppet/modules/obfsproxy/files/obfsproxy_logrotate
new file mode 100644
index 00000000..e5679d0c
--- /dev/null
+++ b/puppet/modules/obfsproxy/files/obfsproxy_logrotate
@@ -0,0 +1,14 @@
+/var/log/obfsproxy.log {
+ daily
+ missingok
+ rotate 3
+ compress
+ delaycompress
+ notifempty
+ create 600 obfsproxy obfsproxy
+ postrotate
+ if [ -f /var/run/obfsproxy.pid ]; then
+ /etc/init.d/obfsproxy restart > /dev/null
+ fi
+ endscript
+}
diff --git a/puppet/modules/obfsproxy/manifests/init.pp b/puppet/modules/obfsproxy/manifests/init.pp
new file mode 100644
index 00000000..61714fdf
--- /dev/null
+++ b/puppet/modules/obfsproxy/manifests/init.pp
@@ -0,0 +1,86 @@
+class obfsproxy (
+ $transport,
+ $bind_address,
+ $port,
+ $param,
+ $dest_ip,
+ $dest_port,
+ $log_level = 'info'
+){
+
+ $user = 'obfsproxy'
+ $conf = '/etc/obfsproxy/obfsproxy.conf'
+
+ user { $user:
+ ensure => present,
+ system => true,
+ gid => $user,
+ }
+
+ group { $user:
+ ensure => present,
+ system => true,
+ }
+
+ file { '/etc/init.d/obfsproxy':
+ path => '/etc/init.d/obfsproxy',
+ ensure => present,
+ source => 'puppet:///modules/obfsproxy/obfsproxy_init',
+ owner => 'root',
+ group => 'root',
+ mode => '0750',
+ require => File[$conf],
+ }
+
+ file { $conf :
+ path => $conf,
+ ensure => present,
+ owner => 'root',
+ group => 'root',
+ mode => '0600',
+ content => template('obfsproxy/etc_conf.erb'),
+ }
+
+ file { '/etc/obfsproxy':
+ ensure => directory,
+ owner => $user,
+ group => $user,
+ mode => '0700',
+ require => User[$user],
+ }
+
+ file { '/var/log/obfsproxy.log':
+ ensure => present,
+ owner => $user,
+ group => $user,
+ mode => '0640',
+ require => User[$user],
+ }
+
+ file { '/etc/logrotate.d/obfsproxy':
+ ensure => present,
+ source => 'puppet:///modules/obfsproxy/obfsproxy_logrotate',
+ owner => 'root',
+ group => 'root',
+ mode => '0644',
+ require => File['/var/log/obfsproxy.log'],
+ }
+
+ package { 'obfsproxy':
+ ensure => present,
+ require => Class['site_apt::preferences::obfsproxy'],
+ }
+
+ service { 'obfsproxy':
+ ensure => running,
+ subscribe => File[$conf],
+ require => [
+ Package['obfsproxy'],
+ File['/etc/init.d/obfsproxy'],
+ User[$user],
+ Group[$user]]
+ }
+
+
+}
+
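
A hypothetical declaration of the class added above, putting a scramblesuit listener in front of a local OpenVPN port; every value here is illustrative only and not taken from the platform's hiera data:

    class { 'obfsproxy':
      transport    => 'scramblesuit',
      bind_address => '0.0.0.0',
      port         => '443',
      param        => 'example-scramblesuit-password',   # made-up shared secret
      dest_ip      => '127.0.0.1',
      dest_port    => '1194',
      log_level    => 'info',
    }

With transport set to 'scramblesuit', the etc_conf.erb template in the next hunk turns $param into a --password= argument, which the init script then passes to the obfsproxy daemon.
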
diff --git a/puppet/modules/obfsproxy/templates/etc_conf.erb b/puppet/modules/obfsproxy/templates/etc_conf.erb
new file mode 100644
index 00000000..8959ef78
--- /dev/null
+++ b/puppet/modules/obfsproxy/templates/etc_conf.erb
@@ -0,0 +1,11 @@
+TRANSPORT=<%= @transport %>
+PORT=<%= @port %>
+DEST_IP=<%= @dest_ip %>
+DEST_PORT=<%= @dest_port %>
+<% if @transport == "scramblesuit" -%>
+PARAM=--password=<%= @param %>
+<% else -%>
+PARAM=<%= @param %>
+<% end -%>
+LOG=<%= @log_level %>
+BINDADDR=<%= @bind_address %>
diff --git a/puppet/modules/rsyslog b/puppet/modules/rsyslog
-Subproject 20fbda6b91472e656331a9c64630fb207e9f578
+Subproject b8ef11c23949d12732ad5cdaebb3023ff39a297
diff --git a/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb b/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb
index 182e476d..e4732289 100644
--- a/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb
+++ b/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb
@@ -2,12 +2,14 @@
ServerName <%= api_domain %>
RewriteEngine On
RewriteRule ^.*$ https://<%= api_domain -%>:<%= api_port -%>%{REQUEST_URI} [R=permanent,L]
+ CustomLog ${APACHE_LOG_DIR}/other_vhosts_access.log common
</VirtualHost>
Listen 0.0.0.0:<%= api_port %>
<VirtualHost *:<%= api_port -%>>
ServerName <%= api_domain %>
+ CustomLog ${APACHE_LOG_DIR}/other_vhosts_access.log common
SSLEngine on
SSLProtocol all -SSLv2 -SSLv3
diff --git a/puppet/modules/site_apache/templates/vhosts.d/common.conf.erb b/puppet/modules/site_apache/templates/vhosts.d/common.conf.erb
index 5423ed71..a9733a97 100644
--- a/puppet/modules/site_apache/templates/vhosts.d/common.conf.erb
+++ b/puppet/modules/site_apache/templates/vhosts.d/common.conf.erb
@@ -3,12 +3,14 @@
ServerAlias www.<%= domain %>
RewriteEngine On
RewriteRule ^.*$ https://<%= domain -%>%{REQUEST_URI} [R=permanent,L]
+ CustomLog ${APACHE_LOG_DIR}/other_vhosts_access.log common
</VirtualHost>
<VirtualHost *:443>
ServerName <%= domain_name %>
ServerAlias <%= domain %>
ServerAlias www.<%= domain %>
+ CustomLog ${APACHE_LOG_DIR}/other_vhosts_access.log common
SSLEngine on
SSLProtocol all -SSLv2 -SSLv3
diff --git a/puppet/modules/site_apache/templates/vhosts.d/hidden_service.conf.erb b/puppet/modules/site_apache/templates/vhosts.d/hidden_service.conf.erb
new file mode 100644
index 00000000..0c6f3b8e
--- /dev/null
+++ b/puppet/modules/site_apache/templates/vhosts.d/hidden_service.conf.erb
@@ -0,0 +1,33 @@
+<VirtualHost 127.0.0.1:80>
+ ServerName <%= tor_domain %>
+
+ <IfModule mod_headers.c>
+ Header always unset X-Powered-By
+ Header always unset X-Runtime
+ </IfModule>
+
+<% if (defined? @services) and (@services.include? 'webapp') -%>
+ DocumentRoot /srv/leap/webapp/public
+
+ RewriteEngine On
+ # Check for maintenance file and redirect all requests
+ RewriteCond %{DOCUMENT_ROOT}/system/maintenance.html -f
+ RewriteCond %{SCRIPT_FILENAME} !maintenance.html
+ RewriteCond %{REQUEST_URI} !/images/maintenance.jpg
+ RewriteRule ^.*$ %{DOCUMENT_ROOT}/system/maintenance.html [L]
+
+ # http://www.modrails.com/documentation/Users%20guide%20Apache.html#_passengerallowencodedslashes_lt_on_off_gt
+ AllowEncodedSlashes on
+ PassengerAllowEncodedSlashes on
+ PassengerFriendlyErrorPages off
+ SetEnv TMPDIR /var/tmp
+
+ # Allow rails assets to be cached for a very long time (since the URLs change whenever the content changes)
+ <Location /assets/>
+ Header unset ETag
+ FileETag None
+ ExpiresActive On
+ ExpiresDefault "access plus 1 year"
+ </Location>
+<% end -%>
+</VirtualHost>
diff --git a/puppet/modules/site_apt/files/Debian/50unattended-upgrades b/puppet/modules/site_apt/files/Debian/50unattended-upgrades
new file mode 100644
index 00000000..f2f574fc
--- /dev/null
+++ b/puppet/modules/site_apt/files/Debian/50unattended-upgrades
@@ -0,0 +1,16 @@
+// this file is managed by puppet !
+
+Unattended-Upgrade::Allowed-Origins {
+ "${distro_id}:stable";
+ "${distro_id}:${distro_codename}-security";
+ "${distro_id}:${distro_codename}-updates";
+ "${distro_id} Backports:${distro_codename}-backports";
+ "leap.se:stable";
+};
+
+APT::Periodic::Update-Package-Lists "1";
+APT::Periodic::Download-Upgradeable-Packages "1";
+APT::Periodic::Unattended-Upgrade "1";
+
+Unattended-Upgrade::Mail "root";
+Unattended-Upgrade::MailOnlyOnError "true";
diff --git a/puppet/modules/site_apt/manifests/init.pp b/puppet/modules/site_apt/manifests/init.pp
index 9facf4cc..633ccf1e 100644
--- a/puppet/modules/site_apt/manifests/init.pp
+++ b/puppet/modules/site_apt/manifests/init.pp
@@ -1,4 +1,4 @@
-class site_apt {
+class site_apt {
class { 'apt':
custom_key_dir => 'puppet:///modules/site_apt/keys'
@@ -11,7 +11,7 @@ class site_apt {
content => 'Acquire::PDiffs "false";';
}
- include ::apt::unattended_upgrades
+ include ::site_apt::unattended_upgrades
apt::sources_list { 'secondary.list.disabled':
content => template('site_apt/secondary.list');
diff --git a/puppet/modules/site_apt/manifests/leap_repo.pp b/puppet/modules/site_apt/manifests/leap_repo.pp
index 6b3d9919..2d4ba0e1 100644
--- a/puppet/modules/site_apt/manifests/leap_repo.pp
+++ b/puppet/modules/site_apt/manifests/leap_repo.pp
@@ -1,6 +1,9 @@
class site_apt::leap_repo {
+ $platform = hiera_hash('platform')
+ $major_version = $platform['major_version']
+
apt::sources_list { 'leap.list':
- content => 'deb http://deb.leap.se/debian stable main',
+ content => "deb http://deb.leap.se/${major_version} wheezy main\n",
before => Exec[refresh_apt]
}
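
For illustration, assuming hiera_hash('platform') returned a hash whose major_version is '0.6' (a made-up value), the content string above would render as "deb http://deb.leap.se/0.6 wheezy main". A sketch:

    $platform      = { 'major_version' => '0.6' }   # hypothetical hiera value
    $major_version = $platform['major_version']
    # renders: deb http://deb.leap.se/0.6 wheezy main
    notice("deb http://deb.leap.se/${major_version} wheezy main")
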
diff --git a/puppet/modules/site_apt/manifests/preferences/obfsproxy.pp b/puppet/modules/site_apt/manifests/preferences/obfsproxy.pp
new file mode 100644
index 00000000..75b01956
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/preferences/obfsproxy.pp
@@ -0,0 +1,9 @@
+class site_apt::preferences::obfsproxy {
+
+ apt::preferences_snippet { 'obfsproxy':
+ package => 'obfsproxy',
+ release => 'wheezy-backports',
+ priority => 999;
+ }
+
+}
diff --git a/puppet/modules/site_apt/manifests/preferences/rsyslog.pp b/puppet/modules/site_apt/manifests/preferences/rsyslog.pp
index 132a6e24..bfeaa7da 100644
--- a/puppet/modules/site_apt/manifests/preferences/rsyslog.pp
+++ b/puppet/modules/site_apt/manifests/preferences/rsyslog.pp
@@ -1,9 +1,13 @@
class site_apt::preferences::rsyslog {
- apt::preferences_snippet { 'rsyslog_anon_depends':
- package => 'libestr0 librelp0 rsyslog*',
- priority => '999',
- pin => 'release a=wheezy-backports',
- before => Class['rsyslog::install']
+ apt::preferences_snippet {
+ 'rsyslog_anon_depends':
+ package => 'libestr0 librelp0 rsyslog*',
+ priority => '999',
+ pin => 'release a=wheezy-backports',
+ before => Class['rsyslog::install'];
+
+ 'fixed_rsyslog_anon_package':
+ ensure => absent;
}
}
diff --git a/puppet/modules/site_apt/manifests/unattended_upgrades.pp b/puppet/modules/site_apt/manifests/unattended_upgrades.pp
new file mode 100644
index 00000000..daebffab
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/unattended_upgrades.pp
@@ -0,0 +1,10 @@
+class site_apt::unattended_upgrades inherits apt::unattended_upgrades {
+ # override unattended-upgrades package resource to make sure
+ # that it is upgraded on every deploy (#6245)
+
+ include ::apt::unattended_upgrades
+
+ Package['unattended-upgrades'] {
+ ensure => latest
+ }
+}
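
The inherits-and-override idiom used here is generic Puppet: an inheriting class may re-open a resource declared in its parent and change attributes. A minimal sketch with hypothetical class and package names, not part of this commit:

    class site_example::base {                        # hypothetical parent class
      package { 'some-package': ensure => installed }
    }

    class site_example::always_latest inherits site_example::base {
      Package['some-package'] {
        ensure => latest,                             # overrides the parent's value
      }
    }
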
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg
index 28f333b0..95ddd2ca 100644
--- a/puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg
@@ -6,11 +6,19 @@
I 127.0.0.1 localhost:5984 .* ok
# https://leap.se/code/issues/5246
I Shutting down group server
+ # ignore bigcouch conflict errors, mainly coming from tapicero creating new users
+ I Error in process.*{{nocatch,conflict}
# ignore "Uncaught error in HTTP request: {exit, normal}" error
# it's suppressed in later versions of bigcouch anhow
# see https://leap.se/code/issues/5226
I Uncaught error in HTTP request: {exit,normal}
I Uncaught error in HTTP request: {exit,
+ # Ignore rexi_EXIT bigcouch error (Bug #6512)
+ I Error in process <[0-9.]+> on node .* with exit value: {{rexi_EXIT,{(killed|noproc|shutdown),\[{couch_db,collect_results
+ # Ignore "Generic server terminating" bigcouch message (Feature #6544)
+ I Generic server <.*> terminating
+ I {error_report,<.*>,
+ I {error_info,
C Uncaught error in HTTP request: {error,
C Response abnormally terminated: {nodedown,
C rexi_DOWN,noproc
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg
index 623d1e46..3af5045b 100644
--- a/puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg
@@ -2,4 +2,5 @@
C WSGI application error
C Error
C error
- W Timing out client:
+# Removed this line because we determined it was better to ignore it (#6566)
+# W Timing out client:
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog/openvpn.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/openvpn.cfg
index d58e876d..ac17c0ca 100644
--- a/puppet/modules/site_check_mk/files/agent/logwatch/syslog/openvpn.cfg
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/openvpn.cfg
@@ -2,6 +2,12 @@
# suddenly hangup before properly establishing
# a tls connection
I ovpn-.*TLS Error: Unroutable control packet received from
- I ovpn-.*TLS Error: TLS key negotiation failed to occur within 60 seconds (check your network connectivity)
+ I ovpn-.*TLS Error: TLS key negotiation failed to occur within 60 seconds \(check your network connectivity\)
I ovpn-.*TLS Error: TLS handshake failed
+ I ovpn-.*TLS Error: TLS object -> incoming plaintext read error
+ I ovpn-.*Fatal TLS error \(check_tls_errors_co\), restarting
+ I ovpn-.*TLS_ERROR: BIO read tls_read_plaintext error: error:140890B2:SSL routines:SSL3_GET_CLIENT_CERTIFICATE:no certificate
+
+ I ovpn-.*SIGUSR1\[soft,tls-error\] received, client-instance restarting
+ I ovpn-.*VERIFY ERROR: depth=0, error=certificate has expired
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog/tapicero.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/tapicero.cfg
index 93ce0311..e5721eea 100644
--- a/puppet/modules/site_check_mk/files/agent/logwatch/syslog/tapicero.cfg
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/tapicero.cfg
@@ -1,3 +1,5 @@
+# Ignore transient Tapicero errors when creating a db (#6511)
+ I tapicero.*(Creating database|Checking security of|Writing security to|Uploading design doc to) user-.* failed (\(trying again soon\)|(twice )?due to): (RestClient::Resource Not Found|RestClient::InternalServerError): (404 Resource Not Found|500 Internal Server Error)
C tapicero.*RestClient::InternalServerError:
# possible race condition between multiple tapicero
# instances, so we ignore it
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg
index 450b9e90..71395c50 100644
--- a/puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg
@@ -1,8 +1,14 @@
# some general patterns
+ I Error: Driver 'pcspkr' is already registered, aborting...
+# ignore postfix errors on lost connection (Bug #6476)
+ I postfix/smtpd.*SSL_accept error from.*lost connection
+# ignore postfix too many errors after DATA (#6545)
+ I postfix/smtpd.*too many errors after DATA from
C panic
C Oops
- I Error: Driver 'pcspkr' is already registered, aborting...
C Error
+# ignore ipv6 icmp errors for now (Bug #6540)
+ I kernel: .*icmpv6_send: no reply to icmp error
C error
W generic protection rip
W .*Unrecovered read error - auto reallocate failed
diff --git a/puppet/modules/site_check_mk/files/extra_host_conf.mk b/puppet/modules/site_check_mk/files/extra_host_conf.mk
new file mode 100644
index 00000000..2c96f97a
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/extra_host_conf.mk
@@ -0,0 +1,6 @@
+# retry 3 times before setting a host into a hard state
+# and send out notification
+extra_host_conf["max_check_attempts"] = [
+ ("4", ALL_HOSTS )
+]
+
diff --git a/puppet/modules/site_check_mk/files/extra_service_conf.mk b/puppet/modules/site_check_mk/files/extra_service_conf.mk
new file mode 100644
index 00000000..03d1ea76
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/extra_service_conf.mk
@@ -0,0 +1,13 @@
+# retry 3 times before setting a service into a hard state
+# and send out notification
+extra_service_conf["max_check_attempts"] = [
+ ("4", ALL_HOSTS , ALL_SERVICES )
+]
+
+# run check_mk_agent every 2 minutes if it terminates
+# successfully.
+# see https://leap.se/code/issues/6539 for the rationale
+extra_service_conf["normal_check_interval"] = [
+ ("2", ALL_HOSTS , "Check_MK" )
+]
+
diff --git a/puppet/modules/site_check_mk/files/ignored_services.mk b/puppet/modules/site_check_mk/files/ignored_services.mk
new file mode 100644
index 00000000..35dc4433
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/ignored_services.mk
@@ -0,0 +1,3 @@
+ignored_services = [
+ ( ALL_HOSTS, [ "NTP Time" ] )
+]
diff --git a/puppet/modules/site_check_mk/manifests/agent/couchdb.pp b/puppet/modules/site_check_mk/manifests/agent/couchdb.pp
index 01e2b886..ee0268a3 100644
--- a/puppet/modules/site_check_mk/manifests/agent/couchdb.pp
+++ b/puppet/modules/site_check_mk/manifests/agent/couchdb.pp
@@ -29,7 +29,7 @@ class site_check_mk::agent::couchdb {
}
file_line {
'Bigcouch_open_files':
- line => 'Bigcouch_open_files /srv/leap/nagios/plugins/check_unix_open_fds.pl -a beam -w 750,750 -c 1000,1000',
+ line => 'Bigcouch_open_files /srv/leap/nagios/plugins/check_unix_open_fds.pl -a beam -w 28672,28672 -c 30720,30720',
path => '/etc/check_mk/mrpe.cfg';
}
diff --git a/puppet/modules/site_check_mk/manifests/agent/tapicero.pp b/puppet/modules/site_check_mk/manifests/agent/tapicero.pp
index 369ed00b..ffd11100 100644
--- a/puppet/modules/site_check_mk/manifests/agent/tapicero.pp
+++ b/puppet/modules/site_check_mk/manifests/agent/tapicero.pp
@@ -1,5 +1,7 @@
class site_check_mk::agent::tapicero {
+ include ::site_nagios::plugins
+
concat::fragment { 'syslog_tapicero':
source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog/tapicero.cfg',
target => '/etc/check_mk/logwatch.d/syslog.cfg',
@@ -11,6 +13,10 @@ class site_check_mk::agent::tapicero {
'Tapicero_Procs':
line => 'Tapicero_Procs /usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a tapicero',
path => '/etc/check_mk/mrpe.cfg';
+
+ 'Tapicero_Heartbeat':
+ line => 'Tapicero_Heartbeat /usr/local/lib/nagios/plugins/check_last_regex_in_log -f /var/log/syslog -r "tapicero" -w 300 -c 600',
+ path => '/etc/check_mk/mrpe.cfg';
}
}
diff --git a/puppet/modules/site_check_mk/manifests/agent/webapp.pp b/puppet/modules/site_check_mk/manifests/agent/webapp.pp
index 64f5ea6d..88c3da30 100644
--- a/puppet/modules/site_check_mk/manifests/agent/webapp.pp
+++ b/puppet/modules/site_check_mk/manifests/agent/webapp.pp
@@ -1,20 +1,11 @@
class site_check_mk::agent::webapp {
- # check webapp login + soledad sync
- package { [ 'python-srp', 'python-requests', 'python-yaml', 'python-u1db' ]:
- ensure => installed
+ # remove leftovers of webapp python checks
+ file {
+ [ '/usr/lib/check_mk_agent/local/nagios-webapp_login.py',
+ '/usr/lib/check_mk_agent/local/soledad_sync.py' ]:
+ ensure => absent
}
- file { '/usr/lib/check_mk_agent/local/nagios-webapp_login.py':
- ensure => link,
- target => '/srv/leap/webapp/test/nagios/webapp_login.py',
- require => Package['check_mk-agent']
- }
- file { '/usr/lib/check_mk_agent/local/soledad_sync.py':
- ensure => link,
- target => '/srv/leap/webapp/test/nagios/soledad_sync.py',
- require => Package['check_mk-agent']
- }
-
# check syslog
concat::fragment { 'syslog_webapp':
diff --git a/puppet/modules/site_check_mk/manifests/server.pp b/puppet/modules/site_check_mk/manifests/server.pp
index e544ef0d..171f1576 100644
--- a/puppet/modules/site_check_mk/manifests/server.pp
+++ b/puppet/modules/site_check_mk/manifests/server.pp
@@ -5,11 +5,13 @@ class site_check_mk::server {
$type = $ssh_hash['authorized_keys']['monitor']['type']
$seckey = $ssh_hash['monitor']['private_key']
- $nagios_hiera = hiera_hash('nagios')
- $nagios_hosts = $nagios_hiera['hosts']
+ $nagios_hiera = hiera_hash('nagios')
+ $nagios_hosts = $nagios_hiera['hosts']
- $hosts = hiera_hash('hosts')
- $all_hosts = inline_template ('<% @hosts.keys.sort.each do |key| -%>"<%= @hosts[key]["domain_internal"] %>", <% end -%>')
+ $hosts = hiera_hash('hosts')
+ $all_hosts = inline_template ('<% @hosts.keys.sort.each do |key| -%>"<%= @hosts[key]["domain_internal"] %>", <% end -%>')
+ $domains_internal = $nagios_hiera['domains_internal']
+ $environments = $nagios_hiera['environments']
package { 'check-mk-server':
ensure => installed,
@@ -35,10 +37,32 @@ class site_check_mk::server {
content => template('site_check_mk/use_ssh.mk'),
notify => Exec['check_mk-refresh'],
require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/hostgroups.mk':
+ content => template('site_check_mk/hostgroups.mk'),
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/host_contactgroups.mk':
+ content => template('site_check_mk/host_contactgroups.mk'),
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/ignored_services.mk':
+ source => 'puppet:///modules/site_check_mk/ignored_services.mk',
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/extra_service_conf.mk':
+ source => 'puppet:///modules/site_check_mk/extra_service_conf.mk',
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/extra_host_conf.mk':
+ source => 'puppet:///modules/site_check_mk/extra_host_conf.mk',
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+
'/etc/check_mk/all_hosts_static':
content => $all_hosts,
notify => Exec['check_mk-refresh'],
require => Package['check-mk-server'];
+
'/etc/check_mk/.ssh':
ensure => directory,
require => Package['check-mk-server'];
@@ -52,6 +76,7 @@ class site_check_mk::server {
owner => 'nagios',
mode => '0644',
require => Package['check-mk-server'];
+
# check_icmp must be suid root or called by sudo
# see https://leap.se/code/issues/5171
'/usr/lib/nagios/plugins/check_icmp':
@@ -59,6 +84,5 @@ class site_check_mk::server {
require => Package['nagios-plugins-basic'];
}
-
include check_mk::agent::local_checks
}
diff --git a/puppet/modules/site_check_mk/templates/host_contactgroups.mk b/puppet/modules/site_check_mk/templates/host_contactgroups.mk
new file mode 100644
index 00000000..6a534967
--- /dev/null
+++ b/puppet/modules/site_check_mk/templates/host_contactgroups.mk
@@ -0,0 +1,17 @@
+<%
+ contact_groups = []
+ @environments.keys.sort.each do |env_name|
+ hosts = ""
+ @nagios_hosts.keys.sort.each do |hostname|
+ hostdata = @nagios_hosts[hostname]
+ domain_internal = hostdata['domain_internal']
+ if hostdata['environment'] == env_name
+ hosts << '"' + domain_internal + '", '
+ end
+ end
+ contact_groups << ' ( "%s", [%s] )' % [env_name, hosts]
+ end
+%>
+host_contactgroups = [
+<%= contact_groups.join(",\n") %>
+]
diff --git a/puppet/modules/site_check_mk/templates/hostgroups.mk b/puppet/modules/site_check_mk/templates/hostgroups.mk
new file mode 100644
index 00000000..7158dcd1
--- /dev/null
+++ b/puppet/modules/site_check_mk/templates/hostgroups.mk
@@ -0,0 +1,17 @@
+<%
+ host_groups = []
+ @environments.keys.sort.each do |env_name|
+ hosts = ""
+ @nagios_hosts.keys.sort.each do |hostname|
+ hostdata = @nagios_hosts[hostname]
+ domain_internal = hostdata['domain_internal']
+ if hostdata['environment'] == env_name
+ hosts << '"' + domain_internal + '", '
+ end
+ end
+ host_groups << ' ( "%s", [%s] )' % [env_name, hosts]
+ end
+%>
+host_groups = [
+<%= host_groups.join(",\n") %>
+]
diff --git a/puppet/modules/site_config/manifests/default.pp b/puppet/modules/site_config/manifests/default.pp
index fc2179de..790b5a16 100644
--- a/puppet/modules/site_config/manifests/default.pp
+++ b/puppet/modules/site_config/manifests/default.pp
@@ -59,10 +59,10 @@ class site_config::default {
include site_postfix::satellite
}
- # if class site_custom exists, include it.
+ # if class custom exists, include it.
# possibility for users to define custom puppet recipes
- if defined( '::site_custom') {
- include ::site_custom
+ if defined( '::custom') {
+ include ::custom
}
include site_check_mk::agent
diff --git a/puppet/modules/site_config/templates/ipv4firewall_up.rules.erb b/puppet/modules/site_config/templates/ipv4firewall_up.rules.erb
index 928a2b31..b0c2b7ad 100644
--- a/puppet/modules/site_config/templates/ipv4firewall_up.rules.erb
+++ b/puppet/modules/site_config/templates/ipv4firewall_up.rules.erb
@@ -2,7 +2,7 @@
*filter
:INPUT DROP [0:0]
:FORWARD DROP [0:0]
-:OUTPUT DROP [0:0]
+:OUTPUT ACCEPT [0:0]
-A INPUT -i lo -j ACCEPT
-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -p tcp -m state --state NEW,ESTABLISHED --dport 22 -j ACCEPT
@@ -11,14 +11,4 @@
-A INPUT -p icmp -m icmp --icmp-type 8 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
-A INPUT -p icmp -m icmp --icmp-type 0 -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -m limit --limit 5/min -j LOG --log-prefix "iptables denied: " --log-level 7
--A OUTPUT -o lo -j ACCEPT
--A OUTPUT -p icmp -m icmp --icmp-type 0 -m state --state RELATED,ESTABLISHED -j ACCEPT
--A OUTPUT -p icmp -m icmp --icmp-type 8 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
--A OUTPUT -p tcp -m state --state NEW,ESTABLISHED --sport 22 -j ACCEPT
--A OUTPUT -p tcp -m state --state NEW,ESTABLISHED --sport <%= @ssh_port %> -j ACCEPT
--A OUTPUT -p tcp -m state --state NEW,ESTABLISHED --dport 80 -j ACCEPT
--A OUTPUT -p tcp -m state --state NEW,ESTABLISHED --dport 443 -j ACCEPT
--A OUTPUT -p udp -m udp --dport 53 -j ACCEPT
--A OUTPUT -p udp -m udp --dport 123 -j ACCEPT
--A OUTPUT -m limit --limit 5/min -j LOG --log-prefix "iptables denied: " --log-level 7
COMMIT
diff --git a/puppet/modules/site_couchdb/files/runit_config b/puppet/modules/site_couchdb/files/runit_config
new file mode 100644
index 00000000..169b4832
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/runit_config
@@ -0,0 +1,6 @@
+#!/bin/bash
+exec 2>&1
+export HOME=/home/bigcouch
+ulimit -H -n 32768
+ulimit -S -n 32768
+exec chpst -u bigcouch /opt/bigcouch/bin/bigcouch
diff --git a/puppet/modules/site_couchdb/manifests/add_users.pp b/puppet/modules/site_couchdb/manifests/add_users.pp
index f9ea7349..2f734ed4 100644
--- a/puppet/modules/site_couchdb/manifests/add_users.pp
+++ b/puppet/modules/site_couchdb/manifests/add_users.pp
@@ -1,5 +1,8 @@
class site_couchdb::add_users {
+ Class['site_couchdb::create_dbs']
+ -> Class['site_couchdb::add_users']
+
# Couchdb users
## leap_mx couchdb user
@@ -51,4 +54,13 @@ class site_couchdb::add_users {
require => Couchdb::Query::Setup['localhost']
}
+ ## replication couchdb user
+ ## read/write: all databases for replication
+ couchdb::add_user { $site_couchdb::couchdb_replication_user:
+ roles => '["replication"]',
+ pw => $site_couchdb::couchdb_replication_pw,
+ salt => $site_couchdb::couchdb_replication_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
}
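
The chaining statement at the top of this class is Puppet's class-level ordering idiom: every resource in site_couchdb::create_dbs is applied before any resource in site_couchdb::add_users, regardless of where the two classes are included. A minimal sketch with hypothetical classes:

    class site_example::first {
      notify { 'applied first': }
    }

    class site_example::second {
      # orders the two classes as wholes, independent of include order;
      # both classes still have to be included somewhere for this to matter
      Class['site_example::first'] -> Class['site_example::second']
      notify { 'applied second': }
    }
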
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch.pp b/puppet/modules/site_couchdb/manifests/bigcouch.pp
new file mode 100644
index 00000000..16593ec7
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/bigcouch.pp
@@ -0,0 +1,44 @@
+class site_couchdb::bigcouch {
+
+ $config = $::site_couchdb::couchdb_config['bigcouch']
+ $cookie = $config['cookie']
+ $ednp_port = $config['ednp_port']
+
+ class { 'couchdb':
+ admin_pw => $::site_couchdb::couchdb_admin_pw,
+ admin_salt => $::site_couchdb::couchdb_admin_salt,
+ bigcouch => true,
+ bigcouch_cookie => $cookie,
+ ednp_port => $ednp_port,
+ chttpd_bind_address => '127.0.0.1'
+ }
+
+ #
+ # stunnel must be running correctly before bigcouch dbs can be set up.
+ #
+ Class['site_config::default']
+ -> Class['couchdb::bigcouch::package::cloudant']
+ -> Service['shorewall']
+ -> Exec['refresh_stunnel']
+ -> Class['site_couchdb::setup']
+ -> Class['site_couchdb::bigcouch::add_nodes']
+ -> Class['site_couchdb::bigcouch::settle_cluster']
+
+ include site_couchdb::bigcouch::add_nodes
+ include site_couchdb::bigcouch::settle_cluster
+ include site_couchdb::bigcouch::compaction
+
+ file { '/var/log/bigcouch':
+ ensure => directory
+ }
+
+ file { '/etc/sv/bigcouch/run':
+ ensure => present,
+ source => 'puppet:///modules/site_couchdb/runit_config',
+ owner => root,
+ group => root,
+ mode => '0755',
+ require => Package['couchdb'],
+ notify => Service['couchdb']
+ }
+}
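
The bigcouch settings read at the top of this class come out of the 'couch' hiera hash loaded in site_couchdb::init. For illustration, a hypothetical value written as a Puppet hash with the keys this class and init.pp dereference (the cookie and port are made up):

    $couchdb_config = {
      'mode'     => 'multimaster',
      'bigcouch' => {
        'cookie'    => 'made-up-erlang-cookie',
        'ednp_port' => '9002',                  # illustrative port only
      },
    }
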
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp b/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp
index 97e85785..c8c43275 100644
--- a/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp
+++ b/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp
@@ -1,6 +1,6 @@
class site_couchdb::bigcouch::add_nodes {
# loop through neighbors array and add nodes
- $nodes = $::site_couchdb::bigcouch_config['neighbors']
+ $nodes = $::site_couchdb::bigcouch::config['neighbors']
couchdb::bigcouch::add_node { $nodes:
require => Couchdb::Query::Setup['localhost']
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp b/puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp
index aa843e2e..820b5be2 100644
--- a/puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp
+++ b/puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp
@@ -1,11 +1,11 @@
class site_couchdb::bigcouch::settle_cluster {
exec { 'wait_for_couch_nodes':
- command => '/srv/leap/bin/run_tests --test CouchDB/Are_configured_nodes_online? --retry 6 --wait 10'
+ command => '/srv/leap/bin/run_tests --test CouchDB/Are_configured_nodes_online? --retry 12 --wait 10'
}
exec { 'settle_cluster_membership':
- command => '/srv/leap/bin/run_tests --test CouchDB/Is_cluster_membership_ok? --retry 6 --wait 10',
+ command => '/srv/leap/bin/run_tests --test CouchDB/Is_cluster_membership_ok? --retry 12 --wait 10',
require => Exec['wait_for_couch_nodes']
}
}
diff --git a/puppet/modules/site_couchdb/manifests/create_dbs.pp b/puppet/modules/site_couchdb/manifests/create_dbs.pp
index 41500d3a..4322f773 100644
--- a/puppet/modules/site_couchdb/manifests/create_dbs.pp
+++ b/puppet/modules/site_couchdb/manifests/create_dbs.pp
@@ -1,11 +1,14 @@
class site_couchdb::create_dbs {
+ Class['site_couchdb::setup']
+ -> Class['site_couchdb::create_dbs']
+
# Couchdb databases
### customer database
### r/w: webapp,
couchdb::create_db { 'customers':
- members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [] }",
+ members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [\"replication\"] }",
require => Couchdb::Query::Setup['localhost']
}
@@ -13,35 +16,35 @@ class site_couchdb::create_dbs {
## r: nickserver, leap_mx - needs to be restrict with design document
## r/w: webapp
couchdb::create_db { 'identities':
- members => "{ \"names\": [], \"roles\": [\"identities\"] }",
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"identities\"] }",
require => Couchdb::Query::Setup['localhost']
}
## keycache database
## r/w: nickserver
couchdb::create_db { 'keycache':
- members => "{ \"names\": [], \"roles\": [\"keycache\"] }",
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"keycache\"] }",
require => Couchdb::Query::Setup['localhost']
}
## sessions database
## r/w: webapp
couchdb::create_db { 'sessions':
- members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [] }",
+ members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [\"replication\"] }",
require => Couchdb::Query::Setup['localhost']
}
## shared database
## r/w: soledad
couchdb::create_db { 'shared':
- members => "{ \"names\": [\"$site_couchdb::couchdb_soledad_user\"], \"roles\": [] }",
+ members => "{ \"names\": [\"$site_couchdb::couchdb_soledad_user\"], \"roles\": [\"replication\"] }",
require => Couchdb::Query::Setup['localhost']
}
## tickets database
## r/w: webapp
couchdb::create_db { 'tickets':
- members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [] }",
+ members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [\"replication\"] }",
require => Couchdb::Query::Setup['localhost']
}
@@ -49,14 +52,14 @@ class site_couchdb::create_dbs {
## r: soledad - needs to be restricted with a design document
## r/w: webapp
couchdb::create_db { 'tokens':
- members => "{ \"names\": [], \"roles\": [\"tokens\"] }",
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"tokens\"] }",
require => Couchdb::Query::Setup['localhost']
}
## users database
## r/w: webapp
couchdb::create_db { 'users':
- members => "{ \"names\": [], \"roles\": [\"users\"] }",
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"users\"] }",
require => Couchdb::Query::Setup['localhost']
}
@@ -64,7 +67,7 @@ class site_couchdb::create_dbs {
## store messages to the clients such as payment reminders
## r/w: webapp
couchdb::create_db { 'messages':
- members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [] }",
+ members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [\"replication\"] }",
require => Couchdb::Query::Setup['localhost']
}
}
diff --git a/puppet/modules/site_couchdb/manifests/init.pp b/puppet/modules/site_couchdb/manifests/init.pp
index 3614661d..a11f6309 100644
--- a/puppet/modules/site_couchdb/manifests/init.pp
+++ b/puppet/modules/site_couchdb/manifests/init.pp
@@ -1,118 +1,68 @@
class site_couchdb {
tag 'leap_service'
- $couchdb_config = hiera('couch')
- $couchdb_users = $couchdb_config['users']
-
- $couchdb_admin = $couchdb_users['admin']
- $couchdb_admin_user = $couchdb_admin['username']
- $couchdb_admin_pw = $couchdb_admin['password']
- $couchdb_admin_salt = $couchdb_admin['salt']
-
- $couchdb_leap_mx = $couchdb_users['leap_mx']
- $couchdb_leap_mx_user = $couchdb_leap_mx['username']
- $couchdb_leap_mx_pw = $couchdb_leap_mx['password']
- $couchdb_leap_mx_salt = $couchdb_leap_mx['salt']
-
- $couchdb_nickserver = $couchdb_users['nickserver']
- $couchdb_nickserver_user = $couchdb_nickserver['username']
- $couchdb_nickserver_pw = $couchdb_nickserver['password']
- $couchdb_nickserver_salt = $couchdb_nickserver['salt']
-
- $couchdb_soledad = $couchdb_users['soledad']
- $couchdb_soledad_user = $couchdb_soledad['username']
- $couchdb_soledad_pw = $couchdb_soledad['password']
- $couchdb_soledad_salt = $couchdb_soledad['salt']
-
- $couchdb_tapicero = $couchdb_users['tapicero']
- $couchdb_tapicero_user = $couchdb_tapicero['username']
- $couchdb_tapicero_pw = $couchdb_tapicero['password']
- $couchdb_tapicero_salt = $couchdb_tapicero['salt']
-
- $couchdb_webapp = $couchdb_users['webapp']
- $couchdb_webapp_user = $couchdb_webapp['username']
- $couchdb_webapp_pw = $couchdb_webapp['password']
- $couchdb_webapp_salt = $couchdb_webapp['salt']
-
- $couchdb_backup = $couchdb_config['backup']
-
- $bigcouch_config = $couchdb_config['bigcouch']
- $bigcouch_cookie = $bigcouch_config['cookie']
-
- $ednp_port = $bigcouch_config['ednp_port']
-
- class { 'couchdb':
- bigcouch => true,
- admin_pw => $couchdb_admin_pw,
- admin_salt => $couchdb_admin_salt,
- bigcouch_cookie => $bigcouch_cookie,
- ednp_port => $ednp_port,
- chttpd_bind_address => '127.0.0.1'
- }
-
- # ensure that we don't have leftovers from previous installations
- # where we installed the cloudant bigcouch package
- # https://leap.se/code/issues/4971
- class { 'couchdb::bigcouch::package::cloudant':
- ensure => absent
- }
+ $couchdb_config = hiera('couch')
+ $couchdb_users = $couchdb_config['users']
+
+ $couchdb_admin = $couchdb_users['admin']
+ $couchdb_admin_user = $couchdb_admin['username']
+ $couchdb_admin_pw = $couchdb_admin['password']
+ $couchdb_admin_salt = $couchdb_admin['salt']
+
+ $couchdb_leap_mx = $couchdb_users['leap_mx']
+ $couchdb_leap_mx_user = $couchdb_leap_mx['username']
+ $couchdb_leap_mx_pw = $couchdb_leap_mx['password']
+ $couchdb_leap_mx_salt = $couchdb_leap_mx['salt']
+
+ $couchdb_nickserver = $couchdb_users['nickserver']
+ $couchdb_nickserver_user = $couchdb_nickserver['username']
+ $couchdb_nickserver_pw = $couchdb_nickserver['password']
+ $couchdb_nickserver_salt = $couchdb_nickserver['salt']
+
+ $couchdb_soledad = $couchdb_users['soledad']
+ $couchdb_soledad_user = $couchdb_soledad['username']
+ $couchdb_soledad_pw = $couchdb_soledad['password']
+ $couchdb_soledad_salt = $couchdb_soledad['salt']
+
+ $couchdb_tapicero = $couchdb_users['tapicero']
+ $couchdb_tapicero_user = $couchdb_tapicero['username']
+ $couchdb_tapicero_pw = $couchdb_tapicero['password']
+ $couchdb_tapicero_salt = $couchdb_tapicero['salt']
+
+ $couchdb_webapp = $couchdb_users['webapp']
+ $couchdb_webapp_user = $couchdb_webapp['username']
+ $couchdb_webapp_pw = $couchdb_webapp['password']
+ $couchdb_webapp_salt = $couchdb_webapp['salt']
+
+ $couchdb_replication = $couchdb_users['replication']
+ $couchdb_replication_user = $couchdb_replication['username']
+ $couchdb_replication_pw = $couchdb_replication['password']
+ $couchdb_replication_salt = $couchdb_replication['salt']
+
+ $couchdb_backup = $couchdb_config['backup']
+ $couchdb_mode = $couchdb_config['mode']
+
+ if $couchdb_mode == 'multimaster' { include site_couchdb::bigcouch }
+ if $couchdb_mode == 'master' { include site_couchdb::master }
+ if $couchdb_mode == 'mirror' { include site_couchdb::mirror }
Class['site_config::default']
- -> Class['couchdb::bigcouch::package::cloudant']
-> Service['shorewall']
- -> Class['site_couchdb::stunnel']
- -> Service['couchdb']
- -> File['/root/.netrc']
- -> Class['site_couchdb::bigcouch::add_nodes']
- -> Class['site_couchdb::bigcouch::settle_cluster']
- -> Class['site_couchdb::create_dbs']
- -> Class['site_couchdb::add_users']
-
- # /etc/couchdb/couchdb.netrc is deployed by couchdb::query::setup
- # we symlink this to /root/.netrc for couchdb_scripts (eg. backup)
- # and makes life easier for the admin (i.e. using curl/wget without
- # passing credentials)
- file {
- '/root/.netrc':
- ensure => link,
- target => '/etc/couchdb/couchdb.netrc';
-
- '/srv/leap/couchdb':
- ensure => directory
- }
-
- couchdb::query::setup { 'localhost':
- user => $couchdb_admin_user,
- pw => $couchdb_admin_pw,
- }
-
- vcsrepo { '/srv/leap/couchdb/scripts':
- ensure => present,
- provider => git,
- source => 'https://leap.se/git/couchdb_scripts',
- revision => 'origin/master',
- require => File['/srv/leap/couchdb']
- }
-
- include site_couchdb::stunnel
- include site_couchdb::bigcouch::add_nodes
- include site_couchdb::bigcouch::settle_cluster
+ -> Exec['refresh_stunnel']
+ -> Class['couchdb']
+ -> Class['site_couchdb::setup']
+
+ include site_stunnel
+
+ include site_couchdb::setup
include site_couchdb::create_dbs
include site_couchdb::add_users
include site_couchdb::designs
include site_couchdb::logrotate
- include site_couchdb::bigcouch::compaction
- if $couchdb_backup { include site_couchdb::backup }
-
- include site_shorewall::couchdb
- include site_shorewall::couchdb::bigcouch
+ if $couchdb_backup { include site_couchdb::backup }
include site_check_mk::agent::couchdb
include site_check_mk::agent::tapicero
- file { '/var/log/bigcouch':
- ensure => directory
- }
-
}
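
The three if statements that pick a couchdb flavour could equally be written as a case over $couchdb_mode; a sketch of that alternative (not in the module), which also fails loudly on an unrecognised mode:

    case $couchdb_mode {
      'multimaster': { include site_couchdb::bigcouch }
      'master':      { include site_couchdb::master }
      'mirror':      { include site_couchdb::mirror }
      default:       { fail("unsupported couch mode: ${couchdb_mode}") }
    }
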
diff --git a/puppet/modules/site_couchdb/manifests/master.pp b/puppet/modules/site_couchdb/manifests/master.pp
new file mode 100644
index 00000000..a0a6633d
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/master.pp
@@ -0,0 +1,9 @@
+class site_couchdb::master {
+
+ class { 'couchdb':
+ admin_pw => $site_couchdb::couchdb_admin_pw,
+ admin_salt => $site_couchdb::couchdb_admin_salt,
+ chttpd_bind_address => '127.0.0.1'
+ }
+
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/manifests/mirror.pp b/puppet/modules/site_couchdb/manifests/mirror.pp
new file mode 100644
index 00000000..abe35c4c
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/mirror.pp
@@ -0,0 +1,77 @@
+class site_couchdb::mirror {
+
+ Class['site_couchdb::add_users']
+ -> Class['site_couchdb::mirror']
+
+ class { 'couchdb':
+ admin_pw => $site_couchdb::couchdb_admin_pw,
+ admin_salt => $site_couchdb::couchdb_admin_salt,
+ chttpd_bind_address => '127.0.0.1'
+ }
+
+ $masters = $site_couchdb::couchdb_config['replication']['masters']
+ $master_node_names = keys($site_couchdb::couchdb_config['replication']['masters'])
+ $master_node = $masters[$master_node_names[0]]
+ $user = $site_couchdb::couchdb_replication_user
+ $password = $site_couchdb::couchdb_replication_pw
+ $from_host = $master_node['domain_internal']
+ $from_port = $master_node['couch_port']
+ $from = "http://${user}:${password}@${from_host}:${from_port}"
+
+ notice("mirror from: ${from}")
+
+ ### customer database
+ couchdb::mirror_db { 'customers':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## identities database
+ couchdb::mirror_db { 'identities':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## keycache database
+ couchdb::mirror_db { 'keycache':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## sessions database
+ couchdb::mirror_db { 'sessions':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## shared database
+ couchdb::mirror_db { 'shared':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tickets database
+ couchdb::mirror_db { 'tickets':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tokens database
+ couchdb::mirror_db { 'tokens':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## users database
+ couchdb::mirror_db { 'users':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## messages db
+ couchdb::mirror_db { 'messages':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+}
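
Because Puppet defined types accept an array of titles, the nine couchdb::mirror_db resources above could also be collapsed into a single declaration with identical parameters; a sketch, not part of the commit:

    couchdb::mirror_db { [
      'customers', 'identities', 'keycache', 'sessions', 'shared',
      'tickets', 'tokens', 'users', 'messages' ]:
        from    => $from,
        require => Couchdb::Query::Setup['localhost'],
    }
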
diff --git a/puppet/modules/site_couchdb/manifests/setup.pp b/puppet/modules/site_couchdb/manifests/setup.pp
new file mode 100644
index 00000000..69bd1c6a
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/setup.pp
@@ -0,0 +1,46 @@
+#
+# An initial setup class. All the other classes depend on this
+#
+class site_couchdb::setup {
+
+ # ensure that we don't have leftovers from previous installations
+ # where we installed the cloudant bigcouch package
+ # https://leap.se/code/issues/4971
+ class { 'couchdb::bigcouch::package::cloudant':
+ ensure => absent
+ }
+
+ $user = $site_couchdb::couchdb_admin_user
+
+ # /etc/couchdb/couchdb-admin.netrc is deployed by couchdb::query::setup
+ # we symlink to couchdb.netrc for puppet commands.
+ # we symlink this to /root/.netrc for couchdb_scripts (eg. backup)
+ # and makes life easier for the admin (i.e. using curl/wget without
+ # passing credentials)
+ file {
+ '/etc/couchdb/couchdb.netrc':
+ ensure => link,
+ target => "/etc/couchdb/couchdb-${user}.netrc";
+
+ '/root/.netrc':
+ ensure => link,
+ target => '/etc/couchdb/couchdb.netrc';
+
+ '/srv/leap/couchdb':
+ ensure => directory
+ }
+
+ couchdb::query::setup { 'localhost':
+ user => $user,
+ pw => $site_couchdb::couchdb_admin_pw,
+ }
+
+ vcsrepo { '/srv/leap/couchdb/scripts':
+ ensure => present,
+ provider => git,
+ source => 'https://leap.se/git/couchdb_scripts',
+ revision => 'origin/master',
+ require => File['/srv/leap/couchdb']
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/stunnel.pp b/puppet/modules/site_couchdb/manifests/stunnel.pp
deleted file mode 100644
index 91f1e3aa..00000000
--- a/puppet/modules/site_couchdb/manifests/stunnel.pp
+++ /dev/null
@@ -1,112 +0,0 @@
-class site_couchdb::stunnel {
-
- $stunnel = hiera('stunnel')
-
- $couch_server = $stunnel['couch_server']
- $couch_server_accept = $couch_server['accept']
- $couch_server_connect = $couch_server['connect']
-
- # Erlang Port Mapper Daemon (epmd) stunnel server/clients
- $epmd_server = $stunnel['epmd_server']
- $epmd_server_accept = $epmd_server['accept']
- $epmd_server_connect = $epmd_server['connect']
- $epmd_clients = $stunnel['epmd_clients']
-
- # Erlang Distributed Node Protocol (ednp) stunnel server/clients
- $ednp_server = $stunnel['ednp_server']
- $ednp_server_accept = $ednp_server['accept']
- $ednp_server_connect = $ednp_server['connect']
- $ednp_clients = $stunnel['ednp_clients']
-
-
-
- include site_config::x509::cert
- include site_config::x509::key
- include site_config::x509::ca
-
- include x509::variables
- $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
- $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
- $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
-
- # setup a stunnel server for the webapp to connect to couchdb
- stunnel::service { 'couch_server':
- accept => $couch_server_accept,
- connect => $couch_server_connect,
- client => false,
- cafile => $ca_path,
- key => $key_path,
- cert => $cert_path,
- verify => '2',
- pid => '/var/run/stunnel4/couchserver.pid',
- rndfile => '/var/lib/stunnel4/.rnd',
- debuglevel => '4',
- require => [
- Class['Site_config::X509::Key'],
- Class['Site_config::X509::Cert'],
- Class['Site_config::X509::Ca'] ];
- }
-
-
- # setup stunnel server for Erlang Port Mapper Daemon (epmd), necessary for
- # bigcouch clustering between each bigcouchdb node
- stunnel::service { 'epmd_server':
- accept => $epmd_server_accept,
- connect => $epmd_server_connect,
- client => false,
- cafile => $ca_path,
- key => $key_path,
- cert => $cert_path,
- verify => '2',
- pid => '/var/run/stunnel4/epmd_server.pid',
- rndfile => '/var/lib/stunnel4/.rnd',
- debuglevel => '4',
- require => [
- Class['Site_config::X509::Key'],
- Class['Site_config::X509::Cert'],
- Class['Site_config::X509::Ca'] ];
- }
-
- # setup stunnel clients for Erlang Port Mapper Daemon (epmd) to connect
- # to the above epmd stunnel server.
- $epmd_client_defaults = {
- 'client' => true,
- 'cafile' => $ca_path,
- 'key' => $key_path,
- 'cert' => $cert_path,
- }
-
- create_resources(site_stunnel::clients, $epmd_clients, $epmd_client_defaults)
-
- # setup stunnel server for Erlang Distributed Node Protocol (ednp), necessary
- # for bigcouch clustering between each bigcouchdb node
- stunnel::service { 'ednp_server':
- accept => $ednp_server_accept,
- connect => $ednp_server_connect,
- client => false,
- cafile => $ca_path,
- key => $key_path,
- cert => $cert_path,
- verify => '2',
- pid => '/var/run/stunnel4/ednp_server.pid',
- rndfile => '/var/lib/stunnel4/.rnd',
- debuglevel => '4',
- require => [
- Class['Site_config::X509::Key'],
- Class['Site_config::X509::Cert'],
- Class['Site_config::X509::Ca'] ];
- }
-
- # setup stunnel clients for Erlang Distributed Node Protocol (ednp) to connect
- # to the above ednp stunnel server.
- $ednp_client_defaults = {
- 'client' => true,
- 'cafile' => $ca_path,
- 'key' => $key_path,
- 'cert' => $cert_path,
- }
-
- create_resources(site_stunnel::clients, $ednp_clients, $ednp_client_defaults)
-
- include site_check_mk::agent::stunnel
-}
diff --git a/puppet/modules/site_haproxy/manifests/init.pp b/puppet/modules/site_haproxy/manifests/init.pp
index 6bcf3f5c..b28ce80e 100644
--- a/puppet/modules/site_haproxy/manifests/init.pp
+++ b/puppet/modules/site_haproxy/manifests/init.pp
@@ -2,25 +2,25 @@ class site_haproxy {
$haproxy = hiera('haproxy')
class { 'haproxy':
- enable => true,
- manage_service => true,
- global_options => {
- 'log' => '127.0.0.1 local0',
- 'maxconn' => '4096',
- 'stats' => 'socket /var/run/haproxy.sock user haproxy group haproxy',
- 'chroot' => '/usr/share/haproxy',
- 'user' => 'haproxy',
- 'group' => 'haproxy',
- 'daemon' => ''
- },
- defaults_options => {
- 'log' => 'global',
- 'retries' => '3',
- 'option' => 'redispatch',
- 'timeout connect' => '4000',
- 'timeout client' => '20000',
- 'timeout server' => '20000'
- }
+ enable => true,
+ manage_service => true,
+ global_options => {
+ 'log' => '127.0.0.1 local0',
+ 'maxconn' => '4096',
+ 'stats' => 'socket /var/run/haproxy.sock user haproxy group haproxy',
+ 'chroot' => '/usr/share/haproxy',
+ 'user' => 'haproxy',
+ 'group' => 'haproxy',
+ 'daemon' => ''
+ },
+ defaults_options => {
+ 'log' => 'global',
+ 'retries' => '3',
+ 'option' => 'redispatch',
+ 'timeout connect' => '4000',
+ 'timeout client' => '20000',
+ 'timeout server' => '20000'
+ }
}
# monitor haproxy
@@ -34,8 +34,8 @@ class site_haproxy {
concat::fragment { 'leap_haproxy_webapp_couchdb':
target => '/etc/haproxy/haproxy.cfg',
order => '20',
- content => template('site_haproxy/haproxy_couchdb.cfg.erb'),
+ content => template('site_haproxy/haproxy.cfg.erb'),
}
-
+
include site_check_mk::agent::haproxy
}
diff --git a/puppet/modules/site_haproxy/templates/couch.erb b/puppet/modules/site_haproxy/templates/couch.erb
new file mode 100644
index 00000000..f42e8368
--- /dev/null
+++ b/puppet/modules/site_haproxy/templates/couch.erb
@@ -0,0 +1,32 @@
+frontend couch
+ bind localhost:<%= @listen_port %>
+ mode http
+ option httplog
+ option dontlognull
+ option http-server-close # use client keep-alive, but close server connection.
+ use_backend couch_read if METH_GET
+ default_backend couch_write
+
+backend couch_write
+ mode http
+ balance roundrobin
+ option httpchk GET / # health check using simple get to root
+ option allbackups # balance among all backups, not just one.
+ default-server inter 3000 fastinter 1000 downinter 1000 rise 2 fall 1
+<%- @servers.sort.each do |name,server| -%>
+<%- next unless server['writable'] -%>
+ # <%=name%>
+ server couchdb_<%=server['port']%> <%=server['host']%>:<%=server['port']%> <%='backup' if server['backup']%> weight <%=server['weight']%> check
+<%- end -%>
+
+backend couch_read
+ mode http
+ balance roundrobin
+ option httpchk GET / # health check using simple get to root
+ option allbackups # balance among all backups, not just one.
+ default-server inter 3000 fastinter 1000 downinter 1000 rise 2 fall 1
+<%- @servers.sort.each do |name,server| -%>
+ # <%=name%>
+ server couchdb_<%=server['port']%> <%=server['host']%>:<%=server['port']%> <%='backup' if server['backup']%> weight <%=server['weight']%> check
+<%- end -%>
+
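A hedged sketch of the data behind the template above: couch.erb is rendered with an options hash whose 'servers' entry it iterates as @servers. Host names, ports and weights below are illustrative only; entries flagged writable are added to couch_write, every entry is added to couch_read, and backup marks a server that is only balanced to when the primaries are down:

$servers = {
  'couch1' => { 'host' => '127.0.0.1', 'port' => 15984, 'weight' => 10, 'writable' => true },
  'couch2' => { 'host' => '127.0.0.1', 'port' => 15986, 'weight' => 10, 'backup'   => true },
}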
diff --git a/puppet/modules/site_haproxy/templates/haproxy.cfg.erb b/puppet/modules/site_haproxy/templates/haproxy.cfg.erb
new file mode 100644
index 00000000..8311b1a5
--- /dev/null
+++ b/puppet/modules/site_haproxy/templates/haproxy.cfg.erb
@@ -0,0 +1,11 @@
+<%- @haproxy.each do |frontend, options| -%>
+<%- if options['servers'] -%>
+
+##
+## <%= frontend %>
+##
+
+<%= scope.function_templatewlv(["site_haproxy/#{frontend}.erb", options]) %>
+<%- end -%>
+<%- end -%>
+
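For orientation, a hedged sketch of how this wrapper is fed: @haproxy is the hiera 'haproxy' hash, keyed by frontend name, and each frontend that carries a 'servers' key is rendered through its own site_haproxy/<frontend>.erb (such as couch.erb above) via templatewlv, which exposes the options hash as template-local variables. The Puppet-side equivalent of one iteration would look roughly like this (the 'couch' key is only an example):

$options  = $haproxy['couch']
$fragment = templatewlv('site_haproxy/couch.erb', $options)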
diff --git a/puppet/modules/site_haproxy/templates/haproxy_couchdb.cfg.erb b/puppet/modules/site_haproxy/templates/haproxy_couchdb.cfg.erb
deleted file mode 100644
index 1fa01b96..00000000
--- a/puppet/modules/site_haproxy/templates/haproxy_couchdb.cfg.erb
+++ /dev/null
@@ -1,23 +0,0 @@
-
-listen bigcouch-in
- mode http
- balance roundrobin
- option httplog
- option dontlognull
- option httpchk GET / # health check using simple get to root
- option http-server-close # use client keep-alive, but close server connection.
- option allbackups # balance among all backups, not just one.
-
- bind localhost:4096
-
- default-server inter 3000 fastinter 1000 downinter 1000 rise 2 fall 1
-
-<%- if @haproxy['servers'] -%>
-<%- @haproxy['servers'].sort.each do |name,server| -%>
-<%- backup = server['backup'] ? 'backup' : '' -%>
- # <%=name%>
- server couchdb_<%=server['port']%> <%=server['host']%>:<%=server['port']%> <%=backup%> weight <%=server['weight']%> check
-
-<%- end -%>
-<%- end -%>
-
diff --git a/puppet/modules/site_mx/manifests/couchdb.pp b/puppet/modules/site_mx/manifests/couchdb.pp
deleted file mode 100644
index b1f3bd02..00000000
--- a/puppet/modules/site_mx/manifests/couchdb.pp
+++ /dev/null
@@ -1,23 +0,0 @@
-class site_mx::couchdb {
-
- $stunnel = hiera('stunnel')
- $couch_client = $stunnel['couch_client']
- $couch_client_connect = $couch_client['connect']
-
- include x509::variables
- $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
- $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
- $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
-
- include site_stunnel
-
- $couchdb_stunnel_client_defaults = {
- 'connect_port' => $couch_client_connect,
- 'client' => true,
- 'cafile' => $ca_path,
- 'key' => $key_path,
- 'cert' => $cert_path,
- }
-
- create_resources(site_stunnel::clients, $couch_client, $couchdb_stunnel_client_defaults)
-}
diff --git a/puppet/modules/site_mx/manifests/init.pp b/puppet/modules/site_mx/manifests/init.pp
index c3d38a46..91014ed6 100644
--- a/puppet/modules/site_mx/manifests/init.pp
+++ b/puppet/modules/site_mx/manifests/init.pp
@@ -8,12 +8,12 @@ class site_mx {
include site_config::x509::client_ca::ca
include site_config::x509::client_ca::key
+ include site_stunnel
include site_postfix::mx
include site_haproxy
include site_shorewall::mx
include site_shorewall::service::smtp
- include site_mx::couchdb
include leap_mx
include site_check_mk::agent::mx
}
diff --git a/puppet/modules/site_nagios/files/plugins/check_last_regex_in_log b/puppet/modules/site_nagios/files/plugins/check_last_regex_in_log
new file mode 100755
index 00000000..cf7c03e5
--- /dev/null
+++ b/puppet/modules/site_nagios/files/plugins/check_last_regex_in_log
@@ -0,0 +1,85 @@
+#!/bin/sh
+#
+# depends on nagios-plugins-common for /usr/lib/nagios/plugins/utils.sh
+# this package is installed using leap_platform by the Site_check_mk::Agent::Mrpe
+# class
+
+set -e
+
+usage()
+{
+cat << EOF
+usage: $0 -w <sec> -c <sec> -r <regexp> -f <filename>
+
+OPTIONS:
+ -h Show this message
+ -r <regex> regex to grep for
+ -f <file> logfile to search in
+ -w <sec> warning state after X seconds
+ -c <sec> critical state after x seconds
+
+example: $0 -f /var/log/syslog -r 'tapicero' -w 300 -c 600
+EOF
+}
+
+
+. /usr/lib/nagios/plugins/utils.sh
+
+
+warn=0
+crit=0
+log=''
+regex=''
+
+set -- $(getopt hr:f:w:c: "$@")
+while [ $# -gt 0 ]
+do
+ case "$1" in
+ (-h) usage; exit 0 ;;
+ (-f) log="$2"; shift;;
+ (-r) regex="$2"; shift;;
+ (-w) warn="$2"; shift;;
+ (-c) crit="$2"; shift;;
+ (--) shift; break;;
+ (-*) echo "$0: error - unrecognized option $1" 1>&2; exit 1;;
+ (*) break;;
+ esac
+ shift
+done
+
+[ $warn -eq 0 -o $crit -eq 0 -o -z "$regex" -o -z "$log" ] && ( usage; exit $STATE_UNKNOWN)
+[ -f "$log" ] || (echo "$log doesn't exist"; exit $STATE_UNKNOWN)
+
+lastmsg=$(tac "$log" | grep -i "$regex" | head -1 | cut -d' ' -f 1-3)
+
+if [ -z "$lastmsg" ]
+then
+ summary="\"$regex\" in $log was not found"
+ state=$STATE_CRITICAL
+ state_text='CRITICAL'
+ diff_sec=0
+else
+ lastmsg_sec=$(date '+%s' -d "$lastmsg")
+ now_sec=$(date '+%s')
+
+ diff_sec=$(($now_sec - $lastmsg_sec))
+
+ if [ $diff_sec -lt $warn ]; then
+ state=$STATE_OK
+ state_text='OK'
+ elif [ $diff_sec -lt $crit ]; then
+ state=$STATE_WARNING
+ state_text='WARNING'
+ else
+ state=$STATE_CRITICAL
+ state_text='CRITICAL'
+ fi
+
+ summary="Last occurrence of \"$regex\" in $log was $diff_sec sec ago"
+fi
+
+# check_mk_agent output
+# echo "$state Tapicero_Heartbeat sec=$diff_sec;$warn;$crit;0; $state_text - $summary"
+
+echo "${state_text}: $summary | seconds=${diff_sec};$warn;$crit;0;"
+exit $state
diff --git a/puppet/modules/site_nagios/manifests/add_host_services.pp b/puppet/modules/site_nagios/manifests/add_host_services.pp
index 279809d1..bd968e6f 100644
--- a/puppet/modules/site_nagios/manifests/add_host_services.pp
+++ b/puppet/modules/site_nagios/manifests/add_host_services.pp
@@ -1,10 +1,13 @@
define site_nagios::add_host_services (
$domain_full_suffix,
$domain_internal,
+ $domain_internal_suffix,
$ip_address,
$services,
$ssh_port,
- $openvpn_gateway_address='' ) {
+ $environment,
+ $openvpn_gateway_address='',
+ ) {
$nagios_hostname = $domain_internal
@@ -16,6 +19,7 @@ define site_nagios::add_host_services (
'hostname' => $nagios_hostname,
'ip_address' => $ip_address,
'openvpn_gw' => $openvpn_gateway_address,
+ 'environment' => $environment
}
$dynamic_parameters = {
'service' => '%s'
diff --git a/puppet/modules/site_nagios/manifests/add_service.pp b/puppet/modules/site_nagios/manifests/add_service.pp
index 1b67d14e..72cd038a 100644
--- a/puppet/modules/site_nagios/manifests/add_service.pp
+++ b/puppet/modules/site_nagios/manifests/add_service.pp
@@ -1,5 +1,5 @@
define site_nagios::add_service (
- $hostname, $ip_address, $openvpn_gw = '', $service) {
+ $hostname, $ip_address, $service, $environment, $openvpn_gw = '') {
$ssh = hiera_hash('ssh')
$ssh_port = $ssh['port']
@@ -9,19 +9,22 @@ define site_nagios::add_service (
nagios_service {
"${name}_ssh":
use => 'generic-service',
- check_command => "check_ssh_port!$ssh_port",
+ check_command => "check_ssh_port!${ssh_port}",
service_description => 'SSH',
- host_name => $hostname;
+ host_name => $hostname,
+ contact_groups => $environment;
"${name}_cert":
use => 'generic-service',
check_command => 'check_https_cert',
service_description => 'Website Certificate',
- host_name => $hostname;
+ host_name => $hostname,
+ contact_groups => $environment;
"${name}_website":
use => 'generic-service',
check_command => 'check_https',
service_description => 'Website',
- host_name => $hostname
+ host_name => $hostname,
+ contact_groups => $environment;
}
}
default: {}
diff --git a/puppet/modules/site_nagios/manifests/plugins.pp b/puppet/modules/site_nagios/manifests/plugins.pp
new file mode 100644
index 00000000..90a01cfb
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/plugins.pp
@@ -0,0 +1,16 @@
+# Deploy generic plugins useful to all nodes.
+# nagios::plugin cannot be used to deploy a plugin here,
+# because it fails with:
+# Could not find dependency Package[nagios-plugins] …
+# at /srv/leap/puppet/modules/nagios/manifests/plugin.pp:18
+class site_nagios::plugins {
+
+ file { [
+ '/usr/local/lib', '/usr/local/lib/nagios',
+ '/usr/local/lib/nagios/plugins' ]:
+ ensure => directory;
+ '/usr/local/lib/nagios/plugins/check_last_regex_in_log':
+ source => 'puppet:///modules/site_nagios/plugins/check_last_regex_in_log',
+ mode => '0755';
+ }
+}
diff --git a/puppet/modules/site_nagios/manifests/server.pp b/puppet/modules/site_nagios/manifests/server.pp
index 85443917..092ca503 100644
--- a/puppet/modules/site_nagios/manifests/server.pp
+++ b/puppet/modules/site_nagios/manifests/server.pp
@@ -3,13 +3,19 @@ class site_nagios::server inherits nagios::base {
# First, purge old nagios config (see #1467)
class { 'site_nagios::server::purge': }
- $nagios_hiera = hiera('nagios')
- $nagiosadmin_pw = htpasswd_sha1($nagios_hiera['nagiosadmin_pw'])
- $nagios_hosts = $nagios_hiera['hosts']
+ $nagios_hiera = hiera('nagios')
+ $nagiosadmin_pw = htpasswd_sha1($nagios_hiera['nagiosadmin_pw'])
+ $nagios_hosts = $nagios_hiera['hosts']
+ $nagios_contacts = hiera('contacts')
+ $environment = $nagios_hiera['environments']
- include nagios::defaults
include nagios::base
- class {'nagios':
+ include nagios::defaults::commands
+ include nagios::defaults::templates
+ include nagios::defaults::timeperiods
+ include nagios::defaults::plugins
+
+ class { 'nagios':
# don't manage apache class from nagios, cause we already include
# it in site_apache::common
httpd => 'absent',
@@ -46,6 +52,7 @@ class site_nagios::server inherits nagios::base {
include site_nagios::server::apache
include site_check_mk::server
include site_shorewall::monitor
+ include site_nagios::server::icli
augeas {
'logrotate_nagios':
@@ -55,4 +62,8 @@ class site_nagios::server inherits nagios::base {
'set missingok missingok', 'set ifempty notifempty',
'set copytruncate copytruncate' ]
}
+
+ create_resources ( site_nagios::server::hostgroup, $environment )
+ create_resources ( site_nagios::server::contactgroup, $environment )
+ create_resources ( site_nagios::server::add_contacts, $environment )
}
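The three create_resources calls above all expand the same 'environments' hash from the nagios hiera entry; each key produces a hostgroup, a contactgroup and a nagios contact of the same name, and the defines that follow only consume contact_emails. A minimal sketch with hypothetical environment names and addresses:

$environment = {
  'production' => { 'contact_emails' => [ 'ops@example.org' ] },
  'staging'    => { 'contact_emails' => [ 'dev@example.org' ] },
}

create_resources(site_nagios::server::hostgroup, $environment)
create_resources(site_nagios::server::contactgroup, $environment)
create_resources(site_nagios::server::add_contacts, $environment)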
diff --git a/puppet/modules/site_nagios/manifests/server/add_contacts.pp b/puppet/modules/site_nagios/manifests/server/add_contacts.pp
new file mode 100644
index 00000000..db507abf
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/add_contacts.pp
@@ -0,0 +1,16 @@
+define site_nagios::server::add_contacts ($contact_emails) {
+
+ $environment = $name
+
+ nagios_contact {
+ $environment:
+ alias => $environment,
+ service_notification_period => '24x7',
+ host_notification_period => '24x7',
+ service_notification_options => 'w,u,c,r',
+ host_notification_options => 'd,r',
+ service_notification_commands => 'notify-service-by-email',
+ host_notification_commands => 'notify-host-by-email',
+ email => join($contact_emails, ', ')
+ }
+}
diff --git a/puppet/modules/site_nagios/manifests/server/contactgroup.pp b/puppet/modules/site_nagios/manifests/server/contactgroup.pp
new file mode 100644
index 00000000..188c54f1
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/contactgroup.pp
@@ -0,0 +1,6 @@
+define site_nagios::server::contactgroup ($contact_emails) {
+
+ nagios_contactgroup { $name:
+ members => $name
+ }
+}
diff --git a/puppet/modules/site_nagios/manifests/server/hostgroup.pp b/puppet/modules/site_nagios/manifests/server/hostgroup.pp
new file mode 100644
index 00000000..6f85ca6d
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/hostgroup.pp
@@ -0,0 +1,3 @@
+define site_nagios::server::hostgroup ($contact_emails) {
+ nagios_hostgroup { $name: }
+}
diff --git a/puppet/modules/site_nagios/manifests/server/icli.pp b/puppet/modules/site_nagios/manifests/server/icli.pp
new file mode 100644
index 00000000..26fba725
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/icli.pp
@@ -0,0 +1,26 @@
+# Install icli package and configure ncli aliases
+class site_nagios::server::icli {
+ $nagios_hiera = hiera('nagios')
+ $environments = $nagios_hiera['environments']
+
+ package { 'icli':
+ ensure => installed;
+ }
+
+ file { '/root/.bashrc':
+ ensure => present;
+ }
+
+ file_line { 'icli aliases':
+ path => '/root/.bashrc',
+ line => 'source /root/.icli_aliases';
+ }
+
+ file { '/root/.icli_aliases':
+ content => template("${module_name}/icli_aliases.erb"),
+ mode => '0644',
+ owner => root,
+ group => 0,
+ require => Package['icli'];
+ }
+} \ No newline at end of file
diff --git a/puppet/modules/site_nagios/templates/icli_aliases.erb b/puppet/modules/site_nagios/templates/icli_aliases.erb
new file mode 100644
index 00000000..f1428f9e
--- /dev/null
+++ b/puppet/modules/site_nagios/templates/icli_aliases.erb
@@ -0,0 +1,7 @@
+alias ncli='icli -c /var/cache/nagios3/objects.cache -f /var/cache/nagios3/status.dat -F /var/lib/nagios3/rw/nagios.cmd'
+alias ncli_problems='ncli -z '!o,!A''
+
+<% @environments.keys.sort.each do |env_name| %>
+alias ncli_<%= env_name %>='ncli -z '!o,!A' -g <%= env_name %>'
+alias ncli_<%= env_name %>_recheck='ncli -s Check_MK -g <%= env_name %> -r'
+<% end -%> \ No newline at end of file
diff --git a/puppet/modules/site_obfsproxy/README b/puppet/modules/site_obfsproxy/README
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/puppet/modules/site_obfsproxy/README
diff --git a/puppet/modules/site_obfsproxy/manifests/init.pp b/puppet/modules/site_obfsproxy/manifests/init.pp
new file mode 100644
index 00000000..6275ebee
--- /dev/null
+++ b/puppet/modules/site_obfsproxy/manifests/init.pp
@@ -0,0 +1,39 @@
+class site_obfsproxy {
+ tag 'leap_service'
+ Class['site_config::default'] -> Class['site_obfsproxy']
+
+ $transport = 'scramblesuit'
+
+ $obfsproxy = hiera('obfsproxy')
+ $scramblesuit = $obfsproxy['scramblesuit']
+ $scram_pass = $scramblesuit['password']
+ $scram_port = $scramblesuit['port']
+ $dest_ip = $obfsproxy['gateway_address']
+ $dest_port = '443'
+
+ if member($::services, 'openvpn') {
+ $openvpn = hiera('openvpn')
+ $bind_address = $openvpn['gateway_address']
+ }
+ elsif member($::services, 'obfsproxy') {
+ $bind_address = hiera('ip_address')
+ }
+
+ include site_apt::preferences::twisted
+ include site_apt::preferences::obfsproxy
+
+ class { 'obfsproxy':
+ transport => $transport,
+ bind_address => $bind_address,
+ port => $scram_port,
+ param => $scram_pass,
+ dest_ip => $dest_ip,
+ dest_port => $dest_port,
+ }
+
+ include site_shorewall::obfsproxy
+
+}
+
+
+
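A minimal sketch of the 'obfsproxy' hiera entry this class reads; all values are placeholders. scramblesuit carries the shared password and the port obfsproxy listens on, and gateway_address is where the deobfuscated traffic is forwarded (always to port 443, per $dest_port above):

$obfsproxy = {
  'scramblesuit'    => {
    'password' => 'REPLACE_WITH_SHARED_SECRET',
    'port'     => 4430,
  },
  'gateway_address' => '198.51.100.10',
}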
diff --git a/puppet/modules/site_openvpn/manifests/init.pp b/puppet/modules/site_openvpn/manifests/init.pp
index b6331f12..d6f9150b 100644
--- a/puppet/modules/site_openvpn/manifests/init.pp
+++ b/puppet/modules/site_openvpn/manifests/init.pp
@@ -148,13 +148,17 @@ class site_openvpn {
exec { 'restart_openvpn':
command => '/etc/init.d/openvpn restart',
refreshonly => true,
- subscribe => File['/etc/openvpn'],
+ subscribe => [
+ File['/etc/openvpn'],
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca_bundle'] ],
require => [
- Package['openvpn'],
- File['/etc/openvpn'],
- Class['Site_config::X509::Key'],
- Class['Site_config::X509::Cert'],
- Class['Site_config::X509::Ca_bundle'] ];
+ Package['openvpn'],
+ File['/etc/openvpn'],
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca_bundle'] ];
}
cron { 'add_gateway_ips.sh':
diff --git a/puppet/modules/site_openvpn/manifests/server_config.pp b/puppet/modules/site_openvpn/manifests/server_config.pp
index 97cf2842..466f6d00 100644
--- a/puppet/modules/site_openvpn/manifests/server_config.pp
+++ b/puppet/modules/site_openvpn/manifests/server_config.pp
@@ -85,6 +85,18 @@ define site_openvpn::server_config(
key => 'tcp-nodelay',
server => $openvpn_configname;
}
+ } elsif $proto == 'udp' {
+ if $config['fragment'] != 1500 {
+ openvpn::option {
+ "fragment ${openvpn_configname}":
+ key => 'fragment',
+ value => $config['fragment'],
+ server => $openvpn_configname;
+ "mssfix ${openvpn_configname}":
+ key => 'mssfix',
+ server => $openvpn_configname;
+ }
+ }
}
openvpn::option {
diff --git a/puppet/modules/site_postfix/manifests/mx.pp b/puppet/modules/site_postfix/manifests/mx.pp
index bdfee665..81f10b77 100644
--- a/puppet/modules/site_postfix/manifests/mx.pp
+++ b/puppet/modules/site_postfix/manifests/mx.pp
@@ -1,12 +1,12 @@
class site_postfix::mx {
- $domain_hash = hiera ('domain')
+ $domain_hash = hiera('domain')
$domain = $domain_hash['full_suffix']
$host_domain = $domain_hash['full']
$cert_name = hiera('name')
$mynetworks = join(hiera('mynetworks'), ' ')
- $root_mail_recipient = hiera ('contacts')
+ $root_mail_recipient = hiera('contacts')
$postfix_smtp_listen = 'all'
include site_config::x509::cert
diff --git a/puppet/modules/site_postfix/manifests/mx/smtp_tls.pp b/puppet/modules/site_postfix/manifests/mx/smtp_tls.pp
index d9b59f40..d56f6b54 100644
--- a/puppet/modules/site_postfix/manifests/mx/smtp_tls.pp
+++ b/puppet/modules/site_postfix/manifests/mx/smtp_tls.pp
@@ -1,5 +1,6 @@
class site_postfix::mx::smtp_tls {
+ include site_config::x509::ca
include x509::variables
$ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
$cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
diff --git a/puppet/modules/site_shorewall/manifests/couchdb.pp b/puppet/modules/site_shorewall/manifests/couchdb.pp
deleted file mode 100644
index 73bed62b..00000000
--- a/puppet/modules/site_shorewall/manifests/couchdb.pp
+++ /dev/null
@@ -1,24 +0,0 @@
-class site_shorewall::couchdb {
-
- include site_shorewall::defaults
-
- $stunnel = hiera('stunnel')
- $couch_server = $stunnel['couch_server']
- $couch_stunnel_port = $couch_server['accept']
-
- # define macro for incoming services
- file { '/etc/shorewall/macro.leap_couchdb':
- content => "PARAM - - tcp ${couch_stunnel_port}",
- notify => Service['shorewall'],
- require => Package['shorewall']
- }
-
- shorewall::rule {
- 'net2fw-couchdb':
- source => 'net',
- destination => '$FW',
- action => 'leap_couchdb(ACCEPT)',
- order => 200;
- }
-
-}
diff --git a/puppet/modules/site_shorewall/manifests/couchdb/bigcouch.pp b/puppet/modules/site_shorewall/manifests/couchdb/bigcouch.pp
deleted file mode 100644
index 20740650..00000000
--- a/puppet/modules/site_shorewall/manifests/couchdb/bigcouch.pp
+++ /dev/null
@@ -1,51 +0,0 @@
-class site_shorewall::couchdb::bigcouch {
-
- include site_shorewall::defaults
-
- $stunnel = hiera('stunnel')
-
- # Erlang Port Mapper Daemon (epmd) stunnel server/clients
- $epmd_clients = $stunnel['epmd_clients']
- $epmd_server = $stunnel['epmd_server']
- $epmd_server_port = $epmd_server['accept']
- $epmd_server_connect = $epmd_server['connect']
-
- # Erlang Distributed Node Protocol (ednp) stunnel server/clients
- $ednp_clients = $stunnel['ednp_clients']
- $ednp_server = $stunnel['ednp_server']
- $ednp_server_port = $ednp_server['accept']
- $ednp_server_connect = $ednp_server['connect']
-
- # define macro for incoming services
- file { '/etc/shorewall/macro.leap_bigcouch':
- content => "PARAM - - tcp ${epmd_server_port},${ednp_server_port}",
- notify => Service['shorewall'],
- require => Package['shorewall']
- }
-
- shorewall::rule {
- 'net2fw-bigcouch':
- source => 'net',
- destination => '$FW',
- action => 'leap_bigcouch(ACCEPT)',
- order => 300;
- }
-
- # setup DNAT rules for each epmd
- $epmd_shorewall_dnat_defaults = {
- 'source' => '$FW',
- 'proto' => 'tcp',
- 'destinationport' => regsubst($epmd_server_connect, '^([0-9.]+:)([0-9]+)$', '\2')
- }
- create_resources(site_shorewall::couchdb::dnat, $epmd_clients, $epmd_shorewall_dnat_defaults)
-
- # setup DNAT rules for each ednp
- $ednp_shorewall_dnat_defaults = {
- 'source' => '$FW',
- 'proto' => 'tcp',
- 'destinationport' => regsubst($ednp_server_connect, '^([0-9.]+:)([0-9]+)$', '\2')
- }
- create_resources(site_shorewall::couchdb::dnat, $ednp_clients, $ednp_shorewall_dnat_defaults)
-
-}
-
diff --git a/puppet/modules/site_shorewall/manifests/couchdb/dnat.pp b/puppet/modules/site_shorewall/manifests/couchdb/dnat.pp
deleted file mode 100644
index f1bc9acf..00000000
--- a/puppet/modules/site_shorewall/manifests/couchdb/dnat.pp
+++ /dev/null
@@ -1,21 +0,0 @@
-define site_shorewall::couchdb::dnat (
- $source,
- $connect,
- $connect_port,
- $accept_port,
- $proto,
- $destinationport )
-{
-
-
- shorewall::rule {
- "dnat_${name}_${destinationport}":
- action => 'DNAT',
- source => $source,
- destination => "\$FW:127.0.0.1:${accept_port}",
- proto => $proto,
- destinationport => $destinationport,
- originaldest => $connect,
- order => 200
- }
-}
diff --git a/puppet/modules/site_shorewall/manifests/dnat_rule.pp b/puppet/modules/site_shorewall/manifests/dnat_rule.pp
index aa298408..f9fbe950 100644
--- a/puppet/modules/site_shorewall/manifests/dnat_rule.pp
+++ b/puppet/modules/site_shorewall/manifests/dnat_rule.pp
@@ -4,41 +4,45 @@ define site_shorewall::dnat_rule {
if $port != 1194 {
if $site_openvpn::openvpn_allow_unlimited {
shorewall::rule {
- "dnat_tcp_port_$port":
+ "dnat_tcp_port_${port}":
action => 'DNAT',
source => 'net',
destination => "\$FW:${site_openvpn::unlimited_gateway_address}:1194",
proto => 'tcp',
destinationport => $port,
+ originaldest => $site_openvpn::unlimited_gateway_address,
order => 100;
}
shorewall::rule {
- "dnat_udp_port_$port":
+ "dnat_udp_port_${port}":
action => 'DNAT',
source => 'net',
destination => "\$FW:${site_openvpn::unlimited_gateway_address}:1194",
proto => 'udp',
destinationport => $port,
+ originaldest => $site_openvpn::unlimited_gateway_address,
order => 100;
}
}
if $site_openvpn::openvpn_allow_limited {
shorewall::rule {
- "dnat_free_tcp_port_$port":
+ "dnat_free_tcp_port_${port}":
action => 'DNAT',
source => 'net',
destination => "\$FW:${site_openvpn::limited_gateway_address}:1194",
proto => 'tcp',
destinationport => $port,
+ originaldest => $site_openvpn::unlimited_gateway_address,
order => 100;
}
shorewall::rule {
- "dnat_free_udp_port_$port":
+ "dnat_free_udp_port_${port}":
action => 'DNAT',
source => 'net',
destination => "\$FW:${site_openvpn::limited_gateway_address}:1194",
proto => 'udp',
destinationport => $port,
+ originaldest => $site_openvpn::unlimited_gateway_address,
order => 100;
}
}
diff --git a/puppet/modules/site_shorewall/manifests/obfsproxy.pp b/puppet/modules/site_shorewall/manifests/obfsproxy.pp
new file mode 100644
index 00000000..68fb9b9f
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/obfsproxy.pp
@@ -0,0 +1,24 @@
+class site_shorewall::obfsproxy {
+
+ include site_shorewall::defaults
+
+ $obfsproxy = hiera('obfsproxy')
+ $scramblesuit = $obfsproxy['scramblesuit']
+ $scram_port = $scramblesuit['port']
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_obfsproxy':
+    content => "PARAM - - tcp ${scram_port}",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+ shorewall::rule {
+ 'net2fw-obfs':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_obfsproxy(ACCEPT)',
+ order => 200;
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/stunnel/client.pp b/puppet/modules/site_shorewall/manifests/stunnel/client.pp
new file mode 100644
index 00000000..9a89a244
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/stunnel/client.pp
@@ -0,0 +1,40 @@
+#
+# Adds some firewall magic to the stunnel.
+#
+# Using DNAT, this firewall rule allows a locally running program
+# to connect to the normal remote IP and remote port of the
+# service on another machine, while the connection is transparently
+# routed through the locally running stunnel client.
+#
+# The network looks like this:
+#
+# From the client's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# consumer app -> localhost:accept_port -> connect:connect_port -> localhost:original_port
+#
+# From the server's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# ?? -> *:accept_port -> localhost:connect_port -> service
+#
+
+define site_shorewall::stunnel::client(
+ $accept_port,
+ $connect,
+ $connect_port,
+ $original_port) {
+
+ include site_shorewall::defaults
+
+ shorewall::rule {
+ "stunnel_dnat_${name}":
+ action => 'DNAT',
+ source => '$FW',
+ destination => "\$FW:127.0.0.1:${accept_port}",
+ proto => 'tcp',
+ destinationport => $original_port,
+ originaldest => $connect,
+ order => 200
+ }
+}
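A worked example of the define above, with hypothetical values: an application keeps connecting to couch1.example.org:5984 as if the service were reachable directly, and the DNAT rule silently redirects that connection into the local stunnel client listening on 127.0.0.1:4000, which in turn carries it over TLS to couch1.example.org:15984:

site_shorewall::stunnel::client { 'couch1_5984':
  accept_port   => 4000,
  connect       => 'couch1.example.org',
  connect_port  => 15984,
  original_port => 5984,
}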
diff --git a/puppet/modules/site_shorewall/manifests/stunnel/server.pp b/puppet/modules/site_shorewall/manifests/stunnel/server.pp
new file mode 100644
index 00000000..798cd631
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/stunnel/server.pp
@@ -0,0 +1,22 @@
+#
+# Allow all incoming connections to stunnel server port
+#
+
+define site_shorewall::stunnel::server($port) {
+
+ include site_shorewall::defaults
+
+ file { "/etc/shorewall/macro.stunnel_server_${name}":
+ content => "PARAM - - tcp ${port}",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+ shorewall::rule {
+ "net2fw-stunnel-server-${name}":
+ source => 'net',
+ destination => '$FW',
+ action => "stunnel_server_${name}(ACCEPT)",
+ order => 200;
+ }
+
+} \ No newline at end of file
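A short usage sketch, assuming a stunnel server that accepts TLS connections on port 15984 (the port value is hypothetical):

site_shorewall::stunnel::server { 'couch_server':
  port => 15984,
}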
diff --git a/puppet/modules/site_sshd/manifests/init.pp b/puppet/modules/site_sshd/manifests/init.pp
index 9a05b6ed..1da2f1d5 100644
--- a/puppet/modules/site_sshd/manifests/init.pp
+++ b/puppet/modules/site_sshd/manifests/init.pp
@@ -53,7 +53,7 @@ class site_sshd {
##
class { '::sshd':
manage_nagios => false,
- ports => $ssh['port'],
+ ports => [ $ssh['port'] ],
use_pam => 'yes',
hardened_ssl => 'yes',
print_motd => 'no',
diff --git a/puppet/modules/site_sshd/templates/ssh_config.erb b/puppet/modules/site_sshd/templates/ssh_config.erb
index 7e967413..36c0b6d5 100644
--- a/puppet/modules/site_sshd/templates/ssh_config.erb
+++ b/puppet/modules/site_sshd/templates/ssh_config.erb
@@ -21,3 +21,20 @@ Host *
StrictHostKeyChecking no
<% end -%>
+#
+# Tell SSH what host key algorithm we should use. I don't understand why this
+# is needed, since the man page says that "if hostkeys are known for the
+# destination host then [HostKeyAlgorithms default] is modified to prefer
+# their algorithms."
+#
+
+<% @hosts.sort.each do |name, host| -%>
+Host <%= name %> <%= host['domain_full'] %> <%= host['domain_internal'] %> <%= host['ip_address'] %>
+<% if host['host_pub_key'] -%>
+HostKeyAlgorithms <%= host['host_pub_key'].split(" ").first %>
+<% end -%>
+<% if host['port'] -%>
+Port <%= host['port'] %>
+<% end -%>
+
+<% end -%>
diff --git a/puppet/modules/site_static/manifests/init.pp b/puppet/modules/site_static/manifests/init.pp
index 6e347d35..aed9775e 100644
--- a/puppet/modules/site_static/manifests/init.pp
+++ b/puppet/modules/site_static/manifests/init.pp
@@ -1,5 +1,10 @@
class site_static {
tag 'leap_service'
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca_bundle
+
$static = hiera('static')
$domains = $static['domains']
$formats = $static['formats']
@@ -33,7 +38,7 @@ class site_static {
include site_apt::preferences::passenger
class { 'passenger':
use_munin => false,
- require => Class['site_apt::preferences::passenger']
+ require => Class['site_apt::preferences::passenger']
}
}
diff --git a/puppet/modules/site_stunnel/manifests/client.pp b/puppet/modules/site_stunnel/manifests/client.pp
new file mode 100644
index 00000000..3b10ecb8
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/client.pp
@@ -0,0 +1,49 @@
+#
+# Sets up stunnel and firewall configuration for
+# a single stunnel client
+#
+# As a client, we accept connections on localhost,
+# and connect to a remote $connect:$connect_port
+#
+
+define site_stunnel::client (
+ $accept_port,
+ $connect_port,
+ $connect,
+ $original_port,
+ $verify = '2',
+ $pid = $name,
+ $rndfile = '/var/lib/stunnel4/.rnd',
+ $debuglevel = '4' ) {
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include x509::variables
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+ stunnel::service { $name:
+ accept => "127.0.0.1:${accept_port}",
+ connect => "${connect}:${connect_port}",
+ client => true,
+ cafile => $ca_path,
+ key => $key_path,
+ cert => $cert_path,
+ verify => $verify,
+ pid => "/var/run/stunnel4/${pid}.pid",
+ rndfile => $rndfile,
+ debuglevel => $debuglevel,
+ sslversion => 'TLSv1';
+ }
+
+ site_shorewall::stunnel::client { $name:
+ accept_port => $accept_port,
+ connect => $connect,
+ connect_port => $connect_port,
+ original_port => $original_port
+ }
+
+ include site_check_mk::agent::stunnel
+}
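A hedged example of declaring a single client tunnel with this define (names and ports are hypothetical); one declaration yields the stunnel::service, the matching site_shorewall::stunnel::client DNAT rule and the check_mk stunnel check:

site_stunnel::client { 'couch1_15984':
  accept_port   => 4000,
  connect       => 'couch1.example.org',
  connect_port  => 15984,
  original_port => 5984,
}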
diff --git a/puppet/modules/site_stunnel/manifests/clients.pp b/puppet/modules/site_stunnel/manifests/clients.pp
index b75c9ac3..c0958b5f 100644
--- a/puppet/modules/site_stunnel/manifests/clients.pp
+++ b/puppet/modules/site_stunnel/manifests/clients.pp
@@ -1,33 +1,23 @@
-define site_stunnel::clients (
- $accept_port,
- $connect_port,
- $connect,
- $cafile,
- $key,
- $cert,
- $client = true,
- $verify = '2',
- $pid = $name,
- $rndfile = '/var/lib/stunnel4/.rnd',
- $debuglevel = '4' ) {
+#
+# example hiera yaml:
+#
+# stunnel:
+# clients:
+# ednp_clients:
+# thrips_9002:
+# accept_port: 4001
+# connect: thrips.demo.bitmask.i
+# connect_port: 19002
+# epmd_clients:
+# thrips_4369:
+# accept_port: 4000
+# connect: thrips.demo.bitmask.i
+# connect_port: 14369
+#
+# In the above example, this resource definition is called twice, with $name
+# set to 'ednp_clients' and 'epmd_clients' respectively.
+#
- stunnel::service { $name:
- accept => "127.0.0.1:${accept_port}",
- connect => "${connect}:${connect_port}",
- client => $client,
- cafile => $cafile,
- key => $key,
- cert => $cert,
- verify => $verify,
- pid => "/var/run/stunnel4/${pid}.pid",
- rndfile => $rndfile,
- debuglevel => $debuglevel,
- subscribe => [
- Class['Site_config::X509::Key'],
- Class['Site_config::X509::Cert'],
- Class['Site_config::X509::Ca'] ];
-
- }
-
- include site_check_mk::agent::stunnel
+define site_stunnel::clients {
+ create_resources(site_stunnel::client, $site_stunnel::clients[$name])
}
diff --git a/puppet/modules/site_stunnel/manifests/init.pp b/puppet/modules/site_stunnel/manifests/init.pp
index c7d6acc6..2e0cf5b8 100644
--- a/puppet/modules/site_stunnel/manifests/init.pp
+++ b/puppet/modules/site_stunnel/manifests/init.pp
@@ -1,3 +1,8 @@
+#
+# If you need something to happen after stunnel is started,
+# you can depend on Service['stunnel'] or Class['site_stunnel']
+#
+
class site_stunnel {
# include the generic stunnel module
@@ -13,5 +18,17 @@ class site_stunnel {
ensure => absent;
}
}
+
+ $stunnel = hiera('stunnel')
+
+ # add server stunnels
+ create_resources(site_stunnel::servers, $stunnel['servers'])
+
+ # add client stunnels
+ $clients = $stunnel['clients']
+ $client_sections = keys($clients)
+ site_stunnel::clients { $client_sections: }
+
+ include site_stunnel::override_service
}
diff --git a/puppet/modules/site_stunnel/manifests/override_service.pp b/puppet/modules/site_stunnel/manifests/override_service.pp
new file mode 100644
index 00000000..96187048
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/override_service.pp
@@ -0,0 +1,13 @@
+class site_stunnel::override_service inherits stunnel::debian {
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+
+ Service[stunnel] {
+ subscribe => [
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ]
+ }
+}
diff --git a/puppet/modules/site_stunnel/manifests/servers.pp b/puppet/modules/site_stunnel/manifests/servers.pp
new file mode 100644
index 00000000..b6fac319
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/servers.pp
@@ -0,0 +1,47 @@
+#
+# example hiera yaml:
+#
+# stunnel:
+# servers:
+# couch_server:
+# accept_port: 15984
+# connect_port: 5984
+#
+
+define site_stunnel::servers (
+ $accept_port,
+ $connect_port,
+ $verify = '2',
+ $pid = $name,
+ $rndfile = '/var/lib/stunnel4/.rnd',
+ $debuglevel = '4' ) {
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include x509::variables
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+ stunnel::service { $name:
+ accept => $accept_port,
+ connect => "127.0.0.1:${connect_port}",
+ client => false,
+ cafile => $ca_path,
+ key => $key_path,
+ cert => $cert_path,
+ verify => $verify,
+ pid => "/var/run/stunnel4/${pid}.pid",
+ rndfile => '/var/lib/stunnel4/.rnd',
+ debuglevel => $debuglevel,
+ sslversion => 'TLSv1';
+ }
+
+ # allow incoming connections on $accept_port
+ site_shorewall::stunnel::server { $name:
+ port => $accept_port
+ }
+
+ include site_check_mk::agent::stunnel
+}
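A usage sketch matching the hiera example in the header comment above (ports are illustrative): accept TLS connections on 15984 and hand them to a local CouchDB listening on 5984:

site_stunnel::servers { 'couch_server':
  accept_port  => 15984,
  connect_port => 5984,
}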
diff --git a/puppet/modules/site_tor/manifests/init.pp b/puppet/modules/site_tor/manifests/init.pp
index e62cb12d..80ccc5d3 100644
--- a/puppet/modules/site_tor/manifests/init.pp
+++ b/puppet/modules/site_tor/manifests/init.pp
@@ -11,23 +11,31 @@ class site_tor {
$address = hiera('ip_address')
- class { 'tor::daemon': }
+ $openvpn = hiera('openvpn', undef)
+ if $openvpn {
+ $openvpn_ports = $openvpn['ports']
+ }
+ else {
+ $openvpn_ports = []
+ }
+
+ include tor::daemon
tor::daemon::relay { $nickname:
- port => 9001,
- address => $address,
- contact_info => obfuscate_email($contact_emails),
- bandwidth_rate => $bandwidth_rate,
- my_family => $family
+ port => 9001,
+ address => $address,
+ contact_info => obfuscate_email($contact_emails),
+ bandwidth_rate => $bandwidth_rate,
+ my_family => $family
}
if ( $tor_type == 'exit'){
- tor::daemon::directory { $::hostname: port => 80 }
+ # Only enable the daemon directory if the node isn't also a webapp node
+ # or running openvpn on port 80
+ if ! member($::services, 'webapp') and ! member($openvpn_ports, '80') {
+ tor::daemon::directory { $::hostname: port => 80 }
+ }
}
else {
- tor::daemon::directory { $::hostname:
- port => 80,
- port_front_page => '';
- }
include site_tor::disable_exit
}
diff --git a/puppet/modules/site_webapp/manifests/couchdb.pp b/puppet/modules/site_webapp/manifests/couchdb.pp
index ff743fba..3ae4d266 100644
--- a/puppet/modules/site_webapp/manifests/couchdb.pp
+++ b/puppet/modules/site_webapp/manifests/couchdb.pp
@@ -7,10 +7,6 @@ class site_webapp::couchdb {
$couchdb_webapp_user = $webapp['couchdb_webapp_user']['username']
$couchdb_webapp_password = $webapp['couchdb_webapp_user']['password']
- $stunnel = hiera('stunnel')
- $couch_client = $stunnel['couch_client']
- $couch_client_connect = $couch_client['connect']
-
include x509::variables
file {
@@ -37,14 +33,4 @@ class site_webapp::couchdb {
}
include site_stunnel
-
- $couchdb_stunnel_client_defaults = {
- 'connect_port' => $couch_client_connect,
- 'client' => true,
- 'cafile' => "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt",
- 'key' => "${x509::variables::keys}/${site_config::params::cert_name}.key",
- 'cert' => "${x509::variables::certs}/${site_config::params::cert_name}.crt",
- }
-
- create_resources(site_stunnel::clients, $couch_client, $couchdb_stunnel_client_defaults)
}
diff --git a/puppet/modules/site_webapp/manifests/hidden_service.pp b/puppet/modules/site_webapp/manifests/hidden_service.pp
new file mode 100644
index 00000000..16b6e2e7
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/hidden_service.pp
@@ -0,0 +1,43 @@
+class site_webapp::hidden_service {
+ $tor = hiera('tor')
+ $hidden_service = $tor['hidden_service']
+ $tor_domain = "${hidden_service['address']}.onion"
+
+ include site_apache::common
+ include site_apache::module::headers
+ include site_apache::module::alias
+ include site_apache::module::expires
+ include site_apache::module::removeip
+
+ include tor::daemon
+ tor::daemon::hidden_service { 'webapp': ports => '80 127.0.0.1:80' }
+
+ file {
+ '/var/lib/tor/webapp/':
+ ensure => directory,
+ owner => 'debian-tor',
+ group => 'debian-tor',
+ mode => '2700';
+
+ '/var/lib/tor/webapp/private_key':
+ ensure => present,
+ source => "/srv/leap/files/nodes/${::hostname}/tor.key",
+ owner => 'debian-tor',
+ group => 'debian-tor',
+ mode => '0600';
+
+ '/var/lib/tor/webapp/hostname':
+ ensure => present,
+ content => $tor_domain,
+ owner => 'debian-tor',
+ group => 'debian-tor',
+ mode => '0600';
+ }
+
+ apache::vhost::file {
+ 'hidden_service':
+ content => template('site_apache/vhosts.d/hidden_service.conf.erb')
+ }
+
+ include site_shorewall::tor
+} \ No newline at end of file
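A minimal sketch of the 'tor' hiera entry consumed here and in site_webapp::init (the onion address below is a placeholder): 'address' is the hidden service name without the .onion suffix, and 'active' gates inclusion of this class:

$tor = {
  'hidden_service' => {
    'address' => 'replacewithonion',
    'active'  => true,
  },
}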
diff --git a/puppet/modules/site_webapp/manifests/init.pp b/puppet/modules/site_webapp/manifests/init.pp
index 7fdd0c3f..9f97d2c5 100644
--- a/puppet/modules/site_webapp/manifests/init.pp
+++ b/puppet/modules/site_webapp/manifests/init.pp
@@ -10,6 +10,7 @@ class site_webapp {
$webapp = hiera('webapp')
$api_version = $webapp['api_version']
$secret_token = $webapp['secret_token']
+ $tor = hiera('tor', false)
Class['site_config::default'] -> Class['site_webapp']
@@ -53,8 +54,8 @@ class site_webapp {
exec { 'bundler_update':
cwd => '/srv/leap/webapp',
- command => '/bin/bash -c "/usr/bin/bundle check || /usr/bin/bundle install --path vendor/bundle --without test development"',
- unless => '/usr/bin/bundle check',
+ command => '/bin/bash -c "/usr/bin/bundle check --path vendor/bundle || /usr/bin/bundle install --path vendor/bundle --without test development"',
+ unless => '/usr/bin/bundle check --path vendor/bundle',
user => 'leap-webapp',
timeout => 600,
require => [
@@ -157,6 +158,20 @@ class site_webapp {
notify => Service['apache'];
}
+ if $tor {
+ $hidden_service = $tor['hidden_service']
+ if $hidden_service['active'] {
+ include site_webapp::hidden_service
+ }
+ }
+
+
+ # needed for the soledad-sync check which is run on the
+ # webapp node (#6520)
+ package { 'python-u1db':
+ ensure => latest,
+ }
+
include site_shorewall::webapp
include site_check_mk::agent::webapp
}
diff --git a/puppet/modules/site_webapp/templates/config.yml.erb b/puppet/modules/site_webapp/templates/config.yml.erb
index 6461c5e8..0c75f3ca 100644
--- a/puppet/modules/site_webapp/templates/config.yml.erb
+++ b/puppet/modules/site_webapp/templates/config.yml.erb
@@ -18,3 +18,11 @@ production:
minimum_client_version: "<%= @webapp['client_version']['min'] %>"
default_service_level: "<%= @webapp['default_service_level'] %>"
service_levels: <%= @webapp['service_levels'].to_json %>
+ allow_registration: <%= @webapp['allow_registration'].inspect %>
+ handle_blacklist: <%= @webapp['forbidden_usernames'].inspect %>
+<%- if @webapp['engines'] && @webapp['engines'].any? -%>
+ engines:
+<%- @webapp['engines'].each do |engine| -%>
+ - <%= engine %>
+<%- end -%>
+<%- end -%>
diff --git a/puppet/modules/sshd b/puppet/modules/sshd
-Subproject 5c23b33200fc6229ada7f4e13672b5da0d4bdd8
+Subproject 750a497758d94c2f5a6cad23cecc3dbde2d2f92
diff --git a/puppet/modules/stunnel b/puppet/modules/stunnel
-Subproject ec49fd93c2469bc5c13f7e6a7d25468613e1b84
+Subproject b0dc7c84b5f55aec12d7d65da812037913d9dbe
diff --git a/puppet/modules/tapicero/manifests/init.pp b/puppet/modules/tapicero/manifests/init.pp
index f2e723f5..28711b94 100644
--- a/puppet/modules/tapicero/manifests/init.pp
+++ b/puppet/modules/tapicero/manifests/init.pp
@@ -12,6 +12,8 @@ class tapicero {
$couchdb_soledad_user = $couchdb_users['soledad']['username']
$couchdb_leap_mx_user = $couchdb_users['leap_mx']['username']
+ $couchdb_mode = $couchdb['mode']
+ $couchdb_replication = $couchdb['replication']
Class['site_config::default'] -> Class['tapicero']
@@ -93,7 +95,7 @@ class tapicero {
vcsrepo { '/srv/leap/tapicero':
ensure => present,
force => true,
- revision => 'origin/master',
+ revision => 'origin/version/0.6',
provider => git,
source => 'https://leap.se/git/tapicero',
owner => 'tapicero',
diff --git a/puppet/modules/tapicero/templates/tapicero.yaml.erb b/puppet/modules/tapicero/templates/tapicero.yaml.erb
index 8e19b22f..510450ad 100644
--- a/puppet/modules/tapicero/templates/tapicero.yaml.erb
+++ b/puppet/modules/tapicero/templates/tapicero.yaml.erb
@@ -1,3 +1,5 @@
+<%- require 'json' -%>
+
#
# Default configuration options for Tapicero
#
@@ -24,6 +26,10 @@ log_level: info
options:
# prefix for per user databases:
db_prefix: "user-"
+ mode: <%= @couchdb_mode %>
+<%- if @couchdb_replication %>
+ replication: <%= @couchdb_replication.to_json %>
+<%- end -%>
# security settings to be used for the per user databases
security:
@@ -34,9 +40,11 @@ options:
# explicit about this
- <%= @couchdb_admin_user %>
roles: []
- readers:
+ members:
names:
- <%= @couchdb_soledad_user %>
- <%= @couchdb_leap_mx_user %>
- roles: []
+ roles:
+ - replication
+