Diffstat (limited to 'puppet/modules')
-rw-r--r--puppet/modules/clamav/files/01-leap.conf58
-rw-r--r--puppet/modules/clamav/files/clamav-daemon_default8
-rw-r--r--puppet/modules/clamav/files/clamav-milter_default14
-rw-r--r--puppet/modules/clamav/manifests/daemon.pp91
-rw-r--r--puppet/modules/clamav/manifests/freshclam.pp23
-rw-r--r--puppet/modules/clamav/manifests/init.pp8
-rw-r--r--puppet/modules/clamav/manifests/milter.pp50
-rw-r--r--puppet/modules/clamav/manifests/unofficial_sigs.pp23
-rw-r--r--puppet/modules/clamav/templates/clamav-milter.conf.erb28
-rw-r--r--puppet/modules/clamav/templates/local.pdb.erb1
-rw-r--r--puppet/modules/clamav/templates/whitelisted_addresses.erb5
-rw-r--r--puppet/modules/haveged/manifests/init.pp16
-rw-r--r--puppet/modules/journald/manifests/init.pp7
-rw-r--r--puppet/modules/leap/manifests/cli/install.pp46
-rw-r--r--puppet/modules/leap/manifests/init.pp3
-rw-r--r--puppet/modules/leap/manifests/logfile.pp34
-rw-r--r--puppet/modules/leap/templates/rsyslog.erb5
-rw-r--r--puppet/modules/leap_mx/manifests/init.pp119
-rw-r--r--puppet/modules/leap_mx/templates/mx.conf.erb18
-rwxr-xr-xpuppet/modules/obfsproxy/files/obfsproxy_init93
-rw-r--r--puppet/modules/obfsproxy/files/obfsproxy_logrotate14
-rw-r--r--puppet/modules/obfsproxy/manifests/init.pp86
-rw-r--r--puppet/modules/obfsproxy/templates/etc_conf.erb11
-rw-r--r--puppet/modules/opendkim/manifests/init.pp67
-rw-r--r--puppet/modules/opendkim/templates/opendkim.conf45
-rw-r--r--puppet/modules/openvpn/.fixtures.yml6
-rw-r--r--puppet/modules/openvpn/.gitignore3
-rw-r--r--puppet/modules/openvpn/.rvmrc38
-rw-r--r--puppet/modules/openvpn/.travis.yml29
-rw-r--r--puppet/modules/openvpn/Gemfile7
-rw-r--r--puppet/modules/openvpn/Gemfile.lock36
-rw-r--r--puppet/modules/openvpn/LICENSE177
-rw-r--r--puppet/modules/openvpn/Modulefile11
-rw-r--r--puppet/modules/openvpn/Rakefile2
-rw-r--r--puppet/modules/openvpn/Readme.markdown54
-rw-r--r--puppet/modules/openvpn/Vagrantfile42
-rw-r--r--puppet/modules/openvpn/manifests/client.pp187
-rw-r--r--puppet/modules/openvpn/manifests/client_specific_config.pp79
-rw-r--r--puppet/modules/openvpn/manifests/config.pp52
-rw-r--r--puppet/modules/openvpn/manifests/init.pp43
-rw-r--r--puppet/modules/openvpn/manifests/install.pp46
-rw-r--r--puppet/modules/openvpn/manifests/params.pp37
-rw-r--r--puppet/modules/openvpn/manifests/server.pp233
-rw-r--r--puppet/modules/openvpn/manifests/service.pp36
-rw-r--r--puppet/modules/openvpn/spec/classes/openvpn_config_spec.rb15
-rw-r--r--puppet/modules/openvpn/spec/classes/openvpn_init_spec.rb9
-rw-r--r--puppet/modules/openvpn/spec/classes/openvpn_install_spec.rb11
-rw-r--r--puppet/modules/openvpn/spec/classes/openvpn_service_spec.rb13
-rw-r--r--puppet/modules/openvpn/spec/defines/openvpn_client_spec.rb88
-rw-r--r--puppet/modules/openvpn/spec/defines/openvpn_client_specific_config_spec.rb40
-rw-r--r--puppet/modules/openvpn/spec/defines/openvpn_server_spec.rb165
-rw-r--r--puppet/modules/openvpn/spec/spec_helper.rb2
-rw-r--r--puppet/modules/openvpn/templates/client.erb26
-rw-r--r--puppet/modules/openvpn/templates/client_specific_config.erb10
-rw-r--r--puppet/modules/openvpn/templates/etc-default-openvpn.erb20
-rw-r--r--puppet/modules/openvpn/templates/server.erb37
-rw-r--r--puppet/modules/openvpn/templates/vars.erb68
-rw-r--r--puppet/modules/openvpn/vagrant/client.pp5
-rw-r--r--puppet/modules/openvpn/vagrant/server.pp23
-rw-r--r--puppet/modules/postfwd/files/postfwd_default19
-rw-r--r--puppet/modules/postfwd/manifests/init.pp43
-rw-r--r--puppet/modules/postfwd/templates/postfwd.cf.erb28
-rw-r--r--puppet/modules/site_apache/files/conf.d/security55
-rw-r--r--puppet/modules/site_apache/files/include.d/ssl_common.inc7
-rw-r--r--puppet/modules/site_apache/manifests/common.pp30
-rw-r--r--puppet/modules/site_apache/manifests/common/tls.pp6
-rw-r--r--puppet/modules/site_apache/templates/vhosts.d/api.conf.erb48
-rw-r--r--puppet/modules/site_apache/templates/vhosts.d/common.conf.erb76
-rw-r--r--puppet/modules/site_apache/templates/vhosts.d/hidden_service.conf.erb55
-rw-r--r--puppet/modules/site_apt/files/Debian/51unattended-upgrades-leap6
-rw-r--r--puppet/modules/site_apt/files/keys/leap-archive.gpgbin0 -> 20188 bytes
-rw-r--r--puppet/modules/site_apt/files/keys/leap-experimental-archive.gpgbin0 -> 3423 bytes
-rw-r--r--puppet/modules/site_apt/manifests/dist_upgrade.pp17
-rw-r--r--puppet/modules/site_apt/manifests/init.pp55
-rw-r--r--puppet/modules/site_apt/manifests/leap_repo.pp16
-rw-r--r--puppet/modules/site_apt/manifests/preferences/check_mk.pp9
-rw-r--r--puppet/modules/site_apt/manifests/preferences/passenger.pp14
-rw-r--r--puppet/modules/site_apt/manifests/preferences/rsyslog.pp13
-rw-r--r--puppet/modules/site_apt/manifests/unattended_upgrades.pp20
-rw-r--r--puppet/modules/site_apt/templates/jessie/postfix.seeds1
-rw-r--r--puppet/modules/site_apt/templates/preferences.include_squeeze25
-rw-r--r--puppet/modules/site_apt/templates/secondary.list3
-rw-r--r--puppet/modules/site_apt/templates/wheezy/postfix.seeds1
-rw-r--r--puppet/modules/site_check_mk/files/agent/local_checks/all_hosts/run_node_tests.sh5
-rwxr-xr-xpuppet/modules/site_check_mk/files/agent/local_checks/couchdb/leap_couch_stats.sh122
-rwxr-xr-xpuppet/modules/site_check_mk/files/agent/local_checks/mx/check_leap_mx.sh33
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg28
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/leap_mx.cfg4
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/logwatch.cfg31
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/openvpn.cfg19
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg6
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/stunnel.cfg10
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/syslog/bigcouch.cfg5
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/syslog/couchdb.cfg2
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/syslog_header.cfg1
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg21
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/webapp.cfg8
-rwxr-xr-xpuppet/modules/site_check_mk/files/agent/nagios_plugins/check_unix_open_fds.pl322
-rwxr-xr-xpuppet/modules/site_check_mk/files/agent/plugins/mk_logwatch.1.2.4374
-rw-r--r--puppet/modules/site_check_mk/files/extra_service_conf.mk14
-rw-r--r--puppet/modules/site_check_mk/files/ignored_services.mk3
-rw-r--r--puppet/modules/site_check_mk/manifests/agent.pp35
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/couchdb.pp34
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/couchdb/bigcouch.pp49
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/couchdb/plain.pp23
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/haproxy.pp15
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/haveged.pp15
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/logwatch.pp36
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/logwatch/syslog.pp18
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/mrpe.pp24
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/mx.pp27
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/openvpn.pp10
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/package/nagios_plugins_contrib.pp5
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/package/perl_plugin.pp5
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/soledad.pp17
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/stunnel.pp9
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/webapp.pp15
-rw-r--r--puppet/modules/site_check_mk/manifests/server.pp103
-rw-r--r--puppet/modules/site_check_mk/templates/extra_host_conf.mk13
-rw-r--r--puppet/modules/site_check_mk/templates/host_contactgroups.mk17
-rw-r--r--puppet/modules/site_check_mk/templates/hostgroups.mk17
-rw-r--r--puppet/modules/site_check_mk/templates/use_ssh.mk6
-rw-r--r--puppet/modules/site_config/files/xterm-title.sh8
-rw-r--r--puppet/modules/site_config/lib/facter/dhcp_enabled.rb22
-rw-r--r--puppet/modules/site_config/lib/facter/ip_interface.rb13
-rw-r--r--puppet/modules/site_config/manifests/caching_resolver.pp27
-rw-r--r--puppet/modules/site_config/manifests/default.pp71
-rw-r--r--puppet/modules/site_config/manifests/dhclient.pp40
-rw-r--r--puppet/modules/site_config/manifests/files.pp24
-rw-r--r--puppet/modules/site_config/manifests/hosts.pp44
-rw-r--r--puppet/modules/site_config/manifests/initial_firewall.pp64
-rw-r--r--puppet/modules/site_config/manifests/packages.pp32
-rw-r--r--puppet/modules/site_config/manifests/packages/build_essential.pp28
-rw-r--r--puppet/modules/site_config/manifests/packages/gnutls.pp5
-rw-r--r--puppet/modules/site_config/manifests/params.pp35
-rw-r--r--puppet/modules/site_config/manifests/remove.pp11
-rw-r--r--puppet/modules/site_config/manifests/remove/bigcouch.pp42
-rw-r--r--puppet/modules/site_config/manifests/remove/files.pp56
-rw-r--r--puppet/modules/site_config/manifests/remove/jessie.pp14
-rw-r--r--puppet/modules/site_config/manifests/remove/monitoring.pp13
-rw-r--r--puppet/modules/site_config/manifests/remove/tapicero.pp72
-rw-r--r--puppet/modules/site_config/manifests/remove/webapp.pp7
-rw-r--r--puppet/modules/site_config/manifests/resolvconf.pp14
-rw-r--r--puppet/modules/site_config/manifests/ruby.pp8
-rw-r--r--puppet/modules/site_config/manifests/ruby/dev.pp8
-rw-r--r--puppet/modules/site_config/manifests/setup.pp50
-rw-r--r--puppet/modules/site_config/manifests/shell.pp22
-rw-r--r--puppet/modules/site_config/manifests/slow.pp10
-rw-r--r--puppet/modules/site_config/manifests/sysctl.pp8
-rw-r--r--puppet/modules/site_config/manifests/syslog.pp62
-rw-r--r--puppet/modules/site_config/manifests/vagrant.pp11
-rw-r--r--puppet/modules/site_config/manifests/x509/ca.pp11
-rw-r--r--puppet/modules/site_config/manifests/x509/ca_bundle.pp17
-rw-r--r--puppet/modules/site_config/manifests/x509/cert.pp12
-rw-r--r--puppet/modules/site_config/manifests/x509/client_ca/ca.pp16
-rw-r--r--puppet/modules/site_config/manifests/x509/client_ca/key.pp16
-rw-r--r--puppet/modules/site_config/manifests/x509/commercial/ca.pp11
-rw-r--r--puppet/modules/site_config/manifests/x509/commercial/cert.pp15
-rw-r--r--puppet/modules/site_config/manifests/x509/commercial/key.pp11
-rw-r--r--puppet/modules/site_config/manifests/x509/key.pp11
-rw-r--r--puppet/modules/site_config/templates/hosts19
-rw-r--r--puppet/modules/site_config/templates/ipv4firewall_up.rules.erb14
-rw-r--r--puppet/modules/site_config/templates/ipv6firewall_up.rules.erb8
-rw-r--r--puppet/modules/site_config/templates/reload_dhclient.erb13
-rw-r--r--puppet/modules/site_couchdb/files/couchdb_scripts_defaults.conf4
-rw-r--r--puppet/modules/site_couchdb/files/designs/Readme.md14
-rw-r--r--puppet/modules/site_couchdb/files/designs/customers/Customer.json18
-rw-r--r--puppet/modules/site_couchdb/files/designs/identities/Identity.json34
-rw-r--r--puppet/modules/site_couchdb/files/designs/invite_codes/InviteCode.json22
-rw-r--r--puppet/modules/site_couchdb/files/designs/messages/Message.json18
-rw-r--r--puppet/modules/site_couchdb/files/designs/sessions/Session.json8
-rw-r--r--puppet/modules/site_couchdb/files/designs/shared/docs.json8
-rw-r--r--puppet/modules/site_couchdb/files/designs/shared/syncs.json11
-rw-r--r--puppet/modules/site_couchdb/files/designs/shared/transactions.json13
-rw-r--r--puppet/modules/site_couchdb/files/designs/tickets/Ticket.json50
-rw-r--r--puppet/modules/site_couchdb/files/designs/tokens/Token.json14
-rw-r--r--puppet/modules/site_couchdb/files/designs/users/User.json22
-rwxr-xr-xpuppet/modules/site_couchdb/files/leap_ca_daemon157
-rw-r--r--puppet/modules/site_couchdb/files/local.ini8
-rw-r--r--puppet/modules/site_couchdb/files/runit_config6
-rw-r--r--puppet/modules/site_couchdb/lib/puppet/parser/functions/rotated_db_name.rb24
-rw-r--r--puppet/modules/site_couchdb/manifests/add_users.pp57
-rw-r--r--puppet/modules/site_couchdb/manifests/backup.pp23
-rw-r--r--puppet/modules/site_couchdb/manifests/bigcouch.pp50
-rw-r--r--puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp8
-rw-r--r--puppet/modules/site_couchdb/manifests/bigcouch/compaction.pp8
-rw-r--r--puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp11
-rw-r--r--puppet/modules/site_couchdb/manifests/create_dbs.pp102
-rw-r--r--puppet/modules/site_couchdb/manifests/designs.pp46
-rw-r--r--puppet/modules/site_couchdb/manifests/init.pp81
-rw-r--r--puppet/modules/site_couchdb/manifests/logrotate.pp14
-rw-r--r--puppet/modules/site_couchdb/manifests/mirror.pp78
-rw-r--r--puppet/modules/site_couchdb/manifests/plain.pp14
-rw-r--r--puppet/modules/site_couchdb/manifests/setup.pp61
-rw-r--r--puppet/modules/site_couchdb/manifests/upload_design.pp14
-rw-r--r--puppet/modules/site_haproxy/files/haproxy-stats.cfg6
-rw-r--r--puppet/modules/site_haproxy/manifests/init.pp41
-rw-r--r--puppet/modules/site_haproxy/templates/couch.erb32
-rw-r--r--puppet/modules/site_haproxy/templates/haproxy.cfg.erb11
-rw-r--r--puppet/modules/site_mx/manifests/init.pp20
-rw-r--r--puppet/modules/site_nagios/files/configs/Debian/nagios.cfg1302
-rwxr-xr-xpuppet/modules/site_nagios/files/plugins/check_last_regex_in_log85
-rw-r--r--puppet/modules/site_nagios/manifests/add_host_services.pp32
-rw-r--r--puppet/modules/site_nagios/manifests/add_service.pp32
-rw-r--r--puppet/modules/site_nagios/manifests/init.pp13
-rw-r--r--puppet/modules/site_nagios/manifests/plugins.pp16
-rw-r--r--puppet/modules/site_nagios/manifests/server.pp97
-rw-r--r--puppet/modules/site_nagios/manifests/server/add_contacts.pp18
-rw-r--r--puppet/modules/site_nagios/manifests/server/apache.pp25
-rw-r--r--puppet/modules/site_nagios/manifests/server/contactgroup.pp8
-rw-r--r--puppet/modules/site_nagios/manifests/server/hostgroup.pp7
-rw-r--r--puppet/modules/site_nagios/manifests/server/icli.pp26
-rw-r--r--puppet/modules/site_nagios/templates/icli_aliases.erb7
-rw-r--r--puppet/modules/site_nickserver/manifests/init.pp178
-rw-r--r--puppet/modules/site_nickserver/templates/nickserver-proxy.conf.erb19
-rw-r--r--puppet/modules/site_nickserver/templates/nickserver.yml.erb19
-rw-r--r--puppet/modules/site_obfsproxy/README0
-rw-r--r--puppet/modules/site_obfsproxy/manifests/init.pp38
-rw-r--r--puppet/modules/site_openvpn/README20
-rw-r--r--puppet/modules/site_openvpn/manifests/dh_key.pp10
-rw-r--r--puppet/modules/site_openvpn/manifests/init.pp238
-rw-r--r--puppet/modules/site_openvpn/manifests/resolver.pp50
-rw-r--r--puppet/modules/site_openvpn/manifests/server_config.pp228
-rw-r--r--puppet/modules/site_openvpn/templates/add_gateway_ips.sh.erb11
-rw-r--r--puppet/modules/site_postfix/files/checks/received_anon2
-rw-r--r--puppet/modules/site_postfix/manifests/debug.pp9
-rw-r--r--puppet/modules/site_postfix/manifests/mx.pp152
-rw-r--r--puppet/modules/site_postfix/manifests/mx/checks.pp23
-rw-r--r--puppet/modules/site_postfix/manifests/mx/received_anon.pp13
-rw-r--r--puppet/modules/site_postfix/manifests/mx/rewrite_openpgp_header.pp11
-rw-r--r--puppet/modules/site_postfix/manifests/mx/smtp_auth.pp6
-rw-r--r--puppet/modules/site_postfix/manifests/mx/smtp_tls.pp43
-rw-r--r--puppet/modules/site_postfix/manifests/mx/smtpd_checks.pp36
-rw-r--r--puppet/modules/site_postfix/manifests/mx/smtpd_tls.pp69
-rw-r--r--puppet/modules/site_postfix/manifests/mx/static_aliases.pp88
-rw-r--r--puppet/modules/site_postfix/manifests/satellite.pp47
-rw-r--r--puppet/modules/site_postfix/templates/checks/helo_access.erb21
-rw-r--r--puppet/modules/site_postfix/templates/checks/rewrite_openpgp_headers.erb13
-rw-r--r--puppet/modules/site_postfix/templates/virtual-aliases.erb21
-rw-r--r--puppet/modules/site_rsyslog/templates/client.conf.erb134
-rw-r--r--puppet/modules/site_shorewall/files/Debian/shorewall.service23
-rw-r--r--puppet/modules/site_shorewall/manifests/defaults.pp86
-rw-r--r--puppet/modules/site_shorewall/manifests/dnat.pp19
-rw-r--r--puppet/modules/site_shorewall/manifests/dnat_rule.pp50
-rw-r--r--puppet/modules/site_shorewall/manifests/eip.pp92
-rw-r--r--puppet/modules/site_shorewall/manifests/ip_forward.pp10
-rw-r--r--puppet/modules/site_shorewall/manifests/monitor.pp8
-rw-r--r--puppet/modules/site_shorewall/manifests/mx.pp24
-rw-r--r--puppet/modules/site_shorewall/manifests/obfsproxy.pp25
-rw-r--r--puppet/modules/site_shorewall/manifests/service/http.pp13
-rw-r--r--puppet/modules/site_shorewall/manifests/service/https.pp12
-rw-r--r--puppet/modules/site_shorewall/manifests/service/smtp.pp13
-rw-r--r--puppet/modules/site_shorewall/manifests/service/webapp_api.pp23
-rw-r--r--puppet/modules/site_shorewall/manifests/soledad.pp23
-rw-r--r--puppet/modules/site_shorewall/manifests/sshd.pp31
-rw-r--r--puppet/modules/site_shorewall/manifests/stunnel/client.pp40
-rw-r--r--puppet/modules/site_shorewall/manifests/stunnel/server.pp22
-rw-r--r--puppet/modules/site_shorewall/manifests/tor.pp26
-rw-r--r--puppet/modules/site_shorewall/manifests/webapp.pp7
-rw-r--r--puppet/modules/site_squid_deb_proxy/manifests/client.pp5
-rw-r--r--puppet/modules/site_sshd/manifests/authorized_keys.pp34
-rw-r--r--puppet/modules/site_sshd/manifests/deploy_authorized_keys.pp9
-rw-r--r--puppet/modules/site_sshd/manifests/init.pp82
-rw-r--r--puppet/modules/site_sshd/manifests/mosh.pp21
-rw-r--r--puppet/modules/site_sshd/templates/authorized_keys.erb10
-rw-r--r--puppet/modules/site_sshd/templates/ssh_config.erb40
-rw-r--r--puppet/modules/site_sshd/templates/ssh_known_hosts.erb7
-rw-r--r--puppet/modules/site_static/README3
-rw-r--r--puppet/modules/site_static/manifests/domain.pp33
-rw-r--r--puppet/modules/site_static/manifests/init.pp72
-rw-r--r--puppet/modules/site_static/manifests/location.pp36
-rw-r--r--puppet/modules/site_static/templates/amber.erb13
-rw-r--r--puppet/modules/site_static/templates/apache.conf.erb88
-rw-r--r--puppet/modules/site_static/templates/rack.erb19
-rw-r--r--puppet/modules/site_stunnel/manifests/client.pp64
-rw-r--r--puppet/modules/site_stunnel/manifests/clients.pp23
-rw-r--r--puppet/modules/site_stunnel/manifests/init.pp48
-rw-r--r--puppet/modules/site_stunnel/manifests/override_service.pp18
-rw-r--r--puppet/modules/site_stunnel/manifests/servers.pp51
-rw-r--r--puppet/modules/site_tor/manifests/disable_exit.pp7
-rw-r--r--puppet/modules/site_tor/manifests/init.pp45
-rw-r--r--puppet/modules/site_webapp/files/server-status.conf26
-rw-r--r--puppet/modules/site_webapp/manifests/apache.pp28
-rw-r--r--puppet/modules/site_webapp/manifests/common_vhost.pp18
-rw-r--r--puppet/modules/site_webapp/manifests/couchdb.pp52
-rw-r--r--puppet/modules/site_webapp/manifests/cron.pp37
-rw-r--r--puppet/modules/site_webapp/manifests/hidden_service.pp52
-rw-r--r--puppet/modules/site_webapp/manifests/init.pp179
-rw-r--r--puppet/modules/site_webapp/templates/config.yml.erb36
-rw-r--r--puppet/modules/site_webapp/templates/couchdb.admin.yml.erb9
-rw-r--r--puppet/modules/site_webapp/templates/couchdb.yml.erb9
-rw-r--r--puppet/modules/soledad/manifests/client.pp16
-rw-r--r--puppet/modules/soledad/manifests/common.pp8
-rw-r--r--puppet/modules/soledad/manifests/server.pp104
-rw-r--r--puppet/modules/soledad/templates/default-soledad.erb5
-rw-r--r--puppet/modules/soledad/templates/soledad-server.conf.erb12
-rw-r--r--puppet/modules/templatewlv/Modulefile11
-rw-r--r--puppet/modules/templatewlv/README.md21
-rw-r--r--puppet/modules/templatewlv/lib/puppet/parser/functions/templatewlv.rb41
-rw-r--r--puppet/modules/templatewlv/lib/puppet/parser/templatewrapperwlv.rb39
-rw-r--r--puppet/modules/try/README.md13
-rw-r--r--puppet/modules/try/manifests/file.pp114
-rw-r--r--puppet/modules/try/manifests/init.pp3
303 files changed, 11949 insertions, 0 deletions
diff --git a/puppet/modules/clamav/files/01-leap.conf b/puppet/modules/clamav/files/01-leap.conf
new file mode 100644
index 00000000..a7e49d17
--- /dev/null
+++ b/puppet/modules/clamav/files/01-leap.conf
@@ -0,0 +1,58 @@
+# If running clamd in "LocalSocket" mode (*NOT* in TCP/IP mode), and
+# either "SOcket Cat" (socat) or the "IO::Socket::UNIX" perl module
+# are installed on the system, and you want to report whether clamd
+# is running or not, uncomment the "clamd_socket" variable below (you
+# will be warned if neither socat nor IO::Socket::UNIX are found, but
+# the script will still run). You will also need to set the correct
+# path to your clamd socket file (if unsure of the path, check the
+# "LocalSocket" setting in your clamd.conf file for socket location).
+clamd_socket="/run/clamav/clamd.ctl"
+
+# If you would like to attempt to restart ClamD if detected not running,
+# uncomment the next 2 lines. Confirm the path to the "clamd_lock" file
+# (usually can be found in the clamd init script) and also enter the clamd
+# start command for your particular distro for the "start_clamd" variable
+# (the sample start command shown below should work for most linux distros).
+# NOTE: these 2 variables are dependent on the "clamd_socket" variable
+# shown above - if not enabled, then the following 2 variables will be
+# ignored, whether enabled or not.
+clamd_lock="/run/clamav/clamd.pid"
+start_clamd="clamdscan --reload"
+
+ss_dbs="
+ junk.ndb
+ phish.ndb
+ rogue.hdb
+ sanesecurity.ftm
+ scam.ndb
+ sigwhitelist.ign2
+ spamattach.hdb
+ spamimg.hdb
+ winnow.attachments.hdb
+ winnow_bad_cw.hdb
+ winnow_extended_malware.hdb
+ winnow_malware.hdb
+ winnow_malware_links.ndb
+ malwarehash.hsb
+ doppelstern.hdb
+ bofhland_cracked_URL.ndb
+ bofhland_malware_attach.hdb
+ bofhland_malware_URL.ndb
+ bofhland_phishing_URL.ndb
+ crdfam.clamav.hdb
+ phishtank.ndb
+ porcupine.ndb
+ spear.ndb
+ spearl.ndb
+"
+
+# ========================
+# SecuriteInfo Database(s)
+# ========================
+# Add or remove database file names between quote marks as needed. To
+# disable any SecuriteInfo database downloads, remove the appropriate
+# lines below. To disable all SecuriteInfo database file downloads,
+# comment all of the following lines.
+si_dbs=""
+
+mbl_dbs=""
\ No newline at end of file
diff --git a/puppet/modules/clamav/files/clamav-daemon_default b/puppet/modules/clamav/files/clamav-daemon_default
new file mode 100644
index 00000000..b4cd6a4f
--- /dev/null
+++ b/puppet/modules/clamav/files/clamav-daemon_default
@@ -0,0 +1,8 @@
+# This is a file designed only to set special environment variables
+# eg TMP or TMPDIR. It is sourced from a shell script, so anything
+# put in here must be in variable=value format, suitable for sourcing
+# from a shell script.
+# Examples:
+# export TMPDIR=/dev/shm
+export TMP=/var/tmp
+export TMPDIR=/var/tmp
diff --git a/puppet/modules/clamav/files/clamav-milter_default b/puppet/modules/clamav/files/clamav-milter_default
new file mode 100644
index 00000000..5e33e822
--- /dev/null
+++ b/puppet/modules/clamav/files/clamav-milter_default
@@ -0,0 +1,14 @@
+#
+# clamav-milter init options
+#
+
+## SOCKET_RWGROUP
+# by default, the socket created by the milter has permissions
+# clamav:clamav:755. SOCKET_RWGROUP changes the group and changes the
+# permissions to 775 to give read-write access to that group.
+#
+# If you are using postfix to speak to the milter, you have to give the
+# postfix group permission to write to it.
+#
+SOCKET_RWGROUP=postfix
+export TMPDIR=/var/tmp
diff --git a/puppet/modules/clamav/manifests/daemon.pp b/puppet/modules/clamav/manifests/daemon.pp
new file mode 100644
index 00000000..2e13a8fb
--- /dev/null
+++ b/puppet/modules/clamav/manifests/daemon.pp
@@ -0,0 +1,91 @@
+# deploy clamav daemon
+class clamav::daemon {
+
+ $domain_hash = hiera('domain')
+ $domain = $domain_hash['full_suffix']
+
+ package { [ 'clamav-daemon', 'arj' ]:
+ ensure => installed;
+ }
+
+ service {
+ 'clamav-daemon':
+ ensure => running,
+ name => clamav-daemon,
+ pattern => '/usr/sbin/clamd',
+ enable => true,
+ hasrestart => true,
+ subscribe => File['/etc/default/clamav-daemon'],
+ require => Package['clamav-daemon'];
+ }
+
+ file {
+ '/var/run/clamav':
+ ensure => directory,
+ mode => '0750',
+ owner => clamav,
+ group => postfix,
+ require => [Package['postfix'], Package['clamav-daemon']];
+
+ '/var/lib/clamav':
+ mode => '0755',
+ owner => clamav,
+ group => clamav,
+ require => Package['clamav-daemon'];
+
+ '/etc/default/clamav-daemon':
+ source => 'puppet:///modules/clamav/clamav-daemon_default',
+ mode => '0644',
+ owner => root,
+ group => root;
+
+ # this file contains additional domains that we want the clamav
+ # phishing process to look for (our domain)
+ '/var/lib/clamav/local.pdb':
+ content => template('clamav/local.pdb.erb'),
+ mode => '0644',
+ owner => clamav,
+ group => clamav,
+ require => Package['clamav-daemon'];
+ }
+
+ file_line {
+ 'clamav_daemon_tmp':
+ path => '/etc/clamav/clamd.conf',
+ line => 'TemporaryDirectory /var/tmp',
+ require => Package['clamav-daemon'],
+ notify => Service['clamav-daemon'];
+
+ 'enable_phishscanurls':
+ path => '/etc/clamav/clamd.conf',
+ match => 'PhishingScanURLs no',
+ line => 'PhishingScanURLs yes',
+ require => Package['clamav-daemon'],
+ notify => Service['clamav-daemon'];
+
+ 'clamav_LogSyslog_true':
+ path => '/etc/clamav/clamd.conf',
+ match => '^LogSyslog false',
+ line => 'LogSyslog true',
+ require => Package['clamav-daemon'],
+ notify => Service['clamav-daemon'];
+
+ 'clamav_MaxThreads':
+ path => '/etc/clamav/clamd.conf',
+ match => 'MaxThreads 20',
+ line => 'MaxThreads 100',
+ require => Package['clamav-daemon'],
+ notify => Service['clamav-daemon'];
+ }
+
+ # remove LogFile line
+ file_line {
+ 'clamav_LogFile':
+ path => '/etc/clamav/clamd.conf',
+ match => '^LogFile .*',
+ line => '',
+ require => Package['clamav-daemon'],
+ notify => Service['clamav-daemon'];
+ }
+
+}
diff --git a/puppet/modules/clamav/manifests/freshclam.pp b/puppet/modules/clamav/manifests/freshclam.pp
new file mode 100644
index 00000000..80c822a4
--- /dev/null
+++ b/puppet/modules/clamav/manifests/freshclam.pp
@@ -0,0 +1,23 @@
+class clamav::freshclam {
+
+ package { 'clamav-freshclam': ensure => installed }
+
+ service {
+ 'freshclam':
+ ensure => running,
+ enable => true,
+ name => clamav-freshclam,
+ pattern => '/usr/bin/freshclam',
+ hasrestart => true,
+ require => Package['clamav-freshclam'];
+ }
+
+ file_line {
+ 'freshclam_notify':
+ path => '/etc/clamav/freshclam.conf',
+ line => 'NotifyClamd /etc/clamav/clamd.conf',
+ require => Package['clamav-freshclam'],
+ notify => Service['freshclam'];
+ }
+
+}
diff --git a/puppet/modules/clamav/manifests/init.pp b/puppet/modules/clamav/manifests/init.pp
new file mode 100644
index 00000000..de8fb4dc
--- /dev/null
+++ b/puppet/modules/clamav/manifests/init.pp
@@ -0,0 +1,8 @@
+class clamav {
+
+ include clamav::daemon
+ include clamav::milter
+ include clamav::unofficial_sigs
+ include clamav::freshclam
+
+}
diff --git a/puppet/modules/clamav/manifests/milter.pp b/puppet/modules/clamav/manifests/milter.pp
new file mode 100644
index 00000000..e8a85e3f
--- /dev/null
+++ b/puppet/modules/clamav/manifests/milter.pp
@@ -0,0 +1,50 @@
+class clamav::milter {
+
+ $clamav = hiera('clamav')
+ $whitelisted_addresses = $clamav['whitelisted_addresses']
+ $domain_hash = hiera('domain')
+ $domain = $domain_hash['full_suffix']
+
+ package { 'clamav-milter': ensure => installed }
+
+ service {
+ 'clamav-milter':
+ ensure => running,
+ enable => true,
+ name => clamav-milter,
+ pattern => '/usr/sbin/clamav-milter',
+ hasrestart => true,
+ require => Package['clamav-milter'],
+ subscribe => File['/etc/default/clamav-milter'];
+ }
+
+ file {
+ '/run/clamav/milter.ctl':
+ mode => '0666',
+ owner => clamav,
+ group => postfix,
+ require => Class['clamav::daemon'];
+
+ '/etc/clamav/clamav-milter.conf':
+ content => template('clamav/clamav-milter.conf.erb'),
+ mode => '0644',
+ owner => root,
+ group => root,
+ require => Package['clamav-milter'],
+ subscribe => Service['clamav-milter'];
+
+ '/etc/default/clamav-milter':
+ source => 'puppet:///modules/clamav/clamav-milter_default',
+ mode => '0644',
+ owner => root,
+ group => root;
+
+ '/etc/clamav/whitelisted_addresses':
+ content => template('clamav/whitelisted_addresses.erb'),
+ mode => '0644',
+ owner => root,
+ group => root,
+ require => Package['clamav-milter'];
+ }
+
+}
diff --git a/puppet/modules/clamav/manifests/unofficial_sigs.pp b/puppet/modules/clamav/manifests/unofficial_sigs.pp
new file mode 100644
index 00000000..2d849585
--- /dev/null
+++ b/puppet/modules/clamav/manifests/unofficial_sigs.pp
@@ -0,0 +1,23 @@
+class clamav::unofficial_sigs {
+
+ package { 'clamav-unofficial-sigs':
+ ensure => installed
+ }
+
+ ensure_packages(['wget', 'gnupg', 'socat', 'rsync', 'curl'])
+
+ file {
+ '/var/log/clamav-unofficial-sigs.log':
+ ensure => file,
+ owner => clamav,
+ group => clamav,
+ require => Package['clamav-unofficial-sigs'];
+
+ '/etc/clamav-unofficial-sigs.conf.d/01-leap.conf':
+ source => 'puppet:///modules/clamav/01-leap.conf',
+ mode => '0755',
+ owner => root,
+ group => root,
+ require => Package['clamav-unofficial-sigs'];
+ }
+}
diff --git a/puppet/modules/clamav/templates/clamav-milter.conf.erb b/puppet/modules/clamav/templates/clamav-milter.conf.erb
new file mode 100644
index 00000000..9bf7099e
--- /dev/null
+++ b/puppet/modules/clamav/templates/clamav-milter.conf.erb
@@ -0,0 +1,28 @@
+# THIS FILE MANAGED BY PUPPET
+MilterSocket /var/run/clamav/milter.ctl
+FixStaleSocket true
+User clamav
+MilterSocketGroup clamav
+MilterSocketMode 666
+AllowSupplementaryGroups true
+ReadTimeout 120
+Foreground false
+PidFile /var/run/clamav/clamav-milter.pid
+ClamdSocket unix:/var/run/clamav/clamd.ctl
+OnClean Accept
+OnInfected Reject
+OnFail Defer
+AddHeader Replace
+LogSyslog true
+LogFacility LOG_LOCAL6
+LogVerbose yes
+LogInfected Basic
+LogTime true
+LogFileUnlock false
+LogClean Off
+LogRotate true
+SupportMultipleRecipients false
+MaxFileSize 10M
+TemporaryDirectory /var/tmp
+RejectMsg "Message refused due to content violation: %v - contact https://<%= @domain %>/tickets/new if this is in error"
+Whitelist /etc/clamav/whitelisted_addresses
diff --git a/puppet/modules/clamav/templates/local.pdb.erb b/puppet/modules/clamav/templates/local.pdb.erb
new file mode 100644
index 00000000..9ea0584a
--- /dev/null
+++ b/puppet/modules/clamav/templates/local.pdb.erb
@@ -0,0 +1 @@
+H:<%= @domain %>
diff --git a/puppet/modules/clamav/templates/whitelisted_addresses.erb b/puppet/modules/clamav/templates/whitelisted_addresses.erb
new file mode 100644
index 00000000..9e068ec5
--- /dev/null
+++ b/puppet/modules/clamav/templates/whitelisted_addresses.erb
@@ -0,0 +1,5 @@
+<%- if @whitelisted_addresses then -%>
+<% @whitelisted_addresses.each do |name| -%>
+From::<%= name %>
+<% end -%>
+<% end -%>
diff --git a/puppet/modules/haveged/manifests/init.pp b/puppet/modules/haveged/manifests/init.pp
new file mode 100644
index 00000000..8f901937
--- /dev/null
+++ b/puppet/modules/haveged/manifests/init.pp
@@ -0,0 +1,16 @@
+class haveged {
+
+ package { 'haveged':
+ ensure => present,
+ }
+
+ service { 'haveged':
+ ensure => running,
+ hasrestart => true,
+ hasstatus => true,
+ enable => true,
+ require => Package['haveged'];
+ }
+
+ include site_check_mk::agent::haveged
+}
diff --git a/puppet/modules/journald/manifests/init.pp b/puppet/modules/journald/manifests/init.pp
new file mode 100644
index 00000000..879baba4
--- /dev/null
+++ b/puppet/modules/journald/manifests/init.pp
@@ -0,0 +1,7 @@
+class journald {
+
+ service { 'systemd-journald':
+ ensure => running,
+ enable => true,
+ }
+}
diff --git a/puppet/modules/leap/manifests/cli/install.pp b/puppet/modules/leap/manifests/cli/install.pp
new file mode 100644
index 00000000..25e87033
--- /dev/null
+++ b/puppet/modules/leap/manifests/cli/install.pp
@@ -0,0 +1,46 @@
+# installs leap_cli on node
+class leap::cli::install ( $source = false ) {
+ if $source {
+ # needed for building leap_cli from source
+ include ::git
+ include ::rubygems
+
+ class { '::ruby':
+ install_dev => true
+ }
+
+ class { 'bundler::install': install_method => 'package' }
+
+ Class[Ruby] ->
+ Class[rubygems] ->
+ Class[bundler::install]
+
+
+ vcsrepo { '/srv/leap/cli':
+ ensure => present,
+ force => true,
+ revision => 'develop',
+ provider => 'git',
+ source => 'https://leap.se/git/leap_cli.git',
+ owner => 'root',
+ group => 'root',
+ notify => Exec['install_leap_cli'],
+ require => Package['git']
+ }
+
+ exec { 'install_leap_cli':
+ command => '/usr/bin/rake build && /usr/bin/rake install',
+ cwd => '/srv/leap/cli',
+ user => 'root',
+ environment => 'USER=root',
+ refreshonly => true,
+ require => [ Class[bundler::install] ]
+ }
+ }
+ else {
+ package { 'leap_cli':
+ ensure => installed,
+ provider => gem
+ }
+ }
+}
diff --git a/puppet/modules/leap/manifests/init.pp b/puppet/modules/leap/manifests/init.pp
new file mode 100644
index 00000000..bbae3781
--- /dev/null
+++ b/puppet/modules/leap/manifests/init.pp
@@ -0,0 +1,3 @@
+class leap {
+
+}
\ No newline at end of file
diff --git a/puppet/modules/leap/manifests/logfile.pp b/puppet/modules/leap/manifests/logfile.pp
new file mode 100644
index 00000000..adb3ca8a
--- /dev/null
+++ b/puppet/modules/leap/manifests/logfile.pp
@@ -0,0 +1,34 @@
+#
+# make syslog log to a particular file for a particular process.
+#
+# arguments:
+#
+# * name: what config files are named as (e.g. /etc/rsyslog.d/50-$name.conf)
+# * log: the full path of the log file (defaults to /var/log/leap/$name.log)
+# * process: the syslog tag to filter on (defaults to name)
+#
+define leap::logfile($process = $name, $log = undef) {
+ if $log {
+ $logfile = $log
+ } else {
+ $logfile = "/var/log/leap/${name}.log"
+ }
+
+ rsyslog::snippet { "50-${name}":
+ content => template('leap/rsyslog.erb')
+ }
+
+ augeas {
+ "logrotate_${name}":
+ context => "/files/etc/logrotate.d/${name}/rule",
+ changes => [
+ "set file ${logfile}",
+ 'set rotate 5',
+ 'set schedule daily',
+ 'set compress compress',
+ 'set missingok missingok',
+ 'set ifempty notifempty',
+ 'set copytruncate copytruncate'
+ ]
+ }
+}
diff --git a/puppet/modules/leap/templates/rsyslog.erb b/puppet/modules/leap/templates/rsyslog.erb
new file mode 100644
index 00000000..7bb5316f
--- /dev/null
+++ b/puppet/modules/leap/templates/rsyslog.erb
@@ -0,0 +1,5 @@
+if $programname startswith '<%= @process %>' then {
+ action(type="omfile" file="<%= @logfile %>" template="RSYSLOG_TraditionalFileFormat")
+ stop
+}
+
diff --git a/puppet/modules/leap_mx/manifests/init.pp b/puppet/modules/leap_mx/manifests/init.pp
new file mode 100644
index 00000000..d758e3ab
--- /dev/null
+++ b/puppet/modules/leap_mx/manifests/init.pp
@@ -0,0 +1,119 @@
+# deploy leap mx service
+class leap_mx {
+
+ $leap_mx = hiera('couchdb_leap_mx_user')
+ $couchdb_user = $leap_mx['username']
+ $couchdb_password = $leap_mx['password']
+
+ $couchdb_host = 'localhost'
+ $couchdb_port = '4096'
+
+ $sources = hiera('sources')
+
+ include soledad::common
+
+ #
+ # USER AND GROUP
+ #
+ # Make the user for leap-mx. This user is where all legitimate, non-system
+ # mail is delivered so leap-mx can process it. Previously, we let the system
+ # pick a uid/gid, but we need to know what they are set to in order to set the
+ # virtual_uid_maps and virtual_gid_maps. It's a bit overkill to write a fact just
+ # for this, so instead we pick arbitrary numbers that seem unlikely to be used
+ # and then use them in the postfix configuration
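+ # (For illustration: in Postfix this maps to something like
+ # "virtual_uid_maps = static:42424" and "virtual_gid_maps = static:42424";
+ # the actual main.cf settings are managed by the postfix module, not here.)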
+
+ group { 'leap-mx':
+ ensure => present,
+ gid => 42424,
+ allowdupe => false;
+ }
+
+ user { 'leap-mx':
+ ensure => present,
+ comment => 'Leap Mail',
+ allowdupe => false,
+ uid => 42424,
+ gid => 'leap-mx',
+ home => '/var/mail/leap-mx',
+ shell => '/bin/false',
+ managehome => true,
+ require => Group['leap-mx'];
+ }
+
+ file {
+ '/var/mail/leap-mx':
+ ensure => directory,
+ owner => 'leap-mx',
+ group => 'leap-mx',
+ mode => '0755',
+ require => User['leap-mx'];
+
+ '/var/mail/leap-mx/Maildir':
+ ensure => directory,
+ owner => 'leap-mx',
+ group => 'leap-mx',
+ mode => '0700';
+
+ '/var/mail/leap-mx/Maildir/new':
+ ensure => directory,
+ owner => 'leap-mx',
+ group => 'leap-mx',
+ mode => '0700';
+
+ '/var/mail/leap-mx/Maildir/cur':
+ ensure => directory,
+ owner => 'leap-mx',
+ group => 'leap-mx',
+ mode => '0700';
+
+ '/var/mail/leap-mx/Maildir/tmp':
+ ensure => directory,
+ owner => 'leap-mx',
+ group => 'leap-mx',
+ mode => '0700';
+ }
+
+ #
+ # LEAP-MX CONFIG
+ #
+
+ file { '/etc/leap/mx.conf':
+ content => template('leap_mx/mx.conf.erb'),
+ owner => 'leap-mx',
+ group => 'leap-mx',
+ mode => '0600',
+ notify => Service['leap-mx'];
+ }
+
+ leap::logfile { 'leap-mx':
+ log => '/var/log/leap/mx.log',
+ process => 'leap-mx'
+ }
+
+ #
+ # LEAP-MX CODE AND DEPENDENCIES
+ #
+
+ package {
+ $sources['leap-mx']['package']:
+ ensure => $sources['leap-mx']['revision'],
+ require => [
+ Class['site_apt::leap_repo'],
+ User['leap-mx'] ];
+
+ 'leap-keymanager':
+ ensure => latest;
+ }
+
+ #
+ # LEAP-MX DAEMON
+ #
+
+ service { 'leap-mx':
+ ensure => running,
+ enable => true,
+ hasstatus => true,
+ hasrestart => true,
+ require => [ Package['leap-mx'] ];
+ }
+}
diff --git a/puppet/modules/leap_mx/templates/mx.conf.erb b/puppet/modules/leap_mx/templates/mx.conf.erb
new file mode 100644
index 00000000..b54b3a86
--- /dev/null
+++ b/puppet/modules/leap_mx/templates/mx.conf.erb
@@ -0,0 +1,18 @@
+[mail1]
+path=/var/mail/leap-mx/Maildir
+recursive=True
+
+[couchdb]
+user=<%= @couchdb_user %>
+password=<%= @couchdb_password %>
+server=<%= @couchdb_host %>
+port=<%= @couchdb_port %>
+
+[alias map]
+port=4242
+
+[check recipient]
+port=2244
+
+[fingerprint map]
+port=2424
diff --git a/puppet/modules/obfsproxy/files/obfsproxy_init b/puppet/modules/obfsproxy/files/obfsproxy_init
new file mode 100755
index 00000000..01c8013a
--- /dev/null
+++ b/puppet/modules/obfsproxy/files/obfsproxy_init
@@ -0,0 +1,93 @@
+#!/bin/sh
+
+### BEGIN INIT INFO
+# Provides: obfsproxy daemon
+# Required-Start: $remote_fs $syslog
+# Required-Stop: $remote_fs $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: obfsproxy daemon
+# Description: obfsproxy daemon
+### END INIT INFO
+
+. /lib/lsb/init-functions
+
+DAEMON=/usr/bin/obfsproxy
+NAME=obfsproxy
+DESC="obfsproxy daemon"
+USER=obfsproxy
+DATDIR=/etc/obfsproxy
+PIDFILE=/var/run/obfsproxy.pid
+CONF=$DATDIR/obfsproxy.conf
+LOGFILE=/var/log/obfsproxy.log
+
+# If the daemon is not there, then exit.
+test -x $DAEMON || exit 0
+
+if [ -f $CONF ] ; then
+ . $CONF
+else
+ echo "Obfsproxy configuration file is missing, aborting..."
+ exit 2
+fi
+
+DAEMONARGS=" --log-min-severity=$LOG --log-file=$LOGFILE --data-dir=$DATDIR \
+ $TRANSPORT $PARAM --dest=$DEST_IP:$DEST_PORT server $BINDADDR:$PORT"
+
+start_obfsproxy() {
+ start-stop-daemon --start --quiet --oknodo -m --pidfile $PIDFILE \
+ -b -c $USER --startas $DAEMON --$DAEMONARGS
+}
+
+stop_obfsproxy() {
+ start-stop-daemon --stop --quiet --oknodo --pidfile $PIDFILE
+}
+
+status_obfsproxy() {
+ status_of_proc -p $PIDFILE $DAEMON $NAME
+}
+
+case $1 in
+ start)
+ if [ -e $PIDFILE ]; then
+ status_obfsproxy
+ if [ $? = "0" ]; then
+ exit
+ fi
+ fi
+ log_begin_msg "Starting $DESC"
+ start_obfsproxy
+ log_end_msg $?
+ ;;
+ stop)
+ if [ -e $PIDFILE ]; then
+ status_obfsproxy
+ if [ $? = "0" ]; then
+ log_begin_msg "Stopping $DESC"
+ stop_obfsproxy
+ rm -f $PIDFILE
+ log_end_msg $?
+ fi
+ else
+ status_obfsproxy
+ fi
+ ;;
+ restart)
+ $0 stop && sleep 2 && $0 start
+ ;;
+ status)
+ status_obfsproxy
+ ;;
+ reload)
+ if [ -e $PIDFILE ]; then
+ start-stop-daemon --stop --signal USR1 --quiet --pidfile $PIDFILE --name $NAME
+ log_success_msg "$DESC reloaded successfully"
+ else
+ log_failure_msg "$PIDFILE does not exist"
+ fi
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|restart|reload|status}"
+ exit 2
+ ;;
+esac
diff --git a/puppet/modules/obfsproxy/files/obfsproxy_logrotate b/puppet/modules/obfsproxy/files/obfsproxy_logrotate
new file mode 100644
index 00000000..e5679d0c
--- /dev/null
+++ b/puppet/modules/obfsproxy/files/obfsproxy_logrotate
@@ -0,0 +1,14 @@
+/var/log/obfsproxy.log {
+ daily
+ missingok
+ rotate 3
+ compress
+ delaycompress
+ notifempty
+ create 600 obfsproxy obfsproxy
+ postrotate
+ if [ -f /var/run/obfsproxy.pid ]; then
+ /etc/init.d/obfsproxy restart > /dev/null
+ fi
+ endscript
+}
diff --git a/puppet/modules/obfsproxy/manifests/init.pp b/puppet/modules/obfsproxy/manifests/init.pp
new file mode 100644
index 00000000..6a3d2c72
--- /dev/null
+++ b/puppet/modules/obfsproxy/manifests/init.pp
@@ -0,0 +1,86 @@
+# deploy obfsproxy service
+class obfsproxy (
+ $transport,
+ $bind_address,
+ $port,
+ $param,
+ $dest_ip,
+ $dest_port,
+ $log_level = 'info'
+){
+
+ $user = 'obfsproxy'
+ $conf = '/etc/obfsproxy/obfsproxy.conf'
+
+ user { $user:
+ ensure => present,
+ system => true,
+ gid => $user,
+ }
+
+ group { $user:
+ ensure => present,
+ system => true,
+ }
+
+ file { '/etc/init.d/obfsproxy':
+ ensure => present,
+ path => '/etc/init.d/obfsproxy',
+ source => 'puppet:///modules/obfsproxy/obfsproxy_init',
+ owner => 'root',
+ group => 'root',
+ mode => '0750',
+ require => File[$conf],
+ }
+
+ file { $conf :
+ ensure => present,
+ path => $conf,
+ owner => 'root',
+ group => 'root',
+ mode => '0600',
+ content => template('obfsproxy/etc_conf.erb'),
+ }
+
+ file { '/etc/obfsproxy':
+ ensure => directory,
+ owner => $user,
+ group => $user,
+ mode => '0700',
+ require => User[$user],
+ }
+
+ file { '/var/log/obfsproxy.log':
+ ensure => present,
+ owner => $user,
+ group => $user,
+ mode => '0640',
+ require => User[$user],
+ }
+
+ file { '/etc/logrotate.d/obfsproxy':
+ ensure => present,
+ source => 'puppet:///modules/obfsproxy/obfsproxy_logrotate',
+ owner => 'root',
+ group => 'root',
+ mode => '0644',
+ require => File['/var/log/obfsproxy.log'],
+ }
+
+ package { 'obfsproxy':
+ ensure => present
+ }
+
+ service { 'obfsproxy':
+ ensure => running,
+ subscribe => File[$conf],
+ require => [
+ Package['obfsproxy'],
+ File['/etc/init.d/obfsproxy'],
+ User[$user],
+ Group[$user]]
+ }
+
+
+}
+
diff --git a/puppet/modules/obfsproxy/templates/etc_conf.erb b/puppet/modules/obfsproxy/templates/etc_conf.erb
new file mode 100644
index 00000000..8959ef78
--- /dev/null
+++ b/puppet/modules/obfsproxy/templates/etc_conf.erb
@@ -0,0 +1,11 @@
+TRANSPORT=<%= @transport %>
+PORT=<%= @port %>
+DEST_IP=<%= @dest_ip %>
+DEST_PORT=<%= @dest_port %>
+<% if @transport == "scramblesuit" -%>
+PARAM=--password=<%= @param %>
+<% else -%>
+PARAM=<%= @param %>
+<% end -%>
+LOG=<%= @log_level %>
+BINDADDR=<%= @bind_address %>
diff --git a/puppet/modules/opendkim/manifests/init.pp b/puppet/modules/opendkim/manifests/init.pp
new file mode 100644
index 00000000..4d4c5312
--- /dev/null
+++ b/puppet/modules/opendkim/manifests/init.pp
@@ -0,0 +1,67 @@
+#
+# I am not sure about what issues might arise with DKIM key sizes
+# larger than 2048. It might or might not be supported. See:
+# http://dkim.org/specs/rfc4871-dkimbase.html#rfc.section.3.3.3
+#
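+# For reference, a 2048-bit key pair can be generated with opendkim-tools:
+#   opendkim-genkey -b 2048 -d <domain> -s <selector>
+# (illustrative only; this class does not generate keys, it only manages the
+# files at the paths configured in hiera)
+#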
+class opendkim {
+
+ $domain_hash = hiera('domain')
+ $domain = $domain_hash['full_suffix']
+ $mx = hiera('mx')
+ $dkim = $mx['dkim']
+ $selector = $dkim['selector']
+ $dkim_cert = $dkim['public_key']
+ $dkim_key = $dkim['private_key']
+
+ ensure_packages(['opendkim', 'libvbr2'])
+
+ # postfix user needs to be in the opendkim group
+ # in order to access the opendkim socket located at:
+ # local:/var/run/opendkim/opendkim.sock
+ user { 'postfix':
+ groups => 'opendkim',
+ require => Package['opendkim'];
+ }
+
+ service { 'opendkim':
+ ensure => running,
+ enable => true,
+ hasstatus => true,
+ hasrestart => true,
+ subscribe => File[$dkim_key];
+ }
+
+ file {
+ '/etc/opendkim.conf':
+ ensure => file,
+ content => template('opendkim/opendkim.conf'),
+ mode => '0644',
+ owner => root,
+ group => root,
+ notify => Service['opendkim'],
+ require => Package['opendkim'];
+
+ '/etc/default/opendkim.conf':
+ ensure => file,
+ content => 'SOCKET="inet:8891@localhost" # listen on loopback on port 8891',
+ mode => '0644',
+ owner => root,
+ group => root,
+ notify => Service['opendkim'],
+ require => Package['opendkim'];
+
+ $dkim_key:
+ ensure => file,
+ mode => '0600',
+ owner => 'opendkim',
+ group => 'opendkim',
+ require => Package['opendkim'];
+
+ $dkim_cert:
+ ensure => file,
+ mode => '0600',
+ owner => 'opendkim',
+ group => 'opendkim',
+ require => Package['opendkim'];
+ }
+}
diff --git a/puppet/modules/opendkim/templates/opendkim.conf b/puppet/modules/opendkim/templates/opendkim.conf
new file mode 100644
index 00000000..5a948229
--- /dev/null
+++ b/puppet/modules/opendkim/templates/opendkim.conf
@@ -0,0 +1,45 @@
+# This is a basic configuration that can easily be adapted to suit a standard
+# installation. For more advanced options, see opendkim.conf(5) and/or
+# /usr/share/doc/opendkim/examples/opendkim.conf.sample.
+
+# Log to syslog
+Syslog yes
+SyslogSuccess yes
+LogWhy no
+# Required to use local socket with MTAs that access the socket as a non-
+# privileged user (e.g. Postfix)
+UMask 002
+
+Domain <%= @domain %>
+SubDomains yes
+
+# set internal hosts to all the known hosts, like mydomains?
+
+# can we generate a larger key and get it in dns?
+KeyFile <%= @dkim_key %>
+
+Selector <%= @selector %>
+
+# Commonly-used options; the commented-out versions show the defaults.
+Canonicalization relaxed
+#Mode sv
+#ADSPDiscard no
+
+SignatureAlgorithm rsa-sha256
+
+# Always oversign From (sign using actual From and a null From to prevent
+# malicious insertion of header fields (From and/or others) between the signer
+# and the verifier). From is oversigned by default in the Debian package
+# because it is often the identity key used by reputation systems and thus
+# somewhat security sensitive.
+OversignHeaders From
+
+# List domains to use for RFC 6541 DKIM Authorized Third-Party Signatures
+# (ATPS) (experimental)
+
+#ATPSDomains example.com
+
+RemoveOldSignatures yes
+
+Mode sv
+BaseDirectory /var/tmp
diff --git a/puppet/modules/openvpn/.fixtures.yml b/puppet/modules/openvpn/.fixtures.yml
new file mode 100644
index 00000000..1125ecca
--- /dev/null
+++ b/puppet/modules/openvpn/.fixtures.yml
@@ -0,0 +1,6 @@
+fixtures:
+ repositories:
+ concat: git://github.com/ripienaar/puppet-concat.git
+ symlinks:
+ openvpn: "#{source_dir}"
+
diff --git a/puppet/modules/openvpn/.gitignore b/puppet/modules/openvpn/.gitignore
new file mode 100644
index 00000000..6fd248b3
--- /dev/null
+++ b/puppet/modules/openvpn/.gitignore
@@ -0,0 +1,3 @@
+pkg
+spec/fixtures
+.vagrant
diff --git a/puppet/modules/openvpn/.rvmrc b/puppet/modules/openvpn/.rvmrc
new file mode 100644
index 00000000..6fbfb7f1
--- /dev/null
+++ b/puppet/modules/openvpn/.rvmrc
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+
+# This is an RVM Project .rvmrc file, used to automatically load the ruby
+# development environment upon cd'ing into the directory
+
+# First we specify our desired <ruby>[@<gemset>], the @gemset name is optional,
+# Only full ruby name is supported here, for short names use:
+# echo "rvm use 1.9.3" > .rvmrc
+environment_id="ruby-1.9.3-p194@puppet"
+
+# Uncomment the following lines if you want to verify rvm version per project
+# rvmrc_rvm_version="1.15.8 (stable)" # 1.10.1 seems like a safe start
+# eval "$(echo ${rvm_version}.${rvmrc_rvm_version} | awk -F. '{print "[[ "$1*65536+$2*256+$3" -ge "$4*65536+$5*256+$6" ]]"}' )" || {
+# echo "This .rvmrc file requires at least RVM ${rvmrc_rvm_version}, aborting loading."
+# return 1
+# }
+
+# First we attempt to load the desired environment directly from the environment
+# file. This is very fast and efficient compared to running through the entire
+# CLI and selector. If you want feedback on which environment was used then
+# insert the word 'use' after --create as this triggers verbose mode.
+if [[ -d "${rvm_path:-$HOME/.rvm}/environments"
+ && -s "${rvm_path:-$HOME/.rvm}/environments/$environment_id" ]]
+then
+ \. "${rvm_path:-$HOME/.rvm}/environments/$environment_id"
+ [[ -s "${rvm_path:-$HOME/.rvm}/hooks/after_use" ]] &&
+ \. "${rvm_path:-$HOME/.rvm}/hooks/after_use" || true
+ if [[ $- == *i* ]] # check for interactive shells
+ then echo "Using: $(tput setaf 2)$GEM_HOME$(tput sgr0)" # show the user the ruby and gemset they are using in green
+ else echo "Using: $GEM_HOME" # don't use colors in non-interactive shells
+ fi
+else
+ # If the environment file has not yet been created, use the RVM CLI to select.
+ rvm --create use "$environment_id" || {
+ echo "Failed to create RVM environment '${environment_id}'."
+ return 1
+ }
+fi
diff --git a/puppet/modules/openvpn/.travis.yml b/puppet/modules/openvpn/.travis.yml
new file mode 100644
index 00000000..da5c389d
--- /dev/null
+++ b/puppet/modules/openvpn/.travis.yml
@@ -0,0 +1,29 @@
+language: ruby
+bundler_args: --without development
+script: "bundle exec rake spec SPEC_OPTS='--format documentation'"
+rvm:
+ - 1.8.7
+ - 1.9.3
+ - 2.0.0
+script:
+ - "rake lint"
+ - "rake spec SPEC_OPTS='--format documentation'"
+env:
+ - PUPPET_VERSION="~> 2.7.0"
+ - PUPPET_VERSION="~> 3.0.0"
+ - PUPPET_VERSION="~> 3.1.0"
+ - PUPPET_VERSION="~> 3.2.0"
+matrix:
+ exclude:
+ - rvm: 1.9.3
+ env: PUPPET_VERSION="~> 2.7.0"
+ - rvm: 2.0.0
+ env: PUPPET_VERSION="~> 2.7.0"
+ - rvm: 2.0.0
+ env: PUPPET_VERSION="~> 3.0.0"
+ - rvm: 2.0.0
+ env: PUPPET_VERSION="~> 3.1.0"
+notifications:
+ email: false
+ on_success: always
+ on_failure: always
diff --git a/puppet/modules/openvpn/Gemfile b/puppet/modules/openvpn/Gemfile
new file mode 100644
index 00000000..68e10e7d
--- /dev/null
+++ b/puppet/modules/openvpn/Gemfile
@@ -0,0 +1,7 @@
+source :rubygems
+
+puppetversion = ENV['PUPPET_VERSION']
+gem 'puppet', puppetversion, :require => false
+gem 'puppet-lint'
+gem 'rspec-puppet'
+gem 'puppetlabs_spec_helper'
diff --git a/puppet/modules/openvpn/Gemfile.lock b/puppet/modules/openvpn/Gemfile.lock
new file mode 100644
index 00000000..9fce3f98
--- /dev/null
+++ b/puppet/modules/openvpn/Gemfile.lock
@@ -0,0 +1,36 @@
+GEM
+ remote: http://rubygems.org/
+ specs:
+ diff-lcs (1.1.3)
+ facter (1.6.17)
+ hiera (1.0.0)
+ metaclass (0.0.1)
+ mocha (0.13.1)
+ metaclass (~> 0.0.1)
+ puppet (3.0.2)
+ facter (~> 1.6.11)
+ hiera (~> 1.0.0)
+ puppetlabs_spec_helper (0.4.0)
+ mocha (>= 0.10.5)
+ rake
+ rspec (>= 2.9.0)
+ rspec-puppet (>= 0.1.1)
+ rake (10.0.3)
+ rspec (2.12.0)
+ rspec-core (~> 2.12.0)
+ rspec-expectations (~> 2.12.0)
+ rspec-mocks (~> 2.12.0)
+ rspec-core (2.12.2)
+ rspec-expectations (2.12.1)
+ diff-lcs (~> 1.1.3)
+ rspec-mocks (2.12.1)
+ rspec-puppet (0.1.5)
+ rspec
+
+PLATFORMS
+ ruby
+
+DEPENDENCIES
+ puppet
+ puppetlabs_spec_helper
+ rspec-puppet
diff --git a/puppet/modules/openvpn/LICENSE b/puppet/modules/openvpn/LICENSE
new file mode 100644
index 00000000..f433b1a5
--- /dev/null
+++ b/puppet/modules/openvpn/LICENSE
@@ -0,0 +1,177 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
diff --git a/puppet/modules/openvpn/Modulefile b/puppet/modules/openvpn/Modulefile
new file mode 100644
index 00000000..679e7e64
--- /dev/null
+++ b/puppet/modules/openvpn/Modulefile
@@ -0,0 +1,11 @@
+name 'luxflux-openvpn'
+version '2.1.0'
+source 'https://github.com/luxflux/puppet-openvpn'
+author 'luxflux'
+license 'Apache 2.0'
+summary 'OpenVPN server puppet module'
+description 'Puppet module to manage OpenVPN servers'
+project_page 'https://github.com/luxflux/puppet-openvpn'
+
+## Add dependencies, if any:
+dependency 'ripienaar/concat', '0.2.0'
diff --git a/puppet/modules/openvpn/Rakefile b/puppet/modules/openvpn/Rakefile
new file mode 100644
index 00000000..14f1c246
--- /dev/null
+++ b/puppet/modules/openvpn/Rakefile
@@ -0,0 +1,2 @@
+require 'rubygems'
+require 'puppetlabs_spec_helper/rake_tasks'
diff --git a/puppet/modules/openvpn/Readme.markdown b/puppet/modules/openvpn/Readme.markdown
new file mode 100644
index 00000000..6bcf49ea
--- /dev/null
+++ b/puppet/modules/openvpn/Readme.markdown
@@ -0,0 +1,54 @@
+# OpenVPN Puppet module
+
+Puppet module to manage OpenVPN servers
+
+## Features:
+
+* Client-specific rules and access policies
+* Generated client configurations and SSL certificates
+* Downloadable client configurations and SSL certificates for easy client setup
+* Support for multiple server instances
+
+Tested on Ubuntu Precise Pangolin, CentOS 6, RedHat 6.
+
+
+## Dependencies
+ - [puppet-concat](https://github.com/ripienaar/puppet-concat)
+
+
+## Example
+
+```puppet
+ # add a server instance
+ openvpn::server { 'winterthur':
+ country => 'CH',
+ province => 'ZH',
+ city => 'Winterthur',
+ organization => 'example.org',
+ email => 'root@example.org',
+ server => '10.200.200.0 255.255.255.0'
+ }
+
+ # define clients
+ openvpn::client { 'client1':
+ server => 'winterthur'
+ }
+ openvpn::client { 'client2':
+ server => 'winterthur'
+ }
+
+ openvpn::client_specific_config { 'client1':
+ server => 'winterthur',
+ ifconfig => '10.200.200.50 255.255.255.0'
+ }
+```
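+
+After a Puppet run, the generated client bundles end up on the server under
+```download-configs```. The listing below is only a sketch, reusing the server
+and client names from the example above (see ```manifests/client.pp``` for how
+these files are built):
+
+```bash
+# tarball and inline .ovpn profile generated for client1 of server 'winterthur'
+ls /etc/openvpn/winterthur/download-configs/client1.tar.gz
+ls /etc/openvpn/winterthur/download-configs/client1.ovpn
+```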
+
+Don't forget the [sysctl](https://github.com/luxflux/puppet-sysctl) directive ```net.ipv4.ip_forward```!
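+
+For a quick manual check that forwarding is actually on (a sketch; persist the
+setting with the sysctl module above or in ```/etc/sysctl.conf```, whichever you
+use):
+
+```bash
+sysctl -w net.ipv4.ip_forward=1   # enable immediately
+sysctl -n net.ipv4.ip_forward     # should print 1
+```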
+
+
+# Contributors
+
+These fine folks helped to get this module this far:
+* [@jlambert121](https://github.com/jlambert121)
+* [@jlk](https://github.com/jlk)
+* [@elisiano](https://github.com/elisiano)
diff --git a/puppet/modules/openvpn/Vagrantfile b/puppet/modules/openvpn/Vagrantfile
new file mode 100644
index 00000000..88875ff8
--- /dev/null
+++ b/puppet/modules/openvpn/Vagrantfile
@@ -0,0 +1,42 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+def server_config(config)
+ config.vm.provision :puppet, :module_path => '..' do |puppet|
+ puppet.manifests_path = "vagrant"
+ puppet.manifest_file = "server.pp"
+ end
+end
+
+def client_config(config)
+ config.vm.provision :puppet, :module_path => '..' do |puppet|
+ puppet.manifests_path = "vagrant"
+ puppet.manifest_file = "client.pp"
+ end
+end
+
+Vagrant::Config.run do |config|
+
+ config.vm.define :server_ubuntu do |c|
+ c.vm.box = 'precise64'
+ server_config c
+ c.vm.network :hostonly, '10.255.255.10'
+ end
+
+ config.vm.define :server_centos do |c|
+ c.vm.box = 'centos63'
+
+ c.vm.provision :shell, :inline => 'if [ ! -f rpmforge-release-0.5.2-2.el6.rf.x86_64.rpm ]; then wget -q http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.2-2.el6.rf.x86_64.rpm; fi'
+ c.vm.provision :shell, :inline => 'yum install -y rpmforge-release-0.5.2-2.el6.rf.x86_64.rpm || exit 0'
+
+ server_config c
+ c.vm.network :hostonly, '10.255.255.11'
+ end
+
+ config.vm.define :client_ubuntu do |c|
+ c.vm.box = 'precise64'
+ client_config c
+ c.vm.network :hostonly, '10.255.255.20'
+ end
+
+end
diff --git a/puppet/modules/openvpn/manifests/client.pp b/puppet/modules/openvpn/manifests/client.pp
new file mode 100644
index 00000000..92c6aa4e
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/client.pp
@@ -0,0 +1,187 @@
+# == Define: openvpn::client
+#
+# This define creates the client certs for a specified openvpn server as well
+# as creating a tarball that can be directly imported into openvpn clients
+#
+#
+# === Parameters
+#
+# [*server*]
+# String. Name of the corresponding openvpn endpoint
+# Required
+#
+# [*compression*]
+#   String. Which compression algorithm to use
+# Default: comp-lzo
+# Options: comp-lzo or '' (disable compression)
+#
+# [*dev*]
+# String. Device method
+# Default: tun
+# Options: tun (routed connections), tap (bridged connections)
+#
+# [*mute*]
+# Integer. Set log mute level
+# Default: 20
+#
+# [*mute_replay_warnings*]
+# Boolean. Silence duplicate packet warnings (common on wireless networks)
+# Default: true
+#
+# [*nobind*]
+# Boolean. Whether or not to bind to a specific port number
+# Default: true
+#
+# [*persist_key*]
+# Boolean. Try to retain access to resources that may be unavailable
+# because of privilege downgrades
+# Default: true
+#
+# [*persist_tun*]
+# Boolean. Try to retain access to resources that may be unavailable
+# because of privilege downgrades
+# Default: true
+#
+# [*port*]
+# Integer. The port the openvpn server service is running on
+# Default: 1194
+#
+# [*proto*]
+# String. What IP protocol is being used.
+# Default: tcp
+# Options: tcp or udp
+#
+# [*remote_host*]
+# String. The IP or hostname of the openvpn server service
+# Default: FQDN
+#
+# [*resolv_retry*]
+# Integer/String. How many seconds should the openvpn client try to resolve
+# the server's hostname
+# Default: infinite
+# Options: Integer or infinite
+#
+# [*verb*]
+# Integer. Level of logging verbosity
+# Default: 3
+#
+#
+# === Examples
+#
+# openvpn::client {
+# 'my_user':
+# server => 'contractors',
+# remote_host => 'vpn.mycompany.com'
+# }
+#
+# * Removal:
+# Manual process right now, todo for the future
+#
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+# * John Kinsella <mailto:jlkinsel@gmail.com>
+# * Justin Lambert <mailto:jlambert@letsevenup.com>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+define openvpn::client(
+ $server,
+ $compression = 'comp-lzo',
+ $dev = 'tun',
+ $mute = '20',
+ $mute_replay_warnings = true,
+ $nobind = true,
+ $persist_key = true,
+ $persist_tun = true,
+ $port = '1194',
+ $proto = 'tcp',
+ $remote_host = $::fqdn,
+ $resolv_retry = 'infinite',
+ $verb = '3',
+) {
+
+ Openvpn::Server[$server] ->
+ Openvpn::Client[$name]
+
+ exec {
+ "generate certificate for ${name} in context of ${server}":
+ command => ". ./vars && ./pkitool ${name}",
+ cwd => "/etc/openvpn/${server}/easy-rsa",
+ creates => "/etc/openvpn/${server}/easy-rsa/keys/${name}.crt",
+ provider => 'shell';
+ }
+
+ file {
+ [ "/etc/openvpn/${server}/download-configs/${name}",
+ "/etc/openvpn/${server}/download-configs/${name}/keys"]:
+ ensure => directory;
+
+ "/etc/openvpn/${server}/download-configs/${name}/keys/${name}.crt":
+ ensure => link,
+ target => "/etc/openvpn/${server}/easy-rsa/keys/${name}.crt",
+ require => Exec["generate certificate for ${name} in context of ${server}"];
+
+ "/etc/openvpn/${server}/download-configs/${name}/keys/${name}.key":
+ ensure => link,
+ target => "/etc/openvpn/${server}/easy-rsa/keys/${name}.key",
+ require => Exec["generate certificate for ${name} in context of ${server}"];
+
+ "/etc/openvpn/${server}/download-configs/${name}/keys/ca.crt":
+ ensure => link,
+ target => "/etc/openvpn/${server}/easy-rsa/keys/ca.crt",
+ require => Exec["generate certificate for ${name} in context of ${server}"];
+
+ "/etc/openvpn/${server}/download-configs/${name}/${name}.conf":
+ owner => root,
+ group => root,
+ mode => '0444',
+ content => template('openvpn/client.erb'),
+ notify => Exec["tar the thing ${server} with ${name}"];
+ }
+
+ exec {
+ "tar the thing ${server} with ${name}":
+ cwd => "/etc/openvpn/${server}/download-configs/",
+ command => "/bin/rm ${name}.tar.gz; tar --exclude=\\*.conf.d -chzvf ${name}.tar.gz ${name}",
+ refreshonly => true,
+ require => [ File["/etc/openvpn/${server}/download-configs/${name}/${name}.conf"],
+ File["/etc/openvpn/${server}/download-configs/${name}/keys/ca.crt"],
+ File["/etc/openvpn/${server}/download-configs/${name}/keys/${name}.key"],
+ File["/etc/openvpn/${server}/download-configs/${name}/keys/${name}.crt"]
+ ],
+ notify => Exec["generate ${name}.ovpn in ${server}"];
+ }
+
+ exec {
+ "generate ${name}.ovpn in ${server}":
+ cwd => "/etc/openvpn/${server}/download-configs/",
+ command => "/bin/rm ${name}.ovpn; cat ${name}/${name}.conf|perl -lne 'if(m|^ca keys/ca.crt|){ chomp(\$ca=`cat ${name}/keys/ca.crt`); print \"<ca>\n\$ca\n</ca>\"} elsif(m|^cert keys/${name}.crt|) { chomp(\$crt=`cat ${name}/keys/${name}.crt`); print \"<cert>\n\$crt\n</cert>\"} elsif(m|^key keys/${name}.key|){ chomp(\$key=`cat ${name}/keys/${name}.key`); print \"<key>\n\$key\n</key>\"} else { print} ' > ${name}.ovpn",
+ refreshonly => true,
+ require => [ File["/etc/openvpn/${server}/download-configs/${name}/${name}.conf"],
+ File["/etc/openvpn/${server}/download-configs/${name}/keys/ca.crt"],
+ File["/etc/openvpn/${server}/download-configs/${name}/keys/${name}.key"],
+ File["/etc/openvpn/${server}/download-configs/${name}/keys/${name}.crt"],
+ ],
+ }
+
+ file { "/etc/openvpn/${server}/download-configs/${name}.ovpn":
+ mode => '0400',
+ require => Exec["generate ${name}.ovpn in ${server}"],
+ }
+}
diff --git a/puppet/modules/openvpn/manifests/client_specific_config.pp b/puppet/modules/openvpn/manifests/client_specific_config.pp
new file mode 100644
index 00000000..4287421a
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/client_specific_config.pp
@@ -0,0 +1,79 @@
+# == Define: openvpn::client_specific_config
+#
+# This define configures options which will be pushed by the server to a
+# specific client only. This feature is explained here:
+# http://openvpn.net/index.php/open-source/documentation/howto.html#policy
+#
+# === Parameters
+#
+# All the parameters are explained in the openvpn documentation:
+# http://openvpn.net/index.php/open-source/documentation/howto.html#policy
+#
+# [*server*]
+# String. Name of the corresponding openvpn endpoint
+# Required
+#
+# [*iroute*]
+# Array. Array of iroute combinations.
+# Default: []
+#
+# [*ifconfig*]
+# String. IP configuration to push to the client.
+# Default: false
+#
+# [*dhcp_options*]
+# Array. DHCP options to push to the client.
+# Default: []
+#
+#
+# === Examples
+#
+# openvpn::client_specific_config {
+# 'vpn_client':
+# server => 'contractors',
+# iroute => ['10.0.1.0 255.255.255.0'],
+# ifconfig => '10.10.10.1 10.10.10.2',
+# dhcp_options => ['DNS 8.8.8.8']
+# }
+#
+# * Removal:
+# Manual process right now, todo for the future
+#
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+define openvpn::client_specific_config(
+ $server,
+ $iroute = [],
+ $ifconfig = false,
+ $dhcp_options = []
+) {
+
+ Openvpn::Server[$server] ->
+ Openvpn::Client[$name] ->
+ Openvpn::Client_specific_config[$name]
+
+ file { "/etc/openvpn/${server}/client-configs/${name}":
+ ensure => present,
+ content => template('openvpn/client_specific_config.erb')
+ }
+
+}
diff --git a/puppet/modules/openvpn/manifests/config.pp b/puppet/modules/openvpn/manifests/config.pp
new file mode 100644
index 00000000..32b32094
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/config.pp
@@ -0,0 +1,52 @@
+# == Class: openvpn::config
+#
+# This class sets up the openvpn environment as well as the default config file
+#
+#
+# === Examples
+#
+# This class should not be directly invoked
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+# * John Kinsella <mailto:jlkinsel@gmail.com>
+# * Justin Lambert <mailto:jlambert@letsevenup.com>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+class openvpn::config {
+
+ if $::osfamily == 'Debian' {
+ include concat::setup
+
+ concat {
+ '/etc/default/openvpn':
+ owner => root,
+ group => root,
+ mode => 644,
+ warn => true;
+ }
+
+ concat::fragment {
+ 'openvpn.default.header':
+ content => template('openvpn/etc-default-openvpn.erb'),
+ target => '/etc/default/openvpn',
+ order => 01;
+ }
+ }
+}
diff --git a/puppet/modules/openvpn/manifests/init.pp b/puppet/modules/openvpn/manifests/init.pp
new file mode 100644
index 00000000..7e07f025
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/init.pp
@@ -0,0 +1,43 @@
+# == Class: openvpn
+#
+# This module installs the openvpn service, configures vpn endpoints, generates
+# client certificates, and generates client config files
+#
+#
+# === Examples
+#
+# * Installation:
+# class { 'openvpn': }
+#
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+# * John Kinsella <mailto:jlkinsel@gmail.com>
+# * Justin Lambert <mailto:jlambert@letsevenup.com>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+class openvpn {
+
+ class {'openvpn::params': } ->
+ class {'openvpn::install': } ->
+ class {'openvpn::config': } ~>
+ class {'openvpn::service': } ->
+ Class['openvpn']
+
+}
diff --git a/puppet/modules/openvpn/manifests/install.pp b/puppet/modules/openvpn/manifests/install.pp
new file mode 100644
index 00000000..a230373a
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/install.pp
@@ -0,0 +1,46 @@
+# == Class: openvpn::install
+#
+# This class installs the openvpn package and creates the base configuration
+# directories
+#
+#
+# === Examples
+#
+# This class should not be directly invoked
+#
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+# * John Kinsella <mailto:jlkinsel@gmail.com>
+# * Justin Lambert <mailto:jlambert@letsevenup.com>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+class openvpn::install {
+
+ package {
+ 'openvpn':
+ ensure => installed;
+ }
+
+ file {
+ [ '/etc/openvpn', '/etc/openvpn/keys' ]:
+ ensure => directory,
+ require => Package['openvpn'];
+ }
+}
diff --git a/puppet/modules/openvpn/manifests/params.pp b/puppet/modules/openvpn/manifests/params.pp
new file mode 100644
index 00000000..33495270
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/params.pp
@@ -0,0 +1,37 @@
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+class openvpn::params {
+
+ $group = $::osfamily ? {
+ 'RedHat' => 'nobody',
+ default => 'nogroup'
+ }
+
+ $easyrsa_source = $::osfamily ? {
+ 'RedHat' => $::operatingsystemmajrelease ? {
+ 6 => '/usr/share/openvpn/easy-rsa/2.0',
+ default => '/usr/share/doc/openvpn-2.2.2/easy-rsa/2.0'
+ },
+ default => '/usr/share/doc/openvpn/examples/easy-rsa/2.0'
+ }
+
+ $link_openssl_cnf = $::osfamily ? {
+ /(Debian|RedHat)/ => true,
+ default => false
+ }
+
+}
diff --git a/puppet/modules/openvpn/manifests/server.pp b/puppet/modules/openvpn/manifests/server.pp
new file mode 100644
index 00000000..649048c4
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/server.pp
@@ -0,0 +1,233 @@
+# == Define: openvpn::server
+#
+# This define creates the openvpn server instance and ssl certificates
+#
+#
+# === Parameters
+#
+# [*country*]
+# String. Country to be used for the SSL certificate
+#
+# [*province*]
+# String. Province to be used for the SSL certificate
+#
+# [*city*]
+# String. City to be used for the SSL certificate
+#
+# [*organization*]
+# String. Organization to be used for the SSL certificate
+#
+# [*email*]
+# String. Email address to be used for the SSL certificate
+#
+# [*compression*]
+#   String. Which compression algorithm to use
+# Default: comp-lzo
+# Options: comp-lzo or '' (disable compression)
+#
+# [*dev*]
+# String. Device method
+# Default: tun
+# Options: tun (routed connections), tap (bridged connections)
+#
+# [*user*]
+#   String. User to drop privileges to after startup
+#   Default: nobody
+#
+# [*group*]
+#   String. Group to drop privileges to after startup
+#   Default: depends on your $::osfamily
+#
+# [*ipp*]
+# Boolean. Persist ifconfig information to a file to retain client IP
+# addresses between sessions
+# Default: false
+#
+# [*local*]
+# String. Interface for openvpn to bind to.
+# Default: $::ipaddress_eth0
+# Options: An IP address or '' to bind to all ip addresses
+#
+# [*logfile*]
+# String. Logfile for this openvpn server
+# Default: false
+# Options: false (syslog) or log file name
+#
+# [*port*]
+# Integer. The port the openvpn server service is running on
+# Default: 1194
+#
+# [*proto*]
+# String. What IP protocol is being used.
+# Default: tcp
+# Options: tcp or udp
+#
+# [*status_log*]
+# String. Logfile for periodic dumps of the vpn service status
+# Default: "${name}/openvpn-status.log"
+#
+# [*server*]
+# String. Network to assign client addresses out of
+# Default: None. Required in tun mode, not in tap mode
+#
+# [*push*]
+# Array. Options to push out to the client. This can include routes, DNS
+# servers, DNS search domains, and many other options.
+# Default: []
+#
+#
+# === Examples
+#
+#  openvpn::server {
+#    'winterthur':
+#      country      => 'CH',
+#      province     => 'ZH',
+#      city         => 'Winterthur',
+#      organization => 'example.org',
+#      email        => 'root@example.org',
+#      server       => '10.200.200.0 255.255.255.0'
+#  }
+#
+# * Removal:
+# Manual process right now, todo for the future
+#
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+# * John Kinsella <mailto:jlkinsel@gmail.com>
+# * Justin Lambert <mailto:jlambert@letsevenup.com>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+define openvpn::server(
+ $country,
+ $province,
+ $city,
+ $organization,
+ $email,
+ $compression = 'comp-lzo',
+ $dev = 'tun0',
+ $user = 'nobody',
+ $group = false,
+ $ipp = false,
+ $ip_pool = [],
+ $local = $::ipaddress_eth0,
+ $logfile = false,
+ $port = '1194',
+ $proto = 'tcp',
+ $status_log = "${name}/openvpn-status.log",
+ $server = '',
+ $push = []
+) {
+
+ include openvpn
+ Class['openvpn::install'] ->
+ Openvpn::Server[$name] ~>
+ Class['openvpn::service']
+
+ $tls_server = $proto ? {
+ /tcp/ => true,
+ default => false
+ }
+
+ $group_to_set = $group ? {
+ false => $openvpn::params::group,
+ default => $group
+ }
+
+ file {
+ ["/etc/openvpn/${name}", "/etc/openvpn/${name}/client-configs", "/etc/openvpn/${name}/download-configs" ]:
+ ensure => directory;
+ }
+
+ exec {
+ "copy easy-rsa to openvpn config folder ${name}":
+ command => "/bin/cp -r ${openvpn::params::easyrsa_source} /etc/openvpn/${name}/easy-rsa",
+ creates => "/etc/openvpn/${name}/easy-rsa",
+ notify => Exec["fix_easyrsa_file_permissions_${name}"],
+ require => File["/etc/openvpn/${name}"];
+ }
+
+ exec {
+ "fix_easyrsa_file_permissions_${name}":
+ refreshonly => true,
+ command => "/bin/chmod 755 /etc/openvpn/${name}/easy-rsa/*";
+ }
+
+ file {
+ "/etc/openvpn/${name}/easy-rsa/vars":
+ ensure => present,
+ content => template('openvpn/vars.erb'),
+ require => Exec["copy easy-rsa to openvpn config folder ${name}"];
+ }
+
+ file {
+ "/etc/openvpn/${name}/easy-rsa/openssl.cnf":
+ require => Exec["copy easy-rsa to openvpn config folder ${name}"];
+ }
+
+ if $openvpn::params::link_openssl_cnf == true {
+ File["/etc/openvpn/${name}/easy-rsa/openssl.cnf"] {
+ ensure => link,
+ target => "/etc/openvpn/${name}/easy-rsa/openssl-1.0.0.cnf"
+ }
+ }
+
+ exec {
+ "generate dh param ${name}":
+ command => '. ./vars && ./clean-all && ./build-dh',
+ cwd => "/etc/openvpn/${name}/easy-rsa",
+ creates => "/etc/openvpn/${name}/easy-rsa/keys/dh1024.pem",
+ provider => 'shell',
+ require => File["/etc/openvpn/${name}/easy-rsa/vars"];
+
+ "initca ${name}":
+ command => '. ./vars && ./pkitool --initca',
+ cwd => "/etc/openvpn/${name}/easy-rsa",
+ creates => "/etc/openvpn/${name}/easy-rsa/keys/ca.key",
+ provider => 'shell',
+ require => [ Exec["generate dh param ${name}"], File["/etc/openvpn/${name}/easy-rsa/openssl.cnf"] ];
+
+ "generate server cert ${name}":
+ command => '. ./vars && ./pkitool --server server',
+ cwd => "/etc/openvpn/${name}/easy-rsa",
+ creates => "/etc/openvpn/${name}/easy-rsa/keys/server.key",
+ provider => 'shell',
+ require => Exec["initca ${name}"];
+ }
+
+ file {
+ "/etc/openvpn/${name}/keys":
+ ensure => link,
+ target => "/etc/openvpn/${name}/easy-rsa/keys",
+ require => Exec["copy easy-rsa to openvpn config folder ${name}"];
+ }
+
+ if $::osfamily == 'Debian' {
+ concat::fragment {
+ "openvpn.default.autostart.${name}":
+ content => "AUTOSTART=\"\$AUTOSTART ${name}\"\n",
+ target => '/etc/default/openvpn',
+ order => 10;
+ }
+ }
+
+ file {
+ "/etc/openvpn/${name}.conf":
+ owner => root,
+ group => root,
+ mode => '0444',
+ content => template('openvpn/server.erb');
+ }
+}
diff --git a/puppet/modules/openvpn/manifests/service.pp b/puppet/modules/openvpn/manifests/service.pp
new file mode 100644
index 00000000..54e8db7d
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/service.pp
@@ -0,0 +1,36 @@
+# == Class: openvpn::service
+#
+# This class maintains the openvpn service
+#
+#
+# === Examples
+#
+# This class should not be directly invoked
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+# * John Kinsella <mailto:jlkinsel@gmail.com>
+# * Justin Lambert <mailto:jlambert@letsevenup.com>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+class openvpn::service {
+ service {
+ 'openvpn':
+ ensure => running,
+ enable => true,
+ hasrestart => true,
+ hasstatus => true;
+ }
+}
diff --git a/puppet/modules/openvpn/spec/classes/openvpn_config_spec.rb b/puppet/modules/openvpn/spec/classes/openvpn_config_spec.rb
new file mode 100644
index 00000000..bbb63a77
--- /dev/null
+++ b/puppet/modules/openvpn/spec/classes/openvpn_config_spec.rb
@@ -0,0 +1,15 @@
+require 'spec_helper'
+
+describe 'openvpn::config', :type => :class do
+
+ it { should create_class('openvpn::config') }
+
+ context "on Debian based machines" do
+ let (:facts) { { :osfamily => 'Debian', :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should contain_class('concat::setup') }
+ it { should contain_concat('/etc/default/openvpn') }
+ it { should contain_concat__fragment('openvpn.default.header') }
+ end
+
+end
diff --git a/puppet/modules/openvpn/spec/classes/openvpn_init_spec.rb b/puppet/modules/openvpn/spec/classes/openvpn_init_spec.rb
new file mode 100644
index 00000000..45dcc9bf
--- /dev/null
+++ b/puppet/modules/openvpn/spec/classes/openvpn_init_spec.rb
@@ -0,0 +1,9 @@
+require 'spec_helper'
+
+describe 'openvpn', :type => :class do
+
+ let (:facts) { { :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should create_class('openvpn') }
+
+end
diff --git a/puppet/modules/openvpn/spec/classes/openvpn_install_spec.rb b/puppet/modules/openvpn/spec/classes/openvpn_install_spec.rb
new file mode 100644
index 00000000..cdb31358
--- /dev/null
+++ b/puppet/modules/openvpn/spec/classes/openvpn_install_spec.rb
@@ -0,0 +1,11 @@
+require 'spec_helper'
+
+describe 'openvpn::install', :type => :class do
+
+ it { should create_class('openvpn::install') }
+ it { should contain_package('openvpn') }
+
+ it { should contain_file('/etc/openvpn').with('ensure' => 'directory') }
+ it { should contain_file('/etc/openvpn/keys').with('ensure' => 'directory') }
+
+end
diff --git a/puppet/modules/openvpn/spec/classes/openvpn_service_spec.rb b/puppet/modules/openvpn/spec/classes/openvpn_service_spec.rb
new file mode 100644
index 00000000..f427e7f1
--- /dev/null
+++ b/puppet/modules/openvpn/spec/classes/openvpn_service_spec.rb
@@ -0,0 +1,13 @@
+require 'spec_helper'
+
+describe 'openvpn::service', :type => :class do
+
+ let (:facts) { { :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should create_class('openvpn::service') }
+ it { should contain_service('openvpn').with(
+ 'ensure' => 'running',
+ 'enable' => true
+ ) }
+
+end
diff --git a/puppet/modules/openvpn/spec/defines/openvpn_client_spec.rb b/puppet/modules/openvpn/spec/defines/openvpn_client_spec.rb
new file mode 100644
index 00000000..a4b580e8
--- /dev/null
+++ b/puppet/modules/openvpn/spec/defines/openvpn_client_spec.rb
@@ -0,0 +1,88 @@
+require 'spec_helper'
+
+describe 'openvpn::client', :type => :define do
+ let(:title) { 'test_client' }
+ let(:params) { { 'server' => 'test_server' } }
+ let(:facts) { { :fqdn => 'somehost', :concat_basedir => '/var/lib/puppet/concat' } }
+ let(:pre_condition) do
+ 'openvpn::server { "test_server":
+ country => "CO",
+ province => "ST",
+ city => "Some City",
+ organization => "example.org",
+ email => "testemail@example.org"
+ }'
+ end
+
+ it { should contain_exec('generate certificate for test_client in context of test_server') }
+
+ [ 'test_client', 'test_client/keys'].each do |directory|
+ it { should contain_file("/etc/openvpn/test_server/download-configs/#{directory}") }
+ end
+
+ [ 'test_client.crt', 'test_client.key', 'ca.crt' ].each do |file|
+ it { should contain_file("/etc/openvpn/test_server/download-configs/test_client/keys/#{file}").with(
+ 'ensure' => 'link',
+ 'target' => "/etc/openvpn/test_server/easy-rsa/keys/#{file}"
+ )}
+ end
+
+ it { should contain_exec('tar the thing test_server with test_client').with(
+ 'cwd' => '/etc/openvpn/test_server/download-configs/',
+ 'command' => '/bin/rm test_client.tar.gz; tar --exclude=\*.conf.d -chzvf test_client.tar.gz test_client'
+ ) }
+
+ context "setting the minimum parameters" do
+ let(:params) { { 'server' => 'test_server' } }
+ let(:facts) { { :fqdn => 'somehost', :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^client$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^ca\s+keys\/ca\.crt$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^cert\s+keys\/test_client.crt$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^key\s+keys\/test_client\.key$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^dev\s+tun$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^proto\s+tcp$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^remote\s+somehost\s+1194$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^comp-lzo$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^resolv-retry\s+infinite$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^nobind$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^persist-key$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^persist-tun$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^mute-replay-warnings$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^ns\-cert\-type\s+server$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^verb\s+3$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^mute\s+20$/)}
+ end
+
+ context "setting all of the parameters" do
+ let(:params) { {
+ 'server' => 'test_server',
+ 'compression' => 'comp-something',
+ 'dev' => 'tap',
+ 'mute' => 10,
+ 'mute_replay_warnings' => false,
+ 'nobind' => false,
+ 'persist_key' => false,
+ 'persist_tun' => false,
+ 'port' => '123',
+ 'proto' => 'udp',
+ 'remote_host' => 'somewhere',
+ 'resolv_retry' => '2m',
+ 'verb' => '1'
+ } }
+ let(:facts) { { :fqdn => 'somehost', :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^client$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^ca\s+keys\/ca\.crt$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^cert\s+keys\/test_client.crt$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^key\s+keys\/test_client\.key$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^dev\s+tap$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^proto\s+udp$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^remote\s+somewhere\s+123$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^comp-something$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^resolv-retry\s+2m$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^verb\s+1$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^mute\s+10$/)}
+ end
+
+end
diff --git a/puppet/modules/openvpn/spec/defines/openvpn_client_specific_config_spec.rb b/puppet/modules/openvpn/spec/defines/openvpn_client_specific_config_spec.rb
new file mode 100644
index 00000000..cfdab389
--- /dev/null
+++ b/puppet/modules/openvpn/spec/defines/openvpn_client_specific_config_spec.rb
@@ -0,0 +1,40 @@
+require 'spec_helper'
+
+describe 'openvpn::client_specific_config', :type => :define do
+ let(:title) { 'test_client' }
+ let(:params) { { 'server' => 'test_server' } }
+ let(:facts) { { :fqdn => 'somehost', :concat_basedir => '/var/lib/puppet/concat' } }
+ let(:pre_condition) do
+ [
+ 'openvpn::server { "test_server":
+ country => "CO",
+ province => "ST",
+ city => "Some City",
+ organization => "example.org",
+ email => "testemail@example.org"
+ }',
+ 'openvpn::client { "test_client":
+ server => "test_server"
+ }'
+ ].join
+ end
+
+ it { should contain_file('/etc/openvpn/test_server/client-configs/test_client') }
+
+ describe "setting no paramter at all" do
+ it { should contain_file('/etc/openvpn/test_server/client-configs/test_client').with_content(/\A\n\z/) }
+ end
+
+ describe "setting all parameters" do
+ let(:params) do
+ {:server => 'test_server',
+ :iroute => ['10.0.1.0 255.255.255.0'],
+ :ifconfig => '10.10.10.2 255.255.255.0',
+ :dhcp_options => ['DNS 8.8.8.8']}
+ end
+
+ it { should contain_file('/etc/openvpn/test_server/client-configs/test_client').with_content(/^iroute 10.0.1.0 255.255.255.0$/) }
+ it { should contain_file('/etc/openvpn/test_server/client-configs/test_client').with_content(/^ifconfig-push 10.10.10.2 255.255.255.0$/) }
+ it { should contain_file('/etc/openvpn/test_server/client-configs/test_client').with_content(/^push dhcp-option DNS 8.8.8.8$/) }
+ end
+end
diff --git a/puppet/modules/openvpn/spec/defines/openvpn_server_spec.rb b/puppet/modules/openvpn/spec/defines/openvpn_server_spec.rb
new file mode 100644
index 00000000..467be6aa
--- /dev/null
+++ b/puppet/modules/openvpn/spec/defines/openvpn_server_spec.rb
@@ -0,0 +1,165 @@
+require 'spec_helper'
+
+describe 'openvpn::server', :type => :define do
+
+ let(:title) { 'test_server' }
+
+ context "creating a server with the minimum parameters" do
+ let(:params) { {
+ 'country' => 'CO',
+ 'province' => 'ST',
+ 'city' => 'Some City',
+ 'organization' => 'example.org',
+ 'email' => 'testemail@example.org'
+ } }
+
+ let (:facts) { {
+ :ipaddress_eth0 => '1.2.3.4',
+ :network_eth0 => '1.2.3.0',
+ :netmask_eth0 => '255.255.255.0',
+ :concat_basedir => '/var/lib/puppet/concat',
+ :osfamily => 'anything_else'
+ } }
+
+ # Files associated with a server config
+ it { should contain_file('/etc/openvpn/test_server').with('ensure' => 'directory')}
+ it { should contain_file('/etc/openvpn/test_server/client-configs').with('ensure' => 'directory')}
+ it { should contain_file('/etc/openvpn/test_server/download-configs').with('ensure' => 'directory')}
+ it { should contain_file('/etc/openvpn/test_server/easy-rsa/vars')}
+ it { should contain_file('/etc/openvpn/test_server/easy-rsa/openssl.cnf')}
+ it { should contain_file('/etc/openvpn/test_server/keys').with(
+ 'ensure' => 'link',
+ 'target' => '/etc/openvpn/test_server/easy-rsa/keys'
+ )}
+
+  # Execs for working with certificates
+ it { should contain_exec('copy easy-rsa to openvpn config folder test_server').with(
+ 'command' => '/bin/cp -r /usr/share/doc/openvpn/examples/easy-rsa/2.0 /etc/openvpn/test_server/easy-rsa'
+ )}
+ it { should contain_exec('generate dh param test_server') }
+ it { should contain_exec('initca test_server') }
+ it { should contain_exec('generate server cert test_server') }
+
+ # VPN server config file itself
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^mode\s+server$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^client\-config\-dir\s+\/etc\/openvpn\/test_server\/client\-configs$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^ca\s+\/etc\/openvpn\/test_server\/keys\/ca.crt$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^cert\s+\/etc\/openvpn\/test_server\/keys\/server.crt$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^key\s+\/etc\/openvpn\/test_server\/keys\/server.key$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^dh\s+\/etc\/openvpn\/test_server\/keys\/dh1024.pem$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^proto\s+tcp-server$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^tls-server$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^port\s+1194$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^comp-lzo$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^group\s+nogroup$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^user\s+nobody$/) }
+ it { should_not contain_file('/etc/openvpn/test_server.conf').with_content(/^log\-append\s+test_server\/openvpn\.log$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^status\s+test_server\/openvpn\-status\.log$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^dev\s+tun0$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^local\s+1\.2\.3\.4$/) }
+ it { should_not contain_file('/etc/openvpn/test_server.conf').with_content(/^ifconfig-pool-persist/) }
+ end
+
+ context "creating a server setting all parameters" do
+ let(:params) { {
+ 'country' => 'CO',
+ 'province' => 'ST',
+ 'city' => 'Some City',
+ 'organization' => 'example.org',
+ 'email' => 'testemail@example.org',
+ 'compression' => 'fake_compression',
+ 'port' => '123',
+ 'proto' => 'udp',
+ 'group' => 'someone',
+ 'user' => 'someone',
+ 'logfile' => '/var/log/openvpn/test_server.log',
+ 'status_log' => '/var/log/openvpn/test_server_status.log',
+ 'dev' => 'tun1',
+ 'local' => '2.3.4.5',
+ 'ipp' => true,
+ 'server' => '2.3.4.0 255.255.0.0',
+ 'push' => [ 'dhcp-option DNS 172.31.0.30', 'route 172.31.0.0 255.255.0.0' ]
+ } }
+
+ let (:facts) { {
+ :ipaddress_eth0 => '1.2.3.4',
+ :network_eth0 => '1.2.3.0',
+ :netmask_eth0 => '255.255.255.0',
+ :concat_basedir => '/var/lib/puppet/concat'
+ } }
+
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^mode\s+server$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^client\-config\-dir\s+\/etc\/openvpn\/test_server\/client\-configs$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^ca\s+\/etc\/openvpn\/test_server\/keys\/ca.crt$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^cert\s+\/etc\/openvpn\/test_server\/keys\/server.crt$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^key\s+\/etc\/openvpn\/test_server\/keys\/server.key$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^dh\s+\/etc\/openvpn\/test_server\/keys\/dh1024.pem$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^proto\s+udp$/) }
+ it { should_not contain_file('/etc/openvpn/test_server.conf').with_content(/^proto\s+tls-server$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^port\s+123$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^fake_compression$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^group\s+someone$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^user\s+someone$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^log\-append\s+\/var\/log\/openvpn\/test_server\.log$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^status\s+\/var\/log\/openvpn\/test_server_status\.log$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^dev\s+tun1$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^local\s+2\.3\.4\.5$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^server\s+2\.3\.4\.0\s+255\.255\.0\.0$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^push\s+dhcp-option\s+DNS\s+172\.31\.0\.30$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^push\s+route\s+172\.31\.0\.0\s+255\.255\.0\.0$/) }
+ end
+
+ context "when RedHat based machine" do
+ let(:params) { {
+ 'country' => 'CO',
+ 'province' => 'ST',
+ 'city' => 'Some City',
+ 'organization' => 'example.org',
+ 'email' => 'testemail@example.org'
+ } }
+
+ let(:facts) { { :osfamily => 'RedHat', :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should contain_file('/etc/openvpn/test_server/easy-rsa/openssl.cnf').with(
+ 'ensure' => 'link',
+ 'target' => '/etc/openvpn/test_server/easy-rsa/openssl-1.0.0.cnf'
+ )}
+
+ it { should contain_exec('copy easy-rsa to openvpn config folder test_server').with(
+ 'command' => '/bin/cp -r /usr/share/doc/openvpn-2.2.2/easy-rsa/2.0 /etc/openvpn/test_server/easy-rsa'
+ )}
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^group\s+nobody$/) }
+
+ end
+
+ context "when Debian based machine" do
+ let(:params) { {
+ 'country' => 'CO',
+ 'province' => 'ST',
+ 'city' => 'Some City',
+ 'organization' => 'example.org',
+ 'email' => 'testemail@example.org'
+ } }
+
+ let(:facts) { { :osfamily => 'Debian', :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should contain_file('/etc/openvpn/test_server/easy-rsa/openssl.cnf').with(
+ 'ensure' => 'link',
+ 'target' => '/etc/openvpn/test_server/easy-rsa/openssl-1.0.0.cnf'
+ )}
+
+ it { should contain_exec('copy easy-rsa to openvpn config folder test_server').with(
+ 'command' => '/bin/cp -r /usr/share/doc/openvpn/examples/easy-rsa/2.0 /etc/openvpn/test_server/easy-rsa'
+ )}
+
+ # Configure to start vpn session
+ it { should contain_concat__fragment('openvpn.default.autostart.test_server').with(
+ 'content' => "AUTOSTART=\"$AUTOSTART test_server\"\n",
+ 'target' => '/etc/default/openvpn'
+ )}
+
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^group\s+nogroup$/) }
+
+ end
+
+end
diff --git a/puppet/modules/openvpn/spec/spec_helper.rb b/puppet/modules/openvpn/spec/spec_helper.rb
new file mode 100644
index 00000000..dc7e9f4a
--- /dev/null
+++ b/puppet/modules/openvpn/spec/spec_helper.rb
@@ -0,0 +1,2 @@
+require 'rubygems'
+require 'puppetlabs_spec_helper/module_spec_helper'
diff --git a/puppet/modules/openvpn/templates/client.erb b/puppet/modules/openvpn/templates/client.erb
new file mode 100644
index 00000000..021ed617
--- /dev/null
+++ b/puppet/modules/openvpn/templates/client.erb
@@ -0,0 +1,26 @@
+client
+ca keys/ca.crt
+cert keys/<%= scope.lookupvar('name') %>.crt
+key keys/<%= scope.lookupvar('name') %>.key
+dev <%= scope.lookupvar('dev') %>
+proto <%= scope.lookupvar('proto') %>
+remote <%= scope.lookupvar('remote_host') %> <%= scope.lookupvar('port') %>
+<% if scope.lookupvar('compression') != '' -%>
+<%= scope.lookupvar('compression') %>
+<% end -%>
+resolv-retry <%= scope.lookupvar('resolv_retry') %>
+<% if scope.lookupvar('nobind') -%>
+nobind
+<% end -%>
+<% if scope.lookupvar('persist_key') -%>
+persist-key
+<% end -%>
+<% if scope.lookupvar('persist_tun') -%>
+persist-tun
+<% end -%>
+<% if scope.lookupvar('mute_replay_warnings') -%>
+mute-replay-warnings
+<% end -%>
+ns-cert-type server
+verb <%= scope.lookupvar('verb') %>
+mute <%= scope.lookupvar('mute') %>
diff --git a/puppet/modules/openvpn/templates/client_specific_config.erb b/puppet/modules/openvpn/templates/client_specific_config.erb
new file mode 100644
index 00000000..62cc0e7a
--- /dev/null
+++ b/puppet/modules/openvpn/templates/client_specific_config.erb
@@ -0,0 +1,10 @@
+<% scope.lookupvar('iroute').each do |route| -%>
+iroute <%= route %>
+<% end -%>
+<% if ifconfig = scope.lookupvar('ifconfig') -%>
+ifconfig-push <%= ifconfig %>
+<% end -%>
+<% scope.lookupvar('dhcp_options').each do |option| -%>
+push dhcp-option <%= option %>
+<% end -%>
+
diff --git a/puppet/modules/openvpn/templates/etc-default-openvpn.erb b/puppet/modules/openvpn/templates/etc-default-openvpn.erb
new file mode 100644
index 00000000..310e462e
--- /dev/null
+++ b/puppet/modules/openvpn/templates/etc-default-openvpn.erb
@@ -0,0 +1,20 @@
+# This is the configuration file for /etc/init.d/openvpn
+
+#
+# Start only these VPNs automatically via init script.
+# Allowed values are "all", "none" or space separated list of
+# names of the VPNs. If empty, "all" is assumed.
+#
+#AUTOSTART="all"
+#AUTOSTART="none"
+#AUTOSTART="home office"
+#
+# Refresh interval (in seconds) of default status files
+# located in /var/run/openvpn.$NAME.status
+# Defaults to 10, 0 disables status file generation
+#
+#STATUSREFRESH=10
+#STATUSREFRESH=0
+# Optional arguments to openvpn's command line
+OPTARGS=""
+AUTOSTART=""
diff --git a/puppet/modules/openvpn/templates/server.erb b/puppet/modules/openvpn/templates/server.erb
new file mode 100644
index 00000000..6ef13263
--- /dev/null
+++ b/puppet/modules/openvpn/templates/server.erb
@@ -0,0 +1,37 @@
+mode server
+client-config-dir /etc/openvpn/<%= scope.lookupvar('name') %>/client-configs
+ca /etc/openvpn/<%= scope.lookupvar('name') %>/keys/ca.crt
+cert /etc/openvpn/<%= scope.lookupvar('name') %>/keys/server.crt
+key /etc/openvpn/<%= scope.lookupvar('name') %>/keys/server.key
+dh /etc/openvpn/<%= scope.lookupvar('name') %>/keys/dh1024.pem
+<% if scope.lookupvar('proto') == 'tcp' -%>
+proto <%= scope.lookupvar('proto') %>-server
+<% else -%>
+proto <%= scope.lookupvar('proto') %>
+<% end -%>
+port <%= scope.lookupvar('port') %>
+<% if scope.lookupvar('tls_server') -%>
+tls-server
+<% end -%>
+<% if scope.lookupvar('compression') != '' -%>
+<%= scope.lookupvar('compression') %>
+<% end -%>
+group <%= scope.lookupvar('group_to_set') %>
+user <%= scope.lookupvar('user') %>
+<% if scope.lookupvar('logfile') -%>
+log-append <%= scope.lookupvar('logfile') %>
+<% end -%>
+status <%= scope.lookupvar('status_log') %>
+dev <%= scope.lookupvar('dev') %>
+<% if scope.lookupvar('local') != '' -%>
+local <%= scope.lookupvar('local') %>
+<% end -%>
+<% if scope.lookupvar('ipp') -%>
+ifconfig-pool-persist <%= scope.lookupvar('name') %>/vpn-ipp.txt
+<% end -%>
+<% if scope.lookupvar('server') != '' -%>
+server <%= scope.lookupvar('server') %>
+<% end -%>
+<% scope.lookupvar('push').each do |item| -%>
+push <%= item %>
+<% end -%>
diff --git a/puppet/modules/openvpn/templates/vars.erb b/puppet/modules/openvpn/templates/vars.erb
new file mode 100644
index 00000000..20448b8b
--- /dev/null
+++ b/puppet/modules/openvpn/templates/vars.erb
@@ -0,0 +1,68 @@
+# easy-rsa parameter settings
+
+# NOTE: If you installed from an RPM,
+# don't edit this file in place in
+# /usr/share/openvpn/easy-rsa --
+# instead, you should copy the whole
+# easy-rsa directory to another location
+# (such as /etc/openvpn) so that your
+# edits will not be wiped out by a future
+# OpenVPN package upgrade.
+
+# This variable should point to
+# the top level of the easy-rsa
+# tree.
+export EASY_RSA="/etc/openvpn/<%= @name %>/easy-rsa"
+
+#
+# This variable should point to
+# the requested executables
+#
+export OPENSSL="openssl"
+export PKCS11TOOL="pkcs11-tool"
+export GREP="grep"
+
+
+# This variable should point to
+# the openssl.cnf file included
+# with easy-rsa.
+export KEY_CONFIG=`$EASY_RSA/whichopensslcnf $EASY_RSA`
+
+# Edit this variable to point to
+# your soon-to-be-created key
+# directory.
+#
+# WARNING: clean-all will do
+# a rm -rf on this directory
+# so make sure you define
+# it correctly!
+export KEY_DIR="$EASY_RSA/keys"
+
+# Issue rm -rf warning
+echo NOTE: If you run ./clean-all, I will be doing a rm -rf on $KEY_DIR
+
+# PKCS11 fixes
+export PKCS11_MODULE_PATH="dummy"
+export PKCS11_PIN="dummy"
+
+# Increase this to 2048 if you
+# are paranoid. This will slow
+# down TLS negotiation performance
+# as well as the one-time DH parms
+# generation process.
+export KEY_SIZE=1024
+
+# In how many days should the root CA key expire?
+export CA_EXPIRE=3650
+
+# In how many days should certificates expire?
+export KEY_EXPIRE=3650
+
+# These are the default values for fields
+# which will be placed in the certificate.
+# Don't leave any of these fields blank.
+export KEY_COUNTRY="<%= @country %>"
+export KEY_PROVINCE="<%= @province %>"
+export KEY_CITY="<%= @city %>"
+export KEY_ORG="<%= @organization %>"
+export KEY_EMAIL="<%= @email %>"
diff --git a/puppet/modules/openvpn/vagrant/client.pp b/puppet/modules/openvpn/vagrant/client.pp
new file mode 100644
index 00000000..7ebeb1d7
--- /dev/null
+++ b/puppet/modules/openvpn/vagrant/client.pp
@@ -0,0 +1,5 @@
+node default {
+
+ package { 'openvpn': ensure => installed; }
+
+}
diff --git a/puppet/modules/openvpn/vagrant/server.pp b/puppet/modules/openvpn/vagrant/server.pp
new file mode 100644
index 00000000..a95def06
--- /dev/null
+++ b/puppet/modules/openvpn/vagrant/server.pp
@@ -0,0 +1,23 @@
+node default {
+ openvpn::server { 'winterthur':
+ country => 'CH',
+ province => 'ZH',
+ city => 'Winterthur',
+ organization => 'example.org',
+ email => 'root@example.org',
+ server => '10.200.200.0 255.255.255.0'
+ }
+
+ openvpn::client { 'client1':
+ server => 'winterthur';
+ }
+
+ openvpn::client_specific_config { 'client1':
+ server => 'winterthur',
+ ifconfig => '10.200.200.100 255.255.255.0'
+ }
+
+ openvpn::client { 'client2':
+ server => 'winterthur';
+ }
+}
diff --git a/puppet/modules/postfwd/files/postfwd_default b/puppet/modules/postfwd/files/postfwd_default
new file mode 100644
index 00000000..83742e40
--- /dev/null
+++ b/puppet/modules/postfwd/files/postfwd_default
@@ -0,0 +1,19 @@
+### This file managed by Puppet
+# Global options for postfwd(8).
+
+# Set to '1' to enable startup (daemon mode)
+STARTUP=1
+
+# Config file
+CONF=/etc/postfix/postfwd.cf
+# IP where listen to
+INET=127.0.0.1
+# Port where listen to
+PORT=10040
+# run as user postfwd
+RUNAS="postfw"
+# Arguments passed on start (--daemon implied)
+# disable summary and cache-no-size
+#ARGS="--summary=600 --cache=600 --cache-rdomain-only --cache-no-size"
+ARGS="--cache=600 --cache-rdomain-only --no-rulestats"
+
diff --git a/puppet/modules/postfwd/manifests/init.pp b/puppet/modules/postfwd/manifests/init.pp
new file mode 100644
index 00000000..6db3fa52
--- /dev/null
+++ b/puppet/modules/postfwd/manifests/init.pp
@@ -0,0 +1,43 @@
+# This class provides rate-limiting for outgoing SMTP, using postfwd. It is
+# configured with some limits that seem reasonable for a generic use case.
+# Each of the following applies to sasl_authenticated users:
+#
+# . 150 recipients at a time
+# . no more than 100 messages in 60 minutes
+# . no more than 500 recipients in 60 minutes.
+#
+# This class could be easily extended to add overrides to these rules,
+# maximum sizes per client, or additional rules
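+# (see templates/postfwd.cf.erb). For example, an override exempting a single
+# account from these limits is a one-line rule in postfwd.cf; "exampleuser"
+# below is only a placeholder:
+#
+#   id=exampleuser; sasl_username==exampleuser; action=dunno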
+class postfwd {
+
+ ensure_packages(['libnet-server-perl', 'libnet-dns-perl', 'postfwd'])
+
+ file {
+ '/etc/default/postfwd':
+ source => 'puppet:///modules/postfwd/postfwd_default',
+ mode => '0644',
+ owner => root,
+ group => root,
+ before => Package['postfwd'];
+
+ '/etc/postfix/postfwd.cf':
+ content => template('postfwd/postfwd.cf.erb'),
+ mode => '0644',
+ owner => root,
+ group => root,
+ require => Package['postfix'],
+ before => Package['postfwd'];
+ }
+
+ service {
+ 'postfwd':
+ ensure => running,
+ name => postfwd,
+ pattern => '/usr/sbin/postfwd',
+ enable => true,
+ hasrestart => true,
+ hasstatus => false,
+ require => [ File['/etc/default/postfwd'],
+ File['/etc/postfix/postfwd.cf']];
+ }
+}
diff --git a/puppet/modules/postfwd/templates/postfwd.cf.erb b/puppet/modules/postfwd/templates/postfwd.cf.erb
new file mode 100644
index 00000000..1c45dd03
--- /dev/null
+++ b/puppet/modules/postfwd/templates/postfwd.cf.erb
@@ -0,0 +1,28 @@
+### This file managed by Puppet
+# Before deploying a rule:
+# 1. test with an additional "sender==test@domain.org;" in the rule so it
+#    only applies to your test account
+# 2. then, when ready to test for all users, use WARN and watch the logs
+#    for a few days to make sure it is working the way you like
+# 3. then, when ready to deploy for real, set a proper error code
+
+## Overrides - add entries like the following example
+# id=exampleuser; sasl_username==exampleuser; action=dunno
+
+## Rules that apply to all senders
+# Recipient Per Message Limit
+# We only receive mail via smtp from sasl authenticated users
+# directly. We want to limit this to a lower amount to prevent phished accounts
+# from spamming
+id=RCPTSENDER; recipient_count=150; action=REJECT Too many recipients, please try again. Contact http://<%= @domain %>/tickets/new if this is in error. ERROR:RCPTSENDER
+
+# Message Rate Limit
+# This limits sasl authenticated users to no more than 50/60mins
+# NOTE: sasl_username needs to be set to something or this check will fail
+id=MSGRATE ; sasl_username=!!(^$); action==rate($$sasl_username/100/3600/450 4.7.1 exceeded message rate. Contact http://<%= @domain %>/tickets/new if this is in error. ERROR:MSGRATE)
+
+# Total Recipient Rate Limit
+# This adds up the recipients for all the sasl authenticated users messages
+# and can't exceed more than 250/60min
+# NOTE: sasl_username needs to be set to something or this check will fail
+id=RCPTRATE ; sasl_username=!!(^$); action==rcpt($$sasl_username/500/3600/450 4.7.1 exceeded message rate. Contact http://<%= @domain %>/tickets/new if this is in error. ERROR:RCPTRATE)
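For reference, the rate() and rcpt() actions above appear to follow postfwd's rate(<item>/<max>/<seconds>/<action-on-exceed>) form; read field by field, the MSGRATE rule amounts to roughly:

    action=rate($$sasl_username / 100 / 3600 / "450 4.7.1 ...")
               # per-user counter / max count / window in seconds / reply sent once the ceiling is hit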
diff --git a/puppet/modules/site_apache/files/conf.d/security b/puppet/modules/site_apache/files/conf.d/security
new file mode 100644
index 00000000..a5ae5bdc
--- /dev/null
+++ b/puppet/modules/site_apache/files/conf.d/security
@@ -0,0 +1,55 @@
+#
+# Disable access to the entire file system except for the directories that
+# are explicitly allowed later.
+#
+# This currently breaks the configurations that come with some web application
+# Debian packages. It will be made the default for the release after lenny.
+#
+#<Directory />
+# AllowOverride None
+# Order Deny,Allow
+# Deny from all
+#</Directory>
+
+
+# Changing the following options will not really affect the security of the
+# server, but might make attacks slightly more difficult in some cases.
+
+#
+# ServerTokens
+# This directive configures what you return as the Server HTTP response
+# Header. The default is 'Full' which sends information about the OS-Type
+# and compiled in modules.
+# Set to one of: Full | OS | Minimal | Minor | Major | Prod
+# where Full conveys the most information, and Prod the least.
+#
+#ServerTokens Minimal
+ServerTokens Prod
+
+#
+# Optionally add a line containing the server version and virtual host
+# name to server-generated pages (internal error documents, FTP directory
+# listings, mod_status and mod_info output etc., but not CGI generated
+# documents or custom error documents).
+# Set to "EMail" to also include a mailto: link to the ServerAdmin.
+# Set to one of: On | Off | EMail
+#
+#ServerSignature Off
+ServerSignature Off
+
+#
+# Allow TRACE method
+#
+# Set to "extended" to also reflect the request body (only for testing and
+# diagnostic purposes).
+#
+# Set to one of: On | Off | extended
+#
+#TraceEnable Off
+TraceEnable On
+
+# Setting this header will prevent other sites from embedding pages from this
+# site as frames. This defends against clickjacking attacks.
+# Requires mod_headers to be enabled.
+#
+Header set X-Frame-Options: "DENY"
diff --git a/puppet/modules/site_apache/files/include.d/ssl_common.inc b/puppet/modules/site_apache/files/include.d/ssl_common.inc
new file mode 100644
index 00000000..2d282c84
--- /dev/null
+++ b/puppet/modules/site_apache/files/include.d/ssl_common.inc
@@ -0,0 +1,7 @@
+SSLEngine on
+SSLProtocol all -SSLv2 -SSLv3
+SSLHonorCipherOrder on
+SSLCompression off
+SSLCipherSuite "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!3DES:!RC4:!MD5:!PSK!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA"
+
+RequestHeader set X_FORWARDED_PROTO 'https'
\ No newline at end of file
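A quick way to sanity-check a vhost that includes this file is to confirm SSLv3 handshakes are refused while TLS 1.2 still negotiates one of the listed ciphers (the hostname is a placeholder; -ssl3 is only available on OpenSSL builds that still ship SSLv3 support):

    openssl s_client -connect example.org:443 -ssl3   < /dev/null    # expected to fail the handshake
    openssl s_client -connect example.org:443 -tls1_2 < /dev/null | grep -E 'Protocol|Cipher'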
diff --git a/puppet/modules/site_apache/manifests/common.pp b/puppet/modules/site_apache/manifests/common.pp
new file mode 100644
index 00000000..8a11759a
--- /dev/null
+++ b/puppet/modules/site_apache/manifests/common.pp
@@ -0,0 +1,30 @@
+# install basic apache modules needed for all services (nagios, webapp)
+class site_apache::common {
+
+ include apache::module::rewrite
+ include apache::module::env
+
+ class { '::apache':
+ no_default_site => true,
+ ssl => true,
+ ssl_cipher_suite => 'HIGH:MEDIUM:!aNULL:!MD5'
+ }
+
+ # needed for the mod_ssl config
+ include apache::module::mime
+
+ # load mods depending on apache version
+ if ( $::lsbdistcodename == 'jessie' ) {
+ # apache >= 2.4, debian jessie
+ # needed for mod_ssl config
+ include apache::module::socache_shmcb
+ # generally needed
+ include apache::module::mpm_prefork
+ } else {
+ # apache < 2.4, debian wheezy
+ # for "Order" directive, i.e. main apache2.conf
+ include apache::module::authz_host
+ }
+
+ include site_apache::common::tls
+}
diff --git a/puppet/modules/site_apache/manifests/common/tls.pp b/puppet/modules/site_apache/manifests/common/tls.pp
new file mode 100644
index 00000000..040868bf
--- /dev/null
+++ b/puppet/modules/site_apache/manifests/common/tls.pp
@@ -0,0 +1,6 @@
+class site_apache::common::tls {
+ # class to setup common SSL configurations
+
+ apache::config::include{ 'ssl_common.inc': }
+
+}
diff --git a/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb b/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb
new file mode 100644
index 00000000..bfa5d04d
--- /dev/null
+++ b/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb
@@ -0,0 +1,48 @@
+<VirtualHost *:80>
+ ServerName <%= @api_domain %>
+ RewriteEngine On
+ RewriteRule ^.*$ https://<%= @api_domain -%>:<%= @api_port -%>%{REQUEST_URI} [R=permanent,L]
+ CustomLog ${APACHE_LOG_DIR}/other_vhosts_access.log common
+</VirtualHost>
+
+Listen 0.0.0.0:<%= @api_port %>
+
+<VirtualHost *:<%= @api_port -%>>
+ ServerName <%= @api_domain %>
+ CustomLog ${APACHE_LOG_DIR}/other_vhosts_access.log common
+
+ SSLCACertificatePath /etc/ssl/certs
+ SSLCertificateKeyFile <%= scope.lookupvar('x509::variables::keys') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.key
+ SSLCertificateFile <%= scope.lookupvar('x509::variables::certs') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.crt
+
+ Include include.d/ssl_common.inc
+
+ <IfModule mod_headers.c>
+<% if @webapp['secure'] -%>
+ Header always set Strict-Transport-Security "max-age=31536000; includeSubDomains"
+<% end -%>
+ Header always unset X-Powered-By
+ Header always unset X-Runtime
+ </IfModule>
+
+ DocumentRoot /srv/leap/webapp/public
+ <% if scope.function_guess_apache_version([]) == '2.4' %>
+ <Directory /srv/leap/webapp/public>
+ AllowOverride None
+ Require all granted
+ </Directory>
+ <% end %>
+
+ # Check for maintenance file and redirect all requests
+ RewriteEngine On
+ RewriteCond %{DOCUMENT_ROOT}/system/maintenance.html -f
+ RewriteCond %{SCRIPT_FILENAME} !maintenance.html
+ RewriteCond %{REQUEST_URI} !/images/maintenance.jpg
+ RewriteRule ^.*$ %{DOCUMENT_ROOT}/system/maintenance.html [L]
+
+ # http://www.modrails.com/documentation/Users%20guide%20Apache.html#_passengerallowencodedslashes_lt_on_off_gt
+ AllowEncodedSlashes on
+ PassengerAllowEncodedSlashes on
+ PassengerFriendlyErrorPages off
+ SetEnv TMPDIR /var/tmp
+</VirtualHost>
diff --git a/puppet/modules/site_apache/templates/vhosts.d/common.conf.erb b/puppet/modules/site_apache/templates/vhosts.d/common.conf.erb
new file mode 100644
index 00000000..bf60e794
--- /dev/null
+++ b/puppet/modules/site_apache/templates/vhosts.d/common.conf.erb
@@ -0,0 +1,76 @@
+<VirtualHost *:80>
+ ServerName <%= @webapp_domain %>
+ ServerAlias <%= @domain_name %>
+ ServerAlias <%= @domain %>
+ ServerAlias www.<%= @domain %>
+ RewriteEngine On
+ RewriteRule ^.*$ https://<%= @webapp_domain -%>%{REQUEST_URI} [R=permanent,L]
+ CustomLog ${APACHE_LOG_DIR}/other_vhosts_access.log common
+</VirtualHost>
+
+<VirtualHost *:443>
+ ServerName <%= @webapp_domain %>
+ ServerAlias <%= @domain_name %>
+ ServerAlias <%= @domain %>
+ ServerAlias www.<%= @domain %>
+ CustomLog ${APACHE_LOG_DIR}/other_vhosts_access.log common
+
+ SSLCACertificatePath /etc/ssl/certs
+ SSLCertificateKeyFile <%= scope.lookupvar('x509::variables::keys') %>/<%= scope.lookupvar('site_config::params::commercial_cert_name') %>.key
+ SSLCertificateFile <%= scope.lookupvar('x509::variables::certs') %>/<%= scope.lookupvar('site_config::params::commercial_cert_name') %>.crt
+
+ Include include.d/ssl_common.inc
+
+ <IfModule mod_headers.c>
+<% if (defined? @services) and (@services.include? 'webapp') and (@webapp['secure']) -%>
+ Header always set Strict-Transport-Security "max-age=31536000; includeSubDomains"
+<% end -%>
+ Header always unset X-Powered-By
+ Header always unset X-Runtime
+ </IfModule>
+
+<% if (defined? @services) and (@services.include? 'webapp') -%>
+ DocumentRoot /srv/leap/webapp/public
+ <% if scope.function_guess_apache_version([]) == '2.4' %>
+ <Directory /srv/leap/webapp/public>
+ AllowOverride None
+ Require all granted
+ </Directory>
+ <% end %>
+
+ RewriteEngine On
+ # Check for maintenance file and redirect all requests
+ RewriteCond %{DOCUMENT_ROOT}/system/maintenance.html -f
+ RewriteCond %{SCRIPT_FILENAME} !maintenance.html
+ RewriteCond %{REQUEST_URI} !/images/maintenance.jpg
+ RewriteRule ^.*$ %{DOCUMENT_ROOT}/system/maintenance.html [L]
+
+ # http://www.modrails.com/documentation/Users%20guide%20Apache.html#_passengerallowencodedslashes_lt_on_off_gt
+ AllowEncodedSlashes on
+ PassengerAllowEncodedSlashes on
+ PassengerFriendlyErrorPages off
+ SetEnv TMPDIR /var/tmp
+
+ # Allow rails assets to be cached for a very long time (since the URLs change whenever the content changes)
+ <Location /assets/>
+ Header unset ETag
+ FileETag None
+ ExpiresActive On
+ ExpiresDefault "access plus 1 year"
+ </Location>
+<% end -%>
+
+
+<% if (defined? @services) and (@services.include? 'monitor') -%>
+ <DirectoryMatch (/usr/share/nagios3/htdocs|/usr/lib/cgi-bin/nagios3|/etc/nagios3/stylesheets|/usr/share/pnp4nagios)>
+ <% if (defined? @services) and (@services.include? 'webapp') -%>
+ PassengerEnabled off
+ <% end -%>
+ AllowOverride all
+ # Nagios won't work with setting this option to "DENY",
+      # as set in conf.d/security (#4169). Therefore we allow
+ # it here, only for nagios.
+ Header set X-Frame-Options: "ALLOW"
+ </DirectoryMatch>
+<% end -%>
+</VirtualHost>
diff --git a/puppet/modules/site_apache/templates/vhosts.d/hidden_service.conf.erb b/puppet/modules/site_apache/templates/vhosts.d/hidden_service.conf.erb
new file mode 100644
index 00000000..232b1577
--- /dev/null
+++ b/puppet/modules/site_apache/templates/vhosts.d/hidden_service.conf.erb
@@ -0,0 +1,55 @@
+<VirtualHost 127.0.0.1:80>
+ ServerName <%= @tor_domain %>
+
+ <IfModule mod_headers.c>
+ Header always unset X-Powered-By
+ Header always unset X-Runtime
+ </IfModule>
+
+<% if (defined? @services) and (@services.include? 'webapp') -%>
+ DocumentRoot /srv/leap/webapp/public
+ <% if scope.function_guess_apache_version([]) == '2.4' %>
+ <Directory /srv/leap/webapp/public>
+ AllowOverride None
+ Require all granted
+ </Directory>
+ <% end %>
+
+ RewriteEngine On
+ # Check for maintenance file and redirect all requests
+ RewriteCond %{DOCUMENT_ROOT}/system/maintenance.html -f
+ RewriteCond %{SCRIPT_FILENAME} !maintenance.html
+ RewriteCond %{REQUEST_URI} !/images/maintenance.jpg
+ RewriteRule ^.*$ %{DOCUMENT_ROOT}/system/maintenance.html [L]
+
+ # http://www.modrails.com/documentation/Users%20guide%20Apache.html#_passengerallowencodedslashes_lt_on_off_gt
+ AllowEncodedSlashes on
+ PassengerAllowEncodedSlashes on
+ PassengerFriendlyErrorPages off
+ SetEnv TMPDIR /var/tmp
+
+ # Allow rails assets to be cached for a very long time (since the URLs change whenever the content changes)
+ <Location /assets/>
+ Header unset ETag
+ FileETag None
+ ExpiresActive On
+ ExpiresDefault "access plus 1 year"
+ </Location>
+<% end -%>
+
+<% if (defined? @services) and (@services.include? 'static') -%>
+ DocumentRoot "/srv/static/root/public"
+ <% if scope.function_guess_apache_version([]) == '2.4' %>
+ <Directory /srv/static/root/public>
+ AllowOverride None
+ Require all granted
+ </Directory>
+ <% end %>
+ AccessFileName .htaccess
+
+ Alias /provider.json /srv/leap/provider.json
+ <Location /provider.json>
+ Header set X-Minimum-Client-Version 0.5
+ </Location>
+<% end -%>
+</VirtualHost>
diff --git a/puppet/modules/site_apt/files/Debian/51unattended-upgrades-leap b/puppet/modules/site_apt/files/Debian/51unattended-upgrades-leap
new file mode 100644
index 00000000..bbaac6a2
--- /dev/null
+++ b/puppet/modules/site_apt/files/Debian/51unattended-upgrades-leap
@@ -0,0 +1,6 @@
+// this file is managed by puppet !
+
+Unattended-Upgrade::Allowed-Origins {
+ "leap.se:stable";
+}
+
diff --git a/puppet/modules/site_apt/files/keys/leap-archive.gpg b/puppet/modules/site_apt/files/keys/leap-archive.gpg
new file mode 100644
index 00000000..dd7f3be6
--- /dev/null
+++ b/puppet/modules/site_apt/files/keys/leap-archive.gpg
Binary files differ
diff --git a/puppet/modules/site_apt/files/keys/leap-experimental-archive.gpg b/puppet/modules/site_apt/files/keys/leap-experimental-archive.gpg
new file mode 100644
index 00000000..5cc9064b
--- /dev/null
+++ b/puppet/modules/site_apt/files/keys/leap-experimental-archive.gpg
Binary files differ
diff --git a/puppet/modules/site_apt/manifests/dist_upgrade.pp b/puppet/modules/site_apt/manifests/dist_upgrade.pp
new file mode 100644
index 00000000..0eb98cea
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/dist_upgrade.pp
@@ -0,0 +1,17 @@
+# upgrade all packages
+class site_apt::dist_upgrade {
+
+ # facter returns 'true' as string
+ # lint:ignore:quoted_booleans
+ if $::apt_running == 'true' {
+ # lint:endignore
+ fail ('apt-get is running in background - Please wait until it finishes. Exiting.')
+ } else {
+ exec{'initial_apt_dist_upgrade':
+ command => "/usr/bin/apt-get -q -y -o 'DPkg::Options::=--force-confold' dist-upgrade",
+ refreshonly => false,
+ timeout => 1200,
+ require => Exec['apt_updated']
+ }
+ }
+}
diff --git a/puppet/modules/site_apt/manifests/init.pp b/puppet/modules/site_apt/manifests/init.pp
new file mode 100644
index 00000000..455425c1
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/init.pp
@@ -0,0 +1,55 @@
+# setup apt on all nodes
+class site_apt {
+
+ $sources = hiera('sources')
+ $apt_config = $sources['apt']
+
+ # debian repo urls
+ $apt_url_basic = $apt_config['basic']
+ $apt_url_security = $apt_config['security']
+ $apt_url_backports = $apt_config['backports']
+
+ # leap repo url
+ $platform_sources = $sources['platform']
+ $apt_url_platform_basic = $platform_sources['apt']['basic']
+
+ # needed on jessie hosts for getting pnp4nagios from testing
+ if ( $::operatingsystemmajrelease == '8' ) {
+ $use_next_release = true
+ } else {
+ $use_next_release = false
+ }
+
+ class { 'apt':
+ custom_key_dir => 'puppet:///modules/site_apt/keys',
+ debian_url => $apt_url_basic,
+ security_url => $apt_url_security,
+ backports_url => $apt_url_backports,
+ use_next_release => $use_next_release
+ }
+
+ # enable http://deb.leap.se debian package repository
+ include site_apt::leap_repo
+
+ apt::apt_conf { '90disable-pdiffs':
+ content => 'Acquire::PDiffs "false";';
+ }
+
+ include ::site_apt::unattended_upgrades
+
+ # not currently used
+ #apt::sources_list { 'secondary.list':
+ # content => template('site_apt/secondary.list');
+ #}
+
+ apt::preferences_snippet { 'leap':
+ priority => 999,
+ package => '*',
+ pin => 'origin "deb.leap.se"'
+ }
+
+ # All packages should be installed after 'update_apt' is called,
+ # which does an 'apt-get update'.
+ Exec['update_apt'] -> Package <||>
+
+}
diff --git a/puppet/modules/site_apt/manifests/leap_repo.pp b/puppet/modules/site_apt/manifests/leap_repo.pp
new file mode 100644
index 00000000..5eedce45
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/leap_repo.pp
@@ -0,0 +1,16 @@
+# install leap deb repo together with leap-keyring package
+# containing the apt signing key
+class site_apt::leap_repo {
+ $platform = hiera_hash('platform')
+ $major_version = $platform['major_version']
+
+ apt::sources_list { 'leap.list':
+ content => "deb ${::site_apt::apt_url_platform_basic} ${::lsbdistcodename} main\n",
+ before => Exec[refresh_apt]
+ }
+
+ package { 'leap-archive-keyring':
+ ensure => latest
+ }
+
+}
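The rendered leap.list ends up as a single sources line built from the platform URL and the node's distribution codename; on a wheezy host it would come out like the following, where the placeholder stands for whatever sources['platform']['apt']['basic'] is set to:

    deb <apt_url_platform_basic> wheezy main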
diff --git a/puppet/modules/site_apt/manifests/preferences/check_mk.pp b/puppet/modules/site_apt/manifests/preferences/check_mk.pp
new file mode 100644
index 00000000..580e0d3f
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/preferences/check_mk.pp
@@ -0,0 +1,9 @@
+class site_apt::preferences::check_mk {
+
+ apt::preferences_snippet { 'check-mk':
+ package => 'check-mk-*',
+ release => "${::lsbdistcodename}-backports",
+ priority => 999;
+ }
+
+}
diff --git a/puppet/modules/site_apt/manifests/preferences/passenger.pp b/puppet/modules/site_apt/manifests/preferences/passenger.pp
new file mode 100644
index 00000000..8cd41f91
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/preferences/passenger.pp
@@ -0,0 +1,14 @@
+#
+# currently, this is only used by static_site to get passenger v4.
+#
+# UPGRADE: this is not needed for jessie.
+#
+class site_apt::preferences::passenger {
+
+ apt::preferences_snippet { 'passenger':
+ package => 'libapache2-mod-passenger',
+ release => "${::lsbdistcodename}-backports",
+ priority => 999;
+ }
+
+}
diff --git a/puppet/modules/site_apt/manifests/preferences/rsyslog.pp b/puppet/modules/site_apt/manifests/preferences/rsyslog.pp
new file mode 100644
index 00000000..bfeaa7da
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/preferences/rsyslog.pp
@@ -0,0 +1,13 @@
+class site_apt::preferences::rsyslog {
+
+ apt::preferences_snippet {
+ 'rsyslog_anon_depends':
+ package => 'libestr0 librelp0 rsyslog*',
+ priority => '999',
+ pin => 'release a=wheezy-backports',
+ before => Class['rsyslog::install'];
+
+ 'fixed_rsyslog_anon_package':
+ ensure => absent;
+ }
+}
diff --git a/puppet/modules/site_apt/manifests/unattended_upgrades.pp b/puppet/modules/site_apt/manifests/unattended_upgrades.pp
new file mode 100644
index 00000000..42f1f4c6
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/unattended_upgrades.pp
@@ -0,0 +1,20 @@
+# configure unattended upgrades so packages from both Debian and LEAP
+# repos get upgraded unattended
+class site_apt::unattended_upgrades {
+ # override unattended-upgrades package resource to make sure
+ # that it is upgraded on every deploy (#6245)
+
+ # configure upgrades for Debian
+ class { 'apt::unattended_upgrades':
+ ensure_version => latest
+ }
+
+ # configure LEAP upgrades
+ apt::apt_conf { '51unattended-upgrades-leap':
+ source => [
+ "puppet:///modules/site_apt/${::lsbdistid}/51unattended-upgrades-leap"],
+ require => Package['unattended-upgrades'],
+ refresh_apt => false,
+ }
+
+}
diff --git a/puppet/modules/site_apt/templates/jessie/postfix.seeds b/puppet/modules/site_apt/templates/jessie/postfix.seeds
new file mode 100644
index 00000000..1a878ccc
--- /dev/null
+++ b/puppet/modules/site_apt/templates/jessie/postfix.seeds
@@ -0,0 +1 @@
+postfix postfix/main_mailer_type select No configuration
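This debconf seed answers postfix's mailer-type prompt so the package can be installed non-interactively; applied by hand the equivalent would be roughly the following (the seed path is a placeholder for wherever the file is staged):

    debconf-set-selections /path/to/postfix.seeds
    apt-get install -y postfix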
diff --git a/puppet/modules/site_apt/templates/preferences.include_squeeze b/puppet/modules/site_apt/templates/preferences.include_squeeze
new file mode 100644
index 00000000..d6d36b60
--- /dev/null
+++ b/puppet/modules/site_apt/templates/preferences.include_squeeze
@@ -0,0 +1,25 @@
+Explanation: Debian wheezy
+Package: *
+Pin: release o=Debian,n=wheezy
+Pin-Priority: 990
+
+Explanation: Debian wheezy-updates
+Package: *
+Pin: release o=Debian,n=wheezy-updates
+Pin-Priority: 990
+
+Explanation: Debian sid
+Package: *
+Pin: release o=Debian,n=sid
+Pin-Priority: 1
+
+Explanation: Debian squeeze
+Package: *
+Pin: release o=Debian,n=squeeze
+Pin-Priority: 980
+
+Explanation: Debian fallback
+Package: *
+Pin: release o=Debian
+Pin-Priority: -10
+
diff --git a/puppet/modules/site_apt/templates/secondary.list b/puppet/modules/site_apt/templates/secondary.list
new file mode 100644
index 00000000..0c024549
--- /dev/null
+++ b/puppet/modules/site_apt/templates/secondary.list
@@ -0,0 +1,3 @@
+# basic
+deb http://ftp.debian.org/debian/ <%= @lsbdistcodename %> main contrib non-free
+
diff --git a/puppet/modules/site_apt/templates/wheezy/postfix.seeds b/puppet/modules/site_apt/templates/wheezy/postfix.seeds
new file mode 100644
index 00000000..1a878ccc
--- /dev/null
+++ b/puppet/modules/site_apt/templates/wheezy/postfix.seeds
@@ -0,0 +1 @@
+postfix postfix/main_mailer_type select No configuration
diff --git a/puppet/modules/site_check_mk/files/agent/local_checks/all_hosts/run_node_tests.sh b/puppet/modules/site_check_mk/files/agent/local_checks/all_hosts/run_node_tests.sh
new file mode 100644
index 00000000..1dd0afc9
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/local_checks/all_hosts/run_node_tests.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+#
+# runs node tests
+
+/srv/leap/bin/run_tests --checkmk
diff --git a/puppet/modules/site_check_mk/files/agent/local_checks/couchdb/leap_couch_stats.sh b/puppet/modules/site_check_mk/files/agent/local_checks/couchdb/leap_couch_stats.sh
new file mode 100755
index 00000000..c7477b18
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/local_checks/couchdb/leap_couch_stats.sh
@@ -0,0 +1,122 @@
+#!/bin/bash
+#
+# todo:
+# - thresholds
+# - couch response time
+# - make CURL/URL/DBLIST_EXCLUDE vars configurable
+# - move load_nagios_utils() to helper library so we can use it from multiple scripts
+
+start_time=$(date +%s.%N)
+
+CURL='curl -s --netrc-file /etc/couchdb/couchdb.netrc'
+URL='http://127.0.0.1:5984'
+TMPFILE=$(mktemp)
+DBLIST_EXCLUDE='(user-|sessions_|tokens_|_replicator|_users)'
+PREFIX='Couchdb_'
+
+
+load_nagios_utils () {
+ # load the nagios utils
+ # in debian, the package nagios-plugins-common installs utils.sh to /usr/lib/nagios/plugins/utils.sh
+ utilsfn=
+ for d in $PROGPATH /usr/lib/nagios/plugins /usr/lib64/nagios/plugins /usr/local/nagios/libexec /opt/nagios-plugins/libexec . ; do
+ if [ -f "$d/utils.sh" ]; then
+ utilsfn=$d/utils.sh;
+ fi
+ done
+ if [ "$utilsfn" = "" ]; then
+ echo "UNKNOWN - cannot find utils.sh (part of nagios plugins)";
+ exit 3;
+ fi
+ . "$utilsfn";
+ STATE[$STATE_OK]='OK'
+ STATE[$STATE_WARNING]='Warning'
+ STATE[$STATE_CRITICAL]='Critical'
+ STATE[$STATE_UNKNOWN]='Unknown'
+  STATE[$STATE_DEPENDENT]='Dependent'
+}
+
+get_global_stats_perf () {
+ trap "localexit=3" ERR
+ local localexit db_count
+ localexit=0
+
+ # get a list of all dbs
+ $CURL -X GET $URL/_all_dbs | json_pp | egrep -v '(\[|\])' > $TMPFILE
+
+ db_count=$( wc -l < $TMPFILE)
+ excluded_db_count=$( egrep -c "$DBLIST_EXCLUDE" $TMPFILE )
+
+ echo "db_count=$db_count|excluded_db_count=$excluded_db_count"
+ return ${localexit}
+}
+
+db_stats () {
+ trap "localexit=3" ERR
+ local db db_stats doc_count del_doc_count localexit
+ localexit=0
+
+ db="$1"
+ name="$2"
+
+ if [ -z "$name" ]
+ then
+ name="$db"
+ fi
+
+ perf="$perf|${db}_docs=$( $CURL -s -X GET ${URL}/$db | json_pp |grep 'doc_count' | sed 's/[^0-9]//g' )"
+ db_stats=$( $CURL -s -X GET ${URL}/$db | json_pp )
+
+ doc_count=$( echo "$db_stats" | grep 'doc_count' | grep -v 'deleted_doc_count' | sed 's/[^0-9]//g' )
+ del_doc_count=$( echo "$db_stats" | grep 'doc_del_count' | sed 's/[^0-9]//g' )
+
+  # don't divide by zero (doc_count is the divisor)
+  if [ $doc_count -eq 0 ]
+ then
+ del_doc_perc=0
+ else
+ del_doc_perc=$(( del_doc_count * 100 / doc_count ))
+ fi
+
+ bytes=$( echo "$db_stats" | grep disk_size | sed 's/[^0-9]//g' )
+ disk_size=$( echo "scale = 2; $bytes / 1024 / 1024" | bc -l )
+
+ echo -n "${localexit} ${PREFIX}${name}_database ${name}_docs=$doc_count|${name}_deleted_docs=$del_doc_count|${name}_deleted_docs_percentage=${del_doc_perc}%"
+ printf "|${name}_disksize_mb=%02.2fmb ${STATE[localexit]}: database $name\n" "$disk_size"
+
+ return ${localexit}
+}
+
+# main
+
+load_nagios_utils
+
+# per-db stats
+# get a list of all dbs
+$CURL -X GET $URL/_all_dbs | json_pp | egrep -v '(\[|\])' > $TMPFILE
+
+# get list of dbs to check
+dbs=$( egrep -v "${DBLIST_EXCLUDE}" $TMPFILE | tr -d '\n"' | sed 's/,/ /g' )
+
+for db in $dbs
+do
+ db_stats "$db"
+done
+
+# special handling for rotated dbs (the suffix is the number of 30-day periods since the unix epoch)
+suffix=$(($(date +'%s') / (60*60*24*30)))
+db_stats "sessions_${suffix}" "sessions"
+db_stats "tokens_${suffix}" "tokens"
+
+
+# show global couchdb stats
+global_stats_perf=$(get_global_stats_perf)
+exitcode=$?
+
+end_time=$(date +%s.%N)
+duration=$( echo "scale = 2; $end_time - $start_time" | bc -l )
+
+printf "${exitcode} ${PREFIX}global_stats ${global_stats_perf}|script_duration=%02.2fs ${STATE[exitcode]}: global couchdb status\n" "$duration"
+
+rm "$TMPFILE"
+
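Every line this script prints is a check_mk local-check result: exit status, item name, pipe-separated perfdata, then a status message. With made-up numbers, the per-database and global lines come out like:

    0 Couchdb_identities_database identities_docs=1200|identities_deleted_docs=40|identities_deleted_docs_percentage=3%|identities_disksize_mb=12.40mb OK: database identities
    0 Couchdb_global_stats db_count=18|excluded_db_count=7|script_duration=0.42s OK: global couchdb status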
diff --git a/puppet/modules/site_check_mk/files/agent/local_checks/mx/check_leap_mx.sh b/puppet/modules/site_check_mk/files/agent/local_checks/mx/check_leap_mx.sh
new file mode 100755
index 00000000..4711e247
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/local_checks/mx/check_leap_mx.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+
+WARN=1
+CRIT=5
+
+# in minutes
+MAXAGE=10
+
+STATUS[0]='OK'
+STATUS[1]='Warning'
+STATUS[2]='Critical'
+CHECKNAME='Leap_MX_Queue'
+
+WATCHDIR='/var/mail/leap-mx/Maildir/new/'
+
+
+total=`find $WATCHDIR -type f -mmin +$MAXAGE | wc -l`
+
+if [ $total -lt $WARN ]
+then
+ exitcode=0
+else
+ if [ $total -le $CRIT ]
+ then
+ exitcode=1
+ else
+ exitcode=2
+ fi
+fi
+
+echo "${exitcode} ${CHECKNAME} stale_files=${total} ${STATUS[exitcode]}: ${total} stale files (>=${MAXAGE} min) in ${WATCHDIR}."
+
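The single line emitted here follows the same local-check shape; with an empty queue it reads:

    0 Leap_MX_Queue stale_files=0 OK: 0 stale files (>=10 min) in /var/mail/leap-mx/Maildir/new/.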
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg
new file mode 100644
index 00000000..0f378a5a
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg
@@ -0,0 +1,28 @@
+/opt/bigcouch/var/log/bigcouch.log nocontext=1
+# ignore requests that are fine
+ I undefined - -.*200$
+ I undefined - -.*201$
+ I 127.0.0.1 undefined.* ok
+ I 127.0.0.1 localhost:5984 .* ok
+ # https://leap.se/code/issues/5246
+ I Shutting down group server
+ # ignore bigcouch conflict errors
+ I Error in process.*{{nocatch,conflict}
+ # ignore "Uncaught error in HTTP request: {exit, normal}" error
+  # it's suppressed in later versions of bigcouch anyhow
+ # see https://leap.se/code/issues/5226
+ I Uncaught error in HTTP request: {exit,normal}
+ I Uncaught error in HTTP request: {exit,
+ # Ignore rexi_EXIT bigcouch error (Bug #6512)
+ I Error in process <[0-9.]+> on node .* with exit value: {{rexi_EXIT,{(killed|noproc|shutdown),\[{couch_db,collect_results
+ # Ignore "Generic server terminating" bigcouch message (Feature #6544)
+ I Generic server <.*> terminating
+ I {error_report,<.*>,
+ I {error_info,
+ C Uncaught error in HTTP request: {error,
+ C Response abnormally terminated: {nodedown,
+ C rexi_DOWN,noproc
+ C rexi_DOWN,noconnection
+ C error
+ C Connection attempt from disallowed node
+ W Apache CouchDB has started
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/leap_mx.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/leap_mx.cfg
new file mode 100644
index 00000000..166d0230
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/leap_mx.cfg
@@ -0,0 +1,4 @@
+/var/log/leap/mx.log
+ W Don't know how to deliver mail
+ W No public key, stopping the processing chain
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/logwatch.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/logwatch.cfg
new file mode 100644
index 00000000..4f16d1bd
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/logwatch.cfg
@@ -0,0 +1,31 @@
+# This file is managed by Puppet. DO NOT EDIT.
+
+# logwatch.cfg
+# This file configures mk_logwatch. Define your logfiles
+# and patterns to be looked for here.
+
+# Name one or more logfiles
+/var/log/messages
+# Patterns are indented with one space and are prefixed with:
+# C: Critical messages
+# W: Warning messages
+# I: ignore these lines (OK)
+# The first match decides. Lines that do not match any pattern
+# are ignored.
+ C Fail event detected on md device
+ I mdadm.*: Rebuild.*event detected
+ W mdadm\[
+ W ata.*hard resetting link
+ W ata.*soft reset failed (.*FIS failed)
+ W device-mapper: thin:.*reached low water mark
+ C device-mapper: thin:.*no free space
+
+/var/log/auth.log
+ W sshd.*Corrupted MAC on input
+
+/var/log/kern.log
+ C panic
+ C Oops
+ W generic protection rip
+ W .*Unrecovered read error - auto reallocate failed
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/openvpn.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/openvpn.cfg
new file mode 100644
index 00000000..d99dcde9
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/openvpn.cfg
@@ -0,0 +1,19 @@
+/var/log/leap/openvpn.log
+# ignore openvpn TLS initialization errors when clients
+# suddenly hangup before properly establishing
+# a tls connection
+ I ovpn-.*TLS Error: Unroutable control packet received from
+ I ovpn-.*TLS Error: TLS key negotiation failed to occur within 60 seconds \(check your network connectivity\)
+ I ovpn-.*TLS Error: TLS handshake failed
+ I ovpn-.*TLS Error: TLS object -> incoming plaintext read error
+ I ovpn-.*Fatal TLS error \(check_tls_errors_co\), restarting
+ I ovpn-.*TLS_ERROR: BIO read tls_read_plaintext error: error:140890B2:SSL routines:SSL3_GET_CLIENT_CERTIFICATE:no certificate
+ I ovpn-.*TLS_ERROR: BIO read tls_read_plaintext error: error:140890C7:SSL routines:SSL3_GET_CLIENT_CERTIFICATE:peer did not return a certificate
+ I ovpn-.*TLS Error: unknown opcode received from
+ I ovpn-.*Authenticate/Decrypt packet error: packet HMAC authentication failed
+ I ovpn-.*TLS Error: reading acknowledgement record from packet
+ I ovpn-.*TLS Error: session-id not found in packet from
+
+ I ovpn-.*SIGUSR1\[soft,tls-error\] received, client-instance restarting
+ I ovpn-.*VERIFY ERROR: depth=0, error=certificate has expired
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg
new file mode 100644
index 00000000..3af5045b
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg
@@ -0,0 +1,6 @@
+/var/log/soledad.log
+ C WSGI application error
+ C Error
+ C error
+# Removed this line because we determined it was better to ignore it (#6566)
+# W Timing out client:
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/stunnel.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/stunnel.cfg
new file mode 100644
index 00000000..b1e6cf2f
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/stunnel.cfg
@@ -0,0 +1,10 @@
+/var/log/leap/stunnel.log
+# check for stunnel failures
+#
+# these are temporary failures and happen very often, so we
+# ignore them until we tuned stunnel timeouts/logging,
+# see https://leap.se/code/issues/5218
+ I stunnel:.*Connection reset by peer
+ I stunnel:.*Peer suddenly disconnected
+ I stunnel:.*Connection refused
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog/bigcouch.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/bigcouch.cfg
new file mode 100644
index 00000000..f53f0780
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/bigcouch.cfg
@@ -0,0 +1,5 @@
+# on one-node bigcouch setups, we'll get this msg
+# a lot, so we ignore it here until we fix
+# https://leap.se/code/issues/5244
+ I epmd: got partial packet only on file descriptor
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog/couchdb.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/couchdb.cfg
new file mode 100644
index 00000000..5f8d5b95
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/couchdb.cfg
@@ -0,0 +1,2 @@
+ C /usr/local/bin/couch-doc-update.*failed
+ C /usr/local/bin/couch-doc-update.*ERROR
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog_header.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog_header.cfg
new file mode 100644
index 00000000..f60d752b
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog_header.cfg
@@ -0,0 +1 @@
+/var/log/syslog
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg
new file mode 100644
index 00000000..7daf0cac
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg
@@ -0,0 +1,21 @@
+# some general patterns
+ I Error: Driver 'pcspkr' is already registered, aborting...
+# ignore postfix errors on lost connection (Bug #6476)
+ I postfix/smtpd.*SSL_accept error from.*lost connection
+# ignore postfix too many errors after DATA (#6545)
+ I postfix/smtpd.*too many errors after DATA from
+ C panic
+ C Oops
+ C Error
+# ignore ipv6 icmp errors for now (Bug #6540)
+ I kernel: .*icmpv6_send: no reply to icmp error
+ C error
+ W generic protection rip
+ W .*Unrecovered read error - auto reallocate failed
+# 401 Unauthorized error logged by webapp and possible other
+# applications
+ C Unauthorized
+# catch abnormal termination of processes (due to segfault/fpe
+# signals etc).
+# see https://github.com/pixelated/pixelated-user-agent/issues/683
+ C systemd.*: main process exited, code=killed, status=
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/webapp.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/webapp.cfg
new file mode 100644
index 00000000..337d9ec6
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/webapp.cfg
@@ -0,0 +1,8 @@
+/var/log/leap/webapp.log
+# check for webapp errors
+ C Completed 500
+# couch connection issues
+ C webapp.*Could not connect to couch database messages due to 401 Unauthorized: {"error":"unauthorized","reason":"You are not a server admin."}
+# ignore RoutingErrors that rails throw when it can't handle a url
+# see https://leap.se/code/issues/5173
+ I webapp.*ActionController::RoutingError
diff --git a/puppet/modules/site_check_mk/files/agent/nagios_plugins/check_unix_open_fds.pl b/puppet/modules/site_check_mk/files/agent/nagios_plugins/check_unix_open_fds.pl
new file mode 100755
index 00000000..06163d49
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/nagios_plugins/check_unix_open_fds.pl
@@ -0,0 +1,322 @@
+#!/usr/bin/perl -w
+
+# check_unix_open_fds Nagios Plugin
+#
+# TComm - Carlos Peris Pla
+#
+# This nagios plugin is free software, and comes with ABSOLUTELY
+# NO WARRANTY. It may be used, redistributed and/or modified under
+# the terms of the GNU General Public Licence (see
+# http://www.fsf.org/licensing/licenses/gpl.txt).
+
+
+# MODULE DECLARATION
+
+use strict;
+use Nagios::Plugin;
+
+
+# FUNCTION DECLARATION
+
+sub CreateNagiosManager ();
+sub CheckArguments ();
+sub PerformCheck ();
+
+
+# CONSTANT DEFINITION
+
+use constant NAME => 'check_unix_open_fds';
+use constant VERSION => '0.1b';
+use constant USAGE => "Usage:\ncheck_unix_open_fds -w <process_threshold,application_threshold> -c <process_threshold,application_threshold>\n".
+ "\t\t[-V <version>]\n";
+use constant BLURB => "This plugin checks, in UNIX systems with the command lsof installed and with its SUID bit activated, the number\n".
+ "of file descriptors opened by an application and its processes.\n";
+use constant LICENSE => "This nagios plugin is free software, and comes with ABSOLUTELY\n".
+ "no WARRANTY. It may be used, redistributed and/or modified under\n".
+ "the terms of the GNU General Public Licence\n".
+ "(see http://www.fsf.org/licensing/licenses/gpl.txt).\n";
+use constant EXAMPLE => "\n\n".
+ "Example:\n".
+ "\n".
+ "check_unix_open_fds -a /usr/local/nagios/bin/ndo2db -w 20,75 -c 25,85\n".
+ "\n".
+ "It returns CRITICAL if number of file descriptors opened by ndo2db is higher than 85,\n".
+ "if not it returns WARNING if number of file descriptors opened by ndo2db is higher \n".
+ "than 75, if not it returns CRITICAL if number of file descriptors opened by any process\n".
+ "of ndo2db is higher than 25, if not it returns WARNING if number of file descriptors \n".
+ "opened by any process of ndo2db is higher than 20.\n".
+                        "In other cases it returns OK if the check has been performed successfully.\n\n";
+
+
+# VARIABLE DEFINITION
+
+my $Nagios;
+my $Error;
+my $PluginResult;
+my $PluginOutput;
+my @WVRange;
+my @CVRange;
+
+
+# MAIN FUNCTION
+
+# Get command line arguments
+$Nagios = &CreateNagiosManager(USAGE, VERSION, BLURB, LICENSE, NAME, EXAMPLE);
+eval {$Nagios->getopts};
+
+if (!$@) {
+ # Command line parsed
+ if (&CheckArguments($Nagios, \$Error, \@WVRange, \@CVRange)) {
+ # Argument checking passed
+ $PluginResult = &PerformCheck($Nagios, \$PluginOutput, \@WVRange, \@CVRange)
+ }
+ else {
+ # Error checking arguments
+ $PluginOutput = $Error;
+ $PluginResult = UNKNOWN;
+ }
+ $Nagios->nagios_exit($PluginResult,$PluginOutput);
+}
+else {
+ # Error parsing command line
+ $Nagios->nagios_exit(UNKNOWN,$@);
+}
+
+
+
+# FUNCTION DEFINITIONS
+
+# Creates and configures a Nagios plugin object
+# Input: strings (usage, version, blurb, license, name and example) to configure argument parsing functionality
+# Return value: reference to a Nagios plugin object
+
+sub CreateNagiosManager() {
+ # Create GetOpt object
+ my $Nagios = Nagios::Plugin->new(usage => $_[0], version => $_[1], blurb => $_[2], license => $_[3], plugin => $_[4], extra => $_[5]);
+
+ # Add argument units
+ $Nagios->add_arg(spec => 'application|a=s',
+ help => 'Application path for which you want to check the number of open file descriptors',
+ required => 1);
+
+ # Add argument warning
+ $Nagios->add_arg(spec => 'warning|w=s',
+ help => "Warning thresholds. Format: <process_threshold,application_threshold>",
+ required => 1);
+ # Add argument critical
+ $Nagios->add_arg(spec => 'critical|c=s',
+ help => "Critical thresholds. Format: <process_threshold,application_threshold>",
+ required => 1);
+
+ # Return value
+ return $Nagios;
+}
+
+
+# Checks argument values and sets some default values
+# Input: Nagios Plugin object
+# Output: reference to Error description string, Memory Unit, Swap Unit, reference to WVRange ($_[4]), reference to CVRange ($_[5])
+# Return value: True if arguments ok, false if not
+
+sub CheckArguments() {
+ my ($Nagios, $Error, $WVRange, $CVRange) = @_;
+ my $commas;
+ my $units;
+ my $i;
+ my $firstpos;
+ my $secondpos;
+
+ # Check Warning thresholds list
+ $commas = $Nagios->opts->warning =~ tr/,//;
+ if ($commas !=1){
+ ${$Error} = "Invalid Warning list format. One comma is expected.";
+ return 0;
+ }
+ else{
+ $i=0;
+ $firstpos=0;
+ my $warning=$Nagios->opts->warning;
+ while ($warning =~ /[,]/g) {
+ $secondpos=pos $warning;
+ if ($secondpos - $firstpos==1){
+ @{$WVRange}[$i] = "~:";
+ }
+ else{
+ @{$WVRange}[$i] = substr $Nagios->opts->warning, $firstpos, ($secondpos-$firstpos-1);
+ }
+ $firstpos=$secondpos;
+ $i++
+ }
+       if (length($Nagios->opts->warning) - $firstpos==0){ # the comma is the last element of the string
+ @{$WVRange}[$i] = "~:";
+ }
+ else{
+ @{$WVRange}[$i] = substr $Nagios->opts->warning, $firstpos, (length($Nagios->opts->warning)-$firstpos);
+ }
+
+       if (@{$WVRange}[0] !~/^(@?(\d+|(\d+|~):(\d+)?))?$/){
+               ${$Error} = "Invalid Process Warning threshold in @{$WVRange}[0]";
+               return 0;
+       }
+       if (@{$WVRange}[1] !~/^(@?(\d+|(\d+|~):(\d+)?))?$/){
+               ${$Error} = "Invalid Application Warning threshold in @{$WVRange}[1]";
+               return 0;
+       }
+ }
+
+ # Check Critical thresholds list
+ $commas = $Nagios->opts->critical =~ tr/,//;
+ if ($commas !=1){
+ ${$Error} = "Invalid Critical list format. One comma is expected.";
+ return 0;
+ }
+ else{
+ $i=0;
+ $firstpos=0;
+ my $critical=$Nagios->opts->critical;
+ while ($critical =~ /[,]/g) {
+ $secondpos=pos $critical ;
+ if ($secondpos - $firstpos==1){
+ @{$CVRange}[$i] = "~:";
+ }
+ else{
+ @{$CVRange}[$i] =substr $Nagios->opts->critical, $firstpos, ($secondpos-$firstpos-1);
+ }
+ $firstpos=$secondpos;
+ $i++
+ }
+       if (length($Nagios->opts->critical) - $firstpos==0){ # the comma is the last element of the string
+ @{$CVRange}[$i] = "~:";
+ }
+ else{
+ @{$CVRange}[$i] = substr $Nagios->opts->critical, $firstpos, (length($Nagios->opts->critical)-$firstpos);
+ }
+
+ if (@{$CVRange}[0] !~/^(@?(\d+|(\d+|~):(\d+)?))?$/) {
+ ${$Error} = "Invalid Process Critical threshold in @{$CVRange}[0]";
+ return 0;
+ }
+ if (@{$CVRange}[1] !~/^(@?(\d+|(\d+|~):(\d+)?))?$/) {
+ ${$Error} = "Invalid Application Critical threshold in @{$CVRange}[1]";
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+
+# Performs whole check:
+# Input: Nagios Plugin object, reference to Plugin output string, Application, referece to WVRange, reference to CVRange
+# Output: Plugin output string
+# Return value: Plugin return value
+
+sub PerformCheck() {
+ my ($Nagios, $PluginOutput, $WVRange, $CVRange) = @_;
+ my $Application;
+ my @AppNameSplitted;
+ my $ApplicationName;
+ my $PsCommand;
+ my $PsResult;
+ my @PsResultLines;
+ my $ProcLine;
+ my $ProcPid;
+ my $LsofCommand;
+ my $LsofResult;
+ my $ProcCount = 0;
+ my $FDCount = 0;
+ my $ProcFDAvg = 0;
+ my $PerProcMaxFD = 0;
+ my $ProcOKFlag = 0;
+ my $ProcWarningFlag = 0;
+ my $ProcCriticalFlag = 0;
+ my $OKFlag = 0;
+ my $WarningFlag = 0;
+ my $CriticalFlag = 0;
+ my $LastWarningProcFDs = 0;
+ my $LastWarningProc = -1;
+ my $LastCriticalProcFDs = 0;
+ my $LastCriticalProc = -1;
+ my $ProcPluginReturnValue = UNKNOWN;
+ my $AppPluginReturnValue = UNKNOWN;
+ my $PluginReturnValue = UNKNOWN;
+ my $PerformanceData = "";
+ my $PerfdataUnit = "FDs";
+
+ $Application = $Nagios->opts->application;
+ $PsCommand = "ps -eaf | grep $Application";
+ $PsResult = `$PsCommand`;
+ @AppNameSplitted = split(/\//, $Application);
+ $ApplicationName = $AppNameSplitted[$#AppNameSplitted];
+ @PsResultLines = split(/\n/, $PsResult);
+ if ( $#PsResultLines > 1 ) {
+ foreach my $Proc (split(/\n/, $PsResult)) {
+ if ($Proc !~ /check_unix_open_fds/ && $Proc !~ / grep /) {
+ $ProcCount += 1;
+ $ProcPid = (split(/\s+/, $Proc))[1];
+ $LsofCommand = "lsof -p $ProcPid | wc -l";
+ $LsofResult = `$LsofCommand`;
+ $LsofResult = ($LsofResult > 0 ) ? ($LsofResult - 1) : 0;
+ $FDCount += $LsofResult;
+ if ($LsofResult >= $PerProcMaxFD) { $PerProcMaxFD = $LsofResult; }
+ $ProcPluginReturnValue = $Nagios->check_threshold(check => $LsofResult,warning => @{$WVRange}[0],critical => @{$CVRange}[0]);
+ if ($ProcPluginReturnValue eq OK) {
+ $ProcOKFlag = 1;
+ }
+ elsif ($ProcPluginReturnValue eq WARNING) {
+ $ProcWarningFlag = 1;
+ if ($LsofResult >= $LastWarningProcFDs) {
+ $LastWarningProcFDs = $LsofResult;
+ $LastWarningProc = $ProcPid;
+ }
+ }
+ #if ($LsofResult >= $PCT) {
+ elsif ($ProcPluginReturnValue eq CRITICAL) {
+ $ProcCriticalFlag = 1;
+ if ($LsofResult >= $LastCriticalProcFDs) {
+ $LastCriticalProcFDs = $LsofResult;
+ $LastCriticalProc = $ProcPid;
+ }
+ }
+ }
+ }
+ if ($ProcCount) { $ProcFDAvg = int($FDCount / $ProcCount); }
+ $AppPluginReturnValue = $Nagios->check_threshold(check => $FDCount,warning => @{$WVRange}[1],critical => @{$CVRange}[1]);
+ #if ($FDCount >= $TWT) {
+ if ($AppPluginReturnValue eq OK) { $OKFlag = 1; }
+ elsif ($AppPluginReturnValue eq WARNING) { $WarningFlag = 1; }
+ elsif ($AppPluginReturnValue eq CRITICAL) { $CriticalFlag = 1; }
+
+ # PluginReturnValue and PluginOutput
+ if ($CriticalFlag) {
+ $PluginReturnValue = CRITICAL;
+ ${$PluginOutput} .= "$ApplicationName handling $FDCount files (critical threshold set to @{$CVRange}[1])";
+ }
+ elsif ($WarningFlag) {
+ $PluginReturnValue = WARNING;
+ ${$PluginOutput} .= "$ApplicationName handling $FDCount files (warning threshold set to @{$WVRange}[1])";
+ }
+ elsif ($ProcCriticalFlag) {
+ $PluginReturnValue = CRITICAL;
+ ${$PluginOutput} .= "Process ID $LastCriticalProc handling $LastCriticalProcFDs files (critical threshold set to @{$CVRange}[0])";
+ }
+ elsif ($ProcWarningFlag) {
+ $PluginReturnValue = WARNING;
+ ${$PluginOutput} .= "Process ID $LastWarningProc handling $LastWarningProcFDs files (warning threshold set to @{$WVRange}[0])";
+ }
+ elsif ($OKFlag && $ProcOKFlag) {
+ $PluginReturnValue = OK;
+ ${$PluginOutput} .= "$ApplicationName handling $FDCount files";
+ }
+ }
+ else {
+    ${$PluginOutput} .= "The application $ApplicationName does not exist";
+ }
+
+
+ $PerformanceData .= "ProcCount=$ProcCount$PerfdataUnit FDCount=$FDCount$PerfdataUnit ProcFDAvg=$ProcFDAvg$PerfdataUnit PerProcMaxFD=$PerProcMaxFD$PerfdataUnit";
+
+ # Output with performance data:
+ ${$PluginOutput} .= " | $PerformanceData";
+
+ return $PluginReturnValue;
+}
diff --git a/puppet/modules/site_check_mk/files/agent/plugins/mk_logwatch.1.2.4 b/puppet/modules/site_check_mk/files/agent/plugins/mk_logwatch.1.2.4
new file mode 100755
index 00000000..3dbca322
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/plugins/mk_logwatch.1.2.4
@@ -0,0 +1,374 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2010 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Call with -d for debug mode: colored output, no saving of status
+
+import sys, os, re, time
+import glob
+
+if '-d' in sys.argv[1:] or '--debug' in sys.argv[1:]:
+ tty_red = '\033[1;31m'
+ tty_green = '\033[1;32m'
+ tty_yellow = '\033[1;33m'
+ tty_blue = '\033[1;34m'
+ tty_normal = '\033[0m'
+ debug = True
+else:
+ tty_red = ''
+ tty_green = ''
+ tty_yellow = ''
+ tty_blue = ''
+ tty_normal = ''
+ debug = False
+
+# The configuration file and status file are searched
+# in the directory named by the environment variable
+# LOGWATCH_DIR. If that is not set, MK_CONFDIR is used.
+# If that is not set either, the current directory is
+# used.
+logwatch_dir = os.getenv("LOGWATCH_DIR")
+if not logwatch_dir:
+ logwatch_dir = os.getenv("MK_CONFDIR")
+ if not logwatch_dir:
+ logwatch_dir = "."
+
+print "<<<logwatch>>>"
+
+config_filename = logwatch_dir + "/logwatch.cfg"
+status_filename = logwatch_dir + "/logwatch.state"
+config_dir = logwatch_dir + "/logwatch.d/*.cfg"
+
+def is_not_comment(line):
+ if line.lstrip().startswith('#') or \
+ line.strip() == '':
+ return False
+ return True
+
+def parse_filenames(line):
+ return line.split()
+
+def parse_pattern(level, pattern):
+ if level not in [ 'C', 'W', 'I', 'O' ]:
+ raise(Exception("Invalid pattern line '%s'" % line))
+ try:
+ compiled = re.compile(pattern)
+ except:
+ raise(Exception("Invalid regular expression in line '%s'" % line))
+ return (level, compiled)
+
+def read_config():
+ config_lines = [ line.rstrip() for line in filter(is_not_comment, file(config_filename).readlines()) ]
+ # Add config from a logwatch.d folder
+ for config_file in glob.glob(config_dir):
+ config_lines += [ line.rstrip() for line in filter(is_not_comment, file(config_file).readlines()) ]
+
+ have_filenames = False
+ config = []
+
+ for line in config_lines:
+ rewrite = False
+ if line[0].isspace(): # pattern line
+ if not have_filenames:
+ raise Exception("Missing logfile names")
+ level, pattern = line.split(None, 1)
+ if level == 'A':
+ cont_list.append(parse_cont_pattern(pattern))
+ elif level == 'R':
+ rewrite_list.append(pattern)
+ else:
+ level, compiled = parse_pattern(level, pattern)
+ cont_list = [] # List of continuation patterns
+ rewrite_list = [] # List of rewrite patterns
+ patterns.append((level, compiled, cont_list, rewrite_list))
+ else: # filename line
+ patterns = []
+ config.append((parse_filenames(line), patterns))
+ have_filenames = True
+ return config
+
+def parse_cont_pattern(pattern):
+ try:
+ return int(pattern)
+ except:
+ try:
+ return re.compile(pattern)
+ except:
+ if debug:
+ raise
+ raise Exception("Invalid regular expression in line '%s'" % pattern)
+
+# structure of statusfile
+# # LOGFILE OFFSET INODE
+# /var/log/messages|7767698|32455445
+# /var/test/x12134.log|12345|32444355
+def read_status():
+ if debug:
+ return {}
+
+ status = {}
+ for line in file(status_filename):
+ # TODO: Remove variants with spaces. rsplit is
+ # not portable. split fails if logfilename contains
+ # spaces
+ inode = -1
+ try:
+ parts = line.split('|')
+ filename = parts[0]
+ offset = parts[1]
+ if len(parts) >= 3:
+ inode = parts[2]
+
+ except:
+ try:
+ filename, offset = line.rsplit(None, 1)
+ except:
+ filename, offset = line.split(None, 1)
+ status[filename] = int(offset), int(inode)
+ return status
+
+def save_status(status):
+ f = file(status_filename, "w")
+ for filename, (offset, inode) in status.items():
+ f.write("%s|%d|%d\n" % (filename, offset, inode))
+
+pushed_back_line = None
+def next_line(f):
+ global pushed_back_line
+ if pushed_back_line != None:
+ line = pushed_back_line
+ pushed_back_line = None
+ return line
+ else:
+ try:
+ line = f.next()
+ return line
+ except:
+ return None
+
+
+def process_logfile(logfile, patterns):
+ global pushed_back_line
+
+ # Look at which file offset we have finished scanning
+ # the logfile last time. If we have never seen this file
+ # before, we set the offset to -1
+ offset, prev_inode = status.get(logfile, (-1, -1))
+ try:
+ fl = os.open(logfile, os.O_RDONLY)
+ inode = os.fstat(fl)[1] # 1 = st_ino
+ except:
+ if debug:
+ raise
+ print "[[[%s:cannotopen]]]" % logfile
+ return
+
+ print "[[[%s]]]" % logfile
+
+ # Seek to the current end in order to determine file size
+ current_end = os.lseek(fl, 0, 2) # os.SEEK_END not available in Python 2.4
+ status[logfile] = current_end, inode
+
+ # If we have never seen this file before, we just set the
+ # current pointer to the file end. We do not want to make
+ # a fuss about ancient log messages...
+ if offset == -1:
+ if not debug:
+ return
+ else:
+ offset = 0
+
+
+    # If the inode of the logfile has changed, it has apparently
+    # been started anew (logfile rotation). At least we must
+ # assume that. In some rare cases (restore of a backup, etc)
+ # we are wrong and resend old log messages
+ if prev_inode >= 0 and inode != prev_inode:
+ offset = 0
+
+ # Our previously stored offset is the current end ->
+ # no new lines in this file
+ if offset == current_end:
+ return # nothing new
+
+ # If our offset is beyond the current end, the logfile has been
+ # truncated or wrapped while keeping the same inode. We assume
+ # that it contains all new data in that case and restart from
+ # offset 0.
+ if offset > current_end:
+ offset = 0
+
+ # now seek to offset where interesting data begins
+ os.lseek(fl, offset, 0) # os.SEEK_SET not available in Python 2.4
+ f = os.fdopen(fl)
+ worst = -1
+ outputtxt = ""
+ lines_parsed = 0
+ start_time = time.time()
+
+ while True:
+ line = next_line(f)
+ if line == None:
+ break # End of file
+
+ lines_parsed += 1
+ # Check if maximum number of new log messages is exceeded
+ if opt_maxlines != None and lines_parsed > opt_maxlines:
+ outputtxt += "%s Maximum number (%d) of new log messages exceeded.\n" % (
+ opt_overflow, opt_maxlines)
+ worst = max(worst, opt_overflow_level)
+ os.lseek(fl, 0, 2) # Seek to end of file, skip all other messages
+ break
+
+ # Check if maximum processing time (per file) is exceeded. Check only
+ # every 100'th line in order to save system calls
+ if opt_maxtime != None and lines_parsed % 100 == 10 \
+ and time.time() - start_time > opt_maxtime:
+ outputtxt += "%s Maximum parsing time (%.1f sec) of this log file exceeded.\n" % (
+ opt_overflow, opt_maxtime)
+ worst = max(worst, opt_overflow_level)
+ os.lseek(fl, 0, 2) # Seek to end of file, skip all other messages
+ break
+
+ level = "."
+ for lev, pattern, cont_patterns, replacements in patterns:
+ matches = pattern.search(line[:-1])
+ if matches:
+ level = lev
+ levelint = {'C': 2, 'W': 1, 'O': 0, 'I': -1, '.': -1}[lev]
+ worst = max(levelint, worst)
+
+ # Check for continuation lines
+ for cont_pattern in cont_patterns:
+ if type(cont_pattern) == int: # add that many lines
+ for x in range(cont_pattern):
+ cont_line = next_line(f)
+ if cont_line == None: # end of file
+ break
+ line = line[:-1] + "\1" + cont_line
+
+ else: # pattern is regex
+ while True:
+ cont_line = next_line(f)
+ if cont_line == None: # end of file
+ break
+ elif cont_pattern.search(cont_line[:-1]):
+ line = line[:-1] + "\1" + cont_line
+ else:
+ pushed_back_line = cont_line # sorry for stealing this line
+ break
+
+ # Replacement
+ for replace in replacements:
+ line = replace.replace('\\0', line) + "\n"
+ for nr, group in enumerate(matches.groups()):
+ line = line.replace('\\%d' % (nr+1), group)
+
+ break # matching rule found and executed
+
+ color = {'C': tty_red, 'W': tty_yellow, 'O': tty_green, 'I': tty_blue, '.': ''}[level]
+ if debug:
+ line = line.replace("\1", "\nCONT:")
+ if level == "I":
+ level = "."
+ if opt_nocontext and level == '.':
+ continue
+ outputtxt += "%s%s %s%s\n" % (color, level, line[:-1], tty_normal)
+
+ new_offset = os.lseek(fl, 0, 1) # os.SEEK_CUR not available in Python 2.4
+ status[logfile] = new_offset, inode
+
+ # output all lines if at least one warning, error or ok has been found
+ if worst > -1:
+ sys.stdout.write(outputtxt)
+ sys.stdout.flush()
+
+try:
+ config = read_config()
+except Exception, e:
+ if debug:
+ raise
+ print "CANNOT READ CONFIG FILE: %s" % e
+ sys.exit(1)
+
+# Simply ignore errors in the status file. In case of a corrupted status file we simply begin
+# with an empty status. That keeps the monitoring up and running - even if we might lose a
+# message in the extreme case of a corrupted status file.
+try:
+ status = read_status()
+except Exception, e:
+ status = {}
+
+
+# The filename line may contain options like 'maxlines=100' or 'maxtime=10'
+for filenames, patterns in config:
+ # Initialize options with default values
+ opt_maxlines = None
+ opt_maxtime = None
+ opt_regex = None
+ opt_overflow = 'C'
+ opt_overflow_level = 2
+ opt_nocontext = False
+ try:
+ options = [ o.split('=', 1) for o in filenames if '=' in o ]
+ for key, value in options:
+ if key == 'maxlines':
+ opt_maxlines = int(value)
+ elif key == 'maxtime':
+ opt_maxtime = float(value)
+ elif key == 'overflow':
+ if value not in [ 'C', 'I', 'W', 'O' ]:
+ raise Exception("Invalid value %s for overflow. Allowed are C, I, O and W" % value)
+ opt_overflow = value
+ opt_overflow_level = {'C':2, 'W':1, 'O':0, 'I':0}[value]
+ elif key == 'regex':
+ opt_regex = re.compile(value)
+ elif key == 'iregex':
+ opt_regex = re.compile(value, re.I)
+ elif key == 'nocontext':
+ opt_nocontext = True
+ else:
+ raise Exception("Invalid option %s" % key)
+ except Exception, e:
+ if debug:
+ raise
+ print "INVALID CONFIGURATION: %s" % e
+ sys.exit(1)
+
+
+ for glob in filenames:
+ if '=' in glob:
+ continue
+ logfiles = [ l.strip() for l in os.popen("ls %s 2>/dev/null" % glob).readlines() ]
+ if opt_regex:
+ logfiles = [ f for f in logfiles if opt_regex.search(f) ]
+ if len(logfiles) == 0:
+ print '[[[%s:missing]]]' % glob
+ else:
+ for logfile in logfiles:
+ process_logfile(logfile, patterns)
+
+if not debug:
+ save_status(status)
diff --git a/puppet/modules/site_check_mk/files/extra_service_conf.mk b/puppet/modules/site_check_mk/files/extra_service_conf.mk
new file mode 100644
index 00000000..c7120a96
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/extra_service_conf.mk
@@ -0,0 +1,14 @@
+# retry 3 times (4 check attempts in total) before setting a service into a hard state
+# and sending out a notification
+extra_service_conf["max_check_attempts"] = [
+ ("4", ALL_HOSTS , ALL_SERVICES )
+]
+
+#
+# run check_mk_agent every 4 minutes if it terminates successfully.
+# see https://leap.se/code/issues/6539 for the rationale
+#
+extra_service_conf["normal_check_interval"] = [
+ ("4", ALL_HOSTS , "Check_MK" )
+]
+
diff --git a/puppet/modules/site_check_mk/files/ignored_services.mk b/puppet/modules/site_check_mk/files/ignored_services.mk
new file mode 100644
index 00000000..35dc4433
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/ignored_services.mk
@@ -0,0 +1,3 @@
+ignored_services = [
+ ( ALL_HOSTS, [ "NTP Time" ] )
+]
diff --git a/puppet/modules/site_check_mk/manifests/agent.pp b/puppet/modules/site_check_mk/manifests/agent.pp
new file mode 100644
index 00000000..b95d5d64
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent.pp
@@ -0,0 +1,35 @@
+# installs check-mk agent
+class site_check_mk::agent {
+
+ $ssh_hash = hiera('ssh')
+ $pubkey = $ssh_hash['authorized_keys']['monitor']['key']
+ $type = $ssh_hash['authorized_keys']['monitor']['type']
+
+
+ # /usr/bin/mk-job depends on /usr/bin/time
+ ensure_packages('time')
+
+ class { 'site_apt::preferences::check_mk': } ->
+
+ class { 'check_mk::agent':
+ agent_package_name => 'check-mk-agent',
+ agent_logwatch_package_name => 'check-mk-agent-logwatch',
+ method => 'ssh',
+ authdir => '/root/.ssh',
+ authfile => 'authorized_keys',
+ register_agent => false,
+ require => Package['time']
+ } ->
+
+ class { 'site_check_mk::agent::mrpe': } ->
+ class { 'site_check_mk::agent::logwatch': } ->
+
+ file {
+ [ '/srv/leap/nagios', '/srv/leap/nagios/plugins' ]:
+ ensure => directory;
+ '/usr/lib/check_mk_agent/local/run_node_tests.sh':
+ source => 'puppet:///modules/site_check_mk/agent/local_checks/all_hosts/run_node_tests.sh',
+ mode => '0755';
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/couchdb.pp b/puppet/modules/site_check_mk/manifests/agent/couchdb.pp
new file mode 100644
index 00000000..1554fd3c
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/couchdb.pp
@@ -0,0 +1,34 @@
+# configure logwatch and nagios checks for couchdb (both bigcouch and plain
+# couchdb installations)
+class site_check_mk::agent::couchdb {
+
+ concat::fragment { 'syslog_couchdb':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog/couchdb.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '02';
+ }
+
+ # check different couchdb stats
+ file { '/usr/lib/check_mk_agent/local/leap_couch_stats.sh':
+ source => 'puppet:///modules/site_check_mk/agent/local_checks/couchdb/leap_couch_stats.sh',
+ mode => '0755',
+ require => Package['check_mk-agent']
+ }
+
+ # check open files for bigcouch proc
+ include site_check_mk::agent::package::perl_plugin
+ file { '/srv/leap/nagios/plugins/check_unix_open_fds.pl':
+ source => 'puppet:///modules/site_check_mk/agent/nagios_plugins/check_unix_open_fds.pl',
+ mode => '0755'
+ }
+ augeas {
+ 'Couchdb_open_files':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/Couchdb_open_files',
+ 'set Couchdb_open_files \'/srv/leap/nagios/plugins/check_unix_open_fds.pl -a beam -w 28672,28672 -c 30720,30720\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/couchdb/bigcouch.pp b/puppet/modules/site_check_mk/manifests/agent/couchdb/bigcouch.pp
new file mode 100644
index 00000000..82c3ac72
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/couchdb/bigcouch.pp
@@ -0,0 +1,49 @@
+# configure logwatch and nagios checks for bigcouch
+class site_check_mk::agent::couchdb::bigcouch {
+
+ # watch bigcouch logs
+ # currently disabled because bigcouch is too noisy
+ # see https://leap.se/code/issues/7375 for more details
+ # and site_config::remove_files for removing leftovers
+ #file { '/etc/check_mk/logwatch.d/bigcouch.cfg':
+ # source => 'puppet:///modules/site_check_mk/agent/logwatch/bigcouch.cfg',
+ #}
+
+ # check syslog msg from:
+  #  - epmd
+ # - /usr/local/bin/couch-doc-update
+ concat::fragment { 'syslog_bigcouch':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog/bigcouch.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '02';
+ }
+
+ # check bigcouch processes
+ augeas {
+ 'Bigcouch_epmd_procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/Bigcouch_epmd_procs',
+ 'set Bigcouch_epmd_procs \'/usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a /opt/bigcouch/erts-5.9.1/bin/epmd\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+ 'Bigcouch_beam_procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/Bigcouch_beam_procs',
+ 'set Bigcouch_beam_procs \'/usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a /opt/bigcouch/erts-5.9.1/bin/beam\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+ augeas {
+ 'Bigcouch_open_files':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/Bigcouch_open_files',
+ 'set Bigcouch_open_files \'/srv/leap/nagios/plugins/check_unix_open_fds.pl -a beam -w 28672,28672 -c 30720,30720\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/couchdb/plain.pp b/puppet/modules/site_check_mk/manifests/agent/couchdb/plain.pp
new file mode 100644
index 00000000..3ec2267b
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/couchdb/plain.pp
@@ -0,0 +1,23 @@
+# configure logwatch and nagios checks for plain single couchdb master
+class site_check_mk::agent::couchdb::plain {
+
+ # remove bigcouch leftovers
+ augeas {
+ 'Bigcouch_epmd_procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => 'rm /files/etc/check_mk/mrpe.cfg/Bigcouch_epmd_procs',
+ require => File['/etc/check_mk/mrpe.cfg'];
+ 'Bigcouch_beam_procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => 'rm /files/etc/check_mk/mrpe.cfg/Bigcouch_beam_procs',
+ require => File['/etc/check_mk/mrpe.cfg'];
+ 'Bigcouch_open_files':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => 'rm /files/etc/check_mk/mrpe.cfg/Bigcouch_open_files',
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/haproxy.pp b/puppet/modules/site_check_mk/manifests/agent/haproxy.pp
new file mode 100644
index 00000000..6d52efba
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/haproxy.pp
@@ -0,0 +1,15 @@
+class site_check_mk::agent::haproxy {
+
+ include site_check_mk::agent::package::nagios_plugins_contrib
+
+ # local nagios plugin checks via mrpe
+ augeas { 'haproxy':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/Haproxy',
+ 'set Haproxy \'/usr/lib/nagios/plugins/check_haproxy -u "http://localhost:8000/haproxy;csv"\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/haveged.pp b/puppet/modules/site_check_mk/manifests/agent/haveged.pp
new file mode 100644
index 00000000..cacbea8c
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/haveged.pp
@@ -0,0 +1,15 @@
+class site_check_mk::agent::haveged {
+
+# check haveged process
+ augeas {
+ 'haveged_proc':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/haveged_proc',
+ 'set haveged_proc \'/usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a /usr/sbin/haveged\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/logwatch.pp b/puppet/modules/site_check_mk/manifests/agent/logwatch.pp
new file mode 100644
index 00000000..423cace2
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/logwatch.pp
@@ -0,0 +1,36 @@
+class site_check_mk::agent::logwatch {
+ # Deploy mk_logwatch 1.2.4 so we can split the config
+ # into multiple config files in /etc/check_mk/logwatch.d
+ # see https://leap.se/code/issues/5135
+
+ file { '/usr/lib/check_mk_agent/plugins/mk_logwatch':
+ source => 'puppet:///modules/site_check_mk/agent/plugins/mk_logwatch.1.2.4',
+ mode => '0755',
+ require => Package['check-mk-agent-logwatch']
+ }
+
+ # only config files that watch a distinct logfile should go in logwatch.d/
+ file { '/etc/check_mk/logwatch.d':
+ ensure => directory,
+ recurse => true,
+ purge => true,
+ require => Package['check-mk-agent-logwatch']
+ }
+
+  # services that share a common logfile (i.e. /var/log/syslog) need to get
+  # concatenated into one file, otherwise the last file sourced will override
+  # the config before it
+ # see mk_logwatch: "logwatch.cfg overwrites config files in logwatch.d",
+ # https://leap.se/code/issues/5155
+
+ # first, we need to deploy a custom logwatch.cfg that doesn't include
+ # a section about /var/log/syslog
+
+ file { '/etc/check_mk/logwatch.cfg':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/logwatch.cfg',
+ require => Package['check_mk-agent-logwatch']
+ }
+
+ include concat::setup
+ include site_check_mk::agent::logwatch::syslog
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/logwatch/syslog.pp b/puppet/modules/site_check_mk/manifests/agent/logwatch/syslog.pp
new file mode 100644
index 00000000..c927780d
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/logwatch/syslog.pp
@@ -0,0 +1,18 @@
+class site_check_mk::agent::logwatch::syslog {
+
+ concat { '/etc/check_mk/logwatch.d/syslog.cfg':
+ warn => true
+ }
+
+ concat::fragment { 'syslog_header':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog_header.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '01';
+ }
+ concat::fragment { 'syslog_tail':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog_tail.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '99';
+ }
+
+}
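For reference, the concat resource above stitches the shared syslog configuration together from
ordered fragments contributed by the per-service manifests in this diff. A rough sketch of how the
assembled file is laid out (fragment names from the manifests, contents abbreviated):

```bash
# order 01: syslog_header.cfg  -> opens the /var/log/syslog section
# order 02: per-service fragments (couchdb, bigcouch, openvpn, stunnel, ...)
# order 99: syslog_tail.cfg    -> closing defaults
cat /etc/check_mk/logwatch.d/syslog.cfg
```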
diff --git a/puppet/modules/site_check_mk/manifests/agent/mrpe.pp b/puppet/modules/site_check_mk/manifests/agent/mrpe.pp
new file mode 100644
index 00000000..5e1f087a
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/mrpe.pp
@@ -0,0 +1,24 @@
+class site_check_mk::agent::mrpe {
+ # check_mk can use standard nagios plugins using
+ # a wrapper called mrpe
+ # see http://mathias-kettner.de/checkmk_mrpe.html
+
+ package { 'nagios-plugins-basic':
+ ensure => latest,
+ }
+
+ file { '/etc/check_mk/mrpe.cfg':
+ ensure => present,
+ require => Package['check-mk-agent']
+ } ->
+
+ augeas {
+ 'Apt':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/APT',
+ 'set APT \'/usr/lib/nagios/plugins/check_apt\'' ];
+ }
+
+}
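The Augeas change above uses the Spacevars lens, so the resulting entry in mrpe.cfg is a plain
"name command" pair. A quick way to confirm it on a node (a sketch, assuming the file path from
this manifest):

```bash
grep '^APT' /etc/check_mk/mrpe.cfg
# expected output: APT /usr/lib/nagios/plugins/check_apt
```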
diff --git a/puppet/modules/site_check_mk/manifests/agent/mx.pp b/puppet/modules/site_check_mk/manifests/agent/mx.pp
new file mode 100644
index 00000000..20cbcade
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/mx.pp
@@ -0,0 +1,27 @@
+# configure check_mk agent checks for the mx service
+class site_check_mk::agent::mx {
+
+ # watch logs
+ file { '/etc/check_mk/logwatch.d/leap_mx.cfg':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/leap_mx.cfg',
+ }
+
+ # local nagios plugin checks via mrpe
+ # removed because leap_cli integrates a check for running mx procs already,
+ # which is also integrated into nagios (called "Mx/Are_MX_daemons_running")
+ augeas {
+ 'Leap_MX_Procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => 'rm /files/etc/check_mk/mrpe.cfg/Leap_MX_Procs',
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+ # check stale files in queue dir
+ file { '/usr/lib/check_mk_agent/local/check_leap_mx.sh':
+ source => 'puppet:///modules/site_check_mk/agent/local_checks/mx/check_leap_mx.sh',
+ mode => '0755',
+ require => Package['check_mk-agent']
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/openvpn.pp b/puppet/modules/site_check_mk/manifests/agent/openvpn.pp
new file mode 100644
index 00000000..0596a497
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/openvpn.pp
@@ -0,0 +1,10 @@
+class site_check_mk::agent::openvpn {
+
+ # check syslog
+  concat::fragment { 'syslog_openvpn':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/openvpn.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '02';
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/package/nagios_plugins_contrib.pp b/puppet/modules/site_check_mk/manifests/agent/package/nagios_plugins_contrib.pp
new file mode 100644
index 00000000..95a60d17
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/package/nagios_plugins_contrib.pp
@@ -0,0 +1,5 @@
+class site_check_mk::agent::package::nagios_plugins_contrib {
+ package { 'nagios-plugins-contrib':
+ ensure => installed,
+ }
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/package/perl_plugin.pp b/puppet/modules/site_check_mk/manifests/agent/package/perl_plugin.pp
new file mode 100644
index 00000000..4feda375
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/package/perl_plugin.pp
@@ -0,0 +1,5 @@
+class site_check_mk::agent::package::perl_plugin {
+ package { 'libnagios-plugin-perl':
+ ensure => installed,
+ }
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/soledad.pp b/puppet/modules/site_check_mk/manifests/agent/soledad.pp
new file mode 100644
index 00000000..f4a3f3a6
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/soledad.pp
@@ -0,0 +1,17 @@
+class site_check_mk::agent::soledad {
+
+ file { '/etc/check_mk/logwatch.d/soledad.cfg':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/soledad.cfg',
+ }
+
+ # local nagios plugin checks via mrpe
+
+ augeas { 'Soledad_Procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/Soledad_Procs',
+ 'set Soledad_Procs \'/usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a "/usr/bin/python /usr/bin/twistd --uid=soledad --gid=soledad --pidfile=/var/run/soledad.pid --logfile=/var/log/soledad.log web --wsgi=leap.soledad.server.application --port=ssl:2323:privateKey=/etc/x509/keys/leap.key:certKey=/etc/x509/certs/leap.crt:sslmethod=SSLv23_METHOD"\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/stunnel.pp b/puppet/modules/site_check_mk/manifests/agent/stunnel.pp
new file mode 100644
index 00000000..7f765771
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/stunnel.pp
@@ -0,0 +1,9 @@
+class site_check_mk::agent::stunnel {
+
+ concat::fragment { 'syslog_stunnel':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/stunnel.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '02';
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/webapp.pp b/puppet/modules/site_check_mk/manifests/agent/webapp.pp
new file mode 100644
index 00000000..9bf3b197
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/webapp.pp
@@ -0,0 +1,15 @@
+class site_check_mk::agent::webapp {
+
+ # remove leftovers of webapp python checks
+ file {
+ [ '/usr/lib/check_mk_agent/local/nagios-webapp_login.py',
+ '/usr/lib/check_mk_agent/local/soledad_sync.py' ]:
+ ensure => absent
+ }
+
+ # watch logs
+ file { '/etc/check_mk/logwatch.d/webapp.cfg':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/webapp.cfg',
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/server.pp b/puppet/modules/site_check_mk/manifests/server.pp
new file mode 100644
index 00000000..7ff9eb4a
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/server.pp
@@ -0,0 +1,103 @@
+# setup check_mk on the monitoring server
+class site_check_mk::server {
+
+ $ssh_hash = hiera('ssh')
+ $pubkey = $ssh_hash['authorized_keys']['monitor']['key']
+ $type = $ssh_hash['authorized_keys']['monitor']['type']
+ $seckey = $ssh_hash['monitor']['private_key']
+
+ $nagios_hiera = hiera_hash('nagios')
+ $hosts = $nagios_hiera['hosts']
+
+ $all_hosts = inline_template ('<% @hosts.keys.sort.each do |key| -%><% if @hosts[key]["environment"] != "disabled" %>"<%= @hosts[key]["domain_internal"] %>", <% end -%><% end -%>')
+ $domains_internal = $nagios_hiera['domains_internal']
+ $environments = $nagios_hiera['environments']
+
+ package { 'check-mk-server':
+ ensure => installed,
+ }
+
+ # we don't use check-mk-multisite, and the jessie version
+ # of this config file breaks with apache 2.4
+  # until https://gitlab.com/shared-puppet-modules-group/apache/issues/11
+  # is fixed, we need to use a generic file resource here
+ #apache::config::global { 'check-mk-multisite.conf':
+ # ensure => absent
+ #}
+
+ file { '/etc/apache2/conf-enabled/check-mk-multisite.conf':
+ ensure => absent,
+ require => Package['check-mk-server'];
+ }
+
+ # override paths to use the system check_mk rather than OMD
+ class { 'check_mk::config':
+ site => '',
+ etc_dir => '/etc',
+ nagios_subdir => 'nagios3',
+ bin_dir => '/usr/bin',
+ host_groups => undef,
+ use_storedconfigs => false,
+ inventory_only_on_changes => false,
+ require => Package['check-mk-server']
+ }
+
+ Exec['check_mk-refresh'] ->
+ Exec['check_mk-refresh-inventory-daily'] ->
+ Exec['check_mk-reload'] ->
+ Service['nagios']
+
+ file {
+ '/etc/check_mk/conf.d/use_ssh.mk':
+ content => template('site_check_mk/use_ssh.mk'),
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/hostgroups.mk':
+ content => template('site_check_mk/hostgroups.mk'),
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/host_contactgroups.mk':
+ content => template('site_check_mk/host_contactgroups.mk'),
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/ignored_services.mk':
+ source => 'puppet:///modules/site_check_mk/ignored_services.mk',
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/extra_service_conf.mk':
+ source => 'puppet:///modules/site_check_mk/extra_service_conf.mk',
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/extra_host_conf.mk':
+ content => template('site_check_mk/extra_host_conf.mk'),
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+
+ '/etc/check_mk/all_hosts_static':
+ content => $all_hosts,
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+
+ '/etc/check_mk/.ssh':
+ ensure => directory,
+ require => Package['check-mk-server'];
+ '/etc/check_mk/.ssh/id_rsa':
+ content => $seckey,
+ owner => 'nagios',
+ mode => '0600',
+ require => Package['check-mk-server'];
+ '/etc/check_mk/.ssh/id_rsa.pub':
+ content => "${type} ${pubkey} monitor",
+ owner => 'nagios',
+ mode => '0644',
+ require => Package['check-mk-server'];
+
+ # check_icmp must be suid root or called by sudo
+ # see https://leap.se/code/issues/5171
+ '/usr/lib/nagios/plugins/check_icmp':
+ mode => '4755',
+ require => Package['nagios-plugins-basic'];
+ }
+
+ include check_mk::agent::local_checks
+}
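The ordering chain above relies on the check_mk module's refresh/reload execs. Roughly the same
steps can be run by hand with the check_mk command line; a sketch, assuming the standard check_mk
1.x CLI shipped by the Debian check-mk-server package:

```bash
check_mk -I     # inventory new services on the configured hosts
check_mk -O     # regenerate the Nagios config and reload the core
```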
diff --git a/puppet/modules/site_check_mk/templates/extra_host_conf.mk b/puppet/modules/site_check_mk/templates/extra_host_conf.mk
new file mode 100644
index 00000000..bc27b514
--- /dev/null
+++ b/puppet/modules/site_check_mk/templates/extra_host_conf.mk
@@ -0,0 +1,13 @@
+# retry 3 times before setting a host into a hard state
+# and sending out a notification
+extra_host_conf["max_check_attempts"] = [
+ ("4", ALL_HOSTS )
+]
+
+# Use hostnames as aliases so notification mail subjects
+# are more readable and not so long. The alias defaults to
+# the fqdn of a host if it is not changed.
+extra_host_conf["alias"] = [
+<% @hosts.keys.sort.each do |key| -%> ( "<%= key.strip %>", ["<%= @hosts[key]['domain_internal']%>"]),
+<% end -%>
+]
diff --git a/puppet/modules/site_check_mk/templates/host_contactgroups.mk b/puppet/modules/site_check_mk/templates/host_contactgroups.mk
new file mode 100644
index 00000000..6a534967
--- /dev/null
+++ b/puppet/modules/site_check_mk/templates/host_contactgroups.mk
@@ -0,0 +1,17 @@
+<%
+ contact_groups = []
+ @environments.keys.sort.each do |env_name|
+ hosts = ""
+ @nagios_hosts.keys.sort.each do |hostname|
+ hostdata = @nagios_hosts[hostname]
+ domain_internal = hostdata['domain_internal']
+ if hostdata['environment'] == env_name
+ hosts << '"' + domain_internal + '", '
+ end
+ end
+ contact_groups << ' ( "%s", [%s] )' % [env_name, hosts]
+ end
+%>
+host_contactgroups = [
+<%= contact_groups.join(",\n") %>
+]
diff --git a/puppet/modules/site_check_mk/templates/hostgroups.mk b/puppet/modules/site_check_mk/templates/hostgroups.mk
new file mode 100644
index 00000000..7158dcd1
--- /dev/null
+++ b/puppet/modules/site_check_mk/templates/hostgroups.mk
@@ -0,0 +1,17 @@
+<%
+ host_groups = []
+ @environments.keys.sort.each do |env_name|
+ hosts = ""
+ @nagios_hosts.keys.sort.each do |hostname|
+ hostdata = @nagios_hosts[hostname]
+ domain_internal = hostdata['domain_internal']
+ if hostdata['environment'] == env_name
+ hosts << '"' + domain_internal + '", '
+ end
+ end
+ host_groups << ' ( "%s", [%s] )' % [env_name, hosts]
+ end
+%>
+host_groups = [
+<%= host_groups.join(",\n") %>
+]
diff --git a/puppet/modules/site_check_mk/templates/use_ssh.mk b/puppet/modules/site_check_mk/templates/use_ssh.mk
new file mode 100644
index 00000000..55269536
--- /dev/null
+++ b/puppet/modules/site_check_mk/templates/use_ssh.mk
@@ -0,0 +1,6 @@
+# http://mathias-kettner.de/checkmk_datasource_programs.html
+datasource_programs = [
+<% @nagios_hosts.sort.each do |name,config| %>
+ ( "ssh -l root -i /etc/check_mk/.ssh/id_rsa -p <%=config['ssh_port']%> <%=config['domain_internal']%> check_mk_agent", [ "<%=config['domain_internal']%>" ], ),<%- end -%>
+
+]
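The datasource_programs entry makes the monitoring server fetch agent output over ssh instead of
the default agent TCP port. To test a single host by hand (host name and port below are
placeholders for the values the template interpolates):

```bash
ssh -l root -i /etc/check_mk/.ssh/id_rsa -p 22 node1.example.org check_mk_agent | head
```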
diff --git a/puppet/modules/site_config/files/xterm-title.sh b/puppet/modules/site_config/files/xterm-title.sh
new file mode 100644
index 00000000..3cff0e3a
--- /dev/null
+++ b/puppet/modules/site_config/files/xterm-title.sh
@@ -0,0 +1,8 @@
+# If this is an xterm set the title to user@host:dir
+case "$TERM" in
+xterm*|rxvt*)
+ PROMPT_COMMAND='echo -ne "\033]0;${USER}@${HOSTNAME}: ${PWD}\007"'
+ ;;
+*)
+ ;;
+esac
diff --git a/puppet/modules/site_config/lib/facter/dhcp_enabled.rb b/puppet/modules/site_config/lib/facter/dhcp_enabled.rb
new file mode 100644
index 00000000..33220da3
--- /dev/null
+++ b/puppet/modules/site_config/lib/facter/dhcp_enabled.rb
@@ -0,0 +1,22 @@
+require 'facter'
+def dhcp_enabled?(ifs, recurse=true)
+ dhcp = false
+ included_ifs = []
+ if FileTest.exists?(ifs)
+ File.open(ifs) do |file|
+ dhcp = file.enum_for(:each_line).any? do |line|
+ if recurse && line =~ /^\s*source\s+([^\s]+)/
+ included_ifs += Dir.glob($1)
+ end
+ line =~ /inet\s+dhcp/
+ end
+ end
+ end
+ dhcp || included_ifs.any? { |ifs| dhcp_enabled?(ifs, false) }
+end
+Facter.add(:dhcp_enabled) do
+ confine :osfamily => 'Debian'
+ setcode do
+ dhcp_enabled?('/etc/network/interfaces')
+ end
+end
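A custom fact like this one can be exercised without a full puppet run; one way to do so, with the
path given relative to a platform checkout (illustrative only):

```bash
FACTERLIB=puppet/modules/site_config/lib/facter facter dhcp_enabled
```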
diff --git a/puppet/modules/site_config/lib/facter/ip_interface.rb b/puppet/modules/site_config/lib/facter/ip_interface.rb
new file mode 100644
index 00000000..45764bfc
--- /dev/null
+++ b/puppet/modules/site_config/lib/facter/ip_interface.rb
@@ -0,0 +1,13 @@
+require 'facter/util/ip'
+
+Facter::Util::IP.get_interfaces.each do |interface|
+ ip = Facter.value("ipaddress_#{interface}")
+ if ip != nil
+ Facter.add("interface_" + ip ) do
+ setcode do
+ interface
+ end
+ end
+ end
+end
+
diff --git a/puppet/modules/site_config/manifests/caching_resolver.pp b/puppet/modules/site_config/manifests/caching_resolver.pp
new file mode 100644
index 00000000..8bf465c1
--- /dev/null
+++ b/puppet/modules/site_config/manifests/caching_resolver.pp
@@ -0,0 +1,27 @@
+# deploy local caching resolver
+class site_config::caching_resolver {
+ tag 'leap_base'
+
+ class { 'unbound':
+ root_hints => false,
+ anchor => false,
+ ssl => false,
+ settings => {
+ server => {
+ verbosity => '1',
+ interface => [ '127.0.0.1', '::1' ],
+ port => '53',
+ hide-identity => 'yes',
+ hide-version => 'yes',
+ harden-glue => 'yes',
+ access-control => [ '127.0.0.0/8 allow', '::1 allow' ]
+ }
+ }
+ }
+
+ concat::fragment { 'unbound glob include':
+ target => $unbound::params::config,
+ content => "include: /etc/unbound/unbound.conf.d/*.conf\n\n",
+ order => 10
+ }
+}
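A quick sanity check that the caching resolver configured above answers on the loopback interface
(assuming dig from the dnsutils package is available on the node):

```bash
dig @127.0.0.1 example.org +short
```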
diff --git a/puppet/modules/site_config/manifests/default.pp b/puppet/modules/site_config/manifests/default.pp
new file mode 100644
index 00000000..256de1a1
--- /dev/null
+++ b/puppet/modules/site_config/manifests/default.pp
@@ -0,0 +1,71 @@
+# common things to set up on every node
+class site_config::default {
+ tag 'leap_base'
+
+ $services = hiera('services', [])
+ $domain_hash = hiera('domain')
+ include site_config::params
+ include site_config::setup
+
+ # default class, used by all hosts
+
+ include lsb, git
+
+ # configure sysctl parameters
+ include site_config::sysctl
+
+ # configure ssh and include ssh-keys
+ include site_sshd
+
+ # include classes for special environments
+ # i.e. openstack/aws nodes, vagrant nodes
+
+ # fix dhclient from changing resolver information
+ # facter returns 'true' as string
+ # lint:ignore:quoted_booleans
+ if $::dhcp_enabled == 'true' {
+ # lint:endignore
+ include site_config::dhclient
+ }
+
+ # configure /etc/resolv.conf
+ include site_config::resolvconf
+
+ # configure caching, local resolver
+ include site_config::caching_resolver
+
+ # install/configure syslog and core log rotations
+ include site_config::syslog
+
+ # provide a basic level of quality entropy
+ include haveged
+
+ # install/remove base packages
+ include site_config::packages
+
+ # include basic shorewall config
+ include site_shorewall::defaults
+
+ Package['git'] -> Vcsrepo<||>
+
+ # include basic shell config
+ include site_config::shell
+
+ # set up core leap files and directories
+ include site_config::files
+
+ # remove leftovers from previous deploys
+ include site_config::remove
+
+ if ! member($services, 'mx') {
+ include site_postfix::satellite
+ }
+
+ # if class custom exists, include it.
+ # possibility for users to define custom puppet recipes
+ if defined( '::custom') {
+ include ::custom
+ }
+
+ include site_check_mk::agent
+}
diff --git a/puppet/modules/site_config/manifests/dhclient.pp b/puppet/modules/site_config/manifests/dhclient.pp
new file mode 100644
index 00000000..a1f87d41
--- /dev/null
+++ b/puppet/modules/site_config/manifests/dhclient.pp
@@ -0,0 +1,40 @@
+# Unfortunately, there does not seem to be a way to reload the dhclient.conf
+# config file, or a convenient way to disable the modifications to
+# /etc/resolv.conf. So the following makes the functions involved no-ops and
+# ships a script to kill and restart dhclient. See the debian bugs:
+# #681698, #712796
+class site_config::dhclient {
+
+
+ include site_config::params
+
+ file { '/usr/local/sbin/reload_dhclient':
+ owner => 0,
+ group => 0,
+ mode => '0755',
+ content => template('site_config/reload_dhclient.erb');
+ }
+
+ exec { 'reload_dhclient':
+ refreshonly => true,
+ command => '/usr/local/sbin/reload_dhclient',
+ before => Class['site_config::resolvconf'],
+ require => File['/usr/local/sbin/reload_dhclient'],
+ }
+
+ file { '/etc/dhcp/dhclient-enter-hooks.d':
+ ensure => directory,
+ mode => '0755',
+ owner => 'root',
+ group => 'root',
+ }
+
+ file { '/etc/dhcp/dhclient-enter-hooks.d/disable_resolvconf':
+ content => 'make_resolv_conf() { : ; } ; set_hostname() { : ; }',
+ mode => '0644',
+ owner => 'root',
+ group => 'root',
+ require => File['/etc/dhcp/dhclient-enter-hooks.d'],
+ notify => Exec['reload_dhclient'];
+ }
+}
diff --git a/puppet/modules/site_config/manifests/files.pp b/puppet/modules/site_config/manifests/files.pp
new file mode 100644
index 00000000..d2ef8a98
--- /dev/null
+++ b/puppet/modules/site_config/manifests/files.pp
@@ -0,0 +1,24 @@
+# set up core leap files and directories
+class site_config::files {
+
+ file {
+ '/srv/leap':
+ ensure => directory,
+ owner => 'root',
+ group => 'root',
+ mode => '0711';
+
+ [ '/etc/leap', '/var/lib/leap']:
+ ensure => directory,
+ owner => 'root',
+ group => 'root',
+ mode => '0755';
+
+ '/var/log/leap':
+ ensure => directory,
+ owner => 'root',
+ group => 'adm',
+ mode => '0750';
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/hosts.pp b/puppet/modules/site_config/manifests/hosts.pp
new file mode 100644
index 00000000..878b6af0
--- /dev/null
+++ b/puppet/modules/site_config/manifests/hosts.pp
@@ -0,0 +1,44 @@
+class site_config::hosts() {
+ $hosts = hiera('hosts', false)
+
+ # calculate all the hostname aliases that might be used
+ $hostname = hiera('name')
+ $domain_hash = hiera('domain', {})
+ $dns = hiera('dns', {})
+ if $dns['aliases'] == undef {
+ $dns_aliases = []
+ } else {
+ $dns_aliases = $dns['aliases']
+ }
+ $my_hostnames = unique(concat(
+ [$domain_hash['full'], $hostname, $domain_hash['internal']], $dns_aliases
+ ))
+
+ file { '/etc/hostname':
+ ensure => present,
+ content => $hostname
+ }
+
+ exec { "/bin/hostname ${hostname}":
+ subscribe => [ File['/etc/hostname'], File['/etc/hosts'] ],
+ refreshonly => true;
+ }
+
+  # we depend on reliable hostnames from /etc/hosts for the stunnel services,
+  # so restart the stunnel service when /etc/hosts is modified.
+  # Because this is done at an early stage, the stunnel module may not
+  # have been deployed and will not be available for overriding, so
+  # this is handled in an unorthodox manner
+ exec { '/etc/init.d/stunnel4 restart':
+ subscribe => File['/etc/hosts'],
+ refreshonly => true,
+ onlyif => 'test -f /etc/init.d/stunnel4';
+ }
+
+ file { '/etc/hosts':
+ content => template('site_config/hosts'),
+ mode => '0644',
+ owner => root,
+ group => root;
+ }
+}
diff --git a/puppet/modules/site_config/manifests/initial_firewall.pp b/puppet/modules/site_config/manifests/initial_firewall.pp
new file mode 100644
index 00000000..93cfb847
--- /dev/null
+++ b/puppet/modules/site_config/manifests/initial_firewall.pp
@@ -0,0 +1,64 @@
+class site_config::initial_firewall {
+
+ # This class is intended to setup an initial firewall, before shorewall is
+ # configured. The purpose of this is for the rare case where shorewall fails
+ # to start, we should not expose services to the public.
+
+ $ssh_config = hiera('ssh')
+ $ssh_port = $ssh_config['port']
+
+ package { 'iptables':
+ ensure => present
+ }
+
+ file {
+ # This firewall enables ssh access, dns lookups and web lookups (for
+ # package installation) but otherwise restricts all outgoing and incoming
+ # ports
+ '/etc/network/ipv4firewall_up.rules':
+ content => template('site_config/ipv4firewall_up.rules.erb'),
+ owner => root,
+ group => 0,
+ mode => '0644';
+
+    # This firewall denies all ipv6 traffic - we will need to change this
+ # when we begin to support ipv6
+ '/etc/network/ipv6firewall_up.rules':
+ content => template('site_config/ipv6firewall_up.rules.erb'),
+ owner => root,
+ group => 0,
+ mode => '0644';
+
+ # Run the iptables-restore in if-pre-up so that the network is locked down
+ # until the correct interfaces and ips are connected
+ '/etc/network/if-pre-up.d/ipv4tables':
+ content => "#!/bin/sh\n/sbin/iptables-restore < /etc/network/ipv4firewall_up.rules\n",
+ owner => root,
+ group => 0,
+ mode => '0744';
+
+ # Same as above for IPv6
+ '/etc/network/if-pre-up.d/ipv6tables':
+ content => "#!/bin/sh\n/sbin/ip6tables-restore < /etc/network/ipv6firewall_up.rules\n",
+ owner => root,
+ group => 0,
+ mode => '0744';
+ }
+
+  # Immediately set up these firewall rules, but only if shorewall is not running
+ exec {
+ 'default_ipv4_firewall':
+ command => '/sbin/iptables-restore < /etc/network/ipv4firewall_up.rules',
+ logoutput => true,
+ unless => 'test -x /etc/init.d/shorewall && /etc/init.d/shorewall status',
+ subscribe => File['/etc/network/ipv4firewall_up.rules'],
+ require => File['/etc/network/ipv4firewall_up.rules'];
+
+ 'default_ipv6_firewall':
+ command => '/sbin/ip6tables-restore < /etc/network/ipv6firewall_up.rules',
+ logoutput => true,
+ unless => 'test -x /etc/init.d/shorewall6 && /etc/init.d/shorewall6 status',
+ subscribe => File['/etc/network/ipv6firewall_up.rules'],
+ require => File['/etc/network/ipv6firewall_up.rules'];
+ }
+}
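Since the if-pre-up hooks only wrap iptables-restore, the initial ruleset can also be applied and
inspected manually while debugging (a sketch; shorewall replaces this ruleset later in the run):

```bash
sh /etc/network/if-pre-up.d/ipv4tables
iptables -S INPUT    # should list ACCEPT rules for lo, ssh and dns replies, then the LOG rule
```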
diff --git a/puppet/modules/site_config/manifests/packages.pp b/puppet/modules/site_config/manifests/packages.pp
new file mode 100644
index 00000000..140189a4
--- /dev/null
+++ b/puppet/modules/site_config/manifests/packages.pp
@@ -0,0 +1,32 @@
+# install default packages and remove unwanted packages
+class site_config::packages {
+
+
+ # base set of packages that we want to have installed everywhere
+ package { [ 'etckeeper', 'screen', 'less', 'ntp' ]:
+ ensure => installed,
+ }
+
+ # base set of packages that we want to remove everywhere
+ package { [
+ 'acpi', 'build-essential',
+ 'cpp', 'cpp-4.6', 'cpp-4.7', 'cpp-4.8', 'cpp-4.9',
+ 'eject', 'ftp',
+ 'g++', 'g++-4.6', 'g++-4.7', 'g++-4.8', 'g++-4.9',
+ 'gcc', 'gcc-4.6', 'gcc-4.7', 'gcc-4.8', 'gcc-4.9',
+ 'laptop-detect', 'libc6-dev', 'libssl-dev', 'lpr', 'make',
+ 'pppconfig', 'pppoe', 'pump', 'qstat',
+ 'samba-common', 'samba-common-bin', 'smbclient',
+ 'tcl8.5', 'tk8.5', 'os-prober', 'unzip', 'xauth', 'x11-common',
+ 'x11-utils', 'xterm' ]:
+ ensure => purged;
+ }
+
+  # leave a few packages installed in local environments;
+  # vagrant, for example, needs them for mounting shared folders
+ if $::site_config::params::environment != 'local' {
+ package { [ 'nfs-common', 'nfs-kernel-server', 'rpcbind', 'portmap' ]:
+ ensure => purged;
+ }
+ }
+}
diff --git a/puppet/modules/site_config/manifests/packages/build_essential.pp b/puppet/modules/site_config/manifests/packages/build_essential.pp
new file mode 100644
index 00000000..2b3e13b9
--- /dev/null
+++ b/puppet/modules/site_config/manifests/packages/build_essential.pp
@@ -0,0 +1,28 @@
+#
+# include this whenever you want to ensure build-essential package and related compilers are installed.
+#
+class site_config::packages::build_essential inherits ::site_config::packages {
+
+ # NICKSERVER CODE NOTE: in order to support TLS, libssl-dev must be installed
+ # before EventMachine gem is built/installed.
+ Package[ 'gcc', 'make', 'g++', 'cpp', 'libssl-dev', 'libc6-dev' ] {
+ ensure => present
+ }
+
+ case $::operatingsystemrelease {
+ /^8.*/: {
+ Package[ 'gcc-4.9','g++-4.9', 'cpp-4.9' ] {
+ ensure => present
+ }
+ }
+
+ /^7.*/: {
+ Package[ 'gcc-4.7','g++-4.7', 'cpp-4.7' ] {
+ ensure => present
+ }
+ }
+
+ default: { }
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/packages/gnutls.pp b/puppet/modules/site_config/manifests/packages/gnutls.pp
new file mode 100644
index 00000000..b1f17480
--- /dev/null
+++ b/puppet/modules/site_config/manifests/packages/gnutls.pp
@@ -0,0 +1,5 @@
+class site_config::packages::gnutls {
+
+ package { 'gnutls-bin': ensure => installed }
+
+}
diff --git a/puppet/modules/site_config/manifests/params.pp b/puppet/modules/site_config/manifests/params.pp
new file mode 100644
index 00000000..012b3ce0
--- /dev/null
+++ b/puppet/modules/site_config/manifests/params.pp
@@ -0,0 +1,35 @@
+class site_config::params {
+
+ $ip_address = hiera('ip_address')
+ $ip_address_interface = getvar("interface_${ip_address}")
+ $ec2_local_ipv4_interface = getvar("interface_${::ec2_local_ipv4}")
+ $environment = hiera('environment', undef)
+
+
+ if $environment == 'local' {
+ $interface = 'eth1'
+ include site_config::packages::build_essential
+ }
+ elsif hiera('interface','') != '' {
+ $interface = hiera('interface')
+ }
+ elsif $ip_address_interface != '' {
+ $interface = $ip_address_interface
+ }
+ elsif $ec2_local_ipv4_interface != '' {
+ $interface = $ec2_local_ipv4_interface
+ }
+ elsif $::interfaces =~ /eth0/ {
+ $interface = 'eth0'
+ }
+ else {
+ fail("unable to determine a valid interface, please set a valid interface for this node in nodes/${::hostname}.json")
+ }
+
+ $ca_name = 'leap_ca'
+ $client_ca_name = 'leap_client_ca'
+ $ca_bundle_name = 'leap_ca_bundle'
+ $cert_name = 'leap'
+ $commercial_ca_name = 'leap_commercial_ca'
+ $commercial_cert_name = 'leap_commercial'
+}
diff --git a/puppet/modules/site_config/manifests/remove.pp b/puppet/modules/site_config/manifests/remove.pp
new file mode 100644
index 00000000..443df9c2
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove.pp
@@ -0,0 +1,11 @@
+# remove leftovers from previous deploys
+class site_config::remove {
+ include site_config::remove::files
+
+ case $::operatingsystemrelease {
+ /^8.*/: {
+ include site_config::remove::jessie
+ }
+ default: { }
+ }
+}
diff --git a/puppet/modules/site_config/manifests/remove/bigcouch.pp b/puppet/modules/site_config/manifests/remove/bigcouch.pp
new file mode 100644
index 00000000..3535c3c1
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove/bigcouch.pp
@@ -0,0 +1,42 @@
+# remove bigcouch leftovers from previous installations
+class site_config::remove::bigcouch {
+
+ # Don't use check_mk logwatch to watch bigcouch logs anymore
+ # see https://leap.se/code/issues/7375 for more details
+ file { '/etc/check_mk/logwatch.d/bigcouch.cfg':
+ ensure => absent,
+ notify => [
+ Exec['remove_bigcouch_logwatch_stateline']
+ ]
+ }
+
+ exec { 'remove_bigcouch_logwatch_stateline':
+ command => "sed -i '/bigcouch.log/d' /etc/check_mk/logwatch.state",
+ refreshonly => true,
+ }
+
+ cron { 'compact_all_shards':
+ ensure => absent
+ }
+
+
+ exec { 'kill_bigcouch_stunnel_procs':
+ refreshonly => true,
+ command => '/usr/bin/pkill -f "/usr/bin/stunnel4 /etc/stunnel/(ednp|epmd)_server.conf"'
+ }
+
+ # 'tidy' doesn't notify other resources, so we need to use file here instead
+ # see https://tickets.puppetlabs.com/browse/PUP-6021
+ file {
+ [ '/etc/stunnel/ednp_server.conf', '/etc/stunnel/epmd_server.conf']:
+ ensure => absent,
+ # notifying Service[stunnel] doesn't work here because the config
+ # files contain the pid of the procs to stop/start.
+ # If we remove the config, and restart stunnel then it will only
+ # stop/start the procs for which config files are found and the stale
+ # service will continue to run.
+ # So we simply kill them.
+ notify => Exec['kill_bigcouch_stunnel_procs']
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/remove/files.pp b/puppet/modules/site_config/manifests/remove/files.pp
new file mode 100644
index 00000000..41d6462e
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove/files.pp
@@ -0,0 +1,56 @@
+#
+# Sometimes when we upgrade the platform, we need to ensure that files that
+# the platform previously created will get removed.
+#
+# These file removals don't need to be kept forever: we only need to remove
+# files that are present in the prior platform release.
+#
+# We can assume that every node is upgraded from the previous platform
+# release.
+#
+
+class site_config::remove::files {
+
+ # Platform 0.8 removals
+ tidy {
+ '/etc/default/leap_mx':;
+ '/etc/logrotate.d/mx':;
+ '/etc/rsyslog.d/50-mx.conf':;
+ '/etc/apt/preferences.d/openvpn':;
+ '/etc/apt/sources.list.d/secondary.list.disabled.list':;
+ }
+
+ #
+ # Platform 0.7 removals
+ #
+
+ tidy {
+ '/etc/rsyslog.d/99-tapicero.conf':;
+ '/etc/rsyslog.d/01-webapp.conf':;
+ '/etc/rsyslog.d/50-stunnel.conf':;
+ '/etc/logrotate.d/stunnel':;
+ '/var/log/stunnel4/stunnel.log':;
+ 'leap_mx':
+ path => '/var/log/',
+ recurse => true,
+ matches => ['leap_mx*', 'mx.log.[1-5]', 'mx.log.[6-9](.gz)?',
+ 'mx.log.[0-9][0-9](.gz)?'];
+ '/srv/leap/webapp/public/provider.json':;
+ '/srv/leap/couchdb/designs/tmp_users':
+ recurse => true,
+ rmdirs => true;
+ '/etc/leap/soledad-server.conf':;
+ '/var/log/leap/openvpn.log':;
+ '/etc/rsyslog.d/50-openvpn.conf':;
+ }
+
+  # leap-mx logged to /var/log/leap_mx.log in the past
+ # we need to use a dumb exec here because file_line doesn't
+ # allow removing lines that match a regex in the current version
+ # of stdlib, see https://tickets.puppetlabs.com/browse/MODULES-1903
+ exec { 'rm_old_leap_mx_log_destination':
+ command => "/bin/sed -i '/leap_mx.log/d' /etc/check_mk/logwatch.state",
+ onlyif => "/bin/grep -qe 'leap_mx.log' /etc/check_mk/logwatch.state"
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/remove/jessie.pp b/puppet/modules/site_config/manifests/remove/jessie.pp
new file mode 100644
index 00000000..e9497baf
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove/jessie.pp
@@ -0,0 +1,14 @@
+# remove possible leftovers after upgrading from wheezy to jessie
+class site_config::remove::jessie {
+
+ tidy {
+ '/etc/apt/preferences.d/rsyslog_anon_depends':
+ notify => Exec['apt_updated'];
+ }
+
+ apt::preferences_snippet {
+ [ 'facter', 'obfsproxy', 'python-twisted', 'unbound' ]:
+ ensure => absent;
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/remove/monitoring.pp b/puppet/modules/site_config/manifests/remove/monitoring.pp
new file mode 100644
index 00000000..18e2949b
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove/monitoring.pp
@@ -0,0 +1,13 @@
+# remove leftovers on monitoring nodes
+class site_config::remove::monitoring {
+
+  # Remove check_mk logwatch spool files for
+  # tapicero and bigcouch
+ tidy {
+ 'remove_logwatch_spoolfiles':
+ path => '/var/lib/check_mk/logwatch',
+ recurse => true,
+ matches => [ '*tapicero.log', '*bigcouch.log'];
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/remove/tapicero.pp b/puppet/modules/site_config/manifests/remove/tapicero.pp
new file mode 100644
index 00000000..07c3c6c6
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove/tapicero.pp
@@ -0,0 +1,72 @@
+# remove tapicero leftovers from previous deploys on couchdb nodes
+class site_config::remove::tapicero {
+
+ ensure_packages('curl')
+
+ # remove tapicero couchdb user
+ $couchdb_config = hiera('couch')
+ $couchdb_mode = $couchdb_config['mode']
+
+ if $couchdb_mode == 'multimaster'
+ {
+ $port = 5986
+ } else {
+ $port = 5984
+ }
+
+ exec { 'remove_couchdb_user':
+ onlyif => "/usr/bin/curl -s 127.0.0.1:${port}/_users/org.couchdb.user:tapicero | grep -qv 'not_found'",
+ command => "/usr/local/bin/couch-doc-update --host 127.0.0.1:${port} --db _users --id org.couchdb.user:tapicero --delete",
+ require => Package['curl']
+ }
+
+
+ exec { 'kill_tapicero':
+ onlyif => '/usr/bin/test -s /var/run/tapicero.pid',
+ command => '/usr/bin/pkill --pidfile /var/run/tapicero.pid'
+ }
+
+ user { 'tapicero':
+ ensure => absent;
+ }
+
+ group { 'tapicero':
+ ensure => absent,
+ require => User['tapicero'];
+ }
+
+ tidy {
+ '/srv/leap/tapicero':
+ recurse => true,
+ require => [ Exec['kill_tapicero'] ];
+ '/var/lib/leap/tapicero':
+ require => [ Exec['kill_tapicero'] ];
+ '/var/run/tapicero':
+ require => [ Exec['kill_tapicero'] ];
+ '/etc/leap/tapicero.yaml':
+ require => [ Exec['kill_tapicero'] ];
+ '/etc/init.d/tapicero':
+ require => [ Exec['kill_tapicero'] ];
+ 'tapicero_logs':
+ path => '/var/log/leap',
+ recurse => true,
+ matches => 'tapicero*',
+ require => [ Exec['kill_tapicero'] ];
+ '/etc/check_mk/logwatch.d/tapicero.cfg':;
+ }
+
+ # remove local nagios plugin checks via mrpe
+ augeas {
+ 'Tapicero_Procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => 'rm /files/etc/check_mk/mrpe.cfg/Tapicero_Procs',
+ require => File['/etc/check_mk/mrpe.cfg'];
+ 'Tapicero_Heartbeat':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => 'rm Tapicero_Heartbeat',
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/remove/webapp.pp b/puppet/modules/site_config/manifests/remove/webapp.pp
new file mode 100644
index 00000000..58f59815
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove/webapp.pp
@@ -0,0 +1,7 @@
+# remove leftovers on webapp nodes
+class site_config::remove::webapp {
+ tidy {
+ '/etc/apache/sites-enabled/leap_webapp.conf':
+ notify => Service['apache'];
+ }
+}
diff --git a/puppet/modules/site_config/manifests/resolvconf.pp b/puppet/modules/site_config/manifests/resolvconf.pp
new file mode 100644
index 00000000..09f0b405
--- /dev/null
+++ b/puppet/modules/site_config/manifests/resolvconf.pp
@@ -0,0 +1,14 @@
+class site_config::resolvconf {
+
+ $domain_public = $site_config::default::domain_hash['full_suffix']
+
+ class { '::resolvconf':
+ domain => $domain_public,
+ search => $domain_public,
+ nameservers => [
+ '127.0.0.1 # local caching-only, unbound',
+ '85.214.20.141 # Digitalcourage, a german privacy organisation: (https://en.wikipedia.org/wiki/Digitalcourage)',
+ '172.81.176.146 # OpenNIC (https://servers.opennicproject.org/edit.php?srv=ns1.tor.ca.dns.opennic.glue)'
+ ]
+ }
+}
diff --git a/puppet/modules/site_config/manifests/ruby.pp b/puppet/modules/site_config/manifests/ruby.pp
new file mode 100644
index 00000000..5c13233d
--- /dev/null
+++ b/puppet/modules/site_config/manifests/ruby.pp
@@ -0,0 +1,8 @@
+# install ruby, rubygems and bundler
+# configure ruby settings common to all servers
+class site_config::ruby {
+ Class[Ruby] -> Class[rubygems] -> Class[bundler::install]
+ class { '::ruby': }
+ class { 'bundler::install': install_method => 'package' }
+ include rubygems
+}
diff --git a/puppet/modules/site_config/manifests/ruby/dev.pp b/puppet/modules/site_config/manifests/ruby/dev.pp
new file mode 100644
index 00000000..2b0b106d
--- /dev/null
+++ b/puppet/modules/site_config/manifests/ruby/dev.pp
@@ -0,0 +1,8 @@
+# install ruby dev packages needed for building some gems
+class site_config::ruby::dev {
+ include site_config::ruby
+ include ::ruby::devel
+
+ # building gems locally probably requires build-essential and gcc:
+ include site_config::packages::build_essential
+}
diff --git a/puppet/modules/site_config/manifests/setup.pp b/puppet/modules/site_config/manifests/setup.pp
new file mode 100644
index 00000000..82dfe76d
--- /dev/null
+++ b/puppet/modules/site_config/manifests/setup.pp
@@ -0,0 +1,50 @@
+# common things to set up on every node
+# leftover from the past, when we did two puppet runs
+# one after another. We should consolidate this into site_config::default
+# in the future.
+class site_config::setup {
+ tag 'leap_base'
+
+ #
+ # this is applied before each run of site.pp
+ #
+
+ Exec { path => '/usr/bin:/usr/sbin/:/bin:/sbin:/usr/local/bin:/usr/local/sbin' }
+
+ include site_config::params
+
+ include concat::setup
+ include stdlib
+
+ # configure /etc/hosts
+ class { 'site_config::hosts': }
+
+ include site_config::initial_firewall
+
+ include site_apt
+
+ package { 'facter':
+ ensure => latest
+ }
+
+ # if squid_deb_proxy_client is set to true, install and configure
+ # squid_deb_proxy_client for apt caching
+ if hiera('squid_deb_proxy_client', false) {
+ include site_squid_deb_proxy::client
+ }
+
+ # shorewall is installed/half-configured during setup.pp (Bug #3871)
+ # we need to include shorewall::interface{eth0} in setup.pp so
+  # packages can be installed during the main puppet run, even before shorewall
+  # is configured completely
+ if ( $::site_config::params::environment == 'local' ) {
+ include site_config::vagrant
+ }
+
+ # if class site_custom::setup exists, include it.
+ # possibility for users to define custom puppet recipes
+ if defined( '::site_custom::setup') {
+ include ::site_custom::setup
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/shell.pp b/puppet/modules/site_config/manifests/shell.pp
new file mode 100644
index 00000000..5b8c025d
--- /dev/null
+++ b/puppet/modules/site_config/manifests/shell.pp
@@ -0,0 +1,22 @@
+class site_config::shell {
+
+ file {
+ '/etc/profile.d/leap_path.sh':
+ content => 'PATH=$PATH:/srv/leap/bin',
+ mode => '0644',
+ owner => root,
+ group => root;
+ }
+
+ ##
+ ## XTERM TITLE
+ ##
+
+ file { '/etc/profile.d/xterm-title.sh':
+ source => 'puppet:///modules/site_config/xterm-title.sh',
+ owner => root,
+ group => 0,
+ mode => '0644';
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/slow.pp b/puppet/modules/site_config/manifests/slow.pp
new file mode 100644
index 00000000..8e9b7035
--- /dev/null
+++ b/puppet/modules/site_config/manifests/slow.pp
@@ -0,0 +1,10 @@
+# this class is run by default, but can be excluded
+# for testing purposes by calling "leap deploy" with
+# the "--fast" parameter
+class site_config::slow {
+ tag 'leap_slow'
+
+ include site_config::default
+ include apt::update
+ class { 'site_apt::dist_upgrade': }
+}
diff --git a/puppet/modules/site_config/manifests/sysctl.pp b/puppet/modules/site_config/manifests/sysctl.pp
new file mode 100644
index 00000000..99f75123
--- /dev/null
+++ b/puppet/modules/site_config/manifests/sysctl.pp
@@ -0,0 +1,8 @@
+class site_config::sysctl {
+
+ sysctl::config {
+ 'net.ipv4.ip_nonlocal_bind':
+ value => 1,
+ comment => 'Allow applications to bind to an address when link is down (see https://leap.se/code/issues/4506)'
+ }
+}
diff --git a/puppet/modules/site_config/manifests/syslog.pp b/puppet/modules/site_config/manifests/syslog.pp
new file mode 100644
index 00000000..591e0601
--- /dev/null
+++ b/puppet/modules/site_config/manifests/syslog.pp
@@ -0,0 +1,62 @@
+# configure rsyslog on all nodes
+class site_config::syslog {
+
+ # only pin rsyslog packages to backports on wheezy
+ case $::operatingsystemrelease {
+ /^7.*/: {
+ include ::site_apt::preferences::rsyslog
+ }
+ # on jessie+ systems, systemd and journald are enabled,
+ # and journald logs IP addresses, so we need to disable
+  # it until a solution is found (#7863):
+ # https://github.com/systemd/systemd/issues/2447
+ default: {
+ include ::journald
+ augeas {
+ 'disable_journald':
+ incl => '/etc/systemd/journald.conf',
+ lens => 'Puppet.lns',
+ changes => 'set /files/etc/systemd/journald.conf/Journal/Storage \'none\'',
+ notify => Service['systemd-journald'];
+ }
+ }
+ }
+
+ class { '::rsyslog::client':
+ log_remote => false,
+ log_local => true,
+ custom_config => 'site_rsyslog/client.conf.erb'
+ }
+
+ rsyslog::snippet { '00-anonymize_logs':
+ content => '$ModLoad mmanon
+action(type="mmanon" ipv4.bits="32" mode="rewrite")'
+ }
+
+ augeas {
+ 'logrotate_leap_deploy':
+ context => '/files/etc/logrotate.d/leap_deploy/rule',
+ changes => [
+ 'set file /var/log/leap/deploy.log',
+ 'set rotate 5',
+ 'set size 1M',
+ 'set compress compress',
+ 'set missingok missingok',
+ 'set copytruncate copytruncate' ];
+
+ # NOTE:
+ # the puppet_command script requires the option delaycompress
+ # be set on the summary log file.
+
+ 'logrotate_leap_deploy_summary':
+ context => '/files/etc/logrotate.d/leap_deploy_summary/rule',
+ changes => [
+ 'set file /var/log/leap/deploy-summary.log',
+ 'set rotate 5',
+ 'set size 100k',
+ 'set delaycompress delaycompress',
+ 'set compress compress',
+ 'set missingok missingok',
+ 'set copytruncate copytruncate' ]
+ }
+}
diff --git a/puppet/modules/site_config/manifests/vagrant.pp b/puppet/modules/site_config/manifests/vagrant.pp
new file mode 100644
index 00000000..8f50b305
--- /dev/null
+++ b/puppet/modules/site_config/manifests/vagrant.pp
@@ -0,0 +1,11 @@
+class site_config::vagrant {
+ # class for vagrant nodes
+
+ include site_shorewall::defaults
+  # eth0 on vagrant nodes is the uplink interface
+ shorewall::interface { 'eth0':
+ zone => 'net',
+ options => 'tcpflags,blacklist,nosmurfs';
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/x509/ca.pp b/puppet/modules/site_config/manifests/x509/ca.pp
new file mode 100644
index 00000000..2880ecaf
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/ca.pp
@@ -0,0 +1,11 @@
+class site_config::x509::ca {
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $ca = $x509['ca_cert']
+
+ x509::ca { $site_config::params::ca_name:
+ content => $ca
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/ca_bundle.pp b/puppet/modules/site_config/manifests/x509/ca_bundle.pp
new file mode 100644
index 00000000..5808e29e
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/ca_bundle.pp
@@ -0,0 +1,17 @@
+class site_config::x509::ca_bundle {
+
+ # CA bundle -- we want to have the possibility of allowing multiple CAs.
+ # For now, the reason is to transition to using client CA. In the future,
+ # we will want to be able to smoothly phase out one CA and phase in another.
+ # I tried "--capath" for this, but it did not work.
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $ca = $x509['ca_cert']
+ $client_ca = $x509['client_ca_cert']
+
+ x509::ca { $site_config::params::ca_bundle_name:
+ content => "${ca}${client_ca}"
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/cert.pp b/puppet/modules/site_config/manifests/x509/cert.pp
new file mode 100644
index 00000000..7e5a36b9
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/cert.pp
@@ -0,0 +1,12 @@
+class site_config::x509::cert {
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $cert = $x509['cert']
+
+ x509::cert { $site_config::params::cert_name:
+ content => $cert
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/x509/client_ca/ca.pp b/puppet/modules/site_config/manifests/x509/client_ca/ca.pp
new file mode 100644
index 00000000..3fbafa98
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/client_ca/ca.pp
@@ -0,0 +1,16 @@
+class site_config::x509::client_ca::ca {
+
+ ##
+ ## This is for the special CA that is used exclusively for generating
+ ## client certificates by the webapp.
+ ##
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $cert = $x509['client_ca_cert']
+
+ x509::ca { $site_config::params::client_ca_name:
+ content => $cert
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/client_ca/key.pp b/puppet/modules/site_config/manifests/x509/client_ca/key.pp
new file mode 100644
index 00000000..0b537e76
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/client_ca/key.pp
@@ -0,0 +1,16 @@
+class site_config::x509::client_ca::key {
+
+ ##
+ ## This is for the special CA that is used exclusively for generating
+ ## client certificates by the webapp.
+ ##
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $key = $x509['client_ca_key']
+
+ x509::key { $site_config::params::client_ca_name:
+ content => $key
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/commercial/ca.pp b/puppet/modules/site_config/manifests/x509/commercial/ca.pp
new file mode 100644
index 00000000..c76a9dbb
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/commercial/ca.pp
@@ -0,0 +1,11 @@
+class site_config::x509::commercial::ca {
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $ca = $x509['commercial_ca_cert']
+
+ x509::ca { $site_config::params::commercial_ca_name:
+ content => $ca
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/commercial/cert.pp b/puppet/modules/site_config/manifests/x509/commercial/cert.pp
new file mode 100644
index 00000000..9dd6ffcd
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/commercial/cert.pp
@@ -0,0 +1,15 @@
+class site_config::x509::commercial::cert {
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $cert = $x509['commercial_cert']
+ $ca = $x509['commercial_ca_cert']
+
+ $cafile = "${cert}\n${ca}"
+
+ x509::cert { $site_config::params::commercial_cert_name:
+ content => $cafile
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/x509/commercial/key.pp b/puppet/modules/site_config/manifests/x509/commercial/key.pp
new file mode 100644
index 00000000..2be439fd
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/commercial/key.pp
@@ -0,0 +1,11 @@
+class site_config::x509::commercial::key {
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $key = $x509['commercial_key']
+
+ x509::key { $site_config::params::commercial_cert_name:
+ content => $key
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/key.pp b/puppet/modules/site_config/manifests/x509/key.pp
new file mode 100644
index 00000000..448dc6a6
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/key.pp
@@ -0,0 +1,11 @@
+class site_config::x509::key {
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $key = $x509['key']
+
+ x509::key { $site_config::params::cert_name:
+ content => $key
+ }
+}
diff --git a/puppet/modules/site_config/templates/hosts b/puppet/modules/site_config/templates/hosts
new file mode 100644
index 00000000..d62cbc3f
--- /dev/null
+++ b/puppet/modules/site_config/templates/hosts
@@ -0,0 +1,19 @@
+# This file is managed by puppet, any changes will be overwritten!
+
+127.0.0.1 localhost
+127.0.1.1 <%= @my_hostnames.join(' ') %>
+
+<%- if @hosts then -%>
+<% @hosts.keys.sort.each do |name| -%>
+<%- props = @hosts[name] -%>
+<%- aliases = props["aliases"] ? props["aliases"].join(' ') : nil -%>
+<%= [props["ip_address"], props["domain_full"], props["domain_internal"], aliases, name].compact.uniq.join(' ') %>
+<% end -%>
+<% end -%>
+
+# The following lines are desirable for IPv6 capable hosts
+::1 ip6-localhost ip6-loopback
+fe00::0 ip6-localnet
+ff00::0 ip6-mcastprefix
+ff02::1 ip6-allnodes
+ff02::2 ip6-allrouters
diff --git a/puppet/modules/site_config/templates/ipv4firewall_up.rules.erb b/puppet/modules/site_config/templates/ipv4firewall_up.rules.erb
new file mode 100644
index 00000000..b0c2b7ad
--- /dev/null
+++ b/puppet/modules/site_config/templates/ipv4firewall_up.rules.erb
@@ -0,0 +1,14 @@
+# Generated by iptables-save v1.4.14 on Tue Aug 20 14:40:40 2013
+*filter
+:INPUT DROP [0:0]
+:FORWARD DROP [0:0]
+:OUTPUT ACCEPT [0:0]
+-A INPUT -i lo -j ACCEPT
+-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
+-A INPUT -p tcp -m state --state NEW,ESTABLISHED --dport 22 -j ACCEPT
+-A INPUT -p tcp -m state --state NEW,ESTABLISHED --dport <%= @ssh_port %> -j ACCEPT
+-A INPUT -p udp -m udp --sport 53 -j ACCEPT
+-A INPUT -p icmp -m icmp --icmp-type 8 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
+-A INPUT -p icmp -m icmp --icmp-type 0 -m state --state RELATED,ESTABLISHED -j ACCEPT
+-A INPUT -m limit --limit 5/min -j LOG --log-prefix "iptables denied: " --log-level 7
+COMMIT
diff --git a/puppet/modules/site_config/templates/ipv6firewall_up.rules.erb b/puppet/modules/site_config/templates/ipv6firewall_up.rules.erb
new file mode 100644
index 00000000..e2c92524
--- /dev/null
+++ b/puppet/modules/site_config/templates/ipv6firewall_up.rules.erb
@@ -0,0 +1,8 @@
+# Generated by ip6tables-save v1.4.20 on Tue Aug 20 12:19:43 2013
+*filter
+:INPUT DROP [24:1980]
+:FORWARD DROP [0:0]
+:OUTPUT DROP [14:8030]
+-A OUTPUT -j REJECT --reject-with icmp6-port-unreachable
+COMMIT
+# Completed on Tue Aug 20 12:19:43 2013
diff --git a/puppet/modules/site_config/templates/reload_dhclient.erb b/puppet/modules/site_config/templates/reload_dhclient.erb
new file mode 100644
index 00000000..075828b7
--- /dev/null
+++ b/puppet/modules/site_config/templates/reload_dhclient.erb
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+# Get the PID
+PIDFILE='/var/run/dhclient.<%= scope.lookupvar('site_config::params::interface') %>.pid'
+
+# Capture how dhclient is currently running so we can relaunch it
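+# (in ps -f output the command and its arguments start at field 8; awk joins them back into one string)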
+dhclient=`/bin/ps --no-headers --pid $(cat $PIDFILE) -f | /usr/bin/awk '{for(i=8;i<=NF;++i) printf("%s ", $i) }'`
+
+# Kill the current dhclient
+/usr/bin/pkill -F $PIDFILE
+
+# Restart dhclient with the arguments it had previously
+$dhclient
diff --git a/puppet/modules/site_couchdb/files/couchdb_scripts_defaults.conf b/puppet/modules/site_couchdb/files/couchdb_scripts_defaults.conf
new file mode 100644
index 00000000..1565e1a1
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/couchdb_scripts_defaults.conf
@@ -0,0 +1,4 @@
+# space separated list of excluded DBs for dumping
+# sourced by couchdb_dumpall.sh
+EXCLUDE_DBS='sessions tokens'
+
diff --git a/puppet/modules/site_couchdb/files/designs/Readme.md b/puppet/modules/site_couchdb/files/designs/Readme.md
new file mode 100644
index 00000000..983f629f
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/Readme.md
@@ -0,0 +1,14 @@
+This directory contains design documents for the leap platform.
+
+They need to be uploaded to the couch database in order to query the
+database in certain ways.
+
+Each subdirectory corresponds to a couch database and contains the design
+documents that need to be added to that particular database.
+
+Here's an example of how to upload the users design document:
+```bash
+HOST="http://localhost:5984"
+curl -X PUT $HOST/users/_design/User --data @users/User.json
+
+```
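+
+Once a design document is uploaded, its views can be queried directly. For
+example (the login key is illustrative; `reduce=false` returns the raw rows):
+```bash
+curl "$HOST/users/_design/User/_view/by_login?key=\"alice\"&reduce=false"
+```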
diff --git a/puppet/modules/site_couchdb/files/designs/customers/Customer.json b/puppet/modules/site_couchdb/files/designs/customers/Customer.json
new file mode 100644
index 00000000..1b4bbddd
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/customers/Customer.json
@@ -0,0 +1,18 @@
+{
+ "_id": "_design/Customer",
+ "language": "javascript",
+ "views": {
+ "by_user_id": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Customer') && (doc['user_id'] != null)) {\n emit(doc['user_id'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_braintree_customer_id": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Customer') && (doc['braintree_customer_id'] != null)) {\n emit(doc['braintree_customer_id'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Customer') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "688c401ec0230b75625c176a88fc4a02"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/identities/Identity.json b/puppet/modules/site_couchdb/files/designs/identities/Identity.json
new file mode 100644
index 00000000..b1c567c1
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/identities/Identity.json
@@ -0,0 +1,34 @@
+{
+ "_id": "_design/Identity",
+ "language": "javascript",
+ "views": {
+ "by_address_and_destination": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Identity') && (doc['address'] != null) && (doc['destination'] != null)) {\n emit([doc['address'], doc['destination']], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Identity') {\n emit(doc._id, null);\n }\n }\n"
+ },
+ "cert_fingerprints_by_expiry": {
+ "map": "function(doc) {\n if (doc.type != 'Identity') {\n return;\n }\n if (typeof doc.cert_fingerprints === \"object\") {\n for (fp in doc.cert_fingerprints) {\n if (doc.cert_fingerprints.hasOwnProperty(fp)) {\n emit(doc.cert_fingerprints[fp], fp);\n }\n }\n }\n}\n"
+ },
+ "cert_expiry_by_fingerprint": {
+ "map": "function(doc) {\n if (doc.type != 'Identity') {\n return;\n }\n if (typeof doc.cert_fingerprints === \"object\") {\n for (fp in doc.cert_fingerprints) {\n if (doc.cert_fingerprints.hasOwnProperty(fp)) {\n emit(fp, doc.cert_fingerprints[fp]);\n }\n }\n }\n}\n"
+ },
+ "disabled": {
+ "map": "function(doc) {\n if (doc.type != 'Identity') {\n return;\n }\n if (typeof doc.user_id === \"undefined\") {\n emit(doc._id, 1);\n }\n}\n"
+ },
+ "pgp_key_by_email": {
+ "map": "function(doc) {\n if (doc.type != 'Identity') {\n return;\n }\n if (typeof doc.keys === \"object\") {\n emit(doc.address, doc.keys[\"pgp\"]);\n }\n}\n"
+ },
+ "by_user_id": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Identity') && (doc['user_id'] != null)) {\n emit(doc['user_id'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_address": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Identity') && (doc['address'] != null)) {\n emit(doc['address'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ }
+ },
+ "couchrest-hash": "4a774c3f56122b655a314670403b27e2"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/invite_codes/InviteCode.json b/puppet/modules/site_couchdb/files/designs/invite_codes/InviteCode.json
new file mode 100644
index 00000000..006c1ea1
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/invite_codes/InviteCode.json
@@ -0,0 +1,22 @@
+{
+ "_id": "_design/InviteCode",
+ "language": "javascript",
+ "views": {
+ "by__id": {
+ "map": " function(doc) {\n if ((doc['type'] == 'InviteCode') && (doc['_id'] != null)) {\n emit(doc['_id'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_invite_code": {
+ "map": " function(doc) {\n if ((doc['type'] == 'InviteCode') && (doc['invite_code'] != null)) {\n emit(doc['invite_code'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_invite_count": {
+ "map": " function(doc) {\n if ((doc['type'] == 'InviteCode') && (doc['invite_count'] != null)) {\n emit(doc['invite_count'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'InviteCode') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "83fb8f504520b4a9c7ddbb7928cd0ce3"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/messages/Message.json b/puppet/modules/site_couchdb/files/designs/messages/Message.json
new file mode 100644
index 00000000..6a48fc4d
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/messages/Message.json
@@ -0,0 +1,18 @@
+{
+ "_id": "_design/Message",
+ "language": "javascript",
+ "views": {
+ "by_user_ids_to_show": {
+ "map": "function (doc) {\n if (doc.type === 'Message' && doc.user_ids_to_show && Array.isArray(doc.user_ids_to_show)) {\n doc.user_ids_to_show.forEach(function (userId) {\n emit(userId, 1);\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "by_user_ids_to_show_and_created_at": {
+ "map": "// not using at moment\n// call with something like Message.by_user_ids_to_show_and_created_at.startkey([user_id, start_date]).endkey([user_id,end_date])\nfunction (doc) {\n if (doc.type === 'Message' && doc.user_ids_to_show && Array.isArray(doc.user_ids_to_show)) {\n doc.user_ids_to_show.forEach(function (userId) {\n emit([userId, doc.created_at], 1);\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Message') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "ba80168e51015d2678cad88fc6c5b986"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/sessions/Session.json b/puppet/modules/site_couchdb/files/designs/sessions/Session.json
new file mode 100644
index 00000000..70202780
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/sessions/Session.json
@@ -0,0 +1,8 @@
+{
+ "views": {
+ "by_expires": {
+ "reduce": "_sum",
+ "map": "function(doc) {\n if(typeof doc.expires !== \"undefined\") {\n emit(doc.expires, 1);\n }\n}\n"
+ }
+ }
+}
diff --git a/puppet/modules/site_couchdb/files/designs/shared/docs.json b/puppet/modules/site_couchdb/files/designs/shared/docs.json
new file mode 100644
index 00000000..004180cd
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/shared/docs.json
@@ -0,0 +1,8 @@
+{
+ "_id": "_design/docs",
+ "views": {
+ "get": {
+ "map": "function(doc) {\n if (doc.u1db_rev) {\n var is_tombstone = true;\n var has_conflicts = false;\n if (doc._attachments) {\n if (doc._attachments.u1db_content)\n is_tombstone = false;\n if (doc._attachments.u1db_conflicts)\n has_conflicts = true;\n }\n emit(doc._id,\n {\n \"couch_rev\": doc._rev,\n \"u1db_rev\": doc.u1db_rev,\n \"is_tombstone\": is_tombstone,\n \"has_conflicts\": has_conflicts,\n }\n );\n }\n}\n"
+ }
+ }
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/shared/syncs.json b/puppet/modules/site_couchdb/files/designs/shared/syncs.json
new file mode 100644
index 00000000..bab5622f
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/shared/syncs.json
@@ -0,0 +1,11 @@
+{
+ "_id": "_design/syncs",
+ "updates": {
+ "put": "function(doc, req){\n if (!doc) {\n doc = {}\n doc['_id'] = 'u1db_sync_log';\n doc['syncs'] = [];\n }\n body = JSON.parse(req.body);\n // remove outdated info\n doc['syncs'] = doc['syncs'].filter(\n function (entry) {\n return entry[0] != body['other_replica_uid'];\n }\n );\n // store u1db rev\n doc['syncs'].push([\n body['other_replica_uid'],\n body['other_generation'],\n body['other_transaction_id']\n ]);\n return [doc, 'ok'];\n}\n\n"
+ },
+ "views": {
+ "log": {
+ "map": "function(doc) {\n if (doc._id == 'u1db_sync_log') {\n if (doc.syncs)\n doc.syncs.forEach(function (entry) {\n emit(entry[0],\n {\n 'known_generation': entry[1],\n 'known_transaction_id': entry[2]\n });\n });\n }\n}\n"
+ }
+ }
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/shared/transactions.json b/puppet/modules/site_couchdb/files/designs/shared/transactions.json
new file mode 100644
index 00000000..106ad46c
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/shared/transactions.json
@@ -0,0 +1,13 @@
+{
+ "_id": "_design/transactions",
+ "lists": {
+ "generation": "function(head, req) {\n var row;\n var rows=[];\n // fetch all rows\n while(row = getRow()) {\n rows.push(row);\n }\n if (rows.length > 0)\n send(JSON.stringify({\n \"generation\": rows.length,\n \"doc_id\": rows[rows.length-1]['id'],\n \"transaction_id\": rows[rows.length-1]['value']\n }));\n else\n send(JSON.stringify({\n \"generation\": 0,\n \"doc_id\": \"\",\n \"transaction_id\": \"\",\n }));\n}\n",
+ "trans_id_for_gen": "function(head, req) {\n var row;\n var rows=[];\n var i = 1;\n var gen = 1;\n if (req.query.gen)\n gen = parseInt(req.query['gen']);\n // fetch all rows\n while(row = getRow())\n rows.push(row);\n if (gen <= rows.length)\n send(JSON.stringify({\n \"generation\": gen,\n \"doc_id\": rows[gen-1]['id'],\n \"transaction_id\": rows[gen-1]['value'],\n }));\n else\n send('{}');\n}\n",
+ "whats_changed": "function(head, req) {\n var row;\n var gen = 1;\n var old_gen = 0;\n if (req.query.old_gen)\n old_gen = parseInt(req.query['old_gen']);\n send('{\"transactions\":[\\n');\n // fetch all rows\n while(row = getRow()) {\n if (gen > old_gen) {\n if (gen > old_gen+1)\n send(',\\n');\n send(JSON.stringify({\n \"generation\": gen,\n \"doc_id\": row[\"id\"],\n \"transaction_id\": row[\"value\"]\n }));\n }\n gen++;\n }\n send('\\n]}');\n}\n"
+ },
+ "views": {
+ "log": {
+ "map": "function(doc) {\n if (doc.u1db_transactions)\n doc.u1db_transactions.forEach(function(t) {\n emit(t[0], // use timestamp as key so the results are ordered\n t[1]); // value is the transaction_id\n });\n}\n"
+ }
+ }
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/tickets/Ticket.json b/puppet/modules/site_couchdb/files/designs/tickets/Ticket.json
new file mode 100644
index 00000000..578f632b
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/tickets/Ticket.json
@@ -0,0 +1,50 @@
+{
+ "_id": "_design/Ticket",
+ "language": "javascript",
+ "views": {
+ "by_updated_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['updated_at'] != null)) {\n emit(doc['updated_at'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_created_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['created_at'] != null)) {\n emit(doc['created_at'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_created_by": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['created_by'] != null)) {\n emit(doc['created_by'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_is_open_and_created_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['is_open'] != null) && (doc['created_at'] != null)) {\n emit([doc['is_open'], doc['created_at']], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_is_open_and_updated_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['is_open'] != null) && (doc['updated_at'] != null)) {\n emit([doc['is_open'], doc['updated_at']], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_includes_post_by_and_is_open_and_created_at": {
+ "map": "function(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit([comment.posted_by, doc.is_open, doc.created_at], 1);\n }\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "by_includes_post_by": {
+ "map": "// TODO: This view is only used in tests--should we keep it?\nfunction(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit(comment.posted_by, 1);\n }\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "by_includes_post_by_and_is_open_and_updated_at": {
+ "map": "function(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit([comment.posted_by, doc.is_open, doc.updated_at], 1);\n }\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "by_includes_post_by_and_created_at": {
+ "map": "function(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit([comment.posted_by, doc.created_at], 1);\n }\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "by_includes_post_by_and_updated_at": {
+ "map": "function(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit([comment.posted_by, doc.updated_at], 1);\n }\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Ticket') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "b21eaeea8ea66bfda65581b1b7ce06af"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/tokens/Token.json b/puppet/modules/site_couchdb/files/designs/tokens/Token.json
new file mode 100644
index 00000000..b9025f15
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/tokens/Token.json
@@ -0,0 +1,14 @@
+{
+ "_id": "_design/Token",
+ "language": "javascript",
+ "views": {
+ "by_last_seen_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Token') && (doc['last_seen_at'] != null)) {\n emit(doc['last_seen_at'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Token') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "541dd924551c42a2317b345effbe65cc"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/users/User.json b/puppet/modules/site_couchdb/files/designs/users/User.json
new file mode 100644
index 00000000..8a82cf4a
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/users/User.json
@@ -0,0 +1,22 @@
+{
+ "_id": "_design/User",
+ "language": "javascript",
+ "views": {
+ "by_login": {
+ "map": " function(doc) {\n if ((doc['type'] == 'User') && (doc['login'] != null)) {\n emit(doc['login'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'User') {\n emit(doc._id, null);\n }\n }\n"
+ },
+ "by_created_at_and_one_month_warning_not_sent": {
+ "map": "function (doc) {\n if ((doc['type'] == 'User') && (doc['created_at'] != null) && (doc['one_month_warning_sent'] == null)) {\n emit(doc['created_at'], 1);\n } \n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "by_created_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'User') && (doc['created_at'] != null)) {\n emit(doc['created_at'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ }
+ },
+ "couchrest-hash": "d854607d299887a347e554176cb79e20"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/leap_ca_daemon b/puppet/modules/site_couchdb/files/leap_ca_daemon
new file mode 100755
index 00000000..9a1a0bc7
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/leap_ca_daemon
@@ -0,0 +1,157 @@
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides: leap_ca_daemon
+# Required-Start: $remote_fs $syslog
+# Required-Stop: $remote_fs $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: leap_ca_daemon initscript
+# Description: Controls leap_ca_daemon (see https://github.com/leapcode/leap_ca
+#                    for more information).
+### END INIT INFO
+
+# Author: varac <varac@leap.se>
+#
+
+# Do NOT "set -e"
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="leap_ca_daemon initscript"
+NAME=leap_ca_daemon
+DAEMON=/usr/local/bin/$NAME
+DAEMON_ARGS="run "
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# Load the VERBOSE setting and other rcS variables
+. /lib/init/vars.sh
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+ # Return
+ # 0 if daemon has been started
+ # 1 if daemon was already running
+ # 2 if daemon could not be started
+ start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
+ || return 1
+ start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- \
+ $DAEMON_ARGS \
+ || return 2
+ # Add code here, if necessary, that waits for the process to be ready
+ # to handle requests from services started subsequently which depend
+ # on this one. As a last resort, sleep for some time.
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+ # Return
+ # 0 if daemon has been stopped
+ # 1 if daemon was already stopped
+ # 2 if daemon could not be stopped
+ # other if a failure occurred
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
+ RETVAL="$?"
+ [ "$RETVAL" = 2 ] && return 2
+ # Wait for children to finish too if this is a daemon that forks
+ # and if the daemon is only ever run from this initscript.
+ # If the above conditions are not satisfied then add some other code
+ # that waits for the process to drop all resources that could be
+ # needed by services started subsequently. A last resort is to
+ # sleep for some time.
+ start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
+ [ "$?" = 2 ] && return 2
+ # Many daemons don't delete their pidfiles when they exit.
+ rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+#
+# Function that sends a SIGHUP to the daemon/service
+#
+do_reload() {
+ #
+ # If the daemon can reload its configuration without
+ # restarting (for example, when it is sent a SIGHUP),
+ # then implement that here.
+ #
+ start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name $NAME
+ return 0
+}
+
+case "$1" in
+ start)
+ [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case "$?" in
+ 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+ 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+ esac
+ ;;
+ stop)
+ [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+ 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+ esac
+ ;;
+ status)
+ status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
+ ;;
+ #reload|force-reload)
+ #
+ # If do_reload() is not implemented then leave this commented out
+ # and leave 'force-reload' as an alias for 'restart'.
+ #
+ #log_daemon_msg "Reloading $DESC" "$NAME"
+ #do_reload
+ #log_end_msg $?
+ #;;
+ restart|force-reload)
+ #
+ # If the "reload" option is implemented then remove the
+ # 'force-reload' alias
+ #
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1)
+ do_start
+ case "$?" in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *)
+ # Failed to stop
+ log_end_msg 1
+ ;;
+ esac
+ ;;
+ *)
+ #echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload}" >&2
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
+
+:
diff --git a/puppet/modules/site_couchdb/files/local.ini b/puppet/modules/site_couchdb/files/local.ini
new file mode 100644
index 00000000..b921a927
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/local.ini
@@ -0,0 +1,8 @@
+; Puppet modified file !!
+
+; Custom settings should be made in this file. They will override settings
+; in default.ini, but unlike changes made to default.ini, this file won't be
+; overwritten on server upgrade.
+
+[compactions]
+_default = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "03:00"}, {to, "05:00"}]
diff --git a/puppet/modules/site_couchdb/files/runit_config b/puppet/modules/site_couchdb/files/runit_config
new file mode 100644
index 00000000..169b4832
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/runit_config
@@ -0,0 +1,6 @@
+#!/bin/bash
+exec 2>&1
+export HOME=/home/bigcouch
+ulimit -H -n 32768
+ulimit -S -n 32768
+exec chpst -u bigcouch /opt/bigcouch/bin/bigcouch
diff --git a/puppet/modules/site_couchdb/lib/puppet/parser/functions/rotated_db_name.rb b/puppet/modules/site_couchdb/lib/puppet/parser/functions/rotated_db_name.rb
new file mode 100644
index 00000000..6458ae81
--- /dev/null
+++ b/puppet/modules/site_couchdb/lib/puppet/parser/functions/rotated_db_name.rb
@@ -0,0 +1,24 @@
+module Puppet::Parser::Functions
+ newfunction(:rotated_db_name, :type => :rvalue, :doc => <<-EOS
+This function takes a database name string and returns a database name with the current rotation stamp appended.
+The first argument is the base name of the database. Subsequent arguments may contain these options:
+ * 'next' -- return the db name for the next rotation, not the current one.
+ * 'monthly' -- rotate monthly (default)
+ * 'weekly' -- rotate weekly
+*Examples:*
+ rotated_db_name('tokens') => 'tokens_551'
+ EOS
+ ) do |arguments|
+ if arguments.include?('weekly')
+ rotation_period = 604800 # 1 week
+ else
+ rotation_period = 2592000 # 1 month
+ end
+ suffix = Time.now.utc.to_i / rotation_period
+ if arguments.include?('next')
+ suffix += 1
+ end
+ "#{arguments.first}_#{suffix}"
+ end
+end
+
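+# A minimal sketch of the arithmetic above (the timestamp is illustrative):
+#   Time.now.utc.to_i                   # => 1430000000
+#   1430000000 / 2592000                # => 551   (monthly period)
+#   rotated_db_name('tokens')           # => "tokens_551"
+#   rotated_db_name('tokens', 'next')   # => "tokens_552"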
diff --git a/puppet/modules/site_couchdb/manifests/add_users.pp b/puppet/modules/site_couchdb/manifests/add_users.pp
new file mode 100644
index 00000000..c905316b
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/add_users.pp
@@ -0,0 +1,57 @@
+# add couchdb users for all services
+class site_couchdb::add_users {
+
+ Class['site_couchdb::create_dbs']
+ -> Class['site_couchdb::add_users']
+
+ # Couchdb users
+
+ ## leap_mx couchdb user
+ ## read: identities
+ ## write access to user-<uuid>
+ couchdb::add_user { $site_couchdb::couchdb_leap_mx_user:
+ roles => '["identities"]',
+ pw => $site_couchdb::couchdb_leap_mx_pw,
+ salt => $site_couchdb::couchdb_leap_mx_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## nickserver couchdb user
+ ## r: identities
+ ## r/w: keycache
+ couchdb::add_user { $site_couchdb::couchdb_nickserver_user:
+ roles => '["identities","keycache"]',
+ pw => $site_couchdb::couchdb_nickserver_pw,
+ salt => $site_couchdb::couchdb_nickserver_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## soledad couchdb user
+ ## r/w: user-<uuid>, shared
+ ## read: tokens
+ couchdb::add_user { $site_couchdb::couchdb_soledad_user:
+ roles => '["tokens"]',
+ pw => $site_couchdb::couchdb_soledad_pw,
+ salt => $site_couchdb::couchdb_soledad_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## webapp couchdb user
+ ## read/write: users, tokens, sessions, tickets, identities, customer
+ couchdb::add_user { $site_couchdb::couchdb_webapp_user:
+ roles => '["tokens","identities","users"]',
+ pw => $site_couchdb::couchdb_webapp_pw,
+ salt => $site_couchdb::couchdb_webapp_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## replication couchdb user
+ ## read/write: all databases for replication
+ couchdb::add_user { $site_couchdb::couchdb_replication_user:
+ roles => '["replication"]',
+ pw => $site_couchdb::couchdb_replication_pw,
+ salt => $site_couchdb::couchdb_replication_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/backup.pp b/puppet/modules/site_couchdb/manifests/backup.pp
new file mode 100644
index 00000000..8b5aa6ea
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/backup.pp
@@ -0,0 +1,23 @@
+class site_couchdb::backup {
+
+ # general backupninja config
+ backupninja::config { 'backupninja_config':
+ usecolors => false,
+ }
+
+ # dump all DBs locally to /var/backups/couchdb once a day
+ backupninja::sh { 'couchdb_backup':
+ command_string => "cd /srv/leap/couchdb/scripts \n./couchdb_dumpall.sh"
+ }
+
+ # Deploy /etc/leap/couchdb_scripts_defaults.conf so we can exclude
+ # some databases
+
+ file { '/etc/leap/couchdb_scripts_defaults.conf':
+ source => 'puppet:///modules/site_couchdb/couchdb_scripts_defaults.conf',
+ mode => '0644',
+ owner => 'root',
+ group => 'root',
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch.pp b/puppet/modules/site_couchdb/manifests/bigcouch.pp
new file mode 100644
index 00000000..2de3d4d0
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/bigcouch.pp
@@ -0,0 +1,50 @@
+# sets up bigcouch on couchdb node
+class site_couchdb::bigcouch {
+
+ $config = $::site_couchdb::couchdb_config['bigcouch']
+ $cookie = $config['cookie']
+ $ednp_port = $config['ednp_port']
+
+ class { 'couchdb':
+ admin_pw => $::site_couchdb::couchdb_admin_pw,
+ admin_salt => $::site_couchdb::couchdb_admin_salt,
+ bigcouch => true,
+ bigcouch_cookie => $cookie,
+ ednp_port => $ednp_port,
+ chttpd_bind_address => '127.0.0.1'
+ }
+
+ #
+  # stunnel must be running correctly before bigcouch dbs can be set up.
+ #
+ Class['site_config::default']
+ -> Class['site_config::resolvconf']
+ -> Class['couchdb::bigcouch::package::cloudant']
+ -> Service['shorewall']
+ -> Exec['refresh_stunnel']
+ -> Class['site_couchdb::setup']
+ -> Class['site_couchdb::bigcouch::add_nodes']
+ -> Class['site_couchdb::bigcouch::settle_cluster']
+ -> Class['site_couchdb::create_dbs']
+
+ include site_couchdb::bigcouch::add_nodes
+ include site_couchdb::bigcouch::settle_cluster
+ include site_couchdb::bigcouch::compaction
+
+ file { '/var/log/bigcouch':
+ ensure => directory
+ }
+
+ file { '/etc/sv/bigcouch/run':
+ ensure => present,
+ source => 'puppet:///modules/site_couchdb/runit_config',
+ owner => root,
+ group => root,
+ mode => '0755',
+ require => Package['couchdb'],
+ notify => Service['couchdb']
+ }
+
+ include site_check_mk::agent::couchdb::bigcouch
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp b/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp
new file mode 100644
index 00000000..c8c43275
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp
@@ -0,0 +1,8 @@
+class site_couchdb::bigcouch::add_nodes {
+ # loop through neighbors array and add nodes
+ $nodes = $::site_couchdb::bigcouch::config['neighbors']
+
+ couchdb::bigcouch::add_node { $nodes:
+ require => Couchdb::Query::Setup['localhost']
+ }
+}
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch/compaction.pp b/puppet/modules/site_couchdb/manifests/bigcouch/compaction.pp
new file mode 100644
index 00000000..84aab4ef
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/bigcouch/compaction.pp
@@ -0,0 +1,8 @@
+class site_couchdb::bigcouch::compaction {
+ cron {
+ 'compact_all_shards':
+ command => '/srv/leap/couchdb/scripts/bigcouch_compact_all_shards.sh >> /var/log/bigcouch/compaction.log',
+ hour => 3,
+ minute => 17;
+ }
+}
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp b/puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp
new file mode 100644
index 00000000..820b5be2
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp
@@ -0,0 +1,11 @@
+class site_couchdb::bigcouch::settle_cluster {
+
+ exec { 'wait_for_couch_nodes':
+ command => '/srv/leap/bin/run_tests --test CouchDB/Are_configured_nodes_online? --retry 12 --wait 10'
+ }
+
+ exec { 'settle_cluster_membership':
+ command => '/srv/leap/bin/run_tests --test CouchDB/Is_cluster_membership_ok? --retry 12 --wait 10',
+ require => Exec['wait_for_couch_nodes']
+ }
+}
diff --git a/puppet/modules/site_couchdb/manifests/create_dbs.pp b/puppet/modules/site_couchdb/manifests/create_dbs.pp
new file mode 100644
index 00000000..a2d1c655
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/create_dbs.pp
@@ -0,0 +1,102 @@
+# creates the necessary databases
+class site_couchdb::create_dbs {
+
+ Class['site_couchdb::setup']
+ -> Class['site_couchdb::create_dbs']
+
+ ### customer database
+ ### r/w: webapp,
+ couchdb::create_db { 'customers':
+ members => "{ \"names\": [\"${site_couchdb::couchdb_webapp_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
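+
+  ## For illustration, the interpolated members string above becomes a
+  ## CouchDB security object like (the webapp username is an example):
+  ##   { "names": ["webapp"], "roles": ["replication"] }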
+
+ ## identities database
+  ## r: nickserver, leap_mx - needs to be restricted with a design document
+ ## r/w: webapp
+ couchdb::create_db { 'identities':
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"identities\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## keycache database
+ ## r/w: nickserver
+ couchdb::create_db { 'keycache':
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"keycache\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## sessions database
+ ## r/w: webapp
+ $sessions_db = rotated_db_name('sessions', 'monthly')
+ couchdb::create_db { $sessions_db:
+ members => "{ \"names\": [\"${site_couchdb::couchdb_webapp_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ $sessions_next_db = rotated_db_name('sessions', 'monthly', 'next')
+ couchdb::create_db { $sessions_next_db:
+ members => "{ \"names\": [\"${site_couchdb::couchdb_webapp_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## shared database
+ ## r/w: soledad
+ couchdb::create_db { 'shared':
+ members => "{ \"names\": [\"${site_couchdb::couchdb_soledad_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tickets database
+ ## r/w: webapp
+ couchdb::create_db { 'tickets':
+ members => "{ \"names\": [\"${site_couchdb::couchdb_webapp_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tokens database
+ ## r: soledad - needs to be restricted with a design document
+ ## r/w: webapp
+ $tokens_db = rotated_db_name('tokens', 'monthly')
+ couchdb::create_db { $tokens_db:
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"tokens\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ $tokens_next_db = rotated_db_name('tokens', 'monthly', 'next')
+ couchdb::create_db { $tokens_next_db:
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"tokens\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## users database
+ ## r/w: webapp
+ couchdb::create_db { 'users':
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"users\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tmp_users database
+ ## r/w: webapp
+ couchdb::create_db { 'tmp_users':
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"users\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## messages db
+ ## store messages to the clients such as payment reminders
+ ## r/w: webapp
+ couchdb::create_db { 'messages':
+ members => "{ \"names\": [\"${site_couchdb::couchdb_webapp_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## invite_codes db
+ ## store invite codes for new signups
+ ## r/w: webapp
+ couchdb::create_db { 'invite_codes':
+ members => "{ \"names\": [\"${site_couchdb::couchdb_webapp_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/designs.pp b/puppet/modules/site_couchdb/manifests/designs.pp
new file mode 100644
index 00000000..e5fd94c6
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/designs.pp
@@ -0,0 +1,46 @@
+class site_couchdb::designs {
+
+ Class['site_couchdb::create_dbs']
+ -> Class['site_couchdb::designs']
+
+ file { '/srv/leap/couchdb/designs':
+ ensure => directory,
+ source => 'puppet:///modules/site_couchdb/designs',
+ recurse => true,
+ purge => true,
+ mode => '0755'
+ }
+
+ site_couchdb::upload_design {
+ 'customers': design => 'customers/Customer.json';
+ 'identities': design => 'identities/Identity.json';
+ 'tickets': design => 'tickets/Ticket.json';
+ 'messages': design => 'messages/Message.json';
+ 'users': design => 'users/User.json';
+ 'tmp_users': design => 'users/User.json';
+ 'invite_codes': design => 'invite_codes/InviteCode.json';
+ 'shared_docs':
+ db => 'shared',
+ design => 'shared/docs.json';
+ 'shared_syncs':
+ db => 'shared',
+ design => 'shared/syncs.json';
+ 'shared_transactions':
+ db => 'shared',
+ design => 'shared/transactions.json';
+ }
+
+ $sessions_db = rotated_db_name('sessions', 'monthly')
+ $sessions_next_db = rotated_db_name('sessions', 'monthly', 'next')
+ site_couchdb::upload_design {
+ $sessions_db: design => 'sessions/Session.json';
+ $sessions_next_db: design => 'sessions/Session.json';
+ }
+
+ $tokens_db = rotated_db_name('tokens', 'monthly')
+ $tokens_next_db = rotated_db_name('tokens', 'monthly', 'next')
+ site_couchdb::upload_design {
+ $tokens_db: design => 'tokens/Token.json';
+ $tokens_next_db: design => 'tokens/Token.json';
+ }
+}
diff --git a/puppet/modules/site_couchdb/manifests/init.pp b/puppet/modules/site_couchdb/manifests/init.pp
new file mode 100644
index 00000000..c4fe6277
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/init.pp
@@ -0,0 +1,81 @@
+# entry class for configuring a couchdb/bigcouch node
+class site_couchdb {
+ tag 'leap_service'
+
+ $couchdb_config = hiera('couch')
+ $couchdb_users = $couchdb_config['users']
+
+ $couchdb_admin = $couchdb_users['admin']
+ $couchdb_admin_user = $couchdb_admin['username']
+ $couchdb_admin_pw = $couchdb_admin['password']
+ $couchdb_admin_salt = $couchdb_admin['salt']
+
+ $couchdb_leap_mx = $couchdb_users['leap_mx']
+ $couchdb_leap_mx_user = $couchdb_leap_mx['username']
+ $couchdb_leap_mx_pw = $couchdb_leap_mx['password']
+ $couchdb_leap_mx_salt = $couchdb_leap_mx['salt']
+
+ $couchdb_nickserver = $couchdb_users['nickserver']
+ $couchdb_nickserver_user = $couchdb_nickserver['username']
+ $couchdb_nickserver_pw = $couchdb_nickserver['password']
+ $couchdb_nickserver_salt = $couchdb_nickserver['salt']
+
+ $couchdb_soledad = $couchdb_users['soledad']
+ $couchdb_soledad_user = $couchdb_soledad['username']
+ $couchdb_soledad_pw = $couchdb_soledad['password']
+ $couchdb_soledad_salt = $couchdb_soledad['salt']
+
+ $couchdb_webapp = $couchdb_users['webapp']
+ $couchdb_webapp_user = $couchdb_webapp['username']
+ $couchdb_webapp_pw = $couchdb_webapp['password']
+ $couchdb_webapp_salt = $couchdb_webapp['salt']
+
+ $couchdb_replication = $couchdb_users['replication']
+ $couchdb_replication_user = $couchdb_replication['username']
+ $couchdb_replication_pw = $couchdb_replication['password']
+ $couchdb_replication_salt = $couchdb_replication['salt']
+
+ $couchdb_backup = $couchdb_config['backup']
+ $couchdb_mode = $couchdb_config['mode']
+
+ # ensure bigcouch has been purged from the system:
+ # TODO: remove this check in 0.9 release
+ if file('/opt/bigcouch/bin/bigcouch', '/dev/null') != '' {
+ fail 'ERROR: BigCouch appears to be installed. Make sure you have migrated to CouchDB before proceeding. See https://leap.se/upgrade-0-8'
+ }
+
+ include site_couchdb::plain
+
+ Class['site_config::default']
+ -> Service['shorewall']
+ -> Exec['refresh_stunnel']
+ -> Class['couchdb']
+ -> Class['site_couchdb::setup']
+
+ include ::site_config::default
+ include site_stunnel
+
+ include site_couchdb::setup
+ include site_couchdb::create_dbs
+ include site_couchdb::add_users
+ include site_couchdb::designs
+ include site_couchdb::logrotate
+
+ if $couchdb_backup { include site_couchdb::backup }
+
+ include site_check_mk::agent::couchdb
+
+ # remove tapicero leftovers on couchdb nodes
+ include site_config::remove::tapicero
+
+ # Destroy every per-user storage database
+ # where the corresponding user record does not exist.
+ cron { 'cleanup_stale_userdbs':
+ command => '(/bin/date; /srv/leap/couchdb/scripts/cleanup-user-dbs) >> /var/log/leap/couchdb-cleanup.log',
+ user => 'root',
+ hour => 4,
+ minute => 7;
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/logrotate.pp b/puppet/modules/site_couchdb/manifests/logrotate.pp
new file mode 100644
index 00000000..bb8843bb
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/logrotate.pp
@@ -0,0 +1,14 @@
+# configure couchdb logrotation
+class site_couchdb::logrotate {
+
+ augeas {
+ 'logrotate_bigcouch':
+ context => '/files/etc/logrotate.d/bigcouch/rule',
+ changes => [
+ 'set file /opt/bigcouch/var/log/*.log', 'set rotate 7',
+ 'set schedule daily', 'set compress compress',
+ 'set missingok missingok', 'set ifempty notifempty',
+ 'set copytruncate copytruncate' ]
+ }
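+
+  # For reference, the rule above renders to roughly this stanza in
+  # /etc/logrotate.d/bigcouch:
+  #   /opt/bigcouch/var/log/*.log {
+  #     daily
+  #     rotate 7
+  #     compress
+  #     missingok
+  #     notifempty
+  #     copytruncate
+  #   }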
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/mirror.pp b/puppet/modules/site_couchdb/manifests/mirror.pp
new file mode 100644
index 00000000..fb82b897
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/mirror.pp
@@ -0,0 +1,78 @@
+# configure mirroring of couch nodes
+class site_couchdb::mirror {
+
+ Class['site_couchdb::add_users']
+ -> Class['site_couchdb::mirror']
+
+ class { 'couchdb':
+ admin_pw => $site_couchdb::couchdb_admin_pw,
+ admin_salt => $site_couchdb::couchdb_admin_salt,
+ chttpd_bind_address => '127.0.0.1'
+ }
+
+ $masters = $site_couchdb::couchdb_config['replication']['masters']
+ $master_node_names = keys($site_couchdb::couchdb_config['replication']['masters'])
+ $master_node = $masters[$master_node_names[0]]
+ $user = $site_couchdb::couchdb_replication_user
+ $password = $site_couchdb::couchdb_replication_pw
+ $from_host = $master_node['domain_internal']
+ $from_port = $master_node['couch_port']
+ $from = "http://${user}:${password}@${from_host}:${from_port}"
+
+ notice("mirror from: ${from}")
+
+ ### customer database
+ couchdb::mirror_db { 'customers':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## identities database
+ couchdb::mirror_db { 'identities':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## keycache database
+ couchdb::mirror_db { 'keycache':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## sessions database
+ couchdb::mirror_db { 'sessions':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## shared database
+ couchdb::mirror_db { 'shared':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tickets database
+ couchdb::mirror_db { 'tickets':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tokens database
+ couchdb::mirror_db { 'tokens':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## users database
+ couchdb::mirror_db { 'users':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## messages db
+ couchdb::mirror_db { 'messages':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/plain.pp b/puppet/modules/site_couchdb/manifests/plain.pp
new file mode 100644
index 00000000..b40fc100
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/plain.pp
@@ -0,0 +1,14 @@
+# this class sets up a single, plain couchdb node
+class site_couchdb::plain {
+ class { 'couchdb':
+ admin_pw => $site_couchdb::couchdb_admin_pw,
+ admin_salt => $site_couchdb::couchdb_admin_salt,
+ chttpd_bind_address => '127.0.0.1'
+ }
+
+ include site_check_mk::agent::couchdb::plain
+
+ # remove bigcouch leftovers from previous installations
+ include ::site_config::remove::bigcouch
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/setup.pp b/puppet/modules/site_couchdb/manifests/setup.pp
new file mode 100644
index 00000000..710d3c1c
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/setup.pp
@@ -0,0 +1,61 @@
+#
+# An initial setup class. All the other classes depend on this one.
+#
+class site_couchdb::setup {
+
+ # ensure that we don't have leftovers from previous installations
+ # where we installed the cloudant bigcouch package
+ # https://leap.se/code/issues/4971
+ class { 'couchdb::bigcouch::package::cloudant':
+ ensure => absent
+ }
+
+ $user = $site_couchdb::couchdb_admin_user
+
+ # setup /etc/couchdb/couchdb-admin.netrc for couchdb admin access
+ couchdb::query::setup { 'localhost':
+ user => $user,
+ pw => $site_couchdb::couchdb_admin_pw
+ }
+
+ # We symlink /etc/couchdb/couchdb-admin.netrc to /etc/couchdb/couchdb.netrc
+  # for puppet commands, and to /root/.netrc for couchdb_scripts
+  # (e.g. backup) and to make life easier for the admin on the command line
+ # (i.e. using curl/wget without passing credentials)
+ file {
+ '/etc/couchdb/couchdb.netrc':
+ ensure => link,
+ target => "/etc/couchdb/couchdb-${user}.netrc";
+ '/root/.netrc':
+ ensure => link,
+ target => '/etc/couchdb/couchdb.netrc';
+ }
+
+ # setup /etc/couchdb/couchdb-soledad-admin.netrc file for couchdb admin
+ # access, accessible only for the soledad-admin user to create soledad
+ # userdbs
+ if member(hiera('services', []), 'soledad') {
+ file { '/etc/couchdb/couchdb-soledad-admin.netrc':
+ content => "machine localhost login ${user} password ${site_couchdb::couchdb_admin_pw}",
+ mode => '0400',
+ owner => 'soledad-admin',
+ group => 'root',
+ require => [ Package['couchdb'], User['soledad-admin'] ];
+ }
+ }
+
+ # Checkout couchdb_scripts repo
+ file {
+ '/srv/leap/couchdb':
+ ensure => directory
+ }
+
+ vcsrepo { '/srv/leap/couchdb/scripts':
+ ensure => present,
+ provider => git,
+ source => 'https://leap.se/git/couchdb_scripts',
+ revision => 'origin/master',
+ require => File['/srv/leap/couchdb']
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/upload_design.pp b/puppet/modules/site_couchdb/manifests/upload_design.pp
new file mode 100644
index 00000000..bd73ebf2
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/upload_design.pp
@@ -0,0 +1,14 @@
+# upload a design doc to a db
+define site_couchdb::upload_design($design, $db = $title) {
+ $design_name = regsubst($design, '^.*\/(.*)\.json$', '\1')
+ $id = "_design/${design_name}"
+ $file = "/srv/leap/couchdb/designs/${design}"
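+  # e.g. (illustrative values): design => 'tokens/Token.json' yields
+  # $design_name = 'Token', $id = '_design/Token' and
+  # $file = '/srv/leap/couchdb/designs/tokens/Token.json'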
+ exec {
+ "upload_design_${name}":
+ command => "/usr/local/bin/couch-doc-update --host 127.0.0.1:5984 --db '${db}' --id '${id}' --data '{}' --file '${file}'",
+ refreshonly => false,
+ loglevel => debug,
+ logoutput => on_failure,
+ require => File['/srv/leap/couchdb/designs'];
+ }
+}
diff --git a/puppet/modules/site_haproxy/files/haproxy-stats.cfg b/puppet/modules/site_haproxy/files/haproxy-stats.cfg
new file mode 100644
index 00000000..e6335ba2
--- /dev/null
+++ b/puppet/modules/site_haproxy/files/haproxy-stats.cfg
@@ -0,0 +1,6 @@
+# provide access to stats for the nagios plugin
+listen stats 127.0.0.1:8000
+ mode http
+ stats enable
+ stats uri /haproxy
+
diff --git a/puppet/modules/site_haproxy/manifests/init.pp b/puppet/modules/site_haproxy/manifests/init.pp
new file mode 100644
index 00000000..b28ce80e
--- /dev/null
+++ b/puppet/modules/site_haproxy/manifests/init.pp
@@ -0,0 +1,41 @@
+class site_haproxy {
+ $haproxy = hiera('haproxy')
+
+ class { 'haproxy':
+ enable => true,
+ manage_service => true,
+ global_options => {
+ 'log' => '127.0.0.1 local0',
+ 'maxconn' => '4096',
+ 'stats' => 'socket /var/run/haproxy.sock user haproxy group haproxy',
+ 'chroot' => '/usr/share/haproxy',
+ 'user' => 'haproxy',
+ 'group' => 'haproxy',
+ 'daemon' => ''
+ },
+ defaults_options => {
+ 'log' => 'global',
+ 'retries' => '3',
+ 'option' => 'redispatch',
+ 'timeout connect' => '4000',
+ 'timeout client' => '20000',
+ 'timeout server' => '20000'
+ }
+ }
+
+ # monitor haproxy
+ concat::fragment { 'stats':
+ target => '/etc/haproxy/haproxy.cfg',
+ order => '90',
+ source => 'puppet:///modules/site_haproxy/haproxy-stats.cfg';
+ }
+
+ # Template uses $haproxy
+ concat::fragment { 'leap_haproxy_webapp_couchdb':
+ target => '/etc/haproxy/haproxy.cfg',
+ order => '20',
+ content => template('site_haproxy/haproxy.cfg.erb'),
+ }
+
+ include site_check_mk::agent::haproxy
+}
diff --git a/puppet/modules/site_haproxy/templates/couch.erb b/puppet/modules/site_haproxy/templates/couch.erb
new file mode 100644
index 00000000..f42e8368
--- /dev/null
+++ b/puppet/modules/site_haproxy/templates/couch.erb
@@ -0,0 +1,32 @@
+frontend couch
+ bind localhost:<%= @listen_port %>
+ mode http
+ option httplog
+ option dontlognull
+ option http-server-close # use client keep-alive, but close server connection.
+ use_backend couch_read if METH_GET
+ default_backend couch_write
+
+backend couch_write
+ mode http
+ balance roundrobin
+ option httpchk GET / # health check using simple get to root
+ option allbackups # balance among all backups, not just one.
+ default-server inter 3000 fastinter 1000 downinter 1000 rise 2 fall 1
+<%- @servers.sort.each do |name,server| -%>
+<%- next unless server['writable'] -%>
+ # <%=name%>
+ server couchdb_<%=server['port']%> <%=server['host']%>:<%=server['port']%> <%='backup' if server['backup']%> weight <%=server['weight']%> check
+<%- end -%>
+
+backend couch_read
+ mode http
+ balance roundrobin
+ option httpchk GET / # health check using simple get to root
+ option allbackups # balance among all backups, not just one.
+ default-server inter 3000 fastinter 1000 downinter 1000 rise 2 fall 1
+<%- @servers.sort.each do |name,server| -%>
+ # <%=name%>
+ server couchdb_<%=server['port']%> <%=server['host']%>:<%=server['port']%> <%='backup' if server['backup']%> weight <%=server['weight']%> check
+<%- end -%>
+
diff --git a/puppet/modules/site_haproxy/templates/haproxy.cfg.erb b/puppet/modules/site_haproxy/templates/haproxy.cfg.erb
new file mode 100644
index 00000000..8311b1a5
--- /dev/null
+++ b/puppet/modules/site_haproxy/templates/haproxy.cfg.erb
@@ -0,0 +1,11 @@
+<%- @haproxy.each do |frontend, options| -%>
+<%- if options['servers'] -%>
+
+##
+## <%= frontend %>
+##
+
+<%= scope.function_templatewlv(["site_haproxy/#{frontend}.erb", options]) %>
+<%- end -%>
+<%- end -%>
+
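+
+<%#
+  Illustrative shape of the 'haproxy' hiera hash consumed above (names and
+  values are examples only, not the canonical schema):
+    couch:
+      listen_port: 4096
+      servers:
+        couch1: { host: 'localhost', port: 4000, weight: 10, writable: true }
+  Each frontend key is rendered through its matching site_haproxy/<name>.erb
+  partial via templatewlv.
+%>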
diff --git a/puppet/modules/site_mx/manifests/init.pp b/puppet/modules/site_mx/manifests/init.pp
new file mode 100644
index 00000000..a9b0198b
--- /dev/null
+++ b/puppet/modules/site_mx/manifests/init.pp
@@ -0,0 +1,20 @@
+class site_mx {
+ tag 'leap_service'
+ Class['site_config::default'] -> Class['site_mx']
+
+ include site_config::default
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include site_config::x509::client_ca::ca
+ include site_config::x509::client_ca::key
+
+ include site_stunnel
+
+ include site_postfix::mx
+ include site_haproxy
+ include site_shorewall::mx
+ include site_shorewall::service::smtp
+ include leap_mx
+ include site_check_mk::agent::mx
+}
diff --git a/puppet/modules/site_nagios/files/configs/Debian/nagios.cfg b/puppet/modules/site_nagios/files/configs/Debian/nagios.cfg
new file mode 100644
index 00000000..62f26f2c
--- /dev/null
+++ b/puppet/modules/site_nagios/files/configs/Debian/nagios.cfg
@@ -0,0 +1,1302 @@
+##############################################################################
+#
+# NAGIOS.CFG - Sample Main Config File for Nagios
+#
+#
+##############################################################################
+
+
+# LOG FILE
+# This is the main log file where service and host events are logged
+# for historical purposes. This should be the first option specified
+# in the config file!!!
+
+log_file=/var/log/nagios3/nagios.log
+
+
+
+# OBJECT CONFIGURATION FILE(S)
+# These are the object configuration files in which you define hosts,
+# host groups, contacts, contact groups, services, etc.
+# You can split your object definitions across several config files
+# if you wish (as shown below), or keep them all in a single config file.
+#cfg_file=/etc/nagios3/commands.cfg
+
+# Check_mk configuration files
+cfg_dir=/etc/nagios3/conf.d/check_mk
+cfg_dir=/etc/nagios3/local
+
+# Puppet-managed configuration files
+cfg_file=/etc/nagios3/nagios_templates.cfg
+cfg_file=/etc/nagios3/nagios_command.cfg
+cfg_file=/etc/nagios3/nagios_contact.cfg
+cfg_file=/etc/nagios3/nagios_contactgroup.cfg
+cfg_file=/etc/nagios3/nagios_host.cfg
+cfg_file=/etc/nagios3/nagios_hostdependency.cfg
+cfg_file=/etc/nagios3/nagios_hostescalation.cfg
+cfg_file=/etc/nagios3/nagios_hostextinfo.cfg
+cfg_file=/etc/nagios3/nagios_hostgroup.cfg
+cfg_file=/etc/nagios3/nagios_hostgroupescalation.cfg
+cfg_file=/etc/nagios3/nagios_service.cfg
+cfg_file=/etc/nagios3/nagios_servicedependency.cfg
+cfg_file=/etc/nagios3/nagios_serviceescalation.cfg
+cfg_file=/etc/nagios3/nagios_serviceextinfo.cfg
+cfg_file=/etc/nagios3/nagios_servicegroup.cfg
+cfg_file=/etc/nagios3/nagios_timeperiod.cfg
+
+# Debian also defaults to using the check commands defined by the debian
+# nagios-plugins package
+cfg_dir=/etc/nagios-plugins/config
+
+
+# OBJECT CACHE FILE
+# This option determines where object definitions are cached when
+# Nagios starts/restarts. The CGIs read object definitions from
+# this cache file (rather than looking at the object config files
+# directly) in order to prevent inconsistencies that can occur
+# when the config files are modified after Nagios starts.
+
+object_cache_file=/var/cache/nagios3/objects.cache
+
+
+
+# PRE-CACHED OBJECT FILE
+# This option determines the location of the precached object file.
+# If you run Nagios with the -p command line option, it will preprocess
+# your object configuration file(s) and write the cached config to this
+# file. You can then start Nagios with the -u option to have it read
+# object definitions from this precached file, rather than the standard
+# object configuration files (see the cfg_file and cfg_dir options above).
+# Using a precached object file can speed up the time needed to (re)start
+# the Nagios process if you've got a large and/or complex configuration.
+# Read the documentation section on optimizing Nagios to find out more
+# about how this feature works.
+
+precached_object_file=/var/lib/nagios3/objects.precache
+
+
+
+# RESOURCE FILE
+# This is an optional resource file that contains $USERx$ macro
+# definitions. Multiple resource files can be specified by using
+# multiple resource_file definitions. The CGIs will not attempt to
+# read the contents of resource files, so information that is
+# considered to be sensitive (usernames, passwords, etc) can be
+# defined as macros in this file and restrictive permissions (600)
+# can be placed on this file.
+
+resource_file=/etc/nagios3/resource.cfg
+
+
+
+# STATUS FILE
+# This is where the current status of all monitored services and
+# hosts is stored. Its contents are read and processed by the CGIs.
+# The contents of the status file are deleted every time Nagios
+# restarts.
+
+status_file=/var/cache/nagios3/status.dat
+
+
+
+# STATUS FILE UPDATE INTERVAL
+# This option determines the frequency (in seconds) that
+# Nagios will periodically dump program, host, and
+# service status data.
+
+status_update_interval=10
+
+
+
+# NAGIOS USER
+# This determines the effective user that Nagios should run as.
+# You can either supply a username or a UID.
+
+nagios_user=nagios
+
+
+
+# NAGIOS GROUP
+# This determines the effective group that Nagios should run as.
+# You can either supply a group name or a GID.
+
+nagios_group=nagios
+
+
+
+# EXTERNAL COMMAND OPTION
+# This option allows you to specify whether or not Nagios should check
+# for external commands (in the command file defined below). By default
+# Nagios will *not* check for external commands, just to be on the
+# cautious side. If you want to be able to use the CGI command interface
+# you will have to enable this.
+# Values: 0 = disable commands, 1 = enable commands
+
+check_external_commands=1
+
+
+
+# EXTERNAL COMMAND CHECK INTERVAL
+# This is the interval at which Nagios should check for external commands.
+# This value works off of the interval_length you specify later. If you leave
+# that at its default value of 60 (seconds), a value of 1 here will cause
+# Nagios to check for external commands every minute. If you specify a
+# number followed by an "s" (i.e. 15s), this will be interpreted to mean
+# actual seconds rather than a multiple of the interval_length variable.
+# Note: In addition to reading the external command file at regularly
+# scheduled intervals, Nagios will also check for external commands after
+# event handlers are executed.
+# NOTE: Setting this value to -1 causes Nagios to check the external
+# command file as often as possible.
+
+#command_check_interval=15s
+command_check_interval=-1
+
+
+
+# EXTERNAL COMMAND FILE
+# This is the file that Nagios checks for external command requests.
+# It is also where the command CGI will write commands that are submitted
+# by users, so it must be writeable by the user that the web server
+# is running as (usually 'nobody'). Permissions should be set at the
+# directory level instead of on the file, as the file is deleted every
+# time its contents are processed.
+# Debian Users: In case you didn't read README.Debian yet, _NOW_ is the
+# time to do it.
+
+command_file=/var/lib/nagios3/rw/nagios.cmd
+
+
+
+# EXTERNAL COMMAND BUFFER SLOTS
+# This setting is used to tweak the number of items or "slots" that
+# the Nagios daemon should allocate to the buffer that holds incoming
+# external commands before they are processed. As external commands
+# are processed by the daemon, they are removed from the buffer.
+
+external_command_buffer_slots=4096
+
+
+
+# LOCK FILE
+# This is the lockfile that Nagios will use to store its PID number
+# in when it is running in daemon mode.
+
+lock_file=/var/run/nagios3/nagios3.pid
+
+
+
+# TEMP FILE
+# This is a temporary file that is used as scratch space when Nagios
+# updates the status log, cleans the comment file, etc. This file
+# is created, used, and deleted throughout the time that Nagios is
+# running.
+
+temp_file=/var/cache/nagios3/nagios.tmp
+
+
+
+# TEMP PATH
+# This is the path where Nagios can create temp files for service and
+# host check results, etc.
+
+temp_path=/tmp
+
+
+
+# EVENT BROKER OPTIONS
+# Controls what (if any) data gets sent to the event broker.
+# Values: 0 = Broker nothing
+# -1 = Broker everything
+# <other> = See documentation
+
+event_broker_options=-1
+
+
+
+# EVENT BROKER MODULE(S)
+# This directive is used to specify an event broker module that should
+# be loaded by Nagios at startup. Use multiple directives if you want
+# to load more than one module. Arguments that should be passed to
+# the module at startup are separated from the module path by a space.
+#
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+#
+# Do NOT overwrite modules while they are being used by Nagios or Nagios
+# will crash in a fiery display of SEGFAULT glory. This is a bug/limitation
+# either in dlopen(), the kernel, and/or the filesystem. And maybe Nagios...
+#
+# The correct/safe way of updating a module is by using one of these methods:
+# 1. Shutdown Nagios, replace the module file, restart Nagios
+# 2. Delete the original module file, move the new module file into place, restart Nagios
+#
+# Example:
+#
+# broker_module=<modulepath> [moduleargs]
+
+#broker_module=/somewhere/module1.o
+#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
+
+
+
+# LOG ROTATION METHOD
+# This is the log rotation method that Nagios should use to rotate
+# the main log file. Values are as follows..
+# n = None - don't rotate the log
+# h = Hourly rotation (top of the hour)
+# d = Daily rotation (midnight every day)
+# w = Weekly rotation (midnight on Saturday evening)
+# m = Monthly rotation (midnight last day of month)
+
+log_rotation_method=n
+
+
+
+# LOG ARCHIVE PATH
+# This is the directory where archived (rotated) log files should be
+# placed (assuming you've chosen to do log rotation).
+
+log_archive_path=/var/log/nagios3/archives
+
+
+
+# LOGGING OPTIONS
+# If you want messages logged to the syslog facility, as well as the
+# Nagios log file, set this option to 1. If not, set it to 0.
+
+use_syslog=0
+
+
+
+# NOTIFICATION LOGGING OPTION
+# If you don't want notifications to be logged, set this value to 0.
+# If notifications should be logged, set the value to 1.
+
+log_notifications=1
+
+
+
+# SERVICE RETRY LOGGING OPTION
+# If you don't want service check retries to be logged, set this value
+# to 0. If retries should be logged, set the value to 1.
+
+log_service_retries=1
+
+
+
+# HOST RETRY LOGGING OPTION
+# If you don't want host check retries to be logged, set this value to
+# 0. If retries should be logged, set the value to 1.
+
+log_host_retries=1
+
+
+
+# EVENT HANDLER LOGGING OPTION
+# If you don't want host and service event handlers to be logged, set
+# this value to 0. If event handlers should be logged, set the value
+# to 1.
+
+log_event_handlers=1
+
+
+
+# INITIAL STATES LOGGING OPTION
+# If you want Nagios to log all initial host and service states to
+# the main log file (the first time the service or host is checked)
+# you can enable this option by setting this value to 1. If you
+# are not using an external application that does long term state
+# statistics reporting, you do not need to enable this option. In
+# this case, set the value to 0.
+
+log_initial_states=0
+
+
+
+# EXTERNAL COMMANDS LOGGING OPTION
+# If you don't want Nagios to log external commands, set this value
+# to 0. If external commands should be logged, set this value to 1.
+# Note: This option does not include logging of passive service
+# checks - see the option below for controlling whether or not
+# passive checks are logged.
+
+log_external_commands=1
+
+
+
+# PASSIVE CHECKS LOGGING OPTION
+# If you don't want Nagios to log passive host and service checks, set
+# this value to 0. If passive checks should be logged, set
+# this value to 1.
+
+log_passive_checks=1
+
+
+
+# GLOBAL HOST AND SERVICE EVENT HANDLERS
+# These options allow you to specify a host and service event handler
+# command that is to be run for every host or service state change.
+# The global event handler is executed immediately prior to the event
+# handler that you have optionally specified in each host or
+# service definition. The command argument is the short name of a
+# command definition that you define in your host configuration file.
+# Read the HTML docs for more information.
+
+#global_host_event_handler=somecommand
+#global_service_event_handler=somecommand
+
+
+
+# SERVICE INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" service checks when it starts monitoring. The
+# default is to use smart delay calculation, which will try to
+# space all service checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)! This is not a
+# good thing for production, but is useful when testing the
+# parallelization functionality.
+# n = None - don't use any delay between checks
+# d = Use a "dumb" delay of 1 second between checks
+# s = Use "smart" inter-check delay calculation
+# x.xx = Use an inter-check delay of x.xx seconds
+
+service_inter_check_delay_method=s
+
+
+
+# MAXIMUM SERVICE CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all services should
+# be completed. Default is 30 minutes.
+
+max_service_check_spread=30
+
+
+
+# SERVICE CHECK INTERLEAVE FACTOR
+# This variable determines how service checks are interleaved.
+# Interleaving the service checks allows for a more even
+# distribution of service checks and reduced load on remote
+# hosts. Setting this value to 1 is equivalent to how versions
+# of Nagios previous to 0.0.5 did service checks. Set this
+# value to s (smart) for automatic calculation of the interleave
+# factor unless you have a specific reason to change it.
+# s = Use "smart" interleave factor calculation
+# x = Use an interleave factor of x, where x is a
+# number greater than or equal to 1.
+
+service_interleave_factor=s
+
+
+
+# HOST INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" host checks when it starts monitoring. The
+# default is to use smart delay calculation, which will try to
+# space all host checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)!
+# n = None - don't use any delay between checks
+# d = Use a "dumb" delay of 1 second between checks
+# s = Use "smart" inter-check delay calculation
+# x.xx = Use an inter-check delay of x.xx seconds
+
+host_inter_check_delay_method=s
+
+
+
+# MAXIMUM HOST CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all hosts should
+# be completed. Default is 30 minutes.
+
+max_host_check_spread=30
+
+
+
+# MAXIMUM CONCURRENT SERVICE CHECKS
+# This option allows you to specify the maximum number of
+# service checks that can be run in parallel at any given time.
+# Specifying a value of 1 for this variable essentially prevents
+# any service checks from being parallelized. A value of 0
+# will not restrict the number of concurrent checks that are
+# being executed.
+
+max_concurrent_checks=0
+
+
+
+# HOST AND SERVICE CHECK REAPER FREQUENCY
+# This is the frequency (in seconds!) that Nagios will process
+# the results of host and service checks.
+
+check_result_reaper_frequency=10
+
+
+
+
+# MAX CHECK RESULT REAPER TIME
+# This is the max amount of time (in seconds) that a single
+# check result reaper event will be allowed to run before
+# returning control back to Nagios so it can perform other
+# duties.
+
+max_check_result_reaper_time=30
+
+
+
+
+# CHECK RESULT PATH
+# This is the directory where Nagios stores the results of host and
+# service checks that have not yet been processed.
+#
+# Note: Make sure that only one instance of Nagios has access
+# to this directory!
+
+check_result_path=/var/lib/nagios3/spool/checkresults
+
+
+
+
+# MAX CHECK RESULT FILE AGE
+# This option determines the maximum age (in seconds) which check
+# result files are considered to be valid. Files older than this
+# threshold will be mercilessly deleted without further processing.
+
+max_check_result_file_age=3600
+
+
+
+
+# CACHED HOST CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous host check is considered current.
+# Cached host states (from host checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance in regards to the host check logic.
+# Too high of a value for this option may result in inaccurate host
+# states being used by Nagios, while a lower value may result in a
+# performance hit for host checks. Use a value of 0 to disable host
+# check caching.
+
+cached_host_check_horizon=15
+
+
+
+# CACHED SERVICE CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous service check is considered current.
+# Cached service states (from service checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance in regards to predictive dependency checks.
+# Use a value of 0 to disable service check caching.
+
+cached_service_check_horizon=15
+
+
+
+# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of hosts when it predicts that future dependency logic tests
+# may be needed. These predictive checks can help ensure that your
+# host dependency logic works well.
+# Values:
+# 0 = Disable predictive checks
+# 1 = Enable predictive checks (default)
+
+enable_predictive_host_dependency_checks=1
+
+
+
+# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of services when it predicts that future dependency logic tests
+# may be needed. These predictive checks can help ensure that your
+# service dependency logic works well.
+# Values:
+# 0 = Disable predictive checks
+# 1 = Enable predictive checks (default)
+
+enable_predictive_service_dependency_checks=1
+
+
+
+# SOFT STATE DEPENDENCIES
+# This option determines whether or not Nagios will use soft state
+# information when checking host and service dependencies. Normally
+# Nagios will only use the latest hard host or service state when
+# checking dependencies. If you want it to use the latest state (regardless
+# of whether it's a soft or hard state type), enable this option.
+# Values:
+# 0 = Don't use soft state dependencies (default)
+# 1 = Use soft state dependencies
+
+soft_state_dependencies=0
+
+
+
+# TIME CHANGE ADJUSTMENT THRESHOLDS
+# These options determine when Nagios will react to detected changes
+# in system time (either forward or backwards).
+
+#time_change_threshold=900
+
+
+
+# AUTO-RESCHEDULING OPTION
+# This option determines whether or not Nagios will attempt to
+# automatically reschedule active host and service checks to
+# "smooth" them out over time. This can help balance the load on
+# the monitoring server.
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_reschedule_checks=0
+
+
+
+# AUTO-RESCHEDULING INTERVAL
+# This option determines how often (in seconds) Nagios will
+# attempt to automatically reschedule checks. This option only
+# has an effect if the auto_reschedule_checks option is enabled.
+# Default is 30 seconds.
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_interval=30
+
+
+
+# AUTO-RESCHEDULING WINDOW
+# This option determines the "window" of time (in seconds) that
+# Nagios will look at when automatically rescheduling checks.
+# Only host and service checks that occur in the next X seconds
+# (determined by this variable) will be rescheduled. This option
+# only has an effect if the auto_reschedule_checks option is
+# enabled. Default is 180 seconds (3 minutes).
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_window=180
+
+
+
+# SLEEP TIME
+# This is the number of seconds to sleep between checking for system
+# events and service checks that need to be run.
+
+sleep_time=0.25
+
+
+
+# TIMEOUT VALUES
+# These options control how much time Nagios will allow various
+# types of commands to execute before killing them off. Options
+# are available for controlling maximum time allotted for
+# service checks, host checks, event handlers, notifications, the
+# ocsp command, and performance data commands. All values are in
+# seconds.
+
+service_check_timeout=60
+host_check_timeout=30
+event_handler_timeout=30
+notification_timeout=30
+ocsp_timeout=5
+perfdata_timeout=5
+
+
+
+# RETAIN STATE INFORMATION
+# This setting determines whether or not Nagios will save state
+# information for services and hosts before it shuts down. Upon
+# startup Nagios will reload all saved service and host state
+# information before starting to monitor. This is useful for
+# maintaining long-term data on state statistics, etc, but will
+# slow Nagios down a bit when it (re)starts. Since it's only
+# a one-time penalty, I think it's well worth the additional
+# startup delay.
+
+retain_state_information=1
+
+
+
+# STATE RETENTION FILE
+# This is the file that Nagios should use to store host and
+# service state information before it shuts down. The state
+# information in this file is also read immediately prior to
+# starting to monitor the network when Nagios is restarted.
+# This file is used only if the retain_state_information
+# variable is set to 1.
+
+state_retention_file=/var/lib/nagios3/retention.dat
+
+
+
+# RETENTION DATA UPDATE INTERVAL
+# This setting determines how often (in minutes) that Nagios
+# will automatically save retention data during normal operation.
+# If you set this value to 0, Nagios will not save retention
+# data at regular intervals, but it will still save retention
+# data before shutting down or restarting. If you have disabled
+# state retention, this option has no effect.
+
+retention_update_interval=60
+
+
+
+# USE RETAINED PROGRAM STATE
+# This setting determines whether or not Nagios will set
+# program status variables based on the values saved in the
+# retention file. If you want to use retained program status
+# information, set this value to 1. If not, set this value
+# to 0.
+
+use_retained_program_state=1
+
+
+
+# USE RETAINED SCHEDULING INFO
+# This setting determines whether or not Nagios will retain
+# the scheduling info (next check time) for hosts and services
+# based on the values saved in the retention file. If you
+# want to use retained scheduling info, set this
+# value to 1. If not, set this value to 0.
+
+use_retained_scheduling_info=1
+
+
+
+# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
+# The following variables are used to specify specific host and
+# service attributes that should *not* be retained by Nagios during
+# program restarts.
+#
+# The values of the masks are bitwise ORs (sums) of values specified
+# by the "MODATTR_" definitions found in include/common.h.
+# For example, if you do not want the current enabled/disabled state
+# of flap detection and event handlers for hosts to be retained, you
+# would use a value of 24 for the host attribute mask...
+# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
+
+# This mask determines what host attributes are not retained
+retained_host_attribute_mask=0
+
+# This mask determines what service attributes are not retained
+retained_service_attribute_mask=0
+
+# These two masks determine what process attributes are not retained.
+# There are two masks, because some process attributes have host and service
+# options. For example, you can disable active host checks, but leave active
+# service checks enabled.
+retained_process_host_attribute_mask=0
+retained_process_service_attribute_mask=0
+
+# These two masks determine what contact attributes are not retained.
+# There are two masks, because some contact attributes have host and
+# service options. For example, you can disable host notifications for
+# a contact, but leave service notifications enabled for them.
+retained_contact_host_attribute_mask=0
+retained_contact_service_attribute_mask=0
+
+
+
+# INTERVAL LENGTH
+# This is the seconds per unit interval as used in the
+# host/contact/service configuration files. Setting this to 60 means
+# that each interval is one minute long (60 seconds). Other settings
+# have not been tested much, so your mileage is likely to vary...
+
+interval_length=60
+
+
+
+# AGGRESSIVE HOST CHECKING OPTION
+# If you don't want to turn on aggressive host checking features, set
+# this value to 0 (the default). Otherwise set this value to 1 to
+# enable the aggressive check option. Read the docs for more info
+# on what aggressive host checking is, or check out the source code in
+# base/checks.c
+
+use_aggressive_host_checking=0
+
+
+
+# SERVICE CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# service checks when it initially starts. If this option is
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in. Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of service checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_service_checks=1
+
+
+
+# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# service check results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_service_checks=1
+
+
+
+# HOST CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# host checks when it initially starts. If this option is
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in. Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of host checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_host_checks=1
+
+
+
+# PASSIVE HOST CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# host check results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_host_checks=1
+
+
+
+# NOTIFICATIONS OPTION
+# This determines whether or not Nagios will send out any host or
+# service notifications when it is initially (re)started.
+# Values: 1 = enable notifications, 0 = disable notifications
+
+enable_notifications=1
+
+
+
+# EVENT HANDLER USE OPTION
+# This determines whether or not Nagios will run any host or
+# service event handlers when it is initially (re)started. Unless
+# you're implementing redundant hosts, leave this option enabled.
+# Values: 1 = enable event handlers, 0 = disable event handlers
+
+enable_event_handlers=1
+
+
+
+# PROCESS PERFORMANCE DATA OPTION
+# This determines whether or not Nagios will process performance
+# data returned from service and host checks. If this option is
+# enabled, host performance data will be processed using the
+# host_perfdata_command (defined below) and service performance
+# data will be processed using the service_perfdata_command (also
+# defined below). Read the HTML docs for more information on
+# performance data.
+# Values: 1 = process performance data, 0 = do not process performance data
+
+process_performance_data=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
+# These commands are run after every host and service check is
+# performed. These commands are executed only if the
+# process_performance_data option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on performance data.
+
+#host_perfdata_command=process-host-perfdata
+#service_perfdata_command=process-service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILES
+# These files are used to store host and service performance data.
+# Performance data is only written to these files if the
+# process_performance_data option (above) is set to 1.
+
+#host_perfdata_file=/tmp/host-perfdata
+#service_perfdata_file=/tmp/service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
+# These options determine what data is written (and how) to the
+# performance data files. The templates may contain macros, special
+# characters (\t for tab, \r for carriage return, \n for newline)
+# and plain text. A newline is automatically added after each write
+# to the performance data file. Some examples of what you can do are
+# shown below.
+
+#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
+#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE MODES
+# This option determines whether or not the host and service
+# performance data files are opened in write ("w") or append ("a")
+# mode. If you want to use named pipes, you should use the special
+# pipe ("p") mode which avoid blocking at startup, otherwise you will
+# likely want the defult append ("a") mode.
+
+#host_perfdata_file_mode=a
+#service_perfdata_file_mode=a
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
+# These options determine how often (in seconds) the host and service
+# performance data files are processed using the commands defined
+# below. A value of 0 indicates the files should not be periodically
+# processed.
+
+#host_perfdata_file_processing_interval=0
+#service_perfdata_file_processing_interval=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
+# These commands are used to periodically process the host and
+# service performance data files. The interval at which the
+# processing occurs is determined by the options above.
+
+#host_perfdata_file_processing_command=process-host-perfdata-file
+#service_perfdata_file_processing_command=process-service-perfdata-file
+
+
+
+# OBSESS OVER SERVICE CHECKS OPTION
+# This determines whether or not Nagios will obsess over service
+# checks and run the ocsp_command defined below. Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option. Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over services, 0 = do not obsess (default)
+
+obsess_over_services=0
+
+
+
+# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
+# This is the command that is run for every service check that is
+# processed by Nagios. This command is executed only if the
+# obsess_over_services option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ocsp_command=somecommand
+
+
+
+# OBSESS OVER HOST CHECKS OPTION
+# This determines whether or not Nagios will obsess over host
+# checks and run the ochp_command defined below. Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option. Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over hosts, 0 = do not obsess (default)
+
+obsess_over_hosts=0
+
+
+
+# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
+# This is the command that is run for every host check that is
+# processed by Nagios. This command is executed only if the
+# obsess_over_hosts option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ochp_command=somecommand
+
+
+
+# TRANSLATE PASSIVE HOST CHECKS OPTION
+# This determines whether or not Nagios will translate
+# DOWN/UNREACHABLE passive host check results into their proper
+# state for this instance of Nagios. This option is useful
+# if you have a distributed or failover monitoring setup. In
+# these cases your other Nagios servers probably have a different
+# "view" of the network, with regards to the parent/child relationship
+# of hosts. If a distributed monitoring server thinks a host
+# is DOWN, it may actually be UNREACHABLE from the point of view
+# of this Nagios instance. Enabling this option will tell Nagios
+# to translate any DOWN or UNREACHABLE host states it receives
+# passively into the correct state from the view of this server.
+# Values: 1 = perform translation, 0 = do not translate (default)
+
+translate_passive_host_checks=0
+
+
+
+# PASSIVE HOST CHECKS ARE SOFT OPTION
+# This determines whether or not Nagios will treat passive host
+# checks as being HARD or SOFT. By default, a passive host check
+# result will put a host into a HARD state type. This can be changed
+# by enabling this option.
+# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
+
+passive_host_checks_are_soft=0
+
+
+
+# ORPHANED HOST/SERVICE CHECK OPTIONS
+# These options determine whether or not Nagios will periodically
+# check for orphaned host and service checks. Since service checks are
+# not rescheduled until the results of their previous execution
+# instance are processed, there exists a possibility that some
+# checks may never get rescheduled. A similar situation exists for
+# host checks, although the exact scheduling details differ a bit
+# from service checks. Orphaned checks seem to be a rare
+# problem and should not happen under normal circumstances.
+# If you have problems with service checks never getting
+# rescheduled, make sure you have orphaned service checks enabled.
+# Values: 1 = enable checks, 0 = disable checks
+
+check_for_orphaned_services=1
+check_for_orphaned_hosts=1
+
+
+
+# SERVICE FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of service results. Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enable freshness checking, 0 = disable freshness checking
+
+check_service_freshness=1
+
+
+
+# SERVICE FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of service check results. If you have
+# disabled service freshness checking, this option has no effect.
+
+service_freshness_check_interval=60
+
+
+
+# HOST FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of host results. Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enable freshness checking, 0 = disable freshness checking
+
+check_host_freshness=0
+
+
+
+# HOST FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of host check results. If you have
+# disabled host freshness checking, this option has no effect.
+
+host_freshness_check_interval=60
+
+
+
+
+# ADDITIONAL FRESHNESS THRESHOLD LATENCY
+# This setting determines the number of seconds that Nagios
+# will add to any host and service freshness thresholds that
+# it calculates (those not explicitly specified by the user).
+
+additional_freshness_latency=15
+
+
+
+
+# FLAP DETECTION OPTION
+# This option determines whether or not Nagios will try
+# and detect hosts and services that are "flapping".
+# Flapping occurs when a host or service changes between
+# states too frequently. When Nagios detects that a
+# host or service is flapping, it will temporarily suppress
+# notifications for that host/service until it stops
+# flapping. Flap detection is very experimental, so read
+# the HTML documentation before enabling this feature!
+# Values: 1 = enable flap detection
+# 0 = disable flap detection (default)
+
+enable_flap_detection=1
+
+
+
+# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
+# Read the HTML documentation on flap detection for
+# an explanation of what this option does. This option
+# has no effect if flap detection is disabled.
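+# (Roughly: flapping starts when the percent state change rises above the
+# high threshold and ends when it falls back below the low threshold.)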
+
+low_service_flap_threshold=5.0
+high_service_flap_threshold=20.0
+low_host_flap_threshold=5.0
+high_host_flap_threshold=20.0
+
+
+
+# DATE FORMAT OPTION
+# This option determines how short dates are displayed. Valid options
+# include:
+# us (MM-DD-YYYY HH:MM:SS)
+# euro (DD-MM-YYYY HH:MM:SS)
+# iso8601 (YYYY-MM-DD HH:MM:SS)
+# strict-iso8601 (YYYY-MM-DDTHH:MM:SS)
+#
+
+date_format=iso8601
+
+
+
+
+# TIMEZONE OFFSET
+# This option is used to override the default timezone that this
+# instance of Nagios runs in. If not specified, Nagios will use
+# the system configured timezone.
+#
+# NOTE: In order to display the correct timezone in the CGIs, you
+# will also need to alter the Apache directives for the CGI path
+# to include your timezone. Example:
+#
+# <Directory "/usr/local/nagios/sbin/">
+# SetEnv TZ "Australia/Brisbane"
+# ...
+# </Directory>
+
+#use_timezone=US/Mountain
+#use_timezone=Australia/Brisbane
+
+
+
+
+# P1.PL FILE LOCATION
+# This value determines where the p1.pl perl script (used by the
+# embedded Perl interpreter) is located. If you didn't compile
+# Nagios with embedded Perl support, this option has no effect.
+
+p1_file=/usr/lib/nagios3/p1.pl
+
+
+
+# EMBEDDED PERL INTERPRETER OPTION
+# This option determines whether or not the embedded Perl interpreter
+# will be enabled during runtime. This option has no effect if Nagios
+# has not been compiled with support for embedded Perl.
+# Values: 0 = disable interpreter, 1 = enable interpreter
+
+enable_embedded_perl=1
+
+
+
+# EMBEDDED PERL USAGE OPTION
+# This option determines whether or not Nagios will process Perl plugins
+# and scripts with the embedded Perl interpreter if the plugins/scripts
+# do not explicitly indicate whether or not it is okay to do so. Read
+# the HTML documentation on the embedded Perl interpreter for more
+# information on how this option works.
+
+use_embedded_perl_implicitly=1
+
+
+
+# ILLEGAL OBJECT NAME CHARACTERS
+# This option allows you to specify illegal characters that cannot
+# be used in host names, service descriptions, or names of other
+# object types.
+
+illegal_object_name_chars=`~!$%^&*|'"<>?,()=
+
+
+
+# ILLEGAL MACRO OUTPUT CHARACTERS
+# This option allows you to specify illegal characters that are
+# stripped from macros before being used in notifications, event
+# handlers, etc. This DOES NOT affect macros used in service or
+# host check commands.
+# The following macros are stripped of the characters you specify:
+# $HOSTOUTPUT$
+# $HOSTPERFDATA$
+# $HOSTACKAUTHOR$
+# $HOSTACKCOMMENT$
+# $SERVICEOUTPUT$
+# $SERVICEPERFDATA$
+# $SERVICEACKAUTHOR$
+# $SERVICEACKCOMMENT$
+
+illegal_macro_output_chars=`~$&|'"<>
+
+
+
+# REGULAR EXPRESSION MATCHING
+# This option controls whether or not regular expression matching
+# takes place in the object config files. Regular expression
+# matching is used to match host, hostgroup, service, and service
+# group names/descriptions in some fields of various object types.
+# Values: 1 = enable regexp matching, 0 = disable regexp matching
+
+use_regexp_matching=0
+
+
+
+# "TRUE" REGULAR EXPRESSION MATCHING
+# This option controls whether or not "true" regular expression
+# matching takes place in the object config files. This option
+# only has an effect if regular expression matching is enabled
+# (see above). If this option is DISABLED, regular expression
+# matching only occurs if a string contains wildcard characters
+# (* and ?). If the option is ENABLED, regexp matching occurs
+# all the time (which can be annoying).
+# Values: 1 = enable true matching, 0 = disable true matching
+
+use_true_regexp_matching=0
+
+
+
+# ADMINISTRATOR EMAIL/PAGER ADDRESSES
+# The email and pager address of a global administrator (likely you).
+# Nagios never uses these values itself, but you can access them by
+# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
+# commands.
+
+admin_email=root@localhost
+admin_pager=pageroot@localhost
+
+
+
+# DAEMON CORE DUMP OPTION
+# This option determines whether or not Nagios is allowed to create
+# a core dump when it runs as a daemon. Note that it is generally
+# considered bad form to allow this, but it may be useful for
+# debugging purposes. Enabling this option doesn't guarantee that
+# a core file will be produced, but that's just life...
+# Values: 1 - Allow core dumps
+# 0 - Do not allow core dumps (default)
+
+daemon_dumps_core=0
+
+
+
+# LARGE INSTALLATION TWEAKS OPTION
+# This option determines whether or not Nagios will take some shortcuts
+# which can save on memory and CPU usage in large Nagios installations.
+# Read the documentation for more information on the benefits/tradeoffs
+# of enabling this option.
+# Values: 1 - Enable tweaks
+# 0 - Disable tweaks (default)
+
+use_large_installation_tweaks=0
+
+
+
+# ENABLE ENVIRONMENT MACROS
+# This option determines whether or not Nagios will make all standard
+# macros available as environment variables when host/service checks
+# and system commands (event handlers, notifications, etc.) are
+# executed. Enabling this option can cause performance issues in
+# large installations, as it will consume a bit more memory and (more
+# importantly) consume more CPU.
+# Values: 1 - Enable environment variable macros (default)
+# 0 - Disable environment variable macros
+
+enable_environment_macros=1
+
+
+
+# CHILD PROCESS MEMORY OPTION
+# This option determines whether or not Nagios will free memory in
+# child processes (processes used to execute system commands and host/
+# service checks). If you specify a value here, it will override
+# program defaults.
+# Value: 1 - Free memory in child processes
+# 0 - Do not free memory in child processes
+
+#free_child_process_memory=1
+
+
+
+# CHILD PROCESS FORKING BEHAVIOR
+# This option determines how Nagios will fork child processes
+# (used to execute system commands and host/service checks). Normally
+# child processes are fork()ed twice, which provides a very high level
+# of isolation from problems. Fork()ing once is probably enough and will
+# save a great deal on CPU usage (in large installs), so you might
+# want to consider using this. If you specify a value here, it will override
+# program defaults.
+# Value: 1 - Child processes fork() twice
+# 0 - Child processes fork() just once
+
+#child_processes_fork_twice=1
+
+
+
+# DEBUG LEVEL
+# This option determines how much (if any) debugging information will
+# be written to the debug file. OR values together to log multiple
+# types of information.
+# Values:
+# -1 = Everything
+# 0 = Nothing
+# 1 = Functions
+# 2 = Configuration
+# 4 = Process information
+# 8 = Scheduled events
+# 16 = Host/service checks
+# 32 = Notifications
+# 64 = Event broker
+# 128 = External commands
+# 256 = Commands
+# 512 = Scheduled downtime
+# 1024 = Comments
+# 2048 = Macros
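+#
+# For example, a value of 12 (4 + 8) would log process information and
+# scheduled events.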
+
+debug_level=0
+
+
+
+# DEBUG VERBOSITY
+# This option determines how verbose the debug log output will be.
+# Values: 0 = Brief output
+# 1 = More detailed
+# 2 = Very detailed
+
+debug_verbosity=1
+
+
+
+# DEBUG FILE
+# This option determines where Nagios should write debugging information.
+
+debug_file=/var/lib/nagios3/nagios.debug
+
+
+
+# MAX DEBUG FILE SIZE
+# This option determines the maximum size (in bytes) of the debug file. If
+# the file grows larger than this size, it will be renamed with a .old
+# extension. If a file already exists with a .old extension it will
+# automatically be deleted. This helps ensure your disk space usage doesn't
+# get out of control when debugging Nagios.
+
+max_debug_file_size=1000000
+
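+# Site-specific settings: the values below override the defaults set earlier
+# in this file so that host and service performance data is exported for
+# PNP4Nagios in bulk mode with NPCD.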
+process_performance_data=1
+service_perfdata_file=/var/lib/nagios3/service-perfdata
+service_perfdata_file_template=DATATYPE::SERVICEPERFDATA\tTIMET::$TIMET$\tHOSTNAME::$HOSTNAME$\tSERVICEDESC::$SERVICEDESC$\tSERVICEPERFDATA::$SERVICEPERFDATA$\tSERVICECHECKCOMMAND::$SERVICECHECKCOMMAND$\tHOSTSTATE::$HOSTSTATE$\tHOSTSTATETYPE::$HOSTSTATETYPE$\tSERVICESTATE::$SERVICESTATE$\tSERVICESTATETYPE::$SERVICESTATETYPE$
+service_perfdata_file_mode=a
+service_perfdata_file_processing_interval=15
+service_perfdata_file_processing_command=process-service-perfdata-file-pnp4nagios-bulk-npcd
+host_perfdata_file=/var/lib/nagios3/host-perfdata
+host_perfdata_file_template=DATATYPE::HOSTPERFDATA\tTIMET::$TIMET$\tHOSTNAME::$HOSTNAME$\tHOSTPERFDATA::$HOSTPERFDATA$\tHOSTCHECKCOMMAND::$HOSTCHECKCOMMAND$\tHOSTSTATE::$HOSTSTATE$\tHOSTSTATETYPE::$HOSTSTATETYPE$
+host_perfdata_file_mode=a
+host_perfdata_file_processing_interval=15
+host_perfdata_file_processing_command=process-host-perfdata-file-pnp4nagios-bulk-npcd
+
diff --git a/puppet/modules/site_nagios/files/plugins/check_last_regex_in_log b/puppet/modules/site_nagios/files/plugins/check_last_regex_in_log
new file mode 100755
index 00000000..47569388
--- /dev/null
+++ b/puppet/modules/site_nagios/files/plugins/check_last_regex_in_log
@@ -0,0 +1,85 @@
+#!/bin/sh
+#
+# depends on nagios-plugins-common for /usr/lib/nagios/plugins/utils.sh
+# this package is installed by leap_platform via the Site_check_mk::Agent::Mrpe
+# class
+
+set -e
+
+usage()
+{
+cat << EOF
+usage: $0 -w <sec> -c <sec> -r <regexp> -f <filename>
+
+OPTIONS:
+ -h Show this message
+ -r <regex> regex to grep for
+ -f <file> logfile to search in
+ -w <sec> warning state after X seconds
+ -c <sec> critical state after X seconds
+
+example: $0 -f /var/log/syslog -r 'tapicero' -w 300 -c 600
+EOF
+}
+
+
+. /usr/lib/nagios/plugins/utils.sh
+
+
+warn=0
+crit=0
+log=''
+regex=''
+
+set -- $(getopt hr:f:w:c: "$@")
+while [ $# -gt 0 ]
+do
+ case "$1" in
+ (-h) usage; exit 0 ;;
+ (-f) log="$2"; shift;;
+ (-r) regex="$2"; shift;;
+ (-w) warn="$2"; shift;;
+ (-c) crit="$2"; shift;;
+ (--) shift; break;;
+ (-*) echo "$0: error - unrecognized option $1" 1>&2; exit 1;;
+ (*) break;;
+ esac
+ shift
+done
+
+[ $warn -eq 0 -o $crit -eq 0 -o -z "$regex" -o -z "$log" ] && ( usage; exit $STATE_UNKNOWN)
+[ -f "$log" ] || (echo "$log doesn't exist"; exit $STATE_UNKNOWN)
+
+lastmsg=$(tac "$log" | grep -i "$regex" | head -1 | sed 's/  */ /g' | cut -d' ' -f 1-3)
+
+if [ -z "$lastmsg" ]
+then
+ summary="\"$regex\" in $log was not found"
+ state=$STATE_CRITICAL
+ state_text='CRITICAL'
+ diff_sec=0
+else
+ lastmsg_sec=$(date '+%s' -d "$lastmsg")
+ now_sec=$(date '+%s')
+
+ diff_sec=$(($now_sec - $lastmsg_sec))
+
+ if [ $diff_sec -lt $warn ]; then
+ state=$STATE_OK
+ state_text='OK'
+ elif [ $diff_sec -lt $crit ]; then
+ state=$STATE_WARNING
+ state_text='WARNING'
+ else
+ state=$STATE_CRITICAL
+ state_text='CRITICAL'
+ fi
+
+ summary="Last occurrence of \"$regex\" in $log was $diff_sec sec ago"
+fi
+
+# check_mk_agent output
+# echo "$state Tapicero_Heatbeat sec=$diff_sec;$warn;$crit;0; $state_text - $summary"
+
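+# Example plugin output (hypothetical values):
+#   OK: Last occurrence of "tapicero" in /var/log/syslog was 42 sec ago | seconds=42;300;600;0;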
+echo "${state_text}: $summary | seconds=${diff_sec};$warn;$crit;0;"
+exit $state
diff --git a/puppet/modules/site_nagios/manifests/add_host_services.pp b/puppet/modules/site_nagios/manifests/add_host_services.pp
new file mode 100644
index 00000000..bd968e6f
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/add_host_services.pp
@@ -0,0 +1,32 @@
+define site_nagios::add_host_services (
+ $domain_full_suffix,
+ $domain_internal,
+ $domain_internal_suffix,
+ $ip_address,
+ $services,
+ $ssh_port,
+ $environment,
+ $openvpn_gateway_address='',
+ ) {
+
+ $nagios_hostname = $domain_internal
+
+ # Add Nagios service
+
+  # First, we need to turn the service array into a hash, using a "hash template"
+ # see https://github.com/ashak/puppet-resource-looping
+ $nagios_service_hashpart = {
+ 'hostname' => $nagios_hostname,
+ 'ip_address' => $ip_address,
+ 'openvpn_gw' => $openvpn_gateway_address,
+ 'environment' => $environment
+ }
+ $dynamic_parameters = {
+ 'service' => '%s'
+ }
+ $nagios_servicename = "${nagios_hostname}_%s"
+
+ $nagios_service_hash = create_resources_hash_from($nagios_servicename, $services, $nagios_service_hashpart, $dynamic_parameters)
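+  # For example (hypothetical values), a $services array of ['webapp'] for the
+  # host 'foo.example.org' would yield roughly:
+  #   { 'foo.example.org_webapp' => {
+  #       'hostname'    => 'foo.example.org',
+  #       'ip_address'  => '10.0.0.1',
+  #       'openvpn_gw'  => '',
+  #       'environment' => 'production',
+  #       'service'     => 'webapp' } }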
+
+ create_resources ( site_nagios::add_service, $nagios_service_hash )
+}
diff --git a/puppet/modules/site_nagios/manifests/add_service.pp b/puppet/modules/site_nagios/manifests/add_service.pp
new file mode 100644
index 00000000..72cd038a
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/add_service.pp
@@ -0,0 +1,32 @@
+define site_nagios::add_service (
+ $hostname, $ip_address, $service, $environment, $openvpn_gw = '') {
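+
+  # $name is built by site_nagios::add_host_services as "<hostname>_<service>",
+  # e.g. (hypothetically) "foo.example.org_webapp".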
+
+ $ssh = hiera_hash('ssh')
+ $ssh_port = $ssh['port']
+
+ case $service {
+ 'webapp': {
+ nagios_service {
+ "${name}_ssh":
+ use => 'generic-service',
+ check_command => "check_ssh_port!${ssh_port}",
+ service_description => 'SSH',
+ host_name => $hostname,
+ contact_groups => $environment;
+ "${name}_cert":
+ use => 'generic-service',
+ check_command => 'check_https_cert',
+ service_description => 'Website Certificate',
+ host_name => $hostname,
+ contact_groups => $environment;
+ "${name}_website":
+ use => 'generic-service',
+ check_command => 'check_https',
+ service_description => 'Website',
+ host_name => $hostname,
+ contact_groups => $environment;
+ }
+ }
+ default: {}
+ }
+}
diff --git a/puppet/modules/site_nagios/manifests/init.pp b/puppet/modules/site_nagios/manifests/init.pp
new file mode 100644
index 00000000..f91bfc26
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/init.pp
@@ -0,0 +1,13 @@
+# setup nagios on monitoring node
+class site_nagios {
+ tag 'leap_service'
+
+ include site_config::default
+
+ Class['site_config::default'] -> Class['site_nagios']
+
+ include site_nagios::server
+
+ # remove leftovers on monitoring nodes
+ include site_config::remove::monitoring
+}
diff --git a/puppet/modules/site_nagios/manifests/plugins.pp b/puppet/modules/site_nagios/manifests/plugins.pp
new file mode 100644
index 00000000..90a01cfb
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/plugins.pp
@@ -0,0 +1,16 @@
+# Deploy generic plugins useful to all nodes
+# nagios::plugin won't work to deploy a plugin
+# because it complains with:
+# Could not find dependency Package[nagios-plugins] …
+# at /srv/leap/puppet/modules/nagios/manifests/plugin.pp:18
+class site_nagios::plugins {
+
+ file { [
+ '/usr/local/lib', '/usr/local/lib/nagios',
+ '/usr/local/lib/nagios/plugins' ]:
+ ensure => directory;
+ '/usr/local/lib/nagios/plugins/check_last_regex_in_log':
+ source => 'puppet:///modules/site_nagios/plugins/check_last_regex_in_log',
+ mode => '0755';
+ }
+}
diff --git a/puppet/modules/site_nagios/manifests/server.pp b/puppet/modules/site_nagios/manifests/server.pp
new file mode 100644
index 00000000..6537124d
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server.pp
@@ -0,0 +1,97 @@
+# configures nagios on monitoring node
+# lint:ignore:inherits_across_namespaces
+class site_nagios::server inherits nagios::base {
+# lint:endignore
+
+ $nagios_hiera = hiera('nagios')
+ $nagiosadmin_pw = htpasswd_sha1($nagios_hiera['nagiosadmin_pw'])
+ $nagios_hosts = $nagios_hiera['hosts']
+ $nagios_contacts = hiera('contacts')
+ $environment = $nagios_hiera['environments']
+
+ include nagios::base
+ include nagios::defaults::commands
+ include nagios::defaults::templates
+ include nagios::defaults::timeperiods
+ include nagios::pnp4nagios
+ include nagios::pnp4nagios::popup
+
+ class { 'nagios':
+    # don't manage the apache class from nagios, because we already include
+ # it in site_apache::common
+ httpd => 'absent',
+ allow_external_cmd => true,
+ storeconfigs => false,
+ }
+
+ # Delete nagios config files provided by packages
+ # These don't get parsed by nagios.conf, but are
+  # still irritating duplicates of the real config
+ # files deployed by puppet in /etc/nagios3/
+ file { [
+ '/etc/nagios3/conf.d/contacts_nagios2.cfg',
+ '/etc/nagios3/conf.d/extinfo_nagios2.cfg',
+ '/etc/nagios3/conf.d/generic-host_nagios2.cfg',
+ '/etc/nagios3/conf.d/generic-service_nagios2.cfg',
+ '/etc/nagios3/conf.d/hostgroups_nagios2.cfg',
+ '/etc/nagios3/conf.d/localhost_nagios2.cfg',
+ '/etc/nagios3/conf.d/pnp4nagios.cfg',
+ '/etc/nagios3/conf.d/services_nagios2.cfg',
+ '/etc/nagios3/conf.d/timeperiods_nagios2.cfg' ]:
+ ensure => absent;
+ }
+
+ # deploy apache nagios3 config
+ # until https://gitlab.com/shared-puppet-modules-group/apache/issues/11
+  # is fixed, we need to deploy the config file manually
+ file {
+ '/etc/apache2/conf-available/nagios3.conf':
+ ensure => present,
+ source => 'puppet:///modules/nagios/configs/apache2.conf',
+ require => [ Package['nagios3'], Package['apache2'] ];
+ '/etc/apache2/conf-enabled/nagios3.conf':
+ ensure => link,
+ target => '/etc/apache2/conf-available/nagios3.conf',
+ require => [ Package['nagios3'], Package['apache2'] ];
+ }
+
+ include site_apache::common
+ include site_webapp::common_vhost
+ include apache::module::headers
+
+ File['nagios_htpasswd'] {
+ source => undef,
+ content => "nagiosadmin:${nagiosadmin_pw}",
+ mode => '0640',
+ }
+
+
+ # deploy serverside plugins
+ file { '/usr/lib/nagios/plugins/check_openvpn_server.pl':
+ source => 'puppet:///modules/nagios/plugins/check_openvpn_server.pl',
+ mode => '0755',
+ owner => 'nagios',
+ group => 'nagios',
+ require => Package['nagios-plugins'];
+ }
+
+ create_resources ( site_nagios::add_host_services, $nagios_hosts )
+
+ include site_nagios::server::apache
+ include site_check_mk::server
+ include site_shorewall::monitor
+ include site_nagios::server::icli
+
+ augeas {
+ 'logrotate_nagios':
+ context => '/files/etc/logrotate.d/nagios/rule',
+ changes => [ 'set file /var/log/nagios3/nagios.log', 'set rotate 7',
+ 'set schedule daily', 'set compress compress',
+ 'set missingok missingok', 'set ifempty notifempty',
+ 'set copytruncate copytruncate' ]
+ }
+
+ create_resources ( site_nagios::server::hostgroup, $environment )
+ create_resources ( site_nagios::server::contactgroup, $environment )
+ create_resources ( site_nagios::server::add_contacts, $environment )
+}
diff --git a/puppet/modules/site_nagios/manifests/server/add_contacts.pp b/puppet/modules/site_nagios/manifests/server/add_contacts.pp
new file mode 100644
index 00000000..b5c6f0a5
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/add_contacts.pp
@@ -0,0 +1,18 @@
+# configure a nagios_contact
+define site_nagios::server::add_contacts ($contact_emails) {
+
+ $environment = $name
+
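+  # Notification option letters below: for services w=warning, u=unknown,
+  # c=critical, r=recovery; for hosts d=down, r=recovery.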
+ nagios_contact {
+ $environment:
+ alias => $environment,
+ service_notification_period => '24x7',
+ host_notification_period => '24x7',
+ service_notification_options => 'w,u,c,r',
+ host_notification_options => 'd,r',
+ service_notification_commands => 'notify-service-by-email',
+ host_notification_commands => 'notify-host-by-email',
+ email => join($contact_emails, ', '),
+ require => Package['nagios']
+ }
+}
diff --git a/puppet/modules/site_nagios/manifests/server/apache.pp b/puppet/modules/site_nagios/manifests/server/apache.pp
new file mode 100644
index 00000000..82962e89
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/apache.pp
@@ -0,0 +1,25 @@
+# set up apache for nagios
+class site_nagios::server::apache {
+
+ include x509::variables
+
+ include site_config::x509::commercial::cert
+ include site_config::x509::commercial::key
+ include site_config::x509::commercial::ca
+
+ include apache::module::authn_file
+ # "AuthUserFile"
+ include apache::module::authz_user
+ # "AuthType Basic"
+ include apache::module::auth_basic
+ # "DirectoryIndex"
+ include apache::module::dir
+ include apache::module::php5
+ include apache::module::cgi
+
+ # apache >= 2.4, debian jessie
+ if ( $::lsbdistcodename == 'jessie' ) {
+ include apache::module::authn_core
+ }
+
+}
diff --git a/puppet/modules/site_nagios/manifests/server/contactgroup.pp b/puppet/modules/site_nagios/manifests/server/contactgroup.pp
new file mode 100644
index 00000000..5e60dd06
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/contactgroup.pp
@@ -0,0 +1,8 @@
+# configure a contactgroup
+define site_nagios::server::contactgroup ($contact_emails) {
+
+ nagios_contactgroup { $name:
+ members => $name,
+ require => Package['nagios']
+ }
+}
diff --git a/puppet/modules/site_nagios/manifests/server/hostgroup.pp b/puppet/modules/site_nagios/manifests/server/hostgroup.pp
new file mode 100644
index 00000000..0692fced
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/hostgroup.pp
@@ -0,0 +1,7 @@
+# create a nagios hostgroup
+define site_nagios::server::hostgroup ($contact_emails) {
+ nagios_hostgroup { $name:
+ ensure => present,
+ require => Package['nagios']
+ }
+}
diff --git a/puppet/modules/site_nagios/manifests/server/icli.pp b/puppet/modules/site_nagios/manifests/server/icli.pp
new file mode 100644
index 00000000..26fba725
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/icli.pp
@@ -0,0 +1,26 @@
+# Install icli package and configure ncli aliases
+class site_nagios::server::icli {
+ $nagios_hiera = hiera('nagios')
+ $environments = $nagios_hiera['environments']
+
+ package { 'icli':
+ ensure => installed;
+ }
+
+ file { '/root/.bashrc':
+ ensure => present;
+ }
+
+ file_line { 'icli aliases':
+ path => '/root/.bashrc',
+ line => 'source /root/.icli_aliases';
+ }
+
+ file { '/root/.icli_aliases':
+ content => template("${module_name}/icli_aliases.erb"),
+ mode => '0644',
+ owner => root,
+ group => 0,
+ require => Package['icli'];
+ }
+} \ No newline at end of file
diff --git a/puppet/modules/site_nagios/templates/icli_aliases.erb b/puppet/modules/site_nagios/templates/icli_aliases.erb
new file mode 100644
index 00000000..bcb2abb0
--- /dev/null
+++ b/puppet/modules/site_nagios/templates/icli_aliases.erb
@@ -0,0 +1,7 @@
+alias ncli='icli -c /var/cache/nagios3/objects.cache -f /var/cache/nagios3/status.dat -F /var/lib/nagios3/rw/nagios.cmd'
+alias ncli_problems='ncli -z '!o,!A''
+
+<% @environments.keys.sort.each do |env_name| %>
+alias ncli_<%= env_name %>='ncli -z '!o,!A' -g <%= env_name %>'
+alias ncli_<%= env_name %>_recheck='ncli -s Check_MK -g <%= env_name %> -a R'
+<% end -%>
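+
+# For an environment named, e.g., "production" (hypothetical), the loop above
+# renders:
+#   alias ncli_production='ncli -z '!o,!A' -g production'
+#   alias ncli_production_recheck='ncli -s Check_MK -g production -a R'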
diff --git a/puppet/modules/site_nickserver/manifests/init.pp b/puppet/modules/site_nickserver/manifests/init.pp
new file mode 100644
index 00000000..eb4415e7
--- /dev/null
+++ b/puppet/modules/site_nickserver/manifests/init.pp
@@ -0,0 +1,178 @@
+#
+# TODO: currently, this is dependent on some things that are set up in
+# site_webapp
+#
+# (1) HAProxy -> couchdb
+# (2) Apache
+#
+# It would be good in the future to make nickserver installable independently of
+# site_webapp.
+#
+
+class site_nickserver {
+ tag 'leap_service'
+ Class['site_config::default'] -> Class['site_nickserver']
+
+ include site_config::ruby::dev
+
+ #
+ # VARIABLES
+ #
+
+ $nickserver = hiera('nickserver')
+ $nickserver_domain = $nickserver['domain']
+ $couchdb_user = $nickserver['couchdb_nickserver_user']['username']
+ $couchdb_password = $nickserver['couchdb_nickserver_user']['password']
+
+  # the port that the public connects to (should be 6425)
+ $nickserver_port = $nickserver['port']
+ # the port that nickserver is actually running on
+ $nickserver_local_port = '64250'
+
+ # couchdb is available on localhost via haproxy, which is bound to 4096.
+ $couchdb_host = 'localhost'
+  # See site_webapp/templates/haproxy_couchdb.cfg.erb
+ $couchdb_port = '4096'
+
+ $sources = hiera('sources')
+
+ # temporarily for now:
+ $domain = hiera('domain')
+ $address_domain = $domain['full_suffix']
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+
+ #
+ # USER AND GROUP
+ #
+
+ group { 'nickserver':
+ ensure => present,
+ allowdupe => false;
+ }
+
+ user { 'nickserver':
+ ensure => present,
+ allowdupe => false,
+ gid => 'nickserver',
+ home => '/srv/leap/nickserver',
+ require => Group['nickserver'];
+ }
+
+ vcsrepo { '/srv/leap/nickserver':
+ ensure => present,
+ revision => $sources['nickserver']['revision'],
+ provider => $sources['nickserver']['type'],
+ source => $sources['nickserver']['source'],
+ owner => 'nickserver',
+ group => 'nickserver',
+ require => [ User['nickserver'], Group['nickserver'] ],
+ notify => Exec['nickserver_bundler_update'];
+ }
+
+ exec { 'nickserver_bundler_update':
+ cwd => '/srv/leap/nickserver',
+ command => '/bin/bash -c "/usr/bin/bundle check || /usr/bin/bundle install --path vendor/bundle"',
+ unless => '/usr/bin/bundle check',
+ user => 'nickserver',
+ timeout => 600,
+ require => [
+ Class['bundler::install'], Vcsrepo['/srv/leap/nickserver'],
+ Package['libssl-dev'], Class['site_config::ruby::dev'] ],
+
+ notify => Service['nickserver'];
+ }
+
+ #
+ # NICKSERVER CONFIG
+ #
+
+ file { '/etc/nickserver.yml':
+ content => template('site_nickserver/nickserver.yml.erb'),
+ owner => nickserver,
+ group => nickserver,
+ mode => '0600',
+ notify => Service['nickserver'];
+ }
+
+ #
+ # NICKSERVER DAEMON
+ #
+
+ file {
+ '/usr/bin/nickserver':
+ ensure => link,
+ target => '/srv/leap/nickserver/bin/nickserver',
+ require => Vcsrepo['/srv/leap/nickserver'];
+
+ '/etc/init.d/nickserver':
+ owner => root,
+ group => 0,
+ mode => '0755',
+ source => '/srv/leap/nickserver/dist/debian-init-script',
+ require => Vcsrepo['/srv/leap/nickserver'];
+ }
+
+  # register the init script with systemd on nodes newer than wheezy
+ # see https://leap.se/code/issues/7614
+ case $::operatingsystemrelease {
+ /^7.*/: { }
+ default: {
+ exec { 'register_systemd_nickserver':
+ refreshonly => true,
+ command => '/bin/systemctl enable nickserver',
+ subscribe => File['/etc/init.d/nickserver'],
+ before => Service['nickserver'];
+ }
+ }
+ }
+
+ service { 'nickserver':
+ ensure => running,
+ enable => true,
+ hasrestart => true,
+ hasstatus => true,
+ require => [
+ File['/etc/init.d/nickserver'],
+ File['/usr/bin/nickserver'],
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ];
+ }
+
+ #
+ # FIREWALL
+ # poke a hole in the firewall to allow nickserver requests
+ #
+
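+  # shorewall macro columns: ACTION SOURCE DEST PROTO DPORT; PARAM is a
+  # placeholder replaced by the action given in the rule below (ACCEPT).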
+ file { '/etc/shorewall/macro.nickserver':
+ content => "PARAM - - tcp ${nickserver_port}",
+ notify => Service['shorewall'],
+ require => Package['shorewall'];
+ }
+
+ shorewall::rule { 'net2fw-nickserver':
+ source => 'net',
+ destination => '$FW',
+ action => 'nickserver(ACCEPT)',
+ order => 200;
+ }
+
+ #
+ # APACHE REVERSE PROXY
+  # nickserver doesn't speak TLS natively, so let Apache handle that.
+ #
+
+ apache::module {
+ 'proxy': ensure => present;
+ 'proxy_http': ensure => present
+ }
+
+ apache::vhost::file {
+ 'nickserver':
+ content => template('site_nickserver/nickserver-proxy.conf.erb')
+ }
+
+}
diff --git a/puppet/modules/site_nickserver/templates/nickserver-proxy.conf.erb b/puppet/modules/site_nickserver/templates/nickserver-proxy.conf.erb
new file mode 100644
index 00000000..8f59fe38
--- /dev/null
+++ b/puppet/modules/site_nickserver/templates/nickserver-proxy.conf.erb
@@ -0,0 +1,19 @@
+#
+# Apache reverse proxy configuration for the Nickserver
+#
+
+Listen 0.0.0.0:<%= @nickserver_port -%>
+
+<VirtualHost *:<%= @nickserver_port -%>>
+ ServerName <%= @nickserver_domain %>
+ ServerAlias <%= @address_domain %>
+
+ SSLCACertificatePath /etc/ssl/certs
+ SSLCertificateKeyFile <%= scope.lookupvar('x509::variables::keys') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.key
+ SSLCertificateFile <%= scope.lookupvar('x509::variables::certs') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.crt
+
+ Include include.d/ssl_common.inc
+
+ ProxyPass / http://localhost:<%= @nickserver_local_port %>/
+  # preserve the Host header in the request passed to the backend
+  ProxyPreserveHost On
+</VirtualHost>
diff --git a/puppet/modules/site_nickserver/templates/nickserver.yml.erb b/puppet/modules/site_nickserver/templates/nickserver.yml.erb
new file mode 100644
index 00000000..e717cbaa
--- /dev/null
+++ b/puppet/modules/site_nickserver/templates/nickserver.yml.erb
@@ -0,0 +1,19 @@
+#
+# configuration for nickserver.
+#
+
+domain: "<%= @address_domain %>"
+
+couch_host: "<%= @couchdb_host %>"
+couch_port: <%= @couchdb_port %>
+couch_database: "identities"
+couch_user: "<%= @couchdb_user %>"
+couch_password: "<%= @couchdb_password %>"
+
+hkp_url: "https://hkps.pool.sks-keyservers.net:/pks/lookup"
+
+user: "nickserver"
+port: <%= @nickserver_local_port %>
+pid_file: "/var/run/nickserver"
+log_file: "/var/log/nickserver.log"
+
diff --git a/puppet/modules/site_obfsproxy/README b/puppet/modules/site_obfsproxy/README
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/puppet/modules/site_obfsproxy/README
diff --git a/puppet/modules/site_obfsproxy/manifests/init.pp b/puppet/modules/site_obfsproxy/manifests/init.pp
new file mode 100644
index 00000000..2ed5ec9e
--- /dev/null
+++ b/puppet/modules/site_obfsproxy/manifests/init.pp
@@ -0,0 +1,38 @@
+class site_obfsproxy {
+ tag 'leap_service'
+ Class['site_config::default'] -> Class['site_obfsproxy']
+
+ $transport = 'scramblesuit'
+
+ $obfsproxy = hiera('obfsproxy')
+ $scramblesuit = $obfsproxy['scramblesuit']
+ $scram_pass = $scramblesuit['password']
+ $scram_port = $scramblesuit['port']
+ $dest_ip = $obfsproxy['gateway_address']
+ $dest_port = '443'
+
+ if member($::services, 'openvpn') {
+ $openvpn = hiera('openvpn')
+ $bind_address = $openvpn['gateway_address']
+ }
+ elsif member($::services, 'obfsproxy') {
+ $bind_address = hiera('ip_address')
+ }
+
+ include site_config::default
+
+ class { 'obfsproxy':
+ transport => $transport,
+ bind_address => $bind_address,
+ port => $scram_port,
+ param => $scram_pass,
+ dest_ip => $dest_ip,
+ dest_port => $dest_port,
+ }
+
+ include site_shorewall::obfsproxy
+
+}
+
+
+
diff --git a/puppet/modules/site_openvpn/README b/puppet/modules/site_openvpn/README
new file mode 100644
index 00000000..cef5be23
--- /dev/null
+++ b/puppet/modules/site_openvpn/README
@@ -0,0 +1,20 @@
+Place to look when debugging problems
+========================================
+
+Log files:
+
+ openvpn: /var/log/syslog
+ shorewall: /var/log/syslog
+ shorewall startup: /var/log/shorewall-init.log
+
+Check NAT masq:
+
+ iptables -t nat --list-rules
+
+Check interfaces:
+
+ ip addr ls
+
+Scripts:
+
+ /usr/local/bin/add_gateway_ips.sh \ No newline at end of file
diff --git a/puppet/modules/site_openvpn/manifests/dh_key.pp b/puppet/modules/site_openvpn/manifests/dh_key.pp
new file mode 100644
index 00000000..13cc0f5b
--- /dev/null
+++ b/puppet/modules/site_openvpn/manifests/dh_key.pp
@@ -0,0 +1,10 @@
+class site_openvpn::dh_key {
+
+ $x509_config = hiera('x509')
+
+ file { '/etc/openvpn/keys/dh.pem':
+ content => $x509_config['dh'],
+ mode => '0644',
+ }
+
+}
diff --git a/puppet/modules/site_openvpn/manifests/init.pp b/puppet/modules/site_openvpn/manifests/init.pp
new file mode 100644
index 00000000..f1ecefb9
--- /dev/null
+++ b/puppet/modules/site_openvpn/manifests/init.pp
@@ -0,0 +1,238 @@
+#
+# An openvpn gateway can support three modes:
+#
+# (1) limited and unlimited
+# (2) unlimited only
+# (3) limited only
+#
+# The difference is that 'unlimited' gateways only allow client certs that match
+# the 'unlimited_prefix', and 'limited' gateways only allow certs that match the
+# 'limited_prefix'.
+#
+# We potentially create four openvpn config files (thus four daemons):
+#
+# (1) unlimited + tcp => tcp_config.conf
+# (2) unlimited + udp => udp_config.conf
+# (3) limited + tcp => limited_tcp_config.conf
+# (4) limited + udp => limited_udp_config.conf
+#
+
+class site_openvpn {
+ tag 'leap_service'
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca_bundle
+
+ include site_config::default
+ Class['site_config::default'] -> Class['site_openvpn']
+
+ include ::site_obfsproxy
+
+ $openvpn = hiera('openvpn')
+ $openvpn_ports = $openvpn['ports']
+ $openvpn_config = $openvpn['configuration']
+
+ if $::ec2_instance_id {
+ $openvpn_gateway_address = $::ipaddress
+ } else {
+ $openvpn_gateway_address = $openvpn['gateway_address']
+ if $openvpn['second_gateway_address'] {
+ $openvpn_second_gateway_address = $openvpn['second_gateway_address']
+ } else {
+ $openvpn_second_gateway_address = undef
+ }
+ }
+
+ $openvpn_allow_unlimited = $openvpn['allow_unlimited']
+ $openvpn_unlimited_prefix = $openvpn['unlimited_prefix']
+ $openvpn_unlimited_tcp_network_prefix = '10.41.0'
+ $openvpn_unlimited_tcp_netmask = '255.255.248.0'
+ $openvpn_unlimited_tcp_cidr = '21'
+ $openvpn_unlimited_udp_network_prefix = '10.42.0'
+ $openvpn_unlimited_udp_netmask = '255.255.248.0'
+ $openvpn_unlimited_udp_cidr = '21'
+
+ if !$::ec2_instance_id {
+ $openvpn_allow_limited = $openvpn['allow_limited']
+ $openvpn_limited_prefix = $openvpn['limited_prefix']
+ $openvpn_rate_limit = $openvpn['rate_limit']
+ $openvpn_limited_tcp_network_prefix = '10.43.0'
+ $openvpn_limited_tcp_netmask = '255.255.248.0'
+ $openvpn_limited_tcp_cidr = '21'
+ $openvpn_limited_udp_network_prefix = '10.44.0'
+ $openvpn_limited_udp_netmask = '255.255.248.0'
+ $openvpn_limited_udp_cidr = '21'
+ }
+
+ # find out the netmask in cidr format of the primary IF
+ # thx to https://blog.kumina.nl/tag/puppet-tips-and-tricks/
+ # we can do this using an inline_template:
+ $factname_primary_netmask = "netmask_cidr_${::site_config::params::interface}"
+ $primary_netmask = inline_template('<%= scope.lookupvar(@factname_primary_netmask) %>')
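+  # For illustration (hypothetical values): if the primary interface were eth0,
+  # this reads a fact named 'netmask_cidr_eth0', so $primary_netmask ends up as
+  # something like '24'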
+
+ # deploy dh keys
+ include site_openvpn::dh_key
+
+ if $openvpn_allow_unlimited and $openvpn_allow_limited {
+ $unlimited_gateway_address = $openvpn_gateway_address
+ $limited_gateway_address = $openvpn_second_gateway_address
+ } elsif $openvpn_allow_unlimited {
+ $unlimited_gateway_address = $openvpn_gateway_address
+ $limited_gateway_address = undef
+ } elsif $openvpn_allow_limited {
+ $unlimited_gateway_address = undef
+ $limited_gateway_address = $openvpn_gateway_address
+ }
+
+ if $openvpn_allow_unlimited {
+ site_openvpn::server_config { 'tcp_config':
+ port => '1194',
+ proto => 'tcp',
+ local => $unlimited_gateway_address,
+ tls_remote => "\"${openvpn_unlimited_prefix}\"",
+ server => "${openvpn_unlimited_tcp_network_prefix}.0 ${openvpn_unlimited_tcp_netmask}",
+ push => "\"dhcp-option DNS ${openvpn_unlimited_tcp_network_prefix}.1\"",
+ management => '127.0.0.1 1000',
+ config => $openvpn_config
+ }
+ site_openvpn::server_config { 'udp_config':
+ port => '1194',
+ proto => 'udp',
+ local => $unlimited_gateway_address,
+ tls_remote => "\"${openvpn_unlimited_prefix}\"",
+ server => "${openvpn_unlimited_udp_network_prefix}.0 ${openvpn_unlimited_udp_netmask}",
+ push => "\"dhcp-option DNS ${openvpn_unlimited_udp_network_prefix}.1\"",
+ management => '127.0.0.1 1001',
+ config => $openvpn_config
+ }
+ } else {
+ tidy { '/etc/openvpn/tcp_config.conf': }
+ tidy { '/etc/openvpn/udp_config.conf': }
+ }
+
+ if $openvpn_allow_limited {
+ site_openvpn::server_config { 'limited_tcp_config':
+ port => '1194',
+ proto => 'tcp',
+ local => $limited_gateway_address,
+ tls_remote => "\"${openvpn_limited_prefix}\"",
+ server => "${openvpn_limited_tcp_network_prefix}.0 ${openvpn_limited_tcp_netmask}",
+ push => "\"dhcp-option DNS ${openvpn_limited_tcp_network_prefix}.1\"",
+ management => '127.0.0.1 1002',
+ config => $openvpn_config
+ }
+ site_openvpn::server_config { 'limited_udp_config':
+ port => '1194',
+ proto => 'udp',
+ local => $limited_gateway_address,
+ tls_remote => "\"${openvpn_limited_prefix}\"",
+ server => "${openvpn_limited_udp_network_prefix}.0 ${openvpn_limited_udp_netmask}",
+ push => "\"dhcp-option DNS ${openvpn_limited_udp_network_prefix}.1\"",
+ management => '127.0.0.1 1003',
+ config => $openvpn_config
+ }
+ } else {
+ tidy { '/etc/openvpn/limited_tcp_config.conf': }
+ tidy { '/etc/openvpn/limited_udp_config.conf': }
+ }
+
+ file {
+ '/usr/local/bin/add_gateway_ips.sh':
+ content => template('site_openvpn/add_gateway_ips.sh.erb'),
+ mode => '0755';
+ }
+
+ exec { '/usr/local/bin/add_gateway_ips.sh':
+ subscribe => File['/usr/local/bin/add_gateway_ips.sh'],
+ }
+
+ exec { 'restart_openvpn':
+ command => '/etc/init.d/openvpn restart',
+ refreshonly => true,
+ subscribe => [
+ File['/etc/openvpn'],
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca_bundle'] ],
+ require => [
+ Package['openvpn'],
+ File['/etc/openvpn'],
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca_bundle'] ];
+ }
+
+ cron { 'add_gateway_ips.sh':
+ command => '/usr/local/bin/add_gateway_ips.sh',
+ user => 'root',
+ special => 'reboot',
+ }
+
+ # setup the resolver to listen on the vpn IP
+ include site_openvpn::resolver
+
+ include site_shorewall::eip
+
+ package {
+ 'openvpn': ensure => latest
+ }
+
+ service {
+ 'openvpn':
+ ensure => running,
+ hasrestart => true,
+ hasstatus => true,
+ require => [
+ Package['openvpn'],
+ Exec['concat_/etc/default/openvpn'] ];
+ }
+
+ file {
+ '/etc/openvpn':
+ ensure => directory,
+ notify => Exec['restart_openvpn'],
+ require => Package['openvpn'];
+ }
+
+ file {
+ '/etc/openvpn/keys':
+ ensure => directory,
+ require => Package['openvpn'];
+ }
+
+ concat {
+ '/etc/default/openvpn':
+ owner => root,
+ group => root,
+ mode => 644,
+ warn => true,
+ notify => Service['openvpn'];
+ }
+
+ concat::fragment {
+ 'openvpn.default.header':
+ content => template('openvpn/etc-default-openvpn.erb'),
+ target => '/etc/default/openvpn',
+ order => 01;
+ }
+
+ concat::fragment {
+ "openvpn.default.autostart.${name}":
+ content => 'AUTOSTART=all',
+ target => '/etc/default/openvpn',
+ order => 10;
+ }
+
+ leap::logfile { 'openvpn_tcp': }
+ leap::logfile { 'openvpn_udp': }
+
+ # Because we currently do not support ipv6 and instead block it (so no leaks
+ # happen), we get a large number of these messages, so we ignore them (#6540)
+ rsyslog::snippet { '01-ignore_icmpv6_send':
+ content => ':msg, contains, "icmpv6_send: no reply to icmp error" ~'
+ }
+
+ include site_check_mk::agent::openvpn
+
+}
diff --git a/puppet/modules/site_openvpn/manifests/resolver.pp b/puppet/modules/site_openvpn/manifests/resolver.pp
new file mode 100644
index 00000000..cea0153a
--- /dev/null
+++ b/puppet/modules/site_openvpn/manifests/resolver.pp
@@ -0,0 +1,50 @@
+class site_openvpn::resolver {
+
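+  # Sketch of a rendered unbound snippet, using the default unlimited udp range
+  # defined in site_openvpn (10.42.0.0/21):
+  #
+  #   server:
+  #       interface: 10.42.0.1
+  #       access-control: 10.42.0.0/21 allow
+  #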
+ if $site_openvpn::openvpn_allow_unlimited {
+ $ensure_unlimited = 'present'
+ file {
+ '/etc/unbound/unbound.conf.d/vpn_unlimited_udp_resolver.conf':
+ content => "server:\n\tinterface: ${site_openvpn::openvpn_unlimited_udp_network_prefix}.1\n\taccess-control: ${site_openvpn::openvpn_unlimited_udp_network_prefix}.0/${site_openvpn::openvpn_unlimited_udp_cidr} allow\n",
+ owner => root,
+ group => root,
+ mode => '0644',
+ require => [ Class['site_config::caching_resolver'], Service['openvpn'] ],
+ notify => Service['unbound'];
+ '/etc/unbound/unbound.conf.d/vpn_unlimited_tcp_resolver.conf':
+ content => "server:\n\tinterface: ${site_openvpn::openvpn_unlimited_tcp_network_prefix}.1\n\taccess-control: ${site_openvpn::openvpn_unlimited_tcp_network_prefix}.0/${site_openvpn::openvpn_unlimited_tcp_cidr} allow\n",
+ owner => root,
+ group => root,
+ mode => '0644',
+ require => [ Class['site_config::caching_resolver'], Service['openvpn'] ],
+ notify => Service['unbound'];
+ }
+ } else {
+ $ensure_unlimited = 'absent'
+ tidy { '/etc/unbound/unbound.conf.d/vpn_unlimited_udp_resolver.conf': }
+ tidy { '/etc/unbound/unbound.conf.d/vpn_unlimited_tcp_resolver.conf': }
+ }
+
+ if $site_openvpn::openvpn_allow_limited {
+ $ensure_limited = 'present'
+ file {
+ '/etc/unbound/unbound.conf.d/vpn_limited_udp_resolver.conf':
+ content => "server:\n\tinterface: ${site_openvpn::openvpn_limited_udp_network_prefix}.1\n\taccess-control: ${site_openvpn::openvpn_limited_udp_network_prefix}.0/${site_openvpn::openvpn_limited_udp_cidr} allow\n",
+ owner => root,
+ group => root,
+ mode => '0644',
+ require => [ Class['site_config::caching_resolver'], Service['openvpn'] ],
+ notify => Service['unbound'];
+ '/etc/unbound/unbound.conf.d/vpn_limited_tcp_resolver.conf':
+        content => "server:\n\tinterface: ${site_openvpn::openvpn_limited_tcp_network_prefix}.1\n\taccess-control: ${site_openvpn::openvpn_limited_tcp_network_prefix}.0/${site_openvpn::openvpn_limited_tcp_cidr} allow\n",
+ owner => root,
+ group => root,
+ mode => '0644',
+ require => [ Class['site_config::caching_resolver'], Service['openvpn'] ],
+ notify => Service['unbound'];
+ }
+ } else {
+ $ensure_limited = 'absent'
+ tidy { '/etc/unbound/unbound.conf.d/vpn_limited_udp_resolver.conf': }
+ tidy { '/etc/unbound/unbound.conf.d/vpn_limited_tcp_resolver.conf': }
+ }
+}
diff --git a/puppet/modules/site_openvpn/manifests/server_config.pp b/puppet/modules/site_openvpn/manifests/server_config.pp
new file mode 100644
index 00000000..15e6fb38
--- /dev/null
+++ b/puppet/modules/site_openvpn/manifests/server_config.pp
@@ -0,0 +1,228 @@
+#
+# Cipher discussion
+# ================================
+#
+# We want to specify explicit values for the crypto options to prevent a MiTM from forcing
+# a weaker cipher. These should be set in both the server and the client ('auth' and 'cipher'
+# MUST be the same on both ends or no data will get transmitted).
+#
+# tls-cipher DHE-RSA-AES128-SHA
+#
+# dkg: For the TLS control channel, we want to make sure we choose a
+# key exchange mechanism that has PFS (meaning probably some form of ephemeral
+# Diffie-Hellman key exchange), and that uses a standard, well-tested cipher
+# (I recommend AES, and 128 bits is probably fine, since there are some known
+# weaknesses in the 192- and 256-bit key schedules). That leaves us with the
+# choice of public key algorithms: /usr/sbin/openvpn --show-tls | grep DHE |
+# grep AES128 | grep GCM.
+#
+# elijah:
+# I could not get any of these working:
+# * openvpn --show-tls | grep GCM
+# * openvpn --show-tls | grep DHE | grep AES128 | grep SHA256
+# so, i went with this:
+# * openvpn --show-tls | grep DHE | grep AES128 | grep -v SHA256 | grep -v GCM
+# Also, i couldn't get any of the elliptic curve algorithms to work. Not sure how
+# our cert generation interacts with the tls-cipher algorithms.
+#
+# note: in my tests, DHE-RSA-AES256-SHA is the one it negotiates if no value is set.
+#
+# auth SHA1
+#
+# dkg: For the HMAC digest to authenticate packets, we just want SHA256. OpenVPN lists
+# a number of "digests" with names like "RSA-SHA256", but these are legacy and
+# should be avoided.
+#
+# elijah: i am not so sure that the digest algo matters for the 'auth' option, because
+# i think an attacker would have to forge the digest in real time, which is still far from
+# a possibility for SHA1. So, i am leaving the default for now (SHA1).
+#
+# cipher AES-128-CBC
+#
+# dkg: For the choice of cipher, we need to select an algorithm and a
+# cipher mode. OpenVPN defaults to Blowfish, which is a fine algorithm - but
+# our control channel is already relying on AES not being broken; if the
+# control channel is cracked, then the key material for the tunnel is exposed,
+# and the choice of algorithm is moot. So it makes more sense to me to rely on
+# the same cipher here: AES128. As for the cipher mode, OFB seems cleaner to
+# me, but CBC is more well-tested, and the OpenVPN man page (at least as of
+# version 2.2.1) says "CBC is recommended and CFB and OFB should be considered
+# advanced modes."
+#
+# note: the default is BF-CBC (blowfish)
+#
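+# Taken together, the discussion above corresponds to openvpn directives along
+# the lines of the following (the actual values come from the
+# openvpn 'configuration' hiera hash that is passed in as $config):
+#
+#   tls-cipher DHE-RSA-AES128-SHA
+#   auth SHA1
+#   cipher AES-128-CBC
+#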
+
+define site_openvpn::server_config(
+ $port, $proto, $local, $server, $push,
+ $management, $config, $tls_remote = undef) {
+
+ $openvpn_configname = $name
+ $shortname = regsubst(regsubst($name, '_config', ''), '_', '-')
+ $openvpn_status_filename = "/var/run/openvpn-status-${shortname}"
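+  # e.g. the define titled 'limited_udp_config' yields shortname 'limited-udp'
+  # and status file /var/run/openvpn-status-limited-udp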
+
+ concat {
+ "/etc/openvpn/${openvpn_configname}.conf":
+ owner => root,
+ group => root,
+ mode => 644,
+ warn => true,
+ require => File['/etc/openvpn'],
+ before => Service['openvpn'],
+ notify => Exec['restart_openvpn'];
+ }
+
+ if $tls_remote != undef {
+ openvpn::option {
+ "tls-remote ${openvpn_configname}":
+ key => 'tls-remote',
+ value => $tls_remote,
+ server => $openvpn_configname;
+ }
+ }
+
+  # according to the openvpn man page, tcp-nodelay is "generally a good latency optimization".
+ if $proto == 'tcp' {
+ openvpn::option {
+ "tcp-nodelay ${openvpn_configname}":
+ key => 'tcp-nodelay',
+ server => $openvpn_configname;
+ }
+ } elsif $proto == 'udp' {
+ if $config['fragment'] != 1500 {
+ openvpn::option {
+ "fragment ${openvpn_configname}":
+ key => 'fragment',
+ value => $config['fragment'],
+ server => $openvpn_configname;
+ "mssfix ${openvpn_configname}":
+ key => 'mssfix',
+ server => $openvpn_configname;
+ }
+ }
+ }
+
+ openvpn::option {
+ "ca ${openvpn_configname}":
+ key => 'ca',
+ value => "${x509::variables::local_CAs}/${site_config::params::ca_bundle_name}.crt",
+ server => $openvpn_configname;
+ "cert ${openvpn_configname}":
+ key => 'cert',
+ value => "${x509::variables::certs}/${site_config::params::cert_name}.crt",
+ server => $openvpn_configname;
+ "key ${openvpn_configname}":
+ key => 'key',
+ value => "${x509::variables::keys}/${site_config::params::cert_name}.key",
+ server => $openvpn_configname;
+ "dh ${openvpn_configname}":
+ key => 'dh',
+ value => '/etc/openvpn/keys/dh.pem',
+ server => $openvpn_configname;
+ "tls-cipher ${openvpn_configname}":
+ key => 'tls-cipher',
+ value => $config['tls-cipher'],
+ server => $openvpn_configname;
+ "auth ${openvpn_configname}":
+ key => 'auth',
+ value => $config['auth'],
+ server => $openvpn_configname;
+ "cipher ${openvpn_configname}":
+ key => 'cipher',
+ value => $config['cipher'],
+ server => $openvpn_configname;
+ "dev ${openvpn_configname}":
+ key => 'dev',
+ value => 'tun',
+ server => $openvpn_configname;
+ "tun-ipv6 ${openvpn_configname}":
+ key => 'tun-ipv6',
+ server => $openvpn_configname;
+ "duplicate-cn ${openvpn_configname}":
+ key => 'duplicate-cn',
+ server => $openvpn_configname;
+ "keepalive ${openvpn_configname}":
+ key => 'keepalive',
+ value => $config['keepalive'],
+ server => $openvpn_configname;
+ "local ${openvpn_configname}":
+ key => 'local',
+ value => $local,
+ server => $openvpn_configname;
+ "mute ${openvpn_configname}":
+ key => 'mute',
+ value => '5',
+ server => $openvpn_configname;
+ "mute-replay-warnings ${openvpn_configname}":
+ key => 'mute-replay-warnings',
+ server => $openvpn_configname;
+ "management ${openvpn_configname}":
+ key => 'management',
+ value => $management,
+ server => $openvpn_configname;
+ "proto ${openvpn_configname}":
+ key => 'proto',
+ value => $proto,
+ server => $openvpn_configname;
+ "push1 ${openvpn_configname}":
+ key => 'push',
+ value => $push,
+ server => $openvpn_configname;
+ "push2 ${openvpn_configname}":
+ key => 'push',
+ value => '"redirect-gateway def1"',
+ server => $openvpn_configname;
+ "push-ipv6 ${openvpn_configname}":
+ key => 'push',
+ value => '"route-ipv6 2000::/3"',
+ server => $openvpn_configname;
+ "script-security ${openvpn_configname}":
+ key => 'script-security',
+ value => '1',
+ server => $openvpn_configname;
+ "server ${openvpn_configname}":
+ key => 'server',
+ value => $server,
+ server => $openvpn_configname;
+ "server-ipv6 ${openvpn_configname}":
+ key => 'server-ipv6',
+ value => '2001:db8:123::/64',
+ server => $openvpn_configname;
+ "status ${openvpn_configname}":
+ key => 'status',
+ value => "${openvpn_status_filename} 10",
+ server => $openvpn_configname;
+ "status-version ${openvpn_configname}":
+ key => 'status-version',
+ value => '3',
+ server => $openvpn_configname;
+ "topology ${openvpn_configname}":
+ key => 'topology',
+ value => 'subnet',
+ server => $openvpn_configname;
+ "verb ${openvpn_configname}":
+ key => 'verb',
+ value => '3',
+ server => $openvpn_configname;
+ "log-append /var/log/leap/openvpn_${proto}.log":
+ key => 'log-append',
+ value => "/var/log/leap/openvpn_${proto}.log",
+ server => $openvpn_configname;
+ }
+
+ # register openvpn services at systemd on nodes newer than wheezy
+ # see https://leap.se/code/issues/7798
+ case $::operatingsystemrelease {
+ /^7.*/: { }
+ default: {
+ exec { "enable_systemd_${openvpn_configname}":
+ refreshonly => true,
+ command => "/bin/systemctl enable openvpn@${openvpn_configname}",
+ subscribe => File["/etc/openvpn/${openvpn_configname}.conf"],
+ notify => Service["openvpn@${openvpn_configname}"];
+ }
+ service { "openvpn@${openvpn_configname}":
+ ensure => running
+ }
+ }
+ }
+}
diff --git a/puppet/modules/site_openvpn/templates/add_gateway_ips.sh.erb b/puppet/modules/site_openvpn/templates/add_gateway_ips.sh.erb
new file mode 100644
index 00000000..e76b756b
--- /dev/null
+++ b/puppet/modules/site_openvpn/templates/add_gateway_ips.sh.erb
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+ip addr show dev <%= scope.lookupvar('site_config::params::interface') %> | grep -q <%= @openvpn_gateway_address %>/<%= @primary_netmask %> ||
+ ip addr add <%= @openvpn_gateway_address %>/<%= @primary_netmask %> dev <%= scope.lookupvar('site_config::params::interface') %>
+
+<% if @openvpn_second_gateway_address %>
+ip addr show dev <%= scope.lookupvar('site_config::params::interface') %> | grep -q <%= @openvpn_second_gateway_address %>/<%= @primary_netmask %> ||
+ ip addr add <%= @openvpn_second_gateway_address %>/<%= @primary_netmask %> dev <%= scope.lookupvar('site_config::params::interface') %>
+<% end %>
+
+/bin/echo 1 > /proc/sys/net/ipv4/ip_forward
diff --git a/puppet/modules/site_postfix/files/checks/received_anon b/puppet/modules/site_postfix/files/checks/received_anon
new file mode 100644
index 00000000..9de25e63
--- /dev/null
+++ b/puppet/modules/site_postfix/files/checks/received_anon
@@ -0,0 +1,2 @@
+/^Received: from (.* \([-._[:alnum:]]+ \[[.[:digit:]]{7,15}\]\))([[:space:]]+).*(\(using [.[:alnum:]]+ with cipher [-A-Z0-9]+ \([0-9]+\/[0-9]+ bits\)\))[[:space:]]+\(Client CN "([-._@[:alnum:]]+)", Issuer "[[:print:]]+" \(verified OK\)\)[[:space:]]+by ([.[:alnum:]]+) \(([^)]+)\) with (E?SMTPS?A?) id ([A-F[:digit:]]+).*/
+ REPLACE Received: from [127.0.0.1] (localhost [127.0.0.1])${2}${3}${2}(Authenticated sender: $4)${2}with $7 id $8
diff --git a/puppet/modules/site_postfix/manifests/debug.pp b/puppet/modules/site_postfix/manifests/debug.pp
new file mode 100644
index 00000000..f370d166
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/debug.pp
@@ -0,0 +1,9 @@
+class site_postfix::debug {
+
+ postfix::config {
+ 'debug_peer_list': value => '127.0.0.1';
+ 'debug_peer_level': value => '1';
+ 'smtpd_tls_loglevel': value => '1';
+ }
+
+}
diff --git a/puppet/modules/site_postfix/manifests/mx.pp b/puppet/modules/site_postfix/manifests/mx.pp
new file mode 100644
index 00000000..c269946b
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx.pp
@@ -0,0 +1,152 @@
+#
+# configure mx node
+#
+class site_postfix::mx {
+
+ $domain_hash = hiera('domain')
+ $domain = $domain_hash['full_suffix']
+ $host_domain = $domain_hash['full']
+ $cert_name = hiera('name')
+ $mynetworks = join(hiera('mynetworks', ''), ' ')
+ $rbls = suffix(prefix(hiera('rbls', []), 'reject_rbl_client '), ',')
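+  # e.g. a hypothetical rbls entry of ['dnsbl.example.org'] becomes
+  # 'reject_rbl_client dnsbl.example.org,' and is used in
+  # smtpd_client_restrictions (see site_postfix::mx::smtpd_checks)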
+
+ $root_mail_recipient = hiera('contacts')
+ $postfix_smtp_listen = 'all'
+ $postfix_use_postscreen = 'yes'
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::client_ca::ca
+ include site_config::x509::client_ca::key
+
+ postfix::config {
+ 'mynetworks':
+ value => "127.0.0.0/8 [::1]/128 [fe80::]/64 ${mynetworks}";
+ # Note: mydestination should not include @domain, because this is
+ # used in virtual alias maps.
+ 'mydestination':
+ value => "\$myorigin, localhost, localhost.\$mydomain";
+ 'myhostname':
+ value => $host_domain;
+ 'mailbox_size_limit':
+ value => '0';
+ 'home_mailbox':
+ value => '';
+ 'virtual_mailbox_domains':
+ value => 'deliver.local';
+ 'virtual_mailbox_base':
+ value => '/var/mail/leap-mx';
+ 'virtual_mailbox_maps':
+ value => 'static:Maildir/';
+ # Note: virtual-aliases map will take precedence over leap-mx
+ # lookup (tcp:localhost)
+ 'virtual_alias_maps':
+ value => 'hash:/etc/postfix/virtual-aliases tcp:localhost:4242';
+ 'luser_relay':
+ value => '';
+    # uid and gid are set to an arbitrary hard-coded value here; this
+    # must match the 'leap-mx' user/group
+ 'virtual_uid_maps':
+ value => 'static:42424';
+ 'virtual_gid_maps':
+ value => 'static:42424';
+ # the two following configs are needed for matching user's client cert
+ # fingerprints to enable relaying (#3634). Satellites do not have
+ # these configured.
+ 'smtpd_tls_fingerprint_digest':
+ value => 'sha1';
+ 'relay_clientcerts':
+ value => 'tcp:localhost:2424';
+ # Note: we are setting this here, instead of in site_postfix::mx::smtp_tls
+ # because the satellites need to have a different value
+ 'smtp_tls_security_level':
+ value => 'may';
+ # reject inbound mail to system users
+ # see https://leap.se/code/issues/6829
+ # this blocks *only* mails to system users, that don't appear in the
+ # alias map
+ 'local_recipient_maps':
+ value => '$alias_maps';
+ # setup clamav and opendkim on smtpd
+ 'smtpd_milters':
+ value => 'unix:/run/clamav/milter.ctl,inet:localhost:8891';
+ # setup opendkim for smtp (non-smtpd) outgoing mail
+ 'non_smtpd_milters':
+ value => 'inet:localhost:8891';
+ 'milter_default_action':
+ value => 'accept';
+    # Make sure that the right values are set; these could have been set to
+    # different things at install time, depending on the preseed or debconf
+    # options selected (see #7478)
+ 'relay_transport':
+ value => 'relay';
+ 'default_transport':
+ value => 'smtp';
+ 'mailbox_command':
+ value => '';
+ 'header_checks':
+ value => '';
+ 'postscreen_access_list':
+ value => 'permit_mynetworks';
+ 'postscreen_greet_action':
+ value => 'enforce';
+ }
+
+  # Make sure that the cleanup service is not chrooted, otherwise it cannot
+ # access the opendkim milter socket (#8020)
+ exec { 'unset_cleanup_chroot':
+ command => '/usr/sbin/postconf -F "cleanup/unix/chroot=n"',
+ onlyif => '/usr/sbin/postconf -h -F "cleanup/unix/chroot" | egrep -q ^n',
+ notify => Service['postfix'],
+ require => File['/etc/postfix/master.cf']
+ }
+
+ include ::site_postfix::mx::smtpd_checks
+ include ::site_postfix::mx::checks
+ include ::site_postfix::mx::smtp_tls
+ include ::site_postfix::mx::smtpd_tls
+ include ::site_postfix::mx::static_aliases
+ include ::site_postfix::mx::rewrite_openpgp_header
+ include ::site_postfix::mx::received_anon
+ include ::clamav
+ include ::opendkim
+ include ::postfwd
+
+ # greater verbosity for debugging, take out for production
+ #include site_postfix::debug
+
+ case $::operatingsystemrelease {
+ /^7.*/: {
+ $smtpd_relay_restrictions=''
+ }
+ default: {
+ $smtpd_relay_restrictions=" -o smtpd_relay_restrictions=\$smtps_relay_restrictions\n"
+ }
+ }
+
+ $mastercf_tail = "
+smtps inet n - - - - smtpd
+ -o smtpd_tls_wrappermode=yes
+ -o smtpd_tls_security_level=encrypt
+ -o tls_preempt_cipherlist=yes
+${smtpd_relay_restrictions} -o smtpd_recipient_restrictions=\$smtps_recipient_restrictions
+ -o smtpd_helo_restrictions=\$smtps_helo_restrictions
+ -o smtpd_client_restrictions=
+ -o cleanup_service_name=clean_smtps
+clean_smtps unix n - n - 0 cleanup
+ -o header_checks=pcre:/etc/postfix/checks/rewrite_openpgp_headers,pcre:/etc/postfix/checks/received_anon"
+
+ class { 'postfix':
+ preseed => true,
+ root_mail_recipient => $root_mail_recipient,
+ smtp_listen => 'all',
+ mastercf_tail => $mastercf_tail,
+ use_postscreen => 'yes',
+ require => [
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Client_ca::Key'],
+ Class['Site_config::X509::Client_ca::Ca'],
+ User['leap-mx'] ]
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/checks.pp b/puppet/modules/site_postfix/manifests/mx/checks.pp
new file mode 100644
index 00000000..f406ad34
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/checks.pp
@@ -0,0 +1,23 @@
+class site_postfix::mx::checks {
+
+ file {
+ '/etc/postfix/checks':
+ ensure => directory,
+ mode => '0755',
+ owner => root,
+ group => postfix,
+ require => Package['postfix'];
+
+ '/etc/postfix/checks/helo_checks':
+ content => template('site_postfix/checks/helo_access.erb'),
+ mode => '0644',
+ owner => root,
+ group => root;
+ }
+
+ exec {
+ '/usr/sbin/postmap /etc/postfix/checks/helo_checks':
+ refreshonly => true,
+ subscribe => File['/etc/postfix/checks/helo_checks'];
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/received_anon.pp b/puppet/modules/site_postfix/manifests/mx/received_anon.pp
new file mode 100644
index 00000000..51ba3faa
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/received_anon.pp
@@ -0,0 +1,13 @@
+# Anonymize the user's home IP from the email headers (Feature #3866)
+class site_postfix::mx::received_anon {
+
+ package { 'postfix-pcre': ensure => installed, require => Package['postfix'] }
+
+ file { '/etc/postfix/checks/received_anon':
+ source => 'puppet:///modules/site_postfix/checks/received_anon',
+ mode => '0644',
+ owner => root,
+ group => root,
+ notify => Service['postfix']
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/rewrite_openpgp_header.pp b/puppet/modules/site_postfix/manifests/mx/rewrite_openpgp_header.pp
new file mode 100644
index 00000000..71f945b8
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/rewrite_openpgp_header.pp
@@ -0,0 +1,11 @@
+class site_postfix::mx::rewrite_openpgp_header {
+ $mx = hiera('mx')
+ $correct_domain = $mx['key_lookup_domain']
+
+ file { '/etc/postfix/checks/rewrite_openpgp_headers':
+ content => template('site_postfix/checks/rewrite_openpgp_headers.erb'),
+ mode => '0644',
+ owner => root,
+ group => root;
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/smtp_auth.pp b/puppet/modules/site_postfix/manifests/mx/smtp_auth.pp
new file mode 100644
index 00000000..afa70527
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/smtp_auth.pp
@@ -0,0 +1,6 @@
+class site_postfix::mx::smtp_auth {
+
+ postfix::config {
+ 'smtpd_tls_ask_ccert': value => 'yes';
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/smtp_tls.pp b/puppet/modules/site_postfix/manifests/mx/smtp_tls.pp
new file mode 100644
index 00000000..c93c3ba2
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/smtp_tls.pp
@@ -0,0 +1,43 @@
+# configure smtp tls
+class site_postfix::mx::smtp_tls {
+
+ include site_config::x509::ca
+ include x509::variables
+ $cert_name = hiera('name')
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+ include site_config::x509::cert
+ include site_config::x509::key
+
+ # smtp TLS
+ postfix::config {
+ 'smtp_use_tls': value => 'yes';
+ 'smtp_tls_CApath': value => '/etc/ssl/certs/';
+ 'smtp_tls_CAfile': value => $ca_path;
+ 'smtp_tls_cert_file': value => $cert_path;
+ 'smtp_tls_key_file': value => $key_path;
+ 'smtp_tls_loglevel': value => '1';
+ 'smtp_tls_exclude_ciphers':
+ value => 'aNULL, MD5, DES';
+ # upstream default is md5 (since 2.5 and older used it), we force sha1
+ 'smtp_tls_fingerprint_digest':
+ value => 'sha1';
+ 'smtp_tls_session_cache_database':
+ value => "btree:\${data_directory}/smtp_cache";
+ # see issue #4011
+ 'smtp_tls_protocols':
+ value => '!SSLv2, !SSLv3';
+ 'smtp_tls_mandatory_protocols':
+ value => '!SSLv2, !SSLv3';
+ 'tls_ssl_options':
+ value => 'NO_COMPRESSION';
+    # We can switch between the different postfix-internal cipher lists by
+    # using smtpd_tls_ciphers. For server-to-server connections we leave this
+    # at its default: encryption there is opportunistic, and many mail servers
+    # only support outdated protocols and ciphers, so if we are too strict
+    # about required ciphers, connections *will* fall back to plain text.
+    # Bad ciphers are still better than plain-text transmission.
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/smtpd_checks.pp b/puppet/modules/site_postfix/manifests/mx/smtpd_checks.pp
new file mode 100644
index 00000000..291d7ee4
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/smtpd_checks.pp
@@ -0,0 +1,36 @@
+# smtpd checks for incoming mail on smtp port 25 and
+# mail sent via the bitmask client using smtps port 465
+class site_postfix::mx::smtpd_checks {
+
+ postfix::config {
+ 'smtpd_helo_required':
+ value => 'yes';
+ 'checks_dir':
+ value => '$config_directory/checks';
+ 'smtpd_client_restrictions':
+ value => "permit_mynetworks,${site_postfix::mx::rbls},permit";
+ 'smtpd_data_restrictions':
+ value => 'permit_mynetworks, reject_unauth_pipelining, permit';
+ 'smtpd_delay_reject':
+ value => 'yes';
+ 'smtpd_helo_restrictions':
+ value => 'permit_mynetworks, reject_invalid_helo_hostname, reject_non_fqdn_helo_hostname, check_helo_access hash:$checks_dir/helo_checks, permit';
+ 'smtpd_recipient_restrictions':
+ value => 'reject_unknown_recipient_domain, permit_mynetworks, check_recipient_access tcp:localhost:2244, reject_unauth_destination, permit';
+
+    # permit_tls_clientcerts will look up client cert fingerprints via the tcp
+    # lookup on port 2424 (based on what is configured in the relay_clientcerts
+    # parameter, see the site_postfix::mx postfix::config resource) to determine
+ # if a client is allowed to relay mail through us. This enables us to
+ # disable a user by removing their valid client cert (#3634)
+ 'smtps_recipient_restrictions':
+ value => 'permit_tls_clientcerts, check_recipient_access tcp:localhost:2244, reject_unauth_destination, permit';
+ 'smtps_relay_restrictions':
+ value => 'permit_mynetworks, permit_tls_clientcerts, defer_unauth_destination';
+ 'smtps_helo_restrictions':
+ value => 'permit_mynetworks, check_helo_access hash:$checks_dir/helo_checks, permit';
+ 'smtpd_sender_restrictions':
+ value => 'permit_mynetworks, reject_non_fqdn_sender, reject_unknown_sender_domain, permit';
+ }
+
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/smtpd_tls.pp b/puppet/modules/site_postfix/manifests/mx/smtpd_tls.pp
new file mode 100644
index 00000000..66297f55
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/smtpd_tls.pp
@@ -0,0 +1,69 @@
+# configure smtpd tls
+class site_postfix::mx::smtpd_tls {
+
+ include x509::variables
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::client_ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+
+ postfix::config {
+ 'smtpd_use_tls': value => 'yes';
+ 'smtpd_tls_CAfile': value => $ca_path;
+ 'smtpd_tls_cert_file': value => $cert_path;
+ 'smtpd_tls_key_file': value => $key_path;
+ 'smtpd_tls_ask_ccert': value => 'yes';
+ 'smtpd_tls_received_header':
+ value => 'yes';
+ 'smtpd_tls_security_level':
+ value => 'may';
+ 'smtpd_tls_eecdh_grade':
+ value => 'ultra';
+ 'smtpd_tls_session_cache_database':
+ value => "btree:\${data_directory}/smtpd_scache";
+ # see issue #4011
+ 'smtpd_tls_mandatory_protocols':
+ value => '!SSLv2, !SSLv3';
+ 'smtpd_tls_protocols':
+ value => '!SSLv2, !SSLv3';
+ # For connections to MUAs, TLS is mandatory and the ciphersuite is modified.
+ # MX and SMTP client configuration
+ 'smtpd_tls_mandatory_ciphers':
+ value => 'high';
+ 'tls_high_cipherlist':
+ value => 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!3DES:!RC4:!MD5:!PSK!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';
+ }
+
+ # Setup DH parameters
+  # Instead of using the dh parameters created by the leap cli, it is more
+  # secure to generate, on each machine, new parameter files that will only be
+  # used for postfix
+
+ include site_config::packages::gnutls
+
+  # Note: the parameter name suggests 1024-bit dh, but we are generating 2048-bit
+  # dh parameters. Neither Postfix nor OpenSSL actually cares about the size of
+  # the prime in "smtpd_tls_dh1024_param_file", so it can safely be 2048 bits
+
+ exec { 'certtool-postfix-gendh':
+ command => 'certtool --generate-dh-params --bits 2048 --outfile /etc/postfix/smtpd_tls_dh_param.pem',
+ user => root,
+ group => root,
+ creates => '/etc/postfix/smtpd_tls_dh_param.pem',
+ require => [ Package['gnutls-bin'], Package['postfix'] ]
+ }
+
+ # Make sure the dh params file has correct ownership and mode
+ file {
+ '/etc/postfix/smtpd_tls_dh_param.pem':
+ owner => root,
+ group => root,
+ mode => '0600',
+ require => Exec['certtool-postfix-gendh'];
+ }
+
+ postfix::config { 'smtpd_tls_dh1024_param_file':
+ value => '/etc/postfix/smtpd_tls_dh_param.pem',
+ require => File['/etc/postfix/smtpd_tls_dh_param.pem']
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/static_aliases.pp b/puppet/modules/site_postfix/manifests/mx/static_aliases.pp
new file mode 100644
index 00000000..9cd7ca02
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/static_aliases.pp
@@ -0,0 +1,88 @@
+#
+# Defines static, hard coded aliases that are not in the database.
+# These aliases take precedence over the database aliases.
+#
+# There are three classes of reserved names:
+#
+# (1) forbidden_usernames:
+# Some usernames are forbidden and cannot be registered.
+# this is defined in node property webapp.forbidden_usernames
+# This is enforced by the webapp.
+#
+# (2) public aliases:
+# Some aliases are for root, and are publicly exposed so that anyone
+# can deliver mail to them. For example, postmaster.
+# These are implemented in the virtual alias map, which takes
+# precedence over the local alias map.
+#
+# (3) local aliases:
+# Some aliases are only available locally: mail can be delivered
+# to the alias if the mail originates from the local host, or is
+# hostname qualified, but otherwise it will be rejected.
+# These are implemented in the local alias map.
+#
+# The alias for local 'root' is defined elsewhere. In this file, we
+# define the virtual 'root@domain' (which can be overwritten by
+# defining an entry for root in node property mx.aliases).
+#
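+# For example, with the defaults below and a (hypothetical) provider domain of
+# example.org, the rendered virtual-aliases map would contain entries such as:
+#
+#   postmaster@example.org    root@example.org
+#   abuse@example.org         postmaster@example.org
+#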
+
+class site_postfix::mx::static_aliases {
+
+ $mx = hiera('mx')
+ $root_recipients = hiera('contacts')
+
+ #
+ # LOCAL ALIASES
+ #
+
+ # NOTE: if you remove one of these, they will still appear in the
+ # /etc/aliases file
+ $local_aliases = [
+ 'admin', 'administrator', 'bin', 'cron', 'games', 'ftp', 'lp', 'maildrop',
+ 'mysql', 'news', 'nobody', 'noc', 'postgresql', 'ssladmin', 'sys',
+ 'usenet', 'uucp', 'www', 'www-data', 'leap-mx'
+ ]
+
+ postfix::mailalias {
+ $local_aliases:
+ ensure => present,
+ recipient => 'root'
+ }
+
+ #
+ # PUBLIC ALIASES
+ #
+
+ $public_aliases = $mx['aliases']
+
+ $default_public_aliases = {
+ 'root' => $root_recipients,
+ 'abuse' => 'postmaster',
+ 'arin-admin' => 'root',
+ 'certmaster' => 'hostmaster',
+ 'domainadmin' => 'hostmaster',
+ 'hostmaster' => 'root',
+ 'mailer-daemon' => 'postmaster',
+ 'postmaster' => 'root',
+ 'security' => 'root',
+ 'webmaster' => 'hostmaster',
+ }
+
+ $aliases = merge($default_public_aliases, $public_aliases)
+
+ exec { 'postmap_virtual_aliases':
+ command => '/usr/sbin/postmap /etc/postfix/virtual-aliases',
+ refreshonly => true,
+ user => root,
+ group => root,
+ require => Package['postfix'],
+ subscribe => File['/etc/postfix/virtual-aliases']
+ }
+ file { '/etc/postfix/virtual-aliases':
+ content => template('site_postfix/virtual-aliases.erb'),
+ owner => root,
+ group => root,
+ mode => '0600',
+ require => Package['postfix']
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/satellite.pp b/puppet/modules/site_postfix/manifests/satellite.pp
new file mode 100644
index 00000000..5725e6b8
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/satellite.pp
@@ -0,0 +1,47 @@
+class site_postfix::satellite {
+
+ $root_mail_recipient = hiera ('contacts')
+ $mail = hiera ('mail')
+ $relayhost = $mail['smarthost']
+ $cert_name = hiera('name')
+
+ class { '::postfix::satellite':
+ relayhost => $relayhost,
+ root_mail_recipient => $root_mail_recipient
+ }
+
+  # There are special conditions under which satellite hosts will not be able
+  # to contact their relayhost:
+ #
+  # 1. they are on openstack/amazon/PC and are on the same cluster as the relay
+  # host; the MX lookup for the relay host will return the public IP, which
+  # cannot be contacted
+ #
+  # 2. When a domain is used that is not in DNS because it is internal,
+  # a testing domain, etc. (e.g. a .local domain cannot be looked up in DNS)
+ #
+  # To resolve this, so the satellite can contact the relayhost, we set
+  # http://www.postfix.org/postconf.5.html#smtp_host_lookup to 'native',
+  # which makes the lookup use the native naming service (nsswitch.conf).
+  # That typically defaults to 'files, dns', so /etc/hosts is consulted first
+  # and DNS is only used if no entry exists there.
+ #
+  # NOTE: this makes it impossible to enable DANE support through DNSSEC
+  # with http://www.postfix.org/postconf.5.html#smtp_dns_support_level - but
+  # that parameter is not available until postfix 2.11. If this ends up being
+  # important, we could also make this an optional parameter for providers
+  # without dns / local domains
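+  #
+  # For illustration (hypothetical addresses): an /etc/hosts entry such as
+  #   10.0.1.2  mx1.example.org
+  # on the satellite would then let it reach its relayhost without a DNS lookup.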
+
+ postfix::config {
+ 'smtp_host_lookup':
+ value => 'native';
+
+ # Note: we are setting this here, instead of in site_postfix::mx::smtp_tls
+ # because the mx server has to have a different value
+ 'smtp_tls_security_level':
+ value => 'encrypt';
+ }
+
+ include site_postfix::mx::smtp_tls
+
+}
diff --git a/puppet/modules/site_postfix/templates/checks/helo_access.erb b/puppet/modules/site_postfix/templates/checks/helo_access.erb
new file mode 100644
index 00000000..bac2c45a
--- /dev/null
+++ b/puppet/modules/site_postfix/templates/checks/helo_access.erb
@@ -0,0 +1,21 @@
+# THIS FILE IS MANAGED BY PUPPET
+# To make changes to this file, please edit your platform directory under
+# puppet/modules/site_postfix/templates/checks/helo_access.erb and then deploy
+
+# The format of this file is the HELO/EHLO domain followed by an action.
+# The action could be OK to allow it, REJECT to reject it, or a custom
+# status code and message. Any lines that are prefixed by an octothorpe (#)
+# will be considered comments.
+
+# Some examples:
+#
+# Reject anyone that HELO's with foobar:
+# foobar REJECT
+#
+# Allow the switches to skip this check:
+# switch1 OK
+# switch2 OK
+
+# Reject anybody that HELO's as being in our own domain(s)
+# anyone who identifies themselves as us is a virus/spammer
+<%= @domain %> 554 You are not in domain <%= @domain %>
diff --git a/puppet/modules/site_postfix/templates/checks/rewrite_openpgp_headers.erb b/puppet/modules/site_postfix/templates/checks/rewrite_openpgp_headers.erb
new file mode 100644
index 00000000..7af14f7d
--- /dev/null
+++ b/puppet/modules/site_postfix/templates/checks/rewrite_openpgp_headers.erb
@@ -0,0 +1,13 @@
+# THIS FILE IS MANAGED BY PUPPET
+#
+# This will replace the OpenPGP header that the client adds, because it is
+# sometimes incorrect (due to the client not always knowing what the proper URL
+# is for the webapp).
+# e.g. This will rewrite this header:
+# OpenPGP: id=4C0E01CD50E2F653; url="https://leap.se/key/elijah"; preference="signencrypt
+# with this replacement:
+# OpenPGP: id=4C0E01CD50E2F653; url="https://user.leap.se/key/elijah"; preference="signencrypt
+#
+# Note: whitespace in the pattern is represented by [[:space:]] to avoid these warnings from postmap:
+# "record is in "key: value" format; is this an alias file?" and "duplicate entry"
+/^(OpenPGP:[[:space:]]id=[[:alnum:]]+;[[:space:]]url="https:\/\/)<%= @domain %>(\/key\/[[:alpha:]]+";.*)/i REPLACE ${1}<%= @correct_domain %>${2}
diff --git a/puppet/modules/site_postfix/templates/virtual-aliases.erb b/puppet/modules/site_postfix/templates/virtual-aliases.erb
new file mode 100644
index 00000000..8373de97
--- /dev/null
+++ b/puppet/modules/site_postfix/templates/virtual-aliases.erb
@@ -0,0 +1,21 @@
+#
+# This file is managed by puppet.
+#
+# These virtual aliases take precedence over all other aliases.
+#
+
+#
+# enable these virtual domains:
+#
+<%= @domain %> enabled
+<%- @aliases.keys.map {|addr| addr.split('@')[1] }.compact.sort.uniq.each do |virt_domain| -%>
+<%= virt_domain %> enabled
+<%- end %>
+
+#
+# virtual aliases:
+#
+<%- @aliases.keys.sort.each do |from| -%>
+<%- full_address = from =~ /@/ ? from : from + "@" + @domain -%>
+<%= full_address %> <%= [@aliases[from]].flatten.map{|a| a =~ /@/ ? a : a + "@" + @domain}.join(', ') %>
+<%- end -%>
diff --git a/puppet/modules/site_rsyslog/templates/client.conf.erb b/puppet/modules/site_rsyslog/templates/client.conf.erb
new file mode 100644
index 00000000..7f94759d
--- /dev/null
+++ b/puppet/modules/site_rsyslog/templates/client.conf.erb
@@ -0,0 +1,134 @@
+
+# An "In-Memory Queue" is created for remote logging.
+$WorkDirectory <%= scope.lookupvar('rsyslog::spool_dir') -%> # where to place spool files
+$ActionQueueFileName queue # unique name prefix for spool files
+$ActionQueueMaxDiskSpace <%= scope.lookupvar('rsyslog::client::spool_size') -%> # spool space limit (use as much as possible)
+$ActionQueueSaveOnShutdown on # save messages to disk on shutdown
+$ActionQueueType LinkedList # run asynchronously
+$ActionResumeRetryCount -1 # infinite retries if host is down
+<% if scope.lookupvar('rsyslog::client::log_templates') and ! scope.lookupvar('rsyslog::client::log_templates').empty?-%>
+
+# Define custom logging templates
+<% scope.lookupvar('rsyslog::client::log_templates').flatten.compact.each do |log_template| -%>
+$template <%= log_template['name'] %>,"<%= log_template['template'] %>"
+<% end -%>
+<% end -%>
+<% if scope.lookupvar('rsyslog::client::actionfiletemplate') -%>
+
+# Using specified format for default logging format:
+$ActionFileDefaultTemplate <%= scope.lookupvar('rsyslog::client::actionfiletemplate') %>
+<% else -%>
+
+# Using default format for default logging format:
+$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat
+<% end -%>
+<% if scope.lookupvar('rsyslog::client::ssl') -%>
+
+# Setup SSL connection.
+# CA/Cert
+$DefaultNetStreamDriverCAFile <%= scope.lookupvar('rsyslog::client::ssl_ca') %>
+
+# Connection settings.
+$DefaultNetstreamDriver gtls
+$ActionSendStreamDriverMode 1
+$ActionSendStreamDriverAuthMode anon
+<% end -%>
+<% if scope.lookupvar('rsyslog::client::remote_servers') -%>
+
+<% scope.lookupvar('rsyslog::client::remote_servers').flatten.compact.each do |server| -%>
+<% if server['pattern'] and server['pattern'] != ''-%>
+<% pattern = server['pattern'] -%>
+<% else -%>
+<% pattern = '*.*' -%>
+<% end -%>
+<% if server['protocol'] == 'TCP' or server['protocol'] == 'tcp'-%>
+<% protocol = '@@' -%>
+<% protocol_type = 'TCP' -%>
+<% else -%>
+<% protocol = '@' -%>
+<% protocol_type = 'UDP' -%>
+<% end -%>
+<% if server['host'] and server['host'] != ''-%>
+<% host = server['host'] -%>
+<% else -%>
+<% host = 'localhost' -%>
+<% end -%>
+<% if server['port'] and server['port'] != ''-%>
+<% port = server['port'] -%>
+<% else -%>
+<% port = '514' -%>
+<% end -%>
+<% if server['format'] -%>
+<% format = ";#{server['format']}" -%>
+<% format_type = server['format'] -%>
+<% else -%>
+<% format = '' -%>
+<% format_type = 'the default' -%>
+<% end -%>
+# Sending logs that match <%= pattern %> to <%= host %> via <%= protocol_type %> on <%= port %> using <%=format_type %> format.
+<%= pattern %> <%= protocol %><%= host %>:<%= port %><%= format %>
+<% end -%>
+<% elsif scope.lookupvar('rsyslog::client::log_remote') -%>
+
+# Log to remote syslog server using <%= scope.lookupvar('rsyslog::client::remote_type') %>
+<% if scope.lookupvar('rsyslog::client::remote_type') == 'tcp' -%>
+*.* @@<%= scope.lookupvar('rsyslog::client::server') -%>:<%= scope.lookupvar('rsyslog::client::port') -%>;<%= scope.lookupvar('remote_forward_format') -%>
+<% else -%>
+*.* @<%= scope.lookupvar('rsyslog::client::server') -%>:<%= scope.lookupvar('rsyslog::client::port') -%>;<%= scope.lookupvar('remote_forward_format') -%>
+<% end -%>
+<% end -%>
+<% if scope.lookupvar('rsyslog::client::log_auth_local') or scope.lookupvar('rsyslog::client::log_local') -%>
+
+# Logging locally.
+
+<% if scope.lookupvar('rsyslog::log_style') == 'debian' -%>
+# Log auth messages locally
+.*;auth,authpriv.none;mail.none -/var/log/syslog
+<% elsif scope.lookupvar('rsyslog::log_style') == 'redhat' -%>
+# Log auth messages locally
+auth,authpriv.* /var/log/secure
+<% end -%>
+<% end -%>
+<% if scope.lookupvar('rsyslog::client::log_local') -%>
+<% if scope.lookupvar('rsyslog::log_style') == 'debian' -%>
+# First some standard log files. Log by facility.
+#
+*.*;auth,authpriv.none -/var/log/syslog
+cron.* /var/log/cron.log
+daemon.* -/var/log/daemon.log
+kern.* -/var/log/kern.log
+mail.* -/var/log/mail.log
+user.* -/var/log/user.log
+
+#
+# Some "catch-all" log files.
+#
+*.=debug;\
+ auth,authpriv.none;\
+ news.none;mail.none -/var/log/debug
+*.=info;*.=notice;*.=warn;\
+ auth,authpriv.none;\
+ cron,daemon.none;\
+ mail,news.none -/var/log/messages
+
+# Log anything (except mail) of level info or higher.
+# Don't log private authentication messages!
+*.info;mail.none;authpriv.none;cron.none /var/log/messages
+
+# Log cron stuff
+cron.* /var/log/cron
+
+# Everybody gets emergency messages
+<% if @rsyslog_version and @rsyslog_version.split('.')[0].to_i >= 8 -%>
+*.emerg :omusrmsg:*
+<% else -%>
+*.emerg *
+<% end -%>
+
+# Save boot messages also to boot.log
+local7.* -/var/log/boot.log
+<% end -%>
+<% end -%>
+
+
+
diff --git a/puppet/modules/site_shorewall/files/Debian/shorewall.service b/puppet/modules/site_shorewall/files/Debian/shorewall.service
new file mode 100644
index 00000000..ec250ef1
--- /dev/null
+++ b/puppet/modules/site_shorewall/files/Debian/shorewall.service
@@ -0,0 +1,23 @@
+#
+# The Shoreline Firewall (Shorewall) Packet Filtering Firewall
+#
+# Copyright 2011 Jonathan Underwood <jonathan.underwood@gmail.com>
+# Copyright 2015 Tom Eastep <teastep@shorewall.net>
+#
+[Unit]
+Description=Shorewall IPv4 firewall
+Wants=network-online.target
+After=network-online.target
+Conflicts=iptables.service firewalld.service
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+EnvironmentFile=-/etc/default/shorewall
+StandardOutput=syslog
+ExecStart=/sbin/shorewall $OPTIONS start $STARTOPTIONS
+ExecStop=/sbin/shorewall $OPTIONS stop
+ExecReload=/sbin/shorewall $OPTIONS reload $RELOADOPTIONS
+
+[Install]
+WantedBy=basic.target
diff --git a/puppet/modules/site_shorewall/manifests/defaults.pp b/puppet/modules/site_shorewall/manifests/defaults.pp
new file mode 100644
index 00000000..ceb17868
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/defaults.pp
@@ -0,0 +1,86 @@
+class site_shorewall::defaults {
+
+ include shorewall
+ include site_config::params
+
+ # be safe for development
+ # if ( $::site_config::params::environment == 'local' ) {
+ # $shorewall_startup='0'
+ # }
+
+ # If you want logging:
+ shorewall::params {
+ 'LOG': value => 'debug';
+ }
+
+ shorewall::zone {'net': type => 'ipv4'; }
+
+ # define interfaces
+ shorewall::interface { $site_config::params::interface:
+ zone => 'net',
+ options => 'tcpflags,blacklist,nosmurfs';
+ }
+
+ shorewall::policy {
+ 'fw-to-all':
+ sourcezone => 'fw',
+ destinationzone => 'all',
+ policy => 'ACCEPT',
+ order => 100;
+ 'all-to-all':
+ sourcezone => 'all',
+ destinationzone => 'all',
+ policy => 'DROP',
+ order => 200;
+ }
+
+ shorewall::rule {
+ # ping party
+ 'all2all-ping':
+ source => 'all',
+ destination => 'all',
+ action => 'Ping(ACCEPT)',
+ order => 200;
+ }
+
+ package { 'shorewall-init':
+ ensure => installed
+ }
+
+ include ::systemd
+ file { '/etc/systemd/system/shorewall.service':
+ ensure => file,
+ owner => 'root',
+ group => 'root',
+ mode => '0644',
+ source => 'puppet:///modules/site_shorewall/Debian/shorewall.service',
+ require => Package['shorewall'],
+ notify => Service['shorewall'],
+ } ~>
+ Exec['systemctl-daemon-reload']
+
+ augeas {
+ # stop instead of clear firewall on shutdown
+ 'shorewall_SAFESTOP':
+ changes => 'set /files/etc/shorewall/shorewall.conf/SAFESTOP Yes',
+ lens => 'Shellvars.lns',
+ incl => '/etc/shorewall/shorewall.conf',
+ require => Package['shorewall'],
+ notify => Service['shorewall'];
+ # require that the interface exist
+ 'shorewall_REQUIRE_INTERFACE':
+ changes => 'set /files/etc/shorewall/shorewall.conf/REQUIRE_INTERFACE Yes',
+ lens => 'Shellvars.lns',
+ incl => '/etc/shorewall/shorewall.conf',
+ require => Package['shorewall'],
+ notify => Service['shorewall'];
+ # configure shorewall-init
+ 'shorewall-init':
+ changes => 'set /files/etc/default/shorewall-init/PRODUCTS shorewall',
+ lens => 'Shellvars.lns',
+ incl => '/etc/default/shorewall-init',
+ require => [ Package['shorewall-init'], Service['shorewall'] ]
+ }
+
+ include site_shorewall::sshd
+}
diff --git a/puppet/modules/site_shorewall/manifests/dnat.pp b/puppet/modules/site_shorewall/manifests/dnat.pp
new file mode 100644
index 00000000..a73294cc
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/dnat.pp
@@ -0,0 +1,19 @@
+define site_shorewall::dnat (
+ $source,
+ $destination,
+ $proto,
+ $destinationport,
+ $originaldest ) {
+
+
+ shorewall::rule {
+ "dnat_${name}_${destinationport}":
+ action => 'DNAT',
+ source => $source,
+ destination => $destination,
+ proto => $proto,
+ destinationport => $destinationport,
+ originaldest => $originaldest,
+ order => 200
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/dnat_rule.pp b/puppet/modules/site_shorewall/manifests/dnat_rule.pp
new file mode 100644
index 00000000..f9fbe950
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/dnat_rule.pp
@@ -0,0 +1,50 @@
+define site_shorewall::dnat_rule {
+
+ $port = $name
+ if $port != 1194 {
+ if $site_openvpn::openvpn_allow_unlimited {
+ shorewall::rule {
+ "dnat_tcp_port_${port}":
+ action => 'DNAT',
+ source => 'net',
+ destination => "\$FW:${site_openvpn::unlimited_gateway_address}:1194",
+ proto => 'tcp',
+ destinationport => $port,
+ originaldest => $site_openvpn::unlimited_gateway_address,
+ order => 100;
+ }
+ shorewall::rule {
+ "dnat_udp_port_${port}":
+ action => 'DNAT',
+ source => 'net',
+ destination => "\$FW:${site_openvpn::unlimited_gateway_address}:1194",
+ proto => 'udp',
+ destinationport => $port,
+ originaldest => $site_openvpn::unlimited_gateway_address,
+ order => 100;
+ }
+ }
+ if $site_openvpn::openvpn_allow_limited {
+ shorewall::rule {
+ "dnat_free_tcp_port_${port}":
+ action => 'DNAT',
+ source => 'net',
+ destination => "\$FW:${site_openvpn::limited_gateway_address}:1194",
+ proto => 'tcp',
+ destinationport => $port,
+ originaldest => $site_openvpn::unlimited_gateway_address,
+ order => 100;
+ }
+ shorewall::rule {
+ "dnat_free_udp_port_${port}":
+ action => 'DNAT',
+ source => 'net',
+ destination => "\$FW:${site_openvpn::limited_gateway_address}:1194",
+ proto => 'udp',
+ destinationport => $port,
+ originaldest => $site_openvpn::unlimited_gateway_address,
+ order => 100;
+ }
+ }
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/eip.pp b/puppet/modules/site_shorewall/manifests/eip.pp
new file mode 100644
index 00000000..8fbba658
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/eip.pp
@@ -0,0 +1,92 @@
+class site_shorewall::eip {
+
+ include site_shorewall::defaults
+ include site_config::params
+ include site_shorewall::ip_forward
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_eip':
+ content => "PARAM - - tcp 1194
+ PARAM - - udp 1194
+ ",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+ shorewall::interface {
+ 'tun0':
+ zone => 'eip',
+ options => 'tcpflags,blacklist,nosmurfs';
+ 'tun1':
+ zone => 'eip',
+ options => 'tcpflags,blacklist,nosmurfs';
+ 'tun2':
+ zone => 'eip',
+ options => 'tcpflags,blacklist,nosmurfs';
+ 'tun3':
+ zone => 'eip',
+ options => 'tcpflags,blacklist,nosmurfs';
+ }
+
+ shorewall::zone {
+ 'eip':
+ type => 'ipv4';
+ }
+
+ $interface = $site_config::params::interface
+
+ shorewall::masq {
+ "${interface}_unlimited_tcp":
+ interface => $interface,
+ source => "${site_openvpn::openvpn_unlimited_tcp_network_prefix}.0/${site_openvpn::openvpn_unlimited_tcp_cidr}";
+ "${interface}_unlimited_udp":
+ interface => $interface,
+ source => "${site_openvpn::openvpn_unlimited_udp_network_prefix}.0/${site_openvpn::openvpn_unlimited_udp_cidr}";
+ }
+ if ! $::ec2_instance_id {
+ shorewall::masq {
+ "${interface}_limited_tcp":
+ interface => $interface,
+ source => "${site_openvpn::openvpn_limited_tcp_network_prefix}.0/${site_openvpn::openvpn_limited_tcp_cidr}";
+ "${interface}_limited_udp":
+ interface => $interface,
+ source => "${site_openvpn::openvpn_limited_udp_network_prefix}.0/${site_openvpn::openvpn_limited_udp_cidr}";
+ }
+ }
+
+ shorewall::policy {
+ 'eip-to-all':
+ sourcezone => 'eip',
+ destinationzone => 'all',
+ policy => 'ACCEPT',
+ order => 100;
+ }
+
+ shorewall::rule {
+ 'net2fw-openvpn':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_eip(ACCEPT)',
+ order => 200;
+
+ 'block_eip_dns_udp':
+ action => 'REJECT',
+ source => 'eip',
+ destination => 'net',
+ proto => 'udp',
+ destinationport => 'domain',
+ order => 300;
+
+ 'block_eip_dns_tcp':
+ action => 'REJECT',
+ source => 'eip',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => 'domain',
+ order => 301;
+ }
+
+ # create dnat rule for each port
+ site_shorewall::dnat_rule { $site_openvpn::openvpn_ports: }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/ip_forward.pp b/puppet/modules/site_shorewall/manifests/ip_forward.pp
new file mode 100644
index 00000000..d53ee8a5
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/ip_forward.pp
@@ -0,0 +1,10 @@
+class site_shorewall::ip_forward {
+ include augeas
+ augeas { 'enable_ip_forwarding':
+ changes => 'set /files/etc/shorewall/shorewall.conf/IP_FORWARDING Yes',
+ lens => 'Shellvars.lns',
+ incl => '/etc/shorewall/shorewall.conf',
+ notify => Service[shorewall],
+ require => [ Class[augeas], Package[shorewall] ];
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/monitor.pp b/puppet/modules/site_shorewall/manifests/monitor.pp
new file mode 100644
index 00000000..f4ed4f7c
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/monitor.pp
@@ -0,0 +1,8 @@
+class site_shorewall::monitor {
+
+ include site_shorewall::defaults
+ include site_shorewall::service::http
+ include site_shorewall::service::https
+
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/mx.pp b/puppet/modules/site_shorewall/manifests/mx.pp
new file mode 100644
index 00000000..332f164e
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/mx.pp
@@ -0,0 +1,24 @@
+class site_shorewall::mx {
+
+ include site_shorewall::defaults
+
+ $smtpd_ports = '25,465,587'
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_mx':
+ content => "PARAM - - tcp ${smtpd_ports} ",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+
+ shorewall::rule {
+ 'net2fw-mx':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_mx(ACCEPT)',
+ order => 200;
+ }
+
+ include site_shorewall::service::smtp
+}
diff --git a/puppet/modules/site_shorewall/manifests/obfsproxy.pp b/puppet/modules/site_shorewall/manifests/obfsproxy.pp
new file mode 100644
index 00000000..75846705
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/obfsproxy.pp
@@ -0,0 +1,25 @@
+# configure shorewall for obfsproxy
+class site_shorewall::obfsproxy {
+
+ include site_shorewall::defaults
+
+ $obfsproxy = hiera('obfsproxy')
+ $scramblesuit = $obfsproxy['scramblesuit']
+ $scram_port = $scramblesuit['port']
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_obfsproxy':
+ content => "PARAM - - tcp ${scram_port} ",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+ shorewall::rule {
+ 'net2fw-obfs':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_obfsproxy(ACCEPT)',
+ order => 200;
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/service/http.pp b/puppet/modules/site_shorewall/manifests/service/http.pp
new file mode 100644
index 00000000..74b874d5
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/service/http.pp
@@ -0,0 +1,13 @@
+class site_shorewall::service::http {
+
+ include site_shorewall::defaults
+
+ shorewall::rule {
+ 'net2fw-http':
+ source => 'net',
+ destination => '$FW',
+ action => 'HTTP(ACCEPT)',
+ order => 200;
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/service/https.pp b/puppet/modules/site_shorewall/manifests/service/https.pp
new file mode 100644
index 00000000..4a8b119c
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/service/https.pp
@@ -0,0 +1,12 @@
+class site_shorewall::service::https {
+
+ include site_shorewall::defaults
+
+ shorewall::rule {
+ 'net2fw-https':
+ source => 'net',
+ destination => '$FW',
+ action => 'HTTPS(ACCEPT)',
+ order => 200;
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/service/smtp.pp b/puppet/modules/site_shorewall/manifests/service/smtp.pp
new file mode 100644
index 00000000..7fbdf14e
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/service/smtp.pp
@@ -0,0 +1,13 @@
+class site_shorewall::service::smtp {
+
+ include site_shorewall::defaults
+
+ shorewall::rule {
+ 'fw2net-http':
+ source => '$FW',
+ destination => 'net',
+ action => 'SMTP(ACCEPT)',
+ order => 200;
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/service/webapp_api.pp b/puppet/modules/site_shorewall/manifests/service/webapp_api.pp
new file mode 100644
index 00000000..d3a1aeed
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/service/webapp_api.pp
@@ -0,0 +1,23 @@
+# configure shorewall for webapp api
+class site_shorewall::service::webapp_api {
+
+ $api = hiera('api')
+ $api_port = $api['port']
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_webapp_api':
+ content => "PARAM - - tcp ${api_port} ",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+
+ shorewall::rule {
+ 'net2fw-webapp_api':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_webapp_api(ACCEPT)',
+ order => 200;
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/soledad.pp b/puppet/modules/site_shorewall/manifests/soledad.pp
new file mode 100644
index 00000000..518d8689
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/soledad.pp
@@ -0,0 +1,23 @@
+class site_shorewall::soledad {
+
+ $soledad = hiera('soledad')
+ $soledad_port = $soledad['port']
+
+ include site_shorewall::defaults
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_soledad':
+ content => "PARAM - - tcp ${soledad_port}",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+ shorewall::rule {
+ 'net2fw-soledad':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_soledad(ACCEPT)',
+ order => 200;
+ }
+}
+
diff --git a/puppet/modules/site_shorewall/manifests/sshd.pp b/puppet/modules/site_shorewall/manifests/sshd.pp
new file mode 100644
index 00000000..e2332592
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/sshd.pp
@@ -0,0 +1,31 @@
+# configure shorewall for sshd
+class site_shorewall::sshd {
+
+ $ssh_config = hiera('ssh')
+ $ssh_port = $ssh_config['port']
+
+ include shorewall
+
+ # define macro for incoming sshd
+ file { '/etc/shorewall/macro.leap_sshd':
+ content => "PARAM - - tcp ${ssh_port}",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+
+ shorewall::rule {
+ # outside to server
+ 'net2fw-ssh':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_sshd(ACCEPT)',
+ order => 200;
+ }
+
+ # setup a routestopped rule to allow ssh when shorewall is stopped
+ shorewall::routestopped { $site_config::params::interface:
+ options => "- tcp ${ssh_port}"
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/stunnel/client.pp b/puppet/modules/site_shorewall/manifests/stunnel/client.pp
new file mode 100644
index 00000000..9a89a244
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/stunnel/client.pp
@@ -0,0 +1,40 @@
+#
+# Adds some firewall magic to the stunnel.
+#
+# Using DNAT, this firewall rule allows a locally running program
+# to connect to the normal remote IP and remote port of the
+# service on another machine, while having the connection transparently
+# routed through the locally running stunnel client.
+#
+# The network looks like this:
+#
+# From the client's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# consumer app -> localhost:accept_port -> connect:connect_port -> localhost:original_port
+#
+# From the server's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# ?? -> *:accept_port -> localhost:connect_port -> service
+#
+
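+# Illustration with hypothetical values: given accept_port => 4000,
+# connect => 'couch1.example.org', connect_port => 15984 and
+# original_port => 5984, a local connection to couch1.example.org:5984 is
+# DNATed by the rule below to 127.0.0.1:4000, where the stunnel client
+# accepts it and forwards it over TLS to couch1.example.org:15984.
+#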
+define site_shorewall::stunnel::client(
+ $accept_port,
+ $connect,
+ $connect_port,
+ $original_port) {
+
+ include site_shorewall::defaults
+
+ shorewall::rule {
+ "stunnel_dnat_${name}":
+ action => 'DNAT',
+ source => '$FW',
+ destination => "\$FW:127.0.0.1:${accept_port}",
+ proto => 'tcp',
+ destinationport => $original_port,
+ originaldest => $connect,
+ order => 200
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/stunnel/server.pp b/puppet/modules/site_shorewall/manifests/stunnel/server.pp
new file mode 100644
index 00000000..798cd631
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/stunnel/server.pp
@@ -0,0 +1,22 @@
+#
+# Allow all incoming connections to stunnel server port
+#
+
+define site_shorewall::stunnel::server($port) {
+
+ include site_shorewall::defaults
+
+ file { "/etc/shorewall/macro.stunnel_server_${name}":
+ content => "PARAM - - tcp ${port}",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+ shorewall::rule {
+ "net2fw-stunnel-server-${name}":
+ source => 'net',
+ destination => '$FW',
+ action => "stunnel_server_${name}(ACCEPT)",
+ order => 200;
+ }
+
+} \ No newline at end of file
diff --git a/puppet/modules/site_shorewall/manifests/tor.pp b/puppet/modules/site_shorewall/manifests/tor.pp
new file mode 100644
index 00000000..324b4844
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/tor.pp
@@ -0,0 +1,26 @@
+# configure shorewall for tor
+class site_shorewall::tor {
+
+ include site_shorewall::defaults
+ include site_shorewall::ip_forward
+
+ $tor_port = '9001'
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_tor':
+ content => "PARAM - - tcp ${tor_port} ",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+
+ shorewall::rule {
+ 'net2fw-tor':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_tor(ACCEPT)',
+ order => 200;
+ }
+
+ include site_shorewall::service::http
+}
diff --git a/puppet/modules/site_shorewall/manifests/webapp.pp b/puppet/modules/site_shorewall/manifests/webapp.pp
new file mode 100644
index 00000000..a8d2aa5b
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/webapp.pp
@@ -0,0 +1,7 @@
+class site_shorewall::webapp {
+
+ include site_shorewall::defaults
+ include site_shorewall::service::https
+ include site_shorewall::service::http
+ include site_shorewall::service::webapp_api
+}
diff --git a/puppet/modules/site_squid_deb_proxy/manifests/client.pp b/puppet/modules/site_squid_deb_proxy/manifests/client.pp
new file mode 100644
index 00000000..27844270
--- /dev/null
+++ b/puppet/modules/site_squid_deb_proxy/manifests/client.pp
@@ -0,0 +1,5 @@
+class site_squid_deb_proxy::client {
+ include squid_deb_proxy::client
+ include site_shorewall::defaults
+ include shorewall::rules::mdns
+}
diff --git a/puppet/modules/site_sshd/manifests/authorized_keys.pp b/puppet/modules/site_sshd/manifests/authorized_keys.pp
new file mode 100644
index 00000000..a1fde3f6
--- /dev/null
+++ b/puppet/modules/site_sshd/manifests/authorized_keys.pp
@@ -0,0 +1,34 @@
+# We want to purge unmanaged keys from the authorized_keys file so that only
+# keys added in the provider are valid. Any manually added keys will be
+# overridden.
+#
+# In order to do this, we have to use a custom define to deploy the
+# authorized_keys file because puppet's internal resource doesn't allow
+# purging before populating this file.
+#
+# See the following for more information:
+# https://tickets.puppetlabs.com/browse/PUP-1174
+# https://leap.se/code/issues/2990
+# https://leap.se/code/issues/3010
+#
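+# A sketch of the $keys structure this define (and the authorized_keys.erb
+# template) expects, using hypothetical hiera data:
+#
+#   authorized_keys:
+#     alice:
+#       type: ssh-rsa
+#       key: AAAA...
+#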
+define site_sshd::authorized_keys ($keys, $ensure = 'present', $home = '') {
+  # Derive the default home directory from the $title variable:
+  # if $home is empty, "/home/${title}" is used.
+ $homedir = $home ? {'' => "/home/${title}", default => $home}
+ $owner = $ensure ? {'present' => $title, default => undef }
+ $group = $ensure ? {'present' => $title, default => undef }
+ file {
+ "${homedir}/.ssh":
+ ensure => 'directory',
+ owner => $title,
+ group => $title,
+ mode => '0700';
+ "${homedir}/.ssh/authorized_keys":
+ ensure => $ensure,
+ owner => $owner,
+ group => $group,
+ mode => '0600',
+ require => File["${homedir}/.ssh"],
+ content => template('site_sshd/authorized_keys.erb');
+ }
+}
diff --git a/puppet/modules/site_sshd/manifests/deploy_authorized_keys.pp b/puppet/modules/site_sshd/manifests/deploy_authorized_keys.pp
new file mode 100644
index 00000000..97ca058f
--- /dev/null
+++ b/puppet/modules/site_sshd/manifests/deploy_authorized_keys.pp
@@ -0,0 +1,9 @@
+class site_sshd::deploy_authorized_keys ( $keys ) {
+ tag 'leap_authorized_keys'
+
+ site_sshd::authorized_keys {'root':
+ keys => $keys,
+ home => '/root'
+ }
+
+}
diff --git a/puppet/modules/site_sshd/manifests/init.pp b/puppet/modules/site_sshd/manifests/init.pp
new file mode 100644
index 00000000..a9202da4
--- /dev/null
+++ b/puppet/modules/site_sshd/manifests/init.pp
@@ -0,0 +1,82 @@
+# configures sshd, mosh, authorized keys and known hosts
+class site_sshd {
+ $ssh = hiera_hash('ssh')
+ $ssh_config = $ssh['config']
+ $hosts = hiera('hosts', '')
+
+ ##
+ ## SETUP AUTHORIZED KEYS
+ ##
+
+ $authorized_keys = $ssh['authorized_keys']
+
+ class { 'site_sshd::deploy_authorized_keys':
+ keys => $authorized_keys
+ }
+
+ ##
+ ## SETUP KNOWN HOSTS and SSH_CONFIG
+ ##
+
+ file {
+ '/etc/ssh/ssh_known_hosts':
+ owner => root,
+ group => root,
+ mode => '0644',
+ content => template('site_sshd/ssh_known_hosts.erb');
+
+ '/etc/ssh/ssh_config':
+ owner => root,
+ group => root,
+ mode => '0644',
+ content => template('site_sshd/ssh_config.erb');
+ }
+
+ ##
+ ## OPTIONAL MOSH SUPPORT
+ ##
+
+ $mosh = $ssh['mosh']
+
+ if $mosh['enabled'] {
+ class { 'site_sshd::mosh':
+ ensure => present,
+ ports => $mosh['ports']
+ }
+ }
+ else {
+ class { 'site_sshd::mosh':
+ ensure => absent
+ }
+ }
+
+ # we cannot use the 'hardened' parameter because leap_cli uses an
+ # old net-ssh gem that is incompatible with the included
+ # "KexAlgorithms curve25519-sha256@libssh.org",
+ # see https://leap.se/code/issues/7591
+ # therefore we don't use it here, but include all other options
+ # that would be applied by the 'hardened' parameter
+ # not all options are available on wheezy
+ if ( $::lsbdistcodename == 'wheezy' ) {
+ $tail_additional_options = 'Ciphers aes256-ctr
+MACs hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
+ } else {
+ $tail_additional_options = 'Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
+ }
+
+ ##
+ ## SSHD SERVER CONFIGURATION
+ ##
+ class { '::sshd':
+ manage_nagios => false,
+ ports => [ $ssh['port'] ],
+ use_pam => 'yes',
+ print_motd => 'no',
+ tcp_forwarding => $ssh_config['AllowTcpForwarding'],
+ manage_client => false,
+ use_storedconfigs => false,
+ tail_additional_options => $tail_additional_options,
+ hostkey_type => [ 'rsa', 'dsa', 'ecdsa' ]
+ }
+}
diff --git a/puppet/modules/site_sshd/manifests/mosh.pp b/puppet/modules/site_sshd/manifests/mosh.pp
new file mode 100644
index 00000000..49f56ca0
--- /dev/null
+++ b/puppet/modules/site_sshd/manifests/mosh.pp
@@ -0,0 +1,21 @@
+class site_sshd::mosh ( $ensure = present, $ports = '60000-61000' ) {
+
+ package { 'mosh':
+ ensure => $ensure
+ }
+
+ file { '/etc/shorewall/macro.mosh':
+ ensure => $ensure,
+ content => "PARAM - - udp ${ports}",
+ notify => Service['shorewall'],
+ require => Package['shorewall'];
+ }
+
+ shorewall::rule { 'net2fw-mosh':
+ ensure => $ensure,
+ source => 'net',
+ destination => '$FW',
+ action => 'mosh(ACCEPT)',
+ order => 200;
+ }
+}
diff --git a/puppet/modules/site_sshd/templates/authorized_keys.erb b/puppet/modules/site_sshd/templates/authorized_keys.erb
new file mode 100644
index 00000000..51bdc5b3
--- /dev/null
+++ b/puppet/modules/site_sshd/templates/authorized_keys.erb
@@ -0,0 +1,10 @@
+# NOTICE: This file is autogenerated by Puppet
+# all manually added keys will be overridden
+
+<% @keys.sort.each do |user, hash| -%>
+<% if user == 'monitor' -%>
+command="/usr/bin/check_mk_agent",no-port-forwarding,no-x11-forwarding,no-agent-forwarding,no-pty,no-user-rc, <%=hash['type']-%> <%=hash['key']%> <%=user%>
+<% else -%>
+<%=hash['type']-%> <%=hash['key']%> <%=user%>
+<% end -%>
+<% end -%>
diff --git a/puppet/modules/site_sshd/templates/ssh_config.erb b/puppet/modules/site_sshd/templates/ssh_config.erb
new file mode 100644
index 00000000..36c0b6d5
--- /dev/null
+++ b/puppet/modules/site_sshd/templates/ssh_config.erb
@@ -0,0 +1,40 @@
+# This file is generated by Puppet
+# This is the ssh client system-wide configuration file. See
+# ssh_config(5) for more information. This file provides defaults for
+# users, and the values can be changed in per-user configuration files
+# or on the command line.
+
+Host *
+ SendEnv LANG LC_*
+ HashKnownHosts yes
+ GSSAPIAuthentication yes
+ GSSAPIDelegateCredentials no
+<% if scope.lookupvar('::site_config::params::environment') == 'local' -%>
+ #
+ # Vagrant nodes should have strict host key checking
+ # turned off. The problem is that the host key for a vagrant
+ # node is specific to the particular instance of the vagrant
+ # node you have running locally. For this reason, we can't
+ # track the host keys, or your host key for vpn1 would conflict
+ # with my host key for vpn1.
+ #
+ StrictHostKeyChecking no
+<% end -%>
+
+#
+# Tell SSH what host key algorithm we should use. I don't understand why this
+# is needed, since the man page says that "if hostkeys are known for the
+# destination host then [HostKeyAlgorithms default] is modified to prefer
+# their algorithms."
+#
+
+<% @hosts.sort.each do |name, host| -%>
+Host <%= name %> <%= host['domain_full'] %> <%= host['domain_internal'] %> <%= host['ip_address'] %>
+<% if host['host_pub_key'] -%>
+HostKeyAlgorithms <%= host['host_pub_key'].split(" ").first %>
+<% end -%>
+<% if host['port'] -%>
+Port <%= host['port'] %>
+<% end -%>
+
+<% end -%>
diff --git a/puppet/modules/site_sshd/templates/ssh_known_hosts.erb b/puppet/modules/site_sshd/templates/ssh_known_hosts.erb
new file mode 100644
index 00000000..002ab732
--- /dev/null
+++ b/puppet/modules/site_sshd/templates/ssh_known_hosts.erb
@@ -0,0 +1,7 @@
+# This file is generated by Puppet
+
+<% @hosts.sort.each do |name, hash| -%>
+<% if hash['host_pub_key'] -%>
+<%= name%>,<%=hash['domain_full']%>,<%=hash['domain_internal']%>,<%=hash['ip_address']%> <%=hash['host_pub_key']%>
+<% end -%>
+<% end -%>
diff --git a/puppet/modules/site_static/README b/puppet/modules/site_static/README
new file mode 100644
index 00000000..bc719782
--- /dev/null
+++ b/puppet/modules/site_static/README
@@ -0,0 +1,3 @@
+Deploy one or more static websites to a node.
+
+For now, it only supports `amber`-based static sites. Support for plain HTML and Jekyll should come in the future.
diff --git a/puppet/modules/site_static/manifests/domain.pp b/puppet/modules/site_static/manifests/domain.pp
new file mode 100644
index 00000000..b26cc9e3
--- /dev/null
+++ b/puppet/modules/site_static/manifests/domain.pp
@@ -0,0 +1,33 @@
+# configure static service for domain
+define site_static::domain (
+ $ca_cert,
+ $key,
+ $cert,
+ $tls_only=true,
+ $locations=undef,
+ $aliases=undef,
+ $apache_config=undef) {
+
+ $domain = $name
+ $base_dir = '/srv/static'
+
+ $cafile = "${cert}\n${ca_cert}"
+
+ if is_hash($locations) {
+ create_resources(site_static::location, $locations)
+ }
+
+ x509::cert { $domain:
+ content => $cafile,
+ notify => Service[apache]
+ }
+ x509::key { $domain:
+ content => $key,
+ notify => Service[apache]
+ }
+
+ apache::vhost::file { $domain:
+ content => template('site_static/apache.conf.erb')
+ }
+
+}
diff --git a/puppet/modules/site_static/manifests/init.pp b/puppet/modules/site_static/manifests/init.pp
new file mode 100644
index 00000000..4a722d62
--- /dev/null
+++ b/puppet/modules/site_static/manifests/init.pp
@@ -0,0 +1,72 @@
+# deploy static service
+class site_static {
+ tag 'leap_service'
+
+ include site_config::default
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca_bundle
+
+ $static = hiera('static')
+ $domains = $static['domains']
+ $formats = $static['formats']
+ $bootstrap = $static['bootstrap_files']
+ $tor = hiera('tor', false)
+
+ if $bootstrap['enabled'] {
+ $bootstrap_domain = $bootstrap['domain']
+ $bootstrap_client = $bootstrap['client_version']
+ file { '/srv/leap/provider.json':
+ content => $bootstrap['provider_json'],
+ owner => 'www-data',
+ group => 'www-data',
+ mode => '0444';
+ }
+ # It is important to always touch provider.json: the client needs to check x-min-client-version header,
+ # but this is only sent when the file has been modified (otherwise 304 is sent by apache). The problem
+ # is that changing min client version won't alter the content of provider.json, so we must touch it.
+ exec { '/bin/touch /srv/leap/provider.json':
+ require => File['/srv/leap/provider.json'];
+ }
+ }
+
+ include apache::module::headers
+ include apache::module::alias
+ include apache::module::expires
+ include apache::module::removeip
+ include apache::module::dir
+ include apache::module::negotiation
+ include site_apache::common
+ include site_config::ruby::dev
+
+ if (member($formats, 'rack')) {
+ include site_apt::preferences::passenger
+ class { 'passenger':
+ use_munin => false,
+ require => Class['site_apt::preferences::passenger']
+ }
+ }
+
+ if (member($formats, 'amber')) {
+ rubygems::gem{'amber-0.3.8':
+ require => Package['zlib1g-dev']
+ }
+
+ package { 'zlib1g-dev':
+ ensure => installed
+ }
+ }
+
+ create_resources(site_static::domain, $domains)
+
+ if $tor {
+ $hidden_service = $tor['hidden_service']
+ if $hidden_service['active'] {
+ include site_webapp::hidden_service
+ }
+ }
+
+ include site_shorewall::defaults
+ include site_shorewall::service::http
+ include site_shorewall::service::https
+}
diff --git a/puppet/modules/site_static/manifests/location.pp b/puppet/modules/site_static/manifests/location.pp
new file mode 100644
index 00000000..d116de2f
--- /dev/null
+++ b/puppet/modules/site_static/manifests/location.pp
@@ -0,0 +1,36 @@
+# configure static service for location
+define site_static::location($path, $format, $source) {
+
+ $file_path = "/srv/static/${name}"
+ $allowed_formats = ['amber','rack']
+
+ if $format == undef {
+ fail("static_site location `${path}` is missing `format` field.")
+ }
+
+ if ! member($allowed_formats, $format) {
+ $formats_str = join($allowed_formats, ', ')
+ fail("Unsupported static_site location format `${format}`. Supported formats include ${formats_str}.")
+ }
+
+ if ($format == 'amber') {
+ exec {"amber_build_${name}":
+ cwd => $file_path,
+ command => 'amber rebuild',
+ user => 'www-data',
+ timeout => 600,
+ subscribe => Vcsrepo[$file_path]
+ }
+ }
+
+ vcsrepo { $file_path:
+ ensure => present,
+ force => true,
+ revision => $source['revision'],
+ provider => $source['type'],
+ source => $source['repo'],
+ owner => 'www-data',
+ group => 'www-data'
+ }
+
+}
diff --git a/puppet/modules/site_static/templates/amber.erb b/puppet/modules/site_static/templates/amber.erb
new file mode 100644
index 00000000..694f1136
--- /dev/null
+++ b/puppet/modules/site_static/templates/amber.erb
@@ -0,0 +1,13 @@
+<%- if @location_path != '' -%>
+ AliasMatch ^/[a-z]{2}/<%=@location_path%>(/.+|/|)$ "<%=@directory%>/$1"
+ Alias /<%=@location_path%> "<%=@directory%>/"
+<%- end -%>
+ <Directory "<%=@directory%>/">
+ AllowOverride FileInfo Indexes Options=All,MultiViews
+<% if scope.function_guess_apache_version([]) == '2.4' %>
+ Require all granted
+<% else %>
+ Order deny,allow
+ Allow from all
+<% end %>
+ </Directory>
diff --git a/puppet/modules/site_static/templates/apache.conf.erb b/puppet/modules/site_static/templates/apache.conf.erb
new file mode 100644
index 00000000..6b969d1c
--- /dev/null
+++ b/puppet/modules/site_static/templates/apache.conf.erb
@@ -0,0 +1,88 @@
+<%-
+ ##
+ ## An apache config for static websites.
+ ##
+
+ def location_directory(name, location)
+ if ['amber', 'rack'].include?(location['format'])
+ File.join(@base_dir, name, 'public')
+ else
+ File.join(@base_dir, name)
+ end
+ end
+
+ @document_root = begin
+ root = '/var/www'
+ @locations && @locations.each do |name, location|
+ root = location_directory(name, location) if location['path'] == '/'
+ end
+ root.gsub(%r{^/|/$}, '')
+ end
+
+ bootstrap_domain = scope.lookupvar('site_static::bootstrap_domain')
+ bootstrap_client = scope.lookupvar('site_static::bootstrap_client')
+-%>
+
+<VirtualHost *:80>
+ ServerName <%= @domain %>
+ ServerAlias www.<%= @domain %>
+<%- @aliases && @aliases.each do |domain_alias| -%>
+ ServerAlias <%= domain_alias %>
+<%- end -%>
+<%- if @tls_only -%>
+ RewriteEngine On
+ RewriteRule ^.*$ https://<%= @domain -%>%{REQUEST_URI} [R=permanent,L]
+<%- end -%>
+</VirtualHost>
+
+<VirtualHost *:443>
+ ServerName <%= @domain %>
+ ServerAlias www.<%= @domain %>
+<%- @aliases && @aliases.each do |domain_alias| -%>
+ ServerAlias <%= domain_alias %>
+<%- end -%>
+
+ #RewriteLog "/var/log/apache2/rewrite.log"
+ #RewriteLogLevel 3
+
+ Include include.d/ssl_common.inc
+
+<%- if @tls_only -%>
+ Header always set Strict-Transport-Security: "max-age=15768000;includeSubdomains"
+<%- end -%>
+ Header set X-Frame-Options "deny"
+ Header always unset X-Powered-By
+ Header always unset X-Runtime
+
+ SSLCertificateKeyFile /etc/x509/keys/<%= @domain %>.key
+ SSLCertificateFile /etc/x509/certs/<%= @domain %>.crt
+
+ RequestHeader set X_FORWARDED_PROTO 'https'
+
+ DocumentRoot "/<%= @document_root %>/"
+ AccessFileName .htaccess
+
+<%- if ([@aliases]+[@domain]).flatten.include?(bootstrap_domain) -%>
+ Alias /provider.json /srv/leap/provider.json
+ <Location /provider.json>
+ Header set X-Minimum-Client-Version <%= bootstrap_client['min'] %>
+ </Location>
+<%- end -%>
+
+<%- if @apache_config -%>
+<%= @apache_config.gsub(':percent:','%') %>
+<%- end -%>
+
+<%- @locations && @locations.each do |name, location| -%>
+<%- location_path = location['path'].gsub(%r{^/|/$}, '') -%>
+<%- directory = location_directory(name, location) -%>
+<%- local_vars = {'location_path'=>location_path, 'directory'=>directory, 'location'=>location, 'name'=>name} -%>
+<%- template_path = File.join(File.dirname(__FILE__), location['format']) + '.erb' -%>
+<%- break unless File.exists?(template_path) -%>
+ ##
+ ## <%= name %> (<%= location['format'] %>)
+ ##
+<%= scope.function_templatewlv([template_path, local_vars]) %>
+<%- end -%>
+
+</VirtualHost>
diff --git a/puppet/modules/site_static/templates/rack.erb b/puppet/modules/site_static/templates/rack.erb
new file mode 100644
index 00000000..431778bb
--- /dev/null
+++ b/puppet/modules/site_static/templates/rack.erb
@@ -0,0 +1,19 @@
+ #PassengerLogLevel 1
+ #PassengerAppEnv production
+ #PassengerFriendlyErrorPages on
+<%- if @location_path != '' -%>
+ Alias /<%=@location_path%> "<%=@directory%>"
+ <Location /<%=@location_path%>>
+ PassengerBaseURI /<%=@location_path%>
+ PassengerAppRoot "<%=File.dirname(@directory)%>"
+ </Location>
+<%- end -%>
+ <Directory "<%=@directory%>">
+ Options -MultiViews
+<% if scope.function_guess_apache_version([]) == '2.4' %>
+ Require all granted
+<% else %>
+ Order deny,allow
+ Allow from all
+<% end %>
+ </Directory>
diff --git a/puppet/modules/site_stunnel/manifests/client.pp b/puppet/modules/site_stunnel/manifests/client.pp
new file mode 100644
index 00000000..c9e034f1
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/client.pp
@@ -0,0 +1,64 @@
+#
+# Sets up stunnel and firewall configuration for
+# a single stunnel client
+#
+# As a client, we accept connections on localhost,
+# and connect to a remote $connect:$connect_port
+#
+
+define site_stunnel::client (
+ $accept_port,
+ $connect_port,
+ $connect,
+ $original_port,
+ $verify = '2',
+ $pid = $name,
+ $rndfile = '/var/lib/stunnel4/.rnd',
+ $debuglevel = 'warning' ) {
+
+ $logfile = "/var/log/stunnel4/${name}.log"
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include x509::variables
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+ stunnel::service { $name:
+ accept => "127.0.0.1:${accept_port}",
+ connect => "${connect}:${connect_port}",
+ client => true,
+ cafile => $ca_path,
+ key => $key_path,
+ cert => $cert_path,
+ verify => $verify,
+ pid => "/var/run/stunnel4/${pid}.pid",
+ rndfile => $rndfile,
+ debuglevel => $debuglevel,
+ sslversion => 'TLSv1',
+ syslog => 'no',
+ output => $logfile;
+ }
+
+ # define the log files so that we can purge the
+ # files from /var/log/stunnel4 that are not defined.
+ file {
+ $logfile:;
+ "${logfile}.1.gz":;
+ "${logfile}.2.gz":;
+ "${logfile}.3.gz":;
+ "${logfile}.4.gz":;
+ "${logfile}.5.gz":;
+ }
+
+ site_shorewall::stunnel::client { $name:
+ accept_port => $accept_port,
+ connect => $connect,
+ connect_port => $connect_port,
+ original_port => $original_port
+ }
+
+ include site_check_mk::agent::stunnel
+}
diff --git a/puppet/modules/site_stunnel/manifests/clients.pp b/puppet/modules/site_stunnel/manifests/clients.pp
new file mode 100644
index 00000000..c0958b5f
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/clients.pp
@@ -0,0 +1,23 @@
+#
+# example hiera yaml:
+#
+# stunnel:
+# clients:
+# ednp_clients:
+# thrips_9002:
+# accept_port: 4001
+# connect: thrips.demo.bitmask.i
+# connect_port: 19002
+# epmd_clients:
+# thrips_4369:
+# accept_port: 4000
+# connect: thrips.demo.bitmask.i
+# connect_port: 14369
+#
+# In the above example, this resource definition is called twice, with $name
+# 'ednp_clients' and 'epmd_clients'
+#
+
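+# Roughly speaking, each invocation expands the corresponding hash from hiera
+# into one site_stunnel::client resource per entry, e.g. (hypothetical data
+# from the example above):
+#
+#   site_stunnel::client { 'thrips_9002':
+#     accept_port  => 4001,
+#     connect      => 'thrips.demo.bitmask.i',
+#     connect_port => 19002,
+#     ...
+#   }
+#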
+define site_stunnel::clients {
+ create_resources(site_stunnel::client, $site_stunnel::clients[$name])
+}
diff --git a/puppet/modules/site_stunnel/manifests/init.pp b/puppet/modules/site_stunnel/manifests/init.pp
new file mode 100644
index 00000000..a874721f
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/init.pp
@@ -0,0 +1,48 @@
+#
+# If you need something to happen after stunnel is started,
+# you can depend on Service['stunnel'] or Class['site_stunnel']
+#
+
+class site_stunnel {
+
+ # include the generic stunnel module
+ # increase the number of open files to allow for 800 connections
+ class { 'stunnel': default_extra => 'ulimit -n 4096' }
+
+ # The stunnel.conf provided by the Debian package is broken by default
+ # so we get rid of it and just define our own. See #549384
+ if !defined(File['/etc/stunnel/stunnel.conf']) {
+ file {
+ # this file is a broken config installed by the package
+ '/etc/stunnel/stunnel.conf':
+ ensure => absent;
+ }
+ }
+
+ $stunnel = hiera('stunnel')
+
+ # add server stunnels
+ create_resources(site_stunnel::servers, $stunnel['servers'])
+
+ # add client stunnels
+ $clients = $stunnel['clients']
+ $client_sections = keys($clients)
+ site_stunnel::clients { $client_sections: }
+
+ # remove any old stunnel logs that are not
+ # defined by this puppet run
+ file {'/var/log/stunnel4': purge => true;}
+
+ # the default is to keep 356 log files for each stunnel.
+ # here we set a more reasonable number.
+ augeas {
+ 'logrotate_stunnel':
+ context => '/files/etc/logrotate.d/stunnel4/rule',
+ changes => [
+ 'set rotate 5',
+ ]
+ }
+
+ include site_stunnel::override_service
+}
+
diff --git a/puppet/modules/site_stunnel/manifests/override_service.pp b/puppet/modules/site_stunnel/manifests/override_service.pp
new file mode 100644
index 00000000..435b9aa0
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/override_service.pp
@@ -0,0 +1,18 @@
+# override stunnel::debian defaults
+#
+# ignore puppet lint error about inheriting from different namespace
+# lint:ignore:inherits_across_namespaces
+class site_stunnel::override_service inherits stunnel::debian {
+# lint:endignore
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+
+ Service[stunnel] {
+ subscribe => [
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ]
+ }
+}
diff --git a/puppet/modules/site_stunnel/manifests/servers.pp b/puppet/modules/site_stunnel/manifests/servers.pp
new file mode 100644
index 00000000..e76d1e9d
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/servers.pp
@@ -0,0 +1,51 @@
+#
+# example hiera yaml:
+#
+# stunnel:
+# servers:
+# couch_server:
+# accept_port: 15984
+# connect_port: 5984
+#
+
+define site_stunnel::servers (
+ $accept_port,
+ $connect_port,
+ $verify = '2',
+ $pid = $name,
+ $rndfile = '/var/lib/stunnel4/.rnd',
+ $debuglevel = '4' ) {
+
+ $logfile = "/var/log/stunnel4/${name}.log"
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include x509::variables
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+ stunnel::service { $name:
+ accept => $accept_port,
+ connect => "127.0.0.1:${connect_port}",
+ client => false,
+ cafile => $ca_path,
+ key => $key_path,
+ cert => $cert_path,
+ verify => $verify,
+ pid => "/var/run/stunnel4/${pid}.pid",
+ rndfile => '/var/lib/stunnel4/.rnd',
+ debuglevel => $debuglevel,
+ sslversion => 'TLSv1',
+ syslog => 'no',
+ output => $logfile;
+ }
+
+ # allow incoming connections on $accept_port
+ site_shorewall::stunnel::server { $name:
+ port => $accept_port
+ }
+
+ include site_check_mk::agent::stunnel
+}
diff --git a/puppet/modules/site_tor/manifests/disable_exit.pp b/puppet/modules/site_tor/manifests/disable_exit.pp
new file mode 100644
index 00000000..078f80ae
--- /dev/null
+++ b/puppet/modules/site_tor/manifests/disable_exit.pp
@@ -0,0 +1,7 @@
+class site_tor::disable_exit {
+ tor::daemon::exit_policy {
+ 'no_exit_at_all':
+ reject => [ '*:*' ];
+ }
+}
+
diff --git a/puppet/modules/site_tor/manifests/init.pp b/puppet/modules/site_tor/manifests/init.pp
new file mode 100644
index 00000000..2207a5a9
--- /dev/null
+++ b/puppet/modules/site_tor/manifests/init.pp
@@ -0,0 +1,45 @@
+class site_tor {
+ tag 'leap_service'
+ Class['site_config::default'] -> Class['site_tor']
+
+ $tor = hiera('tor')
+ $bandwidth_rate = $tor['bandwidth_rate']
+ $tor_type = $tor['type']
+ $nickname = $tor['nickname']
+ $contact_emails = join($tor['contacts'],', ')
+ $family = $tor['family']
+
+ $address = hiera('ip_address')
+
+ $openvpn = hiera('openvpn', undef)
+ if $openvpn {
+ $openvpn_ports = $openvpn['ports']
+ }
+ else {
+ $openvpn_ports = []
+ }
+
+ include site_config::default
+ include tor::daemon
+ tor::daemon::relay { $nickname:
+ port => 9001,
+ address => $address,
+ contact_info => obfuscate_email($contact_emails),
+ bandwidth_rate => $bandwidth_rate,
+ my_family => $family
+ }
+
+ if ( $tor_type == 'exit'){
+ # Only enable the daemon directory if the node isn't also a webapp node
+ # or running openvpn on port 80
+ if ! member($::services, 'webapp') and ! member($openvpn_ports, '80') {
+ tor::daemon::directory { $::hostname: port => 80 }
+ }
+ }
+ else {
+ include site_tor::disable_exit
+ }
+
+ include site_shorewall::tor
+
+}
diff --git a/puppet/modules/site_webapp/files/server-status.conf b/puppet/modules/site_webapp/files/server-status.conf
new file mode 100644
index 00000000..10b2d4ed
--- /dev/null
+++ b/puppet/modules/site_webapp/files/server-status.conf
@@ -0,0 +1,26 @@
+# Keep track of extended status information for each request
+ExtendedStatus On
+
+# Determine if mod_status displays the first 63 characters of a request or
+# the last 63, assuming the request itself is greater than 63 chars.
+# Default: Off
+#SeeRequestTail On
+
+Listen 127.0.0.1:8162
+
+<VirtualHost 127.0.0.1:8162>
+
+<Location /server-status>
+ SetHandler server-status
+ Require all granted
+ Allow from 127.0.0.1
+</Location>
+
+</VirtualHost>
+
+
+<IfModule mod_proxy.c>
+ # Show Proxy LoadBalancer status in mod_status
+ ProxyStatus On
+</IfModule>
+
diff --git a/puppet/modules/site_webapp/manifests/apache.pp b/puppet/modules/site_webapp/manifests/apache.pp
new file mode 100644
index 00000000..80c7b29b
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/apache.pp
@@ -0,0 +1,28 @@
+# configure apache and passenger to serve the webapp
+class site_webapp::apache {
+
+ $web_api = hiera('api')
+ $api_domain = $web_api['domain']
+ $api_port = $web_api['port']
+
+ $web_domain = hiera('domain')
+ $domain_name = $web_domain['name']
+
+ $webapp = hiera('webapp')
+ $webapp_domain = $webapp['domain']
+
+ include site_apache::common
+ include apache::module::headers
+ include apache::module::alias
+ include apache::module::expires
+ include apache::module::removeip
+ include site_webapp::common_vhost
+
+ class { 'passenger': use_munin => false }
+
+ apache::vhost::file {
+ 'api':
+ content => template('site_apache/vhosts.d/api.conf.erb');
+ }
+
+}
diff --git a/puppet/modules/site_webapp/manifests/common_vhost.pp b/puppet/modules/site_webapp/manifests/common_vhost.pp
new file mode 100644
index 00000000..c57aad57
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/common_vhost.pp
@@ -0,0 +1,18 @@
+class site_webapp::common_vhost {
+ # installs x509 cert + key and common config
+ # that both nagios + leap webapp use
+
+ include x509::variables
+ include site_config::x509::commercial::cert
+ include site_config::x509::commercial::key
+ include site_config::x509::commercial::ca
+
+ Class['Site_config::X509::Commercial::Key'] ~> Service[apache]
+ Class['Site_config::X509::Commercial::Cert'] ~> Service[apache]
+ Class['Site_config::X509::Commercial::Ca'] ~> Service[apache]
+
+ apache::vhost::file {
+ 'common':
+ content => template('site_apache/vhosts.d/common.conf.erb')
+ }
+}
diff --git a/puppet/modules/site_webapp/manifests/couchdb.pp b/puppet/modules/site_webapp/manifests/couchdb.pp
new file mode 100644
index 00000000..71450370
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/couchdb.pp
@@ -0,0 +1,52 @@
+class site_webapp::couchdb {
+
+ $webapp = hiera('webapp')
+ # haproxy listener on port localhost:4096, see site_webapp::haproxy
+ $couchdb_host = 'localhost'
+ $couchdb_port = '4096'
+ $couchdb_webapp_user = $webapp['couchdb_webapp_user']['username']
+ $couchdb_webapp_password = $webapp['couchdb_webapp_user']['password']
+ $couchdb_admin_user = $webapp['couchdb_admin_user']['username']
+ $couchdb_admin_password = $webapp['couchdb_admin_user']['password']
+
+ include x509::variables
+
+ file {
+ '/srv/leap/webapp/config/couchdb.yml':
+ content => template('site_webapp/couchdb.yml.erb'),
+ owner => 'leap-webapp',
+ group => 'leap-webapp',
+ mode => '0600',
+ require => Vcsrepo['/srv/leap/webapp'];
+
+ # couchdb.admin.yml is a symlink to prevent the vcsrepo resource
+ # from changing its user permissions every time.
+ '/srv/leap/webapp/config/couchdb.admin.yml':
+ ensure => 'link',
+ target => '/etc/leap/couchdb.admin.yml',
+ require => Vcsrepo['/srv/leap/webapp'];
+
+ '/etc/leap/couchdb.admin.yml':
+ content => template('site_webapp/couchdb.admin.yml.erb'),
+ owner => 'root',
+ group => 'root',
+ mode => '0600',
+ require => File['/etc/leap'];
+
+ '/srv/leap/webapp/log':
+ ensure => directory,
+ owner => 'leap-webapp',
+ group => 'leap-webapp',
+ mode => '0755',
+ require => Vcsrepo['/srv/leap/webapp'];
+
+ '/srv/leap/webapp/log/production.log':
+ ensure => present,
+ owner => 'leap-webapp',
+ group => 'leap-webapp',
+ mode => '0666',
+ require => Vcsrepo['/srv/leap/webapp'];
+ }
+
+ include site_stunnel
+}
diff --git a/puppet/modules/site_webapp/manifests/cron.pp b/puppet/modules/site_webapp/manifests/cron.pp
new file mode 100644
index 00000000..70b9da04
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/cron.pp
@@ -0,0 +1,37 @@
+# setup webapp cronjobs
+class site_webapp::cron {
+
+ # cron tasks that need to be performed to cleanup the database
+ cron {
+ 'rotate_databases':
+ command => 'cd /srv/leap/webapp && bundle exec rake db:rotate',
+ environment => 'RAILS_ENV=production',
+ user => 'root',
+ hour => [0,6,12,18],
+ minute => 0;
+
+ 'delete_tmp_databases':
+ command => 'cd /srv/leap/webapp && bundle exec rake db:deletetmp',
+ environment => 'RAILS_ENV=production',
+ user => 'root',
+ hour => 1,
+ minute => 1;
+
+ # there is no longer a need to remove expired sessions, since the database
+ # will get destroyed.
+ 'remove_expired_sessions':
+ ensure => absent,
+ command => 'cd /srv/leap/webapp && bundle exec rake cleanup:sessions',
+ environment => 'RAILS_ENV=production',
+ user => 'leap-webapp',
+ hour => 2,
+ minute => 30;
+
+ 'remove_expired_tokens':
+ command => 'cd /srv/leap/webapp && bundle exec rake cleanup:tokens',
+ environment => 'RAILS_ENV=production',
+ user => 'leap-webapp',
+ hour => 3,
+ minute => 0;
+ }
+}
diff --git a/puppet/modules/site_webapp/manifests/hidden_service.pp b/puppet/modules/site_webapp/manifests/hidden_service.pp
new file mode 100644
index 00000000..72a2ce95
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/hidden_service.pp
@@ -0,0 +1,52 @@
+class site_webapp::hidden_service {
+ $tor = hiera('tor')
+ $hidden_service = $tor['hidden_service']
+ $tor_domain = "${hidden_service['address']}.onion"
+
+ include site_apache::common
+ include apache::module::headers
+ include apache::module::alias
+ include apache::module::expires
+ include apache::module::removeip
+
+ include tor::daemon
+ tor::daemon::hidden_service { 'webapp': ports => [ '80 127.0.0.1:80'] }
+
+ file {
+ '/var/lib/tor/webapp/':
+ ensure => directory,
+ owner => 'debian-tor',
+ group => 'debian-tor',
+ mode => '2700';
+
+ '/var/lib/tor/webapp/private_key':
+ ensure => present,
+ source => "/srv/leap/files/nodes/${::hostname}/tor.key",
+ owner => 'debian-tor',
+ group => 'debian-tor',
+ mode => '0600';
+
+ '/var/lib/tor/webapp/hostname':
+ ensure => present,
+ content => $tor_domain,
+ owner => 'debian-tor',
+ group => 'debian-tor',
+ mode => '0600';
+ }
+
+ # it is necessary to zero out the config of the status module
+ # because we are configuring our own version that is unavailable
+ # over the hidden service (see: #7456 and #7776)
+ apache::module { 'status': ensure => present, conf_content => ' ' }
+ # the access_compat module is required to enable Allow directives
+ apache::module { 'access_compat': ensure => present }
+
+ apache::vhost::file {
+ 'hidden_service':
+ content => template('site_apache/vhosts.d/hidden_service.conf.erb');
+ 'server_status':
+ vhost_source => 'modules/site_webapp/server-status.conf';
+ }
+
+ include site_shorewall::tor
+}
diff --git a/puppet/modules/site_webapp/manifests/init.pp b/puppet/modules/site_webapp/manifests/init.pp
new file mode 100644
index 00000000..15925aba
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/init.pp
@@ -0,0 +1,179 @@
+# configure webapp service
+class site_webapp {
+ tag 'leap_service'
+ $definition_files = hiera('definition_files')
+ $provider = $definition_files['provider']
+ $eip_service = $definition_files['eip_service']
+ $soledad_service = $definition_files['soledad_service']
+ $smtp_service = $definition_files['smtp_service']
+ $node_domain = hiera('domain')
+ $provider_domain = $node_domain['full_suffix']
+ $webapp = hiera('webapp')
+ $api_version = $webapp['api_version']
+ $secret_token = $webapp['secret_token']
+ $tor = hiera('tor', false)
+ $sources = hiera('sources')
+
+ Class['site_config::default'] -> Class['site_webapp']
+
+ include site_config::ruby::dev
+ include site_webapp::apache
+ include site_webapp::couchdb
+ include site_haproxy
+ include site_webapp::cron
+ include site_config::default
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include site_config::x509::client_ca::ca
+ include site_config::x509::client_ca::key
+ include site_nickserver
+
+ # remove leftovers from previous installations on webapp nodes
+ include site_config::remove::webapp
+
+ group { 'leap-webapp':
+ ensure => present,
+ allowdupe => false;
+ }
+
+ user { 'leap-webapp':
+ ensure => present,
+ allowdupe => false,
+ gid => 'leap-webapp',
+ groups => 'ssl-cert',
+ home => '/srv/leap/webapp',
+ require => [ Group['leap-webapp'] ];
+ }
+
+ vcsrepo { '/srv/leap/webapp':
+ ensure => present,
+ force => true,
+ revision => $sources['webapp']['revision'],
+ provider => $sources['webapp']['type'],
+ source => $sources['webapp']['source'],
+ owner => 'leap-webapp',
+ group => 'leap-webapp',
+ require => [ User['leap-webapp'], Group['leap-webapp'] ],
+ notify => Exec['bundler_update']
+ }
+
+ exec { 'bundler_update':
+ cwd => '/srv/leap/webapp',
+ command => '/bin/bash -c "/usr/bin/bundle check --path vendor/bundle || /usr/bin/bundle install --path vendor/bundle --without test development debug"',
+ unless => '/usr/bin/bundle check --path vendor/bundle',
+ user => 'leap-webapp',
+ timeout => 600,
+ require => [
+ Class['bundler::install'],
+ Vcsrepo['/srv/leap/webapp'],
+ Class['site_config::ruby::dev'],
+ Service['shorewall'] ],
+ notify => Service['apache'];
+ }
+
+ #
+ # NOTE: in order to support a webapp that is running on a subpath and not the
+ # root of the domain assets:precompile needs to be run with
+ # RAILS_RELATIVE_URL_ROOT=/application-root
+ #
+
+ exec { 'compile_assets':
+ cwd => '/srv/leap/webapp',
+ command => '/bin/bash -c "RAILS_ENV=production /usr/bin/bundle exec rake assets:precompile"',
+ user => 'leap-webapp',
+ logoutput => on_failure,
+ require => Exec['bundler_update'],
+ notify => Service['apache'];
+ }
+
+ file {
+ '/srv/leap/webapp/config/provider':
+ ensure => directory,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0755';
+
+ '/srv/leap/webapp/config/provider/provider.json':
+ content => $provider,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0644';
+
+ '/srv/leap/webapp/public/ca.crt':
+ ensure => link,
+ require => Vcsrepo['/srv/leap/webapp'],
+ target => "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt";
+
+ "/srv/leap/webapp/public/${api_version}":
+ ensure => directory,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0755';
+
+ "/srv/leap/webapp/public/${api_version}/config/":
+ ensure => directory,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0755';
+
+ "/srv/leap/webapp/public/${api_version}/config/eip-service.json":
+ content => $eip_service,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0644';
+
+ "/srv/leap/webapp/public/${api_version}/config/soledad-service.json":
+ content => $soledad_service,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0644';
+
+ "/srv/leap/webapp/public/${api_version}/config/smtp-service.json":
+ content => $smtp_service,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0644';
+ }
+
+ try::file {
+ '/srv/leap/webapp/config/customization':
+ ensure => directory,
+ recurse => true,
+ purge => true,
+ force => true,
+ owner => leap-webapp,
+ group => leap-webapp,
+ mode => 'u=rwX,go=rX',
+ require => Vcsrepo['/srv/leap/webapp'],
+ notify => Exec['compile_assets'],
+ source => $webapp['customization_dir'];
+ }
+
+ git::changes {
+ 'public/favicon.ico':
+ cwd => '/srv/leap/webapp',
+ require => Vcsrepo['/srv/leap/webapp'],
+ user => 'leap-webapp';
+ }
+
+ file {
+ '/srv/leap/webapp/config/config.yml':
+ content => template('site_webapp/config.yml.erb'),
+ owner => leap-webapp,
+ group => leap-webapp,
+ mode => '0600',
+ require => Vcsrepo['/srv/leap/webapp'],
+ notify => Service['apache'];
+ }
+
+ if $tor {
+ $hidden_service = $tor['hidden_service']
+ if $hidden_service['active'] {
+ include site_webapp::hidden_service
+ }
+ }
+
+
+ # needed for the soledad-sync check which is run on the
+ # webapp node
+ include soledad::client
+
+ leap::logfile { 'webapp': }
+
+ include site_shorewall::webapp
+ include site_check_mk::agent::webapp
+}
diff --git a/puppet/modules/site_webapp/templates/config.yml.erb b/puppet/modules/site_webapp/templates/config.yml.erb
new file mode 100644
index 00000000..dd55d3e9
--- /dev/null
+++ b/puppet/modules/site_webapp/templates/config.yml.erb
@@ -0,0 +1,36 @@
+<%
+cert_options = @webapp['client_certificates']
+production = {
+ "admins" => @webapp['admins'],
+ "default_locale" => @webapp['default_locale'],
+ "available_locales" => @webapp['locales'],
+ "domain" => @provider_domain,
+ "force_ssl" => @webapp['secure'],
+ "client_ca_key" => "%s/%s.key" % [scope.lookupvar('x509::variables::keys'), scope.lookupvar('site_config::params::client_ca_name')],
+ "client_ca_cert" => "%s/%s.crt" % [scope.lookupvar('x509::variables::local_CAs'), scope.lookupvar('site_config::params::client_ca_name')],
+ "secret_token" => @secret_token,
+ "client_cert_lifespan" => cert_options['life_span'],
+ "client_cert_bit_size" => cert_options['bit_size'].to_i,
+ "client_cert_hash" => cert_options['digest'],
+ "allow_limited_certs" => @webapp['allow_limited_certs'],
+ "allow_unlimited_certs" => @webapp['allow_unlimited_certs'],
+ "allow_anonymous_certs" => @webapp['allow_anonymous_certs'],
+ "limited_cert_prefix" => cert_options['limited_prefix'],
+ "unlimited_cert_prefix" => cert_options['unlimited_prefix'],
+ "minimum_client_version" => @webapp['client_version']['min'],
+ "default_service_level" => @webapp['default_service_level'],
+ "service_levels" => @webapp['service_levels'],
+ "allow_registration" => @webapp['allow_registration'],
+ "handle_blacklist" => @webapp['forbidden_usernames'],
+ "invite_required" => @webapp['invite_required'],
+ "api_tokens" => @webapp['api_tokens']
+}
+
+if @webapp['engines'] && @webapp['engines'].any?
+ production["engines"] = @webapp['engines']
+end
+-%>
+#
+# This file is generated by puppet. This file inherits from defaults.yml.
+#
+<%= scope.function_sorted_yaml([{"production" => production}]) %>
diff --git a/puppet/modules/site_webapp/templates/couchdb.admin.yml.erb b/puppet/modules/site_webapp/templates/couchdb.admin.yml.erb
new file mode 100644
index 00000000..a0921add
--- /dev/null
+++ b/puppet/modules/site_webapp/templates/couchdb.admin.yml.erb
@@ -0,0 +1,9 @@
+production:
+ prefix: ""
+ protocol: 'http'
+ host: <%= @couchdb_host %>
+ port: <%= @couchdb_port %>
+ auto_update_design_doc: false
+ username: <%= @couchdb_admin_user %>
+ password: <%= @couchdb_admin_password %>
+
diff --git a/puppet/modules/site_webapp/templates/couchdb.yml.erb b/puppet/modules/site_webapp/templates/couchdb.yml.erb
new file mode 100644
index 00000000..2bef0af5
--- /dev/null
+++ b/puppet/modules/site_webapp/templates/couchdb.yml.erb
@@ -0,0 +1,9 @@
+production:
+ prefix: ""
+ protocol: 'http'
+ host: <%= @couchdb_host %>
+ port: <%= @couchdb_port %>
+ auto_update_design_doc: false
+ username: <%= @couchdb_webapp_user %>
+ password: <%= @couchdb_webapp_password %>
+
diff --git a/puppet/modules/soledad/manifests/client.pp b/puppet/modules/soledad/manifests/client.pp
new file mode 100644
index 00000000..e470adeb
--- /dev/null
+++ b/puppet/modules/soledad/manifests/client.pp
@@ -0,0 +1,16 @@
+# setup soledad-client
+# currently needed on webapp node to run the soledad-sync test
+class soledad::client {
+
+ tag 'leap_service'
+ include soledad::common
+
+ package {
+ 'soledad-client':
+ ensure => latest,
+ require => Class['site_apt::leap_repo'];
+ 'python-u1db':
+ ensure => latest;
+ }
+
+}
diff --git a/puppet/modules/soledad/manifests/common.pp b/puppet/modules/soledad/manifests/common.pp
new file mode 100644
index 00000000..8d8339d4
--- /dev/null
+++ b/puppet/modules/soledad/manifests/common.pp
@@ -0,0 +1,8 @@
+# install soledad-common, needed by both soledad-client and soledad-server
+class soledad::common {
+
+ package { 'soledad-common':
+ ensure => latest;
+ }
+
+}
diff --git a/puppet/modules/soledad/manifests/server.pp b/puppet/modules/soledad/manifests/server.pp
new file mode 100644
index 00000000..8674f421
--- /dev/null
+++ b/puppet/modules/soledad/manifests/server.pp
@@ -0,0 +1,104 @@
+# setup soledad-server
+class soledad::server {
+ tag 'leap_service'
+
+ include site_config::default
+ include soledad::common
+
+ $soledad = hiera('soledad')
+ $couchdb_user = $soledad['couchdb_soledad_user']['username']
+ $couchdb_password = $soledad['couchdb_soledad_user']['password']
+ $couchdb_leap_mx_user = $soledad['couchdb_leap_mx_user']['username']
+
+ $couchdb_host = 'localhost'
+ $couchdb_port = '5984'
+
+ $soledad_port = $soledad['port']
+
+ $sources = hiera('sources')
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+
+ #
+ # SOLEDAD CONFIG
+ #
+
+ file {
+ '/etc/soledad':
+ ensure => directory,
+ owner => 'root',
+ group => 'root',
+ mode => '0755';
+ '/etc/soledad/soledad-server.conf':
+ content => template('soledad/soledad-server.conf.erb'),
+ owner => 'soledad',
+ group => 'soledad',
+ mode => '0640',
+ notify => Service['soledad-server'],
+ require => [ User['soledad'], Group['soledad'] ];
+ '/srv/leap/soledad':
+ ensure => directory,
+ owner => 'soledad',
+ group => 'soledad',
+ require => [ User['soledad'], Group['soledad'] ];
+ '/var/lib/soledad':
+ ensure => directory,
+ owner => 'soledad',
+ group => 'soledad',
+ require => [ User['soledad'], Group['soledad'] ];
+ }
+
+ package { $sources['soledad']['package']:
+ ensure => $sources['soledad']['revision'],
+ require => Class['site_apt::leap_repo'];
+ }
+
+ file { '/etc/default/soledad':
+ content => template('soledad/default-soledad.erb'),
+ owner => 'soledad',
+ group => 'soledad',
+ mode => '0600',
+ notify => Service['soledad-server'],
+ require => [ User['soledad'], Group['soledad'] ];
+ }
+
+ service { 'soledad-server':
+ ensure => running,
+ enable => true,
+ hasstatus => true,
+ hasrestart => true,
+ require => [ User['soledad'], Group['soledad'] ],
+ subscribe => [
+ Package['soledad-server'],
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ];
+ }
+
+ include site_shorewall::soledad
+ include site_check_mk::agent::soledad
+
+ # set up users, group and directories for soledad-server
+ # although the soledad users are already created by the
+ # soledad-server package
+ group { 'soledad':
+ ensure => present,
+ system => true,
+ }
+ user {
+ 'soledad':
+ ensure => present,
+ system => true,
+ gid => 'soledad',
+ home => '/srv/leap/soledad',
+ require => Group['soledad'];
+ 'soledad-admin':
+ ensure => present,
+ system => true,
+ gid => 'soledad',
+ home => '/srv/leap/soledad',
+ require => Group['soledad'];
+ }
+}
diff --git a/puppet/modules/soledad/templates/default-soledad.erb b/puppet/modules/soledad/templates/default-soledad.erb
new file mode 100644
index 00000000..32504e38
--- /dev/null
+++ b/puppet/modules/soledad/templates/default-soledad.erb
@@ -0,0 +1,5 @@
+# this file is managed by puppet
+START=yes
+CERT_PATH=<%= scope.lookupvar('x509::variables::certs') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.crt
+PRIVKEY_PATH=<%= scope.lookupvar('x509::variables::keys') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.key
+HTTPS_PORT=<%=@soledad_port%>
diff --git a/puppet/modules/soledad/templates/soledad-server.conf.erb b/puppet/modules/soledad/templates/soledad-server.conf.erb
new file mode 100644
index 00000000..1c6a0d19
--- /dev/null
+++ b/puppet/modules/soledad/templates/soledad-server.conf.erb
@@ -0,0 +1,12 @@
+[soledad-server]
+couch_url = http://<%= @couchdb_user %>:<%= @couchdb_password %>@<%= @couchdb_host %>:<%= @couchdb_port %>
+create_cmd = sudo -u soledad-admin /usr/bin/create-user-db
+admin_netrc = /etc/couchdb/couchdb-soledad-admin.netrc
+
+[database-security]
+members = <%= @couchdb_user %>, <%= @couchdb_leap_mx_user %>
+# not needed, but for documentation:
+# members_roles = replication
+# admins = admin
+# admins_roles = replication
+
diff --git a/puppet/modules/templatewlv/Modulefile b/puppet/modules/templatewlv/Modulefile
new file mode 100644
index 00000000..8007a070
--- /dev/null
+++ b/puppet/modules/templatewlv/Modulefile
@@ -0,0 +1,11 @@
+name 'duritong-templatewlv'
+version '0.0.1'
+source 'https://github.com/duritong/puppet-templatewlv.git'
+author 'duritong'
+license 'Apache License, Version 2.0'
+summary 'Template With Local Variables'
+description 'Pass local variables to templates'
+project_page 'https://github.com/duritong/puppet-templatewlv'
+
+## Add dependencies, if any:
+# dependency 'username/name', '>= 1.2.0'
diff --git a/puppet/modules/templatewlv/README.md b/puppet/modules/templatewlv/README.md
new file mode 100644
index 00000000..5ab01e45
--- /dev/null
+++ b/puppet/modules/templatewlv/README.md
@@ -0,0 +1,21 @@
+# templatewlv
+
+## Template With Local Variables
+
+A wrapper around puppet's template function. See
+[the templating docs](http://docs.puppetlabs.com/guides/templating.html) for
+the basic functionality.
+
+Additionally, you can pass a hash as the last argument; its entries are turned into
+local variables that are available inside the template. This lets you define
+variables in one template and pass them down to a sub-template that you include
+from the current template. An example:
+
+ scope.function_templatewlv(['sub_template', { 'local_var' => 'value' }])
+
+Note that if multiple templates are specified, their output is all
+concatenated and returned as the output of the function.
+
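+From a manifest, the function can be called like any other rvalue function. A
+minimal sketch (the template path and variable name below are made up):
+
+    file { '/etc/foo.conf':
+      content => templatewlv('mymodule/foo.conf.erb', { 'listen_port' => '8080' }),
+    }
+
+Inside `foo.conf.erb` the value is then available as `@listen_port` (or by its
+bare name), because the wrapper exposes every string-keyed entry of the hash as
+an instance variable of the template.
+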
+# Who - License
+
+duritong - Apache License, Version 2.0
diff --git a/puppet/modules/templatewlv/lib/puppet/parser/functions/templatewlv.rb b/puppet/modules/templatewlv/lib/puppet/parser/functions/templatewlv.rb
new file mode 100644
index 00000000..c9579e2c
--- /dev/null
+++ b/puppet/modules/templatewlv/lib/puppet/parser/functions/templatewlv.rb
@@ -0,0 +1,41 @@
+require File.join(File.dirname(__FILE__),'../templatewrapperwlv')
+Puppet::Parser::Functions::newfunction(:templatewlv, :type => :rvalue, :arity => -2, :doc =>
+ "A wrapper around puppet's template function. See
+ [the templating docs](http://docs.puppetlabs.com/guides/templating.html) for
+ the basic functionality.
+
+  Additionally, you can pass a hash as the last argument; its entries are turned into
+  local variables that are available inside the template. This lets you define
+  variables in one template and pass them down to a sub-template that you include
+  from the current template. An example:
+
+ scope.function_templatewlv(['sub_template', { 'local_var' => 'value' }])
+
+ Note that if multiple templates are specified, their output is all
+ concatenated and returned as the output of the function.") do |vals|
+
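+  # If the last argument is a hash it carries the local variables; everything
+  # before it is treated as a template name.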
+ if vals.last.is_a?(Hash)
+ local_vars = vals.last
+ local_vals = vals[0..-2]
+ else
+ local_vars = {}
+ local_vals = vals
+ end
+
+ result = nil
+ local_vals.collect do |file|
+ # Use a wrapper, so the template can't get access to the full
+ # Scope object.
+ debug "Retrieving template #{file}"
+
+ wrapper = Puppet::Parser::TemplateWrapperWlv.new(self,local_vars)
+ wrapper.file = file
+ begin
+ wrapper.result
+ rescue => detail
+ info = detail.backtrace.first.split(':')
+ raise Puppet::ParseError,
+ "Failed to parse template #{file}:\n Filepath: #{info[0]}\n Line: #{info[1]}\n Detail: #{detail}\n"
+ end
+ end.join("")
+end
diff --git a/puppet/modules/templatewlv/lib/puppet/parser/templatewrapperwlv.rb b/puppet/modules/templatewlv/lib/puppet/parser/templatewrapperwlv.rb
new file mode 100644
index 00000000..f1753e18
--- /dev/null
+++ b/puppet/modules/templatewlv/lib/puppet/parser/templatewrapperwlv.rb
@@ -0,0 +1,39 @@
+# A wrapper for templates, that allows you to additionally define
+# local variables
+class Puppet::Parser::TemplateWrapperWlv < Puppet::Parser::TemplateWrapper
+ attr_reader :local_vars
+ def initialize(scope, local_vars)
+ super(scope)
+ @local_vars = local_vars
+ end
+
+ # Should return true if a variable is defined, false if it is not
+ def has_variable?(name)
+ super(name) || local_vars.keys.include?(name.to_s)
+ end
+
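+  # Resolve otherwise-unknown names against the local variables first, so a
+  # template can reference a passed-in variable by its bare name; anything
+  # else falls through to the normal scope lookup.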
+ def method_missing(name, *args)
+ if local_vars.keys.include?(n=name.to_s)
+ local_vars[n]
+ else
+ super(name, *args)
+ end
+ end
+
+ def result(string = nil)
+ # Expose all the variables in our scope as instance variables of the
+ # current object, making it possible to access them without conflict
+ # to the regular methods.
+ benchmark(:debug, "Bound local template variables for #{@__file__}") do
+ local_vars.each do |name, value|
+ if name.kind_of?(String)
+ realname = name.gsub(/[^\w]/, "_")
+ else
+ realname = name
+ end
+ instance_variable_set("@#{realname}", value)
+ end
+ end
+ super(string)
+ end
+end
diff --git a/puppet/modules/try/README.md b/puppet/modules/try/README.md
new file mode 100644
index 00000000..3888661e
--- /dev/null
+++ b/puppet/modules/try/README.md
@@ -0,0 +1,13 @@
+This module provides a "try" wrapper around common resource types.
+
+For example:
+
+ try::file {
+ '/path/to/file':
+ ensure => 'link',
+ target => $target;
+ }
+
+This works just like `file`, but is silently skipped if `$target` is undefined or the target does not exist.
+
+So far, only the `file` type with symlinks is supported.
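+
+The define will also try to restore a file from git when its target goes away:
+if the destination sits inside a git repository and `$target` is undefined or
+missing, the previously committed version is checked out again (see
+`manifests/file.pp`). A sketch with a made-up path:
+
+    try::file {
+      '/srv/repo/config/feature.conf':
+        ensure  => 'link',
+        target  => $feature_config,
+        restore => true;
+    }
+
+`restore => true` is already the default; it is spelled out here only to make
+the behaviour explicit.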
diff --git a/puppet/modules/try/manifests/file.pp b/puppet/modules/try/manifests/file.pp
new file mode 100644
index 00000000..2493d343
--- /dev/null
+++ b/puppet/modules/try/manifests/file.pp
@@ -0,0 +1,114 @@
+#
+# Works like the built-in type "file", but gets gracefully ignored if the target/source does not exist or is undefined.
+#
+# Also, if the source or target doesn't exist, and the destination is a git repo, then the file is restored from git.
+#
+# All executable paths are hardcoded to their locations on Debian.
+#
+# known limitations:
+# * this is far too noisy
+# * $restore does not work for directories
+# * only file:// $source is supported
+# * $content is not supported, only $target or $source.
+# * does not auto-require all the parent directories like 'file' does
+#
+define try::file (
+ $ensure = undef,
+ $target = undef,
+ $source = undef,
+ $owner = undef,
+ $group = undef,
+ $recurse = undef,
+ $purge = undef,
+ $force = undef,
+ $mode = undef,
+ $restore = true) {
+
+ # dummy exec to propagate requires:
+  # the 'require' metaparameter gets triggered by this dummy exec,
+  # so other resources only need to depend on it to capture all requires.
+ # exec { $name: command => "/bin/true" }
+
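+  # '/usr/bin/test ${param}' exits 0 only when the interpolated parameter is a
+  # non-empty string, so each of these refreshonly execs quietly skips its
+  # command whenever the corresponding parameter was left undef.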
+ exec {
+ "chmod_${name}":
+ command => "/bin/chmod -R ${mode} '${name}'",
+ onlyif => "/usr/bin/test ${mode}",
+ refreshonly => true,
+ loglevel => debug;
+ "chown_${name}":
+ command => "/bin/chown -R ${owner} '${name}'",
+ onlyif => "/usr/bin/test ${owner}",
+ refreshonly => true,
+ loglevel => debug;
+ "chgrp_${name}":
+ command => "/bin/chgrp -R ${group} '${name}'",
+ onlyif => "/usr/bin/test ${group}",
+ refreshonly => true,
+ loglevel => debug;
+ }
+
+ if $target {
+ exec { "symlink_${name}":
+ command => "/bin/ln -s ${target} ${name}",
+ onlyif => "/usr/bin/test -d '${target}'",
+ }
+ } elsif $source {
+ if $ensure == 'directory' {
+ if $purge {
+ exec { "rsync_${name}":
+ command => "/usr/bin/rsync -r --delete '${source}/' '${name}'",
+ onlyif => "/usr/bin/test -d '${source}'",
+ unless => "/usr/bin/diff -rq '${source}' '${name}'",
+ notify => [Exec["chmod_${name}"], Exec["chown_${name}"], Exec["chgrp_${name}"]]
+ }
+ } else {
+ exec { "cp_r_${name}":
+ command => "/bin/cp -r '${source}' '${name}'",
+ onlyif => "/usr/bin/test -d '${source}'",
+ unless => "/usr/bin/diff -rq '${source}' '${name}'",
+ notify => [Exec["chmod_${name}"], Exec["chown_${name}"], Exec["chgrp_${name}"]]
+ }
+ }
+ } else {
+ exec { "cp_${name}":
+ command => "/bin/cp --remove-destination '${source}' '${name}'",
+ onlyif => "/usr/bin/test -e '${source}'",
+ unless => "/usr/bin/test ! -h '${name}' && /usr/bin/diff -q '${source}' '${name}'",
+ notify => [Exec["chmod_${name}"], Exec["chown_${name}"], Exec["chgrp_${name}"]]
+ }
+ }
+ }
+
+ #
+ # if the target/source does not exist (or is undef), and the file happens to be in a git repo,
+ # then restore the file to its original state.
+ #
+
+ if $target {
+ $target_or_source = $target
+ } else {
+ $target_or_source = $source
+ }
+
+ if ($target_or_source == undef) or $restore {
+ $file_basename = basename($name)
+ $file_dirname = dirname($name)
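+    # 'git rev-parse' guards the unlink, so the existing file is only removed
+    # when ${file_dirname} is inside a git work tree; the file is then checked
+    # out again from git, given the same owner as its parent directory, and the
+    # trailing 'true' keeps the exec from ever failing the run.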
+ $command = "git rev-parse && unlink '${name}'; git checkout -- '${file_basename}' && chown --reference='${file_dirname}' '${name}'; true"
+ debug($command)
+
+ if $target_or_source == undef {
+ exec { "restore_${name}":
+ command => $command,
+ cwd => $file_dirname,
+ loglevel => info;
+ }
+ } else {
+ exec { "restore_${name}":
+ unless => "/usr/bin/test -e '${target_or_source}'",
+ command => $command,
+ cwd => $file_dirname,
+ loglevel => info;
+ }
+ }
+ }
+}
diff --git a/puppet/modules/try/manifests/init.pp b/puppet/modules/try/manifests/init.pp
new file mode 100644
index 00000000..1d2108c9
--- /dev/null
+++ b/puppet/modules/try/manifests/init.pp
@@ -0,0 +1,3 @@
+class try {
+
+}