author     Micah Anderson <micah@leap.se>  2014-04-22 14:13:46 -0400
committer  Micah Anderson <micah@leap.se>  2014-04-22 14:13:46 -0400
commit     327d5c934e408f90011d7949b89ab01fed88998e (patch)
tree       77cfefffc8f9ffe160c4413b26dd5ca5cdd6f1e8
parent     ca11482dd7cd4ea8ffa69407ee2fd5b5e1b7981b (diff)
parent     4295f334ea4f92d7fb47f7121a42633630c368d1 (diff)
Merge branch 'develop' (0.5.0)
Conflicts:
	.gitignore

Change-Id: I778f3e1f1f4832f5894bc149ead67e9a4becf304
-rw-r--r--  .gitignore | 1
-rw-r--r--  .gitmodules | 72
-rw-r--r--  .mailmap | 8
-rw-r--r--  README.md | 47
-rwxr-xr-x  bin/run_tests | 595
-rw-r--r--  doc/development.md | 272
-rw-r--r--  doc/en.md | 4
-rw-r--r--  doc/faq.md | 53
-rw-r--r--  doc/guide.md | 57
-rw-r--r--  doc/known-issues.md | 6
-rw-r--r--  doc/quick-start.md | 336
-rw-r--r--  doc/troubleshooting.md | 147
-rw-r--r--  leap-debug-remote.sh | 23
-rw-r--r--  platform.rb | 14
-rw-r--r--  provider_base/common.json | 13
-rw-r--r--  provider_base/files/service-definitions/provider.json.erb | 6
-rw-r--r--  provider_base/files/service-definitions/v1/eip-service.json.erb | 11
-rw-r--r--  provider_base/files/service-definitions/v1/smtp-service.json.erb | 4
-rw-r--r--  provider_base/provider.json | 32
-rw-r--r--  provider_base/services/couchdb.json | 86
-rw-r--r--  provider_base/services/monitor.json | 18
-rw-r--r--  provider_base/services/mx.json | 24
-rw-r--r--  provider_base/services/openvpn.json | 16
-rw-r--r--  provider_base/services/soledad.json | 14
-rw-r--r--  provider_base/services/static.json | 6
-rw-r--r--  provider_base/services/tor.json | 2
-rw-r--r--  provider_base/services/webapp.json | 48
-rw-r--r--  provider_base/tags/development.json | 4
-rwxr-xr-x  puppet/bin/apply_on_node.sh | 30
-rw-r--r--  puppet/manifests/setup.pp | 13
-rw-r--r--  puppet/manifests/site.pp | 27
m---------  puppet/modules/apt | 0
m---------  puppet/modules/backupninja | 0
m---------  puppet/modules/check_mk | 0
m---------  puppet/modules/couchdb | 0
m---------  puppet/modules/git | 0
-rw-r--r--  puppet/modules/leap_mx/manifests/init.pp | 62
-rw-r--r--  puppet/modules/leap_mx/templates/mx.conf.erb | 15
m---------  puppet/modules/postfix | 0
m---------  puppet/modules/rsyslog | 0
m---------  puppet/modules/rubygems | 0
-rw-r--r--  puppet/modules/site_apache/files/conf.d/security | 55
-rw-r--r--  puppet/modules/site_apache/manifests/common.pp | 26
-rw-r--r--  puppet/modules/site_apache/manifests/module/alias.pp | 5
-rw-r--r--  puppet/modules/site_apache/manifests/module/expires.pp | 4
-rw-r--r--  puppet/modules/site_apache/manifests/module/headers.pp | 5
-rw-r--r--  puppet/modules/site_apache/manifests/module/removeip.pp | 5
-rw-r--r--  puppet/modules/site_apache/manifests/module/rewrite.pp | 5
-rw-r--r--  puppet/modules/site_apache/templates/vhosts.d/api.conf.erb | 19
-rw-r--r--  puppet/modules/site_apache/templates/vhosts.d/common.conf.erb | 73
-rw-r--r--  puppet/modules/site_apache/templates/vhosts.d/leap_webapp.conf.erb | 46
-rw-r--r--  puppet/modules/site_apt/files/keys/cloudant-key.asc | 52
-rw-r--r--  puppet/modules/site_apt/files/keys/leap_key.asc | 18
-rw-r--r--  puppet/modules/site_apt/manifests/init.pp | 13
-rw-r--r--  puppet/modules/site_apt/manifests/preferences/check_mk.pp | 9
-rw-r--r--  puppet/modules/site_apt/manifests/preferences/twisted.pp | 9
-rw-r--r--  puppet/modules/site_apt/templates/wheezy/postfix.seeds | 1
-rw-r--r--  puppet/modules/site_check_mk/files/agent/local_checks/all_hosts/run_node_tests.sh | 5
-rwxr-xr-x  puppet/modules/site_check_mk/files/agent/local_checks/mx/check_leap_mx.sh | 33
-rw-r--r--  puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg | 20
-rw-r--r--  puppet/modules/site_check_mk/files/agent/logwatch/leap_mx.cfg | 4
-rw-r--r--  puppet/modules/site_check_mk/files/agent/logwatch/logwatch.cfg | 31
-rw-r--r--  puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg | 5
-rw-r--r--  puppet/modules/site_check_mk/files/agent/logwatch/syslog/couchdb.cfg | 7
-rw-r--r--  puppet/modules/site_check_mk/files/agent/logwatch/syslog/openvpn.cfg | 7
-rw-r--r--  puppet/modules/site_check_mk/files/agent/logwatch/syslog/stunnel.cfg | 9
-rw-r--r--  puppet/modules/site_check_mk/files/agent/logwatch/syslog/tapicero.cfg | 8
-rw-r--r--  puppet/modules/site_check_mk/files/agent/logwatch/syslog/webapp.cfg | 5
-rw-r--r--  puppet/modules/site_check_mk/files/agent/logwatch/syslog_header.cfg | 1
-rw-r--r--  puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg | 11
-rwxr-xr-x  puppet/modules/site_check_mk/files/agent/nagios_plugins/check_unix_open_fds.pl | 322
-rwxr-xr-x  puppet/modules/site_check_mk/files/agent/plugins/mk_logwatch.1.2.4 | 374
-rw-r--r--  puppet/modules/site_check_mk/manifests/agent.pp | 28
-rw-r--r--  puppet/modules/site_check_mk/manifests/agent/couchdb.pp | 36
-rw-r--r--  puppet/modules/site_check_mk/manifests/agent/haproxy.pp | 12
-rw-r--r--  puppet/modules/site_check_mk/manifests/agent/logwatch.pp | 36
-rw-r--r--  puppet/modules/site_check_mk/manifests/agent/logwatch/syslog.pp | 18
-rw-r--r--  puppet/modules/site_check_mk/manifests/agent/mrpe.pp | 18
-rw-r--r--  puppet/modules/site_check_mk/manifests/agent/mx.pp | 23
-rw-r--r--  puppet/modules/site_check_mk/manifests/agent/openvpn.pp | 10
-rw-r--r--  puppet/modules/site_check_mk/manifests/agent/package/nagios_plugins_contrib.pp | 5
-rw-r--r--  puppet/modules/site_check_mk/manifests/agent/package/perl_plugin.pp | 5
-rw-r--r--  puppet/modules/site_check_mk/manifests/agent/soledad.pp | 14
-rw-r--r--  puppet/modules/site_check_mk/manifests/agent/stunnel.pp | 9
-rw-r--r--  puppet/modules/site_check_mk/manifests/agent/tapicero.pp | 16
-rw-r--r--  puppet/modules/site_check_mk/manifests/agent/webapp.pp | 26
-rw-r--r--  puppet/modules/site_check_mk/manifests/server.pp | 64
-rw-r--r--  puppet/modules/site_check_mk/templates/use_ssh.mk | 6
-rw-r--r--  puppet/modules/site_config/manifests/caching_resolver.pp | 1
-rw-r--r--  puppet/modules/site_config/manifests/default.pp | 37
-rw-r--r--  puppet/modules/site_config/manifests/files.pp | 23
-rw-r--r--  puppet/modules/site_config/manifests/hosts.pp | 3
-rw-r--r--  puppet/modules/site_config/manifests/initial_firewall.pp | 62
-rw-r--r--  puppet/modules/site_config/manifests/packages/base.pp (renamed from puppet/modules/site_config/manifests/base_packages.pp) | 18
-rw-r--r--  puppet/modules/site_config/manifests/packages/build_essential.pp | 11
-rw-r--r--  puppet/modules/site_config/manifests/packages/gnutls.pp | 5
-rw-r--r--  puppet/modules/site_config/manifests/packages/uninstall.pp | 16
-rw-r--r--  puppet/modules/site_config/manifests/params.pp | 16
-rw-r--r--  puppet/modules/site_config/manifests/resolvconf.pp | 9
-rw-r--r--  puppet/modules/site_config/manifests/ruby/dev.pp | 8
-rw-r--r--  puppet/modules/site_config/manifests/setup.pp | 50
-rw-r--r--  puppet/modules/site_config/manifests/sysctl.pp | 8
-rw-r--r--  puppet/modules/site_config/manifests/syslog.pp | 28
-rw-r--r--  puppet/modules/site_config/manifests/vagrant.pp | 11
-rw-r--r--  puppet/modules/site_config/manifests/x509/ca.pp | 9
-rw-r--r--  puppet/modules/site_config/manifests/x509/ca_bundle.pp | 16
-rw-r--r--  puppet/modules/site_config/manifests/x509/cert.pp | 10
-rw-r--r--  puppet/modules/site_config/manifests/x509/client_ca/ca.pp | 14
-rw-r--r--  puppet/modules/site_config/manifests/x509/client_ca/key.pp | 14
-rw-r--r--  puppet/modules/site_config/manifests/x509/commercial/ca.pp | 9
-rw-r--r--  puppet/modules/site_config/manifests/x509/commercial/cert.pp | 10
-rw-r--r--  puppet/modules/site_config/manifests/x509/commercial/key.pp | 9
-rw-r--r--  puppet/modules/site_config/manifests/x509/key.pp | 9
-rw-r--r--  puppet/modules/site_config/templates/hosts | 6
-rw-r--r--  puppet/modules/site_config/templates/ipv4firewall_up.rules.erb | 22
-rw-r--r--  puppet/modules/site_config/templates/ipv6firewall_up.rules.erb | 7
-rw-r--r--  puppet/modules/site_couchdb/files/couchdb_scripts_defaults.conf | 4
-rw-r--r--  puppet/modules/site_couchdb/files/designs/Readme.md | 14
-rw-r--r--  puppet/modules/site_couchdb/files/designs/customers/Customer.json | 18
-rw-r--r--  puppet/modules/site_couchdb/files/designs/identities/Identity.json | 28
-rw-r--r--  puppet/modules/site_couchdb/files/designs/messages/Message.json | 18
-rw-r--r--  puppet/modules/site_couchdb/files/designs/sessions/Session.json | 8
-rw-r--r--  puppet/modules/site_couchdb/files/designs/shared/docs.json | 8
-rw-r--r--  puppet/modules/site_couchdb/files/designs/shared/syncs.json | 11
-rw-r--r--  puppet/modules/site_couchdb/files/designs/shared/transactions.json | 13
-rw-r--r--  puppet/modules/site_couchdb/files/designs/tickets/Ticket.json | 50
-rw-r--r--  puppet/modules/site_couchdb/files/designs/tokens/Token.json | 14
-rw-r--r--  puppet/modules/site_couchdb/files/designs/users/User.json | 22
-rw-r--r--  puppet/modules/site_couchdb/manifests/add_users.pp | 54
-rw-r--r--  puppet/modules/site_couchdb/manifests/backup.pp | 23
-rw-r--r--  puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp | 5
-rw-r--r--  puppet/modules/site_couchdb/manifests/bigcouch/compaction.pp | 8
-rw-r--r--  puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp | 11
-rw-r--r--  puppet/modules/site_couchdb/manifests/create_dbs.pp | 70
-rw-r--r--  puppet/modules/site_couchdb/manifests/designs.pp | 20
-rw-r--r--  puppet/modules/site_couchdb/manifests/init.pp | 155
-rw-r--r--  puppet/modules/site_couchdb/manifests/logrotate.pp | 12
-rw-r--r--  puppet/modules/site_couchdb/manifests/stunnel.pp | 46
-rw-r--r--  puppet/modules/site_haproxy/files/haproxy-stats.cfg | 6
-rw-r--r--  puppet/modules/site_haproxy/manifests/init.pp | 8
-rw-r--r--  puppet/modules/site_mx/manifests/couchdb.pp | 23
-rw-r--r--  puppet/modules/site_mx/manifests/haproxy.pp | 14
-rw-r--r--  puppet/modules/site_mx/manifests/init.pp | 19
-rw-r--r--  puppet/modules/site_nagios/files/configs/Debian/nagios.cfg | 91
-rw-r--r--  puppet/modules/site_nagios/manifests/add_host.pp | 31
-rw-r--r--  puppet/modules/site_nagios/manifests/add_host_services.pp | 28
-rw-r--r--  puppet/modules/site_nagios/manifests/add_service.pp | 26
-rw-r--r--  puppet/modules/site_nagios/manifests/init.pp | 2
-rw-r--r--  puppet/modules/site_nagios/manifests/server.pp | 40
-rw-r--r--  puppet/modules/site_nagios/manifests/server/apache.pp | 7
-rw-r--r--  puppet/modules/site_nagios/manifests/server/purge.pp | 20
-rw-r--r--  puppet/modules/site_nickserver/manifests/init.pp | 89
-rw-r--r--  puppet/modules/site_nickserver/templates/nickserver-proxy.conf.erb | 6
-rw-r--r--  puppet/modules/site_nickserver/templates/nickserver.yml.erb | 2
-rw-r--r--  puppet/modules/site_openvpn/manifests/dh_key.pp | 10
-rw-r--r--  puppet/modules/site_openvpn/manifests/init.pp | 78
-rw-r--r--  puppet/modules/site_openvpn/manifests/keys.pp | 51
-rw-r--r--  puppet/modules/site_openvpn/manifests/resolver.pp | 10
-rw-r--r--  puppet/modules/site_openvpn/manifests/server_config.pp | 70
-rw-r--r--  puppet/modules/site_openvpn/templates/add_gateway_ips.sh.erb | 8
-rw-r--r--  puppet/modules/site_postfix/files/checks/received_anon | 2
-rw-r--r--  puppet/modules/site_postfix/manifests/debug.pp | 9
-rw-r--r--  puppet/modules/site_postfix/manifests/mx.pp | 74
-rw-r--r--  puppet/modules/site_postfix/manifests/mx/checks.pp | 41
-rw-r--r--  puppet/modules/site_postfix/manifests/mx/reserved_aliases.pp | 15
-rw-r--r--  puppet/modules/site_postfix/manifests/mx/smtp_auth.pp | 6
-rw-r--r--  puppet/modules/site_postfix/manifests/mx/smtp_tls.pp | 27
-rw-r--r--  puppet/modules/site_postfix/manifests/mx/smtpd_checks.pp | 31
-rw-r--r--  puppet/modules/site_postfix/manifests/mx/smtpd_tls.pp | 55
-rw-r--r--  puppet/modules/site_postfix/manifests/satellite.pp | 47
-rw-r--r--  puppet/modules/site_postfix/templates/checks/helo_access.erb | 21
-rw-r--r--  puppet/modules/site_shorewall/manifests/defaults.pp | 34
-rw-r--r--  puppet/modules/site_shorewall/manifests/mx.pp | 24
-rw-r--r--  puppet/modules/site_shorewall/manifests/service/smtp.pp | 13
-rw-r--r--  puppet/modules/site_shorewall/manifests/soledad.pp | 23
-rw-r--r--  puppet/modules/site_shorewall/manifests/sshd.pp | 6
-rw-r--r--  puppet/modules/site_squid_deb_proxy/manifests/client.pp | 5
-rw-r--r--  puppet/modules/site_sshd/manifests/authorized_keys.pp | 3
-rw-r--r--  puppet/modules/site_sshd/manifests/init.pp | 18
-rw-r--r--  puppet/modules/site_sshd/templates/authorized_keys.erb | 4
-rw-r--r--  puppet/modules/site_sshd/templates/ssh_config.erb | 23
-rw-r--r--  puppet/modules/site_sshd/templates/ssh_known_hosts.erb | 7
-rw-r--r--  puppet/modules/site_static/README | 3
-rw-r--r--  puppet/modules/site_static/manifests/domain.pp | 28
-rw-r--r--  puppet/modules/site_static/manifests/init.pp | 17
-rw-r--r--  puppet/modules/site_static/manifests/location.pp | 25
-rw-r--r--  puppet/modules/site_static/templates/apache.conf.erb | 77
-rw-r--r--  puppet/modules/site_stunnel/manifests/clients.pp | 9
-rw-r--r--  puppet/modules/site_stunnel/manifests/setup.pp | 24
-rw-r--r--  puppet/modules/site_tor/manifests/init.pp | 20
-rw-r--r--  puppet/modules/site_webapp/files/migrate_design_documents | 16
-rw-r--r--  puppet/modules/site_webapp/manifests/apache.pp | 55
-rw-r--r--  puppet/modules/site_webapp/manifests/client_ca.pp | 25
-rw-r--r--  puppet/modules/site_webapp/manifests/couchdb.pp | 53
-rw-r--r--  puppet/modules/site_webapp/manifests/cron.pp | 17
-rw-r--r--  puppet/modules/site_webapp/manifests/haproxy.pp | 1
-rw-r--r--  puppet/modules/site_webapp/manifests/init.pp | 99
-rw-r--r--  puppet/modules/site_webapp/templates/config.yml.erb | 8
-rw-r--r--  puppet/modules/site_webapp/templates/couchdb.yml.admin.erb | 9
-rw-r--r--  puppet/modules/site_webapp/templates/haproxy_couchdb.cfg.erb | 25
-rw-r--r--  puppet/modules/soledad/manifests/common.pp | 10
-rw-r--r--  puppet/modules/soledad/manifests/init.pp | 29
-rw-r--r--  puppet/modules/soledad/manifests/server.pp | 63
-rw-r--r--  puppet/modules/soledad/templates/default-soledad.erb | 5
-rw-r--r--  puppet/modules/soledad/templates/soledad-server.conf.erb | 3
m---------  puppet/modules/squid_deb_proxy | 0
m---------  puppet/modules/sshd | 0
m---------  puppet/modules/stdlib | 0
m---------  puppet/modules/stunnel | 0
m---------  puppet/modules/sysctl | 0
-rwxr-xr-x  puppet/modules/tapicero/files/tapicero.init | 60
-rw-r--r--  puppet/modules/tapicero/manifests/init.pp | 123
-rw-r--r--  puppet/modules/tapicero/templates/tapicero.yaml.erb | 42
m---------  puppet/modules/tor | 0
-rw-r--r--  puppet/modules/try/manifests/file.pp | 108
m---------  puppet/modules/vcsrepo | 0
-rw-r--r--  tests/README.md | 12
-rw-r--r--  tests/order.rb | 15
-rw-r--r--  tests/white-box/couchdb.rb | 109
-rw-r--r--  tests/white-box/dummy.rb | 71
-rw-r--r--  tests/white-box/network.rb | 60
-rw-r--r--  tests/white-box/openvpn.rb | 16
-rw-r--r--  tests/white-box/webapp.rb | 63
223 files changed, 6008 insertions, 1051 deletions
diff --git a/.gitignore b/.gitignore
index 62603355..eda5e35f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
.reviewboardrc
+/puppet/modules/site_custom
diff --git a/.gitmodules b/.gitmodules
index 717ae5ed..7005b770 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,75 +1,93 @@
[submodule "puppet/modules/openvpn"]
path = puppet/modules/openvpn
- url = git://github.com/luxflux/puppet-openvpn.git
+ url = https://leap.se/git/puppet_openvpn
[submodule "puppet/modules/concat"]
path = puppet/modules/concat
- url = git://code.leap.se/puppet_concat
+ url = https://leap.se/git/puppet_concat
[submodule "puppet/modules/sshd"]
path = puppet/modules/sshd
- url = git://labs.riseup.net/shared-sshd
+ url = https://leap.se/git/puppet_sshd
[submodule "puppet/modules/apt"]
path = puppet/modules/apt
- url = git://code.leap.se/puppet_apt
+ url = https://leap.se/git/puppet_apt
[submodule "puppet/modules/lsb"]
path = puppet/modules/lsb
- url = git://labs.riseup.net/shared-lsb
+ url = https://leap.se/git/puppet_lsb
[submodule "puppet/modules/ntp"]
path = puppet/modules/ntp
- url = git://github.com/puppetlabs/puppetlabs-ntp.git
+ url = https://leap.se/git/puppet_ntp.git
[submodule "puppet/modules/git"]
path = puppet/modules/git
- url = git://code.leap.se/puppet_git
+ url = https://leap.se/git/puppet_git
[submodule "puppet/modules/common"]
path = puppet/modules/common
- url = git://labs.riseup.net/shared-common
+ url = https://leap.se/git/puppet_common
[submodule "puppet/modules/shorewall"]
path = puppet/modules/shorewall
- url = git://code.leap.se/puppet_shorewall
+ url = https://leap.se/git/puppet_shorewall
[submodule "puppet/modules/resolvconf"]
path = puppet/modules/resolvconf
- url = git://git.puppet.immerda.ch/module-resolvconf.git
+ url = https://leap.se/git/puppet_resolvconf.git
[submodule "puppet/modules/couchdb"]
path = puppet/modules/couchdb
- url = git://code.leap.se/puppet_couchdb
+ url = https://leap.se/git/puppet_couchdb
[submodule "puppet/modules/apache"]
path = puppet/modules/apache
- url = git://code.leap.se/puppet_apache
+ url = https://leap.se/git/puppet_apache
[submodule "puppet/modules/bundler"]
path = puppet/modules/bundler
- url = git://code.leap.se/puppet_bundler
-[submodule "puppet/modules/vcsrepo"]
- path = puppet/modules/vcsrepo
- url = git://labs.riseup.net/module_vcs
+ url = https://leap.se/git/puppet_bundler
[submodule "puppet/modules/rubygems"]
path = puppet/modules/rubygems
- url = git://code.leap.se/puppet_rubygems
+ url = https://leap.se/git/puppet_rubygems
[submodule "puppet/modules/ruby"]
path = puppet/modules/ruby
- url = git://code.leap.se/puppet_ruby
+ url = https://leap.se/git/puppet_ruby
[submodule "puppet/modules/x509"]
path = puppet/modules/x509
- url = git://code.leap.se/puppet_x509
+ url = https://leap.se/git/puppet_x509
[submodule "puppet/modules/passenger"]
path = puppet/modules/passenger
- url = git://code.leap.se/puppet_passenger
+ url = https://leap.se/git/puppet_passenger
[submodule "puppet/modules/augeas"]
path = puppet/modules/augeas
- url = git://code.leap.se/puppet_augeas
+ url = https://leap.se/git/puppet_augeas
[submodule "puppet/modules/stdlib"]
path = puppet/modules/stdlib
- url = git://code.leap.se/puppet_stdlib
+ url = https://leap.se/git/puppet_stdlib
[submodule "puppet/modules/unbound"]
path = puppet/modules/unbound
- url = git://code.leap.se/puppet_unbound
+ url = https://leap.se/git/puppet_unbound
[submodule "puppet/modules/nagios"]
path = puppet/modules/nagios
- url = git://code.leap.se/puppet_nagios
+ url = https://leap.se/git/puppet_nagios
[submodule "puppet/modules/tor"]
path = puppet/modules/tor
- url = git://labs.riseup.net/shared-tor
+ url = https://leap.se/git/puppet_tor
[submodule "puppet/modules/stunnel"]
path = puppet/modules/stunnel
- url = git://code.leap.se/puppet_stunnel
+ url = https://leap.se/git/puppet_stunnel
[submodule "puppet/modules/haproxy"]
path = puppet/modules/haproxy
- url = git://code.leap.se/puppet_haproxy
+ url = https://leap.se/git/puppet_haproxy
+[submodule "puppet/modules/squid_deb_proxy"]
+ path = puppet/modules/squid_deb_proxy
+ url = https://leap.se/git/puppet_squid_deb_proxy
+[submodule "puppet/modules/postfix"]
+ path = puppet/modules/postfix
+ url = https://leap.se/git/puppet_postfix
+[submodule "puppet/modules/vcsrepo"]
+ path = puppet/modules/vcsrepo
+ url = https://leap.se/git/puppet_vcsrepo
+[submodule "puppet/modules/rsyslog"]
+ path = puppet/modules/rsyslog
+ url = https://leap.se/git/puppet_rsyslog
+[submodule "puppet/modules/backupninja"]
+ path = puppet/modules/backupninja
+ url = https://leap.se/git/puppet_backupninja
+[submodule "puppet/modules/sysctl"]
+ path = puppet/modules/sysctl
+ url = https://leap.se/git/puppet_sysctl
+[submodule "puppet/modules/check_mk"]
+ path = puppet/modules/check_mk
+ url = https://leap.se/git/puppet_check_mk
diff --git a/.mailmap b/.mailmap
new file mode 100644
index 00000000..aee70b0a
--- /dev/null
+++ b/.mailmap
@@ -0,0 +1,8 @@
+Varac <varacanero@zeromail.org>
+Micah Anderson <micah@leap.se> Micah Anderson <micah@riseup.net>
+Micah Anderson <micah@leap.se> micah <micah@leap.se>
+Kwadronaut <kwadronaut@leap.se>
+Elijah <elijah@riseup.net> elijah <elijah@ChrUbuntu.(none)>
+Elijah <elijah@riseup.net> elijah <elijah@riseup.net>
+Leap Admins <admin@leap.se> root <root@localhost>
+
diff --git a/README.md b/README.md
index 890008f5..7c253f62 100644
--- a/README.md
+++ b/README.md
@@ -1,65 +1,49 @@
-=============
-Leap Platform
-=============
-
What is it?
-===========
-
-The LEAP Platform is set of complementary packages and server recipes to automate the maintenance of LEAP services in a hardened Debian environment. Its goal is to make it as painless as possible for sysadmins to deploy and maintain a service provider’s infrastructure for secure communication. These recipes define an abstract service provider. It is a set of Puppet modules designed to work together to provide to sysadmins everything they need to manage a service provider infrastructure that provides secure communication services.
-
-As these recipes consist of abstract definitions, in order to configure settings for a particular service provider a system administrator has to obtain the leap command-line interface and create a provider instance. The details of how to get started are contained in the `Quick Start` documentation as detailed below.
+=============================
+The LEAP Platform is a set of complementary packages and server recipes to automate the maintenance of LEAP services in a hardened Debian environment. Its goal is to make it as painless as possible for sysadmins to deploy and maintain a service provider's infrastructure for secure communication. These recipes define an abstract service provider. It is a set of Puppet modules designed to work together to provide sysadmins with everything they need to manage a service provider infrastructure that provides secure communication services.
Getting started
-===============
+=============================
-It is highly recommended that you start by reading the overview of the Leap Platform on the website (https://leap.se/docs/platform) and then begin with the `Quick Start` guide (https://leap.se/docs/platform/quick-start) to walk through a test environment setup to get familiar with how things work before deploying to live servers.
+It is highly recommended that you start by reading the overview of the [LEAP Platform](https://leap.se/docs/platform) and then begin with the [Quick Start guide](https://leap.se/docs/platform/quick-start) to walk through a test environment setup to get familiar with how things work before deploying to live servers.
An offline copy of this documentation is contained in the `doc` subdirectory. For more current updates to the documentation, visit the website.
Requirements
-------------
-
-For a minimal test or develop install we recommend a fairly recent computer x86_64 with hardware virtualization features (AMD-V or VT-x) with plenty of RAM. If you follow the `Quick Start` documentation we will walk you through using Vagrant to setup a test deployment.
-
-For a live deployment of the platform the amount of required (virtual) servers depends on your needs and which services you want to deploy. At the moment, the Leap Platform supports servers with a base Debian Wheezy installation.
+------------------
-While you can deploy all services on one server, we stronly recommend to use seperate servers for better security.
+For testing a virtual deployment simulated on your computer, you will need a fairly recent x86_64 computer with hardware virtualization features (AMD-V or VT-x) and plenty of RAM. If you follow the "Quick Start" documentation we will walk you through using Vagrant to set up a test deployment.
+For a live deployment of the platform, the number of servers required depends on your needs and on which services you want to deploy. At the moment, the LEAP Platform supports servers with a base Debian Wheezy installation.
Troubleshooting
-===============
+=============================
-If you have a problem, we are interested in fixing it!
+If you have a problem, we are interested in fixing it!
-If you have a problem, be sure to have a look at the Known Issues section of the documentation to see if your issue is detailed there.
+If you have a problem, be sure to have a look at the [Known Issues](https://leap.se/docs/platform/known-issues) to see if your issue is detailed there.
If not, the best way for us to solve your problem is if you provide to us the complete log of what you did, and the output that was produced. Please don't cut out what appears to be useless information and only include the error that you received, instead copy and paste the complete log so that we can better determine the overall situation. If you can run the same command that produced the error with a raised verbosity level (such as -v2), that provides us with more useful debugging information.
-Visit https://leap.se/development for contact possibilities.
-
-Known Issues
-------------
-
-* Please read the section in the documentation about Known Issues (https://leap.se/docs/platform/known-issues)
+To capture the log, you can copy it from the console, run `leap --log FILE`, or edit your Leapfile to include `@log = '/tmp/leap.log'`.
+Visit https://leap.se/en/docs/get-involved/communication for details on how to contact the developers.
More Information
================
-For more information about the LEAP Encryption Access Project, please visit the website https://leap.se which also lists contact data.
-
Changelog
---------
For a changelog of the current branch:
- git log
+ git log
Authors and Credits
------------------
-See contributors:
+See contributors:
git shortlog -es --all
@@ -68,6 +52,3 @@ Copyright/License
-----------------
Read LICENSE
-
-
-
diff --git a/bin/run_tests b/bin/run_tests
new file mode 100755
index 00000000..9102c325
--- /dev/null
+++ b/bin/run_tests
@@ -0,0 +1,595 @@
+#!/usr/bin/ruby
+
+#
+# this script will run the unit tests in ../tests/*.rb.
+#
+# Tests for the platform differ from traditional ruby unit tests in a few ways:
+#
+# (1) at the end of every test function, you should call 'pass()'
+# (2) you can specify test dependencies by calling depends_on("TestFirst") in the test class definition.
+# (3) test functions are always run in alphabetical order.
+# (4) any halt or error will stop the testing unless --continue is specified.
+#
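+#
+# For illustration only -- a minimal, hypothetical test file in ../tests/
+# (the class and method names here are made up) might look like:
+#
+#   class TestWebapp < LeapTest
+#     depends_on("TestNetwork")
+#
+#     def test_01_webapp_is_running
+#       assert_running 'apache'
+#       pass
+#     end
+#   end
+#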
+
+require 'minitest/unit'
+require 'yaml'
+require 'tsort'
+require 'net/http'
+
+##
+## EXIT CODES
+##
+
+EXIT_CODES = {
+ :success => 0,
+ :warning => 1,
+ :failure => 2,
+ :error => 3
+}
+
+def bail(code, msg=nil)
+ puts msg if msg
+ if code.is_a? Symbol
+ exit(EXIT_CODES[code])
+ else
+ exit(code)
+ end
+end
+
+##
+## EXCEPTIONS
+##
+
+# this class is raised if a test file wants to be skipped entirely.
+class SkipTest < Exception
+end
+
+# raised if --no-continue and there is an error
+class TestError < Exception
+end
+
+# raised if --no-continue and there is a failure
+class TestFailure < Exception
+end
+
+##
+## CUSTOM UNIT TEST CLASS
+##
+
+#
+# Our custom unit test class. All tests should be subclasses of this.
+#
+class LeapTest < MiniTest::Unit::TestCase
+ class Pass < MiniTest::Assertion
+ end
+
+ def initialize(name)
+ super(name)
+ io # << calling this will suppress the marching ants
+ end
+
+ #
+ # Test class dependencies
+ #
+ def self.depends_on(*class_names)
+ @dependencies ||= []
+ @dependencies += class_names
+ end
+ def self.dependencies
+ @dependencies || []
+ end
+
+ #
+ # returns all the test classes, sorted in dependency order.
+ #
+ def self.test_classes
+ classes = ObjectSpace.each_object(Class).select {|test_class|
+ test_class.ancestors.include?(self)
+ }
+ return TestDependencyGraph.new(classes).sorted
+ end
+
+ def self.tests
+ self.instance_methods.grep(/^test_/).sort
+ end
+
+ #
+ # The default pass just does an `assert true`. In our case, we want to make the passes more explicit.
+ #
+ def pass
+ raise LeapTest::Pass
+ end
+
+ #
+ # the default fail() is part of the kernel and it just throws a runtime exception. for tests,
+ # we want the same behavior as assert(false)
+ #
+ def fail(msg=nil)
+ assert(false, msg)
+ end
+
+ def warn(*msg)
+ method_name = caller.first.split('`').last.gsub(/(block in |')/,'')
+ MiniTest::Unit.runner.warn(self.class, method_name, msg.join("\n"))
+ end
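+  # For example (illustrative message only): calling `warn "unexpectedly slow couchdb response"`
+  # inside a test records a WARN line via the runner without failing the test.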
+
+ # Always runs test methods within a test class in alphanumeric order
+ #
+ def self.test_order
+ :alpha
+ end
+
+ #
+ # attempts a http GET on the url, yields |body, response, error|
+ #
+ def get(url, params=nil)
+ uri = URI(url)
+ if params
+ uri.query = URI.encode_www_form(params)
+ end
+ response = Net::HTTP.get_response(uri)
+ if response.is_a?(Net::HTTPSuccess)
+ yield response.body, response, nil
+ else
+ yield nil, response, nil
+ end
+ rescue => exc
+ yield nil, nil, exc
+ end
+
+ def assert_get(url, params=nil, options=nil)
+ options ||= {}
+ get(url, params) do |body, response, error|
+ if body
+ yield body if block_given?
+ elsif response
+ fail ["Expected a 200 status code from #{url}, but got #{response.code} instead.", options[:error_msg]].compact.join("\n")
+ else
+ fail ["Expected a response from #{url}, but got \"#{error}\" instead.", options[:error_msg]].compact.join("\n")
+ end
+ end
+ end
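+  # Illustrative usage (the URL here is hypothetical):
+  #
+  #   assert_get('http://localhost:5984/') do |body|
+  #     assert body.include?('couchdb')
+  #   end
+  #
+  # This fails the test with a readable message unless the GET returns a 200.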
+
+ #
+ # test if a socket can be connected to
+ #
+
+ #
+ # tcp connection helper with timeout
+ #
+ def try_tcp_connect(host, port, timeout = 5)
+ addr = Socket.getaddrinfo(host, nil)
+ sockaddr = Socket.pack_sockaddr_in(port, addr[0][3])
+
+ Socket.new(Socket.const_get(addr[0][0]), Socket::SOCK_STREAM, 0).tap do |socket|
+ socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1)
+ begin
+ socket.connect_nonblock(sockaddr)
+ rescue IO::WaitReadable
+ if IO.select([socket], nil, nil, timeout) == nil
+ raise "Connection timeout"
+ else
+ socket.connect_nonblock(sockaddr)
+ end
+ rescue IO::WaitWritable
+ if IO.select(nil, [socket], nil, timeout) == nil
+ raise "Connection timeout"
+ else
+ socket.connect_nonblock(sockaddr)
+ end
+ end
+ return socket
+ end
+ end
+
+ def try_tcp_write(socket, timeout = 5)
+ begin
+ socket.write_nonblock("\0")
+ rescue IO::WaitReadable
+ if IO.select([socket], nil, nil, timeout) == nil
+ raise "Write timeout"
+ else
+ retry
+ end
+ rescue IO::WaitWritable
+ if IO.select(nil, [socket], nil, timeout) == nil
+ raise "Write timeout"
+ else
+ retry
+ end
+ end
+ end
+
+ def try_tcp_read(socket, timeout = 5)
+ begin
+ socket.read_nonblock(1)
+ rescue IO::WaitReadable
+ if IO.select([socket], nil, nil, timeout) == nil
+ raise "Read timeout"
+ else
+ retry
+ end
+ rescue IO::WaitWritable
+ if IO.select(nil, [socket], nil, timeout) == nil
+ raise "Read timeout"
+ else
+ retry
+ end
+ end
+ end
+
+ def assert_tcp_socket(host, port, msg=nil)
+ begin
+ socket = try_tcp_connect(host, port, 1)
+ #try_tcp_write(socket,1)
+ #try_tcp_read(socket,1)
+ rescue StandardError => exc
+ fail ["Failed to open socket #{host}:#{port}", exc].join("\n")
+ ensure
+ socket.close if socket
+ end
+ end
+
+ #
+ # Matches the regexp in the file, and returns the first matched string (or fails if no match).
+ #
+ def file_match(filename, regexp)
+ if match = File.read(filename).match(regexp)
+ match.captures.first
+ else
+ fail "Regexp #{regexp.inspect} not found in file #{filename.inspect}."
+ end
+ end
+
+ #
+ # Matches the regexp in the file, and returns array of matched strings (or fails if no match).
+ #
+ def file_matches(filename, regexp)
+ if match = File.read(filename).match(regexp)
+ match.captures
+ else
+ fail "Regexp #{regexp.inspect} not found in file #{filename.inspect}."
+ end
+ end
+
+ #
+ # checks to make sure the given property path exists in $node (e.g. hiera.yaml)
+ # and returns the value
+ #
+ def assert_property(property)
+ latest = $node
+ property.split('.').each do |segment|
+ latest = latest[segment]
+ fail "Required node property `#{property}` is missing." if latest.nil?
+ end
+ return latest
+ end
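+  # Example (hypothetical property name): `assert_property('couch.port')` fails the
+  # test unless $node["couch"]["port"] exists in hiera.yaml, and returns its value.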
+
+ #
+ # works like pgrep command line
+ # return an array of hashes like so [{:pid => "1234", :process => "ls"}]
+ #
+ def pgrep(match)
+ output = `pgrep --full --list-name '#{match}'`
+ output.each_line.map{|line|
+ pid = line.split(' ')[0]
+ process = line.gsub(/(#{pid} |\n)/, '')
+ if process =~ /pgrep --full --list-name/
+ nil
+ else
+ {:pid => pid, :process => process}
+ end
+ }.compact
+ end
+end
+
+def assert_running(process)
+ assert pgrep(process).any?, "No running process for #{process}"
+end
+
+#
+# Custom test runner in order to modify the output.
+#
+class LeapRunner < MiniTest::Unit
+
+ attr_accessor :passes, :warnings
+
+ def initialize
+ @passes = 0
+ @warnings = 0
+ super
+ end
+
+ #
+ # call stack:
+ # MiniTest::Unit.new.run
+ # MiniTest::Unit.runner
+ # LeapTest._run
+ #
+ def _run args = []
+ if $pinned_test_class
+ suites = [$pinned_test_class]
+ if $pinned_test_method
+ options.merge!(:filter => $pinned_test_method.to_s)
+ end
+ else
+ suites = LeapTest.send "test_suites"
+ suites = TestDependencyGraph.new(suites).sorted
+ end
+ output.sync = true
+ results = _run_suites(suites, :test)
+ @test_count = results.inject(0) { |sum, (tc, _)| sum + tc }
+ @assertion_count = results.inject(0) { |sum, (_, ac)| sum + ac }
+ status
+ return exit_code()
+ rescue Interrupt
+ bail :error, 'Tests halted on interrupt.'
+ rescue TestFailure
+ bail :failure, 'Tests halted on failure (because of --no-continue).'
+ rescue TestError
+ bail :error, 'Tests halted on error (because of --no-continue).'
+ end
+
+ #
+ # override puke to change what prints out.
+ #
+ def puke(klass, meth, e)
+ case e
+ when MiniTest::Skip then
+ @skips += 1
+ #if @verbose
+ report_line("SKIP", klass, meth, e, e.message)
+ #end
+ when LeapTest::Pass then
+ @passes += 1
+ report_line("PASS", klass, meth)
+ when MiniTest::Assertion then
+ @failures += 1
+ report_line("FAIL", klass, meth, e, e.message)
+ if $halt_on_failure
+ raise TestFailure.new
+ end
+ else
+ @errors += 1
+ bt = MiniTest::filter_backtrace(e.backtrace).join "\n"
+ report_line("ERROR", klass, meth, e, "#{e.class}: #{e.message}\n#{bt}")
+ if $halt_on_failure
+ raise TestError.new
+ end
+ end
+ return "" # disable the marching ants
+ end
+
+ #
+ # override default status summary
+ #
+ def status(io = self.output)
+ if $output_format == :human
+ format = "%d tests: %d passes, %d skips, %d warnings, %d failures, %d errors"
+ output.puts format % [test_count, passes, skips, warnings, failures, errors]
+ end
+ end
+
+ #
+ # return an appropriate exit_code symbol
+ #
+ def exit_code
+ if @errors > 0
+ :error
+ elsif @failures > 0
+ :failure
+ elsif @warnings > 0
+ :warning
+ else
+ :success
+ end
+ end
+
+ #
+ # returns a string for a PASS, SKIP, or FAIL error
+ #
+ def report_line(prefix, klass, meth, e=nil, message=nil)
+ msg_txt = nil
+ if message
+ message = message.sub(/http:\/\/([a-z_]+):([a-zA-Z0-9_]+)@/, "http://\\1:password@")
+ if $output_format == :human
+ indent = "\n "
+ msg_txt = indent + message.split("\n").join(indent)
+ else
+ msg_txt = message.gsub("\n", ' ')
+ end
+ end
+
+ if $output_format == :human
+ if e && msg_txt
+ output.puts "#{prefix}: #{readable(klass.name)} > #{readable(meth)} [#{File.basename(location(e))}]:#{msg_txt}"
+ elsif msg_txt
+ output.puts "#{prefix}: #{readable(klass.name)} > #{readable(meth)}:#{msg_txt}"
+ else
+ output.puts "#{prefix}: #{readable(klass.name)} > #{readable(meth)}"
+ end
+ # I don't understand at all why, but adding a very tiny sleep here will
+ sleep(0.0001) # keep lines from being joined together by the logger. output.flush doesn't.
+ elsif $output_format == :checkmk
+ code = CHECKMK_CODES[prefix]
+ msg_txt ||= "Success" if prefix == "PASS"
+ if e && msg_txt
+ output.puts "#{code} #{klass.name}/#{machine_readable(meth)} - [#{File.basename(location(e))}]:#{msg_txt}"
+ elsif msg_txt
+ output.puts "#{code} #{klass.name}/#{machine_readable(meth)} - #{msg_txt}"
+ else
+ output.puts "#{code} #{klass.name}/#{machine_readable(meth)} - no message"
+ end
+ end
+ end
+
+ #
+ # a new function used by TestCase to report warnings.
+ #
+ def warn(klass, method_name, msg)
+ @warnings += 1
+ report_line("WARN", klass, method_name, nil, msg)
+ end
+
+ private
+
+ CHECKMK_CODES = {"PASS" => 0, "SKIP" => 1, "FAIL" => 2, "ERROR" => 3}
+
+ #
+ # Converts snake_case and CamelCase to something more pleasant for humans to read.
+ #
+ def readable(str)
+ str.
+ gsub(/_/, ' ').
+ sub(/^test (\d* )?/i, '')
+ end
+
+ def machine_readable(str)
+ str.sub(/^test_(\d+_)?/i, '')
+ end
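+  # For illustration: readable("test_1_checks_ok") => "checks ok",
+  # while machine_readable("test_1_checks_ok") => "checks_ok".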
+
+end
+
+##
+## Dependency resolution
+## Use a topological sort to manage test dependencies
+##
+
+class TestDependencyGraph
+ include TSort
+
+ def initialize(test_classes)
+ @dependencies = {} # each key is a test class name, and the values
+ # are arrays of test class names that the key depends on.
+ test_classes.each do |test_class|
+ @dependencies[test_class.name] = test_class.dependencies
+ end
+ end
+
+ def tsort_each_node(&block)
+ @dependencies.each_key(&block)
+ end
+
+ def tsort_each_child(test_class_name, &block)
+ if @dependencies[test_class_name]
+ @dependencies[test_class_name].each(&block)
+ else
+ puts "ERROR: bad dependency, no such class `#{test_class_name}`"
+ bail :error
+ end
+ end
+
+ def sorted
+ self.tsort.collect {|class_name|
+ Kernel.const_get(class_name)
+ }
+ end
+end
+
+##
+## COMMAND LINE ACTIONS
+##
+
+def die(test, msg)
+ if $output_format == :human
+ puts "ERROR in test `#{test}`: #{msg}"
+ elsif $output_format == :checkmk
+ puts "3 #{test} - #{msg}"
+ end
+ bail :error
+end
+
+def print_help
+ puts ["USAGE: run_tests [OPTIONS]",
+ " --continue Don't halt on an error, but continue to the next test.",
+ " --checkmk Print test results in checkmk format (must come before --test).",
+ " --test TEST Run only the test with name TEST.",
+ " --list-tests Prints the names of all available tests and exit.",
+ " --retry COUNT If the tests don't pass, retry COUNT additional times (default is zero)",
+ " --wait SECONDS Wait for SECONDS between retries (default is 5)"].join("\n")
+ exit(0)
+end
+
+def list_tests
+ LeapTest.test_classes.each do |test_class|
+ test_class.tests.each do |test|
+ puts test_class.name + "/" + test.to_s.sub(/^test_(\d+_)?/, '')
+ end
+ end
+ exit(0)
+end
+
+def pin_test_name(name)
+ test_class, test_name = name.split('/')
+ $pinned_test_class = LeapTest.test_classes.detect{|c| c.name == test_class}
+ unless $pinned_test_class
+ die name, "there is no test class `#{test_class}`"
+ end
+ if test_name
+ $pinned_test_method = $pinned_test_class.tests.detect{|m| m.to_s =~ /^test_(\d+_)?#{Regexp.escape(test_name)}$/}
+ unless $pinned_test_method
+ die name, "there is no test `#{test_name}` in class `#{test_class}`"
+ end
+ end
+end
+
+#
+# run the tests, multiple times if `--retry` and not all tests were successful.
+#
+def run_tests
+ exit_code = nil
+ run_count = $retry ? $retry + 1 : 1
+ run_count.times do |i|
+ MiniTest::Unit.runner = LeapRunner.new
+ exit_code = MiniTest::Unit.new.run
+ if !$retry || exit_code == :success
+ break
+ elsif i != run_count-1
+ sleep $wait
+ end
+ end
+ bail exit_code
+end
+
+##
+## MAIN
+##
+
+def main
+ # load node data from hiera file
+ if File.exists?('/etc/leap/hiera.yaml')
+ $node = YAML.load_file('/etc/leap/hiera.yaml')
+ else
+ $node = {"services" => [], "dummy" => true}
+ end
+
+ # load all test classes
+ this_file = File.symlink?(__FILE__) ? File.readlink(__FILE__) : __FILE__
+ Dir[File.expand_path('../../tests/white-box/*.rb', this_file)].each do |test_file|
+ begin
+ require test_file
+ rescue SkipTest
+ end
+ end
+
+ # parse command line options
+ $halt_on_failure = true
+ $output_format = :human
+ $retry = false
+ $wait = 5
+ loop do
+ case ARGV[0]
+ when '--continue' then ARGV.shift; $halt_on_failure = false;
+ when '--checkmk' then ARGV.shift; $output_format = :checkmk; $halt_on_failure = false
+ when '--help' then print_help
+ when '--test' then ARGV.shift; pin_test_name(ARGV.shift)
+ when '--list-tests' then list_tests
+ when '--retry' then ARGV.shift; $retry = ARGV.shift.to_i
+ when '--wait' then ARGV.shift; $wait = ARGV.shift.to_i
+ else break
+ end
+ end
+ run_tests
+end
+
+main()
diff --git a/doc/development.md b/doc/development.md
new file mode 100644
index 00000000..7a761418
--- /dev/null
+++ b/doc/development.md
@@ -0,0 +1,272 @@
+@title = "Development Environment"
+@toc = true
+
+If you want to make local changes to your provider, or want to contribute some fixes back to LEAP, we recommend that you follow this guide to build up a development environment to test your changes first. Using this method, you can quickly test your changes without deploying them to your production environment, while benefitting from the convenience of reverting to known good states in order to retry things from scratch.
+
+This page will walk you through setting up nodes using [Vagrant](http://www.vagrantup.com/) for convenient deployment testing, snapshotting known good states, and reverting to previous snapshots.
+
+Requirements
+============
+
+* Be a real machine with virtualization support in the CPU (VT-x or AMD-V). In other words, not a virtual machine.
+* Have at least 4gb of RAM.
+* Have a fast internet connection (because you will be downloading a lot of big files, like virtual machine images).
+
+Install prerequisites
+--------------------------------
+
+For development purposes, you will need everything that you need for deploying the LEAP platform:
+
+* LEAP cli
+* A provider instance
+
+You will also need to set up a virtualized Vagrant environment. To do so, please make sure you have the following
+prerequisites installed:
+
+*Debian & Ubuntu*
+
+Install core prerequisites:
+
+ sudo apt-get install git ruby ruby-dev rsync openssh-client openssl rake make
+
+Install Vagrant in order to be able to test with local virtual machines (typically optional, but required for this tutorial):
+
+ sudo apt-get install vagrant virtualbox
+
+<!--
+*Mac OS*
+
+1. Install rubygems from https://rubygems.org/pages/download (unless the `gem` command is already installed).
+2. Install Vagrant.dmg from http://downloads.vagrantup.com/
+-->
+
+
+Adding development nodes to your provider
+=========================================
+
+Now you will add local-only Vagrant development nodes to your provider.
+
+You do not need to setup a different provider instance for development, in fact it is more convenient if you do not, but you can if you wish. If you do not have a provider already, you will need to create one and configure it before continuing (it is recommended you go through the [Quick Start](quick-start) before continuing down this path).
+
+
+Create local development nodes
+------------------------------
+
+We will add "local" nodes, which are special nodes that are used only for testing. These nodes exist only as virtual machines on your computer, and cannot be accessed from the outside. Each "node" is a server that can have one or more services attached to it. We recommend that you create different nodes for different services to better isolate issues.
+
+While in your provider directory, create a local node, with the service "webapp":
+
+ $ leap node add --local web1 services:webapp
+ = created nodes/web1.json
+ = created files/nodes/web1/
+ = created files/nodes/web1/web1.key
+ = created files/nodes/web1/web1.crt
+
+This command creates a node configuration file in `nodes/web1.json` with the webapp service.
+
+Starting local development nodes
+--------------------------------
+
+In order to test the node "web1" we need to start it. Starting a node for the first time will spin up a virtual machine. The first time you do this it will take a while, because a VM image (about 700MB) needs to be downloaded. Once the base image is downloaded it is re-used, so you will not need to download it again (until the image needs updating).
+
+NOTE: Many people have difficulties getting Vagrant working. If the following commands do not work, please visit the [Vagrant page](vagrant) to troubleshoot your Vagrant install before proceeding.
+
+ $ leap local start web
+ = created test/
+ = created test/Vagrantfile
+ = installing vagrant plugin 'sahara'
+ Bringing machine 'web1' up with 'virtualbox' provider...
+ [web1] Box 'leap-wheezy' was not found. Fetching box from specified URL for
+ the provider 'virtualbox'. Note that if the URL does not have
+ a box for this provider, you should interrupt Vagrant now and add
+ the box yourself. Otherwise Vagrant will attempt to download the
+ full box prior to discovering this error.
+ Downloading or copying the box...
+ Progress: 3% (Rate: 560k/s, Estimated time remaining: 0:13:36)
+ ...
+ Bringing machine 'web1' up with 'virtualbox' provider...
+ [web1] Importing base box 'leap-wheezy'...
+ 0%...10%...20%...30%...40%...50%...60%...70%...80%...90%...100%
+
+Now the virtual machine 'web1' is running. You can add another local node using the same process. For example, the webapp node needs a database to run, so let's add a "couchdb" node:
+
+ $ leap node add --local db1 services:couchdb
+ $ leap local start
+ = updated test/Vagrantfile
+ Bringing machine 'db1' up with 'virtualbox' provider...
+ [db1] Importing base box 'leap-wheezy'...
+ [db1] Matching MAC address for NAT networking...
+ [db1] Setting the name of the VM...
+ [db1] Clearing any previously set forwarded ports...
+ [db1] Fixed port collision for 22 => 2222. Now on port 2202.
+ [db1] Creating shared folders metadata...
+ [db1] Clearing any previously set network interfaces...
+ [db1] Preparing network interfaces based on configuration...
+ [db1] Forwarding ports...
+ [db1] -- 22 => 2202 (adapter 1)
+ [db1] Running any VM customizations...
+ [db1] Booting VM...
+ [db1] Waiting for VM to boot. This can take a few minutes.
+ [db1] VM booted and ready for use!
+ [db1] Configuring and enabling network interfaces...
+ [db1] Mounting shared folders...
+ [db1] -- /vagrant
+
+You can now follow the normal LEAP process: initialize the nodes and then deploy your recipes to them:
+
+ $ leap node init web1
+ $ leap deploy web1
+ $ leap node init db1
+ $ leap deploy db1
+
+
+Useful local development commands
+=================================
+
+There are many useful things you can do with a virtualized development environment.
+
+Listing what machines are running
+---------------------------------
+
+Now that you have the two virtual machines "web1" and "db1" running, you can see the running machines as follows:
+
+ $ leap local status
+ Current machine states:
+
+ db1 running (virtualbox)
+ web1 running (virtualbox)
+
+ This environment represents multiple VMs. The VMs are all listed
+ above with their current state. For more information about a specific
+ VM, run `vagrant status NAME`.
+
+Stopping machines
+-----------------
+
+It is not recommended that you leave your virtual machines running when you are not using them. They consume memory and other resources! To stop your machines, simply do the following:
+
+ $ leap local stop web1 db1
+
+Connecting to machines
+----------------------
+
+You can connect to your local nodes just like you do with normal LEAP nodes, by running 'leap ssh node'.
+
+However, if you cannot connect to your local node, because the networking is not setup properly, or you have deployed a firewall that locks you out, you may need to access the graphical console.
+
+In order to do that, you will need to configure Vagrant to launch a graphical console, and then you can log in as root there to diagnose the networking problem. To do this, add the following to your
+$HOME/.leaprc:
+
+ @custom_vagrant_vm_line = 'config.vm.boot_mode = :gui'
+
+and then start, or restart, your local Vagrant node. You should get a VirtualBox graphical console showing the boot process and, eventually, the login prompt.
+
+Snapshotting machines
+---------------------
+
+A very useful feature of local Vagrant development nodes is the ability to snapshot the current state and then revert to it when you need to.
+
+For example, perhaps the base image is a little bit out of date and you want to get the packages updated to the latest before continuing. You can do that simply by starting the node, connecting to it and updating the packages and then snapshotting the node:
+
+ $ leap local start web1
+ $ leap ssh web1
+ web1# apt-get -u dist-upgrade
+ web1# exit
+ $ leap local save web1
+
+Now you can deploy to web1 and if you decide you want to revert to the state before deployment, you simply have to reset the node to your previous save:
+
+ $ leap local reset web1
+
+More information
+----------------
+
+See `leap help local` for a complete list of local-only commands and how they can be used.
+
+
+Limitations
+===========
+
+Please consult the known issues for Vagrant in the [Known Issues](known-issues) documentation, section *Special Environments*.
+
+
+Troubleshooting Vagrant
+=======================
+
+To troubleshoot vagrant issues, try going through these steps:
+
+* Try plain vagrant using the [Getting started guide](http://docs.vagrantup.com/v2/getting-started/index.html).
+* If that fails, make sure that you can run virtual machines (VMs) in plain VirtualBox (VirtualBox GUI or VBoxHeadless).
+  We don't suggest a specific howto for that; [this one](http://www.thegeekstuff.com/2012/02/virtualbox-install-create-vm/) seems pretty decent, or you can follow the [Oracle VirtualBox User Manual](http://www.virtualbox.org/manual/UserManual.html). There's also specific documentation for [Debian](https://wiki.debian.org/VirtualBox) and for [Ubuntu](https://help.ubuntu.com/community/VirtualBox). If that succeeds, check whether you can now start Vagrant nodes using plain Vagrant (see the first step).
+* If plain Vagrant works for you, you're very close to using Vagrant with LEAP! If you encounter any problems now, please [contact us](https://leap.se/en/about-us/contact) or use our [issue tracker](https://leap.se/code).
+
+Known working combinations
+--------------------------
+
+Other combinations might work for you as well; these are just the combinations we tried and that worked for us:
+
+
+Debian Wheezy
+-------------
+
+* `virtualbox-4.2 4.2.16-86992~Debian~wheezy` from Oracle and `vagrant 1.2.2` from vagrantup.com
+
+
+Ubuntu Raring 13.04
+-------------------
+
+* `virtualbox 4.2.10-dfsg-0ubuntu2.1` from Ubuntu raring and `vagrant 1.2.2` from vagrantup.com
+
+
+Using Vagrant with libvirt/kvm
+==============================
+
+Vagrant can be used with different providers/backends; one of them is [vagrant-libvirt](https://github.com/pradels/vagrant-libvirt). Here are the steps to use it. Be sure to use a recent vagrant version (>= 1.3).
+
+Install vagrant-libvirt plugin and add box
+------------------------------------------
+ sudo apt-get install libvirt-bin libvirt-dev
+ vagrant plugin install vagrant-libvirt
+ vagrant plugin install sahara
+ vagrant box add leap-wheezy https://downloads.leap.se/leap-debian-libvirt.box
+
+
+Debugging
+---------
+
+If you get an error in any of the above commands, try to get some debugging information; it will often tell you what is wrong. In order to get debugging logs, you simply need to re-run the command that produced the error, but prepend it with VAGRANT_LOG=info, for example:
+
+    VAGRANT_LOG=info vagrant box add leap-wheezy https://downloads.leap.se/leap-debian-libvirt.box
+
+Start it
+--------
+
+Use this example Vagrantfile:
+
+ Vagrant.configure("2") do |config|
+ config.vm.define :testvm do |testvm|
+ testvm.vm.box = "leap-wheezy"
+ testvm.vm.network :private_network, :ip => '10.6.6.201'
+ end
+
+ config.vm.provider :libvirt do |libvirt|
+ libvirt.connect_via_ssh = false
+ end
+ end
+
+Then:
+
+ vagrant up --provider=libvirt
+
+If everything works, you should export libvirt as the VAGRANT_DEFAULT_PROVIDER:
+
+ export VAGRANT_DEFAULT_PROVIDER="libvirt"
+
+Now you should be able to use the `leap local` commands.
+
+Known Issues
+------------
+
+* 'Call to virConnectOpen failed: internal error: Unable to locate libvirtd daemon in /usr/sbin (to override, set $LIBVIRTD_PATH to the name of the libvirtd binary)' - you don't have the libvirtd daemon running or installed, be sure you installed the 'libvirt-bin' package and it is running
+* 'Call to virConnectOpen failed: Failed to connect socket to '/var/run/libvirt/libvirt-sock': Permission denied' - you need to be in the libvirt group to access the socket, do 'sudo adduser <user> libvirt' and then re-login to your session
+* see the [vagrant-libvirt issue list on github](https://github.com/pradels/vagrant-libvirt/issues)
+* be sure to use vagrant-libvirt >= 0.0.11 and sahara >= 0.0.16 (which are the latest stable gems you would get with `vagrant plugin install [vagrant-libvirt|sahara]`) for proper libvirt support
diff --git a/doc/en.md b/doc/en.md
index bdae4630..f1a1fc17 100644
--- a/doc/en.md
+++ b/doc/en.md
@@ -20,7 +20,7 @@ LEAP maintains a repository of platform recipes, which typically do not need to
As these recipes consist in abstract definitions, in order to configure settings for a particular service provider a system administrator has to create a provider instance (see below).
-LEAP's platform recipes are distributed as a git repository: `git://leap.se/leap_platform.git`
+LEAP's platform recipes are distributed as a git repository: `https://leap.se/git/leap_platform`
The provider instance
---------------------
@@ -64,7 +64,7 @@ One other significant difference between LEAP and typical system automation is h
These two approaches, masterless push and pre-compiled static configuration, allow the sysadmin to manage a set of LEAP servers using traditional software development techniques of branching and merging, to more easily create local testing environments using virtual servers, and to deploy without the added complexity and failure potential of a master server.
-The `leap` command line tool is distributed as a git repository: `git://leap.se/leap_cli`. It can be installed with `sudo gem install leap_cli`.
+The `leap` command line tool is distributed as a git repository: `https://leap.se/git/leap_cli`. It can be installed with `sudo gem install leap_cli`.
Getting started
----------------------------------
diff --git a/doc/faq.md b/doc/faq.md
new file mode 100644
index 00000000..2654ce80
--- /dev/null
+++ b/doc/faq.md
@@ -0,0 +1,53 @@
+@title = 'Frequently asked questions'
+@nav_title = 'FAQ'
+@toc = true
+
+Puppet
+======
+
+Where do I find the time a server was last deployed?
+-----------------------------------------------------
+
+The puppet state file on the node indicates the last puppet run:
+
+ ls -la /var/lib/puppet/state/state.yaml
+
+What resources are touched by puppet/leap_platform (services/packages/files etc.)?
+-----------------------------------------------------------------------------------
+
+Log into your server and issue:
+
+ grep -v '!ruby/sym' /var/lib/puppet/state/state.yaml | sed 's/\"//' | sort
+
+
+How can I customize the leap_platform puppet manifests?
+--------------------------------------------------------
+
+You can create a custom module `site_custom`. The class `site_custom::setup` will get
+included in the first part of the deploy process, and `site_custom` during the second part.
+Of course you can also create a different git branch and change whatever you want, if you are
+familiar with git.
+
+Facter
+======
+
+How can I see custom facts distributed by leap_platform on a node?
+-------------------------------------------------------------------
+
+On the server, export the FACTERLIB env. variable to include the path of the custom fact in question:
+
+ export FACTERLIB=/var/lib/puppet/lib/facter:/srv/leap/puppet/modules/stdlib/lib/facter/
+ facter
+
+
+Etc
+===
+
+How do I change the domain of my provider?
+-------------------------------------------
+
+* First of all, you need to have access to the nameserver config of your new domain.
+* Update the domain in provider.json.
+* Remove all ca and cert files: `rm files/cert/* files/ca/*`
+* Create the ca, csr and certs: `leap cert ca; leap cert csr; leap cert dh; leap cert update`
+* Deploy.
diff --git a/doc/guide.md b/doc/guide.md
index dae392e5..52c3b2fa 100644
--- a/doc/guide.md
+++ b/doc/guide.md
@@ -15,16 +15,11 @@ When adding a new node to your provider, you should ask yourself four questions:
Brief overview of the services:
-![services diagram](service-diagram.png)
-
* **webapp**: The web application. Runs both webapp control panel for users and admins as well as the REST API that the client uses. Needs to communicate heavily with `couchdb` nodes. You need at least one, good to have two for redundancy. The webapp does not get a lot of traffic, so you will not need many.
* **couchdb**: The database for users and user data. You can get away with just one, but for proper redundancy you should have at least three. Communicates heavily with `webapp` and `mx` nodes.
* **soledad**: Handles the data syncing with clients. Typically combined with `couchdb` service, since it communicates heavily with couchdb. (not currently in stable release)
* **mx**: Incoming and outgoing MX servers. Communicates with the public internet, clients, and `couchdb` nodes. (not currently in stable release)
* **openvpn**: OpenVPN gateway for clients. You need at least one, but want as many as needed to support the bandwidth your users are doing. The `openvpn` nodes are autonomous and don't need to communicate with any other nodes. Often combined with `tor` service.
-
-Not pictured:
-
* **monitor**: Internal service to monitor all the other nodes. Currently, you can have zero or one `monitor` nodes.
* **tor**: Sets up a tor exit node, unconnected to any other service.
* **dns**: Not yet implemented.
@@ -157,30 +152,32 @@ Configuration options
The `ca` option in provider.json provides settings used when generating CAs and certificates. The defaults are as follows:
- "ca": {
- "name": "= global.provider.ca.organization + ' Root CA'",
- "organization": "= global.provider.name",
- "organizational_unit": "= 'https://' + global.provider.name",
- "bit_size": 4096,
- "digest": "SHA256",
- "life_span": "10y",
- "server_certificates": {
- "bit_size": 2024,
- "digest": "SHA256",
- "life_span": "1y"
- },
- "client_certificates": {
- "bit_size": 2024,
+ {
+ "ca": {
+ "name": "= global.provider.ca.organization + ' Root CA'",
+ "organization": "= global.provider.name[global.provider.default_language]",
+ "organizational_unit": "= 'https://' + global.provider.domain",
+ "bit_size": 4096,
"digest": "SHA256",
- "life_span": "2m",
- "limited_prefix": "LIMITED",
- "unlimited_prefix": "UNLIMITED"
+ "life_span": "10y",
+ "server_certificates": {
+ "bit_size": 2048,
+ "digest": "SHA256",
+ "life_span": "1y"
+ },
+ "client_certificates": {
+ "bit_size": 2048,
+ "digest": "SHA256",
+ "life_span": "2m",
+ "limited_prefix": "LIMITED",
+ "unlimited_prefix": "UNLIMITED"
+ }
}
}
-To see what values are used for your provider, run `leap inspect provider.json`. You can modify the defaults as you wish by adding the values to provider.json.
+You should not need to override these defaults in your own provider.json, but you can if you want to. To see what values are used for your provider, run `leap inspect provider.json`.
-NOTE: A certificate `bit_size` greater than 2024 will probably not be recognized by most commercial CAs.
+NOTE: A certificate `bit_size` greater than 2048 will probably not be recognized by most commercial CAs.
Certificate Authorities
-----------------------------------------
@@ -245,6 +242,18 @@ The private key file is extremely sensitive and care should be taken with its pr
If your commercial CA has a chained CA cert, you should be OK if you just put the **last** cert in the chain into the `commercial_ca.crt` file. This only works if the other CAs in the chain have certs in the debian package `ca-certificates`, which is the case for almost all CAs.
+If you want to add additional fields to the CSR, like country, city, or locality, you can configure these values in provider.json like so:
+
+ "ca": {
+ "server_certificates": {
+ "country": "US",
+ "state": "Washington",
+ "locality": "Seattle"
+ }
+ }
+
+If they are not present, the CSR will be created without them.
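+
+To double-check which fields actually ended up in the generated CSR, you can inspect it with openssl. The exact filename below is an assumption; it is the CSR that `leap cert csr` wrote under `files/cert/`:
+
+    openssl req -noout -subject -in files/cert/example.org.csr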
+
Facts
==============================
diff --git a/doc/known-issues.md b/doc/known-issues.md
index abd28084..960eaad7 100644
--- a/doc/known-issues.md
+++ b/doc/known-issues.md
@@ -34,15 +34,15 @@ User setup and ssh
. If the ssh host key changes, you need to run node init again (see: https://leap.se/en/docs/platform/guide#Working.with.SSH)
-. At the moment, only ECDSA ssh host keys are supported. If you get the following error: `= FAILED ssh-keyscan: no hostkey alg (must be missing an ecdsa public host key)` then you should confirm that you have the following line defined in your server's /etc/ssh/sshd_config:
-HostKey /etc/ssh/ssh_host_ecdsa_key and that file exists. If you made a change to your sshd_config, then you need to run `/etc/init.d/ssh restart` (see: https://leap.se/code/issues/2373)
+. At the moment, only ECDSA ssh host keys are supported. If you get the following error: `= FAILED ssh-keyscan: no hostkey alg (must be missing an ecdsa public host key)` then you should confirm that you have the following line defined in your server's **/etc/ssh/sshd_config**: `HostKey /etc/ssh/ssh_host_ecdsa_key`. If that file doesn't exist, run `ssh-keygen -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key -N ""` in order to create it. If you made a change to your sshd_config, then you need to run `/etc/init.d/ssh restart` (see: https://leap.se/code/issues/2373)
-. To remove an admin's access to your servers, please remove the directory for that user under the `users/` subdirectory in your provider directory and then remove that user's ssh keys from files/ssh/authorized_keys. When finished you *must* run a `leap deploy` to update that information on the servers (see: https://leap.se/code/issues/1863)
+. To remove an admin's access to your servers, please remove the directory for that user under the `users/` subdirectory in your provider directory and then remove that user's ssh keys from files/ssh/authorized_keys. When finished you *must* run a `leap deploy` to update that information on the servers.
. At the moment, it is only possible to add an admin who will have access to all LEAP servers (see: https://leap.se/code/issues/2280)
. leap add-user --self allows only one key - if you run that command twice with different keys, you will just replace the key with the second key. To add a second key, add it manually to files/ssh/authorized_keys (see: https://leap.se/code/issues/866)
+
Deploying
---------
diff --git a/doc/quick-start.md b/doc/quick-start.md
index 5ba28f8d..0bce271a 100644
--- a/doc/quick-start.md
+++ b/doc/quick-start.md
@@ -1,116 +1,197 @@
@title = 'LEAP Platform Quick Start'
@nav_title = 'Quick Start'
-This tutorial walks you through the initial process of creating and deploying a service provider running the [LEAP platform](platform). First examples aim to build a provider in a virtual environment, and in the end running in real hardware is targeted.
+Quick Start
+===========
-First, a few definitions:
+This tutorial walks you through the initial process of creating and deploying a minimal service provider running the [LEAP platform](platform). In it, you will build a three-node OpenVPN provider.
+If you are curious what this will look like without trying it out yourself, you can watch our [recorded screencasts](http://shelr.tv/users/524415e69660807910000021).
-* **node:** A server that is part of the service provider's infrastructure. All nodes are running the Debian GNU/Linux operating system.
-* **sysadmin:** This is you.
-* **sysadmin machine:** Your desktop or laptop computer that you use to control the nodes. This machine can be running any variant of Unix, Linux, or Mac OS (however, only Debian derivatives are supported at the moment).
+Our goal
+------------------
-All the commands in this tutorial are run on your sysadmin machine. In order to complete the tutorial, the sysadmin machine must:
+We are going to create a minimal LEAP provider offering OpenVPN service. This basic setup can be expanded by adding more OpenVPN nodes to increase capacity, or more webapp and couchdb nodes to increase availability (performance-wise, a single couchdb node and a single webapp node are more than enough for most usage, since they are only lightly used, but you might want redundancy).
-* Be a real machine with virtualization support in the CPU (VT-x or AMD-V). In other words, not a virtual machine.
-* Have at least 4gb of RAM.
-* Have a fast internet connection (because you will be downloading a lot of big files, like virtual machine images).
+Our goal is something like this:
-Install prerequisites
+ $ leap list
+ NODES SERVICES TAGS
+ couch1 couchdb
+ web1 webapp
+ vpn1 openvpn
+
+NOTE: You won't be able to run that `leap list` command yet, not until we actually create the node configurations.
+
+Requirements
+------------
+
+In order to complete this Quick Start, you will need a few things:
+
+* You will need three real or paravirtualized virtual machines (KVM, Xen, OpenStack, Amazon, but not Vagrant - sorry) that have a basic Debian Stable installed. If you allocate 10G to each node, that should be plenty.
+* You should be able to SSH into them remotely, and you should know their IP addresses and their SSH host keys (see the example after this list).
+* You will need four different IPs: one for each node, plus an additional one for the VPN gateway.
+* The ability to create/modify DNS entries for your domain is preferable, but not needed. If you don't have access to DNS, you can work around this by modifying your local resolver, i.e. editing `/etc/hosts`.
+* You need to be aware that this process will make changes to your systems, so please be sure that these machines are a basic install with nothing configured or running for other purposes.
+* Your machines will need to be connected to the internet, and not behind a restrictive firewall.
+* You should work locally on your laptop/workstation (one that you trust and that is ideally full-disk encrypted) while going through this guide. This is important because the provider configurations you are creating contain sensitive data that should not reside on a remote machine. The leap cli utility will log in to your servers and configure the services.
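+
+For example, you can record the ECDSA host key of a machine ahead of time with the standard ssh-keyscan tool (replace x.x.x.w with one of your node IPs):
+
+    $ ssh-keyscan -t ecdsa x.x.x.w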
+
+All the commands in this tutorial are run on your sysadmin machine. In order to complete the tutorial, the sysadmin will do the following:
+
+* Install pre-requisites
+* Install the LEAP command-line utility
+* Check out the LEAP platform
+* Create a provider and its certificates
+* Setup the provider's nodes and the services that will reside on those nodes
+* Initialize the nodes
+* Deploy the LEAP platform to the nodes
+* Test that things worked correctly
+* Some additional commands
+
+We will walk you through each of these steps.
+
+
+Prepare your environment
+========================
+
+There are a few things you need to set up before you can get going: just some packages, the LEAP cli and the platform.
+
+Install pre-requisites
--------------------------------
*Debian & Ubuntu*
Install core prerequisites:
- sudo apt-get install git ruby ruby-dev rsync openssh-client openssl rake make
-
-Install Vagrant in order to be able to test with local virtual machines (typically optional, but required for this tutorial):
-
- sudo apt-get install vagrant virtualbox
+ $ sudo apt-get install git ruby ruby-dev rsync openssh-client openssl rake make bzip2
<!--
*Mac OS*
1. Install rubygems from https://rubygems.org/pages/download (unless the `gem` command is already installed).
-2. Install Vagrant.dmg from http://downloads.vagrantup.com/
-->
-Install leap
+NOTE: leap_cli should work with ruby1.8, but has only been tested using ruby1.9.
+
+
+Install the LEAP command-line utility
---------------------
<!--Install the `leap` command as a gem:
- sudo gem install leap_cli
+ $ sudo gem install leap_cli
Alternately, you can install `leap` from source:
- git clone git://leap.se/leap_cli.git
- cd leap_cli
- rake build
+ $ git clone https://leap.se/git/leap_cli
+ $ cd leap_cli
+ $ rake build
-->
Install `leap` command from source:
- git clone git://leap.se/leap_cli.git
- cd leap_cli
- rake build
+ $ git clone https://leap.se/git/leap_cli
+ $ cd leap_cli
+ $ rake build
Then, install as root user (recommended):
- sudo rake install
+ $ sudo rake install
Or, install as unprivileged user:
- rake install
+ $ rake install
# watch out for the directory leap is installed to, then i.e.
- sudo ln -s ~/.gem/ruby/1.9.1/bin/leap /usr/local/bin/leap
+ $ sudo ln -s ~/.gem/ruby/1.9.1/bin/leap /usr/local/bin/leap
With both methods, you can use now /usr/local/bin/leap, which in most cases will be in your $PATH.
+If you have successfully installed the LEAP cli, then you should be able to do the following:
-Create a provider instance
----------------------------------------
+ $ leap --help
+
+and be presented with the command-line help options. If you receive an error when doing this, please read through the README.md in the LEAP cli source to try to resolve any problems before going forward.
+
+
+Check out the platform
+----------------------
+
+The LEAP Platform is a series of puppet recipes and modules that will be used to configure your provider. You will need a local copy of the platform that will be used to set up your nodes and manage your services. To begin with, you will not need to modify the LEAP Platform.
+
+First we'll create a directory for LEAP things, and then we'll check out the platform code and initialize the modules:
+
+ $ mkdir ~/leap
+ $ cd ~/leap
+ $ git clone https://leap.se/git/leap_platform.git
+ $ cd leap_platform
+ $ git submodule sync; git submodule update --init
-A provider instance is a directory tree, usually stored in git, that contains everything you need to manage an infrastructure for a service provider. In this case, we create one for bitmask.net and call the instance directory 'bitmask'.
- mkdir -p ~/leap/bitmask
+Provider Setup
+==============
-Now, we will initialize this directory to make it a provider instance. Your provider instance will need to know where it can find local copy of the git repository leap_platform, which holds the puppet recipes you will need to manage your servers. Typically, you will not need to modify leap_platform.
+A provider instance is a directory tree, usually stored in git, that contains everything you need to manage an infrastructure for a service provider. In this case, we create one for example.org and call the instance directory 'example'.
- cd ~/leap/bitmask
- leap new .
+ $ mkdir -p ~/leap/example
+
+Bootstrap the provider
+-----------------------
+
+Now, we will initialize this directory to make it a provider instance. Your provider instance will need to know where it can find the local copy of the git repository leap_platform, which we set up in the previous step.
+
+ $ cd ~/leap/example
+ $ leap new .
+
+NOTE: make sure you include that trailing dot!
The `leap new` command will ask you for several required values:
-* domain: The primary domain name of your service provider. In this tutorial, we will be using "bitmask.net".
-* name: The name of your service provider.
+* domain: The primary domain name of your service provider. In this tutorial, we will be using "example.org".
+* name: The name of your service provider (we use "Example").
* contact emails: A comma separated list of email addresses that should be used for important service provider contacts (for things like postmaster aliases, Tor contact emails, etc).
-* platform: The directory where you have a copy of the `leap_platform` git repository checked out. If it doesn't exist, it will be downloaded for you.
+* platform: The directory where you have a copy of the `leap_platform` git repository checked out.
+
+You could also have passed these configuration options on the command-line, like so:
+
+ $ leap new --contacts your@email.here --domain leap.example.org --name Example --platform=~/leap/leap_platform .
You may want to poke around and see what is in the files we just created. For example:
- cat provider.json
+ $ cat provider.json
Optionally, commit your provider directory using the version control software you fancy. For example:
- git init
- git add .
- git commit -m "initial commit"
+ $ git init
+ $ git add .
+ $ git commit -m "initial provider commit"
Now add yourself as a privileged sysadmin who will have access to deploy to servers:
- leap add-user --self
+ $ leap add-user --self
-NOTE: in most cases, `leap` must be run from within a provider instance directory tree (e.g. ~/leap/bitmask).
+NOTE: in most cases, `leap` must be run from within a provider instance directory tree (e.g. ~/leap/example).
-Now generate required X509 certificates and keys:
+Create provider certificates
+----------------------------
- leap cert ca
- leap cert csr
+Create two certificate authorities, one for server certs and one for client
+certs (note: you only need to run this one command to get both):
+
+ $ leap cert ca
+
+Create a temporary cert for your main domain (you should replace it with a real commercial cert at some point):
+
+ $ leap cert csr
To see details about the keys and certs that the prior two commands created, you can use `leap inspect` like so:
- leap inspect files/ca/ca.crt
+ $ leap inspect files/ca/ca.crt
+
+Create the Diffie-Hellman parameters file, needed for forward secret OpenVPN ciphers:
+
+ $ leap cert dh
+
+NOTE: the files `files/ca/*.key` are extremely sensitive and must be carefully protected. The other key files are much less sensitive and can simply be regenerated if needed.
Edit provider.json configuration
@@ -119,58 +200,99 @@ Edit provider.json configuration
There are a few required settings in provider.json. At a minimum, you must have:
{
- "domain": "bitmask.net",
- "name": "Bitmask",
+ "domain": "example.org",
+ "name": "Example",
"contacts": {
- "default": "email1@domain.org, email2@domain.org"
+ "default": "email1@example.org"
}
}
For a full list of possible settings, you can use `leap inspect` to see how provider.json is evaluated after including the inherited defaults:
- leap inspect provider.json
+ $ leap inspect provider.json
-Create nodes
----------------------
-A "node" is a server that is part of your infrastructure. Every node can have one or more services associated with it. Some nodes are "local" and used only for testing. These local nodes exist only as virtual machines on your computer and cannot be accessed from outside (see `leap help local` for more information).
+Setup the provider's nodes and services
+---------------------------------------
-Create a local node, with the service "webapp":
+A "node" is a server that is part of your infrastructure. Every node can have one or more services associated with it. Some nodes are "local" and used only for testing, see [Development](developmet) for more information.
- leap node add --local web1 services:webapp
+Create a node, with the service "webapp":
-This created a node configuration file in `nodes/web1.json`, but it did not create the virtual machine. In order to test our node "web1", we need to first spin up a virtual machine. The next command will probably take a very long time, because it will need to download a VM image (about 700mb).
+ $ leap node add web1 ip_address:x.x.x.w services:webapp tags:production
- leap local start
+NOTE: replace x.x.x.w with the actual IP address of this node
-Now that the virtual machine for web1 is running, you need to initialize it and then deploy the recipes to it. You only need to initialize a node once, but there is no harm in doing it multiple times. These commands will take a while to run the first time, as it needs to update the package cache on the new virtual machine.
+This created a node configuration file in `nodes/web1.json`, but it did not touch the server itself. It also added the tag 'production' to this node. Tags allow us to conveniently group nodes together. When creating nodes, you should give them the tag 'production' if the node is to be used in your production infrastructure.
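+
+If you are curious, you can peek at the file that was just generated. It should contain roughly the values passed on the command line, something like the following (the exact layout may differ between leap_cli versions):
+
+    $ cat nodes/web1.json
+    {
+      "ip_address": "x.x.x.w",
+      "services": ["webapp"],
+      "tags": ["production"]
+    }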
- leap node init web1
- leap deploy web1
+The web application and the VPN nodes require a database, so let's create the database server node:
-That is it, you should now have your first running node. However, the LEAP web application requires a database to run, so let's add a "couchdb" node:
+ $ leap node add couch1 ip_address:x.x.x.x services:couchdb tags:production
- leap node add --local db1 services:couchdb
- leap local start
- leap node init db1
- leap deploy db1
+NOTE: replace x.x.x.x with the actual IP address of this node
-Access the web application
---------------------------------------------
+Now we need the VPN gateway, so let's create that node:
-You should now have two local virtual machines running, one for the web application and one for the database. In order to connect to the web application in your browser, you need to point your domain at the IP address of the web application node (named web1 in this example).
+ $ leap node add vpn1 ip_address:x.x.x.y openvpn.gateway_address:x.x.x.z services:openvpn tags:production
-There are a lot of different ways to do this, but one easy way is to modify your `/etc/hosts` file. First, find the IP address of the webapp node:
+NOTE: replace x.x.x.y with the IP address of the machine, and x.x.x.z with the second IP. OpenVPN gateways must be assigned two IP addresses: one for the host itself and one for the openvpn gateway. We do this to prevent incoming and outgoing VPN traffic from sharing the same IP. Without this, the client might send some traffic to other VPN users in the clear, bypassing the VPN.
- leap list webapp --print ip_address
-Then modify `/etc/hosts` like so:
+Setup DNS
+---------
+
+Now that you have the nodes configured, you should create the DNS entries for these nodes.
+
+Set up your DNS with these hostnames:
+
+ $ leap list --print ip_address,domain.full,dns.aliases
+    couch1    x.x.x.x,  couch1.example.org,  null
+    web1      x.x.x.w,  web1.example.org,    api.example.org, nicknym.example.org
+ vpn1 x.x.x.y, vpn1.example.org, null
+
+Alternately, you can adapt this zone file snippet:
- 10.5.5.47 DOMAIN
+ $ leap compile zone
-Replacing 'DOMAIN' with whatever you specified as the `domain` in the `leap new` command.
+If you cannot edit your DNS zone file, you can still test your provider by adding entries to your local resolver hosts file (`/etc/hosts` for Linux):
-Next, you can connect to the web application either using a web browser or via the API using the LEAP client. To use a browser, connect to https://DOMAIN. Your browser will complain about an untrusted cert, but for now just bypass this. From there, you should be able to register a new user and login.
+    x.x.x.x couch1.example.org
+    x.x.x.w web1.example.org api.example.org example.org
+ x.x.x.y vpn1.example.org
+
+Please don't forget about these entries; they will override DNS queries if you set up your DNS later.
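+
+Whichever way you set it up (real DNS or hosts file), you can quickly check that a name resolves to the IP you expect. For example, it should print something like:
+
+    $ getent hosts web1.example.org
+    x.x.x.w        web1.example.org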
+
+
+Initialize the nodes
+--------------------
+
+Node initialization only needs to be done once, but there is no harm in doing it multiple times:
+
+ $ leap node init production
+
+This will initialize all nodes with the tag "production". When `leap node init` is run, you will be prompted to verify the fingerprint of the SSH host key and to provide the root password of the server(s). You should only need to do this once.
+
+If you prefer, you can initialize each node, one at a time:
+
+ $ leap node init web1
+ $ leap node init couch1
+ $ leap node init vpn1
+
+Deploy the LEAP platform to the nodes
+--------------------
+
+Now you should deploy the platform recipes to the nodes. Deployment can take a while to run, especially on the first run, as it needs to update the packages on the new machine:
+
+ $ leap deploy web1
+
+Watch the output for any errors (in red). If everything worked fine, you should now have your first running node. If you do have errors, try running the deploy again.
+
+However, our three-node OpenVPN setup is not complete yet: the LEAP web application requires a database to run, and we still need the VPN gateway, so let's deploy to the couchdb and openvpn nodes:
+
+ $ leap deploy couch1
+ $ leap deploy vpn1
+
+NOTE: the output from deploying can be quite busy, so we often deploy to each node one at a time.
What is going on here?
--------------------------------------------
@@ -190,17 +312,55 @@ You can run `leap -v2 deploy` to see exactly what commands are being executed.
<!-- See [under the hood](under-the-hood) for more details. -->
-Additional commands
--------------------------------------------
+
+Test that things worked correctly
+=================================
+
+You should now have three machines with the LEAP platform deployed to them, one for the web application, one for the database and one for the OpenVPN gateway.
+
+
+Access the web application
+--------------------------------------------
+
+In order to connect to the web application in your browser, you need to point your domain at the IP address of the web application node (named web1 in this example).
+
+There are a lot of different ways to do this, but one easy way is to modify your `/etc/hosts` file. First, find the IP address of the webapp node:
+
+ $ leap list webapp --print ip_address
+
+Then modify `/etc/hosts` like so:
+
+ x.x.x.w leap.example.org
+
+Replacing 'leap.example.org' with whatever you specified as the `domain` in the `leap new` command.
+
+Next, you can connect to the web application either using a web browser or via the API using the LEAP client. To use a browser, connect to https://leap.example.org (replacing that with your domain). Your browser will complain about an untrusted cert, but for now just bypass this. From there, you should be able to register a new user and login.
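+
+You can also do a quick check from the command line. A LEAP provider normally serves its provider definition at the root of the main domain, so something like the following should return a JSON document (`-k` skips verification of the still-untrusted certificate):
+
+    $ curl -k https://leap.example.org/provider.json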
+
+Use the VPN
+-----------
+
+You should be able to simply test that the OpenVPN gateway works properly by doing the following:
+
+ $ leap test init
+ $ sudo openvpn test/openvpn/unlimited.ovpn
+
+Or, you can use the LEAP client (called "bitmask") to connect to your new provider, create a user and then connect to the VPN.
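+
+Once the VPN is up (by either method), a `tun` interface should appear on your client and, depending on the gateway configuration, your default route will usually point through it. A quick way to check:
+
+    $ ip addr show tun0
+    $ ip route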
+
+
+Additional information
+======================
+
+It is useful to know a few additional things.
+
+Useful commands
+---------------
Here are a few useful commands you can run on your new local nodes:
* `leap ssh web1` -- SSH into node web1 (requires `leap node init web1` first).
* `leap list` -- list all nodes.
+* `leap list production` -- list only those nodes with the tag 'production'.
* `leap list --print ip_address` -- list a particular attribute of all nodes.
-* `leap local reset web1` -- return web1 to a pristine state.
-* `leap local stop` -- stop all local virtual machines.
-* `leap local status` -- get the running state of all the local virtual machines.
* `leap cert update` -- generate new certificates if needed.
See the full command reference for more information.
@@ -223,20 +383,12 @@ Examples:
* `leap deploy webapp openvpn` -- deploy to all webapp OR openvpn nodes.
* `leap node init vpn1` -- just init the node named vpn1.
-Running on real hardware
------------------------------------
-
-The steps required to initialize and deploy to nodes on the public internet are basically the same as we have seen so far for local testing nodes. There are a few key differences:
-
-* Obviously, you will need to acquire a real or virtual machine that you can SSH into remotely.
-* When creating the node configuration, you should give it the tag "production" if the node is to be used in your production infrastructure.
-* When creating the node configuration, you need to specify the IP address of the node.
-
-For example:
+Keep track of your provider configurations
+------------------------------------------
- leap node add db1 tags:production services:couchdb ip_address:4.4.4.4
+You should commit your provider changes to your favorite VCS whenever things change. This way you can share your configurations with other admins; all they have to do is pull the changes to stay up to date. Every time you make a change to your provider, such as adding nodes or services or generating certificates, you should add those changes to your VCS, commit them, and push them to where your repository is hosted.
-Also, running `leap node init NODE_NAME` on a real server will prompt you to verify the fingerprint of the SSH host key and to provide the root password of the server NODE_NAME. You should only need to do this once.
+Note that your provider directory contains secrets! Those secrets include passwords for various services. You do not want to have those passwords readable by the world, so make sure that wherever you are hosting your repository, it is not public for the world to read.
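+
+For example, a typical update cycle might look like this (assuming your repository has a remote named `origin`):
+
+    $ git add .
+    $ git commit -m "added production nodes"
+    $ git push origin master
+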
What's next
-----------------------------------
diff --git a/doc/troubleshooting.md b/doc/troubleshooting.md
new file mode 100644
index 00000000..bb2fc4b5
--- /dev/null
+++ b/doc/troubleshooting.md
@@ -0,0 +1,147 @@
+@title = 'Troubleshooting Guide'
+@nav_title = 'Troubleshooting'
+@toc = true
+
+
+General
+=======
+
+* Please increase verbosity when debugging or filing issues in our issue tracker. You can do this by adding a verbosity flag after the `leap` command, e.g. `leap -v 2 deploy`.
+
+Webapp node
+===========
+
+Places to look for errors
+-------------------------
+
+* `/var/log/apache2/error.log`
+* `/srv/leap/webapp/log/production.log`
+* `/var/log/syslog` (watch out for stunnel issues)
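+
+To follow all of these at once while reproducing a problem:
+
+    tail -f /var/log/apache2/error.log /srv/leap/webapp/log/production.log /var/log/syslog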
+
+Is haproxy ok?
+---------------
+
+
+ curl -s -X GET "http://127.0.0.1:4096"
+
+Is couchdb accessible through stunnel?
+---------------------------------------
+
+
+ curl -s -X GET "http://127.0.0.1:4000"
+
+
+Check couchdb acl
+-----------------
+
+
+ mkdir /etc/couchdb
+ cat /srv/leap/webapp/config/couchdb.yml.admin # see username and password
+ echo "machine 127.0.0.1 login admin password <PASSWORD>" > /etc/couchdb/couchdb-admin.netrc
+ chmod 600 /etc/couchdb/couchdb-admin.netrc
+
+ curl -s --netrc-file /etc/couchdb/couchdb-admin.netrc -X GET "http://127.0.0.1:4096"
+ curl -s --netrc-file /etc/couchdb/couchdb-admin.netrc -X GET "http://127.0.0.1:4096/_all_dbs"
+
+
+Couchdb node
+============
+
+Places to look for errors
+-------------------------
+
+* `/opt/bigcouch/var/log/bigcouch.log`
+* `/var/log/syslog` (watch out for stunnel issues)
+
+
+Bigcouch membership
+-------------------
+
+* All nodes configured for the provider should appear here:
+
+
+ curl -s --netrc-file /etc/couchdb/couchdb.netrc -X GET 'http://127.0.0.1:5986/nodes/_all_docs'
+
+* All configured nodes should show up under "cluster_nodes", and the ones online and communicating with each other should appear under "all_nodes". This example output shows the configured cluster nodes `couch1.bitmask.net` and `couch2.bitmask.net`, but `couch2.bitmask.net` is currently not accessible from `couch1.bitmask.net`.
+
+
+ curl -s --netrc-file /etc/couchdb/couchdb.netrc 'http://127.0.0.1:5984/_membership'
+ {"all_nodes":["bigcouch@couch1.bitmask.net"],"cluster_nodes":["bigcouch@couch1.bitmask.net","bigcouch@couch2.bitmask.net"]}
+
+
+
+Databases
+---------
+
+* The following output shows all necessary DBs that should be present. Note that the `user-0123456....` DBs are the data stores for a particular user.
+
+
+ curl -s --netrc-file /etc/couchdb/couchdb.netrc -X GET 'http://127.0.0.1:5984/_all_dbs'
+ ["customers","identities","sessions","shared","tickets","tokens","user-0","user-9d34680b01074c75c2ec58c7321f540c","user-9d34680b01074c75c2ec58c7325fb7ff","users"]
+
+
+
+Design Documents
+----------------
+
+* Is the User `_design` doc available?
+
+
+ curl -s --netrc-file /etc/couchdb/couchdb.netrc -X GET "http://127.0.0.1:5984/users/_design/User"
+
+
+
+MX node
+=======
+
+Places to look for errors
+-------------------------
+
+* `/var/log/mail.log`
+* `/var/log/leap_mx.log`
+* `/var/log/syslog` (watch out for stunnel issues)
+
+
+Query leap-mx
+-------------
+
+* for a user account
+
+
+ postmap -v -q "joe@dev.bitmask.net" tcp:localhost:2244
+ ...
+    postmap: dict_tcp_lookup: send: get joe@dev.bitmask.net
+ postmap: dict_tcp_lookup: recv: 200
+ ...
+
+* for a mail alias
+
+
+ postmap -v -q "joe@dev.bitmask.net" tcp:localhost:4242
+ ...
+ postmap: dict_tcp_lookup: send: get joe@dev.bitmask.net
+ postmap: dict_tcp_lookup: recv: 200 f01bc1c70de7d7d80bc1ad77d987e73a
+ postmap: dict_tcp_lookup: found: f01bc1c70de7d7d80bc1ad77d987e73a
+ f01bc1c70de7d7d80bc1ad77d987e73a
+ ...
+
+
+
+Mailspool
+---------
+
+* Are any files sitting in the mailspool for longer than a few seconds?
+
+
+ ls -la /var/mail/vmail/Maildir/cur/
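+
+* To list only files that have been sitting in the spool for more than a minute, a standard find invocation works, for example:
+
+
+    find /var/mail/vmail/Maildir/cur/ -type f -mmin +1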
+
+
+VPN node
+========
+
+Places to look for errors
+-------------------------
+
+* `/var/log/syslog` (watch out for openvpn issues)
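+
+* To pull out just the recent openvpn messages, for example:
+
+
+    grep -i openvpn /var/log/syslog | tail -n 50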
+
+
diff --git a/leap-debug-remote.sh b/leap-debug-remote.sh
new file mode 100644
index 00000000..7f9c6945
--- /dev/null
+++ b/leap-debug-remote.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+# debug script to be run on remote servers
+
+regexp='(leap|stunnel|couch|soledad|haproxy)'
+
+find /etc/leap/
+
+echo
+
+ls -la /srv/leap/
+
+echo
+
+
+dpkg -l | egrep "$regexp"
+
+echo
+
+ps aux|egrep "$regexp"
+
+echo
+
+cat /etc/hosts
diff --git a/platform.rb b/platform.rb
index 9f63b4ca..689c58b7 100644
--- a/platform.rb
+++ b/platform.rb
@@ -1,15 +1,16 @@
+# encoding: utf-8
#
# These are variables defined by this leap_platform and used by leap_cli.
#
Leap::Platform.define do
- self.version = "1.1.2"
- self.compatible_cli = "1.1.2".."1.99"
+ self.version = "0.4.0"
+ self.compatible_cli = "1.5.0".."1.99"
#
# the facter facts that should be gathered
#
- self.facts = ["ec2_local_ipv4"]
+ self.facts = ["ec2_local_ipv4", "ec2_public_ipv4"]
#
# the named paths for this platform
@@ -26,6 +27,7 @@ Leap::Platform.define do
# input config files
:common_config => 'common.json',
:provider_config => 'provider.json',
+ :provider_env_config => 'provider.#{arg}.json',
:secrets_config => 'secrets.json',
:node_config => 'nodes/#{arg}.json',
:service_config => 'services/#{arg}.json',
@@ -43,6 +45,8 @@ Leap::Platform.define do
:user_pgp => 'users/#{arg}/#{arg}_pgp.pub',
:known_hosts => 'files/ssh/known_hosts',
:authorized_keys => 'files/ssh/authorized_keys',
+ :monitor_pub_key => 'files/ssh/monitor_ssh.pub',
+ :monitor_priv_key => 'files/ssh/monitor_ssh',
:ca_key => 'files/ca/ca.key',
:ca_cert => 'files/ca/ca.crt',
:client_ca_key => 'files/ca/client_ca.key',
@@ -73,5 +77,9 @@ Leap::Platform.define do
self.node_files = [
:node_config, :hiera, :node_x509_cert, :node_x509_key, :node_ssh_pub_key
]
+
+ self.monitor_username = 'monitor'
+
+ self.reserved_usernames = ['monitor']
end
diff --git a/provider_base/common.json b/provider_base/common.json
index 2313bd8b..a4d9c5f2 100644
--- a/provider_base/common.json
+++ b/provider_base/common.json
@@ -3,9 +3,10 @@
"environment": null,
"services": [],
"tags": [],
+ "contacts": "= provider.contacts.default",
"domain": {
- "full_suffix": "= global.provider.domain",
- "internal_suffix": "= global.provider.domain_internal",
+ "full_suffix": "= provider.domain",
+ "internal_suffix": "= provider.domain_internal",
"full": "= node.name + '.' + domain.full_suffix",
"internal": "= node.name + '.' + domain.internal_suffix",
"name": "= node.name + '.' + (dns.public ? domain.full_suffix : domain.internal_suffix)"
@@ -15,7 +16,6 @@
},
"ssh": {
"authorized_keys": "= authorized_keys",
- "known_hosts": "=> known_hosts_file",
"port": 22,
"mosh": {
"ports": "60000:61000",
@@ -24,7 +24,7 @@
},
"hosts": "=> hosts_file",
"x509": {
- "use": false,
+ "use": true,
"cert": "= x509.use ? file(:node_x509_cert, :missing => 'x509 certificate for node $node. Run `leap cert update`') : nil",
"key": "= x509.use ? file(:node_x509_key, :missing => 'x509 key for node $node. Run `leap cert update`') : nil",
"ca_cert": "= try_file :ca_cert"
@@ -35,5 +35,8 @@
},
"name": "common",
"location": null,
- "enabled": true
+ "enabled": true,
+ "mail": {
+ "smarthost": "= nodes_like_me[:services => :mx].exclude(self).field('domain.full')"
+ }
}
diff --git a/provider_base/files/service-definitions/provider.json.erb b/provider_base/files/service-definitions/provider.json.erb
index 5d4c63a0..3e055e9a 100644
--- a/provider_base/files/service-definitions/provider.json.erb
+++ b/provider_base/files/service-definitions/provider.json.erb
@@ -1,13 +1,13 @@
<%=
# grab some fields from provider.json
- hsh = global.provider.pick(
- :languages, :description, :name,
+ hsh = provider.pick(
+ :languages, :description, :name, :services,
:enrollment_policy, :default_language, :service
)
hsh['domain'] = domain.full_suffix
# advertise services that are 'user services' and for which there are actually nodes
- hsh['services'] = global.services[:service_type => :user_service].field(:name).select do |service|
+ hsh['services'] ||= global.services[:service_type => :user_service].field(:name).select do |service|
nodes_like_me[:services => service].any?
end
diff --git a/provider_base/files/service-definitions/v1/eip-service.json.erb b/provider_base/files/service-definitions/v1/eip-service.json.erb
index feaea25b..3b8976fd 100644
--- a/provider_base/files/service-definitions/v1/eip-service.json.erb
+++ b/provider_base/files/service-definitions/v1/eip-service.json.erb
@@ -27,6 +27,7 @@
hsh["version"] = 1
locations = {}
gateways = []
+ configuration = nil
nodes_like_me[:services => 'openvpn'].each_node do |node|
if node.openvpn.allow_limited && node.openvpn.allow_unlimited
gateways << add_gateway(node, locations, :ip => node.openvpn.gateway_address, :limited => false)
@@ -36,13 +37,13 @@
elsif node.openvpn.allow_limited
gateways << add_gateway(node, locations, :ip => node.openvpn.gateway_address, :limited => true)
end
+ if configuration && node.openvpn.configuration != configuration
+ log :error, "OpenVPN nodes in the environment `#{node.environment}` have conflicting `openvpn.configuration` values. This will result in bad errors."
+ end
+ configuration = node.openvpn.configuration
end
hsh["gateways"] = gateways.compact
hsh["locations"] = locations
- hsh["openvpn_configuration"] = {
- "tls-cipher" => "DHE-RSA-AES128-SHA",
- "auth" => "SHA1",
- "cipher" => "AES-128-CBC"
- }
+ hsh["openvpn_configuration"] = configuration
JSON.sorted_generate hsh
%> \ No newline at end of file
diff --git a/provider_base/files/service-definitions/v1/smtp-service.json.erb b/provider_base/files/service-definitions/v1/smtp-service.json.erb
index 60129f5f..45f240ac 100644
--- a/provider_base/files/service-definitions/v1/smtp-service.json.erb
+++ b/provider_base/files/service-definitions/v1/smtp-service.json.erb
@@ -15,7 +15,7 @@
host = {}
host["hostname"] = node.domain.full
host["ip_address"] = node.ip_address
- host["port"] = 25 # hard coded for now, later node.smtp.port
+ host["port"] = 465 # hard coded for now, later node.smtp.port
if node['location']
location_name = underscore(node.location.name)
host["location"] = location_name
@@ -26,4 +26,4 @@
hsh["hosts"] = hosts
hsh["locations"] = locations
JSON.sorted_generate hsh
-%> \ No newline at end of file
+%>
diff --git a/provider_base/provider.json b/provider_base/provider.json
index b6a7af21..fa69318b 100644
--- a/provider_base/provider.json
+++ b/provider_base/provider.json
@@ -8,8 +8,8 @@
"en": "REQUIRED"
},
"contacts": {
- "default": "REQUIRED",
- "english": "= contacts.default.split('@').join(' at the domain ')"
+ "default": ["REQUIRED"],
+ "english": "= contacts.default.map {|email| email.split('@').join(' at the domain ')}.join(', ')"
},
"languages": ["en"],
"default_language": "en",
@@ -23,32 +23,36 @@
],
"default_service_level": 1,
"bandwidth_limit": 102400,
- "allow_free": "= global.provider.service.levels.select {|l| l['rate'].nil?}.any?",
- "allow_paid": "= global.provider.service.levels.select {|l| !l['rate'].nil?}.any?",
- "allow_anonymous": "= global.provider.service.levels.select {|l| l['name'] == 'anonymous'}.any?",
- "allow_registration": "= global.provider.service.levels.select {|l| l['name'] != 'anonymous'}.any?",
- "allow_limited_bandwidth": "= global.provider.service.levels.select {|l| l['bandwidth'] == 'limited'}.any?",
- "allow_unlimited_bandwidth": "= global.provider.service.levels.select {|l| l['bandwidth'].nil?}.any?"
+ "allow_free": "= provider.service.levels.select {|l| l['rate'].nil?}.any?",
+ "allow_paid": "= provider.service.levels.select {|l| !l['rate'].nil?}.any?",
+ "allow_anonymous": "= provider.service.levels.select {|l| l['name'] == 'anonymous'}.any?",
+ "allow_registration": "= provider.service.levels.select {|l| l['name'] != 'anonymous'}.any?",
+ "allow_limited_bandwidth": "= provider.service.levels.select {|l| l['bandwidth'] == 'limited'}.any?",
+ "allow_unlimited_bandwidth": "= provider.service.levels.select {|l| l['bandwidth'].nil?}.any?"
},
"ca": {
- "name": "= global.provider.ca.organization + ' Root CA'",
- "organization": "= global.provider.name[global.provider.default_language]",
- "organizational_unit": "= 'https://' + global.provider.domain",
+ "name": "= provider.ca.organization + ' Root CA'",
+ "organization": "= provider.name[provider.default_language]",
+ "organizational_unit": "= 'https://' + provider.domain",
"bit_size": 4096,
"digest": "SHA256",
"life_span": "10y",
"server_certificates": {
- "bit_size": 2024,
+ "bit_size": 2048,
"digest": "SHA256",
"life_span": "1y"
},
"client_certificates": {
- "bit_size": 2024,
+ "bit_size": 2048,
"digest": "SHA256",
"life_span": "2m",
"limited_prefix": "LIMITED",
"unlimited_prefix": "UNLIMITED"
}
},
- "hiera_sync_destination": "/etc/leap"
+ "hiera_sync_destination": "/etc/leap",
+ "client_version": {
+ "min": "0.5",
+ "max": null
+ }
}
diff --git a/provider_base/services/couchdb.json b/provider_base/services/couchdb.json
index a26579c8..5f1b5381 100644
--- a/provider_base/services/couchdb.json
+++ b/provider_base/services/couchdb.json
@@ -1,38 +1,56 @@
{
- "x509": {
- "use": true
- },
- "stunnel": {
- "couch_server": "= stunnel_server(couch.port)",
- "epmd_server": "= stunnel_server(couch.bigcouch.epmd_port)",
- "epmd_clients": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.bigcouch.epmd_port)",
- "ednp_server": "= stunnel_server(couch.bigcouch.ednp_port)",
- "ednp_clients": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.bigcouch.ednp_port)"
- },
- "couch": {
- "port": 5984,
- "bigcouch": {
- "epmd_port": 4369,
- "ednp_port": 9002,
- "cookie": "= secret :bigcouch_cookie",
- "neighbors": "= nodes_like_me[:services => :couchdb].exclude(self).field('domain.full')"
+ "x509": {
+ "use": true
},
- "users": {
- "admin": {
- "username": "admin",
- "password": "= secret :couch_admin_password",
- "salt": "= hex_secret :couch_admin_password_salt, 128"
- },
- "webapp": {
- "username": "webapp",
- "password": "= secret :couch_webapp_password",
- "salt": "= hex_secret :couch_webapp_password_salt, 128"
- },
- "soledad": {
- "username": "soledad",
- "password": "= secret :couch_soledad_password",
- "salt": "= hex_secret :couch_soledad_password_salt, 128"
- }
+ "stunnel": {
+ "couch_server": "= stunnel_server(couch.port)",
+ "epmd_server": "= stunnel_server(couch.bigcouch.epmd_port)",
+ "epmd_clients": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.bigcouch.epmd_port)",
+ "ednp_server": "= stunnel_server(couch.bigcouch.ednp_port)",
+ "ednp_clients": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.bigcouch.ednp_port)"
+ },
+ "couch": {
+ "port": 5984,
+ "bigcouch": {
+ "epmd_port": 4369,
+ "ednp_port": 9002,
+ "cookie": "= secret :bigcouch_cookie",
+ "neighbors": "= nodes_like_me[:services => :couchdb].exclude(self).field('domain.full')"
+ },
+ "users": {
+ "admin": {
+ "username": "admin",
+ "password": "= secret :couch_admin_password",
+ "salt": "= hex_secret :couch_admin_password_salt, 128"
+ },
+ "leap_mx": {
+ "username": "leap_mx",
+ "password": "= secret :couch_leap_mx_password",
+ "salt": "= hex_secret :couch_leap_mx_password_salt, 128"
+ },
+ "nickserver": {
+ "username": "nickserver",
+ "password": "= secret :couch_nickserver_password",
+ "salt": "= hex_secret :couch_nickserver_password_salt, 128"
+ },
+ "soledad": {
+ "username": "soledad",
+ "password": "= secret :couch_soledad_password",
+ "salt": "= hex_secret :couch_soledad_password_salt, 128"
+ },
+ "tapicero": {
+ "username": "tapicero",
+ "password": "= secret :couch_tapicero_password",
+ "salt": "= hex_secret :couch_tapicero_password_salt, 128"
+ },
+ "webapp": {
+ "username": "webapp",
+ "password": "= secret :couch_webapp_password",
+ "salt": "= hex_secret :couch_webapp_password_salt, 128"
+ }
+ },
+ "webapp": {
+ "nagios_test_pw": "= secret :nagios_test_password"
+ }
}
- }
}
diff --git a/provider_base/services/monitor.json b/provider_base/services/monitor.json
index f5e4d922..03f6c6d1 100644
--- a/provider_base/services/monitor.json
+++ b/provider_base/services/monitor.json
@@ -1,6 +1,22 @@
{
"nagios": {
"nagiosadmin_pw": "= secret :nagios_admin_password",
- "hosts": "= nodes_like_me.fields('domain.internal', 'ip_address', 'services', 'openvpn.gateway_address')"
+ "hosts": "= (self.environment == 'local' ? nodes_like_me : nodes[:environment => '!local']).pick_fields('domain.internal', 'domain.full_suffix', 'ip_address', 'services', 'openvpn.gateway_address', 'ssh.port')"
+ },
+ "hosts": "= self.environment == 'local' ? hosts_file(nodes_like_me) : hosts_file(nodes[:environment => '!local'])",
+ "ssh": {
+ "monitor": {
+ "username": "= Leap::Platform.monitor_username",
+ "private_key": "= file(:monitor_priv_key)"
+ }
+ },
+ "x509": {
+ "use": true,
+ "ca_cert": "= file :ca_cert, :missing => 'provider CA. Run `leap cert ca`'",
+ "client_ca_cert": "= file :client_ca_cert, :missing => 'Certificate Authority. Run `leap cert ca`'",
+ "client_ca_key": "= file :client_ca_key, :missing => 'Certificate Authority. Run `leap cert ca`'",
+ "commercial_cert": "= file [:commercial_cert, domain.full_suffix]",
+ "commercial_key": "= file [:commercial_key, domain.full_suffix]",
+ "commercial_ca_cert": "= try_file :commercial_ca_cert"
}
}
diff --git a/provider_base/services/mx.json b/provider_base/services/mx.json
new file mode 100644
index 00000000..731dee9a
--- /dev/null
+++ b/provider_base/services/mx.json
@@ -0,0 +1,24 @@
+{
+ "stunnel": {
+ "couch_client": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.port)"
+ },
+ "haproxy": {
+ "servers": "= haproxy_servers(nodes_like_me[:services => :couchdb], stunnel.couch_client)"
+ },
+ "couchdb_leap_mx_user": {
+ "username": "= global.services[:couchdb].couch.users[:leap_mx].username",
+ "password": "= secret :couch_leap_mx_password",
+ "salt": "= hex_secret :couch_leap_mx_password_salt, 128"
+ },
+ "mynetworks": "= nodes['environment' => '!local'].map{|name, n| [n.ip_address, (global.facts[name]||{})['ec2_public_ipv4']]}.flatten.compact.uniq",
+ "x509": {
+ "use": true,
+ "ca_cert": "= file :ca_cert, :missing => 'provider CA. Run `leap cert ca`'",
+ "client_ca_cert": "= file :client_ca_cert, :missing => 'Certificate Authority. Run `leap cert ca`'",
+ "client_ca_key": "= file :client_ca_key, :missing => 'Certificate Authority. Run `leap cert ca`'",
+ "commercial_cert": "= file [:commercial_cert, domain.full_suffix]",
+ "commercial_key": "= file [:commercial_key, domain.full_suffix]",
+ "commercial_ca_cert": "= try_file :commercial_ca_cert"
+ },
+ "service_type": "user_service"
+}
diff --git a/provider_base/services/openvpn.json b/provider_base/services/openvpn.json
index 5d77f946..04e19aa2 100644
--- a/provider_base/services/openvpn.json
+++ b/provider_base/services/openvpn.json
@@ -14,10 +14,16 @@
"filter_dns": false,
"adblock": false,
"user_ips": false,
- "allow_limited": "= global.provider.service.allow_limited_bandwidth",
- "allow_unlimited": "= global.provider.service.allow_unlimited_bandwidth",
- "limited_prefix": "= global.provider.ca.client_certificates.limited_prefix",
- "unlimited_prefix": "= global.provider.ca.client_certificates.unlimited_prefix",
- "rate_limit": "= openvpn.allow_limited ? global.provider.service.bandwidth_limit : nil"
+ "allow_limited": "= provider.service.allow_limited_bandwidth",
+ "allow_unlimited": "= provider.service.allow_unlimited_bandwidth",
+ "limited_prefix": "= provider.ca.client_certificates.limited_prefix",
+ "unlimited_prefix": "= provider.ca.client_certificates.unlimited_prefix",
+ "rate_limit": "= openvpn.allow_limited ? provider.service.bandwidth_limit : nil",
+ "configuration": {
+ "tls-cipher": "DHE-RSA-AES128-SHA",
+ "auth": "SHA1",
+ "cipher": "AES-128-CBC",
+ "keepalive": "10 30"
+ }
}
}
diff --git a/provider_base/services/soledad.json b/provider_base/services/soledad.json
index 10657563..ed6fbc9f 100644
--- a/provider_base/services/soledad.json
+++ b/provider_base/services/soledad.json
@@ -1,6 +1,12 @@
{
- "service_type": "public_service",
"soledad": {
- "port": 1111
- }
-} \ No newline at end of file
+ "port": 2323,
+ "require_couchdb": "=> assert %(services.include? 'couchdb')",
+ "couchdb_soledad_user": {
+ "username": "= global.services[:couchdb].couch.users[:soledad].username",
+ "password": "= secret :couch_soledad_password",
+ "salt": "= hex_secret :couch_soledad_password_salt, 128"
+ }
+ },
+ "service_type": "public_service"
+}
diff --git a/provider_base/services/static.json b/provider_base/services/static.json
new file mode 100644
index 00000000..d9155a84
--- /dev/null
+++ b/provider_base/services/static.json
@@ -0,0 +1,6 @@
+{
+ "static": {
+ "formats": "=> (self.static.domains||{}).values.collect{|d| (d.locations||{}).values.collect{|l|l['format']}}.flatten.uniq"
+ },
+ "service_type": "public_service"
+} \ No newline at end of file
diff --git a/provider_base/services/tor.json b/provider_base/services/tor.json
index 9173b8d4..ae4da46d 100644
--- a/provider_base/services/tor.json
+++ b/provider_base/services/tor.json
@@ -1,6 +1,6 @@
{
"tor": {
"bandwidth_rate": 6550,
- "contacts": "= global.provider.contacts['tor'] || global.provider.contacts.default"
+ "contacts": "= [provider.contacts['tor'] || provider.contacts.default].flatten"
}
}
diff --git a/provider_base/services/webapp.json b/provider_base/services/webapp.json
index 93396ec7..29c0cbf9 100644
--- a/provider_base/services/webapp.json
+++ b/provider_base/services/webapp.json
@@ -1,25 +1,35 @@
{
"webapp": {
+ "admins": [],
"modules": ["user", "billing", "help"],
- "couchdb_admin_user": "= global.services[:couchdb].couch.users[:admin]",
-// "couchdb_webapp_user": "= global.services[:couchdb].couch.users[:webapp]",
- "couchdb_webapp_user": "= global.services[:couchdb].couch.users[:admin]",
- "favicon": "= file_path 'branding/favicon.ico'",
- "tail_scss": "= file_path 'branding/tail.scss'",
- "head_scss": "= file_path 'branding/head.scss'",
- "img_dir": "= file_path 'branding/img'",
- "client_certificates": "= global.provider.ca.client_certificates",
- "allow_limited_certs": "= global.provider.service.allow_limited_bandwidth",
- "allow_unlimited_certs": "= global.provider.service.allow_unlimited_bandwidth",
- "allow_anonymous_certs": "= global.provider.service.allow_anonymous",
+ "couchdb_webapp_user": {
+ "username": "= global.services[:couchdb].couch.users[:webapp].username",
+ "password": "= secret :couch_webapp_password",
+ "salt": "= hex_secret :couch_webapp_password_salt, 128"
+ },
+ "customization_dir": "= file_path 'webapp'",
+ "client_certificates": "= provider.ca.client_certificates",
+ "allow_limited_certs": "= provider.service.allow_limited_bandwidth",
+ "allow_unlimited_certs": "= provider.service.allow_unlimited_bandwidth",
+ "allow_anonymous_certs": "= provider.service.allow_anonymous",
"secret_token": "= secret :webapp_secret_token",
- "api_version": 1
+ "api_version": 1,
+ "secure": false,
+ "git": {
+ "source": "https://leap.se/git/leap_web",
+ "revision": "origin/master"
+ },
+ "client_version": "= provider.client_version",
+ "nagios_test_user": {
+ "username": "nagios_test",
+ "password": "= secret :nagios_test_password"
+ }
},
"stunnel": {
"couch_client": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.port)"
},
"haproxy": {
- "local_ports": "= stunnel.couch_client.field(:accept_port)"
+ "servers": "= haproxy_servers(nodes_like_me[:services => :couchdb], stunnel.couch_client, global.services[:couchdb].couch.port)"
},
"definition_files": {
"provider": "= file :provider_json_template",
@@ -34,8 +44,12 @@
},
"nickserver": {
"domain": "= 'nicknym.' + domain.full_suffix",
- "port": 6425,
- "couchdb_user": "= global.services[:couchdb].couch.users[:admin]"
+ "couchdb_nickserver_user": {
+ "username": "= global.services[:couchdb].couch.users[:nickserver].username",
+ "password": "= secret :couch_nickserver_password",
+ "salt": "= hex_secret :couch_nickserver_password_salt, 128"
+ },
+ "port": 6425
},
"dns": {
"aliases": "= [domain.full_suffix, domain.full, api.domain, nickserver.domain]"
@@ -43,8 +57,8 @@
"x509": {
"use": true,
"ca_cert": "= file :ca_cert, :missing => 'provider CA. Run `leap cert ca`'",
- "client_ca_cert": "= file_path :client_ca_cert",
- "client_ca_key": "= file_path :client_ca_key",
+ "client_ca_cert": "= file :client_ca_cert, :missing => 'Certificate Authority. Run `leap cert ca`'",
+ "client_ca_key": "= file :client_ca_key, :missing => 'Certificate Authority. Run `leap cert ca`'",
"commercial_cert": "= file [:commercial_cert, domain.full_suffix]",
"commercial_key": "= file [:commercial_key, domain.full_suffix]",
"commercial_ca_cert": "= try_file :commercial_ca_cert"
diff --git a/provider_base/tags/development.json b/provider_base/tags/development.json
index 6d4f9e25..d9c2c007 100644
--- a/provider_base/tags/development.json
+++ b/provider_base/tags/development.json
@@ -1,7 +1,7 @@
{
"environment": "development",
"domain": {
- "full_suffix": "= 'dev.' + global.provider.domain",
- "internal_suffix": "= 'dev.' + global.provider.domain_internal"
+ "full_suffix": "= 'dev.' + provider.domain",
+ "internal_suffix": "= 'dev.' + provider.domain_internal"
}
} \ No newline at end of file
diff --git a/puppet/bin/apply_on_node.sh b/puppet/bin/apply_on_node.sh
new file mode 100755
index 00000000..09e5b035
--- /dev/null
+++ b/puppet/bin/apply_on_node.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+# Script to use on a node for debugging
+# Usage: ./apply_on_node.sh <puppet parameters>
+#
+# Example: ./apply_on_node.sh --debug --verbose
+
+ROOTDIR='/srv/leap'
+PLATFORM="$ROOTDIR"
+MODULEPATH="$PLATFORM/puppet/modules"
+LOG=/var/log/leap.log
+
+# example tags to use
+#TAGS='--tags=leap_base,leap_service,leap_slow'
+#TAGS='--tags=leap_base,leap_slow'
+#TAGS='--tags=leap_base,leap_service'
+
+#######
+# Setup
+#######
+
+puppet apply -v --confdir $PLATFORM/puppet --libdir $PLATFORM/puppet/lib --modulepath=$MODULEPATH $PLATFORM/puppet/manifests/setup.pp $TAGS $@ |tee $LOG 2>&1
+
+#########
+# site.pp
+#########
+
+puppet apply -v --confdir $PLATFORM/puppet --libdir $PLATFORM/puppet/lib --modulepath=$MODULEPATH $PLATFORM/puppet/manifests/site.pp $TAGS $@ |tee $LOG 2>&1
+
+
diff --git a/puppet/manifests/setup.pp b/puppet/manifests/setup.pp
index 80e7ffc2..4dd03203 100644
--- a/puppet/manifests/setup.pp
+++ b/puppet/manifests/setup.pp
@@ -1,16 +1,5 @@
#
# this is applied before each run of site.pp
#
-$services = ''
-
-Exec { path => '/usr/bin:/usr/sbin/:/bin:/sbin:/usr/local/bin:/usr/local/sbin' }
-
-include site_config::hosts
-
-include site_apt
-
-package { 'facter':
- ensure => latest,
- require => Exec['refresh_apt']
-}
+include ::site_config::setup
diff --git a/puppet/manifests/site.pp b/puppet/manifests/site.pp
index 08cbbb9e..f8726fa9 100644
--- a/puppet/manifests/site.pp
+++ b/puppet/manifests/site.pp
@@ -2,19 +2,11 @@
Exec { path => '/usr/bin:/usr/sbin/:/bin:/sbin:/usr/local/bin:/usr/local/sbin' }
# parse services for host
-$services=join(hiera_array('services'), ' ')
+$services=join(hiera_array('services', ['']), ' ')
notice("Services for ${fqdn}: ${services}")
-# make sure apt is updated before any packages are installed
-include apt::update
-Package { require => Exec['apt_updated'] }
-
-include stdlib
-
-import 'common'
+include site_config::setup
include site_config::default
-include site_config::slow
-
# configure eip
if $services =~ /\bopenvpn\b/ {
@@ -23,6 +15,7 @@ if $services =~ /\bopenvpn\b/ {
if $services =~ /\bcouchdb\b/ {
include site_couchdb
+ include tapicero
}
if $services =~ /\bwebapp\b/ {
@@ -30,6 +23,10 @@ if $services =~ /\bwebapp\b/ {
include site_nickserver
}
+if $services =~ /\bsoledad\b/ {
+ include soledad::server
+}
+
if $services =~ /\bmonitor\b/ {
include site_nagios
}
@@ -37,3 +34,13 @@ if $services =~ /\bmonitor\b/ {
if $services =~ /\btor\b/ {
include site_tor
}
+
+if $services =~ /\bmx\b/ {
+ include site_mx
+}
+
+if $services =~ /\bstatic\b/ {
+ include site_static
+}
+
+include site_config::packages::uninstall
diff --git a/puppet/modules/apt b/puppet/modules/apt
-Subproject 1a72a99693c1d77bfe891546408f88264fca98e
+Subproject 64fb988c0e37d64fb3e241dc95f156072e43bf2
diff --git a/puppet/modules/backupninja b/puppet/modules/backupninja
new file mode 160000
+Subproject daeb1a1f112a4dbf6b39565f0dea461e46a6468
diff --git a/puppet/modules/check_mk b/puppet/modules/check_mk
new file mode 160000
+Subproject 5c11597a055858b5ddc1ce8f7f8db249f5f1b33
diff --git a/puppet/modules/couchdb b/puppet/modules/couchdb
-Subproject 20deb0652ccfe105eddec6ba2ad32b8d633705f
+Subproject c8f5443e0998d3d3d43505ff5a6fdf8c438d6c2
diff --git a/puppet/modules/git b/puppet/modules/git
-Subproject 497a1034489e0dc3cab5dab2fb0a85778576973
+Subproject ba5dd8d5c8e09d521ff49f1ebc753601e449f82
diff --git a/puppet/modules/leap_mx/manifests/init.pp b/puppet/modules/leap_mx/manifests/init.pp
new file mode 100644
index 00000000..b59eac01
--- /dev/null
+++ b/puppet/modules/leap_mx/manifests/init.pp
@@ -0,0 +1,62 @@
+class leap_mx {
+
+ $leap_mx = hiera('couchdb_leap_mx_user')
+ $couchdb_user = $leap_mx['username']
+ $couchdb_password = $leap_mx['password']
+
+ $couchdb_host = 'localhost'
+ $couchdb_port = '4096'
+
+ include soledad::common
+ include site_apt::preferences::twisted
+
+ #
+ # USER AND GROUP
+ #
+
+ group { 'leap-mx':
+ ensure => present,
+ allowdupe => false;
+ }
+
+ user { 'leap-mx':
+ ensure => present,
+ allowdupe => false,
+ gid => 'leap-mx',
+ home => '/etc/leap',
+ require => Group['leap-mx'];
+ }
+
+ #
+ # LEAP-MX CONFIG
+ #
+
+ file { '/etc/leap/mx.conf':
+ content => template('leap_mx/mx.conf.erb'),
+ owner => 'leap-mx',
+ group => 'leap-mx',
+ mode => '0600',
+ notify => Service['leap-mx'];
+ }
+
+ #
+ # LEAP-MX CODE
+ #
+
+ package { 'leap-mx':
+ ensure => installed,
+ require => Class['site_apt::preferences::twisted']
+ }
+
+ #
+ # LEAP-MX DAEMON
+ #
+
+ service { 'leap-mx':
+ ensure => running,
+ enable => true,
+ hasstatus => true,
+ hasrestart => true,
+ require => [ Package['leap-mx'] ];
+ }
+}
diff --git a/puppet/modules/leap_mx/templates/mx.conf.erb b/puppet/modules/leap_mx/templates/mx.conf.erb
new file mode 100644
index 00000000..e05bc150
--- /dev/null
+++ b/puppet/modules/leap_mx/templates/mx.conf.erb
@@ -0,0 +1,15 @@
+[mail1]
+path=/var/mail/vmail/Maildir
+recursive=True
+
+[couchdb]
+user=<%= @couchdb_user %>
+password=<%= @couchdb_password %>
+server=<%= @couchdb_host %>
+port=<%= @couchdb_port %>
+
+[alias map]
+port=4242
+
+[check recipient]
+port=2244
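
Rendered with the hiera values looked up in leap_mx/manifests/init.pp, the couchdb section of /etc/leap/mx.conf would come out roughly like this (user and password are placeholders for the generated couchdb_leap_mx_user credentials):

    [couchdb]
    user=leap_mx
    password=<generated password>
    server=localhost
    port=4096

The remaining sections only pin the local ports (4242 and 2244) on which leap_mx is expected to answer postfix's alias-map and recipient-check lookups.
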
diff --git a/puppet/modules/postfix b/puppet/modules/postfix
new file mode 160000
+Subproject 1103a73ab4253712c6446bba7a443619fe51671
diff --git a/puppet/modules/rsyslog b/puppet/modules/rsyslog
new file mode 160000
+Subproject 20fbda6b91472e656331a9c64630fb207e9f578
diff --git a/puppet/modules/rubygems b/puppet/modules/rubygems
-Subproject 1e5ed3dbef9381bb9d5e2a7b4957bb3f5288d6a
+Subproject ef820cfec3321d17be99ef814318adb4e3cc1e9
diff --git a/puppet/modules/site_apache/files/conf.d/security b/puppet/modules/site_apache/files/conf.d/security
new file mode 100644
index 00000000..a5ae5bdc
--- /dev/null
+++ b/puppet/modules/site_apache/files/conf.d/security
@@ -0,0 +1,55 @@
+#
+# Disable access to the entire file system except for the directories that
+# are explicitly allowed later.
+#
+# This currently breaks the configurations that come with some web application
+# Debian packages. It will be made the default for the release after lenny.
+#
+#<Directory />
+# AllowOverride None
+# Order Deny,Allow
+# Deny from all
+#</Directory>
+
+
+# Changing the following options will not really affect the security of the
+# server, but might make attacks slightly more difficult in some cases.
+
+#
+# ServerTokens
+# This directive configures what you return as the Server HTTP response
+# Header. The default is 'Full' which sends information about the OS-Type
+# and compiled in modules.
+# Set to one of: Full | OS | Minimal | Minor | Major | Prod
+# where Full conveys the most information, and Prod the least.
+#
+#ServerTokens Minimal
+ServerTokens Prod
+
+#
+# Optionally add a line containing the server version and virtual host
+# name to server-generated pages (internal error documents, FTP directory
+# listings, mod_status and mod_info output etc., but not CGI generated
+# documents or custom error documents).
+# Set to "EMail" to also include a mailto: link to the ServerAdmin.
+# Set to one of: On | Off | EMail
+#
+#ServerSignature Off
+ServerSignature Off
+
+#
+# Allow TRACE method
+#
+# Set to "extended" to also reflect the request body (only for testing and
+# diagnostic purposes).
+#
+# Set to one of: On | Off | extended
+#
+#TraceEnable Off
+TraceEnable On
+
+# Setting this header will prevent other sites from embedding pages from this
+# site as frames. This defends against clickjacking attacks.
+# Requires mod_headers to be enabled.
+#
+Header set X-Frame-Options: "DENY"
diff --git a/puppet/modules/site_apache/manifests/common.pp b/puppet/modules/site_apache/manifests/common.pp
new file mode 100644
index 00000000..72f24838
--- /dev/null
+++ b/puppet/modules/site_apache/manifests/common.pp
@@ -0,0 +1,26 @@
+class site_apache::common {
+ # installs x509 cert + key and common config
+ # that both nagios + leap webapp use
+
+ $web_domain = hiera('domain')
+ $domain_name = $web_domain['name']
+
+ include x509::variables
+ include site_config::x509::commercial::cert
+ include site_config::x509::commercial::key
+ include site_config::x509::commercial::ca
+
+ Class['Site_config::X509::Commercial::Key'] ~> Service[apache]
+ Class['Site_config::X509::Commercial::Cert'] ~> Service[apache]
+ Class['Site_config::X509::Commercial::Ca'] ~> Service[apache]
+
+ include site_apache::module::rewrite
+
+ class { '::apache': no_default_site => true, ssl => true }
+
+ apache::vhost::file {
+ 'common':
+ content => template('site_apache/vhosts.d/common.conf.erb')
+ }
+
+}
diff --git a/puppet/modules/site_apache/manifests/module/alias.pp b/puppet/modules/site_apache/manifests/module/alias.pp
new file mode 100644
index 00000000..c1f5e185
--- /dev/null
+++ b/puppet/modules/site_apache/manifests/module/alias.pp
@@ -0,0 +1,5 @@
+class site_apache::module::alias ( $ensure = present )
+{
+
+ apache::module { 'alias': ensure => $ensure }
+}
diff --git a/puppet/modules/site_apache/manifests/module/expires.pp b/puppet/modules/site_apache/manifests/module/expires.pp
new file mode 100644
index 00000000..f73a5607
--- /dev/null
+++ b/puppet/modules/site_apache/manifests/module/expires.pp
@@ -0,0 +1,4 @@
+class site_apache::module::expires ( $ensure = present )
+{
+ apache::module { 'expires': ensure => $ensure }
+}
diff --git a/puppet/modules/site_apache/manifests/module/headers.pp b/puppet/modules/site_apache/manifests/module/headers.pp
new file mode 100644
index 00000000..f7caa28c
--- /dev/null
+++ b/puppet/modules/site_apache/manifests/module/headers.pp
@@ -0,0 +1,5 @@
+class site_apache::module::headers ( $ensure = present )
+{
+
+ apache::module {'headers': ensure => $ensure }
+}
diff --git a/puppet/modules/site_apache/manifests/module/removeip.pp b/puppet/modules/site_apache/manifests/module/removeip.pp
new file mode 100644
index 00000000..f106167a
--- /dev/null
+++ b/puppet/modules/site_apache/manifests/module/removeip.pp
@@ -0,0 +1,5 @@
+class site_apache::module::removeip ( $ensure = present )
+{
+ package { 'libapache2-mod-removeip': ensure => $ensure }
+ apache::module { 'removeip': ensure => $ensure }
+}
diff --git a/puppet/modules/site_apache/manifests/module/rewrite.pp b/puppet/modules/site_apache/manifests/module/rewrite.pp
new file mode 100644
index 00000000..7ad00a0c
--- /dev/null
+++ b/puppet/modules/site_apache/manifests/module/rewrite.pp
@@ -0,0 +1,5 @@
+class site_apache::module::rewrite ( $ensure = present )
+{
+
+ apache::module { 'rewrite': ensure => $ensure }
+}
diff --git a/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb b/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb
index ae894cd4..3360ac59 100644
--- a/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb
+++ b/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb
@@ -10,17 +10,26 @@ Listen 0.0.0.0:<%= api_port %>
ServerName <%= api_domain %>
SSLEngine on
- SSLProtocol -all +SSLv3 +TLSv1
- SSLCipherSuite HIGH:MEDIUM:!aNULL:!SSLv2:!MD5:@STRENGTH
+ SSLProtocol all -SSLv2
SSLHonorCipherOrder on
+ SSLCompression off
+ SSLCipherSuite "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-ECDSA-RC4-SHA:AES128:AES256:RC4-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!3DES:!MD5:!PSK"
SSLCACertificatePath /etc/ssl/certs
- SSLCertificateChainFile /etc/ssl/certs/leap_api.pem
- SSLCertificateKeyFile /etc/x509/keys/leap_api.key
- SSLCertificateFile /etc/x509/certs/leap_api.crt
+ SSLCertificateChainFile <%= scope.lookupvar('x509::variables::local_CAs') %>/<%= scope.lookupvar('site_config::params::ca_name') %>.crt
+ SSLCertificateKeyFile <%= scope.lookupvar('x509::variables::keys') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.key
+ SSLCertificateFile <%= scope.lookupvar('x509::variables::certs') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.crt
RequestHeader set X_FORWARDED_PROTO 'https'
+ <IfModule mod_headers.c>
+<% if @webapp['secure'] -%>
+ Header always set Strict-Transport-Security "max-age=31536000; includeSubDomains"
+<% end -%>
+ Header always unset X-Powered-By
+ Header always unset X-Runtime
+ </IfModule>
+
DocumentRoot /srv/leap/webapp/public
# Check for maintenance file and redirect all requests
diff --git a/puppet/modules/site_apache/templates/vhosts.d/common.conf.erb b/puppet/modules/site_apache/templates/vhosts.d/common.conf.erb
new file mode 100644
index 00000000..ed430510
--- /dev/null
+++ b/puppet/modules/site_apache/templates/vhosts.d/common.conf.erb
@@ -0,0 +1,73 @@
+<VirtualHost *:80>
+ ServerName <%= domain %>
+ ServerAlias www.<%= domain %>
+ RewriteEngine On
+ RewriteRule ^.*$ https://<%= domain -%>%{REQUEST_URI} [R=permanent,L]
+</VirtualHost>
+
+<VirtualHost *:443>
+ ServerName <%= domain_name %>
+ ServerAlias <%= domain %>
+ ServerAlias www.<%= domain %>
+
+ SSLEngine on
+ SSLProtocol all -SSLv2
+ SSLHonorCipherOrder on
+ SSLCompression off
+ SSLCipherSuite "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-ECDSA-RC4-SHA:AES128:AES256:RC4-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!3DES:!MD5:!PSK"
+
+ SSLCACertificatePath /etc/ssl/certs
+ SSLCertificateChainFile <%= scope.lookupvar('x509::variables::local_CAs') %>/<%= scope.lookupvar('site_config::params::commercial_ca_name') %>.crt
+ SSLCertificateKeyFile <%= scope.lookupvar('x509::variables::keys') %>/<%= scope.lookupvar('site_config::params::commercial_cert_name') %>.key
+ SSLCertificateFile <%= scope.lookupvar('x509::variables::certs') %>/<%= scope.lookupvar('site_config::params::commercial_cert_name') %>.crt
+
+ RequestHeader set X_FORWARDED_PROTO 'https'
+
+ <IfModule mod_headers.c>
+<% if (defined? @services) and (@services.include? 'webapp') and (@webapp['secure']) -%>
+ Header always set Strict-Transport-Security "max-age=31536000; includeSubDomains"
+<% end -%>
+ Header always unset X-Powered-By
+ Header always unset X-Runtime
+ </IfModule>
+
+<% if (defined? @services) and (@services.include? 'webapp') -%>
+ DocumentRoot /srv/leap/webapp/public
+
+ RewriteEngine On
+ # Check for maintenance file and redirect all requests
+ RewriteCond %{DOCUMENT_ROOT}/system/maintenance.html -f
+ RewriteCond %{SCRIPT_FILENAME} !maintenance.html
+ RewriteCond %{REQUEST_URI} !/images/maintenance.jpg
+ RewriteRule ^.*$ %{DOCUMENT_ROOT}/system/maintenance.html [L]
+
+ # http://www.modrails.com/documentation/Users%20guide%20Apache.html#_passengerallowencodedslashes_lt_on_off_gt
+ AllowEncodedSlashes on
+ PassengerAllowEncodedSlashes on
+ PassengerFriendlyErrorPages off
+ SetEnv TMPDIR /var/tmp
+
+ # Allow rails assets to be cached for a very long time (since the URLs change whenever the content changes)
+ <Location /assets/>
+ Header unset ETag
+ FileETag None
+ ExpiresActive On
+ ExpiresDefault "access plus 1 year"
+ </Location>
+<% end -%>
+
+
+<% if (defined? @services) and (@services.include? 'monitor') -%>
+ <DirectoryMatch (/usr/share/nagios3/htdocs|/usr/lib/cgi-bin/nagios3|/etc/nagios3/stylesheets)>
+ <% if (defined? @services) and (@services.include? 'webapp') -%>
+ PassengerEnabled off
+ <% end -%>
+ AllowOverride all
+ # Nagios won't work with setting this option to "DENY",
+    # as set in conf.d/security (#4169). Therefore we allow
+ # it here, only for nagios.
+ Header set X-Frame-Options: "ALLOW"
+ </DirectoryMatch>
+<% end -%>
+</VirtualHost>
+
diff --git a/puppet/modules/site_apache/templates/vhosts.d/leap_webapp.conf.erb b/puppet/modules/site_apache/templates/vhosts.d/leap_webapp.conf.erb
deleted file mode 100644
index 4b051699..00000000
--- a/puppet/modules/site_apache/templates/vhosts.d/leap_webapp.conf.erb
+++ /dev/null
@@ -1,46 +0,0 @@
-<VirtualHost *:80>
- ServerName <%= domain %>
- ServerAlias www.<%= domain %>
- RewriteEngine On
- RewriteRule ^.*$ https://<%= domain -%>%{REQUEST_URI} [R=permanent,L]
-</VirtualHost>
-
-<VirtualHost *:443>
- ServerName <%= domain %>
- ServerAlias www.<%= domain %>
-
- SSLEngine on
- SSLProtocol -all +SSLv3 +TLSv1
- SSLCipherSuite HIGH:MEDIUM:!aNULL:!SSLv2:!MD5:@STRENGTH
- SSLHonorCipherOrder on
-
- SSLCACertificatePath /etc/ssl/certs
- SSLCertificateChainFile /etc/ssl/certs/leap_webapp.pem
- SSLCertificateKeyFile /etc/x509/keys/leap_webapp.key
- SSLCertificateFile /etc/x509/certs/leap_webapp.crt
-
- RequestHeader set X_FORWARDED_PROTO 'https'
-
- DocumentRoot /srv/leap/webapp/public
-
- RewriteEngine On
- # Check for maintenance file and redirect all requests
- RewriteCond %{DOCUMENT_ROOT}/system/maintenance.html -f
- RewriteCond %{SCRIPT_FILENAME} !maintenance.html
- RewriteCond %{REQUEST_URI} !/images/maintenance.jpg
- RewriteRule ^.*$ %{DOCUMENT_ROOT}/system/maintenance.html [L]
-
- # http://www.modrails.com/documentation/Users%20guide%20Apache.html#_passengerallowencodedslashes_lt_on_off_gt
- AllowEncodedSlashes on
- PassengerAllowEncodedSlashes on
- PassengerFriendlyErrorPages off
- SetEnv TMPDIR /var/tmp
-
- <% if (defined? @services) and (@services.include? 'monitor') -%>
- <DirectoryMatch (/usr/share/nagios3/htdocs|/usr/lib/cgi-bin/nagios3|/etc/nagios3/stylesheets)>
- PassengerEnabled off
- AllowOverride all
- </DirectoryMatch>
- <% end -%>
-</VirtualHost>
-
diff --git a/puppet/modules/site_apt/files/keys/cloudant-key.asc b/puppet/modules/site_apt/files/keys/cloudant-key.asc
deleted file mode 100644
index 99716a3c..00000000
--- a/puppet/modules/site_apt/files/keys/cloudant-key.asc
+++ /dev/null
@@ -1,52 +0,0 @@
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1.4.11 (GNU/Linux)
-
-mQINBFE7fhIBEACrDREcODnhdugNozMeBawOm2irpNCP54yMljST/DOXx1uo3gQw
-HnVcQ4lL7lXhbfL6Tp0WhrNYTWbbWHO0DaQbW0GQMHa2BGG0Xm0HPrjr3j55tAcM
-NPr0ArDuplq4Py2pwviZiEtQkkn+biH9oV+N3jNO+8+zVHLVU7pHaX6Yd7HAxFM8
-XX+7SeVtplZ7nvSxUREiMNxQb9o0kYNRPS+b0UjiIXHrFO9afl7lTdg/I8AhKWa0
-3jJoY/IRvVopJblISQNGFipR11Lpu5sOHghgz4V8mk/in7JLMmoqSl5DP5VhRII8
-OyADBjaUJD2mkv5cGaevqpB4AId78X9+Y62gFJrGkIHY9uBxIUkRe+leYI4Zz4Bm
-D9qBIbEY/kKkblTlC1G7u3qbGQcsbCRVIOnhruCih7vifcP40YwGUk5NmDA5AE78
-OovCGYGp4zMepDTSJxGT3sJOTEbzN09so6C7fQWBeQiiG5Uepp1q+VnaGpT1L4rc
-Y6yRbu9dOFj6WzY4W5HtnbalzTIEYy+SIGZqRkJt6jREYLiFfyrpSFIgGoJAs0yx
-9M0McXfeOod69TPufB1PeppnBwFcTmYNYxakusQxAebRDPEBZqoEgl0gMmxWbAdI
-nxGMWWnSsN/Dj0dXRf1MG/5akOhX2zQcUzBOE2m/Xr5kjDPYFtFxVJDGzQARAQAB
-tDNDbG91ZGFudCBQYWNrYWdlIFNpZ25pbmcgS2V5IDxzdXBwb3J0QGNsb3VkYW50
-LmNvbT6JAj4EEwECACgFAlE7fhICGwMFCQHhM4AGCwkIBwMCBhUIAgkKCwQWAgMB
-Ah4BAheAAAoJEFngH70Vvo4mciIP/AlqHA/LDtSYfrFwdXifY2ImCMyzYvH40Ko2
-DHCw2qDjvK5UXn1iWuzXidT7DrxOfYoZpzySRP7VGyHxa3VPhOtzLDZSvTpk9ELo
-2x2IczUwLC17M0Iis4CpqlxSFIBYGX78pMzvsEyC4TFqUDfXRlye3apjD0iwK0hE
-kdP1+TPdJjhWImJm+3TLu45zTw3Ph5dnf5pLQPNhKfBSdku+vRrd35N5hHso9S1y
-Z3NrxcQlWnXuqkLIA14gM7qbBFD+el9Y+tZ7ERGYg3s5uNDQRTb0QC8zg/um2+zW
-4hHmuRcWY3n8IgHcYUruC1VyrrsFIWWMyLv7SZkAAoSY+jKyESDfYpJQ8jtZ4EF9
-2/gYm4FgZR8j4gWkzHSLGVt/4EIykJZb0yIg/QEovmmHqpy8xYri3goMSl4h7tfF
-TOCZLTzTyQ7xONdyEsrvQPhmdtXEgvSo5S7ZU9kkx32OjCoshLLjhtqAipBgEXqb
-hElFo1oSyOVoGc7UNh7KNBjWfeP8dNdCbIbIYPMeM0/CVjD60kW5ZEVDuYglT+Rz
-enJJvS4Hs+fq8cFNxMB+l64qE7iS+I6RP2bPeQM2aBa2UZNWxUIbXF7bb3zLrCGn
-GT8GF1AFRoW3GiDzB7QnLVp8BhIaqFUzbDim+5mFFG8wguxHTiz4snDdQXq2Es6V
-UETFsNsluQINBFE7fhIBEADIyLHyBh8AKJKQHksFAPHOyA48ocxgQDpQnqYlQcAK
-D8eUbRXciIz4ePBmvjaQmz8wJgWULc04u4i9jK8Jd/Ks+VhEz3AjRBfjvkBaVMog
-FMPKaoDn9LVMBSZJ3fcC1DVck1oO8LnFIdktt0zhvzG+pV5b/UTRsVZmwNh1p2dM
-4cJswxlksJXYnI9tFA74qiomDCPYM0zpv7TEjX23PZTLqTSHP5aWctx+MIEtdoqp
-EsEDL6npvYBRz/tuL41cUWs7CItH131Hyuizo4vGrxgWPnoXIxLmLOOZCMk/kbx0
-XCSvengqYwNgAOlIjewtTw+WJm1gtNQQeKmaXBX7njf2Wz7LI/0KVxttEpKT5/5y
-embOGn7My9i7zOc1frMCDivIOTQDBZTzR9o7/6wUJ69DIoFLMlO8UcCK3R7o5VUI
-ezx+XYsOAD7D2vKoiD8Se65Vnax2rfFlLP7OQqdem5l2lkHpJzP3lA8qmA2MfJ7V
-jsk7eDSyJQjG5c6KBoaFlYGhp/E2kR82cAKVaFIbW3euMM4XK6Mgzy3+DVKfk8mu
-AEuHub7plfxM+65yjLNAK6l6IKtY1HfM7F4GFyNSd3mNNcWN7ceIHh8Ur4DeD2Tp
-7r3XcWd6/czLYNsw2BAHeVUxnMTCeGN99UZTtHgVq9IJMOCDOPwMSzHFfZ6sNaYL
-qQARAQABiQIlBBgBAgAPBQJRO34SAhsMBQkB4TOAAAoJEFngH70Vvo4mpokP/jJJ
-2mXdhMVqZCtZhwphJfdxg8nBERzrd6ebXxKbTq1MmSN/fDwLknPabFHUpzk1ADCf
-6mh2o0HB+67yMzo1UVtyfPOaHgCE/pWer5ultJM8gOdpBfSWL8jRwU8ZQ4fDu3z8
-AC6zTNq7znOVLEzZPy8U7q5Rt5/6QdQYoTLe6DwlLmkflzWP5VWi/mTGvtu/t5OV
-tGZkzBYQ5QAXRXXkKswqkJpQFuW6d1vlYm9+x/+Q1+2kGT+CKbRAkqkf77qVcyJR
-1M2JQSs4ko+rLMZzr01sYA+EBD17nxqV8vUdYebNc9Qnk8Aphid1zarUbySgAdnJ
-5SLAjLe/6N6IEE9F3uKsPEs87gJrnwrYHRrmu0wAPwA0cMmtgD4Bz7Iiz4CLYPFW
-rHpQCA313K+rS/LLfLBL66wIRKcPuYIFR9N03jX9eGR6qtk0b5Zb3YjWOo4V9Q1r
-o+g6IB0Us5vH6ISuokq7Bv+8cXhEMVoctL9A8xWN1KDkweZ+7dNWCGV8lUWKy3Hw
-ig6hENH6H7J57U8H2v2aZTeUo6e7VDP9gddNKPSEEeoBKfVnWYGoG8mVPQ2PzTgZ
-ZO2vwp4c3Ix/kIV3xe+/Opcq1lxYhD7HSre1MB7HOeFmis6tBBjMJPaatZVfzj1v
-6Uhz5oUCwcPol8rsp69DvGVUPSHfDwBxurDX71oG
-=lEm7
------END PGP PUBLIC KEY BLOCK-----
diff --git a/puppet/modules/site_apt/files/keys/leap_key.asc b/puppet/modules/site_apt/files/keys/leap_key.asc
index b69251f0..b6b5077b 100644
--- a/puppet/modules/site_apt/files/keys/leap_key.asc
+++ b/puppet/modules/site_apt/files/keys/leap_key.asc
@@ -1,5 +1,5 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1.4.11 (GNU/Linux)
+Version: GnuPG v1.4.14 (GNU/Linux)
mQINBFESwt0BEAC2CR+XgW04DVwT427v2T4+qz+O/xGOwQcalVaSOUuguYgf29en
Apb6mUqROOTuJWN1nw1lvXiA6iFxg6DjDUhsp6j54X7GAAAjZ9QuavPgcsractsJ
@@ -36,7 +36,19 @@ y9k8peFB7wwf0sW3Eg78XFsfy4gyV619VnBR+PbfOpKqFFXAodF1mFiIrPeefaVp
F9fiQ5Owt0sJjDaJnYT83ksAO2Aj+VsY3UjnDrGFaiV8Neit9y/8W8DqmZ3EZEF/
M3iS0yDjqqt9ACFD+jkGlKYsyHv7gbpTq0yi6u/kRXHUTIvVwFL9M6Z6AUcG8gzo
qbKhXGfWKEq0lN5HAjJ//V9ro3DekFd0A+NQOlFV6XtspZwphVdtW1WS078HmVlw
-F5dbD8pcfT/RjbkCDQRREsLdARAA3Frw+j6H9McEIi/gjiGwvxnIdGc8McWchnFp
+F5dbD8pcfT/RjYkCPQQTAQoAJwIbAwULCQgHAwUVCgkICwUWAgMBAAIeAQIXgAUC
+UvT9ZgUJA8NuBQAKCRAeNKGCjiB5AXB+D/9k/BzZdAczQ3/v7hKrN9y3/D8kOEYK
+rF8HdcBOH522sN6mqvm7wGkf3RmNSi731m6vzlbBSonrAT5KDMpj+THOmUcY29V5
+a1YOgFCCkToOfl+LmlLiuqfrGCJyE28MKMrsi2zMBKhsSxhvcI0EhJkQpPBu8gUs
+XW1GSHuh5CYzwf/i8eNDpVrhHjRF0AVCOWIq52LTR62QchR+6ci/wVDHWd9Ase5X
+8rxNnt2/pCbgATklQbmRcQS6efTVk3oXk1DZ8M46vayJ1g2BFuIi7pohiekLAAAt
+MCwRKHTHvtPkGAUAEXExPGS78qHxLHIau2VCtSBxm+bQX+ZyCMANDpI+ZTFp1APJ
+9SpbtGozuQOpWFjWY1rERunrbyWHIb2DuVVNKGiHlkMJB76zzysvbIPYWx1RqD6s
+KFJBkjrM0xn8H+D6qzwzGfmX1Yaw12oYA6pcai4aK5sO7KHt+THAxYAcVF7qxGU7
+lnDifM56hrH/DbE5InlDC8OUqDysj0cHacRee+ZYtj7TiEykWfP5RrZCLQ7L6Jd/
+HtgQti/9TVUaFkIlQCfvF+l4BYZQYvnhx3MVK7ChKLmy6AVQLWnDrBrDvl07HLvW
+6pslRzVHfWyIYng0pZ0HvK+MpQztCoUcDK470mjlpAtjNHuyKh6r6TtaiVK8MgbR
+Sx/NMHb1/PXQJrkCDQRREsLdARAA3Frw+j6H9McEIi/gjiGwvxnIdGc8McWchnFp
OWvdhTW9056v+y22DoKbULjT8k+8GzuRQ0xp4VwCC1rX3UExwceczzGs+tSKuIGm
g1ELygsaOZHdQBNLGPvn+TZNGlaYXPlQo7m8YhXGHwgQrdKyjcFD5xnOHxe981LT
q+IQ6jVYhho7/Qik9rVE1XHxoOfYvnNZJD0cFdf9OcX47YoqmM4sZYPMoOmKoVQT
@@ -59,5 +71,5 @@ Dxix2FBXQU/4pVpGHjXTQP6RqeTrAedXvpgCHWP1UIlswIQecGmQcJ/hRZjd+0vl
cjfCYhZHr7N96Da6Cy8v2fZiZHaSAt7T2oIZ9X3gEh/kOlLDcuIdvMHUfojn0MrP
Ce1AqOHyQQqhkVylvZpS0PdE0VW3PmJ98uKfX2FVAOTUD4Rw3n9Ew7bfM249HuP4
JOXi/Skp4sBB/xgrtV1u+E+BW0SS/BOiwfrI4xUy+MrWuw==
-=4STg
+=Om8x
-----END PGP PUBLIC KEY BLOCK-----
diff --git a/puppet/modules/site_apt/manifests/init.pp b/puppet/modules/site_apt/manifests/init.pp
index 8821c110..9facf4cc 100644
--- a/puppet/modules/site_apt/manifests/init.pp
+++ b/puppet/modules/site_apt/manifests/init.pp
@@ -1,15 +1,6 @@
class site_apt {
- # on couchdb we need to include squeeze in apt preferences,
- # so the cloudant package can pull some packages from squeeze
- # template() must be unquoted !
- if 'couchdb' in $::services {
- $custom_preferences = template("site_apt/preferences.include_squeeze")
- } else {
- $custom_preferences = ''
- }
class { 'apt':
- custom_preferences => $custom_preferences,
custom_key_dir => 'puppet:///modules/site_apt/keys'
}
@@ -37,6 +28,6 @@ class site_apt {
# The creation of sources.list depends on the lsb package
File['/etc/apt/preferences'] ->
- Exec['refresh_apt']
- Package <| ( title != 'lsb' ) |>
+ Exec['refresh_apt'] ->
+ Package <| ( title != 'lsb' ) |>
}
diff --git a/puppet/modules/site_apt/manifests/preferences/check_mk.pp b/puppet/modules/site_apt/manifests/preferences/check_mk.pp
new file mode 100644
index 00000000..580e0d3f
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/preferences/check_mk.pp
@@ -0,0 +1,9 @@
+class site_apt::preferences::check_mk {
+
+ apt::preferences_snippet { 'check-mk':
+ package => 'check-mk-*',
+ release => "${::lsbdistcodename}-backports",
+ priority => 999;
+ }
+
+}
diff --git a/puppet/modules/site_apt/manifests/preferences/twisted.pp b/puppet/modules/site_apt/manifests/preferences/twisted.pp
new file mode 100644
index 00000000..abff6838
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/preferences/twisted.pp
@@ -0,0 +1,9 @@
+class site_apt::preferences::twisted {
+
+ apt::preferences_snippet { 'python-twisted':
+ package => 'python-twisted*',
+ release => "${::lsbdistcodename}-backports",
+ priority => 999;
+ }
+
+}
diff --git a/puppet/modules/site_apt/templates/wheezy/postfix.seeds b/puppet/modules/site_apt/templates/wheezy/postfix.seeds
new file mode 100644
index 00000000..1a878ccc
--- /dev/null
+++ b/puppet/modules/site_apt/templates/wheezy/postfix.seeds
@@ -0,0 +1 @@
+postfix postfix/main_mailer_type select No configuration
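
The seed file above is a debconf preselection that stops the postfix package from opening its interactive configuration dialog at install time. Done by hand, the equivalent would be something like the following sketch (the file path is hypothetical; the puppet apt/postfix modules are expected to wire this in themselves):

    # preseed the debconf answer, then install without prompts
    debconf-set-selections /etc/leap/postfix.seeds
    DEBIAN_FRONTEND=noninteractive apt-get install -y postfix
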
diff --git a/puppet/modules/site_check_mk/files/agent/local_checks/all_hosts/run_node_tests.sh b/puppet/modules/site_check_mk/files/agent/local_checks/all_hosts/run_node_tests.sh
new file mode 100644
index 00000000..1dd0afc9
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/local_checks/all_hosts/run_node_tests.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+#
+# runs node tests
+
+/srv/leap/bin/run_tests --checkmk
diff --git a/puppet/modules/site_check_mk/files/agent/local_checks/mx/check_leap_mx.sh b/puppet/modules/site_check_mk/files/agent/local_checks/mx/check_leap_mx.sh
new file mode 100755
index 00000000..b8687c9a
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/local_checks/mx/check_leap_mx.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+
+WARN=1
+CRIT=5
+
+# in minutes
+MAXAGE=10
+
+STATUS[0]='OK'
+STATUS[1]='Warning'
+STATUS[2]='Critical'
+CHECKNAME='Leap_MX_Queue'
+
+WATCHDIR='/var/mail/vmail/Maildir/new/'
+
+
+total=`find $WATCHDIR -type f -mmin +$MAXAGE | wc -l`
+
+if [ $total -lt $WARN ]
+then
+ exitcode=0
+else
+ if [ $total -le $CRIT ]
+ then
+ exitcode=1
+ else
+ exitcode=2
+ fi
+fi
+
+echo "${exitcode} ${CHECKNAME} stale_files=${total} ${STATUS[exitcode]}: ${total} stale files (>=${MAXAGE} min) in ${WATCHDIR}."
+
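
check_mk treats this script as a local check: the single line it echoes is parsed as exit code, check name, perfdata and message. With an empty queue directory the output would be:

    0 Leap_MX_Queue stale_files=0 OK: 0 stale files (>=10 min) in /var/mail/vmail/Maildir/new/.

Between 1 and 5 stale messages yields a Warning, more than 5 a Critical, per the WARN/CRIT thresholds set at the top of the script.
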
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg
new file mode 100644
index 00000000..28f333b0
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg
@@ -0,0 +1,20 @@
+/opt/bigcouch/var/log/bigcouch.log nocontext=1
+# ignore requests that are fine
+ I undefined - -.*200$
+ I undefined - -.*201$
+ I 127.0.0.1 undefined.* ok
+ I 127.0.0.1 localhost:5984 .* ok
+ # https://leap.se/code/issues/5246
+ I Shutting down group server
+ # ignore "Uncaught error in HTTP request: {exit, normal}" error
+ # it's suppressed in later versions of bigcouch anyhow
+ # see https://leap.se/code/issues/5226
+ I Uncaught error in HTTP request: {exit,normal}
+ I Uncaught error in HTTP request: {exit,
+ C Uncaught error in HTTP request: {error,
+ C Response abnormally terminated: {nodedown,
+ C rexi_DOWN,noproc
+ C rexi_DOWN,noconnection
+ C error
+ C Connection attempt from disallowed node
+ W Apache CouchDB has started
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/leap_mx.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/leap_mx.cfg
new file mode 100644
index 00000000..c71c5392
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/leap_mx.cfg
@@ -0,0 +1,4 @@
+/var/log/leap_mx.log
+ W Don't know how to deliver mail
+ W No public key, stopping the processing chain
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/logwatch.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/logwatch.cfg
new file mode 100644
index 00000000..4f16d1bd
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/logwatch.cfg
@@ -0,0 +1,31 @@
+# This file is managed by Puppet. DO NOT EDIT.
+
+# logwatch.cfg
+# This file configures mk_logwatch. Define your logfiles
+# and patterns to be looked for here.
+
+# Name one or more logfiles
+/var/log/messages
+# Patterns are indented with one space and are prefixed with:
+# C: Critical messages
+# W: Warning messages
+# I: ignore these lines (OK)
+# The first match decides. Lines that do not match any pattern
+# are ignored.
+ C Fail event detected on md device
+ I mdadm.*: Rebuild.*event detected
+ W mdadm\[
+ W ata.*hard resetting link
+ W ata.*soft reset failed (.*FIS failed)
+ W device-mapper: thin:.*reached low water mark
+ C device-mapper: thin:.*no free space
+
+/var/log/auth.log
+ W sshd.*Corrupted MAC on input
+
+/var/log/kern.log
+ C panic
+ C Oops
+ W generic protection rip
+ W .*Unrecovered read error - auto reallocate failed
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg
new file mode 100644
index 00000000..623d1e46
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg
@@ -0,0 +1,5 @@
+/var/log/soledad.log
+ C WSGI application error
+ C Error
+ C error
+ W Timing out client:
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog/couchdb.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/couchdb.cfg
new file mode 100644
index 00000000..f546135a
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/couchdb.cfg
@@ -0,0 +1,7 @@
+ C /usr/local/bin/couch-doc-update.*failed
+ C /usr/local/bin/couch-doc-update.*ERROR
+# on one-node bigcouch setups, we'll get this msg
+# a lot, so we ignore it here until we fix
+# https://leap.se/code/issues/5244
+ I epmd: got partial packet only on file descriptor
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog/openvpn.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/openvpn.cfg
new file mode 100644
index 00000000..d58e876d
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/openvpn.cfg
@@ -0,0 +1,7 @@
+# ignore openvpn TLS initialization errors when clients
+# suddenly hangup before properly establishing
+# a tls connection
+ I ovpn-.*TLS Error: Unroutable control packet received from
+ I ovpn-.*TLS Error: TLS key negotiation failed to occur within 60 seconds (check your network connectivity)
+ I ovpn-.*TLS Error: TLS handshake failed
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog/stunnel.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/stunnel.cfg
new file mode 100644
index 00000000..eb3131f2
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/stunnel.cfg
@@ -0,0 +1,9 @@
+# check for stunnel failures
+#
+# these are temporary failures and happen very often, so we
+# ignore them until we have tuned stunnel timeouts/logging,
+# see https://leap.se/code/issues/5218
+ I stunnel:.*Connection reset by peer
+ I stunnel:.*Peer suddenly disconnected
+ I stunnel:.*Connection refused
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog/tapicero.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/tapicero.cfg
new file mode 100644
index 00000000..93ce0311
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/tapicero.cfg
@@ -0,0 +1,8 @@
+ C tapicero.*RestClient::InternalServerError:
+# possible race condition between multiple tapicero
+# instances, so we ignore it
+# see https://leap.se/code/issues/5168
+ I tapicero.*RestClient::PreconditionFailed:
+ C tapicero.*Creating database.*failed due to:
+ C tapicero.*failed
+ W tapicero.*Couch stream ended unexpectedly.
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog/webapp.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/webapp.cfg
new file mode 100644
index 00000000..00f9c7fd
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/webapp.cfg
@@ -0,0 +1,5 @@
+# check for webapp errors
+ C webapp.*Could not connect to couch database messages due to 401 Unauthorized: {"error":"unauthorized","reason":"You are not a server admin."}
+# ignore RoutingErrors that rails throw when it can't handle a url
+# see https://leap.se/code/issues/5173
+ I webapp.*ActionController::RoutingError
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog_header.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog_header.cfg
new file mode 100644
index 00000000..f60d752b
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog_header.cfg
@@ -0,0 +1 @@
+/var/log/syslog
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg
new file mode 100644
index 00000000..450b9e90
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg
@@ -0,0 +1,11 @@
+# some general patterns
+ C panic
+ C Oops
+ I Error: Driver 'pcspkr' is already registered, aborting...
+ C Error
+ C error
+ W generic protection rip
+ W .*Unrecovered read error - auto reallocate failed
+# 401 Unauthorized error logged by webapp and possibly other
+# applications
+ C Unauthorized
diff --git a/puppet/modules/site_check_mk/files/agent/nagios_plugins/check_unix_open_fds.pl b/puppet/modules/site_check_mk/files/agent/nagios_plugins/check_unix_open_fds.pl
new file mode 100755
index 00000000..06163d49
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/nagios_plugins/check_unix_open_fds.pl
@@ -0,0 +1,322 @@
+#!/usr/bin/perl -w
+
+# check_unix_open_fds Nagios Plugin
+#
+# TComm - Carlos Peris Pla
+#
+# This nagios plugin is free software, and comes with ABSOLUTELY
+# NO WARRANTY. It may be used, redistributed and/or modified under
+# the terms of the GNU General Public Licence (see
+# http://www.fsf.org/licensing/licenses/gpl.txt).
+
+
+# MODULE DECLARATION
+
+use strict;
+use Nagios::Plugin;
+
+
+# FUNCTION DECLARATION
+
+sub CreateNagiosManager ();
+sub CheckArguments ();
+sub PerformCheck ();
+
+
+# CONSTANT DEFINITION
+
+use constant NAME => 'check_unix_open_fds';
+use constant VERSION => '0.1b';
+use constant USAGE => "Usage:\ncheck_unix_open_fds -a <application_path> -w <process_threshold,application_threshold> -c <process_threshold,application_threshold>\n".
+ "\t\t[-V <version>]\n";
+use constant BLURB => "On UNIX systems with the lsof command installed and its SUID bit activated, this plugin checks the number\n".
+                      "of file descriptors opened by an application and its processes.\n";
+use constant LICENSE => "This nagios plugin is free software, and comes with ABSOLUTELY\n".
+ "no WARRANTY. It may be used, redistributed and/or modified under\n".
+ "the terms of the GNU General Public Licence\n".
+ "(see http://www.fsf.org/licensing/licenses/gpl.txt).\n";
+use constant EXAMPLE => "\n\n".
+ "Example:\n".
+ "\n".
+ "check_unix_open_fds -a /usr/local/nagios/bin/ndo2db -w 20,75 -c 25,85\n".
+ "\n".
+ "It returns CRITICAL if number of file descriptors opened by ndo2db is higher than 85,\n".
+ "if not it returns WARNING if number of file descriptors opened by ndo2db is higher \n".
+ "than 75, if not it returns CRITICAL if number of file descriptors opened by any process\n".
+ "of ndo2db is higher than 25, if not it returns WARNING if number of file descriptors \n".
+ "opened by any process of ndo2db is higher than 20.\n".
+                        "Otherwise it returns OK if the check has been performed successfully.\n\n";
+
+
+# VARIABLE DEFINITION
+
+my $Nagios;
+my $Error;
+my $PluginResult;
+my $PluginOutput;
+my @WVRange;
+my @CVRange;
+
+
+# MAIN FUNCTION
+
+# Get command line arguments
+$Nagios = &CreateNagiosManager(USAGE, VERSION, BLURB, LICENSE, NAME, EXAMPLE);
+eval {$Nagios->getopts};
+
+if (!$@) {
+ # Command line parsed
+ if (&CheckArguments($Nagios, \$Error, \@WVRange, \@CVRange)) {
+ # Argument checking passed
+ $PluginResult = &PerformCheck($Nagios, \$PluginOutput, \@WVRange, \@CVRange)
+ }
+ else {
+ # Error checking arguments
+ $PluginOutput = $Error;
+ $PluginResult = UNKNOWN;
+ }
+ $Nagios->nagios_exit($PluginResult,$PluginOutput);
+}
+else {
+ # Error parsing command line
+ $Nagios->nagios_exit(UNKNOWN,$@);
+}
+
+
+
+# FUNCTION DEFINITIONS
+
+# Creates and configures a Nagios plugin object
+# Input: strings (usage, version, blurb, license, name and example) to configure argument parsing functionality
+# Return value: reference to a Nagios plugin object
+
+sub CreateNagiosManager() {
+ # Create GetOpt object
+ my $Nagios = Nagios::Plugin->new(usage => $_[0], version => $_[1], blurb => $_[2], license => $_[3], plugin => $_[4], extra => $_[5]);
+
+ # Add argument units
+ $Nagios->add_arg(spec => 'application|a=s',
+ help => 'Application path for which you want to check the number of open file descriptors',
+ required => 1);
+
+ # Add argument warning
+ $Nagios->add_arg(spec => 'warning|w=s',
+ help => "Warning thresholds. Format: <process_threshold,application_threshold>",
+ required => 1);
+ # Add argument critical
+ $Nagios->add_arg(spec => 'critical|c=s',
+ help => "Critical thresholds. Format: <process_threshold,application_threshold>",
+ required => 1);
+
+ # Return value
+ return $Nagios;
+}
+
+
+# Checks argument values and sets some default values
+# Input: Nagios Plugin object
+# Output: reference to Error description string, reference to WVRange ($_[2]), reference to CVRange ($_[3])
+# Return value: True if arguments ok, false if not
+
+sub CheckArguments() {
+ my ($Nagios, $Error, $WVRange, $CVRange) = @_;
+ my $commas;
+ my $units;
+ my $i;
+ my $firstpos;
+ my $secondpos;
+
+ # Check Warning thresholds list
+ $commas = $Nagios->opts->warning =~ tr/,//;
+ if ($commas !=1){
+ ${$Error} = "Invalid Warning list format. One comma is expected.";
+ return 0;
+ }
+ else{
+ $i=0;
+ $firstpos=0;
+ my $warning=$Nagios->opts->warning;
+ while ($warning =~ /[,]/g) {
+ $secondpos=pos $warning;
+ if ($secondpos - $firstpos==1){
+ @{$WVRange}[$i] = "~:";
+ }
+ else{
+ @{$WVRange}[$i] = substr $Nagios->opts->warning, $firstpos, ($secondpos-$firstpos-1);
+ }
+ $firstpos=$secondpos;
+ $i++
+ }
+        if (length($Nagios->opts->warning) - $firstpos==0){ # The comma is the last element of the string
+ @{$WVRange}[$i] = "~:";
+ }
+ else{
+ @{$WVRange}[$i] = substr $Nagios->opts->warning, $firstpos, (length($Nagios->opts->warning)-$firstpos);
+ }
+
+ if (@{$WVRange}[0] !~/^(@?(\d+|(\d+|~):(\d+)?))?$/){
+ ${$Error} = "Invalid Process Warning threshold in ${$WVRange[0]}";
+ return 0;
+        } if (@{$WVRange}[1] !~/^(@?(\d+|(\d+|~):(\d+)?))?$/){
+ ${$Error} = "Invalid Application Warning threshold in ${$WVRange[1]}";
+ return 0;
+ }
+ }
+
+ # Check Critical thresholds list
+ $commas = $Nagios->opts->critical =~ tr/,//;
+ if ($commas !=1){
+ ${$Error} = "Invalid Critical list format. One comma is expected.";
+ return 0;
+ }
+ else{
+ $i=0;
+ $firstpos=0;
+ my $critical=$Nagios->opts->critical;
+ while ($critical =~ /[,]/g) {
+ $secondpos=pos $critical ;
+ if ($secondpos - $firstpos==1){
+ @{$CVRange}[$i] = "~:";
+ }
+ else{
+ @{$CVRange}[$i] =substr $Nagios->opts->critical, $firstpos, ($secondpos-$firstpos-1);
+ }
+ $firstpos=$secondpos;
+ $i++
+ }
+        if (length($Nagios->opts->critical) - $firstpos==0){ # The comma is the last element of the string
+ @{$CVRange}[$i] = "~:";
+ }
+ else{
+ @{$CVRange}[$i] = substr $Nagios->opts->critical, $firstpos, (length($Nagios->opts->critical)-$firstpos);
+ }
+
+ if (@{$CVRange}[0] !~/^(@?(\d+|(\d+|~):(\d+)?))?$/) {
+ ${$Error} = "Invalid Process Critical threshold in @{$CVRange}[0]";
+ return 0;
+ }
+ if (@{$CVRange}[1] !~/^(@?(\d+|(\d+|~):(\d+)?))?$/) {
+ ${$Error} = "Invalid Application Critical threshold in @{$CVRange}[1]";
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+
+# Performs whole check:
+# Input: Nagios Plugin object, reference to Plugin output string, reference to WVRange, reference to CVRange
+# Output: Plugin output string
+# Return value: Plugin return value
+
+sub PerformCheck() {
+ my ($Nagios, $PluginOutput, $WVRange, $CVRange) = @_;
+ my $Application;
+ my @AppNameSplitted;
+ my $ApplicationName;
+ my $PsCommand;
+ my $PsResult;
+ my @PsResultLines;
+ my $ProcLine;
+ my $ProcPid;
+ my $LsofCommand;
+ my $LsofResult;
+ my $ProcCount = 0;
+ my $FDCount = 0;
+ my $ProcFDAvg = 0;
+ my $PerProcMaxFD = 0;
+ my $ProcOKFlag = 0;
+ my $ProcWarningFlag = 0;
+ my $ProcCriticalFlag = 0;
+ my $OKFlag = 0;
+ my $WarningFlag = 0;
+ my $CriticalFlag = 0;
+ my $LastWarningProcFDs = 0;
+ my $LastWarningProc = -1;
+ my $LastCriticalProcFDs = 0;
+ my $LastCriticalProc = -1;
+ my $ProcPluginReturnValue = UNKNOWN;
+ my $AppPluginReturnValue = UNKNOWN;
+ my $PluginReturnValue = UNKNOWN;
+ my $PerformanceData = "";
+ my $PerfdataUnit = "FDs";
+
+ $Application = $Nagios->opts->application;
+ $PsCommand = "ps -eaf | grep $Application";
+ $PsResult = `$PsCommand`;
+ @AppNameSplitted = split(/\//, $Application);
+ $ApplicationName = $AppNameSplitted[$#AppNameSplitted];
+ @PsResultLines = split(/\n/, $PsResult);
+ if ( $#PsResultLines > 1 ) {
+ foreach my $Proc (split(/\n/, $PsResult)) {
+ if ($Proc !~ /check_unix_open_fds/ && $Proc !~ / grep /) {
+ $ProcCount += 1;
+ $ProcPid = (split(/\s+/, $Proc))[1];
+ $LsofCommand = "lsof -p $ProcPid | wc -l";
+ $LsofResult = `$LsofCommand`;
+ $LsofResult = ($LsofResult > 0 ) ? ($LsofResult - 1) : 0;
+ $FDCount += $LsofResult;
+ if ($LsofResult >= $PerProcMaxFD) { $PerProcMaxFD = $LsofResult; }
+ $ProcPluginReturnValue = $Nagios->check_threshold(check => $LsofResult,warning => @{$WVRange}[0],critical => @{$CVRange}[0]);
+ if ($ProcPluginReturnValue eq OK) {
+ $ProcOKFlag = 1;
+ }
+ elsif ($ProcPluginReturnValue eq WARNING) {
+ $ProcWarningFlag = 1;
+ if ($LsofResult >= $LastWarningProcFDs) {
+ $LastWarningProcFDs = $LsofResult;
+ $LastWarningProc = $ProcPid;
+ }
+ }
+ #if ($LsofResult >= $PCT) {
+ elsif ($ProcPluginReturnValue eq CRITICAL) {
+ $ProcCriticalFlag = 1;
+ if ($LsofResult >= $LastCriticalProcFDs) {
+ $LastCriticalProcFDs = $LsofResult;
+ $LastCriticalProc = $ProcPid;
+ }
+ }
+ }
+ }
+ if ($ProcCount) { $ProcFDAvg = int($FDCount / $ProcCount); }
+ $AppPluginReturnValue = $Nagios->check_threshold(check => $FDCount,warning => @{$WVRange}[1],critical => @{$CVRange}[1]);
+ #if ($FDCount >= $TWT) {
+ if ($AppPluginReturnValue eq OK) { $OKFlag = 1; }
+ elsif ($AppPluginReturnValue eq WARNING) { $WarningFlag = 1; }
+ elsif ($AppPluginReturnValue eq CRITICAL) { $CriticalFlag = 1; }
+
+ # PluginReturnValue and PluginOutput
+ if ($CriticalFlag) {
+ $PluginReturnValue = CRITICAL;
+ ${$PluginOutput} .= "$ApplicationName handling $FDCount files (critical threshold set to @{$CVRange}[1])";
+ }
+ elsif ($WarningFlag) {
+ $PluginReturnValue = WARNING;
+ ${$PluginOutput} .= "$ApplicationName handling $FDCount files (warning threshold set to @{$WVRange}[1])";
+ }
+ elsif ($ProcCriticalFlag) {
+ $PluginReturnValue = CRITICAL;
+ ${$PluginOutput} .= "Process ID $LastCriticalProc handling $LastCriticalProcFDs files (critical threshold set to @{$CVRange}[0])";
+ }
+ elsif ($ProcWarningFlag) {
+ $PluginReturnValue = WARNING;
+ ${$PluginOutput} .= "Process ID $LastWarningProc handling $LastWarningProcFDs files (warning threshold set to @{$WVRange}[0])";
+ }
+ elsif ($OKFlag && $ProcOKFlag) {
+ $PluginReturnValue = OK;
+ ${$PluginOutput} .= "$ApplicationName handling $FDCount files";
+ }
+ }
+ else {
+ ${$PluginOutput} .= "No existe la aplicacion $ApplicationName";
+ }
+
+
+ $PerformanceData .= "ProcCount=$ProcCount$PerfdataUnit FDCount=$FDCount$PerfdataUnit ProcFDAvg=$ProcFDAvg$PerfdataUnit PerProcMaxFD=$PerProcMaxFD$PerfdataUnit";
+
+ # Output with performance data:
+ ${$PluginOutput} .= " | $PerformanceData";
+
+ return $PluginReturnValue;
+}
diff --git a/puppet/modules/site_check_mk/files/agent/plugins/mk_logwatch.1.2.4 b/puppet/modules/site_check_mk/files/agent/plugins/mk_logwatch.1.2.4
new file mode 100755
index 00000000..3dbca322
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/plugins/mk_logwatch.1.2.4
@@ -0,0 +1,374 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2010 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails.  You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Call with -d for debug mode: colored output, no saving of status
+
+import sys, os, re, time
+import glob
+
+if '-d' in sys.argv[1:] or '--debug' in sys.argv[1:]:
+ tty_red = '\033[1;31m'
+ tty_green = '\033[1;32m'
+ tty_yellow = '\033[1;33m'
+ tty_blue = '\033[1;34m'
+ tty_normal = '\033[0m'
+ debug = True
+else:
+ tty_red = ''
+ tty_green = ''
+ tty_yellow = ''
+ tty_blue = ''
+ tty_normal = ''
+ debug = False
+
+# The configuration file and status file are searched
+# in the directory named by the environment variable
+# LOGWATCH_DIR. If that is not set, MK_CONFDIR is used.
+# If that is not set either, the current directory is
+# used.
+logwatch_dir = os.getenv("LOGWATCH_DIR")
+if not logwatch_dir:
+ logwatch_dir = os.getenv("MK_CONFDIR")
+ if not logwatch_dir:
+ logwatch_dir = "."
+
+print "<<<logwatch>>>"
+
+config_filename = logwatch_dir + "/logwatch.cfg"
+status_filename = logwatch_dir + "/logwatch.state"
+config_dir = logwatch_dir + "/logwatch.d/*.cfg"
+
+def is_not_comment(line):
+ if line.lstrip().startswith('#') or \
+ line.strip() == '':
+ return False
+ return True
+
+def parse_filenames(line):
+ return line.split()
+
+def parse_pattern(level, pattern):
+ if level not in [ 'C', 'W', 'I', 'O' ]:
+ raise(Exception("Invalid pattern line '%s'" % line))
+ try:
+ compiled = re.compile(pattern)
+ except:
+ raise(Exception("Invalid regular expression in line '%s'" % line))
+ return (level, compiled)
+
+def read_config():
+ config_lines = [ line.rstrip() for line in filter(is_not_comment, file(config_filename).readlines()) ]
+ # Add config from a logwatch.d folder
+ for config_file in glob.glob(config_dir):
+ config_lines += [ line.rstrip() for line in filter(is_not_comment, file(config_file).readlines()) ]
+
+ have_filenames = False
+ config = []
+
+ for line in config_lines:
+ rewrite = False
+ if line[0].isspace(): # pattern line
+ if not have_filenames:
+ raise Exception("Missing logfile names")
+ level, pattern = line.split(None, 1)
+ if level == 'A':
+ cont_list.append(parse_cont_pattern(pattern))
+ elif level == 'R':
+ rewrite_list.append(pattern)
+ else:
+ level, compiled = parse_pattern(level, pattern)
+ cont_list = [] # List of continuation patterns
+ rewrite_list = [] # List of rewrite patterns
+ patterns.append((level, compiled, cont_list, rewrite_list))
+ else: # filename line
+ patterns = []
+ config.append((parse_filenames(line), patterns))
+ have_filenames = True
+ return config
+
+def parse_cont_pattern(pattern):
+ try:
+ return int(pattern)
+ except:
+ try:
+ return re.compile(pattern)
+ except:
+ if debug:
+ raise
+ raise Exception("Invalid regular expression in line '%s'" % pattern)
+
+# structure of statusfile
+# # LOGFILE OFFSET INODE
+# /var/log/messages|7767698|32455445
+# /var/test/x12134.log|12345|32444355
+def read_status():
+ if debug:
+ return {}
+
+ status = {}
+ for line in file(status_filename):
+ # TODO: Remove variants with spaces. rsplit is
+ # not portable. split fails if logfilename contains
+ # spaces
+ inode = -1
+ try:
+ parts = line.split('|')
+ filename = parts[0]
+ offset = parts[1]
+ if len(parts) >= 3:
+ inode = parts[2]
+
+ except:
+ try:
+ filename, offset = line.rsplit(None, 1)
+ except:
+ filename, offset = line.split(None, 1)
+ status[filename] = int(offset), int(inode)
+ return status
+
+def save_status(status):
+ f = file(status_filename, "w")
+ for filename, (offset, inode) in status.items():
+ f.write("%s|%d|%d\n" % (filename, offset, inode))
+
+pushed_back_line = None
+def next_line(f):
+ global pushed_back_line
+ if pushed_back_line != None:
+ line = pushed_back_line
+ pushed_back_line = None
+ return line
+ else:
+ try:
+ line = f.next()
+ return line
+ except:
+ return None
+
+
+def process_logfile(logfile, patterns):
+ global pushed_back_line
+
+ # Look at which file offset we have finished scanning
+ # the logfile last time. If we have never seen this file
+ # before, we set the offset to -1
+ offset, prev_inode = status.get(logfile, (-1, -1))
+ try:
+ fl = os.open(logfile, os.O_RDONLY)
+ inode = os.fstat(fl)[1] # 1 = st_ino
+ except:
+ if debug:
+ raise
+ print "[[[%s:cannotopen]]]" % logfile
+ return
+
+ print "[[[%s]]]" % logfile
+
+ # Seek to the current end in order to determine file size
+ current_end = os.lseek(fl, 0, 2) # os.SEEK_END not available in Python 2.4
+ status[logfile] = current_end, inode
+
+ # If we have never seen this file before, we just set the
+ # current pointer to the file end. We do not want to make
+ # a fuss about ancient log messages...
+ if offset == -1:
+ if not debug:
+ return
+ else:
+ offset = 0
+
+
+    # If the inode of the logfile has changed it has apparently
+    # been started anew (logfile rotation). At least we must
+ # assume that. In some rare cases (restore of a backup, etc)
+ # we are wrong and resend old log messages
+ if prev_inode >= 0 and inode != prev_inode:
+ offset = 0
+
+ # Our previously stored offset is the current end ->
+ # no new lines in this file
+ if offset == current_end:
+ return # nothing new
+
+ # If our offset is beyond the current end, the logfile has been
+ # truncated or wrapped while keeping the same inode. We assume
+ # that it contains all new data in that case and restart from
+ # offset 0.
+ if offset > current_end:
+ offset = 0
+
+ # now seek to offset where interesting data begins
+ os.lseek(fl, offset, 0) # os.SEEK_SET not available in Python 2.4
+ f = os.fdopen(fl)
+ worst = -1
+ outputtxt = ""
+ lines_parsed = 0
+ start_time = time.time()
+
+ while True:
+ line = next_line(f)
+ if line == None:
+ break # End of file
+
+ lines_parsed += 1
+ # Check if maximum number of new log messages is exceeded
+ if opt_maxlines != None and lines_parsed > opt_maxlines:
+ outputtxt += "%s Maximum number (%d) of new log messages exceeded.\n" % (
+ opt_overflow, opt_maxlines)
+ worst = max(worst, opt_overflow_level)
+ os.lseek(fl, 0, 2) # Seek to end of file, skip all other messages
+ break
+
+ # Check if maximum processing time (per file) is exceeded. Check only
+ # every 100'th line in order to save system calls
+ if opt_maxtime != None and lines_parsed % 100 == 10 \
+ and time.time() - start_time > opt_maxtime:
+ outputtxt += "%s Maximum parsing time (%.1f sec) of this log file exceeded.\n" % (
+ opt_overflow, opt_maxtime)
+ worst = max(worst, opt_overflow_level)
+ os.lseek(fl, 0, 2) # Seek to end of file, skip all other messages
+ break
+
+ level = "."
+ for lev, pattern, cont_patterns, replacements in patterns:
+ matches = pattern.search(line[:-1])
+ if matches:
+ level = lev
+ levelint = {'C': 2, 'W': 1, 'O': 0, 'I': -1, '.': -1}[lev]
+ worst = max(levelint, worst)
+
+ # Check for continuation lines
+ for cont_pattern in cont_patterns:
+ if type(cont_pattern) == int: # add that many lines
+ for x in range(cont_pattern):
+ cont_line = next_line(f)
+ if cont_line == None: # end of file
+ break
+ line = line[:-1] + "\1" + cont_line
+
+ else: # pattern is regex
+ while True:
+ cont_line = next_line(f)
+ if cont_line == None: # end of file
+ break
+ elif cont_pattern.search(cont_line[:-1]):
+ line = line[:-1] + "\1" + cont_line
+ else:
+ pushed_back_line = cont_line # sorry for stealing this line
+ break
+
+ # Replacement
+ for replace in replacements:
+ line = replace.replace('\\0', line) + "\n"
+ for nr, group in enumerate(matches.groups()):
+ line = line.replace('\\%d' % (nr+1), group)
+
+ break # matching rule found and executed
+
+ color = {'C': tty_red, 'W': tty_yellow, 'O': tty_green, 'I': tty_blue, '.': ''}[level]
+ if debug:
+ line = line.replace("\1", "\nCONT:")
+ if level == "I":
+ level = "."
+ if opt_nocontext and level == '.':
+ continue
+ outputtxt += "%s%s %s%s\n" % (color, level, line[:-1], tty_normal)
+
+ new_offset = os.lseek(fl, 0, 1) # os.SEEK_CUR not available in Python 2.4
+ status[logfile] = new_offset, inode
+
+ # output all lines if at least one warning, error or ok has been found
+ if worst > -1:
+ sys.stdout.write(outputtxt)
+ sys.stdout.flush()
+
+try:
+ config = read_config()
+except Exception, e:
+ if debug:
+ raise
+ print "CANNOT READ CONFIG FILE: %s" % e
+ sys.exit(1)
+
+# Simply ignore errors in the status file. In case of a corrupted status file we simply begin
+# with an empty status. That keeps the monitoring up and running - even if we might lose a
+# message in the extreme case of a corrupted status file.
+try:
+ status = read_status()
+except Exception, e:
+ status = {}
+
+
+# The filename line may contain options like 'maxlines=100' or 'maxtime=10'
+for filenames, patterns in config:
+ # Initialize options with default values
+ opt_maxlines = None
+ opt_maxtime = None
+ opt_regex = None
+ opt_overflow = 'C'
+ opt_overflow_level = 2
+ opt_nocontext = False
+ try:
+ options = [ o.split('=', 1) for o in filenames if '=' in o ]
+ for key, value in options:
+ if key == 'maxlines':
+ opt_maxlines = int(value)
+ elif key == 'maxtime':
+ opt_maxtime = float(value)
+ elif key == 'overflow':
+ if value not in [ 'C', 'I', 'W', 'O' ]:
+ raise Exception("Invalid value %s for overflow. Allowed are C, I, O and W" % value)
+ opt_overflow = value
+ opt_overflow_level = {'C':2, 'W':1, 'O':0, 'I':0}[value]
+ elif key == 'regex':
+ opt_regex = re.compile(value)
+ elif key == 'iregex':
+ opt_regex = re.compile(value, re.I)
+ elif key == 'nocontext':
+ opt_nocontext = True
+ else:
+ raise Exception("Invalid option %s" % key)
+ except Exception, e:
+ if debug:
+ raise
+ print "INVALID CONFIGURATION: %s" % e
+ sys.exit(1)
+
+
+ for glob in filenames:
+ if '=' in glob:
+ continue
+ logfiles = [ l.strip() for l in os.popen("ls %s 2>/dev/null" % glob).readlines() ]
+ if opt_regex:
+ logfiles = [ f for f in logfiles if opt_regex.search(f) ]
+ if len(logfiles) == 0:
+ print '[[[%s:missing]]]' % glob
+ else:
+ for logfile in logfiles:
+ process_logfile(logfile, patterns)
+
+if not debug:
+ save_status(status)
diff --git a/puppet/modules/site_check_mk/manifests/agent.pp b/puppet/modules/site_check_mk/manifests/agent.pp
new file mode 100644
index 00000000..589041eb
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent.pp
@@ -0,0 +1,28 @@
+class site_check_mk::agent {
+
+ $ssh_hash = hiera('ssh')
+ $pubkey = $ssh_hash['authorized_keys']['monitor']['key']
+ $type = $ssh_hash['authorized_keys']['monitor']['type']
+
+ class { 'site_apt::preferences::check_mk': } ->
+
+ class { 'check_mk::agent':
+ agent_package_name => 'check-mk-agent',
+ agent_logwatch_package_name => 'check-mk-agent-logwatch',
+ method => 'ssh',
+ homedir => '/etc/nagios/check_mk',
+ register_agent => false
+ } ->
+
+ class { 'site_check_mk::agent::mrpe': } ->
+ class { 'site_check_mk::agent::logwatch': } ->
+
+ file {
+ [ '/srv/leap/nagios', '/srv/leap/nagios/plugins' ]:
+ ensure => directory;
+ '/usr/lib/check_mk_agent/local/run_node_tests.sh':
+ source => 'puppet:///modules/site_check_mk/agent/local_checks/all_hosts/run_node_tests.sh',
+ mode => '0755';
+ }
+
+}
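
The ssh hash looked up at the top of this class is expected to provide the monitor user's public key in roughly this shape (sketch with placeholder key material, matching the two lookups above):

    ssh:
      authorized_keys:
        monitor:
          type: ssh-rsa
          key: <public key material>

which fits the agent's method => 'ssh' setting: the monitoring node is meant to poll the agent over ssh rather than over the plain agent TCP port.
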
diff --git a/puppet/modules/site_check_mk/manifests/agent/couchdb.pp b/puppet/modules/site_check_mk/manifests/agent/couchdb.pp
new file mode 100644
index 00000000..01e2b886
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/couchdb.pp
@@ -0,0 +1,36 @@
+class site_check_mk::agent::couchdb {
+
+ # watch logs
+ file { '/etc/check_mk/logwatch.d/bigcouch.cfg':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/bigcouch.cfg',
+ }
+ concat::fragment { 'syslog_couchdb':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog/couchdb.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '02';
+ }
+
+
+ # check bigcouch processes
+ file_line {
+ 'Bigcouch_epmd_procs':
+ line => 'Bigcouch_epmd_procs /usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a /opt/bigcouch/erts-5.9.1/bin/epmd',
+ path => '/etc/check_mk/mrpe.cfg';
+ 'Bigcouch_beam_procs':
+ line => 'Bigcouch_beam_procs /usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a /opt/bigcouch/erts-5.9.1/bin/beam',
+ path => '/etc/check_mk/mrpe.cfg';
+ }
+
+ # check open files for bigcouch proc
+ include site_check_mk::agent::package::perl_plugin
+ file { '/srv/leap/nagios/plugins/check_unix_open_fds.pl':
+ source => 'puppet:///modules/site_check_mk/agent/nagios_plugins/check_unix_open_fds.pl',
+ mode => '0755'
+ }
+ file_line {
+ 'Bigcouch_open_files':
+ line => 'Bigcouch_open_files /srv/leap/nagios/plugins/check_unix_open_fds.pl -a beam -w 750,750 -c 1000,1000',
+ path => '/etc/check_mk/mrpe.cfg';
+ }
+
+}
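
Together with the Apt entry added by site_check_mk::agent::mrpe, a couchdb node ends up with an /etc/check_mk/mrpe.cfg along these lines, each entry being executed by the agent and reported as its own service:

    APT /usr/lib/nagios/plugins/check_apt
    Bigcouch_epmd_procs /usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a /opt/bigcouch/erts-5.9.1/bin/epmd
    Bigcouch_beam_procs /usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a /opt/bigcouch/erts-5.9.1/bin/beam
    Bigcouch_open_files /srv/leap/nagios/plugins/check_unix_open_fds.pl -a beam -w 750,750 -c 1000,1000
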
diff --git a/puppet/modules/site_check_mk/manifests/agent/haproxy.pp b/puppet/modules/site_check_mk/manifests/agent/haproxy.pp
new file mode 100644
index 00000000..e7986db1
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/haproxy.pp
@@ -0,0 +1,12 @@
+class site_check_mk::agent::haproxy {
+
+ include site_check_mk::agent::package::nagios_plugins_contrib
+
+ # local nagios plugin checks via mrpe
+ file_line {
+ 'haproxy':
+ line => 'Haproxy /usr/lib/nagios/plugins/check_haproxy -u "http://localhost:8000/haproxy;csv"',
+ path => '/etc/check_mk/mrpe.cfg';
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/logwatch.pp b/puppet/modules/site_check_mk/manifests/agent/logwatch.pp
new file mode 100644
index 00000000..423cace2
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/logwatch.pp
@@ -0,0 +1,36 @@
+class site_check_mk::agent::logwatch {
+ # Deploy mk_logwatch 1.2.4 so we can split the config
+ # into multiple config files in /etc/check_mk/logwatch.d
+ # see https://leap.se/code/issues/5135
+
+ file { '/usr/lib/check_mk_agent/plugins/mk_logwatch':
+ source => 'puppet:///modules/site_check_mk/agent/plugins/mk_logwatch.1.2.4',
+ mode => '0755',
+ require => Package['check-mk-agent-logwatch']
+ }
+
+ # only config files that watch a distinct logfile should go in logwatch.d/
+ file { '/etc/check_mk/logwatch.d':
+ ensure => directory,
+ recurse => true,
+ purge => true,
+ require => Package['check-mk-agent-logwatch']
+ }
+
+  # services that share a common logfile (i.e. /var/log/syslog) need to be
+  # concatenated into one file, otherwise the last file sourced will override
+  # the earlier config
+ # see mk_logwatch: "logwatch.cfg overwrites config files in logwatch.d",
+ # https://leap.se/code/issues/5155
+
+ # first, we need to deploy a custom logwatch.cfg that doesn't include
+ # a section about /var/log/syslog
+
+ file { '/etc/check_mk/logwatch.cfg':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/logwatch.cfg',
+    require => Package['check-mk-agent-logwatch']
+ }
+
+ include concat::setup
+ include site_check_mk::agent::logwatch::syslog
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/logwatch/syslog.pp b/puppet/modules/site_check_mk/manifests/agent/logwatch/syslog.pp
new file mode 100644
index 00000000..c927780d
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/logwatch/syslog.pp
@@ -0,0 +1,18 @@
+class site_check_mk::agent::logwatch::syslog {
+
+ concat { '/etc/check_mk/logwatch.d/syslog.cfg':
+ warn => true
+ }
+
+ concat::fragment { 'syslog_header':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog_header.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '01';
+ }
+ concat::fragment { 'syslog_tail':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog_tail.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '99';
+ }
+
+}
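
On a node that also pulls in, say, the couchdb and openvpn agent classes, the concat resource assembles /etc/check_mk/logwatch.d/syslog.cfg from its ordered fragments, roughly:

    /var/log/syslog
     C /usr/local/bin/couch-doc-update.*failed
     I ovpn-.*TLS Error: TLS handshake failed
     C panic
     C Unauthorized

The syslog_header fragment (order 01) names the logfile once, the per-service fragments (order 02) add their patterns, and syslog_tail (order 99) appends the generic ones; this is what avoids the "last file sourced wins" problem described in site_check_mk::agent::logwatch.
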
diff --git a/puppet/modules/site_check_mk/manifests/agent/mrpe.pp b/puppet/modules/site_check_mk/manifests/agent/mrpe.pp
new file mode 100644
index 00000000..6921574f
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/mrpe.pp
@@ -0,0 +1,18 @@
+class site_check_mk::agent::mrpe {
+ # check_mk can use standard nagios plugins using
+ # a wrapper called mrpe
+ # see http://mathias-kettner.de/checkmk_mrpe.html
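+ # each mrpe.cfg entry maps a check name to a plugin command line, e.g.
+ # (illustrative): Disk_Root /usr/lib/nagios/plugins/check_disk -w 20% -c 10% /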
+
+ package { 'nagios-plugins-basic':
+ ensure => latest,
+ }
+
+ file { '/etc/check_mk/mrpe.cfg':
+ ensure => present,
+ require => Package['check-mk-agent']
+ } ->
+ file_line { 'Apt':
+ line => 'APT /usr/lib/nagios/plugins/check_apt',
+ path => '/etc/check_mk/mrpe.cfg',
+ }
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/mx.pp b/puppet/modules/site_check_mk/manifests/agent/mx.pp
new file mode 100644
index 00000000..35a4e9a5
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/mx.pp
@@ -0,0 +1,23 @@
+class site_check_mk::agent::mx {
+
+ # watch logs
+ file { '/etc/check_mk/logwatch.d/leap_mx.cfg':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/leap_mx.cfg',
+ }
+
+ # local nagios plugin checks via mrpe
+ file_line {
+ 'Leap_MX_Procs':
+ line => 'Leap_MX_Procs /usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a leap_mx',
+ path => '/etc/check_mk/mrpe.cfg';
+ }
+
+
+ # check for stale files in the queue dir
+ file { '/usr/lib/check_mk_agent/local/check_leap_mx.sh':
+ source => 'puppet:///modules/site_check_mk/agent/local_checks/mx/check_leap_mx.sh',
+ mode => '0755',
+ require => Package['check-mk-agent']
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/openvpn.pp b/puppet/modules/site_check_mk/manifests/agent/openvpn.pp
new file mode 100644
index 00000000..919a408d
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/openvpn.pp
@@ -0,0 +1,10 @@
+class site_check_mk::agent::openvpn {
+
+ # check syslog
+ concat::fragment { 'syslog_openvpn':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog/openvpn.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '02';
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/package/nagios_plugins_contrib.pp b/puppet/modules/site_check_mk/manifests/agent/package/nagios_plugins_contrib.pp
new file mode 100644
index 00000000..95a60d17
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/package/nagios_plugins_contrib.pp
@@ -0,0 +1,5 @@
+class site_check_mk::agent::package::nagios_plugins_contrib {
+ package { 'nagios-plugins-contrib':
+ ensure => installed,
+ }
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/package/perl_plugin.pp b/puppet/modules/site_check_mk/manifests/agent/package/perl_plugin.pp
new file mode 100644
index 00000000..4feda375
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/package/perl_plugin.pp
@@ -0,0 +1,5 @@
+class site_check_mk::agent::package::perl_plugin {
+ package { 'libnagios-plugin-perl':
+ ensure => installed,
+ }
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/soledad.pp b/puppet/modules/site_check_mk/manifests/agent/soledad.pp
new file mode 100644
index 00000000..cbae81fe
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/soledad.pp
@@ -0,0 +1,14 @@
+class site_check_mk::agent::soledad {
+
+ file { '/etc/check_mk/logwatch.d/soledad.cfg':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/soledad.cfg',
+ }
+
+ # local nagios plugin checks via mrpe
+ file_line {
+ 'Soledad_Procs':
+ line => 'Soledad_Procs /usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a soledad',
+ path => '/etc/check_mk/mrpe.cfg';
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/stunnel.pp b/puppet/modules/site_check_mk/manifests/agent/stunnel.pp
new file mode 100644
index 00000000..64022824
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/stunnel.pp
@@ -0,0 +1,9 @@
+class site_check_mk::agent::stunnel {
+
+ concat::fragment { 'syslog_stunnel':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog/stunnel.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '02';
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/tapicero.pp b/puppet/modules/site_check_mk/manifests/agent/tapicero.pp
new file mode 100644
index 00000000..369ed00b
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/tapicero.pp
@@ -0,0 +1,16 @@
+class site_check_mk::agent::tapicero {
+
+ concat::fragment { 'syslog_tapicero':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog/tapicero.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '02';
+ }
+
+ # local nagios plugin checks via mrpe
+ file_line {
+ 'Tapicero_Procs':
+ line => 'Tapicero_Procs /usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a tapicero',
+ path => '/etc/check_mk/mrpe.cfg';
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/webapp.pp b/puppet/modules/site_check_mk/manifests/agent/webapp.pp
new file mode 100644
index 00000000..64f5ea6d
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/webapp.pp
@@ -0,0 +1,26 @@
+class site_check_mk::agent::webapp {
+
+ # check webapp login + soledad sync
+ package { [ 'python-srp', 'python-requests', 'python-yaml', 'python-u1db' ]:
+ ensure => installed
+ }
+ file { '/usr/lib/check_mk_agent/local/nagios-webapp_login.py':
+ ensure => link,
+ target => '/srv/leap/webapp/test/nagios/webapp_login.py',
+ require => Package['check-mk-agent']
+ }
+ file { '/usr/lib/check_mk_agent/local/soledad_sync.py':
+ ensure => link,
+ target => '/srv/leap/webapp/test/nagios/soledad_sync.py',
+ require => Package['check-mk-agent']
+ }
+
+
+ # check syslog
+ concat::fragment { 'syslog_webapp':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog/webapp.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '02';
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/server.pp b/puppet/modules/site_check_mk/manifests/server.pp
new file mode 100644
index 00000000..e544ef0d
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/server.pp
@@ -0,0 +1,64 @@
+class site_check_mk::server {
+
+ $ssh_hash = hiera('ssh')
+ $pubkey = $ssh_hash['authorized_keys']['monitor']['key']
+ $type = $ssh_hash['authorized_keys']['monitor']['type']
+ $seckey = $ssh_hash['monitor']['private_key']
+
+ $nagios_hiera = hiera_hash('nagios')
+ $nagios_hosts = $nagios_hiera['hosts']
+
+ $hosts = hiera_hash('hosts')
+ $all_hosts = inline_template ('<% @hosts.keys.sort.each do |key| -%>"<%= @hosts[key]["domain_internal"] %>", <% end -%>')
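+ # the template above renders a comma-separated list of quoted internal
+ # domains, roughly like (illustrative): "couch1.example.i", "web1.example.i",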
+
+ package { 'check-mk-server':
+ ensure => installed,
+ }
+
+ # override paths to use the system check_mk rather than OMD
+ class { 'check_mk::config':
+ site => '',
+ etc_dir => '/etc',
+ nagios_subdir => 'nagios3',
+ bin_dir => '/usr/bin',
+ host_groups => undef,
+ use_storedconfigs => false,
+ require => Package['check-mk-server']
+ }
+
+ Exec['check_mk-reload'] ->
+ Exec['check_mk-refresh-inventory-daily'] ->
+ Service['nagios']
+
+ file {
+ '/etc/check_mk/conf.d/use_ssh.mk':
+ content => template('site_check_mk/use_ssh.mk'),
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/all_hosts_static':
+ content => $all_hosts,
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/.ssh':
+ ensure => directory,
+ require => Package['check-mk-server'];
+ '/etc/check_mk/.ssh/id_rsa':
+ content => $seckey,
+ owner => 'nagios',
+ mode => '0600',
+ require => Package['check-mk-server'];
+ '/etc/check_mk/.ssh/id_rsa.pub':
+ content => "${type} ${pubkey} monitor",
+ owner => 'nagios',
+ mode => '0644',
+ require => Package['check-mk-server'];
+ # check_icmp must be suid root or called by sudo
+ # see https://leap.se/code/issues/5171
+ '/usr/lib/nagios/plugins/check_icmp':
+ mode => '4755',
+ require => Package['nagios-plugins-basic'];
+ }
+
+
+ include check_mk::agent::local_checks
+}
diff --git a/puppet/modules/site_check_mk/templates/use_ssh.mk b/puppet/modules/site_check_mk/templates/use_ssh.mk
new file mode 100644
index 00000000..0bebebcf
--- /dev/null
+++ b/puppet/modules/site_check_mk/templates/use_ssh.mk
@@ -0,0 +1,6 @@
+# http://mathias-kettner.de/checkmk_datasource_programs.html
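+# each generated entry looks roughly like this (illustrative hostname/port):
+#   ( "ssh -l root -i /etc/check_mk/.ssh/id_rsa -p 22 node1.example.i check_mk_agent", [ "node1.example.i" ], ),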
+datasource_programs = [
+<% nagios_hosts.sort.each do |name,config| %>
+ ( "ssh -l root -i /etc/check_mk/.ssh/id_rsa -p <%=config['ssh_port']%> <%=config['domain_internal']%> check_mk_agent", [ "<%=config['domain_internal']%>" ], ),<%- end -%>
+
+]
diff --git a/puppet/modules/site_config/manifests/caching_resolver.pp b/puppet/modules/site_config/manifests/caching_resolver.pp
index 922c394f..3d7b9206 100644
--- a/puppet/modules/site_config/manifests/caching_resolver.pp
+++ b/puppet/modules/site_config/manifests/caching_resolver.pp
@@ -1,4 +1,5 @@
class site_config::caching_resolver {
+ tag 'leap_base'
# Setup a conf.d directory to place additional unbound configuration files.
# There must be at least one file in the directory, or unbound will not start,
diff --git a/puppet/modules/site_config/manifests/default.pp b/puppet/modules/site_config/manifests/default.pp
index 00eee9d0..7e421a21 100644
--- a/puppet/modules/site_config/manifests/default.pp
+++ b/puppet/modules/site_config/manifests/default.pp
@@ -2,19 +2,27 @@ class site_config::default {
tag 'leap_base'
$domain_hash = hiera('domain')
+ include site_config::params
- include concat::setup
+ # make sure apt is updated before any packages are installed
+ include apt::update
+ Package { require => Exec['apt_updated'] }
+
+ include site_config::slow
# default class, used by all hosts
include lsb, git
- # configure apt
- include site_apt
+ # configure sysctl parameters
+ include site_config::sysctl
# configure ssh and include ssh-keys
include site_config::sshd
+ # include classes for special environments
+ # e.g. openstack/aws nodes, vagrant nodes
+
# fix dhclient from changing resolver information
if $::ec2_instance_id {
include site_config::dhclient
@@ -26,13 +34,11 @@ class site_config::default {
# configure caching, local resolver
include site_config::caching_resolver
- # configure /etc/hosts
- class { 'site_config::hosts':
- stage => setup,
- }
+ # install/configure syslog
+ include site_config::syslog
# install/remove base packages
- include site_config::base_packages
+ include site_config::packages::base
# include basic shorewall config
include site_shorewall::defaults
@@ -41,4 +47,19 @@ class site_config::default {
# include basic shell config
include site_config::shell
+
+ # set up core leap files and directories
+ include site_config::files
+
+ if $::services !~ /\bmx\b/ {
+ include site_postfix::satellite
+ }
+
+ # if the class site_custom exists, include it.
+ # this gives users the possibility to define custom puppet recipes
+ if defined( '::site_custom') {
+ include ::site_custom
+ }
+
+ include site_check_mk::agent
}
diff --git a/puppet/modules/site_config/manifests/files.pp b/puppet/modules/site_config/manifests/files.pp
new file mode 100644
index 00000000..684d3ad0
--- /dev/null
+++ b/puppet/modules/site_config/manifests/files.pp
@@ -0,0 +1,23 @@
+class site_config::files {
+
+ file {
+ '/srv/leap':
+ ensure => directory,
+ owner => 'root',
+ group => 'root',
+ mode => '0711';
+
+ '/var/lib/leap':
+ ensure => directory,
+ owner => root,
+ group => 'root',
+ mode => '0755';
+
+ '/var/log/leap':
+ ensure => directory,
+ owner => root,
+ group => 'adm',
+ mode => '0750';
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/hosts.pp b/puppet/modules/site_config/manifests/hosts.pp
index ccedf036..e5d4dd70 100644
--- a/puppet/modules/site_config/manifests/hosts.pp
+++ b/puppet/modules/site_config/manifests/hosts.pp
@@ -1,8 +1,9 @@
class site_config::hosts() {
- $hosts = hiera('hosts','')
+ $hosts = hiera('hosts', false)
$hostname = hiera('name')
$domain_hash = hiera('domain')
$domain_public = $domain_hash['full_suffix']
+ $api = hiera('api', '')
file { '/etc/hostname':
ensure => present,
diff --git a/puppet/modules/site_config/manifests/initial_firewall.pp b/puppet/modules/site_config/manifests/initial_firewall.pp
new file mode 100644
index 00000000..51cceb31
--- /dev/null
+++ b/puppet/modules/site_config/manifests/initial_firewall.pp
@@ -0,0 +1,62 @@
+class site_config::initial_firewall {
+
+ # This class is intended to set up an initial firewall before shorewall is
+ # configured. The purpose is that in the rare case where shorewall fails
+ # to start, we do not expose services to the public.
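+ # (illustrative: the rules that actually get loaded can be inspected on a
+ # node with `iptables -L -n` and `ip6tables -L -n`)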
+
+ $ssh_config = hiera('ssh')
+ $ssh_port = $ssh_config['port']
+
+ package { 'iptables':
+ ensure => present
+ }
+
+ file {
+ # This firewall enables ssh access, dns lookups and web lookups (for
+ # package installation) but otherwise restricts all outgoing and incoming
+ # ports
+ '/etc/network/ipv4firewall_up.rules':
+ content => template('site_config/ipv4firewall_up.rules.erb'),
+ owner => root,
+ group => 0,
+ mode => '0644';
+
+ # This firewall denies all IPv6 traffic - we will need to change this
+ # when we begin to support IPv6
+ '/etc/network/ipv6firewall_up.rules':
+ content => template('site_config/ipv6firewall_up.rules.erb'),
+ owner => root,
+ group => 0,
+ mode => '0644';
+
+ # Run iptables-restore from if-pre-up.d so that the network is locked down
+ # until the correct interfaces and IPs are connected
+ '/etc/network/if-pre-up.d/ipv4tables':
+ content => "#!/bin/sh\n/sbin/iptables-restore < /etc/network/ipv4firewall_up.rules\n",
+ owner => root,
+ group => 0,
+ mode => '0744';
+
+ # Same as above for IPv6
+ '/etc/network/if-pre-up.d/ipv6tables':
+ content => "#!/bin/sh\n/sbin/ip6tables-restore < /etc/network/ipv6firewall_up.rules\n",
+ owner => root,
+ group => 0,
+ mode => '0744';
+ }
+
+ # Immediately set up these firewall rules, but only if shorewall is not running
+ exec {
+ 'default_ipv4_firewall':
+ command => '/sbin/iptables-restore < /etc/network/ipv4firewall_up.rules',
+ logoutput => true,
+ unless => 'test -x /etc/init.d/shorewall && /etc/init.d/shorewall status',
+ require => File['/etc/network/ipv4firewall_up.rules'];
+
+ 'default_ipv6_firewall':
+ command => '/sbin/ip6tables-restore < /etc/network/ipv6firewall_up.rules',
+ logoutput => true,
+ unless => 'test -x /etc/init.d/shorewall && /etc/init.d/shorewall status',
+ require => File['/etc/network/ipv6firewall_up.rules'];
+ }
+}
diff --git a/puppet/modules/site_config/manifests/base_packages.pp b/puppet/modules/site_config/manifests/packages/base.pp
index 3d40f7a2..ae47963c 100644
--- a/puppet/modules/site_config/manifests/base_packages.pp
+++ b/puppet/modules/site_config/manifests/packages/base.pp
@@ -1,12 +1,13 @@
-class site_config::base_packages {
+class site_config::packages::base {
+
# base set of packages that we want to have installed everywhere
- package { [ 'etckeeper', 'screen', 'less' ]:
+ package { [ 'etckeeper', 'screen', 'less', 'ntp' ]:
ensure => installed,
}
# base set of packages that we want to remove everywhere
- package { [ 'acpi', 'acpid', 'acpi-support-base', 'eject', 'ftp',
+ package { [ 'acpi', 'acpid', 'acpi-support-base', 'eject', 'ftp', 'fontconfig-config',
'laptop-detect', 'lpr', 'nfs-common', 'nfs-kernel-server',
'portmap', 'pppconfig', 'pppoe', 'pump', 'qstat', 'rpcbind',
'samba-common', 'samba-common-bin', 'smbclient', 'tcl8.5',
@@ -14,15 +15,4 @@ class site_config::base_packages {
'x11-utils', 'xterm' ]:
ensure => absent;
}
-
- if $::virtual == 'virtualbox' {
- $virtualbox_ensure = present
- } else {
- $virtualbox_ensure = absent
- }
-
- package { [ 'build-essential', 'fontconfig-config', 'g++', 'g++-4.7', 'gcc',
- 'gcc-4.6', 'gcc-4.7', 'cpp', 'cpp-4.6', 'cpp-4.7', 'libc6-dev' ]:
- ensure => $virtualbox_ensure
- }
}
diff --git a/puppet/modules/site_config/manifests/packages/build_essential.pp b/puppet/modules/site_config/manifests/packages/build_essential.pp
new file mode 100644
index 00000000..7dfb8b03
--- /dev/null
+++ b/puppet/modules/site_config/manifests/packages/build_essential.pp
@@ -0,0 +1,11 @@
+#
+# include this whenever you want to ensure that the build-essential package and related compilers are installed.
+#
+class site_config::packages::build_essential {
+ if !defined(Package['build-essential']) {
+ package {
+ ['build-essential', 'g++', 'g++-4.7', 'gcc', 'gcc-4.6', 'gcc-4.7', 'cpp', 'cpp-4.6', 'cpp-4.7', 'libc6-dev']:
+ ensure => present
+ }
+ }
+} \ No newline at end of file
diff --git a/puppet/modules/site_config/manifests/packages/gnutls.pp b/puppet/modules/site_config/manifests/packages/gnutls.pp
new file mode 100644
index 00000000..b1f17480
--- /dev/null
+++ b/puppet/modules/site_config/manifests/packages/gnutls.pp
@@ -0,0 +1,5 @@
+class site_config::packages::gnutls {
+
+ package { 'gnutls-bin': ensure => installed }
+
+}
diff --git a/puppet/modules/site_config/manifests/packages/uninstall.pp b/puppet/modules/site_config/manifests/packages/uninstall.pp
new file mode 100644
index 00000000..12f527d9
--- /dev/null
+++ b/puppet/modules/site_config/manifests/packages/uninstall.pp
@@ -0,0 +1,16 @@
+#
+# Uninstall build-essential and compilers, unless they have been explicitly installed elsewhere.
+#
+class site_config::packages::uninstall {
+ tag 'leap_base'
+
+ # generally, dev packages are needed for installing ruby gems with native extensions.
+ # (nickserver, webapp, etc)
+
+ if !defined(Package['build-essential']) {
+ package {
+ ['build-essential', 'g++', 'g++-4.7', 'gcc', 'gcc-4.6', 'gcc-4.7', 'cpp', 'cpp-4.6', 'cpp-4.7', 'libc6-dev']:
+ ensure => purged
+ }
+ }
+} \ No newline at end of file
diff --git a/puppet/modules/site_config/manifests/params.pp b/puppet/modules/site_config/manifests/params.pp
index 237ee454..012b3ce0 100644
--- a/puppet/modules/site_config/manifests/params.pp
+++ b/puppet/modules/site_config/manifests/params.pp
@@ -3,9 +3,12 @@ class site_config::params {
$ip_address = hiera('ip_address')
$ip_address_interface = getvar("interface_${ip_address}")
$ec2_local_ipv4_interface = getvar("interface_${::ec2_local_ipv4}")
+ $environment = hiera('environment', undef)
- if $::virtual == 'virtualbox' {
- $interface = [ 'eth0', 'eth1' ]
+
+ if $environment == 'local' {
+ $interface = 'eth1'
+ include site_config::packages::build_essential
}
elsif hiera('interface','') != '' {
$interface = hiera('interface')
@@ -17,9 +20,16 @@ class site_config::params {
$interface = $ec2_local_ipv4_interface
}
elsif $::interfaces =~ /eth0/ {
- $interface = eth0
+ $interface = 'eth0'
}
else {
fail("unable to determine a valid interface, please set a valid interface for this node in nodes/${::hostname}.json")
}
+
+ $ca_name = 'leap_ca'
+ $client_ca_name = 'leap_client_ca'
+ $ca_bundle_name = 'leap_ca_bundle'
+ $cert_name = 'leap'
+ $commercial_ca_name = 'leap_commercial_ca'
+ $commercial_cert_name = 'leap_commercial'
}
diff --git a/puppet/modules/site_config/manifests/resolvconf.pp b/puppet/modules/site_config/manifests/resolvconf.pp
index 271c5043..05990c67 100644
--- a/puppet/modules/site_config/manifests/resolvconf.pp
+++ b/puppet/modules/site_config/manifests/resolvconf.pp
@@ -2,12 +2,13 @@ class site_config::resolvconf {
$domain_public = $site_config::default::domain_hash['full_suffix']
- # 127.0.0.1: caching-only local bind
- # 87.118.100.175: http://server.privacyfoundation.de
- # 62.141.58.13: http://www.privacyfoundation.ch/de/service/server.html
class { '::resolvconf':
domain => $domain_public,
search => $domain_public,
- nameservers => [ '127.0.0.1', '87.118.100.175', '62.141.58.13' ]
+ nameservers => [
+ '127.0.0.1 # local caching-only, unbound',
+ '85.214.20.141 # Digitalcourage, a german privacy organisation: (https://en.wikipedia.org/wiki/Digitalcourage)',
+ '77.109.138.45 # Swiss privacy Foundation (http://www.privacyfoundation.ch/de/service/server.html)'
+ ]
}
}
diff --git a/puppet/modules/site_config/manifests/ruby/dev.pp b/puppet/modules/site_config/manifests/ruby/dev.pp
new file mode 100644
index 00000000..3ea6ca96
--- /dev/null
+++ b/puppet/modules/site_config/manifests/ruby/dev.pp
@@ -0,0 +1,8 @@
+class site_config::ruby::dev inherits site_config::ruby {
+ Class['::ruby'] {
+ ruby_version => '1.9.3',
+ install_dev => true
+ }
+ # building gems locally probably requires build-essential and gcc:
+ include site_config::packages::build_essential
+}
diff --git a/puppet/modules/site_config/manifests/setup.pp b/puppet/modules/site_config/manifests/setup.pp
new file mode 100644
index 00000000..6d89be86
--- /dev/null
+++ b/puppet/modules/site_config/manifests/setup.pp
@@ -0,0 +1,50 @@
+class site_config::setup {
+ tag 'leap_base'
+
+ #
+ # this is applied before each run of site.pp
+ #
+ #$services = ''
+
+ Exec { path => '/usr/bin:/usr/sbin/:/bin:/sbin:/usr/local/bin:/usr/local/sbin' }
+
+ include site_config::params
+
+ include concat::setup
+ include stdlib
+
+ # configure /etc/hosts
+ class { 'site_config::hosts':
+ stage => setup,
+ }
+
+ include site_config::initial_firewall
+
+ include site_apt
+
+ package { 'facter':
+ ensure => latest,
+ require => Exec['refresh_apt']
+ }
+
+ # if squid_deb_proxy_client is set to true, install and configure
+ # squid_deb_proxy_client for apt caching
+ if hiera('squid_deb_proxy_client', false) {
+ include site_squid_deb_proxy::client
+ }
+
+ # shorewall is installed/half-configured during setup.pp (Bug #3871)
+ # we need to include shorewall::interface{eth0} in setup.pp so
+ # packages can be installed during the main puppet run, even before shorewall
+ # is configured completely
+ if ( $::site_config::params::environment == 'local' ) {
+ include site_config::vagrant
+ }
+
+ # if class site_custom::setup exists, include it.
+ # possibility for users to define custom puppet recipes
+ if defined( '::site_custom::setup') {
+ include ::site_custom::setup
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/sysctl.pp b/puppet/modules/site_config/manifests/sysctl.pp
new file mode 100644
index 00000000..99f75123
--- /dev/null
+++ b/puppet/modules/site_config/manifests/sysctl.pp
@@ -0,0 +1,8 @@
+class site_config::sysctl {
+
+ sysctl::config {
+ 'net.ipv4.ip_nonlocal_bind':
+ value => 1,
+ comment => 'Allow applications to bind to an address when link is down (see https://leap.se/code/issues/4506)'
+ }
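+
+ # (illustrative: the applied value can be checked on a node with
+ # `sysctl net.ipv4.ip_nonlocal_bind`)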
+}
diff --git a/puppet/modules/site_config/manifests/syslog.pp b/puppet/modules/site_config/manifests/syslog.pp
new file mode 100644
index 00000000..d3abeca1
--- /dev/null
+++ b/puppet/modules/site_config/manifests/syslog.pp
@@ -0,0 +1,28 @@
+class site_config::syslog {
+
+ # we need to pull in rsyslog from the leap repository until it is available in
+ # wheezy-backports
+ apt::preferences_snippet { 'fixed_rsyslog_anon_package':
+ package => 'rsyslog*',
+ priority => '999',
+ pin => 'release o=leap.se',
+ before => Class['rsyslog::install']
+ }
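+
+ # (illustrative) the resulting apt preferences snippet should look roughly like:
+ #   Package: rsyslog*
+ #   Pin: release o=leap.se
+ #   Pin-Priority: 999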
+
+ apt::preferences_snippet { 'rsyslog_anon_depends':
+ package => 'libestr0 librelp0',
+ priority => '999',
+ pin => 'release a=wheezy-backports',
+ before => Class['rsyslog::install']
+ }
+
+ class { 'rsyslog::client':
+ log_remote => false,
+ log_local => true
+ }
+
+ rsyslog::snippet { '00-anonymize_logs':
+ content => '$ModLoad mmanon
+action(type="mmanon" ipv4.bits="32" mode="rewrite")'
+ }
+}
diff --git a/puppet/modules/site_config/manifests/vagrant.pp b/puppet/modules/site_config/manifests/vagrant.pp
new file mode 100644
index 00000000..8f50b305
--- /dev/null
+++ b/puppet/modules/site_config/manifests/vagrant.pp
@@ -0,0 +1,11 @@
+class site_config::vagrant {
+ # class for vagrant nodes
+
+ include site_shorewall::defaults
+ # eth0 on vagrant nodes is the uplink interface
+ shorewall::interface { 'eth0':
+ zone => 'net',
+ options => 'tcpflags,blacklist,nosmurfs';
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/x509/ca.pp b/puppet/modules/site_config/manifests/x509/ca.pp
new file mode 100644
index 00000000..b16d0eeb
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/ca.pp
@@ -0,0 +1,9 @@
+class site_config::x509::ca {
+
+ $x509 = hiera('x509')
+ $ca = $x509['ca_cert']
+
+ x509::ca { $site_config::params::ca_name:
+ content => $ca
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/ca_bundle.pp b/puppet/modules/site_config/manifests/x509/ca_bundle.pp
new file mode 100644
index 00000000..4cbe574a
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/ca_bundle.pp
@@ -0,0 +1,16 @@
+class site_config::x509::ca_bundle {
+
+ # CA bundle -- we want to have the possibility of allowing multiple CAs.
+ # For now, the reason is to transition to using a client CA. In the future,
+ # we will want to be able to smoothly phase out one CA and phase in another.
+ # I tried "--capath" for this, but it did not work.
+
+
+ $x509 = hiera('x509')
+ $ca = $x509['ca_cert']
+ $client_ca = $x509['client_ca_cert']
+
+ x509::ca { $site_config::params::ca_bundle_name:
+ content => "${ca}${client_ca}"
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/cert.pp b/puppet/modules/site_config/manifests/x509/cert.pp
new file mode 100644
index 00000000..7ed42959
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/cert.pp
@@ -0,0 +1,10 @@
+class site_config::x509::cert {
+
+ $x509 = hiera('x509')
+ $cert = $x509['cert']
+
+ x509::cert { $site_config::params::cert_name:
+ content => $cert
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/x509/client_ca/ca.pp b/puppet/modules/site_config/manifests/x509/client_ca/ca.pp
new file mode 100644
index 00000000..0f313898
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/client_ca/ca.pp
@@ -0,0 +1,14 @@
+class site_config::x509::client_ca::ca {
+
+ ##
+ ## This is for the special CA that is used exclusively for generating
+ ## client certificates by the webapp.
+ ##
+
+ $x509 = hiera('x509')
+ $cert = $x509['client_ca_cert']
+
+ x509::ca { $site_config::params::client_ca_name:
+ content => $cert
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/client_ca/key.pp b/puppet/modules/site_config/manifests/x509/client_ca/key.pp
new file mode 100644
index 00000000..f9ef3f52
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/client_ca/key.pp
@@ -0,0 +1,14 @@
+class site_config::x509::client_ca::key {
+
+ ##
+ ## This is for the special CA that is used exclusively for generating
+ ## client certificates by the webapp.
+ ##
+
+ $x509 = hiera('x509')
+ $key = $x509['client_ca_key']
+
+ x509::key { $site_config::params::client_ca_name:
+ content => $key
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/commercial/ca.pp b/puppet/modules/site_config/manifests/x509/commercial/ca.pp
new file mode 100644
index 00000000..8f35759f
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/commercial/ca.pp
@@ -0,0 +1,9 @@
+class site_config::x509::commercial::ca {
+
+ $x509 = hiera('x509')
+ $ca = $x509['commercial_ca_cert']
+
+ x509::ca { $site_config::params::commercial_ca_name:
+ content => $ca
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/commercial/cert.pp b/puppet/modules/site_config/manifests/x509/commercial/cert.pp
new file mode 100644
index 00000000..0c71a705
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/commercial/cert.pp
@@ -0,0 +1,10 @@
+class site_config::x509::commercial::cert {
+
+ $x509 = hiera('x509')
+ $cert = $x509['commercial_cert']
+
+ x509::cert { $site_config::params::commercial_cert_name:
+ content => $cert
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/x509/commercial/key.pp b/puppet/modules/site_config/manifests/x509/commercial/key.pp
new file mode 100644
index 00000000..d32e85ef
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/commercial/key.pp
@@ -0,0 +1,9 @@
+class site_config::x509::commercial::key {
+
+ $x509 = hiera('x509')
+ $key = $x509['commercial_key']
+
+ x509::key { $site_config::params::commercial_cert_name:
+ content => $key
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/key.pp b/puppet/modules/site_config/manifests/x509/key.pp
new file mode 100644
index 00000000..32b59726
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/key.pp
@@ -0,0 +1,9 @@
+class site_config::x509::key {
+
+ $x509 = hiera('x509')
+ $key = $x509['key']
+
+ x509::key { $site_config::params::cert_name:
+ content => $key
+ }
+}
diff --git a/puppet/modules/site_config/templates/hosts b/puppet/modules/site_config/templates/hosts
index 2c784b05..bfcabaa5 100644
--- a/puppet/modules/site_config/templates/hosts
+++ b/puppet/modules/site_config/templates/hosts
@@ -1,10 +1,12 @@
# This file is managed by puppet, any changes will be overwritten!
127.0.0.1 localhost
-127.0.1.1 <%= @hostname %>.<%= @domain_public %> <%= @hostname %>
+127.0.1.1 <%= @hostname %>.<%= @domain_public %> <%= @hostname %> <% if (defined? @services) and (@services.include? 'webapp') -%><%= @domain_public %> <%= @api['domain'] %><% end -%>
+
<%- if @hosts then -%>
-<% @hosts.each do |name, props| -%>
+<% @hosts.keys.sort.each do |name| -%>
+<%- props = @hosts[name] -%>
<%= props["ip_address"] %> <%= props["domain_full"] %> <%= props["domain_internal"] %> <%= name %>
<% end -%>
<% end -%>
diff --git a/puppet/modules/site_config/templates/ipv4firewall_up.rules.erb b/puppet/modules/site_config/templates/ipv4firewall_up.rules.erb
new file mode 100644
index 00000000..524ae308
--- /dev/null
+++ b/puppet/modules/site_config/templates/ipv4firewall_up.rules.erb
@@ -0,0 +1,22 @@
+# Generated by iptables-save v1.4.14 on Tue Aug 20 14:40:40 2013
+*filter
+:INPUT DROP [0:0]
+:FORWARD DROP [0:0]
+:OUTPUT DROP [0:0]
+-A INPUT -i lo -j ACCEPT
+-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
+-A INPUT -p tcp -m state --state NEW,ESTABLISHED --dport <%= @ssh_port %> -j ACCEPT
+-A INPUT -p udp -m udp --sport 53 -j ACCEPT
+-A INPUT -p icmp -m icmp --icmp-type 8 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
+-A INPUT -p icmp -m icmp --icmp-type 0 -m state --state RELATED,ESTABLISHED -j ACCEPT
+-A INPUT -m limit --limit 5/min -j LOG --log-prefix "iptables denied: " --log-level 7
+-A OUTPUT -o lo -j ACCEPT
+-A OUTPUT -p icmp -m icmp --icmp-type 0 -m state --state RELATED,ESTABLISHED -j ACCEPT
+-A OUTPUT -p icmp -m icmp --icmp-type 8 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
+-A OUTPUT -p tcp -m state --state NEW,ESTABLISHED --sport <%= @ssh_port %> -j ACCEPT
+-A OUTPUT -p tcp -m state --state NEW,ESTABLISHED --dport 80 -j ACCEPT
+-A OUTPUT -p tcp -m state --state NEW,ESTABLISHED --dport 443 -j ACCEPT
+-A OUTPUT -p udp -m udp --dport 53 -j ACCEPT
+-A OUTPUT -p udp -m udp --dport 123 -j ACCEPT
+-A OUTPUT -m limit --limit 5/min -j LOG --log-prefix "iptables denied: " --log-level 7
+COMMIT
diff --git a/puppet/modules/site_config/templates/ipv6firewall_up.rules.erb b/puppet/modules/site_config/templates/ipv6firewall_up.rules.erb
new file mode 100644
index 00000000..e7fae52e
--- /dev/null
+++ b/puppet/modules/site_config/templates/ipv6firewall_up.rules.erb
@@ -0,0 +1,7 @@
+# Generated by ip6tables-save v1.4.20 on Tue Aug 20 12:19:43 2013
+*filter
+:INPUT DROP [24:1980]
+:FORWARD DROP [0:0]
+:OUTPUT DROP [14:8030]
+COMMIT
+# Completed on Tue Aug 20 12:19:43 2013
diff --git a/puppet/modules/site_couchdb/files/couchdb_scripts_defaults.conf b/puppet/modules/site_couchdb/files/couchdb_scripts_defaults.conf
new file mode 100644
index 00000000..1565e1a1
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/couchdb_scripts_defaults.conf
@@ -0,0 +1,4 @@
+# space-separated list of DBs to exclude from dumping
+# sourced by couchdb_dumpall.sh
+EXCLUDE_DBS='sessions tokens'
+
diff --git a/puppet/modules/site_couchdb/files/designs/Readme.md b/puppet/modules/site_couchdb/files/designs/Readme.md
new file mode 100644
index 00000000..983f629f
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/Readme.md
@@ -0,0 +1,14 @@
+This directory contains design documents for the leap platform.
+
+They need to be uploaded to the CouchDB databases in order to query the
+databases in certain ways.
+
+Each subdirectory corresponds to a couch database and contains the design
+documents that need to be added to that particular database.
+
+Here's an example of how to upload the users design document:
+```bash
+HOST="http://localhost:5984"
+curl -X PUT $HOST/users/_design/User --data @users/User.json
+
+```
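+
+To verify that the document was stored (illustrative, reuses the same $HOST):
+```bash
+curl $HOST/users/_design/User
+```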
diff --git a/puppet/modules/site_couchdb/files/designs/customers/Customer.json b/puppet/modules/site_couchdb/files/designs/customers/Customer.json
new file mode 100644
index 00000000..1b4bbddd
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/customers/Customer.json
@@ -0,0 +1,18 @@
+{
+ "_id": "_design/Customer",
+ "language": "javascript",
+ "views": {
+ "by_user_id": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Customer') && (doc['user_id'] != null)) {\n emit(doc['user_id'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_braintree_customer_id": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Customer') && (doc['braintree_customer_id'] != null)) {\n emit(doc['braintree_customer_id'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Customer') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "688c401ec0230b75625c176a88fc4a02"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/identities/Identity.json b/puppet/modules/site_couchdb/files/designs/identities/Identity.json
new file mode 100644
index 00000000..2ac092ab
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/identities/Identity.json
@@ -0,0 +1,28 @@
+{
+ "_id": "_design/Identity",
+ "language": "javascript",
+ "views": {
+ "by_user_id": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Identity') && (doc['user_id'] != null)) {\n emit(doc['user_id'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_address_and_destination": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Identity') && (doc['address'] != null) && (doc['destination'] != null)) {\n emit([doc['address'], doc['destination']], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_address": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Identity') && (doc['address'] != null)) {\n emit(doc['address'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "pgp_key_by_email": {
+ "map": " function(doc) {\n if (doc.type != 'Identity') {\n return;\n }\n if (typeof doc.keys === \"object\") {\n emit(doc.address, doc.keys[\"pgp\"]);\n }\n }\n"
+ },
+ "disabled": {
+ "map": " function(doc) {\n if (doc.type != 'Identity') {\n return;\n }\n if (typeof doc.user_id === \"undefined\") {\n emit(doc._id, 1);\n }\n }\n"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Identity') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "e9004d70e26770c621a9667536429a68"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/messages/Message.json b/puppet/modules/site_couchdb/files/designs/messages/Message.json
new file mode 100644
index 00000000..7bcd74c7
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/messages/Message.json
@@ -0,0 +1,18 @@
+{
+ "_id": "_design/Message",
+ "language": "javascript",
+ "views": {
+ "by_user_ids_to_show_and_created_at": {
+ "map": "// not using at moment\n// call with something like Message.by_user_ids_to_show_and_created_at.startkey([user_id, start_date]).endkey([user_id,end_date])\nfunction (doc) {\n if (doc.type === 'Message' && doc.user_ids_to_show && Array.isArray(doc.user_ids_to_show)) {\n doc.user_ids_to_show.forEach(function (userId) {\n emit([userId, doc.created_at], 1);\n });\n }\n}\n",
+ "reduce": "function(key, values, rereduce) { return sum(values); }"
+ },
+ "by_user_ids_to_show": {
+ "map": "function (doc) {\n if (doc.type === 'Message' && doc.user_ids_to_show && Array.isArray(doc.user_ids_to_show)) {\n doc.user_ids_to_show.forEach(function (userId) {\n emit(userId, 1);\n });\n }\n}\n",
+ "reduce": "function(key, values, rereduce) { return sum(values); }"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Message') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "0967e7cc5bb1e61edc1c085f6f0cecbf"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/sessions/Session.json b/puppet/modules/site_couchdb/files/designs/sessions/Session.json
new file mode 100644
index 00000000..70202780
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/sessions/Session.json
@@ -0,0 +1,8 @@
+{
+ "views": {
+ "by_expires": {
+ "reduce": "_sum",
+ "map": "function(doc) {\n if(typeof doc.expires !== \"undefined\") {\n emit(doc.expires, 1);\n }\n}\n"
+ }
+ }
+}
diff --git a/puppet/modules/site_couchdb/files/designs/shared/docs.json b/puppet/modules/site_couchdb/files/designs/shared/docs.json
new file mode 100644
index 00000000..004180cd
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/shared/docs.json
@@ -0,0 +1,8 @@
+{
+ "_id": "_design/docs",
+ "views": {
+ "get": {
+ "map": "function(doc) {\n if (doc.u1db_rev) {\n var is_tombstone = true;\n var has_conflicts = false;\n if (doc._attachments) {\n if (doc._attachments.u1db_content)\n is_tombstone = false;\n if (doc._attachments.u1db_conflicts)\n has_conflicts = true;\n }\n emit(doc._id,\n {\n \"couch_rev\": doc._rev,\n \"u1db_rev\": doc.u1db_rev,\n \"is_tombstone\": is_tombstone,\n \"has_conflicts\": has_conflicts,\n }\n );\n }\n}\n"
+ }
+ }
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/shared/syncs.json b/puppet/modules/site_couchdb/files/designs/shared/syncs.json
new file mode 100644
index 00000000..bab5622f
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/shared/syncs.json
@@ -0,0 +1,11 @@
+{
+ "_id": "_design/syncs",
+ "updates": {
+ "put": "function(doc, req){\n if (!doc) {\n doc = {}\n doc['_id'] = 'u1db_sync_log';\n doc['syncs'] = [];\n }\n body = JSON.parse(req.body);\n // remove outdated info\n doc['syncs'] = doc['syncs'].filter(\n function (entry) {\n return entry[0] != body['other_replica_uid'];\n }\n );\n // store u1db rev\n doc['syncs'].push([\n body['other_replica_uid'],\n body['other_generation'],\n body['other_transaction_id']\n ]);\n return [doc, 'ok'];\n}\n\n"
+ },
+ "views": {
+ "log": {
+ "map": "function(doc) {\n if (doc._id == 'u1db_sync_log') {\n if (doc.syncs)\n doc.syncs.forEach(function (entry) {\n emit(entry[0],\n {\n 'known_generation': entry[1],\n 'known_transaction_id': entry[2]\n });\n });\n }\n}\n"
+ }
+ }
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/shared/transactions.json b/puppet/modules/site_couchdb/files/designs/shared/transactions.json
new file mode 100644
index 00000000..106ad46c
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/shared/transactions.json
@@ -0,0 +1,13 @@
+{
+ "_id": "_design/transactions",
+ "lists": {
+ "generation": "function(head, req) {\n var row;\n var rows=[];\n // fetch all rows\n while(row = getRow()) {\n rows.push(row);\n }\n if (rows.length > 0)\n send(JSON.stringify({\n \"generation\": rows.length,\n \"doc_id\": rows[rows.length-1]['id'],\n \"transaction_id\": rows[rows.length-1]['value']\n }));\n else\n send(JSON.stringify({\n \"generation\": 0,\n \"doc_id\": \"\",\n \"transaction_id\": \"\",\n }));\n}\n",
+ "trans_id_for_gen": "function(head, req) {\n var row;\n var rows=[];\n var i = 1;\n var gen = 1;\n if (req.query.gen)\n gen = parseInt(req.query['gen']);\n // fetch all rows\n while(row = getRow())\n rows.push(row);\n if (gen <= rows.length)\n send(JSON.stringify({\n \"generation\": gen,\n \"doc_id\": rows[gen-1]['id'],\n \"transaction_id\": rows[gen-1]['value'],\n }));\n else\n send('{}');\n}\n",
+ "whats_changed": "function(head, req) {\n var row;\n var gen = 1;\n var old_gen = 0;\n if (req.query.old_gen)\n old_gen = parseInt(req.query['old_gen']);\n send('{\"transactions\":[\\n');\n // fetch all rows\n while(row = getRow()) {\n if (gen > old_gen) {\n if (gen > old_gen+1)\n send(',\\n');\n send(JSON.stringify({\n \"generation\": gen,\n \"doc_id\": row[\"id\"],\n \"transaction_id\": row[\"value\"]\n }));\n }\n gen++;\n }\n send('\\n]}');\n}\n"
+ },
+ "views": {
+ "log": {
+ "map": "function(doc) {\n if (doc.u1db_transactions)\n doc.u1db_transactions.forEach(function(t) {\n emit(t[0], // use timestamp as key so the results are ordered\n t[1]); // value is the transaction_id\n });\n}\n"
+ }
+ }
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/tickets/Ticket.json b/puppet/modules/site_couchdb/files/designs/tickets/Ticket.json
new file mode 100644
index 00000000..2c9408b8
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/tickets/Ticket.json
@@ -0,0 +1,50 @@
+{
+ "_id": "_design/Ticket",
+ "language": "javascript",
+ "views": {
+ "by_updated_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['updated_at'] != null)) {\n emit(doc['updated_at'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_created_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['created_at'] != null)) {\n emit(doc['created_at'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_created_by": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['created_by'] != null)) {\n emit(doc['created_by'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_is_open_and_created_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['is_open'] != null) && (doc['created_at'] != null)) {\n emit([doc['is_open'], doc['created_at']], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_is_open_and_updated_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['is_open'] != null) && (doc['updated_at'] != null)) {\n emit([doc['is_open'], doc['updated_at']], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_includes_post_by_and_is_open_and_created_at": {
+ "map": "function(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit([comment.posted_by, doc.is_open, doc.created_at], 1);\n }\n });\n }\n}\n",
+ "reduce": "function(key, values, rereduce) { return sum(values); }"
+ },
+ "by_includes_post_by_and_is_open_and_updated_at": {
+ "map": "function(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit([comment.posted_by, doc.is_open, doc.updated_at], 1);\n }\n });\n }\n}\n",
+ "reduce": "function(key, values, rereduce) { return sum(values); }"
+ },
+ "by_includes_post_by_and_updated_at": {
+ "map": "function(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit([comment.posted_by, doc.updated_at], 1);\n }\n });\n }\n}\n",
+ "reduce": "function(key, values, rereduce) { return sum(values); }"
+ },
+ "by_includes_post_by": {
+ "map": "// TODO: This view is only used in tests--should we keep it?\nfunction(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit(comment.posted_by, 1);\n }\n });\n }\n}\n",
+ "reduce": "function(key, values, rereduce) { return sum(values); }"
+ },
+ "by_includes_post_by_and_created_at": {
+ "map": "function(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit([comment.posted_by, doc.created_at], 1);\n }\n });\n }\n}\n",
+ "reduce": "function(key, values, rereduce) { return sum(values); }"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Ticket') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "9978e2cbeacbe8622c2a7f103bf8130f"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/tokens/Token.json b/puppet/modules/site_couchdb/files/designs/tokens/Token.json
new file mode 100644
index 00000000..b9025f15
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/tokens/Token.json
@@ -0,0 +1,14 @@
+{
+ "_id": "_design/Token",
+ "language": "javascript",
+ "views": {
+ "by_last_seen_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Token') && (doc['last_seen_at'] != null)) {\n emit(doc['last_seen_at'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Token') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "541dd924551c42a2317b345effbe65cc"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/users/User.json b/puppet/modules/site_couchdb/files/designs/users/User.json
new file mode 100644
index 00000000..4089ad97
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/users/User.json
@@ -0,0 +1,22 @@
+{
+ "_id": "_design/User",
+ "language": "javascript",
+ "views": {
+ "by_login": {
+ "map": " function(doc) {\n if ((doc['type'] == 'User') && (doc['login'] != null)) {\n emit(doc['login'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'User') {\n emit(doc._id, null);\n }\n }\n"
+ },
+ "by_created_at_and_one_month_warning_not_sent": {
+ "map": "function (doc) {\n if ((doc['type'] == 'User') && (doc['created_at'] != null) && (doc['one_month_warning_sent'] == null)) {\n emit(doc['created_at'], 1);\n } \n}\n",
+ "reduce": "function(key, values, rereduce) { return sum(values); }"
+ },
+ "by_created_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'User') && (doc['created_at'] != null)) {\n emit(doc['created_at'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ }
+ },
+ "couchrest-hash": "61840ab3ec0f94ef8bbd6dd208db3b70"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/manifests/add_users.pp b/puppet/modules/site_couchdb/manifests/add_users.pp
new file mode 100644
index 00000000..f9ea7349
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/add_users.pp
@@ -0,0 +1,54 @@
+class site_couchdb::add_users {
+
+ # Couchdb users
+
+ ## leap_mx couchdb user
+ ## read: identities
+ ## write access to user-<uuid>
+ couchdb::add_user { $site_couchdb::couchdb_leap_mx_user:
+ roles => '["identities"]',
+ pw => $site_couchdb::couchdb_leap_mx_pw,
+ salt => $site_couchdb::couchdb_leap_mx_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## nickserver couchdb user
+ ## r: identities
+ ## r/w: keycache
+ couchdb::add_user { $site_couchdb::couchdb_nickserver_user:
+ roles => '["identities","keycache"]',
+ pw => $site_couchdb::couchdb_nickserver_pw,
+ salt => $site_couchdb::couchdb_nickserver_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## soledad couchdb user
+ ## r/w: user-<uuid>, shared
+ ## read: tokens
+ couchdb::add_user { $site_couchdb::couchdb_soledad_user:
+ roles => '["tokens"]',
+ pw => $site_couchdb::couchdb_soledad_pw,
+ salt => $site_couchdb::couchdb_soledad_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tapicero couchdb user
+ ## admin: needs to be able to create user-<uuid> databases
+ ## read: users
+ couchdb::add_user { $site_couchdb::couchdb_tapicero_user:
+ roles => '["users"]',
+ pw => $site_couchdb::couchdb_tapicero_pw,
+ salt => $site_couchdb::couchdb_tapicero_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## webapp couchdb user
+ ## read/write: users, tokens, sessions, tickets, identities, customer
+ couchdb::add_user { $site_couchdb::couchdb_webapp_user:
+ roles => '["tokens","identities","users"]',
+ pw => $site_couchdb::couchdb_webapp_pw,
+ salt => $site_couchdb::couchdb_webapp_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/backup.pp b/puppet/modules/site_couchdb/manifests/backup.pp
new file mode 100644
index 00000000..8b5aa6ea
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/backup.pp
@@ -0,0 +1,23 @@
+class site_couchdb::backup {
+
+ # general backupninja config
+ backupninja::config { 'backupninja_config':
+ usecolors => false,
+ }
+
+ # dump all DBs locally to /var/backups/couchdb once a day
+ backupninja::sh { 'couchdb_backup':
+ command_string => "cd /srv/leap/couchdb/scripts \n./couchdb_dumpall.sh"
+ }
+
+ # Deploy /etc/leap/couchdb_scripts_defaults.conf so we can exclude
+ # some databases
+
+ file { '/etc/leap/couchdb_scripts_defaults.conf':
+ source => 'puppet:///modules/site_couchdb/couchdb_scripts_defaults.conf',
+ mode => '0644',
+ owner => 'root',
+ group => 'root',
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp b/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp
index 241a4914..97e85785 100644
--- a/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp
+++ b/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp
@@ -1,5 +1,8 @@
class site_couchdb::bigcouch::add_nodes {
# loop through neighbors array and add nodes
$nodes = $::site_couchdb::bigcouch_config['neighbors']
- couchdb::bigcouch::add_node { $nodes: }
+
+ couchdb::bigcouch::add_node { $nodes:
+ require => Couchdb::Query::Setup['localhost']
+ }
}
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch/compaction.pp b/puppet/modules/site_couchdb/manifests/bigcouch/compaction.pp
new file mode 100644
index 00000000..84aab4ef
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/bigcouch/compaction.pp
@@ -0,0 +1,8 @@
+class site_couchdb::bigcouch::compaction {
+ cron {
+ 'compact_all_shards':
+ command => '/srv/leap/couchdb/scripts/bigcouch_compact_all_shards.sh >> /var/log/bigcouch/compaction.log',
+ hour => 3,
+ minute => 17;
+ }
+}
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp b/puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp
new file mode 100644
index 00000000..aa843e2e
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp
@@ -0,0 +1,11 @@
+class site_couchdb::bigcouch::settle_cluster {
+
+ exec { 'wait_for_couch_nodes':
+ command => '/srv/leap/bin/run_tests --test CouchDB/Are_configured_nodes_online? --retry 6 --wait 10'
+ }
+
+ exec { 'settle_cluster_membership':
+ command => '/srv/leap/bin/run_tests --test CouchDB/Is_cluster_membership_ok? --retry 6 --wait 10',
+ require => Exec['wait_for_couch_nodes']
+ }
+}
diff --git a/puppet/modules/site_couchdb/manifests/create_dbs.pp b/puppet/modules/site_couchdb/manifests/create_dbs.pp
new file mode 100644
index 00000000..41500d3a
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/create_dbs.pp
@@ -0,0 +1,70 @@
+class site_couchdb::create_dbs {
+
+ # Couchdb databases
+
+ ### customer database
+ ### r/w: webapp,
+ couchdb::create_db { 'customers':
+ members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## identities database
+ ## r: nickserver, leap_mx - needs to be restricted with a design document
+ ## r/w: webapp
+ couchdb::create_db { 'identities':
+ members => "{ \"names\": [], \"roles\": [\"identities\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## keycache database
+ ## r/w: nickserver
+ couchdb::create_db { 'keycache':
+ members => "{ \"names\": [], \"roles\": [\"keycache\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## sessions database
+ ## r/w: webapp
+ couchdb::create_db { 'sessions':
+ members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## shared database
+ ## r/w: soledad
+ couchdb::create_db { 'shared':
+ members => "{ \"names\": [\"$site_couchdb::couchdb_soledad_user\"], \"roles\": [] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tickets database
+ ## r/w: webapp
+ couchdb::create_db { 'tickets':
+ members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tokens database
+ ## r: soledad - needs to be restricted with a design document
+ ## r/w: webapp
+ couchdb::create_db { 'tokens':
+ members => "{ \"names\": [], \"roles\": [\"tokens\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## users database
+ ## r/w: webapp
+ couchdb::create_db { 'users':
+ members => "{ \"names\": [], \"roles\": [\"users\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## messages db
+ ## store messages to the clients such as payment reminders
+ ## r/w: webapp
+ couchdb::create_db { 'messages':
+ members => "{ \"names\": [\"$site_couchdb::couchdb_webapp_user\"], \"roles\": [] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+}
diff --git a/puppet/modules/site_couchdb/manifests/designs.pp b/puppet/modules/site_couchdb/manifests/designs.pp
new file mode 100644
index 00000000..9e88de64
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/designs.pp
@@ -0,0 +1,20 @@
+class site_couchdb::designs {
+
+ Class['site_couchdb::create_dbs']
+ -> Class['site_couchdb::designs']
+
+ file { '/srv/leap/couchdb/designs':
+ ensure => directory,
+ source => 'puppet:///modules/site_couchdb/designs',
+ recurse => true,
+ purge => true,
+ mode => '0755'
+ }
+
+ exec { '/srv/leap/couchdb/scripts/load_design_documents.sh':
+ require => Vcsrepo['/srv/leap/couchdb/scripts'],
+ refreshonly => false
+ }
+
+}
+
diff --git a/puppet/modules/site_couchdb/manifests/init.pp b/puppet/modules/site_couchdb/manifests/init.pp
index 802f3224..3614661d 100644
--- a/puppet/modules/site_couchdb/manifests/init.pp
+++ b/puppet/modules/site_couchdb/manifests/init.pp
@@ -1,83 +1,118 @@
class site_couchdb {
tag 'leap_service'
- $x509 = hiera('x509')
- $key = $x509['key']
- $cert = $x509['cert']
- $ca = $x509['ca_cert']
-
- $couchdb_config = hiera('couch')
- $couchdb_users = $couchdb_config['users']
- $couchdb_admin = $couchdb_users['admin']
- $couchdb_admin_user = $couchdb_admin['username']
- $couchdb_admin_pw = $couchdb_admin['password']
- $couchdb_admin_salt = $couchdb_admin['salt']
- $couchdb_webapp = $couchdb_users['webapp']
- $couchdb_webapp_user = $couchdb_webapp['username']
- $couchdb_webapp_pw = $couchdb_webapp['password']
- $couchdb_webapp_salt = $couchdb_webapp['salt']
- $couchdb_soledad = $couchdb_users['soledad']
- $couchdb_soledad_user = $couchdb_soledad['username']
- $couchdb_soledad_pw = $couchdb_soledad['password']
- $couchdb_soledad_salt = $couchdb_soledad['salt']
-
- $bigcouch_config = $couchdb_config['bigcouch']
- $bigcouch_cookie = $bigcouch_config['cookie']
-
- $ednp_port = $bigcouch_config['ednp_port']
+ $couchdb_config = hiera('couch')
+ $couchdb_users = $couchdb_config['users']
+
+ $couchdb_admin = $couchdb_users['admin']
+ $couchdb_admin_user = $couchdb_admin['username']
+ $couchdb_admin_pw = $couchdb_admin['password']
+ $couchdb_admin_salt = $couchdb_admin['salt']
+
+ $couchdb_leap_mx = $couchdb_users['leap_mx']
+ $couchdb_leap_mx_user = $couchdb_leap_mx['username']
+ $couchdb_leap_mx_pw = $couchdb_leap_mx['password']
+ $couchdb_leap_mx_salt = $couchdb_leap_mx['salt']
+
+ $couchdb_nickserver = $couchdb_users['nickserver']
+ $couchdb_nickserver_user = $couchdb_nickserver['username']
+ $couchdb_nickserver_pw = $couchdb_nickserver['password']
+ $couchdb_nickserver_salt = $couchdb_nickserver['salt']
+
+ $couchdb_soledad = $couchdb_users['soledad']
+ $couchdb_soledad_user = $couchdb_soledad['username']
+ $couchdb_soledad_pw = $couchdb_soledad['password']
+ $couchdb_soledad_salt = $couchdb_soledad['salt']
+
+ $couchdb_tapicero = $couchdb_users['tapicero']
+ $couchdb_tapicero_user = $couchdb_tapicero['username']
+ $couchdb_tapicero_pw = $couchdb_tapicero['password']
+ $couchdb_tapicero_salt = $couchdb_tapicero['salt']
+
+ $couchdb_webapp = $couchdb_users['webapp']
+ $couchdb_webapp_user = $couchdb_webapp['username']
+ $couchdb_webapp_pw = $couchdb_webapp['password']
+ $couchdb_webapp_salt = $couchdb_webapp['salt']
+
+ $couchdb_backup = $couchdb_config['backup']
+
+ $bigcouch_config = $couchdb_config['bigcouch']
+ $bigcouch_cookie = $bigcouch_config['cookie']
+
+ $ednp_port = $bigcouch_config['ednp_port']
class { 'couchdb':
- bigcouch => true,
- admin_pw => $couchdb_admin_pw,
- admin_salt => $couchdb_admin_salt,
- bigcouch_cookie => $bigcouch_cookie,
- ednp_port => $ednp_port
+ bigcouch => true,
+ admin_pw => $couchdb_admin_pw,
+ admin_salt => $couchdb_admin_salt,
+ bigcouch_cookie => $bigcouch_cookie,
+ ednp_port => $ednp_port,
+ chttpd_bind_address => '127.0.0.1'
+ }
+
+ # ensure that we don't have leftovers from previous installations
+ # where we installed the cloudant bigcouch package
+ # https://leap.se/code/issues/4971
+ class { 'couchdb::bigcouch::package::cloudant':
+ ensure => absent
}
- class { 'couchdb::bigcouch::package::cloudant': }
+ Class['site_config::default']
+ -> Class['couchdb::bigcouch::package::cloudant']
+ -> Service['shorewall']
+ -> Class['site_couchdb::stunnel']
+ -> Service['couchdb']
+ -> File['/root/.netrc']
+ -> Class['site_couchdb::bigcouch::add_nodes']
+ -> Class['site_couchdb::bigcouch::settle_cluster']
+ -> Class['site_couchdb::create_dbs']
+ -> Class['site_couchdb::add_users']
- Class ['couchdb::bigcouch::package::cloudant']
- -> Service ['couchdb']
- -> Class ['site_couchdb::bigcouch::add_nodes']
- -> Couchdb::Create_db['users']
- -> Couchdb::Create_db['tokens']
- -> Couchdb::Add_user[$couchdb_webapp_user]
- -> Couchdb::Add_user[$couchdb_soledad_user]
+ # /etc/couchdb/couchdb.netrc is deployed by couchdb::query::setup
+  # we symlink this to /root/.netrc for couchdb_scripts (e.g. backup),
+  # which also makes life easier for the admin (i.e. using curl/wget
+  # without passing credentials)
+ file {
+ '/root/.netrc':
+ ensure => link,
+ target => '/etc/couchdb/couchdb.netrc';
- class { 'site_couchdb::stunnel':
- key => $key,
- cert => $cert,
- ca => $ca
+ '/srv/leap/couchdb':
+ ensure => directory
}
- class { 'site_couchdb::bigcouch::add_nodes': }
-
couchdb::query::setup { 'localhost':
user => $couchdb_admin_user,
pw => $couchdb_admin_pw,
}
- # Populate couchdb
- couchdb::add_user { $couchdb_webapp_user:
- roles => '["auth"]',
- pw => $couchdb_webapp_pw,
- salt => $couchdb_webapp_salt
+ vcsrepo { '/srv/leap/couchdb/scripts':
+ ensure => present,
+ provider => git,
+ source => 'https://leap.se/git/couchdb_scripts',
+ revision => 'origin/master',
+ require => File['/srv/leap/couchdb']
}
- couchdb::add_user { $couchdb_soledad_user:
- roles => '["auth"]',
- pw => $couchdb_soledad_pw,
- salt => $couchdb_soledad_salt
- }
-
- couchdb::create_db { 'users':
- readers => "{ \"names\": [\"$couchdb_webapp_user\"], \"roles\": [] }"
- }
+ include site_couchdb::stunnel
+ include site_couchdb::bigcouch::add_nodes
+ include site_couchdb::bigcouch::settle_cluster
+ include site_couchdb::create_dbs
+ include site_couchdb::add_users
+ include site_couchdb::designs
+ include site_couchdb::logrotate
+ include site_couchdb::bigcouch::compaction
- couchdb::create_db { 'tokens':
- readers => "{ \"names\": [], \"roles\": [\"auth\"] }"
- }
+ if $couchdb_backup { include site_couchdb::backup }
include site_shorewall::couchdb
include site_shorewall::couchdb::bigcouch
+
+ include site_check_mk::agent::couchdb
+ include site_check_mk::agent::tapicero
+
+ file { '/var/log/bigcouch':
+ ensure => directory
+ }
+
}
diff --git a/puppet/modules/site_couchdb/manifests/logrotate.pp b/puppet/modules/site_couchdb/manifests/logrotate.pp
new file mode 100644
index 00000000..e1039d49
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/logrotate.pp
@@ -0,0 +1,12 @@
+class site_couchdb::logrotate {
+
+ augeas {
+ 'logrotate_bigcouch':
+ context => '/files/etc/logrotate.d/bigcouch/rule',
+ changes => [ 'set file /opt/bigcouch/var/log/*.log', 'set rotate 7',
+ 'set schedule daily', 'set compress compress',
+ 'set missingok missingok', 'set ifempty notifempty',
+ 'set copytruncate copytruncate' ]
+ }
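+  # For reference, the rule above should render an /etc/logrotate.d/bigcouch
+  # stanza roughly like (a sketch, assuming the standard augeas logrotate lens):
+  #   /opt/bigcouch/var/log/*.log {
+  #     daily
+  #     rotate 7
+  #     compress
+  #     missingok
+  #     notifempty
+  #     copytruncate
+  #   }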
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/stunnel.pp b/puppet/modules/site_couchdb/manifests/stunnel.pp
index d982013e..91f1e3aa 100644
--- a/puppet/modules/site_couchdb/manifests/stunnel.pp
+++ b/puppet/modules/site_couchdb/manifests/stunnel.pp
@@ -1,4 +1,4 @@
-class site_couchdb::stunnel ($key, $cert, $ca) {
+class site_couchdb::stunnel {
$stunnel = hiera('stunnel')
@@ -18,22 +18,16 @@ class site_couchdb::stunnel ($key, $cert, $ca) {
$ednp_server_connect = $ednp_server['connect']
$ednp_clients = $stunnel['ednp_clients']
+
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+
include x509::variables
- $cert_name = 'leap_couchdb'
- $ca_name = 'leap_ca'
- $ca_path = "${x509::variables::local_CAs}/${ca_name}.crt"
- $cert_path = "${x509::variables::certs}/${cert_name}.crt"
- $key_path = "${x509::variables::keys}/${cert_name}.key"
-
- # basic setup: ensure cert, key, ca files are in place, and some generic
- # stunnel things are done
- class { 'site_stunnel::setup':
- cert_name => $cert_name,
- key => $key,
- cert => $cert,
- ca_name => $ca_name,
- ca => $ca
- }
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
# setup a stunnel server for the webapp to connect to couchdb
stunnel::service { 'couch_server':
@@ -46,7 +40,11 @@ class site_couchdb::stunnel ($key, $cert, $ca) {
verify => '2',
pid => '/var/run/stunnel4/couchserver.pid',
rndfile => '/var/lib/stunnel4/.rnd',
- debuglevel => '4'
+ debuglevel => '4',
+ require => [
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ];
}
@@ -62,7 +60,11 @@ class site_couchdb::stunnel ($key, $cert, $ca) {
verify => '2',
pid => '/var/run/stunnel4/epmd_server.pid',
rndfile => '/var/lib/stunnel4/.rnd',
- debuglevel => '4'
+ debuglevel => '4',
+ require => [
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ];
}
# setup stunnel clients for Erlang Port Mapper Daemon (epmd) to connect
@@ -88,7 +90,11 @@ class site_couchdb::stunnel ($key, $cert, $ca) {
verify => '2',
pid => '/var/run/stunnel4/ednp_server.pid',
rndfile => '/var/lib/stunnel4/.rnd',
- debuglevel => '4'
+ debuglevel => '4',
+ require => [
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ];
}
# setup stunnel clients for Erlang Distributed Node Protocol (ednp) to connect
@@ -101,4 +107,6 @@ class site_couchdb::stunnel ($key, $cert, $ca) {
}
create_resources(site_stunnel::clients, $ednp_clients, $ednp_client_defaults)
+
+ include site_check_mk::agent::stunnel
}
diff --git a/puppet/modules/site_haproxy/files/haproxy-stats.cfg b/puppet/modules/site_haproxy/files/haproxy-stats.cfg
new file mode 100644
index 00000000..e6335ba2
--- /dev/null
+++ b/puppet/modules/site_haproxy/files/haproxy-stats.cfg
@@ -0,0 +1,6 @@
+# provide access to stats for the nagios plugin
+listen stats 127.0.0.1:8000
+ mode http
+ stats enable
+ stats uri /haproxy
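+  # for example (illustration only), the stats CSV export can be fetched with:
+  #   curl -s 'http://127.0.0.1:8000/haproxy;csv'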
+
diff --git a/puppet/modules/site_haproxy/manifests/init.pp b/puppet/modules/site_haproxy/manifests/init.pp
index ace88a7b..1a681373 100644
--- a/puppet/modules/site_haproxy/manifests/init.pp
+++ b/puppet/modules/site_haproxy/manifests/init.pp
@@ -2,7 +2,6 @@ class site_haproxy {
class { 'haproxy':
enable => true,
- version => '1.4.23-0.1~leap60+1',
manage_service => true,
global_options => {
'log' => '127.0.0.1 local0',
@@ -23,4 +22,11 @@ class site_haproxy {
}
}
+ # monitor haproxy
+ concat::fragment { 'stats':
+ target => '/etc/haproxy/haproxy.cfg',
+ order => '90',
+ source => 'puppet:///modules/site_haproxy/haproxy-stats.cfg';
+ }
+ include site_check_mk::agent::haproxy
}
diff --git a/puppet/modules/site_mx/manifests/couchdb.pp b/puppet/modules/site_mx/manifests/couchdb.pp
new file mode 100644
index 00000000..b1f3bd02
--- /dev/null
+++ b/puppet/modules/site_mx/manifests/couchdb.pp
@@ -0,0 +1,23 @@
+class site_mx::couchdb {
+
+ $stunnel = hiera('stunnel')
+ $couch_client = $stunnel['couch_client']
+ $couch_client_connect = $couch_client['connect']
+
+ include x509::variables
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+ include site_stunnel
+
+ $couchdb_stunnel_client_defaults = {
+ 'connect_port' => $couch_client_connect,
+ 'client' => true,
+ 'cafile' => $ca_path,
+ 'key' => $key_path,
+ 'cert' => $cert_path,
+ }
+
+ create_resources(site_stunnel::clients, $couch_client, $couchdb_stunnel_client_defaults)
+}
diff --git a/puppet/modules/site_mx/manifests/haproxy.pp b/puppet/modules/site_mx/manifests/haproxy.pp
new file mode 100644
index 00000000..988eeaf3
--- /dev/null
+++ b/puppet/modules/site_mx/manifests/haproxy.pp
@@ -0,0 +1,14 @@
+class site_mx::haproxy {
+
+ include site_haproxy
+
+ $haproxy = hiera('haproxy')
+ $local_ports = $haproxy['local_ports']
+
+ # Template uses $global_options, $defaults_options
+ concat::fragment { 'leap_haproxy_webapp_couchdb':
+ target => '/etc/haproxy/haproxy.cfg',
+ order => '20',
+ content => template('site_webapp/haproxy_couchdb.cfg.erb'),
+ }
+}
diff --git a/puppet/modules/site_mx/manifests/init.pp b/puppet/modules/site_mx/manifests/init.pp
new file mode 100644
index 00000000..3949c787
--- /dev/null
+++ b/puppet/modules/site_mx/manifests/init.pp
@@ -0,0 +1,19 @@
+class site_mx {
+ tag 'leap_service'
+ Class['site_config::default'] -> Class['site_mx']
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include site_config::x509::client_ca::ca
+ include site_config::x509::client_ca::key
+
+
+ include site_postfix::mx
+ include site_mx::haproxy
+ include site_shorewall::mx
+ include site_shorewall::service::smtp
+ include site_mx::couchdb
+ include leap_mx
+ include site_check_mk::agent::mx
+}
diff --git a/puppet/modules/site_nagios/files/configs/Debian/nagios.cfg b/puppet/modules/site_nagios/files/configs/Debian/nagios.cfg
index 753d1610..e46ebf62 100644
--- a/puppet/modules/site_nagios/files/configs/Debian/nagios.cfg
+++ b/puppet/modules/site_nagios/files/configs/Debian/nagios.cfg
@@ -1,6 +1,6 @@
##############################################################################
#
-# NAGIOS.CFG - Sample Main Config File for Nagios
+# NAGIOS.CFG - Sample Main Config File for Nagios
#
#
##############################################################################
@@ -8,7 +8,7 @@
# LOG FILE
# This is the main log file where service and host events are logged
-# for historical purposes. This should be the first option specified
+# for historical purposes. This should be the first option specified
# in the config file!!!
log_file=/var/log/nagios3/nagios.log
@@ -25,6 +25,9 @@ log_file=/var/log/nagios3/nagios.log
# Puppet-managed configuration files
cfg_dir=/etc/nagios3/conf.d
+# check-mk managed configuration files
+cfg_dir=/etc/nagios3/local
+
# Debian also defaults to using the check commands defined by the debian
# nagios-plugins package
cfg_dir=/etc/nagios-plugins/config
@@ -33,7 +36,7 @@ cfg_dir=/etc/nagios-plugins/config
# OBJECT CACHE FILE
# This option determines where object definitions are cached when
-# Nagios starts/restarts. The CGIs read object definitions from
+# Nagios starts/restarts. The CGIs read object definitions from
# this cache file (rather than looking at the object config files
# directly) in order to prevent inconsistencies that can occur
# when the config files are modified after Nagios starts.
@@ -49,7 +52,7 @@ object_cache_file=/var/cache/nagios3/objects.cache
# file. You can then start Nagios with the -u option to have it read
# object definitions from this precached file, rather than the standard
# object configuration files (see the cfg_file and cfg_dir options above).
-# Using a precached object file can speed up the time needed to (re)start
+# Using a precached object file can speed up the time needed to (re)start
# the Nagios process if you've got a large and/or complex configuration.
# Read the documentation section on optimizing Nagios to find our more
# about how this feature works.
@@ -83,7 +86,7 @@ status_file=/var/cache/nagios3/status.dat
# STATUS FILE UPDATE INTERVAL
# This option determines the frequency (in seconds) that
-# Nagios will periodically dump program, host, and
+# Nagios will periodically dump program, host, and
# service status data.
status_update_interval=10
@@ -91,7 +94,7 @@ status_update_interval=10
# NAGIOS USER
-# This determines the effective user that Nagios should run as.
+# This determines the effective user that Nagios should run as.
# You can either supply a username or a UID.
nagios_user=nagios
@@ -99,7 +102,7 @@ nagios_user=nagios
# NAGIOS GROUP
-# This determines the effective group that Nagios should run as.
+# This determines the effective group that Nagios should run as.
# You can either supply a group name or a GID.
nagios_group=nagios
@@ -125,7 +128,7 @@ check_external_commands=1
# Nagios to check for external commands every minute. If you specify a
# number followed by an "s" (i.e. 15s), this will be interpreted to mean
# actual seconds rather than a multiple of the interval_length variable.
-# Note: In addition to reading the external command file at regularly
+# Note: In addition to reading the external command file at regularly
# scheduled intervals, Nagios will also check for external commands after
# event handlers are executed.
# NOTE: Setting this value to -1 causes Nagios to check the external
@@ -140,7 +143,7 @@ command_check_interval=-1
# This is the file that Nagios checks for external command requests.
# It is also where the command CGI will write commands that are submitted
# by users, so it must be writeable by the user that the web server
-# is running as (usually 'nobody'). Permissions should be set at the
+# is running as (usually 'nobody'). Permissions should be set at the
# directory level instead of on the file, as the file is deleted every
# time its contents are processed.
# Debian Users: In case you didn't read README.Debian yet, _NOW_ is the
@@ -152,9 +155,9 @@ command_file=/var/lib/nagios3/rw/nagios.cmd
# EXTERNAL COMMAND BUFFER SLOTS
# This settings is used to tweak the number of items or "slots" that
-# the Nagios daemon should allocate to the buffer that holds incoming
-# external commands before they are processed. As external commands
-# are processed by the daemon, they are removed from the buffer.
+# the Nagios daemon should allocate to the buffer that holds incoming
+# external commands before they are processed. As external commands
+# are processed by the daemon, they are removed from the buffer.
external_command_buffer_slots=4096
@@ -232,12 +235,12 @@ event_broker_options=-1
# w = Weekly rotation (midnight on Saturday evening)
# m = Monthly rotation (midnight last day of month)
-log_rotation_method=d
+log_rotation_method=n
# LOG ARCHIVE PATH
-# This is the directory where archived (rotated) log files should be
+# This is the directory where archived (rotated) log files should be
# placed (assuming you've chosen to do log rotation).
log_archive_path=/var/log/nagios3/archives
@@ -248,7 +251,7 @@ log_archive_path=/var/log/nagios3/archives
# If you want messages logged to the syslog facility, as well as the
# Nagios log file set this option to 1. If not, set it to 0.
-use_syslog=1
+use_syslog=0
@@ -400,7 +403,7 @@ max_host_check_spread=30
# MAXIMUM CONCURRENT SERVICE CHECKS
-# This option allows you to specify the maximum number of
+# This option allows you to specify the maximum number of
# service checks that can be run in parallel at any given time.
# Specifying a value of 1 for this variable essentially prevents
# any service checks from being parallelized. A value of 0
@@ -422,7 +425,7 @@ check_result_reaper_frequency=10
# MAX CHECK RESULT REAPER TIME
# This is the max amount of time (in seconds) that a single
-# check result reaper event will be allowed to run before
+# check result reaper event will be allowed to run before
# returning control back to Nagios so it can perform other
# duties.
@@ -436,7 +439,7 @@ max_check_result_reaper_time=30
# service checks that have not yet been processed.
#
# Note: Make sure that only one instance of Nagios has access
-# to this directory!
+# to this directory!
check_result_path=/var/lib/nagios3/spool/checkresults
@@ -445,7 +448,7 @@ check_result_path=/var/lib/nagios3/spool/checkresults
# MAX CHECK RESULT FILE AGE
# This option determines the maximum age (in seconds) which check
-# result files are considered to be valid. Files older than this
+# result files are considered to be valid. Files older than this
# threshold will be mercilessly deleted without further processing.
max_check_result_file_age=3600
@@ -507,14 +510,14 @@ enable_predictive_service_dependency_checks=1
# SOFT STATE DEPENDENCIES
-# This option determines whether or not Nagios will use soft state
-# information when checking host and service dependencies. Normally
-# Nagios will only use the latest hard host or service state when
+# This option determines whether or not Nagios will use soft state
+# information when checking host and service dependencies. Normally
+# Nagios will only use the latest hard host or service state when
# checking dependencies. If you want it to use the latest state (regardless
-# of whether its a soft or hard state type), enable this option.
+# of whether its a soft or hard state type), enable this option.
# Values:
-# 0 = Don't use soft state dependencies (default)
-# 1 = Use soft state dependencies
+# 0 = Don't use soft state dependencies (default)
+# 1 = Use soft state dependencies
soft_state_dependencies=0
@@ -532,7 +535,7 @@ soft_state_dependencies=0
# This option determines whether or not Nagios will attempt to
# automatically reschedule active host and service checks to
# "smooth" them out over time. This can help balance the load on
-# the monitoring server.
+# the monitoring server.
# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
@@ -595,7 +598,7 @@ perfdata_timeout=5
# This setting determines whether or not Nagios will save state
# information for services and hosts before it shuts down. Upon
# startup Nagios will reload all saved service and host state
-# information before starting to monitor. This is useful for
+# information before starting to monitor. This is useful for
# maintaining long-term data on state statistics, etc, but will
# slow Nagios down a bit when it (re)starts. Since its only
# a one-time penalty, I think its well worth the additional
@@ -607,7 +610,7 @@ retain_state_information=1
# STATE RETENTION FILE
# This is the file that Nagios should use to store host and
-# service state information before it shuts down. The state
+# service state information before it shuts down. The state
# information in this file is also read immediately prior to
# starting to monitor the network when Nagios is restarted.
# This file is used only if the preserve_state_information
@@ -630,7 +633,7 @@ retention_update_interval=60
# USE RETAINED PROGRAM STATE
-# This setting determines whether or not Nagios will set
+# This setting determines whether or not Nagios will set
# program status variables based on the values saved in the
# retention file. If you want to use retained program status
# information, set this value to 1. If not, set this value
@@ -657,7 +660,7 @@ use_retained_scheduling_info=1
# program restarts.
#
# The values of the masks are bitwise ANDs of values specified
-# by the "MODATTR_" definitions found in include/common.h.
+# by the "MODATTR_" definitions found in include/common.h.
# For example, if you do not want the current enabled/disabled state
# of flap detection and event handlers for hosts to be retained, you
# would use a value of 24 for the host attribute mask...
@@ -708,7 +711,7 @@ use_aggressive_host_checking=0
# SERVICE CHECK EXECUTION OPTION
# This determines whether or not Nagios will actively execute
-# service checks when it initially starts. If this option is
+# service checks when it initially starts. If this option is
# disabled, checks are not actively made, but Nagios can still
# receive and process passive check results that come in. Unless
# you're implementing redundant hosts or have a special need for
@@ -730,7 +733,7 @@ accept_passive_service_checks=1
# HOST CHECK EXECUTION OPTION
# This determines whether or not Nagios will actively execute
-# host checks when it initially starts. If this option is
+# host checks when it initially starts. If this option is
# disabled, checks are not actively made, but Nagios can still
# receive and process passive check results that come in. Unless
# you're implementing redundant hosts or have a special need for
@@ -787,7 +790,7 @@ process_performance_data=0
# These commands are run after every host and service check is
# performed. These commands are executed only if the
# enable_performance_data option (above) is set to 1. The command
-# argument is the short name of a command definition that you
+# argument is the short name of a command definition that you
# define in your host configuration file. Read the HTML docs for
# more information on performance data.
@@ -867,7 +870,7 @@ obsess_over_services=0
# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
# This is the command that is run for every service check that is
# processed by Nagios. This command is executed only if the
-# obsess_over_services option (above) is set to 1. The command
+# obsess_over_services option (above) is set to 1. The command
# argument is the short name of a command definition that you
# define in your host configuration file. Read the HTML docs for
# more information on implementing distributed monitoring.
@@ -891,7 +894,7 @@ obsess_over_hosts=0
# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
# This is the command that is run for every host check that is
# processed by Nagios. This command is executed only if the
-# obsess_over_hosts option (above) is set to 1. The command
+# obsess_over_hosts option (above) is set to 1. The command
# argument is the short name of a command definition that you
# define in your host configuration file. Read the HTML docs for
# more information on implementing distributed monitoring.
@@ -930,9 +933,9 @@ passive_host_checks_are_soft=0
# ORPHANED HOST/SERVICE CHECK OPTIONS
-# These options determine whether or not Nagios will periodically
+# These options determine whether or not Nagios will periodically
# check for orphaned host service checks. Since service checks are
-# not rescheduled until the results of their previous execution
+# not rescheduled until the results of their previous execution
# instance are processed, there exists a possibility that some
# checks may never get rescheduled. A similar situation exists for
# host checks, although the exact scheduling details differ a bit
@@ -1000,9 +1003,9 @@ additional_freshness_latency=15
# FLAP DETECTION OPTION
# This option determines whether or not Nagios will try
-# and detect hosts and services that are "flapping".
+# and detect hosts and services that are "flapping".
# Flapping occurs when a host or service changes between
-# states too frequently. When Nagios detects that a
+# states too frequently. When Nagios detects that a
# host or service is flapping, it will temporarily suppress
# notifications for that host/service until it stops
# flapping. Flap detection is very experimental, so read
@@ -1046,7 +1049,7 @@ date_format=iso8601
# the system configured timezone.
#
# NOTE: In order to display the correct timezone in the CGIs, you
-# will also need to alter the Apache directives for the CGI path
+# will also need to alter the Apache directives for the CGI path
# to include your timezone. Example:
#
# <Directory "/usr/local/nagios/sbin/">
@@ -1083,7 +1086,7 @@ enable_embedded_perl=1
# This option determines whether or not Nagios will process Perl plugins
# and scripts with the embedded Perl interpreter if the plugins/scripts
# do not explicitly indicate whether or not it is okay to do so. Read
-# the HTML documentation on the embedded Perl interpreter for more
+# the HTML documentation on the embedded Perl interpreter for more
# information on how this option works.
use_embedded_perl_implicitly=1
@@ -1130,7 +1133,7 @@ use_regexp_matching=0
# "TRUE" REGULAR EXPRESSION MATCHING
-# This option controls whether or not "true" regular expression
+# This option controls whether or not "true" regular expression
# matching takes place in the object config files. This option
# only has an effect if regular expression matching is enabled
# (see above). If this option is DISABLED, regular expression
@@ -1183,7 +1186,7 @@ use_large_installation_tweaks=0
# This option determines whether or not Nagios will make all standard
# macros available as environment variables when host/service checks
# and system commands (event handlers, notifications, etc.) are
-# executed. Enabling this option can cause performance issues in
+# executed. Enabling this option can cause performance issues in
# large installations, as it will consume a bit more memory and (more
# importantly) consume more CPU.
# Values: 1 - Enable environment variable macros (default)
@@ -1224,7 +1227,7 @@ enable_environment_macros=1
# This option determines how much (if any) debugging information will
# be written to the debug file. OR values together to log multiple
# types of information.
-# Values:
+# Values:
# -1 = Everything
# 0 = Nothing
# 1 = Functions
diff --git a/puppet/modules/site_nagios/manifests/add_host.pp b/puppet/modules/site_nagios/manifests/add_host.pp
deleted file mode 100644
index 498552b5..00000000
--- a/puppet/modules/site_nagios/manifests/add_host.pp
+++ /dev/null
@@ -1,31 +0,0 @@
-define site_nagios::add_host {
- $nagios_host = $name
- $nagios_hostname = $name['domain_internal']
- $nagios_ip = $name['ip_address']
- $nagios_services = $name['services']
- $nagios_openvpn_gw = $name['openvpn_gateway_address']
-
- # Add Nagios host
- nagios_host { $nagios_hostname:
- address => $nagios_ip,
- use => 'generic-host',
- }
-
- # Add Nagios service
-
- # First, we need to turn the serice array into hash, using a "hash template"
- # see https://github.com/ashak/puppet-resource-looping
- $nagios_service_hashpart = {
- 'hostname' => $nagios_hostname,
- 'ip_address' => $nagios_ip,
- 'openvpn_gw' => $nagios_openvpn_gw,
- }
- $dynamic_parameters = {
- 'service' => '%s'
- }
- $nagios_servicename = "${nagios_hostname}_%s"
-
- $nagios_service_hash = create_resources_hash_from($nagios_servicename, $nagios_services, $nagios_service_hashpart, $dynamic_parameters)
-
- create_resources ( site_nagios::add_service, $nagios_service_hash )
-}
diff --git a/puppet/modules/site_nagios/manifests/add_host_services.pp b/puppet/modules/site_nagios/manifests/add_host_services.pp
new file mode 100644
index 00000000..279809d1
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/add_host_services.pp
@@ -0,0 +1,28 @@
+define site_nagios::add_host_services (
+ $domain_full_suffix,
+ $domain_internal,
+ $ip_address,
+ $services,
+ $ssh_port,
+ $openvpn_gateway_address='' ) {
+
+ $nagios_hostname = $domain_internal
+
+ # Add Nagios service
+
+  # First, we need to turn the service array into a hash, using a "hash template"
+ # see https://github.com/ashak/puppet-resource-looping
+ $nagios_service_hashpart = {
+ 'hostname' => $nagios_hostname,
+ 'ip_address' => $ip_address,
+ 'openvpn_gw' => $openvpn_gateway_address,
+ }
+ $dynamic_parameters = {
+ 'service' => '%s'
+ }
+ $nagios_servicename = "${nagios_hostname}_%s"
+
+ $nagios_service_hash = create_resources_hash_from($nagios_servicename, $services, $nagios_service_hashpart, $dynamic_parameters)
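+  # Illustration (hypothetical data, not from a real node): for
+  # $services = ['webapp'] on host "node1.example.org" this builds roughly
+  #   { 'node1.example.org_webapp' =>
+  #     { 'hostname' => 'node1.example.org', 'ip_address' => '1.2.3.4',
+  #       'openvpn_gw' => '', 'service' => 'webapp' } }
+  # which the create_resources call below turns into site_nagios::add_service
+  # resources.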
+
+ create_resources ( site_nagios::add_service, $nagios_service_hash )
+}
diff --git a/puppet/modules/site_nagios/manifests/add_service.pp b/puppet/modules/site_nagios/manifests/add_service.pp
index 6ef3cbf5..8d2a310b 100644
--- a/puppet/modules/site_nagios/manifests/add_service.pp
+++ b/puppet/modules/site_nagios/manifests/add_service.pp
@@ -3,19 +3,19 @@ define site_nagios::add_service (
case $service {
'webapp': {
- $check_command = 'check_https_cert'
- $service_description = 'Website Certificate'
+ nagios_service {
+ "${name}_cert":
+ use => 'generic-service',
+ check_command => 'check_https_cert',
+ service_description => 'Website Certificate',
+ host_name => $hostname;
+ "${name}_website":
+ use => 'generic-service',
+ check_command => 'check_https',
+ service_description => 'Website',
+ host_name => $hostname
+ }
}
- default: {
- #notice ("No Nagios service check for service \"$service\"")
- }
- }
-
- if ( $check_command != '' ) {
- nagios_service { $name:
- use => 'generic-service',
- check_command => $check_command,
- service_description => $service_description,
- host_name => $hostname }
+ default: {}
}
}
diff --git a/puppet/modules/site_nagios/manifests/init.pp b/puppet/modules/site_nagios/manifests/init.pp
index cab32905..eb08cdcb 100644
--- a/puppet/modules/site_nagios/manifests/init.pp
+++ b/puppet/modules/site_nagios/manifests/init.pp
@@ -1,4 +1,6 @@
class site_nagios {
tag 'leap_service'
+ Class['site_config::default'] -> Class['site_nagios']
+
include site_nagios::server
}
diff --git a/puppet/modules/site_nagios/manifests/server.pp b/puppet/modules/site_nagios/manifests/server.pp
index c114a39a..85443917 100644
--- a/puppet/modules/site_nagios/manifests/server.pp
+++ b/puppet/modules/site_nagios/manifests/server.pp
@@ -1,26 +1,34 @@
class site_nagios::server inherits nagios::base {
# First, purge old nagios config (see #1467)
- class { 'site_nagios::server::purge':
- stage => setup
- }
+ class { 'site_nagios::server::purge': }
- $nagios_hiera=hiera('nagios')
+ $nagios_hiera = hiera('nagios')
$nagiosadmin_pw = htpasswd_sha1($nagios_hiera['nagiosadmin_pw'])
- $hosts = $nagios_hiera['hosts']
+ $nagios_hosts = $nagios_hiera['hosts']
include nagios::defaults
include nagios::base
- #Class ['nagios'] -> Class ['nagios::defaults']
- class {'nagios::apache':
+ class {'nagios':
+    # don't manage the apache class from nagios, because we already include
+    # it in site_apache::common
+ httpd => 'absent',
allow_external_cmd => true,
stored_config => false,
- #before => Class ['nagios::defaults']
}
+ file { '/etc/apache2/conf.d/nagios3.conf':
+ ensure => link,
+ target => '/usr/share/doc/nagios3-common/examples/apache2.conf',
+ notify => Service['apache']
+ }
+
+ include site_apache::common
+ include site_apache::module::headers
+
File ['nagios_htpasswd'] {
source => undef,
- content => "nagiosadmin:$nagiosadmin_pw",
+ content => "nagiosadmin:${nagiosadmin_pw}",
mode => '0640',
}
@@ -33,6 +41,18 @@ class site_nagios::server inherits nagios::base {
group => 'nagios',
}
- site_nagios::add_host {$hosts:}
+ create_resources ( site_nagios::add_host_services, $nagios_hosts )
+
+ include site_nagios::server::apache
+ include site_check_mk::server
include site_shorewall::monitor
+
+ augeas {
+ 'logrotate_nagios':
+ context => '/files/etc/logrotate.d/nagios/rule',
+ changes => [ 'set file /var/log/nagios3/nagios.log', 'set rotate 7',
+ 'set schedule daily', 'set compress compress',
+ 'set missingok missingok', 'set ifempty notifempty',
+ 'set copytruncate copytruncate' ]
+ }
}
diff --git a/puppet/modules/site_nagios/manifests/server/apache.pp b/puppet/modules/site_nagios/manifests/server/apache.pp
new file mode 100644
index 00000000..8dbc7e9b
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/apache.pp
@@ -0,0 +1,7 @@
+class site_nagios::server::apache {
+ include x509::variables
+ include site_config::x509::commercial::cert
+ include site_config::x509::commercial::key
+ include site_config::x509::commercial::ca
+
+}
diff --git a/puppet/modules/site_nagios/manifests/server/purge.pp b/puppet/modules/site_nagios/manifests/server/purge.pp
index 39735cd3..6815a703 100644
--- a/puppet/modules/site_nagios/manifests/server/purge.pp
+++ b/puppet/modules/site_nagios/manifests/server/purge.pp
@@ -1,7 +1,19 @@
-class site_nagios::server::purge {
- exec {'purge_conf.d':
- command => '/bin/rm -rf /etc/nagios3/conf.d/*',
- onlyif => 'test -e /etc/nagios3/conf.d'
+class site_nagios::server::purge inherits nagios::base {
+  # we don't want /etc/nagios3 and /etc/nagios3/conf.d to get purged,
+  # because the check-mk-config-nagios3 package places its templates in
+  # /etc/nagios3/conf.d/check_mk, and check_mk -O updates its nagios
+  # config in /etc/nagios3/conf.d/check_mk
+ File['nagios_cfgdir'] {
+ purge => false
+ }
+ File['nagios_confd'] {
+ purge => false
}
+ # only purge files in the /etc/nagios3/conf.d/ dir, not in any subdir
+ exec {'purge_conf.d':
+ command => '/usr/bin/find /etc/nagios3/conf.d/ -maxdepth 1 -type f -exec rm {} \;',
+ onlyif => '/usr/bin/find /etc/nagios3/conf.d/ -maxdepth 1 -type f | grep -q "/etc/nagios3/conf.d"',
+ require => Package['nagios']
+ }
}
diff --git a/puppet/modules/site_nickserver/manifests/init.pp b/puppet/modules/site_nickserver/manifests/init.pp
index 7dfa2603..eaf90d55 100644
--- a/puppet/modules/site_nickserver/manifests/init.pp
+++ b/puppet/modules/site_nickserver/manifests/init.pp
@@ -1,37 +1,47 @@
#
-# TODO: currently, this is dependent on some things that are set up in site_webapp
+# TODO: currently, this is dependent on some things that are set up in
+# site_webapp
#
# (1) HAProxy -> couchdb
# (2) Apache
#
-# It would be good in the future to make nickserver installable independently of site_webapp.
+# It would be good in the future to make nickserver installable independently of
+# site_webapp.
#
class site_nickserver {
tag 'leap_service'
- include site_config::ruby
+ Class['site_config::default'] -> Class['site_nickserver']
+
+ include site_config::ruby::dev
#
# VARIABLES
#
$nickserver = hiera('nickserver')
- $nickserver_port = $nickserver['port'] # the port that public connects to (should be 6425)
- $nickserver_local_port = '64250' # the port that nickserver is actually running on
$nickserver_domain = $nickserver['domain']
+ $couchdb_user = $nickserver['couchdb_nickserver_user']['username']
+ $couchdb_password = $nickserver['couchdb_nickserver_user']['password']
+
+  # the port that the public connects to (should be 6425)
+ $nickserver_port = $nickserver['port']
+ # the port that nickserver is actually running on
+ $nickserver_local_port = '64250'
- $couchdb_user = $nickserver['couchdb_user']['username']
- $couchdb_password = $nickserver['couchdb_user']['password']
- $couchdb_host = 'localhost' # couchdb is available on localhost via haproxy, which is bound to 4096.
- $couchdb_port = '4096' # See site_webapp/templates/haproxy_couchdb.cfg.erg
+ # couchdb is available on localhost via haproxy, which is bound to 4096.
+ $couchdb_host = 'localhost'
+  # See site_webapp/templates/haproxy_couchdb.cfg.erb
+ $couchdb_port = '4096'
# temporarily for now:
$domain = hiera('domain')
$address_domain = $domain['full_suffix']
- $x509 = hiera('x509')
- $x509_key = $x509['key']
- $x509_cert = $x509['cert']
- $x509_ca = $x509['ca_cert']
+
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
#
# USER AND GROUP
@@ -41,6 +51,7 @@ class site_nickserver {
ensure => present,
allowdupe => false;
}
+
user { 'nickserver':
ensure => present,
allowdupe => false,
@@ -50,31 +61,33 @@ class site_nickserver {
}
#
- # NICKSERVER CODE
- # NOTE: in order to support TLS, libssl-dev must be installed before EventMachine gem
- # is built/installed.
+  # NICKSERVER CODE
+  # NOTE: in order to support TLS, libssl-dev must be installed before the
+  # EventMachine gem is built/installed.
#
- package {
- 'libssl-dev': ensure => installed;
- }
+ package { 'libssl-dev': ensure => installed }
+
vcsrepo { '/srv/leap/nickserver':
ensure => present,
revision => 'origin/master',
provider => git,
- source => 'git://code.leap.se/nickserver',
+ source => 'https://leap.se/git/nickserver',
owner => 'nickserver',
group => 'nickserver',
require => [ User['nickserver'], Group['nickserver'] ],
notify => Exec['nickserver_bundler_update'];
}
+
exec { 'nickserver_bundler_update':
cwd => '/srv/leap/nickserver',
command => '/bin/bash -c "/usr/bin/bundle check || /usr/bin/bundle install --path vendor/bundle"',
unless => '/usr/bin/bundle check',
user => 'nickserver',
timeout => 600,
- require => [ Class['bundler::install'], Vcsrepo['/srv/leap/nickserver'], Package['libssl-dev'] ],
+ require => [
+ Class['bundler::install'], Vcsrepo['/srv/leap/nickserver'],
+ Package['libssl-dev'], Class['site_config::ruby::dev'] ],
+
notify => Service['nickserver'];
}
@@ -82,7 +95,7 @@ class site_nickserver {
# NICKSERVER CONFIG
#
- file { '/etc/leap/nickserver.yml':
+ file { '/etc/nickserver.yml':
content => template('site_nickserver/nickserver.yml.erb'),
owner => nickserver,
group => nickserver,
@@ -99,8 +112,11 @@ class site_nickserver {
ensure => link,
target => '/srv/leap/nickserver/bin/nickserver',
require => Vcsrepo['/srv/leap/nickserver'];
+
'/etc/init.d/nickserver':
- owner => root, group => 0, mode => '0755',
+ owner => root,
+ group => 0,
+ mode => '0755',
source => '/srv/leap/nickserver/dist/debian-init-script',
require => Vcsrepo['/srv/leap/nickserver'];
}
@@ -110,7 +126,11 @@ class site_nickserver {
enable => true,
hasrestart => true,
hasstatus => true,
- require => File['/etc/init.d/nickserver'];
+ require => [
+ File['/etc/init.d/nickserver'],
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ];
}
#
@@ -119,7 +139,7 @@ class site_nickserver {
#
file { '/etc/shorewall/macro.nickserver':
- content => "PARAM - - tcp $nickserver_port",
+ content => "PARAM - - tcp ${nickserver_port}",
notify => Service['shorewall'],
require => Package['shorewall'];
}
@@ -142,21 +162,8 @@ class site_nickserver {
}
apache::vhost::file {
- 'nickserver': content => template('site_nickserver/nickserver-proxy.conf.erb')
- }
-
- x509::key { 'nickserver':
- content => $x509_key,
- notify => Service[apache];
+ 'nickserver':
+ content => template('site_nickserver/nickserver-proxy.conf.erb')
}
- x509::cert { 'nickserver':
- content => $x509_cert,
- notify => Service[apache];
- }
-
- x509::ca { 'nickserver':
- content => $x509_ca,
- notify => Service[apache];
- }
-} \ No newline at end of file
+}
diff --git a/puppet/modules/site_nickserver/templates/nickserver-proxy.conf.erb b/puppet/modules/site_nickserver/templates/nickserver-proxy.conf.erb
index 67896cd3..ae06410e 100644
--- a/puppet/modules/site_nickserver/templates/nickserver-proxy.conf.erb
+++ b/puppet/modules/site_nickserver/templates/nickserver-proxy.conf.erb
@@ -14,9 +14,9 @@ Listen 0.0.0.0:<%= @nickserver_port -%>
SSLHonorCipherOrder on
SSLCACertificatePath /etc/ssl/certs
- SSLCertificateChainFile /etc/ssl/certs/nickserver.pem
- SSLCertificateKeyFile /etc/x509/keys/nickserver.key
- SSLCertificateFile /etc/x509/certs/nickserver.crt
+ SSLCertificateChainFile <%= scope.lookupvar('x509::variables::local_CAs') %>/<%= scope.lookupvar('site_config::params::ca_name') %>.crt
+ SSLCertificateKeyFile <%= scope.lookupvar('x509::variables::keys') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.key
+ SSLCertificateFile <%= scope.lookupvar('x509::variables::certs') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.crt
ProxyPass / http://localhost:<%= @nickserver_local_port %>/
ProxyPreserveHost On # preserve Host header in HTTP request
diff --git a/puppet/modules/site_nickserver/templates/nickserver.yml.erb b/puppet/modules/site_nickserver/templates/nickserver.yml.erb
index 7aab5605..e717cbaa 100644
--- a/puppet/modules/site_nickserver/templates/nickserver.yml.erb
+++ b/puppet/modules/site_nickserver/templates/nickserver.yml.erb
@@ -6,7 +6,7 @@ domain: "<%= @address_domain %>"
couch_host: "<%= @couchdb_host %>"
couch_port: <%= @couchdb_port %>
-couch_database: "users"
+couch_database: "identities"
couch_user: "<%= @couchdb_user %>"
couch_password: "<%= @couchdb_password %>"
diff --git a/puppet/modules/site_openvpn/manifests/dh_key.pp b/puppet/modules/site_openvpn/manifests/dh_key.pp
new file mode 100644
index 00000000..13cc0f5b
--- /dev/null
+++ b/puppet/modules/site_openvpn/manifests/dh_key.pp
@@ -0,0 +1,10 @@
+class site_openvpn::dh_key {
+
+ $x509_config = hiera('x509')
+
+ file { '/etc/openvpn/keys/dh.pem':
+ content => $x509_config['dh'],
+ mode => '0644',
+ }
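+  # the 'dh' value is expected to hold PEM-encoded Diffie-Hellman parameters
+  # (e.g. as generated by `openssl dhparam 2048`); this is an assumption based
+  # on the file name and on openvpn's 'dh' option pointing at this file.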
+
+}
diff --git a/puppet/modules/site_openvpn/manifests/init.pp b/puppet/modules/site_openvpn/manifests/init.pp
index 4f900623..7aec0faa 100644
--- a/puppet/modules/site_openvpn/manifests/init.pp
+++ b/puppet/modules/site_openvpn/manifests/init.pp
@@ -5,8 +5,9 @@
# (2) unlimited only
# (3) limited only
#
-# The difference is that 'unlimited' gateways only allow client certs that match the 'unlimited_prefix',
-# and 'limited' gateways only allow certs that match the 'limited_prefix'.
+# The difference is that 'unlimited' gateways only allow client certs that match
+# the 'unlimited_prefix', and 'limited' gateways only allow certs that match the
+# 'limited_prefix'.
#
# We potentially create four openvpn config files (thus four daemons):
#
@@ -19,23 +20,30 @@
class site_openvpn {
tag 'leap_service'
- $openvpn_config = hiera('openvpn')
- $x509_config = hiera('x509')
- $openvpn_ports = $openvpn_config['ports']
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca_bundle
+
+
+ Class['site_config::default'] -> Class['site_openvpn']
+
+ $openvpn = hiera('openvpn')
+ $openvpn_ports = $openvpn['ports']
+ $openvpn_config = $openvpn['configuration']
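+  # $openvpn_config is consumed by site_openvpn::server_config, which reads
+  # the keys 'tls-cipher', 'auth', 'cipher' and 'keepalive' from it. For
+  # illustration only, a hash carrying the previously hard-coded defaults:
+  #   { 'tls-cipher' => 'DHE-RSA-AES128-SHA', 'auth' => 'SHA1',
+  #     'cipher' => 'AES-128-CBC', 'keepalive' => '5 20' }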
if $::ec2_instance_id {
$openvpn_gateway_address = $::ipaddress
} else {
- $openvpn_gateway_address = $openvpn_config['gateway_address']
- if $openvpn_config['second_gateway_address'] {
- $openvpn_second_gateway_address = $openvpn_config['second_gateway_address']
+ $openvpn_gateway_address = $openvpn['gateway_address']
+ if $openvpn['second_gateway_address'] {
+ $openvpn_second_gateway_address = $openvpn['second_gateway_address']
} else {
$openvpn_second_gateway_address = undef
}
}
- $openvpn_allow_unlimited = $openvpn_config['allow_unlimited']
- $openvpn_unlimited_prefix = $openvpn_config['unlimited_prefix']
+ $openvpn_allow_unlimited = $openvpn['allow_unlimited']
+ $openvpn_unlimited_prefix = $openvpn['unlimited_prefix']
$openvpn_unlimited_tcp_network_prefix = '10.41.0'
$openvpn_unlimited_tcp_netmask = '255.255.248.0'
$openvpn_unlimited_tcp_cidr = '21'
@@ -44,9 +52,9 @@ class site_openvpn {
$openvpn_unlimited_udp_cidr = '21'
if !$::ec2_instance_id {
- $openvpn_allow_limited = $openvpn_config['allow_limited']
- $openvpn_limited_prefix = $openvpn_config['limited_prefix']
- $openvpn_rate_limit = $openvpn_config['rate_limit']
+ $openvpn_allow_limited = $openvpn['allow_limited']
+ $openvpn_limited_prefix = $openvpn['limited_prefix']
+ $openvpn_rate_limit = $openvpn['rate_limit']
$openvpn_limited_tcp_network_prefix = '10.43.0'
$openvpn_limited_tcp_netmask = '255.255.248.0'
$openvpn_limited_tcp_cidr = '21'
@@ -55,8 +63,14 @@ class site_openvpn {
$openvpn_limited_udp_cidr = '21'
}
- # deploy ca + server keys
- include site_openvpn::keys
+  # find out the netmask in CIDR format of the primary interface
+  # (thanks to https://blog.kumina.nl/tag/puppet-tips-and-tricks/)
+  # we can do this using an inline_template:
+ $factname_primary_netmask = "netmask_cidr_${::site_config::params::interface}"
+ $primary_netmask = inline_template('<%= scope.lookupvar(factname_primary_netmask) %>')
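+  # e.g. if the primary interface is eth0, this looks up a fact named
+  # 'netmask_cidr_eth0' (assumed to be provided by a custom fact elsewhere in
+  # the platform) and returns its CIDR prefix length, such as '24'.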
+
+ # deploy dh keys
+ include site_openvpn::dh_key
if $openvpn_allow_unlimited and $openvpn_allow_limited {
$unlimited_gateway_address = $openvpn_gateway_address
@@ -77,7 +91,8 @@ class site_openvpn {
tls_remote => "\"${openvpn_unlimited_prefix}\"",
server => "${openvpn_unlimited_tcp_network_prefix}.0 ${openvpn_unlimited_tcp_netmask}",
push => "\"dhcp-option DNS ${openvpn_unlimited_tcp_network_prefix}.1\"",
- management => '127.0.0.1 1000'
+ management => '127.0.0.1 1000',
+ config => $openvpn_config
}
site_openvpn::server_config { 'udp_config':
port => '1194',
@@ -86,11 +101,12 @@ class site_openvpn {
tls_remote => "\"${openvpn_unlimited_prefix}\"",
server => "${openvpn_unlimited_udp_network_prefix}.0 ${openvpn_unlimited_udp_netmask}",
push => "\"dhcp-option DNS ${openvpn_unlimited_udp_network_prefix}.1\"",
- management => '127.0.0.1 1001'
+ management => '127.0.0.1 1001',
+ config => $openvpn_config
}
} else {
- tidy { "/etc/openvpn/tcp_config.conf": }
- tidy { "/etc/openvpn/udp_config.conf": }
+ tidy { '/etc/openvpn/tcp_config.conf': }
+ tidy { '/etc/openvpn/udp_config.conf': }
}
if $openvpn_allow_limited {
@@ -101,7 +117,8 @@ class site_openvpn {
tls_remote => "\"${openvpn_limited_prefix}\"",
server => "${openvpn_limited_tcp_network_prefix}.0 ${openvpn_limited_tcp_netmask}",
push => "\"dhcp-option DNS ${openvpn_limited_tcp_network_prefix}.1\"",
- management => '127.0.0.1 1002'
+ management => '127.0.0.1 1002',
+ config => $openvpn_config
}
site_openvpn::server_config { 'limited_udp_config':
port => '1194',
@@ -110,11 +127,12 @@ class site_openvpn {
tls_remote => "\"${openvpn_limited_prefix}\"",
server => "${openvpn_limited_udp_network_prefix}.0 ${openvpn_limited_udp_netmask}",
push => "\"dhcp-option DNS ${openvpn_limited_udp_network_prefix}.1\"",
- management => '127.0.0.1 1003'
+ management => '127.0.0.1 1003',
+ config => $openvpn_config
}
} else {
- tidy { "/etc/openvpn/limited_tcp_config.conf": }
- tidy { "/etc/openvpn/limited_udp_config.conf": }
+ tidy { '/etc/openvpn/limited_tcp_config.conf': }
+ tidy { '/etc/openvpn/limited_udp_config.conf': }
}
file {
@@ -131,7 +149,12 @@ class site_openvpn {
command => '/etc/init.d/openvpn restart',
refreshonly => true,
subscribe => File['/etc/openvpn'],
- require => [ Package['openvpn'], File['/etc/openvpn'] ];
+ require => [
+ Package['openvpn'],
+ File['/etc/openvpn'],
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca_bundle'] ];
}
cron { 'add_gateway_ips.sh':
@@ -155,7 +178,9 @@ class site_openvpn {
ensure => running,
hasrestart => true,
hasstatus => true,
- require => Exec['concat_/etc/default/openvpn'];
+ require => [
+ Package['openvpn'],
+ Exec['concat_/etc/default/openvpn'] ];
}
file {
@@ -193,4 +218,7 @@ class site_openvpn {
target => '/etc/default/openvpn',
order => 10;
}
+
+ include site_check_mk::agent::openvpn
+
}
diff --git a/puppet/modules/site_openvpn/manifests/keys.pp b/puppet/modules/site_openvpn/manifests/keys.pp
deleted file mode 100644
index f3c5b423..00000000
--- a/puppet/modules/site_openvpn/manifests/keys.pp
+++ /dev/null
@@ -1,51 +0,0 @@
-class site_openvpn::keys {
-
- x509::key {
- 'leap_openvpn':
- content => $site_openvpn::x509_config['key'],
- notify => Service[openvpn];
- }
-
- x509::cert {
- 'leap_openvpn':
- content => $site_openvpn::x509_config['cert'],
- notify => Service[openvpn];
- }
-
- x509::ca {
- 'leap_ca':
- content => $site_openvpn::x509_config['ca_cert'],
- notify => Service[openvpn];
- }
-
- file { '/etc/openvpn/keys/dh.pem':
- content => $site_openvpn::x509_config['dh'],
- mode => '0644',
- }
-
- #
- # CA bundle -- we want to have the possibility of allowing multiple CAs.
- # For now, the reason is to transition to using client CA. In the future,
- # we will want to be able to smoothly phase out one CA and phase in another.
- # I tried "--capath" for this, but it did not work.
- #
-
- concat {
- '/etc/openvpn/ca_bundle.pem':
- owner => root,
- group => root,
- mode => 644,
- warn => true,
- notify => Service['openvpn'];
- }
-
- concat::fragment {
- 'client_ca_cert':
- content => $site_openvpn::x509_config['client_ca_cert'],
- target => '/etc/openvpn/ca_bundle.pem';
- 'ca_cert':
- content => $site_openvpn::x509_config['ca_cert'],
- target => '/etc/openvpn/ca_bundle.pem';
- }
-
-}
diff --git a/puppet/modules/site_openvpn/manifests/resolver.pp b/puppet/modules/site_openvpn/manifests/resolver.pp
index c1bce858..c74fb509 100644
--- a/puppet/modules/site_openvpn/manifests/resolver.pp
+++ b/puppet/modules/site_openvpn/manifests/resolver.pp
@@ -60,25 +60,25 @@ class site_openvpn::resolver {
path => '/etc/unbound/unbound.conf',
line => 'server: include: /etc/unbound/conf.d/vpn_unlimited_tcp_resolver',
notify => Service['unbound'],
- require => Package['unbound'];
+ require => [ Package['openvpn'], Package['unbound'] ];
'add_unlimited_udp_resolver':
ensure => $ensure_unlimited,
path => '/etc/unbound/unbound.conf',
line => 'server: include: /etc/unbound/conf.d/vpn_unlimited_udp_resolver',
notify => Service['unbound'],
- require => Package['unbound'];
+ require => [ Package['openvpn'], Package['unbound'] ];
'add_limited_tcp_resolver':
ensure => $ensure_limited,
path => '/etc/unbound/unbound.conf',
line => 'server: include: /etc/unbound/conf.d/vpn_limited_tcp_resolver',
notify => Service['unbound'],
- require => Package['unbound'];
- 'add_limited_udp_resolver':
+ require => [ Package['openvpn'], Package['unbound'] ];
+ 'add_limited_udp_resolver':
ensure => $ensure_limited,
path => '/etc/unbound/unbound.conf',
line => 'server: include: /etc/unbound/conf.d/vpn_limited_udp_resolver',
notify => Service['unbound'],
- require => Package['unbound']
+ require => [ Package['openvpn'], Package['unbound'] ];
}
}
diff --git a/puppet/modules/site_openvpn/manifests/server_config.pp b/puppet/modules/site_openvpn/manifests/server_config.pp
index 6106cfbb..b1f4997c 100644
--- a/puppet/modules/site_openvpn/manifests/server_config.pp
+++ b/puppet/modules/site_openvpn/manifests/server_config.pp
@@ -54,7 +54,7 @@
define site_openvpn::server_config(
$port, $proto, $local, $server, $push,
- $management, $tls_remote = undef) {
+ $management, $config, $tls_remote = undef) {
$openvpn_configname = $name
@@ -70,97 +70,97 @@ define site_openvpn::server_config(
if $tls_remote != undef {
openvpn::option {
- "tls-remote $openvpn_configname":
- key => 'tls-remote',
- value => $tls_remote,
- server => $openvpn_configname;
+ "tls-remote ${openvpn_configname}":
+ key => 'tls-remote',
+ value => $tls_remote,
+ server => $openvpn_configname;
}
}
openvpn::option {
- "ca $openvpn_configname":
+ "ca ${openvpn_configname}":
key => 'ca',
- value => '/etc/openvpn/ca_bundle.pem',
+ value => "${x509::variables::local_CAs}/${site_config::params::ca_bundle_name}.crt",
server => $openvpn_configname;
- "cert $openvpn_configname":
+ "cert ${openvpn_configname}":
key => 'cert',
- value => '/etc/x509/certs/leap_openvpn.crt',
+ value => "${x509::variables::certs}/${site_config::params::cert_name}.crt",
server => $openvpn_configname;
- "key $openvpn_configname":
+ "key ${openvpn_configname}":
key => 'key',
- value => '/etc/x509/keys/leap_openvpn.key',
+ value => "${x509::variables::keys}/${site_config::params::cert_name}.key",
server => $openvpn_configname;
- "dh $openvpn_configname":
+ "dh ${openvpn_configname}":
key => 'dh',
value => '/etc/openvpn/keys/dh.pem',
server => $openvpn_configname;
- "tls-cipher $openvpn_configname":
+ "tls-cipher ${openvpn_configname}":
key => 'tls-cipher',
- value => 'DHE-RSA-AES128-SHA',
+ value => $config['tls-cipher'],
server => $openvpn_configname;
- "auth $openvpn_configname":
+ "auth ${openvpn_configname}":
key => 'auth',
- value => 'SHA1',
+ value => $config['auth'],
server => $openvpn_configname;
- "cipher $openvpn_configname":
+ "cipher ${openvpn_configname}":
key => 'cipher',
- value => 'AES-128-CBC',
+ value => $config['cipher'],
server => $openvpn_configname;
- "dev $openvpn_configname":
+ "dev ${openvpn_configname}":
key => 'dev',
value => 'tun',
server => $openvpn_configname;
- "duplicate-cn $openvpn_configname":
+ "duplicate-cn ${openvpn_configname}":
key => 'duplicate-cn',
server => $openvpn_configname;
- "keepalive $openvpn_configname":
+ "keepalive ${openvpn_configname}":
key => 'keepalive',
- value => '5 20',
+ value => $config['keepalive'],
server => $openvpn_configname;
- "local $openvpn_configname":
+ "local ${openvpn_configname}":
key => 'local',
value => $local,
server => $openvpn_configname;
- "mute $openvpn_configname":
+ "mute ${openvpn_configname}":
key => 'mute',
value => '5',
server => $openvpn_configname;
- "mute-replay-warnings $openvpn_configname":
+ "mute-replay-warnings ${openvpn_configname}":
key => 'mute-replay-warnings',
server => $openvpn_configname;
- "management $openvpn_configname":
+ "management ${openvpn_configname}":
key => 'management',
value => $management,
server => $openvpn_configname;
- "proto $openvpn_configname":
+ "proto ${openvpn_configname}":
key => 'proto',
value => $proto,
server => $openvpn_configname;
- "push1 $openvpn_configname":
+ "push1 ${openvpn_configname}":
key => 'push',
value => $push,
server => $openvpn_configname;
- "push2 $openvpn_configname":
+ "push2 ${openvpn_configname}":
key => 'push',
value => '"redirect-gateway def1"',
server => $openvpn_configname;
- "script-security $openvpn_configname":
+ "script-security ${openvpn_configname}":
key => 'script-security',
value => '2',
server => $openvpn_configname;
- "server $openvpn_configname":
+ "server ${openvpn_configname}":
key => 'server',
value => $server,
server => $openvpn_configname;
- "status $openvpn_configname":
+ "status ${openvpn_configname}":
key => 'status',
value => '/var/run/openvpn-status 10',
server => $openvpn_configname;
- "status-version $openvpn_configname":
+ "status-version ${openvpn_configname}":
key => 'status-version',
value => '3',
server => $openvpn_configname;
- "topology $openvpn_configname":
+ "topology ${openvpn_configname}":
key => 'topology',
value => 'subnet',
server => $openvpn_configname;
@@ -169,7 +169,7 @@ define site_openvpn::server_config(
# key => 'up',
# value => '/etc/openvpn/server-up.sh',
# server => $openvpn_configname;
- "verb $openvpn_configname":
+ "verb ${openvpn_configname}":
key => 'verb',
value => '3',
server => $openvpn_configname;
diff --git a/puppet/modules/site_openvpn/templates/add_gateway_ips.sh.erb b/puppet/modules/site_openvpn/templates/add_gateway_ips.sh.erb
index 05f3d16b..e76b756b 100644
--- a/puppet/modules/site_openvpn/templates/add_gateway_ips.sh.erb
+++ b/puppet/modules/site_openvpn/templates/add_gateway_ips.sh.erb
@@ -1,11 +1,11 @@
#!/bin/sh
-ip addr show dev <%= scope.lookupvar('site_config::params::interface') %> | grep -q <%= @openvpn_gateway_address %>/24 ||
- ip addr add <%= @openvpn_gateway_address %>/24 dev <%= scope.lookupvar('site_config::params::interface') %>
+ip addr show dev <%= scope.lookupvar('site_config::params::interface') %> | grep -q <%= @openvpn_gateway_address %>/<%= @primary_netmask %> ||
+ ip addr add <%= @openvpn_gateway_address %>/<%= @primary_netmask %> dev <%= scope.lookupvar('site_config::params::interface') %>
<% if @openvpn_second_gateway_address %>
-ip addr show dev <%= scope.lookupvar('site_config::params::interface') %> | grep -q <%= @openvpn_second_gateway_address %>/24 ||
- ip addr add <%= @openvpn_second_gateway_address %>/24 dev <%= scope.lookupvar('site_config::params::interface') %>
+ip addr show dev <%= scope.lookupvar('site_config::params::interface') %> | grep -q <%= @openvpn_second_gateway_address %>/<%= @primary_netmask %> ||
+ ip addr add <%= @openvpn_second_gateway_address %>/<%= @primary_netmask %> dev <%= scope.lookupvar('site_config::params::interface') %>
<% end %>
/bin/echo 1 > /proc/sys/net/ipv4/ip_forward
diff --git a/puppet/modules/site_postfix/files/checks/received_anon b/puppet/modules/site_postfix/files/checks/received_anon
new file mode 100644
index 00000000..2822973e
--- /dev/null
+++ b/puppet/modules/site_postfix/files/checks/received_anon
@@ -0,0 +1,2 @@
+/^Received: from (.* \([-._[:alnum:]]+ \[[.[:digit:]]{7,15}\]\))([[:space:]]+).*(\(using [.[:alnum:]]+ with cipher [-A-Z0-9]+ \([0-9]+\/[0-9]+ bits\)\))[[:space:]]+\(Client CN "([[:alnum:]]+)", Issuer "[[:print:]]+" \(verified OK\)\)[[:space:]]+by ([.[:alnum:]]+) \(([^)]+)\) with (E?SMTPS?A?) id ([A-F[:digit:]]+).*/
+ REPLACE Received: from [127.0.0.1] (localhost [127.0.0.1])${2}${3}${2}(Authenticated sender: $4)${2}with $7 id $8
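+# In short: this rule rewrites the Received: header of mail submitted with a
+# verified client certificate, replacing the client's hostname and IP with
+# localhost and recording the certificate CN as the "Authenticated sender"
+# (see the Feature #3866 reference in site_postfix::mx::checks).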
diff --git a/puppet/modules/site_postfix/manifests/debug.pp b/puppet/modules/site_postfix/manifests/debug.pp
new file mode 100644
index 00000000..f370d166
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/debug.pp
@@ -0,0 +1,9 @@
+class site_postfix::debug {
+
+ postfix::config {
+ 'debug_peer_list': value => '127.0.0.1';
+ 'debug_peer_level': value => '1';
+ 'smtpd_tls_loglevel': value => '1';
+ }
+
+}
diff --git a/puppet/modules/site_postfix/manifests/mx.pp b/puppet/modules/site_postfix/manifests/mx.pp
new file mode 100644
index 00000000..bdfee665
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx.pp
@@ -0,0 +1,74 @@
+class site_postfix::mx {
+
+ $domain_hash = hiera ('domain')
+ $domain = $domain_hash['full_suffix']
+ $host_domain = $domain_hash['full']
+ $cert_name = hiera('name')
+ $mynetworks = join(hiera('mynetworks'), ' ')
+
+ $root_mail_recipient = hiera ('contacts')
+ $postfix_smtp_listen = 'all'
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::client_ca::ca
+ include site_config::x509::client_ca::key
+
+ postfix::config {
+ 'mynetworks':
+ value => "127.0.0.0/8 [::1]/128 [fe80::]/64 ${mynetworks}";
+ 'mydestination':
+ value => "\$myorigin, localhost, localhost.\$mydomain, ${domain}";
+ 'myhostname':
+ value => $host_domain;
+ 'mailbox_size_limit':
+ value => '0';
+ 'home_mailbox':
+ value => 'Maildir/';
+ 'virtual_alias_maps':
+ value => 'tcp:localhost:4242';
+ 'luser_relay':
+ value => 'vmail';
+ 'smtpd_tls_received_header':
+ value => 'yes';
+ # Note: we are setting this here, instead of in site_postfix::mx::smtp_tls
+ # because the satellites need to have a different value
+ 'smtp_tls_security_level':
+ value => 'may';
+ }
+
+ include site_postfix::mx::smtpd_checks
+ include site_postfix::mx::checks
+ include site_postfix::mx::smtp_tls
+ include site_postfix::mx::smtpd_tls
+ include site_postfix::mx::reserved_aliases
+
+ # greater verbosity for debugging, take out for production
+ #include site_postfix::debug
+
+ user { 'vmail':
+ ensure => present,
+ comment => 'Leap Mailspool',
+ home => '/var/mail/vmail',
+ shell => '/bin/false',
+ managehome => true,
+ }
+
+ class { 'postfix':
+ preseed => true,
+ root_mail_recipient => $root_mail_recipient,
+ smtp_listen => 'all',
+ mastercf_tail =>
+ "smtps inet n - - - - smtpd
+ -o smtpd_tls_wrappermode=yes
+ -o smtpd_tls_security_level=encrypt
+ -o smtpd_recipient_restrictions=\$smtps_recipient_restrictions
+ -o smtpd_helo_restrictions=\$smtps_helo_restrictions",
+ require => [
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Client_ca::Key'],
+ Class['Site_config::X509::Client_ca::Ca'],
+ User['vmail'] ]
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/checks.pp b/puppet/modules/site_postfix/manifests/mx/checks.pp
new file mode 100644
index 00000000..5d75a5e5
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/checks.pp
@@ -0,0 +1,41 @@
+class site_postfix::mx::checks {
+
+ file {
+ '/etc/postfix/checks':
+ ensure => directory,
+ mode => '0755',
+ owner => root,
+ group => postfix,
+ require => Package['postfix'];
+
+ '/etc/postfix/checks/helo_checks':
+ content => template('site_postfix/checks/helo_access.erb'),
+ mode => '0644',
+ owner => root,
+ group => root;
+ }
+
+ exec {
+ '/usr/sbin/postmap /etc/postfix/checks/helo_checks':
+ refreshonly => true,
+ subscribe => File['/etc/postfix/checks/helo_checks'];
+ }
+
+ # Anonymize the user's home IP from the email headers (Feature #3866)
+ package { 'postfix-pcre': ensure => installed, require => Package['postfix'] }
+
+ file { '/etc/postfix/checks/received_anon':
+ source => 'puppet:///modules/site_postfix/checks/received_anon',
+ mode => '0644',
+ owner => root,
+ group => root,
+ notify => Service['postfix']
+ }
+
+ postfix::config {
+ 'header_checks':
+ value => 'pcre:/etc/postfix/checks/received_anon',
+ require => File['/etc/postfix/checks/received_anon'];
+ }
+
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/reserved_aliases.pp b/puppet/modules/site_postfix/manifests/mx/reserved_aliases.pp
new file mode 100644
index 00000000..83e27376
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/reserved_aliases.pp
@@ -0,0 +1,15 @@
+# Defines which mail addresses shouldn't be available and where they should forward
+class site_postfix::mx::reserved_aliases {
+
+ postfix::mailalias {
+ [ 'abuse', 'admin', 'arin-admin', 'administrator', 'bin', 'cron',
+ 'certmaster', 'domainadmin', 'games', 'ftp', 'hostmaster', 'lp',
+ 'maildrop', 'mysql', 'news', 'nobody', 'noc', 'postmaster', 'postgresql',
+ 'security', 'ssladmin', 'sys', 'usenet', 'uucp', 'webmaster', 'www',
+ 'www-data',
+ ]:
+ ensure => present,
+ recipient => 'root'
+ }
+
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/smtp_auth.pp b/puppet/modules/site_postfix/manifests/mx/smtp_auth.pp
new file mode 100644
index 00000000..afa70527
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/smtp_auth.pp
@@ -0,0 +1,6 @@
+class site_postfix::mx::smtp_auth {
+
+ postfix::config {
+ 'smtpd_tls_ask_ccert': value => 'yes';
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/smtp_tls.pp b/puppet/modules/site_postfix/manifests/mx/smtp_tls.pp
new file mode 100644
index 00000000..d9b59f40
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/smtp_tls.pp
@@ -0,0 +1,27 @@
+class site_postfix::mx::smtp_tls {
+
+ include x509::variables
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+ # smtp TLS
+ postfix::config {
+ 'smtp_use_tls': value => 'yes';
+ 'smtp_tls_CApath': value => '/etc/ssl/certs/';
+ 'smtp_tls_CAfile': value => $ca_path;
+ 'smtp_tls_cert_file': value => $cert_path;
+ 'smtp_tls_key_file': value => $key_path;
+ 'smtp_tls_loglevel': value => '1';
+ 'smtp_tls_exclude_ciphers':
+ value => 'aNULL, MD5, DES';
+ # upstream default is md5 (since 2.5 and older used it), we force sha1
+ 'smtp_tls_fingerprint_digest':
+ value => 'sha1';
+ 'smtp_tls_session_cache_database':
+ value => 'btree:${data_directory}/smtp_cache';
+ # see issue #4011
+ 'smtp_tls_protocols':
+ value => '!SSLv2, !SSLv3';
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/smtpd_checks.pp b/puppet/modules/site_postfix/manifests/mx/smtpd_checks.pp
new file mode 100644
index 00000000..0ec40277
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/smtpd_checks.pp
@@ -0,0 +1,31 @@
+class site_postfix::mx::smtpd_checks {
+
+ postfix::config {
+ 'smtpd_helo_required':
+ value => 'yes';
+ 'checks_dir':
+ value => '$config_directory/checks';
+ 'smtpd_client_restrictions':
+ value => 'permit_mynetworks,permit';
+ 'smtpd_data_restrictions':
+ value => 'permit_mynetworks, reject_unauth_pipelining, permit';
+ 'smtpd_delay_reject':
+ value => 'yes';
+ 'smtpd_helo_restrictions':
+ value => 'permit_mynetworks, reject_invalid_helo_hostname, reject_non_fqdn_helo_hostname, check_helo_access hash:$checks_dir/helo_checks, permit';
+ 'smtpd_recipient_restrictions':
+ value => 'reject_unknown_recipient_domain, permit_mynetworks, check_recipient_access tcp:localhost:2244, reject_unauth_destination, permit';
+ # We should change from permit_tls_all_clientcerts to permit_tls_clientcerts
+ # with a lookup on $relay_clientcerts! Right now we are listing the only
+ # valid CA that client certificates can use in the $smtp_tls_CAfile parameter
+ # but we cannot cut off a certificate that should no longer be used unless
+ # we use permit_tls_clientcerts with the $relay_clientcerts lookup
+ 'smtps_recipient_restrictions':
+ value => 'permit_tls_all_clientcerts, check_recipient_access tcp:localhost:2244, reject_unauth_destination, permit';
+ 'smtps_helo_restrictions':
+ value => 'permit_mynetworks, check_helo_access hash:$checks_dir/helo_checks, permit';
+ 'smtpd_sender_restrictions':
+ value => 'permit_mynetworks, reject_non_fqdn_sender, reject_unknown_sender_domain, permit';
+ }
+
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/smtpd_tls.pp b/puppet/modules/site_postfix/manifests/mx/smtpd_tls.pp
new file mode 100644
index 00000000..0809c75f
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/smtpd_tls.pp
@@ -0,0 +1,55 @@
+class site_postfix::mx::smtpd_tls {
+
+ include x509::variables
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::client_ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+
+ postfix::config {
+ 'smtpd_use_tls': value => 'yes';
+ 'smtpd_tls_CAfile': value => $ca_path;
+ 'smtpd_tls_cert_file': value => $cert_path;
+ 'smtpd_tls_key_file': value => $key_path;
+ 'smtpd_tls_ask_ccert': value => 'yes';
+ 'smtpd_tls_security_level':
+ value => 'may';
+ 'smtpd_tls_eecdh_grade':
+ value => 'ultra';
+ 'smtpd_tls_session_cache_database':
+ value => 'btree:${data_directory}/smtpd_scache';
+ }
+
+ # Setup DH parameters
+ # Instead of using the dh parameters that are created by leap cli, it is more
+ # secure to generate new parameter files that will only be used for postfix,
+ # for each machine
+
+ include site_config::packages::gnutls
+
+ # Note: although the parameter is named "smtpd_tls_dh1024_param_file", we are
+ # generating 2048-bit DH parameters. Neither Postfix nor OpenSSL actually
+ # cares about the size of the prime in "smtpd_tls_dh1024_param_file", so it
+ # can be 2048 bits.
+
+ exec { 'certtool-postfix-gendh':
+ command => 'certtool --generate-dh-params --bits 2048 --outfile /etc/postfix/smtpd_tls_dh_param.pem',
+ user => root,
+ group => root,
+ creates => '/etc/postfix/smtpd_tls_dh_param.pem',
+ require => [ Package['gnutls-bin'], Package['postfix'] ]
+ }
+
+ # Make sure the dh params file has correct ownership and mode
+ file {
+ '/etc/postfix/smtpd_tls_dh_param.pem':
+ owner => root,
+ group => root,
+ mode => '0600',
+ require => Exec['certtool-postfix-gendh'];
+ }
+
+ postfix::config { 'smtpd_tls_dh1024_param_file':
+ value => '/etc/postfix/smtpd_tls_dh_param.pem',
+ require => File['/etc/postfix/smtpd_tls_dh_param.pem']
+ }
+}
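As a quick sanity check (a sketch run manually on the node, not managed by this manifest), the generated parameters can be inspected with openssl:

    openssl dhparam -in /etc/postfix/smtpd_tls_dh_param.pem -noout -text | head -n 1
    # expected to print something like: DH Parameters: (2048 bit)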
diff --git a/puppet/modules/site_postfix/manifests/satellite.pp b/puppet/modules/site_postfix/manifests/satellite.pp
new file mode 100644
index 00000000..5725e6b8
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/satellite.pp
@@ -0,0 +1,47 @@
+class site_postfix::satellite {
+
+ $root_mail_recipient = hiera ('contacts')
+ $mail = hiera ('mail')
+ $relayhost = $mail['smarthost']
+ $cert_name = hiera('name')
+
+ class { '::postfix::satellite':
+ relayhost => $relayhost,
+ root_mail_recipient => $root_mail_recipient
+ }
+
+ # There are special conditions under which satellite hosts cannot contact
+ # their relayhost:
+ #
+ # 1. When they are on openstack/amazon/PC and on the same cluster as the
+ # relay host, the MX lookup for the relay host returns the public IP, which
+ # cannot be reached.
+ #
+ # 2. When a domain is used that is not in DNS, because it is internal, a
+ # testing domain, etc. (e.g. a .local domain cannot be looked up in DNS).
+ #
+ # To resolve this, so the satellite can contact the relayhost, we set
+ # http://www.postfix.org/postconf.5.html#smtp_host_lookup to 'native', which
+ # makes the lookup use the native naming service (nsswitch.conf). That
+ # typically defaults to 'files, dns', so /etc/hosts is consulted first, then
+ # DNS if the entry doesn't exist (see the example entry after this class).
+ #
+ # NOTE: this will make it not possible to enable DANE support through DNSSEC
+ # with http://www.postfix.org/postconf.5.html#smtp_dns_support_level - but
+ # this parameter is not available until 2.11. If this ends up being important
+ # we could also make this an optional parameter for providers without
+ # dns / local domains
+
+ postfix::config {
+ 'smtp_host_lookup':
+ value => 'native';
+
+ # Note: we are setting this here, instead of in site_postfix::mx::smtp_tls
+ # because the mx server has to have a different value
+ 'smtp_tls_security_level':
+ value => 'encrypt';
+ }
+
+ include site_postfix::mx::smtp_tls
+
+}
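For example, assuming smtp_host_lookup is 'native' as configured above, a hypothetical entry like the following in the satellite's /etc/hosts (address and hostname are made up) lets it reach a relayhost whose public MX address is unreachable from inside the cluster:

    10.0.1.5    mx1.example.org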
diff --git a/puppet/modules/site_postfix/templates/checks/helo_access.erb b/puppet/modules/site_postfix/templates/checks/helo_access.erb
new file mode 100644
index 00000000..bef3c11d
--- /dev/null
+++ b/puppet/modules/site_postfix/templates/checks/helo_access.erb
@@ -0,0 +1,21 @@
+# THIS FILE IS MANAGED BY PUPPET
+# To make changes to this file, please edit your platform directory under
+# puppet/modules/site_postfix/templates/checks/helo_access.erb and then deploy
+
+# The format of this file is the HELO/EHLO domain followed by an action.
+# The action could be OK to allow it, REJECT to reject it, or a custom
+# status code and message. Any lines that are prefixed by an octothorpe (#)
+# will be considered comments.
+
+# Some examples:
+#
+# Reject anyone that HELO's with foobar:
+# foobar REJECT
+#
+# Allow the switches to skip this check:
+# switch1 OK
+# switch2 OK
+
+# Reject anybody that HELO's as being in our own domain(s)
+# anyone who identifies themselves as us is a virus/spammer
+<%= domain %> 554 You are not in domain <%= domain %>
diff --git a/puppet/modules/site_shorewall/manifests/defaults.pp b/puppet/modules/site_shorewall/manifests/defaults.pp
index c62c9307..8f56ac42 100644
--- a/puppet/modules/site_shorewall/manifests/defaults.pp
+++ b/puppet/modules/site_shorewall/manifests/defaults.pp
@@ -1,9 +1,12 @@
class site_shorewall::defaults {
+
include shorewall
include site_config::params
# be safe for development
- #if ( $::virtual == 'virtualbox') { $shorewall_startup='0' }
+ # if ( $::site_config::params::environment == 'local' ) {
+ # $shorewall_startup='0'
+ # }
# If you want logging:
shorewall::params {
@@ -18,8 +21,6 @@ class site_shorewall::defaults {
options => 'tcpflags,blacklist,nosmurfs';
}
- shorewall::routestopped { $site_config::params::interface: }
-
shorewall::policy {
'fw-to-all':
sourcezone => 'fw',
@@ -42,5 +43,32 @@ class site_shorewall::defaults {
order => 200;
}
+ package { 'shorewall-init':
+ ensure => installed
+ }
+
+ augeas {
+ # stop instead of clearing the firewall on shutdown
+ 'shorewall_SAFESTOP':
+ changes => 'set /files/etc/shorewall/shorewall.conf/SAFESTOP Yes',
+ lens => 'Shellvars.lns',
+ incl => '/etc/shorewall/shorewall.conf',
+ require => Package['shorewall'],
+ notify => Service[shorewall];
+ # require that the interface exist
+ 'shorewall_REQUIRE_INTERFACE':
+ changes => 'set /files/etc/shorewall/shorewall.conf/REQUIRE_INTERFACE Yes',
+ lens => 'Shellvars.lns',
+ incl => '/etc/shorewall/shorewall.conf',
+ require => Package['shorewall'],
+ notify => Service[shorewall];
+ # configure shorewall-init
+ 'shorewall-init':
+ changes => 'set /files/etc/default/shorewall-init/PRODUCTS shorewall',
+ lens => 'Shellvars.lns',
+ incl => '/etc/default/shorewall-init',
+ require => [ Package['shorewall-init'], Service['shorewall'] ]
+ }
+
include site_shorewall::sshd
}
diff --git a/puppet/modules/site_shorewall/manifests/mx.pp b/puppet/modules/site_shorewall/manifests/mx.pp
new file mode 100644
index 00000000..332f164e
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/mx.pp
@@ -0,0 +1,24 @@
+class site_shorewall::mx {
+
+ include site_shorewall::defaults
+
+ $smtpd_ports = '25,465,587'
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_mx':
+ content => "PARAM - - tcp ${smtpd_ports} ",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+
+ shorewall::rule {
+ 'net2fw-mx':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_mx(ACCEPT)',
+ order => 200;
+ }
+
+ include site_shorewall::service::smtp
+}
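For reference, the macro file written above contains a single PARAM line, so the 'net2fw-mx' rule should expand to roughly the following (a sketch of the effective rule, not literal generated file contents):

    # /etc/shorewall/macro.leap_mx
    PARAM   -   -   tcp   25,465,587
    # effective rule produced by leap_mx(ACCEPT):
    ACCEPT   net   $FW   tcp   25,465,587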
diff --git a/puppet/modules/site_shorewall/manifests/service/smtp.pp b/puppet/modules/site_shorewall/manifests/service/smtp.pp
new file mode 100644
index 00000000..7fbdf14e
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/service/smtp.pp
@@ -0,0 +1,13 @@
+class site_shorewall::service::smtp {
+
+ include site_shorewall::defaults
+
+ shorewall::rule {
+ 'fw2net-http':
+ source => '$FW',
+ destination => 'net',
+ action => 'SMTP(ACCEPT)',
+ order => 200;
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/soledad.pp b/puppet/modules/site_shorewall/manifests/soledad.pp
new file mode 100644
index 00000000..518d8689
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/soledad.pp
@@ -0,0 +1,23 @@
+class site_shorewall::soledad {
+
+ $soledad = hiera('soledad')
+ $soledad_port = $soledad['port']
+
+ include site_shorewall::defaults
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_soledad':
+ content => "PARAM - - tcp ${soledad_port}",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+ shorewall::rule {
+ 'net2fw-soledad':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_soledad(ACCEPT)',
+ order => 200;
+ }
+}
+
diff --git a/puppet/modules/site_shorewall/manifests/sshd.pp b/puppet/modules/site_shorewall/manifests/sshd.pp
index a8e09e42..88b4102c 100644
--- a/puppet/modules/site_shorewall/manifests/sshd.pp
+++ b/puppet/modules/site_shorewall/manifests/sshd.pp
@@ -21,4 +21,10 @@ class site_shorewall::sshd {
action => 'leap_sshd(ACCEPT)',
order => 200;
}
+
+ # set up a routestopped rule to allow ssh when shorewall is stopped
+ shorewall::routestopped { $site_config::params::interface:
+ options => "- tcp ${ssh_port}"
+ }
+
}
diff --git a/puppet/modules/site_squid_deb_proxy/manifests/client.pp b/puppet/modules/site_squid_deb_proxy/manifests/client.pp
new file mode 100644
index 00000000..27844270
--- /dev/null
+++ b/puppet/modules/site_squid_deb_proxy/manifests/client.pp
@@ -0,0 +1,5 @@
+class site_squid_deb_proxy::client {
+ include squid_deb_proxy::client
+ include site_shorewall::defaults
+ include shorewall::rules::mdns
+}
diff --git a/puppet/modules/site_sshd/manifests/authorized_keys.pp b/puppet/modules/site_sshd/manifests/authorized_keys.pp
index c18f691c..f36fe20f 100644
--- a/puppet/modules/site_sshd/manifests/authorized_keys.pp
+++ b/puppet/modules/site_sshd/manifests/authorized_keys.pp
@@ -1,4 +1,7 @@
define site_sshd::authorized_keys ($keys, $ensure = 'present', $home = '') {
+ # We use a custom define here to deploy the authorized_keys file
+ # because puppet doesn't allow purging before populating this file
+ # (see https://tickets.puppetlabs.com/browse/PUP-1174)
# This line allows default homedir based on $title variable.
# If $home is empty, the default is used.
$homedir = $home ? {'' => "/home/${title}", default => $home}
diff --git a/puppet/modules/site_sshd/manifests/init.pp b/puppet/modules/site_sshd/manifests/init.pp
index 90dd2d0e..d9bc1d51 100644
--- a/puppet/modules/site_sshd/manifests/init.pp
+++ b/puppet/modules/site_sshd/manifests/init.pp
@@ -1,5 +1,6 @@
class site_sshd {
$ssh = hiera_hash('ssh')
+ $hosts = hiera('hosts', '')
##
## SETUP AUTHORIZED KEYS
@@ -12,6 +13,23 @@ class site_sshd {
}
##
+ ## SETUP KNOWN HOSTS and SSH_CONFIG
+ ##
+
+ file {
+ '/etc/ssh/ssh_known_hosts':
+ owner => root,
+ group => root,
+ mode => '0644',
+ content => template('site_sshd/ssh_known_hosts.erb');
+ '/etc/ssh/ssh_config':
+ owner => root,
+ group => root,
+ mode => '0644',
+ content => template('site_sshd/ssh_config.erb');
+ }
+
+ ##
## OPTIONAL MOSH SUPPORT
##
diff --git a/puppet/modules/site_sshd/templates/authorized_keys.erb b/puppet/modules/site_sshd/templates/authorized_keys.erb
index 3c65e8ab..69f4d8e6 100644
--- a/puppet/modules/site_sshd/templates/authorized_keys.erb
+++ b/puppet/modules/site_sshd/templates/authorized_keys.erb
@@ -2,5 +2,9 @@
# all manually added keys will be overridden
<% keys.sort.each do |user, hash| -%>
+<% if user == 'monitor' -%>
+command="/usr/bin/check_mk_agent",no-port-forwarding,no-x11-forwarding,no-agent-forwarding,no-pty,no-user-rc, <%=hash['type']-%> <%=hash['key']%> <%=user%>
+<% else -%>
<%=hash['type']-%> <%=hash['key']%> <%=user%>
+<% end -%>
<% end -%>
diff --git a/puppet/modules/site_sshd/templates/ssh_config.erb b/puppet/modules/site_sshd/templates/ssh_config.erb
new file mode 100644
index 00000000..7e967413
--- /dev/null
+++ b/puppet/modules/site_sshd/templates/ssh_config.erb
@@ -0,0 +1,23 @@
+# This file is generated by Puppet
+# This is the ssh client system-wide configuration file. See
+# ssh_config(5) for more information. This file provides defaults for
+# users, and the values can be changed in per-user configuration files
+# or on the command line.
+
+Host *
+ SendEnv LANG LC_*
+ HashKnownHosts yes
+ GSSAPIAuthentication yes
+ GSSAPIDelegateCredentials no
+<% if scope.lookupvar('::site_config::params::environment') == 'local' -%>
+ #
+ # Vagrant nodes should have strict host key checking
+ # turned off. The problem is that the host key for a vagrant
+ # node is specific to the particular instance of the vagrant
+ # node you have running locally. For this reason, we can't
+ # track the host keys, or your host key for vpn1 would conflict
+ # with my host key for vpn1.
+ #
+ StrictHostKeyChecking no
+<% end -%>
+
diff --git a/puppet/modules/site_sshd/templates/ssh_known_hosts.erb b/puppet/modules/site_sshd/templates/ssh_known_hosts.erb
new file mode 100644
index 00000000..002ab732
--- /dev/null
+++ b/puppet/modules/site_sshd/templates/ssh_known_hosts.erb
@@ -0,0 +1,7 @@
+# This file is generated by Puppet
+
+<% @hosts.sort.each do |name, hash| -%>
+<% if hash['host_pub_key'] -%>
+<%= name%>,<%=hash['domain_full']%>,<%=hash['domain_internal']%>,<%=hash['ip_address']%> <%=hash['host_pub_key']%>
+<% end -%>
+<% end -%>
diff --git a/puppet/modules/site_static/README b/puppet/modules/site_static/README
new file mode 100644
index 00000000..bc719782
--- /dev/null
+++ b/puppet/modules/site_static/README
@@ -0,0 +1,3 @@
+Deploy one or more static websites to a node.
+
+For now, it only supports `amber`-based static sites. Plain HTML and Jekyll should be supported in the future.
diff --git a/puppet/modules/site_static/manifests/domain.pp b/puppet/modules/site_static/manifests/domain.pp
new file mode 100644
index 00000000..8af2230f
--- /dev/null
+++ b/puppet/modules/site_static/manifests/domain.pp
@@ -0,0 +1,28 @@
+define site_static::domain (
+ $locations,
+ $ca_cert,
+ $key,
+ $cert,
+ $tls_only) {
+
+ $domain = $name
+ $base_dir = '/srv/static'
+
+ create_resources(site_static::location, $locations)
+
+ x509::cert { $domain: content => $cert }
+ x509::key { $domain: content => $key }
+ x509::ca { "${domain}_ca": content => $ca_cert }
+
+ class { '::apache': no_default_site => true, ssl => true }
+ include site_apache::module::headers
+ include site_apache::module::alias
+ include site_apache::module::expires
+ include site_apache::module::removeip
+ include site_apache::module::rewrite
+
+ apache::vhost::file { $domain:
+ content => template('site_static/apache.conf.erb')
+ }
+
+}
diff --git a/puppet/modules/site_static/manifests/init.pp b/puppet/modules/site_static/manifests/init.pp
new file mode 100644
index 00000000..91a4a7a9
--- /dev/null
+++ b/puppet/modules/site_static/manifests/init.pp
@@ -0,0 +1,17 @@
+class site_static {
+ tag 'leap_service'
+ $static = hiera('static')
+ $domains = $static['domains']
+ $formats = $static['formats']
+
+ if (member($formats, 'amber')) {
+ include site_config::ruby::dev
+ rubygems::gem{'amber': }
+ }
+
+ create_resources(site_static::domain, $domains)
+
+ include site_shorewall::defaults
+ include site_shorewall::service::http
+ include site_shorewall::service::https
+} \ No newline at end of file
diff --git a/puppet/modules/site_static/manifests/location.pp b/puppet/modules/site_static/manifests/location.pp
new file mode 100644
index 00000000..1ba6807e
--- /dev/null
+++ b/puppet/modules/site_static/manifests/location.pp
@@ -0,0 +1,25 @@
+define site_static::location($path, $format, $source) {
+
+ $file_path = "/srv/static/${name}"
+
+ if ($format == 'amber') {
+ exec {"amber_build_${name}":
+ cwd => $file_path,
+ command => 'amber rebuild',
+ user => 'www-data',
+ timeout => 600,
+ subscribe => Vcsrepo[$file_path]
+ }
+ }
+
+ vcsrepo { $file_path:
+ ensure => present,
+ force => true,
+ revision => $source['revision'],
+ provider => $source['type'],
+ source => $source['repo'],
+ owner => 'www-data',
+ group => 'www-data'
+ }
+
+}
diff --git a/puppet/modules/site_static/templates/apache.conf.erb b/puppet/modules/site_static/templates/apache.conf.erb
new file mode 100644
index 00000000..2abe1a98
--- /dev/null
+++ b/puppet/modules/site_static/templates/apache.conf.erb
@@ -0,0 +1,77 @@
+<%-
+ ##
+ ## An apache config for static websites.
+ ##
+ def location_directory(name, location)
+ if location['format'] == 'amber'
+ File.join(@base_dir, name, 'public')
+ else
+ File.join(@base_dir, name)
+ end
+ end
+ document_root = '/var/www'
+ @locations.each do |name, location|
+ if location['path'] == '/'
+ document_root = location_directory(name, location)
+ end
+ end
+ document_root = document_root.gsub(%r{^/|/$}, '')
+-%>
+
+<VirtualHost *:80>
+ ServerName <%= @domain %>
+ ServerAlias www.<%= @domain %>
+ RewriteEngine On
+ RewriteRule ^.*$ https://<%= @domain -%>%{REQUEST_URI} [R=permanent,L]
+</VirtualHost>
+
+<VirtualHost *:443>
+ ServerName <%= @domain %>
+ ServerAlias www.<%= @domain %>
+
+ #RewriteLog "/var/log/apache2/rewrite.log"
+ #RewriteLogLevel 3
+
+ SSLEngine on
+ SSLProtocol all -SSLv2
+ SSLHonorCipherOrder on
+ SSLCompression off
+ SSLCipherSuite "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-ECDSA-RC4-SHA:AES128:AES256:RC4-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!3DES:!MD5:!PSK"
+
+ Header add Strict-Transport-Security: "max-age=15768000;includeSubdomains"
+ Header set X-Frame-Options "deny"
+
+ SSLCertificateKeyFile /etc/x509/keys/<%= @domain %>.key
+ SSLCertificateFile /etc/x509/certs/<%= @domain %>.crt
+ SSLCertificateChainFile /etc/ssl/certs/<%= @domain %>_ca.pem
+
+ RequestHeader set X_FORWARDED_PROTO 'https'
+
+ DocumentRoot "/<%= document_root %>/"
+ AccessFileName .htaccess
+
+<%- @locations.each do |name, location| -%>
+ <%- path = location['path'].gsub(%r{^/|/$}, '') -%>
+ <%- directory = location_directory(name, location) -%>
+ ##
+ ## <%= name %>
+ ##
+ <%- if path == '' -%>
+ <Directory "/<%= document_root %>/">
+ AllowOverride FileInfo Indexes Options=All,MultiViews
+ Order deny,allow
+ Allow from all
+ </Directory>
+ <%- else -%>
+ AliasMatch ^/[a-z]{2}/<%=path%>(/.+|/|)$ "/<%=directory%>/$1"
+ Alias /<%=path%> "/<%=directory%>/"
+ <Directory "/<%=directory%>/">
+ AllowOverride FileInfo Indexes Options=All,MultiViews
+ Order deny,allow
+ Allow from all
+ </Directory>
+ <%- end -%>
+
+<%- end -%>
+
+</VirtualHost>
diff --git a/puppet/modules/site_stunnel/manifests/clients.pp b/puppet/modules/site_stunnel/manifests/clients.pp
index ed766e1a..837665a3 100644
--- a/puppet/modules/site_stunnel/manifests/clients.pp
+++ b/puppet/modules/site_stunnel/manifests/clients.pp
@@ -21,6 +21,13 @@ define site_stunnel::clients (
verify => $verify,
pid => "/var/run/stunnel4/${pid}.pid",
rndfile => $rndfile,
- debuglevel => $debuglevel
+ debuglevel => $debuglevel,
+ require => [
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ];
+
}
+
+ include site_check_mk::agent::stunnel
}
diff --git a/puppet/modules/site_stunnel/manifests/setup.pp b/puppet/modules/site_stunnel/manifests/setup.pp
deleted file mode 100644
index 92eeb425..00000000
--- a/puppet/modules/site_stunnel/manifests/setup.pp
+++ /dev/null
@@ -1,24 +0,0 @@
-class site_stunnel::setup ($cert_name, $key, $cert, $ca_name, $ca) {
-
- include site_stunnel
-
- x509::key {
- $cert_name:
- content => $key,
- notify => Service['stunnel'];
- }
-
- x509::cert {
- $cert_name:
- content => $cert,
- notify => Service['stunnel'];
- }
-
- x509::ca {
- $ca_name:
- content => $ca,
- notify => Service['stunnel'];
- }
-
-}
-
diff --git a/puppet/modules/site_tor/manifests/init.pp b/puppet/modules/site_tor/manifests/init.pp
index 50ab636b..02368a0e 100644
--- a/puppet/modules/site_tor/manifests/init.pp
+++ b/puppet/modules/site_tor/manifests/init.pp
@@ -1,11 +1,12 @@
class site_tor {
tag 'leap_service'
+ Class['site_config::default'] -> Class['site_tor']
$tor = hiera('tor')
$bandwidth_rate = $tor['bandwidth_rate']
$tor_type = $tor['type']
$nickname = $tor['nickname']
- $contact_email = $tor['contacts']
+ $contact_emails = join($tor['contacts'],', ')
$address = hiera('ip_address')
@@ -13,17 +14,22 @@ class site_tor {
tor::daemon::relay { $nickname:
port => 9001,
address => $address,
- contact_info => $contact_email,
+ contact_info => obfuscate_email($contact_emails),
bandwidth_rate => $bandwidth_rate,
my_family => '$2A431444756B0E7228A7918C85A8DACFF7E3B050',
}
- tor::daemon::directory { $::hostname: port => 80 }
-
- include site_shorewall::tor
-
- if ( $tor_type != 'exit' ) {
+ if ( $tor_type == 'exit'){
+ tor::daemon::directory { $::hostname: port => 80 }
+ }
+ else {
+ tor::daemon::directory { $::hostname:
+ port => 80,
+ port_front_page => '';
+ }
include site_tor::disable_exit
}
+ include site_shorewall::tor
+
}
diff --git a/puppet/modules/site_webapp/files/migrate_design_documents b/puppet/modules/site_webapp/files/migrate_design_documents
deleted file mode 100644
index 6e24aa5b..00000000
--- a/puppet/modules/site_webapp/files/migrate_design_documents
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/sh
-
-cd /srv/leap/webapp
-
-# use admin credentials
-cp config/couchdb.yml.admin config/couchdb.yml
-chown leap-webapp:leap-webapp config/couchdb.yml
-
-# needs to be run twice
-RAILS_ENV=production /usr/bin/bundle exec rake couchrest:migrate
-RAILS_ENV=production /usr/bin/bundle exec rake couchrest:migrate
-
-# use user credentials and remove admin credentials
-cp config/couchdb.yml.webapp config/couchdb.yml
-chown leap-webapp:leap-webapp config/couchdb.yml
-
diff --git a/puppet/modules/site_webapp/manifests/apache.pp b/puppet/modules/site_webapp/manifests/apache.pp
index 8b340160..21243d34 100644
--- a/puppet/modules/site_webapp/manifests/apache.pp
+++ b/puppet/modules/site_webapp/manifests/apache.pp
@@ -4,61 +4,20 @@ class site_webapp::apache {
$api_domain = $web_api['domain']
$api_port = $web_api['port']
- $x509 = hiera('x509')
- $commercial_key = $x509['commercial_key']
- $commercial_cert = $x509['commercial_cert']
- $commercial_root = $x509['commercial_ca_cert']
- $api_key = $x509['key']
- $api_cert = $x509['cert']
- $api_root = $x509['ca_cert']
+ $web_domain = hiera('domain')
+ $domain_name = $web_domain['name']
- class { '::apache': no_default_site => true, ssl => true }
-
- apache::module {
- 'alias': ensure => present;
- 'rewrite': ensure => present;
- 'headers': ensure => present;
- }
+ include site_apache::common
+ include site_apache::module::headers
+ include site_apache::module::alias
+ include site_apache::module::expires
+ include site_apache::module::removeip
class { 'passenger': use_munin => false }
apache::vhost::file {
- 'leap_webapp':
- content => template('site_apache/vhosts.d/leap_webapp.conf.erb')
- }
-
- apache::vhost::file {
'api':
content => template('site_apache/vhosts.d/api.conf.erb')
}
- x509::key {
- 'leap_webapp':
- content => $commercial_key,
- notify => Service[apache];
-
- 'leap_api':
- content => $api_key,
- notify => Service[apache];
- }
-
- x509::cert {
- 'leap_webapp':
- content => $commercial_cert,
- notify => Service[apache];
-
- 'leap_api':
- content => $api_cert,
- notify => Service[apache];
- }
-
- x509::ca {
- 'leap_webapp':
- content => $commercial_root,
- notify => Service[apache];
-
- 'leap_api':
- content => $api_root,
- notify => Service[apache];
- }
}
diff --git a/puppet/modules/site_webapp/manifests/client_ca.pp b/puppet/modules/site_webapp/manifests/client_ca.pp
deleted file mode 100644
index 0d9b15d6..00000000
--- a/puppet/modules/site_webapp/manifests/client_ca.pp
+++ /dev/null
@@ -1,25 +0,0 @@
-##
-## This is for the special CA that is used exclusively for generating
-## client certificates by the webapp.
-##
-
-class site_webapp::client_ca {
- include x509::variables
-
- $x509 = hiera('x509')
- $cert_path = "${x509::variables::certs}/leap_client_ca.crt"
- $key_path = "${x509::variables::keys}/leap_client_ca.key"
-
- x509::key {
- 'leap_client_ca':
- source => $x509['client_ca_key'],
- group => 'leap-webapp',
- notify => Service[apache];
- }
-
- x509::cert {
- 'leap_client_ca':
- source => $x509['client_ca_cert'],
- notify => Service[apache];
- }
-}
diff --git a/puppet/modules/site_webapp/manifests/couchdb.pp b/puppet/modules/site_webapp/manifests/couchdb.pp
index b4ef0980..ff743fba 100644
--- a/puppet/modules/site_webapp/manifests/couchdb.pp
+++ b/puppet/modules/site_webapp/manifests/couchdb.pp
@@ -4,8 +4,6 @@ class site_webapp::couchdb {
# haproxy listener on port localhost:4096, see site_webapp::haproxy
$couchdb_host = 'localhost'
$couchdb_port = '4096'
- $couchdb_admin_user = $webapp['couchdb_admin_user']['username']
- $couchdb_admin_password = $webapp['couchdb_admin_user']['password']
$couchdb_webapp_user = $webapp['couchdb_webapp_user']['username']
$couchdb_webapp_password = $webapp['couchdb_webapp_user']['password']
@@ -14,65 +12,38 @@ class site_webapp::couchdb {
$couch_client_connect = $couch_client['connect']
include x509::variables
- $x509 = hiera('x509')
- $key = $x509['key']
- $cert = $x509['cert']
- $ca = $x509['ca_cert']
- $cert_name = 'leap_couchdb'
- $ca_name = 'leap_ca'
- $ca_path = "${x509::variables::local_CAs}/${ca_name}.crt"
- $cert_path = "${x509::variables::certs}/${cert_name}.crt"
- $key_path = "${x509::variables::keys}/${cert_name}.key"
file {
- '/srv/leap/webapp/config/couchdb.yml.admin':
- content => template('site_webapp/couchdb.yml.admin.erb'),
+ '/srv/leap/webapp/config/couchdb.yml':
+ content => template('site_webapp/couchdb.yml.erb'),
owner => leap-webapp,
group => leap-webapp,
mode => '0600',
require => Vcsrepo['/srv/leap/webapp'];
- '/srv/leap/webapp/config/couchdb.yml.webapp':
- content => template('site_webapp/couchdb.yml.erb'),
+ '/srv/leap/webapp/log':
+ ensure => directory,
owner => leap-webapp,
group => leap-webapp,
- mode => '0600',
+ mode => '0755',
require => Vcsrepo['/srv/leap/webapp'];
- '/srv/leap/webapp/logs/production.log':
+ '/srv/leap/webapp/log/production.log':
+ ensure => present,
owner => leap-webapp,
group => leap-webapp,
mode => '0666',
require => Vcsrepo['/srv/leap/webapp'];
-
- '/usr/local/sbin/migrate_design_documents':
- source => 'puppet:///modules/site_webapp/migrate_design_documents',
- owner => root,
- group => root,
- mode => '0744';
}
- class { 'site_stunnel::setup':
- cert_name => $cert_name,
- key => $key,
- cert => $cert,
- ca_name => $ca_name,
- ca => $ca
- }
-
- exec { 'migrate_design_documents':
- cwd => '/srv/leap/webapp',
- command => '/usr/local/sbin/migrate_design_documents',
- require => Exec['bundler_update'],
- notify => Service['apache'];
- }
+ include site_stunnel
$couchdb_stunnel_client_defaults = {
'connect_port' => $couch_client_connect,
- 'client' => true,
- 'cafile' => $ca_path,
- 'key' => $key_path,
- 'cert' => $cert_path,
+ 'client' => true,
+ 'cafile' => "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt",
+ 'key' => "${x509::variables::keys}/${site_config::params::cert_name}.key",
+ 'cert' => "${x509::variables::certs}/${site_config::params::cert_name}.crt",
}
create_resources(site_stunnel::clients, $couch_client, $couchdb_stunnel_client_defaults)
diff --git a/puppet/modules/site_webapp/manifests/cron.pp b/puppet/modules/site_webapp/manifests/cron.pp
new file mode 100644
index 00000000..811ad11d
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/cron.pp
@@ -0,0 +1,17 @@
+class site_webapp::cron {
+
+ # cron tasks that need to be performed to cleanup the database
+ cron {
+ 'remove_expired_sessions':
+ command => 'cd /srv/leap/webapp && bundle exec rake cleanup:sessions',
+ environment => 'RAILS_ENV=production',
+ hour => 2,
+ minute => 30;
+
+ 'remove_expired_tokens':
+ command => 'cd /srv/leap/webapp && bundle exec rake cleanup:tokens',
+ environment => 'RAILS_ENV=production',
+ hour => 3,
+ minute => 0;
+ }
+}
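These two resources should land in root's crontab as entries roughly like the following (the exact comment markers and ordering are Puppet's; this is only a sketch):

    # Puppet Name: remove_expired_sessions
    RAILS_ENV=production
    30 2 * * * cd /srv/leap/webapp && bundle exec rake cleanup:sessions
    # Puppet Name: remove_expired_tokens
    RAILS_ENV=production
    0 3 * * * cd /srv/leap/webapp && bundle exec rake cleanup:tokens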
diff --git a/puppet/modules/site_webapp/manifests/haproxy.pp b/puppet/modules/site_webapp/manifests/haproxy.pp
index 4a7e3c25..b69c69da 100644
--- a/puppet/modules/site_webapp/manifests/haproxy.pp
+++ b/puppet/modules/site_webapp/manifests/haproxy.pp
@@ -3,7 +3,6 @@ class site_webapp::haproxy {
include site_haproxy
$haproxy = hiera('haproxy')
- $local_ports = $haproxy['local_ports']
# Template uses $global_options, $defaults_options
concat::fragment { 'leap_haproxy_webapp_couchdb':
diff --git a/puppet/modules/site_webapp/manifests/init.pp b/puppet/modules/site_webapp/manifests/init.pp
index e743dc07..d02a7261 100644
--- a/puppet/modules/site_webapp/manifests/init.pp
+++ b/puppet/modules/site_webapp/manifests/init.pp
@@ -11,11 +11,18 @@ class site_webapp {
$api_version = $webapp['api_version']
$secret_token = $webapp['secret_token']
- include site_config::ruby
+ Class['site_config::default'] -> Class['site_webapp']
+
+ include site_config::ruby::dev
include site_webapp::apache
include site_webapp::couchdb
- include site_webapp::client_ca
include site_webapp::haproxy
+ include site_webapp::cron
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include site_config::x509::client_ca::ca
+ include site_config::x509::client_ca::key
group { 'leap-webapp':
ensure => present,
@@ -31,19 +38,12 @@ class site_webapp {
require => [ Group['leap-webapp'] ];
}
- file { '/srv/leap/webapp':
- ensure => directory,
- owner => 'leap-webapp',
- group => 'leap-webapp',
- require => User['leap-webapp'];
- }
-
vcsrepo { '/srv/leap/webapp':
ensure => present,
force => true,
- revision => 'origin/master',
+ revision => $webapp['git']['revision'],
provider => git,
- source => 'git://code.leap.se/leap_web',
+ source => $webapp['git']['source'],
owner => 'leap-webapp',
group => 'leap-webapp',
require => [ User['leap-webapp'], Group['leap-webapp'] ],
@@ -56,38 +56,58 @@ class site_webapp {
unless => '/usr/bin/bundle check',
user => 'leap-webapp',
timeout => 600,
- require => [ Class['bundler::install'], Vcsrepo['/srv/leap/webapp'] ],
+ require => [
+ Class['bundler::install'],
+ Vcsrepo['/srv/leap/webapp'],
+ Class['site_config::ruby::dev'],
+ Service['shorewall'] ],
notify => Service['apache'];
}
+ #
+ # NOTE: in order to support a webapp that is running on a subpath rather than
+ # the root of the domain, assets:precompile needs to be run with
+ # RAILS_RELATIVE_URL_ROOT=/application-root
+ #
+
exec { 'compile_assets':
- cwd => '/srv/leap/webapp',
- command => '/bin/bash -c "/usr/bin/bundle exec rake assets:precompile"',
- user => 'leap-webapp',
- require => Exec['bundler_update'],
- notify => Service['apache'];
+ cwd => '/srv/leap/webapp',
+ command => '/bin/bash -c "RAILS_ENV=production /usr/bin/bundle exec rake assets:precompile"',
+ user => 'leap-webapp',
+ logoutput => on_failure,
+ require => Exec['bundler_update'],
+ notify => Service['apache'];
}
file {
- '/srv/leap/webapp/public/provider.json':
+ '/srv/leap/webapp/config/provider':
+ ensure => directory,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0755';
+
+ '/srv/leap/webapp/config/provider/provider.json':
content => $provider,
require => Vcsrepo['/srv/leap/webapp'],
owner => leap-webapp, group => leap-webapp, mode => '0644';
+ # old provider.json location. this can be removed after everyone upgrades.
+ '/srv/leap/webapp/public/provider.json':
+ ensure => absent;
+
'/srv/leap/webapp/public/ca.crt':
ensure => link,
require => Vcsrepo['/srv/leap/webapp'],
- target => '/usr/local/share/ca-certificates/leap_api.crt';
+ target => "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt";
"/srv/leap/webapp/public/${api_version}":
- ensure => directory,
+ ensure => directory,
require => Vcsrepo['/srv/leap/webapp'],
- owner => leap-webapp, group => leap-webapp, mode => '0755';
+ owner => leap-webapp, group => leap-webapp, mode => '0755';
"/srv/leap/webapp/public/${api_version}/config/":
- ensure => directory,
+ ensure => directory,
require => Vcsrepo['/srv/leap/webapp'],
- owner => leap-webapp, group => leap-webapp, mode => '0755';
+ owner => leap-webapp, group => leap-webapp, mode => '0755';
"/srv/leap/webapp/public/${api_version}/config/eip-service.json":
content => $eip_service,
@@ -106,25 +126,24 @@ class site_webapp {
}
try::file {
- '/srv/leap/webapp/public/favicon.ico':
- ensure => 'link',
- require => Vcsrepo['/srv/leap/webapp'],
- target => $webapp['favicon'];
-
- '/srv/leap/webapp/app/assets/stylesheets/tail.scss':
- ensure => 'link',
- require => Vcsrepo['/srv/leap/webapp'],
- target => $webapp['tail_scss'];
-
- '/srv/leap/webapp/app/assets/stylesheets/head.scss':
- ensure => 'link',
+ '/srv/leap/webapp/config/customization':
+ ensure => directory,
+ recurse => true,
+ purge => true,
+ force => true,
+ owner => leap-webapp,
+ group => leap-webapp,
+ mode => 'u=rwX,go=rX',
require => Vcsrepo['/srv/leap/webapp'],
- target => $webapp['head_scss'];
+ notify => Exec['compile_assets'],
+ source => $webapp['customization_dir'];
+ }
- '/srv/leap/webapp/public/img':
- ensure => 'link',
+ git::changes {
+ 'public/favicon.ico':
+ cwd => '/srv/leap/webapp',
require => Vcsrepo['/srv/leap/webapp'],
- target => $webapp['img_dir'];
+ user => 'leap-webapp';
}
file {
@@ -138,5 +157,5 @@ class site_webapp {
}
include site_shorewall::webapp
-
+ include site_check_mk::agent::webapp
}
diff --git a/puppet/modules/site_webapp/templates/config.yml.erb b/puppet/modules/site_webapp/templates/config.yml.erb
index df562cd9..98f8564e 100644
--- a/puppet/modules/site_webapp/templates/config.yml.erb
+++ b/puppet/modules/site_webapp/templates/config.yml.erb
@@ -1,9 +1,10 @@
<%- cert_options = @webapp['client_certificates'] -%>
production:
- admins: [admin]
+ admins: <%= @webapp['admins'].inspect %>
domain: <%= @provider_domain %>
- client_ca_key: <%= scope.lookupvar('site_webapp::client_ca::key_path') %>
- client_ca_cert: <%= scope.lookupvar('site_webapp::client_ca::cert_path') %>
+ force_ssl: <%= @webapp['secure'] %>
+ client_ca_key: <%= scope.lookupvar('x509::variables::keys') %>/<%= scope.lookupvar('site_config::params::client_ca_name') %>.key
+ client_ca_cert: <%= scope.lookupvar('x509::variables::local_CAs') %>/<%= scope.lookupvar('site_config::params::client_ca_name') %>.crt
secret_token: "<%= @secret_token %>"
client_cert_lifespan: <%= cert_options['life_span'].to_i %>
client_cert_bit_size: <%= cert_options['bit_size'].to_i %>
@@ -13,3 +14,4 @@ production:
allow_anonymous_certs: <%= @webapp['allow_anonymous_certs'].inspect %>
limited_cert_prefix: "<%= cert_options['limited_prefix'] %>"
unlimited_cert_prefix: "<%= cert_options['unlimited_prefix'] %>"
+ minimum_client_version: "<%= @webapp['client_version']['min'] %>"
diff --git a/puppet/modules/site_webapp/templates/couchdb.yml.admin.erb b/puppet/modules/site_webapp/templates/couchdb.yml.admin.erb
deleted file mode 100644
index a0921add..00000000
--- a/puppet/modules/site_webapp/templates/couchdb.yml.admin.erb
+++ /dev/null
@@ -1,9 +0,0 @@
-production:
- prefix: ""
- protocol: 'http'
- host: <%= @couchdb_host %>
- port: <%= @couchdb_port %>
- auto_update_design_doc: false
- username: <%= @couchdb_admin_user %>
- password: <%= @couchdb_admin_password %>
-
diff --git a/puppet/modules/site_webapp/templates/haproxy_couchdb.cfg.erb b/puppet/modules/site_webapp/templates/haproxy_couchdb.cfg.erb
index f08161ee..1fa01b96 100644
--- a/puppet/modules/site_webapp/templates/haproxy_couchdb.cfg.erb
+++ b/puppet/modules/site_webapp/templates/haproxy_couchdb.cfg.erb
@@ -1,16 +1,23 @@
listen bigcouch-in
- mode http
+ mode http
balance roundrobin
- option httplog
- option dontlognull
- option httpchk GET /
- option http-server-close
-
+ option httplog
+ option dontlognull
+ option httpchk GET / # health check using simple get to root
+ option http-server-close # use client keep-alive, but close server connection.
+ option allbackups # balance among all backups, not just one.
+
bind localhost:4096
-<% for port in @local_ports -%>
- server couchdb_<%=port%> localhost:<%=port%> check inter 3000 fastinter 1000 downinter 1000 rise 2 fall 1
-<% end -%>
+ default-server inter 3000 fastinter 1000 downinter 1000 rise 2 fall 1
+
+<%- if @haproxy['servers'] -%>
+<%- @haproxy['servers'].sort.each do |name,server| -%>
+<%- backup = server['backup'] ? 'backup' : '' -%>
+ # <%=name%>
+ server couchdb_<%=server['port']%> <%=server['host']%>:<%=server['port']%> <%=backup%> weight <%=server['weight']%> check
+<%- end -%>
+<%- end -%>
diff --git a/puppet/modules/soledad/manifests/common.pp b/puppet/modules/soledad/manifests/common.pp
new file mode 100644
index 00000000..8a1d664a
--- /dev/null
+++ b/puppet/modules/soledad/manifests/common.pp
@@ -0,0 +1,10 @@
+class soledad::common {
+
+ include soledad
+
+ package { 'soledad-common':
+ ensure => latest,
+ require => User['soledad']
+ }
+
+}
diff --git a/puppet/modules/soledad/manifests/init.pp b/puppet/modules/soledad/manifests/init.pp
new file mode 100644
index 00000000..7cf0b729
--- /dev/null
+++ b/puppet/modules/soledad/manifests/init.pp
@@ -0,0 +1,29 @@
+class soledad {
+
+ group { 'soledad':
+ ensure => present,
+ allowdupe => false;
+ }
+
+ user { 'soledad':
+ ensure => present,
+ allowdupe => false,
+ gid => 'soledad',
+ home => '/srv/leap/soledad',
+ require => Group['soledad'];
+ }
+
+ file {
+ '/srv/leap/soledad':
+ ensure => directory,
+ owner => 'soledad',
+ group => 'soledad',
+ require => User['soledad'];
+
+ '/var/lib/soledad':
+ ensure => directory,
+ owner => 'soledad',
+ group => 'soledad',
+ require => User['soledad'];
+ }
+}
diff --git a/puppet/modules/soledad/manifests/server.pp b/puppet/modules/soledad/manifests/server.pp
new file mode 100644
index 00000000..394e6032
--- /dev/null
+++ b/puppet/modules/soledad/manifests/server.pp
@@ -0,0 +1,63 @@
+class soledad::server {
+ tag 'leap_service'
+ include soledad
+ include site_apt::preferences::twisted
+
+ $soledad = hiera('soledad')
+ $couchdb_user = $soledad['couchdb_soledad_user']['username']
+ $couchdb_password = $soledad['couchdb_soledad_user']['password']
+
+ $couchdb_host = 'localhost'
+ $couchdb_port = '5984'
+
+ $soledad_port = $soledad['port']
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+
+ #
+ # SOLEDAD CONFIG
+ #
+
+ file { '/etc/leap/soledad-server.conf':
+ content => template('soledad/soledad-server.conf.erb'),
+ owner => 'soledad',
+ group => 'soledad',
+ mode => '0600',
+ notify => Service['soledad-server'],
+ require => Class['soledad'];
+ }
+
+ package { 'soledad-server':
+ ensure => latest,
+ require => [
+ Class['site_apt::preferences::twisted'],
+ Class['site_apt::leap_repo'] ];
+ }
+
+ file { '/etc/default/soledad':
+ content => template('soledad/default-soledad.erb'),
+ owner => 'soledad',
+ group => 'soledad',
+ mode => '0600',
+ notify => Service['soledad-server'],
+ require => Class['soledad'];
+ }
+
+ service { 'soledad-server':
+ ensure => running,
+ enable => true,
+ hasstatus => true,
+ hasrestart => true,
+ require => Class['soledad'],
+ subscribe => [
+ Package['soledad-server'],
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ];
+ }
+
+ include site_shorewall::soledad
+ include site_check_mk::agent::soledad
+}
diff --git a/puppet/modules/soledad/templates/default-soledad.erb b/puppet/modules/soledad/templates/default-soledad.erb
new file mode 100644
index 00000000..32504e38
--- /dev/null
+++ b/puppet/modules/soledad/templates/default-soledad.erb
@@ -0,0 +1,5 @@
+# this file is managed by puppet
+START=yes
+CERT_PATH=<%= scope.lookupvar('x509::variables::certs') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.crt
+PRIVKEY_PATH=<%= scope.lookupvar('x509::variables::keys') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.key
+HTTPS_PORT=<%=@soledad_port%>
diff --git a/puppet/modules/soledad/templates/soledad-server.conf.erb b/puppet/modules/soledad/templates/soledad-server.conf.erb
new file mode 100644
index 00000000..47d1f6e4
--- /dev/null
+++ b/puppet/modules/soledad/templates/soledad-server.conf.erb
@@ -0,0 +1,3 @@
+[soledad-server]
+couch_url = http://<%= @couchdb_user %>:<%= @couchdb_password %>@<%= @couchdb_host %>:<%= @couchdb_port %>
+
diff --git a/puppet/modules/squid_deb_proxy b/puppet/modules/squid_deb_proxy
new file mode 160000
+Subproject e796aac43aa9781069e167459253d040504c209
diff --git a/puppet/modules/sshd b/puppet/modules/sshd
-Subproject bd2e283ab59430a7b3194804f1c8da7a9b58f8f
+Subproject 1eabfe1b590f6663c2558f949408a08fc5f58fa
diff --git a/puppet/modules/stdlib b/puppet/modules/stdlib
-Subproject 66e0fa8f1bc5062e9d753598ad17602c378a299
+Subproject 71cb0f4c2c3bf95f62c9f189f5cef155b09a968
diff --git a/puppet/modules/stunnel b/puppet/modules/stunnel
-Subproject fc1589a5f09d80f58d730d4e1f6a8058483f61f
+Subproject ec49fd93c2469bc5c13f7e6a7d25468613e1b84
diff --git a/puppet/modules/sysctl b/puppet/modules/sysctl
new file mode 160000
+Subproject 975852b7acc1125b4cd9d4d490b9abd8d31217e
diff --git a/puppet/modules/tapicero/files/tapicero.init b/puppet/modules/tapicero/files/tapicero.init
new file mode 100755
index 00000000..7a9af45f
--- /dev/null
+++ b/puppet/modules/tapicero/files/tapicero.init
@@ -0,0 +1,60 @@
+#!/bin/sh
+
+### BEGIN INIT INFO
+# Provides: tapicero
+# Required-Start: $remote_fs $syslog
+# Required-Stop: $remote_fs $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: tapicero initscript
+# Description: Controls tapicero daemon
+### END INIT INFO
+
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+BUNDLER=/usr/bin/bundle
+NAME=tapicero
+HOME="/srv/leap"
+DAEMON="${HOME}/${NAME}/bin/${NAME}"
+BUNDLE_GEMFILE="${HOME}/${NAME}/Gemfile"
+
+export BUNDLE_GEMFILE
+
+# exit if the daemon doesn't exist
+[ -x "$DAEMON" ] || exit 0
+
+. /lib/init/vars.sh
+. /lib/lsb/init-functions
+
+if [ "$VERBOSE" != no ]; then
+ OPTIONS="--verbose"
+else
+ OPTIONS=""
+fi
+
+case "$1" in
+ start)
+ $BUNDLER exec $DAEMON start $OPTIONS
+ exit $?
+ ;;
+ stop)
+ $BUNDLER exec $DAEMON stop $OPTIONS
+ exit $?
+ ;;
+ restart)
+ $BUNDLER exec $DAEMON restart $OPTIONS
+ exit $?
+ ;;
+ reload)
+ $BUNDLER exec $DAEMON reload $OPTIONS
+ exit $?
+ ;;
+ status)
+ $BUNDLER exec $DAEMON status $OPTIONS
+ exit $?
+ ;;
+ *)
+ echo "Usage: /etc/init.d/$NAME {start|stop|reload|restart|status}"
+ exit 1
+esac
+
+exit 0
diff --git a/puppet/modules/tapicero/manifests/init.pp b/puppet/modules/tapicero/manifests/init.pp
new file mode 100644
index 00000000..743e8a84
--- /dev/null
+++ b/puppet/modules/tapicero/manifests/init.pp
@@ -0,0 +1,123 @@
+class tapicero {
+ tag 'leap_service'
+
+ $couchdb = hiera('couch')
+ $couchdb_port = $couchdb['port']
+
+ $couchdb_users = $couchdb['users']
+
+ $couchdb_admin_user = $couchdb_users['admin']['username']
+ $couchdb_admin_password = $couchdb_users['admin']['password']
+
+ $couchdb_soledad_user = $couchdb_users['soledad']['username']
+ $couchdb_leap_mx_user = $couchdb_users['leap_mx']['username']
+
+
+ Class['site_config::default'] -> Class['tapicero']
+
+ include site_config::ruby::dev
+
+ #
+ # USER AND GROUP
+ #
+
+ group { 'tapicero':
+ ensure => present,
+ allowdupe => false;
+ }
+
+ user { 'tapicero':
+ ensure => present,
+ allowdupe => false,
+ gid => 'tapicero',
+ home => '/srv/leap/tapicero',
+ require => Group['tapicero'];
+ }
+
+ #
+ # TAPICERO FILES
+ #
+
+ file {
+
+ ##
+ ## TAPICERO DIRECTORIES
+ ##
+
+ '/srv/leap/tapicero':
+ ensure => directory,
+ owner => 'tapicero',
+ group => 'tapicero',
+ require => User['tapicero'];
+
+ '/var/lib/leap/tapicero':
+ ensure => directory,
+ owner => 'tapicero',
+ group => 'tapicero',
+ require => User['tapicero'];
+
+ ##
+ ## TAPICERO CONFIG
+ ##
+
+ '/etc/leap/tapicero.yaml':
+ content => template('tapicero/tapicero.yaml.erb'),
+ owner => 'tapicero',
+ group => 'tapicero',
+ mode => '0600',
+ notify => Service['tapicero'];
+
+ ##
+ ## TAPICERO INIT
+ ##
+
+ '/etc/init.d/tapicero':
+ source => 'puppet:///modules/tapicero/tapicero.init',
+ owner => root,
+ group => 0,
+ mode => '0755',
+ require => Vcsrepo['/srv/leap/tapicero'];
+ }
+
+ #
+ # TAPICERO CODE
+ #
+
+ vcsrepo { '/srv/leap/tapicero':
+ ensure => present,
+ force => true,
+ revision => 'origin/master',
+ provider => git,
+ source => 'https://leap.se/git/tapicero',
+ owner => 'tapicero',
+ group => 'tapicero',
+ require => [ User['tapicero'], Group['tapicero'] ],
+ notify => Exec['tapicero_bundler_update']
+ }
+
+ exec { 'tapicero_bundler_update':
+ cwd => '/srv/leap/tapicero',
+ command => '/bin/bash -c "/usr/bin/bundle check || /usr/bin/bundle install --path vendor/bundle --without test development"',
+ unless => '/usr/bin/bundle check',
+ user => 'tapicero',
+ timeout => 600,
+ require => [
+ Class['bundler::install'],
+ Vcsrepo['/srv/leap/tapicero'],
+ Class['site_config::ruby::dev'] ],
+ notify => Service['tapicero'];
+ }
+
+ #
+ # TAPICERO DAEMON
+ #
+
+ service { 'tapicero':
+ ensure => running,
+ enable => true,
+ hasstatus => true,
+ hasrestart => true,
+ require => File['/etc/init.d/tapicero'];
+ }
+
+}
diff --git a/puppet/modules/tapicero/templates/tapicero.yaml.erb b/puppet/modules/tapicero/templates/tapicero.yaml.erb
new file mode 100644
index 00000000..8e19b22f
--- /dev/null
+++ b/puppet/modules/tapicero/templates/tapicero.yaml.erb
@@ -0,0 +1,42 @@
+#
+# Default configuration options for Tapicero
+#
+
+# couch connection configuration
+connection:
+ protocol: "http"
+ host: "localhost"
+ port: <%= @couchdb_port %>
+ username: <%= @couchdb_admin_user %>
+ password: <%= @couchdb_admin_password %>
+ prefix : ""
+ suffix : ""
+
+# file to store the last processed user record in so we can resume after
+# a restart:
+seq_file: "/var/lib/leap/tapicero/tapicero.seq"
+
+# Configure log_file like this if you want to log to a file instead of syslog:
+# log_file: "/var/leap/log/tapicero.log"
+log_level: info
+
+# tapicero specific options
+options:
+ # prefix for per user databases:
+ db_prefix: "user-"
+
+ # security settings to be used for the per user databases
+ security:
+ admins:
+ names:
+ # We explicitly allow the admin user to access per-user databases. Admin
+ # access ignores per-database security anyway; we just do this to be
+ # explicit about it.
+ - <%= @couchdb_admin_user %>
+ roles: []
+ readers:
+ names:
+ - <%= @couchdb_soledad_user %>
+ - <%= @couchdb_leap_mx_user %>
+ roles: []
+
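When tapicero applies these settings to a newly created per-user database, the security section above corresponds to a CouchDB _security document roughly like this (the usernames shown are placeholders; the real ones come from the provider's couch configuration):

    {
      "admins":  { "names": ["admin"],              "roles": [] },
      "readers": { "names": ["soledad", "leap_mx"], "roles": [] }
    }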
diff --git a/puppet/modules/tor b/puppet/modules/tor
-Subproject a780e84001177f10a86a7bf824589c0553f513a
+Subproject dcb6e748864e7dfd3c14f4f2aba4c9120f12b78
diff --git a/puppet/modules/try/manifests/file.pp b/puppet/modules/try/manifests/file.pp
index 47a8c269..cd1bb035 100644
--- a/puppet/modules/try/manifests/file.pp
+++ b/puppet/modules/try/manifests/file.pp
@@ -1,60 +1,112 @@
#
-# like built-in type "file", but gets gracefully ignored if the target does not exist or is undefined.
+# Works like the built-in type "file", but gets gracefully ignored if the target/source does not exist or is undefined.
#
-# /bin/true and /usr/bin/test are hardcoded to their paths in debian.
+# Also, if the source or target doesn't exist, and the destination is a git repo, then the file is restored from git.
+#
+# All executable paths are hardcoded to their paths in debian.
+#
+# known limitations:
+# * this is far too noisy
+# * $restore does not work for directories
+# * only file:// $source is supported
+# * $content is not supported, only $target or $source.
+# * does not auto-require all the parent directories like 'file' does
#
-
define try::file (
$ensure = undef,
$target = undef,
+ $source = undef,
+ $owner = undef,
+ $group = undef,
+ $recurse = undef,
+ $purge = undef,
+ $force = undef,
+ $mode = undef,
$restore = true) {
- if $target != undef {
- exec { "check_${name}":
- command => "/bin/true",
- onlyif => "/usr/bin/test -e '${target}'",
- loglevel => info;
+ # dummy exec to propagate requires:
+ # metaparameter 'require' will get triggered by this dummy exec
+ # so then we just need to depend on this to capture all requires.
+ # exec { $name: command => "/bin/true" }
+
+ exec {
+ "chmod_${name}":
+ command => "/bin/chmod -R ${mode} '${name}'",
+ onlyif => "/usr/bin/test $mode",
+ refreshonly => true,
+ loglevel => debug;
+ "chown_${name}":
+ command => "/bin/chown -R ${owner} '${name}'",
+ onlyif => "/usr/bin/test $owner",
+ refreshonly => true,
+ loglevel => debug;
+ "chgrp_${name}":
+ command => "/bin/chgrp -R ${group} '${name}'",
+ onlyif => "/usr/bin/test $group",
+ refreshonly => true,
+ loglevel => debug;
+ }
+
+ if $target {
+ exec { "symlink_${name}":
+ command => "/bin/ln -s ${target} ${name}",
+ onlyif => "/usr/bin/test -d '${target}'",
}
- file { "$name":
- ensure => $ensure,
- target => $target,
- require => $require ? {
- undef => Exec["check_${name}"],
- default => [ $require, Exec["check_${name}"] ]
- },
- loglevel => info;
+ } elsif $source {
+ if $ensure == 'directory' {
+ if $purge {
+ exec { "rsync_${name}":
+ command => "/usr/bin/rsync -r --delete '${source}/' '${name}'",
+ onlyif => "/usr/bin/test -d '${source}'",
+ unless => "/usr/bin/diff -rq '${source}' '${name}'",
+ notify => [Exec["chmod_${name}"], Exec["chown_${name}"], Exec["chgrp_${name}"]]
+ }
+ } else {
+ exec { "cp_r_${name}":
+ command => "/bin/cp -r '${source}' '${name}'",
+ onlyif => "/usr/bin/test -d '${source}'",
+ unless => "/usr/bin/diff -rq '${source}' '${name}'",
+ notify => [Exec["chmod_${name}"], Exec["chown_${name}"], Exec["chgrp_${name}"]]
+ }
+ }
+ } else {
+ exec { "cp_${name}":
+ command => "/bin/cp --remove-destination '${source}' '${name}'",
+ onlyif => "/usr/bin/test -e '${source}'",
+ unless => "/usr/bin/test ! -h '${name}' && /usr/bin/diff -q '${source}' '${name}'",
+ notify => [Exec["chmod_${name}"], Exec["chown_${name}"], Exec["chgrp_${name}"]]
+ }
}
}
#
- # if the target does not exist (or is undef), and the file happens to be in a git repo,
+ # if the target/source does not exist (or is undef), and the file happens to be in a git repo,
# then restore the file to its original state.
#
- if $target == undef or $restore {
+
+ if $target {
+ $target_or_source = $target
+ } else {
+ $target_or_source = $source
+ }
+
+ if ($target_or_source == undef) or $restore {
$file_basename = basename($name)
$file_dirname = dirname($name)
$command = "git rev-parse && unlink '${name}'; git checkout -- '${file_basename}' && chown --reference='${file_dirname}' '${name}'; true"
debug($command)
- if $target == undef {
+ if $target_or_source == undef {
exec { "restore_${name}":
command => $command,
cwd => $file_dirname,
- require => $require ? {
- undef => undef,
- default => [ $require ]
- },
loglevel => info;
}
} else {
exec { "restore_${name}":
- unless => "/usr/bin/test -e '${target}'",
+ unless => "/usr/bin/test -e '${target_or_source}'",
command => $command,
cwd => $file_dirname,
- require => $require ? {
- undef => undef,
- default => [ $require ]
- },
loglevel => info;
}
}
diff --git a/puppet/modules/vcsrepo b/puppet/modules/vcsrepo
-Subproject 4db1120c78763f5244dc6c9d2e0d064a6ef363e
+Subproject f92d09226cfddb0c7e5e342dd199d8ea05b497c
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 00000000..debbf700
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,12 @@
+This directory contains two kinds of tests:
+
+White Box Tests
+================================
+
+These tests are run on the server as superuser. They are for troubleshooting any problems with the internal setup of the server.
+
+Black Box Tests
+================================
+
+These tests are run on the user's local machine. They are for troubleshooting any external problems with the service exposed by the server.
+
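+As a rough sketch (the service name and daemon path below are illustrative,
+not an actual test shipped in this directory), a white box test file guards
+itself with `SkipTest` and defines a `LeapTest` subclass:
+
+    # skip unless this node actually runs the service under test
+    raise SkipTest unless $node["services"].include?("example")
+
+    class Example < LeapTest
+      depends_on "Network"
+
+      def test_01_Is_daemon_running?
+        assert_running '/usr/sbin/exampled'
+        pass
+      end
+    end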
diff --git a/tests/order.rb b/tests/order.rb
new file mode 100644
index 00000000..ffa6ae4e
--- /dev/null
+++ b/tests/order.rb
@@ -0,0 +1,15 @@
+class LeapCli::Config::Node
+ #
+ # returns a list of node names that should be tested before this node.
+ # make sure to not return ourselves (please no dependency loops!).
+ #
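+  # e.g. on a webapp, mx, or soledad node this returns the names of the
+  # couchdb nodes (hypothetical node names: "couch1", "couch2"), so that
+  # couchdb is tested first; on a couchdb node it returns an empty list.
+  #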
+ def test_dependencies
+ dependents = LeapCli::Config::ObjectList.new
+ unless services.include?('couchdb')
+ if services.include?('webapp') || services.include?('mx') || services.include?('soledad')
+ dependents.merge! nodes_like_me[:services => 'couchdb']
+ end
+ end
+ dependents.keys.delete_if {|name| self.name == name}
+ end
+end
\ No newline at end of file
diff --git a/tests/white-box/couchdb.rb b/tests/white-box/couchdb.rb
new file mode 100644
index 00000000..9d5da94f
--- /dev/null
+++ b/tests/white-box/couchdb.rb
@@ -0,0 +1,109 @@
+raise SkipTest unless $node["services"].include?("couchdb")
+
+require 'json'
+
+class CouchDB < LeapTest
+ depends_on "Network"
+
+ def setup
+ end
+
+ def test_00_Are_daemons_running?
+ assert_running 'tapicero'
+ assert_running 'bin/beam'
+ assert_running 'bin/epmd'
+ pass
+ end
+
+ #
+ # check to make sure we can get welcome response from local couchdb
+ #
+ def test_01_Is_CouchDB_running?
+ assert_get(couchdb_url) do |body|
+ assert_match /"couchdb":"Welcome"/, body, "Could not get welcome message from #{couchdb_url}. Probably couchdb is not running."
+ end
+ pass
+ end
+
+ #
+ # compare the configured nodes to the nodes that are actually listed in bigcouch
+ #
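+  # each row id in /nodes/_all_docs looks like "bigcouch@<hostname>"
+  # (e.g. "bigcouch@couch1.example.i", a hypothetical name), which is why the
+  # prefix gets stripped before comparing against the configured neighbors.
+  #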
+ def test_02_Is_cluster_membership_ok?
+ url = couchdb_backend_url("/nodes/_all_docs")
+ neighbors = assert_property('couch.bigcouch.neighbors')
+ neighbors << assert_property('domain.full')
+ neighbors.sort!
+ assert_get(url) do |body|
+ response = JSON.parse(body)
+ nodes_in_db = response['rows'].collect{|row| row['id'].sub(/^bigcouch@/, '')}.sort
+ assert_equal neighbors, nodes_in_db, "The couchdb replication node list is wrong (/nodes/_all_docs)"
+ end
+ pass
+ end
+
+ #
+ # all configured nodes are in 'cluster_nodes'
+ # all nodes online and communicating are in 'all_nodes'
+ #
+ # this seems backward to me, so it might be the other way around.
+ #
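+  # illustrative /_membership response (hostnames are hypothetical):
+  #   {"all_nodes":     ["bigcouch@couch1.example.i"],
+  #    "cluster_nodes": ["bigcouch@couch1.example.i", "bigcouch@couch2.example.i"]}
+  # in that case couch2 would be warned about as configured but not available.
+  #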
+ def test_03_Are_configured_nodes_online?
+ url = couchdb_url("/_membership")
+ assert_get(url) do |body|
+ response = JSON.parse(body)
+ nodes_configured_but_not_available = response['cluster_nodes'] - response['all_nodes']
+ nodes_available_but_not_configured = response['all_nodes'] - response['cluster_nodes']
+ if nodes_configured_but_not_available.any?
+ warn "These nodes are configured but not available:", nodes_configured_but_not_available
+ end
+ if nodes_available_but_not_configured.any?
+ warn "These nodes are available but not configured:", nodes_available_but_not_configured
+ end
+ if response['cluster_nodes'] == response['all_nodes']
+ pass
+ end
+ end
+ end
+
+ def test_04_Do_ACL_users_exist?
+ acl_users = ['_design/_auth', 'leap_mx', 'nickserver', 'soledad', 'tapicero', 'webapp']
+ url = couchdb_backend_url("/_users/_all_docs")
+ assert_get(url) do |body|
+ response = JSON.parse(body)
+ assert_equal 6, response['total_rows']
+ actual_users = response['rows'].map{|row| row['id'].sub(/^org.couchdb.user:/, '') }
+ assert_equal acl_users.sort, actual_users.sort
+ end
+ pass
+ end
+
+ def test_05_Do_required_databases_exist?
+ dbs_that_should_exist = ["customers","identities","keycache","sessions","shared","tickets","tokens","users"]
+ dbs_that_should_exist.each do |db_name|
+ assert_get(couchdb_url("/"+db_name)) do |body|
+ assert response = JSON.parse(body)
+ assert_equal db_name, response['db_name']
+ end
+ end
+ pass
+ end
+
+ private
+
+ def couchdb_url(path="", port=nil)
+ @port ||= begin
+ assert_property 'couch.port'
+ $node['couch']['port']
+ end
+ @password ||= begin
+ assert_property 'couch.users.admin.password'
+ $node['couch']['users']['admin']['password']
+ end
+ "http://admin:#{@password}@localhost:#{port || @port}#{path}"
+ end
+
+ def couchdb_backend_url(path="")
+ couchdb_url(path, "5986") # TODO: admin port is hardcoded for now but should be configurable.
+ end
+
+end
diff --git a/tests/white-box/dummy.rb b/tests/white-box/dummy.rb
new file mode 100644
index 00000000..a3e8ad68
--- /dev/null
+++ b/tests/white-box/dummy.rb
@@ -0,0 +1,71 @@
+# only run in the dummy case where there is no hiera.yaml file.
+raise SkipTest unless $node["dummy"]
+
+class Robot
+ def can_shoot_lasers?
+ "OHAI!"
+ end
+
+ def can_fly?
+ "YES!"
+ end
+end
+
+class TestDummy < LeapTest
+ def setup
+ @robot = Robot.new
+ end
+
+ def test_lasers
+ assert_equal "OHAI!", @robot.can_shoot_lasers?
+ pass
+ end
+
+ def test_fly
+ refute_match /^no/i, @robot.can_fly?
+ pass
+ end
+
+ def test_fail
+ fail "fail"
+ pass
+ end
+
+ def test_01_will_be_skipped
+ skip "test this later"
+ pass
+ end
+
+ def test_socket_failure
+ assert_tcp_socket('localhost', 900000)
+ pass
+ end
+
+ def test_warn
+ block_test do
+ warn "not everything", "is a success or failure"
+ end
+ end
+
+ # used to test extracting the proper caller even when in a block
+ def block_test
+ yield
+ end
+
+ def test_socket_success
+ fork {
+ Socket.tcp_server_loop('localhost', 12345) do |sock, client_addrinfo|
+ begin
+ sock.write('hi')
+ ensure
+ sock.close
+ exit
+ end
+ end
+ }
+ sleep 0.2
+ assert_tcp_socket('localhost', 12345)
+ pass
+ end
+
+end
diff --git a/tests/white-box/network.rb b/tests/white-box/network.rb
new file mode 100644
index 00000000..955857dc
--- /dev/null
+++ b/tests/white-box/network.rb
@@ -0,0 +1,60 @@
+require 'socket'
+
+raise SkipTest if $node["dummy"]
+
+class Network < LeapTest
+
+ def setup
+ end
+
+ def test_01_Can_connect_to_internet?
+ assert_get('http://www.google.com/images/srpr/logo11w.png')
+ pass
+ end
+
+ #
+ # example properties:
+ #
+ # stunnel:
+ # ednp_clients:
+ # elk_9002:
+ # accept_port: 4003
+ # connect: elk.dev.bitmask.i
+ # connect_port: 19002
+ # couch_server:
+ # accept: 15984
+ # connect: "127.0.0.1:5984"
+ #
+ def test_02_Is_stunnel_running?
+ if $node['stunnel']
+ good_stunnel_pids = []
+ $node['stunnel'].each do |stunnel_type, stunnel_configs|
+ if stunnel_type =~ /_clients?$/
+ stunnel_configs.each do |stunnel_name, stunnel_conf|
+ config_file_name = "/etc/stunnel/#{stunnel_name}.conf"
+ processes = pgrep(config_file_name)
+ assert_equal 6, processes.length, "There should be six stunnel processes running for `#{config_file_name}`"
+ good_stunnel_pids += processes.map{|ps| ps[:pid]}
+ assert port = stunnel_conf['accept_port'], 'Field `accept_port` must be present in `stunnel` property.'
+ assert_tcp_socket('localhost', port)
+ end
+ elsif stunnel_type =~ /_server$/
+ config_file_name = "/etc/stunnel/#{stunnel_type}.conf"
+ processes = pgrep(config_file_name)
+ assert_equal 6, processes.length, "There should be six stunnel processes running for `#{config_file_name}`"
+ good_stunnel_pids += processes.map{|ps| ps[:pid]}
+ assert accept = stunnel_configs['accept'], "Field `accept` must be present in property `stunnel.#{stunnel_type}`"
+ assert_tcp_socket('localhost', accept)
+ assert connect = stunnel_configs['connect'], "Field `connect` must be present in property `stunnel.#{stunnel_type}`"
+ assert_tcp_socket(*connect.split(':'))
+ else
+ skip "Unknown stunnel type `#{stunnel_type}`"
+ end
+ end
+ all_stunnel_pids = pgrep('/usr/bin/stunnel').collect{|process| process[:pid]}.uniq
+ assert_equal good_stunnel_pids.sort, all_stunnel_pids.sort, "There should not be any extra stunnel processes that are not configured in /etc/stunnel"
+ pass
+ end
+ end
+
+end
diff --git a/tests/white-box/openvpn.rb b/tests/white-box/openvpn.rb
new file mode 100644
index 00000000..5eb2bdb5
--- /dev/null
+++ b/tests/white-box/openvpn.rb
@@ -0,0 +1,16 @@
+raise SkipTest unless $node["services"].include?("openvpn")
+
+class Openvpn < LeapTest
+ depends_on "Network"
+
+ def setup
+ end
+
+ def test_01_Are_daemons_running?
+ assert_running '/usr/sbin/openvpn .* /etc/openvpn/tcp_config.conf'
+ assert_running '/usr/sbin/openvpn .* /etc/openvpn/udp_config.conf'
+ assert_running '/usr/sbin/unbound'
+ pass
+ end
+
+end
diff --git a/tests/white-box/webapp.rb b/tests/white-box/webapp.rb
new file mode 100644
index 00000000..142ac2de
--- /dev/null
+++ b/tests/white-box/webapp.rb
@@ -0,0 +1,63 @@
+raise SkipTest unless $node["services"].include?("webapp")
+
+require 'socket'
+
+class Webapp < LeapTest
+ depends_on "Network"
+
+ HAPROXY_CONFIG = '/etc/haproxy/haproxy.cfg'
+
+ def setup
+ end
+
+ #
+ # example properties:
+ #
+ # stunnel:
+ # couch_client:
+ # couch1_5984:
+ # accept_port: 4000
+ # connect: couch1.bitmask.i
+ # connect_port: 15984
+ #
+ def test_01_Can_contact_couchdb?
+ assert_property('stunnel.couch_client')
+ $node['stunnel']['couch_client'].values.each do |stunnel_conf|
+ assert port = stunnel_conf['accept_port'], 'Field `accept_port` must be present in `stunnel` property.'
+ local_stunnel_url = "http://localhost:#{port}"
+ remote_ip_address = TCPSocket.gethostbyname(stunnel_conf['connect']).last
+ msg = "(stunnel to %s:%s, aka %s)" % [stunnel_conf['connect'], stunnel_conf['connect_port'], remote_ip_address]
+ assert_get(local_stunnel_url, nil, error_msg: msg) do |body|
+ assert_match /"couchdb":"Welcome"/, body, "Request to #{local_stunnel_url} should return couchdb welcome message."
+ end
+ end
+ pass
+ end
+
+ #
+ # example properties:
+ #
+ # haproxy:
+ # servers:
+ # couch1:
+ # backup: false
+ # host: localhost
+ # port: 4000
+ # weight: 10
+ #
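+  # the port is scraped from the haproxy config, from a line that looks
+  # roughly like "  bind localhost:4000" (the port value is illustrative).
+  #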
+ def test_02_Is_haproxy_working?
+ port = file_match(HAPROXY_CONFIG, /^ bind localhost:(\d+)$/)
+ url = "http://localhost:#{port}"
+ assert_get(url) do |body|
+ assert_match /"couchdb":"Welcome"/, body, "Request to #{url} should return couchdb welcome message."
+ end
+ pass
+ end
+
+ def test_03_Are_daemons_running?
+ assert_running '/usr/sbin/apache2'
+ assert_running '/usr/bin/nickserver'
+ pass
+ end
+
+end