summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitignore3
-rw-r--r--.gitmodules (renamed from files/mod_passenger.conf)0
-rw-r--r--.mailmap8
-rw-r--r--CHANGES.md64
-rw-r--r--Gemfile13
-rw-r--r--LICENSE674
-rw-r--r--README.md111
-rw-r--r--Rakefile57
-rw-r--r--Vagrantfile53
-rwxr-xr-xbin/debug.sh29
-rw-r--r--bin/node_init86
-rwxr-xr-xbin/puppet_command313
-rwxr-xr-xbin/run_tests515
-rw-r--r--contrib/README.md9
-rw-r--r--contrib/commit-template.txt7
-rw-r--r--contrib/offlineimaprc.example.org24
-rw-r--r--doc/details/couchdb.md74
-rw-r--r--doc/details/development.md359
-rw-r--r--doc/details/en.haml4
-rw-r--r--doc/details/faq.md65
-rw-r--r--doc/details/under-the-hood.md40
-rw-r--r--doc/details/webapp.md282
-rw-r--r--doc/en.md85
-rw-r--r--doc/guide/commands.md419
-rw-r--r--doc/guide/config.md263
-rw-r--r--doc/guide/en.haml4
-rw-r--r--doc/guide/environments.md75
-rw-r--r--doc/guide/keys-and-certificates.md194
-rw-r--r--doc/guide/miscellaneous.md14
-rw-r--r--doc/guide/nodes.md187
-rw-r--r--doc/service-diagram.odgbin0 -> 12131 bytes
-rw-r--r--doc/service-diagram.pngbin0 -> 25988 bytes
-rw-r--r--doc/troubleshooting/en.haml3
-rw-r--r--doc/troubleshooting/known-issues.md115
-rw-r--r--doc/troubleshooting/tests.md70
-rw-r--r--doc/troubleshooting/vagrant.md45
-rw-r--r--doc/troubleshooting/where-to-look.md249
-rw-r--r--doc/tutorials/configure-provider.md31
-rw-r--r--doc/tutorials/en.haml4
-rw-r--r--doc/tutorials/quick-start.md385
-rw-r--r--doc/tutorials/single-node-email.md282
-rw-r--r--hiera.yaml6
-rw-r--r--leap-debug-remote.sh23
-rw-r--r--lib/leap_cli/commands/README11
-rw-r--r--lib/leap_cli/commands/ca.rb541
-rw-r--r--lib/leap_cli/commands/clean.rb16
-rw-r--r--lib/leap_cli/commands/compile.rb531
-rw-r--r--lib/leap_cli/commands/db.rb86
-rw-r--r--lib/leap_cli/commands/deploy.rb374
-rw-r--r--lib/leap_cli/commands/env.rb76
-rw-r--r--lib/leap_cli/commands/facts.rb100
-rw-r--r--lib/leap_cli/commands/info.rb15
-rw-r--r--lib/leap_cli/commands/inspect.rb144
-rw-r--r--lib/leap_cli/commands/list.rb132
-rw-r--r--lib/leap_cli/commands/node.rb188
-rw-r--r--lib/leap_cli/commands/node_init.rb169
-rw-r--r--lib/leap_cli/commands/ssh.rb225
-rw-r--r--lib/leap_cli/commands/test.rb74
-rw-r--r--lib/leap_cli/commands/user.rb136
-rw-r--r--lib/leap_cli/commands/util.rb50
-rw-r--r--lib/leap_cli/commands/vagrant.rb180
-rw-r--r--lib/leap_cli/macros.rb16
-rw-r--r--lib/leap_cli/macros/core.rb92
-rw-r--r--lib/leap_cli/macros/files.rb124
-rw-r--r--lib/leap_cli/macros/haproxy.rb73
-rw-r--r--lib/leap_cli/macros/hosts.rb90
-rw-r--r--lib/leap_cli/macros/keys.rb97
-rw-r--r--lib/leap_cli/macros/nodes.rb88
-rw-r--r--lib/leap_cli/macros/provider.rb90
-rw-r--r--lib/leap_cli/macros/secrets.rb39
-rw-r--r--lib/leap_cli/macros/stunnel.rb106
-rw-r--r--platform.rb119
-rw-r--r--provider_base/README9
-rw-r--r--provider_base/common.json97
-rw-r--r--provider_base/files/branding/head.scss1
-rw-r--r--provider_base/files/branding/tail.scss1
-rw-r--r--provider_base/files/service-definitions/provider.json.erb16
-rw-r--r--provider_base/files/service-definitions/v1/eip-service.json.erb55
-rw-r--r--provider_base/files/service-definitions/v1/smtp-service.json.erb29
-rw-r--r--provider_base/files/service-definitions/v1/soledad-service.json.erb29
-rw-r--r--provider_base/provider.json64
-rw-r--r--provider_base/services/_api_tester.json13
-rw-r--r--provider_base/services/_couchdb_mirror.json22
-rw-r--r--provider_base/services/_couchdb_multimaster.json24
-rw-r--r--provider_base/services/couchdb.json49
-rw-r--r--provider_base/services/couchdb.rb27
-rw-r--r--provider_base/services/dns.json14
-rw-r--r--provider_base/services/monitor.json29
-rw-r--r--provider_base/services/monitor.rb3
-rw-r--r--provider_base/services/mx.json53
-rw-r--r--provider_base/services/mx.rb1
-rw-r--r--provider_base/services/obfsproxy.json9
-rw-r--r--provider_base/services/openvpn.json45
-rw-r--r--provider_base/services/soledad.json21
-rw-r--r--provider_base/services/soledad.rb3
-rw-r--r--provider_base/services/static.json20
-rw-r--r--provider_base/services/tor.json15
-rw-r--r--provider_base/services/webapp.json93
-rw-r--r--provider_base/tags/development.json3
-rw-r--r--provider_base/tags/local.json3
-rw-r--r--provider_base/tags/production.json3
-rw-r--r--provider_base/templates/common.json3
-rw-r--r--provider_base/templates/couchdb.json5
-rw-r--r--provider_base/templates/openvpn.json7
-rw-r--r--provider_base/test/openvpn/client.ovpn.erb28
-rwxr-xr-xpuppet/bin/apply_on_node.sh30
-rw-r--r--puppet/hiera.yaml15
-rw-r--r--puppet/lib/puppet/parser/functions/create_resources_hash_from.rb116
-rw-r--r--puppet/lib/puppet/parser/functions/sorted_json.rb47
-rw-r--r--puppet/lib/puppet/parser/functions/sorted_yaml.rb400
-rw-r--r--puppet/manifests/site.pp60
-rw-r--r--puppet/modules/apache/.gitignore6
-rw-r--r--puppet/modules/apache/.rspec2
-rw-r--r--puppet/modules/apache/Gemfile13
-rw-r--r--puppet/modules/apache/LICENSE674
-rw-r--r--puppet/modules/apache/Puppetfile15
-rw-r--r--puppet/modules/apache/README.md233
-rw-r--r--puppet/modules/apache/Rakefile26
-rw-r--r--puppet/modules/apache/files/conf.d/CentOS/ssl.conf76
-rw-r--r--puppet/modules/apache/files/conf.d/CentOS/welcome.conf10
-rw-r--r--puppet/modules/apache/files/conf.d/Debian/charset6
-rw-r--r--puppet/modules/apache/files/conf.d/Debian/security50
-rw-r--r--puppet/modules/apache/files/conf.d/Debian/ssl.conf1
-rw-r--r--puppet/modules/apache/files/conf.d/do_includes.conf5
-rw-r--r--puppet/modules/apache/files/conf.d/git.conf5
-rw-r--r--puppet/modules/apache/files/conf.d/mozilla_autoconfig.conf6
-rw-r--r--puppet/modules/apache/files/conf.d/status.conf24
-rw-r--r--puppet/modules/apache/files/conf.d/vhosts.conf8
-rw-r--r--puppet/modules/apache/files/config/Debian.jessie/apache2.conf221
-rw-r--r--puppet/modules/apache/files/config/Debian.wheezy/apache2.conf268
-rw-r--r--puppet/modules/apache/files/config/Debian/apache2.conf230
-rw-r--r--puppet/modules/apache/files/config/OpenBSD/httpd.conf1120
-rw-r--r--puppet/modules/apache/files/include.d/defaults.inc5
-rw-r--r--puppet/modules/apache/files/include.d/joomla.inc30
-rw-r--r--puppet/modules/apache/files/include.d/silverstripe.inc17
-rw-r--r--puppet/modules/apache/files/itk_plus/conf.d/CentOS/ssl.conf75
-rw-r--r--puppet/modules/apache/files/modules.d/Gentoo/00_default_settings.conf105
-rw-r--r--puppet/modules/apache/files/modules.d/Gentoo/00_error_documents.conf66
-rw-r--r--puppet/modules/apache/files/modules.d/Gentoo/00_languages.conf137
-rw-r--r--puppet/modules/apache/files/modules.d/Gentoo/00_mod_autoindex.conf83
-rw-r--r--puppet/modules/apache/files/modules.d/Gentoo/00_mod_info.conf14
-rw-r--r--puppet/modules/apache/files/modules.d/Gentoo/00_mod_log_config.conf35
-rw-r--r--puppet/modules/apache/files/modules.d/Gentoo/00_mod_mime.conf55
-rw-r--r--puppet/modules/apache/files/modules.d/Gentoo/00_mod_status.conf19
-rw-r--r--puppet/modules/apache/files/modules.d/Gentoo/00_mod_userdir.conf40
-rw-r--r--puppet/modules/apache/files/modules.d/Gentoo/00_mpm.conf102
-rw-r--r--puppet/modules/apache/files/modules.d/Gentoo/10_mod_mem_cache.conf10
-rw-r--r--puppet/modules/apache/files/modules.d/Gentoo/40_mod_ssl.conf65
-rw-r--r--puppet/modules/apache/files/modules.d/Gentoo/45_mod_dav.conf56
-rw-r--r--puppet/modules/apache/files/modules.d/Gentoo/46_mod_ldap.conf29
-rw-r--r--puppet/modules/apache/files/modules.d/Gentoo/70_mod_php5.conf18
-rwxr-xr-xpuppet/modules/apache/files/munin/apache_activity99
-rw-r--r--puppet/modules/apache/files/scripts/OpenBSD/bin/apache_logrotate.sh7
-rw-r--r--puppet/modules/apache/files/scripts/OpenBSD/bin/restart_apache.sh6
-rw-r--r--puppet/modules/apache/files/scripts/OpenBSD/bin/restart_apache_ssl.sh6
-rw-r--r--puppet/modules/apache/files/service/CentOS/httpd22
-rw-r--r--puppet/modules/apache/files/service/CentOS/httpd.itk23
-rw-r--r--puppet/modules/apache/files/service/CentOS/httpd.itk_plus24
-rw-r--r--puppet/modules/apache/files/service/CentOS/httpd.worker22
-rw-r--r--puppet/modules/apache/files/vhosts.d/CentOS/0-default.conf11
-rw-r--r--puppet/modules/apache/files/vhosts.d/Debian/0-default.conf41
-rw-r--r--puppet/modules/apache/files/vhosts.d/Gentoo/0-default.conf51
-rw-r--r--puppet/modules/apache/files/vhosts.d/Gentoo/default_vhost.include79
-rw-r--r--puppet/modules/apache/files/vhosts.d/OpenBSD/0-default.conf8
-rw-r--r--puppet/modules/apache/lib/facter/apache_version.rb28
-rw-r--r--puppet/modules/apache/lib/puppet/parser/functions/guess_apache_version.rb39
-rw-r--r--puppet/modules/apache/lib/puppet/parser/functions/htpasswd_sha1.rb8
-rw-r--r--puppet/modules/apache/manifests/base.pp75
-rw-r--r--puppet/modules/apache/manifests/base/itk.pp6
-rw-r--r--puppet/modules/apache/manifests/centos.pp86
-rw-r--r--puppet/modules/apache/manifests/centos/itk.pp10
-rw-r--r--puppet/modules/apache/manifests/centos/itk_plus.pp20
-rw-r--r--puppet/modules/apache/manifests/centos/module.pp30
-rw-r--r--puppet/modules/apache/manifests/centos/worker.pp5
-rw-r--r--puppet/modules/apache/manifests/config/file.pp106
-rw-r--r--puppet/modules/apache/manifests/config/global.pp18
-rw-r--r--puppet/modules/apache/manifests/config/include.pp17
-rw-r--r--puppet/modules/apache/manifests/debian.pp44
-rw-r--r--puppet/modules/apache/manifests/debian/itk.pp9
-rw-r--r--puppet/modules/apache/manifests/debian/module.pp48
-rw-r--r--puppet/modules/apache/manifests/defaultdavdbdir.pp17
-rw-r--r--puppet/modules/apache/manifests/defaultphpdirs.pp31
-rw-r--r--puppet/modules/apache/manifests/file.pp15
-rw-r--r--puppet/modules/apache/manifests/file/readonly.pp12
-rw-r--r--puppet/modules/apache/manifests/file/rw.pp13
-rw-r--r--puppet/modules/apache/manifests/gentoo.pp39
-rw-r--r--puppet/modules/apache/manifests/gentoo/module.pp30
-rw-r--r--puppet/modules/apache/manifests/htpasswd_user.pp34
-rw-r--r--puppet/modules/apache/manifests/include/joomla.pp3
-rw-r--r--puppet/modules/apache/manifests/include/mod_fcgid.pp7
-rw-r--r--puppet/modules/apache/manifests/include/silverstripe.pp3
-rw-r--r--puppet/modules/apache/manifests/includes.pp5
-rw-r--r--puppet/modules/apache/manifests/init.pp44
-rw-r--r--puppet/modules/apache/manifests/itk.pp11
-rw-r--r--puppet/modules/apache/manifests/itk/lock.pp4
-rw-r--r--puppet/modules/apache/manifests/itk_plus.pp10
-rw-r--r--puppet/modules/apache/manifests/itk_plus/lock.pp4
-rw-r--r--puppet/modules/apache/manifests/logrotate/centos.pp10
-rw-r--r--puppet/modules/apache/manifests/logrotate/centos/vhosts.pp11
-rw-r--r--puppet/modules/apache/manifests/mod_dav_svn.pp7
-rw-r--r--puppet/modules/apache/manifests/mod_macro.pp7
-rw-r--r--puppet/modules/apache/manifests/module.pp35
-rw-r--r--puppet/modules/apache/manifests/module/alias.pp14
-rw-r--r--puppet/modules/apache/manifests/module/auth_basic.pp6
-rw-r--r--puppet/modules/apache/manifests/module/authn_core.pp6
-rw-r--r--puppet/modules/apache/manifests/module/authn_file.pp6
-rw-r--r--puppet/modules/apache/manifests/module/authz_core.pp7
-rw-r--r--puppet/modules/apache/manifests/module/authz_host.pp6
-rw-r--r--puppet/modules/apache/manifests/module/authz_user.pp6
-rw-r--r--puppet/modules/apache/manifests/module/cgi.pp6
-rw-r--r--puppet/modules/apache/manifests/module/dir.pp6
-rw-r--r--puppet/modules/apache/manifests/module/env.pp7
-rw-r--r--puppet/modules/apache/manifests/module/expires.pp5
-rw-r--r--puppet/modules/apache/manifests/module/headers.pp6
-rw-r--r--puppet/modules/apache/manifests/module/mime.pp6
-rw-r--r--puppet/modules/apache/manifests/module/mpm_event.pp7
-rw-r--r--puppet/modules/apache/manifests/module/mpm_prefork.pp6
-rw-r--r--puppet/modules/apache/manifests/module/negotiation.pp6
-rw-r--r--puppet/modules/apache/manifests/module/php5.pp6
-rw-r--r--puppet/modules/apache/manifests/module/removeip.pp6
-rw-r--r--puppet/modules/apache/manifests/module/rewrite.pp6
-rw-r--r--puppet/modules/apache/manifests/module/socache_shmcb.pp6
-rw-r--r--puppet/modules/apache/manifests/module/status.pp6
-rw-r--r--puppet/modules/apache/manifests/mozilla_autoconfig.pp37
-rw-r--r--puppet/modules/apache/manifests/munin.pp12
-rw-r--r--puppet/modules/apache/manifests/noiplog.pp5
-rw-r--r--puppet/modules/apache/manifests/openbsd.pp75
-rw-r--r--puppet/modules/apache/manifests/package.pp32
-rw-r--r--puppet/modules/apache/manifests/package/itk.pp5
-rw-r--r--puppet/modules/apache/manifests/sftponly.pp5
-rw-r--r--puppet/modules/apache/manifests/sftponly/centos.pp10
-rw-r--r--puppet/modules/apache/manifests/ssl.pp13
-rw-r--r--puppet/modules/apache/manifests/ssl/base.pp15
-rw-r--r--puppet/modules/apache/manifests/ssl/centos.pp12
-rw-r--r--puppet/modules/apache/manifests/ssl/debian.pp4
-rw-r--r--puppet/modules/apache/manifests/ssl/itk.pp8
-rw-r--r--puppet/modules/apache/manifests/ssl/itk/centos.pp6
-rw-r--r--puppet/modules/apache/manifests/ssl/itk_plus.pp6
-rw-r--r--puppet/modules/apache/manifests/ssl/itk_plus/centos.pp11
-rw-r--r--puppet/modules/apache/manifests/ssl/openbsd.pp18
-rw-r--r--puppet/modules/apache/manifests/status.pp13
-rw-r--r--puppet/modules/apache/manifests/status/base.pp1
-rw-r--r--puppet/modules/apache/manifests/status/centos.pp5
-rw-r--r--puppet/modules/apache/manifests/status/debian.pp4
-rw-r--r--puppet/modules/apache/manifests/vhost.pp127
-rw-r--r--puppet/modules/apache/manifests/vhost/davdbdir.pp40
-rw-r--r--puppet/modules/apache/manifests/vhost/file.pp151
-rw-r--r--puppet/modules/apache/manifests/vhost/file/documentrootdir.pp24
-rw-r--r--puppet/modules/apache/manifests/vhost/file/documentrootfile.pp27
-rw-r--r--puppet/modules/apache/manifests/vhost/gitweb.pp59
-rw-r--r--puppet/modules/apache/manifests/vhost/modperl.pp153
-rw-r--r--puppet/modules/apache/manifests/vhost/passenger.pp139
-rw-r--r--puppet/modules/apache/manifests/vhost/php/drupal.pp144
-rw-r--r--puppet/modules/apache/manifests/vhost/php/gallery2.pp141
-rw-r--r--puppet/modules/apache/manifests/vhost/php/global_exec_bin_dir.pp9
-rw-r--r--puppet/modules/apache/manifests/vhost/php/joomla.pp174
-rw-r--r--puppet/modules/apache/manifests/vhost/php/mediawiki.pp106
-rw-r--r--puppet/modules/apache/manifests/vhost/php/safe_mode_bin.pp17
-rw-r--r--puppet/modules/apache/manifests/vhost/php/silverstripe.pp119
-rw-r--r--puppet/modules/apache/manifests/vhost/php/simplemachine.pp125
-rw-r--r--puppet/modules/apache/manifests/vhost/php/spip.pp114
-rw-r--r--puppet/modules/apache/manifests/vhost/php/standard.pp304
-rw-r--r--puppet/modules/apache/manifests/vhost/php/typo3.pp150
-rw-r--r--puppet/modules/apache/manifests/vhost/php/webapp.pp148
-rw-r--r--puppet/modules/apache/manifests/vhost/php/wordpress.pp123
-rw-r--r--puppet/modules/apache/manifests/vhost/phpdirs.pp39
-rw-r--r--puppet/modules/apache/manifests/vhost/proxy.pp67
-rw-r--r--puppet/modules/apache/manifests/vhost/redirect.pp56
-rw-r--r--puppet/modules/apache/manifests/vhost/static.pp86
-rw-r--r--puppet/modules/apache/manifests/vhost/template.pp158
-rw-r--r--puppet/modules/apache/manifests/vhost/webdav.pp126
-rw-r--r--puppet/modules/apache/manifests/vhost/webdir.pp130
-rw-r--r--puppet/modules/apache/manifests/webdav.pp8
-rw-r--r--puppet/modules/apache/manifests/worker.pp5
-rw-r--r--puppet/modules/apache/spec/classes/init_spec.rb43
-rw-r--r--puppet/modules/apache/spec/defines/vhost_file_spec.rb131
-rw-r--r--puppet/modules/apache/spec/defines/vhost_php_drupal_spec.rb187
-rw-r--r--puppet/modules/apache/spec/defines/vhost_php_gallery2_spec.rb162
-rw-r--r--puppet/modules/apache/spec/defines/vhost_php_joomla_spec.rb279
-rw-r--r--puppet/modules/apache/spec/defines/vhost_php_standard_spec.rb534
-rw-r--r--puppet/modules/apache/spec/defines/vhost_php_webapp_spec.rb261
-rw-r--r--puppet/modules/apache/spec/defines/vhost_php_wordpress_spec.rb171
-rw-r--r--puppet/modules/apache/spec/defines/vhost_spec.rb202
-rw-r--r--puppet/modules/apache/spec/defines/vhost_static_spec.rb54
-rw-r--r--puppet/modules/apache/spec/defines/vhost_template_spec.rb297
-rw-r--r--puppet/modules/apache/spec/functions/guess_apache_version.rb50
-rw-r--r--puppet/modules/apache/spec/spec_helper.rb13
-rw-r--r--puppet/modules/apache/templates/default/default_index.erb13
-rw-r--r--puppet/modules/apache/templates/include.d/ssl_defaults.inc.erb78
-rw-r--r--puppet/modules/apache/templates/itk_plus/CentOS/00-listen-ssl.conf.erb6
-rw-r--r--puppet/modules/apache/templates/itk_plus/CentOS/00-listen.conf.erb8
-rw-r--r--puppet/modules/apache/templates/vhosts/0-default_ssl.conf.erb21
-rw-r--r--puppet/modules/apache/templates/vhosts/default.erb44
-rw-r--r--puppet/modules/apache/templates/vhosts/gitweb/partial.erb16
-rw-r--r--puppet/modules/apache/templates/vhosts/itk_plus.erb6
-rw-r--r--puppet/modules/apache/templates/vhosts/itk_plus/partial.erb31
-rw-r--r--puppet/modules/apache/templates/vhosts/partials/authentication.erb6
-rw-r--r--puppet/modules/apache/templates/vhosts/partials/header_default.erb22
-rw-r--r--puppet/modules/apache/templates/vhosts/partials/logs.erb18
-rw-r--r--puppet/modules/apache/templates/vhosts/partials/mod_security.erb27
-rw-r--r--puppet/modules/apache/templates/vhosts/partials/php_settings.erb20
-rw-r--r--puppet/modules/apache/templates/vhosts/partials/ssl.erb8
-rw-r--r--puppet/modules/apache/templates/vhosts/partials/std_override_options.erb4
-rw-r--r--puppet/modules/apache/templates/vhosts/passenger/partial.erb7
-rw-r--r--puppet/modules/apache/templates/vhosts/perl/partial.erb14
-rw-r--r--puppet/modules/apache/templates/vhosts/php/partial.erb5
-rw-r--r--puppet/modules/apache/templates/vhosts/php_drupal/partial.erb22
-rw-r--r--puppet/modules/apache/templates/vhosts/php_gallery2/partial.erb14
-rw-r--r--puppet/modules/apache/templates/vhosts/php_joomla/partial.erb30
-rw-r--r--puppet/modules/apache/templates/vhosts/php_mediawiki/partial.erb7
-rw-r--r--puppet/modules/apache/templates/vhosts/php_silverstripe/partial.erb12
-rw-r--r--puppet/modules/apache/templates/vhosts/php_typo3/partial.erb10
-rw-r--r--puppet/modules/apache/templates/vhosts/php_wordpress/partial.erb19
-rw-r--r--puppet/modules/apache/templates/vhosts/proxy/partial.erb8
-rw-r--r--puppet/modules/apache/templates/vhosts/redirect/partial.erb1
-rw-r--r--puppet/modules/apache/templates/vhosts/static/partial.erb4
-rw-r--r--puppet/modules/apache/templates/vhosts/webdav/partial.erb21
-rw-r--r--puppet/modules/apache/templates/webfiles/autoconfig/config.shtml.erb58
-rw-r--r--puppet/modules/apt/.gitignore12
-rw-r--r--puppet/modules/apt/.gitlab-ci.yml12
-rw-r--r--puppet/modules/apt/Gemfile13
-rw-r--r--puppet/modules/apt/LICENSE674
-rw-r--r--puppet/modules/apt/README602
-rw-r--r--puppet/modules/apt/Rakefile19
-rw-r--r--puppet/modules/apt/files/02show_upgraded4
-rw-r--r--puppet/modules/apt/files/03clean4
-rw-r--r--puppet/modules/apt/files/03clean_vserver4
-rw-r--r--puppet/modules/apt/files/upgrade_initiator1
-rw-r--r--puppet/modules/apt/lib/facter/apt_running.rb7
-rw-r--r--puppet/modules/apt/lib/facter/debian_codename.rb42
-rw-r--r--puppet/modules/apt/lib/facter/debian_lts.rb16
-rw-r--r--puppet/modules/apt/lib/facter/debian_nextcodename.rb23
-rw-r--r--puppet/modules/apt/lib/facter/debian_nextrelease.rb23
-rw-r--r--puppet/modules/apt/lib/facter/debian_release.rb38
-rw-r--r--puppet/modules/apt/lib/facter/ubuntu_codename.rb8
-rw-r--r--puppet/modules/apt/lib/facter/ubuntu_nextcodename.rb20
-rw-r--r--puppet/modules/apt/lib/facter/util/debian.rb18
-rw-r--r--puppet/modules/apt/lib/facter/util/ubuntu.rb21
-rw-r--r--puppet/modules/apt/manifests/apt_conf.pp45
-rw-r--r--puppet/modules/apt/manifests/apticron.pp24
-rw-r--r--puppet/modules/apt/manifests/cron/base.pp20
-rw-r--r--puppet/modules/apt/manifests/cron/dist_upgrade.pp29
-rw-r--r--puppet/modules/apt/manifests/cron/download.pp27
-rw-r--r--puppet/modules/apt/manifests/dist_upgrade.pp9
-rw-r--r--puppet/modules/apt/manifests/dist_upgrade/initiator.pp23
-rw-r--r--puppet/modules/apt/manifests/dot_d_directories.pp15
-rw-r--r--puppet/modules/apt/manifests/dselect.pp11
-rw-r--r--puppet/modules/apt/manifests/init.pp150
-rw-r--r--puppet/modules/apt/manifests/key.pp13
-rw-r--r--puppet/modules/apt/manifests/key/plain.pp13
-rw-r--r--puppet/modules/apt/manifests/listchanges.pp19
-rw-r--r--puppet/modules/apt/manifests/params.pp22
-rw-r--r--puppet/modules/apt/manifests/preferences.pp20
-rw-r--r--puppet/modules/apt/manifests/preferences/absent.pp7
-rw-r--r--puppet/modules/apt/manifests/preferences_snippet.pp59
-rw-r--r--puppet/modules/apt/manifests/preseeded_package.pp21
-rw-r--r--puppet/modules/apt/manifests/proxy_client.pp9
-rw-r--r--puppet/modules/apt/manifests/reboot_required_notify.pp21
-rw-r--r--puppet/modules/apt/manifests/sources_list.pp40
-rw-r--r--puppet/modules/apt/manifests/unattended_upgrades.pp34
-rw-r--r--puppet/modules/apt/manifests/update.pp7
-rw-r--r--puppet/modules/apt/manifests/upgrade_package.pp31
-rw-r--r--puppet/modules/apt/spec/spec_helper.rb12
-rw-r--r--puppet/modules/apt/spec/unit/custom_facts_spec.rb86
-rw-r--r--puppet/modules/apt/templates/20proxy.erb5
-rw-r--r--puppet/modules/apt/templates/50unattended-upgrades.erb38
l---------puppet/modules/apt/templates/Debian/apticron_jessie.erb1
-rw-r--r--puppet/modules/apt/templates/Debian/apticron_lenny.erb50
l---------puppet/modules/apt/templates/Debian/apticron_sid.erb1
-rw-r--r--puppet/modules/apt/templates/Debian/apticron_squeeze.erb82
-rw-r--r--puppet/modules/apt/templates/Debian/apticron_wheezy.erb80
l---------puppet/modules/apt/templates/Debian/listchanges_jessie.erb1
-rw-r--r--puppet/modules/apt/templates/Debian/listchanges_lenny.erb7
l---------puppet/modules/apt/templates/Debian/listchanges_sid.erb1
l---------puppet/modules/apt/templates/Debian/listchanges_squeeze.erb1
l---------puppet/modules/apt/templates/Debian/listchanges_wheezy.erb1
-rw-r--r--puppet/modules/apt/templates/Debian/preferences_jessie.erb14
-rw-r--r--puppet/modules/apt/templates/Debian/preferences_lenny.erb25
-rw-r--r--puppet/modules/apt/templates/Debian/preferences_sid.erb10
-rw-r--r--puppet/modules/apt/templates/Debian/preferences_squeeze.erb30
-rw-r--r--puppet/modules/apt/templates/Debian/preferences_wheezy.erb20
-rw-r--r--puppet/modules/apt/templates/Debian/sources.list.erb76
l---------puppet/modules/apt/templates/Ubuntu/preferences_lucid.erb1
-rw-r--r--puppet/modules/apt/templates/Ubuntu/preferences_maverick.erb30
l---------puppet/modules/apt/templates/Ubuntu/preferences_oneiric.erb1
l---------puppet/modules/apt/templates/Ubuntu/preferences_precise.erb1
l---------puppet/modules/apt/templates/Ubuntu/preferences_utopic.erb1
l---------puppet/modules/apt/templates/Ubuntu/preferences_vivid.erb1
l---------puppet/modules/apt/templates/Ubuntu/preferences_wily.erb1
l---------puppet/modules/apt/templates/Ubuntu/preferences_xenial.erb1
-rw-r--r--puppet/modules/apt/templates/Ubuntu/sources.list.erb22
-rw-r--r--puppet/modules/apt/templates/preferences_snippet.erb4
-rw-r--r--puppet/modules/apt/templates/preferences_snippet_release.erb4
-rw-r--r--puppet/modules/bundler/.gitignore1
-rw-r--r--puppet/modules/bundler/LICENSE13
-rw-r--r--puppet/modules/bundler/README.md63
-rw-r--r--puppet/modules/bundler/manifests/config.pp74
-rw-r--r--puppet/modules/bundler/manifests/install.pp64
-rw-r--r--puppet/modules/bundler/manifests/params.pp31
-rw-r--r--puppet/modules/clamav/files/01-leap.conf58
-rw-r--r--puppet/modules/clamav/files/clamav-daemon_default8
-rw-r--r--puppet/modules/clamav/files/clamav-milter_default14
-rw-r--r--puppet/modules/clamav/manifests/daemon.pp91
-rw-r--r--puppet/modules/clamav/manifests/freshclam.pp23
-rw-r--r--puppet/modules/clamav/manifests/init.pp8
-rw-r--r--puppet/modules/clamav/manifests/milter.pp50
-rw-r--r--puppet/modules/clamav/manifests/unofficial_sigs.pp23
-rw-r--r--puppet/modules/clamav/templates/clamav-milter.conf.erb28
-rw-r--r--puppet/modules/clamav/templates/local.pdb.erb1
-rw-r--r--puppet/modules/clamav/templates/whitelisted_addresses.erb5
-rw-r--r--puppet/modules/common/LICENSE674
-rw-r--r--puppet/modules/common/README44
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/basename.rb22
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/dirname.rb22
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/get_default.rb15
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/hostname.rb13
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/multi_source_template.rb29
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/prefix_with.rb9
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/re_escape.rb7
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/slash_escape.rb7
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/substitute.rb20
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/tfile.rb19
-rw-r--r--puppet/modules/common/manifests/module_dir.pp34
-rw-r--r--puppet/modules/common/manifests/module_file.pp37
-rw-r--r--puppet/modules/common/manifests/moduledir.pp18
-rw-r--r--puppet/modules/common/manifests/moduledir/common.pp4
-rw-r--r--puppet/modules/common/spec/spec.opts6
-rw-r--r--puppet/modules/common/spec/spec_helper.rb16
-rw-r--r--puppet/modules/common/spec/unit/parser/functions/tfile.rb54
-rw-r--r--puppet/modules/concat/CHANGELOG29
-rw-r--r--puppet/modules/concat/LICENSE14
-rw-r--r--puppet/modules/concat/Modulefile8
-rw-r--r--puppet/modules/concat/README.markdown112
-rw-r--r--puppet/modules/concat/Rakefile13
-rwxr-xr-xpuppet/modules/concat/files/concatfragments.sh129
-rw-r--r--puppet/modules/concat/files/null/.gitignore0
-rw-r--r--puppet/modules/concat/lib/facter/concat_basedir.rb5
-rw-r--r--puppet/modules/concat/manifests/fragment.pp49
-rw-r--r--puppet/modules/concat/manifests/init.pp178
-rw-r--r--puppet/modules/concat/manifests/setup.pp49
-rw-r--r--puppet/modules/concat/spec/defines/init_spec.rb20
-rw-r--r--puppet/modules/concat/spec/spec_helper.rb9
-rw-r--r--puppet/modules/couchdb/.fixtures.yml6
-rw-r--r--puppet/modules/couchdb/Gemfile11
-rw-r--r--puppet/modules/couchdb/README.md32
-rw-r--r--puppet/modules/couchdb/Rakefile19
-rwxr-xr-xpuppet/modules/couchdb/files/Debian/couchdb160
-rw-r--r--puppet/modules/couchdb/files/couch-doc-diff17
-rw-r--r--puppet/modules/couchdb/files/couch-doc-update219
-rw-r--r--puppet/modules/couchdb/files/local.ini84
-rw-r--r--puppet/modules/couchdb/lib/facter/couchdb_pwhash_alg.rb43
-rw-r--r--puppet/modules/couchdb/lib/facter/couchdb_version.rb34
-rw-r--r--puppet/modules/couchdb/lib/puppet/parser/functions/couchdblookup.rb55
-rw-r--r--puppet/modules/couchdb/lib/puppet/parser/functions/pbkdf2.rb62
-rw-r--r--puppet/modules/couchdb/manifests/add_user.pp39
-rw-r--r--puppet/modules/couchdb/manifests/backup.pp51
-rw-r--r--puppet/modules/couchdb/manifests/base.pp124
-rw-r--r--puppet/modules/couchdb/manifests/bigcouch.pp51
-rw-r--r--puppet/modules/couchdb/manifests/bigcouch/add_node.pp8
-rw-r--r--puppet/modules/couchdb/manifests/bigcouch/debian.pp11
-rw-r--r--puppet/modules/couchdb/manifests/bigcouch/document.pp14
-rw-r--r--puppet/modules/couchdb/manifests/bigcouch/package/cloudant.pp35
-rw-r--r--puppet/modules/couchdb/manifests/create_db.pp21
-rw-r--r--puppet/modules/couchdb/manifests/debian.pp15
-rw-r--r--puppet/modules/couchdb/manifests/deploy_config.pp12
-rw-r--r--puppet/modules/couchdb/manifests/document.pp47
-rw-r--r--puppet/modules/couchdb/manifests/init.pp31
-rw-r--r--puppet/modules/couchdb/manifests/mirror_db.pp21
-rw-r--r--puppet/modules/couchdb/manifests/params.pp23
-rw-r--r--puppet/modules/couchdb/manifests/query.pp12
-rw-r--r--puppet/modules/couchdb/manifests/query/setup.pp10
-rw-r--r--puppet/modules/couchdb/manifests/redhat.pp1
-rw-r--r--puppet/modules/couchdb/manifests/ssl/deploy_cert.pp28
-rw-r--r--puppet/modules/couchdb/manifests/ssl/generate_cert.pp25
-rw-r--r--puppet/modules/couchdb/manifests/update.pp12
-rw-r--r--puppet/modules/couchdb/spec/classes/couchdb_spec.rb35
-rw-r--r--puppet/modules/couchdb/spec/fixtures/manifests/site.pp8
-rw-r--r--puppet/modules/couchdb/spec/functions/versioncmp_spec.rb9
-rw-r--r--puppet/modules/couchdb/spec/spec_helper.rb9
-rw-r--r--puppet/modules/couchdb/templates/admin.ini.erb9
-rw-r--r--puppet/modules/couchdb/templates/bigcouch/default.ini172
-rw-r--r--puppet/modules/couchdb/templates/bigcouch/vm.args32
-rw-r--r--puppet/modules/couchdb/templates/couchdb-backup.py.erb32
-rw-r--r--puppet/modules/git/files/config/CentOS/git-daemon26
-rw-r--r--puppet/modules/git/files/config/CentOS/git-daemon.vhosts27
-rw-r--r--puppet/modules/git/files/config/Debian/git-daemon22
-rw-r--r--puppet/modules/git/files/init.d/CentOS/git-daemon75
-rw-r--r--puppet/modules/git/files/init.d/Debian/git-daemon151
-rw-r--r--puppet/modules/git/files/web/gitweb.conf53
-rw-r--r--puppet/modules/git/files/xinetd.d/git16
-rw-r--r--puppet/modules/git/files/xinetd.d/git.disabled16
-rw-r--r--puppet/modules/git/files/xinetd.d/git.vhosts16
-rw-r--r--puppet/modules/git/manifests/base.pp7
-rw-r--r--puppet/modules/git/manifests/centos.pp2
-rw-r--r--puppet/modules/git/manifests/changes.pp33
-rw-r--r--puppet/modules/git/manifests/clone.pp60
-rw-r--r--puppet/modules/git/manifests/daemon.pp17
-rw-r--r--puppet/modules/git/manifests/daemon/base.pp31
-rw-r--r--puppet/modules/git/manifests/daemon/centos.pp19
-rw-r--r--puppet/modules/git/manifests/daemon/disable.pp33
-rw-r--r--puppet/modules/git/manifests/daemon/vhosts.pp10
-rw-r--r--puppet/modules/git/manifests/debian.pp6
-rw-r--r--puppet/modules/git/manifests/init.pp25
-rw-r--r--puppet/modules/git/manifests/svn.pp10
-rw-r--r--puppet/modules/git/manifests/web.pp20
-rw-r--r--puppet/modules/git/manifests/web/absent.pp17
-rw-r--r--puppet/modules/git/manifests/web/lighttpd.pp7
-rw-r--r--puppet/modules/git/manifests/web/repo.pp56
-rw-r--r--puppet/modules/git/manifests/web/repo/lighttpd.pp16
-rw-r--r--puppet/modules/git/templates/web/config31
-rw-r--r--puppet/modules/git/templates/web/lighttpd21
-rw-r--r--puppet/modules/haveged/manifests/init.pp16
-rw-r--r--puppet/modules/journald/manifests/init.pp7
-rw-r--r--puppet/modules/leap/manifests/cli/install.pp46
-rw-r--r--puppet/modules/leap/manifests/init.pp3
-rw-r--r--puppet/modules/leap/manifests/logfile.pp34
-rw-r--r--puppet/modules/leap/templates/rsyslog.erb5
-rw-r--r--puppet/modules/leap_mx/manifests/init.pp119
-rw-r--r--puppet/modules/leap_mx/templates/mx.conf.erb18
-rw-r--r--puppet/modules/lsb/manifests/base.pp3
-rw-r--r--puppet/modules/lsb/manifests/centos.pp5
-rw-r--r--puppet/modules/lsb/manifests/debian.pp6
-rw-r--r--puppet/modules/lsb/manifests/init.pp6
-rw-r--r--puppet/modules/ntp/.fixtures.yml5
-rw-r--r--puppet/modules/ntp/.gitignore3
-rw-r--r--puppet/modules/ntp/.nodeset.yml35
-rw-r--r--puppet/modules/ntp/.travis.yml40
-rw-r--r--puppet/modules/ntp/CHANGELOG61
-rw-r--r--puppet/modules/ntp/CONTRIBUTING.md9
-rw-r--r--puppet/modules/ntp/Gemfile19
-rw-r--r--puppet/modules/ntp/LICENSE202
-rw-r--r--puppet/modules/ntp/Modulefile11
-rw-r--r--puppet/modules/ntp/README.markdown215
-rw-r--r--puppet/modules/ntp/Rakefile2
-rw-r--r--puppet/modules/ntp/manifests/config.pp23
-rw-r--r--puppet/modules/ntp/manifests/init.pp58
-rw-r--r--puppet/modules/ntp/manifests/install.pp9
-rw-r--r--puppet/modules/ntp/manifests/params.pp116
-rw-r--r--puppet/modules/ntp/manifests/service.pp18
-rw-r--r--puppet/modules/ntp/spec/classes/ntp_spec.rb261
-rw-r--r--puppet/modules/ntp/spec/fixtures/modules/my_ntp/templates/ntp.conf.erb4
-rw-r--r--puppet/modules/ntp/spec/spec.opts6
-rw-r--r--puppet/modules/ntp/spec/spec_helper.rb1
-rw-r--r--puppet/modules/ntp/spec/spec_helper_system.rb26
-rw-r--r--puppet/modules/ntp/spec/system/basic_spec.rb13
-rw-r--r--puppet/modules/ntp/spec/system/class_spec.rb39
-rw-r--r--puppet/modules/ntp/spec/system/ntp_config_spec.rb35
-rw-r--r--puppet/modules/ntp/spec/system/ntp_install_spec.rb31
-rw-r--r--puppet/modules/ntp/spec/system/ntp_service_spec.rb25
-rw-r--r--puppet/modules/ntp/spec/system/preferred_servers_spec.rb20
-rw-r--r--puppet/modules/ntp/spec/system/restrict_spec.rb20
-rw-r--r--puppet/modules/ntp/spec/unit/puppet/provider/README.markdown4
-rw-r--r--puppet/modules/ntp/spec/unit/puppet/type/README.markdown4
-rw-r--r--puppet/modules/ntp/templates/ntp.conf.erb43
-rw-r--r--puppet/modules/ntp/tests/init.pp11
-rwxr-xr-xpuppet/modules/obfsproxy/files/obfsproxy_init93
-rw-r--r--puppet/modules/obfsproxy/files/obfsproxy_logrotate14
-rw-r--r--puppet/modules/obfsproxy/manifests/init.pp86
-rw-r--r--puppet/modules/obfsproxy/templates/etc_conf.erb11
-rw-r--r--puppet/modules/opendkim/manifests/init.pp67
-rw-r--r--puppet/modules/opendkim/templates/opendkim.conf45
-rw-r--r--puppet/modules/openvpn/.fixtures.yml6
-rw-r--r--puppet/modules/openvpn/.gitignore3
-rw-r--r--puppet/modules/openvpn/.rvmrc38
-rw-r--r--puppet/modules/openvpn/.travis.yml29
-rw-r--r--puppet/modules/openvpn/Gemfile7
-rw-r--r--puppet/modules/openvpn/Gemfile.lock36
-rw-r--r--puppet/modules/openvpn/LICENSE177
-rw-r--r--puppet/modules/openvpn/Modulefile11
-rw-r--r--puppet/modules/openvpn/Rakefile2
-rw-r--r--puppet/modules/openvpn/Readme.markdown54
-rw-r--r--puppet/modules/openvpn/Vagrantfile42
-rw-r--r--puppet/modules/openvpn/manifests/client.pp187
-rw-r--r--puppet/modules/openvpn/manifests/client_specific_config.pp79
-rw-r--r--puppet/modules/openvpn/manifests/config.pp52
-rw-r--r--puppet/modules/openvpn/manifests/init.pp43
-rw-r--r--puppet/modules/openvpn/manifests/install.pp46
-rw-r--r--puppet/modules/openvpn/manifests/params.pp37
-rw-r--r--puppet/modules/openvpn/manifests/server.pp233
-rw-r--r--puppet/modules/openvpn/manifests/service.pp36
-rw-r--r--puppet/modules/openvpn/spec/classes/openvpn_config_spec.rb15
-rw-r--r--puppet/modules/openvpn/spec/classes/openvpn_init_spec.rb9
-rw-r--r--puppet/modules/openvpn/spec/classes/openvpn_install_spec.rb11
-rw-r--r--puppet/modules/openvpn/spec/classes/openvpn_service_spec.rb13
-rw-r--r--puppet/modules/openvpn/spec/defines/openvpn_client_spec.rb88
-rw-r--r--puppet/modules/openvpn/spec/defines/openvpn_client_specific_config_spec.rb40
-rw-r--r--puppet/modules/openvpn/spec/defines/openvpn_server_spec.rb165
-rw-r--r--puppet/modules/openvpn/spec/spec_helper.rb2
-rw-r--r--puppet/modules/openvpn/templates/client.erb26
-rw-r--r--puppet/modules/openvpn/templates/client_specific_config.erb10
-rw-r--r--puppet/modules/openvpn/templates/etc-default-openvpn.erb20
-rw-r--r--puppet/modules/openvpn/templates/server.erb37
-rw-r--r--puppet/modules/openvpn/templates/vars.erb68
-rw-r--r--puppet/modules/openvpn/vagrant/client.pp5
-rw-r--r--puppet/modules/openvpn/vagrant/server.pp23
-rw-r--r--puppet/modules/passenger/README (renamed from README)0
-rw-r--r--puppet/modules/passenger/files/mod_passenger.conf0
-rwxr-xr-xpuppet/modules/passenger/files/munin/passenger_memory_stats (renamed from files/munin/passenger_memory_stats)0
-rwxr-xr-xpuppet/modules/passenger/files/munin/passenger_stats (renamed from files/munin/passenger_stats)0
-rw-r--r--puppet/modules/passenger/manifests/apache.pp (renamed from manifests/apache.pp)0
-rw-r--r--puppet/modules/passenger/manifests/apache/base.pp (renamed from manifests/apache/base.pp)0
-rw-r--r--puppet/modules/passenger/manifests/apache/centos.pp (renamed from manifests/apache/centos.pp)0
-rw-r--r--puppet/modules/passenger/manifests/apache/debian.pp (renamed from manifests/apache/debian.pp)0
-rw-r--r--puppet/modules/passenger/manifests/init.pp (renamed from manifests/init.pp)0
-rw-r--r--puppet/modules/passenger/manifests/munin.pp (renamed from manifests/munin.pp)0
-rw-r--r--puppet/modules/postfwd/files/postfwd_default19
-rw-r--r--puppet/modules/postfwd/manifests/init.pp43
-rw-r--r--puppet/modules/postfwd/templates/postfwd.cf.erb28
-rw-r--r--puppet/modules/resolvconf/manifests/init.pp27
-rw-r--r--puppet/modules/resolvconf/templates/resolvconf.OpenBSD.erb5
-rw-r--r--puppet/modules/resolvconf/templates/resolvconf.erb7
-rw-r--r--puppet/modules/ruby/manifests/devel.pp5
-rw-r--r--puppet/modules/ruby/manifests/init.pp72
-rw-r--r--puppet/modules/ruby/manifests/mysql.pp7
-rw-r--r--puppet/modules/ruby/manifests/postgres.pp6
-rw-r--r--puppet/modules/ruby/manifests/shadow.pp6
-rw-r--r--puppet/modules/ruby/manifests/shadow/base.pp6
-rw-r--r--puppet/modules/ruby/manifests/shadow/debian.pp8
-rw-r--r--puppet/modules/rubygems/files/gemrc3
-rw-r--r--puppet/modules/rubygems/manifests/activerecord.pp7
-rw-r--r--puppet/modules/rubygems/manifests/activesupport.pp7
-rw-r--r--puppet/modules/rubygems/manifests/backports.pp7
-rw-r--r--puppet/modules/rubygems/manifests/bcrypt.pp14
-rw-r--r--puppet/modules/rubygems/manifests/brokengem.pp14
-rw-r--r--puppet/modules/rubygems/manifests/camping.pp7
-rw-r--r--puppet/modules/rubygems/manifests/captcha/v_0_1_2.pp5
-rw-r--r--puppet/modules/rubygems/manifests/chronic_duration.pp5
-rw-r--r--puppet/modules/rubygems/manifests/devel.pp6
-rw-r--r--puppet/modules/rubygems/manifests/fastercsv.pp6
-rw-r--r--puppet/modules/rubygems/manifests/gd/v_0_7_4.pp5
-rw-r--r--puppet/modules/rubygems/manifests/gem.pp108
-rw-r--r--puppet/modules/rubygems/manifests/gem/cachedir.pp4
-rw-r--r--puppet/modules/rubygems/manifests/gpgme.pp35
-rw-r--r--puppet/modules/rubygems/manifests/hiera.pp7
-rw-r--r--puppet/modules/rubygems/manifests/hiera_puppet.pp7
-rw-r--r--puppet/modules/rubygems/manifests/highline.pp14
-rw-r--r--puppet/modules/rubygems/manifests/init.pp31
-rw-r--r--puppet/modules/rubygems/manifests/ip.pp7
-rw-r--r--puppet/modules/rubygems/manifests/json/v_1_4_6.pp3
-rw-r--r--puppet/modules/rubygems/manifests/lockfile.pp7
-rw-r--r--puppet/modules/rubygems/manifests/mail.pp19
-rw-r--r--puppet/modules/rubygems/manifests/maildir.pp15
-rw-r--r--puppet/modules/rubygems/manifests/markaby.pp7
-rw-r--r--puppet/modules/rubygems/manifests/moneta.pp7
-rw-r--r--puppet/modules/rubygems/manifests/mysql.pp5
-rw-r--r--puppet/modules/rubygems/manifests/net_ldap/v_0_0_4.pp3
-rw-r--r--puppet/modules/rubygems/manifests/ntlm/v_0_1_1.pp3
-rw-r--r--puppet/modules/rubygems/manifests/open4.pp7
-rw-r--r--puppet/modules/rubygems/manifests/pbkdf2.pp8
-rw-r--r--puppet/modules/rubygems/manifests/postgres.pp11
-rw-r--r--puppet/modules/rubygems/manifests/rack.pp7
-rw-r--r--puppet/modules/rubygems/manifests/sinatra.pp7
-rw-r--r--puppet/modules/rubygems/manifests/sqlite.pp6
-rw-r--r--puppet/modules/rubygems/manifests/systemu.pp7
-rw-r--r--puppet/modules/rubygems/manifests/thin.pp7
-rw-r--r--puppet/modules/rubygems/manifests/tlsmail.pp7
-rw-r--r--puppet/modules/rubygems/manifests/tmail.pp7
-rw-r--r--puppet/modules/rubygems/manifests/xmlsimple.pp20
-rw-r--r--puppet/modules/rubygems/manifests/xmpp4r.pp7
-rw-r--r--puppet/modules/rubygems/manifests/ya2yaml.pp7
-rw-r--r--puppet/modules/shorewall/LICENSE674
-rw-r--r--puppet/modules/shorewall/README219
-rw-r--r--puppet/modules/shorewall/files/boilerplate/blacklist.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/blacklist.header10
-rw-r--r--puppet/modules/shorewall/files/boilerplate/clear.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/clear.header13
-rw-r--r--puppet/modules/shorewall/files/boilerplate/continue.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/continue.header14
-rw-r--r--puppet/modules/shorewall/files/boilerplate/hosts.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/hosts.header9
-rw-r--r--puppet/modules/shorewall/files/boilerplate/init.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/init.header13
-rw-r--r--puppet/modules/shorewall/files/boilerplate/initdone.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/initdone.header14
-rw-r--r--puppet/modules/shorewall/files/boilerplate/interfaces.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/interfaces.header10
-rw-r--r--puppet/modules/shorewall/files/boilerplate/maclog.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/maclog.header14
-rw-r--r--puppet/modules/shorewall/files/boilerplate/mangle.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/mangle.header7
-rw-r--r--puppet/modules/shorewall/files/boilerplate/masq.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/masq.header9
-rw-r--r--puppet/modules/shorewall/files/boilerplate/nat.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/nat.header9
-rw-r--r--puppet/modules/shorewall/files/boilerplate/params.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/params.header26
-rw-r--r--puppet/modules/shorewall/files/boilerplate/policy.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/policy.header9
-rw-r--r--puppet/modules/shorewall/files/boilerplate/providers.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/providers.header9
-rw-r--r--puppet/modules/shorewall/files/boilerplate/proxyarp.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/proxyarp.header9
-rw-r--r--puppet/modules/shorewall/files/boilerplate/rfc1918.footer5
-rw-r--r--puppet/modules/shorewall/files/boilerplate/rfc1918.header5
-rw-r--r--puppet/modules/shorewall/files/boilerplate/routestopped.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/routestopped.header11
-rw-r--r--puppet/modules/shorewall/files/boilerplate/rtrules.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/rtrules.header8
-rw-r--r--puppet/modules/shorewall/files/boilerplate/rules.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/rules.header10
-rw-r--r--puppet/modules/shorewall/files/boilerplate/start.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/start.header12
-rw-r--r--puppet/modules/shorewall/files/boilerplate/started.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/started.header20
-rw-r--r--puppet/modules/shorewall/files/boilerplate/stop.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/stop.header13
-rw-r--r--puppet/modules/shorewall/files/boilerplate/stopped.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/stopped.header13
-rw-r--r--puppet/modules/shorewall/files/boilerplate/tcclasses.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/tcclasses.header9
-rw-r--r--puppet/modules/shorewall/files/boilerplate/tcdevices.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/tcdevices.header10
-rw-r--r--puppet/modules/shorewall/files/boilerplate/tcrules.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/tcrules.header15
-rw-r--r--puppet/modules/shorewall/files/boilerplate/tunnel.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/tunnel.header11
-rw-r--r--puppet/modules/shorewall/files/boilerplate/zones.footer1
-rw-r--r--puppet/modules/shorewall/files/boilerplate/zones.header11
-rw-r--r--puppet/modules/shorewall/files/empty/.ignore1
-rw-r--r--puppet/modules/shorewall/manifests/base.pp48
-rw-r--r--puppet/modules/shorewall/manifests/blacklist.pp9
-rw-r--r--puppet/modules/shorewall/manifests/centos.pp13
-rw-r--r--puppet/modules/shorewall/manifests/debian.pp11
-rw-r--r--puppet/modules/shorewall/manifests/entry.pp12
-rw-r--r--puppet/modules/shorewall/manifests/extension_script.pp14
-rw-r--r--puppet/modules/shorewall/manifests/gentoo.pp5
-rw-r--r--puppet/modules/shorewall/manifests/host.pp10
-rw-r--r--puppet/modules/shorewall/manifests/init.pp75
-rw-r--r--puppet/modules/shorewall/manifests/interface.pp29
-rw-r--r--puppet/modules/shorewall/manifests/managed_file.pp17
-rw-r--r--puppet/modules/shorewall/manifests/mangle.pp19
-rw-r--r--puppet/modules/shorewall/manifests/masq.pp17
-rw-r--r--puppet/modules/shorewall/manifests/nat.pp11
-rw-r--r--puppet/modules/shorewall/manifests/params.pp5
-rw-r--r--puppet/modules/shorewall/manifests/policy.pp12
-rw-r--r--puppet/modules/shorewall/manifests/providers.pp16
-rw-r--r--puppet/modules/shorewall/manifests/proxyarp.pp11
-rw-r--r--puppet/modules/shorewall/manifests/rfc1918.pp8
-rw-r--r--puppet/modules/shorewall/manifests/routestopped.pp14
-rw-r--r--puppet/modules/shorewall/manifests/rtrules.pp11
-rw-r--r--puppet/modules/shorewall/manifests/rule.pp20
-rw-r--r--puppet/modules/shorewall/manifests/rule_section.pp7
-rw-r--r--puppet/modules/shorewall/manifests/rules/cobbler.pp19
-rw-r--r--puppet/modules/shorewall/manifests/rules/dns.pp18
-rw-r--r--puppet/modules/shorewall/manifests/rules/dns/disable.pp5
-rw-r--r--puppet/modules/shorewall/manifests/rules/ekeyd.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/ftp.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/gitdaemon.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/gitdaemon/absent.pp5
-rw-r--r--puppet/modules/shorewall/manifests/rules/http.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/http/disable.pp5
-rw-r--r--puppet/modules/shorewall/manifests/rules/https.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/identd.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/imap.pp11
-rw-r--r--puppet/modules/shorewall/manifests/rules/ipsec.pp32
-rw-r--r--puppet/modules/shorewall/manifests/rules/ipsec_nat.pp18
-rw-r--r--puppet/modules/shorewall/manifests/rules/jabberserver.pp19
-rw-r--r--puppet/modules/shorewall/manifests/rules/jetty.pp12
-rw-r--r--puppet/modules/shorewall/manifests/rules/jetty/http.pp9
-rw-r--r--puppet/modules/shorewall/manifests/rules/jetty/ssl.pp11
-rw-r--r--puppet/modules/shorewall/manifests/rules/keyserver.pp11
-rw-r--r--puppet/modules/shorewall/manifests/rules/libvirt/host.pp79
-rw-r--r--puppet/modules/shorewall/manifests/rules/managesieve.pp11
-rw-r--r--puppet/modules/shorewall/manifests/rules/mdns.pp8
-rw-r--r--puppet/modules/shorewall/manifests/rules/munin.pp16
-rw-r--r--puppet/modules/shorewall/manifests/rules/mysql.pp11
-rw-r--r--puppet/modules/shorewall/manifests/rules/nfsd.pp115
-rw-r--r--puppet/modules/shorewall/manifests/rules/ntp/client.pp11
-rw-r--r--puppet/modules/shorewall/manifests/rules/ntp/server.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/openfire.pp12
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/ekeyd.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/git.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/ibackup.pp12
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/imap.pp11
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/irc.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/ircs.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/keyserver.pp11
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/managesieve.pp11
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/munin.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/mysql.pp11
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/pop3.pp11
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/postgres.pp11
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/puppet.pp20
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/silc.pp19
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/smtp.pp11
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/ssh.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/ssh/disable.pp5
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/ssh/remove.pp5
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/whois.pp11
-rw-r--r--puppet/modules/shorewall/manifests/rules/out/xmpp.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/pop3.pp11
-rw-r--r--puppet/modules/shorewall/manifests/rules/postgres.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/puppet.pp11
-rw-r--r--puppet/modules/shorewall/manifests/rules/puppet/master.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/rsync.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/silcd.pp19
-rw-r--r--puppet/modules/shorewall/manifests/rules/smtp.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/smtp/disable.pp5
-rw-r--r--puppet/modules/shorewall/manifests/rules/smtp_submission.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/smtp_submission/disable.pp5
-rw-r--r--puppet/modules/shorewall/manifests/rules/smtps.pp10
-rw-r--r--puppet/modules/shorewall/manifests/rules/smtps/disable.pp5
-rw-r--r--puppet/modules/shorewall/manifests/rules/sobby/instance.pp11
-rw-r--r--puppet/modules/shorewall/manifests/rules/ssh.pp13
-rw-r--r--puppet/modules/shorewall/manifests/rules/syslog.pp12
-rw-r--r--puppet/modules/shorewall/manifests/rules/tftp.pp18
-rw-r--r--puppet/modules/shorewall/manifests/rules/tinc.pp34
-rw-r--r--puppet/modules/shorewall/manifests/rules/tomcat.pp12
-rw-r--r--puppet/modules/shorewall/manifests/rules/torify.pp29
-rw-r--r--puppet/modules/shorewall/manifests/rules/torify/allow_tor_transparent_proxy.pp21
-rw-r--r--puppet/modules/shorewall/manifests/rules/torify/allow_tor_user.pp15
-rw-r--r--puppet/modules/shorewall/manifests/rules/torify/redirect_tcp_to_tor.pp40
-rw-r--r--puppet/modules/shorewall/manifests/rules/torify/reject_non_tor.pp32
-rw-r--r--puppet/modules/shorewall/manifests/rules/torify/user.pp27
-rw-r--r--puppet/modules/shorewall/manifests/tcclasses.pp12
-rw-r--r--puppet/modules/shorewall/manifests/tcdevices.pp11
-rw-r--r--puppet/modules/shorewall/manifests/tcrules.pp12
-rw-r--r--puppet/modules/shorewall/manifests/tunnel.pp11
-rw-r--r--puppet/modules/shorewall/manifests/ubuntu/karmic.pp5
-rw-r--r--puppet/modules/shorewall/manifests/zone.pp14
-rw-r--r--puppet/modules/shorewall/templates/debian_default.erb26
-rw-r--r--puppet/modules/site_apache/files/conf.d/security55
-rw-r--r--puppet/modules/site_apache/files/include.d/ssl_common.inc7
-rw-r--r--puppet/modules/site_apache/manifests/common.pp30
-rw-r--r--puppet/modules/site_apache/manifests/common/tls.pp6
-rw-r--r--puppet/modules/site_apache/templates/vhosts.d/api.conf.erb48
-rw-r--r--puppet/modules/site_apache/templates/vhosts.d/common.conf.erb76
-rw-r--r--puppet/modules/site_apache/templates/vhosts.d/hidden_service.conf.erb55
-rw-r--r--puppet/modules/site_apt/files/Debian/51unattended-upgrades-leap6
-rw-r--r--puppet/modules/site_apt/files/keys/leap-archive.gpgbin0 -> 20188 bytes
-rw-r--r--puppet/modules/site_apt/files/keys/leap-experimental-archive.gpgbin0 -> 3423 bytes
-rw-r--r--puppet/modules/site_apt/manifests/dist_upgrade.pp17
-rw-r--r--puppet/modules/site_apt/manifests/init.pp55
-rw-r--r--puppet/modules/site_apt/manifests/leap_repo.pp16
-rw-r--r--puppet/modules/site_apt/manifests/preferences/check_mk.pp9
-rw-r--r--puppet/modules/site_apt/manifests/preferences/passenger.pp14
-rw-r--r--puppet/modules/site_apt/manifests/preferences/rsyslog.pp13
-rw-r--r--puppet/modules/site_apt/manifests/unattended_upgrades.pp20
-rw-r--r--puppet/modules/site_apt/templates/jessie/postfix.seeds1
-rw-r--r--puppet/modules/site_apt/templates/preferences.include_squeeze25
-rw-r--r--puppet/modules/site_apt/templates/secondary.list3
-rw-r--r--puppet/modules/site_apt/templates/wheezy/postfix.seeds1
-rw-r--r--puppet/modules/site_check_mk/files/agent/local_checks/all_hosts/run_node_tests.sh5
-rwxr-xr-xpuppet/modules/site_check_mk/files/agent/local_checks/couchdb/leap_couch_stats.sh122
-rwxr-xr-xpuppet/modules/site_check_mk/files/agent/local_checks/mx/check_leap_mx.sh33
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg28
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/leap_mx.cfg4
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/logwatch.cfg31
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/openvpn.cfg19
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg6
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/stunnel.cfg10
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/syslog/bigcouch.cfg5
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/syslog/couchdb.cfg2
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/syslog_header.cfg1
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg21
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/webapp.cfg8
-rwxr-xr-xpuppet/modules/site_check_mk/files/agent/nagios_plugins/check_unix_open_fds.pl322
-rwxr-xr-xpuppet/modules/site_check_mk/files/agent/plugins/mk_logwatch.1.2.4374
-rw-r--r--puppet/modules/site_check_mk/files/extra_service_conf.mk14
-rw-r--r--puppet/modules/site_check_mk/files/ignored_services.mk3
-rw-r--r--puppet/modules/site_check_mk/manifests/agent.pp35
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/couchdb.pp34
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/couchdb/bigcouch.pp49
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/couchdb/plain.pp23
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/haproxy.pp15
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/haveged.pp15
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/logwatch.pp36
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/logwatch/syslog.pp18
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/mrpe.pp24
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/mx.pp27
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/openvpn.pp10
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/package/nagios_plugins_contrib.pp5
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/package/perl_plugin.pp5
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/soledad.pp17
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/stunnel.pp9
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/webapp.pp15
-rw-r--r--puppet/modules/site_check_mk/manifests/server.pp103
-rw-r--r--puppet/modules/site_check_mk/templates/extra_host_conf.mk13
-rw-r--r--puppet/modules/site_check_mk/templates/host_contactgroups.mk17
-rw-r--r--puppet/modules/site_check_mk/templates/hostgroups.mk17
-rw-r--r--puppet/modules/site_check_mk/templates/use_ssh.mk6
-rw-r--r--puppet/modules/site_config/files/xterm-title.sh8
-rw-r--r--puppet/modules/site_config/lib/facter/dhcp_enabled.rb22
-rw-r--r--puppet/modules/site_config/lib/facter/ip_interface.rb13
-rw-r--r--puppet/modules/site_config/manifests/caching_resolver.pp27
-rw-r--r--puppet/modules/site_config/manifests/default.pp71
-rw-r--r--puppet/modules/site_config/manifests/dhclient.pp40
-rw-r--r--puppet/modules/site_config/manifests/files.pp24
-rw-r--r--puppet/modules/site_config/manifests/hosts.pp44
-rw-r--r--puppet/modules/site_config/manifests/initial_firewall.pp64
-rw-r--r--puppet/modules/site_config/manifests/packages.pp32
-rw-r--r--puppet/modules/site_config/manifests/packages/build_essential.pp28
-rw-r--r--puppet/modules/site_config/manifests/packages/gnutls.pp5
-rw-r--r--puppet/modules/site_config/manifests/params.pp35
-rw-r--r--puppet/modules/site_config/manifests/remove.pp11
-rw-r--r--puppet/modules/site_config/manifests/remove/bigcouch.pp42
-rw-r--r--puppet/modules/site_config/manifests/remove/files.pp56
-rw-r--r--puppet/modules/site_config/manifests/remove/jessie.pp14
-rw-r--r--puppet/modules/site_config/manifests/remove/monitoring.pp13
-rw-r--r--puppet/modules/site_config/manifests/remove/tapicero.pp72
-rw-r--r--puppet/modules/site_config/manifests/remove/webapp.pp7
-rw-r--r--puppet/modules/site_config/manifests/resolvconf.pp14
-rw-r--r--puppet/modules/site_config/manifests/ruby.pp8
-rw-r--r--puppet/modules/site_config/manifests/ruby/dev.pp8
-rw-r--r--puppet/modules/site_config/manifests/setup.pp50
-rw-r--r--puppet/modules/site_config/manifests/shell.pp22
-rw-r--r--puppet/modules/site_config/manifests/slow.pp10
-rw-r--r--puppet/modules/site_config/manifests/sysctl.pp8
-rw-r--r--puppet/modules/site_config/manifests/syslog.pp62
-rw-r--r--puppet/modules/site_config/manifests/vagrant.pp11
-rw-r--r--puppet/modules/site_config/manifests/x509/ca.pp11
-rw-r--r--puppet/modules/site_config/manifests/x509/ca_bundle.pp17
-rw-r--r--puppet/modules/site_config/manifests/x509/cert.pp12
-rw-r--r--puppet/modules/site_config/manifests/x509/client_ca/ca.pp16
-rw-r--r--puppet/modules/site_config/manifests/x509/client_ca/key.pp16
-rw-r--r--puppet/modules/site_config/manifests/x509/commercial/ca.pp11
-rw-r--r--puppet/modules/site_config/manifests/x509/commercial/cert.pp15
-rw-r--r--puppet/modules/site_config/manifests/x509/commercial/key.pp11
-rw-r--r--puppet/modules/site_config/manifests/x509/key.pp11
-rw-r--r--puppet/modules/site_config/templates/hosts19
-rw-r--r--puppet/modules/site_config/templates/ipv4firewall_up.rules.erb14
-rw-r--r--puppet/modules/site_config/templates/ipv6firewall_up.rules.erb8
-rw-r--r--puppet/modules/site_config/templates/reload_dhclient.erb13
-rw-r--r--puppet/modules/site_couchdb/files/couchdb_scripts_defaults.conf4
-rw-r--r--puppet/modules/site_couchdb/files/designs/Readme.md14
-rw-r--r--puppet/modules/site_couchdb/files/designs/customers/Customer.json18
-rw-r--r--puppet/modules/site_couchdb/files/designs/identities/Identity.json34
-rw-r--r--puppet/modules/site_couchdb/files/designs/invite_codes/InviteCode.json22
-rw-r--r--puppet/modules/site_couchdb/files/designs/messages/Message.json18
-rw-r--r--puppet/modules/site_couchdb/files/designs/sessions/Session.json8
-rw-r--r--puppet/modules/site_couchdb/files/designs/shared/docs.json8
-rw-r--r--puppet/modules/site_couchdb/files/designs/shared/syncs.json11
-rw-r--r--puppet/modules/site_couchdb/files/designs/shared/transactions.json13
-rw-r--r--puppet/modules/site_couchdb/files/designs/tickets/Ticket.json50
-rw-r--r--puppet/modules/site_couchdb/files/designs/tokens/Token.json14
-rw-r--r--puppet/modules/site_couchdb/files/designs/users/User.json22
-rwxr-xr-xpuppet/modules/site_couchdb/files/leap_ca_daemon157
-rw-r--r--puppet/modules/site_couchdb/files/local.ini8
-rw-r--r--puppet/modules/site_couchdb/files/runit_config6
-rw-r--r--puppet/modules/site_couchdb/lib/puppet/parser/functions/rotated_db_name.rb24
-rw-r--r--puppet/modules/site_couchdb/manifests/add_users.pp57
-rw-r--r--puppet/modules/site_couchdb/manifests/backup.pp23
-rw-r--r--puppet/modules/site_couchdb/manifests/bigcouch.pp50
-rw-r--r--puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp8
-rw-r--r--puppet/modules/site_couchdb/manifests/bigcouch/compaction.pp8
-rw-r--r--puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp11
-rw-r--r--puppet/modules/site_couchdb/manifests/create_dbs.pp102
-rw-r--r--puppet/modules/site_couchdb/manifests/designs.pp46
-rw-r--r--puppet/modules/site_couchdb/manifests/init.pp81
-rw-r--r--puppet/modules/site_couchdb/manifests/logrotate.pp14
-rw-r--r--puppet/modules/site_couchdb/manifests/mirror.pp78
-rw-r--r--puppet/modules/site_couchdb/manifests/plain.pp14
-rw-r--r--puppet/modules/site_couchdb/manifests/setup.pp61
-rw-r--r--puppet/modules/site_couchdb/manifests/upload_design.pp14
-rw-r--r--puppet/modules/site_haproxy/files/haproxy-stats.cfg6
-rw-r--r--puppet/modules/site_haproxy/manifests/init.pp41
-rw-r--r--puppet/modules/site_haproxy/templates/couch.erb32
-rw-r--r--puppet/modules/site_haproxy/templates/haproxy.cfg.erb11
-rw-r--r--puppet/modules/site_mx/manifests/init.pp20
-rw-r--r--puppet/modules/site_nagios/files/configs/Debian/nagios.cfg1302
-rwxr-xr-xpuppet/modules/site_nagios/files/plugins/check_last_regex_in_log85
-rw-r--r--puppet/modules/site_nagios/manifests/add_host_services.pp32
-rw-r--r--puppet/modules/site_nagios/manifests/add_service.pp32
-rw-r--r--puppet/modules/site_nagios/manifests/init.pp13
-rw-r--r--puppet/modules/site_nagios/manifests/plugins.pp16
-rw-r--r--puppet/modules/site_nagios/manifests/server.pp97
-rw-r--r--puppet/modules/site_nagios/manifests/server/add_contacts.pp18
-rw-r--r--puppet/modules/site_nagios/manifests/server/apache.pp25
-rw-r--r--puppet/modules/site_nagios/manifests/server/contactgroup.pp8
-rw-r--r--puppet/modules/site_nagios/manifests/server/hostgroup.pp7
-rw-r--r--puppet/modules/site_nagios/manifests/server/icli.pp26
-rw-r--r--puppet/modules/site_nagios/templates/icli_aliases.erb7
-rw-r--r--puppet/modules/site_nickserver/manifests/init.pp178
-rw-r--r--puppet/modules/site_nickserver/templates/nickserver-proxy.conf.erb19
-rw-r--r--puppet/modules/site_nickserver/templates/nickserver.yml.erb19
-rw-r--r--puppet/modules/site_obfsproxy/README0
-rw-r--r--puppet/modules/site_obfsproxy/manifests/init.pp38
-rw-r--r--puppet/modules/site_openvpn/README20
-rw-r--r--puppet/modules/site_openvpn/manifests/dh_key.pp10
-rw-r--r--puppet/modules/site_openvpn/manifests/init.pp238
-rw-r--r--puppet/modules/site_openvpn/manifests/resolver.pp50
-rw-r--r--puppet/modules/site_openvpn/manifests/server_config.pp228
-rw-r--r--puppet/modules/site_openvpn/templates/add_gateway_ips.sh.erb11
-rw-r--r--puppet/modules/site_postfix/files/checks/received_anon2
-rw-r--r--puppet/modules/site_postfix/manifests/debug.pp9
-rw-r--r--puppet/modules/site_postfix/manifests/mx.pp152
-rw-r--r--puppet/modules/site_postfix/manifests/mx/checks.pp23
-rw-r--r--puppet/modules/site_postfix/manifests/mx/received_anon.pp13
-rw-r--r--puppet/modules/site_postfix/manifests/mx/rewrite_openpgp_header.pp11
-rw-r--r--puppet/modules/site_postfix/manifests/mx/smtp_auth.pp6
-rw-r--r--puppet/modules/site_postfix/manifests/mx/smtp_tls.pp43
-rw-r--r--puppet/modules/site_postfix/manifests/mx/smtpd_checks.pp36
-rw-r--r--puppet/modules/site_postfix/manifests/mx/smtpd_tls.pp69
-rw-r--r--puppet/modules/site_postfix/manifests/mx/static_aliases.pp88
-rw-r--r--puppet/modules/site_postfix/manifests/satellite.pp47
-rw-r--r--puppet/modules/site_postfix/templates/checks/helo_access.erb21
-rw-r--r--puppet/modules/site_postfix/templates/checks/rewrite_openpgp_headers.erb13
-rw-r--r--puppet/modules/site_postfix/templates/virtual-aliases.erb21
-rw-r--r--puppet/modules/site_rsyslog/templates/client.conf.erb134
-rw-r--r--puppet/modules/site_shorewall/files/Debian/shorewall.service23
-rw-r--r--puppet/modules/site_shorewall/manifests/defaults.pp86
-rw-r--r--puppet/modules/site_shorewall/manifests/dnat.pp19
-rw-r--r--puppet/modules/site_shorewall/manifests/dnat_rule.pp50
-rw-r--r--puppet/modules/site_shorewall/manifests/eip.pp92
-rw-r--r--puppet/modules/site_shorewall/manifests/ip_forward.pp10
-rw-r--r--puppet/modules/site_shorewall/manifests/monitor.pp8
-rw-r--r--puppet/modules/site_shorewall/manifests/mx.pp24
-rw-r--r--puppet/modules/site_shorewall/manifests/obfsproxy.pp25
-rw-r--r--puppet/modules/site_shorewall/manifests/service/http.pp13
-rw-r--r--puppet/modules/site_shorewall/manifests/service/https.pp12
-rw-r--r--puppet/modules/site_shorewall/manifests/service/smtp.pp13
-rw-r--r--puppet/modules/site_shorewall/manifests/service/webapp_api.pp23
-rw-r--r--puppet/modules/site_shorewall/manifests/soledad.pp23
-rw-r--r--puppet/modules/site_shorewall/manifests/sshd.pp31
-rw-r--r--puppet/modules/site_shorewall/manifests/stunnel/client.pp40
-rw-r--r--puppet/modules/site_shorewall/manifests/stunnel/server.pp22
-rw-r--r--puppet/modules/site_shorewall/manifests/tor.pp26
-rw-r--r--puppet/modules/site_shorewall/manifests/webapp.pp7
-rw-r--r--puppet/modules/site_squid_deb_proxy/manifests/client.pp5
-rw-r--r--puppet/modules/site_sshd/manifests/authorized_keys.pp34
-rw-r--r--puppet/modules/site_sshd/manifests/deploy_authorized_keys.pp9
-rw-r--r--puppet/modules/site_sshd/manifests/init.pp82
-rw-r--r--puppet/modules/site_sshd/manifests/mosh.pp21
-rw-r--r--puppet/modules/site_sshd/templates/authorized_keys.erb10
-rw-r--r--puppet/modules/site_sshd/templates/ssh_config.erb40
-rw-r--r--puppet/modules/site_sshd/templates/ssh_known_hosts.erb7
-rw-r--r--puppet/modules/site_static/README3
-rw-r--r--puppet/modules/site_static/manifests/domain.pp33
-rw-r--r--puppet/modules/site_static/manifests/init.pp72
-rw-r--r--puppet/modules/site_static/manifests/location.pp36
-rw-r--r--puppet/modules/site_static/templates/amber.erb13
-rw-r--r--puppet/modules/site_static/templates/apache.conf.erb88
-rw-r--r--puppet/modules/site_static/templates/rack.erb19
-rw-r--r--puppet/modules/site_stunnel/manifests/client.pp64
-rw-r--r--puppet/modules/site_stunnel/manifests/clients.pp23
-rw-r--r--puppet/modules/site_stunnel/manifests/init.pp48
-rw-r--r--puppet/modules/site_stunnel/manifests/override_service.pp18
-rw-r--r--puppet/modules/site_stunnel/manifests/servers.pp51
-rw-r--r--puppet/modules/site_tor/manifests/disable_exit.pp7
-rw-r--r--puppet/modules/site_tor/manifests/init.pp45
-rw-r--r--puppet/modules/site_webapp/files/server-status.conf26
-rw-r--r--puppet/modules/site_webapp/manifests/apache.pp28
-rw-r--r--puppet/modules/site_webapp/manifests/common_vhost.pp18
-rw-r--r--puppet/modules/site_webapp/manifests/couchdb.pp52
-rw-r--r--puppet/modules/site_webapp/manifests/cron.pp37
-rw-r--r--puppet/modules/site_webapp/manifests/hidden_service.pp52
-rw-r--r--puppet/modules/site_webapp/manifests/init.pp179
-rw-r--r--puppet/modules/site_webapp/templates/config.yml.erb36
-rw-r--r--puppet/modules/site_webapp/templates/couchdb.admin.yml.erb9
-rw-r--r--puppet/modules/site_webapp/templates/couchdb.yml.erb9
-rw-r--r--puppet/modules/soledad/manifests/client.pp16
-rw-r--r--puppet/modules/soledad/manifests/common.pp8
-rw-r--r--puppet/modules/soledad/manifests/server.pp104
-rw-r--r--puppet/modules/soledad/templates/default-soledad.erb5
-rw-r--r--puppet/modules/soledad/templates/soledad-server.conf.erb12
-rw-r--r--puppet/modules/sshd/.fixtures.yml3
-rw-r--r--puppet/modules/sshd/.gitignore4
-rw-r--r--puppet/modules/sshd/.rspec4
-rw-r--r--puppet/modules/sshd/.travis.yml27
-rw-r--r--puppet/modules/sshd/Gemfile14
-rw-r--r--puppet/modules/sshd/Gemfile.lock116
-rw-r--r--puppet/modules/sshd/LICENSE674
-rw-r--r--puppet/modules/sshd/Modulefile10
-rw-r--r--puppet/modules/sshd/Puppetfile3
-rw-r--r--puppet/modules/sshd/Puppetfile.lock8
-rw-r--r--puppet/modules/sshd/README.md247
-rw-r--r--puppet/modules/sshd/Rakefile16
-rw-r--r--puppet/modules/sshd/files/autossh.init.d164
-rw-r--r--puppet/modules/sshd/lib/facter/ssh_version.rb5
-rw-r--r--puppet/modules/sshd/lib/puppet/parser/functions/ssh_keygen.rb30
-rw-r--r--puppet/modules/sshd/manifests/autossh.pp40
-rw-r--r--puppet/modules/sshd/manifests/base.pp41
-rw-r--r--puppet/modules/sshd/manifests/client.pp22
-rw-r--r--puppet/modules/sshd/manifests/client/base.pp15
-rw-r--r--puppet/modules/sshd/manifests/client/debian.pp5
-rw-r--r--puppet/modules/sshd/manifests/client/linux.pp5
-rw-r--r--puppet/modules/sshd/manifests/debian.pp13
-rw-r--r--puppet/modules/sshd/manifests/gentoo.pp5
-rw-r--r--puppet/modules/sshd/manifests/init.pp92
-rw-r--r--puppet/modules/sshd/manifests/libssh2.pp7
-rw-r--r--puppet/modules/sshd/manifests/libssh2/devel.pp7
-rw-r--r--puppet/modules/sshd/manifests/linux.pp8
-rw-r--r--puppet/modules/sshd/manifests/nagios.pp24
-rw-r--r--puppet/modules/sshd/manifests/openbsd.pp8
-rw-r--r--puppet/modules/sshd/manifests/redhat.pp5
-rw-r--r--puppet/modules/sshd/manifests/ssh_authorized_key.pp85
-rw-r--r--puppet/modules/sshd/manifests/sshkey.pp21
-rw-r--r--puppet/modules/sshd/spec/classes/client_spec.rb42
-rw-r--r--puppet/modules/sshd/spec/classes/init_spec.rb122
-rw-r--r--puppet/modules/sshd/spec/defines/ssh_authorized_key_spec.rb45
-rw-r--r--puppet/modules/sshd/spec/functions/ssh_keygen_spec.rb116
-rw-r--r--puppet/modules/sshd/spec/spec_helper.rb21
-rw-r--r--puppet/modules/sshd/spec/spec_helper_system.rb25
l---------puppet/modules/sshd/templates/sshd_config/CentOS_5.erb1
-rw-r--r--puppet/modules/sshd/templates/sshd_config/CentOS_6.erb172
-rw-r--r--puppet/modules/sshd/templates/sshd_config/CentOS_7.erb186
-rw-r--r--puppet/modules/sshd/templates/sshd_config/Debian_jessie.erb124
-rw-r--r--puppet/modules/sshd/templates/sshd_config/Debian_sid.erb124
-rw-r--r--puppet/modules/sshd/templates/sshd_config/Debian_squeeze.erb127
-rw-r--r--puppet/modules/sshd/templates/sshd_config/Debian_wheezy.erb132
-rw-r--r--puppet/modules/sshd/templates/sshd_config/FreeBSD.erb168
-rw-r--r--puppet/modules/sshd/templates/sshd_config/Gentoo.erb164
-rw-r--r--puppet/modules/sshd/templates/sshd_config/OpenBSD.erb144
-rw-r--r--puppet/modules/sshd/templates/sshd_config/Ubuntu.erb133
-rw-r--r--puppet/modules/sshd/templates/sshd_config/Ubuntu_lucid.erb136
l---------puppet/modules/sshd/templates/sshd_config/Ubuntu_oneiric.erb1
l---------puppet/modules/sshd/templates/sshd_config/Ubuntu_precise.erb1
l---------puppet/modules/sshd/templates/sshd_config/XenServer_xenenterprise.erb1
-rw-r--r--puppet/modules/templatewlv/Modulefile11
-rw-r--r--puppet/modules/templatewlv/README.md21
-rw-r--r--puppet/modules/templatewlv/lib/puppet/parser/functions/templatewlv.rb41
-rw-r--r--puppet/modules/templatewlv/lib/puppet/parser/templatewrapperwlv.rb39
-rw-r--r--puppet/modules/try/README.md13
-rw-r--r--puppet/modules/try/manifests/file.pp114
-rw-r--r--puppet/modules/try/manifests/init.pp3
-rw-r--r--puppet/modules/x509/manifests/base.pp45
-rw-r--r--puppet/modules/x509/manifests/ca.pp34
-rw-r--r--puppet/modules/x509/manifests/cert.pp34
-rw-r--r--puppet/modules/x509/manifests/init.pp2
-rw-r--r--puppet/modules/x509/manifests/key.pp37
-rw-r--r--puppet/modules/x509/manifests/variables.pp7
-rw-r--r--tests/README.md25
-rw-r--r--tests/helpers/bonafide_helper.rb235
-rw-r--r--tests/helpers/client_side_db.py167
-rw-r--r--tests/helpers/couchdb_helper.rb142
-rw-r--r--tests/helpers/files_helper.rb54
-rw-r--r--tests/helpers/http_helper.rb157
-rw-r--r--tests/helpers/network_helper.rb79
-rw-r--r--tests/helpers/os_helper.rb41
-rw-r--r--tests/helpers/smtp_helper.rb45
-rwxr-xr-xtests/helpers/soledad_sync.py89
-rw-r--r--tests/helpers/srp_helper.rb171
-rw-r--r--tests/order.rb22
-rw-r--r--tests/white-box/couchdb.rb186
-rw-r--r--tests/white-box/dummy.rb71
-rw-r--r--tests/white-box/mx.rb267
-rw-r--r--tests/white-box/network.rb90
-rw-r--r--tests/white-box/openvpn.rb16
-rw-r--r--tests/white-box/soledad.rb17
-rw-r--r--tests/white-box/webapp.rb134
-rwxr-xr-xvagrant/add-pixelated.sh32
-rwxr-xr-xvagrant/configure-leap.sh92
-rwxr-xr-xvagrant/install-platform.pp15
-rw-r--r--vagrant/vagrant.config22
1144 files changed, 52450 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..146a1006
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+/.vagrant
+/puppet/modules/site_custom
+Gemfile.lock
diff --git a/files/mod_passenger.conf b/.gitmodules
index e69de29b..e69de29b 100644
--- a/files/mod_passenger.conf
+++ b/.gitmodules
diff --git a/.mailmap b/.mailmap
new file mode 100644
index 00000000..aee70b0a
--- /dev/null
+++ b/.mailmap
@@ -0,0 +1,8 @@
+Varac <varacanero@zeromail.org>
+Micah Anderson <micah@leap.se> Micah Anderson <micah@riseup.net>
+Micah Anderson <micah@leap.se> micah <micah@leap.se>
+Kwadronaut <kwadronaut@leap.se>
+Elijah <elijah@riseup.net> elijah <elijah@ChrUbuntu.(none)>
+Elijah <elijah@riseup.net> elijah <elijah@riseup.net>
+Leap Admins <admin@leap.se> root <root@localhost>
+
diff --git a/CHANGES.md b/CHANGES.md
new file mode 100644
index 00000000..ad42dd7a
--- /dev/null
+++ b/CHANGES.md
@@ -0,0 +1,64 @@
+Platform 0.8
+--------------------------------------
+
+This release focused on the email service. Debian Jessie is now required,
+which also means that you must migrate all data from BigCouch to CouchDB.
+
+UPGRADING: It is tricky to upgrade the OS and migrate the database. You can
+follow the tutorial here: https://leap.se/en/upgrade-0-8
+
+WARNING: failure to migrate data from BigCouch to CouchDB will cause all user
+accounts to be destroyed.
+
+Other new features:
+
+* It is possible to require invite codes for new users signing up.
+
+* Tapicero has been removed. User storage databases are now created as needed
+  by soledad, and eventually deleted when no longer needed.
+
+* Admins can now suspend or enable users, and block or enable their ability
+  to send and receive email.
+
+* Support for SPF and DKIM.
+
+Compatibility:
+
+* Now, soledad and couchdb must be on the same node.
+* Requires Debian Jessie. Wheezy is no longer supported.
+* Requires CouchDB, BigCouch is no longer supported.
+* Requires leap_cli version 1.8
+* Requires bitmask client version >= 0.9
+* Includes:
+ * leap_mx 0.8
+ * webapp 0.8
+ * soledad 0.8
+
+Commits: https://leap.se/git/leap_platform.git/shortlog/refs/tags/0.8
+Issues fixed: https://leap.se/code/versions/189
+
+
+Platform 0.7.1
+--------------------------------------
+
+Compatibility:
+
+* Requires leap_cli version 1.7.4
+* Requires bitmask client version >= 0.7
+* Previous releases supported cookies when using the provider API. Now, only
+ tokens are supported.
+* Includes:
+ * leap_mx 0.7.0
+ * tapicero 0.7
+ * webapp 0.7
+ * soledad 0.7
+
+Commits: https://leap.se/git/leap_platform.git/shortlog/refs/tags/0.7.1
+Issues fixed: https://leap.se/code/versions/159
+
+Upgrading:
+
+* `gem install leap_cli --version 1.7.4`.
+* `cd leap_platform; git pull; git checkout 0.7.1`.
+* `leap deploy`
+* `leap test` to make sure everything is working
diff --git a/Gemfile b/Gemfile
new file mode 100644
index 00000000..8925a904
--- /dev/null
+++ b/Gemfile
@@ -0,0 +1,13 @@
+source "https://rubygems.org"
+
+group :test do
+ gem "rake"
+ gem "rspec", '< 3.2.0'
+ gem "puppet", ENV['PUPPET_VERSION'] || ENV['GEM_PUPPET_VERSION'] || ENV['PUPPET_GEM_VERSION'] || '~> 3.7.0'
+ gem "facter", ENV['FACTER_VERSION'] || ENV['GEM_FACTER_VERSION'] || ENV['FACTER_GEM_VERSION'] || '~> 2.2.0'
+ gem "rspec-puppet"
+ gem "puppetlabs_spec_helper"
+ gem "metadata-json-lint"
+ gem "rspec-puppet-facts"
+ gem "mocha"
+end
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..94a9ed02
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..edc272d8
--- /dev/null
+++ b/README.md
@@ -0,0 +1,111 @@
+Leap Platform
+=============================
+
+[![Build Status](https://jenkins.leap.se/job/platform_develop/badge/icon)](https://jenkins.leap.se/job/platform_develop/)
+
+The LEAP Platform is a set of complementary packages and server recipes to automate the maintenance of LEAP services in a hardened Debian environment. Its goal is to make it as painless as possible for sysadmins to deploy and maintain a service provider's infrastructure for secure communication. These recipes define an abstract service provider: a set of Puppet modules designed to work together to give sysadmins everything they need to manage an infrastructure that provides secure communication services.
+
+Getting started
+=============================
+
+It is highly recommended that you start by reading the overview of the [LEAP Platform](https://leap.se/docs/platform) and then begin with the [Quick Start tutorial](https://leap.se/en/docs/platform/tutorials/quick-start), which walks you through setting up a test environment so you can get familiar with how things work before deploying to live servers.
+
+An offline copy of this documentation is contained in the `doc` subdirectory. For the most up-to-date documentation, visit the website.
+
+Requirements
+------------------
+
+For testing a virtual deployment simulated on your computer, you will need a fairly recent x86_64 computer with hardware virtualization features (AMD-V or VT-x) and plenty of RAM. If you follow the "Quick Start" documentation, we will walk you through using Vagrant to set up a test deployment.
+
+For a live deployment of the platform, the number of servers required depends on your needs and which services you want to deploy. At the moment, the LEAP Platform supports servers with a base Debian Jessie installation.
+
+Troubleshooting
+=============================
+
+If you have a problem, we are interested in fixing it!
+
+First, have a look at the [Known Issues](https://leap.se/docs/platform/known-issues) to see if your issue is already documented there.
+
+If not, the best way for us to solve your problem is if you provide us with the complete log of what you did and the output that was produced. Please don't cut out what appears to be useless information and include only the error you received; instead, copy and paste the complete log so that we can better assess the overall situation. If you can run the command that produced the error with a raised verbosity level (such as -v2), that gives us more useful debugging information.
+
+To capture the log, you can copy it from the console, run `leap --log FILE`, or edit your Leapfile to include `@log = '/tmp/leap.log'`.
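+
+For example, a minimal sketch of capturing a complete, verbose log (assuming
+the failing command was `leap deploy` and that you run it from your provider
+directory, where the Leapfile lives):
+
+    # send a copy of all output to a log file
+    echo "@log = '/tmp/leap.log'" >> Leapfile
+
+    # re-run the failing command with raised verbosity
+    leap -v2 deploy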
+
+Visit https://leap.se/en/docs/get-involved/communication for details on how to contact the developers.
+
+Known issues
+============
+
+The following issues are known to exist in 0.5.2 and later:
+
+CouchDB Sync
+------------
+You can't deploy new couchdb nodes after one or more have been deployed. Make *sure* that you configure and deploy all your couchdb nodes when first creating your provider. The problem is that we do not have a clean way of adding couch nodes after the initial creation of the databases, so any nodes added afterwards end up with improperly synchronized data. See Bug [#5601](https://leap.se/code/issues/5601) for more information.
+
+User setup and ssh
+------------------
+
+. If you aren't using a single ssh key but have several different ones, you will need to define the following at the top of your ~/.ssh/config:
+ HostName <ip address>
+ IdentityFile <path to identity file>
+
+ (see: https://leap.se/code/issues/2946 and https://leap.se/code/issues/3002)
+
+. If the ssh host key changes, you need to run node init again (see: https://leap.se/en/docs/platform/guide#Working.with.SSH)
+
+. At the moment, only ECDSA ssh host keys are supported. If you get the following error: `= FAILED ssh-keyscan: no hostkey alg (must be missing an ecdsa public host key)` then you should confirm that you have the following line defined in your server's **/etc/ssh/sshd_config**: `HostKey /etc/ssh/ssh_host_ecdsa_key`. If that file doesn't exist, run `ssh-keygen -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key -N ""` in order to create it. If you made a change to your sshd_config, then you need to run `/etc/init.d/ssh restart` (see: https://leap.se/code/issues/2373)
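+
+  For example, a sketch of the steps described above, run on the affected
+  server:
+
+      # generate an ECDSA host key if /etc/ssh/ssh_host_ecdsa_key is missing
+      ssh-keygen -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key -N ""
+      # confirm that /etc/ssh/sshd_config contains the line:
+      #   HostKey /etc/ssh/ssh_host_ecdsa_key
+      # then restart ssh so any change takes effect
+      /etc/init.d/ssh restart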
+
+. To remove an admin's access to your servers, remove the directory for that user under the `users/` subdirectory in your provider directory, and then remove that user's ssh keys from files/ssh/authorized_keys. When finished, you *must* run `leap deploy` to update that information on the servers.
+
+. At the moment, it is only possible to add an admin who will have access to all LEAP servers (see: https://leap.se/code/issues/2280)
+
+. `leap add-user --self` allows only one key; if you run that command twice with different keys, the second key simply replaces the first. To add a second key, add it manually to files/ssh/authorized_keys (see: https://leap.se/code/issues/866)
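+
+  For example, a minimal sketch of adding a second key manually, run from your
+  provider directory (the key filename is hypothetical):
+
+      # append the additional public key, then push it to all servers
+      cat ~/.ssh/id_rsa_second.pub >> files/ssh/authorized_keys
+      leap deploy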
+
+
+Deploying
+---------
+
+. If you hit any errors during a run, please try deploying again, as this often resolves non-deterministic issues that were not uncovered in our testing. Please re-deploy with `leap -v2 deploy` to get more verbose logs, and capture the complete output so you can provide it to us for debugging.
+
+. If your Debian mirror fails during deployment for some reason (a network anomaly, or the mirror itself being out of date), the platform deployment will not succeed properly. Check that the mirror is up and try to deploy again once the problem is resolved (see: https://leap.se/code/issues/1091)
+
+. Deployment gives 'error: in `%`: too few arguments (ArgumentError)' - this happens when you attempt to deploy before initializing a node; initialize the node first and then deploy (see: https://leap.se/code/issues/2550)
+
+. This release has no ability to custom configure apt sources or proxies (see: https://leap.se/code/issues/1971)
+
+. When running a deploy at a verbosity level of 2 and above, you will notice puppet deprecation warnings. These are known, and we are working on fixing them.
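+
+For the uninitialized-node ArgumentError mentioned above, the recovery is simply to initialize and then deploy (the node name is an example):
+
+    leap node init mynode
+    leap deploy mynode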
+
+Special Environments
+--------------------
+
+. When deploying to OpenStack release "nova" or newer, you will need to do an initial deploy, then when it has finished run `leap facts update` and then deploy again (see: https://leap.se/code/issues/3020)
+
+leap-mx
+-------
+
+. see https://github.com/leapcode/leap_mx#070 for issues regarding leap_mx
+
+
+Contributing
+============
+
+In order to validate the syntax and style guide compliance
+before you commit, see https://github.com/pixelated-project/puppet-git-hooks#installation
+
+
+Changes
+=========
+
+Read CHANGES.md or run `git log`.
+
+Authors and Credits
+===================
+
+See contributors:
+
+ git shortlog -es --all
+
+
+Copyright/License
+=================
+
+Read LICENSE
diff --git a/Rakefile b/Rakefile
new file mode 100644
index 00000000..0d1b18ad
--- /dev/null
+++ b/Rakefile
@@ -0,0 +1,57 @@
+require 'puppetlabs_spec_helper/rake_tasks'
+require 'puppet-lint/tasks/puppet-lint'
+require 'puppet-syntax/tasks/puppet-syntax'
+
+# return list of modules, either
+# submodules, custom or all modules
+# so we can check each array separately
+def modules_pattern (type)
+ submodules = Array.new
+ custom_modules = Array.new
+ all_modules = Array.new
+
+ Dir['puppet/modules/*'].sort.each do |m|
+ system("grep -q #{m} .gitmodules")
+ if $?.exitstatus == 0
+ submodules << m + '/**/*.pp'
+ else
+ custom_modules << m + '/**/*.pp'
+ end
+ all_modules << m + '/**/*.pp'
+ end
+
+ case type
+ when 'submodule'
+ submodules
+ when 'custom'
+ custom_modules
+ when 'all'
+ all_modules
+ end
+end
+
+exclude_paths = ["**/vendor/**/*", "spec/fixtures/**/*", "pkg/**/*" ]
+
+# redefine lint task so we don't lint submodules for now
+Rake::Task[:lint].clear
+PuppetLint::RakeTask.new :lint do |config|
+ # only check for custom manifests, not submodules for now
+ config.pattern = modules_pattern('custom')
+ config.ignore_paths = exclude_paths
+ config.disable_checks = ['documentation', '80chars']
+ config.fail_on_warnings = false
+end
+
+# rake syntax::* tasks
+PuppetSyntax.exclude_paths = exclude_paths
+PuppetSyntax.future_parser = true
+
+desc "Validate erb templates"
+task :templates do
+ Dir['**/templates/**/*.erb'].each do |template|
+ sh "erb -P -x -T '-' #{template} | ruby -c" unless template =~ /.*vendor.*/
+ end
+end
+
+desc "Run all puppet checks required for CI (syntax, validate, templates, spec, lint)"
+task :test => [:syntax, :validate, :templates, :spec, :lint]
diff --git a/Vagrantfile b/Vagrantfile
new file mode 100644
index 00000000..25f26b3b
--- /dev/null
+++ b/Vagrantfile
@@ -0,0 +1,53 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+Vagrant.configure("2") do |config|
+
+ # shared config for all boxes
+
+ # Please verify the sha512 sum of the downloaded box before importing it into vagrant !
+ # see https://leap.se/en/docs/platform/details/development#Verify.vagrantbox.download
+ # for details
+ config.vm.box = "LEAP/jessie"
+
+ config.vm.provider "virtualbox" do |v|
+ v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
+ v.name = "jessie"
+ v.memory = 1536
+ end
+
+ config.vm.provider "libvirt" do |v|
+ v.memory = 1536
+ end
+
+ # Fix annoying 'stdin: is not a tty' warning
+ # see http://foo-o-rama.com/vagrant--stdin-is-not-a-tty--fix.html
+ config.vm.provision "shell" do |s|
+ s.privileged = false
+ s.inline = "sudo sed -i '/tty/!s/mesg n/tty -s \\&\\& mesg n/' /root/.profile"
+ end
+
+ config.vm.provision "puppet" do |puppet|
+ puppet.manifests_path = "./vagrant"
+ puppet.module_path = "./puppet/modules"
+ puppet.manifest_file = "install-platform.pp"
+ puppet.options = "--verbose"
+ puppet.hiera_config_path = "hiera.yaml"
+ end
+ config.vm.provision "shell", path: "vagrant/configure-leap.sh"
+
+ config.ssh.username = "vagrant"
+
+ # forward leap_web ports
+ config.vm.network "forwarded_port", guest: 443, host:4443
+ # forward pixelated ports
+ config.vm.network "forwarded_port", guest: 8080, host:8080
+
+ config.vm.define :"leap_platform", primary: true do |leap_vagrant|
+ end
+
+ config.vm.define :"pixelated", autostart: false do |pixelated_vagrant|
+ pixelated_vagrant.vm.provision "shell", path: "vagrant/add-pixelated.sh"
+ end
+
+end
diff --git a/bin/debug.sh b/bin/debug.sh
new file mode 100755
index 00000000..d6f37542
--- /dev/null
+++ b/bin/debug.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+# debug script to be run on remote servers
+# called from leap_cli with the 'leap debug' cmd
+
+apps='(leap|pixelated|stunnel|couch|soledad|haproxy)'
+
+facts='(apt_running |^architecture |^augeasversion |^couchdb_.* |^debian_.* |^dhcp_enabled |^domain |^facterversion |^filesystems |^fqdn |^hardwaremodel |^hostname |^interface.* |^ipaddress.* |^is_pe |^is_virtual |^kernel.* |^lib |^lsb.* |^memory.* |^mtu_.* |^netmask.* |^network_.* |^operatingsystem |^os.* |^path |^physicalprocessorcount |^processor.* |^ps |^puppetversion |^root_home |^rsyslog_version |^rubysitedir |^rubyversion |^selinux |^ssh_version |^swapfree.* |^swapsize.* |^type |^virtual)'
+
+
+# query facts and filter out private stuff
+export FACTERLIB="/srv/leap/puppet/modules/apache/lib/facter:/srv/leap/puppet/modules/apt/lib/facter:/srv/leap/puppet/modules/concat/lib/facter:/srv/leap/puppet/modules/couchdb/lib/facter:/srv/leap/puppet/modules/rsyslog/lib/facter:/srv/leap/puppet/modules/site_config/lib/facter:/srv/leap/puppet/modules/sshd/lib/facter:/srv/leap/puppet/modules/stdlib/lib/facter"
+
+facter 2>/dev/null | egrep -i "$facts"
+
+# query installed versions
+echo -e '\n\n'
+dpkg -l | egrep "$apps"
+
+
+# query running procs
+echo -e '\n\n'
+ps aux|egrep "$apps"
+
+echo -e '\n\n'
+echo -e "Last deploy:\n"
+tail -2 /var/log/leap/deploy-summary.log
+
+
+
diff --git a/bin/node_init b/bin/node_init
new file mode 100644
index 00000000..da250012
--- /dev/null
+++ b/bin/node_init
@@ -0,0 +1,86 @@
+#!/bin/bash
+#
+# LEAP Platform node initialization.
+# This script is run on the target server when `leap node init` is run.
+#
+
+DEBIAN_VERSION="^(jessie|8\.)"
+LEAP_DIR="/srv/leap"
+HIERA_DIR="/etc/leap"
+INIT_FILE="/srv/leap/initialized"
+REQUIRED_PACKAGES="puppet rsync lsb-release locales"
+
+PATH="/bin:/sbin:/usr/sbin:/usr/bin"
+APT_GET="apt-get -q -y -o DPkg::Options::=--force-confold"
+APT_GET_UPDATE="apt-get update -o Acquire::Languages=none"
+BAD_APT_RESPONSE="(BADSIG|NO_PUBKEY|KEYEXPIRED|REVKEYSIG|NODATA|Could not resolve|failed to fetch)"
+export DEBIAN_FRONTEND=noninteractive
+
+test -f $INIT_FILE && rm $INIT_FILE
+if ! egrep -q "$DEBIAN_VERSION" /etc/debian_version; then
+ echo "ERROR: This operating system is not supported. The file /etc/debian_version must match /$DEBIAN_VERSION/ but is: `cat /etc/debian_version`"
+ exit 1
+fi
+mkdir -p $LEAP_DIR
+echo "en_US.UTF-8 UTF-8" > /etc/locale.gen
+
+#
+# UPDATE PACKAGES
+# (exit code is not reliable, sadly)
+#
+echo "updating package list"
+
+error_count=0
+while read line; do
+ error=$(echo $line | egrep "$BAD_APT_RESPONSE")
+ if [[ $error ]]; then
+ errors[error_count]=$error
+ ((error_count++))
+ break # should we halt on first error?
+ fi
+ echo $line
+done < <($APT_GET_UPDATE 2>&1)
+
+if [[ $error_count > 0 ]]; then
+ echo "ERROR: fatal error in 'apt-get update', bailing out."
+ for e in "${errors[@]}"; do
+ echo " $e"
+ done
+ exit 1
+fi
+
+#
+# UPDATE TIME
+#
+if [[ ! $(which ntpd) ]]; then
+ echo "installing ntpd"
+ $APT_GET install ntp
+ exit_code=$?
+ if [[ $exit_code -ne 0 ]]; then
+ echo "ERROR: bailing out."
+ exit $exit_code
+ fi
+fi
+
+echo "updating server time"
+systemctl -q is-active ntp.service && systemctl stop ntp.service
+ntpd -gxq
+systemctl -q is-active ntp.service || systemctl start ntp.service
+
+#
+# INSTALL PACKAGES
+#
+echo "installing required packages"
+$APT_GET install $REQUIRED_PACKAGES
+exit_code=$?
+if [[ $exit_code -ne 0 ]]; then
+ echo "ERROR: bailing out."
+ exit $exit_code
+fi
+
+#
+# FINALIZE
+#
+mkdir -p $HIERA_DIR
+chmod 0755 $HIERA_DIR
+touch $INIT_FILE
diff --git a/bin/puppet_command b/bin/puppet_command
new file mode 100755
index 00000000..eb3cd0b9
--- /dev/null
+++ b/bin/puppet_command
@@ -0,0 +1,313 @@
+#!/usr/bin/ruby
+
+#
+# This is a wrapper script around the puppet command used by the LEAP platform.
+#
+# We do this in order to make it faster and easier to control puppet remotely
+# (exit codes, logging, lockfile, version check, etc)
+#
+
+require 'pty'
+require 'yaml'
+require 'logger'
+require 'socket'
+require 'fileutils'
+
+DEBIAN_VERSION = /^(jessie|8\.)/
+PUPPET_BIN = '/usr/bin/puppet'
+PUPPET_DIRECTORY = '/srv/leap'
+PUPPET_PARAMETERS = '--color=false --detailed-exitcodes --libdir=puppet/lib --confdir=puppet'
+SITE_MANIFEST = 'puppet/manifests/site.pp'
+SITE_MODULES = 'puppet/modules'
+CUSTOM_MODULES = ':files/puppet/modules'
+DEFAULT_TAGS = 'leap_base,leap_service'
+HIERA_FILE = '/etc/leap/hiera.yaml'
+LOG_DIR = '/var/log/leap'
+DEPLOY_LOG = '/var/log/leap/deploy.log'
+SUMMARY_LOG = '/var/log/leap/deploy-summary.log'
+SUMMARY_LOG_1 = '/var/log/leap/deploy-summary.log.1'
+APPLY_START_STR = "STARTING APPLY"
+APPLY_FINISH_STR = "APPLY COMPLETE"
+
+
+def main
+ if File.read('/etc/debian_version') !~ DEBIAN_VERSION
+ log "ERROR: This operating system is not supported. The file /etc/debian_version must match #{DEBIAN_VERSION}."
+ exit 1
+ end
+ process_command_line_arguments
+ with_lockfile do
+ @commands.each do |command|
+ self.send(command)
+ end
+ end
+end
+
+def open_log_files
+ FileUtils.mkdir_p(LOG_DIR)
+ $logger = Logger.new(DEPLOY_LOG)
+ $summary_logger = Logger.new(SUMMARY_LOG)
+ [$logger, $summary_logger].each do |logger|
+ logger.level = Logger::INFO
+ logger.formatter = proc do |severity, datetime, progname, msg|
+ "%s %s: %s\n" % [datetime.strftime("%b %d %H:%M:%S"), Socket.gethostname, msg]
+ end
+ end
+end
+
+def close_log_files
+ $logger.close
+ $summary_logger.close
+end
+
+def log(str, *args)
+ str = str.strip
+ $stdout.puts str
+ $stdout.flush
+ if $logger
+ $logger.info(str)
+ if args.include? :summary
+ $summary_logger.info(str)
+ end
+ end
+end
+
+def process_command_line_arguments
+ @commands = []
+ @verbosity = 1
+ @tags = DEFAULT_TAGS
+ @info = {}
+ @downgrade = false
+ loop do
+ case ARGV[0]
+ when 'apply' then ARGV.shift; @commands << 'apply'
+ when 'set_hostname' then ARGV.shift; @commands << 'set_hostname'
+ when '--verbosity' then ARGV.shift; @verbosity = ARGV.shift.to_i
+ when '--force' then ARGV.shift; remove_lockfile
+ when '--tags' then ARGV.shift; @tags = ARGV.shift
+ when '--info' then ARGV.shift; @info = parse_info(ARGV.shift)
+ when '--downgrade' then ARGV.shift; @downgrade = true
+ when /^-/ then usage("Unknown option: #{ARGV[0].inspect}")
+ else break
+ end
+ end
+ usage("No command given") unless @commands.any?
+end
+
+def apply
+ platform_version_check! unless @downgrade
+ log "#{APPLY_START_STR} {#{format_info(@info)}}", :summary
+ exit_code = puppet_apply do |line|
+ log line
+ end
+ log "#{APPLY_FINISH_STR} (#{exitcode_description(exit_code)}) {#{format_info(@info)}}", :summary
+end
+
+def set_hostname
+ hostname = hiera_file['name']
+ if hostname.nil? || hostname.empty?
+ log('ERROR: "name" missing from hiera file')
+ exit(1)
+ end
+ current_hostname_file = File.read('/etc/hostname') rescue nil
+ current_hostname = `/bin/hostname`.strip
+
+ # set /etc/hostname
+ if current_hostname_file != hostname
+ File.open('/etc/hostname', 'w', 0611, :encoding => 'ascii') do |f|
+ f.write hostname
+ end
+ if File.read('/etc/hostname') == hostname
+ log "Changed /etc/hostname to #{hostname}"
+ else
+ log "ERROR: failed to update /etc/hostname"
+ end
+ end
+
+ # call /bin/hostname
+ if current_hostname != hostname
+ if run("/bin/hostname #{hostname}") == 0
+ log "Changed hostname to #{hostname}"
+ else
+ log "ERROR: call to `/bin/hostname #{hostname}` returned an error."
+ end
+ end
+end
+
+#
+# each line of output is yielded. the exit code is returned.
+#
+def puppet_apply(options={}, &block)
+ options = {:verbosity => @verbosity, :tags => @tags}.merge(options)
+ manifest = options[:manifest] || SITE_MANIFEST
+ modulepath = options[:module_path] || SITE_MODULES + CUSTOM_MODULES
+ fqdn = hiera_file['domain']['full']
+ domain = hiera_file['domain']['full_suffix']
+ Dir.chdir(PUPPET_DIRECTORY) do
+ return run("FACTER_fqdn='#{fqdn}' FACTER_domain='#{domain}' #{PUPPET_BIN} apply #{custom_parameters(options)} --modulepath='#{modulepath}' #{PUPPET_PARAMETERS} #{manifest}", &block)
+ end
+end
+
+#
+# parse the --info flag. example str: "key1: value1, key2: value2, ..."
+#
+def parse_info(str)
+ str.split(', ').
+ map {|i| i.split(': ')}.
+ inject({}) {|h,i| h[i[0]] = i[1]; h}
+rescue Exception => exc
+ {"platform" => "INVALID_FORMAT"}
+end
+
+def format_info(info)
+ info.to_a.map{|i|i.join(': ')}.join(', ')
+end
+
+#
+# exits with a warning message if the last successfully deployed
+# platform was newer than the one we are currently attempting to
+# deploy.
+#
+PLATFORM_RE = /\{.*platform: ([0-9\.]+)[ ,\}].*[\}$]/
+def platform_version_check!
+ return unless @info["platform"]
+ new_version = @info["platform"].split(' ').first
+ return unless new_version
+ if File.exists?(SUMMARY_LOG) && File.size(SUMMARY_LOG) != 0
+ file = SUMMARY_LOG
+ elsif File.exists?(SUMMARY_LOG_1) && File.size(SUMMARY_LOG_1) != 0
+ file = SUMMARY_LOG_1
+ else
+ return
+ end
+ most_recent_line = `tail '#{file}'`.split("\n").grep(PLATFORM_RE).last
+ if most_recent_line
+ prior_version = most_recent_line.match(PLATFORM_RE)[1]
+ if Gem::Version.new(prior_version) > Gem::Version.new(new_version)
+ log("ERROR: You are attempting to deploy platform v#{new_version} but this node uses v#{prior_version}.")
+ log(" Run with --downgrade if you really want to deploy an older platform version.")
+ exit(0)
+ end
+ end
+end
+
+#
+# Return a ruby object representing the contents of the hiera yaml file.
+#
+def hiera_file
+ unless File.exists?(HIERA_FILE)
+ log("ERROR: hiera file '#{HIERA_FILE}' does not exist.")
+ exit(1)
+ end
+ $hiera_contents ||= YAML.load_file(HIERA_FILE)
+ return $hiera_contents
+rescue Exception => exc
+ log("ERROR: problem reading hiera file '#{HIERA_FILE}' (#{exc})")
+ exit(1)
+end
+
+def custom_parameters(options)
+ params = []
+ if options[:tags] && options[:tags].chars.any?
+ params << "--tags #{options[:tags]}"
+ end
+ if options[:verbosity]
+ case options[:verbosity]
+ when 3 then params << '--verbose'
+ when 4 then params << '--verbose --debug'
+ when 5 then params << '--verbose --debug --trace'
+ end
+ end
+ params.join(' ')
+end
+
+def exitcode_description(code)
+ case code
+ when 0 then "no changes"
+ when 1 then "failed"
+ when 2 then "changes made"
+ when 4 then "failed"
+ when 6 then "changes and failures"
+ else code
+ end
+end
+
+def usage(s)
+ $stderr.puts(s)
+ $stderr.puts
+ $stderr.puts("Usage: #{File.basename($0)} COMMAND [OPTIONS]")
+ $stderr.puts
+ $stderr.puts("COMMAND may be one or more of:
+ set_hostname -- set the hostname of this server.
+ apply -- apply puppet manifests.")
+ $stderr.puts
+ $stderr.puts("OPTIONS may be one or more of:
+ --verbosity VERB -- set the verbosity level 0..5.
+ --tags TAGS -- set the tags to pass through to puppet.
+ --force -- run even when lockfile is present.
+ --info -- additional info to include in logs (e.g. 'user: alice, platform: 0.6.1')
+ --downgrade -- allow a deploy even if the platform version is older than previous deploy.
+ ")
+ exit(2)
+end
+
+##
+## Simple lock file
+##
+
+require 'fileutils'
+DEFAULT_LOCKFILE = '/tmp/puppet.lock'
+
+def remove_lockfile(lock_file_path=DEFAULT_LOCKFILE)
+ FileUtils.remove_file(lock_file_path, true)
+end
+
+def with_lockfile(lock_file_path=DEFAULT_LOCKFILE)
+ begin
+ File.open(lock_file_path, File::CREAT | File::EXCL | File::WRONLY) do |o|
+ o.write(Process.pid)
+ end
+ open_log_files
+ yield
+ remove_lockfile
+ close_log_files
+ rescue Errno::EEXIST
+ log("ERROR: the lock file '#{lock_file_path}' already exists. Wait a minute for the process to die, or run with --force to ignore. Bailing out.")
+ exit(1)
+ rescue IOError => exc
+ log("ERROR: problem with lock file '#{lock_file_path}' (#{exc}). Bailing out.")
+ exit(1)
+ end
+end
+
+##
+## simple pass through process runner (to ensure output is not buffered and return exit code)
+## this only works under ruby 1.9
+##
+
+def run(cmd)
+ log(cmd) if @verbosity >= 3
+ PTY.spawn("#{cmd}") do |output, input, pid|
+ begin
+ while line = output.gets do
+ yield line
+ end
+ rescue Errno::EIO
+ end
+ Process.wait(pid) # only works in ruby 1.9, required to capture the exit status.
+ end
+ return $?.exitstatus
+rescue PTY::ChildExited
+end
+
+##
+## RUN MAIN
+##
+
+Signal.trap("EXIT") do
+ remove_lockfile # clean up the lockfile when process is terminated.
+ # this will remove the lockfile if ^C killed the process
+ # but only after the child puppet process is also dead (I think).
+end
+
+main()
diff --git a/bin/run_tests b/bin/run_tests
new file mode 100755
index 00000000..b6784ed5
--- /dev/null
+++ b/bin/run_tests
@@ -0,0 +1,515 @@
+#!/usr/bin/ruby
+
+#
+# this script will run the unit tests in ../tests/*.rb.
+#
+# Tests for the platform differ from traditional ruby unit tests in a few ways:
+#
+# (1) at the end of every test function, you should call 'pass()'
+# (2) you can specify test dependencies by calling depends_on("TestFirst") in the test class definition.
+# (3) test functions are always run in alphabetical order.
+# (4) any halt or error will stop the testing unless --continue is specified.
+#
+
+require 'minitest/unit'
+require 'yaml'
+require 'tsort'
+require 'timeout'
+
+##
+## CONSTANTS
+##
+
+EXIT_CODES = {
+ :success => 0,
+ :warning => 1,
+ :failure => 2,
+ :error => 3
+}
+
+HIERA_FILE = '/etc/leap/hiera.yaml'
+HELPER_PATHS = [
+ '../../tests/helpers/*.rb',
+ '/srv/leap/files/tests/helpers/*.rb'
+]
+TEST_PATHS = [
+ '../../tests/white-box/*.rb',
+ '/srv/leap/files/tests/white-box/*.rb',
+ '/srv/leap/tests_custom/*.rb'
+]
+
+##
+## UTILITY
+##
+
+def bail(code, msg=nil)
+ puts msg if msg
+ if code.is_a? Symbol
+ exit(EXIT_CODES[code])
+ else
+ exit(code)
+ end
+end
+
+def service?(service)
+ $node["services"].include?(service.to_s)
+end
+
+##
+## EXCEPTIONS
+##
+
+# this class is raised if a test file wants to be skipped entirely.
+# (to skip an individual test, MiniTest::Skip is used instead)
+class SkipTest < StandardError
+end
+
+# raised if --no-continue and there is an error
+class TestError < StandardError
+end
+
+# raised if --no-continue and there is a failure
+class TestFailure < StandardError
+end
+
+##
+## CUSTOM UNIT TEST CLASS
+##
+
+#
+# Our custom unit test class. All tests should be subclasses of this.
+#
+class LeapTest < MiniTest::Unit::TestCase
+ class Pass < MiniTest::Assertion
+ end
+ class SilentPass < Pass
+ end
+ class Ignore < MiniTest::Assertion
+ end
+
+ def initialize(name)
+ super(name)
+ io # << calling this will suppress the marching ants
+ end
+
+ #
+ # Test class dependencies
+ #
+ def self.depends_on(*class_names)
+ @dependencies ||= []
+ @dependencies += class_names
+ end
+ def self.dependencies
+ @dependencies || []
+ end
+
+ #
+ # returns all the test classes, sorted in dependency order.
+ #
+ def self.test_classes
+ classes = ObjectSpace.each_object(Class).select {|test_class|
+ test_class.ancestors.include?(self)
+ }
+ return TestDependencyGraph.new(classes).sorted
+ end
+
+ def self.tests
+ self.instance_methods.grep(/^test_/).sort
+ end
+
+ #
+ # thrown Timeout::Error if test run
+ # takes longer than $timeout
+ #
+ def run(*args)
+ Timeout::timeout($timeout, Timeout::Error) do
+ super(*args)
+ end
+ end
+
+ #
+ # The default pass just does an `assert true`. In our case, we want to make the passes more explicit.
+ #
+ def pass
+ raise LeapTest::Pass
+ end
+
+ #
+ # This is just like pass(), but the result is normally silent, unless `run_tests --test TEST`
+ def silent_pass
+ raise LeapTest::SilentPass
+ end
+
+ #
+ # Called when the test should be silently ignored.
+ #
+ def ignore
+ raise LeapTest::Ignore
+ end
+
+ #
+ # the default fail() is part of the kernel and it just throws a runtime exception. for tests,
+ # we want the same behavior as assert(false)
+ #
+ def fail(msg=nil, exception=nil)
+ if DEBUG && exception && exception.respond_to?(:backtrace)
+ msg += MiniTest::filter_backtrace(exception.backtrace).join "\n"
+ end
+ assert(false, msg)
+ end
+
+ def warn(*msg)
+ method_name = caller.first.split('`').last.gsub(/(block in |')/,'')
+ MiniTest::Unit.runner.warn(self.class, method_name, msg.join("\n"))
+ end
+
+ #
+ # Always runs test methods within a test class in alphanumeric order
+ #
+ def self.test_order
+ :alpha
+ end
+
+end
+
+#
+# Custom test runner in order to modify the output.
+#
+class LeapRunner < MiniTest::Unit
+
+ attr_accessor :passes, :warnings
+
+ def initialize
+ @passes = 0
+ @warnings = 0
+ @ignores = 0
+ super
+ end
+
+ #
+ # call stack:
+ # MiniTest::Unit.new.run
+ # MiniTest::Unit.runner
+ # LeapTest._run
+ #
+ def _run args = []
+ if $pinned_test_class
+ suites = [$pinned_test_class]
+ if $pinned_test_method
+ options.merge!(:filter => $pinned_test_method.to_s)
+ end
+ else
+ suites = LeapTest.send "test_suites"
+ suites = TestDependencyGraph.new(suites).sorted
+ end
+ output.sync = true
+ results = _run_suites(suites, :test)
+ @test_count = results.inject(0) { |sum, (tc, _)| sum + tc }
+ @assertion_count = results.inject(0) { |sum, (_, ac)| sum + ac }
+ status
+ return exit_code()
+ rescue Interrupt
+ bail :error, 'Tests halted on interrupt.'
+ rescue TestFailure
+ bail :failure, 'Tests halted on failure (because of --no-continue).'
+ rescue TestError
+ bail :error, 'Tests halted on error (because of --no-continue).'
+ end
+
+ #
+ # override puke to change what prints out.
+ #
+ def puke(klass, meth, e)
+ case e
+ when MiniTest::Skip then
+ @skips += 1
+ report_line("SKIP", klass, meth, e, e.message)
+ when LeapTest::Ignore then
+ @ignores += 1
+ if @verbose
+ report_line("IGNORE", klass, meth, e, e.message)
+ end
+ when LeapTest::SilentPass then
+ if $pinned_test_method || $output_format == :checkmk
+ report_line("PASS", klass, meth)
+ end
+ when LeapTest::Pass then
+ @passes += 1
+ report_line("PASS", klass, meth)
+ when MiniTest::Assertion then
+ @failures += 1
+ report_line("FAIL", klass, meth, e, e.message)
+ if $halt_on_failure
+ raise TestFailure.new
+ end
+ when Timeout::Error then
+ @failures += 1
+ report_line("TIMEOUT", klass, meth, nil, "Test stopped because timeout exceeded (#{$timeout} seconds).")
+ if $halt_on_failure
+ raise TestFailure.new
+ end
+ else
+ @errors += 1
+ bt = MiniTest::filter_backtrace(e.backtrace).join "\n"
+ report_line("ERROR", klass, meth, e, "#{e.class}: #{e.message}\n#{bt}")
+ if $halt_on_failure
+ raise TestError.new
+ end
+ end
+ return "" # disable the marching ants
+ end
+
+ #
+ # override default status summary
+ #
+ def status(io = self.output)
+ if $output_format == :human
+ format = "%d tests: %d passes, %d skips, %d warnings, %d failures, %d errors"
+ output.puts format % [test_count, passes, skips, warnings, failures, errors]
+ end
+ end
+
+ #
+ # return an appropriate exit_code symbol
+ #
+ def exit_code
+ if @errors > 0
+ :error
+ elsif @failures > 0
+ :failure
+ elsif @warnings > 0
+ # :warning << warnings don't warrant a non-zero exit code.
+ :success
+ else
+ :success
+ end
+ end
+
+ #
+ # returns a string for a PASS, SKIP, or FAIL error
+ #
+ def report_line(prefix, klass, meth, e=nil, message=nil)
+ msg_txt = nil
+ if message
+ message = message.gsub(/http:\/\/([a-z_]+):([a-zA-Z0-9_]+)@/, "http://\\1:REDACTED@")
+ if $output_format == :human
+ indent = "\n "
+ msg_txt = indent + message.split("\n").join(indent)
+ else
+ msg_txt = message.gsub("\n", ' ')
+ end
+ end
+
+ if $output_format == :human
+ if e && msg_txt
+ output.puts "#{prefix}: #{readable(klass.name)} > #{readable(meth)} [#{File.basename(location(e))}]:#{msg_txt}"
+ elsif msg_txt
+ output.puts "#{prefix}: #{readable(klass.name)} > #{readable(meth)}:#{msg_txt}"
+ else
+ output.puts "#{prefix}: #{readable(klass.name)} > #{readable(meth)}"
+ end
+ # I don't understand at all why, but adding a very tiny sleep here will
+ sleep(0.0001) # keep lines from being joined together by the logger. output.flush doesn't.
+ elsif $output_format == :checkmk
+ code = CHECKMK_CODES[prefix]
+ msg_txt ||= "Success" if prefix == "PASS"
+ if e && msg_txt
+ output.puts "#{code} #{klass.name}/#{machine_readable(meth)} - [#{File.basename(location(e))}]:#{msg_txt}"
+ elsif msg_txt
+ output.puts "#{code} #{klass.name}/#{machine_readable(meth)} - #{msg_txt}"
+ else
+ output.puts "#{code} #{klass.name}/#{machine_readable(meth)} - no message"
+ end
+ end
+ end
+
+ #
+ # a new function used by TestCase to report warnings.
+ #
+ def warn(klass, method_name, msg)
+ @warnings += 1
+ report_line("WARN", klass, method_name, nil, msg)
+ end
+
+ private
+
+ CHECKMK_CODES = {"PASS" => 0, "SKIP" => 1, "FAIL" => 2, "ERROR" => 3}
+
+ #
+ # Converts snake_case and CamelCase to something more pleasant for humans to read.
+ #
+ def readable(str)
+ str.
+ gsub(/_/, ' ').
+ sub(/^test (\d* )?/i, '')
+ end
+
+ def machine_readable(str)
+ str.sub(/^test_(\d+_)?/i, '')
+ end
+
+end
+
+##
+## Dependency resolution
+## Use a topological sort to manage test dependencies
+##
+
+class TestDependencyGraph
+ include TSort
+
+ def initialize(test_classes)
+ @dependencies = {} # each key is a test class name, and the values
+ # are arrays of test class names that the key depends on.
+ test_classes.each do |test_class|
+ @dependencies[test_class.name] = test_class.dependencies
+ end
+ end
+
+ def tsort_each_node(&block)
+ @dependencies.each_key(&block)
+ end
+
+ def tsort_each_child(test_class_name, &block)
+ if @dependencies[test_class_name]
+ @dependencies[test_class_name].each(&block)
+ else
+ puts "ERROR: bad dependency, no such class `#{test_class_name}`"
+ bail :error
+ end
+ end
+
+ def sorted
+ self.tsort.collect {|class_name|
+ Kernel.const_get(class_name)
+ }
+ end
+end
+
+##
+## COMMAND LINE ACTIONS
+##
+
+def die(test, msg)
+ if $output_format == :human
+ puts "ERROR in test `#{test}`: #{msg}"
+ elsif $output_format == :checkmk
+ puts "3 #{test} - #{msg}"
+ end
+ bail :error
+end
+
+def print_help
+ puts ["USAGE: run_tests [OPTIONS]",
+ " --continue Don't halt on an error, but continue to the next test.",
+ " --checkmk Print test results in checkmk format (must come before --test).",
+ " --test TEST Run only the test with name TEST.",
+ " --list-tests Prints the names of all available tests and exit.",
+ " --retry COUNT If the tests don't pass, retry COUNT additional times (default is zero).",
+ " --timeout SECONDS Halt a test if it exceed SECONDS (default is 30).",
+ " --wait SECONDS Wait for SECONDS between retries (default is 5).",
+ " --debug Print out full stack trace on errors."].join("\n")
+ exit(0)
+end
+
+def list_tests
+ LeapTest.test_classes.each do |test_class|
+ test_class.tests.each do |test|
+ puts test_class.name + "/" + test.to_s.sub(/^test_(\d+_)?/, '')
+ end
+ end
+ exit(0)
+end
+
+def pin_test_name(name)
+ test_class, test_name = name.split('/')
+ $pinned_test_class = LeapTest.test_classes.detect{|c| c.name == test_class}
+ unless $pinned_test_class
+ die name, "there is no test class `#{test_class}`"
+ end
+ if test_name
+ $pinned_test_method = $pinned_test_class.tests.detect{|m| m.to_s =~ /^test_(\d+_)?#{Regexp.escape(test_name)}$/}
+ unless $pinned_test_method
+ die name, "there is no test `#{test_name}` in class `#{test_class}`"
+ end
+ end
+end
+
+#
+# run the tests, multiple times if `--retry` and not all tests were successful.
+#
+def run_tests
+ exit_code = nil
+ run_count = $retry ? $retry + 1 : 1
+ run_count.times do |i|
+ MiniTest::Unit.runner = LeapRunner.new
+ exit_code = MiniTest::Unit.new.run
+ if !$retry || exit_code == :success
+ break
+ elsif i != run_count-1
+ sleep $wait
+ end
+ end
+ bail exit_code
+end
+
+##
+## MAIN
+##
+
+def main
+ # load node data from hiera file
+ if File.exists?(HIERA_FILE)
+ $node = YAML.load_file(HIERA_FILE)
+ else
+ $node = {"services" => [], "dummy" => true}
+ end
+
+ # load all test classes
+ this_file = File.symlink?(__FILE__) ? File.readlink(__FILE__) : __FILE__
+ HELPER_PATHS.each do |path|
+ Dir[File.expand_path(path, this_file)].each do |helper|
+ require helper
+ end
+ end
+ TEST_PATHS.each do |path|
+ Dir[File.expand_path(path, this_file)].each do |test_file|
+ begin
+ require test_file
+ rescue SkipTest
+ end
+ end
+ end
+
+ # parse command line options
+ $halt_on_failure = true
+ $output_format = :human
+ $retry = false
+ $wait = 5
+ $timeout = 30
+ loop do
+ case ARGV[0]
+ when '--continue' then ARGV.shift; $halt_on_failure = false;
+ when '--checkmk' then ARGV.shift; $output_format = :checkmk; $halt_on_failure = false
+ when '--help' then print_help
+ when '--test' then ARGV.shift; pin_test_name(ARGV.shift)
+ when '--list-tests' then list_tests
+ when '--retry' then ARGV.shift; $retry = ARGV.shift.to_i
+ when '--timeout' then ARGV.shift; $timeout = ARGV.shift.to_i;
+ when '--wait' then ARGV.shift; $wait = ARGV.shift.to_i
+ when '--debug' then ARGV.shift
+ when '-d' then ARGV.shift
+ else break
+ end
+ end
+ run_tests
+end
+
+if ARGV.include?('--debug') || ARGV.include?('-d')
+ DEBUG=true
+ require 'debugger'
+else
+ DEBUG=false
+end
+
+main()
diff --git a/contrib/README.md b/contrib/README.md
new file mode 100644
index 00000000..e836bc7e
--- /dev/null
+++ b/contrib/README.md
@@ -0,0 +1,9 @@
+# Contributed Files
+
+## Commit Template
+
+To install this commit template, use the following command (add --global to set it in your global .gitconfig):
+
+ git config [--global] commit.template "~/path_to_leap_platform/contrib/commit-template.txt"
+
+
diff --git a/contrib/commit-template.txt b/contrib/commit-template.txt
new file mode 100644
index 00000000..9a1fa81b
--- /dev/null
+++ b/contrib/commit-template.txt
@@ -0,0 +1,7 @@
+#[bug|feat|docs|style|refactor|test|pkg|i18n]
+
+#- Tested: [local singlenode|local multinode|citest|unstable.bitmask.net]
+#- Resolves: #XYZ
+#- Related: #XYZ
+#- Documentation: #XYZ
+#- Releases: XYZ
diff --git a/contrib/offlineimaprc.example.org b/contrib/offlineimaprc.example.org
new file mode 100644
index 00000000..3d119634
--- /dev/null
+++ b/contrib/offlineimaprc.example.org
@@ -0,0 +1,24 @@
+# WARNING: Use offlineimap *only* for testing/debugging,
+# because it will save the mails *decrypted* locally to
+# your disk !
+
+[general]
+accounts = testuser@example.org
+
+[Account testuser@example.org]
+localrepository = testuser@example.org_local
+remoterepository = testuser@example.org_remote
+
+[Repository testuser@example.org_local]
+type = Maildir
+localfolders = /tmp/offlineimap.testuser@example.org
+
+[Repository testuser@example.org_remote]
+type = IMAP
+remotehost = localhost
+remoteuser = testuser@example.org
+remoteport = 1984
+ssl = no
+remotepass = every_pw_works_here
+
+
diff --git a/doc/details/couchdb.md b/doc/details/couchdb.md
new file mode 100644
index 00000000..276bfdc2
--- /dev/null
+++ b/doc/details/couchdb.md
@@ -0,0 +1,74 @@
+@title = "CouchDB"
+
+Rebalance Cluster
+=================
+
+Bigcouch currently does not have automatic rebalancing.
+It will probably be added after merging into couchdb.
+If you add a node to the cluster, or remove one from it, follow these steps:
+
+. make sure you have a backup of all DBs!
+
+ /srv/leap/couchdb/scripts/couchdb_dumpall.sh
+
+
+. delete all dbs
+. shut down old node
+. check the couchdb members
+
+    curl -s --netrc-file /etc/couchdb/couchdb.netrc -X GET http://127.0.0.1:5986/nodes/_all_docs
+    curl -s --netrc-file /etc/couchdb/couchdb.netrc http://127.0.0.1:5984/_membership
+
+
+. remove bigcouch from all nodes
+
+ apt-get --purge remove bigcouch
+
+
+. deploy to all couch nodes
+
+ leap deploy development +couchdb
+
+. most likely, the deploy will fail because bigcouch will complain that not all nodes are connected. Let the deploy finish, restart the bigcouch service on all nodes and then re-deploy:
+
+ /etc/init.d/bigcouch restart
+
+
+. restore the backup
+
+ /srv/leap/couchdb/scripts/couchdb_restoreall.sh
+
+
+Re-enabling blocked account
+===========================
+
+When a user account gets destroyed from the webapp, a leftover doc remains in the identities db so that other people can't claim that account without an admin's intervention. Here's how you delete that doc and thereby enable registration for that particular account again:
+
+. grep the identities db for the email address:
+
+ curl -s --netrc-file /etc/couchdb/couchdb.netrc -X GET http://127.0.0.1:5984/identities/_all_docs?include_docs=true|grep test_127@bitmask.net
+
+
+. lookup "id" and "rev" to delete the doc:
+
+ curl -s --netrc-file /etc/couchdb/couchdb.netrc -X DELETE 'http://127.0.0.1:5984/identities/b25cf10f935b58088f0d547fca823265?rev=2-715a9beba597a2ab01851676f12c3e4a'
+
+
+How to find out which userstore belongs to which identity?
+===========================================================
+
+ /usr/bin/curl -s --netrc-file /etc/couchdb/couchdb.netrc '127.0.0.1:5984/identities/_all_docs?include_docs=true' | grep testuser
+
+ {"id":"665e004870ee17aa4c94331ff3ecb173","key":"665e004870ee17aa4c94331ff3ecb173","value":{"rev":"2-2e335a75c4b79a5c2ef5c9950706fe1b"},"doc":{"_id":"665e004870ee17aa4c94331ff3ecb173","_rev":"2-2e335a75c4b79a5c2ef5c9950706fe1b","user_id":"665e004870ee17aa4c94331ff3cd59eb","address":"testuser@example.org","destination":"testuser@example.org","keys": ...
+
+* search for the "user_id" field
+* in this example testuser@example.org uses the database user-665e004870ee17aa4c94331ff3cd59eb
+
+
+How much disk space is used by a userstore
+==========================================
+
+Beware that this returns the uncompacted disk size (see http://wiki.apache.org/couchdb/Compaction)
+
+ echo "`curl --netrc -s -X GET 'http://127.0.0.1:5984/user-dcd6492d74b90967b6b874100b7dbfcf'|json_pp|grep disk_size|cut -d: -f 2`/1024"|bc
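+
+If you only need the raw byte count, a slightly more readable sketch of the same query (the database name is an example):
+
+    curl -s --netrc-file /etc/couchdb/couchdb.netrc http://127.0.0.1:5984/user-dcd6492d74b90967b6b874100b7dbfcf | json_pp | grep disk_size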
+
diff --git a/doc/details/development.md b/doc/details/development.md
new file mode 100644
index 00000000..8df2bbb0
--- /dev/null
+++ b/doc/details/development.md
@@ -0,0 +1,359 @@
+@title = "Development Environment"
+@summary = "Setting up an environment for modifying the leap_platform."
+@toc = true
+
+If you want to make local changes to your provider, or want to contribute some fixes back to LEAP, we recommend that you follow this guide to set up a development environment to test your changes first. Using this method, you can quickly test your changes without deploying them to your production environment, while benefiting from the convenience of reverting to known good states in order to retry things from scratch.
+
+This page will walk you through setting up nodes using [Vagrant](http://www.vagrantup.com/) for convenient deployment testing, snapshotting known good states, and reverting to previous snapshots.
+
+Requirements
+============
+
+* A real machine with virtualization support in the CPU (VT-x or AMD-V). In other words, not a virtual machine.
+* At least 4GB of RAM.
+* A fast internet connection (you will be downloading a lot of big files, like virtual machine images).
+* You should do everything described below as an unprivileged user, and only run as root those commands that are prefixed with *sudo*. Other than those, there is no need for privileged access to your machine, and in fact things may not work correctly otherwise.
+
+Install prerequisites
+--------------------------------
+
+For development purposes, you will need everything that you need for deploying the LEAP platform:
+
+* LEAP cli
+* A provider instance
+
+You will also need to set up a virtualized Vagrant environment. To do so, please make sure you have the
+following prerequisites installed:
+
+*Debian & Ubuntu*
+
+Install core prerequisites:
+
+ sudo apt-get install git ruby ruby-dev rsync openssh-client openssl rake make
+
+Install Vagrant in order to be able to test with local virtual machines (typically optional, but required for this tutorial). You probably want a more recent version directly from [vagrantup.com](https://www.vagrantup.com/downloads.html).
+
+ sudo apt-get install vagrant virtualbox
+
+
+*Mac OS X 10.9 (Mavericks)*
+
+Install Homebrew package manager from http://brew.sh/ and enable the [System Duplicates Repository](https://github.com/Homebrew/homebrew/wiki/Interesting-Taps-&-Branches) (needed to update old software versions delivered by Apple) with
+
+ brew tap homebrew/dupes
+
+Update OpenSSH to support ECDSA keys. Follow [this guide](http://www.dctrwatson.com/2013/07/how-to-update-openssh-on-mac-os-x/) to let your system use the Homebrew binary.
+
+ brew install openssh --with-brewed-openssl --with-keychain-support
+
+The certtool provided by Apple is really old, so install the one provided by GnuTLS and shadow the system's default:
+
+    sudo brew install gnutls
+    ln -sf /usr/local/bin/gnutls-certtool /usr/local/bin/certtool
+
+Install the Vagrant and VirtualBox packages for OS X from their respective Download pages.
+
+* http://www.vagrantup.com/downloads.html
+* https://www.virtualbox.org/wiki/Downloads
+
+Verify vagrantbox download
+--------------------------
+
+Import LEAP archive signing key:
+
+ gpg --search-keys 0x1E34A1828E207901
+
+Now, either you already have a trust path to it through one of the people
+who signed it, or you can verify it by checking this fingerprint:
+
+ gpg --fingerprint --list-keys 1E34A1828E207901
+
+ pub 4096R/1E34A1828E207901 2013-02-06 [expires: 2015-02-07]
+ Key fingerprint = 1E45 3B2C E87B EE2F 7DFE 9966 1E34 A182 8E20 7901
+ uid LEAP archive signing key <sysdev@leap.se>
+
+If the fingerprint matches, you can locally sign the key so you remember that you already
+verified it:
+
+ gpg --lsign-key 1E34A1828E207901
+
+Then download the SHA215SUMS file and its signature file
+
+ wget https://downloads.leap.se/platform/SHA215SUMS.sign
+ wget https://downloads.leap.se/platform/SHA215SUMS
+
+and verify the signature against your locally imported LEAP archive signing pubkey
+
+ gpg --verify SHA215SUMS.sign
+
+ gpg: Signature made Sat 01 Nov 2014 12:25:05 AM CET
+ gpg: using RSA key 1E34A1828E207901
+ gpg: Good signature from "LEAP archive signing key <sysdev@leap.se>"
+
+Make sure that the last line says "Good signature from...", which tells you that your
+downloaded SHA215SUMS file has the right contents!
+
+Now you can compare the checksum of your downloaded vagrant box with the one in the SHA215SUMS file. You could have downloaded the box manually from https://atlas.hashicorp.com/api/v1/box/LEAP/wheezy/$version/$provider.box; otherwise it's probably located within ~/.vagrant.d/.
+
+ wget https://atlas.hashicorp.com/api/v1/box/LEAP/wheezy/0.9/libvirt.box
+ sha215sum libvirt.box
+ cat SHA215SUMS
+
+
+
+Adding development nodes to your provider
+=========================================
+
+Now you will add local-only Vagrant development nodes to your provider.
+
+You do not need to set up a different provider instance for development. In fact, it is more convenient if you do not, but you can if you wish. If you do not have a provider already, you will need to create one and configure it before continuing (it is recommended you go through the [Quick Start](quick-start) before continuing down this path).
+
+
+Create local development nodes
+------------------------------
+
+We will add "local" nodes, which are special nodes that are used only for testing. These nodes exist only as virtual machines on your computer, and cannot be accessed from the outside. Each "node" is a server that can have one or more services attached to it. We recommend that you create different nodes for different services to better isolate issues.
+
+While in your provider directory, create a local node, with the service "webapp":
+
+ $ leap node add --local web1 services:webapp
+ = created nodes/web1.json
+ = created files/nodes/web1/
+ = created files/nodes/web1/web1.key
+ = created files/nodes/web1/web1.crt
+
+This command creates a node configuration file in `nodes/web1.json` with the webapp service.
+
+Starting local development nodes
+--------------------------------
+
+In order to test the node "web1" we need to start it. Starting a node for the first time will spin up a virtual machine. The first time you do this, it will take a while because a VM base image (about 700MB) needs to be downloaded. After you've downloaded the base image, you will not need to download it again; instead, the downloaded image is re-used (until you need to update it).
+
+NOTE: Many people have difficulties getting Vagrant working. If the following commands do not work, please see the Vagrant section below to troubleshoot your Vagrant install before proceeding.
+
+ $ leap local start web1
+ = created test/
+ = created test/Vagrantfile
+ = installing vagrant plugin 'sahara'
+ Bringing machine 'web1' up with 'virtualbox' provider...
+ [web1] Box 'leap-wheezy' was not found. Fetching box from specified URL for
+ the provider 'virtualbox'. Note that if the URL does not have
+ a box for this provider, you should interrupt Vagrant now and add
+ the box yourself. Otherwise Vagrant will attempt to download the
+ full box prior to discovering this error.
+ Downloading or copying the box...
+ Progress: 3% (Rate: 560k/s, Estimated time remaining: 0:13:36)
+ ...
+ Bringing machine 'web1' up with 'virtualbox' provider...
+ [web1] Importing base box 'leap-wheezy'...
+ 0%...10%...20%...30%...40%...50%...60%...70%...80%...90%...100%
+
+Now the virtual machine 'web1' is running. You can add another local node using the same process. For example, the webapp node needs a database to run, so let's add a "couchdb" node:
+
+ $ leap node add --local db1 services:couchdb
+ $ leap local start
+ = updated test/Vagrantfile
+ Bringing machine 'db1' up with 'virtualbox' provider...
+ [db1] Importing base box 'leap-wheezy'...
+ [db1] Matching MAC address for NAT networking...
+ [db1] Setting the name of the VM...
+ [db1] Clearing any previously set forwarded ports...
+ [db1] Fixed port collision for 22 => 2222. Now on port 2202.
+ [db1] Creating shared folders metadata...
+ [db1] Clearing any previously set network interfaces...
+ [db1] Preparing network interfaces based on configuration...
+ [db1] Forwarding ports...
+ [db1] -- 22 => 2202 (adapter 1)
+ [db1] Running any VM customizations...
+ [db1] Booting VM...
+ [db1] Waiting for VM to boot. This can take a few minutes.
+ [db1] VM booted and ready for use!
+ [db1] Configuring and enabling network interfaces...
+ [db1] Mounting shared folders...
+ [db1] -- /vagrant
+
+You can now follow the normal LEAP process: initialize each node and then deploy to it:
+
+ $ leap node init web1
+ $ leap deploy web1
+ $ leap node init db1
+ $ leap deploy db1
+
+
+Useful local development commands
+=================================
+
+There are many useful things you can do with a virtualized development environment.
+
+Listing what machines are running
+---------------------------------
+
+Now that you have the two virtual machines "web1" and "db1" running, you can list the running machines as follows:
+
+ $ leap local status
+ Current machine states:
+
+ db1 running (virtualbox)
+ web1 running (virtualbox)
+
+ This environment represents multiple VMs. The VMs are all listed
+ above with their current state. For more information about a specific
+ VM, run `vagrant status NAME`.
+
+Stopping machines
+-----------------
+
+It is not recommended that you leave your virtual machines running when you are not using them. They consume memory and other resources! To stop your machines, simply do the following:
+
+ $ leap local stop web1 db1
+
+Connecting to machines
+----------------------
+
+You can connect to your local nodes just like you do with normal LEAP nodes, by running 'leap ssh node'.
+
+However, if you cannot connect to your local node because the networking is not set up properly, or because you have deployed a firewall that locks you out, you may need to access the graphical console.
+
+In order to do that, you will need to configure Vagrant to launch a graphical console and then you can login as root there to diagnose the networking problem. To do this, add the following to your $HOME/.leaprc:
+
+ @custom_vagrant_vm_line = 'config.vm.provider "virtualbox" do |v|
+ v.gui = true
+ end'
+
+and then start, or restart, your local Vagrant node. You should get a VirtualBox graphical interface presented to you showing you the bootup and eventually the login.
+
+Snapshotting machines
+---------------------
+
+A very useful feature of local Vagrant development nodes is the ability to snapshot the current state and then revert to that when you need.
+
+For example, perhaps the base image is a little bit out of date and you want to get the packages updated to the latest before continuing. You can do that simply by starting the node, connecting to it and updating the packages and then snapshotting the node:
+
+ $ leap local start web1
+ $ leap ssh web1
+ web1# apt-get -u dist-upgrade
+ web1# exit
+ $ leap local save web1
+
+Now you can deploy to web1 and if you decide you want to revert to the state before deployment, you simply have to reset the node to your previous save:
+
+ $ leap local reset web1
+
+More information
+----------------
+
+See `leap help local` for a complete list of local-only commands and how they can be used.
+
+
+Limitations
+===========
+
+For known issues with Vagrant, see the *Special Environments* section of [Known Issues](known-issues).
+
+
+Other useful plugins
+====================
+
+. The [vagrant-cachier](http://fgrehm.viewdocs.io/vagrant-cachier/) plugin lets you cache .deb packages on your host so they are not downloaded over and over again by multiple machines, or after resetting to a previous state.
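+
+Installing it follows the usual Vagrant plugin pattern (a sketch):
+
+    vagrant plugin install vagrant-cachier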
+
+Troubleshooting Vagrant
+=======================
+
+To troubleshoot vagrant issues, try going through these steps:
+
+* Try plain vagrant using the [Getting started guide](http://docs.vagrantup.com/v2/getting-started/index.html).
+* If that fails, make sure that you can run virtual machines (VMs) in plain virtualbox (Virtualbox GUI or VBoxHeadless).
+  We don't maintain a special howto for that; [this one](http://www.thegeekstuff.com/2012/02/virtualbox-install-create-vm/) seems pretty decent, or you can follow the [Oracle VirtualBox User Manual](http://www.virtualbox.org/manual/UserManual.html). There's also specific documentation for [Debian](https://wiki.debian.org/VirtualBox) and for [Ubuntu](https://help.ubuntu.com/community/VirtualBox). If that succeeds, check again whether you can now start vagrant nodes using plain vagrant (see the first step).
+* If plain vagrant works for you, you're very close to using vagrant with leap! If you encounter any problems now, please [contact us](https://leap.se/en/about-us/contact) or use our [issue tracker](https://leap.se/code).
+
+Known working combinations
+--------------------------
+
+Other combinations might work for you as well; these are just the combinations we tried and that worked for us:
+
+
+Debian Wheezy
+-------------
+
+* `virtualbox-4.2 4.2.16-86992~Debian~wheezy` from Oracle and `vagrant 1.2.2` from vagrantup.com
+
+
+Ubuntu Raring 13.04
+-------------------
+
+* `virtualbox 4.2.10-dfsg-0ubuntu2.1` from Ubuntu raring and `vagrant 1.2.2` from vagrantup.com
+
+Mac OS X 10.9
+-------------
+
+* `VirtualBox 4.3.10` from virtualbox.org and `vagrant 1.5.4` from vagrantup.com
+
+
+Using Vagrant with libvirt/kvm
+==============================
+
+Vagrant can be used with different providers/backends, one of them being [vagrant-libvirt](https://github.com/pradels/vagrant-libvirt). Here are the steps to use it. Be sure to use a recent vagrant version for the vagrant-libvirt plugin (>= 1.5, which at the moment can only be fetched from http://www.vagrantup.com/downloads.html).
+
+Install vagrant-libvirt plugin and add box
+------------------------------------------
+ sudo apt-get install libvirt-bin libvirt-dev
+ # you need to assign the new 'libvirtd' group to your user in a running x session, or logout and login again:
+ newgrp libvirtd
+ # to build the vagrant-libvirt plugin you need the following packages:
+ sudo apt-get install ruby-dev libxslt-dev libxml2-dev libvirt-dev
+ vagrant plugin install vagrant-libvirt
+ vagrant plugin install sahara
+ vagrant box add leap-wheezy https://downloads.leap.se/platform/vagrant/libvirt/leap-wheezy.box --provider libvirt
+
+Remove Virtualbox
+-----------------
+ sudo apt-get remove virtualbox*
+
+Debugging
+---------
+
+If you get an error in any of the above commands, try to get some debugging information; it will often tell you what is wrong. To get debugging logs, simply re-run the command that produced the error, prefixed with VAGRANT_LOG=info, for example:
+
+    VAGRANT_LOG=info vagrant box add leap-wheezy https://downloads.leap.se/platform/vagrant/libvirt/leap-wheezy.box
+
+Start it
+--------
+
+Use this example Vagrantfile:
+
+ Vagrant.configure("2") do |config|
+ config.vm.define :testvm do |testvm|
+ testvm.vm.box = "leap-wheezy"
+ testvm.vm.network :private_network, :ip => '10.6.6.201'
+ end
+
+ config.vm.provider :libvirt do |libvirt|
+ libvirt.connect_via_ssh = false
+ end
+ end
+
+Then:
+
+ vagrant up --provider=libvirt
+
+If everything works, you should export libvirt as the VAGRANT_DEFAULT_PROVIDER:
+
+ export VAGRANT_DEFAULT_PROVIDER="libvirt"
+
+Now you should be able to use the `leap local` commands.
+
+Known Issues
+------------
+
+* 'Call to virConnectOpen failed: internal error: Unable to locate libvirtd daemon in /usr/sbin (to override, set $LIBVIRTD_PATH to the name of the libvirtd binary)' - the libvirtd daemon is not installed or not running; be sure you installed the 'libvirt-bin' package and that the daemon is running
+* 'Call to virConnectOpen failed: Failed to connect socket to '/var/run/libvirt/libvirt-sock': Permission denied' - you need to be in the libvirt group to access the socket; run 'sudo adduser <user> libvirt' and then re-login to your session
+* If each call to vagrant ends up with a segfault, it may be because you still have virtualbox around. If so, remove virtualbox to keep only libvirt + KVM. According to https://github.com/pradels/vagrant-libvirt/issues/75, having two virtualization engines installed simultaneously can lead to such weird issues.
+* see the [vagrant-libvirt issue list on github](https://github.com/pradels/vagrant-libvirt/issues)
+* be sure to use vagrant-libvirt >= 0.0.11 and sahara >= 0.0.16 (which are the latest stable gems you would get with `vagrant plugin install [vagrant-libvirt|sahara]`) for proper libvirt support
+* for shared folder support, you need nfs-kernel-server installed on the host machine and set up sudo to allow unprivileged users to modify /etc/exports. See [vagrant-libvirt#synced-folders](https://github.com/pradels/vagrant-libvirt#synced-folders)
+
+
+    sudo apt-get install nfs-kernel-server
+
+or you can disable shared folder support (if you do not need it), by setting the following in your Vagrantfile:
+
+    config.vm.synced_folder "src/", "/srv/website", disabled: true
diff --git a/doc/details/en.haml b/doc/details/en.haml
new file mode 100644
index 00000000..fe7a4c84
--- /dev/null
+++ b/doc/details/en.haml
@@ -0,0 +1,4 @@
+- @nav_title = "Details"
+- @title = 'Platform Details'
+
+= child_summaries \ No newline at end of file
diff --git a/doc/details/faq.md b/doc/details/faq.md
new file mode 100644
index 00000000..57afb6c4
--- /dev/null
+++ b/doc/details/faq.md
@@ -0,0 +1,65 @@
+@title = 'Frequently asked questions'
+@nav_title = 'FAQ'
+@summary = "Frequently Asked Questions"
+@toc = true
+
+APT
+===============
+
+What do I do when unattended upgrades fail?
+--------------------------------------------------
+
+When you receive notification e-mails with a subject of 'unattended-upgrades result for $machinename', it means that some package couldn't be automatically upgraded and needs manual intervention. The reasons vary, so you have to be careful. Most often you can simply log in to the affected machine and run `apt-get dist-upgrade`.
+
+Puppet
+======
+
+Where do I find the time a server was last deployed?
+-----------------------------------------------------
+
+The puppet state file on the node indicates the last puppetrun:
+
+ ls -la /var/lib/puppet/state/state.yaml
+
+What resources are touched by puppet/leap_platform (services/packages/files etc.)?
+-----------------------------------------------------------------------------------
+
+Log into your server and issue:
+
+ grep -v '!ruby/sym' /var/lib/puppet/state/state.yaml | sed 's/\"//' | sort
+
+
+How can I customize the leap_platform puppet manifests?
+--------------------------------------------------------
+
+You can create custom puppet modules under `files/puppet`.
+The custom puppet entry point is in class 'custom' which can be put into
+`files/puppet/modules/custom/manifests/init.pp`. This class gets automatically included
+by site_config::default, which is applied to all nodes.
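+
+A minimal sketch of how such a module could be laid out in the provider repository (the class body is whatever you need):
+
+    mkdir -p files/puppet/modules/custom/manifests
+    $EDITOR files/puppet/modules/custom/manifests/init.pp   # define 'class custom { ... }' in this file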
+
+Of course, you can also create a different git branch and change whatever you want, if you are
+familiar with git.
+
+Facter
+======
+
+How can I see custom facts distributed by leap_platform on a node?
+-------------------------------------------------------------------
+
+On the server, export the FACTERLIB env. variable to include the path of the custom fact in question:
+
+ export FACTERLIB=/var/lib/puppet/lib/facter:/srv/leap/puppet/modules/stdlib/lib/facter/
+ facter
+
+
+Etc
+===
+
+How do I change the domain of my provider?
+-------------------------------------------
+
+* First of all, you need to have access to the nameserver config of your new domain.
+* Update domain in provider.json
+* remove all ca and cert files: `rm files/cert/* files/ca/*`
+* create ca, csr and certs: `leap cert ca; leap cert csr; leap cert dh; leap cert update`
+* deploy
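+
+Put together, the certificate regeneration steps look roughly like this (a sketch, run from your provider directory):
+
+    rm files/cert/* files/ca/*
+    leap cert ca
+    leap cert csr
+    leap cert dh
+    leap cert update
+    leap deploy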
diff --git a/doc/details/under-the-hood.md b/doc/details/under-the-hood.md
new file mode 100644
index 00000000..0bc4fe77
--- /dev/null
+++ b/doc/details/under-the-hood.md
@@ -0,0 +1,40 @@
+@title = "Under the hood"
+@summary = "Various implementation details."
+
+This page contains various details on how the platform is implemented. You can safely ignore this page, although it may be useful if you plan to make modifications to the platform.
+
+Puppet Details
+======================================
+
+Tags
+----
+
+Tags are used to deploy different classes.
+
+* leap_base: site_config::default (configures hostname, resolver, sshd, etc.)
+* leap_slow: site_config::slow (slow operations: apt-get update, apt-get dist-upgrade)
+* leap_service: configures platform services (openvpn, couchdb, etc.)
+
+You can pass any combination of tags. For example, you can use (see also the sketch after this list):
+
+* "--tags leap_base,leap_slow,leap_service" (DEFAULT): Deploy all
+* "--tags leap_service": Only deploy service(s) (useful for debugging/development)
+* "--tags leap_base": Only deploy basic configuration (again, useful for debugging/development)
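+
+A concrete sketch of a service-only run against a single node (the node name is an example):
+
+    leap deploy web1 --tags leap_service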
+
+
+### Doing faster partial deploys
+
+If you only change a tiny bit of the platform puppet recipes, you can achieve a
+*much* faster deploy by specifying the resource tag you changed.
+For example, say you changed the way rsyslog config snippets for LEAP logfiles are created
+in `puppet/modules/leap/manifests/logfile.pp`. This `define` resource gets tagged
+automatically with `leap::logfile` and you can deploy the change with:
+
+ leap deploy *NODE* --fast --tags=leap::logfile
+
+or, if you just want to run the apt dist-upgrade step:
+
+ leap deploy --tags=dist_upgrade
+
+See http://docs.puppetlabs.com/puppet/2.7/reference/lang_tags.html for puppet tag usage.
+
diff --git a/doc/details/webapp.md b/doc/details/webapp.md
new file mode 100644
index 00000000..2b078af4
--- /dev/null
+++ b/doc/details/webapp.md
@@ -0,0 +1,282 @@
+@title = 'LEAP Web'
+@summary = 'The web component of the LEAP Platform, providing user management, support desk, documentation and more.'
+@toc = true
+
+Introduction
+===================
+
+"LEAP Web" is the webapp component of the LEAP Platform, providing the following services:
+
+* REST API for user registration.
+* Admin interface to manage users.
+* Client certificate distribution and renewal.
+* User support help tickets.
+* Billing
+* Customizable and Localized user documentation
+
+This web application is written in Ruby on Rails 3, using CouchDB as the backend data store.
+
+It is licensed under the GNU Affero General Public License (version 3.0 or higher). See http://www.gnu.org/licenses/agpl-3.0.html for more information.
+
+Known problems
+====================
+
+* Client certificates are generated without a CSR. The problem is that this makes the web
+ application extremely vulnerable to denial of service attacks. This was not an issue until we
+ started to allow the possibility of anonymously fetching a client certificate without
+ authenticating first.
+
+* By its very nature, the user database is vulnerable to enumeration attacks. These are
+ very hard to prevent, because our protocol is designed to allow query of a user database via
+ proxy in order to provide network perspective.
+
+Integration
+===========
+
+LEAP Web is part of the LEAP Platform. Most of the time it will be customized and deployed in that context. This section describes the integration of LEAP Web in the wider framework. The Development section focuses on development of LEAP Web itself.
+
+Configuration & Customization
+------------------------------
+
+The customization of the webapp for a LEAP provider happens via two means:
+ * configuration settings in services/webapp.json
+ * custom files in files/webapp
+
+### Configuration Settings
+
+The webapp ships with a fairly large set of default settings for all environments. They are stored in config/defaults.yml. During deploy the platform creates config/config.yml from the settings in services/webapp.json. These settings will overwrite the defaults.
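+
+For example, a minimal `services/webapp.json` in the provider repository might look like this (the values are illustrative; the `admins` property grants admin privileges to the listed usernames):
+
+    {
+      "webapp": {
+        "admins": ["alice", "bob"]
+      }
+    }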
+
+### Custom Files
+
+Any file placed in files/webapp in the provider's repository will be copied into config/customization in the webapp, overriding any file of the same name.
+
+This mechanism allows customizing basically all aspects of the webapp.
+See files/webapp/README.md in the provider's repository for more.
+
+### Provider Information ###
+
+The LEAP client fetches provider information as JSON files from the server. The platform prepares that information and stores it in the webapp under public/1/config/*.json (1 being the current API version).
+
+Provider Documentation
+-------------
+
+LEAP Web already comes with a bit of user documentation. It mostly resides in app/views/pages and thus can be overwritten by adding files to files/webapp/views/pages in the provider repository. You probably want to add your own Terms of Service and Privacy Policy here.
+The webapp will render haml, erb, and markdown templates, and pick translated content from localized files such as privacy_policy.es.md. In order to add or remove languages you have to modify the available_locales setting in the config (see Configuration Settings above).
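+
+For example, to ship a localized privacy policy, the provider repository might contain files like these (the exact page names depend on the templates in app/views/pages):
+
+    files/webapp/views/pages/privacy_policy.en.md
+    files/webapp/views/pages/privacy_policy.es.md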
+
+Development
+===========
+
+Installation
+---------------------------
+
+Typically, this application is installed automatically as part of the LEAP Platform. To install it manually for testing or development, follow these instructions:
+
+### TL;DR ###
+
+Install git, ruby 1.9, rubygems and couchdb on your system. Then run
+
+ gem install bundler
+ git clone https://leap.se/git/leap_web
+ cd leap_web
+ git submodule update --init
+ bundle install --binstubs
+ bin/rails server
+
+### Install system requirements
+
+First of all you need to install ruby, git, and couchdb. On Debian-based systems this can be achieved with something like
+
+ sudo apt-get install git ruby1.9.3 rubygems couchdb
+
+We install most gems we depend upon through [bundler](http://gembundler.com). So first install bundler
+
+ sudo gem install bundler
+
+On Debian Wheezy or later, there is a Debian package for bundler, so you can alternatively run ``sudo apt-get install bundler``.
+
+### Download source
+
+Simply clone the git repository:
+
+ git clone git://leap.se/leap_web
+ cd leap_web
+
+### SRP Submodule
+
+We currently use a git submodule to include srp-js. This will soon be replaced by a ruby gem, but for now you need to run
+
+ git submodule update --init
+
+### Install required ruby libraries
+
+ cd leap_web
+ bundle
+
+Typically, you run ``bundle`` as a normal user and it will ask you for a sudo password when it is time to install the required gems. If you don't have sudo, run ``bundle`` as root.
+
+Configuration
+----------------------------
+
+The configuration file `config/defaults.yml` provides good defaults for most
+values. You can override these defaults by creating a file `config/config.yml`.
+
+There are a few values you should make sure to modify:
+
+ production:
+ admins: ["myusername","otherusername"]
+ domain: example.net
+ force_ssl: true
+ secret_token: "4be2f60fafaf615bd4a13b96bfccf2c2c905898dad34..."
+ client_ca_key: "/etc/ssl/ca.key"
+ client_ca_cert: "/etc/ssl/ca.crt"
+ ca_key_password: nil
+
+* `admins` is an array of usernames that are granted special admin privilege.
+* `domain` is your fully qualified domain name.
+* `force_ssl`, if set to true, will require secure cookies and turn on HSTS. Don't do this if you are using a self-signed server certificate.
+* `secret_token`, used for cookie security, you can create one with `rake secret`. Should be at least 30 characters.
+* `client_ca_key`, the private key of the CA used to generate client certificates.
+* `client_ca_cert`, the public certificate of the CA used to generate client certificates.
+* `ca_key_password`, used to unlock the client_ca_key, if needed.
+
+### Provider Settings
+
+The LEAP client fetches provider information as JSON files from the server.
+If you want to use that functionality, please add your provider files to the public/1/config directory (1 being the current API version).
+
+Running
+-----------------------------
+
+ cd leap_web
+ bin/rails server
+
+You will find Leap Web running on `localhost:3000`.
+
+Testing
+--------------------------------
+
+To run all tests
+
+ rake test
+
+To run an individual test:
+
+    rake test TEST=certs/test/unit/client_certificate_test.rb
+
+or
+
+    ruby -Itest certs/test/unit/client_certificate_test.rb
+
+Engines
+---------------------
+
+Leap Web includes some Engines. Files in `app` will override the corresponding engine behaviour. You can clone the leap web repository and add your customizations to the `app` directory. Including leap_web as a gem is currently not supported. It should not require too much work though and we would be happy to include the changes required.
+
+If you have no use for one of the engines you can remove it from the Gemfile. Engines should really be plugins - no other engines should depend upon them. If you need functionality in different engines it should probably go into the toplevel.
+
+# Deployment #
+
+We strongly recommend using the LEAP platform for deploy. Most of the things documented here are automated as part of the platform. If you want to research how the platform deploys or work on your own mechanism this section is for you.
+
+These instructions are targeting a Debian GNU/Linux system. You might need to change the commands to match your own needs.
+
+## Server Preparation ##
+
+### Dependencies ###
+
+The following packages need to be installed:
+
+* git
+* ruby1.9
+* rubygems1.9
+* couchdb (if you want to use a local couch)
+
+### Setup Capistrano ###
+
+We use puppet to deploy, but we also ship an untested config/deploy.rb.example. Edit it to match your needs if you want to use Capistrano.
+
+Run `cap deploy:setup` to create the directory structure.
+
+Run `cap deploy` to deploy to the server.
+
+## Customized Files ##
+
+Please make sure your deploy includes the following files:
+
+* public/1/config/*.json (see Provider Settings section)
+* config/couchdb.yml
+
+## Couch Security ##
+
+We recommend against using an admin user for running the webapp. To avoid this, couch design documents need to be created ahead of time and the auto-update mechanism needs to be disabled.
+Take a look at test/setup_couch.sh for an example of securing the couch.
+
+## Design Documents ##
+
+After securing the couch, design documents need to be deployed with admin permissions. There are two ways of doing this:
+ * rake couchrest:migrate_with_proxies
+ * dump the documents as files with `rake couchrest:dump` and deploy them
+ to the couch by hand or with the platform.
+
+### CouchRest::Migrate ###
+
+The before_script block in .travis.yml illustrates how to do this:
+
+ mv test/config/couchdb.yml.admin config/couchdb.yml # use admin privileges
+ bundle exec rake couchrest:migrate_with_proxies # run the migrations
+ bundle exec rake couchrest:migrate_with_proxies # looks like this needs to run twice
+ mv test/config/couchdb.yml.user config/couchdb.yml # drop admin privileges
+
+### Deploy design docs from CouchRest::Dump ###
+
+First of all we get the design docs as files:
+
+ # put design docs in /tmp/design
+ bundle exec rake couchrest:dump
+
+Then we add them to files/design in the site_couchdb module in leap_platform so they get deployed with the couch. You could also upload them using curl or something similar.
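+
+As a sketch, a dumped design document could be uploaded by hand to a local couch with something like this (the database name, design document name, file path, and credentials are hypothetical):
+
+    curl -X PUT http://admin:password@localhost:5984/users/_design/User \
+         --data-binary @/tmp/design/users/User.json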
+
+# Troubleshooting #
+
+Here are some less common issues you might run into when installing Leap Web.
+
+## Cannot find Bundler ##
+
+### Error Messages ###
+
+`bundle: command not found`
+
+### Solution ###
+
+Make sure bundler is installed. `gem list bundler` should list `bundler`.
+You also need to be able to access the `bundler` executable in your PATH.
+
+## Outdated version of rubygems ##
+
+### Error Messages ###
+
+`bundler requires rubygems >= 1.3.6`
+
+### Solution ###
+
+`gem update --system` will install the latest rubygems.
+
+## Missing development tools ##
+
+Some required gems compile C extensions and need a number of build utilities to do so.
+
+### Error Messages ###
+
+`make: Command not found`
+
+### Solution ###
+
+Install the required tools. On Linux, the `build-essential` package provides most of them. On Mac OS, you probably want the Xcode Command Line Tools.
+
+## Missing libraries and headers ##
+
+Some gem dependencies might not compile because they lack the needed C libraries.
+
+### Solution ###
+
+Install the libraries in question including their development files.
+
+
diff --git a/doc/en.md b/doc/en.md
new file mode 100644
index 00000000..07f07b7f
--- /dev/null
+++ b/doc/en.md
@@ -0,0 +1,85 @@
+@title = 'LEAP Platform for Service Providers'
+@nav_title = 'Provider Platform'
+@toc = false
+
+The *LEAP Platform* is a set of complementary packages and server recipes to automate the maintenance of LEAP services in a hardened Debian environment. Its goal is to make it as painless as possible for sysadmins to deploy and maintain a service provider's infrastructure for secure communication.
+
+The LEAP Platform consists of three parts, detailed below:
+
+1. [The platform recipes.](#the-platform-recipes)
+2. [The provider instance.](#the-provider-instance)
+3. [The `leap` command line tool.](#the-leap-command-line-tool)
+
+The platform recipes
+--------------------
+
+The LEAP platform recipes define an abstract service provider. It is a set of [Puppet](https://puppetlabs.com/puppet/puppet-open-source/) modules designed to work together to provide sysadmins with everything they need to manage a service provider infrastructure that provides secure communication services.
+
+LEAP maintains a repository of platform recipes, which typically do not need to be modified, although it can be forked and merged as desired. Most service providers using the LEAP platform can use the same set of platform recipes.
+
+Because these recipes consist of abstract definitions, a system administrator has to create a provider instance (see below) in order to configure the settings for a particular service provider.
+
+LEAP's platform recipes are distributed as a git repository: `https://leap.se/git/leap_platform`
+
+The provider instance
+---------------------
+
+A provider instance is a directory tree (typically tracked in git) containing all the configurations for a service provider's infrastructure. A provider instance primarily consists of:
+
+* A pointer to the platform recipes.
+* A global configuration file for the provider.
+* A configuration file for each server (node) in the provider's infrastructure.
+* Additional files, such as certificates and keys.
+
+A minimal provider instance directory looks like this:
+
+ └── bitmask # provider instance directory.
+ ├── Leapfile # settings for the `leap` command line tool.
+ ├── provider.json # global settings of the provider.
+ ├── common.json # settings common to all nodes.
+ ├── nodes/ # a directory for node configurations.
+ ├── files/ # keys, certificates, and other files.
+ └── users/ # public key information for privileged sysadmins.
+
+
+A provider instance directory contains everything needed to manage all the servers that compose a provider's infrastructure. Because of this, any versioning tool and development work-flow can be used to manage your provider instance.
+
+The `leap` command line tool
+----------------------------
+
+The `leap` [command line tool](commands) is used by sysadmins to manage everything about a service provider's infrastructure. Except when creating a new provider instance, `leap` is run from within the directory tree of a provider instance.
+
+The `leap` command line has many capabilities, including:
+
+* Create, initialize, and deploy nodes.
+* Manage keys and certificates.
+* Query information about the node configurations.
+
+Traditional system configuration automation systems, like [Puppet](https://puppetlabs.com/puppet/puppet-open-source/) or [Chef](http://www.opscode.com/chef/), deploy changes to servers using a pull method. Each server pulls a manifest from a central master server and uses this to alter the state of the server.
+
+Instead, the `leap` tool uses a masterless push method: The sysadmin runs `leap deploy` from the provider instance directory on their desktop machine to push the changes out to every server (or a subset of servers). LEAP still uses Puppet, but there is no central master server that each node must pull from.
+
+One other significant difference between LEAP and typical system automation is how interactions among servers are handled. Rather than store a central database of information about each server that can be queried when a recipe is applied, the `leap` command compiles a static representation of all the information a particular server will need in order to apply the recipes. In compiling this static representation, `leap` can use arbitrary programming logic to query and manipulate information about other servers.
+
+These two approaches, masterless push and pre-compiled static configuration, allow the sysadmin to manage a set of LEAP servers using traditional software development techniques of branching and merging, to more easily create local testing environments using virtual servers, and to deploy without the added complexity and failure potential of a master server.
+
+The `leap` command line tool is distributed as a git repository: `https://leap.se/git/leap_cli`. It can be installed with `sudo gem install leap_cli`.
+
+Tip: With rubygems, you can always specify the gem version as the first argument to any executable installed by rubygems. For example:
+
+ sudo gem install leap_cli --version 1.6.2
+ sudo gem install leap_cli --version 1.7.2
+ leap _1.6.2_ --version
+ => leap 1.6.2, ruby 2.1.2
+ leap _1.7.2_ --version
+ => leap 1.7.2, ruby 2.1.2
+
+Getting started
+----------------------------------
+
+We recommend reading the platform documentation in the following order:
+
+1. [Quick start tutorial](tutorials/quick-start).
+2. [Platform Guide](platform/guide).
+3. [Configuration format](platform/config).
+4. The `leap` [command reference](platform/commands).
diff --git a/doc/guide/commands.md b/doc/guide/commands.md
new file mode 100644
index 00000000..eaacc8d5
--- /dev/null
+++ b/doc/guide/commands.md
@@ -0,0 +1,419 @@
+@title = 'Command Line Reference'
+@summary = "A copy of leap --help"
+
+The command "leap" can be used to manage a bevy of servers running the LEAP platform from the comfort of your own home.
+
+
+# Global Options
+
+* `--log FILE`
+Override default log file
+Default Value: None
+
+* `-v|--verbose LEVEL`
+Verbosity level 0..5
+Default Value: 1
+
+* `--[no-]color`
+Disable colors in output
+
+* `--debug`
+Enable debugging library (leap_cli development only)
+
+* `--help`
+Show this message
+
+* `--version`
+Display version number and exit
+
+* `--yes`
+Skip prompts and assume "yes"
+
+
+# leap add-user USERNAME
+
+Adds a new trusted sysadmin by adding public keys to the "users" directory.
+
+
+
+**Options**
+
+* `--pgp-pub-key arg`
+OpenPGP public key file for this new user
+Default Value: None
+
+* `--ssh-pub-key arg`
+SSH public key file for this new user
+Default Value: None
+
+* `--self`
+Add yourself as a trusted sysadmin by choosing among the public keys available for the current user.
+
+
+# leap cert
+
+Manage X.509 certificates
+
+
+
+## leap cert ca
+
+Creates two Certificate Authorities (one for validating servers and one for validating clients).
+
+To see what values are used in the generation of the certificates (like name and key size), run `leap inspect provider` and look for the "ca" property. To see the details of the created certs, run `leap inspect <file>`.
+
+## leap cert csr
+
+Creates a CSR for use in buying a commercial X.509 certificate.
+
+Unless specified, the CSR is created for the provider's primary domain. The properties used for this CSR come from `provider.ca.server_certificates`.
+
+**Options**
+
+* `--domain DOMAIN`
+Specify what domain to create the CSR for.
+Unless specified, the CSR is created for the provider's primary domain. The properties used for this CSR come from `provider.ca.server_certificates`.
+Default Value: None
+
+
+## leap cert dh
+
+Creates a Diffie-Hellman parameter file, needed for forward secret OpenVPN ciphers. You don't need this file if you don't provide the VPN service.
+
+
+
+## leap cert update FILTER
+
+Creates or renews a X.509 certificate/key pair for a single node or all nodes, but only if needed.
+
+This command will generate a new certificate for a node if some value in the node has changed that is included in the certificate (like hostname or IP address), or if the old certificate will be expiring soon. Sometimes, you might want to force the generation of a new certificate, such as in the cases where you have changed a CA parameter for server certificates, like bit size or digest hash. In this case, use --force. If <node-filter> is empty, this command will apply to all nodes.
+
+**Options**
+
+* `--force`
+Always generate new certificates
+
+
+# leap clean
+
+Removes all files generated with the "compile" command.
+
+
+
+# leap compile
+
+Compile generated files.
+
+
+
+## leap compile all [ENVIRONMENT]
+
+Compiles node configuration files into hiera files used for deployment.
+
+
+
+## leap compile zone
+
+Compile a DNS zone file for your provider.
+
+
+Default Command: all
+
+# leap db
+
+Database commands.
+
+
+
+## leap db destroy [FILTER]
+
+Destroy all the databases. If present, limit to FILTER nodes.
+
+
+
+# leap deploy FILTER
+
+Apply recipes to a node or set of nodes.
+
+The FILTER can be the name of a node, service, or tag.
+
+**Options**
+
+* `--ip IPADDRESS`
+Override the default SSH IP address.
+Default Value: None
+
+* `--port PORT`
+Override the default SSH port.
+Default Value: None
+
+* `--tags TAG[,TAG]`
+Specify tags to pass through to puppet (overriding the default).
+Default Value: leap_base,leap_service
+
+* `--dev`
+Development mode: don't run 'git submodule update' before deploy.
+
+* `--fast`
+Makes the deploy command faster by skipping some slow steps. A "fast" deploy can be used safely if you recently completed a normal deploy.
+
+* `--force`
+Deploy even if there is a lockfile.
+
+* `--[no-]sync`
+Sync files, but don't actually apply recipes.
+
+
+# leap env
+
+Manipulate and query environment information.
+
+The 'environment' node property can be used to isolate sets of nodes into entirely separate environments. A node in one environment will never interact with a node from another environment. Environment pinning works by modifying your ~/.leaprc file and is dependent on the absolute file path of your provider directory (pins don't apply if you move the directory).
+
+## leap env ls
+
+List the available environments. The pinned environment, if any, will be marked with '*'.
+
+
+
+## leap env pin ENVIRONMENT
+
+Pin the environment to ENVIRONMENT. All subsequent commands will only apply to nodes in this environment.
+
+
+
+## leap env unpin
+
+Unpin the environment. All subsequent commands will apply to all nodes.
+
+
+Default Command: ls
+
+# leap facts
+
+Gather information on nodes.
+
+
+
+## leap facts update FILTER
+
+Query servers to update facts.json.
+
+Queries every node included in FILTER and saves the important information to facts.json
+
+# leap help command
+
+Shows a list of commands or help for one command
+
+Gets help for the application or its commands. Can also list the commands in a way helpful to creating a bash-style completion function
+
+**Options**
+
+* `-c`
+List commands one per line, to assist with shell completion
+
+
+# leap inspect FILE
+
+Prints details about a file. Alternately, the argument FILE can be the name of a node, service or tag.
+
+
+
+**Options**
+
+* `--base`
+Inspect the FILE from the provider_base (i.e. without local inheritance).
+
+
+# leap list [FILTER]
+
+List nodes and their classifications
+
+Prints out a listing of nodes, services, or tags. If present, the FILTER can be a list of names of nodes, services, or tags. If the name is prefixed with +, this acts like an AND condition. For example:
+
+`leap list node1 node2` matches all nodes named "node1" OR "node2"
+
+`leap list openvpn +local` matches all nodes with service "openvpn" AND tag "local"
+
+**Options**
+
+* `--print arg`
+What attributes to print (optional)
+Default Value: None
+
+* `--disabled`
+Include disabled nodes in the list.
+
+
+# leap local
+
+Manage local virtual machines.
+
+This command provides a convenient way to manage Vagrant-based virtual machines. If the FILTER argument is missing, the command runs on all local virtual machines. The Vagrantfile is automatically generated in 'test/Vagrantfile'. If you want to run vagrant commands manually, cd to 'test'.
+
+## leap local destroy [FILTER]
+
+Destroys the virtual machine(s), reclaiming the disk space
+
+
+
+## leap local reset [FILTER]
+
+Resets virtual machine(s) to the last saved snapshot
+
+
+
+## leap local save [FILTER]
+
+Saves the current state of the virtual machine as a new snapshot
+
+
+
+## leap local start [FILTER]
+
+Starts up the virtual machine(s)
+
+
+
+## leap local status [FILTER]
+
+Print the status of local virtual machine(s)
+
+
+
+## leap local stop [FILTER]
+
+Shuts down the virtual machine(s)
+
+
+
+# leap mosh NAME
+
+Log in to the specified node with an interactive shell using mosh (requires node to have mosh.enabled set to true).
+
+
+
+# leap new DIRECTORY
+
+Creates a new provider instance in the specified directory, creating it if necessary.
+
+
+
+**Options**
+
+* `--contacts arg`
+Default email address contacts.
+Default Value: None
+
+* `--domain arg`
+The primary domain of the provider.
+Default Value: None
+
+* `--name arg`
+The name of the provider.
+Default Value: None
+
+* `--platform arg`
+File path of the leap_platform directory.
+Default Value: None
+
+
+# leap node
+
+Node management
+
+
+
+## leap node add NAME [SEED]
+
+Create a new configuration file for a node named NAME.
+
+If specified, the optional argument SEED can be used to seed values in the node configuration file.
+
+The format is property_name:value.
+
+For example: `leap node add web1 ip_address:1.2.3.4 services:webapp`.
+
+To set nested properties, the property name can contain '.', like so: `leap node add web1 ssh.port:44`
+
+Separate multiple values for a single property with a comma, like so: `leap node add mynode services:webapp,dns`
+
+**Options**
+
+* `--local`
+Make a local testing node (by automatically assigning the next available local IP address). Local nodes are run as virtual machines on your computer.
+
+
+## leap node init FILTER
+
+Bootstraps a node or nodes, setting up SSH keys and installing prerequisite packages
+
+This command prepares a server to be used with the LEAP Platform by saving the server's SSH host key, copying the authorized_keys file, installing packages that are required for deploying, and registering important facts. Node init must be run before deploying to a server, and the server must be running and available via the network. This command only needs to be run once, but there is no harm in running it multiple times.
+
+**Options**
+
+* `--ip IPADDRESS`
+Override the default SSH IP address.
+Default Value: None
+
+* `--port PORT`
+Override the default SSH port.
+Default Value: None
+
+* `--echo`
+If set, passwords are visible as you type them (default is hidden)
+
+
+## leap node mv OLD_NAME NEW_NAME
+
+Renames a node file, and all its related files.
+
+
+
+## leap node rm NAME
+
+Removes all the files related to the node named NAME.
+
+
+
+# leap ssh NAME
+
+Log in to the specified node with an interactive shell.
+
+
+
+**Options**
+
+* `--port arg`
+Override ssh port for remote host
+Default Value: None
+
+* `--ssh arg`
+Pass through raw options to ssh (e.g. --ssh '-F ~/sshconfig')
+Default Value: None
+
+
+# leap test
+
+Run tests.
+
+
+
+## leap test init
+
+Creates files needed to run tests.
+
+
+
+## leap test run
+
+Run tests.
+
+
+
+**Options**
+
+* `--[no-]continue`
+Continue over errors and failures (default is --no-continue).
+
+Default Command: run
diff --git a/doc/guide/config.md b/doc/guide/config.md
new file mode 100644
index 00000000..be67e6bd
--- /dev/null
+++ b/doc/guide/config.md
@@ -0,0 +1,263 @@
+@title = "Configuration Files"
+@summary = "How to edit configuration files."
+
+Files
+-------------------------------------------
+
+Here is a list of some of the common files that make up a provider. Except for Leapfile and provider.json, the files are optional. Unless otherwise specified, all file names are relative to the 'provider directory' root (where the Leapfile is).
+
+`Leapfile` -- If present, this file tells `leap` that the directory is a provider directory. This file is usually empty, but can contain global options.
+
+`~/.leaprc` -- Evaluated the same as Leapfile, but not committed to source control.
+
+`provider.json` -- Global options related to this provider.
+
+`provider.ENVIRONMENT.json` -- Global options for the provider that are applied to only a single environment.
+
+`common.json` -- All nodes inherit from this file.
+
+`secrets.json` -- An automatically generated file that contains any randomly generated strings needed in order to deploy. These strings are often secret and should be protected. Any recipe that needs a remembered random string or number will add another entry to this file. This file is automatically generated and refreshed each time you run `leap compile` or `leap deploy`. If an entry is no longer needed, it will get removed. If you want to change a secret, you can remove this file and have it regenerated, or remove the particular line item and just those items will be created anew.
+
+`facts.json` -- If some of your servers are running on AWS or OpenStack, you will need to discover certain properties about how networking is configured on these machines in order for a full deploy to work. In these cases, make sure to run `leap facts update` to periodically regenerate the facts.json file.
+
+`nodes/NAME.json` -- The configuration file for node called NAME.
+
+`services/SERVICE.json` -- The properties in this configuration file are applied to any node that includes SERVICE in its `services` property.
+
+`services/SERVICE.ENVIRONMENT.json` -- The properties in this configuration file are applied to any node that includes SERVICE in its services and has environment equal to ENVIRONMENT.
+
+`tags/TAG.json` -- The properties in this configuration file are applied to any node that includes TAG in its `tags` property.
+
+`tags/TAG.ENVIRONMENT.json` -- The properties in this configuration file are applied to any node that includes TAG in its `tags` property and has its `environment` property equal to ENVIRONMENT.
+
+`files/*` -- Various static files used by the platform (e.g. keys, certificates, webapp customization, etc).
+
+`users/USER/` -- A directory that stores the public keys of the sysadmin with name USER. This person will have root access to all the servers.
+
+
+Leapfile
+-------------------------------------------
+
+A `Leapfile` defines options for the `leap` command and lives at the root of your provider directory. `Leapfile` is evaluated as ruby, so you can include whatever weird logic you want in this file. In particular, there are several variables you can set that modify the behavior of leap. For example:
+
+ @platform_directory_path = '../leap_platform'
+ @log = '/var/log/leap.log'
+
+Additionally, you can create a `~/.leaprc` file that is loaded after `Leapfile` and is evaluated the same way.
+
+Platform options:
+
+* `@platform_directory_path` (required). This must be set to the path where `leap_platform` lives. The path may be relative.
+
+Vagrant options:
+
+* `@vagrant_network`. Allows you to override the default network used for local nodes. It should include a netmask like `@vagrant_network = '10.0.0.0/24'`.
+* `@custom_vagrant_vm_line`. Insert arbitrary text into the auto-generated Vagrantfile. For example, `@custom_vagrant_vm_line = "config.vm.boot_mode = :gui"`.
+
+Logging options:
+
+* `@log`. If set, all command invocation and results are logged to the specified file. This is the same as the switch `--log FILE`, except that the command line switch will override the value in the Leapfile.
+
+
+JSON format
+-------------------------------------------
+
+All configuration files, other than `Leapfile`, are in the JSON format. For example:
+
+ {
+ "key1": "value1",
+ "key2": "value2"
+ }
+
+Keys should match `/[a-z0-9_]/`
+
+Unlike traditional JSON, comments are allowed. If the first non-whitespace characters are `//` then the line is treated as a comment.
+
+ // this is a comment
+ {
+ // this is a comment
+ "key": "value" // this is an error
+ }
+
+Options in the configuration files might be nested hashes, arrays, numbers, strings, or boolean. Numbers and boolean values should **not** be quoted. For example:
+
+ {
+ "openvpn": {
+ "ip_address": "1.1.1.1",
+ "protocols": ["tcp", "udp"],
+ "ports": [80, 53],
+ "options": {
+ "public_ip": false,
+ "adblock": true
+ }
+ }
+ }
+
+If the value string is prefixed with an '=' character, the result is evaluated as ruby. For example:
+
+    {
+      "domain": {
+        "public": "domain.org"
+      },
+      "api_domain": "= 'api.' + domain.public"
+    }
+
+In this case, the property "api_domain" will be set to "api.domain.org". So long as you do not create unresolvable circular dependencies, you can reference other properties in evaluated ruby that are themselves evaluated ruby.
+
+See "Macros" below for information on the special macros available to the evaluated ruby.
+
+TIP: In rare cases, you might want to force the evaluation of a value to happen in a later pass after most of the other properties have been evaluated. To do this, prefix the value string with "=>" instead of "=".
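+
+As a sketch (the property name is illustrative), a late-evaluated value looks just like a normal evaluated value, but with the "=>" prefix:
+
+    {
+      "api_domain": "= 'api.' + domain.public",
+      "api_uri": "=> 'https://' + api_domain"
+    }
+
+Here `api_uri` is resolved in a later pass, after `api_domain` has already been evaluated.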
+
+Node inheritance
+----------------------------------------
+
+Every node inherits from common.json and also any of the services or tags attached to the node. Additionally, the `leap_platform` contains a directory `provider_base` that defines the default values for tags, services and common.json.
+
+Suppose you have a node configuration for `bitmask/nodes/willamette.json` like so:
+
+ {
+ "services": "webapp",
+ "tags": ["production", "northwest-us"],
+ "ip_address": "1.1.1.1"
+ }
+
+This node will have hostname "willamette" and it will inherit from the following files (in this order):
+
+1. common.json
+ - load defaults: `provider_base/common.json`
+ - load provider: `bitmask/common.json`
+2. service "webapp"
+ - load defaults: `provider_base/services/webapp.json`
+ - load provider: `bitmask/services/webapp.json`
+3. tag "production"
+ - load defaults: `provider_base/tags/production.json`
+ - load provider: `bitmask/tags/production.json`
+4. tag "northwest-us"
+ - load: `bitmask/tags/northwest-us.json`
+5. finally, load node "willamette"
+ - load: `bitmask/nodes/willamette.json`
+
+The `provider_base` directory is under the `leap_platform` specified in the file `Leapfile`.
+
+To see all the variables a node has inherited, you could run `leap inspect willamette`.
+
+Common configuration options
+----------------------------------------
+
+You can use the command `leap inspect` to see what options are available for a provider, node, service, or tag configuration. For example:
+
+* `leap inspect common` -- show the options inherited by all nodes.
+* `leap inspect --base common` -- show the common.json from `provider_base` without the local `common.json` inheritance applied.
+* `leap inspect webapp` -- show all the options available for the service `webapp`.
+
+Here are some of the more important options you should be aware of:
+
+* `ip_address` -- Required for all nodes, no default.
+* `ssh.port` -- The SSH port you want the node's OpenSSH server to bind to. This is also the default when trying to connect to a node, but if the node currently has OpenSSH running on a different port then run deploy with `--port` to override the `ssh.port` configuration value.
+* `mosh.enabled` -- If set to `true`, then mosh will be installed on the server. The default is `false`.
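+
+Putting a few of these together, a minimal node configuration might look like this (the values are illustrative):
+
+    {
+      "ip_address": "1.2.3.4",
+      "ssh": {
+        "port": 2200
+      },
+      "mosh": {
+        "enabled": true
+      }
+    }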
+
+Macros
+----------------------------------------
+
+When using evaluated ruby in a JSON configuration file, there are several special macros that are available. These are evaluated in the context of a node (available as the variable `self`).
+
+The following methods are available to the evaluated ruby:
+
+`variable.variable`
+
+ > Any variable defined or inherited by a particular node configuration is available by just referencing it using either hash notation or object field notation (e.g. `['domain']['public']` or `domain.public`). Circular references are not allowed, but otherwise it is OK to nest evaluated values in other evaluated values. If a value has not been defined, the hash notation will return nil but the field notation will raise an exception. Properties of services, tags, and the global provider can all be referenced the same way. For example, `global.services['openvpn'].x509.dh`.
+
+`nodes`
+
+ > A hash of all nodes. This list can be filtered.
+
+`nodes_like_me`
+
+ > A hash of nodes that have the same deployment tags as the current node (e.g. 'production' or 'local').
+
+`global.services`
+
+ > A hash of all services, e.g. `global.services['openvpn']` would return the "openvpn" service.
+
+`global.tags`
+
+ > A hash of all tags, e.g. `global.tags['production']` would return the "production" tag.
+
+ `global.provider`
+
+ > Can be used to access variables defined in `provider.json`, e.g. `global.provider.contacts.default`.
+
+`file(filename)`
+
+ > Inserts the full contents of the file. If the file is an erb template, it is rendered. The filename can either be one of the pre-defined file symbols, or it can be a path relative to the "files" directory in your provider instance. E.g. `file :ca_cert` or `file 'ca/ca.crt'`.
+
+`file_path(filename)`
+
+ > Ensures that the file will get rsynced to the node as an individual file. The value returned by `file_path` is the full path where this file will ultimately live when deployed to the node. E.g. `file_path :ca_cert` or `file_path 'branding/images/logo.png'`.
+
+`secret(:symbol)`
+
+ > Returns the value of a secret in secrets.json (or creates it if necessary). E.g. `secret :couch_admin_password`
+
+`hosts_file`
+
+ > Returns a data structure that puppet will use to generate /etc/hosts. Care is taken to use the local IP of other hosts when needed.
+
+`known_hosts_file`
+
+ > Returns the lines needed in a SSH `known_hosts` file.
+
+`stunnel_client(node_list, port, options={})`
+
+ > Returns a stunnel configuration data structure for the client side. Argument `node_list` is an `ObjectList` of nodes running stunnel servers. Argument `port` is the real port of the ultimate service running on the servers that the client wants to connect to.
+
+`stunnel_server(port)`
+
+ > Generates a stunnel server entry. The `port` is the real port of the targeted service.
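+
+As a sketch of how these macros are typically referenced from evaluated JSON values (the property names, service name, and port numbers are illustrative):
+
+    {
+      "couchdb_stunnel_client": "= stunnel_client(nodes[:services => 'couchdb'], 5984)",
+      "couchdb_stunnel_server": "= stunnel_server(15984)"
+    }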
+
+Hash tables
+-----------------------------------------
+
+The macros `nodes`, `nodes_like_me`, `global.services`, and `global.tags` all return a hash table of configuration objects (either nodes, services, or tags). There are several ways to filter and process these hash tables:
+
+Access an element by name:
+
+ nodes['vpn1'] # returns node named 'vpn1'
+ global.services['openvpn'] # returns service named 'openvpn'
+
+Create a new hash table by applying filters:
+
+ nodes[:public_dns => true] # all nodes where public_dns == true
+ nodes[:services => 'openvpn', 'location.country_code' => 'US'] # openvpn service OR in the US.
+ nodes[[:services, 'openvpn'], [:services, 'tor']] # services equal to openvpn OR tor
+ nodes[:services => 'openvpn'][:tags => 'production'] # openvpn AND production
+ nodes[:name => "!bob"] # all nodes that are NOT named "bob"
+
+Create an array of values by selecting a single field:
+
+ nodes.field('location.name')
+ ==> ['seattle', 'istanbul']
+
+Create an array of hashes by selecting multiple fields:
+
+ nodes.fields('domain.full', 'ip_address')
+ ==> [
+ {'domain_full' => 'red.bitmask.net', 'ip_address' => '1.1.1.1'},
+ {'domain_full' => 'blue.bitmask.net', 'ip_address' => '1.1.1.2'},
+ ]
+
+Create a new hash table of hashes, with only certain fields:
+
+    nodes.pick_fields('domain.full', 'ip_address')
+    ==> {
+      "red" => {'domain_full' => 'red.bitmask.net', 'ip_address' => '1.1.1.1'},
+      "blue" => {'domain_full' => 'blue.bitmask.net', 'ip_address' => '1.1.1.2'},
+    }
+
+With `pick_fields`, if there is only one field, it will generate a simple hash table:
+
+    nodes.pick_fields('ip_address')
+    ==> {
+      "red" => '1.1.1.1',
+      "blue" => '1.1.1.2',
+    }
diff --git a/doc/guide/en.haml b/doc/guide/en.haml
new file mode 100644
index 00000000..61c24ea8
--- /dev/null
+++ b/doc/guide/en.haml
@@ -0,0 +1,4 @@
+- @nav_title = "Guide"
+- @title = "Platform Guide"
+
+= child_summaries \ No newline at end of file
diff --git a/doc/guide/environments.md b/doc/guide/environments.md
new file mode 100644
index 00000000..752e0608
--- /dev/null
+++ b/doc/guide/environments.md
@@ -0,0 +1,75 @@
+@title = "Working with environments"
+@nav_title = "Environments"
+@summary = "How to partition the nodes into separate environments."
+
+With environments, you can divide your nodes into different and entirely separate sets. For example, you might have sets of nodes for 'testing', 'staging' and 'production'.
+
+Typically, the nodes in one environment are totally isolated from the nodes in a different environment. Each environment will have its own separate database, for example.
+
+There are a few exceptions to this rule: backup nodes, for example, will by default attempt to back up data from all the environments (excluding local).
+
+## Assign an environment
+
+To assign an environment to a node, you just set the `environment` node property. This is typically done with tags, although it is not necessary. For example:
+
+`tags/production.json`
+
+ {
+ "environment": "production"
+ }
+
+`nodes/mynode.json`
+
+ {
+ "tags": ["production"]
+ }
+
+There are several built-in tags that will apply a value for the environment:
+
+* `production`: An environment for nodes that are in use by end users.
+* `development`: An environment to be used for nodes that are being used for experiments or staging.
+* `local`: This environment gets automatically applied to all nodes that run only on local VMs. Nodes with a `local` environment are treated specially and excluded from certain calculations.
+
+You don't need to use these and you can add your own.
+
+## Environment commands
+
+* `leap env` -- List the available environments and display which one is active.
+* `leap env pin ENV` -- Pin the current environment to ENV.
+* `leap env unpin` -- Remove the environment pin.
+
+The environment pin is only active for your local machine: it is not recorded in the provider directory and not shared with other users.
+
+## Environment specific JSON files
+
+You can add JSON configuration files that are only applied when a specific environment is active. For example, if you create a file `provider.production.json`, these values will only get applied to the `provider.json` file for the `production` environment.
+
+This will also work for services and tags. For example:
+
+ provider.local.json
+ services/webapp.development.json
+ tags/seattle.production.json
+
+In this example, `local`, `development`, and `production` are the names of environments.
+
+## Bind an environment to a Platform version
+
+If you want to ensure that a particular environment is bound to a particular version of the LEAP Platform, you can add a `platform` section to the `provider.ENV.json` file (where ENV is the name of the environment in question).
+
+The available options are `platform.version`, `platform.branch`, or `platform.commit`. For example:
+
+ {
+ "platform": {
+ "version": "1.6.1",
+ "branch": "develop",
+ "commit": "5df867fbd3a78ca4160eb54d708d55a7d047bdb2"
+ }
+ }
+
+You can use any combination of `version`, `branch`, and `commit` to specify the binding. The values for `branch` and `commit` only work if the `leap_platform` directory is a git repository.
+
+The value for `commit` is passed directly through to `git log` to query for a list of acceptable commits. See [[man gitrevisions => https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html#_specifying_ranges]] to see how to specify ranges. For example:
+
+* `HEAD^..HEAD` - current commit must be head of the branch.
+* `3172444652af71bd771609d6b80258e70cc82ce9..HEAD` - current commit must be after 3172444652af71bd771609d6b80258e70cc82ce9.
+* `refs/tags/0.6.0rc1..refs/tags/0.6.0rc2` - current commit must be after tag 0.6.0rc1 and before or including tag 0.6.0rc2. \ No newline at end of file
diff --git a/doc/guide/keys-and-certificates.md b/doc/guide/keys-and-certificates.md
new file mode 100644
index 00000000..aef02ac6
--- /dev/null
+++ b/doc/guide/keys-and-certificates.md
@@ -0,0 +1,194 @@
+@title = "Keys and Certificates"
+@summary = "Working with SSH keys, secrets, and X.509 certificates."
+
+Working with SSH
+================================
+
+Whenever the `leap` command needs to push changes to a node or gather information from a node, it tunnels this command over SSH. Another way to put this: the security of your servers rests entirely on SSH. Because of this, it is important that you understand how `leap` uses SSH.
+
+SSH related files
+-------------------------------
+
+Assuming your provider directory is called 'provider':
+
+* `provider/nodes/crow/crow_ssh.pub` -- The public SSH host key for node 'crow'.
+* `provider/users/alice/alice_ssh.pub` -- The public SSH user key for user 'alice'. Anyone with the private key that corresponds to this public key will have root access to all nodes.
+* `provider/files/ssh/known_hosts` -- An autogenerated known_hosts, built from combining `provider/nodes/*/*_ssh.pub`. You must not edit this file directly. If you need to change it, remove or change one of the files that is used to generate `known_hosts` and then run `leap compile`.
+* `provider/files/ssh/authorized_keys` -- An autogenerated list of all the user SSH keys with root access to the nodes. It is created from `provider/users/*/*_ssh.pub`. You must not edit this file directly. If you need to change it, remove or change one of the files that is used to generate `authorized_keys` and then run `leap compile`.
+
+All of these files should be committed to source control.
+
+If you rename, remove, or add a node with `leap node [mv|add|rm]` the SSH key files and the `known_hosts` file will get properly updated.
+
+SSH and local nodes
+-----------------------------
+
+Local nodes are run as Vagrant virtual machines. The `leap` command handles SSH slightly differently for these nodes.
+
+Basically, all the SSH security is turned off for local nodes. Since local nodes only exist for a short time on your computer and can't be reached from the internet, this is not a problem.
+
+Specifically, for local nodes:
+
+1. `known_hosts` is never updated with local node keys, since the SSH public key of a local node is different for each user.
+2. `leap` entirely skips the checking of host keys when connecting with a local node.
+3. `leap` adds the public Vagrant SSH key to the list of SSH keys for a user. The public Vagrant SSH key is a shared and insecure key that has root access to most Vagrant virtual machines.
+
+When SSH host key changes
+-------------------------------
+
+If the host key for a node has changed, you will get an error "WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED".
+
+To fix this, you need to remove the file `files/nodes/stompy/stompy_ssh.pub` and run `leap node init stompy`, where the node's name is 'stompy'. **Only do this if you are ABSOLUTELY CERTAIN that the node's SSH host key has changed**.
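+
+For example:
+
+    rm files/nodes/stompy/stompy_ssh.pub
+    leap node init stompy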
+
+Changing the SSH port
+--------------------------------
+
+Suppose you have a node `blinky` that has SSH listening on port 22 and you want to make it port 2200.
+
+First, modify the configuration for `blinky` to specify the variable `ssh.port` as 2200. Usually, this is done in `common.json` or in a tag file.
+
+For example, you could put this in `tags/production.json`:
+
+ {
+ "ssh": {
+ "port": 2200
+ }
+ }
+
+Run `leap compile` and open `hiera/blinky.yaml` to confirm that `ssh.port` is set to 2200. The port number must be specified as a number, not a string (no quotes).
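+
+In `hiera/blinky.yaml` the relevant part should look roughly like this (other keys omitted):
+
+    ssh:
+      port: 2200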
+
+Then, you need to deploy this change so that SSH will bind to 2200. You cannot simply run `leap deploy blinky`, because this command will default to using the variable `ssh.port`, which is now `2200`, while SSH on the node is still bound to 22.
+
+So, you manually override the port in the deploy command, using the old port:
+
+ leap deploy --port 22 blinky
+
+Afterwards, SSH on `blinky` should be listening on port 2200 and you can just run `leap deploy blinky` from then on.
+
+Sysadmins with multiple SSH keys
+-----------------------------------
+
+The command `leap add-user --self` allows only one SSH key. If you want to specify more than one key for a user, you can do it manually:
+
+ users/userx/userx_ssh.pub
+ users/userx/otherkey_ssh.pub
+
+All keys matching 'userx/*_ssh.pub' will be usable.
+
+Removing sysadmin access
+--------------------------------
+
+Suppose you want to remove `userx` from having any further ssh access to the servers. Do this:
+
+ rm -r users/userx
+ leap deploy
+
+X.509 Certificates
+================================
+
+Configuration options
+-------------------------------------------
+
+The `ca` option in provider.json provides settings used when generating CAs and certificates. The defaults are as follows:
+
+ {
+ "ca": {
+ "name": "= global.provider.ca.organization + ' Root CA'",
+ "organization": "= global.provider.name[global.provider.default_language]",
+ "organizational_unit": "= 'https://' + global.provider.domain",
+ "bit_size": 4096,
+ "digest": "SHA256",
+ "life_span": "10y",
+ "server_certificates": {
+ "bit_size": 2048,
+ "digest": "SHA256",
+ "life_span": "1y"
+ },
+ "client_certificates": {
+ "bit_size": 2048,
+ "digest": "SHA256",
+ "life_span": "2m",
+ "limited_prefix": "LIMITED",
+ "unlimited_prefix": "UNLIMITED"
+ }
+ }
+ }
+
+You should not need to override these defaults in your own provider.json, but you can if you want to. To see what values are used for your provider, run `leap inspect provider.json`.
+
+NOTE: A certificate `bit_size` greater than 2048 will probably not be recognized by most commercial CAs.
+
+Certificate Authorities
+-----------------------------------------
+
+There are three x.509 certificate authorities (CA) associated with your provider:
+
+1. **Commercial CA:** It is strongly recommended that you purchase a commercial cert for your primary domain. The goal of the platform is to not depend on the commercial CA system, but it does increase security and usability if you purchase a certificate. The cert for the commercial CA must live at `files/cert/commercial_ca.crt`.
+2. **Server CA:** This is a self-signed CA responsible for signing all the **server** certificates. The private key lives at `files/ca/ca.key` and the public cert lives at `files/ca/ca.crt`. The key is very sensitive information and must be kept private. The public cert is distributed publicly.
+3. **Client CA:** This is a self-signed CA responsible for signing all the **client** certificates. The private key lives at `files/ca/client_ca.key` and the public cert lives at `files/ca/client_ca.crt`. Neither file is distributed publicly. It is not a big deal if the private key for the client CA is compromised: you can just generate a new one and re-deploy.
+
+To generate both the Server CA and the Client CA, run the command:
+
+ leap cert ca
+
+Server certificates
+-----------------------------------
+
+Almost every server in your service provider will have an x.509 certificate, generated by the `leap` command using the Server CA. Whenever you modify any settings of a node that might affect its certificate (like changing the IP address, hostname, or settings in provider.json), you can magically regenerate all the certs that need to be regenerated with this command:
+
+ leap cert update
+
+Run `leap help cert update` for notes on usage options.
+
+Because the server certificates are generated locally on your personal machine, the private key for the Server CA need never be put on any server. It is up to you to keep this file secure.
+
+Client certificates
+--------------------------------
+
+Every LEAP client gets its own time-limited client certificate. This cert is used to connect to the OpenVPN gateway (and probably other things in the future). It is generated on the fly by the webapp using the Client CA.
+
+To make this work, the private key of the Client CA is made available to the webapp. This might seem bad, but compromise of the Client CA simply allows the attacker to use the OpenVPN gateways without paying. In the future, we plan to add a command to automatically regenerate the Client CA periodically.
+
+There are two types of client certificates: limited and unlimited. A client using a limited cert will have its bandwidth limited to the rate specified by `provider.service.bandwidth_limit` (in Bytes per second). An unlimited cert is given to the user if they authenticate and the user's service level matches one configured in `provider.service.levels` without bandwidth limits. Otherwise, the user is given a limited client cert.
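+
+For example, a bandwidth limit could be configured in provider.json roughly like this (the value is illustrative, in Bytes per second):
+
+    {
+      "service": {
+        "bandwidth_limit": 102400
+      }
+    }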
+
+Commercial certificates
+-----------------------------------
+
+We strongly recommend that you use a commercial signed server certificate for your primary domain (in other words, a certificate with a common name matching whatever you have configured for `provider.domain`). This provides several benefits:
+
+1. When users visit your website, they don't get a scary notice that something is wrong.
+2. When a user runs the LEAP client, selecting your service provider will not cause a warning message.
+3. When other providers first discover your provider, they are more likely to trust your provider key if it is fetched over a commercially verified link.
+
+The LEAP platform is designed so that it assumes you are using a commercial cert for the primary domain of your provider, but all other servers are assumed to use non-commercial certs signed by the Server CA you create.
+
+To generate a CSR, run:
+
+ leap cert csr
+
+This command will generate the CSR and private key matching `provider.domain` (you can change the domain with the `--domain=DOMAIN` switch). It also generates a server certificate signed with the Server CA. You should delete this certificate and replace it with a real one once it is created by your commercial CA.
+
+The related commercial cert files are:
+
+    files/
+      cert/
+        domain.org.crt     # Server certificate for domain.org, obtained by commercial CA.
+        domain.org.csr     # Certificate signing request.
+        domain.org.key     # Private key for your certificate.
+        commercial_ca.crt  # The CA cert obtained from the commercial CA.
+
+The private key file is extremely sensitive and care should be taken with its provenance.
+
+If your commercial CA has a chained CA cert, you should be OK if you just put the **last** cert in the chain into the `commercial_ca.crt` file. This only works if the other CAs in the chain have certs in the debian package `ca-certificates`, which is the case for almost all CAs.
+
+If you want to add additional fields to the CSR, like country, city, or locality, you can configure these values in provider.json like so:
+
+ "ca": {
+ "server_certificates": {
+ "country": "US",
+ "state": "Washington",
+ "locality": "Seattle"
+ }
+ }
+
+If they are not present, the CSR will be created without them.
diff --git a/doc/guide/miscellaneous.md b/doc/guide/miscellaneous.md
new file mode 100644
index 00000000..c38c007c
--- /dev/null
+++ b/doc/guide/miscellaneous.md
@@ -0,0 +1,14 @@
+@title = "Miscellaneous"
+@summary = "Miscellaneous commands you may need to know."
+
+Facts
+==============================
+
+There are a few cases when we must gather internal data from a node before we can successfully deploy to other nodes. This is what `facts.json` is for. It stores a snapshot of certain facts about each node, as needed. Entries in `facts.json` are updated automatically when you initialize, rename, or remove a node. To manually force a full update of `facts.json`, run:
+
+ leap facts update FILTER
+
+Run `leap help facts update` for more information.
+
+The file `facts.json` should be committed to source control. You might not have a `facts.json` if one is not required for your provider.
+
diff --git a/doc/guide/nodes.md b/doc/guide/nodes.md
new file mode 100644
index 00000000..cf225449
--- /dev/null
+++ b/doc/guide/nodes.md
@@ -0,0 +1,187 @@
+@title = "Nodes"
+@summary = "Working with nodes, services, tags, and locations."
+
+Node types
+================================
+
+Every node has one or more services that determine the node's function within your provider's infrastructure.
+
+When adding a new node to your provider, you should ask yourself four questions:
+
+* **many or few?** Some services benefit from having many nodes, while some services are best run on only one or two nodes.
+* **required or optional?** Some services are required, while others can be left out.
+* **who does the node communicate with?** Some services communicate very heavily with other particular services. Nodes running these services should be close together.
+* **public or private?** Some services communicate with the public internet, while others only need to communicate with other nodes in the infrastructure.
+
+Brief overview of the services:
+
+* **webapp**: The web application. Runs both the webapp control panel for users and admins and the REST API that the client uses. Needs to communicate heavily with `couchdb` nodes. You need at least one, and it is good to have two for redundancy. The webapp does not get a lot of traffic, so you will not need many.
+* **couchdb**: The database for users and user data. You can get away with just one, but for proper redundancy you should have at least three. Communicates heavily with `webapp`, `mx`, and `soledad` nodes.
+* **soledad**: Handles the data syncing with clients. Typically combined with `couchdb` service, since it communicates heavily with couchdb.
+* **mx**: Incoming and outgoing MX servers. Communicates with the public internet, clients, and `couchdb` nodes.
+* **openvpn**: OpenVPN gateway for clients. You need at least one, but add as many as needed to support the bandwidth your users require. The `openvpn` nodes are autonomous and don't need to communicate with any other nodes. Often combined with the `tor` service.
+* **monitor**: Internal service to monitor all the other nodes. Currently, you can have zero or one `monitor` service defined. It is required that the monitor be on the webapp node. It was not designed to be run as a separate node service.
+* **tor**: Sets up a tor exit node, unconnected to any other service.
+* **dns**: Not yet implemented.
+
+Webapp
+-----------------------------------
+
+The webapp node is responsible for both the user-facing web application and the API that the client interacts with.
+
+Some users can be "admins" with special powers to answer tickets and close accounts. To make an account into an administrator, you need to configure the `webapp.admins` property with an array of user names.
+
+For example, to make users `alice` and `bob` into admins, create a file `services/webapp.json` with the following content:
+
+ {
+ "webapp": {
+ "admins": ["bob", "alice"]
+ }
+ }
+
+And then redeploy to all webapp nodes:
+
+ leap deploy webapp
+
+By putting this in `services/webapp.json`, you will ensure that all webapp nodes inherit the value for `webapp.admins`.
+
+Services
+================================
+
+What nodes do you need for a provider that offers particular services?
+
+<table class="table table-striped">
+<tr>
+ <th>Node Type</th>
+ <th>VPN Service</th>
+ <th>Email Service</th>
+ <th>Notes</th>
+</tr>
+<tr>
+ <td>webapp</td>
+ <td>required</td>
+ <td>required</td>
+ <td></td>
+</tr>
+<tr>
+ <td>couchdb</td>
+ <td>required</td>
+ <td>required</td>
+<td></td>
+</tr>
+<tr>
+ <td>soledad</td>
+ <td>not used</td>
+ <td>required</td>
+<td></td>
+</tr>
+<tr>
+ <td>mx</td>
+ <td>not used</td>
+ <td>required</td>
+ <td></td>
+</tr>
+<tr>
+ <td>openvpn</td>
+ <td>required</td>
+ <td>not used</td>
+ <td></td>
+</tr>
+<tr>
+ <td>monitor</td>
+ <td>optional</td>
+ <td>optional</td>
+ <td>This service must be on the webapp node</td>
+</tr>
+<tr>
+ <td>tor</td>
+ <td>optional</td>
+ <td>optional</td>
+ <td></td>
+</tr>
+</table>
+
+Locations
+================================
+
+All nodes should have a `location.name` specified, and optionally additional information about the location, like the time zone. This location information is used for two things:
+
+* Determine which nodes can, or must, communicate with one another via a local network. Some virtualization environments, such as OpenStack, require that nodes on the same network communicate with one another via the local network.
+* Allow the client to prefer connections to nodes that are physically closer to the user. This is particularly important for OpenVPN nodes.
+
+The location stanza in a node's config file looks like this:
+
+ {
+ "location": {
+ "id": "ankara",
+ "name": "Ankara",
+ "country_code": "TR",
+ "timezone": "+2",
+ "hemisphere": "N"
+ }
+ }
+
+The fields:
+
+* `id`: An internal handle to use for this location. If two nodes have matching `location.id` values, then they are treated as being on a local network with one another. This value defaults to a lowercased, underscored version of `location.name` (e.g. "Ankara" becomes "ankara").
+* `name`: Can be anything, might be displayed to the user in the client if they choose to manually select a gateway.
+* `country_code`: The [ISO 3166-1](https://en.wikipedia.org/wiki/ISO_3166-1) two letter country code.
+* `timezone`: The timezone expressed as an offset from UTC (in standard time, not daylight savings). You can look up the timezone using this [handy map](http://www.timeanddate.com/time/map/).
+* `hemisphere`: This should be "S" for all servers in South America, Africa, or Australia. Otherwise, this should be "N".
+
+These location options are very imprecise, but good enough for most usage. The client often does not know its own location precisely either. Instead, the client makes an educated guess at location based on the OS's timezone and locale.
+
+If you have multiple nodes in a single location, it is best to use a tag for the location. For example:
+
+`tags/ankara.json`:
+
+ {
+ "location": {
+ "name": "Ankara",
+ "country_code": "TR",
+ "timezone": "+2",
+ "hemisphere": "N"
+ }
+ }
+
+`nodes/vpngateway.json`:
+
+ {
+ "services": "openvpn",
+ "tags": ["production", "ankara"],
+ "ip_address": "1.1.1.1",
+ "openvpn": {
+ "gateway_address": "1.1.1.2"
+ }
+ }
+
+Unless you are using OpenStack or AWS, setting `location` for nodes is not required. It is, however, highly recommended.
+
+Disabling Nodes
+=====================================
+
+There are two ways to temporarily disable a node:
+
+**Option 1: disabled environment**
+
+You can assign an environment to the node that marks it as disabled. Then, if you use environment pinning, the node will be ignored when you deploy. For example:
+
+ {
+ "environment": "disabled"
+ }
+
+Then use `leap env pin ENV` to pin the environment to something other than 'disabled'. This only works if all the other nodes are also assigned to some environment.
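+
+For example, assuming the rest of your nodes are assigned to the `production` environment:
+
+    leap env pin production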
+
+**Option 2: enabled == false**
+
+If a node has a property `enabled` set to false, then the `leap` command will skip over the node and pretend that it does not exist. For example:
+
+ {
+ "ip_address": "1.1.1.1",
+ "services": ["openvpn"],
+ "enabled": false
+ }
+
+**Option 3: no-deploy**
+
+If the file `/etc/leap/no-deploy` exists on a node, then when you run the command `leap deploy` it will halt and refuse to deploy to that node (if the node was going to be included in the deploy).
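+
+For example, to disable deploys to the `vpngateway` node from the example above, you could create the file by hand, and later remove it to re-enable deploys:
+
+    leap ssh vpngateway
+    touch /etc/leap/no-deploy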
diff --git a/doc/service-diagram.odg b/doc/service-diagram.odg
new file mode 100644
index 00000000..09265c2d
--- /dev/null
+++ b/doc/service-diagram.odg
Binary files differ
diff --git a/doc/service-diagram.png b/doc/service-diagram.png
new file mode 100644
index 00000000..85e62436
--- /dev/null
+++ b/doc/service-diagram.png
Binary files differ
diff --git a/doc/troubleshooting/en.haml b/doc/troubleshooting/en.haml
new file mode 100644
index 00000000..f0f1359c
--- /dev/null
+++ b/doc/troubleshooting/en.haml
@@ -0,0 +1,3 @@
+- @title = "Troubleshooting"
+
+= child_summaries \ No newline at end of file
diff --git a/doc/troubleshooting/known-issues.md b/doc/troubleshooting/known-issues.md
new file mode 100644
index 00000000..4defc886
--- /dev/null
+++ b/doc/troubleshooting/known-issues.md
@@ -0,0 +1,115 @@
+@title = 'Leap Platform Release Notes'
+@nav_title = 'Known issues'
+@summary = 'Known issues in the Leap Platform.'
+@toc = true
+
+Here you can find documentation about known issues and potential work-arounds in the current Leap Platform release.
+
+0.6.0
+==============
+
+Upgrading
+------------------
+
+Upgrade your leap_platform to 0.6 and make sure you have the latest leap_cli.
+
+**Update leap_platform:**
+
+ cd leap_platform
+ git pull
+ git checkout -b 0.6.0 0.6.0
+
+**Update leap_cli:**
+
+If it is installed as a gem from rubygems:
+
+ sudo gem update leap_cli
+
+If it is installed as a gem from source:
+
+ cd leap_cli
+ git pull
+ git checkout master
+ rake build
+ sudo rake install
+
+If it is run directly from source:
+
+ cd leap_cli
+ git pull
+ git checkout master
+
+To upgrade:
+
+ leap --version # must be at least 1.6.2
+ leap cert update
+ leap deploy
+ leap test
+
+If the tests fail, try deploying again. If a test fails because there are two tapicero daemons running, you need to ssh into the server, kill all the tapicero daemons manually, and then try deploying again (sometimes the daemon from platform 0.5 would put its PID file in an odd place).
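+
+For example, one way to kill the stray daemons manually (the exact PIDs will differ on your system):
+
+    leap ssh <nodename>
+    ps aux | grep tapicero    # note the PIDs of all running tapicero daemons
+    kill <pid>                # repeat for each PID, then log out and deploy again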
+
+OpenVPN
+------------------
+
+On deployment to an openvpn node, if the following error occurs:
+
+ - err: /Stage[main]/Site_openvpn/Service[openvpn]/ensure: change from stopped to running failed: Could not start Service[openvpn]: Execution of '/etc/init.d/openvpn start' returned 1: at /srv/leap/puppet/modules/site_openvpn/manifests/init.pp:189
+
+this is likely the result of a kernel upgrade that happened during the deployment, requiring that the machine be restarted before this service can start. To confirm this, log in to the node (`leap ssh <nodename>`) and look at the end of `/var/log/daemon.log`:
+
+ # tail /var/log/daemon.log
+ Nov 22 19:04:15 snail ovpn-udp_config[16173]: ERROR: Cannot open TUN/TAP dev /dev/net/tun: No such device (errno=19)
+ Nov 22 19:04:15 snail ovpn-udp_config[16173]: Exiting due to fatal error
+
+If you see this error, simply restart the node.
+
+CouchDB
+---------------------
+
+At the moment, we strongly advise having only one bigcouch server, for stability purposes.
+
+With multiple couch nodes (not recommended at this time), in some scenarios, such as when certain components are unavailable, couchdb syncing will break. When things are brought back to normal, shortly after restart, the nodes will attempt to resync all their data and can fail to complete this process because they run out of file descriptors. A symptom of this is that the webapp won't allow you to register or login, and `/opt/bigcouch/var/log/bigcouch.log` is huge, with a lot of errors that include (over multiple lines): `{error, emfile}}`. We have raised the limits for file descriptors available to bigcouch to try to accommodate this situation, but if you still experience it, you may need to increase the ulimit values in `/etc/sv/bigcouch/run` and restart bigcouch while monitoring the open file descriptors. We hope that in the next platform release, a newer couchdb will be better at handling these resources.
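+
+For example, a hypothetical excerpt of the relevant line in `/etc/sv/bigcouch/run` (the appropriate limit depends on your system and load):
+
+    ulimit -n 65536   # hypothetical value: raise the max open file descriptors before bigcouch starts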
+
+You can also see the number of file descriptors in use by doing:
+
+    # watch -n1 -d 'lsof -p `pidof beam` | wc -l'
+
+The command `leap db destroy` will not automatically recreate the databases. You must run `leap deploy` afterwards to recreate them.
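+
+That is, to wipe and then recreate the databases:
+
+    leap db destroy
+    leap deploy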
+
+User setup and ssh
+------------------
+
+At the moment, it is only possible to add an admin who will have access to all LEAP servers (see: https://leap.se/code/issues/2280)
+
+The command `leap add-user --self` allows only one SSH key. If you want to specify more than one key for a user, you can do it manually:
+
+ users/userx/userx_ssh.pub
+ users/userx/otherkey_ssh.pub
+
+All keys matching 'userx/*_ssh.pub' will be used for that user.
+
+Deploying
+---------
+
+If you have any errors during a run, please try to deploy again as this often solves non-deterministic issues that were not uncovered in our testing. Please re-deploy with `leap -v2 deploy` to get more verbose logs and capture the complete output to provide to us for debugging.
+
+If your Debian mirror fails during a deploy for some reason, such as a network anomaly or the mirror itself being out of date, then the platform deployment will not complete properly. Check that the mirror is up and deploy again once the problem is resolved (see: https://leap.se/code/issues/1091)
+
+If deployment gives 'error: in `%`: too few arguments (ArgumentError)', this is because you attempted to deploy before initializing the node. Please initialize the node first and then deploy afterwards (see: https://leap.se/code/issues/2550)
+
+This release has no ability to custom configure apt sources or proxies (see: https://leap.se/code/issues/1971)
+
+When running a deploy at a verbosity level of 2 or above, you will notice puppet deprecation warnings. These are known, and we are working on fixing them.
+
+IPv6
+----
+
+As of this release, IPv6 is not supported by the VPN configuration. If IPv6 is detected on the client's network, it is blocked, and the client should fall back to IPv4. We plan on adding IPv6 support in an upcoming release.
+
+
+Special Environments
+--------------------
+
+When deploying to OpenStack release "nova" or newer, you will need to do an initial deploy, then when it has finished run `leap facts update` and then deploy again (see: https://leap.se/code/issues/3020)
+
+It is not possible to actually use the EIP openvpn server on vagrant nodes (see: https://leap.se/code/issues/2401)
diff --git a/doc/troubleshooting/tests.md b/doc/troubleshooting/tests.md
new file mode 100644
index 00000000..b85c19d2
--- /dev/null
+++ b/doc/troubleshooting/tests.md
@@ -0,0 +1,70 @@
+@title = 'Tests and Monitoring'
+@summary = 'Testing and monitoring your infrastructure.'
+@toc = true
+
+## Troubleshooting Tests
+
+At any time, you can run troubleshooting tests on the nodes of your provider infrastructure to check to see if things seem to be working correctly. If there is a problem, these tests should help you narrow down precisely where the problem is.
+
+To run tests on the nodes matching FILTER:
+
+ leap test run FILTER
+
+For example, you can test a single node (`leap test elephant`), a specific environment (`leap test development`), or any tag (`leap test soledad`).
+
+Alternately, you can run tests on all nodes (probably only useful if you have pinned the environment):
+
+ leap test
+
+The tests that are performed are located in the platform under the `tests` directory.
+
+## Testing with the bitmask client
+
+Download the provider CA:
+
+ wget --no-check-certificate https://example.org/ca.crt -O /tmp/ca.crt
+
+Start bitmask:
+
+ bitmask --ca-cert-file /tmp/ca.crt
+
+## Testing Receiving Mail
+
+Use e.g. swaks to send a test mail:
+
+ swaks -f noone@example.org -t testuser@example.org -s example.org
+
+and use your favorite mail client to examine your inbox.
+
+You can also use [offlineimap](http://offlineimap.org/) to fetch mails:
+
+ offlineimap -c vagrant/.offlineimaprc.example.org
+
+WARNING: Use offlineimap *only* for testing/debugging, because it will save the mails *decrypted* locally to your disk!
+
+## Monitoring
+
+In order to set up a monitoring node, you simply add the `monitor` service to the node's configuration file. It can be combined with any other service, but we propose that you add it to the webapp node, as that node is already publicly accessible via HTTPS.
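+
+For example, a combined webapp/monitor node configuration such as `nodes/webapp1.json` (hypothetical name and address) might look like this:
+
+    {
+      "ip_address": "1.1.1.1",
+      "services": ["webapp", "monitor"],
+      "tags": ["production"]
+    }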
+
+After deploying, this node will regularly poll every node to ask for the status of various health checks. These health checks include the checks run with `leap test`, plus many others.
+
+We use [Nagios](http://www.nagios.org/) together with [Check MK agent](https://en.wikipedia.org/wiki/Check_MK) for running checks on remote hosts.
+
+One nagios installation will monitor all nodes in all your environments. You can log into the monitoring web interface via [https://DOMAIN/nagios3/](https://DOMAIN/nagios3/). The username is `nagiosadmin` and the password is found in the secrets.json file in your provider directory.
+Nagios will send out mails to the `contacts` address provided in `provider.json`.
+
+
+## Nagios Frontends
+
+There are other ways to check and get notified by Nagios besides regularly checking the Nagios web interface or reading email notifications. Check out the [Frontends (GUIs and CLIs)](http://exchange.nagios.org/directory/Addons/Frontends-%28GUIs-and-CLIs%29) page on the Nagios project website.
+A recommended status tray application is [Nagstamon](https://nagstamon.ifw-dresden.de/), which is available for Linux, Mac OS X, and Windows. It can not only notify you of host/service failures, but also lets you acknowledge or recheck them.
+
+### Log Monitoring
+
+At the moment, we use [check-mk-agent-logwatch](https://mathias-kettner.de/checkmk_check_logwatch.html) for searching logs for irregularities.
+Logs are parsed for patterns using a blacklist, and are stored in `/var/lib/check_mk/logwatch/<Nodename>`.
+
+In order to "acknowledge" a log warning, you need to log in to the monitoring server, and delete the corresponding file in `/var/lib/check_mk/logwatch/<Nodename>`. This should be done via the nagios webinterface in the future.
+
diff --git a/doc/troubleshooting/vagrant.md b/doc/troubleshooting/vagrant.md
new file mode 100644
index 00000000..ad284161
--- /dev/null
+++ b/doc/troubleshooting/vagrant.md
@@ -0,0 +1,45 @@
+@title = 'LEAP Platform Vagrant testing'
+@nav_title = 'Vagrant Integration'
+@summary = 'Testing your provider with Vagrant'
+
+Setting up Vagrant for testing the platform
+=============================================
+
+There are two ways you can set up the LEAP platform using Vagrant.
+
+Using the Vagrantfile provided by Leap Platform
+-----------------------------------------------
+
+This is by far the easiest way. It will install a single-node mail server in the default
+configuration with a single command.
+
+Clone the platform with
+
+ git clone https://github.com/leapcode/leap_platform.git
+
+Start the vagrant box with
+
+ cd leap_platform
+ vagrant up
+
+Follow the instructions on how to configure your `/etc/hosts`
+in order to use the provider.
+
+You can log in via SSH as the system user `vagrant` (the password is the same).
+
+There are two users preconfigured:
+
+* `testuser` with password `hallo123`
+* `testadmin` with password `hallo123`
+
+
+Use the leap_cli vagrant integration
+------------------------------------
+
+Install leap_cli and leap_platform on your host, configure a provider from scratch and use the `leap local` commands to manage your vagrant node(s).
+
+See https://leap.se/en/docs/platform/development for how to use the leap_cli vagrant
+integration, and https://leap.se/en/docs/platform/tutorials/single-node-email for how
+to set up a single-node mail server.
+
+
diff --git a/doc/troubleshooting/where-to-look.md b/doc/troubleshooting/where-to-look.md
new file mode 100644
index 00000000..fbd95931
--- /dev/null
+++ b/doc/troubleshooting/where-to-look.md
@@ -0,0 +1,249 @@
+@title = 'Where to look for errors'
+@nav_title = 'Where to look'
+@toc = true
+
+
+General
+=======
+
+* Please increase verbosity when debugging or filing issues in our issue tracker. You can do this by adding a verbosity flag to the `leap` command, e.g. `leap -v 2 deploy`.
+
+Webapp
+======
+
+Places to look for errors
+-------------------------
+
+* `/var/log/apache2/error.log`
+* `/srv/leap/webapp/log/production.log`
+* `/var/log/syslog` (watch out for stunnel issues)
+* `/var/log/leap/*`
+
+Is haproxy ok?
+---------------
+
+
+ curl -s -X GET "http://127.0.0.1:4096"
+
+Is couchdb accessible through stunnel?
+---------------------------------------
+
+* Depending on how many couch nodes you have, increase the port for every test
+ (see /etc/haproxy/haproxy.cfg for the server/port mapping):
+
+
+ curl -s -X GET "http://127.0.0.1:4000"
+ curl -s -X GET "http://127.0.0.1:4001"
+ ...
+
+
+Check couchdb acl as admin
+--------------------------
+
+ mkdir /etc/couchdb
+ cat /srv/leap/webapp/config/couchdb.yml.admin # see username and password
+ echo "machine 127.0.0.1 login admin password <PASSWORD>" > /etc/couchdb/couchdb-admin.netrc
+ chmod 600 /etc/couchdb/couchdb-admin.netrc
+
+ curl -s --netrc-file /etc/couchdb/couchdb-admin.netrc -X GET "http://127.0.0.1:4096"
+ curl -s --netrc-file /etc/couchdb/couchdb-admin.netrc -X GET "http://127.0.0.1:4096/_all_dbs"
+
+Check couchdb acl as unprivileged user
+---------------------------------------
+
+ cat /srv/leap/webapp/config/couchdb.yml # see username and password
+ echo "machine 127.0.0.1 login webapp password <PASSWORD>" > /etc/couchdb/couchdb-webapp.netrc
+ chmod 600 /etc/couchdb/couchdb-webapp.netrc
+
+ curl -s --netrc-file /etc/couchdb/couchdb-webapp.netrc -X GET "http://127.0.0.1:4096"
+ curl -s --netrc-file /etc/couchdb/couchdb-webapp.netrc -X GET "http://127.0.0.1:4096/_all_dbs"
+
+
+Check client config files
+-------------------------
+
+ https://example.net/provider.json
+ https://example.net/1/config/smtp-service.json
+ https://example.net/1/config/soledad-service.json
+ https://example.net/1/config/eip-service.json
+
+
+Soledad
+=======
+
+ /var/log/soledad.log
+
+
+Couchdb
+=======
+
+Places to look for errors
+-------------------------
+
+* `/opt/bigcouch/var/log/bigcouch.log`
+* `/var/log/syslog` (watch out for stunnel issues)
+
+
+
+Bigcouch membership
+-------------------
+
+* All nodes configured for the provider should appear here:
+
+<pre>
+ curl -s --netrc-file /etc/couchdb/couchdb.netrc -X GET 'http://127.0.0.1:5986/nodes/_all_docs'
+</pre>
+
+* All configured nodes should show up under "cluster_nodes", and the ones online and communicating with each other should appear under "all_nodes". This example output shows the configured cluster nodes `couch1.bitmask.net` and `couch2.bitmask.net`, but `couch2.bitmask.net` is currently not accessible from `couch1.bitmask.net`.
+
+
+<pre>
+ curl -s --netrc-file /etc/couchdb/couchdb.netrc 'http://127.0.0.1:5984/_membership'
+ {"all_nodes":["bigcouch@couch1.bitmask.net"],"cluster_nodes":["bigcouch@couch1.bitmask.net","bigcouch@couch2.bitmask.net"]}
+</pre>
+
+* Sometimes an `/etc/init.d/bigcouch restart` on all nodes is needed to register new nodes.
+
+Databases
+---------
+
+* The following output shows all necessary DBs that should be present. Note that the `user-0123456....` DBs are the data stores for particular users.
+
+<pre>
+ curl -s --netrc-file /etc/couchdb/couchdb.netrc -X GET 'http://127.0.0.1:5984/_all_dbs'
+ ["customers","identities","sessions","shared","tickets","tokens","user-0","user-9d34680b01074c75c2ec58c7321f540c","user-9d34680b01074c75c2ec58c7325fb7ff","users"]
+</pre>
+
+
+
+
+Design Documents
+----------------
+
+* Is the User `_design` doc available?
+
+
+<pre>
+ curl -s --netrc-file /etc/couchdb/couchdb.netrc -X GET "http://127.0.0.1:5984/users/_design/User"
+</pre>
+
+Is couchdb cluster backend accessible through stunnel?
+-------------------------------------------------------
+
+* Find out how many connections are set up for the couchdb cluster backend:
+
+<pre>
+ grep "accept = 127.0.0.1" /etc/stunnel/*
+</pre>
+
+
+* Now connect to all of those local endpoints to see if they are up. Each of these tests should return something like "localhost [127.0.0.1] 4000 (?) open".
+
+<pre>
+ nc -v 127.0.0.1 4000
+ nc -v 127.0.0.1 4001
+ ...
+</pre>
+
+
+MX
+==
+
+Places to look for errors
+-------------------------
+
+* `/var/log/mail.log`
+* `/var/log/leap_mx.log`
+* `/var/log/syslog` (watch out for stunnel issues)
+
+Is couchdb accessible through stunnel?
+---------------------------------------
+
+* Depending on how many couch nodes you have, increase the port for every test
+ (see /etc/haproxy/haproxy.cfg for the server/port mapping):
+
+
+ curl -s -X GET "http://127.0.0.1:4000"
+ curl -s -X GET "http://127.0.0.1:4001"
+ ...
+
+Query leap-mx
+-------------
+
+* For a user account:
+
+
+<pre>
+ postmap -v -q "joe@dev.bitmask.net" tcp:localhost:2244
+ ...
+ postmap: dict_tcp_lookup: send: get jow@dev.bitmask.net
+ postmap: dict_tcp_lookup: recv: 200
+ ...
+</pre>
+
+* For a mail alias:
+
+
+<pre>
+ postmap -v -q "joe@dev.bitmask.net" tcp:localhost:4242
+ ...
+ postmap: dict_tcp_lookup: send: get joe@dev.bitmask.net
+ postmap: dict_tcp_lookup: recv: 200 f01bc1c70de7d7d80bc1ad77d987e73a
+ postmap: dict_tcp_lookup: found: f01bc1c70de7d7d80bc1ad77d987e73a
+ f01bc1c70de7d7d80bc1ad77d987e73a
+ ...
+</pre>
+
+
+Check couchdb acl as unprivileged user
+---------------------------------------
+
+
+
+ cat /etc/leap/mx.conf # see username and password
+ echo "machine 127.0.0.1 login leap_mx password <PASSWORD>" > /etc/couchdb/couchdb-leap_mx.netrc
+ chmod 600 /etc/couchdb/couchdb-leap_mx.netrc
+
+ curl -s --netrc-file /etc/couchdb/couchdb-leap_mx.netrc -X GET "http://127.0.0.1:4096/_all_dbs" # pick one "user-<hash>" db
+ curl -s --netrc-file /etc/couchdb/couchdb-leap_mx.netrc -X GET "http://127.0.0.1:4096/user-de9c77a3d7efbc779c6c20da88e8fb9c"
+
+
+* You may need to check multiple times, because 127.0.0.1:4096 is haproxy load-balancing across the different couchdb nodes.
+
+
+Mailspool
+---------
+
+* Any file in the leap_mx mailspool for longer than a few seconds?
+
+
+
+<pre>
+ ls -la /var/mail/vmail/Maildir/cur/
+</pre>
+
+* Any mail in the postfix mailspool for longer than a few seconds?
+
+<pre>
+ mailq
+</pre>
+
+
+
+Testing mail delivery
+---------------------
+
+ swaks -f alice@example.org -t bob@example.net -s mx1.example.net --port 25
+ swaks -f varac@cdev.bitmask.net -t varac@cdev.bitmask.net -s chipmonk.cdev.bitmask.net --port 465 --tlsc
+ swaks -f alice@example.org -t bob@example.net -s mx1.example.net --port 587 --tls
+
+
+VPN
+===
+
+Places to look for errors
+-------------------------
+
+* `/var/log/syslog` (watch out for openvpn issues)
+
+
diff --git a/doc/tutorials/configure-provider.md b/doc/tutorials/configure-provider.md
new file mode 100644
index 00000000..969d541b
--- /dev/null
+++ b/doc/tutorials/configure-provider.md
@@ -0,0 +1,31 @@
+@title = 'Configure provider tutorial'
+@nav_title = 'Configure Provider'
+@summary = 'Explore how to configure your provider after the initial setup'
+
+
+Edit provider.json configuration
+--------------------------------------
+
+There are a few required settings in provider.json. At a minimum, you must have:
+
+ {
+ "domain": "example.org",
+ "name": "Example",
+ "contacts": {
+ "default": "email1@example.org"
+ }
+ }
+
+For a full list of possible settings, you can use `leap inspect` to see how provider.json is evaluated after including the inherited defaults:
+
+ $ leap inspect provider.json
+
+
+Examine Certs
+=============
+
+To see details about the keys and certs that `leap cert ca` and `leap cert csr` created, you can use `leap inspect` like so:
+
+ $ leap inspect files/ca/ca.crt
+
+NOTE: the files `files/ca/*.key` are extremely sensitive and must be carefully protected. The other key files are much less sensitive and can simply be regenerated if needed.
diff --git a/doc/tutorials/en.haml b/doc/tutorials/en.haml
new file mode 100644
index 00000000..1c73fc0f
--- /dev/null
+++ b/doc/tutorials/en.haml
@@ -0,0 +1,4 @@
+- @nav_title = "Tutorials"
+- @title = "Platform Tutorials"
+
+= child_summaries \ No newline at end of file
diff --git a/doc/tutorials/quick-start.md b/doc/tutorials/quick-start.md
new file mode 100644
index 00000000..a92cc9da
--- /dev/null
+++ b/doc/tutorials/quick-start.md
@@ -0,0 +1,385 @@
+@title = 'Quick Start Tutorial'
+@nav_title = 'Quick Start Tutorial'
+@summary = 'This tutorial walks you through the initial process of creating and deploying a minimal service provider running the LEAP Platform. This Quick Start guide will guide you through building a three node OpenVPN provider.'
+
+
+Our goal
+------------------
+
+We are going to create a minimal LEAP provider offering OpenVPN service. This basic setup can be expanded by adding more OpenVPN nodes to increase capacity or geographical diversity, or more webapp nodes to increase availability (at the moment, a single couchdb and a single webapp server are all that is supported; performance-wise, they are more than enough for most usage, since they are only lightly used). At the moment, we strongly advise having only one couchdb server, for stability purposes.
+
+Our goal is something like this:
+
+ $ leap list
+ NODES SERVICES TAGS
+ cheetah couchdb production
+ wildebeest webapp production
+ ostrich openvpn production
+
+NOTE: You won't be able to run that `leap list` command yet, not until we actually create the node configurations.
+
+Requirements
+------------
+
+In order to complete this Quick Start, you will need a few things:
+
+* You will need three real or paravirtualized virtual machines (KVM, Xen, Openstack, Amazon, but not Vagrant - sorry) that have a basic Debian Stable installed. If you allocate 20G of disk space to each node for the system, after this process is completed, you will have used less than 10% of that disk space. If you allocate 2 CPUs and 8G of memory to each node, that should be more than enough to begin with.
+* You should be able to SSH into them remotely, and know their root passwords, IP addresses, and SSH host keys.
+* You will need four different IPs. Each node gets a primary IP, and the OpenVPN gateway additionally needs a gateway IP.
+* The ability to create/modify DNS entries for your domain is preferable, but not needed. If you don't have access to DNS, you can work around this by modifying your local resolver, i.e. editing `/etc/hosts`.
+* You need to be aware that this process will make changes to your systems, so please be sure that these machines are a basic install with nothing configured or running for other purposes.
+* Your machines will need to be connected to the internet, and not behind a restrictive firewall.
+* You should work locally on your laptop/workstation (one that you trust and that is ideally full-disk encrypted) while going through this guide. This is important because the provider configurations you are creating contain sensitive data that should not reside on a remote machine. The `leap` command will login to your servers and configure the services.
+* You should do everything described below as an unprivileged user, and only run those commands as root that are noted with *sudo* in front of them. Other than those commands, there is no need for privileged access to your machine, and in fact things may not work correctly.
+
+All the commands in this tutorial are run on your sysadmin machine. In order to complete the tutorial, the sysadmin will do the following:
+
+* Install pre-requisites
+* Install the LEAP command-line utility
+* Check out the LEAP platform
+* Create a provider and its certificates
+* Setup the provider's nodes and the services that will reside on those nodes
+* Initialize the nodes
+* Deploy the LEAP platform to the nodes
+* Test that things worked correctly
+* Some additional commands
+
+We will walk you through each of these steps.
+
+
+Prepare your environment
+========================
+
+There are a few things you need to setup before you can get going. Just some packages, the LEAP cli and the platform.
+
+Install pre-requisites
+--------------------------------
+
+*Debian & Ubuntu*
+
+Install core prerequisites:
+
+ $ sudo apt-get install git ruby ruby-dev rsync openssh-client openssl rake make bzip2
+
+<!--
+*Mac OS*
+
+1. Install rubygems from https://rubygems.org/pages/download (unless the `gem` command is already installed).
+-->
+
+NOTE: leap_cli requires ruby 1.9 or later.
+
+
+Install the LEAP command-line utility
+-------------------------------------------------
+
+Install the `leap` command from rubygems.org:
+
+ $ sudo gem install leap_cli
+
+Alternately, you can install `leap` from source:
+
+ $ git clone https://leap.se/git/leap_cli
+ $ cd leap_cli
+ $ rake build
+ $ sudo rake install
+
+You can also install from source as an unprivileged user, if you want. For example, instead of `sudo rake install` you can do something like this:
+
+ $ rake install
+ # watch out for the directory leap is installed to, then i.e.
+ $ sudo ln -s ~/.gem/ruby/1.9.1/bin/leap /usr/local/bin/leap
+
+With either `rake install` or `sudo rake install`, you can now use `/usr/local/bin/leap`, which in most cases will be in your `$PATH`.
+
+If you have successfully installed the `leap` command, then you should be able to do the following:
+
+ $ leap --help
+
+This will list the command-line help options. If you receive an error when doing this, please read through the README.md in the `leap_cli` source to try and resolve any problems before going forwards.
+
+Check out the platform
+--------------------------
+
+The LEAP Platform is a series of puppet recipes and modules that will be used to configure your provider. You will need a local copy of the platform that will be used to setup your nodes and manage your services. To begin with, you will not need to modify the LEAP Platform.
+
+First we'll create a directory for LEAP things, and then we'll check out the platform code and initialize the modules:
+
+ $ mkdir ~/leap
+ $ cd ~/leap
+ $ git clone https://leap.se/git/leap_platform.git
+ $ cd leap_platform
+ $ git submodule sync; git submodule update --init
+
+
+Provider Setup
+==============
+
+A provider instance is a directory tree, usually stored in git, that contains everything you need to manage an infrastructure for a service provider. In this case, we create one for example.org and call the instance directory 'example'.
+
+ $ mkdir -p ~/leap/example
+
+Bootstrap the provider
+-----------------------
+
+Now, we will initialize this directory to make it a provider instance. Your provider instance will need to know where it can find the local copy of the git repository leap_platform, which we setup in the previous step.
+
+ $ cd ~/leap/example
+ $ leap new .
+
+NOTE: make sure you include that trailing dot!
+
+The `leap new` command will ask you for several required values:
+
+* domain: The primary domain name of your service provider. In this tutorial, we will be using "example.org".
+* name: The name of your service provider (we use "Example").
+* contact emails: A comma separated list of email addresses that should be used for important service provider contacts (for things like postmaster aliases, Tor contact emails, etc).
+* platform: The directory where you have a copy of the `leap_platform` git repository checked out.
+
+You could also have passed these configuration options on the command-line, like so:
+
+ $ leap new --contacts your@email.here --domain leap.example.org --name Example --platform=~/leap/leap_platform .
+
+You may want to poke around and see what is in the files we just created. For example:
+
+ $ cat provider.json
+
+Optionally, commit your provider directory using the version control software you fancy. For example:
+
+ $ git init
+ $ git add .
+ $ git commit -m "initial provider commit"
+
+Now add yourself as a privileged sysadmin who will have access to deploy to servers:
+
+ $ leap add-user --self
+
+NOTE: in most cases, `leap` must be run from within a provider instance directory tree (e.g. ~/leap/example).
+
+Create provider certificates
+----------------------------
+
+Create two certificate authorities, one for server certs and one for client
+certs (note: you only need to run this one command to get both):
+
+ $ leap cert ca
+
+Create a temporary cert for your main domain (you should replace it with a real commercial cert at some point):
+
+ $ leap cert csr
+
+To see details about the keys and certs that the prior two commands created, you can use `leap inspect` like so:
+
+ $ leap inspect files/ca/ca.crt
+
+Create the Diffie-Hellman parameters file, needed for forward secret OpenVPN ciphers:
+
+ $ leap cert dh
+
+NOTE: the files `files/ca/*.key` are extremely sensitive and must be carefully protected. The other key files are much less sensitive and can simply be regenerated if needed.
+
+
+Edit provider.json configuration
+--------------------------------------
+
+There are a few required settings in provider.json. At a minimum, you must have:
+
+ {
+ "domain": "example.org",
+ "name": "Example",
+ "contacts": {
+ "default": "email1@example.org"
+ }
+ }
+
+For a full list of possible settings, you can use `leap inspect` to see how provider.json is evaluated after including the inherited defaults:
+
+ $ leap inspect provider.json
+
+
+Setup the provider's nodes and services
+---------------------------------------
+
+A "node" is a server that is part of your infrastructure. Every node can have one or more services associated with it. Some nodes are "local" and used only for testing, see [Development](development) for more information.
+
+Create a node, with the service "webapp":
+
+ $ leap node add wildebeest ip_address:x.x.x.w services:webapp tags:production
+
+NOTE: replace x.x.x.w with the actual IP address of this node
+
+This created a node configuration file in `nodes/wildebeest.json`, but it did not do anything else. It also added the 'tag' called 'production' to this node. Tags allow us to conveniently group nodes together. When creating nodes, you should give them the tag 'production' if the node is to be used in your production infrastructure.
+
+The web application and the VPN nodes require a database, so let's create the database server node:
+
+ $ leap node add cheetah ip_address:x.x.x.x services:couchdb tags:production
+
+NOTE: replace x.x.x.x with the actual IP address of this node
+
+Now we need the OpenVPN gateway, so let's create that node:
+
+ $ leap node add ostrich ip_address:x.x.x.y openvpn.gateway_address:x.x.x.z services:openvpn tags:production
+
+NOTE: replace x.x.x.y with the IP address of the machine, and x.x.x.z with the second IP. OpenVPN gateways must be assigned two IP addresses: one for the host itself and one for the openvpn gateway. We do this to prevent incoming and outgoing VPN traffic from sharing the same IP. Without this, the client might send some traffic to other VPN users in the clear, bypassing the VPN.
+
+
+Setup DNS
+---------
+
+Now that you have the nodes configured, you should create the DNS entries for these nodes.
+
+Set up your DNS with these hostnames:
+
+ $ leap list --print ip_address,domain.full,dns.aliases
+ cheetah x.x.x.w, cheetah.example.org, null
+ wildebeest x.x.x.x, wildebeest.example.org, api.example.org
+ ostrich x.x.x.y, ostrich.example.org, null
+
+Alternately, you can adapt this zone file snippet:
+
+ $ leap compile zone
+
+If you cannot edit your DNS zone file, you can still test your provider by adding entries to your local resolver hosts file (`/etc/hosts` for linux):
+
+ x.x.x.w cheetah.example.org
+ x.x.x.x wildebeest.example.org api.example.org example.org
+ x.x.x.y ostrich.example.org
+
+Please don't forget about these entries; they will override DNS queries if you set up your DNS later.
+
+
+Initialize the nodes
+--------------------
+
+Node initialization only needs to be done once, but there is no harm in doing it multiple times:
+
+ $ leap node init production
+
+This will initialize all nodes with the tag "production". When `leap node init` is run, you will be prompted to verify the fingerprint of the SSH host key and to provide the root password of the server(s). You should only need to do this once.
+
+If you prefer, you can initialize each node, one at a time:
+
+ $ leap node init wildebeest
+ $ leap node init cheetah
+ $ leap node init ostrich
+
+Deploy the LEAP platform to the nodes
+--------------------
+
+Now you should deploy the platform recipes to the nodes. [Deployment can take a while to run](http://xkcd.com/303/), especially on the first run, as it needs to update the packages on the new machine.
+
+*Important note:* currently, nodes must be deployed in a certain order: the underlying couch database node(s) must be deployed first, and then all other nodes.
+
+ $ leap deploy cheetah
+
+Watch the output for any errors (in red). If everything worked fine, you should now have your first running node. If you do have errors, try doing the deploy again.
+
+However, to complete our three-node OpenVPN setup we still need the web application and the OpenVPN gateway (the LEAP web application requires the database, which is now deployed), so let's deploy to the webapp and openvpn nodes:
+
+ $ leap deploy wildebeest
+ $ leap deploy ostrich
+
+
+What is going on here?
+--------------------------------------------
+
+First, some background terminology:
+
+* **puppet**: Puppet is a system for automating deployment and management of servers (called nodes).
+* **hiera files**: In puppet, you can use something called a 'hiera file' to seed a node with a few configuration values. In LEAP, we go all out and put *every* configuration value needed for a node in the hiera file, and automatically compile a custom hiera file for each node.
+
+When you run `leap deploy`, a bunch of things happen, in this order:
+
+1. **Compile hiera files**: The hiera configuration file for each node is compiled in YAML format and saved in the directory `hiera`. The source material for this hiera file consists of all the JSON configuration files imported or inherited by the node's JSON config file.
+* **Copy required files to node**: All the files needed for puppet to run are rsync'ed to each node. This includes the entire leap_platform directory, as well as the node's hiera file and other files needed by puppet to set up the node (keys, binary files, etc).
+* **Puppet is run**: Once the node is ready, leap connects to the node via ssh and runs `puppet apply`. Puppet is applied locally on the node, without a daemon or puppetmaster.
+
+You can run `leap -v2 deploy` to see exactly what commands are being executed.
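+
+If you only want to inspect the compiled configuration without deploying, you can compile the hiera files separately and then look at them in the `hiera` directory (run `leap help compile` for the exact options; the per-node file name shown here is an assumption):
+
+    leap compile
+    cat hiera/wildebeest.yaml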
+
+
+Test that things worked correctly
+=================================
+
+You should now have three machines with the LEAP platform deployed to them, one for the web application, one for the database and one for the OpenVPN gateway.
+
+To run troubleshooting tests:
+
+ leap test
+
+If you want to confirm for yourself that things are working, you can perform the following manual tests.
+
+### Access the web application
+
+In order to connect to the web application in your browser, you need to point your domain at the IP address of the web application node (named wildebeest in this example).
+
+There are a lot of different ways to do this, but one easy way is to modify your `/etc/hosts` file. First, find the IP address of the webapp node:
+
+ $ leap list webapp --print ip_address
+
+Then modify `/etc/hosts` like so:
+
+ x.x.x.w leap.example.org
+
+Replace 'leap.example.org' with whatever you specified as the `domain` in the `leap new` command.
+
+Next, you can connect to the web application either using a web browser or via the API using the LEAP client. To use a browser, connect to https://leap.example.org (replacing that with your domain). Your browser will complain about an untrusted cert, but for now just bypass this. From there, you should be able to register a new user and login.
+
+### Use the VPN
+
+You should be able to simply test that the OpenVPN gateway works properly by doing the following:
+
+ $ leap test init
+ $ sudo openvpn test/openvpn/production_unlimited.ovpn
+
+Or, you can use the LEAP client (called "bitmask") to connect to your new provider, create a user and then connect to the VPN.
+
+
+Additional information
+======================
+
+It is useful to know a few additional things.
+
+Useful commands
+---------------
+
+Here are a few useful commands you can run on your new local nodes:
+
+* `leap ssh wildebeest` -- SSH into node wildebeest (requires `leap node init wildebeest` first).
+* `leap list` -- list all nodes.
+* `leap list production` -- list only those nodes with the tag 'production'
+* `leap list --print ip_address` -- list a particular attribute of all nodes.
+* `leap cert update` -- generate new certificates if needed.
+
+See the full command reference for more information.
+
+Node filters
+-------------------------------------------
+
+Many of the `leap` commands take a "node filter". You can use a node filter to target a command at one or more nodes.
+
+A node filter consists of one or more keywords, with an optional "+" before each keyword.
+
+* keywords can be a node name, a service type, or a tag.
+* the "+" before the keyword constructs an AND condition
+* otherwise, multiple keywords together construct an OR condition
+
+Examples:
+
+* `leap list openvpn` -- list all nodes with service openvpn.
+* `leap list openvpn +production` -- only nodes of service type openvpn AND tag production.
+* `leap deploy webapp openvpn` -- deploy to all webapp OR openvpn nodes.
+* `leap node init ostrich` -- just init the node named ostrich.
+
+Keep track of your provider configurations
+------------------------------------------
+
+You should commit your provider changes to your favorite VCS whenever things change. This way you can share your configurations with other admins, all they have to do is to pull the changes to stay up to date. Every time you make a change to your provider, such as adding nodes, services, generating certificates, etc. you should add those to your VCS, commit them and push them to where your repository is hosted.
+
+Note that your provider directory contains secrets! Those secrets include passwords for various services. You do not want to have those passwords readable by the world, so make sure that wherever you are hosting your repository, it is not public for the world to read.
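+
+For example, after adding the nodes in this tutorial (assuming you use git and have already configured a private remote named `origin`):
+
+    git add .
+    git commit -m "add cheetah, wildebeest and ostrich nodes"
+    git push origin master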
+
+What's next
+-----------------------------------
+
+Read the [LEAP platform guide](guide) to learn about planning and securing your infrastructure.
+
diff --git a/doc/tutorials/single-node-email.md b/doc/tutorials/single-node-email.md
new file mode 100644
index 00000000..b47496b9
--- /dev/null
+++ b/doc/tutorials/single-node-email.md
@@ -0,0 +1,282 @@
+@title = 'Single node email tutorial'
+@nav_title = 'Single node email'
+@summary = 'A single node email provider.'
+
+Quick Start - Single node setup
+===============================
+
+This tutorial walks you through the initial process of creating and deploying a minimal service provider running the [LEAP platform](platform).
+We will guide you through building a single node mail provider.
+
+Our goal
+------------------
+
+We are going to create a minimal LEAP provider offering Email service. This basic setup can be expanded by adding more webapp and couchdb nodes to increase availability (performance wise, a single couchdb and a single webapp are more than enough for most usage, since they are only lightly used, but you might want redundancy). Please note: currently it is not possible to safely add additional couchdb nodes at a later point. They should all be added in the beginning, so please consider carefully if you would like more before proceeding.
+
+Our goal is something like this:
+
+ $ leap list
+ NODES SERVICES TAGS
+ node1 couchdb, mx, soledad, webapp local
+
+NOTE: You won't be able to run that `leap list` command yet, not until we actually create the node configurations.
+
+Requirements
+------------
+
+In order to complete this Quick Start, you will need a few things:
+
+* You will need `one real or paravirtualized virtual machine` (Vagrant, KVM, Xen, Openstack, Amazon, …) that has a basic Debian Stable installed.
+* You should be able to `SSH into it` remotely, and know its root password, IP address, and SSH host key.
+* The ability to `create/modify DNS entries` for your domain is preferable, but not needed. If you don't have access to DNS, you can work around this by modifying your local resolver, i.e. editing `/etc/hosts`.
+* You need to be aware that this process will make changes to your machine, so please be sure that it is a basic install with nothing configured or running for other purposes.
+* Your machine will need to be connected to the internet, and not behind a restrictive firewall.
+* You should `work locally on your laptop/workstation` (one that you trust and that is ideally full-disk encrypted) while going through this guide. This is important because the provider configurations you are creating contain sensitive data that should not reside on a remote machine. The leap cli utility will login to your servers and configure the services.
+* You should do everything described below as an `unprivileged user`, and only run those commands as root that are noted with *sudo* in front of them. Other than those commands, there is no need for privileged access to your machine, and in fact things may not work correctly.
+
+All the commands in this tutorial are run on your sysadmin machine. In order to complete the tutorial, the sysadmin will do the following:
+
+* Install pre-requisites
+* Install the LEAP command-line utility
+* Check out the LEAP platform
+* Create a provider and its certificates
+* Setup the provider's node and the services that will reside on it
+* Initialize the node
+* Deploy the LEAP platform to the node
+* Test that things worked correctly
+* Some additional commands
+
+We will walk you through each of these steps.
+
+
+Prepare your environment
+========================
+
+There are a few things you need to setup before you can get going. Just some packages, the LEAP cli and the platform.
+
+Install pre-requisites
+--------------------------------
+
+*Debian & Ubuntu*
+
+Install core prerequisites:
+
+ $ sudo apt-get install git ruby ruby-dev rsync openssh-client openssl rake make bzip2
+
+*Mac OS*
+
+Install rubygems from https://rubygems.org/pages/download (unless the `gem` command is already installed).
+
+
+NOTE: leap_cli should work with ruby 1.8, but has only been tested using ruby 1.9.
+
+
+Install the LEAP command-line utility
+-------------------------------------------------
+
+Install the LEAP command-line utility (leap_cli) from rubygems.org:
+
+ $ sudo gem install leap_cli
+
+Alternately, you can install `leap_cli` from source, please refer to https://leap.se/git/leap_cli/README.md.
+
+If you have successfully installed `leap_cli`, then you should be able to do the following:
+
+ $ leap --help
+
+This will list the command-line help options. If you receive an error when doing this, please read through the README.md in the `leap_cli` source to try and resolve any problems before going forwards.
+
+
+Provider Setup
+==============
+
+A provider instance is a directory tree that contains everything you need to manage an infrastructure for a service provider. In this case, we create one for example.org and call the instance directory 'example'.
+
+ $ mkdir -p ~/leap/example
+
+Bootstrap the provider
+-----------------------
+
+Now, we will initialize this directory to make it a provider instance. Your provider instance will need to know where it can find the local copy of the git repository leap_platform, which we setup in the previous step.
+
+ $ cd ~/leap/example
+ $ leap new .
+
+NOTE: make sure you include that trailing dot!
+
+The `leap new` command will ask you for several required values:
+
+* domain: The primary domain name of your service provider. In this tutorial, we will be using "example.org".
+* name: The name of your service provider (we use "Example").
+* contact emails: A comma separated list of email addresses that should be used for important service provider contacts (for things like postmaster aliases, Tor contact emails, etc).
+* platform: The directory where you either have a copy of the `leap_platform` git repository already checked out, or where `leap_cli` should download it to. You could just accept the suggested path for this example.
+ The LEAP Platform is a series of puppet recipes and modules that will be used to configure your provider. You will need a local copy of the platform that will be used to setup your nodes and manage your services. To begin with, you will not need to modify the LEAP Platform.
+
+These steps should be sufficient for this example. If you want to configure your provider further or like to examine the files, please refer to the [Configure Provider](configure-provider) section.
+
+Add Users who will have administrative access
+---------------------------------------------
+
+Now add yourself as a privileged sysadmin who will have access to deploy to servers:
+
+ $ leap add-user --self
+
+NOTE: in most cases, `leap` must be run from within a provider instance directory tree (e.g. ~/leap/example).
+
+
+Create provider certificates
+----------------------------
+
+Create two certificate authorities, one for server certs and one for client
+certs (note: you only need to run this one command to get both):
+
+ $ leap cert ca
+
+Create a temporary cert for your main domain (you should replace it with a real commercial cert at some point):
+
+ $ leap cert csr
+
+
+Setup the provider's node and services
+--------------------------------------
+
+A "node" is a server that is part of your infrastructure. Every node can have one or more services associated with it. Some nodes are "local" and used only for testing, see [Development](development) for more information.
+
+Create a node, with `all the services needed for Email: "couchdb", "mx", "soledad" and "webapp"`
+
+ $ leap node add node1 ip_address:x.x.x.w services:couchdb,mx,soledad,webapp tags:production
+
+NOTE: replace x.x.x.w with the actual IP address of this node
+
+This created a node configuration file in `nodes/node1.json`, but it did not do anything else. It also added the 'tag' called 'production' to this node. Tags allow us to conveniently group nodes together. When creating nodes, you should give them the tag 'production' if the node is to be used in your production infrastructure.
+
+Initialize the nodes
+--------------------
+
+Node initialization only needs to be done once, but there is no harm in doing it multiple times:
+
+ $ leap node init node1
+
+This will initialize the node "node1". When `leap node init` is run, you will be prompted to verify the fingerprint of the SSH host key and to provide the root password of the server. You should only need to do this once.
+
+
+Deploy the LEAP platform to the nodes
+--------------------
+
+Now you should deploy the platform recipes to the node. [Deployment can take a while to run](http://xkcd.com/303/), especially on the first run, as it needs to update the packages on the new machine.
+
+ $ leap deploy
+
+Watch the output for any errors (in red). If everything worked fine, you should now have your first running node. If you do have errors, try doing the deploy again.
+
+
+Setup DNS
+---------
+
+Now that you have the node configured, you should create the DNS entry for this node.
+
+Set up your DNS with these hostnames:
+
+ $ leap list --print ip_address,domain.full,dns.aliases
+ node1 x.x.x.w, node1.example.org, example.org, api.example.org, nicknym.example.org
+
+Alternately, you can adapt this zone file snippet:
+
+ $ leap compile zone
+
+If you cannot edit your DNS zone file, you can still test your provider by adding this entry to your local resolver hosts file (`/etc/hosts` for linux):
+
+ x.x.x.w node1.example.org example.org api.example.org nicknym.example.org
+
+Please don't forget about this entry; it will override DNS queries if you set up your DNS later.
+
+
+What is going on here?
+--------------------------------------------
+
+First, some background terminology:
+
+* **puppet**: Puppet is a system for automating deployment and management of servers (called nodes).
+* **hiera files**: In puppet, you can use something called a 'hiera file' to seed a node with a few configuration values. In LEAP, we go all out and put *every* configuration value needed for a node in the hiera file, and automatically compile a custom hiera file for each node.
+
+When you run `leap deploy`, a bunch of things happen, in this order:
+
+1. **Compile hiera files**: The hiera configuration file for each node is compiled in YAML format and saved in the directory `hiera`. The source material for this hiera file consists of all the JSON configuration files imported or inherited by the node's JSON config file.
+* **Copy required files to node**: All the files needed for puppet to run are rsync'ed to each node. This includes the entire leap_platform directory, as well as the node's hiera file and other files needed by puppet to set up the node (keys, binary files, etc).
+* **Puppet is run**: Once the node is ready, leap connects to the node via ssh and runs `puppet apply`. Puppet is applied locally on the node, without a daemon or puppetmaster.
+
+You can run `leap -v2 deploy` to see exactly what commands are being executed.
+
+<!-- See [under the hood](under-the-hood) for more details. -->
+
+
+Test that things worked correctly
+=================================
+
+You should now have one machine with the LEAP platform email service deployed to it.
+
+
+Access the web application
+--------------------------------------------
+
+In order to connect to the web application in your browser, you need to point your domain at the IP address of your new node.
+
+Next, you can connect to the web application either using a web browser or via the API using the LEAP client. To use a browser, connect to https://example.org (replacing that with your domain). Your browser will complain about an untrusted cert, but for now just bypass this. From there, you should be able to register a new user and login.
+
+Testing with leap_cli
+---------------------
+
+Use the test command to run a set of different tests:
+
+ leap test
+
+
+Additional information
+======================
+
+It is useful to know a few additional things.
+
+Useful commands
+---------------
+
+Here are a few useful commands you can run on your new local nodes:
+
+* `leap ssh node1` -- SSH into node node1 (requires `leap node init node1` first).
+* `leap list` -- list all nodes.
+* `leap list production` -- list only those nodes with the tag 'production'
+* `leap list --print ip_address` -- list a particular attribute of all nodes.
+* `leap cert update` -- generate new certificates if needed.
+
+See the full command reference for more information.
+
+Node filters
+-------------------------------------------
+
+Many of the `leap` commands take a "node filter". You can use a node filter to target a command at one or more nodes.
+
+A node filter consists of one or more keywords, with an optional "+" before each keyword.
+
+* keywords can be a node name, a service type, or a tag.
+* the "+" before the keyword constructs an AND condition
+* otherwise, multiple keywords together construct an OR condition
+
+Examples:
+
+* `leap list openvpn` -- list all nodes with service openvpn.
+* `leap list openvpn +production` -- only nodes of service type openvpn AND tag production.
+* `leap deploy webapp openvpn` -- deploy to all webapp OR openvpn nodes.
+* `leap node init vpn1` -- just init the node named vpn1.
+
+Keep track of your provider configurations
+------------------------------------------
+
+You should commit your provider changes to your favorite VCS whenever things change. This way you can share your configurations with other admins, all they have to do is to pull the changes to stay up to date. Every time you make a change to your provider, such as adding nodes, services, generating certificates, etc. you should add those to your VCS, commit them and push them to where your repository is hosted.
+
+Note that your provider directory contains secrets! Those secrets include passwords for various services. You do not want to have those passwords readable by the world, so make sure that wherever you are hosting your repository, it is not public for the world to read.
+
+What's next
+-----------------------------------
+
+Read the [LEAP platform guide](guide) to learn about planning and securing your infrastructure.
+
diff --git a/hiera.yaml b/hiera.yaml
new file mode 100644
index 00000000..3ff857b8
--- /dev/null
+++ b/hiera.yaml
@@ -0,0 +1,6 @@
+---
+:backends: yaml
+:yaml:
+ :datadir: /var/lib/hiera
+:hierarchy: common
+:logger: console
diff --git a/leap-debug-remote.sh b/leap-debug-remote.sh
new file mode 100644
index 00000000..7f9c6945
--- /dev/null
+++ b/leap-debug-remote.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+# debug script to be run on remote servers
+
+regexp='(leap|stunnel|couch|soledad|haproxy)'
+
+find /etc/leap/
+
+echo
+
+ls -la /srv/leap/
+
+echo
+
+
+dpkg -l | egrep "$regexp"
+
+echo
+
+ps aux|egrep "$regexp"
+
+echo
+
+cat /etc/hosts
diff --git a/lib/leap_cli/commands/README b/lib/leap_cli/commands/README
new file mode 100644
index 00000000..bec78179
--- /dev/null
+++ b/lib/leap_cli/commands/README
@@ -0,0 +1,11 @@
+This directory contains ruby source files that define the available sub-
+commands of the `leap` executable.
+
+For example, the command:
+
+ leap compile
+
+lives in lib/leap_cli/commands/compile.rb.
+
+These files use a DSL (called GLI) for defining command suites.
+See https://github.com/davetron5000/gli for more information.
diff --git a/lib/leap_cli/commands/ca.rb b/lib/leap_cli/commands/ca.rb
new file mode 100644
index 00000000..1b311eee
--- /dev/null
+++ b/lib/leap_cli/commands/ca.rb
@@ -0,0 +1,541 @@
+autoload :OpenSSL, 'openssl'
+autoload :CertificateAuthority, 'certificate_authority'
+autoload :Date, 'date'
+require 'digest/md5'
+
+module LeapCli; module Commands
+
+ desc "Manage X.509 certificates"
+ command :cert do |cert|
+
+ cert.desc 'Creates two Certificate Authorities (one for validating servers and one for validating clients).'
+ cert.long_desc 'To see what values are used in the generation of the certificates (like name and key size), run `leap inspect provider` and look for the "ca" property. To see the details of the created certs, run `leap inspect <file>`.'
+ cert.command :ca do |ca|
+ ca.action do |global_options,options,args|
+ assert_config! 'provider.ca.name'
+ generate_new_certificate_authority(:ca_key, :ca_cert, provider.ca.name)
+ generate_new_certificate_authority(:client_ca_key, :client_ca_cert, provider.ca.name + ' (client certificates only!)')
+ end
+ end
+
+ cert.desc 'Creates or renews an X.509 certificate/key pair for a single node or all nodes, but only if needed.'
+ cert.long_desc 'This command will generate a new certificate for a node if some value in the node has changed ' +
+ 'that is included in the certificate (like hostname or IP address), or if the old certificate will be expiring soon. ' +
+ 'Sometimes, you might want to force the generation of a new certificate, ' +
+ 'such as in the cases where you have changed a CA parameter for server certificates, like bit size or digest hash. ' +
+ 'In this case, use --force. If <node-filter> is empty, this command will apply to all nodes.'
+ cert.arg_name 'FILTER'
+ cert.command :update do |update|
+ update.switch 'force', :desc => 'Always generate new certificates', :negatable => false
+ update.action do |global_options,options,args|
+ update_certificates(manager.filter!(args), options)
+ end
+ end
+
+ cert.desc 'Creates a Diffie-Hellman parameter file, needed for forward secret OpenVPN ciphers.' # (needed for server-side of some TLS connections)
+ cert.command :dh do |dh|
+ dh.action do |global_options,options,args|
+ long_running do
+ if cmd_exists?('certtool')
+ log 0, 'Generating DH parameters (takes a long time)...'
+ output = assert_run!('certtool --generate-dh-params --sec-param high')
+ output.sub! /.*(-----BEGIN DH PARAMETERS-----.*-----END DH PARAMETERS-----).*/m, '\1'
+ output << "\n"
+ write_file!(:dh_params, output)
+ else
+ log 0, 'Generating DH parameters (takes a REALLY long time)...'
+ output = OpenSSL::PKey::DH.generate(3248).to_pem
+ write_file!(:dh_params, output)
+ end
+ end
+ end
+ end
+
+ #
+ # hints:
+ #
+ # inspect CSR:
+ # openssl req -noout -text -in files/cert/x.csr
+ #
+ # generate CSR with openssl to see how it compares:
+ # openssl req -sha256 -nodes -newkey rsa:2048 -keyout example.key -out example.csr
+ #
+ # validate a CSR:
+ # http://certlogik.com/decoder/
+ #
+ # nice details about CSRs:
+ # http://www.redkestrel.co.uk/Articles/CSR.html
+ #
+ cert.desc "Creates a CSR for use in buying a commercial X.509 certificate."
+ cert.long_desc "Unless specified, the CSR is created for the provider's primary domain. "+
+ "The properties used for this CSR come from `provider.ca.server_certificates`, "+
+ "but may be overridden here."
+ cert.command :csr do |csr|
+ csr.flag 'domain', :arg_name => 'DOMAIN', :desc => 'Specify what domain to create the CSR for.'
+ csr.flag ['organization', 'O'], :arg_name => 'ORGANIZATION', :desc => "Override default O in distinguished name."
+ csr.flag ['unit', 'OU'], :arg_name => 'UNIT', :desc => "Set OU in distinguished name."
+ csr.flag 'email', :arg_name => 'EMAIL', :desc => "Set emailAddress in distinguished name."
+ csr.flag ['locality', 'L'], :arg_name => 'LOCALITY', :desc => "Set L in distinguished name."
+ csr.flag ['state', 'ST'], :arg_name => 'STATE', :desc => "Set ST in distinguished name."
+ csr.flag ['country', 'C'], :arg_name => 'COUNTRY', :desc => "Set C in distinguished name."
+ csr.flag :bits, :arg_name => 'BITS', :desc => "Override default certificate bit length"
+ csr.flag :digest, :arg_name => 'DIGEST', :desc => "Override default signature digest"
+ csr.action do |global_options,options,args|
+ assert_config! 'provider.domain'
+ assert_config! 'provider.name'
+ assert_config! 'provider.default_language'
+ assert_config! 'provider.ca.server_certificates.bit_size'
+ assert_config! 'provider.ca.server_certificates.digest'
+ domain = options[:domain] || provider.domain
+
+ unless global_options[:force]
+ assert_files_missing! [:commercial_key, domain], [:commercial_csr, domain],
+ :msg => 'If you really want to create a new key and CSR, remove these files first or run with --force.'
+ end
+
+ server_certificates = provider.ca.server_certificates
+
+ # RSA key
+ keypair = CertificateAuthority::MemoryKeyMaterial.new
+ bit_size = (options[:bits] || server_certificates.bit_size).to_i
+ log :generating, "%s bit RSA key" % bit_size do
+ keypair.generate_key(bit_size)
+ write_file! [:commercial_key, domain], keypair.private_key.to_pem
+ end
+
+ # CSR
+ dn = CertificateAuthority::DistinguishedName.new
+ dn.common_name = domain
+ dn.organization = options[:organization] || provider.name[provider.default_language]
+ dn.ou = options[:unit] # optional
+ dn.email_address = options[:email] # optional
+ dn.country = options[:country] || server_certificates['country'] # optional
+ dn.state = options[:state] || server_certificates['state'] # optional
+ dn.locality = options[:locality] || server_certificates['locality'] # optional
+
+ digest = options[:digest] || server_certificates.digest
+ log :generating, "CSR with #{digest} digest and #{print_dn(dn)}" do
+ csr = create_csr(dn, keypair, digest)
+ request = csr.to_x509_csr
+ write_file! [:commercial_csr, domain], csr.to_pem
+ end
+
+ # Sign using our own CA, for use in testing but hopefully not production.
+ # It is not that commercial CAs are so secure, it is just that signing your own certs is
+ # a total drag for the user because they must click through dire warnings.
+ #if options[:sign]
+ log :generating, "self-signed x509 server certificate for testing purposes" do
+ cert = csr.to_cert
+ cert.serial_number.number = cert_serial_number(domain)
+ cert.not_before = yesterday
+ cert.not_after = yesterday.advance(:years => 1)
+ cert.parent = ca_root
+ cert.sign! domain_test_signing_profile
+ write_file! [:commercial_cert, domain], cert.to_pem
+ log "please replace this file with the real certificate you get from a CA using #{Path.relative_path([:commercial_csr, domain])}"
+ end
+ #end
+
+ # FAKE CA
+ unless file_exists? :commercial_ca_cert
+ log :using, "generated CA in place of commercial CA for testing purposes" do
+ write_file! :commercial_ca_cert, read_file!(:ca_cert)
+ log "please also replace this file with the CA cert from the commercial authority you use."
+ end
+ end
+ end
+ end
+ end
+
+ protected
+
+ #
+ # will generate new certificates for the specified nodes, if needed.
+ #
+ def update_certificates(nodes, options={})
+ assert_files_exist! :ca_cert, :ca_key, :msg => 'Run `leap cert ca` to create them'
+ assert_config! 'provider.ca.server_certificates.bit_size'
+ assert_config! 'provider.ca.server_certificates.digest'
+ assert_config! 'provider.ca.server_certificates.life_span'
+ assert_config! 'common.x509.use'
+
+ nodes.each_node do |node|
+ warn_if_commercial_cert_will_soon_expire(node)
+ if !node.x509.use
+ remove_file!([:node_x509_key, node.name])
+ remove_file!([:node_x509_cert, node.name])
+ elsif options[:force] || cert_needs_updating?(node)
+ generate_cert_for_node(node)
+ end
+ end
+ end
+
+ private
+
+ def generate_new_certificate_authority(key_file, cert_file, common_name)
+ assert_files_missing! key_file, cert_file
+ assert_config! 'provider.ca.name'
+ assert_config! 'provider.ca.bit_size'
+ assert_config! 'provider.ca.life_span'
+
+ root = CertificateAuthority::Certificate.new
+
+ # set subject
+ root.subject.common_name = common_name
+ possible = ['country', 'state', 'locality', 'organization', 'organizational_unit', 'email_address']
+ provider.ca.keys.each do |key|
+ if possible.include?(key)
+ root.subject.send(key + '=', provider.ca[key])
+ end
+ end
+
+ # set expiration
+ root.not_before = yesterday
+ root.not_after = yesterday_advance(provider.ca.life_span)
+
+ # generate private key
+ root.serial_number.number = 1
+ root.key_material.generate_key(provider.ca.bit_size)
+
+ # sign self
+ root.signing_entity = true
+ root.parent = root
+ root.sign!(ca_root_signing_profile)
+
+ # save
+ write_file!(key_file, root.key_material.private_key.to_pem)
+ write_file!(cert_file, root.to_pem)
+ end
+
+ #
+ # returns true if the certs associated with +node+ need to be regenerated.
+ #
+ def cert_needs_updating?(node)
+ if !file_exists?([:node_x509_cert, node.name], [:node_x509_key, node.name])
+ return true
+ else
+ cert = load_certificate_file([:node_x509_cert, node.name])
+ if !created_by_authority?(cert, ca_root)
+ log :updating, "cert for node '#{node.name}' because it was signed by an old CA root cert."
+ return true
+ end
+ if cert.not_after < Time.now.advance(:months => 2)
+ log :updating, "cert for node '#{node.name}' because it will expire soon"
+ return true
+ end
+ if cert.subject.common_name != node.domain.full
+ log :updating, "cert for node '#{node.name}' because domain.full has changed (was #{cert.subject.common_name}, now #{node.domain.full})"
+ return true
+ end
+ cert.openssl_body.extensions.each do |ext|
+ if ext.oid == "subjectAltName"
+ ips = []
+ dns_names = []
+ ext.value.split(",").each do |value|
+ value.strip!
+ ips << $1 if value =~ /^IP Address:(.*)$/
+ dns_names << $1 if value =~ /^DNS:(.*)$/
+ end
+ dns_names.sort!
+ if ips.first != node.ip_address
+ log :updating, "cert for node '#{node.name}' because ip_address has changed (from #{ips.first} to #{node.ip_address})"
+ return true
+ elsif dns_names != dns_names_for_node(node)
+ log :updating, "cert for node '#{node.name}' because domain name aliases have changed\n from: #{dns_names.inspect}\n to: #{dns_names_for_node(node).inspect})"
+ return true
+ end
+ end
+ end
+ end
+ return false
+ end
+
+ def created_by_authority?(cert, ca)
+ authority_key_id = cert.extensions["authorityKeyIdentifier"].identifier.sub(/^keyid:/, '')
+ authority_key_id == public_key_id_for_ca(ca)
+ end
+
+ # calculate the "key id" for a root CA, that matches the value
+ # Authority Key Identifier in the x509 extensions of a cert.
+ def public_key_id_for_ca(ca_cert)
+ @ca_key_ids ||= {}
+ @ca_key_ids[ca_cert.object_id] ||= begin
+ pubkey = ca_cert.key_material.public_key
+ seq = OpenSSL::ASN1::Sequence([
+ OpenSSL::ASN1::Integer.new(pubkey.n),
+ OpenSSL::ASN1::Integer.new(pubkey.e)
+ ])
+ Digest::SHA1.hexdigest(seq.to_der).upcase.scan(/../).join(':')
+ end
+ end
+
+ def warn_if_commercial_cert_will_soon_expire(node)
+ dns_names_for_node(node).each do |domain|
+ if file_exists?([:commercial_cert, domain])
+ cert = load_certificate_file([:commercial_cert, domain])
+ path = Path.relative_path([:commercial_cert, domain])
+ if cert.not_after < Time.now.utc
+ log :error, "the commercial certificate '#{path}' has EXPIRED! " +
+ "You should renew it with `leap cert csr --domain #{domain}`."
+ elsif cert.not_after < Time.now.advance(:months => 2)
+ log :warning, "the commercial certificate '#{path}' will expire soon (#{cert.not_after}). "+
+ "You should renew it with `leap cert csr --domain #{domain}`."
+ end
+ end
+ end
+ end
+
+ def generate_cert_for_node(node)
+ return if node.x509.use == false
+
+ cert = CertificateAuthority::Certificate.new
+
+ # set subject
+ cert.subject.common_name = node.domain.full
+ cert.serial_number.number = cert_serial_number(node.domain.full)
+
+ # set expiration
+ cert.not_before = yesterday
+ cert.not_after = yesterday_advance(provider.ca.server_certificates.life_span)
+
+ # generate key
+ cert.key_material.generate_key(provider.ca.server_certificates.bit_size)
+
+ # sign
+ cert.parent = ca_root
+ cert.sign!(server_signing_profile(node))
+
+ # save
+ write_file!([:node_x509_key, node.name], cert.key_material.private_key.to_pem)
+ write_file!([:node_x509_cert, node.name], cert.to_pem)
+ end
+
+ #
+ # yields client key and cert suitable for testing
+ #
+ def generate_test_client_cert(prefix=nil)
+ cert = CertificateAuthority::Certificate.new
+ cert.serial_number.number = cert_serial_number(provider.domain)
+ cert.subject.common_name = [prefix, random_common_name(provider.domain)].join
+ cert.not_before = yesterday
+ cert.not_after = yesterday.advance(:years => 1)
+ cert.key_material.generate_key(1024) # just for testing, remember!
+ cert.parent = client_ca_root
+ cert.sign! client_test_signing_profile
+ yield cert.key_material.private_key.to_pem, cert.to_pem
+ end
+
+ #
+ # creates a CSR and returns it.
+ # with the correct extReq attribute so that the CA
+ # doesn't generate certs with extensions we don't want.
+ #
+ def create_csr(dn, keypair, digest)
+ csr = CertificateAuthority::SigningRequest.new
+ csr.distinguished_name = dn
+ csr.key_material = keypair
+ csr.digest = digest
+
+ # define extensions manually (library doesn't support setting these on CSRs)
+ extensions = []
+ extensions << CertificateAuthority::Extensions::BasicConstraints.new.tap {|basic|
+ basic.ca = false
+ }
+ extensions << CertificateAuthority::Extensions::KeyUsage.new.tap {|keyusage|
+ keyusage.usage = ["digitalSignature", "keyEncipherment"]
+ }
+ extensions << CertificateAuthority::Extensions::ExtendedKeyUsage.new.tap {|extkeyusage|
+ extkeyusage.usage = [ "serverAuth"]
+ }
+
+ # convert extensions to attribute 'extReq'
+ # aka "Requested Extensions"
+ factory = OpenSSL::X509::ExtensionFactory.new
+ attrval = OpenSSL::ASN1::Set([OpenSSL::ASN1::Sequence(
+ extensions.map{|e| factory.create_ext(e.openssl_identifier, e.to_s, e.critical)}
+ )])
+ attrs = [
+ OpenSSL::X509::Attribute.new("extReq", attrval),
+ ]
+ csr.attributes = attrs
+
+ return csr
+ end
+
+ def ca_root
+ @ca_root ||= begin
+ load_certificate_file(:ca_cert, :ca_key)
+ end
+ end
+
+ def client_ca_root
+ @client_ca_root ||= begin
+ load_certificate_file(:client_ca_cert, :client_ca_key)
+ end
+ end
+
+ def load_certificate_file(crt_file, key_file=nil, password=nil)
+ crt = read_file!(crt_file)
+ openssl_cert = OpenSSL::X509::Certificate.new(crt)
+ cert = CertificateAuthority::Certificate.from_openssl(openssl_cert)
+ if key_file
+ key = read_file!(key_file)
+ cert.key_material.private_key = OpenSSL::PKey::RSA.new(key, password)
+ end
+ return cert
+ end
+
+ def ca_root_signing_profile
+ {
+ "extensions" => {
+ "basicConstraints" => {"ca" => true},
+ "keyUsage" => {
+ "usage" => ["critical", "keyCertSign"]
+ },
+ "extendedKeyUsage" => {
+ "usage" => []
+ }
+ }
+ }
+ end
+
+ #
+ # For keyusage, openvpn server certs can have keyEncipherment or keyAgreement.
+ # Web browsers seem to break without keyEncipherment.
+ # For now, I am using digitalSignature + keyEncipherment
+ #
+ # * digitalSignature -- for (EC)DHE cipher suites
+ # "The digitalSignature bit is asserted when the subject public key is used
+ # with a digital signature mechanism to support security services other
+ # than certificate signing (bit 5), or CRL signing (bit 6). Digital
+ # signature mechanisms are often used for entity authentication and data
+ # origin authentication with integrity."
+ #
+ # * keyEncipherment ==> for plain RSA cipher suites
+ # "The keyEncipherment bit is asserted when the subject public key is used for
+ # key transport. For example, when an RSA key is to be used for key management,
+ # then this bit is set."
+ #
+ # * keyAgreement ==> for use with DH, not RSA.
+ # "The keyAgreement bit is asserted when the subject public key is used for key
+ # agreement. For example, when a Diffie-Hellman key is to be used for key
+ # management, then this bit is set."
+ #
+ # digest options: SHA512, SHA256, SHA1
+ #
+ def server_signing_profile(node)
+ {
+ "digest" => provider.ca.server_certificates.digest,
+ "extensions" => {
+ "keyUsage" => {
+ "usage" => ["digitalSignature", "keyEncipherment"]
+ },
+ "extendedKeyUsage" => {
+ "usage" => ["serverAuth", "clientAuth"]
+ },
+ "subjectAltName" => {
+ "ips" => [node.ip_address],
+ "dns_names" => dns_names_for_node(node)
+ }
+ }
+ }
+ end
+
+ #
+ # This is used when signing the main cert for the provider's domain
+ # with our own CA (for testing purposes). Typically, this cert would
+ # be purchased from a commercial CA, and not signed this way.
+ #
+ def domain_test_signing_profile
+ {
+ "digest" => "SHA256",
+ "extensions" => {
+ "keyUsage" => {
+ "usage" => ["digitalSignature", "keyEncipherment"]
+ },
+ "extendedKeyUsage" => {
+ "usage" => ["serverAuth"]
+ }
+ }
+ }
+ end
+
+ #
+ # This is used when signing a dummy client certificate that is only to be
+ # used for testing.
+ #
+ def client_test_signing_profile
+ {
+ "digest" => "SHA256",
+ "extensions" => {
+ "keyUsage" => {
+ "usage" => ["digitalSignature"]
+ },
+ "extendedKeyUsage" => {
+ "usage" => ["clientAuth"]
+ }
+ }
+ }
+ end
+
+ def dns_names_for_node(node)
+ names = [node.domain.internal, node.domain.full]
+ if node['dns'] && node.dns['aliases'] && node.dns.aliases.any?
+ names += node.dns.aliases
+ end
+ names.compact!
+ names.sort!
+ names.uniq!
+ return names
+ end
+
+ #
+ # For cert serial numbers, we need a non-colliding number less than 160 bits.
+ # md5 will do nicely, since there is no need for a secure hash, just a short one.
+ # (md5 is 128 bits)
+ #
+ def cert_serial_number(domain_name)
+ Digest::MD5.hexdigest("#{domain_name} -- #{Time.now}").to_i(16)
+ end
+
+ #
+ # for the random common name, we need a text string that will be unique across all certs.
+ # ruby 1.8 doesn't have a built-in uuid generator, or we would use SecureRandom.uuid
+ #
+ def random_common_name(domain_name)
+ cert_serial_number(domain_name).to_s(36)
+ end
+
+ # prints CertificateAuthority::DistinguishedName fields
+ def print_dn(dn)
+ fields = {}
+ [:common_name, :locality, :state, :country, :organization, :organizational_unit, :email_address].each do |attr|
+ fields[attr] = dn.send(attr) if dn.send(attr)
+ end
+ fields.inspect
+ end
+
+ ##
+ ## TIME HELPERS
+ ##
+ ## note: we use 'yesterday' instead of 'today', because times are in UTC, and some people on the planet
+ ## are behind UTC.
+ ##
+
+ def yesterday
+ t = Time.now - 24*60*60
+ Time.utc t.year, t.month, t.day
+ end
+
+ def yesterday_advance(string)
+ number, unit = string.split(' ')
+ unless ['years', 'months', 'days', 'hours', 'minutes'].include? unit
+ bail!("The time property '#{string}' is missing a unit (one of: years, months, days, hours, minutes).")
+ end
+ unless number.to_i.to_s == number
+ bail!("The time property '#{string}' is missing a number.")
+ end
+ yesterday.advance(unit.to_sym => number.to_i)
+ end
+
+end; end
diff --git a/lib/leap_cli/commands/clean.rb b/lib/leap_cli/commands/clean.rb
new file mode 100644
index 00000000..a9afff53
--- /dev/null
+++ b/lib/leap_cli/commands/clean.rb
@@ -0,0 +1,16 @@
+module LeapCli
+ module Commands
+
+ desc 'Removes all files generated with the "compile" command.'
+ command :clean do |c|
+ c.action do |global_options,options,args|
+ Dir.glob(path([:hiera, '*'])).each do |file|
+ remove_file! file
+ end
+ remove_file! path(:authorized_keys)
+ remove_file! path(:known_hosts)
+ end
+ end
+
+ end
+end \ No newline at end of file
diff --git a/lib/leap_cli/commands/compile.rb b/lib/leap_cli/commands/compile.rb
new file mode 100644
index 00000000..f9079279
--- /dev/null
+++ b/lib/leap_cli/commands/compile.rb
@@ -0,0 +1,531 @@
+require 'socket'
+
+module LeapCli
+ module Commands
+
+ desc "Compile generated files."
+ command [:compile, :c] do |c|
+ c.desc 'Compiles node configuration files into hiera files used for deployment.'
+ c.arg_name 'ENVIRONMENT', :optional => true
+ c.command :all do |all|
+ all.action do |global_options,options,args|
+ environment = args.first
+ compile_command(environment)
+ end
+ end
+
+ c.desc "Prints a DNS zone file for your provider."
+ c.command :zone do |zone|
+ zone.action do |global_options, options, args|
+ compile_command(nil)
+ compile_zone_file(global_options[:yes] || global_options[:force])
+ end
+ end
+
+ c.desc "Print entries suitable for an /etc/hosts file, useful for testing your provider."
+ c.command :hosts do |hosts|
+ hosts.action do |global_options, options, args|
+ compile_command(nil)
+ compile_hosts_file
+ end
+ end
+
+ c.desc "Compile provider.json bootstrap files for your provider."
+ c.command 'provider.json' do |provider|
+ provider.action do |global_options, options, args|
+ compile_command(nil)
+ compile_provider_json
+ end
+ end
+
+ c.desc "Prints a list of firewall rules. These rules are already "+
+ "implemented on each node, but you might want the list of all "+
+ "rules in case you also have a restrictive network firewall."
+ c.command :firewall do |zone|
+ zone.action do |global_options, options, args|
+ compile_command(nil)
+ compile_firewall
+ end
+ end
+
+ c.default_command :all
+ end
+
+ protected
+
+ def compile_command(environment)
+ if !LeapCli.leapfile.environment.nil? && !environment.nil? && environment != LeapCli.leapfile.environment
+ bail! "You cannot specify an ENVIRONMENT argument while the environment is pinned."
+ end
+ if environment
+ if manager.environment_names.include?(environment)
+ compile_hiera_files(manager.filter([environment]), false)
+ else
+ bail! "There is no environment named `#{environment}`."
+ end
+ else
+ clean_export = LeapCli.leapfile.environment.nil?
+ compile_hiera_files(manager.filter, clean_export)
+ end
+ if file_exists?(:static_web_readme)
+ compile_provider_json(environment)
+ end
+ end
+
+ #
+ # a "clean" export of secrets will also remove keys that are no longer used,
+ # but this should not be done if we are not examining all possible nodes.
+ #
+ def compile_hiera_files(nodes, clean_export)
+ update_certificates(nodes) # \ must come first so that output will
+ update_compiled_ssh_configs # / get included in compiled hiera files.
+ sanity_check(nodes)
+ manager.export_nodes(nodes)
+ manager.export_secrets(clean_export)
+ end
+
+ def update_compiled_ssh_configs
+ generate_monitor_ssh_keys
+ update_authorized_keys
+ update_known_hosts
+ end
+
+ def sanity_check(nodes)
+ # confirm that every node has a unique ip address
+ ips = {}
+ nodes.pick_fields('ip_address').each do |name, ip_address|
+ if ips.key?(ip_address)
+ bail! {
+ log(:fatal_error, "Every node must have its own IP address.") {
+ log "Nodes `#{name}` and `#{ips[ip_address]}` are both configured with `#{ip_address}`."
+ }
+ }
+ else
+ ips[ip_address] = name
+ end
+ end
+ # confirm that the IP address of this machine is not also used for a node.
+ Socket.ip_address_list.each do |addrinfo|
+ if !addrinfo.ipv4_private? && ips.key?(addrinfo.ip_address)
+ ip = addrinfo.ip_address
+ name = ips[ip]
+ bail! {
+ log(:fatal_error, "Something is very wrong. The `leap` command must only be run on your sysadmin machine, not on a provider node.") {
+ log "This machine has the same IP address (#{ip}) as node `#{name}`."
+ }
+ }
+ end
+ end
+ end
+
+ ##
+ ## SSH
+ ##
+
+ #
+ # generates a ssh key pair that is used only by remote monitors
+ # to connect to nodes and run certain allowed commands.
+ #
+ # every node has the public monitor key added to their authorized
+ # keys, and every monitor node has a copy of the private monitor key.
+ #
+ def generate_monitor_ssh_keys
+ priv_key_file = path(:monitor_priv_key)
+ pub_key_file = path(:monitor_pub_key)
+ unless file_exists?(priv_key_file, pub_key_file)
+ ensure_dir(File.dirname(priv_key_file))
+ ensure_dir(File.dirname(pub_key_file))
+ cmd = %(ssh-keygen -N '' -C 'monitor' -t rsa -b 4096 -f '%s') % priv_key_file
+ assert_run! cmd
+ if file_exists?(priv_key_file, pub_key_file)
+ log :created, priv_key_file
+ log :created, pub_key_file
+ else
+ log :failed, 'to create monitor ssh keys'
+ end
+ end
+ end
+
+ #
+ # Compiles the authorized keys file, which gets installed on every node during init.
+ # Afterwards, puppet installs an authorized keys file that is generated differently
+ # (see authorized_keys() in macros.rb)
+ #
+ def update_authorized_keys
+ buffer = StringIO.new
+ keys = Dir.glob(path([:user_ssh, '*']))
+ if keys.empty?
+ bail! "You must have at least one public SSH user key configured in order to proceed. See `leap help add-user`."
+ end
+ if file_exists?(path(:monitor_pub_key))
+ keys << path(:monitor_pub_key)
+ end
+ keys.sort.each do |keyfile|
+ ssh_type, ssh_key = File.read(keyfile).strip.split(" ")
+ buffer << ssh_type
+ buffer << " "
+ buffer << ssh_key
+ buffer << " "
+ buffer << Path.relative_path(keyfile)
+ buffer << "\n"
+ end
+ write_file!(:authorized_keys, buffer.string)
+ end
+
+ #
+ # generates the known_hosts file.
+ #
+ # we do a 'late' binding of the hostnames and IP part of the ssh pub key record in order to allow
+ # for the possibility that the hostnames or IP have changed in the node configuration.
+ #
+ def update_known_hosts
+ buffer = StringIO.new
+ buffer << "#\n"
+ buffer << "# This file is automatically generated by the command `leap`. You should NOT modify this file.\n"
+ buffer << "# Instead, rerun `leap node init` on whatever node is causing SSH problems.\n"
+ buffer << "#\n"
+ manager.nodes.keys.sort.each do |node_name|
+ node = manager.nodes[node_name]
+ hostnames = [node.name, node.domain.internal, node.domain.full, node.ip_address].join(',')
+ pub_key = read_file([:node_ssh_pub_key,node.name])
+ if pub_key
+ buffer << [hostnames, pub_key].join(' ')
+ buffer << "\n"
+ end
+ end
+ write_file!(:known_hosts, buffer.string)
+ end
+
+ ##
+ ## provider.json
+ ##
+
+ #
+ # generates static provider.json files that can be put into place
+ # (e.g. https://domain/provider.json) for the cases where the
+ # webapp domain does not match the provider's domain.
+ #
+ def compile_provider_json(environments=nil)
+ webapp_nodes = manager.nodes[:services => 'webapp']
+ write_file!(:static_web_readme, STATIC_WEB_README)
+ environments ||= manager.environment_names
+ environments.each do |env|
+ node = webapp_nodes[:environment => env].values.first
+ if node
+ env ||= 'default'
+ write_file!(
+ [:static_web_provider_json, env],
+ node['definition_files']['provider']
+ )
+ write_file!(
+ [:static_web_htaccess, env],
+ HTACCESS_FILE % {:min_version => manager.env(env).provider.client_version['min']}
+ )
+ end
+ end
+ end
+
+ HTACCESS_FILE = %[
+<Files provider.json>
+ Header set X-Minimum-Client-Version %{min_version}
+</Files>
+]
+
+ STATIC_WEB_README = %[
+This directory contains statically rendered copies of the `provider.json` file
+used by the client to "bootstrap" its configuration for use with your service
+provider.
+
+There is a separate provider.json file for each environment, although you
+should only need 'production/provider.json' or, if you have no environments
+configured, 'default/provider.json'.
+
+To clarify, this is the public `provider.json` file used by the client, not the
+`provider.json` file that is used to configure the provider.
+
+The provider.json file must be available at `https://domain/provider.json`
+(unless this provider is included in the list of providers which are pre-
+seeded in the client).
+
+This provider.json file can be served correctly in one of three ways:
+
+(1) If the property webapp.domain is not configured, then the web app will be
+ installed at https://domain/ and it will handle serving the provider.json file.
+
+(2) If one or more nodes have the 'static' service configured for the provider's
+ domain, then these 'static' nodes will correctly serve provider.json.
+
+(3) Otherwise, you must copy the provider.json file to your web
+ server and make it available at '/provider.json'. The example htaccess
+ file shows what header options should be sent by the web server
+ with the response.
+
+This directory is needed for method (3), but not for methods (1) or (2).
+
+This directory has been created by the command `leap compile provider.json`.
+Once created, it will be kept up to date every time you compile. You may safely
+remove this directory if you don't use it.
+]
+
+ ##
+ ##
+ ## ZONE FILE
+ ##
+
+ def relative_hostname(fqdn, provider)
+ @domain_regexp ||= /\.?#{Regexp.escape(provider.domain)}$/
+ fqdn.sub(@domain_regexp, '')
+ end
+
+ #
+ # serial is any number less than 2^32 (4294967296)
+ #
+ def compile_zone_file(force=false)
+ # note: we use the default provider for all nodes, because we use it
+ # to generate hostnames that are relative to the default domain.
+ provider = manager.env('default').provider
+ hosts_seen = {}
+ lines = []
+
+ #
+ # header
+ #
+ lines << ZONE_HEADER % {
+ :domain => provider.domain,
+ :ns => provider.domain,
+ :contact => provider.contacts.default.first.sub('@','.'),
+ :serial => generate_zone_serial
+ }
+
+ #
+ # common records
+ #
+ lines << ORIGIN_HEADER
+ # 'A' records for primary domain
+ manager.nodes[:environment => '!local'].each_node do |node|
+ if node.dns['aliases'] && node.dns.aliases.include?(provider.domain)
+ lines << ["@", "IN A #{node.ip_address}"]
+ end
+ end
+
+ # NS records
+ if provider['dns'] && provider.dns['nameservers']
+ unless provider.dns.nameservers.is_a?(Array)
+ # TODO: remove me once we have JSON schema working
+ bail! {log :error, 'dns.nameservers must be an array' }
+ end
+ provider.dns.nameservers.each do |ns|
+ lines << ["@", "IN NS #{ns}."]
+ end
+ elsif !force
+ log :warning, "Property dns.nameservers is not configured in provider.json." do
+ log "This will produce a zone file without any NS records."
+ log "Use --force to skip this warning."
+ end
+ return unless agree("Continue? ")
+ end
+
+ # environment records
+ manager.environment_names.each do |env|
+ next if env == 'local'
+ nodes = manager.nodes[:environment => env]
+ next unless nodes.any?
+ spf = nil
+ dkim = nil
+ lines << ENV_HEADER % (env.nil? ? 'default' : env)
+ nodes.each_node do |node|
+ if node.dns.public
+ lines << [relative_hostname(node.domain.full, provider), "IN A #{node.ip_address}"]
+ end
+ if node.dns['aliases']
+ node.dns.aliases.each do |host_alias|
+ if host_alias != node.domain.full && host_alias != provider.domain
+ lines << [relative_hostname(host_alias, provider), "IN A #{node.ip_address}"]
+ end
+ end
+ end
+ if node.services.include? 'mx'
+ mx_domain = relative_hostname(node.domain.full_suffix, provider)
+ lines << [mx_domain, "IN MX 10 #{relative_hostname(node.domain.full, provider)}"]
+ spf ||= [mx_domain, spf_record(node)]
+ dkim ||= dkim_record(node, provider)
+ end
+ end
+ lines << spf if spf
+ lines << dkim if dkim
+ end
+
+ # print the lines
+ max_width = lines.inject(0) {|max, line| line.is_a?(Array) ? [max, line[0].length].max : max}
+ max_width = [max_width, 24].min
+ lines.each do |host, line|
+ if line.nil?
+ puts(host)
+ else
+ host = '@' if host == ''
+ puts("%-#{max_width}s %s" % [host, line])
+ end
+ end
+ end
+
+ #
+ # outputs entries suitable for an /etc/hosts file
+ #
+ def compile_hosts_file
+ manager.environment_names.each do |env|
+ nodes = manager.nodes[:environment => env]
+ next unless nodes.any?
+ puts
+ puts "## environment '#{env || 'default'}'"
+ nodes.each do |name, node|
+ puts "%s %s" % [
+ node.ip_address,
+ [name, node.get('domain.full'), node.get('dns.aliases')].compact.join(' ')
+ ]
+ end
+ end
+ end
+
+ private
+
+ #
+ # allow mail from any mx node, plus the webapp nodes.
+ #
+ # TODO: ipv6
+ #
+ def spf_record(node)
+ ips = node.nodes_like_me['services' => 'webapp'].values.collect {|n|
+ "ip4:" + n.ip_address
+ }
+ # TXT strings may not be longer than 255 characters, although
+ # you can chain multiple strings together.
+ strings = "v=spf1 MX #{ips.join(' ')} -all".scan(/.{1,255}/).join('" "')
+ %(IN TXT "#{strings}")
+ end
+
+ #
+ # for example:
+ #
+ # selector._domainkey IN TXT "v=DKIM1;h=sha256;k=rsa;s=email;p=MIGfMA0GCSq...GSIb3DQ"
+ #
+ # specification: http://dkim.org/specs/rfc4871-dkimbase.html#rfc.section.7.4
+ #
+ def dkim_record(node, provider)
+ # PEM encoded public key (base64), without the ---PUBLIC KEY--- armor parts.
+ assert_files_exist! :dkim_pub_key
+ dkim_pub_key = Path.named_path(:dkim_pub_key)
+ public_key = File.readlines(dkim_pub_key).grep(/^[^\-]+/).join
+
+ host = relative_hostname(
+ node.mx.dkim.selector + "._domainkey." + node.domain.full_suffix,
+ provider)
+
+ attrs = [
+ "v=DKIM1",
+ "h=sha256",
+ "k=rsa",
+ "s=email",
+ "p=" + public_key
+ ]
+
+ return [host, "IN TXT " + txt_wrap(attrs.join(';'))]
+ end
+
+ #
+ # DNS TXT records cannot be longer than 255 characters.
+ #
+ # However, multiple responses will be concatenated together.
+ # It looks like this:
+ #
+ # IN TXT "v=spf1 .... first" "second string..."
+ #
+ def txt_wrap(str)
+ '"' + str.scan(/.{1,255}/).join('" "') + '"'
+ end
+
+ #
+ # For zone serial number, we want something that will be
+ # different each time you deploy but also will be greater
+ # than any prior likely serial that was prefixed by the
+ # year, such as 2016040600.
+ #
+ # so, we use time_t of right now, modified with first
+ # digit incremented by one.
+ #
+ # this will work until Time.at(2**32 - 1_000_000_000)
+ # aka 2074-05-31 04:41:36 UTC.
+ #
+ def generate_zone_serial
+ Time.now.utc.to_i + 1_000_000_000
+ end
+
+ ENV_HEADER = %[
+;;
+;; ENVIRONMENT %s
+;;
+
+]
+
+ ZONE_HEADER = %[
+;;
+;; BIND data file for %{domain}
+;;
+
+$TTL 600
+$ORIGIN %{domain}.
+
+@ IN SOA %{ns}. %{contact}. (
+ %{serial} ; serial
+ 7200 ; refresh ( 2 hours)
+ 3600 ; retry ( 1 hour)
+ 1209600 ; expire ( 14 days)
+ 600 ) ; minimum ( 10 minutes)
+;
+]
+
+ ORIGIN_HEADER = %[
+;;
+;; ZONE ORIGIN
+;;
+
+]
+
+ ##
+ ## FIREWALL
+ ##
+
+ public
+
+ def compile_firewall
+ manager.nodes.each_node(&:evaluate)
+
+ rules = [["ALLOW TO", "PORTS", "ALLOW FROM"]]
+ manager.nodes[:environment => '!local'].values.each do |node|
+ next unless node['firewall']
+ node.firewall.each do |name, rule|
+ if rule.is_a? Hash
+ rules << add_rule(rule)
+ elsif rule.is_a? Array
+ rule.each do |r|
+ rules << add_rule(r)
+ end
+ end
+ end
+ end
+
+ max_to = rules.inject(0) {|max, r| [max, r[0].length].max}
+ max_port = rules.inject(0) {|max, r| [max, r[1].length].max}
+ max_from = rules.inject(0) {|max, r| [max, r[2].length].max}
+ rules.each do |rule|
+ puts "%-#{max_to}s %-#{max_port}s %-#{max_from}s" % rule
+ end
+ end
+
+ private
+
+ def add_rule(rule)
+ [rule["to"], [rule["port"]].compact.join(','), rule["from"]]
+ end
+
+ end
+end \ No newline at end of file
diff --git a/lib/leap_cli/commands/db.rb b/lib/leap_cli/commands/db.rb
new file mode 100644
index 00000000..5307ac4d
--- /dev/null
+++ b/lib/leap_cli/commands/db.rb
@@ -0,0 +1,86 @@
+module LeapCli; module Commands
+
+ desc 'Database commands.'
+ command :db do |db|
+ db.desc 'Destroy one or more databases. If present, limit to FILTER nodes. For example `leap db destroy --db sessions,tokens testing`.'
+ db.arg_name 'FILTER', :optional => true
+ db.command :destroy do |destroy|
+ destroy.flag :db, :arg_name => "DATABASES", :desc => 'Comma separated list of databases to destroy (no space). Use "--db all" to destroy all databases.', :optional => true
+ destroy.flag :user, :arg_name => "USERS", :desc => 'Comma separated list of usernames. The storage databases for these user(s) will be destroyed.', :optional => true
+ destroy.action do |global_options,options,args|
+ dbs = (options[:db]||"").split(',')
+ users = (options[:user]||"").split(',')
+ if dbs.empty? && users.empty?
+ bail!('Either --db or --user is required.')
+ end
+ nodes = manager.filter(args)
+ if nodes.any?
+ nodes = nodes[:services => 'couchdb']
+ end
+ unless nodes.any?
+ bail! 'No db nodes selected.'
+ end
+ if users.any?
+ unless global_options[:yes]
+ say 'You are about to permanently destroy user databases for [%s] for nodes [%s].' % [users.join(', '), nodes.keys.join(', ')]
+ bail! unless agree("Continue? ")
+ end
+ destroy_user_dbs(nodes, users)
+ elsif dbs.any?
+ unless global_options[:yes]
+ if dbs.include?('all')
+ say 'You are about to permanently destroy all database data for nodes [%s].' % nodes.keys.join(', ')
+ else
+ say 'You are about to permanently destroy databases [%s] for nodes [%s].' % [dbs.join(', '), nodes.keys.join(', ')]
+ end
+ bail! unless agree("Continue? ")
+ end
+ if dbs.include?('all')
+ destroy_all_dbs(nodes)
+ else
+ destroy_dbs(nodes, dbs)
+ end
+ say 'You must run `leap deploy` in order to create the databases again.'
+ end
+ end
+ end
+ end
+
+ private
+
+ def destroy_all_dbs(nodes)
+ ssh_connect(nodes) do |ssh|
+ ssh.run('/etc/init.d/bigcouch stop && test ! -z "$(ls /opt/bigcouch/var/lib/ 2> /dev/null)" && rm -r /opt/bigcouch/var/lib/* && echo "All DBs destroyed" || echo "DBs already destroyed"')
+ end
+ end
+
+ def destroy_dbs(nodes, dbs)
+ nodes.each_node do |node|
+ ssh_connect(node) do |ssh|
+ dbs.each do |db|
+ ssh.run(DESTROY_DB_COMMAND % {:db => db})
+ end
+ end
+ end
+ end
+
+ def destroy_user_dbs(nodes, users)
+ nodes.each_node do |node|
+ ssh_connect(node) do |ssh|
+ users.each do |user|
+ ssh.run(DESTROY_USER_DB_COMMAND % {:user => user})
+ end
+ end
+ end
+ end
+
+ DESTROY_DB_COMMAND = %{
+if [ 200 = `curl -ns -w "%%{http_code}" -X GET "127.0.0.1:5984/%{db}" -o /dev/null` ]; then
+ echo "Result from DELETE /%{db}:" `curl -ns -X DELETE "127.0.0.1:5984/%{db}"`;
+else
+ echo "Skipping db '%{db}': it does not exist or has already been deleted.";
+fi
+}
+
+ DESTROY_USER_DB_COMMAND = %{/srv/leap/couchdb/scripts/destroy-user-db --username %{user}}
+end; end
diff --git a/lib/leap_cli/commands/deploy.rb b/lib/leap_cli/commands/deploy.rb
new file mode 100644
index 00000000..9dd190ab
--- /dev/null
+++ b/lib/leap_cli/commands/deploy.rb
@@ -0,0 +1,374 @@
+require 'etc'
+
+module LeapCli
+ module Commands
+
+ desc 'Apply recipes to a node or set of nodes.'
+ long_desc 'The FILTER can be the name of a node, service, or tag.'
+ arg_name 'FILTER'
+ command [:deploy, :d] do |c|
+
+ c.switch :fast, :desc => 'Makes the deploy command faster by skipping some slow steps. A "fast" deploy can be used safely if you recently completed a normal deploy.',
+ :negatable => false
+
+ c.switch :sync, :desc => "Sync files, but don't actually apply recipes.", :negatable => false
+
+ c.switch :force, :desc => 'Deploy even if there is a lockfile.', :negatable => false
+
+ c.switch :downgrade, :desc => 'Allows deploy to run with an older platform version.', :negatable => false
+
+ c.switch :dev, :desc => "Development mode: don't run 'git submodule update' before deploy.", :negatable => false
+
+ c.flag :tags, :desc => 'Specify tags to pass through to puppet (overriding the default).',
+ :arg_name => 'TAG[,TAG]'
+
+ c.flag :port, :desc => 'Override the default SSH port.',
+ :arg_name => 'PORT'
+
+ c.flag :ip, :desc => 'Override the default SSH IP address.',
+ :arg_name => 'IPADDRESS'
+
+ c.action do |global,options,args|
+
+ if options[:dev] != true
+ init_submodules
+ end
+
+ nodes = manager.filter!(args, :disabled => false)
+ if nodes.size > 1
+ say "Deploying to these nodes: #{nodes.keys.join(', ')}"
+ if !global[:yes] && !agree("Continue? ")
+ quit! "OK. Bye."
+ end
+ end
+
+ environments = nodes.field('environment').uniq
+ if environments.empty?
+ environments = [nil]
+ end
+ environments.each do |env|
+ check_platform_pinning(env, global)
+ end
+
+ # compile hiera files for all the nodes in every environment that is
+ # being deployed and only those environments.
+ compile_hiera_files(manager.filter(environments), false)
+
+ ssh_connect(nodes, connect_options(options)) do |ssh|
+ ssh.leap.log :checking, 'node' do
+ ssh.leap.check_for_no_deploy
+ ssh.leap.assert_initialized
+ end
+ ssh.leap.log :synching, "configuration files" do
+ sync_hiera_config(ssh)
+ sync_support_files(ssh)
+ end
+ ssh.leap.log :synching, "puppet manifests" do
+ sync_puppet_files(ssh)
+ end
+ unless options[:sync]
+ ssh.leap.log :applying, "puppet" do
+ ssh.puppet.apply(:verbosity => [LeapCli.log_level,5].min,
+ :tags => tags(options),
+ :force => options[:force],
+ :info => deploy_info,
+ :downgrade => options[:downgrade]
+ )
+ end
+ end
+ end
+ if !Util.exit_status.nil? && Util.exit_status != 0
+ log :warning, "puppet did not finish successfully."
+ end
+ end
+ end
+
+ desc 'Display recent deployment history for a set of nodes.'
+ long_desc 'The FILTER can be the name of a node, service, or tag.'
+ arg_name 'FILTER'
+ command [:history, :h] do |c|
+ c.flag :port, :desc => 'Override the default SSH port.',
+ :arg_name => 'PORT'
+ c.flag :ip, :desc => 'Override the default SSH IP address.',
+ :arg_name => 'IPADDRESS'
+ c.switch :last, :desc => 'Show last deploy only',
+ :negatable => false
+ c.action do |global,options,args|
+ if options[:last] == true
+ lines = 1
+ else
+ lines = 10
+ end
+ nodes = manager.filter!(args)
+ ssh_connect(nodes, connect_options(options)) do |ssh|
+ ssh.leap.history(lines)
+ end
+ end
+ end
+
+ private
+
+ def forcible_prompt(forced, msg, prompt)
+ say(msg)
+ if forced
+ log :warning, "continuing anyway because of --force"
+ else
+ say "hint: use --force to skip this prompt."
+ quit!("OK. Bye.") unless agree(prompt)
+ end
+ end
+
+ #
+ # The currently activated provider.json could have loaded some pinning
+ # information for the platform. If this is the case, refuse to deploy
+ # if there is a mismatch.
+ #
+ # For example:
+ #
+ # "platform": {
+ # "branch": "develop"
+ # "version": "1.0..99"
+ # "commit": "e1d6280e0a8c565b7fb1a4ed3969ea6fea31a5e2..HEAD"
+ # }
+ #
+ def check_platform_pinning(environment, global_options)
+ provider = manager.env(environment).provider
+ return unless provider['platform']
+
+ if environment.nil? || environment == 'default'
+ provider_json = 'provider.json'
+ else
+ provider_json = 'provider.' + environment + '.json'
+ end
+
+ # can we have json schema verification already?
+ unless provider.platform.is_a? Hash
+ bail!("`platform` attribute in #{provider_json} must be a hash (was %s)." % provider.platform.inspect)
+ end
+
+ # check version
+ if provider.platform['version']
+ if !Leap::Platform.version_in_range?(provider.platform.version)
+ forcible_prompt(
+ global_options[:force],
+ "The platform is pinned to a version range of '#{provider.platform.version}' "+
+ "by the `platform.version` property in #{provider_json}, but the platform "+
+ "(#{Path.platform}) has version #{Leap::Platform.version}.",
+ "Do you really want to deploy from the wrong version? "
+ )
+ end
+ end
+
+ # check branch
+ if provider.platform['branch']
+ if !is_git_directory?(Path.platform)
+ forcible_prompt(
+ global_options[:force],
+ "The platform is pinned to a particular branch by the `platform.branch` property "+
+ "in #{provider_json}, but the platform directory (#{Path.platform}) is not a git repository.",
+ "Do you really want to deploy anyway? "
+ )
+ end
+ unless provider.platform.branch == current_git_branch(Path.platform)
+ forcible_prompt(
+ global_options[:force],
+ "The platform is pinned to branch '#{provider.platform.branch}' by the `platform.branch` property "+
+ "in #{provider_json}, but the current branch is '#{current_git_branch(Path.platform)}' " +
+ "(for directory '#{Path.platform}')",
+ "Do you really want to deploy from the wrong branch? "
+ )
+ end
+ end
+
+ # check commit
+ if provider.platform['commit']
+ if !is_git_directory?(Path.platform)
+ forcible_prompt(
+ global_options[:force],
+ "The platform is pinned to a particular commit range by the `platform.commit` property "+
+ "in #{provider_json}, but the platform directory (#{Path.platform}) is not a git repository.",
+ "Do you really want to deploy anyway? "
+ )
+ end
+ current_commit = current_git_commit(Path.platform)
+ Dir.chdir(Path.platform) do
+ commit_range = assert_run!("git log --pretty='format:%H' '#{provider.platform.commit}'",
+ "The platform is pinned to a particular commit range by the `platform.commit` property "+
+ "in #{provider_json}, but git was not able to find commits in the range specified "+
+ "(#{provider.platform.commit}).")
+ commit_range = commit_range.split("\n")
+ if !commit_range.include?(current_commit) &&
+ provider.platform.commit.split('..').first != current_commit
+ forcible_prompt(
+ global_options[:force],
+ "The platform is pinned via the `platform.commit` property in #{provider_json} " +
+ "to a commit in the range #{provider.platform.commit}, but the current HEAD " +
+ "(#{current_commit}) is not in that range.",
+ "Do you really want to deploy from the wrong commit? "
+ )
+ end
+ end
+ end
+ end
+
+ def sync_hiera_config(ssh)
+ ssh.rsync.update do |server|
+ node = manager.node(server.host)
+ hiera_file = Path.relative_path([:hiera, node.name])
+ ssh.leap.log hiera_file + ' -> ' + node.name + ':' + Leap::Platform.hiera_path
+ {
+ :source => hiera_file,
+ :dest => Leap::Platform.hiera_path,
+ :flags => "-rltp --chmod=u+rX,go-rwx"
+ }
+ end
+ end
+
+ #
+ # sync various support files.
+ #
+ def sync_support_files(ssh)
+ dest_dir = Leap::Platform.files_dir
+ custom_files = build_custom_file_list
+ ssh.rsync.update do |server|
+ node = manager.node(server.host)
+ files_to_sync = node.file_paths.collect {|path| Path.relative_path(path, Path.provider) }
+ files_to_sync += custom_files
+ if files_to_sync.any?
+ ssh.leap.log(files_to_sync.join(', ') + ' -> ' + node.name + ':' + dest_dir)
+ {
+ :chdir => Path.named_path(:files_dir),
+ :source => ".",
+ :dest => dest_dir,
+ :excludes => "*",
+ :includes => calculate_includes_from_files(files_to_sync, '/files'),
+ :flags => "-rltp --chmod=u+rX,go-rwx --relative --delete --delete-excluded --copy-links"
+ }
+ else
+ nil
+ end
+ end
+ end
+
+ def sync_puppet_files(ssh)
+ ssh.rsync.update do |server|
+ ssh.leap.log(Path.platform + '/[bin,tests,puppet] -> ' + server.host + ':' + Leap::Platform.leap_dir)
+ {
+ :dest => Leap::Platform.leap_dir,
+ :source => '.',
+ :chdir => Path.platform,
+ :excludes => '*',
+ :includes => ['/bin', '/bin/**', '/puppet', '/puppet/**', '/tests', '/tests/**'],
+ :flags => "-rlt --relative --delete --copy-links"
+ }
+ end
+ end
+
+ #
+ # ensure submodules are up to date, if the platform is a git
+ # repository.
+ #
+ def init_submodules
+ return unless is_git_directory?(Path.platform)
+ Dir.chdir Path.platform do
+ assert_run! "git submodule sync"
+ statuses = assert_run! "git submodule status"
+ statuses.strip.split("\n").each do |status_line|
+ if status_line =~ /^[\+-]/
+ submodule = status_line.split(' ')[1]
+ log "Updating submodule #{submodule}"
+ assert_run! "git submodule update --init #{submodule}"
+ end
+ end
+ end
+ end
+
+ #
+ # converts an array of file paths into an array
+ # suitable for --include of rsync
+ #
+ # if set, `prefix` is stripped off.
+ #
+ def calculate_includes_from_files(files, prefix=nil)
+ return nil unless files and files.any?
+
+ # prepend '/' (kind of like ^ for rsync)
+ includes = files.collect {|file| file =~ /^\// ? file : '/' + file }
+
+ # include all sub files of specified directories
+ includes.size.times do |i|
+ if includes[i] =~ /\/$/
+ includes << includes[i] + '**'
+ end
+ end
+
+ # include all parent directories (required because of --exclude '*')
+ includes.size.times do |i|
+ path = File.dirname(includes[i])
+ while(path != '/')
+ includes << path unless includes.include?(path)
+ path = File.dirname(path)
+ end
+ end
+
+ if prefix
+ includes.map! {|path| path.sub(/^#{Regexp.escape(prefix)}\//, '/')}
+ end
+
+ return includes
+ end
+
+ def tags(options)
+ if options[:tags]
+ tags = options[:tags].split(',')
+ else
+ tags = Leap::Platform.default_puppet_tags.dup
+ end
+ tags << 'leap_slow' unless options[:fast]
+ tags.join(',')
+ end
+
+ #
+ # a provider might have various customization files that should be sync'ed to the server.
+ # this method builds that list of files to sync.
+ #
+ def build_custom_file_list
+ custom_files = []
+ Leap::Platform.paths.keys.grep(/^custom_/).each do |path|
+ if file_exists?(path)
+ relative_path = Path.relative_path(path, Path.provider)
+ if dir_exists?(path)
+ custom_files << relative_path + '/' # rsync needs trailing slash
+ else
+ custom_files << relative_path
+ end
+ end
+ end
+ return custom_files
+ end
+
+ def deploy_info
+ info = []
+ info << "user: %s" % Etc.getpwuid(Process.euid).name
+ if is_git_directory?(Path.platform) && current_git_branch(Path.platform) != 'master'
+ info << "platform: %s (%s %s)" % [
+ Leap::Platform.version,
+ current_git_branch(Path.platform),
+ current_git_commit(Path.platform)[0..4]
+ ]
+ else
+ info << "platform: %s" % Leap::Platform.version
+ end
+ if is_git_directory?(LEAP_CLI_BASE_DIR)
+ info << "leap_cli: %s (%s %s)" % [
+ LeapCli::VERSION,
+ current_git_branch(LEAP_CLI_BASE_DIR),
+ current_git_commit(LEAP_CLI_BASE_DIR)[0..4]
+ ]
+ else
+ info << "leap_cli: %s" % LeapCli::VERSION
+ end
+ info.join(', ')
+ end
+ end
+end
diff --git a/lib/leap_cli/commands/env.rb b/lib/leap_cli/commands/env.rb
new file mode 100644
index 00000000..80be2174
--- /dev/null
+++ b/lib/leap_cli/commands/env.rb
@@ -0,0 +1,76 @@
+module LeapCli
+ module Commands
+
+ desc "Manipulate and query environment information."
+ long_desc "The 'environment' node property can be used to isolate sets of nodes into entirely separate environments. "+
+ "A node in one environment will never interact with a node from another environment. "+
+ "Environment pinning works by modifying your ~/.leaprc file and is dependent on the "+
+ "absolute file path of your provider directory (pins don't apply if you move the directory)"
+ command [:env, :e] do |c|
+ c.desc "List the available environments. The pinned environment, if any, will be marked with '*'. Will also set the pin if run with an environment argument."
+ c.arg_name 'ENVIRONMENT', :optional => true
+ c.command :ls do |ls|
+ ls.action do |global_options, options, args|
+ environment = get_env_from_args(args)
+ if environment
+ pin(environment)
+ LeapCli.leapfile.load
+ end
+ print_envs
+ end
+ end
+
+ c.desc 'Pin the environment to ENVIRONMENT. All subsequent commands will only apply to nodes in this environment.'
+ c.arg_name 'ENVIRONMENT'
+ c.command :pin do |pin|
+ pin.action do |global_options,options,args|
+ environment = get_env_from_args(args)
+ if environment
+ pin(environment)
+ else
+ bail! "There is no environment `#{environment}`"
+ end
+ end
+ end
+
+ c.desc "Unpin the environment. All subsequent commands will apply to all nodes."
+ c.command :unpin do |unpin|
+ unpin.action do |global_options, options, args|
+ LeapCli.leapfile.unset('environment')
+ log 0, :saved, "~/.leaprc, removing environment property."
+ end
+ end
+
+ c.default_command :ls
+ end
+
+ protected
+
+ def get_env_from_args(args)
+ environment = args.first
+ if environment == 'default' || (environment && manager.environment_names.include?(environment))
+ return environment
+ else
+ return nil
+ end
+ end
+
+ def pin(environment)
+ LeapCli.leapfile.set('environment', environment)
+ log 0, :saved, "~/.leaprc with environment set to #{environment}."
+ end
+
+ def print_envs
+ envs = ["default"] + manager.environment_names.compact.sort
+ envs.each do |env|
+ if env
+ if LeapCli.leapfile.environment == env
+ puts "* #{env}"
+ else
+ puts " #{env}"
+ end
+ end
+ end
+ end
+ end
+end \ No newline at end of file
diff --git a/lib/leap_cli/commands/facts.rb b/lib/leap_cli/commands/facts.rb
new file mode 100644
index 00000000..11329ccc
--- /dev/null
+++ b/lib/leap_cli/commands/facts.rb
@@ -0,0 +1,100 @@
+#
+# Gather facter facts
+#
+
+module LeapCli; module Commands
+
+ desc 'Gather information on nodes.'
+ command :facts do |facts|
+ facts.desc 'Query servers to update facts.json.'
+ facts.long_desc "Queries every node included in FILTER and saves the important information to facts.json"
+ facts.arg_name 'FILTER'
+ facts.command :update do |update|
+ update.action do |global_options,options,args|
+ update_facts(global_options, options, args)
+ end
+ end
+ end
+
+ protected
+
+ def facter_cmd
+ 'facter --json ' + Leap::Platform.facts.join(' ')
+ end
+
+ def remove_node_facts(name)
+ if file_exists?(:facts)
+ update_facts_file({name => nil})
+ end
+ end
+
+ def update_node_facts(name, facts)
+ update_facts_file({name => facts})
+ end
+
+ def rename_node_facts(old_name, new_name)
+ if file_exists?(:facts)
+ facts = JSON.parse(read_file(:facts) || '{}')
+ facts[new_name] = facts[old_name]
+ facts[old_name] = nil
+ update_facts_file(facts, true)
+ end
+ end
+
+ #
+ # if overwrite = true, then ignore existing facts.json.
+ #
+ def update_facts_file(new_facts, overwrite=false)
+ replace_file!(:facts) do |content|
+ if overwrite || content.nil? || content.empty?
+ old_facts = {}
+ else
+ old_facts = manager.facts
+ end
+ facts = old_facts.merge(new_facts)
+ facts.each do |name, value|
+ if value.is_a? String
+ if value == ""
+ value = nil
+ else
+ value = JSON.parse(value) rescue JSON::ParserError
+ end
+ end
+ if value.is_a? Hash
+ value.delete_if {|key,v| v.nil?}
+ end
+ facts[name] = value
+ end
+ facts.delete_if do |name, value|
+ value.nil? || value.empty?
+ end
+ if facts.empty?
+ "{}\n"
+ else
+ JSON.sorted_generate(facts) + "\n"
+ end
+ end
+ end
+
+ private
+
+ def update_facts(global_options, options, args)
+ nodes = manager.filter(args, :local => false, :disabled => false)
+ new_facts = {}
+ ssh_connect(nodes) do |ssh|
+ ssh.leap.run_with_progress(facter_cmd) do |response|
+ node = manager.node(response[:host])
+ if node
+ new_facts[node.name] = response[:data].strip
+ else
+ log :warning, 'Could not find node for hostname %s' % response[:host]
+ end
+ end
+ end
+ # only overwrite the entire facts file if we are gathering facts
+ # for all nodes in all environments.
+ overwrite_existing = args.empty? && LeapCli.leapfile.environment.nil?
+ update_facts_file(new_facts, overwrite_existing)
+ end
+
+end; end \ No newline at end of file
diff --git a/lib/leap_cli/commands/info.rb b/lib/leap_cli/commands/info.rb
new file mode 100644
index 00000000..52225a94
--- /dev/null
+++ b/lib/leap_cli/commands/info.rb
@@ -0,0 +1,15 @@
+module LeapCli; module Commands
+
+ desc 'Prints information regarding facts, history, and running processes for a node or nodes.'
+ long_desc 'The FILTER can be the name of a node, service, or tag.'
+ arg_name 'FILTER'
+ command [:info] do |c|
+ c.action do |global,options,args|
+ nodes = manager.filter!(args)
+ ssh_connect(nodes, connect_options(options)) do |ssh|
+ ssh.leap.debug
+ end
+ end
+ end
+
+end; end
diff --git a/lib/leap_cli/commands/inspect.rb b/lib/leap_cli/commands/inspect.rb
new file mode 100644
index 00000000..20654fa7
--- /dev/null
+++ b/lib/leap_cli/commands/inspect.rb
@@ -0,0 +1,144 @@
+module LeapCli; module Commands
+
+ desc 'Prints details about a file. Alternately, the argument FILE can be the name of a node, service or tag.'
+ arg_name 'FILE'
+ command [:inspect, :i] do |c|
+ c.switch 'base', :desc => 'Inspect the FILE from the provider_base (i.e. without local inheritance).', :negatable => false
+ c.action do |global_options,options,args|
+ object = args.first
+ assert! object, 'A file path or node/service/tag name is required'
+ method = inspection_method(object)
+ if method && defined?(method)
+ self.send(method, object, options)
+ else
+ log "Sorry, I don't know how to inspect that."
+ end
+ end
+ end
+
+ private
+
+ FTYPE_MAP = {
+ "PEM certificate" => :inspect_x509_cert,
+ "PEM RSA private key" => :inspect_x509_key,
+ "OpenSSH RSA public key" => :inspect_ssh_pub_key,
+ "PEM certificate request" => :inspect_x509_csr
+ }
+
+ def inspection_method(object)
+ if File.exists?(object)
+ ftype = `file #{object}`.split(':').last.strip
+ log 2, "file is of type '#{ftype}'"
+ if FTYPE_MAP[ftype]
+ FTYPE_MAP[ftype]
+ elsif File.extname(object) == ".json"
+ full_path = File.expand_path(object, Dir.pwd)
+ if path_match?(:node_config, full_path)
+ :inspect_node
+ elsif path_match?(:service_config, full_path)
+ :inspect_service
+ elsif path_match?(:tag_config, full_path)
+ :inspect_tag
+ elsif path_match?(:provider_config, full_path) || path_match?(:provider_env_config, full_path)
+ :inspect_provider
+ elsif path_match?(:common_config, full_path)
+ :inspect_common
+ else
+ nil
+ end
+ end
+ elsif manager.nodes[object]
+ :inspect_node
+ elsif manager.services[object]
+ :inspect_service
+ elsif manager.tags[object]
+ :inspect_tag
+ elsif object == "common"
+ :inspect_common
+ elsif object == "provider"
+ :inspect_provider
+ else
+ nil
+ end
+ end
+
+ #
+ # inspectors
+ #
+
+ def inspect_x509_key(file_path, options)
+ assert_bin! 'openssl'
+ puts assert_run! 'openssl rsa -in %s -text -check' % file_path
+ end
+
+ def inspect_x509_cert(file_path, options)
+ assert_bin! 'openssl'
+ puts assert_run! 'openssl x509 -in %s -text -noout' % file_path
+ log 0, :"SHA256 fingerprint", X509.fingerprint("SHA256", file_path)
+ end
+
+ def inspect_x509_csr(file_path, options)
+ assert_bin! 'openssl'
+ puts assert_run! 'openssl req -text -noout -verify -in %s' % file_path
+ end
+
+ #def inspect_ssh_pub_key(file_path)
+ #end
+
+ def inspect_node(arg, options)
+ inspect_json manager.nodes[name(arg)]
+ end
+
+ def inspect_service(arg, options)
+ if options[:base]
+ inspect_json manager.base_services[name(arg)]
+ else
+ inspect_json manager.services[name(arg)]
+ end
+ end
+
+ def inspect_tag(arg, options)
+ if options[:base]
+ inspect_json manager.base_tags[name(arg)]
+ else
+ inspect_json manager.tags[name(arg)]
+ end
+ end
+
+ def inspect_provider(arg, options)
+ if options[:base]
+ inspect_json manager.base_provider
+ elsif arg =~ /provider\.(.*)\.json/
+ inspect_json manager.env($1).provider
+ else
+ inspect_json manager.provider
+ end
+ end
+
+ def inspect_common(arg, options)
+ if options[:base]
+ inspect_json manager.base_common
+ else
+ inspect_json manager.common
+ end
+ end
+
+ #
+ # helpers
+ #
+
+ def name(arg)
+ File.basename(arg).sub(/\.json$/, '')
+ end
+
+ def inspect_json(config)
+ if config
+ puts JSON.sorted_generate(config)
+ end
+ end
+
+ def path_match?(path_symbol, path)
+ Dir.glob(Path.named_path([path_symbol, '*'])).include?(path)
+ end
+
+end; end
diff --git a/lib/leap_cli/commands/list.rb b/lib/leap_cli/commands/list.rb
new file mode 100644
index 00000000..aa425432
--- /dev/null
+++ b/lib/leap_cli/commands/list.rb
@@ -0,0 +1,132 @@
+require 'command_line_reporter'
+
+module LeapCli; module Commands
+
+ desc 'List nodes and their classifications'
+ long_desc 'Prints out a listing of nodes, services, or tags. ' +
+ 'If present, the FILTER can be a list of names of nodes, services, or tags. ' +
+ 'If the name is prefixed with +, this acts like an AND condition. ' +
+ "For example:\n\n" +
+ "`leap list node1 node2` matches all nodes named \"node1\" OR \"node2\"\n\n" +
+ "`leap list openvpn +local` matches all nodes with service \"openvpn\" AND tag \"local\""
+
+ arg_name 'FILTER', :optional => true
+ command [:list,:ls] do |c|
+ c.flag 'print', :desc => 'What attributes to print (optional)'
+ c.switch 'disabled', :desc => 'Include disabled nodes in the list.', :negatable => false
+ c.action do |global_options,options,args|
+ # don't rely on default manager(), because we want to pass custom options to load()
+ manager = LeapCli::Config::Manager.new
+ if global_options[:color]
+ colors = ['cyan', 'white']
+ else
+ colors = [nil, nil]
+ end
+ puts
+ manager.load(:include_disabled => options['disabled'], :continue_on_error => true)
+ if options['print']
+ print_node_properties(manager.filter(args), options['print'])
+ else
+ if args.any?
+ NodeTable.new(manager.filter(args), colors).run
+ else
+ environment = LeapCli.leapfile.environment || '_all_'
+ TagTable.new('SERVICES', manager.env(environment).services, colors).run
+ TagTable.new('TAGS', manager.env(environment).tags, colors).run
+ NodeTable.new(manager.filter(), colors).run
+ end
+ end
+ end
+ end
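+
+ # For example (illustrative):
+ #
+ #   leap list --print ip_address openvpn
+ #
+ # prints the ip_address property of every node that has the openvpn service.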
+
+ private
+
+ def self.print_node_properties(nodes, properties)
+ properties = properties.split(',')
+ max_width = nodes.keys.inject(0) {|max,i| [i.size,max].max}
+ nodes.each_node do |node|
+ value = properties.collect{|prop|
+ prop_value = node[prop]
+ if prop_value.nil?
+ "null"
+ elsif prop_value == ""
+ "empty"
+ elsif prop_value.is_a? LeapCli::Config::Object
+ node[prop].dump_json(:format => :compact) # TODO: add option of getting pre-evaluation values.
+ else
+ prop_value.to_s
+ end
+ }.join(', ')
+ printf("%#{max_width}s %s\n", node.name, value)
+ end
+ puts
+ end
+
+ class TagTable
+ include CommandLineReporter
+ def initialize(heading, tag_list, colors)
+ @heading = heading
+ @tag_list = tag_list
+ @colors = colors
+ end
+ def run
+ tags = @tag_list.keys.select{|tag| tag !~ /^_/}.sort # sorted list of tags, excluding _partials
+ max_width = [20, (tags+[@heading]).inject(0) {|max,i| [i.size,max].max}].max
+ table :border => false do
+ row :color => @colors[0] do
+ column @heading, :align => 'right', :width => max_width
+ column "NODES", :width => HighLine::SystemExtensions.terminal_size.first - max_width - 2, :padding => 2
+ end
+ tags.each do |tag|
+ next if @tag_list[tag].node_list.empty?
+ row :color => @colors[1] do
+ column tag
+ column @tag_list[tag].node_list.keys.sort.join(', ')
+ end
+ end
+ end
+ vertical_spacing
+ end
+ end
+
+ #
+ # might be handy: HighLine::SystemExtensions.terminal_size.first
+ #
+ class NodeTable
+ include CommandLineReporter
+ def initialize(node_list, colors)
+ @node_list = node_list
+ @colors = colors
+ end
+ def run
+ rows = @node_list.keys.sort.collect do |node_name|
+ [node_name, @node_list[node_name].services.sort.join(', '), @node_list[node_name].tags.sort.join(', ')]
+ end
+ unless rows.any?
+ puts Paint["no results", :red]
+ puts
+ return
+ end
+ padding = 2
+ max_node_width = [20, (rows.map{|i|i[0]} + ["NODES"] ).inject(0) {|max,i| [i.size,max].max}].max
+ max_service_width = (rows.map{|i|i[1]} + ["SERVICES"]).inject(0) {|max,i| [i.size+padding+padding,max].max}
+ max_tag_width = (rows.map{|i|i[2]} + ["TAGS"] ).inject(0) {|max,i| [i.size,max].max}
+ table :border => false do
+ row :color => @colors[0] do
+ column "NODES", :align => 'right', :width => max_node_width
+ column "SERVICES", :width => max_service_width, :padding => 2
+ column "TAGS", :width => max_tag_width
+ end
+ rows.each do |r|
+ row :color => @colors[1] do
+ column r[0]
+ column r[1]
+ column r[2]
+ end
+ end
+ end
+ vertical_spacing
+ end
+ end
+
+end; end
diff --git a/lib/leap_cli/commands/node.rb b/lib/leap_cli/commands/node.rb
new file mode 100644
index 00000000..a23661b3
--- /dev/null
+++ b/lib/leap_cli/commands/node.rb
@@ -0,0 +1,188 @@
+#
+# fyi: the `node init` command lives in node_init.rb,
+# but all other `node x` commands live here.
+#
+
+autoload :IPAddr, 'ipaddr'
+
+module LeapCli; module Commands
+
+ ##
+ ## COMMANDS
+ ##
+
+ desc 'Node management'
+ command [:node, :n] do |node|
+ node.desc 'Create a new configuration file for a node named NAME.'
+ node.long_desc ["If specified, the optional argument SEED can be used to seed values in the node configuration file.",
+ "The format is property_name:value.",
+ "For example: `leap node add web1 ip_address:1.2.3.4 services:webapp`.",
+ "To set nested properties, property name can contain '.', like so: `leap node add web1 ssh.port:44`",
+ "Separate multiple values for a single property with a comma, like so: `leap node add mynode services:webapp,dns`"].join("\n\n")
+ node.arg_name 'NAME [SEED]' # , :optional => false, :multiple => false
+ node.command :add do |add|
+ add.switch :local, :desc => 'Make a local testing node (by automatically assigning the next available local IP address). Local nodes are run as virtual machines on your computer.', :negatable => false
+ add.action do |global_options,options,args|
+ # argument sanity checks
+ name = args.first
+ assert_valid_node_name!(name, options[:local])
+ assert_files_missing! [:node_config, name]
+
+ # create and seed new node
+ node = Config::Node.new(manager.env)
+ if options[:local]
+ node['ip_address'] = pick_next_vagrant_ip_address
+ end
+ seed_node_data_from_cmd_line(node, args[1..-1])
+ seed_node_data_from_template(node)
+ validate_ip_address(node)
+ begin
+ node['name'] = name
+ json = node.dump_json(:exclude => ['name'])
+ write_file!([:node_config, name], json + "\n")
+ if file_exists? :ca_cert, :ca_key
+ generate_cert_for_node(manager.reload_node!(node))
+ end
+ rescue LeapCli::ConfigError => exc
+ remove_node_files(name)
+ end
+ end
+ end
+
+ node.desc 'Renames a node file, and all its related files.'
+ node.arg_name 'OLD_NAME NEW_NAME'
+ node.command :mv do |mv|
+ mv.action do |global_options,options,args|
+ node = get_node_from_args(args, include_disabled: true)
+ new_name = args.last
+ assert_valid_node_name!(new_name, node.vagrant?)
+ ensure_dir [:node_files_dir, new_name]
+ Leap::Platform.node_files.each do |path|
+ rename_file! [path, node.name], [path, new_name]
+ end
+ remove_directory! [:node_files_dir, node.name]
+ rename_node_facts(node.name, new_name)
+ end
+ end
+
+ node.desc 'Removes all the files related to the node named NAME.'
+ node.arg_name 'NAME' #:optional => false #, :multiple => false
+ node.command :rm do |rm|
+ rm.action do |global_options,options,args|
+ node = get_node_from_args(args, include_disabled: true)
+ remove_node_files(node.name)
+ if node.vagrant?
+ vagrant_command("destroy --force", [node.name])
+ end
+ remove_node_facts(node.name)
+ end
+ end
+ end
+
+ ##
+ ## PUBLIC HELPERS
+ ##
+
+ def get_node_from_args(args, options={})
+ node_name = args.first
+ node = manager.node(node_name)
+ if node.nil? && options[:include_disabled]
+ node = manager.disabled_node(node_name)
+ end
+ assert!(node, "Node '#{node_name}' not found.")
+ node
+ end
+
+ def seed_node_data_from_cmd_line(node, args)
+ args.each do |seed|
+ key, value = seed.split(':', 2)
+ value = format_seed_value(value)
+ assert! key =~ /^[0-9a-z\._]+$/, "illegal characters used in property '#{key}'"
+ if key =~ /\./
+ key_parts = key.split('.')
+ final_key = key_parts.pop
+ current_object = node
+ key_parts.each do |key_part|
+ current_object[key_part] ||= Config::Object.new
+ current_object = current_object[key_part]
+ end
+ current_object[final_key] = value
+ else
+ node[key] = value
+ end
+ end
+ end
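+
+ # For example (illustrative): the seed "ssh.port:44" sets
+ # node['ssh']['port'] = 44, and "services:webapp,dns" sets
+ # node['services'] = ["webapp", "dns"].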
+
+ #
+ # load "new node template" information into the `node`, modifying `node`.
+ # values in the template will not override existing node values.
+ #
+ def seed_node_data_from_template(node)
+ node.inherit_from!(manager.template('common'))
+ [node['services']].flatten.each do |service|
+ if service
+ template = manager.template(service)
+ if template
+ node.inherit_from!(template)
+ end
+ end
+ end
+ end
+
+ def remove_node_files(node_name)
+ (Leap::Platform.node_files + [:node_files_dir]).each do |path|
+ remove_file! [path, node_name]
+ end
+ end
+
+ #
+ # conversions:
+ #
+ # "x,y,z" => ["x","y","z"]
+ #
+ # "22" => 22
+ #
+ # "5.1" => 5.1
+ #
+ def format_seed_value(v)
+ if v =~ /,/
+ v = v.split(',')
+ v.map! do |i|
+ i = i.to_i if i.to_i.to_s == i
+ i = i.to_f if i.to_f.to_s == i
+ i
+ end
+ else
+ v = v.to_i if v.to_i.to_s == v
+ v = v.to_f if v.to_f.to_s == v
+ end
+ return v
+ end
+
+ def validate_ip_address(node)
+ if node['ip_address'] == "REQUIRED"
+ bail! do
+ log :error, "ip_address is not set. Specify with `leap node add NAME ip_address:ADDRESS`."
+ end
+ end
+ IPAddr.new(node['ip_address'])
+ rescue ArgumentError
+ bail! do
+ if node['ip_address']
+ log :invalid, "ip_address #{node['ip_address'].inspect}"
+ else
+ log :missing, "ip_address"
+ end
+ end
+ end
+
+ def assert_valid_node_name!(name, local=false)
+ assert! name, 'No <node-name> specified.'
+ if local
+ assert! name =~ /^[0-9a-z]+$/, "illegal characters used in node name '#{name}' (note: Vagrant does not allow hyphens or underscores)"
+ else
+ assert! name =~ /^[0-9a-z-]+$/, "illegal characters used in node name '#{name}' (note: Linux does not allow underscores)"
+ end
+ end
+
+end; end
diff --git a/lib/leap_cli/commands/node_init.rb b/lib/leap_cli/commands/node_init.rb
new file mode 100644
index 00000000..33f6288d
--- /dev/null
+++ b/lib/leap_cli/commands/node_init.rb
@@ -0,0 +1,169 @@
+#
+# Node initialization.
+# Most of the fun stuff is in tasks.rb.
+#
+
+module LeapCli; module Commands
+
+ desc 'Node management'
+ command :node do |node|
+ node.desc 'Bootstraps a node or nodes, setting up SSH keys and installing prerequisite packages'
+ node.long_desc "This command prepares a server to be used with the LEAP Platform by saving the server's SSH host key, " +
+ "copying the authorized_keys file, installing packages that are required for deploying, and registering important facts. " +
+ "Node init must be run before deploying to a server, and the server must be running and available via the network. " +
+ "This command only needs to be run once, but there is no harm in running it multiple times."
+ node.arg_name 'FILTER'
+ node.command :init do |init|
+ init.switch 'echo', :desc => 'If set, passwords are visible as you type them (default is hidden)', :negatable => false
+ init.flag :port, :desc => 'Override the default SSH port.', :arg_name => 'PORT'
+ init.flag :ip, :desc => 'Override the default SSH IP address.', :arg_name => 'IPADDRESS'
+
+ init.action do |global,options,args|
+ assert! args.any?, 'You must specify a FILTER'
+ finished = []
+ manager.filter!(args).each_node do |node|
+ is_node_alive(node, options)
+ save_public_host_key(node, global, options) unless node.vagrant?
+ update_compiled_ssh_configs
+ ssh_connect_options = connect_options(options).merge({:bootstrap => true, :echo => options[:echo]})
+ ssh_connect(node, ssh_connect_options) do |ssh|
+ if node.vagrant?
+ ssh.install_insecure_vagrant_key
+ end
+ ssh.install_authorized_keys
+ ssh.install_prerequisites
+ unless node.vagrant?
+ ssh.leap.log(:checking, "SSH host keys") do
+ ssh.leap.capture(get_ssh_keys_cmd) do |response|
+ update_local_ssh_host_keys(node, response[:data]) if response[:exitcode] == 0
+ end
+ end
+ end
+ ssh.leap.log(:updating, "facts") do
+ ssh.leap.capture(facter_cmd) do |response|
+ if response[:exitcode] == 0
+ update_node_facts(node.name, response[:data])
+ else
+ log :failed, "to run facter on #{node.name}"
+ end
+ end
+ end
+ end
+ finished << node.name
+ end
+ log :completed, "initialization of nodes #{finished.join(', ')}"
+ end
+ end
+ end
+
+ private
+
+ ##
+ ## PRIVATE HELPERS
+ ##
+
+ def is_node_alive(node, options)
+ address = options[:ip] || node.ip_address
+ port = options[:port] || node.ssh.port
+ log :connecting, "to node #{node.name}"
+ assert_run! "nc -zw3 #{address} #{port}",
+ "Failed to reach #{node.name} (address #{address}, port #{port}). You can override the configured IP address and port with --ip or --port."
+ end
+
+ #
+ # saves the public ssh host key for node into the provider directory.
+ #
+ # see `man sshd` for the format of known_hosts
+ #
+ def save_public_host_key(node, global, options)
+ log :fetching, "public SSH host key for #{node.name}"
+ address = options[:ip] || node.ip_address
+ port = options[:port] || node.ssh.port
+ host_keys = get_public_keys_for_ip(address, port)
+ pub_key_path = Path.named_path([:node_ssh_pub_key, node.name])
+
+ if Path.exists?(pub_key_path)
+ if host_keys.include? SshKey.load(pub_key_path)
+ log :trusted, "- Public SSH host key for #{node.name} matches previously saved key", :indent => 1
+ else
+ bail! do
+ log :error, "The public SSH host key we just fetched for #{node.name} doesn't match what we have saved previously.", :indent => 1
+ log "Delete the file #{pub_key_path} if you really want to remove the trusted SSH host key.", :indent => 2
+ end
+ end
+ else
+ known_key = host_keys.detect{|k|k.in_known_hosts?(node.name, node.ip_address, node.domain.name)}
+ if known_key
+ log :trusted, "- Public SSH host key for #{node.name} is trusted (key found in your ~/.ssh/known_hosts)"
+ else
+ public_key = SshKey.pick_best_key(host_keys)
+ if public_key.nil?
+ bail!("We got back #{host_keys.size} host keys from #{node.name}, but we can't support any of them.")
+ else
+ say(" This is the SSH host key you got back from node \"#{node.name}\"")
+ say(" Type -- #{public_key.bits} bit #{public_key.type.upcase}")
+ say(" Fingerprint -- " + public_key.fingerprint)
+ say(" Public Key -- " + public_key.key)
+ if !global[:yes] && !agree(" Is this correct? ")
+ bail!
+ else
+ known_key = public_key
+ end
+ end
+ end
+ puts
+ write_file! [:node_ssh_pub_key, node.name], known_key.to_s
+ end
+ end
+
+ #
+ # Get the public host keys for a host using ssh-keyscan.
+ # Return an array of SshKey objects, one for each key.
+ #
+ def get_public_keys_for_ip(address, port=22)
+ assert_bin!('ssh-keyscan')
+ output = assert_run! "ssh-keyscan -p #{port} #{address}", "Could not get the public host key from #{address}:#{port}. Maybe sshd is not running?"
+ if output.empty?
+ bail! :failed, "ssh-keyscan returned empty output."
+ end
+
+ if output =~ /No route to host/
+ bail! :failed, 'ssh-keyscan: no route to %s' % address
+ else
+ keys = SshKey.parse_keys(output)
+ if keys.empty?
+ bail! "ssh-keyscan got zero host keys back (that we understand)! Output was: #{output}"
+ else
+ return keys
+ end
+ end
+ end
+
+ # run on the server to generate a string suitable for passing to SshKey.parse_keys()
+ def get_ssh_keys_cmd
+ "/bin/grep ^HostKey /etc/ssh/sshd_config | /usr/bin/awk '{print $2 \".pub\"}' | /usr/bin/xargs /bin/cat"
+ end
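+
+ # Illustrative output of the above command (hypothetical values), one public
+ # key per line as found in the *.pub files:
+ #
+ #   ssh-rsa AAAAB3Nza... root@node1
+ #   ecdsa-sha2-nistp256 AAAAE2Vj... root@node1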
+
+ #
+ # Sometimes the ssh host keys on the server will be better than what we have
+ # stored locally. In these cases, ask the user if they want to upgrade.
+ #
+ def update_local_ssh_host_keys(node, remote_keys_string)
+ remote_keys = SshKey.parse_keys(remote_keys_string)
+ return unless remote_keys.any?
+ current_key = SshKey.load(Path.named_path([:node_ssh_pub_key, node.name]))
+ best_key = SshKey.pick_best_key(remote_keys)
+ return unless best_key && current_key
+ if current_key != best_key
+ say(" One of the SSH host keys for node '#{node.name}' is better than what you currently have trusted.")
+ say(" Current key: #{current_key.summary}")
+ say(" Better key: #{best_key.summary}")
+ if agree(" Do you want to use the better key? ")
+ write_file! [:node_ssh_pub_key, node.name], best_key.to_s
+ end
+ else
+ log(3, "current host key does not need updating")
+ end
+ end
+
+end; end
diff --git a/lib/leap_cli/commands/ssh.rb b/lib/leap_cli/commands/ssh.rb
new file mode 100644
index 00000000..3887618e
--- /dev/null
+++ b/lib/leap_cli/commands/ssh.rb
@@ -0,0 +1,225 @@
+module LeapCli; module Commands
+
+ desc 'Log in to the specified node with an interactive shell.'
+ arg_name 'NAME' #, :optional => false, :multiple => false
+ command :ssh do |c|
+ c.flag 'ssh', :desc => "Pass through raw options to ssh (e.g. `--ssh '-F ~/sshconfig'`)."
+ c.flag 'port', :arg_name => 'SSH_PORT', :desc => 'Override default SSH port used when trying to connect to the server. Same as `--ssh "-p SSH_PORT"`.'
+ c.action do |global_options,options,args|
+ exec_ssh(:ssh, options, args)
+ end
+ end
+
+ desc 'Log in to the specified node with an interactive shell using mosh (requires node to have mosh.enabled set to true).'
+ arg_name 'NAME'
+ command :mosh do |c|
+ c.flag 'ssh', :desc => "Pass through raw options to ssh (e.g. `--ssh '-F ~/sshconfig'`)."
+ c.flag 'port', :arg_name => 'SSH_PORT', :desc => 'Override default SSH port used when trying to connect to the server. Same as `--ssh "-p SSH_PORT"`.'
+ c.action do |global_options,options,args|
+ exec_ssh(:mosh, options, args)
+ end
+ end
+
+ desc 'Creates an SSH port forward (tunnel) to the node NAME. REMOTE_PORT is the port on the remote node that the tunnel will connect to. LOCAL_PORT is the optional port on your local machine. For example: `leap tunnel couch1:5984`.'
+ arg_name '[LOCAL_PORT:]NAME:REMOTE_PORT'
+ command :tunnel do |c|
+ c.flag 'ssh', :desc => "Pass through raw options to ssh (e.g. --ssh '-F ~/sshconfig')."
+ c.flag 'port', :arg_name => 'SSH_PORT', :desc => 'Override default SSH port used when trying to connect to the server. Same as `--ssh "-p SSH_PORT"`.'
+ c.action do |global_options,options,args|
+ local_port, node, remote_port = parse_tunnel_arg(args.first)
+ unless node.ssh.config.AllowTcpForwarding == "yes"
+ log :warning, "It looks like TCP forwarding is not enabled. "+
+ "The tunnel command requires that the node property ssh.config.AllowTcpForwarding "+
+ "be set to 'yes'. Add this property to #{node.name}.json, deploy, and then try tunnel again."
+ end
+ options[:ssh] = [options[:ssh], "-N -L 127.0.0.1:#{local_port}:0.0.0.0:#{remote_port}"].join(' ')
+ log("Forward port localhost:#{local_port} to #{node.name}:#{remote_port}")
+ if is_port_available?(local_port)
+ exec_ssh(:ssh, options, [node.name])
+ end
+ end
+ end
+
+ desc 'Secure copy from FILE1 to FILE2. Files are specified as NODE_NAME:FILE_PATH. For local paths, omit "NODE_NAME:".'
+ arg_name 'FILE1 FILE2'
+ command :scp do |c|
+ c.switch :r, :desc => 'Copy recursively'
+ c.action do |global_options, options, args|
+ if args.size != 2
+ bail!('You must specify both FILE1 and FILE2')
+ end
+ from, to = args
+ if (from !~ /:/ && to !~ /:/) || (from =~ /:/ && to =~ /:/)
+ bail!('One FILE must be remote and the other local.')
+ end
+ src_node_name = src_file_path = src_node = nil
+ dst_node_name = dst_file_path = dst_node = nil
+ if from =~ /:/
+ src_node_name, src_file_path = from.split(':')
+ src_node = get_node_from_args([src_node_name], :include_disabled => true)
+ dst_file_path = to
+ else
+ dst_node_name, dst_file_path = to.split(':')
+ dst_node = get_node_from_args([dst_node_name], :include_disabled => true)
+ src_file_path = from
+ end
+ exec_scp(options, src_node, src_file_path, dst_node, dst_file_path)
+ end
+ end
+
+ protected
+
+ #
+ # allow for ssh overrides of all commands that use ssh_connect
+ #
+ def connect_options(options)
+ connect_options = {:ssh_options=>{}}
+ if options[:port]
+ connect_options[:ssh_options][:port] = options[:port]
+ end
+ if options[:ip]
+ connect_options[:ssh_options][:host_name] = options[:ip]
+ end
+ return connect_options
+ end
+
+ def ssh_config_help_message
+ puts ""
+ puts "Are 'too many authentication failures' getting you down?"
+ puts "Then we have the solution for you! Add something like this to your ~/.ssh/config file:"
+ puts " Host *.#{manager.provider.domain}"
+ puts " IdentityFile ~/.ssh/id_rsa"
+ puts " IdentitiesOnly=yes"
+ puts "(replace `id_rsa` with the actual private key filename that you use for this provider)"
+ end
+
+ require 'socket'
+ def is_port_available?(port)
+ TCPServer.open('127.0.0.1', port) {}
+ true
+ rescue Errno::EACCES
+ bail!("You don't have permission to bind to port #{port}.")
+ rescue Errno::EADDRINUSE
+ bail!("Local port #{port} is already in use. Specify LOCAL_PORT to pick another.")
+ rescue Exception => exc
+ bail!(exc.to_s)
+ end
+
+ private
+
+ def exec_ssh(cmd, cli_options, args)
+ node = get_node_from_args(args, :include_disabled => true)
+ port = node.ssh.port
+ options = ssh_config(node)
+ username = 'root'
+ if LeapCli.log_level >= 3
+ options << "-vv"
+ elsif LeapCli.log_level >= 2
+ options << "-v"
+ end
+ if cli_options[:port]
+ port = cli_options[:port]
+ end
+ if cli_options[:ssh]
+ options << cli_options[:ssh]
+ end
+ ssh = "ssh -l #{username} -p #{port} #{options.join(' ')}"
+ if cmd == :ssh
+ command = "#{ssh} #{node.domain.full}"
+ elsif cmd == :mosh
+ command = "MOSH_TITLE_NOPREFIX=1 mosh --ssh \"#{ssh}\" #{node.domain.full}"
+ end
+ log 2, command
+
+ # exec the shell command in a subprocess
+ pid = fork { exec "#{command}" }
+
+ Signal.trap("SIGINT") do
+ Process.kill("KILL", pid)
+ Process.wait(pid)
+ exit(0)
+ end
+
+ # wait for shell to exit so we can grab the exit status
+ _, status = Process.waitpid2(pid)
+
+ if status.exitstatus == 255
+ ssh_config_help_message
+ elsif status.exitstatus != 0
+ exit(status.exitstatus)
+ end
+ end
+
+ def exec_scp(cli_options, src_node, src_file_path, dst_node, dst_file_path)
+ node = src_node || dst_node
+ options = ssh_config(node)
+ port = node.ssh.port
+ username = 'root'
+ options << "-r" if cli_options[:r]
+ scp = "scp -P #{port} #{options.join(' ')}"
+ if src_node
+ command = "#{scp} #{username}@#{src_node.domain.full}:#{src_file_path} #{dst_file_path}"
+ elsif dst_node
+ command = "#{scp} #{src_file_path} #{username}@#{dst_node.domain.full}:#{dst_file_path}"
+ end
+ log 2, command
+
+ # exec the shell command in a subprocess
+ pid = fork { exec "#{command}" }
+
+ Signal.trap("SIGINT") do
+ Process.kill("KILL", pid)
+ Process.wait(pid)
+ exit(0)
+ end
+
+ # wait for shell to exit so we can grab the exit status
+ _, status = Process.waitpid2(pid)
+ exit(status.exitstatus)
+ end
+
+ #
+ # SSH command line -o options. See `man ssh_config`
+ #
+ # NOTES:
+ #
+ # The option 'HostKeyAlias=#{node.name}' is oddly incompatible with ports in the
+ # known_hosts file, so we must not use it or non-standard ports will break.
+ #
+ def ssh_config(node)
+ options = [
+ "-o 'HostName=#{node.ip_address}'",
+ "-o 'GlobalKnownHostsFile=#{path(:known_hosts)}'",
+ "-o 'UserKnownHostsFile=/dev/null'"
+ ]
+ if node.vagrant?
+ options << "-i #{vagrant_ssh_key_file}" # use the universal vagrant insecure key
+ options << "-o IdentitiesOnly=yes" # force the use of the insecure vagrant key
+ options << "-o 'StrictHostKeyChecking=no'" # blindly accept host key and don't save it
+ # (since userknownhostsfile is /dev/null)
+ else
+ options << "-o 'StrictHostKeyChecking=yes'"
+ end
+ if !node.supported_ssh_host_key_algorithms.empty?
+ options << "-o 'HostKeyAlgorithms=#{node.supported_ssh_host_key_algorithms}'"
+ end
+ return options
+ end
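+
+ # For a typical non-vagrant node, the generated options look roughly like
+ # this (illustrative values):
+ #
+ #   -o 'HostName=1.2.3.4' -o 'GlobalKnownHostsFile=<path to known_hosts>'
+ #   -o 'UserKnownHostsFile=/dev/null' -o 'StrictHostKeyChecking=yes'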
+
+ def parse_tunnel_arg(arg)
+ if arg.count(':') == 1
+ node_name, remote = arg.split(':')
+ local = nil
+ elsif arg.count(':') == 2
+ local, node_name, remote = arg.split(':')
+ else
+ bail!('Argument NAME:REMOTE_PORT required.')
+ end
+ node = get_node_from_args([node_name], :include_disabled => true)
+ remote = remote.to_i
+ local = local || remote
+ local = local.to_i
+ return [local, node, remote]
+ end
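+
+ # For example (illustrative): "couch1:5984" parses to local port 5984,
+ # node couch1, remote port 5984, while "15984:couch1:5984" parses to
+ # local port 15984, node couch1, remote port 5984.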
+
+end; end
\ No newline at end of file
diff --git a/lib/leap_cli/commands/test.rb b/lib/leap_cli/commands/test.rb
new file mode 100644
index 00000000..73207b31
--- /dev/null
+++ b/lib/leap_cli/commands/test.rb
@@ -0,0 +1,74 @@
+module LeapCli; module Commands
+
+ desc 'Run tests.'
+ command [:test, :t] do |test|
+ test.desc 'Run the test suite on FILTER nodes.'
+ test.arg_name 'FILTER', :optional => true
+ test.command :run do |run|
+ run.switch 'continue', :desc => 'Continue over errors and failures (default is --no-continue).', :negatable => true
+ run.action do |global_options,options,args|
+ test_order = File.join(Path.platform, 'tests/order.rb')
+ if File.exists?(test_order)
+ require test_order
+ end
+ manager.filter!(args).names_in_test_dependency_order.each do |node_name|
+ node = manager.nodes[node_name]
+ begin
+ ssh_connect(node) do |ssh|
+ ssh.run(test_cmd(options))
+ end
+ rescue Capistrano::CommandError => exc
+ if options[:continue]
+ exit_status(1)
+ else
+ bail!
+ end
+ end
+ end
+ end
+ end
+
+ test.desc 'Creates files needed to run tests.'
+ test.command :init do |init|
+ init.action do |global_options,options,args|
+ generate_test_client_openvpn_configs
+ end
+ end
+
+ test.default_command :run
+ end
+
+ private
+
+ def test_cmd(options)
+ if options[:continue]
+ "#{Leap::Platform.leap_dir}/bin/run_tests --continue"
+ else
+ "#{Leap::Platform.leap_dir}/bin/run_tests"
+ end
+ end
+
+ #
+ # generates a whole bunch of openvpn configs that can be used to connect to different openvpn gateways
+ #
+ def generate_test_client_openvpn_configs
+ assert_config! 'provider.ca.client_certificates.unlimited_prefix'
+ assert_config! 'provider.ca.client_certificates.limited_prefix'
+ template = read_file! Path.find_file(:test_client_openvpn_template)
+ manager.environment_names.each do |env|
+ vpn_nodes = manager.nodes[:environment => env][:services => 'openvpn']['openvpn.allow_limited' => true]
+ if vpn_nodes.any?
+ generate_test_client_cert(provider.ca.client_certificates.limited_prefix) do |key, cert|
+ write_file! [:test_openvpn_config, [env, 'limited'].compact.join('_')], Util.erb_eval(template, binding)
+ end
+ end
+ vpn_nodes = manager.nodes[:environment => env][:services => 'openvpn']['openvpn.allow_unlimited' => true]
+ if vpn_nodes.any?
+ generate_test_client_cert(provider.ca.client_certificates.unlimited_prefix) do |key, cert|
+ write_file! [:test_openvpn_config, [env, 'unlimited'].compact.join('_')], Util.erb_eval(template, binding)
+ end
+ end
+ end
+ end
+
+end; end
diff --git a/lib/leap_cli/commands/user.rb b/lib/leap_cli/commands/user.rb
new file mode 100644
index 00000000..b842e854
--- /dev/null
+++ b/lib/leap_cli/commands/user.rb
@@ -0,0 +1,136 @@
+
+#
+# perhaps we want to verify that the key files are actually the key files we expect.
+# we could use 'file' for this:
+#
+# > file ~/.gnupg/00440025.asc
+# ~/.gnupg/00440025.asc: PGP public key block
+#
+# > file ~/.ssh/id_rsa.pub
+# ~/.ssh/id_rsa.pub: OpenSSH RSA public key
+#
+
+module LeapCli
+ module Commands
+
+ desc 'Adds a new trusted sysadmin by adding public keys to the "users" directory.'
+ arg_name 'USERNAME' #, :optional => false, :multiple => false
+ command :'add-user' do |c|
+
+ c.switch 'self', :desc => 'Add yourself as a trusted sysadmin by choosing among the public keys available for the current user.', :negatable => false
+ c.flag 'ssh-pub-key', :desc => 'SSH public key file for this new user'
+ c.flag 'pgp-pub-key', :desc => 'OpenPGP public key file for this new user'
+
+ c.action do |global_options,options,args|
+ username = args.first
+ if !username.any?
+ if options[:self]
+ username ||= `whoami`.strip
+ else
+ help! "Either USERNAME argument or --self flag is required."
+ end
+ end
+ if Leap::Platform.reserved_usernames.include? username
+ bail! %(The username "#{username}" is reserved. Sorry, pick another.)
+ end
+
+ ssh_pub_key = nil
+ pgp_pub_key = nil
+
+ if options['ssh-pub-key']
+ ssh_pub_key = read_file!(options['ssh-pub-key'])
+ end
+ if options['pgp-pub-key']
+ pgp_pub_key = read_file!(options['pgp-pub-key'])
+ end
+
+ if options[:self]
+ ssh_pub_key ||= pick_ssh_key.to_s
+ pgp_pub_key ||= pick_pgp_key
+ end
+
+ assert!(ssh_pub_key, 'Sorry, could not find SSH public key.')
+
+ if ssh_pub_key
+ write_file!([:user_ssh, username], ssh_pub_key)
+ end
+ if pgp_pub_key
+ write_file!([:user_pgp, username], pgp_pub_key)
+ end
+
+ update_authorized_keys
+ end
+ end
+
+ #
+ # let the user choose among the ssh public keys that we encounter, or just pick the key if there is only one.
+ #
+ def pick_ssh_key
+ ssh_keys = []
+ Dir.glob("#{ENV['HOME']}/.ssh/*.pub").each do |keyfile|
+ ssh_keys << SshKey.load(keyfile)
+ end
+
+ if `which ssh-add`.strip.any?
+ `ssh-add -L 2> /dev/null`.split("\n").compact.each do |line|
+ key = SshKey.load(line)
+ if key
+ key.comment = 'ssh-agent'
+ ssh_keys << key unless ssh_keys.include?(key)
+ end
+ end
+ end
+ ssh_keys.compact!
+
+ assert! ssh_keys.any?, 'Sorry, could not find any SSH public key for you. Have you run ssh-keygen?'
+
+ if ssh_keys.length > 1
+ key_index = numbered_choice_menu('Choose your SSH public key', ssh_keys.collect(&:summary)) do |line, i|
+ say("#{i+1}. #{line}")
+ end
+ else
+ key_index = 0
+ end
+
+ return ssh_keys[key_index]
+ end
+
+ #
+ # let the user choose among the gpg public keys that we encounter, or just pick the key if there is only one.
+ #
+ def pick_pgp_key
+ begin
+ require 'gpgme'
+ rescue LoadError
+ log "Skipping OpenPGP setup because gpgme is not installed."
+ return
+ end
+
+ secret_keys = GPGME::Key.find(:secret)
+ if secret_keys.empty?
+ log "Skipping OpenPGP setup because I could not find any OpenPGP keys for you"
+ return nil
+ end
+
+ secret_keys.select!{|key| !key.expired}
+
+ if secret_keys.length > 1
+ key_index = numbered_choice_menu('Choose your OpenPGP public key', secret_keys) do |key, i|
+ key_info = key.to_s.split("\n")[0..1].map{|line| line.sub(/^\s*(sec|uid)\s*/,'')}.join(' -- ')
+ say("#{i+1}. #{key_info}")
+ end
+ else
+ key_index = 0
+ end
+
+ key_id = secret_keys[key_index].sha
+
+ # can't use this, it includes signatures:
+ #puts GPGME::Key.export(key_id, :armor => true, :export_options => :export_minimal)
+
+ # export with signatures removed:
+ return `gpg --armor --export-options export-minimal --export #{key_id}`.strip
+ end
+
+ end
+end
diff --git a/lib/leap_cli/commands/util.rb b/lib/leap_cli/commands/util.rb
new file mode 100644
index 00000000..c1da570e
--- /dev/null
+++ b/lib/leap_cli/commands/util.rb
@@ -0,0 +1,50 @@
+module LeapCli; module Commands
+
+ extend self
+ extend LeapCli::Util
+ extend LeapCli::Util::RemoteCommand
+
+ def path(name)
+ Path.named_path(name)
+ end
+
+ #
+ # keeps prompting the user for a numbered choice, until they pick a good one or bail out.
+ #
+ # The block is yielded and is responsible for rendering the choices.
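+ #
+ # Example usage (as in the add-user command above; `choices` is an array of
+ # display strings):
+ #
+ #   index = numbered_choice_menu('Choose your SSH public key', choices) do |line, i|
+ #     say("#{i+1}. #{line}")
+ #   end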
+ #
+ def numbered_choice_menu(msg, items, &block)
+ while true
+ say("\n" + msg + ':')
+ items.each_with_index &block
+ say("q. quit")
+ index = ask("number 1-#{items.length}> ")
+ if index.empty?
+ next
+ elsif index =~ /q/
+ bail!
+ else
+ i = index.to_i - 1
+ if i < 0 || i >= items.length
+ bail!
+ else
+ return i
+ end
+ end
+ end
+ end
+
+
+ def parse_node_list(nodes)
+ if nodes.is_a? Config::Object
+ Config::ObjectList.new(nodes)
+ elsif nodes.is_a? Config::ObjectList
+ nodes
+ elsif nodes.is_a? String
+ manager.filter!(nodes)
+ else
+ bail! "argument error"
+ end
+ end
+
+end; end
diff --git a/lib/leap_cli/commands/vagrant.rb b/lib/leap_cli/commands/vagrant.rb
new file mode 100644
index 00000000..9fdd48e3
--- /dev/null
+++ b/lib/leap_cli/commands/vagrant.rb
@@ -0,0 +1,180 @@
+autoload :IPAddr, 'ipaddr'
+require 'fileutils'
+
+module LeapCli; module Commands
+
+ desc "Manage local virtual machines."
+ long_desc "This command provides a convenient way to manage Vagrant-based virtual machines. If the FILTER argument is missing, the command runs on all local virtual machines. The Vagrantfile is automatically generated in 'test/Vagrantfile'. If you want to run vagrant commands manually, cd to 'test'."
+ command [:local, :l] do |local|
+ local.desc 'Starts up the virtual machine(s)'
+ local.arg_name 'FILTER', :optional => true #, :multiple => false
+ local.command :start do |start|
+ start.flag(:basebox,
+ :desc => "The basebox to use. This value is passed to vagrant as the "+
+ "`config.vm.box` option. The value here should be the name of an installed box or a "+
+ "shorthand name of a box in HashiCorp's Atlas.",
+ :arg_name => 'BASEBOX',
+ :default_value => 'LEAP/jessie'
+ )
+ start.action do |global_options,options,args|
+ vagrant_command(["up", "sandbox on"], args, options)
+ end
+ end
+
+ local.desc 'Shuts down the virtual machine(s)'
+ local.arg_name 'FILTER', :optional => true #, :multiple => false
+ local.command :stop do |stop|
+ stop.action do |global_options,options,args|
+ if global_options[:yes]
+ vagrant_command("halt --force", args)
+ else
+ vagrant_command("halt", args)
+ end
+ end
+ end
+
+ local.desc 'Destroys the virtual machine(s), reclaiming the disk space'
+ local.arg_name 'FILTER', :optional => true #, :multiple => false
+ local.command :destroy do |destroy|
+ destroy.action do |global_options,options,args|
+ if global_options[:yes]
+ vagrant_command("destroy --force", args)
+ else
+ vagrant_command("destroy", args)
+ end
+ end
+ end
+
+ local.desc 'Print the status of local virtual machine(s)'
+ local.arg_name 'FILTER', :optional => true #, :multiple => false
+ local.command :status do |status|
+ status.action do |global_options,options,args|
+ vagrant_command("status", args)
+ end
+ end
+
+ local.desc 'Saves the current state of the virtual machine as a new snapshot'
+ local.arg_name 'FILTER', :optional => true #, :multiple => false
+ local.command :save do |status|
+ status.action do |global_options,options,args|
+ vagrant_command("sandbox commit", args)
+ end
+ end
+
+ local.desc 'Resets virtual machine(s) to the last saved snapshot'
+ local.arg_name 'FILTER', :optional => true #, :multiple => false
+ local.command :reset do |reset|
+ reset.action do |global_options,options,args|
+ vagrant_command("sandbox rollback", args)
+ end
+ end
+ end
+
+ public
+
+ #
+ # returns the path to a vagrant ssh private key file.
+ #
+ # if the vagrant.key file is owned by root or ourselves, then
+ # we need to make sure that it is owned by us and is not world readable.
+ #
+ def vagrant_ssh_key_file
+ file_path = Path.vagrant_ssh_priv_key_file
+ Util.assert_files_exist! file_path
+ uid = File.new(file_path).stat.uid
+ if uid == 0 || uid == Process.euid
+ FileUtils.install file_path, '/tmp/vagrant.key', :mode => 0600
+ file_path = '/tmp/vagrant.key'
+ end
+ return file_path
+ end
+
+ protected
+
+ def vagrant_command(cmds, args, options={})
+ vagrant_setup(options)
+ cmds = cmds.to_a
+ if args.empty?
+ nodes = [""]
+ else
+ nodes = manager.filter(args)[:environment => "local"].field(:name)
+ end
+ if nodes.any?
+ vagrant_dir = File.dirname(Path.named_path(:vagrantfile))
+ exec = ["cd #{vagrant_dir}"]
+ cmds.each do |cmd|
+ nodes.each do |node|
+ exec << "vagrant #{cmd} #{node}"
+ end
+ end
+ execute exec.join('; ')
+ else
+ bail! "No nodes found. This command only works on nodes with ip_address in the network #{LeapCli.leapfile.vagrant_network}"
+ end
+ end
+
+ private
+
+ def vagrant_setup(options)
+ assert_bin! 'vagrant', 'Vagrant is required for running local virtual machines. Run "sudo apt-get install vagrant".'
+ assert! (vagrant_version >= Gem::Version.new('1.1')), 'Vagrant version >= 1.1 is required for running local virtual machines. Please upgrade.'
+
+ unless assert_run!('vagrant plugin list | grep sahara | cat').chars.any?
+ log :installing, "vagrant plugin 'sahara'"
+ assert_run! 'vagrant plugin install sahara'
+ end
+ create_vagrant_file(options)
+ end
+
+ def vagrant_version
+ @vagrant_version ||= Gem::Version.new(assert_run!('vagrant --version').split(' ')[1])
+ end
+
+ def execute(cmd)
+ log 2, :run, cmd
+ exec cmd
+ end
+
+ def create_vagrant_file(options)
+ lines = []
+
+ basebox = options[:basebox] || 'LEAP/jessie'
+ # override basebox with custom setting from Leapfile or ~/.leaprc
+ basebox = leapfile.vagrant_basebox || basebox
+
+ lines << %[Vagrant.configure("2") do |config|]
+ manager.each_node do |node|
+ if node.vagrant?
+ lines << %[ config.vm.define :#{node.name} do |config|]
+ lines << %[ config.vm.box = "#{basebox}"]
+ lines << %[ config.vm.network :private_network, ip: "#{node.ip_address}"]
+ lines << %[ config.vm.provider "virtualbox" do |v|]
+ lines << %[ v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]]
+ lines << %[ v.name = "#{node.name}"]
+ lines << %[ v.memory = 1536]
+ lines << %[ end]
+ lines << %[ config.vm.provider "libvirt" do |v|]
+ lines << %[ v.memory = 1536]
+ lines << %[ end]
+ lines << %[ #{leapfile.custom_vagrant_vm_line}] if leapfile.custom_vagrant_vm_line
+ lines << %[ end]
+ end
+ end
+
+ lines << %[end]
+ lines << ""
+ write_file! :vagrantfile, lines.join("\n")
+ end
+
+ def pick_next_vagrant_ip_address
+ taken_ips = manager.nodes[:environment => "local"].field(:ip_address)
+ if taken_ips.any?
+ highest_ip = taken_ips.map{|ip| IPAddr.new(ip)}.max
+ new_ip = highest_ip.succ
+ else
+ new_ip = IPAddr.new(LeapCli.leapfile.vagrant_network).succ.succ
+ end
+ return new_ip.to_s
+ end
+
+end; end
diff --git a/lib/leap_cli/macros.rb b/lib/leap_cli/macros.rb
new file mode 100644
index 00000000..fdb9a94e
--- /dev/null
+++ b/lib/leap_cli/macros.rb
@@ -0,0 +1,16 @@
+#
+# MACROS
+#
+# The methods in these files are available in the context of a .json configuration file.
+# (The module LeapCli::Macro is included in Config::Object)
+#
+
+require_relative 'macros/core'
+require_relative 'macros/files'
+require_relative 'macros/haproxy'
+require_relative 'macros/hosts'
+require_relative 'macros/keys'
+require_relative 'macros/nodes'
+require_relative 'macros/secrets'
+require_relative 'macros/stunnel'
+require_relative 'macros/provider'
diff --git a/lib/leap_cli/macros/core.rb b/lib/leap_cli/macros/core.rb
new file mode 100644
index 00000000..873da358
--- /dev/null
+++ b/lib/leap_cli/macros/core.rb
@@ -0,0 +1,92 @@
+# encoding: utf-8
+
+module LeapCli
+ module Macro
+
+ #
+ # Creates a hash from the ssh key info in users directory, for use in
+ # updating authorized_keys file. Additionally, the 'monitor' public key is
+ # included, which is used by the monitor nodes to run particular commands
+ # remotely.
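+ #
+ # The returned hash is keyed by username and looks roughly like this
+ # (illustrative values):
+ #
+ #   {
+ #     "alice" => {"type" => "ssh-rsa", "key" => "AAAA..."}
+ #   }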
+ #
+ def authorized_keys
+ hash = {}
+ keys = Dir.glob(Path.named_path([:user_ssh, '*']))
+ keys.sort.each do |keyfile|
+ ssh_type, ssh_key = File.read(keyfile, :encoding => 'UTF-8').strip.split(" ")
+ name = File.basename(File.dirname(keyfile))
+ until hash[name].nil?
+ i ||= 1; name = "#{name}#{i+=1}"
+ end
+ hash[name] = {
+ "type" => ssh_type,
+ "key" => ssh_key
+ }
+ end
+ ssh_type, ssh_key = File.read(Path.named_path(:monitor_pub_key), :encoding => 'UTF-8').strip.split(" ")
+ hash[Leap::Platform.monitor_username] = {
+ "type" => ssh_type,
+ "key" => ssh_key
+ }
+ hash
+ end
+
+ def assert(assertion)
+ if instance_eval(assertion)
+ true
+ else
+ raise AssertionFailed.new(assertion), assertion, caller
+ end
+ end
+
+ def error(msg)
+ raise ConfigError.new(@node, msg), msg, caller
+ end
+
+ #
+ # applies a JSON partial to this node
+ #
+ def apply_partial(partial_path)
+ if env.partials[partial_path]
+ self.deep_merge!(env.partials[partial_path])
+ else
+ raise ArgumentError.new(
+ "No such partial `%s`. Available partials include:\n%s" %
+ [partial_path, env.partials.keys.join(", ")]
+ )
+ end
+ end
+
+ #
+ # If at first you don't succeed, then it is time to give up.
+ #
+ # try{} returns nil if anything in the block throws an exception.
+ #
+ # You can wrap something that might fail in `try`, like so.
+ #
+ # "= try{ nodes[:services => 'tor'].first.ip_address } "
+ #
+ def try(&block)
+ yield
+ rescue NoMethodError
+ rescue ArgumentError
+ nil
+ end
+
+ protected
+
+ #
+ # returns a node list, if argument is not already one
+ #
+ def listify(node_list)
+ if node_list.is_a? Config::ObjectList
+ node_list
+ elsif node_list.is_a? Config::Object
+ Config::ObjectList.new(node_list)
+ else
+ raise ArgumentError, 'argument must be a node or node list, not a `%s`' % node_list.class, caller
+ end
+ end
+
+ end
+end
diff --git a/lib/leap_cli/macros/files.rb b/lib/leap_cli/macros/files.rb
new file mode 100644
index 00000000..04c94edf
--- /dev/null
+++ b/lib/leap_cli/macros/files.rb
@@ -0,0 +1,124 @@
+# encoding: utf-8
+
+##
+## FILES
+##
+
+module LeapCli
+ module Macro
+
+ #
+ # inserts the contents of a file
+ #
+ def file(filename, options={})
+ if filename.is_a? Symbol
+ filename = [filename, @node.name]
+ end
+ filepath = Path.find_file(filename)
+ if filepath
+ if filepath =~ /\.erb$/
+ return ERB.new(File.read(filepath, :encoding => 'UTF-8'), nil, '%<>').result(binding)
+ else
+ return File.read(filepath, :encoding => 'UTF-8')
+ end
+ else
+ raise FileMissing.new(Path.named_path(filename), options)
+ end
+ end
+
+ #
+ # like #file, but allow missing files
+ #
+ def try_file(filename)
+ return file(filename)
+ rescue FileMissing
+ return nil
+ end
+
+ #
+ # returns the location of a file that is stored on the local
+ # host, under PROVIDER_DIR/files.
+ #
+ def local_file_path(path, options={})
+ if path.is_a? Symbol
+ path = [path, @node.name]
+ elsif path.is_a? String
+ # ensure it is prefixed with files/
+ unless path =~ /^files\//
+ path = "files/" + path
+ end
+ end
+ local_path = Path.find_file(path)
+ if local_path.nil?
+ if options[:missing]
+ raise FileMissing.new(Path.named_path(path), options)
+ elsif block_given?
+ yield
+ return local_file_path(path, options) # try again.
+ else
+ Util::log 2, :skipping, "local_file_path(\"#{path}\") because there is no such file."
+ return nil
+ end
+ else
+ return local_path
+ end
+ end
+
+ #
+ # Returns the location of a file once it is deployed via rsync to a
+ # remote server. An internal list of discovered file paths is saved, in
+ # order to rsync these files when needed.
+ #
+ # If the file does not exist, nil is returned.
+ #
+ # If there is a block given and the file does not actually exist, the
+ # block will be yielded to give an opportunity for some code to create the
+ # file.
+ #
+ # For example:
+ #
+ # file_path(:dkim_priv_key) {generate_dkim_key}
+ #
+ # notes:
+ #
+ # * argument 'path' is relative to Path.provider/files or
+ # Path.provider_base/files
+ # * the path returned by this method is absolute
+ # * the path stored for use later by rsync is relative to Path.provider
+ # * if the path does not exist locally, but exists in provider_base,
+ # then the default file from provider_base is copied locally. this
+ # is required for rsync to work correctly.
+ #
+ def remote_file_path(path, options={}, &block)
+ local_path = local_file_path(path, options, &block)
+
+ return nil if local_path.nil?
+
+ # if the file is under Path.provider_base, we must copy the default file
+ # to Path.provider in order for rsync to be able to sync the file.
+ if local_path =~ /^#{Regexp.escape(Path.provider_base)}/
+ local_provider_path = local_path.sub(/^#{Regexp.escape(Path.provider_base)}/, Path.provider)
+ FileUtils.mkdir_p File.dirname(local_provider_path), :mode => 0700
+ FileUtils.install local_path, local_provider_path, :mode => 0600
+ Util.log :created, Path.relative_path(local_provider_path)
+ local_path = local_provider_path
+ end
+
+ # ensure directories end with /, important for building rsync command
+ if File.directory?(local_path) && local_path !~ /\/$/
+ local_path += '/'
+ end
+
+ relative_path = Path.relative_path(local_path)
+ relative_path.sub!(/^files\//, '') # remove "files/" prefix
+ @node.file_paths << relative_path
+ return File.join(Leap::Platform.files_dir, relative_path)
+ end
+
+ # deprecated
+ def file_path(path, options={})
+ return remote_file_path(path, options)
+ end
+
+ end
+end
\ No newline at end of file
diff --git a/lib/leap_cli/macros/haproxy.rb b/lib/leap_cli/macros/haproxy.rb
new file mode 100644
index 00000000..602ae726
--- /dev/null
+++ b/lib/leap_cli/macros/haproxy.rb
@@ -0,0 +1,73 @@
+# encoding: utf-8
+
+##
+## HAPROXY
+##
+
+module LeapCli
+ module Macro
+
+ #
+ # creates a hash suitable for configuring haproxy. the key is the node name of the server we are proxying to.
+ #
+ # * node_list - a hash of nodes for the haproxy servers
+ # * stunnel_clients - contains the mappings to local ports for each server node.
+ # * non_stunnel_port - in case self is included in node_list, the port to connect to.
+ #
+ # A weight of 100 is used for nodes in the same location,
+ # 10 otherwise.
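+ #
+ # The resulting hash is keyed by server node name, roughly like this
+ # (illustrative values):
+ #
+ #   "couch1" => {"backup" => false, "host" => "localhost", "port" => 4000, "weight" => 100}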
+ #
+ def haproxy_servers(node_list, stunnel_clients, non_stunnel_port=nil)
+ default_weight = 10
+ local_weight = 100
+
+ # record these hosts so they will be included in the hosts_file
+ hostnames(node_list)
+
+ # create a simple map for node name -> local stunnel accept port
+ accept_ports = stunnel_clients.inject({}) do |hsh, stunnel_entry|
+ name = stunnel_entry.first.sub /_[0-9]+$/, ''
+ hsh[name] = stunnel_entry.last['accept_port']
+ hsh
+ end
+
+ # if one of the nodes in the node list is ourself, then there will not be a stunnel to it,
+ # but we need to include it anyway in the haproxy config.
+ if node_list[self.name] && non_stunnel_port
+ accept_ports[self.name] = non_stunnel_port
+ end
+
+ # create the first pass of the servers hash
+ servers = node_list.values.inject(Config::ObjectList.new) do |hsh, node|
+ # make sure we have a port to talk to
+ unless accept_ports[node.name]
+ error "haproxy needs a local port to talk to when connecting to #{node.name}"
+ end
+ weight = default_weight
+ try {
+ weight = local_weight if self.location.name == node.location.name
+ }
+ hsh[node.name] = Config::Object[
+ 'backup', false,
+ 'host', 'localhost',
+ 'port', accept_ports[node.name],
+ 'weight', weight
+ ]
+ if node.services.include?('couchdb')
+ hsh[node.name]['writable'] = node.couch.mode != 'mirror'
+ end
+ hsh
+ end
+
+ # if there are some local servers, make the others backup
+ if servers.detect{|k,v| v.weight == local_weight}
+ servers.each do |k,server|
+ server['backup'] = server['weight'] == default_weight
+ end
+ end
+
+ return servers
+ end
+
+ end
+end
diff --git a/lib/leap_cli/macros/hosts.rb b/lib/leap_cli/macros/hosts.rb
new file mode 100644
index 00000000..963857ae
--- /dev/null
+++ b/lib/leap_cli/macros/hosts.rb
@@ -0,0 +1,90 @@
+# encoding: utf-8
+
+module LeapCli
+ module Macro
+
+ ##
+ ## IPs
+ ##
+
+ #
+ # returns a simple array of all the IPs for the specified node list
+ #
+ def host_ips(node_list)
+ if self.vagrant?
+ node_list = node_list['environment' => 'local']
+ else
+ node_list = node_list['environment' => '!local']
+ end
+ node_list.map {|name, n|
+ [n.ip_address, (manager.facts[name]||{})['ec2_public_ipv4']]
+ }.flatten.compact.uniq
+ end
+
+ ##
+ ## HOSTS
+ ##
+
+ #
+ # records the list of hosts that are encountered for this node
+ #
+ def hostnames(nodes)
+ @referenced_nodes ||= Config::ObjectList.new
+ nodes = listify(nodes)
+ nodes.each_node do |node|
+ @referenced_nodes[node.name] ||= node
+ end
+ return nodes.values.collect {|node| node.domain.name}
+ end
+
+ #
+ # Generates entries needed for updating /etc/hosts on a node (as a hash).
+ #
+ # Argument `nodes` can be nil or a list of nodes. If nil, only include the
+ # IPs of the other nodes this @node has encountered (plus all mx nodes).
+ #
+ # Also, for virtual machines, we use the local address if this @node is in
+ # the same location as the node in question.
+ #
+ # We include the ssh public key for each host, so that the hash can also
+ # be used to generate the /etc/ssh/known_hosts
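+ #
+ # An entry in the returned hash looks roughly like this (illustrative values):
+ #
+ #   "couch1" => {
+ #     "ip_address" => "10.5.5.44",
+ #     "domain_internal" => "couch1.example.i",
+ #     "domain_full" => "couch1.example.org",
+ #     "port" => 22,
+ #     "host_pub_key" => "ssh-rsa AAAA..."
+ #   }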
+ #
+ def hosts_file(nodes=nil)
+ if nodes.nil?
+ if @referenced_nodes && @referenced_nodes.any?
+ nodes = @referenced_nodes
+ nodes = nodes.merge(nodes_like_me[:services => 'mx']) # all nodes always need to communicate with mx nodes.
+ end
+ end
+ return {} unless nodes
+ hosts = {}
+ my_location = @node['location'] ? @node['location']['name'] : nil
+ nodes.each_node do |node|
+ hosts[node.name] = {
+ 'ip_address' => node.ip_address,
+ 'domain_internal' => node.domain.internal,
+ 'domain_full' => node.domain.full,
+ 'port' => node.ssh.port
+ }
+ if node.dns['aliases'] && node.dns['aliases'].any?
+ # include aliases, but without domain.full
+ hosts[node.name]['aliases'] = node.dns['aliases'] - [node.domain.full]
+ end
+ node_location = node['location'] ? node['location']['name'] : nil
+ if my_location == node_location
+ if facts = @node.manager.facts[node.name]
+ if facts['ec2_public_ipv4']
+ hosts[node.name]['ip_address'] = facts['ec2_public_ipv4']
+ end
+ end
+ end
+ host_pub_key = Util::read_file([:node_ssh_pub_key,node.name])
+ if host_pub_key
+ hosts[node.name]['host_pub_key'] = host_pub_key
+ end
+ end
+ hosts
+ end
+
+ end
+end
\ No newline at end of file
diff --git a/lib/leap_cli/macros/keys.rb b/lib/leap_cli/macros/keys.rb
new file mode 100644
index 00000000..e7a75cfb
--- /dev/null
+++ b/lib/leap_cli/macros/keys.rb
@@ -0,0 +1,97 @@
+# encoding: utf-8
+
+#
+# Macro for dealing with cryptographic keys
+#
+
+module LeapCli
+ module Macro
+
+ #
+ # return a fingerprint for a key or certificate
+ #
+ def fingerprint(filename, options={})
+ options[:mode] ||= :x509
+ if options[:mode] == :x509
+ "SHA256: " + X509.fingerprint("SHA256", Path.named_path(filename))
+ elsif options[:mode] == :rsa
+ key = OpenSSL::PKey::RSA.new(File.read(filename))
+ Digest::SHA1.new.hexdigest(key.to_der)
+ end
+ end
+
+ ##
+ ## TOR
+ ##
+
+ #
+ # return the path to the tor public key
+ # generating the key if it is missing
+ #
+ def tor_public_key_path(path_name, key_type)
+ file_path(path_name) { generate_tor_key(key_type) }
+ end
+
+ #
+ # return the path to the tor private key
+ # generating the key if it is missing
+ #
+ def tor_private_key_path(path_name, key_type)
+ file_path(path_name) { generate_tor_key(key_type) }
+ end
+
+ #
+ # Generates a onion_address from a public RSA key file.
+ #
+ # path_name is the named path of the Tor public key.
+ #
+ # Basically, an onion address is nothing more than a base32 encoding
+ # of the first 10 bytes of a sha1 digest of the public key.
+ #
+ # Additionally, Tor ignores the 22 byte header of the public key
+ # before taking the sha1 digest.
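+ #
+ # Roughly, the core computation is (illustrative sketch):
+ #
+ #   sha1 = Digest::SHA1.digest(decoded_public_key[22..-1])
+ #   Base32.encode(sha1[0, 10]).downcase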
+ #
+ def onion_address(path_name)
+ require 'base32'
+ require 'base64'
+ require 'openssl'
+ path = Path.find_file([path_name, self.name])
+ if path && File.exists?(path)
+ public_key_str = File.readlines(path).grep(/^[^-]/).join
+ public_key = Base64.decode64(public_key_str)
+ public_key = public_key.slice(22..-1) # Tor ignores the 22 byte SPKI header
+ sha1sum = Digest::SHA1.new.digest(public_key)
+ Base32.encode(sha1sum.slice(0,10)).downcase
+ else
+ LeapCli.log :warning, 'Tor public key file "%s" does not exist' % Path.named_path([path_name, self.name])
+ end
+ end
+
+ def generate_dkim_key(bit_size=2048)
+ LeapCli.log :generating, "%s bit RSA DKIM key" % bit_size do
+ private_key = OpenSSL::PKey::RSA.new(bit_size)
+ public_key = private_key.public_key
+ LeapCli::Util.write_file! :dkim_priv_key, private_key.to_pem
+ LeapCli::Util.write_file! :dkim_pub_key, public_key.to_pem
+ end
+ end
+
+ private
+
+ def generate_tor_key(key_type)
+ if key_type == 'RSA'
+ require 'certificate_authority'
+ keypair = CertificateAuthority::MemoryKeyMaterial.new
+ bit_size = 1024
+ LeapCli.log :generating, "%s bit RSA Tor key" % bit_size do
+ keypair.generate_key(bit_size)
+ LeapCli::Util.write_file! [:node_tor_priv_key, self.name], keypair.private_key.to_pem
+ LeapCli::Util.write_file! [:node_tor_pub_key, self.name], keypair.public_key.to_pem
+ end
+ else
+ LeapCli.bail! 'tor.key.type of %s is not yet supported' % key_type
+ end
+ end
+
+ end
+end
diff --git a/lib/leap_cli/macros/nodes.rb b/lib/leap_cli/macros/nodes.rb
new file mode 100644
index 00000000..0e23831d
--- /dev/null
+++ b/lib/leap_cli/macros/nodes.rb
@@ -0,0 +1,88 @@
+# encoding: utf-8
+
+##
+## node related macros
+##
+
+module LeapCli
+ module Macro
+
+ #
+ # the list of all the nodes
+ #
+ def nodes
+ env.nodes
+ end
+
+ #
+ # simple alias for global.provider
+ #
+ def provider
+ env.provider
+ end
+
+ #
+ # returns a list of nodes that match the same environment
+ #
+ # if @node.environment is not set, we return other nodes
+ # where environment is not set.
+ #
+ def nodes_like_me
+ nodes[:environment => @node.environment]
+ end
+
+ #
+ # returns a list of nodes that match the location name
+ # and environment of @node.
+ #
+ def nodes_near_me
+ if @node['location'] && @node['location']['name']
+ nodes_like_me['location.name' => @node.location.name]
+ else
+ nodes_like_me['location' => nil]
+ end
+ end
+
+ #
+ #
+ # picks a node out from the node list in such a way that:
+ #
+ # (1) which nodes picked which nodes is saved in secrets.json
+ # (2) when other nodes call this macro with the same node list, they are guaranteed to get a different node
+ # (3) if all the nodes in the pick_node list have been picked, remaining nodes are distributed randomly.
+ #
+ # if the node_list is empty, an exception is raised.
+ # if node_list size is 1, then that node is returned and nothing is
+ # memorized via the secrets.json file.
+ #
+ # `label` is needed to distinguish between pools of nodes for different purposes.
+ #
+ # TODO: more evenly balance after all the nodes have been picked.
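+ #
+ # Example usage (illustrative; the label and node list are hypothetical):
+ #
+ #   pick_node(:tor, nodes_like_me[:services => 'tor'])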
+ #
+ def pick_node(label, node_list)
+ if node_list.any?
+ if node_list.size == 1
+ return node_list.values.first
+ else
+ secrets_key = "pick_node(:#{label},#{node_list.keys.sort.join(',')})"
+ secrets_value = @manager.secrets.retrieve(secrets_key, @node.environment) || {}
+ secrets_value[@node.name] ||= begin
+ node_to_pick = nil
+ node_list.each_node do |node|
+ next if secrets_value.values.include?(node.name)
+ node_to_pick = node.name
+ end
+ node_to_pick ||= secrets_value.values.shuffle.first # all picked already, so pick a random one.
+ node_to_pick
+ end
+ picked_node_name = secrets_value[@node.name]
+ @manager.secrets.set(secrets_key, secrets_value, @node.environment)
+ return node_list[picked_node_name]
+ end
+ else
+ raise ArgumentError.new('pick_node(node_list): node_list cannot be empty')
+ end
+ end
+
+ end
+end
\ No newline at end of file
diff --git a/lib/leap_cli/macros/provider.rb b/lib/leap_cli/macros/provider.rb
new file mode 100644
index 00000000..4e74da01
--- /dev/null
+++ b/lib/leap_cli/macros/provider.rb
@@ -0,0 +1,90 @@
+#
+# These macros are intended only for use in provider.json, although they are
+# currently loaded in all .json contexts.
+#
+
+module LeapCli
+ module Macro
+
+ #
+ # returns an array of the service names, including only those services that
+ # are enabled for this environment.
+ #
+ def enabled_services
+ manager.env(self.environment).services[:service_type => :user_service].field(:name).select { |service|
+ manager.nodes[:environment => self.environment][:services => service].any?
+ }
+ end
+
+ #
+ # The webapp will not work unless the service level configuration is precisely defined.
+ # Here, we take what the sysadmin has specified in provider.json and clean it up to
+ # ensure it is OK.
+ #
+ # It would be better to add support for JSON schema.
+ #
+ def service_levels()
+ levels = {}
+ provider.service.levels.each do |name, level|
+ if name =~ /^[0-9]+$/
+ name = name.to_i
+ end
+ levels[name] = level_cleanup(name, level.clone)
+ end
+ levels
+ end
+
+ private
+
+ def print_warning(name, msg)
+ if self.environment
+ provider_str = "provider.json or %s" % ['provider', self.environment, 'json'].join('.')
+ else
+ provider_str = "provider.json"
+ end
+ LeapCli::log :warning, "In #{provider_str}, you have an incorrect definition for service level '#{name}':" do
+ LeapCli::log msg
+ end
+ end
+
+ def level_cleanup(name, level)
+ unless level['name']
+ print_warning(name, 'required field "name" is missing')
+ end
+ unless level['description']
+ print_warning(name, 'required field "description" is missing')
+ end
+ unless level['bandwidth'].nil? || level['bandwidth'] == 'limited'
+ print_warning(name, 'field "bandwidth" must be nil or "limited"')
+ end
+ unless level['rate'].nil? || level['rate'].is_a?(Hash)
+ print_warning(name, 'field "rate" must be nil or a hash (e.g. {"USD":10, "EUR":10})')
+ end
+ possible_services = enabled_services
+ if level['services']
+ level['services'].each do |service|
+ unless possible_services.include? service
+ print_warning(name, "the service '#{service}' does not exist or there are no nodes that provide this service.")
+ LeapCli::Util::bail!
+ end
+ end
+ else
+ level['services'] = possible_services
+ end
+ level['services'] = remap_services(level['services'])
+ level
+ end
+
+ #
+ # the service names that the webapp uses and that leap_platform uses are different. ugh.
+ #
+ SERVICE_MAP = {
+ "mx" => "email",
+ "openvpn" => "eip"
+ }
+ def remap_services(services)
+ services.map {|srv| SERVICE_MAP[srv]}
+ end
+
+ end
+end
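For reference, the SERVICE_MAP remapping above translates platform service names into the names the webapp expects; a quick illustration:

    SERVICE_MAP = { "mx" => "email", "openvpn" => "eip" }
    ["mx", "openvpn"].map { |srv| SERVICE_MAP[srv] }  # => ["email", "eip"]

As written, a service name missing from the map would come out as nil, so the map needs an entry for every user service that can appear in a service level.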
diff --git a/lib/leap_cli/macros/secrets.rb b/lib/leap_cli/macros/secrets.rb
new file mode 100644
index 00000000..8d1feb55
--- /dev/null
+++ b/lib/leap_cli/macros/secrets.rb
@@ -0,0 +1,39 @@
+# encoding: utf-8
+
+require 'base32'
+
+module LeapCli
+ module Macro
+
+ #
+ # inserts a named secret, generating it if needed.
+ #
+ # manager.export_secrets should be called later to capture any newly generated secrets.
+ #
+ # +length+ is the character length of the generated password.
+ #
+ def secret(name, length=32)
+ manager.secrets.set(name, @node.environment) { Util::Secret.generate(length) }
+ end
+
+ # inserts a base32-encoded secret, generating it if needed.
+ def base32_secret(name, length=20)
+ manager.secrets.set(name, @node.environment) { Base32.encode(Util::Secret.generate(length)) }
+ end
+
+ # inserts a random number from the given range (e.g. an obfsproxy port), generating it if needed.
+ def rand_range(name, range)
+ manager.secrets.set(name, @node.environment) { rand(range) }
+ end
+
+ #
+ # inserts a hexadecimal secret string, generating it if needed.
+ #
+ # +bit_length+ is the number of bits in the secret (i.e. the resulting hex string will be bit_length/4 characters long).
+ #
+ def hex_secret(name, bit_length=128)
+ manager.secrets.set(name, @node.environment) { Util::Secret.generate_hex(bit_length) }
+ end
+
+ end
+end \ No newline at end of file
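These macros are what the service JSON files later in this diff invoke via strings like "= secret :couch_admin_password" and "= hex_secret :couch_webapp_password_salt, 128". A rough sketch of the value shapes, using SecureRandom as an illustrative stand-in for Util::Secret (the real generator may differ):

    require 'securerandom'

    # Stand-ins for Util::Secret, for illustration only:
    password = SecureRandom.urlsafe_base64(24)  # secret(name): a 32-character value by default
    salt     = SecureRandom.hex(128 / 8)        # hex_secret(name, 128): 128 bits = 32 hex characters
    # base32_secret(name) base32-encodes a 20-character secret in the same way.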
diff --git a/lib/leap_cli/macros/stunnel.rb b/lib/leap_cli/macros/stunnel.rb
new file mode 100644
index 00000000..821bda38
--- /dev/null
+++ b/lib/leap_cli/macros/stunnel.rb
@@ -0,0 +1,106 @@
+##
+## STUNNEL
+##
+
+#
+# About stunnel
+# --------------------------
+#
+# The network looks like this:
+#
+# From the client's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# consumer app -> localhost:accept_port -> connect:connect_port -> ??
+#
+# From the server's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# ?? -> *:accept_port -> localhost:connect_port -> service
+#
+
+module LeapCli
+ module Macro
+
+ #
+ # stunnel configuration for the client side.
+ #
+ # +node_list+ is a ObjectList of nodes running stunnel servers.
+ #
+ # +port+ is the real port of the ultimate service running on the servers
+ # that the client wants to connect to.
+ #
+ # * accept_port is the port on localhost to which local clients
+ # can connect. it is auto-generated serially.
+ #
+ # * connect_port is the port on the stunnel server to connect to.
+ # it is auto-generated from the +port+ argument.
+ #
+ # generates an entry appropriate to be passed directly to
+ # create_resources(stunnel::service, hiera('..'), defaults)
+ #
+ # local ports are automatically generated, starting at 4000
+ # and incrementing in sorted order (by node name).
+ #
+ def stunnel_client(node_list, port, options={})
+ @next_stunnel_port ||= 4000
+ node_list = listify(node_list)
+ hostnames(node_list) # record the hosts
+ result = Config::ObjectList.new
+ node_list.each_node do |node|
+ if node.name != self.name || options[:include_self]
+ s_port = stunnel_port(port)
+ result["#{node.name}_#{port}"] = Config::Object[
+ 'accept_port', @next_stunnel_port,
+ 'connect', node.domain.internal,
+ 'connect_port', s_port,
+ 'original_port', port
+ ]
+ manager.connections.add(:from => @node.ip_address, :to => node.ip_address, :port => s_port)
+ @next_stunnel_port += 1
+ end
+ end
+ result
+ end
+
+ #
+ # generates a stunnel server entry.
+ #
+ # +port+ is the real port of the targeted service.
+ #
+ # * `accept_port` is the publicly bound port
+ # * `connect_port` is the port that the local service is running on.
+ #
+ def stunnel_server(port)
+ {
+ "accept_port" => stunnel_port(port),
+ "connect_port" => port
+ }
+ end
+
+ #
+ # lists the ips that connect to this node, on particular ports.
+ #
+ def stunnel_firewall
+ manager.connections.select {|connection|
+ connection['to'] == @node.ip_address
+ }
+ end
+
+ private
+
+ #
+ # maps a real port to a stunnel port (used as the connect_port in the client config
+ # and the accept_port in the server config)
+ #
+ def stunnel_port(port)
+ port = port.to_i
+ if port < 50000
+ return port + 10000
+ else
+ return port - 10000
+ end
+ end
+
+ end
+end \ No newline at end of file
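The stunnel_port mapping above is just a fixed offset of 10000, folded downward for high ports so the result stays below 65535; a self-contained illustration:

    # Same mapping as stunnel_port above:
    def stunnel_port(port)
      port = port.to_i
      port < 50000 ? port + 10000 : port - 10000
    end

    stunnel_port(5984)   # => 15984  (e.g. couchdb: stunnel accepts 10000 above the real port)
    stunnel_port(60000)  # => 50000  (ports >= 50000 are mapped downward instead)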
diff --git a/platform.rb b/platform.rb
new file mode 100644
index 00000000..1e19a2a9
--- /dev/null
+++ b/platform.rb
@@ -0,0 +1,119 @@
+# encoding: utf-8
+#
+# These are variables defined by this leap_platform and used by leap_cli.
+#
+
+Leap::Platform.define do
+ self.version = "0.8"
+ self.compatible_cli = "1.8".."1.99"
+
+ #
+ # the facter facts that should be gathered
+ #
+ self.facts = ["ec2_local_ipv4", "ec2_public_ipv4"]
+
+ #
+ # absolute paths on the destination server
+ #
+ self.hiera_dir = '/etc/leap' if self.respond_to?(:hiera_dir)
+ self.hiera_path = '/etc/leap/hiera.yaml'
+ self.leap_dir = '/srv/leap'
+ self.files_dir = '/srv/leap/files'
+ self.init_path = '/srv/leap/initialized'
+
+ #
+ # the named paths for this platform
+ # (relative to the provider directory)
+ #
+ self.paths = {
+ # directories
+ :hiera_dir => 'hiera',
+ :files_dir => 'files',
+ :nodes_dir => 'nodes',
+ :services_dir => 'services',
+ :templates_dir => 'templates',
+ :tags_dir => 'tags',
+ :node_files_dir => 'files/nodes/#{arg}',
+
+ # input config files
+ :common_config => 'common.json',
+ :provider_config => 'provider.json',
+ :service_config => 'services/#{arg}.json',
+ :tag_config => 'tags/#{arg}.json',
+ :template_config => 'templates/#{arg}.json',
+ :secrets_config => 'secrets.json',
+ :node_config => 'nodes/#{arg}.json',
+
+ # input config files, environmentally scoped
+ :common_env_config => 'common.#{arg}.json',
+ :provider_env_config => 'provider.#{arg}.json',
+ :service_env_config => 'services/#{arg[0]}.#{arg[1]}.json',
+ :tag_env_config => 'tags/#{arg[0]}.#{arg[1]}.json',
+
+ # input templates
+ :provider_json_template => 'files/service-definitions/provider.json.erb',
+ :eip_service_json_template => 'files/service-definitions/#{arg}/eip-service.json.erb',
+ :soledad_service_json_template => 'files/service-definitions/#{arg}/soledad-service.json.erb',
+ :smtp_service_json_template => 'files/service-definitions/#{arg}/smtp-service.json.erb',
+
+ # custom files
+ :custom_puppet_dir => 'files/puppet',
+ :custom_puppet_modules_dir => 'files/puppet/modules',
+ :custom_puppet_manifests_dir => 'files/puppet/manifests',
+ :custom_tests => 'files/tests',
+ :custom_bin => 'files/bin',
+
+ # output files
+ :facts => 'facts.json',
+ :user_ssh => 'users/#{arg}/#{arg}_ssh.pub',
+ :user_pgp => 'users/#{arg}/#{arg}_pgp.pub',
+ :known_hosts => 'files/ssh/known_hosts',
+ :authorized_keys => 'files/ssh/authorized_keys',
+ :monitor_pub_key => 'files/ssh/monitor_ssh.pub',
+ :monitor_priv_key => 'files/ssh/monitor_ssh',
+ :ca_key => 'files/ca/ca.key',
+ :ca_cert => 'files/ca/ca.crt',
+ :client_ca_key => 'files/ca/client_ca.key',
+ :client_ca_cert => 'files/ca/client_ca.crt',
+ :dh_params => 'files/ca/dh.pem',
+ :commercial_key => 'files/cert/#{arg}.key',
+ :commercial_csr => 'files/cert/#{arg}.csr',
+ :commercial_cert => 'files/cert/#{arg}.crt',
+ :dkim_priv_key => 'files/mx/dkim.key',
+ :dkim_pub_key => 'files/mx/dkim.pub',
+
+ :commercial_ca_cert => 'files/cert/commercial_ca.crt',
+ :vagrantfile => 'test/Vagrantfile',
+ :static_web_provider_json => 'files/web/bootstrap/#{arg}/provider.json',
+ :static_web_htaccess => 'files/web/bootstrap/#{arg}/htaccess',
+ :static_web_readme => 'files/web/bootstrap/README',
+
+ # node output files
+ :hiera => 'hiera/#{arg}.yaml',
+ :node_ssh_pub_key => 'files/nodes/#{arg}/#{arg}_ssh.pub',
+ :node_x509_key => 'files/nodes/#{arg}/#{arg}.key',
+ :node_x509_cert => 'files/nodes/#{arg}/#{arg}.crt',
+ :node_tor_priv_key => 'files/nodes/#{arg}/tor.key',
+ :node_tor_pub_key => 'files/nodes/#{arg}/tor.pub',
+
+ # testing files
+ :test_client_key => 'test/cert/client.key',
+ :test_client_cert => 'test/cert/client.crt',
+ :test_openvpn_config => 'test/openvpn/#{arg}.ovpn',
+ :test_client_openvpn_template => 'test/openvpn/client.ovpn.erb'
+ }
+
+ #
+ # the files that need to get renamed when a node is renamed
+ #
+ self.node_files = [
+ :node_config, :hiera, :node_x509_cert, :node_x509_key, :node_ssh_pub_key
+ ]
+
+ self.monitor_username = 'monitor'
+
+ self.reserved_usernames = ['monitor', 'root']
+
+ self.default_puppet_tags = ['leap_base','leap_service']
+end
+
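The '#{arg}' placeholders in the single-quoted path strings above are not interpolated here; leap_cli fills them in later when a named path is resolved with an argument. A hedged sketch of the idea (the helper below is illustrative, not the actual leap_cli implementation):

    # Illustrative only: substitute the argument into a named-path template.
    def resolve_named_path(template, arg)
      template.gsub('#{arg}', arg.to_s)
    end

    resolve_named_path('files/nodes/#{arg}/#{arg}.crt', 'vpn1')
    # => "files/nodes/vpn1/vpn1.crt"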
diff --git a/provider_base/README b/provider_base/README
new file mode 100644
index 00000000..bb80df50
--- /dev/null
+++ b/provider_base/README
@@ -0,0 +1,9 @@
+This directory holds the base provider files that actual providers inherit from.
+
+For example:
+
+ the file........ myproject/provider/common.json
+ inherits from... myproject/leap_platform/provider_base/common.json
+
+
+
diff --git a/provider_base/common.json b/provider_base/common.json
new file mode 100644
index 00000000..5e689109
--- /dev/null
+++ b/provider_base/common.json
@@ -0,0 +1,97 @@
+{
+ "ip_address": null,
+ "environment": null,
+ "services": [],
+ "tags": [],
+ "contacts": "= provider.contacts.default",
+ "domain": {
+ "full_suffix": "= provider.domain",
+ "internal_suffix": "= provider.domain_internal",
+ "full": "= node.name + '.' + domain.full_suffix",
+ "internal": "= node.name + '.' + domain.internal_suffix",
+ "name": "= node.name + '.' + (dns.public ? domain.full_suffix : domain.internal_suffix)"
+ },
+ "dns": {
+ "public": "= service_type != 'internal_service'"
+ },
+ "ssh": {
+ "authorized_keys": "= authorized_keys",
+ "config": {
+ "AllowTcpForwarding": "no"
+ },
+ "port": 22,
+ "mosh": {
+ "ports": "60000:61000",
+ "enabled": false
+ }
+ },
+ "hosts": "=> hosts_file",
+ "x509": {
+ "use": true,
+ "use_commercial": false,
+ "cert": "= x509.use ? file(:node_x509_cert, :missing => 'x509 certificate for node $node. Run `leap cert update`') : nil",
+ "key": "= x509.use ? file(:node_x509_key, :missing => 'x509 key for node $node. Run `leap cert update`') : nil",
+ "ca_cert": "= try_file :ca_cert",
+ "commercial_cert": "= x509.use_commercial ? file([:commercial_cert, try{webapp.domain}||domain.full_suffix], :missing => 'commercial x509 certificate for node $node. Add file $file, or run `leap cert csr --domain %s` to generate a temporary self-signed cert and CSR you can use to purchase a real cert.' % (try{webapp.domain}||domain.full_suffix)) : nil",
+ "commercial_key": "= x509.use_commercial ? file([:commercial_key, try{webapp.domain}||domain.full_suffix], :missing => 'commercial x509 certificate for node $node. Add file $file, or run `leap cert csr --domain %s` to generate a temporary self-signed cert and CSR you can use to purchase a real cert.' % (try{webapp.domain}||domain.full_suffix)) : nil",
+ "commercial_ca_cert": "= x509.use_commercial ? try_file(:commercial_ca_cert) : nil"
+ },
+ "service_type": "internal_service",
+ "development": {
+ "site_config": true
+ },
+ "name": "common",
+ "location": null,
+ "enabled": true,
+ "mail": {
+ "smarthost": "= nodes_like_me[:services => :mx].exclude(self).field('domain.full')"
+ },
+ "stunnel": {
+ "clients": {},
+ "servers": {}
+ },
+ "firewall": {
+ "ssh": {
+ "from": "sysadmin",
+ "to": "= ip_address",
+ "port": "= ssh.port"
+ },
+ "stunnel": "=> stunnel_firewall"
+ },
+ "platform": {
+ "version": "= Leap::Platform.version.to_s",
+ "major_version": "= Leap::Platform.major_version"
+ },
+ "sources": {
+ "apt": {
+ "basic": "http://httpredir.debian.org/debian/",
+ "security": "http://security.debian.org/",
+ "backports": "http://httpredir.debian.org/debian/"
+ },
+ "leap-mx": {
+ "type": "apt",
+ "package": "leap-mx",
+ "revision": "latest"
+ },
+ "nickserver": {
+ "type": "git",
+ "source": "https://leap.se/git/nickserver",
+ "revision": "origin/version/0.8"
+ },
+ "platform": {
+ "apt": {
+ "basic": "= 'http://deb.leap.se/' + Leap::Platform.major_version"
+ }
+ },
+ "soledad": {
+ "type": "apt",
+ "package": "soledad-server",
+ "revision": "latest"
+ },
+ "webapp": {
+ "type": "git",
+ "source": "https://leap.se/git/leap_web",
+ "revision": "origin/version/0.8"
+ }
+ }
+}
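Property values beginning with "=" are evaluated as Ruby macros in the node's context (the "=>" form appears to defer evaluation until later in the compile). As a hedged example of how the domain block above resolves, assuming a node named "vpn1" and provider.domain "example.org":

    provider_domain = "example.org"
    node_name       = "vpn1"
    domain_internal = provider_domain.sub(/\.[^\.]*$/, '.i')   # => "example.i"
    domain_full     = node_name + '.' + provider_domain        # => "vpn1.example.org"
    internal_full   = node_name + '.' + domain_internal        # => "vpn1.example.i"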
diff --git a/provider_base/files/branding/head.scss b/provider_base/files/branding/head.scss
new file mode 100644
index 00000000..c100a004
--- /dev/null
+++ b/provider_base/files/branding/head.scss
@@ -0,0 +1 @@
+// no head.scss set
diff --git a/provider_base/files/branding/tail.scss b/provider_base/files/branding/tail.scss
new file mode 100644
index 00000000..919aeec6
--- /dev/null
+++ b/provider_base/files/branding/tail.scss
@@ -0,0 +1 @@
+// no tail.scss set
diff --git a/provider_base/files/service-definitions/provider.json.erb b/provider_base/files/service-definitions/provider.json.erb
new file mode 100644
index 00000000..a75bea61
--- /dev/null
+++ b/provider_base/files/service-definitions/provider.json.erb
@@ -0,0 +1,16 @@
+<%=
+ # grab some fields from provider.json
+ hsh = provider.pick(
+ :languages, :description, :name, :services,
+ :enrollment_policy, :default_language, :service
+ )
+ hsh['domain'] = domain.full_suffix
+
+ hsh['api_version'] = "1"
+ hsh['api_uri'] = ["https://", api.domain, ':', api.port].join
+
+ hsh['ca_cert_uri'] = api.ca_cert_uri
+ hsh['ca_cert_fingerprint'] = fingerprint(:ca_cert)
+
+ hsh.dump_json
+%> \ No newline at end of file
diff --git a/provider_base/files/service-definitions/v1/eip-service.json.erb b/provider_base/files/service-definitions/v1/eip-service.json.erb
new file mode 100644
index 00000000..4bd220df
--- /dev/null
+++ b/provider_base/files/service-definitions/v1/eip-service.json.erb
@@ -0,0 +1,55 @@
+<%=
+ def underscore(words)
+ words = words.to_s.dup
+ words.downcase!
+ words.gsub! /[^a-z]/, '_'
+ words
+ end
+
+ def add_gateway(node, locations, options={})
+ return nil if options[:ip] == 'REQUIRED'
+ gateway = {}
+ gateway["capabilities"] = node.openvpn.pick(:ports, :protocols, :user_ips, :adblock, :filter_dns)
+ gateway["capabilities"]["transport"] = ["openvpn"]
+ gateway["host"] = node.domain.full
+ gateway["ip_address"] = options[:ip]
+ gateway["capabilities"]["limited"] = options[:limited]
+ if node['location']
+ location_name = underscore(node.location.name)
+ gateway["location"] = location_name
+ locations[location_name] ||= node.location
+ end
+ gateway
+ end
+
+ hsh = {}
+ hsh["serial"] = 1
+ hsh["version"] = 1
+ locations = {}
+ gateways = []
+ configuration = nil
+ nodes_like_me[:services => 'openvpn'].each_node do |node|
+ if node.openvpn.allow_limited && node.openvpn.allow_unlimited
+ gateways << add_gateway(node, locations, :ip => node.openvpn.gateway_address, :limited => false)
+ gateways << add_gateway(node, locations, :ip => node.openvpn.second_gateway_address, :limited => true)
+ elsif node.openvpn.allow_unlimited
+ gateways << add_gateway(node, locations, :ip => node.openvpn.gateway_address, :limited => false)
+ elsif node.openvpn.allow_limited
+ gateways << add_gateway(node, locations, :ip => node.openvpn.gateway_address, :limited => true)
+ end
+ if configuration && node.openvpn.configuration != configuration
+ log :error, "OpenVPN nodes in the environment `#{node.environment}` have conflicting `openvpn.configuration` values. This will result in bad errors."
+ end
+ configuration = node.openvpn.configuration
+ end
+ if gateways.any?
+ configuration = configuration.dup
+ if configuration['fragment'] && configuration['fragment'] == 1500
+ configuration.delete('fragment')
+ end
+ hsh["gateways"] = gateways.compact
+ hsh["locations"] = locations
+ hsh["openvpn_configuration"] = configuration
+ end
+ JSON.sorted_generate hsh
+%> \ No newline at end of file
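The underscore helper in the template above normalizes location names into the keys used for each gateway's "location" field; for example:

    def underscore(words)
      words.to_s.downcase.gsub(/[^a-z]/, '_')
    end

    underscore("Rio de Janeiro")  # => "rio_de_janeiro"
    underscore("Seattle, WA")     # => "seattle__wa"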
diff --git a/provider_base/files/service-definitions/v1/smtp-service.json.erb b/provider_base/files/service-definitions/v1/smtp-service.json.erb
new file mode 100644
index 00000000..45f240ac
--- /dev/null
+++ b/provider_base/files/service-definitions/v1/smtp-service.json.erb
@@ -0,0 +1,29 @@
+<%=
+ def underscore(words)
+ words = words.to_s.dup
+ words.downcase!
+ words.gsub! /[^a-z]/, '_'
+ words
+ end
+
+ hsh = {}
+ hsh["serial"] = 1
+ hsh["version"] = 1
+ locations = {}
+ hosts = {}
+ nodes_like_me[:services => 'mx'].each_node do |node|
+ host = {}
+ host["hostname"] = node.domain.full
+ host["ip_address"] = node.ip_address
+ host["port"] = 465 # hard coded for now, later node.smtp.port
+ if node['location']
+ location_name = underscore(node.location.name)
+ host["location"] = location_name
+ locations[location_name] ||= node.location
+ end
+ hosts[node.name] = host
+ end
+ hsh["hosts"] = hosts
+ hsh["locations"] = locations
+ JSON.sorted_generate hsh
+%>
diff --git a/provider_base/files/service-definitions/v1/soledad-service.json.erb b/provider_base/files/service-definitions/v1/soledad-service.json.erb
new file mode 100644
index 00000000..0cd1c927
--- /dev/null
+++ b/provider_base/files/service-definitions/v1/soledad-service.json.erb
@@ -0,0 +1,29 @@
+<%=
+ def underscore(words)
+ words = words.to_s.dup
+ words.downcase!
+ words.gsub! /[^a-z]/, '_'
+ words
+ end
+
+ hsh = {}
+ hsh["serial"] = 1
+ hsh["version"] = 1
+ locations = {}
+ hosts = {}
+ nodes_like_me[:services => 'soledad'].each_node do |node|
+ host = {}
+ host["hostname"] = node.domain.full
+ host["ip_address"] = node.ip_address
+ host["port"] = node.soledad.port
+ if node['location']
+ location_name = underscore(node.location.name)
+ host["location"] = location_name
+ locations[location_name] ||= node.location
+ end
+ hosts[node.name] = host
+ end
+ hsh["hosts"] = hosts
+ hsh["locations"] = locations
+ JSON.sorted_generate hsh
+%> \ No newline at end of file
diff --git a/provider_base/provider.json b/provider_base/provider.json
new file mode 100644
index 00000000..81b2ea98
--- /dev/null
+++ b/provider_base/provider.json
@@ -0,0 +1,64 @@
+{
+ "domain": "REQUIRED",
+ "domain_internal": "= domain.sub(/\\.[^\\.]*$/, '.i')",
+ "name": {
+ "en": "REQUIRED"
+ },
+ "description": {
+ "en": "REQUIRED"
+ },
+ "contacts": {
+ "default": ["REQUIRED"],
+ "english": "= contacts.default.map {|email| email.split('@').join(' at the domain ')}.join(', ')"
+ },
+ "languages": ["en"],
+ "default_language": "en",
+ "enrollment_policy": "open",
+ "services": "= enabled_services",
+ "service": {
+ // bandwidth limit is in Bytes, storage limit is in MB.
+ // for example:
+ // "levels": {
+ // "1": {"name": "free", "description":"Limited service, but without cost to you.", "storage":50},
+ // "2": {"name": "basic", "description":"The standard package.", "storage":1000, "rate": {"USD":5}},
+ // "3": {"name": "pro", "description":"Extra storage for power users." , "storage":10000, "rate": {"USD":10}}
+ // }
+ "levels": {
+ "1": {
+ "name": "free", "description": "Please donate."
+ }
+ },
+ "default_service_level": 1,
+ "bandwidth_limit": 102400,
+ "allow_free": "= provider.service.levels.select {|l| l['rate'].nil?}.any?",
+ "allow_paid": "= provider.service.levels.select {|l| !l['rate'].nil?}.any?",
+ "allow_anonymous": "= provider.service.levels.select {|l| l['name'] == 'anonymous'}.any? && services.include?('openvpn')",
+ "allow_registration": "= provider.enrollment_policy != 'closed' && provider.service.levels.select {|l| l['name'] != 'anonymous'}.any?",
+ "allow_limited_bandwidth": "= provider.service.levels.select {|l| l['bandwidth'] == 'limited'}.any?",
+ "allow_unlimited_bandwidth": "= provider.service.levels.select {|l| l['bandwidth'].nil?}.any?"
+ },
+ "ca": {
+ "name": "= provider.ca.organization + ' Root CA'",
+ "organization": "= provider.name[provider.default_language]",
+ "organizational_unit": "= 'https://' + provider.domain",
+ "bit_size": 4096,
+ "digest": "SHA256",
+ "life_span": "10 years",
+ "server_certificates": {
+ "bit_size": 4096,
+ "digest": "SHA256",
+ "life_span": "1 years"
+ },
+ "client_certificates": {
+ "bit_size": 2048,
+ "digest": "SHA256",
+ "life_span": "2 months",
+ "limited_prefix": "LIMITED",
+ "unlimited_prefix": "UNLIMITED"
+ }
+ },
+ "client_version": {
+ "min": "0.7",
+ "max": null
+ }
+}
diff --git a/provider_base/services/_api_tester.json b/provider_base/services/_api_tester.json
new file mode 100644
index 00000000..790aa7d8
--- /dev/null
+++ b/provider_base/services/_api_tester.json
@@ -0,0 +1,13 @@
+//
+// This partial should be added to any service that runs tests that rely on
+// accessing the bonafide webapp API.
+//
+{
+ "testing": {
+ "monitor_auth_token": "= secret :api_monitor_auth_token",
+ "api_uri": "= global.services[:webapp].api.uri",
+ // api_hosts is not used directly, but calling hostnames() will ensure
+ // that the hostnames are added to /etc/hosts
+ "api_hosts": "= hostnames(nodes_like_me[:services => 'webapp'])"
+ }
+} \ No newline at end of file
diff --git a/provider_base/services/_couchdb_mirror.json b/provider_base/services/_couchdb_mirror.json
new file mode 100644
index 00000000..da496bae
--- /dev/null
+++ b/provider_base/services/_couchdb_mirror.json
@@ -0,0 +1,22 @@
+//
+// Applied to all non-master couchdb nodes
+// NOT CURRENTLY SUPPORTED
+//
+{
+ "stunnel": {
+ "clients": {
+ "couch_client": "= stunnel_client(nodes[couch.replication.masters.keys], couch.port)"
+ }
+ },
+ "couch": {
+ "mode": "mirror",
+ "replication": {
+ // for now, pick the first close one, or the first one.
+ // in the future, maybe use haproxy to balance among all the masters
+ "masters": "= try{pick_node(:couch_master,nodes_near_me['services' => 'couchdb']['couch.master' => true]).pick_fields('domain.internal', 'couch.port')} || try{pick_node(:couch_master,nodes_like_me['services' => 'couchdb']['couch.master' => true]).pick_fields('domain.internal', 'couch.port')}",
+ "username": "replication",
+ "password": "= secret :couch_replication_password",
+ "role": "replication"
+ }
+ }
+}
diff --git a/provider_base/services/_couchdb_multimaster.json b/provider_base/services/_couchdb_multimaster.json
new file mode 100644
index 00000000..803a9416
--- /dev/null
+++ b/provider_base/services/_couchdb_multimaster.json
@@ -0,0 +1,24 @@
+//
+// Only applied to master couchdb nodes when there are multiple masters
+// NOT CURRENTLY USED.
+{
+ "stunnel": {
+ "servers": {
+ "epmd_server": "= stunnel_server(couch.bigcouch.epmd_port)",
+ "ednp_server": "= stunnel_server(couch.bigcouch.ednp_port)"
+ },
+ "clients": {
+ "epmd_clients": "= stunnel_client(nodes_like_me['services' => 'couchdb']['couch.mode' => 'multimaster'], couch.bigcouch.epmd_port)",
+ "ednp_clients": "= stunnel_client(nodes_like_me['services' => 'couchdb']['couch.mode' => 'multimaster'], couch.bigcouch.ednp_port)"
+ }
+ },
+ "couch": {
+ "mode": "multimaster",
+ "bigcouch": {
+ "epmd_port": 4369,
+ "ednp_port": 9002,
+ "cookie": "= secret :bigcouch_cookie",
+ "neighbors": "= nodes_like_me['services' => 'couchdb']['couch.mode' => 'multimaster'].exclude(self).field('domain.full')"
+ }
+ }
+}
diff --git a/provider_base/services/couchdb.json b/provider_base/services/couchdb.json
new file mode 100644
index 00000000..30cb53d1
--- /dev/null
+++ b/provider_base/services/couchdb.json
@@ -0,0 +1,49 @@
+{
+ "x509": {
+ "use": true
+ },
+ "stunnel": {
+ "servers": {
+ "couch_server": "= stunnel_server(couch.port)"
+ }
+ },
+ "couch": {
+ "port": 5984,
+ "mode": "plain",
+ "users": {
+ "admin": {
+ "username": "admin",
+ "password": "= secret :couch_admin_password",
+ "salt": "= hex_secret :couch_admin_password_salt, 128"
+ },
+ "leap_mx": {
+ "username": "leap_mx",
+ "password": "= secret :couch_leap_mx_password",
+ "salt": "= hex_secret :couch_leap_mx_password_salt, 128"
+ },
+ "nickserver": {
+ "username": "nickserver",
+ "password": "= secret :couch_nickserver_password",
+ "salt": "= hex_secret :couch_nickserver_password_salt, 128"
+ },
+ "soledad": {
+ "username": "soledad",
+ "password": "= secret :couch_soledad_password",
+ "salt": "= hex_secret :couch_soledad_password_salt, 128"
+ },
+ "webapp": {
+ "username": "webapp",
+ "password": "= secret :couch_webapp_password",
+ "salt": "= hex_secret :couch_webapp_password_salt, 128"
+ },
+ "replication": {
+ "username": "replication",
+ "password": "= secret :couch_replication_password",
+ "salt": "= hex_secret :couch_replication_password_salt, 128"
+ }
+ },
+ "webapp": {
+ "nagios_test_pw": "= secret :nagios_test_password"
+ }
+ }
+}
diff --git a/provider_base/services/couchdb.rb b/provider_base/services/couchdb.rb
new file mode 100644
index 00000000..ba7e5ae5
--- /dev/null
+++ b/provider_base/services/couchdb.rb
@@ -0,0 +1,27 @@
+#
+# custom logic for couchdb json resolution
+# ============================================
+#
+# bigcouch is no longer maintained, so now couchdb is required...
+# no matter what!
+#
+
+if self.couch['master']
+ LeapCli::log :warning, %(The node property {couch.master:true} is deprecated.\n) +
+ %( Only {couch.mode:plain} is supported. (node #{self.name}))
+end
+
+couchdb_nodes = nodes_like_me['services' => 'couchdb']
+
+if couchdb_nodes.size > 1
+ LeapCli::log :error, "Having multiple nodes with {services:couchdb} is no longer supported (nodes #{couchdb_nodes.keys.join(', ')})."
+elsif self.couch.mode == "multimaster"
+ LeapCli::log :error, "Nodes with {couch.mode:multimaster} are no longer supported (node #{self.name})."
+end
+
+#
+# This is needed for the "test" that creates and removes the storage db
+# for test_user_email. If that test is removed, then this is no longer
+# necessary:
+#
+apply_partial('_api_tester') \ No newline at end of file
diff --git a/provider_base/services/dns.json b/provider_base/services/dns.json
new file mode 100644
index 00000000..67948ef8
--- /dev/null
+++ b/provider_base/services/dns.json
@@ -0,0 +1,14 @@
+{
+ "hosts": {
+ "public": "= nodes['dns.public' => true].fields('domain.name', 'dns.aliases', 'ip_address')",
+ "private": "= nodes['dns.public' => false].fields('domain.name', 'dns.aliases', 'ip_address')"
+ },
+ "service_type": "public_service",
+ "firewall": {
+ "dns": {
+ "from": "*",
+ "to": "= ip_address",
+ "port": "53"
+ }
+ }
+} \ No newline at end of file
diff --git a/provider_base/services/monitor.json b/provider_base/services/monitor.json
new file mode 100644
index 00000000..9ddc0ec7
--- /dev/null
+++ b/provider_base/services/monitor.json
@@ -0,0 +1,29 @@
+{
+ "nagios": {
+ "nagiosadmin_pw": "= secret :nagios_admin_password",
+ "domains_internal": "= nagios.hosts.values.map{|h|h['domain_internal_suffix']}.uniq",
+ "environments": "= Hash[ nagios.hosts.values.map{|h|h['environment']}.uniq.map{|e| [e||'default',{'contact_emails'=>manager.env(e).provider.contacts.default}]} ]",
+ "hosts": "= (self.environment == 'local' ? nodes_like_me : nodes[:environment => '!local']).pick_fields('environment', 'domain.internal', 'domain.internal_suffix', 'domain.full_suffix', 'ip_address', 'services', 'openvpn.gateway_address', 'ssh.port')"
+ },
+ "hosts": "= self.environment == 'local' ? hosts_file(nodes_like_me) : hosts_file(nodes[:environment => '!local'])",
+ "ssh": {
+ "monitor": {
+ "username": "= Leap::Platform.monitor_username",
+ "private_key": "= file(:monitor_priv_key)"
+ }
+ },
+ "x509": {
+ "use": true,
+ "use_commercial": true,
+ "ca_cert": "= file :ca_cert, :missing => 'provider CA. Run `leap cert ca`'",
+ "client_ca_cert": "= file :client_ca_cert, :missing => 'Certificate Authority. Run `leap cert ca`'",
+ "client_ca_key": "= file :client_ca_key, :missing => 'Certificate Authority. Run `leap cert ca`'"
+ },
+ "firewall": {
+ "monitor": {
+ "from": "sysadmin",
+ "to": "= ip_address",
+ "port": [443, 80]
+ }
+ }
+}
diff --git a/provider_base/services/monitor.rb b/provider_base/services/monitor.rb
new file mode 100644
index 00000000..01590d5c
--- /dev/null
+++ b/provider_base/services/monitor.rb
@@ -0,0 +1,3 @@
+unless self.services.include? "webapp"
+ LeapCli.log :error, "service `monitor` requires service `webapp` on the same node (node #{self.name})."
+end
diff --git a/provider_base/services/mx.json b/provider_base/services/mx.json
new file mode 100644
index 00000000..c7e99d85
--- /dev/null
+++ b/provider_base/services/mx.json
@@ -0,0 +1,53 @@
+{
+ "mx": {
+ // the provider should define its own custom aliases.
+ // these are in *addition* to the standard reserved aliases for root and postmaster, etc.
+ "aliases": {},
+ // this is the domain that is used for the OpenPGP header
+ "key_lookup_domain": "= global.services[:webapp].webapp.domain",
+ "dkim": {
+ // bit sizes larger than 2048 are not necessarily supported
+ "bit_size": 2048,
+ "public_key": "= remote_file_path(:dkim_pub_key) { generate_dkim_key(mx.dkim.bit_size) }",
+ "private_key": "= remote_file_path(:dkim_priv_key) { generate_dkim_key(mx.dkim.bit_size) }",
+ // generate selector based on first ten digits of pub key fingerprint:
+ "selector": "= fingerprint(local_file_path(:dkim_pub_key) { generate_dkim_key(mx.dkim.bit_size) }, :mode => :rsa).slice(0,10)"
+ }
+ },
+ "stunnel": {
+ "clients": {
+ "couch_client": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.port)"
+ }
+ },
+ "haproxy": {
+ "couch": {
+ "listen_port": 4096,
+ "servers": "= haproxy_servers(nodes_like_me[:services => :couchdb], stunnel.clients.couch_client, global.services[:couchdb].couch.port)"
+ }
+ },
+ "couchdb_leap_mx_user": {
+ "username": "= global.services[:couchdb].couch.users[:leap_mx].username",
+ "password": "= secret :couch_leap_mx_password",
+ "salt": "= hex_secret :couch_leap_mx_password_salt, 128"
+ },
+ "mynetworks": "= host_ips(nodes)",
+ "rbls": ["zen.spamhaus.org"],
+ "clamav": {
+ "whitelisted_addresses": []
+ },
+ "x509": {
+ "use": true,
+ "use_commercial": true,
+ "ca_cert": "= file :ca_cert, :missing => 'provider CA. Run `leap cert ca`'",
+ "client_ca_cert": "= file :client_ca_cert, :missing => 'Certificate Authority. Run `leap cert ca`'",
+ "client_ca_key": "= file :client_ca_key, :missing => 'Certificate Authority. Run `leap cert ca`'"
+ },
+ "service_type": "user_service",
+ "firewall": {
+ "mx": {
+ "from": "*",
+ "to": "= ip_address",
+ "port": [25, 465]
+ }
+ }
+}
diff --git a/provider_base/services/mx.rb b/provider_base/services/mx.rb
new file mode 100644
index 00000000..03ee561f
--- /dev/null
+++ b/provider_base/services/mx.rb
@@ -0,0 +1 @@
+apply_partial('_api_tester')
diff --git a/provider_base/services/obfsproxy.json b/provider_base/services/obfsproxy.json
new file mode 100644
index 00000000..979d0ef9
--- /dev/null
+++ b/provider_base/services/obfsproxy.json
@@ -0,0 +1,9 @@
+{
+ "obfsproxy": {
+ "scramblesuit": {
+ "password": "= base32_secret('scramblesuit_password_'+name)",
+ "port" : "= rand_range('scramblesuit_port_'+name, 18000..32000)"
+ },
+ "gateway_address": "= try{pick_node(:obfs_gateway,nodes_near_me['services' => 'openvpn']).pick_fields('openvpn.gateway_address')} || try{pick_node(:obfs_gateway,nodes_like_me['services' => 'openvpn']).pick_fields('openvpn.gateway_address')}"
+ }
+}
diff --git a/provider_base/services/openvpn.json b/provider_base/services/openvpn.json
new file mode 100644
index 00000000..6f73e31c
--- /dev/null
+++ b/provider_base/services/openvpn.json
@@ -0,0 +1,45 @@
+{
+ "service_type": "user_service",
+ "x509": {
+ "use": true,
+ "client_ca_cert": "= file :client_ca_cert, :missing => 'Certificate Authority. Run `leap cert ca`'",
+ "dh": "= file :dh_params, :missing => 'Diffie-Hellman parameters. Run `leap cert dh`'"
+ },
+ "location": null,
+ "openvpn": {
+ "gateway_address": "REQUIRED",
+ "second_gateway_address": "= openvpn.allow_limited && openvpn.allow_unlimited ? 'REQUIRED' : nil",
+ "ports": ["80", "443", "53", "1194"],
+ "protocols": ["tcp", "udp"],
+ "filter_dns": false,
+ "adblock": false,
+ "user_ips": false,
+ "allow_limited": "= provider.service.allow_limited_bandwidth",
+ "allow_unlimited": "= provider.service.allow_unlimited_bandwidth",
+ "limited_prefix": "= provider.ca.client_certificates.limited_prefix",
+ "unlimited_prefix": "= provider.ca.client_certificates.unlimited_prefix",
+ "rate_limit": "= openvpn.allow_limited ? provider.service.bandwidth_limit : nil",
+ "configuration": {
+ "tls-cipher": "DHE-RSA-AES128-SHA",
+ "auth": "SHA1",
+ "cipher": "AES-128-CBC",
+ "keepalive": "10 30",
+ "tun-ipv6": true,
+ "fragment": 1500
+ }
+ },
+ "obfsproxy": {
+ "scramblesuit": {
+ "password": "= base32_secret('scramblesuit_password_'+name)",
+ "port" : "= rand_range('scramblesuit_port_'+name, 18000..32000)"
+ },
+ "gateway_address": "= openvpn.gateway_address"
+ },
+ "firewall": {
+ "vpn": {
+ "from": "*",
+ "to": "= openvpn.gateway_address",
+ "port": "= openvpn.ports + [obfsproxy.scramblesuit.port]"
+ }
+ }
+}
diff --git a/provider_base/services/soledad.json b/provider_base/services/soledad.json
new file mode 100644
index 00000000..169588c8
--- /dev/null
+++ b/provider_base/services/soledad.json
@@ -0,0 +1,21 @@
+{
+ "soledad": {
+ "port": 2323,
+ "couchdb_soledad_user": {
+ "username": "= global.services[:couchdb].couch.users[:soledad].username",
+ "password": "= secret :couch_soledad_password",
+ "salt": "= hex_secret :couch_soledad_password_salt, 128"
+ },
+ "couchdb_leap_mx_user": {
+ "username": "= global.services[:couchdb].couch.users[:leap_mx].username"
+ }
+ },
+ "service_type": "public_service",
+ "firewall": {
+ "soledad": {
+ "from": "*",
+ "to": "= ip_address",
+ "port": "= soledad.port"
+ }
+ }
+}
diff --git a/provider_base/services/soledad.rb b/provider_base/services/soledad.rb
new file mode 100644
index 00000000..9b220c39
--- /dev/null
+++ b/provider_base/services/soledad.rb
@@ -0,0 +1,3 @@
+unless self.services.include? "couchdb"
+ LeapCli.log :error, "service `soledad` requires service `couchdb` on the same node (node #{self.name})."
+end
diff --git a/provider_base/services/static.json b/provider_base/services/static.json
new file mode 100644
index 00000000..2f408ec1
--- /dev/null
+++ b/provider_base/services/static.json
@@ -0,0 +1,20 @@
+{
+ "static": {
+ "formats": "=> try{static.domains.values.collect{|d| try{d.locations.values.collect{|l|l.format}} }.flatten.compact.uniq} || []",
+ // include a copy of provider.json in case any of the configured domains happens to match provider.domain
+ "bootstrap_files": {
+ "domain": "= provider.domain",
+ "enabled": "= !! try{static.domains[provider.domain]}",
+ "provider_json": "=> static.bootstrap_files.enabled ? try{nodes_like_me[:services => 'webapp'].values.first.definition_files['provider']} : nil",
+ "client_version": "= static.bootstrap_files.enabled ? provider.client_version : nil"
+ }
+ },
+ "service_type": "public_service",
+ "firewall": {
+ "static": {
+ "from": "*",
+ "to": "= ip_address",
+ "port": [80, 443]
+ }
+ }
+} \ No newline at end of file
diff --git a/provider_base/services/tor.json b/provider_base/services/tor.json
new file mode 100644
index 00000000..55d3d2ee
--- /dev/null
+++ b/provider_base/services/tor.json
@@ -0,0 +1,15 @@
+{
+ "tor": {
+ "bandwidth_rate": 6550,
+ "contacts": "= [provider.contacts['tor'] || provider.contacts.default].flatten",
+ "nickname": "= (self.name + secret(:tor_family)).sub('_','')[0..18]",
+ "family": "= nodes[:services => 'tor'][:environment => '!local'].field('tor.nickname').join(',')",
+ "hidden_service": {
+ "active": null,
+ "key_type": "RSA",
+ "public_key": "= tor_public_key_path(:node_tor_pub_key, tor.hidden_service.key_type) if tor.hidden_service.active",
+ "private_key": "= tor_private_key_path(:node_tor_priv_key, tor.hidden_service.key_type) if tor.hidden_service.active",
+ "address": "= onion_address(:node_tor_pub_key) if tor.hidden_service.active"
+ }
+ }
+}
diff --git a/provider_base/services/webapp.json b/provider_base/services/webapp.json
new file mode 100644
index 00000000..b1d2ca59
--- /dev/null
+++ b/provider_base/services/webapp.json
@@ -0,0 +1,93 @@
+{
+ "webapp": {
+ "admins": [],
+ "forbidden_usernames": [
+ "admin", "admins", "administrator", "administrators", "arin-admin",
+ "certmaster", "contact", "email", "help", "help-desk", "help-ticket",
+ "help-tickets", "help_desk", "help_ticket", "help_tickets", "helpdesk",
+ "helpticket", "helptickets", "info", "mail", "maildrop", "noreply",
+ "owner", "owners", "postmaster", "reply", "robot", "ssladmin", "staff",
+ "support", "tech-support", "tech_support", "techsupport", "ticket",
+ "tickets", "vmail", "www-data"],
+ "domain": "= provider.domain",
+ "modules": ["user", "billing", "help"],
+ "couchdb_webapp_user": "= global.services[:couchdb].couch.users[:webapp]",
+ "couchdb_admin_user": "= global.services[:couchdb].couch.users[:admin]",
+ "customization_dir": "= file_path 'webapp'",
+ "client_certificates": "= provider.ca.client_certificates",
+ "allow_limited_certs": "= provider.service.allow_limited_bandwidth",
+ "allow_unlimited_certs": "= provider.service.allow_unlimited_bandwidth",
+ "allow_anonymous_certs": "= provider.service.allow_anonymous",
+ "allow_registration": "= provider.service.allow_registration",
+ "default_service_level": "= provider.service.default_service_level",
+ "service_levels": "= service_levels()",
+ "secret_token": "= secret :webapp_secret_token",
+ "api_version": 1,
+ "secure": false,
+ "client_version": "= provider.client_version",
+ "nagios_test_user": {
+ "username": "nagios_test",
+ "password": "= secret :nagios_test_password"
+ },
+ "engines": [
+ "support"
+ ],
+ "locales": "= provider.languages",
+ "default_locale": "= provider.default_language",
+ "api_tokens": {
+ "monitor": "= secret :api_monitor_auth_token",
+ "allowed_ips": "= host_ips(nodes_like_me)"
+ }
+ },
+ "stunnel": {
+ "clients": {
+ "couch_client": "= stunnel_client(nodes_like_me[:services => :couchdb], global.services[:couchdb].couch.port)"
+ }
+ },
+ "haproxy": {
+ "couch": {
+ "listen_port": 4096,
+ "servers": "= haproxy_servers(nodes_like_me[:services => :couchdb], stunnel.clients.couch_client, global.services[:couchdb].couch.port)"
+ }
+ },
+ "definition_files": {
+ "provider": "= file :provider_json_template",
+ "eip_service": "= file [:eip_service_json_template, 'v'+webapp.api_version.to_s]",
+ "soledad_service": "= file [:soledad_service_json_template, 'v'+webapp.api_version.to_s]",
+ "smtp_service": "= file [:smtp_service_json_template, 'v'+webapp.api_version.to_s]"
+ },
+ "service_type": "public_service",
+ "api": {
+ "domain": "= 'api.' + webapp.domain",
+ "version": 1,
+ "port": 4430,
+ "ca_cert_uri": "= 'https://' + webapp.domain + '/ca.crt'",
+ "uri": "= %(https://#{api.domain}:#{api.port}/#{api.version})"
+ },
+ "nickserver": {
+ "domain": "= 'nicknym.' + domain.full_suffix",
+ "couchdb_nickserver_user": {
+ "username": "= global.services[:couchdb].couch.users[:nickserver].username",
+ "password": "= secret :couch_nickserver_password",
+ "salt": "= hex_secret :couch_nickserver_password_salt, 128"
+ },
+ "port": 6425
+ },
+ "dns": {
+ "aliases": "= [domain.full, webapp.domain, api.domain, nickserver.domain]"
+ },
+ "x509": {
+ "use": true,
+ "use_commercial": true,
+ "ca_cert": "= file :ca_cert, :missing => 'provider CA. Run `leap cert ca`'",
+ "client_ca_cert": "= file :client_ca_cert, :missing => 'Certificate Authority. Run `leap cert ca`.'",
+ "client_ca_key": "= file :client_ca_key, :missing => 'Certificate Authority. Run `leap cert ca`.'"
+ },
+ "firewall": {
+ "webapp": {
+ "from": "*",
+ "to": "= ip_address",
+ "port": "= [api.port, 443, 80, nickserver.port]"
+ }
+ }
+}
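The api block above derives the API endpoint from webapp.domain; for a hypothetical provider domain it works out like this:

    # Hypothetical values, assuming webapp.domain == "example.org":
    webapp_domain = "example.org"
    api_domain    = 'api.' + webapp_domain              # => "api.example.org"
    api_port      = 4430
    api_version   = 1
    api_uri       = "https://#{api_domain}:#{api_port}/#{api_version}"
    # => "https://api.example.org:4430/1"
    nickserver_domain = 'nicknym.' + webapp_domain      # => "nicknym.example.org"
    # (nickserver uses domain.full_suffix, which by default is the same value)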
diff --git a/provider_base/tags/development.json b/provider_base/tags/development.json
new file mode 100644
index 00000000..caf18e9d
--- /dev/null
+++ b/provider_base/tags/development.json
@@ -0,0 +1,3 @@
+{
+ "environment": "development"
+} \ No newline at end of file
diff --git a/provider_base/tags/local.json b/provider_base/tags/local.json
new file mode 100644
index 00000000..48312b33
--- /dev/null
+++ b/provider_base/tags/local.json
@@ -0,0 +1,3 @@
+{
+ "environment": "local"
+} \ No newline at end of file
diff --git a/provider_base/tags/production.json b/provider_base/tags/production.json
new file mode 100644
index 00000000..ea17498f
--- /dev/null
+++ b/provider_base/tags/production.json
@@ -0,0 +1,3 @@
+{
+ "environment": "production"
+} \ No newline at end of file
diff --git a/provider_base/templates/common.json b/provider_base/templates/common.json
new file mode 100644
index 00000000..a7675b15
--- /dev/null
+++ b/provider_base/templates/common.json
@@ -0,0 +1,3 @@
+{
+ "ip_address": "REQUIRED"
+} \ No newline at end of file
diff --git a/provider_base/templates/couchdb.json b/provider_base/templates/couchdb.json
new file mode 100644
index 00000000..34b60915
--- /dev/null
+++ b/provider_base/templates/couchdb.json
@@ -0,0 +1,5 @@
+{
+ "couch": {
+ "mode": "plain"
+ }
+}
diff --git a/provider_base/templates/openvpn.json b/provider_base/templates/openvpn.json
new file mode 100644
index 00000000..cbe183e8
--- /dev/null
+++ b/provider_base/templates/openvpn.json
@@ -0,0 +1,7 @@
+{
+ "openvpn": {
+ "gateway_address": "REQUIRED",
+ "ports": ["443"],
+ "protocols": ["tcp"]
+ }
+}
diff --git a/provider_base/test/openvpn/client.ovpn.erb b/provider_base/test/openvpn/client.ovpn.erb
new file mode 100644
index 00000000..af183ef4
--- /dev/null
+++ b/provider_base/test/openvpn/client.ovpn.erb
@@ -0,0 +1,28 @@
+client
+dev tun
+remote-cert-tls server
+remote-random
+nobind
+script-security 2
+verb 3
+auth SHA1
+cipher AES-128-CBC
+tls-cipher DHE-RSA-AES128-SHA
+
+<% vpn_nodes.each_node do |node| -%>
+<%= "remote #{node.openvpn.gateway_address} 1194 udp"%>
+<% end -%>
+
+<ca>
+<%= read_file! :ca_cert -%>
+</ca>
+
+<cert>
+<%# read_file! :test_client_cert -%>
+<%= cert -%>
+</cert>
+
+<key>
+<%# read_file! :test_client_key -%>
+<%= key -%>
+</key>
diff --git a/puppet/bin/apply_on_node.sh b/puppet/bin/apply_on_node.sh
new file mode 100755
index 00000000..09e5b035
--- /dev/null
+++ b/puppet/bin/apply_on_node.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+# Script to use on a node for debugging
+# Usage: ./apply_on_node.sh <puppet parameters>
+#
+# Example: ./apply_on_node.sh --debug --verbose
+
+ROOTDIR='/srv/leap'
+PLATFORM="$ROOTDIR"
+MODULEPATH="$PLATFORM/puppet/modules"
+LOG=/var/log/leap.log
+
+# example tags to use
+#TAGS='--tags=leap_base,leap_service,leap_slow'
+#TAGS='--tags=leap_base,leap_slow'
+#TAGS='--tags=leap_base,leap_service'
+
+#######
+# Setup
+#######
+
+puppet apply -v --confdir $PLATFORM/puppet --libdir $PLATFORM/puppet/lib --modulepath=$MODULEPATH $PLATFORM/puppet/manifests/setup.pp $TAGS "$@" 2>&1 | tee $LOG
+
+#########
+# site.pp
+#########
+
+puppet apply -v --confdir $PLATFORM/puppet --libdir $PLATFORM/puppet/lib --modulepath=$MODULEPATH $PLATFORM/puppet/manifests/site.pp $TAGS "$@" 2>&1 | tee $LOG
+
+
diff --git a/puppet/hiera.yaml b/puppet/hiera.yaml
new file mode 100644
index 00000000..93448e23
--- /dev/null
+++ b/puppet/hiera.yaml
@@ -0,0 +1,15 @@
+---
+:backends:
+ - yaml
+ - puppet
+
+:logger: console
+
+:yaml:
+ :datadir: /etc/leap
+
+:hierarchy:
+ - hiera
+
+:puppet:
+ :datasource: data
diff --git a/puppet/lib/puppet/parser/functions/create_resources_hash_from.rb b/puppet/lib/puppet/parser/functions/create_resources_hash_from.rb
new file mode 100644
index 00000000..47d0df9c
--- /dev/null
+++ b/puppet/lib/puppet/parser/functions/create_resources_hash_from.rb
@@ -0,0 +1,116 @@
+#
+# create_resources_hash_from.rb
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Puppet::Parser::Functions
+ newfunction(:create_resources_hash_from, :type => :rvalue, :doc => <<-EOS
+Given:
+ A formatted string (to use as the resource name)
+ An array to loop through (because puppet cannot loop)
+ A hash defining the parameters for a resource
+ And optionally a hash of parameter names to add to the resource and an
+ associated formatted string that should be configured with the current
+ element of the loop array
+
+This function will return a hash of hashes that can be used with the
+create_resources function.
+
+*Examples:*
+ $allowed_hosts = ['10.0.0.0/8', '192.168.0.0/24']
+ $resource_name = "100 allow %s to apache on ports 80"
+ $my_resource_hash = {
+ 'proto' => 'tcp',
+ 'action' => 'accept',
+ 'dport' => 80
+ }
+ $dynamic_parameters = {
+ 'source' => '%s'
+ }
+
+ $created_resource_hash = create_resources_hash_from($resource_name, $allowed_hosts, $my_resource_hash, $dynamic_parameters)
+
+$created_resource_hash would equal:
+ {
+ '100 allow 10.0.0.0/8 to apache on ports 80' => {
+ 'proto' => 'tcp',
+ 'action' => 'accept',
+ 'dport' => 80,
+ 'source' => '10.0.0.0/8'
+ },
+ '100 allow 192.168.0.0/24 to apache on ports 80' => {
+ 'proto' => 'tcp',
+ 'action' => 'accept',
+ 'dport' => 80,
+ 'source' => '192.168.0.0/24'
+ }
+ }
+
+$created_resource_hash could then be used with create_resources
+
+ create_resources(firewall, $created_resource_hash)
+
+To create a bunch of resources in a way that would only otherwise be possible
+with a loop of some description.
+ EOS
+ ) do |arguments|
+
+ raise Puppet::ParseError, "create_resources_hash_from(): Wrong number of arguments " +
+ "given (#{arguments.size} for 3 or 4)" if arguments.size < 3 or arguments.size > 4
+
+ formatted_string = arguments[0]
+
+ unless formatted_string.is_a?(String)
+ raise(Puppet::ParseError, 'create_resources_hash_from(): first argument must be a string')
+ end
+
+ loop_array = arguments[1]
+
+ unless loop_array.is_a?(Array)
+ raise(Puppet::ParseError, 'create_resources_hash_from(): second argument must be an array')
+ end
+
+ resource_hash = arguments[2]
+ unless resource_hash.is_a?(Hash)
+ raise(Puppet::ParseError, 'create_resources_hash_from(): third argument must be a hash')
+ end
+
+ if arguments.size == 4
+ dynamic_parameters = arguments[3]
+ unless dynamic_parameters.is_a?(Hash)
+ raise(Puppet::ParseError, 'create_resources_hash_from(): fourth argument must be a hash')
+ end
+ end
+
+ result = {}
+
+ loop_array.each do |i|
+ my_resource_hash = resource_hash.clone
+ if dynamic_parameters
+ dynamic_parameters.each do |param, value|
+ if my_resource_hash.member?(param)
+ raise(Puppet::ParseError, "create_resources_hash_from(): dynamic_parameter '#{param}' already exists in resource hash")
+ end
+ my_resource_hash[param] = sprintf(value,[i])
+ end
+ end
+ result[sprintf(formatted_string,[i])] = my_resource_hash
+ end
+
+ result
+ end
+end
+
+# vim: set ts=2 sw=2 et :
+# encoding: utf-8
diff --git a/puppet/lib/puppet/parser/functions/sorted_json.rb b/puppet/lib/puppet/parser/functions/sorted_json.rb
new file mode 100644
index 00000000..605da00e
--- /dev/null
+++ b/puppet/lib/puppet/parser/functions/sorted_json.rb
@@ -0,0 +1,47 @@
+#
+# Written by Gavin Mogan, from https://gist.github.com/halkeye/2287885
+# Put in the public domain by the author.
+#
+
+require 'json'
+
+def sorted_json(obj)
+ case obj
+ when String, Fixnum, Float, TrueClass, FalseClass, NilClass
+ return obj.to_json
+ when Array
+ arrayRet = []
+ obj.each do |a|
+ arrayRet.push(sorted_json(a))
+ end
+ return "[" << arrayRet.join(',') << "]";
+ when Hash
+ ret = []
+ obj.keys.sort.each do |k|
+ ret.push(k.to_json << ":" << sorted_json(obj[k]))
+ end
+ return "{" << ret.join(",") << "}";
+ else
+ raise Exception.new("Unable to handle object of type <%s>" % obj.class.to_s)
+ end
+end
+
+module Puppet::Parser::Functions
+ newfunction(:sorted_json, :type => :rvalue, :doc => <<-EOS
+This function takes data and outputs it as JSON, making sure the hash keys are sorted
+
+*Examples:*
+
+ sorted_json({'key'=>'value'})
+
+Would return: {"key":"value"}
+ EOS
+ ) do |arguments|
+ raise(Puppet::ParseError, "sorted_json(): Wrong number of arguments " +
+ "given (#{arguments.size} for 1)") if arguments.size != 1
+
+ json = arguments[0]
+ return sorted_json(json)
+ end
+end
+
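A usage sketch for the helper above; note that only hash keys are sorted, while arrays keep their original order:

    # using the sorted_json function defined above:
    sorted_json({ "b" => 2, "a" => [3, 1] })
    # => '{"a":[3,1],"b":2}'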
diff --git a/puppet/lib/puppet/parser/functions/sorted_yaml.rb b/puppet/lib/puppet/parser/functions/sorted_yaml.rb
new file mode 100644
index 00000000..46cd46ce
--- /dev/null
+++ b/puppet/lib/puppet/parser/functions/sorted_yaml.rb
@@ -0,0 +1,400 @@
+# encoding: UTF-8
+#
+# provides sorted_yaml() function, using Ya2YAML.
+# see https://github.com/afunai/ya2yaml
+#
+
+class Ya2YAML
+ #
+ # Author:: Akira FUNAI
+ # Copyright:: Copyright (c) 2006-2010 Akira FUNAI
+ # License:: MIT License
+ #
+
+ def initialize(opts = {})
+ options = opts.dup
+ options[:indent_size] = 2 if options[:indent_size].to_i <= 0
+ options[:minimum_block_length] = 0 if options[:minimum_block_length].to_i <= 0
+ options.update(
+ {
+ :printable_with_syck => true,
+ :escape_b_specific => true,
+ :escape_as_utf8 => true,
+ }
+ ) if options[:syck_compatible]
+
+ @options = options
+ end
+
+ def _ya2yaml(obj)
+ #raise 'set $KCODE to "UTF8".' if (RUBY_VERSION < '1.9.0') && ($KCODE != 'UTF8')
+ if (RUBY_VERSION < '1.9.0')
+ $KCODE = 'UTF8'
+ end
+ '--- ' + emit(obj, 1) + "\n"
+ rescue SystemStackError
+ raise ArgumentError, "ya2yaml can't handle circular references"
+ end
+
+ private
+
+ def emit(obj, level)
+ case obj
+ when Array
+ if (obj.length == 0)
+ '[]'
+ else
+ indent = "\n" + s_indent(level - 1)
+ ###
+ ### NOTE: a minor modification to normal Ya2YAML...
+ ### We want arrays to be output in sorted order, not just
+ ### Hashes.
+ ###
+ #obj.collect {|o|
+ # indent + '- ' + emit(o, level + 1)
+ #}.join('')
+ obj.sort {|a,b| a.to_s <=> b.to_s}.collect {|o|
+ indent + '- ' + emit(o, level + 1)
+ }.join('')
+ end
+ when Hash
+ if (obj.length == 0)
+ '{}'
+ else
+ indent = "\n" + s_indent(level - 1)
+ hash_order = @options[:hash_order]
+ if (hash_order && level == 1)
+ hash_keys = obj.keys.sort {|x, y|
+ x_order = hash_order.index(x) ? hash_order.index(x) : Float::MAX
+ y_order = hash_order.index(y) ? hash_order.index(y) : Float::MAX
+ o = (x_order <=> y_order)
+ (o != 0) ? o : (x.to_s <=> y.to_s)
+ }
+ elsif @options[:preserve_order]
+ hash_keys = obj.keys
+ else
+ hash_keys = obj.keys.sort {|x, y| x.to_s <=> y.to_s }
+ end
+ hash_keys.collect {|k|
+ key = emit(k, level + 1)
+ if (
+ is_one_plain_line?(key) ||
+ key =~ /\A(#{REX_BOOL}|#{REX_FLOAT}|#{REX_INT}|#{REX_NULL})\z/x
+ )
+ indent + key + ': ' + emit(obj[k], level + 1)
+ else
+ indent + '? ' + key +
+ indent + ': ' + emit(obj[k], level + 1)
+ end
+ }.join('')
+ end
+ when NilClass
+ '~'
+ when String
+ emit_string(obj, level)
+ when TrueClass, FalseClass
+ obj.to_s
+ when Fixnum, Bignum, Float
+ obj.to_s
+ when Date
+ obj.to_s
+ when Time
+ offset = obj.gmtoff
+ off_hm = sprintf(
+ '%+.2d:%.2d',
+ (offset / 3600.0).to_i,
+ (offset % 3600.0) / 60
+ )
+ u_sec = (obj.usec != 0) ? sprintf(".%.6d", obj.usec) : ''
+ obj.strftime("%Y-%m-%d %H:%M:%S#{u_sec} #{off_hm}")
+ when Symbol
+ '!ruby/symbol ' + emit_string(obj.to_s, level)
+ when Range
+ '!ruby/range ' + obj.to_s
+ when Regexp
+ '!ruby/regexp ' + obj.inspect
+ else
+ case
+ when obj.is_a?(Struct)
+ struct_members = {}
+ obj.each_pair{|k, v| struct_members[k.to_s] = v }
+ '!ruby/struct:' + obj.class.to_s.sub(/^(Struct::(.+)|.*)$/, '\2') + ' ' +
+ emit(struct_members, level + 1)
+ else
+ # serialized as a generic object
+ object_members = {}
+ obj.instance_variables.each{|k, v|
+ object_members[k.to_s.sub(/^@/, '')] = obj.instance_variable_get(k)
+ }
+ '!ruby/object:' + obj.class.to_s + ' ' +
+ emit(object_members, level + 1)
+ end
+ end
+ end
+
+ def emit_string(str, level)
+ (is_string, is_printable, is_one_line, is_one_plain_line) = string_type(str)
+ if is_string
+ if is_printable
+ if is_one_plain_line
+ emit_simple_string(str, level)
+ else
+ (is_one_line || str.length < @options[:minimum_block_length]) ?
+ emit_quoted_string(str, level) :
+ emit_block_string(str, level)
+ end
+ else
+ emit_quoted_string(str, level)
+ end
+ else
+ emit_base64_binary(str, level)
+ end
+ end
+
+ def emit_simple_string(str, level)
+ str
+ end
+
+ def emit_block_string(str, level)
+ str = normalize_line_break(str)
+
+ indent = s_indent(level)
+ indentation_indicator = (str =~ /\A /) ? indent.size.to_s : ''
+ str =~ /(#{REX_NORMAL_LB}*)\z/
+ chomping_indicator = case $1.length
+ when 0
+ '-'
+ when 1
+ ''
+ else
+ '+'
+ end
+
+ str.chomp!
+ str.gsub!(/#{REX_NORMAL_LB}/) {
+ $1 + indent
+ }
+ '|' + indentation_indicator + chomping_indicator + "\n" + indent + str
+ end
+
+ def emit_quoted_string(str, level)
+ str = yaml_escape(normalize_line_break(str))
+ if (str.length < @options[:minimum_block_length])
+ str.gsub!(/#{REX_NORMAL_LB}/) { ESCAPE_SEQ_LB[$1] }
+ else
+ str.gsub!(/#{REX_NORMAL_LB}$/) { ESCAPE_SEQ_LB[$1] }
+ str.gsub!(/(#{REX_NORMAL_LB}+)(.)/) {
+ trail_c = $3
+ $1 + trail_c.sub(/([\t ])/) { ESCAPE_SEQ_WS[$1] }
+ }
+ indent = s_indent(level)
+ str.gsub!(/#{REX_NORMAL_LB}/) {
+ ESCAPE_SEQ_LB[$1] + "\\\n" + indent
+ }
+ end
+ '"' + str + '"'
+ end
+
+ def emit_base64_binary(str, level)
+ indent = "\n" + s_indent(level)
+ base64 = [str].pack('m')
+ '!binary |' + indent + base64.gsub(/\n(?!\z)/, indent)
+ end
+
+ def string_type(str)
+ if str.respond_to?(:encoding) && (!str.valid_encoding? || str.encoding == Encoding::ASCII_8BIT)
+ return false, false, false, false
+ end
+ (ucs_codes = str.unpack('U*')) rescue (
+ # ArgumentError -> binary data
+ return false, false, false, false
+ )
+ if (
+ @options[:printable_with_syck] &&
+ str =~ /\A#{REX_ANY_LB}* | #{REX_ANY_LB}*\z|#{REX_ANY_LB}{2}\z/
+ )
+ # detour Syck bug
+ return true, false, nil, false
+ end
+ ucs_codes.each {|ucs_code|
+ return true, false, nil, false unless is_printable?(ucs_code)
+ }
+ return true, true, is_one_line?(str), is_one_plain_line?(str)
+ end
+
+ def is_printable?(ucs_code)
+ # YAML 1.1 / 4.1.1.
+ (
+ [0x09, 0x0a, 0x0d, 0x85].include?(ucs_code) ||
+ (ucs_code <= 0x7e && ucs_code >= 0x20) ||
+ (ucs_code <= 0xd7ff && ucs_code >= 0xa0) ||
+ (ucs_code <= 0xfffd && ucs_code >= 0xe000) ||
+ (ucs_code <= 0x10ffff && ucs_code >= 0x10000)
+ ) &&
+ !(
+ # treat LS/PS as non-printable characters
+ @options[:escape_b_specific] &&
+ (ucs_code == 0x2028 || ucs_code == 0x2029)
+ )
+ end
+
+ def is_one_line?(str)
+ str !~ /#{REX_ANY_LB}(?!\z)/
+ end
+
+ def is_one_plain_line?(str)
+ # YAML 1.1 / 4.6.11.
+ str !~ /^([\-\?:,\[\]\{\}\#&\*!\|>'"%@`\s]|---|\.\.\.)/ &&
+ str !~ /[:\#\s\[\]\{\},]/ &&
+ str !~ /#{REX_ANY_LB}/ &&
+ str !~ /^(#{REX_BOOL}|#{REX_FLOAT}|#{REX_INT}|#{REX_MERGE}
+ |#{REX_NULL}|#{REX_TIMESTAMP}|#{REX_VALUE})$/x
+ end
+
+ def s_indent(level)
+ # YAML 1.1 / 4.2.2.
+ ' ' * (level * @options[:indent_size])
+ end
+
+ def normalize_line_break(str)
+ # YAML 1.1 / 4.1.4.
+ str.gsub(/(#{REX_CRLF}|#{REX_CR}|#{REX_NEL})/, "\n")
+ end
+
+ def yaml_escape(str)
+ # YAML 1.1 / 4.1.6.
+ str.gsub(/[^a-zA-Z0-9]/u) {|c|
+ ucs_code, = (c.unpack('U') rescue [??])
+ case
+ when ESCAPE_SEQ[c]
+ ESCAPE_SEQ[c]
+ when is_printable?(ucs_code)
+ c
+ when @options[:escape_as_utf8]
+ c.respond_to?(:bytes) ?
+ c.bytes.collect {|b| '\\x%.2x' % b }.join :
+ '\\x' + c.unpack('H2' * c.size).join('\\x')
+ when ucs_code == 0x2028 || ucs_code == 0x2029
+ ESCAPE_SEQ_LB[c]
+ when ucs_code <= 0x7f
+ sprintf('\\x%.2x', ucs_code)
+ when ucs_code <= 0xffff
+ sprintf('\\u%.4x', ucs_code)
+ else
+ sprintf('\\U%.8x', ucs_code)
+ end
+ }
+ end
+
+ module Constants
+ UCS_0X85 = [0x85].pack('U') # c285@UTF8 Unicode next line
+ UCS_0XA0 = [0xa0].pack('U') # c2a0@UTF8 Unicode non-breaking space
+ UCS_0X2028 = [0x2028].pack('U') # e280a8@UTF8 Unicode line separator
+ UCS_0X2029 = [0x2029].pack('U') # e280a9@UTF8 Unicode paragraph separator
+
+ # non-break characters
+ ESCAPE_SEQ = {
+ "\x00" => '\\0',
+ "\x07" => '\\a',
+ "\x08" => '\\b',
+ "\x0b" => '\\v',
+ "\x0c" => '\\f',
+ "\x1b" => '\\e',
+ "\"" => '\\"',
+ "\\" => '\\\\',
+ }
+
+ # non-breaking space
+ ESCAPE_SEQ_NS = {
+ UCS_0XA0 => '\\_',
+ }
+
+ # white spaces
+ ESCAPE_SEQ_WS = {
+ "\x09" => '\\t',
+ " " => '\\x20',
+ }
+
+ # line breaks
+ ESCAPE_SEQ_LB = {
+ "\x0a" => '\\n',
+ "\x0d" => '\\r',
+ UCS_0X85 => '\\N',
+ UCS_0X2028 => '\\L',
+ UCS_0X2029 => '\\P',
+ }
+
+ # regexps for line breaks
+ REX_LF = Regexp.escape("\x0a")
+ REX_CR = Regexp.escape("\x0d")
+ REX_CRLF = Regexp.escape("\x0d\x0a")
+ REX_NEL = Regexp.escape(UCS_0X85)
+ REX_LS = Regexp.escape(UCS_0X2028)
+ REX_PS = Regexp.escape(UCS_0X2029)
+
+ REX_ANY_LB = /(#{REX_LF}|#{REX_CR}|#{REX_NEL}|#{REX_LS}|#{REX_PS})/
+ REX_NORMAL_LB = /(#{REX_LF}|#{REX_LS}|#{REX_PS})/
+
+ # regexps for language-independent types for YAML 1.1
+ REX_BOOL = /
+ y|Y|yes|Yes|YES|n|N|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF
+ /x
+ REX_FLOAT = /
+ [-+]?([0-9][0-9_]*)?\.[0-9.]*([eE][-+][0-9]+)? # (base 10)
+ |[-+]?[0-9][0-9_]*(:[0-5]?[0-9])+\.[0-9_]* # (base 60)
+ |[-+]?\.(inf|Inf|INF) # (infinity)
+ |\.(nan|NaN|NAN) # (not a number)
+ /x
+ REX_INT = /
+ [-+]?0b[0-1_]+ # (base 2)
+ |[-+]?0[0-7_]+ # (base 8)
+ |[-+]?(0|[1-9][0-9_]*) # (base 10)
+ |[-+]?0x[0-9a-fA-F_]+ # (base 16)
+ |[-+]?[1-9][0-9_]*(:[0-5]?[0-9])+ # (base 60)
+ /x
+ REX_MERGE = /
+ <<
+ /x
+ REX_NULL = /
+ ~ # (canonical)
+ |null|Null|NULL # (English)
+ | # (Empty)
+ /x
+ REX_TIMESTAMP = /
+ [0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] # (ymd)
+ |[0-9][0-9][0-9][0-9] # (year)
+ -[0-9][0-9]? # (month)
+ -[0-9][0-9]? # (day)
+ ([Tt]|[ \t]+)[0-9][0-9]? # (hour)
+ :[0-9][0-9] # (minute)
+ :[0-9][0-9] # (second)
+ (\.[0-9]*)? # (fraction)
+ (([ \t]*)Z|[-+][0-9][0-9]?(:[0-9][0-9])?)? # (time zone)
+ /x
+ REX_VALUE = /
+ =
+ /x
+ end
+
+ include Constants
+end
+
+module Puppet::Parser::Functions
+ newfunction(:sorted_yaml,
+ :type => :rvalue,
+ :doc => "This function outputs yaml, but ensures the keys are sorted."
+ ) do |arguments|
+
+ if arguments.is_a?(Array)
+ if arguments.size != 1
+ raise(Puppet::ParseError, "sorted_yaml(): Wrong number of arguments given (#{arguments.size} for 1)")
+ end
+ yaml = arguments.first
+ else
+ yaml = arguments
+ end
+ return Ya2YAML.new()._ya2yaml(yaml)
+ end
+end
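+
+# Usage sketch (hypothetical manifest snippet; the file path and hash below are
+# only illustrative): render a hash as YAML with deterministically sorted keys,
+# e.g. for config files that should not churn between catalog runs:
+#
+#   $settings = { 'zeta' => 1, 'alpha' => 2 }
+#   file { '/etc/example/settings.yaml':
+#     content => sorted_yaml($settings),
+#   }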
diff --git a/puppet/manifests/site.pp b/puppet/manifests/site.pp
new file mode 100644
index 00000000..ecda4012
--- /dev/null
+++ b/puppet/manifests/site.pp
@@ -0,0 +1,60 @@
+# set a default exec path
+# the logoutput exec parameter defaults to "on_error" in puppet 3,
+# but to "false" in puppet 2.7, so we need to set this globally here
+Exec {
+ logoutput => on_failure,
+ path => '/usr/bin:/usr/sbin/:/bin:/sbin:/usr/local/bin:/usr/local/sbin'
+}
+
+Package <| provider == 'apt' |> {
+ install_options => ['--no-install-recommends'],
+}
+
+$services = hiera('services', [])
+$services_str = join($services, ', ')
+notice("Services for ${fqdn}: ${services_str}")
+
+# In the default deployment case, we want to run an 'apt-get dist-upgrade'
+# to ensure the latest packages are installed. This is done by including the
+# class 'site_config::slow' here. However, if you only changed a small bit of
+# the platform and want to skip this slow part of deployment, you can do that
+# by using 'leap deploy --fast' which will only apply those resources that are
+# tagged with 'leap_base' or 'leap_service'.
+# See https://leap.se/en/docs/platform/details/under-the-hood#tags
+include site_config::slow
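+
+# For example (a hypothetical resource, shown only to illustrate the tag
+# mechanism): a resource tagged 'leap_base' or 'leap_service' is still applied
+# by 'leap deploy --fast', while untagged resources are skipped:
+#
+#   file { '/etc/leap/fast-deploy-example':
+#     content => "managed\n",
+#     tag     => 'leap_base',
+#   }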
+
+if member($services, 'openvpn') {
+ include site_openvpn
+}
+
+if member($services, 'couchdb') {
+ include site_couchdb
+}
+
+if member($services, 'webapp') {
+ include site_webapp
+}
+
+if member($services, 'soledad') {
+ include soledad::server
+}
+
+if member($services, 'monitor') {
+ include site_nagios
+}
+
+if member($services, 'tor') {
+ include site_tor
+}
+
+if member($services, 'mx') {
+ include site_mx
+}
+
+if member($services, 'static') {
+ include site_static
+}
+
+if member($services, 'obfsproxy') {
+ include site_obfsproxy
+}
diff --git a/puppet/modules/apache/.gitignore b/puppet/modules/apache/.gitignore
new file mode 100644
index 00000000..cb918d8c
--- /dev/null
+++ b/puppet/modules/apache/.gitignore
@@ -0,0 +1,6 @@
+.tmp_*~
+.librarian
+.tmp
+spec/fixtures/modules
+spec/fixtures/manifests
+*.lock
diff --git a/puppet/modules/apache/.rspec b/puppet/modules/apache/.rspec
new file mode 100644
index 00000000..8c18f1ab
--- /dev/null
+++ b/puppet/modules/apache/.rspec
@@ -0,0 +1,2 @@
+--format documentation
+--color
diff --git a/puppet/modules/apache/Gemfile b/puppet/modules/apache/Gemfile
new file mode 100644
index 00000000..b1fc9814
--- /dev/null
+++ b/puppet/modules/apache/Gemfile
@@ -0,0 +1,13 @@
+source 'https://rubygems.org'
+
+if ENV.key?('PUPPET_VERSION')
+ puppetversion = "~> #{ENV['PUPPET_VERSION']}"
+else
+ puppetversion = ['>= 3.3.1']
+end
+
+gem 'puppet', puppetversion
+gem 'puppet-lint', '>=0.3.2'
+gem 'puppetlabs_spec_helper', '>=0.2.0'
+gem 'rake', '>=0.9.2.2'
+gem 'librarian-puppet', '>=0.9.10'
diff --git a/puppet/modules/apache/LICENSE b/puppet/modules/apache/LICENSE
new file mode 100644
index 00000000..94a9ed02
--- /dev/null
+++ b/puppet/modules/apache/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/puppet/modules/apache/Puppetfile b/puppet/modules/apache/Puppetfile
new file mode 100644
index 00000000..86d58ae6
--- /dev/null
+++ b/puppet/modules/apache/Puppetfile
@@ -0,0 +1,15 @@
+# empty
+
+forge 'https://forgeapi.puppetlabs.com'
+
+mod 'shorewall', :git => 'https://git-ipuppet.immerda.ch/module-shorewall'
+mod 'templatewlv', :git => 'https://git-ipuppet.immerda.ch/module-templatewlv'
+mod 'mod_security', :git => 'https://git-ipuppet.immerda.ch/module-mod_security'
+mod 'mod_fcgid', :git => 'https://git-ipuppet.immerda.ch/module-mod_fcgid'
+mod 'php', :git => 'https://git-ipuppet.immerda.ch/module-php'
+mod 'perl', :git => 'https://git-ipuppet.immerda.ch/module-perl'
+mod 'scl', :git => 'https://git-ipuppet.immerda.ch/module-scl'
+mod 'yum', :git => 'https://git-ipuppet.immerda.ch/module-yum'
+mod 'puppetlabs-stdlib'
+mod 'puppetlabs-concat'
+#mod 'munin', :git => 'https://git-ipuppet.immerda.ch/module-munin'
diff --git a/puppet/modules/apache/README.md b/puppet/modules/apache/README.md
new file mode 100644
index 00000000..331c85b0
--- /dev/null
+++ b/puppet/modules/apache/README.md
@@ -0,0 +1,233 @@
+Puppet module for managing an Apache web server
+===============================================
+
+This module tries to manage apache on different distros in a similar manner. A
+few additional directories have to be created, and some configuration files
+have to be deployed, to fit this schema.
+
+! Upgrade Notices !
+
+ * The $ssl_cipher_suite has been evaluated from the `cert` module in the
+ past, but is now a hardcoded default for the sake of reducing dependencies
+ on other modules. If you were using the `cert` module before, you should
+ pass this parameter when declaring the apache class !
+
+ * this module now only works with puppet 2.7 or newer
+
+ * this module now uses parameterized classes, if you were using global
+ variables before, you need to change the class declarations in your manifests
+
+ * this module now requires the stdlib module
+
+ * this module no longer requires the common module
+
+ * if using the munin module, you need a version of the munin module that is
+ at or newer than commit 77e0a70999a8c4c20ee8d9eb521b927c525ac653 (Feb 28, 2013)
+
+ * if using munin, you will need to have the perl module installed
+
+ * you must change your modules/site-apache to modules/site_apache
+
+ * the $apache_no_default_site variable is no longer supported, you should
+ switch to passing the parameter "no_default_site => true" to the apache class
+
+ * the $use_munin variable is no longer supported, you should switch to
+ passing the parameter 'manage_munin' to the apache class
+
+ * the $use_shorewall variable is no longer supported, you should switch to
+ passing the parameter 'manage_shorewall' to the apache class
+
+ * if you were using apache::vhost::file, or apache::vhost::template, there is a
+ wrapper called apache::vhost now that takes a $vhost_mode (either the default
+ 'template', or 'file'), although you can continue to use the longer defines
+
+ * Previously, apache::config::file resources would require the source to be a
+ full source specification, this is no longer needed, so please change any:
+
+ source => "puppet:///modules/site-apache/blah"
+
+ to be:
+
+ source => "modules/site-apache/blah"
+
+
+Requirements
+------------
+
+ * puppet 2.7 or newer
+ * stdlib module
+ * templatewlv module
+ * facter >= 2.2
+ because we check for $::operatingsystemmajrelease in multiple places.
+ In Debian wheezy, facter needs to be upgraded from wheezy-backports.
+ The facter version of Debian jessie is new enough.
+
+Usage
+=====
+
+Installing Apache
+-----------------
+
+To install Apache, simply include the 'apache' class in your manifests:
+
+ include apache
+
+This will give you a basic managed setup. You can pass a couple of parameters to the
+class to have the module do some things for you:
+
+ * manage_shorewall: If you have the shorewall module installed and are using
+ it then rules will be automatically defined for you to let traffic come from
+ the exterior into the web server via port 80, and also 443 if you're using
+ the apache::ssl class. (Default: false)
+
+ * manage_munin: If you have the munin module installed and are using it, then
+ some apache graphs will be configured for you. (Default: false)
+
+ * no_default_site: Set this if you do not want the 0-default.conf and
+ 0-default_ssl.conf virtualhosts automatically created in your node
+ configuration. (Default: false)
+
+ * ssl: Pass this parameter if you want Apache installed with SSL support
+ enabled. (Default: false)
+
+For example:
+
+ class { 'apache':
+ manage_shorewall => true,
+ manage_munin => true,
+ no_default_site => true,
+ ssl => true
+ }
+
+You can install the ITK worker model to enforce stronger, per-user security:
+
+ include apache::itk
+
+On CentOS you can include 'apache::itk_plus' to get that mode. Not currently
+implemented for other operating systems.
+
+You can combine SSL support and the ITK worker model by including both classes.
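+
+For example (a minimal sketch, using the two classes documented above):
+
+ include apache::ssl
+ include apache::itk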
+
+
+Configuring Apache
+------------------
+
+To deploy a configuration file to the conf.d or include.d directory under
+Apache's config directory, you can use the following:
+
+ apache::config::file { 'filename':
+ content => 'Alias /thisApplication /usr/share/thisApplication/htdocs',
+ }
+
+By default this will deploy a global conf.d configuration file called 'filename'
+with that content.
+
+You can pass the parameter 'type => include' to add includes for vhosts.
+
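+For example (a sketch only; the snippet name and the directive inside it are
+hypothetical):
+
+ apache::config::file { 'proxy_preserve_host':
+   type    => 'include',
+   content => 'ProxyPreserveHost On',
+ }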
+
+To manage users in an htpasswd file:
+
+ apache::htpasswd_user { "joe@$domain":
+ ensure => present, # default: present
+ site => "$domain", # default: 'absent' - will use $name
+ username => 'joe', # default: 'absent' - will use $name
+ password => "pass",
+ password_iscrypted => false, # default: false - will sha1 hash the value
+ path => 'absent' # default: 'absent' - /var/www/htpasswds/${site}
+ }
+
+This will place an encrypted version of "pass" for user joe into
+/var/www/htpasswds/${site}
+
+You will need to make sure that ${site} exists beforehand; see the
+apache::vhost define below for how to do that.
+
+VirtualHost files
+-----------------
+
+Vhosts can be added with the apache::vhost define.
+
+You can ship a flat file containing the configuration, or a template. That is
+controlled by the 'vhost_mode' parameter, which can be either 'file', or
+'template' (default).
+
+Unless specified, the source will be automatically pulled from
+modules/site_apache/{templates,files}/vhosts.d, searched in this order:
+
+ "puppet:///modules/site_apache/vhosts.d/${::fqdn}/${name}.conf",
+ "puppet:///modules/site_apache/vhosts.d/{$apache::cluster_node}/${name}.conf",
+ "puppet:///modules/site_apache/vhosts.d/${::operatingsystem}.${::operatingsystemmajrelease}/${name}.conf",
+ "puppet:///modules/site_apache/vhosts.d/${::operatingsystem}/${name}.conf",
+ "puppet:///modules/site_apache/vhosts.d/${name}.conf",
+
+otherwise you can pass a 'content' parameter to configure a template location that
+it should be pulled from, or a 'vhost_source' parameter to specify the file source.
+
+For example:
+
+This would deploy the vhost for $domain, pulled from a file found in the sources
+listed above:
+
+ apache::vhost { "$domain": vhost_mode => 'file' }
+
+ apache::vhost { "$domain":
+ vhost_mode => 'file',
+ vhost_source => "modules/site_configs/vhosts.d/${name}.conf"
+ }
+
+There are several other configurables that you can pass to each vhost
+definition (see the sketch after this list):
+
+* logmode:
+ - default: Do normal logging to CustomLog and ErrorLog
+ - nologs: Send every logging to /dev/null
+ - anonym: Don't log ips for CustomLog, send ErrorLog to /dev/null
+ - semianonym: Don't log ips for CustomLog, log normal ErrorLog
+
+* run_mode: controls in which mode the vhost should be run; there are different setups
+ possible:
+ - normal: (*default*) run vhost with the currently active worker (default: prefork); don't
+ set up anything special
+ - itk: run vhost with the mpm_itk module (Incompatibility: cannot be used in combination
+ with 'proxy-itk' & 'static-itk' mode)
+ - proxy-itk: run vhost with a dual prefork/itk setup, where prefork just proxies all the
+ requests for the itk setup, which listens only on the loopback device.
+ (Incompatibility: cannot be used in combination with the itk setup.)
+ - static-itk: run vhost with a dual prefork/itk setup, where prefork serves all the static
+ content and proxies the dynamic calls to the itk setup, that listens only on
+ the loopback device (Incompatibility: cannot be used in combination with
+ 'itk' mode)
+
+* mod_security: Whether we use mod_security or not (will include mod_security module)
+ - false: (*default*) don't activate mod_security
+ - true: activate mod_security
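+
+For example (a sketch only; the values are hypothetical, the parameter names are
+the ones documented above):
+
+ apache::vhost { "$domain":
+   run_mode => 'proxy-itk',
+   logmode  => 'semianonym',
+ }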
+
+For templates, you can pass various parameters that will automatically configure
+the template accordingly (such as php_options and php_settings). Please see
+manifests/vhost/template.pp for the full list.
+
+There are various pre-made vhost configurations with sensible defaults that you can use:
+
+- apache::vhost::gitweb - sets up a gitweb vhost
+- apache::vhost::modperl - uses modperl, with optional fastcgi
+- apache::vhost::passenger - setup passenger
+- apache::vhost::proxy - setup a proxy vhost
+- apache::vhost::redirect - vhost to redirect hosts
+- apache::vhost::static - a static vhost
+- apache::vhost::webdav - for managing webdav-accessible targets
+
+Additionally, for php sites, there are several handy pre-made vhost configurations:
+
+- apache::vhost::php::drupal
+- apache::vhost::php::gallery2
+- apache::vhost::php::global_exec_bin_dir
+- apache::vhost::php::joomla
+- apache::vhost::php::mediawiki
+- apache::vhost::php::safe_mode_bin
+- apache::vhost::php::silverstripe
+- apache::vhost::php::simplemachine
+- apache::vhost::php::spip
+- apache::vhost::php::standard
+- apache::vhost::php::typo3
+- apache::vhost::php::webapp
+- apache::vhost::php::wordpress
diff --git a/puppet/modules/apache/Rakefile b/puppet/modules/apache/Rakefile
new file mode 100644
index 00000000..ec1c52b3
--- /dev/null
+++ b/puppet/modules/apache/Rakefile
@@ -0,0 +1,26 @@
+require 'bundler'
+Bundler.require(:rake)
+
+require 'puppetlabs_spec_helper/rake_tasks'
+require 'puppet-lint/tasks/puppet-lint'
+
+Rake::Task[:lint].clear
+PuppetLint::RakeTask.new :lint do |config|
+ config.ignore_paths = ["spec/**/*.pp", "vendor/**/*.pp"]
+ config.log_format = '%{path}:%{linenumber}:%{KIND}: %{message}'
+ config.disable_checks = [ "class_inherits_from_params_class", "80chars" ]
+end
+
+# use librarian-puppet to manage fixtures instead of .fixtures.yml
+# offers more possibilities like explicit version management, forge downloads,...
+task :librarian_spec_prep do
+ sh "librarian-puppet install --path=spec/fixtures/modules/"
+ pwd = `pwd`.strip
+ unless File.directory?("#{pwd}/spec/fixtures/modules/apache")
+ sh "ln -s #{pwd} #{pwd}/spec/fixtures/modules/apache"
+ end
+end
+task :spec_prep => :librarian_spec_prep
+
+
+task :default => [:spec, :lint]
diff --git a/puppet/modules/apache/files/conf.d/CentOS/ssl.conf b/puppet/modules/apache/files/conf.d/CentOS/ssl.conf
new file mode 100644
index 00000000..7f9be957
--- /dev/null
+++ b/puppet/modules/apache/files/conf.d/CentOS/ssl.conf
@@ -0,0 +1,76 @@
+#
+# This is the Apache server configuration file providing SSL support.
+# It contains the configuration directives to instruct the server how to
+# serve pages over an https connection. For detailed information about these
+# directives see <URL:http://httpd.apache.org/docs/2.2/mod/mod_ssl.html>
+#
+# Do NOT simply read the instructions in here without understanding
+# what they do. They're here only as hints or reminders. If you are unsure
+# consult the online docs. You have been warned.
+#
+
+LoadModule ssl_module modules/mod_ssl.so
+
+#
+# When we also provide SSL we have to listen to
+# the HTTPS port in addition.
+#
+Listen 443
+NameVirtualHost *:443
+
+##
+## SSL Global Context
+##
+## All SSL configuration in this context applies both to
+## the main server and all SSL-enabled virtual hosts.
+##
+
+#
+# Some MIME-types for downloading Certificates and CRLs
+#
+AddType application/x-x509-ca-cert .crt
+AddType application/x-pkcs7-crl .crl
+
+# Pass Phrase Dialog:
+# Configure the pass phrase gathering process.
+# The filtering dialog program (`builtin' is an internal
+# terminal dialog) has to provide the pass phrase on stdout.
+SSLPassPhraseDialog builtin
+
+# Inter-Process Session Cache:
+# Configure the SSL Session Cache: First the mechanism
+# to use and second the expiring timeout (in seconds).
+#SSLSessionCache dc:UNIX:/var/cache/mod_ssl/distcache
+SSLSessionCache shmcb:/var/cache/mod_ssl/scache(512000)
+SSLSessionCacheTimeout 300
+
+# Semaphore:
+# Configure the path to the mutual exclusion semaphore the
+# SSL engine uses internally for inter-process synchronization.
+SSLMutex default
+
+# Pseudo Random Number Generator (PRNG):
+# Configure one or more sources to seed the PRNG of the
+# SSL library. The seed data should be of good random quality.
+# WARNING! On some platforms /dev/random blocks if not enough entropy
+# is available. This means you then cannot use the /dev/random device
+# because it would lead to very long connection times (as long as
+# it requires to make more entropy available). But usually those
+# platforms additionally provide a /dev/urandom device which doesn't
+# block. So, if available, use this one instead. Read the mod_ssl User
+# Manual for more details.
+SSLRandomSeed startup file:/dev/urandom 256
+SSLRandomSeed connect builtin
+#SSLRandomSeed startup file:/dev/random 512
+#SSLRandomSeed connect file:/dev/random 512
+#SSLRandomSeed connect file:/dev/urandom 512
+
+#
+# Use "SSLCryptoDevice" to enable any supported hardware
+# accelerators. Use "openssl engine -v" to list supported
+# engine names. NOTE: If you enable an accelerator and the
+# server does not start, consult the error logs and ensure
+# your accelerator is functioning properly.
+#
+SSLCryptoDevice builtin
+#SSLCryptoDevice ubsec
diff --git a/puppet/modules/apache/files/conf.d/CentOS/welcome.conf b/puppet/modules/apache/files/conf.d/CentOS/welcome.conf
new file mode 100644
index 00000000..7d7b0cd6
--- /dev/null
+++ b/puppet/modules/apache/files/conf.d/CentOS/welcome.conf
@@ -0,0 +1,10 @@
+#
+# This configuration file enables the default "Welcome"
+# page if there is no default index page present for
+# the root URL. To disable the Welcome page, comment
+# out all the lines below.
+#
+#<LocationMatch "^/+$">
+# Options -Indexes
+# ErrorDocument 403 /error/noindex.html
+#</LocationMatch>
diff --git a/puppet/modules/apache/files/conf.d/Debian/charset b/puppet/modules/apache/files/conf.d/Debian/charset
new file mode 100644
index 00000000..40d7198b
--- /dev/null
+++ b/puppet/modules/apache/files/conf.d/Debian/charset
@@ -0,0 +1,6 @@
+# Read the documentation before enabling AddDefaultCharset.
+# In general, it is only a good idea if you know that all your files
+# have this encoding. It will override any encoding given in the files
+# in meta http-equiv or xml encoding tags.
+
+#AddDefaultCharset UTF-8
diff --git a/puppet/modules/apache/files/conf.d/Debian/security b/puppet/modules/apache/files/conf.d/Debian/security
new file mode 100644
index 00000000..55b3e519
--- /dev/null
+++ b/puppet/modules/apache/files/conf.d/Debian/security
@@ -0,0 +1,50 @@
+#
+# Disable access to the entire file system except for the directories that
+# are explicitly allowed later.
+#
+# This currently breaks the configurations that come with some web application
+# Debian packages. It will be made the default for the release after lenny.
+#
+#<Directory />
+# AllowOverride None
+# Order Deny,Allow
+# Deny from all
+#</Directory>
+
+
+# Changing the following options will not really affect the security of the
+# server, but might make attacks slightly more difficult in some cases.
+
+#
+# ServerTokens
+# This directive configures what you return as the Server HTTP response
+# Header. The default is 'Full' which sends information about the OS-Type
+# and compiled in modules.
+# Set to one of: Full | OS | Minimal | Minor | Major | Prod
+# where Full conveys the most information, and Prod the least.
+#
+#ServerTokens Minimal
+ServerTokens Full
+
+#
+# Optionally add a line containing the server version and virtual host
+# name to server-generated pages (internal error documents, FTP directory
+# listings, mod_status and mod_info output etc., but not CGI generated
+# documents or custom error documents).
+# Set to "EMail" to also include a mailto: link to the ServerAdmin.
+# Set to one of: On | Off | EMail
+#
+#ServerSignature Off
+ServerSignature On
+
+#
+# Allow TRACE method
+#
+# Set to "extended" to also reflect the request body (only for testing and
+# diagnostic purposes).
+#
+# Set to one of: On | Off | extended
+#
+#TraceEnable Off
+TraceEnable On
+
diff --git a/puppet/modules/apache/files/conf.d/Debian/ssl.conf b/puppet/modules/apache/files/conf.d/Debian/ssl.conf
new file mode 100644
index 00000000..bcfe8201
--- /dev/null
+++ b/puppet/modules/apache/files/conf.d/Debian/ssl.conf
@@ -0,0 +1 @@
+NameVirtualHost *:443
diff --git a/puppet/modules/apache/files/conf.d/do_includes.conf b/puppet/modules/apache/files/conf.d/do_includes.conf
new file mode 100644
index 00000000..f44d9d4a
--- /dev/null
+++ b/puppet/modules/apache/files/conf.d/do_includes.conf
@@ -0,0 +1,5 @@
+#
+# Add index.shtml to the list of files that will be served as directory
+# indexes.
+#
+DirectoryIndex index.shtml
diff --git a/puppet/modules/apache/files/conf.d/git.conf b/puppet/modules/apache/files/conf.d/git.conf
new file mode 100644
index 00000000..c03ee2b5
--- /dev/null
+++ b/puppet/modules/apache/files/conf.d/git.conf
@@ -0,0 +1,5 @@
+# deny access to git repository folders
+<DirectoryMatch .*\.git/.*>
+ Order allow,deny
+ Deny From All
+</DirectoryMatch>
diff --git a/puppet/modules/apache/files/conf.d/mozilla_autoconfig.conf b/puppet/modules/apache/files/conf.d/mozilla_autoconfig.conf
new file mode 100644
index 00000000..6e4f7db8
--- /dev/null
+++ b/puppet/modules/apache/files/conf.d/mozilla_autoconfig.conf
@@ -0,0 +1,6 @@
+Alias /.well-known/autoconfig/mail/config-v1.1.xml /var/www/autoconfig/config.shtml
+<Directory /var/www/autoconfig/>
+ Options +Includes
+ AddType application/xml .shtml
+ AddOutputFilter INCLUDES .shtml
+</Directory>
diff --git a/puppet/modules/apache/files/conf.d/status.conf b/puppet/modules/apache/files/conf.d/status.conf
new file mode 100644
index 00000000..fb706cc1
--- /dev/null
+++ b/puppet/modules/apache/files/conf.d/status.conf
@@ -0,0 +1,24 @@
+###########################################################
+### this file is managed by PUPPET ####
+### only modify it in puppet repo or you will ####
+### lose the changes !                                ####
+###########################################################
+
+# Allow server status reports generated by mod_status,
+# with the URL of http://servername/server-status
+<Location /server-status>
+ SetHandler server-status
+ Order deny,allow
+ Deny from all
+ Allow from 127.0.0.1
+
+ <IfModule mod_security2.c>
+ SecRuleEngine Off
+ </IfModule>
+</Location>
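+
+# Illustrative usage note (not part of the stock snippet): once this file is
+# active, the report can be checked locally, e.g. with
+#   curl http://127.0.0.1/server-status?auto
+# where "?auto" selects mod_status' machine-readable output.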
+
+# ExtendedStatus controls whether Apache will generate "full" status
+# information (ExtendedStatus On) or just basic information (ExtendedStatus
+# Off) when the "server-status" handler is called.
+ExtendedStatus On
+
diff --git a/puppet/modules/apache/files/conf.d/vhosts.conf b/puppet/modules/apache/files/conf.d/vhosts.conf
new file mode 100644
index 00000000..86485501
--- /dev/null
+++ b/puppet/modules/apache/files/conf.d/vhosts.conf
@@ -0,0 +1,8 @@
+###########################################################
+### this file is managed by PUPPET ####
+### only modify it in puppet repo or you will ####
+### lose the changes !                                ####
+###########################################################
+
+NameVirtualHost *:80
+Include vhosts.d/*.conf
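+
+# Illustrative sketch only: a minimal vhost file dropped into vhosts.d/
+# (hypothetical name example.org.conf) could look like this commented-out block.
+#<VirtualHost *:80>
+#    ServerName example.org
+#    DocumentRoot /var/www/example.org
+#</VirtualHost>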
diff --git a/puppet/modules/apache/files/config/Debian.jessie/apache2.conf b/puppet/modules/apache/files/config/Debian.jessie/apache2.conf
new file mode 100644
index 00000000..7b1f96f5
--- /dev/null
+++ b/puppet/modules/apache/files/config/Debian.jessie/apache2.conf
@@ -0,0 +1,221 @@
+# This is the main Apache server configuration file. It contains the
+# configuration directives that give the server its instructions.
+# See http://httpd.apache.org/docs/2.4/ for detailed information about
+# the directives and /usr/share/doc/apache2/README.Debian about Debian specific
+# hints.
+#
+#
+# Summary of how the Apache 2 configuration works in Debian:
+# The Apache 2 web server configuration in Debian is quite different to
+# upstream's suggested way to configure the web server. This is because Debian's
+# default Apache2 installation attempts to make adding and removing modules,
+# virtual hosts, and extra configuration directives as flexible as possible, in
+# order to make automating the changes and administering the server as easy as
+# possible.
+
+# It is split into several files forming the configuration hierarchy outlined
+# below, all located in the /etc/apache2/ directory:
+#
+# /etc/apache2/
+# |-- apache2.conf
+# | `-- ports.conf
+# |-- mods-enabled
+# | |-- *.load
+# | `-- *.conf
+# |-- conf-enabled
+# | `-- *.conf
+# `-- sites-enabled
+# `-- *.conf
+#
+#
+# * apache2.conf is the main configuration file (this file). It puts the pieces
+# together by including all remaining configuration files when starting up the
+# web server.
+#
+# * ports.conf is always included from the main configuration file. It is
+# supposed to determine listening ports for incoming connections which can be
+# customized anytime.
+#
+# * Configuration files in the mods-enabled/, conf-enabled/ and sites-enabled/
+# directories contain particular configuration snippets which manage modules,
+# global configuration fragments, or virtual host configurations,
+# respectively.
+#
+# They are activated by symlinking available configuration files from their
+# respective *-available/ counterparts. These should be managed by using our
+# helpers a2enmod/a2dismod, a2ensite/a2dissite and a2enconf/a2disconf. See
+# their respective man pages for detailed information.
+#
+# * The binary is called apache2. Due to the use of environment variables, in
+# the default configuration, apache2 needs to be started/stopped with
+# /etc/init.d/apache2 or apache2ctl. Calling /usr/bin/apache2 directly will not
+# work with the default configuration.
+
+
+# Global configuration
+#
+
+#
+# ServerRoot: The top of the directory tree under which the server's
+# configuration, error, and log files are kept.
+#
+# NOTE! If you intend to place this on an NFS (or otherwise network)
+# mounted filesystem then please read the Mutex documentation (available
+# at <URL:http://httpd.apache.org/docs/2.4/mod/core.html#mutex>);
+# you will save yourself a lot of trouble.
+#
+# Do NOT add a slash at the end of the directory path.
+#
+#ServerRoot "/etc/apache2"
+
+#
+# The accept serialization lock file MUST BE STORED ON A LOCAL DISK.
+#
+Mutex file:${APACHE_LOCK_DIR} default
+
+#
+# PidFile: The file in which the server should record its process
+# identification number when it starts.
+# This needs to be set in /etc/apache2/envvars
+#
+PidFile ${APACHE_PID_FILE}
+
+#
+# Timeout: The number of seconds before receives and sends time out.
+#
+Timeout 300
+
+#
+# KeepAlive: Whether or not to allow persistent connections (more than
+# one request per connection). Set to "Off" to deactivate.
+#
+KeepAlive On
+
+#
+# MaxKeepAliveRequests: The maximum number of requests to allow
+# during a persistent connection. Set to 0 to allow an unlimited amount.
+# We recommend you leave this number high, for maximum performance.
+#
+MaxKeepAliveRequests 100
+
+#
+# KeepAliveTimeout: Number of seconds to wait for the next request from the
+# same client on the same connection.
+#
+KeepAliveTimeout 5
+
+
+# These need to be set in /etc/apache2/envvars
+User ${APACHE_RUN_USER}
+Group ${APACHE_RUN_GROUP}
+
+#
+# HostnameLookups: Log the names of clients or just their IP addresses
+# e.g., www.apache.org (on) or 204.62.129.132 (off).
+# The default is off because it'd be overall better for the net if people
+# had to knowingly turn this feature on, since enabling it means that
+# each client request will result in AT LEAST one lookup request to the
+# nameserver.
+#
+HostnameLookups Off
+
+# ErrorLog: The location of the error log file.
+# If you do not specify an ErrorLog directive within a <VirtualHost>
+# container, error messages relating to that virtual host will be
+# logged here. If you *do* define an error logfile for a <VirtualHost>
+# container, that host's errors will be logged there and not here.
+#
+ErrorLog ${APACHE_LOG_DIR}/error.log
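+# Illustrative example only: a <VirtualHost> block may override this with its
+# own directive, e.g. "ErrorLog ${APACHE_LOG_DIR}/example.org-error.log"
+# (hypothetical file name); that host's errors then no longer land in error.log.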
+
+#
+# LogLevel: Control the severity of messages logged to the error_log.
+# Available values: trace8, ..., trace1, debug, info, notice, warn,
+# error, crit, alert, emerg.
+# It is also possible to configure the log level for particular modules, e.g.
+# "LogLevel info ssl:warn"
+#
+LogLevel warn
+
+# Include module configuration:
+IncludeOptional mods-enabled/*.load
+IncludeOptional mods-enabled/*.conf
+
+# Include list of ports to listen on
+Include ports.conf
+
+
+# Sets the default security model of the Apache2 HTTPD server. It does
+# not allow access to the root filesystem outside of /usr/share and /var/www.
+# The former is used by web applications packaged in Debian,
+# the latter may be used for local directories served by the web server. If
+# your system is serving content from a sub-directory in /srv you must allow
+# access here, or in any related virtual host.
+<Directory />
+ Options FollowSymLinks
+ AllowOverride None
+ Require all denied
+</Directory>
+
+<Directory /usr/share>
+ AllowOverride None
+ Require all granted
+</Directory>
+
+<Directory /var/www/>
+ Options Indexes FollowSymLinks
+ AllowOverride None
+ Require all granted
+</Directory>
+
+#<Directory /srv/>
+# Options Indexes FollowSymLinks
+# AllowOverride None
+# Require all granted
+#</Directory>
+
+
+
+
+# AccessFileName: The name of the file to look for in each directory
+# for additional configuration directives. See also the AllowOverride
+# directive.
+#
+AccessFileName .htaccess
+
+#
+# The following lines prevent .htaccess and .htpasswd files from being
+# viewed by Web clients.
+#
+<FilesMatch "^\.ht">
+ Require all denied
+</FilesMatch>
+
+
+#
+# The following directives define some format nicknames for use with
+# a CustomLog directive.
+#
+# These deviate from the Common Log Format definitions in that they use %O
+# (the actual bytes sent including headers) instead of %b (the size of the
+# requested file), because the latter makes it impossible to detect partial
+# requests.
+#
+# Note that the use of %{X-Forwarded-For}i instead of %h is not recommended.
+# Use mod_remoteip instead.
+#
+LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined
+LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined
+LogFormat "%h %l %u %t \"%r\" %>s %O" common
+LogFormat "%{Referer}i -> %U" referer
+LogFormat "%{User-agent}i" agent
+
+# Include of directories ignores editors' and dpkg's backup files,
+# see README.Debian for details.
+
+# Include generic snippets of statements
+IncludeOptional conf-enabled/*.conf
+
+# Include the virtual host configurations:
+IncludeOptional sites-enabled/*.conf
+
+# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
diff --git a/puppet/modules/apache/files/config/Debian.wheezy/apache2.conf b/puppet/modules/apache/files/config/Debian.wheezy/apache2.conf
new file mode 100644
index 00000000..50545671
--- /dev/null
+++ b/puppet/modules/apache/files/config/Debian.wheezy/apache2.conf
@@ -0,0 +1,268 @@
+# This is the main Apache server configuration file. It contains the
+# configuration directives that give the server its instructions.
+# See http://httpd.apache.org/docs/2.2/ for detailed information about
+# the directives and /usr/share/doc/apache2-common/README.Debian.gz about
+# Debian specific hints.
+#
+#
+# Summary of how the Apache 2 configuration works in Debian:
+# The Apache 2 web server configuration in Debian is quite different to
+# upstream's suggested way to configure the web server. This is because Debian's
+# default Apache2 installation attempts to make adding and removing modules,
+# virtual hosts, and extra configuration directives as flexible as possible, in
+# order to make automating the changes and administering the server as easy as
+# possible.
+
+# It is split into several files forming the configuration hierarchy outlined
+# below, all located in the /etc/apache2/ directory:
+#
+# /etc/apache2/
+# |-- apache2.conf
+# | `-- ports.conf
+# |-- mods-enabled
+# | |-- *.load
+# | `-- *.conf
+# |-- conf.d
+# | `-- *
+# `-- sites-enabled
+# `-- *
+#
+#
+# * apache2.conf is the main configuration file (this file). It puts the pieces
+# together by including all remaining configuration files when starting up the
+# web server.
+#
+# In order to avoid conflicts with backup files, the Include directive is
+# adapted to ignore files that:
+# - do not begin with a letter or number
+# - contain a character that is neither letter nor number nor _-:.
+# - contain .dpkg
+#
+# Yet we strongly suggest that all configuration files either end with a
+# .conf or .load suffix in the file name. The next Debian release will
+# ignore files not ending with .conf (or .load for mods-enabled).
+#
+# * ports.conf is always included from the main configuration file. It is
+# supposed to determine listening ports for incoming connections, and which
+# of these ports are used for name based virtual hosts.
+#
+# * Configuration files in the mods-enabled/ and sites-enabled/ directories
+# contain particular configuration snippets which manage modules or virtual
+# host configurations, respectively.
+#
+# They are activated by symlinking available configuration files from their
+# respective *-available/ counterparts. These should be managed by using our
+# helpers a2enmod/a2dismod, a2ensite/a2dissite. See
+# their respective man pages for detailed information.
+#
+# * Configuration files in the conf.d directory are either provided by other
+# packages or may be added by the local administrator. Local additions
+# should start with local- or end with .local.conf to avoid name clashes. All
+# files in conf.d are considered (excluding the exceptions noted above) by
+# the Apache 2 web server.
+#
+# * The binary is called apache2. Due to the use of environment variables, in
+# the default configuration, apache2 needs to be started/stopped with
+# /etc/init.d/apache2 or apache2ctl. Calling /usr/bin/apache2 directly will not
+# work with the default configuration.
+
+
+# Global configuration
+#
+
+#
+# ServerRoot: The top of the directory tree under which the server's
+# configuration, error, and log files are kept.
+#
+# NOTE! If you intend to place this on an NFS (or otherwise network)
+# mounted filesystem then please read the LockFile documentation (available
+# at <URL:http://httpd.apache.org/docs/2.2/mod/mpm_common.html#lockfile>);
+# you will save yourself a lot of trouble.
+#
+# Do NOT add a slash at the end of the directory path.
+#
+#ServerRoot "/etc/apache2"
+
+#
+# The accept serialization lock file MUST BE STORED ON A LOCAL DISK.
+#
+LockFile ${APACHE_LOCK_DIR}/accept.lock
+
+#
+# PidFile: The file in which the server should record its process
+# identification number when it starts.
+# This needs to be set in /etc/apache2/envvars
+#
+PidFile ${APACHE_PID_FILE}
+
+#
+# Timeout: The number of seconds before receives and sends time out.
+#
+Timeout 300
+
+#
+# KeepAlive: Whether or not to allow persistent connections (more than
+# one request per connection). Set to "Off" to deactivate.
+#
+KeepAlive On
+
+#
+# MaxKeepAliveRequests: The maximum number of requests to allow
+# during a persistent connection. Set to 0 to allow an unlimited amount.
+# We recommend you leave this number high, for maximum performance.
+#
+MaxKeepAliveRequests 100
+
+#
+# KeepAliveTimeout: Number of seconds to wait for the next request from the
+# same client on the same connection.
+#
+KeepAliveTimeout 5
+
+##
+## Server-Pool Size Regulation (MPM specific)
+##
+
+# prefork MPM
+# StartServers: number of server processes to start
+# MinSpareServers: minimum number of server processes which are kept spare
+# MaxSpareServers: maximum number of server processes which are kept spare
+# MaxClients: maximum number of server processes allowed to start
+# MaxRequestsPerChild: maximum number of requests a server process serves
+<IfModule mpm_prefork_module>
+ StartServers 5
+ MinSpareServers 5
+ MaxSpareServers 10
+ MaxClients 150
+ MaxRequestsPerChild 0
+</IfModule>
+
+# worker MPM
+# StartServers: initial number of server processes to start
+# MinSpareThreads: minimum number of worker threads which are kept spare
+# MaxSpareThreads: maximum number of worker threads which are kept spare
+# ThreadLimit: ThreadsPerChild can be changed to this maximum value during a
+# graceful restart. ThreadLimit can only be changed by stopping
+# and starting Apache.
+# ThreadsPerChild: constant number of worker threads in each server process
+# MaxClients: maximum number of simultaneous client connections
+# MaxRequestsPerChild: maximum number of requests a server process serves
+<IfModule mpm_worker_module>
+ StartServers 2
+ MinSpareThreads 25
+ MaxSpareThreads 75
+ ThreadLimit 64
+ ThreadsPerChild 25
+ MaxClients 150
+ MaxRequestsPerChild 0
+</IfModule>
+
+# event MPM
+# StartServers: initial number of server processes to start
+# MinSpareThreads: minimum number of worker threads which are kept spare
+# MaxSpareThreads: maximum number of worker threads which are kept spare
+# ThreadsPerChild: constant number of worker threads in each server process
+# MaxClients: maximum number of simultaneous client connections
+# MaxRequestsPerChild: maximum number of requests a server process serves
+<IfModule mpm_event_module>
+ StartServers 2
+ MinSpareThreads 25
+ MaxSpareThreads 75
+ ThreadLimit 64
+ ThreadsPerChild 25
+ MaxClients 150
+ MaxRequestsPerChild 0
+</IfModule>
+
+# These need to be set in /etc/apache2/envvars
+User ${APACHE_RUN_USER}
+Group ${APACHE_RUN_GROUP}
+
+#
+# AccessFileName: The name of the file to look for in each directory
+# for additional configuration directives. See also the AllowOverride
+# directive.
+#
+
+AccessFileName .htaccess
+
+#
+# The following lines prevent .htaccess and .htpasswd files from being
+# viewed by Web clients.
+#
+<Files ~ "^\.ht">
+ Order allow,deny
+ Deny from all
+ Satisfy all
+</Files>
+
+#
+# DefaultType is the default MIME type the server will use for a document
+# if it cannot otherwise determine one, such as from filename extensions.
+# If your server contains mostly text or HTML documents, "text/plain" is
+# a good value. If most of your content is binary, such as applications
+# or images, you may want to use "application/octet-stream" instead to
+# keep browsers from trying to display binary files as though they are
+# text.
+#
+# It is also possible to omit any default MIME type and let the
+# client's browser guess an appropriate action instead. Typically the
+# browser will decide based on the file's extension then. In cases
+# where no good assumption can be made, letting the default MIME type
+# unset is suggested instead of forcing the browser to accept
+# incorrect metadata.
+#
+DefaultType None
+
+
+#
+# HostnameLookups: Log the names of clients or just their IP addresses
+# e.g., www.apache.org (on) or 204.62.129.132 (off).
+# The default is off because it'd be overall better for the net if people
+# had to knowingly turn this feature on, since enabling it means that
+# each client request will result in AT LEAST one lookup request to the
+# nameserver.
+#
+HostnameLookups Off
+
+# ErrorLog: The location of the error log file.
+# If you do not specify an ErrorLog directive within a <VirtualHost>
+# container, error messages relating to that virtual host will be
+# logged here. If you *do* define an error logfile for a <VirtualHost>
+# container, that host's errors will be logged there and not here.
+#
+ErrorLog ${APACHE_LOG_DIR}/error.log
+
+#
+# LogLevel: Control the number of messages logged to the error_log.
+# Possible values include: debug, info, notice, warn, error, crit,
+# alert, emerg.
+#
+LogLevel warn
+
+# Include module configuration:
+Include mods-enabled/*.load
+Include mods-enabled/*.conf
+
+# Include list of ports to listen on and which to use for name based vhosts
+Include ports.conf
+
+#
+# The following directives define some format nicknames for use with
+# a CustomLog directive (see below).
+# If you are behind a reverse proxy, you might want to change %h into %{X-Forwarded-For}i
+#
+LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined
+LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined
+LogFormat "%h %l %u %t \"%r\" %>s %O" common
+LogFormat "%{Referer}i -> %U" referer
+LogFormat "%{User-agent}i" agent
+
+# Include of directories ignores editors' and dpkg's backup files,
+# see the comments above for details.
+
+# Include generic snippets of statements
+Include conf.d/
+
+# Include the virtual host configurations:
+Include sites-enabled/
diff --git a/puppet/modules/apache/files/config/Debian/apache2.conf b/puppet/modules/apache/files/config/Debian/apache2.conf
new file mode 100644
index 00000000..1e97b4eb
--- /dev/null
+++ b/puppet/modules/apache/files/config/Debian/apache2.conf
@@ -0,0 +1,230 @@
+#
+# Based upon the NCSA server configuration files originally by Rob McCool.
+#
+# This is the main Apache server configuration file. It contains the
+# configuration directives that give the server its instructions.
+# See http://httpd.apache.org/docs/2.2/ for detailed information about
+# the directives.
+#
+# Do NOT simply read the instructions in here without understanding
+# what they do. They're here only as hints or reminders. If you are unsure
+# consult the online docs. You have been warned.
+#
+# The configuration directives are grouped into three basic sections:
+# 1. Directives that control the operation of the Apache server process as a
+# whole (the 'global environment').
+# 2. Directives that define the parameters of the 'main' or 'default' server,
+# which responds to requests that aren't handled by a virtual host.
+# These directives also provide default values for the settings
+# of all virtual hosts.
+# 3. Settings for virtual hosts, which allow Web requests to be sent to
+# different IP addresses or hostnames and have them handled by the
+# same Apache server process.
+#
+# Configuration and logfile names: If the filenames you specify for many
+# of the server's control files begin with "/" (or "drive:/" for Win32), the
+# server will use that explicit path. If the filenames do *not* begin
+# with "/", the value of ServerRoot is prepended -- so "foo.log"
+# with ServerRoot set to "/etc/apache2" will be interpreted by the
+# server as "/etc/apache2/foo.log".
+#
+
+### Section 1: Global Environment
+#
+# The directives in this section affect the overall operation of Apache,
+# such as the number of concurrent requests it can handle or where it
+# can find its configuration files.
+#
+
+#
+# ServerRoot: The top of the directory tree under which the server's
+# configuration, error, and log files are kept.
+#
+# NOTE! If you intend to place this on an NFS (or otherwise network)
+# mounted filesystem then please read the LockFile documentation (available
+# at <URL:http://httpd.apache.org/docs/2.2/mod/mpm_common.html#lockfile>);
+# you will save yourself a lot of trouble.
+#
+# Do NOT add a slash at the end of the directory path.
+#
+#ServerRoot "/etc/apache2"
+
+#
+# The accept serialization lock file MUST BE STORED ON A LOCAL DISK.
+#
+LockFile ${APACHE_LOCK_DIR}/accept.lock
+
+#
+# PidFile: The file in which the server should record its process
+# identification number when it starts.
+# This needs to be set in /etc/apache2/envvars
+#
+PidFile ${APACHE_PID_FILE}
+
+#
+# Timeout: The number of seconds before receives and sends time out.
+#
+Timeout 300
+
+#
+# KeepAlive: Whether or not to allow persistent connections (more than
+# one request per connection). Set to "Off" to deactivate.
+#
+KeepAlive On
+
+#
+# MaxKeepAliveRequests: The maximum number of requests to allow
+# during a persistent connection. Set to 0 to allow an unlimited amount.
+# We recommend you leave this number high, for maximum performance.
+#
+MaxKeepAliveRequests 100
+
+#
+# KeepAliveTimeout: Number of seconds to wait for the next request from the
+# same client on the same connection.
+#
+KeepAliveTimeout 15
+
+##
+## Server-Pool Size Regulation (MPM specific)
+##
+
+# prefork MPM
+# StartServers: number of server processes to start
+# MinSpareServers: minimum number of server processes which are kept spare
+# MaxSpareServers: maximum number of server processes which are kept spare
+# MaxClients: maximum number of server processes allowed to start
+# MaxRequestsPerChild: maximum number of requests a server process serves
+<IfModule mpm_prefork_module>
+ StartServers 5
+ MinSpareServers 5
+ MaxSpareServers 10
+ MaxClients 150
+ MaxRequestsPerChild 0
+</IfModule>
+
+# worker MPM
+# StartServers: initial number of server processes to start
+# MaxClients: maximum number of simultaneous client connections
+# MinSpareThreads: minimum number of worker threads which are kept spare
+# MaxSpareThreads: maximum number of worker threads which are kept spare
+# ThreadLimit: ThreadsPerChild can be changed to this maximum value during a
+# graceful restart. ThreadLimit can only be changed by stopping
+# and starting Apache.
+# ThreadsPerChild: constant number of worker threads in each server process
+# MaxRequestsPerChild: maximum number of requests a server process serves
+<IfModule mpm_worker_module>
+ StartServers 2
+ MinSpareThreads 25
+ MaxSpareThreads 75
+ ThreadLimit 64
+ ThreadsPerChild 25
+ MaxClients 150
+ MaxRequestsPerChild 0
+</IfModule>
+
+# event MPM
+# StartServers: initial number of server processes to start
+# MaxClients: maximum number of simultaneous client connections
+# MinSpareThreads: minimum number of worker threads which are kept spare
+# MaxSpareThreads: maximum number of worker threads which are kept spare
+# ThreadsPerChild: constant number of worker threads in each server process
+# MaxRequestsPerChild: maximum number of requests a server process serves
+<IfModule mpm_event_module>
+ StartServers 2
+ MaxClients 150
+ MinSpareThreads 25
+ MaxSpareThreads 75
+ ThreadLimit 64
+ ThreadsPerChild 25
+ MaxRequestsPerChild 0
+</IfModule>
+
+# These need to be set in /etc/apache2/envvars
+User ${APACHE_RUN_USER}
+Group ${APACHE_RUN_GROUP}
+
+#
+# AccessFileName: The name of the file to look for in each directory
+# for additional configuration directives. See also the AllowOverride
+# directive.
+#
+
+AccessFileName .htaccess
+
+#
+# The following lines prevent .htaccess and .htpasswd files from being
+# viewed by Web clients.
+#
+<Files ~ "^\.ht">
+ Order allow,deny
+ Deny from all
+ Satisfy all
+</Files>
+
+#
+# DefaultType is the default MIME type the server will use for a document
+# if it cannot otherwise determine one, such as from filename extensions.
+# If your server contains mostly text or HTML documents, "text/plain" is
+# a good value. If most of your content is binary, such as applications
+# or images, you may want to use "application/octet-stream" instead to
+# keep browsers from trying to display binary files as though they are
+# text.
+#
+DefaultType text/plain
+
+
+#
+# HostnameLookups: Log the names of clients or just their IP addresses
+# e.g., www.apache.org (on) or 204.62.129.132 (off).
+# The default is off because it'd be overall better for the net if people
+# had to knowingly turn this feature on, since enabling it means that
+# each client request will result in AT LEAST one lookup request to the
+# nameserver.
+#
+HostnameLookups Off
+
+# ErrorLog: The location of the error log file.
+# If you do not specify an ErrorLog directive within a <VirtualHost>
+# container, error messages relating to that virtual host will be
+# logged here. If you *do* define an error logfile for a <VirtualHost>
+# container, that host's errors will be logged there and not here.
+#
+ErrorLog ${APACHE_LOG_DIR}/error.log
+
+#
+# LogLevel: Control the number of messages logged to the error_log.
+# Possible values include: debug, info, notice, warn, error, crit,
+# alert, emerg.
+#
+LogLevel warn
+
+# Include module configuration:
+Include mods-enabled/*.load
+Include mods-enabled/*.conf
+
+# Include all the user configurations:
+Include httpd.conf
+
+# Include ports listing
+Include ports.conf
+
+#
+# The following directives define some format nicknames for use with
+# a CustomLog directive (see below).
+# If you are behind a reverse proxy, you might want to change %h into %{X-Forwarded-For}i
+#
+LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined
+LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined
+LogFormat "%h %l %u %t \"%r\" %>s %O" common
+LogFormat "%{Referer}i -> %U" referer
+LogFormat "%{User-agent}i" agent
+
+# Include of directories ignores editors' and dpkg's backup files,
+# see README.Debian for details.
+
+# Include generic snippets of statements
+Include conf.d/
+
+# Include the virtual host configurations:
+Include sites-enabled/
diff --git a/puppet/modules/apache/files/config/OpenBSD/httpd.conf b/puppet/modules/apache/files/config/OpenBSD/httpd.conf
new file mode 100644
index 00000000..09e452e6
--- /dev/null
+++ b/puppet/modules/apache/files/config/OpenBSD/httpd.conf
@@ -0,0 +1,1120 @@
+# $OpenBSD: httpd.conf,v 1.22 2008/01/25 09:59:57 sthen Exp $
+#
+# Based upon the NCSA server configuration files originally by Rob McCool.
+#
+# This is the main Apache server configuration file. It contains the
+# configuration directives that give the server its instructions.
+# See <URL:http://www.apache.org/docs/> for detailed information about
+# the directives.
+#
+# Do NOT simply read the instructions in here without understanding
+# what they do. They're here only as hints or reminders. If you are unsure
+# consult the online docs. You have been warned.
+#
+# After this file is processed, the server will look for and process
+# /var/www/conf/srm.conf and then /var/www/conf/access.conf
+# unless you have overridden these with ResourceConfig and/or
+# AccessConfig directives here.
+#
+# The configuration directives are grouped into three basic sections:
+# 1. Directives that control the operation of the Apache server process as a
+# whole (the 'global environment').
+# 2. Directives that define the parameters of the 'main' or 'default' server,
+# which responds to requests that aren't handled by a virtual host.
+# These directives also provide default values for the settings
+# of all virtual hosts.
+# 3. Settings for virtual hosts, which allow Web requests to be sent to
+# different IP addresses or hostnames and have them handled by the
+# same Apache server process.
+#
+# Configuration and logfile names: If the filenames you specify for many
+# of the server's control files begin with "/" (or "drive:/" for Win32), the
+# server will use that explicit path. If the filenames do *not* begin
+# with "/", the value of ServerRoot is prepended -- so "logs/foo.log"
+# with ServerRoot set to "/usr/local/apache" will be interpreted by the
+# server as "/usr/local/apache/logs/foo.log".
+#
+
+### Section 1: Global Environment
+#
+# The directives in this section affect the overall operation of Apache,
+# such as the number of concurrent requests it can handle or where it
+# can find its configuration files.
+#
+
+#
+# ServerType is either inetd, or standalone. Inetd mode is only supported on
+# Unix platforms.
+#
+ServerType standalone
+
+#
+# ServerTokens is either Full, OS, Minimal, or ProductOnly.
+# The values define what version information is returned in the
+# Server header in HTTP responses.
+#
+# ServerTokens ProductOnly
+
+#
+# ServerRoot: The top of the directory tree under which the server's
+# configuration, error, and log files are kept.
+#
+# NOTE! If you intend to place this on an NFS (or otherwise network)
+# mounted filesystem then please read the LockFile documentation
+# (available at <URL:http://www.apache.org/docs/mod/core.html#lockfile>);
+# you will save yourself a lot of trouble.
+#
+# Do NOT add a slash at the end of the directory path.
+#
+ServerRoot "/var/www"
+
+#
+# The LockFile directive sets the path to the lockfile used when Apache
+# is compiled with either USE_FCNTL_SERIALIZED_ACCEPT or
+# USE_FLOCK_SERIALIZED_ACCEPT. This directive should normally be left at
+# its default value. The main reason for changing it is if the logs
+# directory is NFS mounted, since the lockfile MUST BE STORED ON A LOCAL
+# DISK. The PID of the main server process is automatically appended to
+# the filename.
+#
+#LockFile logs/accept.lock
+
+#
+# PidFile: The file in which the server should record its process
+# identification number when it starts.
+#
+PidFile logs/httpd.pid
+#
+# ScoreBoardFile: File used to store internal server process information.
+# Not all architectures require this. But if yours does (you'll know because
+# this file will be created when you run Apache) then you *must* ensure that
+# no two invocations of Apache share the same scoreboard file.
+#
+ScoreBoardFile logs/apache_runtime_status
+
+#
+# In the standard configuration, the server will process httpd.conf,
+# srm.conf, and access.conf in that order. The latter two files are
+# now deprecated and not installed any more, as it is recommended that
+# all directives be kept in a single file for simplicity.
+#
+#ResourceConfig conf/srm.conf
+#AccessConfig conf/access.conf
+
+#
+# Timeout: The number of seconds before receives and sends time out.
+#
+Timeout 300
+
+#
+# KeepAlive: Whether or not to allow persistent connections (more than
+# one request per connection). Set to "Off" to deactivate.
+#
+KeepAlive On
+
+#
+# MaxKeepAliveRequests: The maximum number of requests to allow
+# during a persistent connection. Set to 0 to allow an unlimited amount.
+# We recommend you leave this number high, for maximum performance.
+#
+MaxKeepAliveRequests 100
+
+#
+# KeepAliveTimeout: Number of seconds to wait for the next request from the
+# same client on the same connection.
+#
+KeepAliveTimeout 15
+
+#
+# Server-pool size regulation. Rather than making you guess how many
+# server processes you need, Apache dynamically adapts to the load it
+# sees --- that is, it tries to maintain enough server processes to
+# handle the current load, plus a few spare servers to handle transient
+# load spikes (e.g., multiple simultaneous requests from a single
+# Netscape browser).
+#
+# It does this by periodically checking how many servers are waiting
+# for a request. If there are fewer than MinSpareServers, it creates
+# a new spare. If there are more than MaxSpareServers, some of the
+# spares die off. The default values in httpd.conf-dist are probably OK
+# for most sites.
+#
+MinSpareServers 5
+MaxSpareServers 10
+
+#
+# Number of servers to start initially --- should be a reasonable ballpark
+# figure.
+#
+StartServers 5
+
+#
+# Limit on total number of servers running, i.e., limit on the number
+# of clients who can simultaneously connect --- if this limit is ever
+# reached, clients will be LOCKED OUT, so it should NOT BE SET TOO LOW.
+# It is intended mainly as a brake to keep a runaway server from taking
+# the system with it as it spirals down...
+#
+MaxClients 150
+
+#
+# MaxRequestsPerChild: the number of requests each child process is
+# allowed to process before the child dies. The child will exit so
+# as to avoid problems after prolonged use when Apache (and maybe the
+# libraries it uses) leak memory or other resources. On most systems, this
+# isn't really needed, but a few (such as Solaris) do have notable leaks
+# in the libraries.
+#
+MaxRequestsPerChild 0
+
+#
+# MaxFOOPerChild: these directives set the current and hard rlimits for
+# the child processes. Attempts to exceed them will cause the OS to
+# take appropriate action. See setrlimit(2) and signal(3).
+#
+MaxCPUPerChild 0
+MaxDATAPerChild 0
+MaxNOFILEPerChild 0
+MaxRSSPerChild 0
+MaxSTACKPerChild 0
+
+#
+# Listen: Allows you to bind Apache to specific IP addresses and/or
+# ports, in addition to the default. See also the <VirtualHost>
+# directive.
+#
+#Listen 3000
+#Listen 12.34.56.78:80
+
+#
+# BindAddress: You can support virtual hosts with this option. This directive
+# is used to tell the server which IP address to listen to. It can either
+# contain "*", an IP address, or a fully qualified Internet domain name.
+# See also the <VirtualHost> and Listen directives.
+#
+#BindAddress *
+
+#
+# Dynamic Shared Object (DSO) Support
+#
+# To be able to use the functionality of a module which was built as a DSO you
+# have to place corresponding `LoadModule' lines at this location so the
+# directives contained in it are actually available _before_ they are used.
+# Please read the file README.DSO in the Apache 1.3 distribution for more
+# details about the DSO mechanism and run `httpd -l' for the list of already
+# built-in (statically linked and thus always available) modules in your httpd
+# binary.
+#
+# Note: The order in which modules are loaded is important. Don't change
+# the order below without expert advice.
+#
+# Example:
+# LoadModule foo_module libexec/mod_foo.so
+
+# "anonymous" user access to authenticated areas
+# LoadModule anon_auth_module /usr/lib/apache/modules/mod_auth_anon.so
+
+# user authentication using Berkeley DB files
+# LoadModule db_auth_module /usr/lib/apache/modules/mod_auth_db.so
+
+# user authentication using DBM files
+# LoadModule dbm_auth_module /usr/lib/apache/modules/mod_auth_dbm.so
+
+# authentication using new-style MD5 Digest Authentication (experimental)
+# LoadModule digest_auth_module /usr/lib/apache/modules/mod_auth_digest.so
+
+# CERN httpd metafile semantics
+# LoadModule cern_meta_module /usr/lib/apache/modules/mod_cern_meta.so
+
+# configuration defines ($xxx)
+# LoadModule define_module /usr/lib/apache/modules/mod_define.so
+
+# user authentication using old-style MD5 Digest Authentication
+# LoadModule digest_module /usr/lib/apache/modules/mod_digest.so
+
+# generation of Expires HTTP headers according to user-specified criteria
+# LoadModule expires_module /usr/lib/apache/modules/mod_expires.so
+
+# customization of HTTP response headers
+# LoadModule headers_module /usr/lib/apache/modules/mod_headers.so
+
+# comprehensive overview of the server configuration
+# LoadModule info_module /usr/lib/apache/modules/mod_info.so
+
+# logging of the client user agents (deprecated in favor of mod_log_config)
+# LoadModule agent_log_module /usr/lib/apache/modules/mod_log_agent.so
+
+# logging of referers (deprecated in favor of mod_log_config)
+# LoadModule referer_log_module /usr/lib/apache/modules/mod_log_referer.so
+
+# determining the MIME type of a file by looking at a few bytes of its contents
+# LoadModule mime_magic_module /usr/lib/apache/modules/mod_mime_magic.so
+
+# mmap()ing of a statically configured list of frequently requested but
+# not changed files (experimental)
+# LoadModule mmap_static_module /usr/lib/apache/modules/mod_mmap_static.so
+
+# rule-based rewriting engine to rewrite requested URLs on the fly
+# LoadModule rewrite_module /usr/lib/apache/modules/mod_rewrite.so
+
+# attempt to correct misspellings of URLs that users might have entered
+# LoadModule speling_module /usr/lib/apache/modules/mod_speling.so
+
+# provides an environment variable with a unique identifier for each request
+# LoadModule unique_id_module /usr/lib/apache/modules/mod_unique_id.so
+
+# uses cookies to provide for a clickstream log of user activity on a site
+# LoadModule usertrack_module /usr/lib/apache/modules/mod_usertrack.so
+
+# dynamically configured mass virtual hosting
+# LoadModule vhost_alias_module /usr/lib/apache/modules/mod_vhost_alias.so
+
+# caching proxy
+# LoadModule proxy_module /usr/lib/apache/modules/libproxy.so
+
+#
+# Include extra module configuration files
+#
+Include /var/www/conf/modules/*.conf
+
+#
+# ExtendedStatus controls whether Apache will generate "full" status
+# information (ExtendedStatus On) or just basic information (ExtendedStatus
+# Off) when the "server-status" handler is called. The default is Off.
+#
+#ExtendedStatus On
+
+### Section 2: 'Main' server configuration
+#
+# The directives in this section set up the values used by the 'main'
+# server, which responds to any requests that aren't handled by a
+# <VirtualHost> definition. These values also provide defaults for
+# any <VirtualHost> containers you may define later in the file.
+#
+# All of these directives may appear inside <VirtualHost> containers,
+# in which case these default settings will be overridden for the
+# virtual host being defined.
+#
+
+#
+# If your ServerType directive (set earlier in the 'Global Environment'
+# section) is set to "inetd", the next few directives don't have any
+# effect since their settings are defined by the inetd configuration.
+# Skip ahead to the ServerAdmin directive.
+#
+
+#
+# Port: The port to which the standalone server listens. For
+# ports < 1023, you will need httpd to be run as root initially.
+#
+Port 80
+
+##
+## SSL Support
+##
+## When we also provide SSL we have to listen to the
+## standard HTTP port (see above) and to the HTTPS port
+##
+<IfDefine SSL>
+Listen 80
+Listen 443
+</IfDefine>
+
+#
+# If you wish httpd to run as a different user or group, you must run
+# httpd as root initially and it will switch.
+#
+# User/Group: The name (or #number) of the user/group to run httpd as.
+# . On SCO (ODT 3) use "User nouser" and "Group nogroup".
+# . On HPUX you may not be able to use shared memory as nobody, and the
+# suggested workaround is to create a user www and use that user.
+# NOTE that some kernels refuse to setgid(Group) or semctl(IPC_SET)
+# when the value of (unsigned)Group is above 60000;
+# don't use Group #-1 on these systems!
+# On OpenBSD, use user www, group www.
+#
+User www
+Group www
+
+#
+# ServerAdmin: Your address, where problems with the server should be
+# e-mailed. This address appears on some server-generated pages, such
+# as error documents.
+#
+ServerAdmin you@your.address
+
+#
+# ServerName allows you to set a host name which is sent back to clients for
+# your server if it's different than the one the program would get (i.e., use
+# "www" instead of the host's real name).
+#
+# Note: You cannot just invent host names and hope they work. The name you
+# define here must be a valid DNS name for your host. If you don't understand
+# this, ask your network administrator.
+# If your host doesn't have a registered DNS name, enter its IP address here.
+# You will have to access it by its address (e.g., http://123.45.67.89/)
+# anyway, and this will make redirections work in a sensible way.
+#
+#ServerName new.host.name
+
+#
+# DocumentRoot: The directory out of which you will serve your
+# documents. By default, all requests are taken from this directory, but
+# symbolic links and aliases may be used to point to other locations.
+#
+DocumentRoot "/var/www/htdocs"
+
+#
+# Each directory to which Apache has access can be configured with respect
+# to which services and features are allowed and/or disabled in that
+# directory (and its subdirectories).
+#
+# First, we configure the "default" to be a very restrictive set of
+# permissions.
+#
+<Directory />
+ Options FollowSymLinks
+ AllowOverride None
+</Directory>
+
+#
+# Note that from this point forward you must specifically allow
+# particular features to be enabled - so if something's not working as
+# you might expect, make sure that you have specifically enabled it
+# below.
+#
+
+#
+# This should be changed to whatever you set DocumentRoot to.
+#
+<Directory "/var/www/htdocs">
+
+#
+# This may also be "None", "All", or any combination of "Indexes",
+# "Includes", "FollowSymLinks", "ExecCGI", or "MultiViews".
+#
+# Note that "MultiViews" must be named *explicitly* --- "Options All"
+# doesn't give it to you.
+#
+ Options Indexes FollowSymLinks
+
+#
+# This controls which options the .htaccess files in directories can
+# override. Can also be "All", or any combination of "Options", "FileInfo",
+# "AuthConfig", and "Limit"
+#
+ AllowOverride None
+
+#
+# Controls who can get stuff from this server.
+#
+ Order allow,deny
+ Allow from all
+</Directory>
+
+#
+# UserDir: The directory which is prepended onto a user's username, within
+# which a user's web pages are looked for if a ~user request is received.
+# Relative paths are relative to the user's home directory.
+#
+# "disabled" turns this feature off.
+#
+# Since httpd will chroot(2) to the ServerRoot path by default,
+# you should use
+# UserDir /var/www/users
+# and create per user directories in /var/www/users/<username>
+#
+
+UserDir disabled
+
+#
+# Control access to UserDir directories. The following is an example
+# for a site where these directories are restricted to read-only and
+# are located under /users/<username>
+# You will need to change this to match your site's home directories.
+#
+#<Directory /users/*>
+# AllowOverride FileInfo AuthConfig Limit
+# Options MultiViews Indexes SymLinksIfOwnerMatch IncludesNoExec
+# <Limit GET POST OPTIONS PROPFIND>
+# Order allow,deny
+# Allow from all
+# </Limit>
+# <Limit PUT DELETE PATCH PROPPATCH MKCOL COPY MOVE LOCK UNLOCK>
+# Order deny,allow
+# Deny from all
+# </Limit>
+#</Directory>
+
+#
+# DirectoryIndex: Name of the file or files to use as a pre-written HTML
+# directory index. Separate multiple entries with spaces.
+#
+DirectoryIndex index.html
+
+#
+# AccessFileName: The name of the file to look for in each directory
+# for access control information.
+#
+AccessFileName .htaccess
+
+#
+# The following lines prevent .htaccess files from being viewed by
+# Web clients. Since .htaccess files often contain authorization
+# information, access is disallowed for security reasons. Comment
+# these lines out if you want Web visitors to see the contents of
+# .htaccess files. If you change the AccessFileName directive above,
+# be sure to make the corresponding changes here.
+#
+<Files .htaccess>
+ Order allow,deny
+ Deny from all
+</Files>
+
+#
+# CacheNegotiatedDocs: By default, Apache sends "Pragma: no-cache" with each
+# document that was negotiated on the basis of content. This asks proxy
+# servers not to cache the document. Uncommenting the following line disables
+# this behavior, and proxies will be allowed to cache the documents.
+#
+#CacheNegotiatedDocs
+
+#
+# UseCanonicalName: (new for 1.3) With this setting turned on, whenever
+# Apache needs to construct a self-referencing URL (a URL that refers back
+# to the server the response is coming from) it will use ServerName and
+# Port to form a "canonical" name. With this setting off, Apache will
+# use the hostname:port that the client supplied, when possible. This
+# also affects SERVER_NAME and SERVER_PORT in CGI scripts.
+#
+UseCanonicalName On
+
+#
+# TypesConfig describes where the mime.types file (or equivalent) is
+# to be found.
+#
+TypesConfig conf/mime.types
+
+#
+# DefaultType is the default MIME type the server will use for a document
+# if it cannot otherwise determine one, such as from filename extensions.
+# If your server contains mostly text or HTML documents, "text/plain" is
+# a good value. If most of your content is binary, such as applications
+# or images, you may want to use "application/octet-stream" instead to
+# keep browsers from trying to display binary files as though they are
+# text.
+#
+DefaultType text/plain
+
+#
+# The mod_mime_magic module allows the server to use various hints from the
+# contents of the file itself to determine its type. The MIMEMagicFile
+# directive tells the module where the hint definitions are located.
+# mod_mime_magic is not part of the default server (you have to add
+# it yourself with a LoadModule [see the DSO paragraph in the 'Global
+# Environment' section], or recompile the server and include mod_mime_magic
+# as part of the configuration), so it's enclosed in an <IfModule> container.
+# This means that the MIMEMagicFile directive will only be processed if the
+# module is part of the server.
+#
+<IfModule mod_mime_magic.c>
+ MIMEMagicFile conf/magic
+</IfModule>
+
+#
+# HostnameLookups: Log the names of clients or just their IP addresses
+# e.g., www.apache.org (on) or 204.62.129.132 (off).
+# The default is off because it'd be overall better for the net if people
+# had to knowingly turn this feature on, since enabling it means that
+# each client request will result in AT LEAST one lookup request to the
+# nameserver.
+#
+HostnameLookups Off
+
+#
+# ErrorLog: The location of the error log file.
+# If you do not specify an ErrorLog directive within a <VirtualHost>
+# container, error messages relating to that virtual host will be
+# logged here. If you *do* define an error logfile for a <VirtualHost>
+# container, that host's errors will be logged there and not here.
+# Either a filename or the text "syslog:" followed by a facility
+# name may be specified here.
+#
+#ErrorLog syslog:daemon
+ErrorLog logs/error_log
+
+#
+# LogLevel: Control the number of messages logged to the error_log.
+# Possible values include: debug, info, notice, warn, error, crit,
+# alert, emerg.
+#
+LogLevel warn
+
+#
+# The following directives define some format nicknames for use with
+# a CustomLog directive (see below).
+#
+LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
+LogFormat "%h %l %u %t \"%r\" %>s %b" common
+LogFormat "%{Referer}i -> %U" referer
+LogFormat "%{User-agent}i" agent
+
+#
+# The location and format of the access logfile (Common Logfile Format).
+# If you do not define any access logfiles within a <VirtualHost>
+# container, they will be logged here. Contrariwise, if you *do*
+# define per-<VirtualHost> access logfiles, transactions will be
+# logged therein and *not* in this file.
+#
+CustomLog logs/access_log common
+
+#
+# If you would like to have agent and referer logfiles, uncomment the
+# following directives.
+#
+#CustomLog logs/referer_log referer
+#CustomLog logs/agent_log agent
+
+#
+# If you prefer a single logfile with access, agent, and referer information
+# (Combined Logfile Format) you can use the following directive.
+#
+#CustomLog logs/access_log combined
+
+#
+# Optionally add a line containing the server version and virtual host
+# name to server-generated pages (error documents, FTP directory listings,
+# mod_status and mod_info output etc., but not CGI generated documents).
+# Set to "EMail" to also include a mailto: link to the ServerAdmin.
+# Set to one of: On | Off | EMail
+#
+# ServerSignature Off
+
+#
+# Aliases: Add here as many aliases as you need (with no limit). The format is
+# Alias fakename realname
+#
+# Note that if you include a trailing / on fakename then the server will
+# require it to be present in the URL. So "/icons" isn't aliased in this
+# example, only "/icons/".
+#
+Alias /icons/ "/var/www/icons/"
+
+<Directory "/var/www/icons">
+ Options Indexes MultiViews
+ AllowOverride None
+ Order allow,deny
+ Allow from all
+</Directory>
+
+<Directory "/var/www/htdocs/manual">
+ Options MultiViews
+ AllowOverride None
+ Order allow,deny
+ Allow from all
+</Directory>
+
+#
+# ScriptAlias: This controls which directories contain server scripts.
+# ScriptAliases are essentially the same as Aliases, except that
+# documents in the realname directory are treated as applications and
+# run by the server when requested rather than as documents sent to the client.
+# The same rules about trailing "/" apply to ScriptAlias directives as to
+# Alias.
+#
+ScriptAlias /cgi-bin/ "/var/www/cgi-bin/"
+
+#
+# "/var/www/cgi-bin" should be changed to whatever your ScriptAliased
+# CGI directory exists, if you have that configured.
+#
+<Directory "/var/www/cgi-bin">
+ AllowOverride None
+ Options None
+ Order allow,deny
+ Allow from all
+</Directory>
+
+#
+# Redirect allows you to tell clients about documents which used to exist in
+# your server's namespace, but do not anymore. This allows you to tell the
+# clients where to look for the relocated document.
+# Format: Redirect old-URI new-URL
+#
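+# Example (illustrative, commented out; the target host is a placeholder):
+#Redirect /old-page.html http://www.example.com/new-page.html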
+
+#
+# Directives controlling the display of server-generated directory listings.
+#
+
+#
+# FancyIndexing is whether you want fancy directory indexing or standard
+#
+IndexOptions FancyIndexing
+
+#
+# AddIcon* directives tell the server which icon to show for different
+# files or filename extensions. These are only displayed for
+# FancyIndexed directories.
+#
+AddIconByEncoding (CMP,/icons/compressed.gif) x-compress x-gzip
+
+AddIconByType (TXT,/icons/text.gif) text/*
+AddIconByType (IMG,/icons/image2.gif) image/*
+AddIconByType (SND,/icons/sound2.gif) audio/*
+AddIconByType (VID,/icons/movie.gif) video/*
+
+AddIcon /icons/binary.gif .bin .exe
+AddIcon /icons/binhex.gif .hqx
+AddIcon /icons/tar.gif .tar
+AddIcon /icons/world2.gif .wrl .wrl.gz .vrml .vrm .iv
+AddIcon /icons/compressed.gif .Z .z .tgz .gz .zip
+AddIcon /icons/a.gif .ps .ai .eps
+AddIcon /icons/layout.gif .html .shtml .htm .pdf
+AddIcon /icons/text.gif .txt
+AddIcon /icons/c.gif .c
+AddIcon /icons/p.gif .pl .py
+AddIcon /icons/f.gif .for
+AddIcon /icons/dvi.gif .dvi
+AddIcon /icons/uuencoded.gif .uu
+AddIcon /icons/script.gif .conf .sh .shar .csh .ksh .tcl
+AddIcon /icons/tex.gif .tex
+AddIcon /icons/bomb.gif core
+
+AddIcon /icons/back.gif ..
+AddIcon /icons/hand.right.gif README
+AddIcon /icons/folder.gif ^^DIRECTORY^^
+AddIcon /icons/blank.gif ^^BLANKICON^^
+
+#
+# DefaultIcon is which icon to show for files which do not have an icon
+# explicitly set.
+#
+DefaultIcon /icons/unknown.gif
+
+#
+# AddDescription allows you to place a short description after a file in
+# server-generated indexes. These are only displayed for FancyIndexed
+# directories.
+# Format: AddDescription "description" filename
+#
+#AddDescription "GZIP compressed document" .gz
+#AddDescription "tar archive" .tar
+#AddDescription "GZIP compressed tar archive" .tgz
+
+#
+# ReadmeName is the name of the README file the server will look for by
+# default, and append to directory listings.
+#
+# HeaderName is the name of a file which should be prepended to
+# directory indexes.
+#
+# The server will first look for name.html and include it if found.
+# If name.html doesn't exist, the server will then look for name.txt
+# and include it as plaintext if found.
+#
+ReadmeName README
+HeaderName HEADER
+
+#
+# IndexIgnore is a set of filenames which directory indexing should ignore
+# and not include in the listing. Shell-style wildcarding is permitted.
+#
+IndexIgnore .??* *~ *# HEADER* README* RCS CVS *,v *,t
+
+#
+# AddEncoding allows you to have certain browsers (Mosaic/X 2.1+) uncompress
+# information on the fly. Note: Not all browsers support this.
+# Despite the name similarity, the following Add* directives have nothing
+# to do with the FancyIndexing customization directives above.
+#
+AddEncoding x-compress Z
+AddEncoding x-gzip gz
+
+#
+# AddLanguage allows you to specify the language of a document. You can
+# then use content negotiation to give a browser a file in a language
+# it can understand. Note that the suffix does not have to be the same
+# as the language keyword --- those with documents in Polish (whose
+# net-standard language code is pl) may wish to use "AddLanguage pl .po"
+# to avoid the ambiguity with the common suffix for perl scripts.
+#
+AddLanguage en .en
+AddLanguage fr .fr
+AddLanguage de .de
+AddLanguage da .da
+AddLanguage el .el
+AddLanguage it .it
+
+#
+# LanguagePriority allows you to give precedence to some languages
+# in case of a tie during content negotiation.
+# Just list the languages in decreasing order of preference.
+#
+LanguagePriority en fr de
+
+#
+# AddType allows you to tweak mime.types without actually editing it, or to
+# make certain files be treated as certain types.
+#
+# For example, the PHP module (not part of the Apache distribution)
+# will typically use:
+#
+#AddType application/x-httpd-php .php
+
+#
+# AddHandler allows you to map certain file extensions to "handlers",
+# actions unrelated to filetype. These can be either built into the server
+# or added with the Action directive (see below).
+#
+# If you want to use server side includes, or CGI outside
+# ScriptAliased directories, uncomment the following lines.
+#
+# To use CGI scripts:
+#
+#AddHandler cgi-script .cgi
+
+#
+# To use server-parsed HTML files
+#
+#AddType text/html .shtml
+#AddHandler server-parsed .shtml
+
+#
+# Uncomment the following line to enable Apache's send-as-is HTTP file
+# feature
+#
+#AddHandler send-as-is asis
+
+#
+# If you wish to use server-parsed imagemap files, use
+#
+#AddHandler imap-file map
+
+#
+# To enable type maps, you might want to use
+#
+#AddHandler type-map var
+
+#
+# Action lets you define media types that will execute a script whenever
+# a matching file is called. This eliminates the need for repeated URL
+# pathnames for oft-used CGI file processors.
+# Format: Action media/type /cgi-script/location
+# Format: Action handler-name /cgi-script/location
+#
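+# An illustrative, commented-out example (the filter script path is
+# hypothetical): run every GIF through a CGI post-processor.
+#Action image/gif /cgi-bin/filter-gif.cgi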
+
+#
+# MetaDir: specifies the name of the directory in which Apache can find
+# meta information files. These files contain additional HTTP headers
+# to include when sending the document
+#
+#MetaDir .web
+
+#
+# MetaSuffix: specifies the file name suffix for the file containing the
+# meta information.
+#
+#MetaSuffix .meta
+
+#
+# Customizable error response (Apache style)
+# these come in three flavors
+#
+# 1) plain text
+#ErrorDocument 500 "The server made a boo boo.
+# n.b. the (") marks it as text, it does not get output
+#
+# 2) local redirects
+#ErrorDocument 404 /missing.html
+# to redirect to local URL /missing.html
+#ErrorDocument 404 /cgi-bin/missing_handler.pl
+# N.B.: You can redirect to a script or a document using server-side-includes.
+#
+# 3) external redirects
+#ErrorDocument 402 http://some.other_server.com/subscription_info.html
+# N.B.: Many of the environment variables associated with the original
+# request will *not* be available to such a script.
+
+#
+# The following directives modify normal HTTP response behavior.
+# The first directive disables keepalive for Netscape 2.x and browsers that
+# spoof it. There are known problems with these browser implementations.
+# The second directive is for Microsoft Internet Explorer 4.0b2
+# which has a broken HTTP/1.1 implementation and does not properly
+# support keepalive when it is used on 301 or 302 (redirect) responses.
+#
+BrowserMatch "Mozilla/2" nokeepalive
+BrowserMatch "MSIE 4\.0b2;" nokeepalive downgrade-1.0 force-response-1.0
+
+#
+# The following directive disables HTTP/1.1 responses to browsers which
+# are in violation of the HTTP/1.0 spec by not being able to grok a
+# basic 1.1 response.
+#
+BrowserMatch "RealPlayer 4\.0" force-response-1.0
+BrowserMatch "Java/1\.0" force-response-1.0
+BrowserMatch "JDK/1\.0" force-response-1.0
+
+#
+# Allow server status reports, with the URL of http://servername/server-status
+# Change the ".your_domain.com" to match your domain to enable.
+#
+#<Location /server-status>
+# SetHandler server-status
+# Order deny,allow
+# Deny from all
+# Allow from .your_domain.com
+#</Location>
+
+#
+# Allow remote server configuration reports, with the URL of
+# http://servername/server-info (requires that mod_info.c be loaded).
+# Change the ".your_domain.com" to match your domain to enable.
+#
+#<Location /server-info>
+# SetHandler server-info
+# Order deny,allow
+# Deny from all
+# Allow from .your_domain.com
+#</Location>
+
+#
+# There have been reports of people trying to abuse an old bug from pre-1.1
+# days. This bug involved a CGI script distributed as a part of Apache.
+# By uncommenting these lines you can redirect these attacks to a logging
+# script on phf.apache.org. Or, you can record them yourself, using the script
+# support/phf_abuse_log.cgi.
+#
+#<Location /cgi-bin/phf*>
+# Deny from all
+# ErrorDocument 403 http://phf.apache.org/phf_abuse_log.cgi
+#</Location>
+
+#
+# Proxy Server directives. Uncomment the following lines to
+# enable the proxy server:
+#
+#<IfModule mod_proxy.c>
+#ProxyRequests On
+#
+#<Directory proxy:*>
+# Order deny,allow
+# Deny from all
+# Allow from .your_domain.com
+#</Directory>
+
+#
+# Enable/disable the handling of HTTP/1.1 "Via:" headers.
+# ("Full" adds the server version; "Block" removes all outgoing Via: headers)
+# Set to one of: Off | On | Full | Block
+#
+#ProxyVia On
+
+#
+# To enable the cache as well, edit and uncomment the following lines:
+# (no caching without CacheRoot)
+#
+#CacheRoot "/var/www/proxy"
+#CacheSize 5
+#CacheGcInterval 4
+#CacheMaxExpire 24
+#CacheLastModifiedFactor 0.1
+#CacheDefaultExpire 1
+#NoCache a_domain.com another_domain.edu joes.garage_sale.com
+
+#</IfModule>
+# End of proxy directives.
+
+### Section 3: Virtual Hosts
+#
+# VirtualHost: If you want to maintain multiple domains/hostnames on your
+# machine you can set up VirtualHost containers for them.
+# Please see the documentation at <URL:http://www.apache.org/docs/vhosts/>
+# for further details before you try to set up virtual hosts.
+# You may use the command line option '-S' to verify your virtual host
+# configuration.
+
+#
+# If you want to use name-based virtual hosts you need to define at
+# least one IP address (and port number) for them.
+#
+#NameVirtualHost 12.34.56.78:80
+#NameVirtualHost 12.34.56.78
+
+#
+# VirtualHost example:
+# Almost any Apache directive may go into a VirtualHost container.
+#
+#<VirtualHost ip.address.of.host.some_domain.com>
+# ServerAdmin webmaster@host.some_domain.com
+# DocumentRoot /www/docs/host.some_domain.com
+# ServerName host.some_domain.com
+# ErrorLog logs/host.some_domain.com-error_log
+# CustomLog logs/host.some_domain.com-access_log common
+#</VirtualHost>
+
+#<VirtualHost _default_:*>
+#</VirtualHost>
+
+
+##
+## SSL Global Context
+##
+## All SSL configuration in this context applies both to
+## the main server and all SSL-enabled virtual hosts.
+##
+
+#
+# Some MIME-types for downloading Certificates and CRLs
+#
+<IfDefine SSL>
+AddType application/x-x509-ca-cert .crt
+AddType application/x-pkcs7-crl .crl
+</IfDefine>
+
+<IfModule mod_ssl.c>
+
+# Pass Phrase Dialog:
+# Configure the pass phrase gathering process.
+# The filtering dialog program (`builtin' is an internal
+# terminal dialog) has to provide the pass phrase on stdout.
+SSLPassPhraseDialog builtin
+
+# Inter-Process Session Cache:
+# Configure the SSL Session Cache: First either `none'
+# or `dbm:/path/to/file' for the mechanism to use and
+# second the expiring timeout (in seconds).
+SSLSessionCache dbm:logs/ssl_scache
+SSLSessionCacheTimeout 300
+
+# Semaphore:
+# Configure the path to the mutual exclusion semaphore the
+# SSL engine uses internally for inter-process synchronization.
+SSLMutex sem
+
+# Pseudo Random Number Generator (PRNG):
+# Configure one or more sources to seed the PRNG of the
+# SSL library. The seed data should be of good random quality.
+SSLRandomSeed startup builtin
+SSLRandomSeed connect builtin
+#SSLRandomSeed startup file:/dev/random 512
+#SSLRandomSeed startup file:/dev/urandom 512
+#SSLRandomSeed connect file:/dev/random 512
+#SSLRandomSeed connect file:/dev/urandom 512
+SSLRandomSeed startup file:/dev/arandom 512
+
+# Logging:
+# The home of the dedicated SSL protocol logfile. Errors are
+# additionally duplicated in the general error log file. Put
+# this somewhere where it cannot be used for symlink attacks on
+# a real server (i.e. somewhere where only root can write).
+# Log levels are (ascending order: higher ones include lower ones):
+# none, error, warn, info, trace, debug.
+SSLLog logs/ssl_engine_log
+SSLLogLevel info
+
+</IfModule>
+
+<IfDefine SSL>
+
+##
+## SSL Virtual Host Context
+##
+
+<VirtualHost _default_:443>
+
+# General setup for the virtual host
+DocumentRoot /var/www/htdocs
+ServerName new.host.name
+ServerAdmin you@your.address
+ErrorLog logs/error_log
+TransferLog logs/access_log
+
+# SSL Engine Switch:
+# Enable/Disable SSL for this virtual host.
+SSLEngine on
+
+# SSL Cipher Suite:
+# List the ciphers that the client is permitted to negotiate.
+# See the mod_ssl documentation for a complete list.
+#SSLCipherSuite ALL:!ADH:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP
+
+# Server Certificate:
+# Point SSLCertificateFile at a PEM encoded certificate. If
+# the certificate is encrypted, then you will be prompted for a
+# pass phrase. Note that a kill -HUP will prompt again. A test
+# certificate can be generated with `make certificate' at
+# build time.
+SSLCertificateFile /etc/ssl/server.crt
+
+# Server Private Key:
+# If the key is not combined with the certificate, use this
+# directive to point at the key file.
+SSLCertificateKeyFile /etc/ssl/private/server.key
+
+# Certificate Authority (CA):
+# Set the CA certificate verification path where to find CA
+# certificates for client authentication or alternatively one
+# huge file containing all of them (file must be PEM encoded)
+# Note: Inside SSLCACertificatePath you need hash symlinks
+# to point to the certificate files. Use the provided
+# Makefile to update the hash symlinks after changes.
+#SSLCACertificatePath /var/www/conf/ssl.crt
+#SSLCACertificateFile /var/www/conf/ssl.crt/ca-bundle.crt
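+# (Illustrative only: such a hash symlink can also be created by hand,
+#  e.g. `ln -s ca.crt $(openssl x509 -hash -noout -in ca.crt).0`, assuming
+#  the openssl command-line tool is available.)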
+
+# Client Authentication (Type):
+# Client certificate verification type and depth. Types are
+# none, optional, require and optional_no_ca. Depth is a
+# number which specifies how deeply to verify the certificate
+# issuer chain before deciding the certificate is not valid.
+#SSLVerifyClient require
+#SSLVerifyDepth 10
+
+# Access Control:
+# With SSLRequire you can do per-directory access control based
+# on arbitrary complex boolean expressions containing server
+# variable checks and other lookup directives. The syntax is a
+# mixture between C and Perl. See the mod_ssl documentation
+# for more details.
+#<Location />
+#SSLRequire ( %{SSL_CIPHER} !~ m/^(EXP|NULL)-/ \
+# and %{SSL_CLIENT_S_DN_O} eq "Snake Oil, Ltd." \
+# and %{SSL_CLIENT_S_DN_OU} in {"Staff", "CA", "Dev"} \
+# and %{TIME_WDAY} >= 1 and %{TIME_WDAY} <= 5 \
+# and %{TIME_HOUR} >= 8 and %{TIME_HOUR} <= 20 ) \
+# or %{REMOTE_ADDR} =~ m/^192\.76\.162\.[0-9]+$/
+#</Location>
+
+# SSL Engine Options:
+# Set various options for the SSL engine.
+# FakeBasicAuth:
+# Translate the client X.509 into a Basic Authorisation. This means that
+# the standard Auth/DBMAuth methods can be used for access control. The
+# user name is the `one line' version of the client's X.509 certificate.
+# Note that no password is obtained from the user. Every entry in the user
+# file needs this password: `xxj31ZMTZzkVA'.
+# ExportCertData:
+# This exports two additional environment variables: SSL_CLIENT_CERT and
+# SSL_SERVER_CERT. These contain the PEM-encoded certificates of the
+# server (always existing) and the client (only existing when client
+# authentication is used). This can be used to import the certificates
+# into CGI scripts.
+# CompatEnvVars:
+# This exports obsolete environment variables for backward compatibility
+# to Apache-SSL 1.x, mod_ssl 2.0.x, Sioux 1.0 and Stronghold 2.x. Use this
+# to provide compatibility to existing CGI scripts.
+#SSLOptions +FakeBasicAuth +ExportCertData +CompatEnvVars
+
+# Per-Server Logging:
+# The home of a custom SSL log file. Use this when you want a
+# compact non-error SSL logfile on a virtual host basis.
+CustomLog logs/ssl_request_log \
+ "%t %h %{SSL_PROTOCOL}x %{SSL_CIPHER}x \"%r\" %b"
+
+</VirtualHost>
+
+</IfDefine>
+
+# include additional things
+Include conf.d/*.conf
+Include vhosts.d/*.conf
diff --git a/puppet/modules/apache/files/include.d/defaults.inc b/puppet/modules/apache/files/include.d/defaults.inc
new file mode 100644
index 00000000..3e5e7d73
--- /dev/null
+++ b/puppet/modules/apache/files/include.d/defaults.inc
@@ -0,0 +1,5 @@
+RewriteEngine on
+RewriteCond %{REQUEST_METHOD} ^(TRACE|TRACK)
+RewriteRule .* - [F]
+
+ServerSignature Off
diff --git a/puppet/modules/apache/files/include.d/joomla.inc b/puppet/modules/apache/files/include.d/joomla.inc
new file mode 100644
index 00000000..1535ce37
--- /dev/null
+++ b/puppet/modules/apache/files/include.d/joomla.inc
@@ -0,0 +1,30 @@
+########## Begin - Rewrite rules to block out some common exploits
+# against Joomla
+#
+# Block out any script trying to set a mosConfig value through the URL
+RewriteCond %{QUERY_STRING} mosConfig_[a-zA-Z_]{1,21}(=|\%3D) [OR]
+
+# Block out any script trying to base64_encode crap to send via URL
+RewriteCond %{QUERY_STRING} base64_encode.*\(.*\) [OR]
+
+# Block out any script that includes a <script> tag in URL
+RewriteCond %{QUERY_STRING} (\<|%3C).*script.*(\>|%3E) [NC,OR]
+
+# Block out any script trying to set a PHP GLOBALS variable via URL
+RewriteCond %{QUERY_STRING} GLOBALS(=|\[|\%[0-9A-Z]{0,2}) [OR]
+
+# Block out any script trying to modify a _REQUEST variable via URL
+RewriteCond %{QUERY_STRING} _REQUEST(=|\[|\%[0-9A-Z]{0,2}) [OR]
+
+# Block out any script that tries to set CONFIG_EXT (com_extcal2 issue)
+RewriteCond %{QUERY_STRING} CONFIG_EXT(\[|\%20|\%5B).*= [NC,OR]
+
+# Block out any script that tries to set sbp or sb_authorname via URL (simpleboard)
+RewriteCond %{QUERY_STRING} sbp(=|\%20|\%3D) [OR]
+RewriteCond %{QUERY_STRING} sb_authorname(=|\%20|\%3D)
+
+# Return a 403 Forbidden error for any request matching the conditions above
+RewriteRule ^(.*)$ index.php [F,L]
+#
+########## End - Rewrite rules to block out some common exploits
+
diff --git a/puppet/modules/apache/files/include.d/silverstripe.inc b/puppet/modules/apache/files/include.d/silverstripe.inc
new file mode 100644
index 00000000..40c44e46
--- /dev/null
+++ b/puppet/modules/apache/files/include.d/silverstripe.inc
@@ -0,0 +1,17 @@
+# silverstripe .htaccess
+<Files *.ss>
+ Order deny,allow
+ Deny from all
+ #Allow from 127.0.0.1
+</Files>
+
+<IfModule mod_rewrite.c>
+ RewriteEngine On
+ #RewriteBase /
+
+ RewriteCond %{REQUEST_URI} !(\.gif$)|(\.jpg$)|(\.png$)|(\.css$)|(\.js$)
+
+ RewriteCond %{REQUEST_URI} ^(.*)$
+ RewriteCond %{REQUEST_FILENAME} !-f
+ RewriteRule .* sapphire/main.php?url=%1&%{QUERY_STRING} [L]
+</IfModule>
diff --git a/puppet/modules/apache/files/itk_plus/conf.d/CentOS/ssl.conf b/puppet/modules/apache/files/itk_plus/conf.d/CentOS/ssl.conf
new file mode 100644
index 00000000..fb0c915a
--- /dev/null
+++ b/puppet/modules/apache/files/itk_plus/conf.d/CentOS/ssl.conf
@@ -0,0 +1,75 @@
+#
+# This is the Apache server configuration file providing SSL support.
+# It contains the configuration directives to instruct the server how to
+# serve pages over an https connection. For detailed information about these
+# directives see <URL:http://httpd.apache.org/docs/2.2/mod/mod_ssl.html>
+#
+# Do NOT simply read the instructions in here without understanding
+# what they do. They're here only as hints or reminders. If you are unsure
+# consult the online docs. You have been warned.
+#
+
+LoadModule ssl_module modules/mod_ssl.so
+
+#
+# When we also provide SSL we have to listen on
+# the HTTPS port in addition.
+#
+NameVirtualHost *:443
+
+##
+## SSL Global Context
+##
+## All SSL configuration in this context applies both to
+## the main server and all SSL-enabled virtual hosts.
+##
+
+#
+# Some MIME-types for downloading Certificates and CRLs
+#
+AddType application/x-x509-ca-cert .crt
+AddType application/x-pkcs7-crl .crl
+
+# Pass Phrase Dialog:
+# Configure the pass phrase gathering process.
+# The filtering dialog program (`builtin' is an internal
+# terminal dialog) has to provide the pass phrase on stdout.
+SSLPassPhraseDialog builtin
+
+# Inter-Process Session Cache:
+# Configure the SSL Session Cache: First the mechanism
+# to use and second the expiring timeout (in seconds).
+#SSLSessionCache dc:UNIX:/var/cache/mod_ssl/distcache
+SSLSessionCache shmcb:/var/cache/mod_ssl/scache(512000)
+SSLSessionCacheTimeout 300
+
+# Semaphore:
+# Configure the path to the mutual exclusion semaphore the
+# SSL engine uses internally for inter-process synchronization.
+SSLMutex default
+
+# Pseudo Random Number Generator (PRNG):
+# Configure one or more sources to seed the PRNG of the
+# SSL library. The seed data should be of good random quality.
+# WARNING! On some platforms /dev/random blocks if not enough entropy
+# is available. This means you then cannot use the /dev/random device
+# because it would lead to very long connection times (as long as
+# it requires to make more entropy available). But usually those
+# platforms additionally provide a /dev/urandom device which doesn't
+# block. So, if available, use this one instead. Read the mod_ssl User
+# Manual for more details.
+SSLRandomSeed startup file:/dev/urandom 256
+SSLRandomSeed connect builtin
+#SSLRandomSeed startup file:/dev/random 512
+#SSLRandomSeed connect file:/dev/random 512
+#SSLRandomSeed connect file:/dev/urandom 512
+
+#
+# Use "SSLCryptoDevice" to enable any supported hardware
+# accelerators. Use "openssl engine -v" to list supported
+# engine names. NOTE: If you enable an accelerator and the
+# server does not start, consult the error logs and ensure
+# your accelerator is functioning properly.
+#
+SSLCryptoDevice builtin
+#SSLCryptoDevice ubsec
diff --git a/puppet/modules/apache/files/modules.d/Gentoo/00_default_settings.conf b/puppet/modules/apache/files/modules.d/Gentoo/00_default_settings.conf
new file mode 100644
index 00000000..5315fcb7
--- /dev/null
+++ b/puppet/modules/apache/files/modules.d/Gentoo/00_default_settings.conf
@@ -0,0 +1,105 @@
+# This configuration file reflects default settings for Apache HTTP Server.
+# You may change these, but chances are that you may not need to.
+
+# Timeout: The number of seconds before receives and sends time out.
+Timeout 300
+
+# KeepAlive: Whether or not to allow persistent connections (more than
+# one request per connection). Set to "Off" to deactivate.
+KeepAlive On
+
+# MaxKeepAliveRequests: The maximum number of requests to allow
+# during a persistent connection. Set to 0 to allow an unlimited amount.
+# We recommend you leave this number high, for maximum performance.
+MaxKeepAliveRequests 100
+
+# KeepAliveTimeout: Number of seconds to wait for the next request from the
+# same client on the same connection.
+KeepAliveTimeout 15
+
+# UseCanonicalName: Determines how Apache constructs self-referencing
+# URLs and the SERVER_NAME and SERVER_PORT variables.
+# When set "Off", Apache will use the Hostname and Port supplied
+# by the client. When set "On", Apache will use the value of the
+# ServerName directive.
+UseCanonicalName Off
+
+# AccessFileName: The name of the file to look for in each directory
+# for additional configuration directives. See also the AllowOverride
+# directive.
+AccessFileName .htaccess
+
+# ServerTokens
+# This directive configures what you return as the Server HTTP response
+# Header. The default is 'Full' which sends information about the OS-Type
+# and compiled in modules.
+# Set to one of: Full | OS | Minor | Minimal | Major | Prod
+# where Full conveys the most information, and Prod the least.
+ServerTokens Prod
+
+# Optionally add a line containing the server version and virtual host
+# name to server-generated pages (internal error documents, FTP directory
+# listings, mod_status and mod_info output etc., but not CGI generated
+# documents or custom error documents).
+# Set to "EMail" to also include a mailto: link to the ServerAdmin.
+# Set to one of: On | Off | EMail
+ServerSignature Off
+
+# HostnameLookups: Log the names of clients or just their IP addresses
+# e.g., www.apache.org (on) or 204.62.129.132 (off).
+# The default is off because it'd be overall better for the net if people
+# had to knowingly turn this feature on, since enabling it means that
+# each client request will result in AT LEAST one lookup request to the
+# nameserver.
+HostnameLookups Off
+
+# EnableMMAP and EnableSendfile: On systems that support it,
+# memory-mapping or the sendfile syscall is used to deliver
+# files. This usually improves server performance, but must
+# be turned off when serving from networked-mounted
+# filesystems or if support for these functions is otherwise
+# broken on your system.
+#EnableMMAP off
+#EnableSendfile off
+
+# ErrorLog: The location of the error log file.
+# If you do not specify an ErrorLog directive within a <VirtualHost>
+# container, error messages relating to that virtual host will be
+# logged here. If you *do* define an error logfile for a <VirtualHost>
+# container, that host's errors will be logged there and not here.
+ErrorLog /var/log/apache2/error_log
+
+# LogLevel: Control the number of messages logged to the error_log.
+# Possible values include: debug, info, notice, warn, error, crit,
+# alert, emerg.
+LogLevel warn
+
+# We configure the "default" to be a very restrictive set of features.
+<Directory />
+ Options FollowSymLinks
+ AllowOverride None
+ Order deny,allow
+ Deny from all
+</Directory>
+
+# DirectoryIndex: sets the file that Apache will serve if a directory
+# is requested.
+#
+# The index.html.var file (a type-map) is used to deliver content-
+# negotiated documents. The MultiViews Option can be used for the
+# same purpose, but it is much slower.
+#
+# To add files to that list use AddDirectoryIndex in a custom config
+# file. Do not change this entry unless you know what you are doing.
+<IfModule dir_module>
+ DirectoryIndex index.html index.html.var
+</IfModule>
+
+# The following lines prevent .htaccess and .htpasswd files from being
+# viewed by Web clients.
+<FilesMatch "^\.ht">
+ Order allow,deny
+ Deny from all
+</FilesMatch>
+
+# vim: ts=4 filetype=apache
diff --git a/puppet/modules/apache/files/modules.d/Gentoo/00_error_documents.conf b/puppet/modules/apache/files/modules.d/Gentoo/00_error_documents.conf
new file mode 100644
index 00000000..90900269
--- /dev/null
+++ b/puppet/modules/apache/files/modules.d/Gentoo/00_error_documents.conf
@@ -0,0 +1,66 @@
+# The configuration below implements multi-language error documents through
+# content-negotiation.
+
+# Customizable error responses come in three flavors:
+# 1) plain text 2) local redirects 3) external redirects
+# Some examples:
+#ErrorDocument 500 "The server made a boo boo."
+#ErrorDocument 404 /missing.html
+#ErrorDocument 404 "/cgi-bin/missing_handler.pl"
+#ErrorDocument 402 http://www.example.com/subscription_info.html
+
+# Required modules: mod_alias, mod_include, mod_negotiation
+# We use Alias to redirect any /error/HTTP_<error>.html.var response to
+# our collection of by-error message multi-language collections. We use
+# includes to substitute the appropriate text.
+# You can modify the messages' appearance without changing any of the
+# default HTTP_<error>.html.var files by adding the line:
+# Alias /error/include/ "/your/include/path/"
+# which allows you to create your own set of files by starting with the
+# /var/www/localhost/error/include/ files and copying them to /your/include/path/,
+# even on a per-VirtualHost basis. The default include files will display
+# your Apache version number and your ServerAdmin email address regardless
+# of the setting of ServerSignature.
+
+<IfDefine ERRORDOCS>
+<IfModule alias_module>
+<IfModule mime_module>
+<IfModule negotiation_module>
+
+Alias /error/ "/var/www/localhost/error/"
+
+<Directory "/var/www/localhost/error">
+ AllowOverride None
+ Options IncludesNoExec
+ AddOutputFilter Includes html
+ AddHandler type-map var
+ Order allow,deny
+ Allow from all
+ LanguagePriority en cs de es fr it ja ko nl pl pt-br ro sv tr
+ ForceLanguagePriority Prefer Fallback
+</Directory>
+
+ErrorDocument 400 /error/HTTP_BAD_REQUEST.html.var
+ErrorDocument 401 /error/HTTP_UNAUTHORIZED.html.var
+ErrorDocument 403 /error/HTTP_FORBIDDEN.html.var
+ErrorDocument 404 /error/HTTP_NOT_FOUND.html.var
+ErrorDocument 405 /error/HTTP_METHOD_NOT_ALLOWED.html.var
+ErrorDocument 408 /error/HTTP_REQUEST_TIME_OUT.html.var
+ErrorDocument 410 /error/HTTP_GONE.html.var
+ErrorDocument 411 /error/HTTP_LENGTH_REQUIRED.html.var
+ErrorDocument 412 /error/HTTP_PRECONDITION_FAILED.html.var
+ErrorDocument 413 /error/HTTP_REQUEST_ENTITY_TOO_LARGE.html.var
+ErrorDocument 414 /error/HTTP_REQUEST_URI_TOO_LARGE.html.var
+ErrorDocument 415 /error/HTTP_UNSUPPORTED_MEDIA_TYPE.html.var
+ErrorDocument 500 /error/HTTP_INTERNAL_SERVER_ERROR.html.var
+ErrorDocument 501 /error/HTTP_NOT_IMPLEMENTED.html.var
+ErrorDocument 502 /error/HTTP_BAD_GATEWAY.html.var
+ErrorDocument 503 /error/HTTP_SERVICE_UNAVAILABLE.html.var
+ErrorDocument 506 /error/HTTP_VARIANT_ALSO_VARIES.html.var
+
+</IfModule>
+</IfModule>
+</IfModule>
+</IfDefine>
+
+# vim: ts=4 filetype=apache
diff --git a/puppet/modules/apache/files/modules.d/Gentoo/00_languages.conf b/puppet/modules/apache/files/modules.d/Gentoo/00_languages.conf
new file mode 100644
index 00000000..287f6544
--- /dev/null
+++ b/puppet/modules/apache/files/modules.d/Gentoo/00_languages.conf
@@ -0,0 +1,137 @@
+# Settings for hosting different languages.
+<IfDefine LANGUAGE>
+<IfModule mime_module>
+<IfModule negotiation_module>
+# DefaultLanguage and AddLanguage allow you to specify the language of
+# a document. You can then use content negotiation to give a browser a
+# file in a language the user can understand.
+#
+# Specify a default language. This means that all data
+# going out without a specific language tag (see below) will
+# be marked with this one. You probably do NOT want to set
+# this unless you are sure it is correct for all cases.
+#
+# It is generally better to not mark a page as
+# being a certain language than marking it with the wrong
+# language!
+#
+# DefaultLanguage nl
+#
+# Note 1: The suffix does not have to be the same as the language
+# keyword --- those with documents in Polish (whose net-standard
+# language code is pl) may wish to use "AddLanguage pl .po" to
+# avoid the ambiguity with the common suffix for perl scripts.
+#
+# Note 2: The example entries below illustrate that in some cases
+# the two character 'Language' abbreviation is not identical to
+# the two character 'Country' code for its country,
+# E.g. 'Danmark/dk' versus 'Danish/da'.
+#
+# Note 3: In the case of 'ltz' we violate the RFC by using a three char
+# specifier. There is 'work in progress' to fix this and get
+# the reference data for rfc1766 cleaned up.
+#
+# Catalan (ca) - Croatian (hr) - Czech (cs) - Danish (da) - Dutch (nl)
+# English (en) - Esperanto (eo) - Estonian (et) - French (fr) - German (de)
+# Greek-Modern (el) - Hebrew (he) - Italian (it) - Japanese (ja)
+# Korean (ko) - Luxembourgeois* (ltz) - Norwegian Nynorsk (nn)
+# Norwegian (no) - Polish (pl) - Portuguese (pt)
+# Brazilian Portuguese (pt-BR) - Russian (ru) - Swedish (sv)
+# Simplified Chinese (zh-CN) - Spanish (es) - Traditional Chinese (zh-TW)
+AddLanguage ca .ca
+AddLanguage cs .cz .cs
+AddLanguage da .dk
+AddLanguage de .de
+AddLanguage el .el
+AddLanguage en .en
+AddLanguage eo .eo
+AddLanguage es .es
+AddLanguage et .et
+AddLanguage fr .fr
+AddLanguage he .he
+AddLanguage hr .hr
+AddLanguage it .it
+AddLanguage ja .ja
+AddLanguage ko .ko
+AddLanguage ltz .ltz
+AddLanguage nl .nl
+AddLanguage nn .nn
+AddLanguage no .no
+AddLanguage pl .po
+AddLanguage pt .pt
+AddLanguage pt-BR .pt-br
+AddLanguage ru .ru
+AddLanguage sv .sv
+AddLanguage zh-CN .zh-cn
+AddLanguage zh-TW .zh-tw
+
+# LanguagePriority allows you to give precedence to some languages
+# in case of a tie during content negotiation.
+#
+# Just list the languages in decreasing order of preference. We have
+# more or less alphabetized them here. You probably want to change this.
+LanguagePriority en ca cs da de el eo es et fr he hr it ja ko ltz nl nn no pl pt pt-BR ru sv zh-CN zh-TW
+
+# ForceLanguagePriority allows you to serve a result page rather than
+# MULTIPLE CHOICES (Prefer) [in case of a tie] or NOT ACCEPTABLE (Fallback)
+# [in case no accepted languages matched the available variants]
+ForceLanguagePriority Prefer Fallback
+
+# Commonly used filename extensions to character sets. You probably
+# want to avoid clashes with the language extensions, unless you
+# are good at carefully testing your setup after each change.
+# See http://www.iana.org/assignments/character-sets for the
+# official list of charset names and their respective RFCs.
+AddCharset us-ascii .ascii .us-ascii
+AddCharset ISO-8859-1 .iso8859-1 .latin1
+AddCharset ISO-8859-2 .iso8859-2 .latin2 .cen
+AddCharset ISO-8859-3 .iso8859-3 .latin3
+AddCharset ISO-8859-4 .iso8859-4 .latin4
+AddCharset ISO-8859-5 .iso8859-5 .cyr .iso-ru
+AddCharset ISO-8859-6 .iso8859-6 .arb .arabic
+AddCharset ISO-8859-7 .iso8859-7 .grk .greek
+AddCharset ISO-8859-8 .iso8859-8 .heb .hebrew
+AddCharset ISO-8859-9 .iso8859-9 .latin5 .trk
+AddCharset ISO-8859-10 .iso8859-10 .latin6
+AddCharset ISO-8859-13 .iso8859-13
+AddCharset ISO-8859-14 .iso8859-14 .latin8
+AddCharset ISO-8859-15 .iso8859-15 .latin9
+AddCharset ISO-8859-16 .iso8859-16 .latin10
+AddCharset ISO-2022-JP .iso2022-jp .jis
+AddCharset ISO-2022-KR .iso2022-kr .kis
+AddCharset ISO-2022-CN .iso2022-cn .cis
+AddCharset Big5 .Big5 .big5 .b5
+AddCharset cn-Big5 .cn-big5
+# For Russian, more than one charset is used (depends on client, mostly):
+AddCharset WINDOWS-1251 .cp-1251 .win-1251
+AddCharset CP866 .cp866
+AddCharset KOI8 .koi8
+AddCharset KOI8-E .koi8-e
+AddCharset KOI8-r .koi8-r .koi8-ru
+AddCharset KOI8-U .koi8-u
+AddCharset KOI8-ru .koi8-uk .ua
+AddCharset ISO-10646-UCS-2 .ucs2
+AddCharset ISO-10646-UCS-4 .ucs4
+AddCharset UTF-7 .utf7
+AddCharset UTF-8 .utf8
+AddCharset UTF-16 .utf16
+AddCharset UTF-16BE .utf16be
+AddCharset UTF-16LE .utf16le
+AddCharset UTF-32 .utf32
+AddCharset UTF-32BE .utf32be
+AddCharset UTF-32LE .utf32le
+AddCharset euc-cn .euc-cn
+AddCharset euc-gb .euc-gb
+AddCharset euc-jp .euc-jp
+AddCharset euc-kr .euc-kr
+# Not sure how euc-tw got in - IANA doesn't list it???
+AddCharset EUC-TW .euc-tw
+AddCharset gb2312 .gb2312 .gb
+AddCharset iso-10646-ucs-2 .ucs-2 .iso-10646-ucs-2
+AddCharset iso-10646-ucs-4 .ucs-4 .iso-10646-ucs-4
+AddCharset shift_jis .shift_jis .sjis
+</IfModule>
+</IfModule>
+</IfDefine>
+
+# vim: ts=4 filetype=apache
diff --git a/puppet/modules/apache/files/modules.d/Gentoo/00_mod_autoindex.conf b/puppet/modules/apache/files/modules.d/Gentoo/00_mod_autoindex.conf
new file mode 100644
index 00000000..2512357d
--- /dev/null
+++ b/puppet/modules/apache/files/modules.d/Gentoo/00_mod_autoindex.conf
@@ -0,0 +1,83 @@
+<IfModule autoindex_module>
+<IfModule alias_module>
+# We include the /icons/ alias for FancyIndexed directory listings. If
+# you do not use FancyIndexing, you may comment this out.
+Alias /icons/ "/var/www/localhost/icons/"
+
+<Directory "/var/www/localhost/icons">
+ Options Indexes MultiViews
+ AllowOverride None
+ Order allow,deny
+ Allow from all
+</Directory>
+</IfModule>
+
+# Directives controlling the display of server-generated directory listings.
+#
+# To see the listing of a directory, the Options directive for the
+# directory must include "Indexes", and the directory must not contain
+# a file matching those listed in the DirectoryIndex directive.
+
+# IndexOptions: Controls the appearance of server-generated directory
+# listings.
+IndexOptions FancyIndexing VersionSort
+
+# AddIcon* directives tell the server which icon to show for different
+# files or filename extensions. These are only displayed for
+# FancyIndexed directories.
+AddIconByEncoding (CMP,/icons/compressed.gif) x-compress x-gzip
+
+AddIconByType (TXT,/icons/text.gif) text/*
+AddIconByType (IMG,/icons/image2.gif) image/*
+AddIconByType (SND,/icons/sound2.gif) audio/*
+AddIconByType (VID,/icons/movie.gif) video/*
+
+AddIcon /icons/binary.gif .bin .exe
+AddIcon /icons/binhex.gif .hqx
+AddIcon /icons/tar.gif .tar
+AddIcon /icons/world2.gif .wrl .wrl.gz .vrml .vrm .iv
+AddIcon /icons/compressed.gif .Z .z .tgz .gz .zip
+AddIcon /icons/a.gif .ps .ai .eps
+AddIcon /icons/layout.gif .html .shtml .htm .pdf
+AddIcon /icons/text.gif .txt
+AddIcon /icons/c.gif .c
+AddIcon /icons/p.gif .pl .py
+AddIcon /icons/f.gif .for
+AddIcon /icons/dvi.gif .dvi
+AddIcon /icons/uuencoded.gif .uu
+AddIcon /icons/script.gif .conf .sh .shar .csh .ksh .tcl
+AddIcon /icons/tex.gif .tex
+AddIcon /icons/bomb.gif core
+
+AddIcon /icons/back.gif ..
+AddIcon /icons/hand.right.gif README
+AddIcon /icons/folder.gif ^^DIRECTORY^^
+AddIcon /icons/blank.gif ^^BLANKICON^^
+
+# DefaultIcon is the icon to show for files which do not have an icon
+# explicitly set.
+DefaultIcon /icons/unknown.gif
+
+# AddDescription allows you to place a short description after a file in
+# server-generated indexes. These are only displayed for FancyIndexed
+# directories.
+# Format: AddDescription "description" filename
+
+#AddDescription "GZIP compressed document" .gz
+#AddDescription "tar archive" .tar
+#AddDescription "GZIP compressed tar archive" .tgz
+
+# ReadmeName is the name of the README file the server will look for by
+# default, and append to directory listings.
+
+# HeaderName is the name of a file which should be prepended to
+# directory indexes.
+ReadmeName README.html
+HeaderName HEADER.html
+
+# IndexIgnore is a set of filenames which directory indexing should ignore
+# and not include in the listing. Shell-style wildcarding is permitted.
+IndexIgnore .??* *~ *# HEADER* README* RCS CVS *,v *,t
+</IfModule>
+
+# vim: ts=4 filetype=apache
diff --git a/puppet/modules/apache/files/modules.d/Gentoo/00_mod_info.conf b/puppet/modules/apache/files/modules.d/Gentoo/00_mod_info.conf
new file mode 100644
index 00000000..53fd7aea
--- /dev/null
+++ b/puppet/modules/apache/files/modules.d/Gentoo/00_mod_info.conf
@@ -0,0 +1,14 @@
+<IfDefine INFO>
+<IfModule info_module>
+# Allow remote server configuration reports, with the URL of
+# http://servername/server-info
+<Location /server-info>
+ SetHandler server-info
+ Order deny,allow
+ Deny from all
+ Allow from 127.0.0.1
+</Location>
+</IfModule>
+</IfDefine>
+
+# vim: ts=4 filetype=apache
diff --git a/puppet/modules/apache/files/modules.d/Gentoo/00_mod_log_config.conf b/puppet/modules/apache/files/modules.d/Gentoo/00_mod_log_config.conf
new file mode 100644
index 00000000..2f4244c9
--- /dev/null
+++ b/puppet/modules/apache/files/modules.d/Gentoo/00_mod_log_config.conf
@@ -0,0 +1,35 @@
+<IfModule log_config_module>
+# The following directives define some format nicknames for use with
+# a CustomLog directive (see below).
+LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
+LogFormat "%h %l %u %t \"%r\" %>s %b" common
+
+LogFormat "%{Referer}i -> %U" referer
+LogFormat "%{User-Agent}i" agent
+LogFormat "%v %h %l %u %t \"%r\" %>s %b %T" script
+LogFormat "%v %h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i VLOG=%{VLOG}e" vhost
+
+<IfModule logio_module>
+# You need to enable mod_logio.c to use %I and %O
+LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio
+LogFormat "%v %h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" vhostio
+</IfModule>
+
+# The location and format of the access logfile (Common Logfile Format).
+# If you do not define any access logfiles within a <VirtualHost>
+# container, they will be logged here. Contrariwise, if you *do*
+# define per-<VirtualHost> access logfiles, transactions will be
+# logged therein and *not* in this file.
+CustomLog /var/log/apache2/access_log common
+
+# If you would like to have agent and referer logfiles,
+# uncomment the following directives.
+#CustomLog /var/log/apache2/referer_log referer
+#CustomLog /var/log/apache2/agent_logs agent
+
+# If you prefer a logfile with access, agent, and referer information
+# (Combined Logfile Format) you can use the following directive.
+#CustomLog /var/log/apache2/access_log combined
+</IfModule>
+
+# vim: ts=4 filetype=apache
diff --git a/puppet/modules/apache/files/modules.d/Gentoo/00_mod_mime.conf b/puppet/modules/apache/files/modules.d/Gentoo/00_mod_mime.conf
new file mode 100644
index 00000000..51f23d55
--- /dev/null
+++ b/puppet/modules/apache/files/modules.d/Gentoo/00_mod_mime.conf
@@ -0,0 +1,55 @@
+# DefaultType: the default MIME type the server will use for a document
+# if it cannot otherwise determine one, such as from filename extensions.
+# If your server contains mostly text or HTML documents, "text/plain" is
+# a good value. If most of your content is binary, such as applications
+# or images, you may want to use "application/octet-stream" instead to
+# keep browsers from trying to display binary files as though they are
+# text.
+DefaultType text/plain
+
+<IfModule mime_module>
+# TypesConfig points to the file containing the list of mappings from
+# filename extension to MIME-type.
+TypesConfig /etc/mime.types
+
+# AddType allows you to add to or override the MIME configuration
+# file specified in TypesConfig for specific file types.
+#AddType application/x-gzip .tgz
+
+# AddEncoding allows you to have certain browsers uncompress
+# information on the fly. Note: Not all browsers support this.
+#AddEncoding x-compress .Z
+#AddEncoding x-gzip .gz .tgz
+
+# If the AddEncoding directives above are commented-out, then you
+# probably should define those extensions to indicate media types:
+AddType application/x-compress .Z
+AddType application/x-gzip .gz .tgz
+
+# AddHandler allows you to map certain file extensions to "handlers":
+# actions unrelated to filetype. These can be either built into the server
+# or added with the Action directive (see below)
+
+# To use CGI scripts outside of ScriptAliased directories:
+# (You will also need to add "ExecCGI" to the "Options" directive.)
+#AddHandler cgi-script .cgi
+
+# For type maps (negotiated resources):
+#AddHandler type-map var
+
+# Filters allow you to process content before it is sent to the client.
+#
+# To parse .shtml files for server-side includes (SSI):
+# (You will also need to add "Includes" to the "Options" directive.)
+#AddType text/html .shtml
+#AddOutputFilter INCLUDES .shtml
+</IfModule>
+
+<IfModule mime_magic_module>
+# The mod_mime_magic module allows the server to use various hints from the
+# contents of the file itself to determine its type. The MIMEMagicFile
+# directive tells the module where the hint definitions are located.
+MIMEMagicFile /etc/apache2/magic
+</IfModule>
+
+# vim: ts=4 filetype=apache
diff --git a/puppet/modules/apache/files/modules.d/Gentoo/00_mod_status.conf b/puppet/modules/apache/files/modules.d/Gentoo/00_mod_status.conf
new file mode 100644
index 00000000..fa906766
--- /dev/null
+++ b/puppet/modules/apache/files/modules.d/Gentoo/00_mod_status.conf
@@ -0,0 +1,19 @@
+<IfDefine STATUS>
+<IfModule status_module>
+# Allow server status reports generated by mod_status,
+# with the URL of http://servername/server-status
+<Location /server-status>
+ SetHandler server-status
+ Order deny,allow
+ Deny from all
+ Allow from 127.0.0.1
+</Location>
+
+# ExtendedStatus controls whether Apache will generate "full" status
+# information (ExtendedStatus On) or just basic information (ExtendedStatus
+# Off) when the "server-status" handler is called.
+ExtendedStatus On
+</IfModule>
+</IfDefine>
+
+# vim: ts=4 filetype=apache
diff --git a/puppet/modules/apache/files/modules.d/Gentoo/00_mod_userdir.conf b/puppet/modules/apache/files/modules.d/Gentoo/00_mod_userdir.conf
new file mode 100644
index 00000000..3fb69117
--- /dev/null
+++ b/puppet/modules/apache/files/modules.d/Gentoo/00_mod_userdir.conf
@@ -0,0 +1,40 @@
+# Settings for user home directories
+
+<IfDefine USERDIR>
+<IfModule userdir_module>
+
+# UserDir: The name of the directory that is appended onto a user's home
+# directory if a ~user request is received. Note that you must also set
+# the default access control for these directories, as in the example below.
+UserDir public_html
+
+# Control access to UserDir directories. The following is an example
+# for a site where these directories are restricted to read-only.
+<Directory /home/*/public_html>
+ AllowOverride FileInfo AuthConfig Limit Indexes
+ Options MultiViews Indexes SymLinksIfOwnerMatch IncludesNoExec
+ <Limit GET POST OPTIONS>
+ Order allow,deny
+ Allow from all
+ </Limit>
+ <LimitExcept GET POST OPTIONS>
+ Order deny,allow
+ Deny from all
+ </LimitExcept>
+</Directory>
+
+# Suexec isn't really required to run cgi-scripts, but it's a really good
+# idea if you have multiple users serving websites...
+<IfDefine SUEXEC>
+<IfModule suexec_module>
+<Directory /home/*/public_html/cgi-bin>
+ Options ExecCGI
+ SetHandler cgi-script
+</Directory>
+</IfModule>
+</IfDefine>
+
+</IfModule>
+</IfDefine>
+
+# vim: ts=4 filetype=apache
diff --git a/puppet/modules/apache/files/modules.d/Gentoo/00_mpm.conf b/puppet/modules/apache/files/modules.d/Gentoo/00_mpm.conf
new file mode 100644
index 00000000..01833059
--- /dev/null
+++ b/puppet/modules/apache/files/modules.d/Gentoo/00_mpm.conf
@@ -0,0 +1,102 @@
+# Server-Pool Management (MPM specific)
+
+# PidFile: The file in which the server should record its process
+# identification number when it starts.
+#
+# Note that this is the default PidFile for most MPMs.
+PidFile /var/run/apache2.pid
+
+# The accept serialization lock file MUST BE STORED ON A LOCAL DISK.
+#LockFile /var/run/apache2.lock
+
+# Only one of the below sections will be relevant on your
+# installed httpd. Use "/usr/sbin/apache2 -l" to find out the
+# active mpm.
+
+# common MPM configuration
+# These configuration directives apply to all MPMs
+#
+# StartServers: Number of child server processes created at startup
+# MaxClients: Maximum number of child processes to serve requests
+# MaxRequestsPerChild: Limit on the number of requests that an individual child
+# server will handle during its life
+
+
+# prefork MPM
+# This is the default MPM if USE=-threads
+#
+# MinSpareServers: Minimum number of idle child server processes
+# MaxSpareServers: Maximum number of idle child server processes
+<IfModule mpm_prefork_module>
+ StartServers 5
+ MinSpareServers 5
+ MaxSpareServers 10
+ MaxClients 150
+ MaxRequestsPerChild 10000
+</IfModule>
+
+# worker MPM
+# This is the default MPM if USE=threads
+#
+# MinSpareThreads: Minimum number of idle threads available to handle request spikes
+# MaxSpareThreads: Maximum number of idle threads
+# ThreadsPerChild: Number of threads created by each child process
+<IfModule mpm_worker_module>
+ StartServers 2
+ MinSpareThreads 25
+ MaxSpareThreads 75
+ ThreadsPerChild 25
+ MaxClients 150
+ MaxRequestsPerChild 10000
+</IfModule>
+
+# event MPM
+#
+# MinSpareThreads: Minimum number of idle threads available to handle request spikes
+# MaxSpareThreads: Maximum number of idle threads
+# ThreadsPerChild: Number of threads created by each child process
+<IfModule mpm_event_module>
+ StartServers 2
+ MinSpareThreads 25
+ MaxSpareThreads 75
+ ThreadsPerChild 25
+ MaxClients 150
+ MaxRequestsPerChild 10000
+</IfModule>
+
+# peruser MPM
+#
+# MinSpareProcessors: Minimum number of idle child server processes
+# MinProcessors: Minimum number of processors per virtual host
+# MaxProcessors: Maximum number of processors per virtual host
+# ExpireTimeout: Maximum idle time before a child is killed, 0 to disable
+# Multiplexer: Specify a Multiplexer child configuration.
+# Processor: Specify a user and group for a specific child process
+<IfModule mpm_peruser_module>
+ MinSpareProcessors 2
+ MinProcessors 2
+ MaxProcessors 10
+ MaxClients 150
+ MaxRequestsPerChild 1000
+ ExpireTimeout 1800
+
+ # KeepAlive *MUST* be set to off
+ KeepAlive Off
+
+ Multiplexer nobody nobody
+ Processor apache apache
+</IfModule>
+
+# itk MPM
+#
+# MinSpareServers: Minimum number of idle child server processes
+# MaxSpareServers: Maximum number of idle child server processes
+<IfModule mpm_itk_module>
+ StartServers 5
+ MinSpareServers 5
+ MaxSpareServers 10
+ MaxClients 150
+ MaxRequestsPerChild 10000
+</IfModule>
+
+# vim: ts=4 filetype=apache
diff --git a/puppet/modules/apache/files/modules.d/Gentoo/10_mod_mem_cache.conf b/puppet/modules/apache/files/modules.d/Gentoo/10_mod_mem_cache.conf
new file mode 100644
index 00000000..ad7fa9e0
--- /dev/null
+++ b/puppet/modules/apache/files/modules.d/Gentoo/10_mod_mem_cache.conf
@@ -0,0 +1,10 @@
+<IfDefine MEM_CACHE>
+# 128MB cache for objects smaller than MCacheMaxObjectSize (2048 bytes)
+CacheEnable mem /
+MCacheSize 131072
+MCacheMaxObjectCount 1000
+MCacheMinObjectSize 1
+MCacheMaxObjectSize 2048
+</IfDefine>
+
+# vim: ts=4 filetype=apache
diff --git a/puppet/modules/apache/files/modules.d/Gentoo/40_mod_ssl.conf b/puppet/modules/apache/files/modules.d/Gentoo/40_mod_ssl.conf
new file mode 100644
index 00000000..331783a6
--- /dev/null
+++ b/puppet/modules/apache/files/modules.d/Gentoo/40_mod_ssl.conf
@@ -0,0 +1,65 @@
+# Note: The following must be present to support
+# starting without SSL on platforms with no /dev/random equivalent
+# but a statically compiled-in mod_ssl.
+<IfModule ssl_module>
+SSLRandomSeed startup builtin
+SSLRandomSeed connect builtin
+</IfModule>
+
+<IfDefine SSL>
+<IfModule ssl_module>
+# This is the Apache server configuration file providing SSL support.
+# It contains the configuration directives to instruct the server how to
+# serve pages over an https connection. For detailed information about these
+# directives see <URL:http://httpd.apache.org/docs/2.2/mod/mod_ssl.html>
+
+# Do NOT simply read the instructions in here without understanding
+# what they do. They're here only as hints or reminders. If you are unsure
+# consult the online docs. You have been warned.
+
+## Pseudo Random Number Generator (PRNG):
+# Configure one or more sources to seed the PRNG of the SSL library.
+# The seed data should be of good random quality.
+# WARNING! On some platforms /dev/random blocks if not enough entropy
+# is available. This means you then cannot use the /dev/random device
+# because it would lead to very long connection times (as long as
+# it requires to make more entropy available). But usually those
+# platforms additionally provide a /dev/urandom device which doesn't
+# block. So, if available, use this one instead. Read the mod_ssl User
+# Manual for more details.
+#SSLRandomSeed startup file:/dev/random 512
+#SSLRandomSeed startup file:/dev/urandom 512
+#SSLRandomSeed connect file:/dev/random 512
+#SSLRandomSeed connect file:/dev/urandom 512
+
+## SSL Global Context:
+# All SSL configuration in this context applies both to the main server and
+# all SSL-enabled virtual hosts.
+
+# Some MIME-types for downloading Certificates and CRLs
+<IfModule mime_module>
+ AddType application/x-x509-ca-cert .crt
+ AddType application/x-pkcs7-crl .crl
+</IfModule>
+
+## Pass Phrase Dialog:
+# Configure the pass phrase gathering process. The filtering dialog program
+# (`builtin' is an internal terminal dialog) has to provide the pass phrase on
+# stdout.
+SSLPassPhraseDialog builtin
+
+## Inter-Process Session Cache:
+# Configure the SSL Session Cache: First the mechanism to use and second the
+# expiring timeout (in seconds).
+#SSLSessionCache dbm:/var/run/ssl_scache
+SSLSessionCache shmcb:/var/run/ssl_scache(512000)
+SSLSessionCacheTimeout 300
+
+## Semaphore:
+# Configure the path to the mutual exclusion semaphore the SSL engine uses
+# internally for inter-process synchronization.
+SSLMutex file:/var/run/ssl_mutex
+</IfModule>
+</IfDefine>
+
+# vim: ts=4 filetype=apache
diff --git a/puppet/modules/apache/files/modules.d/Gentoo/45_mod_dav.conf b/puppet/modules/apache/files/modules.d/Gentoo/45_mod_dav.conf
new file mode 100644
index 00000000..b15ca017
--- /dev/null
+++ b/puppet/modules/apache/files/modules.d/Gentoo/45_mod_dav.conf
@@ -0,0 +1,56 @@
+<IfDefine DAV>
+<IfModule dav_module>
+<IfModule dav_fs_module>
+DavLockDB "/var/lib/dav/lockdb"
+
+# The following example gives DAV write access to a directory called
+# "uploads" under the ServerRoot directory.
+<IfModule alias_module>
+<IfModule auth_digest_module>
+<IfModule authn_file_module>
+Alias /uploads "/var/www/uploads"
+
+<Directory "/var/www/uploads">
+ Dav On
+
+ AuthType Digest
+ AuthName DAV-upload
+
+ # You can use the htdigest program to create the password database:
+ # htdigest -c "/var/www/.htpasswd-dav" DAV-upload admin
+ AuthUserFile "/var/www/.htpasswd-dav"
+
+ # Allow access from any host
+ Order allow,deny
+ Allow from all
+
+ # Allow universal read-access, but writes are restricted
+ # to the admin user.
+ <LimitExcept GET OPTIONS>
+ require user admin
+ </LimitExcept>
+</Directory>
+</IfModule>
+</IfModule>
+</IfModule>
+
+</IfModule>
+</IfModule>
+
+# The following directives disable redirects on non-GET requests for
+# a directory that does not include the trailing slash. This fixes a
+# problem with several clients that do not appropriately handle
+# redirects for folders with DAV methods.
+<IfModule setenvif_module>
+BrowserMatch "Microsoft Data Access Internet Publishing Provider" redirect-carefully
+BrowserMatch "MS FrontPage" redirect-carefully
+BrowserMatch "^WebDrive" redirect-carefully
+BrowserMatch "^WebDAVFS/1.[012345]" redirect-carefully
+BrowserMatch "^gnome-vfs/1.0" redirect-carefully
+BrowserMatch "^XML Spy" redirect-carefully
+BrowserMatch "^Dreamweaver-WebDAV-SCM1" redirect-carefully
+</IfModule>
+
+</IfDefine>
+
+# vim: ts=4 filetype=apache
diff --git a/puppet/modules/apache/files/modules.d/Gentoo/46_mod_ldap.conf b/puppet/modules/apache/files/modules.d/Gentoo/46_mod_ldap.conf
new file mode 100644
index 00000000..837bc6e6
--- /dev/null
+++ b/puppet/modules/apache/files/modules.d/Gentoo/46_mod_ldap.conf
@@ -0,0 +1,29 @@
+# Examples below are taken from the online documentation
+# Refer to:
+# http://localhost/manual/mod/mod_ldap.html
+# http://localhost/manual/mod/mod_auth_ldap.html
+<IfDefine LDAP>
+<IfModule ldap_module>
+LDAPSharedCacheSize 200000
+LDAPCacheEntries 1024
+LDAPCacheTTL 600
+LDAPOpCacheEntries 1024
+LDAPOpCacheTTL 600
+
+<Location /ldap-status>
+ SetHandler ldap-status
+ Order deny,allow
+ Deny from all
+ Allow from 127.0.0.1
+</Location>
+</IfModule>
+</IfDefine>
+
+<IfDefine AUTHNZ_LDAP>
+<IfModule authnz_ldap_module>
+ #AuthLDAPURL ldap://ldap1.airius.com:389/ou=People, o=Airius?uid?sub?(objectClass=*)
+ #require valid-user
+</IfModule>
+</IfDefine>
+
+# vim: ts=4 filetype=apache
diff --git a/puppet/modules/apache/files/modules.d/Gentoo/70_mod_php5.conf b/puppet/modules/apache/files/modules.d/Gentoo/70_mod_php5.conf
new file mode 100644
index 00000000..a8254359
--- /dev/null
+++ b/puppet/modules/apache/files/modules.d/Gentoo/70_mod_php5.conf
@@ -0,0 +1,18 @@
+<IfDefine PHP5>
+ # Load the module first
+ <IfModule !mod_php5.c>
+ LoadModule php5_module modules/libphp5.so
+ </IfModule>
+
+ # Set it to handle the files
+ <IfModule mod_mime.c>
+ AddType application/x-httpd-php .php
+ AddType application/x-httpd-php .phtml
+ AddType application/x-httpd-php .php3
+ AddType application/x-httpd-php .php4
+ AddType application/x-httpd-php .php5
+ AddType application/x-httpd-php-source .phps
+ </IfModule>
+
+ DirectoryIndex index.php index.phtml
+</IfDefine>
diff --git a/puppet/modules/apache/files/munin/apache_activity b/puppet/modules/apache/files/munin/apache_activity
new file mode 100755
index 00000000..65fc0722
--- /dev/null
+++ b/puppet/modules/apache/files/munin/apache_activity
@@ -0,0 +1,99 @@
+#!/usr/bin/perl
+#
+# Parameters supported:
+#
+# config
+# autoconf
+#
+# Configurable variables
+#
+# url - Override default status-url
+# ports - Space-separated list of HTTP ports to query (default: 80)
+#
+# Magic markers:
+#%# family=auto
+#%# capabilities=autoconf
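+#
+# Example plugin configuration (illustrative; the conf-file path and port
+# list are assumptions), e.g. in /etc/munin/plugin-conf.d/apache_activity:
+#
+#   [apache_activity]
+#   env.url http://127.0.0.1:%d/server-status?auto
+#   env.ports 80 443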
+
+my $ret = undef;
+if (!eval "require LWP::UserAgent;") {
+ $ret = "LWP::UserAgent not found";
+}
+
+my $URL = exists $ENV{'url'} ? $ENV{'url'} : "http://127.0.0.1:%d/server-status?auto";
+my @PORTS = exists $ENV{'ports'} ? split(' ', $ENV{'ports'}) : (80);
+my %chars = (
+ # '\_' => 'Waiting',
+ # 'S' => 'Starting up',
+ 'R' => 'Reading request',
+ 'W' => 'Sending reply',
+ 'K' => 'Keepalive',
+ 'D' => 'DNS lookup',
+ 'C' => 'Closing',
+ # 'L' => 'Logging',
+ # 'G' => 'Gracefully finishing',
+ # 'I' => 'Idle cleanup',
+ # '\.' => 'Open slot',
+ );
+
+# "_" Waiting for Connection, "S" Starting up, "R" Reading Request,
+# "W" Sending Reply, "K" Keepalive (read), "D" DNS Lookup,
+# "C" Closing connection, "L" Logging, "G" Gracefully finishing,
+# "I" Idle cleanup of worker, "." Open slot with no current process
+
+if (exists $ARGV[0] and $ARGV[0] eq "autoconf") {
+ if ($ret) {
+ print "no ($ret)\n";
+ exit 1;
+ }
+ my $ua = LWP::UserAgent->new(timeout => 30);
+ my @badports;
+
+ foreach my $port (@PORTS) {
+ my $url = sprintf $URL, $port;
+ my $response = $ua->request(HTTP::Request->new('GET',$url));
+ push @badports, $port unless $response->is_success and $response->content =~ /Scoreboard/im;
+ }
+
+ if (@badports) {
+ print "no (no apache server-status on ports @badports)\n";
+ exit 1;
+ } else {
+ print "yes\n";
+ exit 0;
+ }
+}
+
+if (exists $ARGV[0] and $ARGV[0] eq "config") {
+ print "graph_title Apache activity\n";
+ print "graph_args --base 1000 -l 0\n";
+ print "graph_category apache\n";
+ print "graph_vlabel processes\n";
+ foreach my $port (@PORTS) {
+ while (my ($char, $val) = each (%chars)) {
+ $char =~ s/\\\./dot/;
+ $char =~ s/\\\_/underline/;
+ print "activity_${port}_${char}.label ";
+ print $val, "\n";
+ print "activity_${port}_${char}.type GAUGE\n";
+ }
+ }
+ exit 0;
+}
+
+foreach my $port (@PORTS) {
+ my $ua = LWP::UserAgent->new (timeout => 30);
+ my $url = sprintf $URL, $port;
+ my $response = $ua->request (HTTP::Request->new('GET',$url));
+ if ($response->content =~ /^Scoreboard\:\s?(.*)$/sm) {
+ my $string = $1;
+ chomp $string;
+ my @act = split (//, $string);
+ foreach my $char (keys (%chars)) {
+ my $num = scalar (grep (/$char/, @act));
+ $char =~ s/\\\./dot/;
+ $char =~ s/\\\_/underline/;
+ print "activity_${port}_${char}.value $num\n";
+ }
+ }
+}
+
+
diff --git a/puppet/modules/apache/files/scripts/OpenBSD/bin/apache_logrotate.sh b/puppet/modules/apache/files/scripts/OpenBSD/bin/apache_logrotate.sh
new file mode 100644
index 00000000..c2fcad97
--- /dev/null
+++ b/puppet/modules/apache/files/scripts/OpenBSD/bin/apache_logrotate.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+WEBROOT="/var/www/htdocs"
+#PIDFILE="/var/www/logs/httpd.pid"
+echo "#Autogenrated newsyslog.conf\n# logfile_name owner:group mode count size when flags"
+find /var/www/logs -name '*_log' -exec perl -e 'print "\n{}\twww:www\t644\t30\t*\t\$D0\tZ" ' \;
+find $WEBROOT -name '*_log' -exec perl -e 'print "\n{}\twww:www\t644\t30\t*\t\$D0\tZ" ' \;
+perl -e 'print "\t\t \"/bin/sh /opt/bin/restart_apache.sh\"";'
diff --git a/puppet/modules/apache/files/scripts/OpenBSD/bin/restart_apache.sh b/puppet/modules/apache/files/scripts/OpenBSD/bin/restart_apache.sh
new file mode 100644
index 00000000..4dc936d3
--- /dev/null
+++ b/puppet/modules/apache/files/scripts/OpenBSD/bin/restart_apache.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+ignoreout='Processing config'
+apachectl restart 2>&1 | (egrep -v -e "_default_ VirtualHost overlap on port 443" -e "$ignoreout" -e "/usr/sbin/apachectl restart: httpd restarted" || true )
+sleep 10
+apachectl start 2>&1 | (egrep -v -e "_default_ VirtualHost overlap on port 443" -e "$ignoreout" -e "/usr/sbin/apachectl startssl: httpd started" || true )
diff --git a/puppet/modules/apache/files/scripts/OpenBSD/bin/restart_apache_ssl.sh b/puppet/modules/apache/files/scripts/OpenBSD/bin/restart_apache_ssl.sh
new file mode 100644
index 00000000..314018b6
--- /dev/null
+++ b/puppet/modules/apache/files/scripts/OpenBSD/bin/restart_apache_ssl.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+ignoreout='Processing config'
+apachectl restart 2>&1 | (egrep -v -e "_default_ VirtualHost overlap on port 443" -e "$ignoreout" -e "/usr/sbin/apachectl restart: httpd restarted" || true )
+sleep 10
+apachectl startssl 2>&1 | (egrep -v -e "_default_ VirtualHost overlap on port 443" -e "$ignoreout" -e "/usr/sbin/apachectl startssl: httpd started" || true )
diff --git a/puppet/modules/apache/files/service/CentOS/httpd b/puppet/modules/apache/files/service/CentOS/httpd
new file mode 100644
index 00000000..7102c611
--- /dev/null
+++ b/puppet/modules/apache/files/service/CentOS/httpd
@@ -0,0 +1,22 @@
+# Configuration file for the httpd service.
+
+#
+# The default processing model (MPM) is the process-based
+# 'prefork' model. A thread-based model, 'worker', is also
+# available, but does not work with some modules (such as PHP).
+# The service must be stopped before changing this variable.
+#
+#HTTPD=/usr/sbin/httpd.worker
+
+#
+# To pass additional options (for instance, -D definitions) to the
+# httpd binary at startup, set OPTIONS here.
+#
+#OPTIONS=
+
+#
+# By default, the httpd process is started in the C locale; to
+# change the locale in which the server runs, the HTTPD_LANG
+# variable can be set.
+#
+#HTTPD_LANG=C
diff --git a/puppet/modules/apache/files/service/CentOS/httpd.itk b/puppet/modules/apache/files/service/CentOS/httpd.itk
new file mode 100644
index 00000000..62a2d24f
--- /dev/null
+++ b/puppet/modules/apache/files/service/CentOS/httpd.itk
@@ -0,0 +1,23 @@
+# Configuration file for the httpd service.
+
+#
+# The default processing model (MPM) is the process-based
+# 'prefork' model. A thread-based model, 'worker', is also
+# available, but does not work with some modules (such as PHP).
+# The service must be stopped before changing this variable.
+#
+#HTTPD=/usr/sbin/httpd.worker
+HTTPD=/usr/sbin/httpd.itk
+
+#
+# To pass additional options (for instance, -D definitions) to the
+# httpd binary at startup, set OPTIONS here.
+#
+#OPTIONS=
+
+#
+# By default, the httpd process is started in the C locale; to
+# change the locale in which the server runs, the HTTPD_LANG
+# variable can be set.
+#
+#HTTPD_LANG=C
diff --git a/puppet/modules/apache/files/service/CentOS/httpd.itk_plus b/puppet/modules/apache/files/service/CentOS/httpd.itk_plus
new file mode 100644
index 00000000..4d74de2c
--- /dev/null
+++ b/puppet/modules/apache/files/service/CentOS/httpd.itk_plus
@@ -0,0 +1,24 @@
+# Configuration file for the httpd service.
+
+#
+# The default processing model (MPM) is the process-based
+# 'prefork' model. A thread-based model, 'worker', is also
+# available, but does not work with some modules (such as PHP).
+# The service must be stopped before changing this variable.
+#
+#HTTPD=/usr/sbin/httpd.worker
+HTTPD=/usr/sbin/httpd
+HTTPD_LOCAL=/usr/sbin/httpd.itk
+
+#
+# To pass additional options (for instance, -D definitions) to the
+# httpd binary at startup, set OPTIONS here.
+#
+#OPTIONS=
+
+#
+# By default, the httpd process is started in the C locale; to
+# change the locale in which the server runs, the HTTPD_LANG
+# variable can be set.
+#
+#HTTPD_LANG=C
diff --git a/puppet/modules/apache/files/service/CentOS/httpd.worker b/puppet/modules/apache/files/service/CentOS/httpd.worker
new file mode 100644
index 00000000..290923f5
--- /dev/null
+++ b/puppet/modules/apache/files/service/CentOS/httpd.worker
@@ -0,0 +1,22 @@
+# Configuration file for the httpd service.
+
+#
+# The default processing model (MPM) is the process-based
+# 'prefork' model. A thread-based model, 'worker', is also
+# available, but does not work with some modules (such as PHP).
+# The service must be stopped before changing this variable.
+#
+HTTPD=/usr/sbin/httpd.worker
+
+#
+# To pass additional options (for instance, -D definitions) to the
+# httpd binary at startup, set OPTIONS here.
+#
+#OPTIONS=
+
+#
+# By default, the httpd process is started in the C locale; to
+# change the locale in which the server runs, the HTTPD_LANG
+# variable can be set.
+#
+#HTTPD_LANG=C
diff --git a/puppet/modules/apache/files/vhosts.d/CentOS/0-default.conf b/puppet/modules/apache/files/vhosts.d/CentOS/0-default.conf
new file mode 100644
index 00000000..a8a84813
--- /dev/null
+++ b/puppet/modules/apache/files/vhosts.d/CentOS/0-default.conf
@@ -0,0 +1,11 @@
+############################################################
+### This file is managed by PUPPET! ####
+### Only modify in repo or you will lose the changes!   ####
+############################################################
+
+<VirtualHost *:80>
+ Include include.d/defaults.inc
+ DocumentRoot /var/www/html
+</VirtualHost>
+
+# vim: ts=4 filetype=apache
diff --git a/puppet/modules/apache/files/vhosts.d/Debian/0-default.conf b/puppet/modules/apache/files/vhosts.d/Debian/0-default.conf
new file mode 100644
index 00000000..2cbd90fe
--- /dev/null
+++ b/puppet/modules/apache/files/vhosts.d/Debian/0-default.conf
@@ -0,0 +1,41 @@
+<VirtualHost *:80>
+ ServerAdmin webmaster@localhost
+
+ DocumentRoot /var/www/
+ <Directory />
+ Options FollowSymLinks
+ AllowOverride None
+ </Directory>
+ <Directory /var/www/>
+ Options Indexes FollowSymLinks MultiViews
+ AllowOverride None
+ Order allow,deny
+ allow from all
+ </Directory>
+
+ ScriptAlias /cgi-bin/ /usr/lib/cgi-bin/
+ <Directory "/usr/lib/cgi-bin">
+ AllowOverride None
+ Options +ExecCGI -MultiViews +SymLinksIfOwnerMatch
+ Order allow,deny
+ Allow from all
+ </Directory>
+
+ ErrorLog /var/log/apache2/error.log
+
+ # Possible values include: debug, info, notice, warn, error, crit,
+ # alert, emerg.
+ LogLevel warn
+
+ CustomLog /var/log/apache2/access.log combined
+
+ Alias /doc/ "/usr/share/doc/"
+ <Directory "/usr/share/doc/">
+ Options Indexes MultiViews FollowSymLinks
+ AllowOverride None
+ Order deny,allow
+ Deny from all
+ Allow from 127.0.0.0/255.0.0.0 ::1/128
+ </Directory>
+
+</VirtualHost>
diff --git a/puppet/modules/apache/files/vhosts.d/Gentoo/0-default.conf b/puppet/modules/apache/files/vhosts.d/Gentoo/0-default.conf
new file mode 100644
index 00000000..03468459
--- /dev/null
+++ b/puppet/modules/apache/files/vhosts.d/Gentoo/0-default.conf
@@ -0,0 +1,51 @@
+# ###########################################################
+# ### this file is managed by PUPPET ####
+# ### only modify in svn or you will lose the changes! ####
+# ###########################################################
+# Virtual Hosts
+#
+# If you want to maintain multiple domains/hostnames on your
+# machine you can setup VirtualHost containers for them. Most configurations
+# use only name-based virtual hosts so the server doesn't need to worry about
+# IP addresses. This is indicated by the asterisks in the directives below.
+#
+# Please see the documentation at
+# <URL:http://httpd.apache.org/docs/2.2/vhosts/>
+# for further details before you try to setup virtual hosts.
+#
+# You may use the command line option '-S' to verify your virtual host
+# configuration.
+
+<IfDefine DEFAULT_VHOST>
+# see bug #178966 for why this is in here
+
+# Listen: Allows you to bind Apache to specific IP addresses and/or
+# ports, instead of the default. See also the <VirtualHost>
+# directive.
+#
+# Change this to Listen on specific IP addresses as shown below to
+# prevent Apache from glomming onto all bound IP addresses.
+#
+#Listen 12.34.56.78:80
+Listen 80
+
+# Use name-based virtual hosting.
+NameVirtualHost *:80
+
+# When virtual hosts are enabled, the main host defined in the default
+# httpd.conf configuration will go away. We redefine it here so that it is
+# still available.
+#
+# If you disable this vhost by removing -D DEFAULT_VHOST from
+# /etc/conf.d/apache2, the first defined virtual host elsewhere will be
+# the default.
+<VirtualHost *:80>
+ Include /etc/apache2/vhosts.d/default_vhost.include
+
+ <IfModule mpm_peruser_module>
+ ServerEnvironment apache apache
+ </IfModule>
+</VirtualHost>
+</IfDefine>
+
+# vim: ts=4 filetype=apache
diff --git a/puppet/modules/apache/files/vhosts.d/Gentoo/default_vhost.include b/puppet/modules/apache/files/vhosts.d/Gentoo/default_vhost.include
new file mode 100644
index 00000000..590c1848
--- /dev/null
+++ b/puppet/modules/apache/files/vhosts.d/Gentoo/default_vhost.include
@@ -0,0 +1,79 @@
+# ###########################################################
+# # copyleft 2008 immerda.ch
+# ###########################################################
+# ### this file is managed by PUPPET ####
+# ### only modify in svn or you will lose the changes! ####
+# ###########################################################
+# ServerAdmin: Your address, where problems with the server should be
+# e-mailed. This address appears on some server-generated pages, such
+# as error documents. e.g. admin@your-domain.com
+ServerAdmin root@localhost
+
+# DocumentRoot: The directory out of which you will serve your
+# documents. By default, all requests are taken from this directory, but
+# symbolic links and aliases may be used to point to other locations.
+#
+# If you change this to something that isn't under /var/www then suexec
+# will no longer work.
+DocumentRoot "/var/www/localhost/htdocs"
+
+# This should be changed to whatever you set DocumentRoot to.
+<Directory "/var/www/localhost/htdocs">
+ # Possible values for the Options directive are "None", "All",
+ # or any combination of:
+ # Indexes Includes FollowSymLinks SymLinksifOwnerMatch ExecCGI MultiViews
+ #
+ # Note that "MultiViews" must be named *explicitly* --- "Options All"
+ # doesn't give it to you.
+ #
+ # The Options directive is both complicated and important. Please see
+ # http://httpd.apache.org/docs/2.2/mod/core.html#options
+ # for more information.
+ Options Indexes FollowSymLinks
+
+ # AllowOverride controls what directives may be placed in .htaccess files.
+ # It can be "All", "None", or any combination of the keywords:
+ # Options FileInfo AuthConfig Limit
+ AllowOverride All
+
+ # Controls who can get stuff from this server.
+ Order allow,deny
+ Allow from all
+</Directory>
+
+<IfModule alias_module>
+ # Redirect: Allows you to tell clients about documents that used to
+ # exist in your server's namespace, but do not anymore. The client
+ # will make a new request for the document at its new location.
+ # Example:
+ # Redirect permanent /foo http://www.example.com/bar
+
+ # Alias: Maps web paths into filesystem paths and is used to
+ # access content that does not live under the DocumentRoot.
+ # Example:
+ # Alias /webpath /full/filesystem/path
+ #
+ # If you include a trailing / on /webpath then the server will
+ # require it to be present in the URL. You will also likely
+ # need to provide a <Directory> section to allow access to
+ # the filesystem path.
+
+ # ScriptAlias: This controls which directories contain server scripts.
+ # ScriptAliases are essentially the same as Aliases, except that
+ # documents in the target directory are treated as applications and
+ # run by the server when requested rather than as documents sent to the
+ # client. The same rules about trailing "/" apply to ScriptAlias
+ # directives as to Alias.
+ ScriptAlias /cgi-bin/ "/var/www/localhost/cgi-bin/"
+</IfModule>
+
+# "/var/www/localhost/cgi-bin" should be changed to whatever your ScriptAliased
+# CGI directory exists, if you have that configured.
+<Directory "/var/www/localhost/cgi-bin">
+ AllowOverride None
+ Options None
+ Order allow,deny
+ Allow from all
+</Directory>
+
+# vim: ts=4 filetype=apache
diff --git a/puppet/modules/apache/files/vhosts.d/OpenBSD/0-default.conf b/puppet/modules/apache/files/vhosts.d/OpenBSD/0-default.conf
new file mode 100644
index 00000000..9c4aa9d5
--- /dev/null
+++ b/puppet/modules/apache/files/vhosts.d/OpenBSD/0-default.conf
@@ -0,0 +1,8 @@
+<VirtualHost *:80>
+ Include include.d/defaults.inc
+
+ DocumentRoot /var/www/htdocs/default/www/
+ ErrorLog /var/www/htdocs/default/logs/default_error_log
+ CustomLog /var/www/htdocs/default/logs/default_access_log combined
+</VirtualHost>
+
diff --git a/puppet/modules/apache/lib/facter/apache_version.rb b/puppet/modules/apache/lib/facter/apache_version.rb
new file mode 100644
index 00000000..f0521832
--- /dev/null
+++ b/puppet/modules/apache/lib/facter/apache_version.rb
@@ -0,0 +1,28 @@
+# determine the version of apache installed
+
+def parse_version(version_string)
+ version = ""
+ version_string.each_line do |line|
+ if line.match(/^Server version/)
+ version = line.scan(/Apache\/(.*) /)[0][0]
+ end
+ end
+ return version
+end
+
+Facter.add('apache_version') do
+ setcode do
+ case Facter.value('osfamily')
+ when /RedHat/
+ if File.exists?('/usr/sbin/httpd')
+ version = parse_version(%x(/usr/sbin/httpd -v))
+ end
+ when /Debian/
+ if File.exists?('/usr/sbin/apache2')
+ version = parse_version(%x(/usr/sbin/apache2 -v))
+ end
+ else
+ version = 'undef'
+ end
+ end
+end
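A hypothetical manifest snippet consuming the fact, using Puppet's built-in versioncmp():

    if $::apache_version != '' and versioncmp($::apache_version, '2.4.0') >= 0 {
      apache::module { 'mpm_event': }
    }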
diff --git a/puppet/modules/apache/lib/puppet/parser/functions/guess_apache_version.rb b/puppet/modules/apache/lib/puppet/parser/functions/guess_apache_version.rb
new file mode 100644
index 00000000..7537f6d9
--- /dev/null
+++ b/puppet/modules/apache/lib/puppet/parser/functions/guess_apache_version.rb
@@ -0,0 +1,39 @@
+# Try to guess the version of apache to be installed.
+# Certain apache modules depend on each other, so we
+# need to evaluate the apache version before it gets
+# installed. This function decides which apache version
+# is going to be installed based on the `operatingsystemrelease`
+# fact.
+module Puppet::Parser::Functions
+ newfunction(:guess_apache_version, :type => :rvalue) do |args|
+ release = lookupvar('operatingsystemrelease')
+ unknown = 'unknown'
+
+ case lookupvar('operatingsystem')
+
+ when 'Debian'
+ case release
+ when /^7.*/
+ version = '2.2'
+ when /^8.*/
+ version = '2.4'
+ else
+ version = unknown
+ end
+
+ when 'Ubuntu'
+ case release
+ when /(12.04|12.10|13.04|13.10)/
+ version = '2.2'
+ when /(14.04|14.10|15.04|15.10|16.04)/
+ version = '2.4'
+ else
+ version = unknown
+ end
+
+ else
+ version = unknown
+ end
+ version
+ end
+end
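This mirrors how apache::module::alias (later in this patch) consumes the function:

    if guess_apache_version() == '2.4' {
      class { 'apache::module::authz_core': ensure => present }
    }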
diff --git a/puppet/modules/apache/lib/puppet/parser/functions/htpasswd_sha1.rb b/puppet/modules/apache/lib/puppet/parser/functions/htpasswd_sha1.rb
new file mode 100644
index 00000000..937621d9
--- /dev/null
+++ b/puppet/modules/apache/lib/puppet/parser/functions/htpasswd_sha1.rb
@@ -0,0 +1,8 @@
+require 'digest/sha1'
+require 'base64'
+
+module Puppet::Parser::Functions
+ newfunction(:htpasswd_sha1, :type => :rvalue) do |args|
+ "{SHA}" + Base64.encode64(Digest::SHA1.digest(args[0]))
+ end
+end
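A hypothetical call from a manifest (the password is illustrative); the result can be fed to apache::htpasswd_user with password_iscrypted => true:

    $hash = htpasswd_sha1('s3cret')    # '{SHA}' followed by the base64 SHA1 digest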
diff --git a/puppet/modules/apache/manifests/base.pp b/puppet/modules/apache/manifests/base.pp
new file mode 100644
index 00000000..3f921599
--- /dev/null
+++ b/puppet/modules/apache/manifests/base.pp
@@ -0,0 +1,75 @@
+# setup base apache class
+class apache::base {
+ file{
+ 'vhosts_dir':
+ ensure => directory,
+ path => '/etc/apache2/vhosts.d',
+ purge => true,
+ recurse => true,
+ force => true,
+ notify => Service['apache'],
+ owner => root,
+ group => 0,
+ mode => '0644';
+ 'config_dir':
+ ensure => directory,
+ path => '/etc/apache2/conf.d',
+ owner => root,
+ group => 0,
+ mode => '0644';
+ 'include_dir':
+ ensure => directory,
+ path => '/etc/apache2/include.d',
+ purge => true,
+ recurse => true,
+ force => true,
+ notify => Service['apache'],
+ owner => root,
+ group => 0,
+ mode => '0644';
+ 'modules_dir':
+ ensure => directory,
+ path => '/etc/apache2/modules.d',
+ purge => true,
+ recurse => true,
+ force => true,
+ notify => Service['apache'],
+ owner => root,
+ group => 0,
+ mode => '0644';
+ 'htpasswd_dir':
+ ensure => directory,
+ path => '/var/www/htpasswds',
+ purge => true,
+ recurse => true,
+ force => true,
+ notify => Service['apache'],
+ owner => root,
+ group => 'apache',
+ mode => '0640';
+ 'web_dir':
+ ensure => directory,
+ path => '/var/www',
+ owner => root,
+ group => 0,
+ mode => '0644';
+ 'default_apache_index':
+ path => '/var/www/localhost/htdocs/index.html',
+ content => template('apache/default/default_index.erb'),
+ owner => root,
+ group => 0,
+ mode => '0644';
+ } -> anchor{'apache::basic_dirs::ready': }
+
+ apache::config::include{ 'defaults.inc': }
+ apache::config::global{ 'git.conf': }
+ if !$apache::no_default_site {
+ apache::vhost::file { '0-default': }
+ }
+
+ service{'apache':
+ ensure => running,
+ name => 'apache2',
+ enable => true,
+ }
+}
diff --git a/puppet/modules/apache/manifests/base/itk.pp b/puppet/modules/apache/manifests/base/itk.pp
new file mode 100644
index 00000000..7772bfdf
--- /dev/null
+++ b/puppet/modules/apache/manifests/base/itk.pp
@@ -0,0 +1,6 @@
+class apache::base::itk inherits apache::base {
+ File['htpasswd_dir']{
+ group => 0,
+ mode => 0644,
+ }
+}
diff --git a/puppet/modules/apache/manifests/centos.pp b/puppet/modules/apache/manifests/centos.pp
new file mode 100644
index 00000000..f4697155
--- /dev/null
+++ b/puppet/modules/apache/manifests/centos.pp
@@ -0,0 +1,86 @@
+### centos
+class apache::centos inherits apache::package {
+ $config_dir = '/etc/httpd'
+
+ Package[apache]{
+ name => 'httpd',
+ }
+ Service[apache]{
+ name => 'httpd',
+ restart => '/etc/init.d/httpd graceful',
+ }
+ File[vhosts_dir]{
+ path => "${config_dir}/vhosts.d",
+ }
+ File[config_dir]{
+ path => "${config_dir}/conf.d",
+ }
+ File[include_dir]{
+ path => "${config_dir}/include.d",
+ }
+ File[modules_dir]{
+ path => "${config_dir}/modules.d",
+ }
+ File[web_dir]{
+ path => '/var/www/vhosts',
+ }
+ File[default_apache_index]{
+ path => '/var/www/html/index.html',
+ }
+
+ if str2bool($::selinux) {
+ Selinux::Fcontext{
+ before => File[web_dir],
+ }
+ $seltype_rw = $::operatingsystemmajrelease ? {
+ 5 => 'httpd_sys_script_rw_t',
+ default => 'httpd_sys_rw_content_t'
+ }
+ selinux::fcontext{
+ [ '/var/www/vhosts/[^/]*/www(/.*)?',
+ '/var/www/vhosts/[^/]*/non_public(/.*)?',
+ '/var/www/vhosts/[^/]*/data(/.*)?',
+ '/var/www/vhosts/[^/]*/upload(/.*)?' ]:
+ require => Package['apache'],
+ setype => $seltype_rw;
+ '/var/www/vhosts/[^/]*/logs(/.*)?':
+ require => Package['apache'],
+ setype => 'httpd_log_t';
+ }
+ }
+ file{'apache_service_config':
+ path => '/etc/sysconfig/httpd',
+ source => [ "puppet:///modules/site_apache/service/CentOS/${::fqdn}/httpd",
+ 'puppet:///modules/site_apache/service/CentOS/httpd',
+ 'puppet:///modules/apache/service/CentOS/httpd' ],
+ require => Package['apache'],
+ notify => Service['apache'],
+ owner => root,
+ group => 0,
+ mode => '0644';
+ }
+
+  # make PidFile and Listen explicit in httpd.conf so that later
+  # classes (e.g. apache::centos::itk_plus) can toggle them
+ exec{
+ 'adjust_pidfile':
+ command => 'sed -i "s/^#PidFile \(.*\)/PidFile \1/g" /etc/httpd/conf/httpd.conf',
+ unless => 'grep -qE \'^PidFile \' /etc/httpd/conf/httpd.conf',
+ require => Package['apache'],
+ notify => Service['apache'];
+ 'adjust_listen':
+ command => 'sed -i "s/^#Listen 80/Listen 80/g" /etc/httpd/conf/httpd.conf',
+ unless => 'grep -qE \'^Listen 80\' /etc/httpd/conf/httpd.conf',
+ require => Package['apache'],
+ notify => Service['apache'];
+ }
+
+ apache::config::global{'00-listen.conf':
+ ensure => absent,
+ }
+
+ include apache::logrotate::centos
+
+ apache::config::global{ 'welcome.conf': }
+ apache::config::global{ 'vhosts.conf': }
+}
+
diff --git a/puppet/modules/apache/manifests/centos/itk.pp b/puppet/modules/apache/manifests/centos/itk.pp
new file mode 100644
index 00000000..20f4270d
--- /dev/null
+++ b/puppet/modules/apache/manifests/centos/itk.pp
@@ -0,0 +1,10 @@
+# http://hostby.net/home/2008/07/12/centos-5-and-mpm-itk/
+class apache::centos::itk inherits apache::centos {
+ include ::apache::base::itk
+ Package['apache']{
+ name => 'httpd-itk',
+ }
+ File['apache_service_config']{
+ source => "puppet:///modules/apache/service/${::operatingsystem}/httpd.itk"
+ }
+}
diff --git a/puppet/modules/apache/manifests/centos/itk_plus.pp b/puppet/modules/apache/manifests/centos/itk_plus.pp
new file mode 100644
index 00000000..0df92c84
--- /dev/null
+++ b/puppet/modules/apache/manifests/centos/itk_plus.pp
@@ -0,0 +1,20 @@
+# http://hostby.net/home/2008/07/12/centos-5-and-mpm-itk/
+class apache::centos::itk_plus inherits apache::centos::itk {
+ Exec['adjust_pidfile']{
+ command => "sed -i 's/^PidFile \\(.*\\)/#PidFile \\1/g' /etc/httpd/conf/httpd.conf",
+ unless => "grep -qE '^#PidFile ' /etc/httpd/conf/httpd.conf",
+ }
+ Exec['adjust_listen']{
+ command => "sed -i 's/^Listen 80/#Listen 80/g' /etc/httpd/conf/httpd.conf",
+ unless => "grep -qE '^#Listen 80' /etc/httpd/conf/httpd.conf",
+ }
+
+ Apache::Config::Global['00-listen.conf']{
+ ensure => 'present',
+ content => template("apache/itk_plus/${::operatingsystem}/00-listen.conf.erb"),
+ }
+
+ File['apache_service_config']{
+ source => "puppet:///modules/apache/service/CentOS/httpd.itk_plus"
+ }
+}
diff --git a/puppet/modules/apache/manifests/centos/module.pp b/puppet/modules/apache/manifests/centos/module.pp
new file mode 100644
index 00000000..3220d1f8
--- /dev/null
+++ b/puppet/modules/apache/manifests/centos/module.pp
@@ -0,0 +1,30 @@
+define apache::centos::module(
+ $ensure = present,
+ $source = '',
+ $destination = ''
+){
+ $modules_dir = "${apache::centos::config_dir}/modules.d"
+ $real_destination = $destination ? {
+ '' => "${modules_dir}/${name}.so",
+ default => $destination,
+ }
+ $real_source = $source ? {
+ '' => [
+ "puppet:///modules/site_apache/modules.d/${::fqdn}/${name}.so",
+ "puppet:///modules/site_apache/modules.d/${apache::cluster_node}/${name}.so",
+ "puppet:///modules/site_apache/modules.d/${name}.so",
+ "puppet:///modules/apache/modules.d/${::operatingsystem}/${name}.so",
+ "puppet:///modules/apache/modules.d/${name}.so"
+ ],
+ default => "puppet:///$source",
+ }
+ file{"modules_${name}.conf":
+ ensure => $ensure,
+ path => $real_destination,
+ source => $real_source,
+ require => [ File[modules_dir], Package[apache] ],
+ notify => Service[apache],
+ owner => root, group => 0, mode => 0755;
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/centos/worker.pp b/puppet/modules/apache/manifests/centos/worker.pp
new file mode 100644
index 00000000..f374bb70
--- /dev/null
+++ b/puppet/modules/apache/manifests/centos/worker.pp
@@ -0,0 +1,5 @@
+class apache::centos::worker inherits apache::centos {
+ File['apache_service_config']{
+ source => "puppet:///modules/apache/service/${::operatingsystem}/httpd.worker"
+ }
+}
diff --git a/puppet/modules/apache/manifests/config/file.pp b/puppet/modules/apache/manifests/config/file.pp
new file mode 100644
index 00000000..7b058691
--- /dev/null
+++ b/puppet/modules/apache/manifests/config/file.pp
@@ -0,0 +1,106 @@
+# deploy apache configuration file
+# by default we assume it's a global configuration file
+define apache::config::file(
+ $ensure = present,
+ $target = false,
+ $type = 'global',
+ $source = 'absent',
+ $content = 'absent',
+ $destination = 'absent'
+){
+ case $type {
+ 'include': { $confdir = 'include.d' }
+ 'global': { $confdir = 'conf.d' }
+ default: { fail("Wrong config file type specified for ${name}") }
+ }
+ $real_destination = $destination ? {
+ 'absent' => $::operatingsystem ? {
+ centos => "${apache::centos::config_dir}/${confdir}/${name}",
+ gentoo => "${apache::gentoo::config_dir}/${name}",
+ debian => "${apache::debian::config_dir}/${confdir}/${name}",
+ ubuntu => "${apache::ubuntu::config_dir}/${confdir}/${name}",
+ openbsd => "${apache::openbsd::config_dir}/${confdir}/${name}",
+ default => "/etc/apache2/${confdir}/${name}",
+ },
+ default => $destination
+ }
+ file{"apache_${name}":
+ ensure => $ensure,
+ path => $real_destination,
+ notify => Service[apache],
+ owner => root,
+ group => 0,
+ mode => '0644';
+ }
+
+ case $ensure {
+ 'absent', 'purged': {
+ # We want to avoid all stuff related to source and content
+ }
+ 'link': {
+ if $target {
+ File["apache_${name}"] {
+ target => $target,
+ }
+ }
+ }
+ default: {
+ case $content {
+ 'absent': {
+ $real_source = $source ? {
+ 'absent' => [
+ "puppet:///modules/site_apache/${confdir}/${::fqdn}/${name}",
+ "puppet:///modules/site_apache/${confdir}/${apache::cluster_node}/${name}",
+ "puppet:///modules/site_apache/${confdir}/${::operatingsystem}.${::operatingsystemmajrelease}/${name}",
+ "puppet:///modules/site_apache/${confdir}/${::operatingsystem}/${name}",
+ "puppet:///modules/site_apache/${confdir}/${name}",
+ "puppet:///modules/apache/${confdir}/${::operatingsystem}.${::operatingsystemmajrelease}/${name}",
+ "puppet:///modules/apache/${confdir}/${::operatingsystem}/${name}",
+ "puppet:///modules/apache/${confdir}/${name}"
+ ],
+ default => $source
+ }
+ File["apache_${name}"]{
+ source => $real_source,
+ }
+ }
+ default: {
+ case $content {
+ 'absent': {
+ $real_source = $source ? {
+ 'absent' => [
+ "puppet:///modules/site-apache/${confdir}/${::fqdn}/${name}",
+ "puppet:///modules/site-apache/${confdir}/${apache::cluster_node}/${name}",
+ "puppet:///modules/site-apache/${confdir}/${::operatingsystem}.${::operatingsystemmajrelease}/${name}",
+ "puppet:///modules/site-apache/${confdir}/${::operatingsystem}/${name}",
+ "puppet:///modules/site-apache/${confdir}/${name}",
+ "puppet:///modules/apache/${confdir}/${::operatingsystem}.${::operatingsystemmajrelease}/${name}",
+ "puppet:///modules/apache/${confdir}/${::operatingsystem}/${name}",
+ "puppet:///modules/apache/${confdir}/${name}"
+ ],
+ default => $source,
+ }
+ File["apache_${name}"]{
+ source => $real_source,
+ }
+ }
+ default: {
+ File["apache_${name}"]{
+ content => $content,
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ case $::operatingsystem {
+ openbsd: { info("no package dependency on ${::operatingsystem} for ${name}") }
+ default: {
+ File["apache_${name}"]{
+ require => Package[apache],
+ }
+ }
+ }
+}
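A usage sketch deploying a hypothetical global fragment through this define (name and content are illustrative):

    apache::config::file { 'charset.conf':
      type    => 'global',
      content => "AddDefaultCharset UTF-8\n",
    }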
diff --git a/puppet/modules/apache/manifests/config/global.pp b/puppet/modules/apache/manifests/config/global.pp
new file mode 100644
index 00000000..8b0389be
--- /dev/null
+++ b/puppet/modules/apache/manifests/config/global.pp
@@ -0,0 +1,18 @@
+# deploy apache configuration file (global)
+# wrapper for apache::config::file
+define apache::config::global(
+ $ensure = present,
+ $target = false,
+ $source = 'absent',
+ $content = 'absent',
+ $destination = 'absent'
+){
+ apache::config::file { "${name}":
+ ensure => $ensure,
+ target => $target,
+ type => 'global',
+ source => $source,
+ content => $content,
+ destination => $destination,
+ }
+}
diff --git a/puppet/modules/apache/manifests/config/include.pp b/puppet/modules/apache/manifests/config/include.pp
new file mode 100644
index 00000000..4d676f05
--- /dev/null
+++ b/puppet/modules/apache/manifests/config/include.pp
@@ -0,0 +1,17 @@
+# deploy apache configuration file (includes for vhosts)
+define apache::config::include(
+ $ensure = present,
+ $target = false,
+ $source = 'absent',
+ $content = 'absent',
+ $destination = 'absent'
+){
+ apache::config::file { "${name}":
+ ensure => $ensure,
+ target => $target,
+ type => 'include',
+ source => $source,
+ content => $content,
+ destination => $destination,
+ }
+}
diff --git a/puppet/modules/apache/manifests/debian.pp b/puppet/modules/apache/manifests/debian.pp
new file mode 100644
index 00000000..6ae4cee8
--- /dev/null
+++ b/puppet/modules/apache/manifests/debian.pp
@@ -0,0 +1,44 @@
+### debian
+class apache::debian inherits apache::package {
+ $config_dir = '/etc/apache2'
+
+ Package[apache] {
+ name => 'apache2',
+ }
+ File[vhosts_dir] {
+ path => "${config_dir}/sites-enabled",
+ }
+ File[modules_dir] {
+ path => "${config_dir}/mods-enabled",
+ }
+ File[htpasswd_dir] {
+ path => '/var/www/htpasswds',
+ group => 'www-data',
+ }
+ File[default_apache_index] {
+ path => '/var/www/index.html',
+ }
+ file { 'apache_main_config':
+ path => "${config_dir}/apache2.conf",
+ source => [ "puppet:///modules/site_apache/config/Debian.${::lsbdistcodename}/${::fqdn}/apache2.conf",
+ "puppet:///modules/site_apache/config/Debian/${::fqdn}/apache2.conf",
+ "puppet:///modules/site_apache/config/Debian.${::lsbdistcodename}/apache2.conf",
+ 'puppet:///modules/site_apache/config/Debian/apache2.conf',
+ "puppet:///modules/apache/config/Debian.${::lsbdistcodename}/${::fqdn}/apache2.conf",
+ "puppet:///modules/apache/config/Debian/${::fqdn}/apache2.conf",
+ "puppet:///modules/apache/config/Debian.${::lsbdistcodename}/apache2.conf",
+ 'puppet:///modules/apache/config/Debian/apache2.conf' ],
+ require => Package['apache'],
+ notify => Service['apache'],
+ owner => root,
+ group => 0,
+ mode => '0644';
+ }
+ apache::config::global{ 'charset': }
+ apache::config::global{ 'security': }
+ file { 'default_debian_apache_vhost':
+ ensure => absent,
+ path => '/etc/apache2/sites-enabled/000-default',
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/debian/itk.pp b/puppet/modules/apache/manifests/debian/itk.pp
new file mode 100644
index 00000000..718a81b3
--- /dev/null
+++ b/puppet/modules/apache/manifests/debian/itk.pp
@@ -0,0 +1,9 @@
+class apache::debian::itk inherits apache::debian {
+ File['htpasswd_dir']{
+ group => 0,
+ mode => 0644,
+ }
+ Package['apache']{
+ name => 'apache2-mpm-itk',
+ }
+}
diff --git a/puppet/modules/apache/manifests/debian/module.pp b/puppet/modules/apache/manifests/debian/module.pp
new file mode 100644
index 00000000..ed255155
--- /dev/null
+++ b/puppet/modules/apache/manifests/debian/module.pp
@@ -0,0 +1,48 @@
+# install/remove apache module on debian/ubuntu systems
+define apache::debian::module(
+ $ensure = present,
+ $package_name = 'absent',
+ $conf_source = '',
+ $conf_content = '',
+){
+ $modules_dir = "${apache::debian::config_dir}/mods"
+
+ if ($package_name != 'absent') {
+ package { $package_name:
+ ensure => $ensure,
+ notify => Service['apache'],
+ require => [ File['modules_dir'], Package['apache'] ],
+ }
+ $required_packages = [ 'apache', $package_name ]
+ }
+ else {
+ $required_packages = [ 'apache' ]
+ }
+
+ file {
+ "${modules_dir}-enabled/${name}.load":
+ ensure => "../mods-available/${name}.load",
+ notify => Service['apache'],
+ require => [ File['modules_dir'], Package[$required_packages] ];
+ "${modules_dir}-enabled/${name}.conf":
+ ensure => "../mods-available/${name}.conf",
+ notify => Service['apache'],
+ require => [ File['modules_dir'], Package[$required_packages] ];
+ "${modules_dir}-available/${name}.conf":
+ ensure => file,
+ notify => Service['apache'],
+ require => [ File['modules_dir'], Package[$required_packages] ];
+ }
+
+ if $conf_content != '' {
+ File["${modules_dir}-available/${name}.conf"] {
+ content => $conf_content,
+ }
+ }
+ elsif $conf_source != '' {
+ File["${modules_dir}-available/${name}.conf"] {
+ source => $conf_source,
+ }
+ }
+
+}
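Usage sketches on Debian/Ubuntu (the removeip package name matches the one used by apache::module::removeip later in this patch):

    apache::debian::module { 'expires': }
    apache::debian::module { 'removeip':
      package_name => 'libapache2-mod-removeip',
    }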
diff --git a/puppet/modules/apache/manifests/defaultdavdbdir.pp b/puppet/modules/apache/manifests/defaultdavdbdir.pp
new file mode 100644
index 00000000..c0e2a81a
--- /dev/null
+++ b/puppet/modules/apache/manifests/defaultdavdbdir.pp
@@ -0,0 +1,17 @@
+class apache::defaultdavdbdir {
+ file {
+ '/var/www/dav_db_dir' :
+ ensure => directory,
+ require => Package['apache'],
+ owner => root,
+ group => 0,
+ mode => 0755 ;
+ }
+ if $::selinux != 'false' {
+ selinux::fcontext {
+ ['/var/www/dav_db_dir/.+(/.*)?'] :
+ setype => 'httpd_var_lib_t',
+ before => File['/var/www/dav_db_dir'] ;
+ }
+ }
+}
diff --git a/puppet/modules/apache/manifests/defaultphpdirs.pp b/puppet/modules/apache/manifests/defaultphpdirs.pp
new file mode 100644
index 00000000..595744bb
--- /dev/null
+++ b/puppet/modules/apache/manifests/defaultphpdirs.pp
@@ -0,0 +1,31 @@
+# setup some directories for php
+class apache::defaultphpdirs {
+ file{
+ '/var/www/upload_tmp_dir':
+ ensure => directory,
+ require => Package['apache'],
+ owner => root,
+ group => 0,
+ mode => '0755';
+ '/var/www/session.save_path':
+ ensure => directory,
+ require => Package['apache'],
+ owner => root,
+ group => 0,
+ mode => '0755';
+ }
+
+ if str2bool($::selinux) {
+ $seltype_rw = $::operatingsystemmajrelease ? {
+ 5 => 'httpd_sys_script_rw_t',
+ default => 'httpd_sys_rw_content_t'
+ }
+ selinux::fcontext{
+ [ '/var/www/upload_tmp_dir/.+(/.*)?',
+ '/var/www/session.save_path/.+(/.*)?' ]:
+ require => Package['apache'],
+ setype => $seltype_rw,
+ before => File['/var/www/upload_tmp_dir','/var/www/session.save_path'];
+ }
+ }
+}
diff --git a/puppet/modules/apache/manifests/file.pp b/puppet/modules/apache/manifests/file.pp
new file mode 100644
index 00000000..b0a60ecb
--- /dev/null
+++ b/puppet/modules/apache/manifests/file.pp
@@ -0,0 +1,15 @@
+define apache::file(
+ $owner = root,
+ $group = 0,
+ $mode = 0640
+) {
+ file{$name:
+# as long as there are significant memory problems using
+# recurse we avoid it
+# recurse => true,
+ backup => false,
+ checksum => undef,
+ owner => $owner, group => $group, mode => $mode;
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/file/readonly.pp b/puppet/modules/apache/manifests/file/readonly.pp
new file mode 100644
index 00000000..6308d889
--- /dev/null
+++ b/puppet/modules/apache/manifests/file/readonly.pp
@@ -0,0 +1,12 @@
+define apache::file::readonly(
+ $owner = root,
+ $group = 0,
+ $mode = 0640
+) {
+ apache::file{$name:
+ owner => $owner,
+ group => $group,
+ mode => $mode,
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/file/rw.pp b/puppet/modules/apache/manifests/file/rw.pp
new file mode 100644
index 00000000..0f258bf3
--- /dev/null
+++ b/puppet/modules/apache/manifests/file/rw.pp
@@ -0,0 +1,13 @@
+# a file that is writable by apache
+define apache::file::rw(
+ $owner = root,
+ $group = 0,
+ $mode = '0660',
+) {
+ apache::file{$name:
+ owner => $owner,
+ group => $group,
+ mode => $mode,
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/gentoo.pp b/puppet/modules/apache/manifests/gentoo.pp
new file mode 100644
index 00000000..3a13977f
--- /dev/null
+++ b/puppet/modules/apache/manifests/gentoo.pp
@@ -0,0 +1,39 @@
+### gentoo
+class apache::gentoo inherits apache::package {
+ $config_dir = '/etc/apache2'
+
+ # needs module gentoo
+ gentoo::etcconfd {
+ 'apache2':
+ require => Package['apache'],
+ notify => Service['apache'],
+ }
+ Package['apache']{
+ category => 'www-servers',
+ }
+ File[vhosts_dir]{
+ path => "${config_dir}/vhosts.d",
+ }
+ File[modules_dir]{
+ path => "${config_dir}/modules.d",
+ }
+
+ apache::gentoo::module{
+ '00_default_settings':;
+ '00_error_documents':;
+ }
+ apache::config::file { 'default_vhost.include':
+ source => 'apache/vhosts.d/default_vhost.include',
+ destination => "${config_dir}/vhosts.d/default_vhost.include",
+ }
+
+ # set the default for the ServerName
+ file{"${config_dir}/modules.d/00_default_settings_ServerName.conf":
+ content => "ServerName ${::fqdn}\n",
+ require => Package[apache],
+ owner => root,
+ group => 0,
+ mode => '0644';
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/gentoo/module.pp b/puppet/modules/apache/manifests/gentoo/module.pp
new file mode 100644
index 00000000..1e9d03a6
--- /dev/null
+++ b/puppet/modules/apache/manifests/gentoo/module.pp
@@ -0,0 +1,30 @@
+define apache::gentoo::module(
+ $ensure = present,
+ $source = '',
+ $destination = ''
+){
+ $modules_dir = "${apache::gentoo::config_dir}/modules.d"
+ $real_destination = $destination ? {
+ '' => "${modules_dir}/${name}.conf",
+ default => $destination,
+ }
+ $real_source = $source ? {
+ '' => [
+ "puppet:///modules/site_apache/modules.d/${::fqdn}/${name}.conf",
+ "puppet:///modules/site_apache/modules.d/${apache::cluster_node}/${name}.conf",
+ "puppet:///modules/site_apache/modules.d/${name}.conf",
+ "puppet:///modules/apache/modules.d/${::operatingsystem}/${name}.conf",
+ "puppet:///modules/apache/modules.d/${name}.conf"
+ ],
+ default => "puppet:///$source",
+ }
+ file{"modules_${name}.conf":
+ ensure => $ensure,
+ path => $real_destination,
+ source => $real_source,
+ require => [ File[modules_dir], Package[apache] ],
+ notify => Service[apache],
+ owner => root, group => 0, mode => 0644;
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/htpasswd_user.pp b/puppet/modules/apache/manifests/htpasswd_user.pp
new file mode 100644
index 00000000..82fbce45
--- /dev/null
+++ b/puppet/modules/apache/manifests/htpasswd_user.pp
@@ -0,0 +1,34 @@
+# TODO: this should be rewritten as a native type
+define apache::htpasswd_user(
+ $password,
+ $password_iscrypted = false,
+ $ensure = 'present',
+ $site = 'absent',
+ $username = 'absent',
+ $path = 'absent'
+){
+ case $username {
+ 'absent': { $real_username = $name }
+ default: { $real_username = $username }
+ }
+ case $site {
+ 'absent': { $real_site = $name }
+ default: { $real_site = $site }
+ }
+ if $password_iscrypted {
+ $real_password = $password
+ } else {
+ $real_password = htpasswd_sha1($password)
+ }
+
+ case $path {
+ 'absent': { $real_path = "/var/www/htpasswds/${real_site}" }
+ default: { $real_path = $path }
+ }
+
+ file_line{"htpasswd_for_${real_site}":
+ ensure => $ensure,
+ path => $real_path,
+    line => "${real_username}:${real_password}",
+ }
+}
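A hypothetical entry for a protected site (names and password are illustrative); it appends a line of the form 'admin:{SHA}...' to /var/www/htpasswds/www.example.org:

    apache::htpasswd_user { 'admin@www.example.org':
      site     => 'www.example.org',
      username => 'admin',
      password => 'not-a-real-password',
    }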
diff --git a/puppet/modules/apache/manifests/include/joomla.pp b/puppet/modules/apache/manifests/include/joomla.pp
new file mode 100644
index 00000000..5adae30a
--- /dev/null
+++ b/puppet/modules/apache/manifests/include/joomla.pp
@@ -0,0 +1,3 @@
+class apache::include::joomla {
+ apache::config::include{'joomla.inc': }
+}
diff --git a/puppet/modules/apache/manifests/include/mod_fcgid.pp b/puppet/modules/apache/manifests/include/mod_fcgid.pp
new file mode 100644
index 00000000..b3c1cdc2
--- /dev/null
+++ b/puppet/modules/apache/manifests/include/mod_fcgid.pp
@@ -0,0 +1,7 @@
+class apache::include::mod_fcgid {
+ apache::config::global{'mod_fcgid.conf':
+ content => "<IfModule mod_fcgid.c>
+ FcgidFixPathinfo 1
+</IfModule>\n"
+ }
+}
diff --git a/puppet/modules/apache/manifests/include/silverstripe.pp b/puppet/modules/apache/manifests/include/silverstripe.pp
new file mode 100644
index 00000000..fd2484b7
--- /dev/null
+++ b/puppet/modules/apache/manifests/include/silverstripe.pp
@@ -0,0 +1,3 @@
+class apache::include::silverstripe {
+ apache::config::include{'silverstripe.inc': }
+}
diff --git a/puppet/modules/apache/manifests/includes.pp b/puppet/modules/apache/manifests/includes.pp
new file mode 100644
index 00000000..02502f82
--- /dev/null
+++ b/puppet/modules/apache/manifests/includes.pp
@@ -0,0 +1,5 @@
+# manifests/includes.pp
+
+class apache::includes {
+ apache::config::global{'do_includes.conf':}
+}
diff --git a/puppet/modules/apache/manifests/init.pp b/puppet/modules/apache/manifests/init.pp
new file mode 100644
index 00000000..ad1478a1
--- /dev/null
+++ b/puppet/modules/apache/manifests/init.pp
@@ -0,0 +1,44 @@
+#
+# apache module
+#
+# Copyright 2008, admin(at)immerda.ch
+# Copyright 2008, Puzzle ITC GmbH
+# Marcel Haerry haerry+puppet(at)puzzle.ch
+# Simon Josi josi+puppet(at)puzzle.ch
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of the GNU
+# General Public License version 3 as published by
+# the Free Software Foundation.
+#
+
+# manage a simple apache
+class apache(
+ $cluster_node = '',
+ $manage_shorewall = false,
+ $manage_munin = false,
+ $no_default_site = false,
+ $ssl = false,
+ $default_ssl_certificate_file = absent,
+ $default_ssl_certificate_key_file = absent,
+ $default_ssl_certificate_chain_file = absent,
+ $ssl_cipher_suite = 'HIGH:MEDIUM:!aNULL:!MD5'
+) {
+ case $::operatingsystem {
+ centos: { include apache::centos }
+ gentoo: { include apache::gentoo }
+ debian,ubuntu: { include apache::debian }
+ openbsd: { include apache::openbsd }
+ default: { include apache::base }
+ }
+ if $apache::manage_munin {
+ include apache::status
+ }
+ if $apache::manage_shorewall {
+ include shorewall::rules::http
+ }
+ if $ssl {
+ include apache::ssl
+ }
+}
+
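A minimal node-level declaration, assuming certificates already exist at the given (illustrative) paths:

    class { 'apache':
      ssl                              => true,
      no_default_site                  => true,
      default_ssl_certificate_file     => '/etc/ssl/certs/example.org.crt',
      default_ssl_certificate_key_file => '/etc/ssl/private/example.org.key',
    }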
diff --git a/puppet/modules/apache/manifests/itk.pp b/puppet/modules/apache/manifests/itk.pp
new file mode 100644
index 00000000..5292343d
--- /dev/null
+++ b/puppet/modules/apache/manifests/itk.pp
@@ -0,0 +1,11 @@
+# manifests/itk.pp
+#
+# see: http://mpm-itk.sesse.net/
+
+class apache::itk inherits apache {
+ case $::operatingsystem {
+ centos: { include ::apache::centos::itk }
+ debian: { include ::apache::debian::itk }
+ default: { include ::apache::base::itk }
+ }
+}
diff --git a/puppet/modules/apache/manifests/itk/lock.pp b/puppet/modules/apache/manifests/itk/lock.pp
new file mode 100644
index 00000000..4ad95faf
--- /dev/null
+++ b/puppet/modules/apache/manifests/itk/lock.pp
@@ -0,0 +1,4 @@
+class apache::itk::lock {
+ # This file resource is used to ensure that only one itk mode is used per host
+ file{'/var/www/.itk_mode_lock': ensure => absent }
+}
diff --git a/puppet/modules/apache/manifests/itk_plus.pp b/puppet/modules/apache/manifests/itk_plus.pp
new file mode 100644
index 00000000..7d9f721a
--- /dev/null
+++ b/puppet/modules/apache/manifests/itk_plus.pp
@@ -0,0 +1,10 @@
+# manifests/itk_plus.pp
+#
+# see: http://mpm-itk.sesse.net/
+
+class apache::itk_plus inherits apache::itk {
+ case $::operatingsystem {
+ centos: { include ::apache::centos::itk_plus }
+ default: { fail("itk plus mode is currently only implemented for CentOS") }
+ }
+}
diff --git a/puppet/modules/apache/manifests/itk_plus/lock.pp b/puppet/modules/apache/manifests/itk_plus/lock.pp
new file mode 100644
index 00000000..d540939d
--- /dev/null
+++ b/puppet/modules/apache/manifests/itk_plus/lock.pp
@@ -0,0 +1,4 @@
+class apache::itk_plus::lock {
+ # This file resource is used to ensure that only one itk mode is used per host
+ file{'/var/www/.itk_mode_lock': ensure => absent }
+}
diff --git a/puppet/modules/apache/manifests/logrotate/centos.pp b/puppet/modules/apache/manifests/logrotate/centos.pp
new file mode 100644
index 00000000..4381205d
--- /dev/null
+++ b/puppet/modules/apache/manifests/logrotate/centos.pp
@@ -0,0 +1,10 @@
+# manage log rotation for the default apache logs
+class apache::logrotate::centos {
+ augeas{'logrotate_httpd':
+ changes => [ 'rm /files/etc/logrotate.d/httpd/rule/file',
+ 'ins file before /files/etc/logrotate.d/httpd/rule/*[1]',
+ 'set /files/etc/logrotate.d/httpd/rule/file[1] /var/log/httpd/*log' ],
+ onlyif => 'get /files/etc/logrotate.d/httpd/rule/file[1] != "/var/log/httpd/*log"',
+ require => Package['apache'],
+ }
+}
diff --git a/puppet/modules/apache/manifests/logrotate/centos/vhosts.pp b/puppet/modules/apache/manifests/logrotate/centos/vhosts.pp
new file mode 100644
index 00000000..b1159a11
--- /dev/null
+++ b/puppet/modules/apache/manifests/logrotate/centos/vhosts.pp
@@ -0,0 +1,11 @@
+# add vhost folders to log rotation
+class apache::logrotate::centos::vhosts inherits apache::logrotate::centos {
+ Augeas['logrotate_httpd']{
+ changes => [ 'rm /files/etc/logrotate.d/httpd/rule/file',
+ 'ins file before /files/etc/logrotate.d/httpd/rule/*[1]',
+ 'ins file before /files/etc/logrotate.d/httpd/rule/*[1]',
+ 'set /files/etc/logrotate.d/httpd/rule/file[1] /var/log/httpd/*log',
+ 'set /files/etc/logrotate.d/httpd/rule/file[2] /var/www/vhosts/*/logs/*log' ],
+ onlyif => 'get /files/etc/logrotate.d/httpd/rule/file[2] != "/var/www/vhosts/*/logs/*log"',
+ }
+}
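A sketch of the resulting /etc/logrotate.d/httpd rule after the augeas change; the directives inside the block stay whatever the stock CentOS file ships:

    /var/log/httpd/*log
    /var/www/vhosts/*/logs/*log
    {
        missingok
        ...
    }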
diff --git a/puppet/modules/apache/manifests/mod_dav_svn.pp b/puppet/modules/apache/manifests/mod_dav_svn.pp
new file mode 100644
index 00000000..bdcc4abd
--- /dev/null
+++ b/puppet/modules/apache/manifests/mod_dav_svn.pp
@@ -0,0 +1,7 @@
+class apache::mod_dav_svn {
+ package{'mod_dav_svn':
+ ensure => installed,
+ require => Package['apache'],
+ notify => Service['apache'],
+ }
+}
diff --git a/puppet/modules/apache/manifests/mod_macro.pp b/puppet/modules/apache/manifests/mod_macro.pp
new file mode 100644
index 00000000..eed59e52
--- /dev/null
+++ b/puppet/modules/apache/manifests/mod_macro.pp
@@ -0,0 +1,7 @@
+class apache::mod_macro {
+ package{'mod_macro':
+ ensure => installed,
+ require => Package['apache'],
+ notify => Service['apache'],
+ }
+}
diff --git a/puppet/modules/apache/manifests/module.pp b/puppet/modules/apache/manifests/module.pp
new file mode 100644
index 00000000..cbcf2d04
--- /dev/null
+++ b/puppet/modules/apache/manifests/module.pp
@@ -0,0 +1,35 @@
+define apache::module (
+ $ensure = present, $source = '',
+ $destination = '', $module = '', $package_name = 'absent',
+ $conf_content = '', $conf_source = '',
+) {
+
+ $real_module = $module ? {
+ '' => $name,
+ default => $module,
+ }
+
+ case $operatingsystem {
+ 'centos': {
+ apache::centos::module { "$real_module":
+ ensure => $ensure, source => $source,
+ destination => $destination
+ }
+ }
+ 'gentoo': {
+ apache::gentoo::module { "$real_module":
+ ensure => $ensure, source => $source,
+ destination => $destination
+ }
+ }
+ 'debian','ubuntu': {
+ apache::debian::module { "$real_module":
+ ensure => $ensure, package_name => $package_name,
+ conf_content => $conf_content, conf_source => $conf_source
+ }
+ }
+ default: {
+ err('Your operating system does not have a module deployment mechanism defined')
+ }
+ }
+}
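The thin wrapper classes that follow reduce to one-liners like this usage sketch:

    apache::module { 'rewrite': }    # dispatches to apache::debian::module, apache::centos::module, ...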
diff --git a/puppet/modules/apache/manifests/module/alias.pp b/puppet/modules/apache/manifests/module/alias.pp
new file mode 100644
index 00000000..33d26efe
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/alias.pp
@@ -0,0 +1,14 @@
+# install mod_alias
+class apache::module::alias ( $ensure = present )
+{
+
+ apache::module { 'alias': ensure => $ensure }
+
+ # from 2.4, /etc/apache2/mods-enabled/alias.conf contains the "Require"
+ # directive which needs "authz_core" mod enabled
+
+ if ( guess_apache_version() == '2.4') {
+    class { 'apache::module::authz_core': ensure => $ensure }
+ }
+
+}
diff --git a/puppet/modules/apache/manifests/module/auth_basic.pp b/puppet/modules/apache/manifests/module/auth_basic.pp
new file mode 100644
index 00000000..4335a09c
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/auth_basic.pp
@@ -0,0 +1,6 @@
+# enable/disable auth_basic module
+class apache::module::auth_basic ( $ensure = present )
+{
+
+ apache::module { 'auth_basic': ensure => $ensure }
+}
diff --git a/puppet/modules/apache/manifests/module/authn_core.pp b/puppet/modules/apache/manifests/module/authn_core.pp
new file mode 100644
index 00000000..46baace0
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/authn_core.pp
@@ -0,0 +1,6 @@
+# enable/disable authn_core module
+class apache::module::authn_core ( $ensure = present )
+{
+
+ apache::module { 'authn_core': ensure => $ensure }
+}
diff --git a/puppet/modules/apache/manifests/module/authn_file.pp b/puppet/modules/apache/manifests/module/authn_file.pp
new file mode 100644
index 00000000..7c346d9b
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/authn_file.pp
@@ -0,0 +1,6 @@
+# enable/disable authn_file module
+class apache::module::authn_file ( $ensure = present )
+{
+
+ apache::module { 'authn_file': ensure => $ensure }
+}
diff --git a/puppet/modules/apache/manifests/module/authz_core.pp b/puppet/modules/apache/manifests/module/authz_core.pp
new file mode 100644
index 00000000..03b0617c
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/authz_core.pp
@@ -0,0 +1,7 @@
+# install mod_authz_core (needed e.g. by the alias mod config)
+class apache::module::authz_core ( $ensure = present )
+{
+
+ apache::module { 'authz_core': ensure => $ensure }
+
+}
diff --git a/puppet/modules/apache/manifests/module/authz_host.pp b/puppet/modules/apache/manifests/module/authz_host.pp
new file mode 100644
index 00000000..46c3a812
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/authz_host.pp
@@ -0,0 +1,6 @@
+# enable/disable authz_host module
+class apache::module::authz_host ( $ensure = present )
+{
+
+ apache::module { 'authz_host': ensure => $ensure }
+}
diff --git a/puppet/modules/apache/manifests/module/authz_user.pp b/puppet/modules/apache/manifests/module/authz_user.pp
new file mode 100644
index 00000000..84775727
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/authz_user.pp
@@ -0,0 +1,6 @@
+# enable/disable authz_user module
+class apache::module::authz_user ( $ensure = present )
+{
+
+ apache::module { 'authz_user': ensure => $ensure }
+}
diff --git a/puppet/modules/apache/manifests/module/cgi.pp b/puppet/modules/apache/manifests/module/cgi.pp
new file mode 100644
index 00000000..ce212e97
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/cgi.pp
@@ -0,0 +1,6 @@
+# enable/disable cgi module
+class apache::module::cgi ( $ensure = present )
+{
+
+ apache::module { 'cgi': ensure => $ensure }
+}
diff --git a/puppet/modules/apache/manifests/module/dir.pp b/puppet/modules/apache/manifests/module/dir.pp
new file mode 100644
index 00000000..da2dc1ee
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/dir.pp
@@ -0,0 +1,6 @@
+# enable/disable dir module
+class apache::module::dir ( $ensure = present )
+{
+
+ apache::module { 'dir': ensure => $ensure }
+}
diff --git a/puppet/modules/apache/manifests/module/env.pp b/puppet/modules/apache/manifests/module/env.pp
new file mode 100644
index 00000000..f358e363
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/env.pp
@@ -0,0 +1,7 @@
+# install mod_env, needed by api.conf
+class apache::module::env ( $ensure = present )
+{
+
+ apache::module { 'env': ensure => $ensure }
+
+}
diff --git a/puppet/modules/apache/manifests/module/expires.pp b/puppet/modules/apache/manifests/module/expires.pp
new file mode 100644
index 00000000..c56f416b
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/expires.pp
@@ -0,0 +1,5 @@
+# enable/disable expires module
+class apache::module::expires ( $ensure = present )
+{
+ apache::module { 'expires': ensure => $ensure }
+}
diff --git a/puppet/modules/apache/manifests/module/headers.pp b/puppet/modules/apache/manifests/module/headers.pp
new file mode 100644
index 00000000..d1d587b0
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/headers.pp
@@ -0,0 +1,6 @@
+# enable/disable headers module
+class apache::module::headers ( $ensure = present )
+{
+
+ apache::module { 'headers': ensure => $ensure }
+}
diff --git a/puppet/modules/apache/manifests/module/mime.pp b/puppet/modules/apache/manifests/module/mime.pp
new file mode 100644
index 00000000..5d691d30
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/mime.pp
@@ -0,0 +1,6 @@
+# enable/disable mime module
+class apache::module::mime ( $ensure = present )
+{
+
+ apache::module { 'mime': ensure => $ensure }
+}
diff --git a/puppet/modules/apache/manifests/module/mpm_event.pp b/puppet/modules/apache/manifests/module/mpm_event.pp
new file mode 100644
index 00000000..a824cb37
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/mpm_event.pp
@@ -0,0 +1,7 @@
+# install mod_mpm_event (needed for jessie hosts)
+class apache::module::mpm_event ( $ensure = present )
+{
+
+ apache::module { 'mpm_event': ensure => $ensure }
+
+}
diff --git a/puppet/modules/apache/manifests/module/mpm_prefork.pp b/puppet/modules/apache/manifests/module/mpm_prefork.pp
new file mode 100644
index 00000000..7c08da7f
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/mpm_prefork.pp
@@ -0,0 +1,6 @@
+# enable/disable mpm_prefork module
+class apache::module::mpm_prefork ( $ensure = present )
+{
+
+ apache::module { 'mpm_prefork': ensure => $ensure }
+}
diff --git a/puppet/modules/apache/manifests/module/negotiation.pp b/puppet/modules/apache/manifests/module/negotiation.pp
new file mode 100644
index 00000000..15334fb9
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/negotiation.pp
@@ -0,0 +1,6 @@
+# enable/disable negotiation module
+class apache::module::negotiation ( $ensure = present )
+{
+
+ apache::module { 'negotiation': ensure => $ensure }
+}
diff --git a/puppet/modules/apache/manifests/module/php5.pp b/puppet/modules/apache/manifests/module/php5.pp
new file mode 100644
index 00000000..ffb571fe
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/php5.pp
@@ -0,0 +1,6 @@
+# enable/disable php5 module
+class apache::module::php5 ( $ensure = present )
+{
+
+ apache::module { 'php5': ensure => $ensure }
+}
diff --git a/puppet/modules/apache/manifests/module/removeip.pp b/puppet/modules/apache/manifests/module/removeip.pp
new file mode 100644
index 00000000..11088fc1
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/removeip.pp
@@ -0,0 +1,6 @@
+# enable/disable removeip module
+class apache::module::removeip ( $ensure = present )
+{
+ package { 'libapache2-mod-removeip': ensure => $ensure }
+ apache::module { 'removeip': ensure => $ensure }
+}
diff --git a/puppet/modules/apache/manifests/module/rewrite.pp b/puppet/modules/apache/manifests/module/rewrite.pp
new file mode 100644
index 00000000..24ef899b
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/rewrite.pp
@@ -0,0 +1,6 @@
+# enable/disable rewrite module
+class apache::module::rewrite ( $ensure = present )
+{
+
+ apache::module { 'rewrite': ensure => $ensure }
+}
diff --git a/puppet/modules/apache/manifests/module/socache_shmcb.pp b/puppet/modules/apache/manifests/module/socache_shmcb.pp
new file mode 100644
index 00000000..4c53adde
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/socache_shmcb.pp
@@ -0,0 +1,6 @@
+# enable/disable socache_shmcb module
+class apache::module::socache_shmcb ( $ensure = present )
+{
+
+ apache::module { 'socache_shmcb': ensure => $ensure }
+}
diff --git a/puppet/modules/apache/manifests/module/status.pp b/puppet/modules/apache/manifests/module/status.pp
new file mode 100644
index 00000000..cfc437ca
--- /dev/null
+++ b/puppet/modules/apache/manifests/module/status.pp
@@ -0,0 +1,6 @@
+# enable/disable status module
+class apache::module::status ( $ensure = present )
+{
+
+  apache::module { 'status': ensure => $ensure }
+}
diff --git a/puppet/modules/apache/manifests/mozilla_autoconfig.pp b/puppet/modules/apache/manifests/mozilla_autoconfig.pp
new file mode 100644
index 00000000..f16e5ec7
--- /dev/null
+++ b/puppet/modules/apache/manifests/mozilla_autoconfig.pp
@@ -0,0 +1,37 @@
+# set up autoconfig information
+#
+# This creates a global autoconfig file that maps any of
+# the domains hosted on this host to a certain provider
+# configuration. That means you get zero-setup autoconfig
+# for any domain whose website and email you host on this
+# machine.
+# By default you only need to define the provider, which
+# is usually your main domain. Everything else should be
+# derived from that.
+# You can, however, still fine-tune individual settings.
+class apache::mozilla_autoconfig(
+ $provider,
+ $display_name = undef,
+ $shortname = undef,
+ $imap_server = undef,
+ $pop_server = undef,
+ $smtp_server = undef,
+ $documentation_url = undef,
+) {
+ apache::config::global { 'mozilla_autoconfig.conf': }
+
+ file{
+ '/var/www/autoconfig':
+ ensure => directory,
+ require => Package['apache'],
+ owner => root,
+ group => apache,
+ mode => '0640';
+ '/var/www/autoconfig/config.shtml':
+ content => template('apache/webfiles/autoconfig/config.shtml.erb'),
+ owner => root,
+ group => apache,
+ mode => '0640',
+ before => Service['apache'],
+ }
+}
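A hypothetical declaration for a provider (all host names are illustrative):

    class { 'apache::mozilla_autoconfig':
      provider    => 'example.org',
      imap_server => 'imap.example.org',
      smtp_server => 'smtp.example.org',
    }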
diff --git a/puppet/modules/apache/manifests/munin.pp b/puppet/modules/apache/manifests/munin.pp
new file mode 100644
index 00000000..46af1723
--- /dev/null
+++ b/puppet/modules/apache/manifests/munin.pp
@@ -0,0 +1,12 @@
+# manage apache monitoring things
+class apache::munin {
+ if $::osfamily == 'Debian' {
+ include perl::extensions::libwww
+ }
+
+ munin::plugin{ [ 'apache_accesses', 'apache_processes', 'apache_volume' ]: }
+ munin::plugin::deploy { 'apache_activity':
+ source => 'apache/munin/apache_activity',
+ seltype => 'munin_services_plugin_exec_t',
+ }
+}
diff --git a/puppet/modules/apache/manifests/noiplog.pp b/puppet/modules/apache/manifests/noiplog.pp
new file mode 100644
index 00000000..355d7e6a
--- /dev/null
+++ b/puppet/modules/apache/manifests/noiplog.pp
@@ -0,0 +1,5 @@
+class apache::noiplog {
+ apache::config::global{ 'noip_log.conf':
+ content => 'LogFormat "127.0.0.1 - - %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %T %V" noip';
+ }
+}
diff --git a/puppet/modules/apache/manifests/openbsd.pp b/puppet/modules/apache/manifests/openbsd.pp
new file mode 100644
index 00000000..96a216ad
--- /dev/null
+++ b/puppet/modules/apache/manifests/openbsd.pp
@@ -0,0 +1,75 @@
+### openbsd
+class apache::openbsd inherits apache::base {
+ $config_dir = '/var/www'
+
+ File[vhosts_dir]{
+ path => "${config_dir}/vhosts.d",
+ }
+ File[modules_dir]{
+ path => "${config_dir}/conf/modules",
+ }
+ File[config_dir]{
+ path => "${config_dir}/conf.d",
+ }
+ File[include_dir]{
+ path => "${config_dir}/include.d",
+ }
+ File['htpasswd_dir']{
+ group => www,
+ }
+ File[web_dir]{
+ group => daemon,
+ }
+ file_line{'enable_apache_on_boot':
+ path => '/etc/rc.conf.local',
+ line => 'httpd flags=""',
+ }
+ file{'apache_main_config':
+ path => "${config_dir}/conf/httpd.conf",
+ source => ["puppet:///modules/site_apache/config/OpenBSD/${::fqdn}/httpd.conf",
+ "puppet:///modules/site_apache/config/OpenBSD/${apache::cluster_node}/httpd.conf",
+ 'puppet:///modules/site_apache/config/OpenBSD//httpd.conf',
+ 'puppet:///modules/apache/config/OpenBSD/httpd.conf' ],
+ notify => Service['apache'],
+ owner => root,
+ group => 0,
+ mode => '0644';
+ }
+ File[default_apache_index] {
+ path => '/var/www/htdocs/default/www/index.html',
+ }
+ file{'/opt/bin/restart_apache.sh':
+ source => 'puppet:///modules/apache/scripts/OpenBSD/bin/restart_apache.sh',
+ require => File['/opt/bin'],
+ owner => root,
+ group => 0,
+ mode => '0700';
+ }
+
+ ::apache::vhost::webdir{'default':
+ datadir => false,
+ }
+
+ Service['apache']{
+ restart => '/opt/bin/restart_apache.sh',
+ status => 'apachectl status',
+ start => 'apachectl start',
+ stop => 'apachectl stop',
+ }
+ file{'/opt/bin/apache_logrotate.sh':
+ source => 'puppet:///modules/apache/scripts/OpenBSD/bin/apache_logrotate.sh',
+ require => File['/opt/bin'],
+ owner => root,
+ group => 0,
+ mode => '0700';
+ }
+ cron { 'update_apache_logrotation':
+ command => '/bin/sh /opt/bin/apache_logrotate.sh > /etc/newsyslog_apache.conf',
+ minute => '1',
+ hour => '1',
+ }
+ cron { 'run_apache_logrotation':
+ command => '/usr/bin/newsyslog -f /etc/newsyslog_apache.conf > /dev/null',
+ minute => '10',
+ }
+}
diff --git a/puppet/modules/apache/manifests/package.pp b/puppet/modules/apache/manifests/package.pp
new file mode 100644
index 00000000..3308b371
--- /dev/null
+++ b/puppet/modules/apache/manifests/package.pp
@@ -0,0 +1,32 @@
+# deploy apache as package
+class apache::package inherits apache::base {
+ package { 'apache':
+ name => 'apache',
+ ensure => present,
+ }
+ File['vhosts_dir']{
+ require => Package[apache],
+ }
+ File['config_dir']{
+ require => Package[apache],
+ }
+ Service['apache']{
+ require => Package[apache],
+ }
+ File['default_apache_index']{
+ require => Package[apache],
+ }
+ File['modules_dir']{
+ require => Package[apache],
+ }
+ File['include_dir']{
+ require => Package[apache],
+ }
+ File['web_dir']{
+ require => Package[apache],
+ }
+ File['htpasswd_dir']{
+ require => Package[apache],
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/package/itk.pp b/puppet/modules/apache/manifests/package/itk.pp
new file mode 100644
index 00000000..4ca9960e
--- /dev/null
+++ b/puppet/modules/apache/manifests/package/itk.pp
@@ -0,0 +1,5 @@
+class apache::package::itk inherits apache::package {
+ Package['apache'] {
+ name => 'apache2-itk',
+ }
+}
diff --git a/puppet/modules/apache/manifests/sftponly.pp b/puppet/modules/apache/manifests/sftponly.pp
new file mode 100644
index 00000000..ece726b0
--- /dev/null
+++ b/puppet/modules/apache/manifests/sftponly.pp
@@ -0,0 +1,5 @@
+class apache::sftponly {
+ case $::operatingsystem {
+ centos: { include apache::sftponly::centos }
+ }
+}
diff --git a/puppet/modules/apache/manifests/sftponly/centos.pp b/puppet/modules/apache/manifests/sftponly/centos.pp
new file mode 100644
index 00000000..0f2a43d8
--- /dev/null
+++ b/puppet/modules/apache/manifests/sftponly/centos.pp
@@ -0,0 +1,10 @@
+# manage sftponly group and apache
+# user for access
+class apache::sftponly::centos {
+ require user::groups::sftponly
+ user::groups::manage_user{'apache':
+ group => 'sftponly',
+ require => Package['apache'],
+ notify => Service['apache'],
+ }
+}
diff --git a/puppet/modules/apache/manifests/ssl.pp b/puppet/modules/apache/manifests/ssl.pp
new file mode 100644
index 00000000..bfef7adc
--- /dev/null
+++ b/puppet/modules/apache/manifests/ssl.pp
@@ -0,0 +1,13 @@
+# manifests/ssl.pp
+
+class apache::ssl {
+ case $::operatingsystem {
+ centos: { include apache::ssl::centos }
+ openbsd: { include apache::ssl::openbsd }
+ debian: { include apache::ssl::debian }
+ default: { include apache::ssl::base }
+ }
+ if $apache::manage_shorewall {
+ include shorewall::rules::https
+ }
+}
diff --git a/puppet/modules/apache/manifests/ssl/base.pp b/puppet/modules/apache/manifests/ssl/base.pp
new file mode 100644
index 00000000..3f329136
--- /dev/null
+++ b/puppet/modules/apache/manifests/ssl/base.pp
@@ -0,0 +1,15 @@
+# basic defaults for ssl support
+class apache::ssl::base (
+) {
+ apache::config::include {
+ 'ssl_defaults.inc':
+ content => template('apache/include.d/ssl_defaults.inc.erb');
+ }
+
+ if !$apache::no_default_site {
+ apache::vhost::file{
+ '0-default_ssl':
+ content => template('apache/vhosts/0-default_ssl.conf.erb');
+ }
+ }
+}
diff --git a/puppet/modules/apache/manifests/ssl/centos.pp b/puppet/modules/apache/manifests/ssl/centos.pp
new file mode 100644
index 00000000..7bc8c895
--- /dev/null
+++ b/puppet/modules/apache/manifests/ssl/centos.pp
@@ -0,0 +1,12 @@
+class apache::ssl::centos inherits apache::ssl::base {
+ package { 'mod_ssl':
+ name => 'mod_ssl',
+ ensure => present,
+ require => Package[apache],
+ }
+ ::apache::config::global{ 'ssl.conf': }
+
+ apache::config::global{'00-listen-ssl.conf':
+ ensure => absent,
+ }
+}
diff --git a/puppet/modules/apache/manifests/ssl/debian.pp b/puppet/modules/apache/manifests/ssl/debian.pp
new file mode 100644
index 00000000..99dfe36e
--- /dev/null
+++ b/puppet/modules/apache/manifests/ssl/debian.pp
@@ -0,0 +1,4 @@
+class apache::ssl::debian inherits apache::ssl::base {
+ apache::debian::module { 'ssl': ensure => present }
+ apache::config::global { 'ssl.conf': }
+}
diff --git a/puppet/modules/apache/manifests/ssl/itk.pp b/puppet/modules/apache/manifests/ssl/itk.pp
new file mode 100644
index 00000000..5fd3aaf6
--- /dev/null
+++ b/puppet/modules/apache/manifests/ssl/itk.pp
@@ -0,0 +1,8 @@
+# manifests/ssl/itk.pp
+
+class apache::ssl::itk inherits apache::ssl {
+ case $::operatingsystem {
+ centos: { include apache::ssl::itk::centos }
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/ssl/itk/centos.pp b/puppet/modules/apache/manifests/ssl/itk/centos.pp
new file mode 100644
index 00000000..fb6a4a6b
--- /dev/null
+++ b/puppet/modules/apache/manifests/ssl/itk/centos.pp
@@ -0,0 +1,6 @@
+class apache::ssl::itk::centos inherits apache::ssl::centos {
+ Package['mod_ssl']{
+ name => 'mod_ssl-itk',
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/ssl/itk_plus.pp b/puppet/modules/apache/manifests/ssl/itk_plus.pp
new file mode 100644
index 00000000..0c8e6679
--- /dev/null
+++ b/puppet/modules/apache/manifests/ssl/itk_plus.pp
@@ -0,0 +1,6 @@
+class apache::ssl::itk_plus inherits apache::ssl::itk {
+ case $::operatingsystem {
+ centos: { include ::apache::ssl::itk_plus::centos }
+ default: { fail("itk plus mode is currently only implemented for CentOS") }
+ }
+}
diff --git a/puppet/modules/apache/manifests/ssl/itk_plus/centos.pp b/puppet/modules/apache/manifests/ssl/itk_plus/centos.pp
new file mode 100644
index 00000000..00fb4729
--- /dev/null
+++ b/puppet/modules/apache/manifests/ssl/itk_plus/centos.pp
@@ -0,0 +1,11 @@
+class apache::ssl::itk_plus::centos inherits apache::ssl::centos {
+ include apache::ssl::itk::centos
+ Apache::Config::Global['ssl.conf']{
+ source => "modules/apache/itk_plus/conf.d/${::operatingsystem}/ssl.conf",
+ }
+
+ Apache::Config::Global['00-listen-ssl.conf']{
+ ensure => 'present',
+ content => template("apache/itk_plus/${::operatingsystem}/00-listen-ssl.conf.erb"),
+ }
+}
diff --git a/puppet/modules/apache/manifests/ssl/openbsd.pp b/puppet/modules/apache/manifests/ssl/openbsd.pp
new file mode 100644
index 00000000..43bc6803
--- /dev/null
+++ b/puppet/modules/apache/manifests/ssl/openbsd.pp
@@ -0,0 +1,18 @@
+class apache::ssl::openbsd inherits apache::openbsd {
+ include apache::ssl::base
+
+ File_line['enable_apache_on_boot']{
+ ensure => 'absent',
+ }
+ file_line{'enable_apachessl_on_boot':
+ path => '/etc/rc.conf.local',
+ line => 'httpd flags="-DSSL"',
+ }
+
+ File['/opt/bin/restart_apache.sh']{
+ source => "puppet:///modules/apache/scripts/OpenBSD/bin/restart_apache_ssl.sh",
+ }
+ Service['apache']{
+ start => 'apachectl startssl',
+ }
+}
diff --git a/puppet/modules/apache/manifests/status.pp b/puppet/modules/apache/manifests/status.pp
new file mode 100644
index 00000000..c5070130
--- /dev/null
+++ b/puppet/modules/apache/manifests/status.pp
@@ -0,0 +1,13 @@
+# enable apache status page
+# manage munin plugins if requested
+class apache::status {
+ case $::operatingsystem {
+ centos: { include apache::status::centos }
+ debian: { include apache::status::debian }
+ default: { include apache::status::base }
+ }
+ if $apache::manage_munin {
+ include apache::munin
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/status/base.pp b/puppet/modules/apache/manifests/status/base.pp
new file mode 100644
index 00000000..df6c90b9
--- /dev/null
+++ b/puppet/modules/apache/manifests/status/base.pp
@@ -0,0 +1 @@
+class apache::status::base {}
diff --git a/puppet/modules/apache/manifests/status/centos.pp b/puppet/modules/apache/manifests/status/centos.pp
new file mode 100644
index 00000000..d893707d
--- /dev/null
+++ b/puppet/modules/apache/manifests/status/centos.pp
@@ -0,0 +1,5 @@
+### centos
+class apache::status::centos {
+ ::apache::config::global{ 'status.conf': }
+}
+
diff --git a/puppet/modules/apache/manifests/status/debian.pp b/puppet/modules/apache/manifests/status/debian.pp
new file mode 100644
index 00000000..222b85c7
--- /dev/null
+++ b/puppet/modules/apache/manifests/status/debian.pp
@@ -0,0 +1,4 @@
+# enable status module on debian
+class apache::status::debian {
+ ::apache::debian::module { 'status': }
+}
diff --git a/puppet/modules/apache/manifests/vhost.pp b/puppet/modules/apache/manifests/vhost.pp
new file mode 100644
index 00000000..da1ce901
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost.pp
@@ -0,0 +1,127 @@
+# this is a wrapper for apache::vhost::file and apache::vhost::template below
+#
+# vhost_mode: which option is chosen to deploy the vhost
+# - template: generate it from a template (default)
+# - file: deploy a vhost file (apache::vhost::file will be called directly)
+#
+# logmode:
+# - default: Do normal logging to CustomLog and ErrorLog
+# - nologs: Send all logging to /dev/null
+# - anonym: Don't log IPs for CustomLog, send ErrorLog to /dev/null
+# - semianonym: Don't log IPs for CustomLog, log normal ErrorLog
+#
+# run_mode: controls in which mode the vhost should be run; there are different setups
+# possible:
+# - normal: (*default*) run vhost with the currently active worker (default: prefork),
+# don't set up anything special
+# - itk: run vhost with the mpm_itk module (Incompatibility: cannot be used in combination
+# with 'proxy-itk' & 'static-itk' mode)
+# - proxy-itk: run vhost with a dual prefork/itk setup, where prefork just proxies all the
+# requests to the itk setup, which listens only on the loopback device.
+# (Incompatibility: cannot be used in combination with the itk setup.)
+# - static-itk: run vhost with a dual prefork/itk setup, where prefork serves all the static
+# content and proxies the dynamic calls to the itk setup, which listens only on
+# the loopback device (Incompatibility: cannot be used in combination with
+# 'itk' mode)
+#
+# mod_security: Whether we use mod_security or not (will include the mod_security module)
+# - false: (*default*) don't activate mod_security
+# - true: activate mod_security
+#
+define apache::vhost(
+ $ensure = present,
+ $configuration = {},
+ $path = 'absent',
+ $path_is_webdir = false,
+ $logpath = 'absent',
+ $logmode = 'default',
+ $logprefix = '',
+ $vhost_mode = 'template',
+ $template_partial = 'apache/vhosts/static/partial.erb',
+ $vhost_source = 'absent',
+ $vhost_destination = 'absent',
+ $content = 'absent',
+ $domain = 'absent',
+ $domainalias = 'absent',
+ $server_admin = 'absent',
+ $allow_override = 'None',
+ $php_settings = {},
+ $php_options = {},
+ $cgi_binpath = 'absent',
+ $default_charset = 'absent',
+ $do_includes = false,
+ $options = 'absent',
+ $additional_options = 'absent',
+ $run_mode = 'normal',
+ $run_uid = 'absent',
+ $run_gid = 'absent',
+ $ssl_mode = false,
+ $htpasswd_file = 'absent',
+ $htpasswd_path = 'absent',
+ $mod_security = true,
+ $mod_security_relevantonly = true,
+ $mod_security_rules_to_disable = [],
+ $mod_security_additional_options = 'absent',
+ $use_mod_macro = false,
+ $ldap_auth = false,
+ $ldap_user = 'any',
+ $passing_extension = 'absent',
+ $gempath = 'absent'
+) {
+ # file or template mode?
+ case $vhost_mode {
+ 'file': {
+ apache::vhost::file{$name:
+ ensure => $ensure,
+ configuration => $configuration,
+ vhost_source => $vhost_source,
+ vhost_destination => $vhost_destination,
+ do_includes => $do_includes,
+ run_mode => $run_mode,
+ mod_security => $mod_security,
+ htpasswd_file => $htpasswd_file,
+ htpasswd_path => $htpasswd_path,
+ use_mod_macro => $use_mod_macro,
+ }
+ }
+ 'template': {
+ apache::vhost::template{$name:
+ ensure => $ensure,
+ configuration => $configuration,
+ path => $path,
+ path_is_webdir => $path_is_webdir,
+ logpath => $logpath,
+ logmode => $logmode,
+ logprefix => $logprefix,
+ domain => $domain,
+ domainalias => $domainalias,
+ server_admin => $server_admin,
+ cgi_binpath => $cgi_binpath,
+ allow_override => $allow_override,
+ do_includes => $do_includes,
+ options => $options,
+ additional_options => $additional_options,
+ default_charset => $default_charset,
+ php_settings => $php_settings,
+ php_options => $php_options,
+ run_mode => $run_mode,
+ run_uid => $run_uid,
+ run_gid => $run_gid,
+ template_partial => $template_partial,
+ ssl_mode => $ssl_mode,
+ htpasswd_file => $htpasswd_file,
+ htpasswd_path => $htpasswd_path,
+ ldap_auth => $ldap_auth,
+ ldap_user => $ldap_user,
+ mod_security => $mod_security,
+ mod_security_relevantonly => $mod_security_relevantonly,
+ mod_security_rules_to_disable => $mod_security_rules_to_disable,
+ mod_security_additional_options => $mod_security_additional_options,
+ use_mod_macro => $use_mod_macro,
+ passing_extension => $passing_extension,
+ gempath => $gempath,
+ }
+ }
+ default: { fail("No such vhost_mode: ${vhost_mode} defined for ${name}.") }
+ }
+}
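As a usage sketch of the wrapper above (the vhost name, domain, uid/gid and ssl choice are illustrative and not part of the patch), a template-mode vhost with the itk worker and a semi-anonymous log could be declared like this:

    # hypothetical node manifest; every value here is an example
    apache::vhost { 'www.example.org':
      domain      => 'www.example.org',
      domainalias => 'example.org',
      logmode     => 'semianonym',  # keep the normal ErrorLog, strip client IPs from CustomLog
      run_mode    => 'itk',         # needs mpm_itk; not combinable with the *-itk proxy modes
      run_uid     => 'wwwexample',
      run_gid     => 'wwwexample',
      ssl_mode    => true,
    }
    # vhost_mode defaults to 'template', so this renders apache/vhosts/static/partial.erb;
    # with vhost_mode => 'file' the same title would be passed straight to apache::vhost::file.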
diff --git a/puppet/modules/apache/manifests/vhost/davdbdir.pp b/puppet/modules/apache/manifests/vhost/davdbdir.pp
new file mode 100644
index 00000000..459167c9
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/davdbdir.pp
@@ -0,0 +1,40 @@
+define apache::vhost::davdbdir(
+ $ensure = present,
+ $dav_db_dir = 'absent',
+ $documentroot_owner = apache,
+ $documentroot_group = 0,
+ $documentroot_mode = 0750,
+ $run_mode = 'normal',
+ $run_uid = 'absent'
+){
+ # php db dir
+ case $dav_db_dir {
+ 'absent': {
+ include apache::defaultdavdbdir
+ $real_dav_db_dir = "/var/www/dav_db_dir/${name}"
+ }
+ default: { $real_dav_db_dir = $dav_db_dir }
+ }
+
+ case $ensure {
+ absent: {
+ file{$real_dav_db_dir:
+ ensure => absent,
+ purge => true,
+ force => true,
+ recurse => true,
+ }
+ }
+ default: {
+ file{$real_dav_db_dir:
+ ensure => directory,
+ owner => $run_mode ? {
+ 'itk' => $run_uid,
+ default => $documentroot_owner
+ },
+ group => $documentroot_group, mode => $documentroot_mode;
+ }
+ }
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/vhost/file.pp b/puppet/modules/apache/manifests/vhost/file.pp
new file mode 100644
index 00000000..686cb1a1
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/file.pp
@@ -0,0 +1,151 @@
+# htpasswd_file: whether to deploy a passwd file for this vhost or not
+# - absent: ignore (default)
+# - nodeploy: htpasswd file isn't deployed by this mechanism
+# - else: try to deploy the file
+#
+# htpasswd_path: where to deploy the passwd file
+# - absent: standard path (default)
+# - else: path to deploy to
+#
+# ssl_mode: whether this vhost supports ssl or not
+# - false: don't enable ssl for this vhost (default)
+# - true: enable ssl for this vhost
+# - force: enable ssl and redirect non-ssl to ssl
+# - only: enable ssl only
+#
+# run_mode: controls in which mode the vhost should be run; there are different setups
+# possible:
+# - normal: (*default*) run vhost with the currently active worker (default: prefork),
+# don't set up anything special
+# - itk: run vhost with the mpm_itk module (Incompatibility: cannot be used in combination
+# with 'proxy-itk' & 'static-itk' mode)
+# - proxy-itk: run vhost with a dual prefork/itk setup, where prefork just proxies all the
+# requests to the itk setup, which listens only on the loopback device.
+# (Incompatibility: cannot be used in combination with the itk setup.)
+# - static-itk: run vhost with a dual prefork/itk setup, where prefork serves all the static
+# content and proxies the dynamic calls to the itk setup, which listens only on
+# the loopback device (Incompatibility: cannot be used in combination with
+# 'itk' mode)
+# logmode:
+# - default: Do normal logging to CustomLog and ErrorLog
+# - nologs: Send all logging to /dev/null
+# - anonym: Don't log IPs for CustomLog, send ErrorLog to /dev/null
+# - semianonym: Don't log IPs for CustomLog, log normal ErrorLog
+#
+#
+# mod_security: Whether we use mod_security or not
+# (will include mod_security module)
+# - false: (*default*) don't activate mod_security
+# - true: activate mod_security
+#
+define apache::vhost::file(
+ $ensure = present,
+ $configuration = {},
+ $vhost_source = 'absent',
+ $vhost_destination = 'absent',
+ $content = 'absent',
+ $do_includes = false,
+ $run_mode = 'normal',
+ $logmode = 'default',
+ $ssl_mode = false,
+ $mod_security = false,
+ $htpasswd_file = 'absent',
+ $htpasswd_path = 'absent',
+ $use_mod_macro = false
+){
+ $vhosts_dir = $::operatingsystem ? {
+ centos => "${apache::centos::config_dir}/vhosts.d",
+ gentoo => "${apache::gentoo::config_dir}/vhosts.d",
+ debian => "${apache::debian::config_dir}/sites-enabled",
+ ubuntu => "${apache::ubuntu::config_dir}/sites-enabled",
+ openbsd => "${apache::openbsd::config_dir}/vhosts.d",
+ default => '/etc/apache2/vhosts.d',
+ }
+ $real_vhost_destination = $vhost_destination ? {
+ 'absent' => "${vhosts_dir}/${name}.conf",
+ default => $vhost_destination,
+ }
+ file{"${name}.conf":
+ ensure => $ensure,
+ path => $real_vhost_destination,
+ require => File[vhosts_dir],
+ notify => Service[apache],
+ owner => root,
+ group => 0,
+ mode => '0644';
+ }
+ if $ensure != 'absent' {
+ if $do_includes {
+ include ::apache::includes
+ }
+ if $use_mod_macro {
+ include ::apache::mod_macro
+ }
+ case $logmode {
+ 'semianonym','anonym': { include apache::noiplog }
+ }
+ case $run_mode {
+ 'itk': {
+ include ::apache::itk::lock
+ if $mod_security { include mod_security::itk }
+ }
+ 'proxy-itk','static-itk': {
+ include ::apache::itk_plus::lock
+ if $mod_security { include mod_security::itk_plus }
+ }
+ default: {
+ if $mod_security { include mod_security }
+ }
+ }
+
+ case $content {
+ 'absent': {
+ $real_vhost_source = $vhost_source ? {
+ 'absent' => [
+ "puppet:///modules/site_apache/vhosts.d/${::fqdn}/${name}.conf",
+ "puppet:///modules/site_apache/vhosts.d/${apache::cluster_node}/${name}.conf",
+ "puppet:///modules/site_apache/vhosts.d/${::operatingsystem}.${::operatingsystemmajrelease}/${name}.conf",
+ "puppet:///modules/site_apache/vhosts.d/${::operatingsystem}/${name}.conf",
+ "puppet:///modules/site_apache/vhosts.d/${name}.conf",
+ "puppet:///modules/apache/vhosts.d/${::operatingsystem}.${::operatingsystemmajrelease}/${name}.conf",
+ "puppet:///modules/apache/vhosts.d/${::operatingsystem}/${name}.conf",
+ "puppet:///modules/apache/vhosts.d/${name}.conf"
+ ],
+ default => "puppet:///${vhost_source}",
+ }
+ File["${name}.conf"]{
+ source => $real_vhost_source,
+ }
+ }
+ default: {
+ File["${name}.conf"]{
+ content => $content,
+ }
+ }
+ }
+ }
+ case $htpasswd_file {
+ 'absent','nodeploy': { info("don't deploy a htpasswd file for ${name}") }
+ default: {
+ if $htpasswd_path == 'absent' {
+ $real_htpasswd_path = "/var/www/htpasswds/${name}"
+ } else {
+ $real_htpasswd_path = $htpasswd_path
+ }
+ file{$real_htpasswd_path:
+ ensure => $ensure,
+ }
+ if ($ensure!='absent') {
+ File[$real_htpasswd_path]{
+ source => [ "puppet:///modules/site_apache/htpasswds/${::fqdn}/${name}",
+ "puppet:///modules/site_apache/htpasswds/${apache::cluster_node}/${name}",
+ "puppet:///modules/site_apache/htpasswds/${name}" ],
+ owner => root,
+ group => 0,
+ mode => '0644',
+ }
+ }
+ }
+ }
+}
+
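A minimal sketch of the file mode defined above (the title is hypothetical, nothing here is prescribed by the patch); the vhost configuration itself is then expected at one of the source locations listed in the define, e.g. site_apache/vhosts.d/static.example.org.conf:

    # hypothetical: ship a hand-written vhost config and an htpasswd file for it
    apache::vhost::file { 'static.example.org':
      logmode       => 'anonym',  # pulls in apache::noiplog; ErrorLog goes to /dev/null
      htpasswd_file => 'deploy',  # any value other than absent/nodeploy deploys the htpasswd
    }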
diff --git a/puppet/modules/apache/manifests/vhost/file/documentrootdir.pp b/puppet/modules/apache/manifests/vhost/file/documentrootdir.pp
new file mode 100644
index 00000000..425406ad
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/file/documentrootdir.pp
@@ -0,0 +1,24 @@
+define apache::vhost::file::documentrootdir(
+ $ensure = directory,
+ $documentroot,
+ $filename,
+ $thedomain,
+ $owner = 'root',
+ $group = '0',
+ $mode = '0440'
+){
+ file{"${documentroot}/${filename}":
+ require => Apache::Vhost::Webdir[$thedomain],
+ owner => $owner, group => $group, mode => $mode;
+ }
+ if $ensure != 'absent' {
+ File["${documentroot}/${filename}"]{
+ ensure => directory,
+ }
+ } else {
+ File["${documentroot}/${filename}"]{
+ ensure => $ensure,
+ }
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/vhost/file/documentrootfile.pp b/puppet/modules/apache/manifests/vhost/file/documentrootfile.pp
new file mode 100644
index 00000000..c5bc72a1
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/file/documentrootfile.pp
@@ -0,0 +1,27 @@
+# place a file in the documentroot
+define apache::vhost::file::documentrootfile(
+ $documentroot,
+ $filename,
+ $thedomain,
+ $owner = 'root',
+ $group = '0',
+ $mode = '0440',
+){
+ file{"${documentroot}/${filename}":
+ source => [ "puppet:///modules/site_apache/vhost_varieties/${::fqdn}/${thedomain}/${filename}",
+ "puppet:///modules/site_apache/vhost_varieties/${apache::cluster_node}/${thedomain}/${filename}",
+ "puppet:///modules/site_apache/vhost_varieties/${::operatingsystem}.${::operatingsystemmajrelease}/${thedomain}/${filename}",
+ "puppet:///modules/site_apache/vhost_varieties/${::operatingsystem}/${thedomain}/${filename}",
+ "puppet:///modules/site_apache/vhost_varieties/${thedomain}/${filename}",
+ "puppet:///modules/apache/vhost_varieties/${thedomain}/${filename}",
+ "puppet:///modules/apache/vhost_varieties/${::operatingsystem}.${::operatingsystemmajrelease}/${thedomain}/${filename}",
+ "puppet:///modules/apache/vhost_varieties/${::operatingsystem}/${thedomain}/${filename}",
+ "puppet:///modules/apache/vhost_varieties/${thedomain}/${filename}",
+ ],
+ require => Apache::Vhost::Webdir[$thedomain],
+ owner => $owner,
+ group => $group,
+ mode => $mode;
+ }
+}
+
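For illustration (names made up), a static file can be dropped into a vhost's documentroot; the define then searches the site_apache and apache source paths listed above for vhost_varieties/example.org/robots.txt:

    apache::vhost::file::documentrootfile { 'robots-example.org':
      documentroot => '/var/www/vhosts/example.org/www',
      filename     => 'robots.txt',
      thedomain    => 'example.org',  # must match an existing Apache::Vhost::Webdir resource
      group        => 'apache',       # readable by the webserver; mode stays at the 0440 default
    }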
diff --git a/puppet/modules/apache/manifests/vhost/gitweb.pp b/puppet/modules/apache/manifests/vhost/gitweb.pp
new file mode 100644
index 00000000..6dd86439
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/gitweb.pp
@@ -0,0 +1,59 @@
+# logmode:
+# - default: Do normal logging to CustomLog and ErrorLog
+# - nologs: Send all logging to /dev/null
+# - anonym: Don't log IPs for CustomLog, send ErrorLog to /dev/null
+# - semianonym: Don't log IPs for CustomLog, log normal ErrorLog
+#
+define apache::vhost::gitweb(
+ $ensure = present,
+ $configuration = {},
+ $domain = 'absent',
+ $logmode = 'default',
+ $domainalias = 'absent',
+ $server_admin = 'absent',
+ $owner = root,
+ $group = apache,
+ $documentroot_owner = apache,
+ $documentroot_group = 0,
+ $documentroot_mode = 0640,
+ $allow_override = 'None',
+ $template_partial = 'apache/vhosts/gitweb/partial.erb',
+ $do_includes = false,
+ $options = 'absent',
+ $additional_options = 'absent',
+ $default_charset = 'absent',
+ $ssl_mode = false,
+ $htpasswd_file = 'absent',
+ $htpasswd_path = 'absent'
+){
+ # create vhost configuration file
+ ::apache::vhost{$name:
+ ensure => $ensure,
+ configuration => $configuration,
+ path => '/var/www/git',
+ path_is_webdir => true,
+ logpath => $::operatingsystem ? {
+ centos => '/var/log/httpd',
+ fedora => '/var/log/httpd',
+ redhat => '/var/log/httpd',
+ openbsd => '/var/www/logs',
+ default => '/var/log/apache2'
+ },
+ logmode => $logmode,
+ template_partial => $template_partial,
+ domain => $domain,
+ domainalias => $domainalias,
+ server_admin => $server_admin,
+ allow_override => $allow_override,
+ do_includes => $do_includes,
+ options => $options,
+ additional_options => $additional_options,
+ default_charset => $default_charset,
+ run_mode => 'normal',
+ ssl_mode => $ssl_mode,
+ htpasswd_file => $htpasswd_file,
+ htpasswd_path => $htpasswd_path,
+ mod_security => false,
+ }
+}
+
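A sketch of a gitweb vhost (hypothetical domain); the documentroot is fixed to /var/www/git and mod_security is disabled by the define itself:

    apache::vhost::gitweb { 'git.example.org':
      domain   => 'git.example.org',
      ssl_mode => 'only',    # serve gitweb over https only
      logmode  => 'nologs',  # send all logs to /dev/null
    }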
diff --git a/puppet/modules/apache/manifests/vhost/modperl.pp b/puppet/modules/apache/manifests/vhost/modperl.pp
new file mode 100644
index 00000000..31e46b6f
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/modperl.pp
@@ -0,0 +1,153 @@
+# run_mode: controls in which mode the vhost should be run; there are different setups
+# possible:
+# - normal: (*default*) run vhost with the currently active worker (default: prefork),
+# don't set up anything special
+# - itk: run vhost with the mpm_itk module (Incompatibility: cannot be used in combination
+# with 'proxy-itk' & 'static-itk' mode)
+# - proxy-itk: run vhost with a dual prefork/itk setup, where prefork just proxies all the
+# requests to the itk setup, which listens only on the loopback device.
+# (Incompatibility: cannot be used in combination with the itk setup.)
+# - static-itk: run vhost with a dual prefork/itk setup, where prefork serves all the static
+# content and proxies the dynamic calls to the itk setup, which listens only on
+# the loopback device (Incompatibility: cannot be used in combination with
+# 'itk' mode)
+#
+# run_uid: the uid the vhost should run as with the itk module
+# run_gid: the gid the vhost should run as with the itk module
+#
+# mod_security: Whether we use mod_security or not (will include the mod_security module)
+# - false: don't activate mod_security
+# - true: (*default*) activate mod_security
+#
+# logmode:
+# - default: Do normal logging to CustomLog and ErrorLog
+# - nologs: Send all logging to /dev/null
+# - anonym: Don't log IPs for CustomLog, send ErrorLog to /dev/null
+# - semianonym: Don't log IPs for CustomLog, log normal ErrorLog
+#
+define apache::vhost::modperl(
+ $ensure = present,
+ $configuration = {},
+ $domain = 'absent',
+ $domainalias = 'absent',
+ $server_admin = 'absent',
+ $logmode = 'default',
+ $path = 'absent',
+ $owner = root,
+ $group = apache,
+ $documentroot_owner = apache,
+ $documentroot_group = 0,
+ $documentroot_mode = 0640,
+ $run_mode = 'normal',
+ $run_uid = 'absent',
+ $run_gid = 'absent',
+ $allow_override = 'None',
+ $cgi_binpath = 'absent',
+ $do_includes = false,
+ $options = 'absent',
+ $additional_options = 'absent',
+ $default_charset = 'absent',
+ $mod_security = true,
+ $mod_security_relevantonly = true,
+ $mod_security_rules_to_disable = [],
+ $mod_security_additional_options = 'absent',
+ $ssl_mode = false,
+ $vhost_mode = 'template',
+ $template_partial = 'apache/vhosts/perl/partial.erb',
+ $vhost_source = 'absent',
+ $vhost_destination = 'absent',
+ $htpasswd_file = 'absent',
+ $htpasswd_path = 'absent'
+){
+ # cgi_bin path
+ case $cgi_binpath {
+ 'absent': {
+ $real_path = $path ? {
+ 'absent' => $::operatingsystem ? {
+ openbsd => "/var/www/htdocs/${name}",
+ default => "/var/www/vhosts/${name}"
+ },
+ default => $path
+ }
+ $real_cgi_binpath = "${real_path}/cgi-bin"
+ }
+ default: { $real_cgi_binpath = $cgi_binpath }
+ }
+
+ file{$real_cgi_binpath:
+ ensure => $ensure ? {
+ 'absent' => 'absent',
+ default => directory
+ },
+ owner => $documentroot_owner,
+ group => $documentroot_group,
+ mode => $documentroot_mode;
+ }
+
+ if $ensure != 'absent' {
+ case $run_mode {
+ 'proxy-itk','static-itk': {
+ include ::mod_perl::itk_plus
+ }
+ 'fcgid': {
+ include ::mod_fcgid
+ include apache::include::mod_fcgid
+
+ # we don't need mod_perl if we run it as fcgid
+ include ::mod_perl::disable
+ mod_fcgid::starter {$name:
+ cgi_type => 'perl',
+ owner => $run_uid,
+ group => $run_gid,
+ notify => Service['apache'],
+ }
+ }
+ default: { include ::mod_perl }
+ }
+ }
+
+ # create webdir
+ ::apache::vhost::webdir{$name:
+ ensure => $ensure,
+ path => $path,
+ owner => $owner,
+ group => $group,
+ run_mode => $run_mode,
+ documentroot_owner => $documentroot_owner,
+ documentroot_group => $documentroot_group,
+ documentroot_mode => $documentroot_mode,
+ }
+
+ # create vhost configuration file
+ ::apache::vhost{$name:
+ ensure => $ensure,
+ configuration => $configuration,
+ path => $path,
+ logmode => $logmode,
+ vhost_mode => $vhost_mode,
+ template_partial => $template_partial,
+ vhost_source => $vhost_source,
+ vhost_destination => $vhost_destination,
+ domain => $domain,
+ domainalias => $domainalias,
+ server_admin => $server_admin,
+ run_mode => $run_mode,
+ run_uid => $run_uid,
+ run_gid => $run_gid,
+ allow_override => $allow_override,
+ do_includes => $do_includes,
+ options => $options,
+ additional_options => $additional_options,
+ default_charset => $default_charset,
+ cgi_binpath => $real_cgi_binpath,
+ ssl_mode => $ssl_mode,
+ htpasswd_file => $htpasswd_file,
+ htpasswd_path => $htpasswd_path,
+ mod_security => $mod_security,
+ mod_security_relevantonly => $mod_security_relevantonly,
+ mod_security_rules_to_disable => $mod_security_rules_to_disable,
+ mod_security_additional_options => $mod_security_additional_options,
+ passing_extension => 'pl'
+ }
+}
+
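As a sketch (hypothetical names), a mod_perl vhost whose scripts run under a dedicated uid/gid via the fcgid branch handled in the body of the define:

    apache::vhost::modperl { 'perlapp.example.org':
      domain   => 'perlapp.example.org',
      run_mode => 'fcgid',   # disables mod_perl and wraps the scripts with a mod_fcgid starter
      run_uid  => 'perlapp',
      run_gid  => 'perlapp',
    }
    # the cgi-bin directory defaults to /var/www/vhosts/perlapp.example.org/cgi-bin
    # (or /var/www/htdocs/... on OpenBSD) and is created by the define.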
diff --git a/puppet/modules/apache/manifests/vhost/passenger.pp b/puppet/modules/apache/manifests/vhost/passenger.pp
new file mode 100644
index 00000000..46218908
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/passenger.pp
@@ -0,0 +1,139 @@
+# run_uid: the uid the vhost should run as with the mod_passenger module
+# run_gid: the gid the vhost should run as with the mod_passenger module
+#
+# logmode:
+#
+# - default: Do normal logging to CustomLog and ErrorLog
+# - nologs: Send all logging to /dev/null
+# - anonym: Don't log IPs for CustomLog, send ErrorLog to /dev/null
+# - semianonym: Don't log IPs for CustomLog, log normal ErrorLog
+#
+# mod_security: Whether we use mod_security or not (will include the mod_security module)
+# - false: don't activate mod_security
+# - true: (*default*) activate mod_security
+#
+define apache::vhost::passenger(
+ $ensure = present,
+ $configuration = {},
+ $domain = 'absent',
+ $domainalias = 'absent',
+ $server_admin = 'absent',
+ $logmode = 'default',
+ $path = 'absent',
+ $manage_webdir = true,
+ $manage_docroot = true,
+ $owner = root,
+ $group = apache,
+ $documentroot_owner = apache,
+ $documentroot_group = 0,
+ $documentroot_mode = 0640,
+ $run_uid = 'absent',
+ $run_gid = 'absent',
+ $allow_override = 'None',
+ $do_includes = false,
+ $options = 'absent',
+ $additional_options = 'absent',
+ $default_charset = 'absent',
+ $mod_security = true,
+ $mod_security_relevantonly = true,
+ $mod_security_rules_to_disable = [],
+ $mod_security_additional_options = 'absent',
+ $ssl_mode = false,
+ $vhost_mode = 'template',
+ $template_partial = 'apache/vhosts/passenger/partial.erb',
+ $vhost_source = 'absent',
+ $vhost_destination = 'absent',
+ $htpasswd_file = 'absent',
+ $htpasswd_path = 'absent',
+ $passenger_ree = false,
+ $passenger_app = 'rails'
+){
+
+ if $passenger_ree {
+ include ::passenger::ree::apache
+ } else {
+ include ::passenger::apache
+ }
+
+ if $manage_webdir {
+ # create webdir
+ ::apache::vhost::webdir{$name:
+ ensure => $ensure,
+ path => $path,
+ owner => $owner,
+ group => $group,
+ mode => 0644,
+ run_mode => 'normal',
+ manage_docroot => $manage_docroot,
+ documentroot_owner => $documentroot_owner,
+ documentroot_group => $run_gid,
+ documentroot_mode => $documentroot_mode,
+ }
+ }
+ $real_path = $path ? {
+ 'absent' => $::operatingsystem ? {
+ openbsd => "/var/www/htdocs/${name}",
+ default => "/var/www/vhosts/${name}"
+ },
+ default => $path
+ }
+ file{
+ ["${real_path}/www/tmp", "${real_path}/www/log"]:
+ ensure => directory,
+ owner => $documentroot_owner, group => $run_gid, mode => 0660;
+ ["${real_path}/www/public", "${real_path}/gems"]:
+ ensure => directory,
+ owner => $documentroot_owner, group => $run_gid, mode => 0640;
+ }
+ if $passenger_app == 'rails' {
+ file{
+ "${real_path}/www/config":
+ ensure => directory,
+ owner => $documentroot_owner, group => $run_gid, mode => 0640;
+ "${real_path}/www/config/environment.rb":
+ ensure => present,
+ owner => $run_uid, group => $run_gid, mode => 0640;
+ }
+ } else {
+ #rack based
+ file{
+ "${real_path}/www/config.ru":
+ ensure => present,
+ owner => $run_uid, group => $run_gid, mode => 0640;
+ }
+ }
+
+ # create vhost configuration file
+ ::apache::vhost{$name:
+ ensure => $ensure,
+ configuration => $configuration,
+ path => "${real_path}/www/public",
+ path_is_webdir => true,
+ template_partial => $template_partial,
+ logmode => $logmode,
+ logpath => "${real_path}/logs",
+ vhost_mode => $vhost_mode,
+ vhost_source => $vhost_source,
+ vhost_destination => $vhost_destination,
+ domain => $domain,
+ domainalias => $domainalias,
+ server_admin => $server_admin,
+ run_mode => 'normal',
+ run_uid => $run_uid,
+ run_gid => $run_gid,
+ allow_override => $allow_override,
+ do_includes => $do_includes,
+ options => $options,
+ additional_options => $additional_options,
+ default_charset => $default_charset,
+ ssl_mode => $ssl_mode,
+ htpasswd_file => $htpasswd_file,
+ htpasswd_path => $htpasswd_path,
+ mod_security => $mod_security,
+ mod_security_relevantonly => $mod_security_relevantonly,
+ mod_security_rules_to_disable => $mod_security_rules_to_disable,
+ mod_security_additional_options => $mod_security_additional_options,
+ gempath => "${real_path}/gems"
+ }
+}
+
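A sketch for a Passenger-served Rails application (hypothetical values); with passenger_app left at 'rails' the define also manages config/environment.rb, otherwise it expects a config.ru:

    apache::vhost::passenger { 'rails.example.org':
      domain        => 'rails.example.org',
      run_uid       => 'railsapp',
      run_gid       => 'railsapp',
      passenger_ree => false,   # use passenger::apache rather than the REE variant
      passenger_app => 'rails',
    }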
diff --git a/puppet/modules/apache/manifests/vhost/php/drupal.pp b/puppet/modules/apache/manifests/vhost/php/drupal.pp
new file mode 100644
index 00000000..5b15e6a0
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/php/drupal.pp
@@ -0,0 +1,144 @@
+# run_mode: controls in which mode the vhost should be run; there are different setups
+# possible:
+# - normal: (*default*) run vhost with the currently active worker (default: prefork),
+# don't set up anything special
+# - itk: run vhost with the mpm_itk module (Incompatibility: cannot be used in combination
+# with 'proxy-itk' & 'static-itk' mode)
+# - proxy-itk: run vhost with a dual prefork/itk setup, where prefork just proxies all the
+# requests to the itk setup, which listens only on the loopback device.
+# (Incompatibility: cannot be used in combination with the itk setup.)
+# - static-itk: run vhost with a dual prefork/itk setup, where prefork serves all the static
+# content and proxies the dynamic calls to the itk setup, which listens only on
+# the loopback device (Incompatibility: cannot be used in combination with
+# 'itk' mode)
+#
+# run_uid: the uid the vhost should run as with the itk module
+# run_gid: the gid the vhost should run as with the itk module
+#
+# mod_security: Whether we use mod_security or not (will include the mod_security module)
+# - false: don't activate mod_security
+# - true: (*default*) activate mod_security
+#
+# php_safe_mode_exec_bins: An array of local binaries which should be linked in the
+# safe_mode_exec_bin for this hosting
+# *default*: None
+# php_default_charset: default charset header for php.
+# *default*: absent, which will use the same value as the
+# default_charset of apache
+# logmode:
+# - default: Do normal logging to CustomLog and ErrorLog
+# - nologs: Send all logging to /dev/null
+# - anonym: Don't log IPs for CustomLog, send ErrorLog to /dev/null
+# - semianonym: Don't log IPs for CustomLog, log normal ErrorLog
+#
+define apache::vhost::php::drupal(
+ $ensure = present,
+ $configuration = {},
+ $domain = 'absent',
+ $domainalias = 'absent',
+ $server_admin = 'absent',
+ $logmode = 'default',
+ $path = 'absent',
+ $owner = root,
+ $group = apache,
+ $documentroot_owner = apache,
+ $documentroot_group = 0,
+ $documentroot_mode = '0640',
+ $run_mode = 'normal',
+ $run_uid = 'absent',
+ $run_gid = 'absent',
+ $allow_override = 'None',
+ $php_settings = {},
+ $php_options = {},
+ $do_includes = false,
+ $options = 'absent',
+ $additional_options = 'absent',
+ $default_charset = 'absent',
+ $mod_security = true,
+ $mod_security_relevantonly = true,
+ $mod_security_rules_to_disable = [],
+ $mod_security_additional_options = 'absent',
+ $ssl_mode = false,
+ $vhost_mode = 'template',
+ $template_partial = 'apache/vhosts/php_drupal/partial.erb',
+ $vhost_source = 'absent',
+ $vhost_destination = 'absent',
+ $htpasswd_file = 'absent',
+ $htpasswd_path = 'absent',
+ $manage_directories = true,
+ $config_webwriteable = false,
+ $manage_config = true,
+ $manage_cron = true
+){
+ $documentroot = $path ? {
+ 'absent' => $::operatingsystem ? {
+ openbsd => "/var/www/htdocs/${name}/www",
+ default => "/var/www/vhosts/${name}/www"
+ },
+ default => "${path}/www"
+ }
+
+ if $manage_cron {
+ if $domain == 'absent' {
+ $real_domain = $name
+ } else {
+ $real_domain = $domain
+ }
+
+ file{"/etc/cron.d/drupal_cron_${name}":
+ content => "0 * * * * apache wget -O - -q -t 1 http://${real_domain}/cron.php\n",
+ owner => root,
+ group => 0,
+ mode => '0644';
+ }
+ }
+
+ $std_drupal_php_settings = {
+ magic_quotes_gpc => 0,
+ register_globals => 0,
+ 'session.auto_start' => 0,
+ 'mbstring.http_input' => 'pass',
+ 'mbstring.http_output' => 'pass',
+ 'mbstring.encoding_translation' => 0,
+ }
+
+ # create vhost configuration file
+ ::apache::vhost::php::webapp{$name:
+ ensure => $ensure,
+ configuration => $configuration,
+ domain => $domain,
+ domainalias => $domainalias,
+ server_admin => $server_admin,
+ logmode => $logmode,
+ path => $path,
+ owner => $owner,
+ group => $group,
+ documentroot_owner => $documentroot_owner,
+ documentroot_group => $documentroot_group,
+ documentroot_mode => $documentroot_mode,
+ run_mode => $run_mode,
+ run_uid => $run_uid,
+ run_gid => $run_gid,
+ allow_override => $allow_override,
+ php_settings => merge($std_drupal_php_settings, $php_settings),
+ php_options => $php_options,
+ do_includes => $do_includes,
+ options => $options,
+ additional_options => $additional_options,
+ default_charset => $default_charset,
+ mod_security => $mod_security,
+ mod_security_relevantonly => $mod_security_relevantonly,
+ mod_security_rules_to_disable => $mod_security_rules_to_disable,
+ mod_security_additional_options => $mod_security_additional_options,
+ ssl_mode => $ssl_mode,
+ vhost_mode => $vhost_mode,
+ template_partial => $template_partial,
+ vhost_source => $vhost_source,
+ vhost_destination => $vhost_destination,
+ htpasswd_file => $htpasswd_file,
+ htpasswd_path => $htpasswd_path,
+ manage_directories => false,
+ manage_config => false,
+ }
+}
+
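A sketch of a Drupal vhost (hypothetical site); note that with manage_cron enabled the define drops an /etc/cron.d entry that fetches http://<domain>/cron.php hourly:

    apache::vhost::php::drupal { 'drupal.example.org':
      domain      => 'drupal.example.org',
      run_mode    => 'itk',
      run_uid     => 'drupalrun',
      run_gid     => 'drupalrun',
      manage_cron => true,  # default; set to false to skip the cron.php job
    }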
diff --git a/puppet/modules/apache/manifests/vhost/php/gallery2.pp b/puppet/modules/apache/manifests/vhost/php/gallery2.pp
new file mode 100644
index 00000000..3acb011d
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/php/gallery2.pp
@@ -0,0 +1,141 @@
+# run_mode: controls in which mode the vhost should be run; there are different setups
+# possible:
+# - normal: (*default*) run vhost with the currently active worker (default: prefork),
+# don't set up anything special
+# - itk: run vhost with the mpm_itk module (Incompatibility: cannot be used in combination
+# with 'proxy-itk' & 'static-itk' mode)
+# - proxy-itk: run vhost with a dual prefork/itk setup, where prefork just proxies all the
+# requests to the itk setup, which listens only on the loopback device.
+# (Incompatibility: cannot be used in combination with the itk setup.)
+# - static-itk: run vhost with a dual prefork/itk setup, where prefork serves all the static
+# content and proxies the dynamic calls to the itk setup, which listens only on
+# the loopback device (Incompatibility: cannot be used in combination with
+# 'itk' mode)
+#
+# run_uid: the uid the vhost should run as with the itk module
+# run_gid: the gid the vhost should run as with the itk module
+#
+# mod_security: Whether we use mod_security or not (will include the mod_security module)
+# - false: (*default*) don't activate mod_security
+# - true: activate mod_security
+#
+# php_safe_mode_exec_bins: An array of local binaries which should be linked in the
+# safe_mode_exec_bin for this hosting
+# *default*: None
+# php_default_charset: default charset header for php.
+# *default*: absent, which will use the same value as the
+# default_charset of apache
+# logmode:
+# - default: Do normal logging to CustomLog and ErrorLog
+# - nologs: Send all logging to /dev/null
+# - anonym: Don't log IPs for CustomLog, send ErrorLog to /dev/null
+# - semianonym: Don't log IPs for CustomLog, log normal ErrorLog
+define apache::vhost::php::gallery2(
+ $ensure = present,
+ $configuration = {},
+ $domain = 'absent',
+ $domainalias = 'absent',
+ $server_admin = 'absent',
+ $logmode = 'default',
+ $path = 'absent',
+ $owner = root,
+ $group = apache,
+ $documentroot_owner = apache,
+ $documentroot_group = 0,
+ $documentroot_mode = 0640,
+ $run_mode = 'normal',
+ $run_uid = 'absent',
+ $run_gid = 'absent',
+ $allow_override = 'None',
+ $php_settings = {},
+ $php_options = {},
+ $do_includes = false,
+ $options = 'absent',
+ $additional_options = 'absent',
+ $default_charset = 'absent',
+ $mod_security = false,
+ $mod_security_relevantonly = true,
+ $mod_security_rules_to_disable = [],
+ $mod_security_additional_options = 'absent',
+ $ssl_mode = false,
+ $vhost_mode = 'template',
+ $template_partial = 'apache/vhosts/php_gallery2/partial.erb',
+ $vhost_source = 'absent',
+ $vhost_destination = 'absent',
+ $htpasswd_file = 'absent',
+ $htpasswd_path = 'absent',
+ $manage_config = true,
+ $config_webwriteable = false,
+ $manage_directories = true,
+){
+ $documentroot = $path ? {
+ 'absent' => $::operatingsystem ? {
+ openbsd => "/var/www/htdocs/${name}/www",
+ default => "/var/www/vhosts/${name}/www"
+ },
+ default => "${path}/www"
+ }
+ $upload_dir = "/var/www/vhosts/${name}/data/upload"
+ $gdata_dir = "/var/www/vhosts/${name}/data/gdata"
+ if $ensure != 'absent' {
+ file{
+ $gdata_dir:
+ ensure => 'directory',
+ owner => $documentroot_owner,
+ group => $documentroot_group,
+ mode => '0660';
+ $upload_dir:
+ ensure => 'directory',
+ owner => $documentroot_owner,
+ group => $documentroot_group,
+ mode => '0660';
+ }
+ }
+
+ $gallery_php_settings = {
+ safe_mode => 'Off',
+ output_buffering => 'Off',
+ }
+ $real_php_settings = merge($gallery_php_settings,$php_settings)
+
+ # create vhost configuration file
+ ::apache::vhost::php::webapp{$name:
+ ensure => $ensure,
+ configuration => $configuration,
+ domain => $domain,
+ domainalias => $domainalias,
+ server_admin => $server_admin,
+ logmode => $logmode,
+ path => $path,
+ owner => $owner,
+ group => $group,
+ documentroot_owner => $documentroot_owner,
+ documentroot_group => $documentroot_group,
+ documentroot_mode => $documentroot_mode,
+ run_mode => $run_mode,
+ run_uid => $run_uid,
+ run_gid => $run_gid,
+ allow_override => $allow_override,
+ php_settings => $real_php_settings,
+ php_options => $php_options,
+ do_includes => $do_includes,
+ options => $options,
+ additional_options => $additional_options,
+ default_charset => $default_charset,
+ mod_security => $mod_security,
+ mod_security_relevantonly => $mod_security_relevantonly,
+ mod_security_rules_to_disable => $mod_security_rules_to_disable,
+ mod_security_additional_options => $mod_security_additional_options,
+ ssl_mode => $ssl_mode,
+ vhost_mode => $vhost_mode,
+ template_partial => $template_partial,
+ vhost_source => $vhost_source,
+ vhost_destination => $vhost_destination,
+ htpasswd_file => $htpasswd_file,
+ htpasswd_path => $htpasswd_path,
+ manage_directories => $manage_directories,
+ manage_config => $manage_config,
+ config_file => 'config.php',
+ }
+}
+
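A sketch for Gallery2 (hypothetical names); besides the webapp vhost, the define creates the data/upload and data/gdata directories under /var/www/vhosts/<name>/:

    apache::vhost::php::gallery2 { 'gallery.example.org':
      domain             => 'gallery.example.org',
      documentroot_group => 'gallerygrp',  # group that may write the upload and gdata dirs
    }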
diff --git a/puppet/modules/apache/manifests/vhost/php/global_exec_bin_dir.pp b/puppet/modules/apache/manifests/vhost/php/global_exec_bin_dir.pp
new file mode 100644
index 00000000..efcdaf7f
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/php/global_exec_bin_dir.pp
@@ -0,0 +1,9 @@
+# manage global exec_bin_dir
+class apache::vhost::php::global_exec_bin_dir {
+ file{'/var/www/php_safe_exec_bins':
+ ensure => directory,
+ owner => root,
+ group => apache,
+ mode => '0640';
+ }
+}
diff --git a/puppet/modules/apache/manifests/vhost/php/joomla.pp b/puppet/modules/apache/manifests/vhost/php/joomla.pp
new file mode 100644
index 00000000..ed0696f8
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/php/joomla.pp
@@ -0,0 +1,174 @@
+# run_mode: controls in which mode the vhost should be run; there are different
+# setups possible:
+# - normal: (*default*) run vhost with the currently active worker
+# (default: prefork), don't set up anything special
+# - itk: run vhost with the mpm_itk module (Incompatibility: cannot be used in
+# combination with 'proxy-itk' & 'static-itk' mode)
+# - proxy-itk: run vhost with a dual prefork/itk setup, where prefork just
+# proxies all the requests to the itk setup, which listens only
+# on the loopback device.
+# (Incompatibility: cannot be used in combination with the itk
+# setup.)
+# - static-itk: run vhost with a dual prefork/itk setup, where prefork serves
+# all the static
+# content and proxies the dynamic calls to the itk setup, which
+# listens only on the loopback device
+# (Incompatibility: cannot be used in combination with 'itk'
+# mode)
+#
+# run_uid: the uid the vhost should run as with the itk module
+# run_gid: the gid the vhost should run as with the itk module
+#
+# mod_security: Whether we use mod_security or not (will include the mod_security
+# module)
+# - false: don't activate mod_security
+# - true: (*default*) activate mod_security
+#
+# logmode:
+# - default: Do normal logging to CustomLog and ErrorLog
+# - nologs: Send all logging to /dev/null
+# - anonym: Don't log IPs for CustomLog, send ErrorLog to /dev/null
+# - semianonym: Don't log IPs for CustomLog, log normal ErrorLog
+define apache::vhost::php::joomla(
+ $ensure = present,
+ $configuration = {},
+ $domain = 'absent',
+ $domainalias = 'absent',
+ $server_admin = 'absent',
+ $logmode = 'default',
+ $path = 'absent',
+ $owner = root,
+ $group = apache,
+ $documentroot_owner = apache,
+ $documentroot_group = 0,
+ $documentroot_mode = '0640',
+ $run_mode = 'normal',
+ $run_uid = 'absent',
+ $run_gid = 'absent',
+ $allow_override = 'None',
+ $php_settings = {},
+ $php_options = {},
+ $php_installation = 'system',
+ $do_includes = false,
+ $options = 'absent',
+ $additional_options = 'absent',
+ $default_charset = 'absent',
+ $mod_security = true,
+ $mod_security_relevantonly = true,
+ $mod_security_rules_to_disable = [],
+ $mod_security_additional_options = 'absent',
+ $ssl_mode = false,
+ $vhost_mode = 'template',
+ $template_partial = 'apache/vhosts/php_joomla/partial.erb',
+ $vhost_source = 'absent',
+ $vhost_destination = 'absent',
+ $htpasswd_file = 'absent',
+ $htpasswd_path = 'absent',
+ $manage_config = true,
+ $config_webwriteable = false,
+ $manage_directories = true
+){
+ include ::apache::include::joomla
+
+ $documentroot = $path ? {
+ 'absent' => $::operatingsystem ? {
+ openbsd => "/var/www/htdocs/${name}/www",
+ default => "/var/www/vhosts/${name}/www"
+ },
+ default => "${path}/www"
+ }
+
+ if $mod_security_additional_options == 'absent' {
+ $id_str = $::operatingsystem ? {
+ 'CentOS' => $::operatingsystemmajrelease ? {
+ 5 => '',
+ default => 'id:1199400,'
+ },
+ default => ''
+ }
+ $real_mod_security_additional_options = "
+ # http://optics.csufresno.edu/~kriehn/fedora/fedora_files/f9/howto/modsecurity.html
+ # Exceptions for Joomla Root Directory
+ <LocationMatch \"^/\">
+ SecRuleRemoveById 950013
+ </LocationMatch>
+
+ # Exceptions for Joomla Administration Panel
+ SecRule REQUEST_FILENAME \"/administrator/index2.php\" \"${id_str}allow,phase:1,nolog,ctl:ruleEngine=Off\"
+
+ # Exceptions for Joomla Component Expose
+ <LocationMatch \"^/components/com_expose/expose/manager/amfphp/gateway.php\">
+ SecRuleRemoveById 960010
+ </LocationMatch>
+"
+ } else {
+ $real_mod_security_additional_options = $mod_security_additional_options
+ }
+
+ $std_joomla_php_settings = {
+ 'allow_url_fopen' => 'on',
+ 'allow_url_include' => 'off',
+ }
+
+ # create vhost configuration file
+ ::apache::vhost::php::webapp{
+ $name:
+ ensure => $ensure,
+ configuration => $configuration,
+ domain => $domain,
+ domainalias => $domainalias,
+ server_admin => $server_admin,
+ logmode => $logmode,
+ path => $path,
+ owner => $owner,
+ group => $group,
+ documentroot_owner => $documentroot_owner,
+ documentroot_group => $documentroot_group,
+ documentroot_mode => $documentroot_mode,
+ run_mode => $run_mode,
+ run_uid => $run_uid,
+ run_gid => $run_gid,
+ allow_override => $allow_override,
+ php_settings => merge($std_joomla_php_settings,
+ $php_settings),
+ php_options => $php_options,
+ php_installation => $php_installation,
+ do_includes => $do_includes,
+ options => $options,
+ additional_options => $additional_options,
+ default_charset => $default_charset,
+ mod_security => $mod_security,
+ mod_security_relevantonly => $mod_security_relevantonly,
+ mod_security_rules_to_disable => $mod_security_rules_to_disable,
+ mod_security_additional_options => $real_mod_security_additional_options,
+ ssl_mode => $ssl_mode,
+ vhost_mode => $vhost_mode,
+ template_partial => $template_partial,
+ vhost_source => $vhost_source,
+ vhost_destination => $vhost_destination,
+ htpasswd_file => $htpasswd_file,
+ htpasswd_path => $htpasswd_path,
+ manage_directories => $manage_directories,
+ managed_directories => [ "${documentroot}/administrator/backups",
+ "${documentroot}/administrator/components",
+ "${documentroot}/administrator/language",
+ "${documentroot}/administrator/modules",
+ "${documentroot}/administrator/templates",
+ "${documentroot}/components",
+ "${documentroot}/dmdocuments",
+ "${documentroot}/images",
+ "${documentroot}/language",
+ "${documentroot}/media",
+ "${documentroot}/modules",
+ "${documentroot}/plugins",
+ "${documentroot}/templates",
+ "${documentroot}/cache",
+ "${documentroot}/tmp",
+ "${documentroot}/administrator/cache" ],
+ manage_config => $manage_config,
+ config_webwriteable => $config_webwriteable,
+ config_file => 'configuration.php',
+ }
+
+}
+
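A sketch for Joomla (hypothetical site); unless mod_security_additional_options is overridden, the define injects the SecRule exceptions shown above and marks the usual Joomla directories as managed:

    apache::vhost::php::joomla { 'joomla.example.org':
      domain   => 'joomla.example.org',
      run_mode => 'itk',
      run_uid  => 'joomlarun',
      run_gid  => 'joomlarun',
    }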
diff --git a/puppet/modules/apache/manifests/vhost/php/mediawiki.pp b/puppet/modules/apache/manifests/vhost/php/mediawiki.pp
new file mode 100644
index 00000000..25881ca1
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/php/mediawiki.pp
@@ -0,0 +1,106 @@
+# run_mode: controls in which mode the vhost should be run; there are different setups
+# possible:
+# - normal: (*default*) run vhost with the currently active worker (default: prefork),
+# don't set up anything special
+# - itk: run vhost with the mpm_itk module (Incompatibility: cannot be used in combination
+# with 'proxy-itk' & 'static-itk' mode)
+# - proxy-itk: run vhost with a dual prefork/itk setup, where prefork just proxies all the
+# requests to the itk setup, which listens only on the loopback device.
+# (Incompatibility: cannot be used in combination with the itk setup.)
+# - static-itk: run vhost with a dual prefork/itk setup, where prefork serves all the static
+# content and proxies the dynamic calls to the itk setup, which listens only on
+# the loopback device (Incompatibility: cannot be used in combination with
+# 'itk' mode)
+#
+# run_uid: the uid the vhost should run as with the itk module
+# run_gid: the gid the vhost should run as with the itk module
+#
+# mod_security: Whether we use mod_security or not (will include the mod_security module)
+# - false: don't activate mod_security
+# - true: (*default*) activate mod_security
+#
+# logmode:
+# - default: Do normal logging to CustomLog and ErrorLog
+# - nologs: Send all logging to /dev/null
+# - anonym: Don't log IPs for CustomLog, send ErrorLog to /dev/null
+# - semianonym: Don't log IPs for CustomLog, log normal ErrorLog
+define apache::vhost::php::mediawiki(
+ $ensure = present,
+ $configuration = {},
+ $domain = 'absent',
+ $domainalias = 'absent',
+ $server_admin = 'absent',
+ $logmode = 'default',
+ $path = 'absent',
+ $manage_docroot = true,
+ $owner = root,
+ $group = apache,
+ $documentroot_owner = apache,
+ $documentroot_group = 0,
+ $documentroot_mode = 0640,
+ $run_mode = 'normal',
+ $run_uid = 'absent',
+ $run_gid = 'absent',
+ $allow_override = 'FileInfo Limit',
+ $php_settings = {},
+ $php_options = {},
+ $options = 'absent',
+ $additional_options = 'absent',
+ $default_charset = 'absent',
+ $mod_security = true,
+ $mod_security_relevantonly = true,
+ $mod_security_rules_to_disable = [],
+ $mod_security_additional_options = 'absent',
+ $ssl_mode = false,
+ $vhost_mode = 'template',
+ $template_partial = 'apache/vhosts/php_mediawiki/partial.erb',
+ $vhost_source = 'absent',
+ $vhost_destination = 'absent',
+ $htpasswd_file = 'absent',
+ $htpasswd_path = 'absent'
+){
+
+ $mediawiki_php_settings = {
+ safe_mode => false,
+ }
+
+ # create vhost configuration file
+ ::apache::vhost::php::webapp{$name:
+ ensure => $ensure,
+ configuration => $configuration,
+ domain => $domain,
+ domainalias => $domainalias,
+ server_admin => $server_admin,
+ logmode => $logmode,
+ path => $path,
+ manage_docroot => $manage_docroot,
+ owner => $owner,
+ group => $group,
+ documentroot_owner => $documentroot_owner,
+ documentroot_group => $documentroot_group,
+ documentroot_mode => $documentroot_mode,
+ run_mode => $run_mode,
+ run_uid => $run_uid,
+ run_gid => $run_gid,
+ allow_override => $allow_override,
+ php_settings => merge($mediawiki_php_settings,$php_settings),
+ php_options => $php_options,
+ options => $options,
+ additional_options => $additional_options,
+ default_charset => $default_charset,
+ mod_security => $mod_security,
+ mod_security_relevantonly => $mod_security_relevantonly,
+ mod_security_rules_to_disable => $mod_security_rules_to_disable,
+ mod_security_additional_options => $mod_security_additional_options,
+ ssl_mode => $ssl_mode,
+ vhost_mode => $vhost_mode,
+ template_partial => $template_partial,
+ vhost_source => $vhost_source,
+ vhost_destination => $vhost_destination,
+ htpasswd_file => $htpasswd_file,
+ htpasswd_path => $htpasswd_path,
+ manage_directories => false,
+ manage_config => false,
+ }
+}
+
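A sketch for MediaWiki (hypothetical wiki); allow_override defaults to 'FileInfo Limit' and safe_mode is forced off via the merged php settings:

    apache::vhost::php::mediawiki { 'wiki.example.org':
      domain   => 'wiki.example.org',
      ssl_mode => 'force',  # redirect plain http to https
    }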
diff --git a/puppet/modules/apache/manifests/vhost/php/safe_mode_bin.pp b/puppet/modules/apache/manifests/vhost/php/safe_mode_bin.pp
new file mode 100644
index 00000000..1c82e199
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/php/safe_mode_bin.pp
@@ -0,0 +1,17 @@
+# safe_mode binaries
+define apache::vhost::php::safe_mode_bin(
+ $ensure = 'present',
+ $path
+){
+ $substr=regsubst($name,'^.*\/','','G')
+ $real_path = "${path}/${substr}"
+ $target = $ensure ? {
+ 'present' => regsubst($name,'^.*@',''),
+ default => absent,
+ }
+ file{$real_path:
+ ensure => link,
+ target => $target,
+ }
+}
+
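Reading the two regsubst calls above, the resource title is expected to carry the link target after an '@'; a sketch (paths are examples only):

    # creates the symlink ${path}/convert -> /usr/bin/convert
    apache::vhost::php::safe_mode_bin { 'example.org@/usr/bin/convert':
      path => '/var/www/vhosts/example.org/php_safe_exec_bins',
    }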
diff --git a/puppet/modules/apache/manifests/vhost/php/silverstripe.pp b/puppet/modules/apache/manifests/vhost/php/silverstripe.pp
new file mode 100644
index 00000000..1f19eab4
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/php/silverstripe.pp
@@ -0,0 +1,119 @@
+# run_mode: controls in which mode the vhost should be run; there are different setups
+# possible:
+# - normal: (*default*) run vhost with the currently active worker (default: prefork),
+# don't set up anything special
+# - itk: run vhost with the mpm_itk module (Incompatibility: cannot be used in combination
+# with 'proxy-itk' & 'static-itk' mode)
+# - proxy-itk: run vhost with a dual prefork/itk setup, where prefork just proxies all the
+# requests to the itk setup, which listens only on the loopback device.
+# (Incompatibility: cannot be used in combination with the itk setup.)
+# - static-itk: run vhost with a dual prefork/itk setup, where prefork serves all the static
+# content and proxies the dynamic calls to the itk setup, which listens only on
+# the loopback device (Incompatibility: cannot be used in combination with
+# 'itk' mode)
+#
+# run_uid: the uid the vhost should run as with the itk module
+# run_gid: the gid the vhost should run as with the itk module
+#
+# mod_security: Whether we use mod_security or not (will include the mod_security module)
+# - false: don't activate mod_security
+# - true: (*default*) activate mod_security
+#
+# logmode:
+# - default: Do normal logging to CustomLog and ErrorLog
+# - nologs: Send all logging to /dev/null
+# - anonym: Don't log IPs for CustomLog, send ErrorLog to /dev/null
+# - semianonym: Don't log IPs for CustomLog, log normal ErrorLog
+define apache::vhost::php::silverstripe(
+ $ensure = present,
+ $configuration = {},
+ $domain = 'absent',
+ $domainalias = 'absent',
+ $server_admin = 'absent',
+ $logmode = 'default',
+ $path = 'absent',
+ $owner = root,
+ $group = apache,
+ $documentroot_owner = apache,
+ $documentroot_group = 0,
+ $documentroot_mode = '0640',
+ $run_mode = 'normal',
+ $run_uid = 'absent',
+ $run_gid = 'absent',
+ $allow_override = 'None',
+ $php_settings = {},
+ $php_options = {},
+ $do_includes = false,
+ $options = 'absent',
+ $additional_options = 'absent',
+ $default_charset = 'absent',
+ $mod_security = true,
+ $mod_security_relevantonly = true,
+ $mod_security_rules_to_disable = [],
+ $mod_security_additional_options = 'absent',
+ $ssl_mode = false,
+ $vhost_mode = 'template',
+ $template_partial = 'apache/vhosts/php_silverstripe/partial.erb',
+ $vhost_source = 'absent',
+ $vhost_destination = 'absent',
+ $htpasswd_file = 'absent',
+ $htpasswd_path = 'absent',
+ $manage_config = true,
+ $config_webwriteable = false,
+ $manage_directories = true,
+){
+
+ include ::apache::include::silverstripe
+
+ $documentroot = $path ? {
+ 'absent' => $::operatingsystem ? {
+ openbsd => "/var/www/htdocs/${name}/www",
+ default => "/var/www/vhosts/${name}/www"
+ },
+ default => "${path}/www"
+ }
+ $modsec_rules = ['960010']
+ $real_mod_security_rules_to_disable = union($mod_security_rules_to_disable,$modsec_rules)
+
+ # create vhost configuration file
+ ::apache::vhost::php::webapp{$name:
+ ensure => $ensure,
+ configuration => $configuration,
+ domain => $domain,
+ domainalias => $domainalias,
+ server_admin => $server_admin,
+ logmode => $logmode,
+ path => $path,
+ owner => $owner,
+ group => $group,
+ documentroot_owner => $documentroot_owner,
+ documentroot_group => $documentroot_group,
+ documentroot_mode => $documentroot_mode,
+ run_mode => $run_mode,
+ run_uid => $run_uid,
+ run_gid => $run_gid,
+ allow_override => $allow_override,
+ php_settings => $php_settings,
+ php_options => $php_options,
+ do_includes => $do_includes,
+ options => $options,
+ additional_options => $additional_options,
+ default_charset => $default_charset,
+ mod_security => $mod_security,
+ mod_security_relevantonly => $mod_security_relevantonly,
+ mod_security_rules_to_disable => $real_mod_security_rules_to_disable,
+ mod_security_additional_options => $mod_security_additional_options,
+ ssl_mode => $ssl_mode,
+ vhost_mode => $vhost_mode,
+ template_partial => $template_partial,
+ vhost_source => $vhost_source,
+ vhost_destination => $vhost_destination,
+ htpasswd_file => $htpasswd_file,
+ htpasswd_path => $htpasswd_path,
+ manage_directories => $manage_directories,
+ managed_directories => [ "${documentroot}/assets" ],
+ manage_config => $manage_config,
+ }
+
+}
+
diff --git a/puppet/modules/apache/manifests/vhost/php/simplemachine.pp b/puppet/modules/apache/manifests/vhost/php/simplemachine.pp
new file mode 100644
index 00000000..3fa11a77
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/php/simplemachine.pp
@@ -0,0 +1,125 @@
+# run_mode: controls in which mode the vhost should be run. There are different setups
+#           possible:
+#           - normal: (*default*) run vhost with the currently active worker (default: prefork);
+#                     don't set up anything special
+#           - itk: run vhost with the mpm_itk module (Incompatibility: cannot be used in combination
+#                  with 'proxy-itk' & 'static-itk' mode)
+#           - proxy-itk: run vhost with a dual prefork/itk setup, where prefork just proxies all the
+#                        requests for the itk setup, which listens only on the loopback device.
+#                        (Incompatibility: cannot be used in combination with the itk setup.)
+#           - static-itk: run vhost with a dual prefork/itk setup, where prefork serves all the static
+#                         content and proxies the dynamic calls to the itk setup, which listens only on
+#                         the loopback device (Incompatibility: cannot be used in combination with
+#                         'itk' mode)
+#
+# run_uid: the uid the vhost should run as with the itk module
+# run_gid: the gid the vhost should run as with the itk module
+#
+# mod_security: Whether to use mod_security or not (will include the mod_security module)
+# - false: don't activate mod_security
+# - true: (*default*) activate mod_security
+#
+# logmode:
+# - default: Do normal logging to CustomLog and ErrorLog
+#   - nologs: Send all logging to /dev/null
+#   - anonym: Don't log IPs in the CustomLog, send ErrorLog to /dev/null
+#   - semianonym: Don't log IPs in the CustomLog, log a normal ErrorLog
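+#
+# Example usage (a minimal sketch; the domain and the run uid/gid below are placeholders):
+#
+#   apache::vhost::php::simplemachine{'forum.example.org':
+#     run_mode => 'static-itk',
+#     run_uid  => 'forum_run',
+#     run_gid  => 'forum_run',
+#     ssl_mode => true,
+#   }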
+define apache::vhost::php::simplemachine(
+ $ensure = present,
+ $configuration = {},
+ $domain = 'absent',
+ $domainalias = 'absent',
+ $server_admin = 'absent',
+ $logmode = 'default',
+ $path = 'absent',
+ $owner = root,
+ $group = apache,
+ $documentroot_owner = apache,
+ $documentroot_group = 0,
+ $documentroot_mode = '0640',
+ $run_mode = 'normal',
+ $run_uid = 'absent',
+ $run_gid = 'absent',
+ $allow_override = 'None',
+ $php_settings = {},
+ $php_options = {},
+ $do_includes = false,
+ $options = 'absent',
+ $additional_options = 'absent',
+ $default_charset = 'absent',
+ $mod_security = true,
+ $mod_security_relevantonly = true,
+ $mod_security_rules_to_disable = [],
+ $mod_security_additional_options = 'absent',
+ $ssl_mode = false,
+ $vhost_mode = 'template',
+ $template_partial = 'apache/vhosts/php/partial.erb',
+ $vhost_source = 'absent',
+ $vhost_destination = 'absent',
+ $htpasswd_file = 'absent',
+ $htpasswd_path = 'absent',
+ $manage_config = true,
+ $config_webwriteable = false,
+ $manage_directories = true,
+){
+ $documentroot = $path ? {
+ 'absent' => $::operatingsystem ? {
+ openbsd => "/var/www/htdocs/${name}/www",
+ default => "/var/www/vhosts/${name}/www"
+ },
+ default => "${path}/www"
+ }
+
+ # create vhost configuration file
+ ::apache::vhost::php::webapp{$name:
+ ensure => $ensure,
+ configuration => $configuration,
+ domain => $domain,
+ domainalias => $domainalias,
+ server_admin => $server_admin,
+ logmode => $logmode,
+ path => $path,
+ owner => $owner,
+ group => $group,
+ documentroot_owner => $documentroot_owner,
+ documentroot_group => $documentroot_group,
+ documentroot_mode => $documentroot_mode,
+ run_mode => $run_mode,
+ run_uid => $run_uid,
+ run_gid => $run_gid,
+ allow_override => $allow_override,
+ php_settings => $php_settings,
+ php_options => $php_options,
+ do_includes => $do_includes,
+ options => $options,
+ additional_options => $additional_options,
+ default_charset => $default_charset,
+ mod_security => $mod_security,
+ mod_security_relevantonly => $mod_security_relevantonly,
+ mod_security_rules_to_disable => $mod_security_rules_to_disable,
+ mod_security_additional_options => $mod_security_additional_options,
+ ssl_mode => $ssl_mode,
+ vhost_mode => $vhost_mode,
+ template_partial => $template_partial,
+ vhost_source => $vhost_source,
+ vhost_destination => $vhost_destination,
+ htpasswd_file => $htpasswd_file,
+ htpasswd_path => $htpasswd_path,
+ manage_directories => $manage_directories,
+ managed_directories => [
+ "${documentroot}/agreement.txt",
+ "${documentroot}/attachments",
+ "${documentroot}/avatars",
+ "${documentroot}/cache",
+ "${documentroot}/Packages",
+ "${documentroot}/Packages/installed.list",
+ "${documentroot}/Smileys",
+ "${documentroot}/Themes",
+ "${documentroot}/Themes/default/languages/Install.english.php"
+ ],
+ manage_config => $manage_config,
+ config_webwriteable => $config_webwriteable,
+ config_file => 'Settings.php',
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/vhost/php/spip.pp b/puppet/modules/apache/manifests/vhost/php/spip.pp
new file mode 100644
index 00000000..e33c1dfe
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/php/spip.pp
@@ -0,0 +1,114 @@
+# run_mode: controls in which mode the vhost should be run. There are different setups
+#           possible:
+#           - normal: (*default*) run vhost with the currently active worker (default: prefork);
+#                     don't set up anything special
+#           - itk: run vhost with the mpm_itk module (Incompatibility: cannot be used in combination
+#                  with 'proxy-itk' & 'static-itk' mode)
+#           - proxy-itk: run vhost with a dual prefork/itk setup, where prefork just proxies all the
+#                        requests for the itk setup, which listens only on the loopback device.
+#                        (Incompatibility: cannot be used in combination with the itk setup.)
+#           - static-itk: run vhost with a dual prefork/itk setup, where prefork serves all the static
+#                         content and proxies the dynamic calls to the itk setup, which listens only on
+#                         the loopback device (Incompatibility: cannot be used in combination with
+#                         'itk' mode)
+#
+# run_uid: the uid the vhost should run as with the itk module
+# run_gid: the gid the vhost should run as with the itk module
+#
+# mod_security: Whether to use mod_security or not (will include the mod_security module)
+# - false: don't activate mod_security
+# - true: (*default*) activate mod_security
+#
+# logmode:
+# - default: Do normal logging to CustomLog and ErrorLog
+#   - nologs: Send all logging to /dev/null
+#   - anonym: Don't log IPs in the CustomLog, send ErrorLog to /dev/null
+#   - semianonym: Don't log IPs in the CustomLog, log a normal ErrorLog
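+#
+# Example usage (a minimal sketch; the domain below is a placeholder):
+#
+#   apache::vhost::php::spip{'spip.example.org':
+#     domainalias => 'www.spip.example.org',
+#   }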
+define apache::vhost::php::spip(
+ $ensure = present,
+ $configuration = {},
+ $domain = 'absent',
+ $domainalias = 'absent',
+ $server_admin = 'absent',
+ $logmode = 'default',
+ $path = 'absent',
+ $owner = root,
+ $group = apache,
+ $documentroot_owner = apache,
+ $documentroot_group = 0,
+ $documentroot_mode = '0640',
+ $run_mode = 'normal',
+ $run_uid = 'absent',
+ $run_gid = 'absent',
+ $allow_override = 'FileInfo',
+ $php_settings = {},
+ $php_options = {},
+ $template_partial = 'apache/vhosts/php/partial.erb',
+ $do_includes = false,
+ $options = 'absent',
+ $additional_options = 'absent',
+ $default_charset = 'absent',
+ $mod_security = true,
+ $mod_security_relevantonly = true,
+ $mod_security_rules_to_disable = [],
+ $mod_security_additional_options = 'absent',
+ $ssl_mode = false,
+ $vhost_mode = 'template',
+ $vhost_source = 'absent',
+ $vhost_destination = 'absent',
+ $htpasswd_file = 'absent',
+ $htpasswd_path = 'absent'
+){
+ $documentroot = $path ? {
+ 'absent' => $::operatingsystem ? {
+ openbsd => "/var/www/htdocs/${name}/www",
+ default => "/var/www/vhosts/${name}/www"
+ },
+ default => "${path}/www"
+ }
+
+ # create vhost configuration file
+ ::apache::vhost::php::webapp{$name:
+ ensure => $ensure,
+ configuration => $configuration,
+ domain => $domain,
+ domainalias => $domainalias,
+ server_admin => $server_admin,
+ logmode => $logmode,
+ path => $path,
+ owner => $owner,
+ group => $group,
+ documentroot_owner => $documentroot_owner,
+ documentroot_group => $documentroot_group,
+ documentroot_mode => $documentroot_mode,
+ run_mode => $run_mode,
+ run_uid => $run_uid,
+ run_gid => $run_gid,
+ allow_override => $allow_override,
+ php_settings => $php_settings,
+ php_options => $php_options,
+ do_includes => $do_includes,
+ options => $options,
+ additional_options => $additional_options,
+ default_charset => $default_charset,
+ mod_security => $mod_security,
+ mod_security_relevantonly => $mod_security_relevantonly,
+ mod_security_rules_to_disable => $mod_security_rules_to_disable,
+ mod_security_additional_options => $mod_security_additional_options,
+ ssl_mode => $ssl_mode,
+ vhost_mode => $vhost_mode,
+ template_partial => $template_partial,
+ vhost_source => $vhost_source,
+ vhost_destination => $vhost_destination,
+ htpasswd_file => $htpasswd_file,
+ htpasswd_path => $htpasswd_path,
+ managed_directories => [
+ "${documentroot}/IMG",
+ "${documentroot}/tmp",
+ "${documentroot}/local",
+ "${documentroot}/config"
+ ],
+ manage_config => false,
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/vhost/php/standard.pp b/puppet/modules/apache/manifests/vhost/php/standard.pp
new file mode 100644
index 00000000..3870707a
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/php/standard.pp
@@ -0,0 +1,304 @@
+# run_mode: controls in which mode the vhost should be run. There are different setups
+#           possible:
+#           - normal: (*default*) run vhost with the currently active worker (default: prefork);
+#                     don't set up anything special
+#           - itk: run vhost with the mpm_itk module (Incompatibility: cannot be used in combination
+#                  with 'proxy-itk' & 'static-itk' mode)
+#           - proxy-itk: run vhost with a dual prefork/itk setup, where prefork just proxies all the
+#                        requests for the itk setup, which listens only on the loopback device.
+#                        (Incompatibility: cannot be used in combination with the itk setup.)
+#           - static-itk: run vhost with a dual prefork/itk setup, where prefork serves all the static
+#                         content and proxies the dynamic calls to the itk setup, which listens only on
+#                         the loopback device (Incompatibility: cannot be used in combination with
+#                         'itk' mode)
+#
+# run_uid: the uid the vhost should run as with the itk module
+# run_gid: the gid the vhost should run as with the itk module
+#
+# mod_security: Whether to use mod_security or not (will include the mod_security module)
+# - false: don't activate mod_security
+# - true: (*default*) activate mod_security
+#
+# logmode:
+# - default: Do normal logging to CustomLog and ErrorLog
+#   - nologs: Send all logging to /dev/null
+#   - anonym: Don't log IPs in the CustomLog, send ErrorLog to /dev/null
+#   - semianonym: Don't log IPs in the CustomLog, log a normal ErrorLog
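+#
+# Example usage (a minimal sketch; the domain, the uid/gid and the php values below are placeholders):
+#
+#   apache::vhost::php::standard{'app.example.org':
+#     run_mode     => 'fcgid',
+#     run_uid      => 'app_run',
+#     run_gid      => 'app_run',
+#     php_options  => { additional_open_basedir => '/srv/app_data/' },
+#     php_settings => { memory_limit => '128M' },
+#   }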
+define apache::vhost::php::standard(
+ $ensure = present,
+ $configuration = {},
+ $domain = 'absent',
+ $domainalias = 'absent',
+ $server_admin = 'absent',
+ $logmode = 'default',
+ $logpath = 'absent',
+ $logprefix = '',
+ $path = 'absent',
+ $manage_webdir = true,
+ $path_is_webdir = false,
+ $manage_docroot = true,
+ $owner = root,
+ $group = apache,
+ $documentroot_owner = apache,
+ $documentroot_group = 0,
+ $documentroot_mode = 0640,
+ $run_mode = 'normal',
+ $run_uid = 'absent',
+ $run_gid = 'absent',
+ $allow_override = 'None',
+ $php_settings = {},
+ $php_options = {},
+ $php_installation = 'system',
+ $do_includes = false,
+ $options = 'absent',
+ $additional_options = 'absent',
+ $default_charset = 'absent',
+ $use_mod_macro = false,
+ $mod_security = true,
+ $mod_security_relevantonly = true,
+ $mod_security_rules_to_disable = [],
+ $mod_security_additional_options = 'absent',
+ $ssl_mode = false,
+ $vhost_mode = 'template',
+ $template_partial = 'apache/vhosts/php/partial.erb',
+ $vhost_source = 'absent',
+ $vhost_destination = 'absent',
+ $htpasswd_file = 'absent',
+ $htpasswd_path = 'absent',
+){
+
+ if $manage_webdir {
+ # create webdir
+ ::apache::vhost::webdir{$name:
+ ensure => $ensure,
+ path => $path,
+ owner => $owner,
+ group => $group,
+ run_mode => $run_mode,
+ manage_docroot => $manage_docroot,
+ documentroot_owner => $documentroot_owner,
+ documentroot_group => $documentroot_group,
+ documentroot_mode => $documentroot_mode,
+ }
+ }
+
+ $real_path = $path ? {
+ 'absent' => $::operatingsystem ? {
+ openbsd => "/var/www/htdocs/${name}",
+ default => "/var/www/vhosts/${name}"
+ },
+ default => $path
+ }
+
+ if $path_is_webdir {
+ $documentroot = $real_path
+ } else {
+ $documentroot = "${real_path}/www"
+ }
+ $logdir = $logpath ? {
+ 'absent' => "${real_path}/logs",
+ default => $logpath
+ }
+
+ $std_php_options = {
+ smarty => false,
+ pear => false,
+ }
+ $real_php_options = merge($std_php_options,$php_options)
+
+ if $real_php_options[smarty] {
+ include php::extensions::smarty
+ $smarty_path = '/usr/share/php/Smarty/:'
+ } else {
+ $smarty_path = ''
+ }
+
+ if $real_php_options[pear] {
+ $pear_path = '/usr/share/pear/:'
+ } else {
+ $pear_path = ''
+ }
+
+ if $logmode != 'nologs' {
+ $php_error_log = "${logdir}/php_error_log"
+ } else {
+ $php_error_log = undef
+ }
+
+ if ('safe_mode_exec_dir' in $php_settings) {
+ $php_safe_mode_exec_dir = $php_settings[safe_mode_exec_dir]
+ } else {
+ $php_safe_mode_exec_dir = $path ? {
+ 'absent' => $::operatingsystem ? {
+ openbsd => "/var/www/htdocs/${name}/bin",
+ default => "/var/www/vhosts/${name}/bin"
+ },
+ default => "${path}/bin"
+ }
+ }
+ file{$php_safe_mode_exec_dir:
+ recurse => true,
+ force => true,
+ purge => true,
+ }
+ if ('safe_mode_exec_bins' in $php_options) {
+ $std_php_settings_safe_mode_exec_dir = $php_safe_mode_exec_dir
+ $ensure_exec = $ensure ? {
+ 'present' => directory,
+ default => 'absent',
+ }
+ File[$php_safe_mode_exec_dir]{
+ ensure => $ensure_exec,
+ owner => $documentroot_owner,
+ group => $documentroot_group,
+ mode => '0750',
+ }
+ $php_safe_mode_exec_bins_subst = regsubst($php_options[safe_mode_exec_bins],'(.+)',"${name}@\\1")
+ apache::vhost::php::safe_mode_bin{
+ $php_safe_mode_exec_bins_subst:
+ ensure => $ensure,
+ path => $php_safe_mode_exec_dir;
+ }
+ } else {
+ $std_php_settings_safe_mode_exec_dir = undef
+ File[$php_safe_mode_exec_dir]{
+ ensure => absent,
+ }
+ }
+
+ if !('default_charset' in $php_settings) and ($default_charset != 'absent') {
+ $std_php_settings_default_charset = $default_charset ? {
+ 'On' => 'iso-8859-1',
+ default => $default_charset
+ }
+ } else {
+ $std_php_settings_default_charset = undef
+ }
+
+ if ('additional_open_basedir' in $php_options) {
+ $the_open_basedir = "${smarty_path}${pear_path}${documentroot}:${real_path}/data:/var/www/upload_tmp_dir/${name}:/var/www/session.save_path/${name}:${php_options[additional_open_basedir]}"
+ } else {
+ $the_open_basedir = "${smarty_path}${pear_path}${documentroot}:${real_path}/data:/var/www/upload_tmp_dir/${name}:/var/www/session.save_path/${name}"
+ }
+
+ if $run_mode == 'fcgid' {
+ $safe_mode_gid = $::operatingsystem ? {
+ debian => undef,
+ default => $php_installation ? {
+ 'system' => 'On',
+ default => undef,
+ }
+ }
+ } else {
+ $safe_mode_gid = undef
+ }
+
+ $safe_mode = $::operatingsystem ? {
+ debian => undef,
+ default => $php_installation ? {
+ 'system' => 'On',
+ default => undef,
+ }
+ }
+ $std_php_settings = {
+ engine => 'On',
+ upload_tmp_dir => "/var/www/upload_tmp_dir/${name}",
+ 'session.save_path' => "/var/www/session.save_path/${name}",
+ error_log => $php_error_log,
+ safe_mode => $safe_mode,
+ safe_mode_gid => $safe_mode_gid,
+ safe_mode_exec_dir => $std_php_settings_safe_mode_exec_dir,
+ default_charset => $std_php_settings_default_charset,
+ open_basedir => $the_open_basedir,
+ }
+
+ $real_php_settings = merge($std_php_settings,$php_settings)
+
+ if $ensure != 'absent' {
+ case $run_mode {
+ 'proxy-itk','static-itk': {
+ include ::php::itk_plus
+ }
+ 'itk': { include ::php::itk }
+ 'fcgid': {
+ include ::mod_fcgid
+ include ::php::mod_fcgid
+ include apache::include::mod_fcgid
+
+ mod_fcgid::starter {$name:
+ tmp_dir => $real_php_settings[php_tmp_dir],
+ cgi_type => 'php',
+ cgi_type_options => delete($real_php_settings, php_tmp_dir),
+ owner => $run_uid,
+ group => $run_gid,
+ notify => Service['apache'],
+ }
+ if $php_installation == 'scl54' {
+ require php::scl::php54
+ Mod_fcgid::Starter[$name]{
+ binary => '/opt/rh/php54/root/usr/bin/php-cgi',
+ additional_cmds => 'source /opt/rh/php54/enable',
+ rc => '/opt/rh/php54/root/etc',
+ }
+ } elsif $php_installation == 'scl55' {
+ require php::scl::php55
+ Mod_fcgid::Starter[$name]{
+ binary => '/opt/rh/php55/root/usr/bin/php-cgi',
+ additional_cmds => 'source /opt/rh/php55/enable',
+ rc => '/opt/rh/php55/root/etc',
+ }
+ }
+ }
+ default: { include ::php }
+ }
+ }
+
+ ::apache::vhost::phpdirs{$name:
+ ensure => $ensure,
+ php_upload_tmp_dir => $real_php_settings[upload_tmp_dir],
+ php_session_save_path => $real_php_settings['session.save_path'],
+ documentroot_owner => $documentroot_owner,
+ documentroot_group => $documentroot_group,
+ documentroot_mode => $documentroot_mode,
+ run_mode => $run_mode,
+ run_uid => $run_uid,
+ }
+
+ # create vhost configuration file
+ ::apache::vhost{$name:
+ ensure => $ensure,
+ configuration => $configuration,
+ path => $path,
+ path_is_webdir => $path_is_webdir,
+ vhost_mode => $vhost_mode,
+ template_partial => $template_partial,
+ vhost_source => $vhost_source,
+ vhost_destination => $vhost_destination,
+ domain => $domain,
+ domainalias => $domainalias,
+ server_admin => $server_admin,
+ logmode => $logmode,
+ logpath => $logpath,
+ logprefix => $logprefix,
+ run_mode => $run_mode,
+ run_uid => $run_uid,
+ run_gid => $run_gid,
+ allow_override => $allow_override,
+ do_includes => $do_includes,
+ options => $options,
+ additional_options => $additional_options,
+ default_charset => $default_charset,
+ php_settings => $real_php_settings,
+ php_options => $real_php_options,
+ ssl_mode => $ssl_mode,
+ htpasswd_file => $htpasswd_file,
+ htpasswd_path => $htpasswd_path,
+ mod_security => $mod_security,
+ mod_security_relevantonly => $mod_security_relevantonly,
+ mod_security_rules_to_disable => $mod_security_rules_to_disable,
+ mod_security_additional_options => $mod_security_additional_options,
+ use_mod_macro => $use_mod_macro,
+ passing_extension => 'php',
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/vhost/php/typo3.pp b/puppet/modules/apache/manifests/vhost/php/typo3.pp
new file mode 100644
index 00000000..d9e877a6
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/php/typo3.pp
@@ -0,0 +1,150 @@
+# run_mode: controls in which mode the vhost should be run. There are different setups
+#           possible:
+#           - normal: (*default*) run vhost with the currently active worker (default: prefork);
+#                     don't set up anything special
+#           - itk: run vhost with the mpm_itk module (Incompatibility: cannot be used in combination
+#                  with 'proxy-itk' & 'static-itk' mode)
+#           - proxy-itk: run vhost with a dual prefork/itk setup, where prefork just proxies all the
+#                        requests for the itk setup, which listens only on the loopback device.
+#                        (Incompatibility: cannot be used in combination with the itk setup.)
+#           - static-itk: run vhost with a dual prefork/itk setup, where prefork serves all the static
+#                         content and proxies the dynamic calls to the itk setup, which listens only on
+#                         the loopback device (Incompatibility: cannot be used in combination with
+#                         'itk' mode)
+#
+# run_uid: the uid the vhost should run as with the itk module
+# run_gid: the gid the vhost should run as with the itk module
+#
+# mod_security: Whether to use mod_security or not (will include the mod_security module)
+# - false: don't activate mod_security
+# - true: (*default*) activate mod_security
+#
+# logmode:
+# - default: Do normal logging to CustomLog and ErrorLog
+#   - nologs: Send all logging to /dev/null
+#   - anonym: Don't log IPs in the CustomLog, send ErrorLog to /dev/null
+#   - semianonym: Don't log IPs in the CustomLog, log a normal ErrorLog
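+#
+# Example usage (a minimal sketch; the domain and the run uid/gid below are placeholders):
+#
+#   apache::vhost::php::typo3{'typo3.example.org':
+#     ssl_mode => 'force',
+#     run_mode => 'itk',
+#     run_uid  => 'typo3_run',
+#     run_gid  => 'typo3_run',
+#   }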
+define apache::vhost::php::typo3(
+ $ensure = present,
+ $configuration = {},
+ $domain = 'absent',
+ $domainalias = 'absent',
+ $server_admin = 'absent',
+ $logmode = 'default',
+ $path = 'absent',
+ $owner = root,
+ $group = apache,
+ $documentroot_owner = apache,
+ $documentroot_group = 0,
+ $documentroot_mode = '0640',
+ $run_mode = 'normal',
+ $run_uid = 'absent',
+ $run_gid = 'absent',
+ $allow_override = 'None',
+ $php_settings = {},
+ $php_options = {},
+ $do_includes = false,
+ $options = 'absent',
+ $additional_options = 'absent',
+ $default_charset = 'absent',
+ $mod_security = true,
+ $mod_security_relevantonly = true,
+ $mod_security_rules_to_disable = [],
+ $mod_security_additional_options = 'absent',
+ $ssl_mode = false,
+ $vhost_mode = 'template',
+ $template_partial = 'apache/vhosts/php_typo3/partial.erb',
+ $vhost_source = 'absent',
+ $vhost_destination = 'absent',
+ $htpasswd_file = 'absent',
+ $htpasswd_path = 'absent',
+ $manage_config = true,
+ $config_webwriteable = false,
+ $manage_directories = true,
+){
+ $documentroot = $path ? {
+ 'absent' => $::operatingsystem ? {
+ openbsd => "/var/www/htdocs/${name}/www",
+ default => "/var/www/vhosts/${name}/www"
+ },
+ default => "${path}/www"
+ }
+
+ $modsec_rules = ['960010']
+ $real_mod_security_rules_to_disable = union($mod_security_rules_to_disable,$modsec_rules)
+ if $mod_security_additional_options == 'absent' {
+ $real_mod_security_additional_options = '
+ <Location "/typo3">
+ SecRuleEngine Off
+ SecAuditEngine Off
+ </Location>
+'
+ } else {
+ $real_mod_security_additional_options = $mod_security_additional_options
+ }
+
+ $typo3_php_settings = {
+ # turn allow_url_fopen on for the extension manager fetch
+ allow_url_fopen => 'On'
+ }
+ $real_php_settings = merge($typo3_php_settings,$php_settings)
+
+ # create vhost configuration file
+ ::apache::vhost::php::webapp{$name:
+ ensure => $ensure,
+ configuration => $configuration,
+ domain => $domain,
+ domainalias => $domainalias,
+ server_admin => $server_admin,
+ logmode => $logmode,
+ path => $path,
+ owner => $owner,
+ group => $group,
+ documentroot_owner => $documentroot_owner,
+ documentroot_group => $documentroot_group,
+ documentroot_mode => $documentroot_mode,
+ run_mode => $run_mode,
+ run_uid => $run_uid,
+ run_gid => $run_gid,
+ allow_override => $allow_override,
+ php_settings => $real_php_settings,
+ php_options => $php_options,
+ do_includes => $do_includes,
+ options => $options,
+ additional_options => $additional_options,
+ default_charset => $default_charset,
+ mod_security => $mod_security,
+ mod_security_relevantonly => $mod_security_relevantonly,
+ mod_security_rules_to_disable => $real_mod_security_rules_to_disable,
+ mod_security_additional_options => $real_mod_security_additional_options,
+ ssl_mode => $ssl_mode,
+ vhost_mode => $vhost_mode,
+ template_partial => $template_partial,
+ vhost_source => $vhost_source,
+ vhost_destination => $vhost_destination,
+ htpasswd_file => $htpasswd_file,
+ htpasswd_path => $htpasswd_path,
+ manage_directories => $manage_directories,
+ managed_directories => [ "${documentroot}/typo3temp",
+ "${documentroot}/typo3temp/pics",
+ "${documentroot}/typo3temp/temp",
+ "${documentroot}/typo3temp/llxml",
+ "${documentroot}/typo3temp/cs",
+ "${documentroot}/typo3temp/GB",
+ "${documentroot}/typo3temp/locks",
+ "${documentroot}/typo3conf",
+ "${documentroot}/typo3conf/ext",
+ "${documentroot}/typo3conf/l10n",
+ # "${documentroot}/typo3/ext/", # only needed for ext manager installing global extensions
+ "${documentroot}/uploads",
+ "${documentroot}/uploads/pics",
+ "${documentroot}/uploads/media",
+ "${documentroot}/uploads/tf",
+ "${documentroot}/fileadmin",
+ "${documentroot}/fileadmin/_temp_"
+ ],
+ manage_config => $manage_config,
+ }
+
+}
+
diff --git a/puppet/modules/apache/manifests/vhost/php/webapp.pp b/puppet/modules/apache/manifests/vhost/php/webapp.pp
new file mode 100644
index 00000000..695120d0
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/php/webapp.pp
@@ -0,0 +1,148 @@
+# run_mode: controls in which mode the vhost should be run. There are different setups
+#           possible:
+#           - normal: (*default*) run vhost with the currently active worker (default: prefork);
+#                     don't set up anything special
+#           - itk: run vhost with the mpm_itk module (Incompatibility: cannot be used in combination
+#                  with 'proxy-itk' & 'static-itk' mode)
+#           - proxy-itk: run vhost with a dual prefork/itk setup, where prefork just proxies all the
+#                        requests for the itk setup, which listens only on the loopback device.
+#                        (Incompatibility: cannot be used in combination with the itk setup.)
+#           - static-itk: run vhost with a dual prefork/itk setup, where prefork serves all the static
+#                         content and proxies the dynamic calls to the itk setup, which listens only on
+#                         the loopback device (Incompatibility: cannot be used in combination with
+#                         'itk' mode)
+#
+# run_uid: the uid the vhost should run as with the itk module
+# run_gid: the gid the vhost should run as with the itk module
+#
+# mod_security: Whether to use mod_security or not (will include the mod_security module)
+# - false: don't activate mod_security
+# - true: (*default*) activate mod_security
+#
+# logmode:
+# - default: Do normal logging to CustomLog and ErrorLog
+#   - nologs: Send all logging to /dev/null
+#   - anonym: Don't log IPs in the CustomLog, send ErrorLog to /dev/null
+#   - semianonym: Don't log IPs in the CustomLog, log a normal ErrorLog
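+#
+# Example usage (a minimal sketch, as the app-specific wrappers in this module call it;
+# the partial, config file and path below are placeholders):
+#
+#   apache::vhost::php::webapp{'app.example.org':
+#     template_partial    => 'apache/vhosts/php/partial.erb',
+#     config_file         => 'config.php',
+#     managed_directories => [ '/var/www/vhosts/app.example.org/www/uploads' ],
+#   }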
+define apache::vhost::php::webapp(
+ $ensure = present,
+ $configuration = {},
+ $domain = 'absent',
+ $domainalias = 'absent',
+ $server_admin = 'absent',
+ $logmode = 'default',
+ $path = 'absent',
+ $manage_webdir = true,
+ $manage_docroot = true,
+ $owner = root,
+ $group = apache,
+ $documentroot_owner = apache,
+ $documentroot_group = 0,
+ $documentroot_mode = '0640',
+ $run_mode = 'normal',
+ $run_uid = 'absent',
+ $run_gid = 'absent',
+ $allow_override = 'None',
+ $php_settings = {},
+ $php_options = {},
+ $php_installation = 'system',
+ $do_includes = false,
+ $options = 'absent',
+ $additional_options = 'absent',
+ $default_charset = 'absent',
+ $mod_security = true,
+ $mod_security_relevantonly = true,
+ $mod_security_rules_to_disable = [],
+ $mod_security_additional_options = 'absent',
+ $ssl_mode = false,
+ $vhost_mode = 'template',
+ $template_partial,
+ $vhost_source = 'absent',
+ $vhost_destination = 'absent',
+ $htpasswd_file = 'absent',
+ $htpasswd_path = 'absent',
+ $manage_config = true,
+ $config_file = 'absent',
+ $config_webwriteable = false,
+ $manage_directories = true,
+ $managed_directories = 'absent',
+){
+ if ($ensure != 'absent') {
+ if $manage_directories and ($managed_directories != 'absent') {
+ ::apache::file::rw{ $managed_directories :
+ owner => $documentroot_owner,
+ group => $documentroot_group,
+ }
+ }
+
+ if $manage_config {
+ if $config_file == 'absent' { fail("No config file defined for ${name} on ${::fqdn}, if you'd like to manage the config, you have to add one!") }
+
+ $real_path = $path ? {
+ 'absent' => $::operatingsystem ? {
+ openbsd => "/var/www/htdocs/${name}",
+ default => "/var/www/vhosts/${name}"
+ },
+ default => $path
+ }
+ $documentroot = "${real_path}/www"
+ ::apache::vhost::file::documentrootfile{"configurationfile_${name}":
+ documentroot => $documentroot,
+ filename => $config_file,
+ thedomain => $name,
+ owner => $documentroot_owner,
+ group => $documentroot_group,
+ }
+ if $config_webwriteable {
+ Apache::Vhost::File::Documentrootfile["configurationfile_${name}"]{
+ mode => '0660',
+ }
+ } else {
+ Apache::Vhost::File::Documentrootfile["configurationfile_${name}"]{
+ mode => '0440',
+ }
+ }
+ }
+ }
+
+ # create vhost configuration file
+ ::apache::vhost::php::standard{$name:
+ ensure => $ensure,
+ configuration => $configuration,
+ domain => $domain,
+ domainalias => $domainalias,
+ server_admin => $server_admin,
+ logmode => $logmode,
+ path => $path,
+ manage_webdir => $manage_webdir,
+ manage_docroot => $manage_docroot,
+ owner => $owner,
+ group => $group,
+ documentroot_owner => $documentroot_owner,
+ documentroot_group => $documentroot_group,
+ documentroot_mode => $documentroot_mode,
+ run_mode => $run_mode,
+ run_uid => $run_uid,
+ run_gid => $run_gid,
+ allow_override => $allow_override,
+ php_settings => $php_settings,
+ php_options => $php_options,
+ php_installation => $php_installation,
+ do_includes => $do_includes,
+ options => $options,
+ additional_options => $additional_options,
+ default_charset => $default_charset,
+ mod_security => $mod_security,
+ mod_security_relevantonly => $mod_security_relevantonly,
+ mod_security_rules_to_disable => $mod_security_rules_to_disable,
+ mod_security_additional_options => $mod_security_additional_options,
+ ssl_mode => $ssl_mode,
+ vhost_mode => $vhost_mode,
+ template_partial => $template_partial,
+ vhost_source => $vhost_source,
+ vhost_destination => $vhost_destination,
+ htpasswd_file => $htpasswd_file,
+ htpasswd_path => $htpasswd_path,
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/vhost/php/wordpress.pp b/puppet/modules/apache/manifests/vhost/php/wordpress.pp
new file mode 100644
index 00000000..a6bbe434
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/php/wordpress.pp
@@ -0,0 +1,123 @@
+# run_mode: controls in which mode the vhost should be run. There are different
+#           setups possible:
+# - normal: (*default*) run vhost with the currently active worker
+#           (default: prefork); don't set up anything special
+# - itk: run vhost with the mpm_itk module (Incompatibility: cannot be used in
+#        combination with 'proxy-itk' & 'static-itk' mode)
+# - proxy-itk: run vhost with a dual prefork/itk setup, where prefork just
+#              proxies all the requests for the itk setup, which listens only
+#              on the loopback device.
+#              (Incompatibility: cannot be used in combination with the itk
+#              setup.)
+# - static-itk: run vhost with a dual prefork/itk setup, where prefork serves
+#               all the static content and proxies the dynamic calls to the
+#               itk setup, which listens only on the loopback device
+#               (Incompatibility: cannot be used in combination with
+#               'itk' mode)
+#
+# run_uid: the uid the vhost should run as with the itk module
+# run_gid: the gid the vhost should run as with the itk module
+#
+# mod_security: Whether to use mod_security or not (will include the
+#               mod_security module)
+# - false: don't activate mod_security
+# - true: (*default*) activate mod_security
+#
+# logmode:
+# - default: Do normal logging to CustomLog and ErrorLog
+#   - nologs: Send all logging to /dev/null
+#   - anonym: Don't log IPs in the CustomLog, send ErrorLog to /dev/null
+#   - semianonym: Don't log IPs in the CustomLog, log a normal ErrorLog
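+#
+# Example usage (a minimal sketch; the domain and the run uid/gid below are
+# placeholders):
+#
+#   apache::vhost::php::wordpress{'blog.example.org':
+#     run_mode => 'itk',
+#     run_uid  => 'blog_run',
+#     run_gid  => 'blog_run',
+#   }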
+define apache::vhost::php::wordpress(
+ $ensure = present,
+ $configuration = {},
+ $domain = 'absent',
+ $domainalias = 'absent',
+ $server_admin = 'absent',
+ $logmode = 'default',
+ $path = 'absent',
+ $owner = root,
+ $group = apache,
+ $documentroot_owner = apache,
+ $documentroot_group = 0,
+ $documentroot_mode = '0640',
+ $run_mode = 'normal',
+ $run_uid = 'absent',
+ $run_gid = 'absent',
+ $allow_override = 'FileInfo Indexes',
+ $php_settings = {},
+ $php_options = {},
+ $do_includes = false,
+ $options = 'absent',
+ $additional_options = 'absent',
+ $default_charset = 'absent',
+ $mod_security = true,
+ $mod_security_relevantonly = true,
+ $mod_security_rules_to_disable = [],
+ $mod_security_additional_options = 'absent',
+ $ssl_mode = false,
+ $vhost_mode = 'template',
+ $template_partial = 'apache/vhosts/php_wordpress/partial.erb',
+ $vhost_source = 'absent',
+ $vhost_destination = 'absent',
+ $htpasswd_file = 'absent',
+ $htpasswd_path = 'absent',
+ $manage_config = true,
+ $config_webwriteable = false,
+ $manage_directories = true
+){
+
+ $documentroot = $path ? {
+ 'absent' => $::operatingsystem ? {
+ 'openbsd' => "/var/www/htdocs/${name}/www",
+ default => "/var/www/vhosts/${name}/www"
+ },
+ default => "${path}/www"
+ }
+ $modsec_rules = ['960010', '950018']
+ $real_mod_security_rules_to_disable = union($mod_security_rules_to_disable,
+ $modsec_rules)
+
+ # create vhost configuration file
+ apache::vhost::php::webapp{$name:
+ ensure => $ensure,
+ configuration => $configuration,
+ domain => $domain,
+ domainalias => $domainalias,
+ server_admin => $server_admin,
+ logmode => $logmode,
+ path => $path,
+ owner => $owner,
+ group => $group,
+ documentroot_owner => $documentroot_owner,
+ documentroot_group => $documentroot_group,
+ documentroot_mode => $documentroot_mode,
+ run_mode => $run_mode,
+ run_uid => $run_uid,
+ run_gid => $run_gid,
+ allow_override => $allow_override,
+ php_settings => $php_settings,
+ php_options => $php_options,
+ do_includes => $do_includes,
+ options => $options,
+ additional_options => $additional_options,
+ default_charset => $default_charset,
+ mod_security => $mod_security,
+ mod_security_relevantonly => $mod_security_relevantonly,
+ mod_security_rules_to_disable => $real_mod_security_rules_to_disable,
+ mod_security_additional_options => $mod_security_additional_options,
+ ssl_mode => $ssl_mode,
+ vhost_mode => $vhost_mode,
+ template_partial => $template_partial,
+ vhost_source => $vhost_source,
+ vhost_destination => $vhost_destination,
+ htpasswd_file => $htpasswd_file,
+ htpasswd_path => $htpasswd_path,
+ manage_directories => $manage_directories,
+ managed_directories => [ "${documentroot}/wp-content/uploads",],
+ manage_config => $manage_config,
+ config_webwriteable => $config_webwriteable,
+ config_file => 'wp-config.php',
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/vhost/phpdirs.pp b/puppet/modules/apache/manifests/vhost/phpdirs.pp
new file mode 100644
index 00000000..5936da61
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/phpdirs.pp
@@ -0,0 +1,39 @@
+define apache::vhost::phpdirs(
+ $ensure = present,
+ $php_upload_tmp_dir,
+ $php_session_save_path,
+ $documentroot_owner = apache,
+ $documentroot_group = 0,
+ $documentroot_mode = 0750,
+ $run_mode = 'normal',
+ $run_uid = 'absent'
+){
+ case $ensure {
+ absent : {
+ file {
+ [$php_upload_tmp_dir, $php_session_save_path] :
+ ensure => absent,
+ purge => true,
+ force => true,
+ recurse => true,
+ }
+ }
+ default : {
+ include apache::defaultphpdirs
+ file {
+ [$php_upload_tmp_dir, $php_session_save_path] :
+ ensure => directory,
+ owner => $run_mode ? {
+ 'itk' => $run_uid,
+ 'static-itk' => $run_uid,
+ 'proxy-itk' => $run_uid,
+ 'fcgid' => $run_uid,
+ default => $documentroot_owner
+ },
+ group => $documentroot_group,
+ mode => $documentroot_mode ;
+ }
+ }
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/vhost/proxy.pp b/puppet/modules/apache/manifests/vhost/proxy.pp
new file mode 100644
index 00000000..95ae2059
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/proxy.pp
@@ -0,0 +1,67 @@
+# Proxy VHost
+# Parameters:
+#
+# - ensure: whether this vhost is `present` or `absent`
+# - domain: the domain to proxy (*name*)
+# - domainalias: A list of whitespace-separated domains to proxy
+# - target_url: the url to be proxied. Note: we don't want http://example.com/foobar, only example.com/foobar
+# - server_admin: the email address that is shown as responsible
+# - ssl_mode: whether this vhost supports ssl or not
+# - false: don't enable ssl for this vhost (default)
+# - true: enable ssl for this vhost
+# - force: enable ssl and redirect non-ssl to ssl
+# - only: enable ssl only
+#
+# logmode:
+#
+# - default: Do normal logging to CustomLog and ErrorLog
+# - nologs: Send all logging to /dev/null
+# - anonym: Don't log IPs in the CustomLog, send ErrorLog to /dev/null
+# - semianonym: Don't log IPs in the CustomLog, log a normal ErrorLog
+#
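+# Example usage (a minimal sketch; the domain and target below are placeholders):
+#
+#   apache::vhost::proxy{'www.example.org':
+#     target_url => 'backend.example.org:8080/',
+#     ssl_mode   => true,
+#   }
+#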
+define apache::vhost::proxy(
+ $ensure = present,
+ $configuration = {},
+ $domain = 'absent',
+ $domainalias = 'absent',
+ $htpasswd_file = 'absent',
+ $target_url,
+ $server_admin = 'absent',
+  $logmode = 'default',
+  $allow_override = 'None',
+ $mod_security = false,
+ $ssl_mode = false,
+ $mod_security_relevantonly = true,
+ $mod_security_rules_to_disable = [],
+ $mod_security_additional_options = 'absent',
+ $additional_options = 'absent'
+){
+ # create vhost configuration file
+ # we use the options field as the target_url
+ ::apache::vhost::template{$name:
+ ensure => $ensure,
+ configuration => $configuration,
+ template_partial => 'apache/vhosts/proxy/partial.erb',
+ domain => $domain,
+ path => 'really_absent',
+ path_is_webdir => true,
+ htpasswd_file => $htpasswd_file,
+ domainalias => $domainalias,
+ server_admin => $server_admin,
+ logpath => $::operatingsystem ? {
+ openbsd => '/var/www/logs',
+ centos => '/var/log/httpd',
+ default => '/var/log/apache2'
+ },
+ logmode => $logmode,
+ allow_override => $allow_override,
+ run_mode => 'normal',
+ mod_security => $mod_security,
+ mod_security_relevantonly => $mod_security_relevantonly,
+ mod_security_rules_to_disable => $mod_security_rules_to_disable,
+ mod_security_additional_options => $mod_security_additional_options,
+ options => $target_url,
+ ssl_mode => $ssl_mode,
+ additional_options => $additional_options,
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/vhost/redirect.pp b/puppet/modules/apache/manifests/vhost/redirect.pp
new file mode 100644
index 00000000..0ac40cc3
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/redirect.pp
@@ -0,0 +1,56 @@
+# Redirect VHost to redirect hosts
+# Parameters:
+#
+# - ensure: whether this vhost is `present` or `absent`
+# - domain: the domain to redirect (*name*)
+# - domainalias: A list of whitespace-separated domains to redirect
+# - target_url: the url to redirect to. Note: we don't want http://example.com/foobar, only example.com/foobar
+# - server_admin: the email address that is shown as responsible
+# - ssl_mode: whether this vhost supports ssl or not
+# - false: don't enable ssl for this vhost (default)
+# - true: enable ssl for this vhost
+# - force: enable ssl and redirect non-ssl to ssl
+# - only: enable ssl only
+#
+# logmode:
+#
+# - default: Do normal logging to CustomLog and ErrorLog
+# - nologs: Send all logging to /dev/null
+# - anonym: Don't log IPs in the CustomLog, send ErrorLog to /dev/null
+# - semianonym: Don't log IPs in the CustomLog, log a normal ErrorLog
+#
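+# Example usage (a minimal sketch; the domain and target below are placeholders):
+#
+#   apache::vhost::redirect{'old.example.org':
+#     target_url => 'www.example.org',
+#   }
+#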
+define apache::vhost::redirect(
+ $ensure = present,
+ $configuration = {},
+ $domain = 'absent',
+ $domainalias = 'absent',
+ $target_url,
+ $server_admin = 'absent',
+  $logmode = 'default',
+  $allow_override = 'None',
+ $ssl_mode = false
+){
+ # create vhost configuration file
+ # we use the options field as the target_url
+ ::apache::vhost::template{$name:
+ ensure => $ensure,
+ configuration => $configuration,
+ template_partial => 'apache/vhosts/redirect/partial.erb',
+ domain => $domain,
+ path => 'really_absent',
+ path_is_webdir => true,
+ domainalias => $domainalias,
+ server_admin => $server_admin,
+ logpath => $::operatingsystem ? {
+ openbsd => '/var/www/logs',
+ centos => '/var/log/httpd',
+ default => '/var/log/apache2'
+ },
+ logmode => $logmode,
+ allow_override => $allow_override,
+ run_mode => 'normal',
+ mod_security => false,
+ options => $target_url,
+ ssl_mode => $ssl_mode,
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/vhost/static.pp b/puppet/modules/apache/manifests/vhost/static.pp
new file mode 100644
index 00000000..f9197662
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/static.pp
@@ -0,0 +1,86 @@
+# vhost_mode: which option is chosen to deploy the vhost
+# - template: generate it from a template (default)
+# - file: deploy a vhost file (apache::vhost::file will be called directly)
+#
+# logmode:
+# - default: Do normal logging to CustomLog and ErrorLog
+#   - nologs: Send all logging to /dev/null
+#   - anonym: Don't log IPs in the CustomLog, send ErrorLog to /dev/null
+#   - semianonym: Don't log IPs in the CustomLog, log a normal ErrorLog
+#
+# mod_security: Whether to use mod_security or not (will include the mod_security module)
+# - false: (*default*) don't activate mod_security
+# - true: activate mod_security
+#
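+# Example usage (a minimal sketch; the domain below is a placeholder):
+#
+#   apache::vhost::static{'static.example.org':
+#     domainalias => 'www.static.example.org',
+#     ssl_mode    => true,
+#   }
+#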
+define apache::vhost::static(
+ $ensure = present,
+ $configuration = {},
+ $domain = 'absent',
+ $domainalias = 'absent',
+ $server_admin = 'absent',
+ $logmode = 'default',
+ $path = 'absent',
+ $owner = root,
+ $group = apache,
+ $documentroot_owner = apache,
+ $documentroot_group = 0,
+ $documentroot_mode = 0640,
+ $allow_override = 'None',
+ $do_includes = false,
+ $options = 'absent',
+ $additional_options = 'absent',
+ $default_charset = 'absent',
+ $ssl_mode = false,
+ $run_mode = 'normal',
+ $vhost_mode = 'template',
+ $template_partial = 'apache/vhosts/static/partial.erb',
+ $vhost_source = 'absent',
+ $vhost_destination = 'absent',
+ $htpasswd_file = 'absent',
+ $htpasswd_path = 'absent',
+ $mod_security = false,
+ $mod_security_relevantonly = true,
+ $mod_security_rules_to_disable = [],
+ $mod_security_additional_options = 'absent'
+){
+ # create webdir
+ ::apache::vhost::webdir{$name:
+ ensure => $ensure,
+ path => $path,
+ owner => $owner,
+ group => $group,
+ run_mode => $run_mode,
+ datadir => false,
+ documentroot_owner => $documentroot_owner,
+ documentroot_group => $documentroot_group,
+ documentroot_mode => $documentroot_mode,
+ }
+
+ # create vhost configuration file
+ ::apache::vhost{$name:
+ ensure => $ensure,
+ configuration => $configuration,
+ path => $path,
+ template_partial => $template_partial,
+ vhost_mode => $vhost_mode,
+ vhost_source => $vhost_source,
+ vhost_destination => $vhost_destination,
+ domain => $domain,
+ domainalias => $domainalias,
+ server_admin => $server_admin,
+ logmode => $logmode,
+ allow_override => $allow_override,
+ do_includes => $do_includes,
+ options => $options,
+ additional_options => $additional_options,
+ default_charset => $default_charset,
+ ssl_mode => $ssl_mode,
+ htpasswd_file => $htpasswd_file,
+ htpasswd_path => $htpasswd_path,
+ mod_security => $mod_security,
+ mod_security_relevantonly => $mod_security_relevantonly,
+ mod_security_rules_to_disable => $mod_security_rules_to_disable,
+ mod_security_additional_options => $mod_security_additional_options,
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/vhost/template.pp b/puppet/modules/apache/manifests/vhost/template.pp
new file mode 100644
index 00000000..8e9b798c
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/template.pp
@@ -0,0 +1,158 @@
+# template_partial:
+#   which template should be used to generate the type-specific part
+# of the vhost entry.
+#
+# domainalias:
+# - absent: no domainalias is set (*default*)
+# - www: domainalias is set to www.$domain
+#   - else: domainalias is set to the given value
+#
+# ssl_mode: whether this vhost supports ssl or not
+# - false: don't enable ssl for this vhost (default)
+# - true: enable ssl for this vhost
+# - force: enable ssl and redirect non-ssl to ssl
+# - only: enable ssl only
+#
+# logmode:
+# - default: Do normal logging to CustomLog and ErrorLog
+#   - nologs: Send all logging to /dev/null
+#   - anonym: Don't log IPs in the CustomLog, send ErrorLog to /dev/null
+#   - semianonym: Don't log IPs in the CustomLog, log a normal ErrorLog
+#
+# run_mode: controls in which mode the vhost should be run. There are different setups
+#           possible:
+#           - normal: (*default*) run vhost with the currently active worker (default: prefork);
+#                     don't set up anything special
+#           - itk: run vhost with the mpm_itk module (Incompatibility: cannot be used in combination
+#                  with 'proxy-itk' & 'static-itk' mode)
+#           - proxy-itk: run vhost with a dual prefork/itk setup, where prefork just proxies all the
+#                        requests for the itk setup, which listens only on the loopback device.
+#                        (Incompatibility: cannot be used in combination with the itk setup.)
+#           - static-itk: run vhost with a dual prefork/itk setup, where prefork serves all the static
+#                         content and proxies the dynamic calls to the itk setup, which listens only on
+#                         the loopback device (Incompatibility: cannot be used in combination with
+#                         'itk' mode)
+#
+# run_uid: the uid the vhost should run as with the itk module
+# run_gid: the gid the vhost should run as with the itk module
+#
+# mod_security: Whether to use mod_security or not (will include the mod_security module)
+# - false: don't activate mod_security
+# - true: (*default*) activate mod_security
+#
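+# This define is normally called through the higher-level vhost defines; a direct
+# call looks roughly like this (a minimal sketch, the domain is a placeholder):
+#
+#   apache::vhost::template{'plain.example.org':
+#     template_partial => 'apache/vhosts/static/partial.erb',
+#     domainalias      => 'www',
+#     ssl_mode         => 'force',
+#   }
+#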
+define apache::vhost::template(
+ $ensure = present,
+ $configuration = {},
+ $path = 'absent',
+ $path_is_webdir = false,
+ $logpath = 'absent',
+ $logmode = 'default',
+ $logprefix = '',
+ $domain = 'absent',
+ $domainalias = 'absent',
+ $server_admin = 'absent',
+ $allow_override = 'None',
+ $dav_db_dir = 'absent',
+ $cgi_binpath = 'absent',
+ $do_includes = false,
+ $options = 'absent',
+ $additional_options = 'absent',
+ $default_charset = 'absent',
+ $php_options = {},
+ $php_settings = {},
+ $run_mode = 'normal',
+ $run_uid = 'absent',
+ $run_gid = 'absent',
+ $template_partial = 'apache/vhosts/static/partial.erb',
+ $template_vars = {},
+ $ssl_mode = false,
+ $mod_security = true,
+ $mod_security_relevantonly = true,
+ $mod_security_rules_to_disable = [],
+ $mod_security_additional_options = 'absent',
+ $use_mod_macro = false,
+ $htpasswd_file = 'absent',
+ $htpasswd_path = 'absent',
+ $ldap_auth = false,
+ $ldap_user = 'any',
+ $passing_extension = 'absent',
+ $gempath = 'absent'
+){
+ $real_path = $path ? {
+ 'absent' => $::operatingsystem ? {
+ openbsd => "/var/www/htdocs/${name}",
+ default => "/var/www/vhosts/${name}"
+ },
+ default => $path
+ }
+
+ if $path_is_webdir {
+ $documentroot = $real_path
+ } else {
+ $documentroot = "${real_path}/www"
+ }
+ $logdir = $logpath ? {
+ 'absent' => "${real_path}/logs",
+ default => $logpath
+ }
+
+ $servername = $domain ? {
+ 'absent' => $name,
+ default => $domain
+ }
+ $serveralias = $domainalias ? {
+ 'absent' => '',
+ 'www' => "www.${servername}",
+ default => $domainalias
+ }
+ if $htpasswd_path == 'absent' {
+ $real_htpasswd_path = "/var/www/htpasswds/${name}"
+ } else {
+ $real_htpasswd_path = $htpasswd_path
+ }
+ case $run_mode {
+ 'proxy-itk': { $logfileprefix = 'proxy' }
+ 'static-itk': { $logfileprefix = 'static' }
+ }
+ case $run_mode {
+ 'fcgid','itk','proxy-itk','static-itk': {
+ case $run_uid {
+ 'absent': { fail("you have to define run_uid for ${name} on ${::fqdn}") }
+ }
+ case $run_gid {
+ 'absent': { fail("you have to define run_gid for ${name} on ${::fqdn}") }
+ }
+ }
+ }
+
+ # dav db dir
+ case $dav_db_dir {
+ 'absent': {
+ $real_dav_db_dir = "/var/www/dav_db_dir/${name}"
+ }
+ default: { $real_dav_db_dir = $dav_db_dir }
+ }
+
+ apache::vhost::file{$name:
+ configuration => $configuration,
+ ensure => $ensure,
+ do_includes => $do_includes,
+ run_mode => $run_mode,
+ ssl_mode => $ssl_mode,
+ logmode => $logmode,
+ mod_security => $mod_security,
+ htpasswd_file => $htpasswd_file,
+ htpasswd_path => $htpasswd_path,
+ use_mod_macro => $use_mod_macro,
+ }
+ if $ensure != 'absent' {
+ Apache::Vhost::File[$name]{
+ content => $run_mode ? {
+ 'proxy-itk' => template('apache/vhosts/itk_plus.erb'),
+ 'static-itk' => template('apache/vhosts/itk_plus.erb'),
+ default => template('apache/vhosts/default.erb'),
+ }
+ }
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/vhost/webdav.pp b/puppet/modules/apache/manifests/vhost/webdav.pp
new file mode 100644
index 00000000..ff9e8abc
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/webdav.pp
@@ -0,0 +1,126 @@
+# Webdav vhost: to manage webdav accessible targets
+# run_mode: controls in which mode the vhost should be run. There are different setups
+#           possible:
+#           - normal: (*default*) run vhost with the currently active worker (default: prefork);
+#                     don't set up anything special
+#           - itk: run vhost with the mpm_itk module (Incompatibility: cannot be used in combination
+#                  with 'proxy-itk' & 'static-itk' mode)
+#           - proxy-itk: run vhost with a dual prefork/itk setup, where prefork just proxies all the
+#                        requests for the itk setup, which listens only on the loopback device.
+#                        (Incompatibility: cannot be used in combination with the itk setup.)
+#           - static-itk: this mode is not possible and will be rewritten to proxy-itk
+#
+# run_uid: the uid the vhost should run as with the itk module
+# run_gid: the gid the vhost should run as with the itk module
+#
+# mod_security: Whether to use mod_security or not (will include the mod_security module)
+# - false: (*default*) don't activate mod_security
+# - true: activate mod_security
+#
+# logmode:
+# - default: Do normal logging to CustomLog and ErrorLog
+#   - nologs: Send all logging to /dev/null
+#   - anonym: Don't log IPs in the CustomLog, send ErrorLog to /dev/null
+#   - semianonym: Don't log IPs in the CustomLog, log a normal ErrorLog
+#
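+# Example usage (a minimal sketch; the domain and the run uid/gid below are placeholders):
+#
+#   apache::vhost::webdav{'dav.example.org':
+#     run_mode  => 'itk',
+#     run_uid   => 'dav_run',
+#     run_gid   => 'dav_run',
+#     ldap_auth => true,
+#   }
+#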
+define apache::vhost::webdav(
+ $ensure = present,
+ $configuration = {},
+ $domain = 'absent',
+ $domainalias = 'absent',
+ $server_admin = 'absent',
+ $path = 'absent',
+ $owner = root,
+ $group = apache,
+ $manage_webdir = true,
+ $path_is_webdir = false,
+ $logmode = 'default',
+ $logpath = 'absent',
+ $documentroot_owner = apache,
+ $documentroot_group = 0,
+ $documentroot_mode = 0640,
+ $run_mode = 'normal',
+ $run_uid = 'absent',
+ $run_gid = 'absent',
+ $options = 'absent',
+ $additional_options = 'absent',
+ $default_charset = 'absent',
+ $mod_security = false,
+ $mod_security_relevantonly = true,
+ $mod_security_rules_to_disable = [],
+ $mod_security_additional_options = 'absent',
+ $ssl_mode = false,
+ $vhost_mode = 'template',
+ $vhost_source = 'absent',
+ $vhost_destination = 'absent',
+ $htpasswd_file = 'absent',
+ $htpasswd_path = 'absent',
+ $ldap_auth = false,
+ $ldap_user = 'any',
+ $dav_db_dir = 'absent'
+){
+ ::apache::vhost::davdbdir{$name:
+ ensure => $ensure,
+ dav_db_dir => $dav_db_dir,
+ documentroot_owner => $documentroot_owner,
+ documentroot_group => $documentroot_group,
+ documentroot_mode => $documentroot_mode,
+ run_mode => $run_mode,
+ run_uid => $run_uid,
+ }
+
+ if $manage_webdir {
+ # create webdir
+ ::apache::vhost::webdir{$name:
+ ensure => $ensure,
+ path => $path,
+ owner => $owner,
+ group => $group,
+ run_mode => $run_mode,
+ datadir => false,
+ documentroot_owner => $documentroot_owner,
+ documentroot_group => $documentroot_group,
+ documentroot_mode => $documentroot_mode,
+ }
+ }
+
+ if $run_mode == 'static-itk' {
+ notice('static-itk mode is not possible for webdav vhosts, rewriting it to proxy-itk')
+ $real_run_mode = 'proxy-itk'
+ } else {
+ $real_run_mode = $run_mode
+ }
+
+ # create vhost configuration file
+ ::apache::vhost{$name:
+ ensure => $ensure,
+ configuration => $configuration,
+ path => $path,
+ path_is_webdir => $path_is_webdir,
+ logpath => $logpath,
+ logmode => $logmode,
+ template_partial => 'apache/vhosts/webdav/partial.erb',
+ vhost_mode => $vhost_mode,
+ vhost_source => $vhost_source,
+ vhost_destination => $vhost_destination,
+ domain => $domain,
+ domainalias => $domainalias,
+ server_admin => $server_admin,
+ run_mode => $real_run_mode,
+ run_uid => $run_uid,
+ run_gid => $run_gid,
+ options => $options,
+ additional_options => $additional_options,
+ default_charset => $default_charset,
+ ssl_mode => $ssl_mode,
+ htpasswd_file => $htpasswd_file,
+ htpasswd_path => $htpasswd_path,
+ ldap_auth => $ldap_auth,
+ ldap_user => $ldap_user,
+ mod_security => $mod_security,
+ mod_security_relevantonly => $mod_security_relevantonly,
+ mod_security_rules_to_disable => $mod_security_rules_to_disable,
+ mod_security_additional_options => $mod_security_additional_options,
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/vhost/webdir.pp b/puppet/modules/apache/manifests/vhost/webdir.pp
new file mode 100644
index 00000000..e0e25464
--- /dev/null
+++ b/puppet/modules/apache/manifests/vhost/webdir.pp
@@ -0,0 +1,130 @@
+# create webdir
+define apache::vhost::webdir(
+ $ensure = present,
+ $path = 'absent',
+ $owner = root,
+ $group = apache,
+ $mode = 0640,
+ $run_mode = 'normal',
+ $manage_docroot = true,
+ $datadir = true,
+ $documentroot_owner = root,
+ $documentroot_group = apache,
+ $documentroot_mode = 0640,
+ $documentroot_recurse = false
+){
+ $real_path = $path ? {
+ 'absent' => $::operatingsystem ? {
+ openbsd => "/var/www/htdocs/${name}",
+ default => "/var/www/vhosts/${name}"
+ },
+ default => $path
+ }
+
+ if (($run_mode =~ /^(static\-|proxy\-)?itk$/) or $run_mode == 'fcgid') and ($mode == '0640'){
+ $real_mode = 0644
+ } else {
+ $real_mode = $mode
+ }
+
+ $documentroot = "${real_path}/www"
+ $logdir = "${real_path}/logs"
+
+ if $owner == 'apache' {
+ $real_owner = $::operatingsystem ? {
+ openbsd => 'www',
+ debian => 'www-data',
+ default => $owner
+ }
+ } else {
+ $real_owner = $owner
+ }
+ if $group == 'apache' {
+ $real_group = $::operatingsystem ? {
+ openbsd => 'www',
+ debian => 'www-data',
+ default => $group
+ }
+ } else {
+ $real_group = $group
+ }
+
+ if $documentroot_owner == 'apache' {
+ $real_documentroot_owner = $::operatingsystem ? {
+ openbsd => 'www',
+ debian => 'www-data',
+ default => $documentroot_owner
+ }
+ } else {
+ $real_documentroot_owner = $documentroot_owner
+ }
+ if $documentroot_group == 'apache' {
+ $real_documentroot_group = $::operatingsystem ? {
+ openbsd => 'www',
+ debian => 'www-data',
+ default => $documentroot_group
+ }
+ } else {
+ $real_documentroot_group = $documentroot_group
+ }
+ case $ensure {
+ absent: {
+ exec{"cleanup_webdir_${real_path}":
+ command => "rm -rf ${real_path}",
+ onlyif => "test -d ${real_path}",
+ before => File[$real_path],
+ }
+ file{$real_path:
+ ensure => absent,
+ purge => true,
+ recurse => true,
+ force => true;
+ }
+ }
+ default: {
+ file{
+ $real_path:
+ ensure => directory,
+ require => Anchor['apache::basic_dirs::ready'],
+ owner => $real_owner,
+ group => $real_group,
+ mode => $real_mode;
+ $logdir:
+ ensure => directory,
+ before => Service['apache'],
+ owner => $real_documentroot_owner,
+ group => $real_documentroot_group,
+ mode => '0660';
+ "${real_path}/private":
+ ensure => directory,
+ owner => $real_documentroot_owner,
+ group => $real_documentroot_group,
+ mode => '0600';
+ }
+ if $manage_docroot {
+ file{$documentroot:
+ ensure => directory,
+ before => Service['apache'],
+ recurse => $documentroot_recurse,
+ owner => $real_documentroot_owner,
+ group => $real_documentroot_group,
+ mode => $documentroot_mode;
+ }
+ }
+ if $datadir {
+ file{"${real_path}/data":
+ ensure => directory,
+ owner => $real_documentroot_owner,
+ group => $real_documentroot_group,
+ mode => '0640';
+ }
+ }
+ case $::operatingsystem {
+ centos: { include apache::logrotate::centos::vhosts }
+ default: { #nothing
+ }
+ }
+ }
+ }
+}
+
diff --git a/puppet/modules/apache/manifests/webdav.pp b/puppet/modules/apache/manifests/webdav.pp
new file mode 100644
index 00000000..75219c90
--- /dev/null
+++ b/puppet/modules/apache/manifests/webdav.pp
@@ -0,0 +1,8 @@
+# manifests/webdav.pp
+
+class apache::webdav {
+ file{'/var/www/webdavlock':
+ ensure => directory,
+ owner => apache, group => 0, mode => 0700;
+ }
+}
diff --git a/puppet/modules/apache/manifests/worker.pp b/puppet/modules/apache/manifests/worker.pp
new file mode 100644
index 00000000..9a7b3be4
--- /dev/null
+++ b/puppet/modules/apache/manifests/worker.pp
@@ -0,0 +1,5 @@
+class apache::worker inherits apache {
+ case $::operatingsystem {
+ centos: { include ::apache::centos::worker }
+ }
+}
diff --git a/puppet/modules/apache/spec/classes/init_spec.rb b/puppet/modules/apache/spec/classes/init_spec.rb
new file mode 100644
index 00000000..baf26470
--- /dev/null
+++ b/puppet/modules/apache/spec/classes/init_spec.rb
@@ -0,0 +1,43 @@
+require File.expand_path(File.join(File.dirname(__FILE__),'../spec_helper'))
+
+describe 'apache', :type => 'class' do
+ describe 'with standard' do
+ #puppet-rspec bug
+ #it { should compile.with_all_deps }
+
+ it { should contain_class('apache::base') }
+ it { should_not contain_class('apache::status') }
+ it { should_not contain_class('shorewall::rules::http') }
+ it { should_not contain_class('apache::ssl') }
+ context 'on centos' do
+ let(:facts) {
+ {
+ :operatingsystem => 'CentOS',
+ }
+ }
+ it { should contain_class('apache::centos') }
+ end
+ end
+ describe 'with params' do
+ let(:facts) {
+ {
+ :concat_basedir => '/var/lib/puppet/concat'
+ }
+ }
+ let(:params){
+ {
+ :manage_shorewall => true,
+ # there is puppet-librarian bug in using that module
+ #:manage_munin => true,
+ :ssl => true,
+ }
+ }
+ #puppet-rspec bug
+ #it { should compile.with_all_deps }
+
+ it { should contain_class('apache::base') }
+ it { should_not contain_class('apache::status') }
+ it { should contain_class('shorewall::rules::http') }
+ it { should contain_class('apache::ssl') }
+ end
+end
diff --git a/puppet/modules/apache/spec/defines/vhost_file_spec.rb b/puppet/modules/apache/spec/defines/vhost_file_spec.rb
new file mode 100644
index 00000000..ed9ac5e2
--- /dev/null
+++ b/puppet/modules/apache/spec/defines/vhost_file_spec.rb
@@ -0,0 +1,131 @@
+require File.expand_path(File.join(File.dirname(__FILE__),'../spec_helper'))
+
+describe 'apache::vhost::file', :type => 'define' do
+ let(:title){ 'example.com' }
+ let(:facts){
+ {
+ :fqdn => 'apache.example.com',
+ }
+ }
+ let(:pre_condition) {
+ 'include apache'
+ }
+ describe 'with standard' do
+ it { should contain_file('example.com.conf').with(
+ :ensure => 'present',
+ :source => [ "puppet:///modules/site_apache/vhosts.d/apache.example.com/example.com.conf",
+ "puppet:///modules/site_apache/vhosts.d//example.com.conf",
+ "puppet:///modules/site_apache/vhosts.d/./example.com.conf",
+ "puppet:///modules/site_apache/vhosts.d//example.com.conf",
+ "puppet:///modules/site_apache/vhosts.d/example.com.conf",
+ "puppet:///modules/apache/vhosts.d/./example.com.conf",
+ "puppet:///modules/apache/vhosts.d//example.com.conf",
+ "puppet:///modules/apache/vhosts.d/example.com.conf" ],
+ :path => '/etc/apache2/vhosts.d/example.com.conf',
+ :require => 'File[vhosts_dir]',
+ :notify => 'Service[apache]',
+ :owner => 'root',
+ :group => 0,
+ :mode => '0644',
+ )}
+ it { should_not contain_file('/var/www/htpasswds/example.com') }
+ it { should_not contain_class('apache::includes') }
+ it { should_not contain_class('apache::mod_macro') }
+ it { should_not contain_class('apache::noiplog') }
+ it { should_not contain_class('apache::itk::lock') }
+ it { should_not contain_class('mod_security::itk_plus') }
+ it { should_not contain_class('mod_security') }
+ end
+ context 'on centos' do
+ let(:facts){
+ {
+ :fqdn => 'apache.example.com',
+ :operatingsystem => 'CentOS',
+ :operatingsystemmajrelease => '7',
+ }
+ }
+ it { should contain_file('example.com.conf').with(
+ :ensure => 'present',
+ :source => [ "puppet:///modules/site_apache/vhosts.d/apache.example.com/example.com.conf",
+ "puppet:///modules/site_apache/vhosts.d//example.com.conf",
+ "puppet:///modules/site_apache/vhosts.d/CentOS.7/example.com.conf",
+ "puppet:///modules/site_apache/vhosts.d/CentOS/example.com.conf",
+ "puppet:///modules/site_apache/vhosts.d/example.com.conf",
+ "puppet:///modules/apache/vhosts.d/CentOS.7/example.com.conf",
+ "puppet:///modules/apache/vhosts.d/CentOS/example.com.conf",
+ "puppet:///modules/apache/vhosts.d/example.com.conf" ],
+ :path => '/etc/httpd/vhosts.d/example.com.conf',
+ :require => 'File[vhosts_dir]',
+ :notify => 'Service[apache]',
+ :owner => 'root',
+ :group => 0,
+ :mode => '0644',
+ )}
+ it { should_not contain_file('/var/www/htpasswds/example.com') }
+ it { should_not contain_class('apache::includes') }
+ it { should_not contain_class('apache::mod_macro') }
+ it { should_not contain_class('apache::noiplog') }
+ it { should_not contain_class('apache::itk::lock') }
+ it { should_not contain_class('mod_security::itk_plus') }
+ it { should_not contain_class('mod_security') }
+ context 'with params' do
+ let(:params) {
+ {
+ :vhost_destination => '/tmp/a/example.com.conf',
+ :vhost_source => 'modules/my_module/example.com.conf',
+ :htpasswd_file => true,
+ :do_includes => true,
+ :mod_security => true,
+ :use_mod_macro => true,
+ :logmode => 'anonym',
+ }
+ }
+ it { should contain_file('example.com.conf').with(
+ :ensure => 'present',
+ :source => 'puppet:///modules/my_module/example.com.conf',
+ :path => '/tmp/a/example.com.conf',
+ :require => 'File[vhosts_dir]',
+ :notify => 'Service[apache]',
+ :owner => 'root',
+ :group => 0,
+ :mode => '0644',
+ )}
+ it { should contain_file('/var/www/htpasswds/example.com').with(
+ :source => [ "puppet:///modules/site_apache/htpasswds/apache.example.com/example.com",
+ "puppet:///modules/site_apache/htpasswds//example.com",
+ "puppet:///modules/site_apache/htpasswds/example.com" ],
+ :owner => 'root',
+ :group => 0,
+ :mode => '0644',
+ )}
+ it { should contain_class('apache::includes') }
+ it { should contain_class('apache::mod_macro') }
+ it { should contain_class('apache::noiplog') }
+ it { should_not contain_class('apache::itk::lock') }
+ it { should_not contain_class('mod_security::itk_plus') }
+ it { should contain_class('mod_security') }
+ end
+ context 'with content' do
+ let(:params) {
+ {
+ :content => "<VirtualHost *:80>\n Servername example.com\n</VirtualHost>"
+ }
+ }
+ it { should contain_file('example.com.conf').with(
+ :ensure => 'present',
+ :path => '/etc/httpd/vhosts.d/example.com.conf',
+ :require => 'File[vhosts_dir]',
+ :notify => 'Service[apache]',
+ :owner => 'root',
+ :group => 0,
+ :mode => '0644',
+ )}
+ it { should contain_file('example.com.conf').with_content(
+"<VirtualHost *:80>
+ Servername example.com
+</VirtualHost>"
+ )}
+ it { should_not contain_file('/var/www/htpasswds/example.com') }
+ end
+ end
+end
diff --git a/puppet/modules/apache/spec/defines/vhost_php_drupal_spec.rb b/puppet/modules/apache/spec/defines/vhost_php_drupal_spec.rb
new file mode 100644
index 00000000..5256746d
--- /dev/null
+++ b/puppet/modules/apache/spec/defines/vhost_php_drupal_spec.rb
@@ -0,0 +1,187 @@
+require File.expand_path(File.join(File.dirname(__FILE__),'../spec_helper'))
+
+describe 'apache::vhost::php::drupal', :type => 'define' do
+ let(:title){ 'example.com' }
+ let(:facts){
+ {
+ :fqdn => 'apache.example.com',
+ :operatingsystem => 'CentOS',
+ :operatingsystemmajrelease => '7',
+ }
+ }
+ describe 'with standard' do
+ it { should contain_file('/etc/cron.d/drupal_cron_example.com').with(
+ :content => "0 * * * * apache wget -O - -q -t 1 http://example.com/cron.php\n",
+ :owner => 'root',
+ :group => 0,
+ :mode => '0644',
+ )}
+ # only test the differences from the default
+ it { should contain_apache__vhost__php__webapp('example.com').with(
+ :manage_directories => false,
+ :template_partial => 'apache/vhosts/php_drupal/partial.erb',
+ :manage_config => false,
+ :php_settings => {
+ 'magic_quotes_gpc' => 0,
+ 'register_globals' => 0,
+ 'session.auto_start' => 0,
+ 'mbstring.http_input' => 'pass',
+ 'mbstring.http_output' => 'pass',
+ 'mbstring.encoding_translation' => 0,
+ }
+ )}
+ # go deeper in the catalog and test the produced template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+ DirectoryIndex index.htm index.html index.php
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+
+
+ php_admin_flag engine on
+ php_admin_value error_log /var/www/vhosts/example.com/logs/php_error_log
+ php_admin_value magic_quotes_gpc 0
+ php_admin_value mbstring.encoding_translation 0
+ php_admin_value mbstring.http_input pass
+ php_admin_value mbstring.http_output pass
+ php_admin_value open_basedir /var/www/vhosts/example.com/www:/var/www/vhosts/example.com/data:/var/www/upload_tmp_dir/example.com:/var/www/session.save_path/example.com
+ php_admin_value register_globals 0
+ php_admin_flag safe_mode on
+ php_admin_value session.auto_start 0
+ php_admin_value session.save_path /var/www/session.save_path/example.com
+ php_admin_value upload_tmp_dir /var/www/upload_tmp_dir/example.com
+
+ # Protect files and directories from prying eyes.
+ <FilesMatch \"\\.(engine|inc|info|install|module|profile|po|sh|.*sql|theme|tpl(\\.php)?|xtmpl)$|^(code-style\\.pl|Entries.*|Repository|Root|Tag|Template)$\">
+ Order allow,deny
+ </FilesMatch>
+
+ # Customized error messages.
+ ErrorDocument 404 /index.php
+
+ RewriteEngine on
+ RewriteCond %{REQUEST_FILENAME} !-f
+ RewriteCond %{REQUEST_FILENAME} !-d
+ RewriteRule ^(.*)$ index.php?q=$1 [L,QSA]
+ </Directory>
+ <Directory \"/var/www/vhosts/example.com/www/files/\">
+ SetHandler Drupal_Security_Do_Not_Remove_See_SA_2006_006
+ Options None
+ Options +FollowSymLinks
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine On
+ SecAuditEngine RelevantOnly
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+ describe 'with mod_fcgid' do
+ let(:params){
+ {
+ :run_mode => 'fcgid',
+ :run_uid => 'foo',
+ :run_gid => 'bar',
+ }
+ }
+ it { should contain_file('/etc/cron.d/drupal_cron_example.com').with(
+ :content => "0 * * * * apache wget -O - -q -t 1 http://example.com/cron.php\n",
+ :owner => 'root',
+ :group => 0,
+ :mode => '0644',
+ )}
+ # only test variables that are tuned
+ it { should contain_apache__vhost__php__webapp('example.com').with(
+ :run_mode => 'fcgid',
+ :run_uid => 'foo',
+ :run_gid => 'bar',
+ :manage_directories => false,
+ :template_partial => 'apache/vhosts/php_drupal/partial.erb',
+ :manage_config => false,
+ :php_settings => {
+ 'magic_quotes_gpc' => 0,
+ 'register_globals' => 0,
+ 'session.auto_start' => 0,
+ 'mbstring.http_input' => 'pass',
+ 'mbstring.http_output' => 'pass',
+ 'mbstring.encoding_translation' => 0,
+ },
+ )}
+ # go deeper in the catalog and test the produced template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+ DirectoryIndex index.htm index.html index.php
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <IfModule mod_fcgid.c>
+ SuexecUserGroup foo bar
+ FcgidMaxRequestsPerProcess 5000
+ FCGIWrapper /var/www/mod_fcgid-starters/example.com/example.com-starter .php
+ AddHandler fcgid-script .php
+ </IfModule>
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+ Options +ExecCGI
+
+
+ # Protect files and directories from prying eyes.
+ <FilesMatch \"\\.(engine|inc|info|install|module|profile|po|sh|.*sql|theme|tpl(\\.php)?|xtmpl)$|^(code-style\\.pl|Entries.*|Repository|Root|Tag|Template)$\">
+ Order allow,deny
+ </FilesMatch>
+
+ # Customized error messages.
+ ErrorDocument 404 /index.php
+
+ RewriteEngine on
+ RewriteCond %{REQUEST_FILENAME} !-f
+ RewriteCond %{REQUEST_FILENAME} !-d
+ RewriteRule ^(.*)$ index.php?q=$1 [L,QSA]
+ </Directory>
+ <Directory \"/var/www/vhosts/example.com/www/files/\">
+ SetHandler Drupal_Security_Do_Not_Remove_See_SA_2006_006
+ Options None
+ Options +FollowSymLinks
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine On
+ SecAuditEngine RelevantOnly
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+end
diff --git a/puppet/modules/apache/spec/defines/vhost_php_gallery2_spec.rb b/puppet/modules/apache/spec/defines/vhost_php_gallery2_spec.rb
new file mode 100644
index 00000000..9f2325e9
--- /dev/null
+++ b/puppet/modules/apache/spec/defines/vhost_php_gallery2_spec.rb
@@ -0,0 +1,162 @@
+require File.expand_path(File.join(File.dirname(__FILE__),'../spec_helper'))
+
+describe 'apache::vhost::php::gallery2', :type => 'define' do
+ let(:title){ 'example.com' }
+ let(:facts){
+ {
+ :fqdn => 'apache.example.com',
+ :operatingsystem => 'CentOS',
+ :operatingsystemmajrelease => '7',
+ }
+ }
+ describe 'with standard' do
+ # only test the differences from the default
+ it { should contain_apache__vhost__php__webapp('example.com').with(
+ :manage_directories => true,
+ :template_partial => 'apache/vhosts/php_gallery2/partial.erb',
+ :php_settings => {
+ 'safe_mode' => 'Off',
+ 'output_buffering' => 'Off',
+ },
+ :manage_config => true,
+ :config_webwriteable => false,
+ :config_file => 'config.php',
+ )}
+ it { should contain_file('/var/www/vhosts/example.com/data/upload').with(
+ :ensure => 'directory',
+ :owner => 'apache',
+ :group => 0,
+ :mode => '0660',
+ )}
+ it { should contain_file('/var/www/vhosts/example.com/data/gdata').with(
+ :ensure => 'directory',
+ :owner => 'apache',
+ :group => 0,
+ :mode => '0660',
+ )}
+ # go deeper in the catalog and test the produced template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+ DirectoryIndex index.htm index.html index.php
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+
+ php_admin_flag engine on
+ php_admin_value error_log /var/www/vhosts/example.com/logs/php_error_log
+ php_admin_value open_basedir /var/www/vhosts/example.com/www:/var/www/vhosts/example.com/data:/var/www/upload_tmp_dir/example.com:/var/www/session.save_path/example.com
+ php_admin_flag output_buffering off
+ php_admin_flag safe_mode off
+ php_admin_value session.save_path /var/www/session.save_path/example.com
+ php_admin_value upload_tmp_dir /var/www/upload_tmp_dir/example.com
+
+
+
+ # Always rewrite login's
+ # Source: http://gallery.menalto.com/node/30558
+ RewriteEngine On
+ RewriteCond %{HTTPS} !=on
+ RewriteCond %{HTTP:X-Forwarded-Proto} !=https
+ RewriteCond %{HTTP_COOKIE} ^GALLERYSID= [OR]
+ RewriteCond %{QUERY_STRING} subView=core\\.UserLogin
+ RewriteRule ^ https://%{HTTP_HOST}%{REQUEST_URI} [NE,R,L]
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine Off
+ SecAuditEngine Off
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+ describe 'with mod_fcgid' do
+ let(:params){
+ {
+ :run_mode => 'fcgid',
+ :run_uid => 'foo',
+ :run_gid => 'bar',
+ }
+ }
+ # only test variables that are tuned
+ it { should contain_apache__vhost__php__webapp('example.com').with(
+ :run_mode => 'fcgid',
+ :run_uid => 'foo',
+ :run_gid => 'bar',
+ :template_partial => 'apache/vhosts/php_gallery2/partial.erb',
+ :php_settings => {
+ 'safe_mode' => 'Off',
+ 'output_buffering' => 'Off',
+ },
+ :manage_directories => true,
+ :manage_config => true,
+ :config_webwriteable => false,
+ :config_file => 'config.php',
+ )}
+ # go deeper in the catalog and test the produced template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+ DirectoryIndex index.htm index.html index.php
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <IfModule mod_fcgid.c>
+ SuexecUserGroup foo bar
+ FcgidMaxRequestsPerProcess 5000
+ FCGIWrapper /var/www/mod_fcgid-starters/example.com/example.com-starter .php
+ AddHandler fcgid-script .php
+ </IfModule>
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+ Options +ExecCGI
+
+
+
+ # Always rewrite login's
+ # Source: http://gallery.menalto.com/node/30558
+ RewriteEngine On
+ RewriteCond %{HTTPS} !=on
+ RewriteCond %{HTTP:X-Forwarded-Proto} !=https
+ RewriteCond %{HTTP_COOKIE} ^GALLERYSID= [OR]
+ RewriteCond %{QUERY_STRING} subView=core\\.UserLogin
+ RewriteRule ^ https://%{HTTP_HOST}%{REQUEST_URI} [NE,R,L]
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine Off
+ SecAuditEngine Off
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+end
diff --git a/puppet/modules/apache/spec/defines/vhost_php_joomla_spec.rb b/puppet/modules/apache/spec/defines/vhost_php_joomla_spec.rb
new file mode 100644
index 00000000..000154de
--- /dev/null
+++ b/puppet/modules/apache/spec/defines/vhost_php_joomla_spec.rb
@@ -0,0 +1,279 @@
+require File.expand_path(File.join(File.dirname(__FILE__),'../spec_helper'))
+
+describe 'apache::vhost::php::joomla', :type => 'define' do
+ let(:title){ 'example.com' }
+ let(:facts){
+ {
+ :fqdn => 'apache.example.com',
+ :operatingsystem => 'CentOS',
+ :operatingsystemmajrelease => '7',
+ }
+ }
+ describe 'with standard' do
+ it { should contain_class('apache::include::joomla') }
+ # only test the differences from the default
+ it { should contain_apache__vhost__php__webapp('example.com').with(
+ :template_partial => 'apache/vhosts/php_joomla/partial.erb',
+ :php_settings => {
+ 'allow_url_fopen' => 'on',
+ 'allow_url_include' => 'off',
+ },
+ :manage_config => true,
+ :config_webwriteable => false,
+ :config_file => 'configuration.php',
+ :manage_directories => true,
+ :managed_directories => [ "/var/www/vhosts/example.com/www/administrator/backups",
+ "/var/www/vhosts/example.com/www/administrator/components",
+ "/var/www/vhosts/example.com/www/administrator/language",
+ "/var/www/vhosts/example.com/www/administrator/modules",
+ "/var/www/vhosts/example.com/www/administrator/templates",
+ "/var/www/vhosts/example.com/www/components",
+ "/var/www/vhosts/example.com/www/dmdocuments",
+ "/var/www/vhosts/example.com/www/images",
+ "/var/www/vhosts/example.com/www/language",
+ "/var/www/vhosts/example.com/www/media",
+ "/var/www/vhosts/example.com/www/modules",
+ "/var/www/vhosts/example.com/www/plugins",
+ "/var/www/vhosts/example.com/www/templates",
+ "/var/www/vhosts/example.com/www/cache",
+ "/var/www/vhosts/example.com/www/tmp",
+ "/var/www/vhosts/example.com/www/administrator/cache" ],
+ :mod_security_additional_options => "
+ # http://optics.csufresno.edu/~kriehn/fedora/fedora_files/f9/howto/modsecurity.html
+ # Exceptions for Joomla Root Directory
+ <LocationMatch \"^/\">
+ SecRuleRemoveById 950013
+ </LocationMatch>
+
+ # Exceptions for Joomla Administration Panel
+ SecRule REQUEST_FILENAME \"/administrator/index2.php\" \"id:1199400,allow,phase:1,nolog,ctl:ruleEngine=Off\"
+
+ # Exceptions for Joomla Component Expose
+ <LocationMatch \"^/components/com_expose/expose/manager/amfphp/gateway.php\">
+ SecRuleRemoveById 960010
+ </LocationMatch>
+"
+ )}
+ # go deeper in the catalog and test the produced template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+ DirectoryIndex index.htm index.html index.php
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+
+ php_admin_flag allow_url_fopen on
+ php_admin_flag allow_url_include off
+ php_admin_flag engine on
+ php_admin_value error_log /var/www/vhosts/example.com/logs/php_error_log
+ php_admin_value open_basedir /var/www/vhosts/example.com/www:/var/www/vhosts/example.com/data:/var/www/upload_tmp_dir/example.com:/var/www/session.save_path/example.com
+ php_admin_flag safe_mode on
+ php_admin_value session.save_path /var/www/session.save_path/example.com
+ php_admin_value upload_tmp_dir /var/www/upload_tmp_dir/example.com
+
+
+
+ Include include.d/joomla.inc
+ </Directory>
+
+ <Directory \"/var/www/vhosts/example.com/www/administrator/\">
+ RewriteEngine on
+
+ # Rewrite URLs to https that go for the admin area
+ RewriteCond %{REMOTE_ADDR} !^127\\.[0-9]+\\.[0-9]+\\.[0-9]+$
+ RewriteCond %{HTTPS} !=on
+ RewriteCond %{REQUEST_URI} (.*/administrator/.*)
+ RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} [R]
+ </Directory>
+
+ # Deny various directories that
+ # shouldn't be webaccessible
+ <Directory \"/var/www/vhosts/example.com/www/tmp/\">
+ Deny From All
+ </Directory>
+ <Directory \"/var/www/vhosts/example.com/www/logs/\">
+ Deny From All
+ </Directory>
+ <Directory \"/var/www/vhosts/example.com/www/cli/\">
+ Deny From All
+ </Directory>
+
+
+ <IfModule mod_security2.c>
+ SecRuleEngine On
+ SecAuditEngine RelevantOnly
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+
+
+ # http://optics.csufresno.edu/~kriehn/fedora/fedora_files/f9/howto/modsecurity.html
+ # Exceptions for Joomla Root Directory
+ <LocationMatch \"^/\">
+ SecRuleRemoveById 950013
+ </LocationMatch>
+
+ # Exceptions for Joomla Administration Panel
+ SecRule REQUEST_FILENAME \"/administrator/index2.php\" \"id:1199400,allow,phase:1,nolog,ctl:ruleEngine=Off\"
+
+ # Exceptions for Joomla Component Expose
+ <LocationMatch \"^/components/com_expose/expose/manager/amfphp/gateway.php\">
+ SecRuleRemoveById 960010
+ </LocationMatch>
+
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+ describe 'with mod_fcgid' do
+ let(:params){
+ {
+ :run_mode => 'fcgid',
+ :run_uid => 'foo',
+ :run_gid => 'bar',
+ }
+ }
+ it { should contain_class('apache::include::joomla') }
+ # only test the differences from the default
+ it { should contain_apache__vhost__php__webapp('example.com').with(
+ :run_mode => 'fcgid',
+ :run_uid => 'foo',
+ :run_gid => 'bar',
+ :template_partial => 'apache/vhosts/php_joomla/partial.erb',
+ :php_settings => {
+ 'allow_url_fopen' => 'on',
+ 'allow_url_include' => 'off',
+ },
+ :manage_config => true,
+ :config_webwriteable => false,
+ :config_file => 'configuration.php',
+ :manage_directories => true,
+ :managed_directories => [ "/var/www/vhosts/example.com/www/administrator/backups",
+ "/var/www/vhosts/example.com/www/administrator/components",
+ "/var/www/vhosts/example.com/www/administrator/language",
+ "/var/www/vhosts/example.com/www/administrator/modules",
+ "/var/www/vhosts/example.com/www/administrator/templates",
+ "/var/www/vhosts/example.com/www/components",
+ "/var/www/vhosts/example.com/www/dmdocuments",
+ "/var/www/vhosts/example.com/www/images",
+ "/var/www/vhosts/example.com/www/language",
+ "/var/www/vhosts/example.com/www/media",
+ "/var/www/vhosts/example.com/www/modules",
+ "/var/www/vhosts/example.com/www/plugins",
+ "/var/www/vhosts/example.com/www/templates",
+ "/var/www/vhosts/example.com/www/cache",
+ "/var/www/vhosts/example.com/www/tmp",
+ "/var/www/vhosts/example.com/www/administrator/cache" ],
+ :mod_security_additional_options => "
+ # http://optics.csufresno.edu/~kriehn/fedora/fedora_files/f9/howto/modsecurity.html
+ # Exceptions for Joomla Root Directory
+ <LocationMatch \"^/\">
+ SecRuleRemoveById 950013
+ </LocationMatch>
+
+ # Exceptions for Joomla Administration Panel
+ SecRule REQUEST_FILENAME \"/administrator/index2.php\" \"id:1199400,allow,phase:1,nolog,ctl:ruleEngine=Off\"
+
+ # Exceptions for Joomla Component Expose
+ <LocationMatch \"^/components/com_expose/expose/manager/amfphp/gateway.php\">
+ SecRuleRemoveById 960010
+ </LocationMatch>
+"
+ )}
+ # go deeper in the catalog and test the produced template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+ DirectoryIndex index.htm index.html index.php
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <IfModule mod_fcgid.c>
+ SuexecUserGroup foo bar
+ FcgidMaxRequestsPerProcess 5000
+ FCGIWrapper /var/www/mod_fcgid-starters/example.com/example.com-starter .php
+ AddHandler fcgid-script .php
+ </IfModule>
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+ Options +ExecCGI
+
+
+
+ Include include.d/joomla.inc
+ </Directory>
+
+ <Directory \"/var/www/vhosts/example.com/www/administrator/\">
+ RewriteEngine on
+
+ # Rewrite URLs to https that go for the admin area
+ RewriteCond %{REMOTE_ADDR} !^127\\.[0-9]+\\.[0-9]+\\.[0-9]+$
+ RewriteCond %{HTTPS} !=on
+ RewriteCond %{REQUEST_URI} (.*/administrator/.*)
+ RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} [R]
+ </Directory>
+
+ # Deny various directories that
+ # shouldn't be webaccessible
+ <Directory \"/var/www/vhosts/example.com/www/tmp/\">
+ Deny From All
+ </Directory>
+ <Directory \"/var/www/vhosts/example.com/www/logs/\">
+ Deny From All
+ </Directory>
+ <Directory \"/var/www/vhosts/example.com/www/cli/\">
+ Deny From All
+ </Directory>
+
+
+ <IfModule mod_security2.c>
+ SecRuleEngine On
+ SecAuditEngine RelevantOnly
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+
+
+ # http://optics.csufresno.edu/~kriehn/fedora/fedora_files/f9/howto/modsecurity.html
+ # Exceptions for Joomla Root Directory
+ <LocationMatch \"^/\">
+ SecRuleRemoveById 950013
+ </LocationMatch>
+
+ # Exceptions for Joomla Administration Panel
+ SecRule REQUEST_FILENAME \"/administrator/index2.php\" \"id:1199400,allow,phase:1,nolog,ctl:ruleEngine=Off\"
+
+ # Exceptions for Joomla Component Expose
+ <LocationMatch \"^/components/com_expose/expose/manager/amfphp/gateway.php\">
+ SecRuleRemoveById 960010
+ </LocationMatch>
+
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+end
diff --git a/puppet/modules/apache/spec/defines/vhost_php_standard_spec.rb b/puppet/modules/apache/spec/defines/vhost_php_standard_spec.rb
new file mode 100644
index 00000000..159d4b81
--- /dev/null
+++ b/puppet/modules/apache/spec/defines/vhost_php_standard_spec.rb
@@ -0,0 +1,534 @@
+require File.expand_path(File.join(File.dirname(__FILE__),'../spec_helper'))
+
+describe 'apache::vhost::php::standard', :type => 'define' do
+ let(:title){ 'example.com' }
+ let(:facts){
+ {
+ :fqdn => 'apache.example.com',
+ :operatingsystem => 'CentOS',
+ :operatingsystemmajrelease => '7',
+ }
+ }
+ describe 'with standard' do
+ # only test variables that are tuned
+ it { should contain_apache__vhost__webdir('example.com') }
+ it { should_not contain_class('mod_fcgid') }
+ it { should_not contain_class('php::mod_fcgid') }
+ it { should_not contain_class('apache::include::mod_fcgid') }
+ it { should_not contain_class('php::scl::php54') }
+ it { should_not contain_class('php::scl::php55') }
+ it { should_not contain_class('php::extensions::smarty') }
+ it { should contain_class('php') }
+ it { should_not contain_mod_fcgid__starter('example.com') }
+
+ # only test variables that are tuned
+ it { should contain_apache__vhost__phpdirs('example.com').with(
+ :php_upload_tmp_dir => '/var/www/upload_tmp_dir/example.com',
+ :php_session_save_path => '/var/www/session.save_path/example.com',
+ )}
+ # only test variables that are tuned
+ it { should contain_apache__vhost('example.com').with(
+ :template_partial => 'apache/vhosts/php/partial.erb',
+ :passing_extension => 'php'
+ )}
+
+ it { should have_apache__vhost__php__safe_mode_bin_resource_count(0) }
+ it { should contain_file('/var/www/vhosts/example.com/bin').with(
+ :ensure => 'absent',
+ :recurse => true,
+ :force => true,
+ :purge => true,
+ )}
+ # go deeper in the catalog and test the produced template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+ DirectoryIndex index.htm index.html index.php
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+
+ php_admin_flag engine on
+ php_admin_value error_log /var/www/vhosts/example.com/logs/php_error_log
+ php_admin_value open_basedir /var/www/vhosts/example.com/www:/var/www/vhosts/example.com/data:/var/www/upload_tmp_dir/example.com:/var/www/session.save_path/example.com
+ php_admin_flag safe_mode on
+ php_admin_value session.save_path /var/www/session.save_path/example.com
+ php_admin_value upload_tmp_dir /var/www/upload_tmp_dir/example.com
+
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine On
+ SecAuditEngine RelevantOnly
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+ describe 'with standard and params' do
+ let(:params) {
+ {
+ :php_settings => {
+ 'safe_mode' => 'Off',
+ }
+ }
+ }
+ # go deeper in the catalog and test the produced template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+ DirectoryIndex index.htm index.html index.php
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+
+ php_admin_flag engine on
+ php_admin_value error_log /var/www/vhosts/example.com/logs/php_error_log
+ php_admin_value open_basedir /var/www/vhosts/example.com/www:/var/www/vhosts/example.com/data:/var/www/upload_tmp_dir/example.com:/var/www/session.save_path/example.com
+ php_admin_flag safe_mode off
+ php_admin_value session.save_path /var/www/session.save_path/example.com
+ php_admin_value upload_tmp_dir /var/www/upload_tmp_dir/example.com
+
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine On
+ SecAuditEngine RelevantOnly
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+ describe 'with mod_fcgid' do
+ let(:params){
+ {
+ :run_mode => 'fcgid',
+ :run_uid => 'foo',
+ :run_gid => 'bar',
+ }
+ }
+ # only test variables that are tuned
+ it { should contain_apache__vhost__webdir('example.com') }
+ it { should contain_class('mod_fcgid') }
+ it { should contain_class('php::mod_fcgid') }
+ it { should contain_class('apache::include::mod_fcgid') }
+ it { should_not contain_class('php::scl::php54') }
+ it { should_not contain_class('php::scl::php55') }
+ it { should_not contain_class('php::extensions::smarty') }
+ it { should contain_mod_fcgid__starter('example.com').with(
+ :tmp_dir => false,
+ :cgi_type => 'php',
+ :cgi_type_options => {
+ "engine" =>"On",
+ "upload_tmp_dir" =>"/var/www/upload_tmp_dir/example.com",
+ "session.save_path" =>"/var/www/session.save_path/example.com",
+ "error_log" =>"/var/www/vhosts/example.com/logs/php_error_log",
+ "safe_mode" =>"On",
+ "safe_mode_gid" =>"On",
+ "safe_mode_exec_dir"=>:undef,
+ "default_charset" =>:undef,
+ "open_basedir" =>"/var/www/vhosts/example.com/www:/var/www/vhosts/example.com/data:/var/www/upload_tmp_dir/example.com:/var/www/session.save_path/example.com"
+ },
+ :owner => 'foo',
+ :group => 'bar',
+ :notify => 'Service[apache]',
+ ) }
+
+ # only test variables that are tuned
+ it { should contain_apache__vhost__phpdirs('example.com').with(
+ :php_upload_tmp_dir => '/var/www/upload_tmp_dir/example.com',
+ :php_session_save_path => '/var/www/session.save_path/example.com',
+ )}
+ # only test variables that are tuned
+ it { should contain_apache__vhost('example.com').with(
+ :template_partial => 'apache/vhosts/php/partial.erb',
+ :passing_extension => 'php'
+ )}
+
+ it { should have_apache__vhost__php__safe_mode_bin_resource_count(0) }
+ it { should contain_file('/var/www/vhosts/example.com/bin').with(
+ :ensure => 'absent',
+ :recurse => true,
+ :force => true,
+ :purge => true,
+ )}
+ # go deeper in the catalog and test the produced template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+ DirectoryIndex index.htm index.html index.php
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <IfModule mod_fcgid.c>
+ SuexecUserGroup foo bar
+ FcgidMaxRequestsPerProcess 5000
+ FCGIWrapper /var/www/mod_fcgid-starters/example.com/example.com-starter .php
+ AddHandler fcgid-script .php
+ </IfModule>
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+ Options +ExecCGI
+
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine On
+ SecAuditEngine RelevantOnly
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+ describe 'with mod_fcgid scl 5.4' do
+ let(:pre_condition){ 'include yum::prerequisites' }
+ let(:params){
+ {
+ :run_mode => 'fcgid',
+ :run_uid => 'foo',
+ :run_gid => 'bar',
+ :php_installation => 'scl54',
+ }
+ }
+ # only test variables that are tuned
+ it { should contain_apache__vhost__webdir('example.com') }
+ it { should contain_class('mod_fcgid') }
+ it { should contain_class('php::mod_fcgid') }
+ it { should contain_class('apache::include::mod_fcgid') }
+ it { should contain_class('php::scl::php54') }
+ it { should_not contain_class('php::scl::php55') }
+ it { should_not contain_class('php::extensions::smarty') }
+ it { should contain_mod_fcgid__starter('example.com').with(
+ :tmp_dir => false,
+ :cgi_type => 'php',
+ :cgi_type_options => {
+ "engine" =>"On",
+ "upload_tmp_dir" =>"/var/www/upload_tmp_dir/example.com",
+ "session.save_path" =>"/var/www/session.save_path/example.com",
+ "error_log" =>"/var/www/vhosts/example.com/logs/php_error_log",
+ "safe_mode" =>:undef,
+ "safe_mode_gid" =>:undef,
+ "safe_mode_exec_dir"=>:undef,
+ "default_charset" =>:undef,
+ "open_basedir" =>"/var/www/vhosts/example.com/www:/var/www/vhosts/example.com/data:/var/www/upload_tmp_dir/example.com:/var/www/session.save_path/example.com"
+ },
+ :binary => '/opt/rh/php54/root/usr/bin/php-cgi',
+ :additional_cmds => 'source /opt/rh/php54/enable',
+ :rc => '/opt/rh/php54/root/etc',
+ :owner => 'foo',
+ :group => 'bar',
+ :notify => 'Service[apache]',
+ ) }
+
+ # only test variables that are tuned
+ it { should contain_apache__vhost__phpdirs('example.com').with(
+ :php_upload_tmp_dir => '/var/www/upload_tmp_dir/example.com',
+ :php_session_save_path => '/var/www/session.save_path/example.com',
+ )}
+ # only test variables that are tuned
+ it { should contain_apache__vhost('example.com').with(
+ :template_partial => 'apache/vhosts/php/partial.erb',
+ :passing_extension => 'php'
+ )}
+
+ it { should have_apache__vhost__php__safe_mode_bin_resource_count(0) }
+ it { should contain_file('/var/www/vhosts/example.com/bin').with(
+ :ensure => 'absent',
+ :recurse => true,
+ :force => true,
+ :purge => true,
+ )}
+ # go deeper in the catalog and test the produced template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+ DirectoryIndex index.htm index.html index.php
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <IfModule mod_fcgid.c>
+ SuexecUserGroup foo bar
+ FcgidMaxRequestsPerProcess 5000
+ FCGIWrapper /var/www/mod_fcgid-starters/example.com/example.com-starter .php
+ AddHandler fcgid-script .php
+ </IfModule>
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+ Options +ExecCGI
+
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine On
+ SecAuditEngine RelevantOnly
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+ describe 'with mod_fcgid with scl55' do
+ let(:pre_condition){ 'include yum::prerequisites' }
+ let(:params){
+ {
+ :run_mode => 'fcgid',
+ :run_uid => 'foo',
+ :run_gid => 'bar',
+ :php_installation => 'scl55',
+ }
+ }
+ # only test variables that are tuned
+ it { should contain_apache__vhost__webdir('example.com') }
+ it { should contain_class('mod_fcgid') }
+ it { should contain_class('php::mod_fcgid') }
+ it { should contain_class('apache::include::mod_fcgid') }
+ it { should_not contain_class('php::scl::php54') }
+ it { should contain_class('php::scl::php55') }
+ it { should_not contain_class('php::extensions::smarty') }
+ it { should contain_mod_fcgid__starter('example.com').with(
+ :tmp_dir => false,
+ :cgi_type => 'php',
+ :cgi_type_options => {
+ "engine" =>"On",
+ "upload_tmp_dir" =>"/var/www/upload_tmp_dir/example.com",
+ "session.save_path" =>"/var/www/session.save_path/example.com",
+ "error_log" =>"/var/www/vhosts/example.com/logs/php_error_log",
+ "safe_mode" =>:undef,
+ "safe_mode_gid" =>:undef,
+ "safe_mode_exec_dir"=>:undef,
+ "default_charset" =>:undef,
+ "open_basedir" =>"/var/www/vhosts/example.com/www:/var/www/vhosts/example.com/data:/var/www/upload_tmp_dir/example.com:/var/www/session.save_path/example.com"
+ },
+ :binary => '/opt/rh/php55/root/usr/bin/php-cgi',
+ :additional_cmds => 'source /opt/rh/php55/enable',
+ :rc => '/opt/rh/php55/root/etc',
+ :owner => 'foo',
+ :group => 'bar',
+ :notify => 'Service[apache]',
+ ) }
+
+ # only test variables that are tuned
+ it { should contain_apache__vhost__phpdirs('example.com').with(
+ :php_upload_tmp_dir => '/var/www/upload_tmp_dir/example.com',
+ :php_session_save_path => '/var/www/session.save_path/example.com',
+ )}
+ # only test variables that are tuned
+ it { should contain_apache__vhost('example.com').with(
+ :template_partial => 'apache/vhosts/php/partial.erb',
+ :passing_extension => 'php'
+ )}
+
+ it { should have_apache__vhost__php__safe_mode_bin_resource_count(0) }
+ it { should contain_file('/var/www/vhosts/example.com/bin').with(
+ :ensure => 'absent',
+ :recurse => true,
+ :force => true,
+ :purge => true,
+ )}
+ # go deeper in the catalog and test the produced template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+ DirectoryIndex index.htm index.html index.php
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <IfModule mod_fcgid.c>
+ SuexecUserGroup foo bar
+ FcgidMaxRequestsPerProcess 5000
+ FCGIWrapper /var/www/mod_fcgid-starters/example.com/example.com-starter .php
+ AddHandler fcgid-script .php
+ </IfModule>
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+ Options +ExecCGI
+
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine On
+ SecAuditEngine RelevantOnly
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+ describe 'with mod_fcgid and params' do
+ let(:params){
+ {
+ :run_mode => 'fcgid',
+ :run_uid => 'foo',
+ :run_gid => 'bar',
+ :logmode => 'nologs',
+ :php_options => {
+ 'smarty' => true,
+ 'pear' => true,
+ 'safe_mode_exec_bins' => ['/usr/bin/cat'],
+ }
+ }
+ }
+ # only test variables that are tuned
+ it { should contain_apache__vhost__webdir('example.com') }
+ it { should contain_class('mod_fcgid') }
+ it { should contain_class('php::mod_fcgid') }
+ it { should contain_class('apache::include::mod_fcgid') }
+ it { should_not contain_class('php::scl::php54') }
+ it { should_not contain_class('php::scl::php55') }
+ it { should contain_class('php::extensions::smarty') }
+ it { should contain_mod_fcgid__starter('example.com').with(
+ :tmp_dir => false,
+ :cgi_type => 'php',
+ :cgi_type_options => {
+ "engine" =>"On",
+ "upload_tmp_dir" =>"/var/www/upload_tmp_dir/example.com",
+ "session.save_path" =>"/var/www/session.save_path/example.com",
+ "error_log" =>:undef,
+ "safe_mode" =>"On",
+ "safe_mode_gid" =>"On",
+ "safe_mode_exec_dir"=>"/var/www/vhosts/example.com/bin",
+ "default_charset" =>:undef,
+ "open_basedir" =>"/usr/share/php/Smarty/:/usr/share/pear/:/var/www/vhosts/example.com/www:/var/www/vhosts/example.com/data:/var/www/upload_tmp_dir/example.com:/var/www/session.save_path/example.com"
+ },
+ :owner => 'foo',
+ :group => 'bar',
+ :notify => 'Service[apache]',
+ ) }
+
+ # only test variables that are tuned
+ it { should contain_apache__vhost__phpdirs('example.com').with(
+ :php_upload_tmp_dir => '/var/www/upload_tmp_dir/example.com',
+ :php_session_save_path => '/var/www/session.save_path/example.com',
+ )}
+ # only test variables that are tuned
+ it { should contain_apache__vhost('example.com').with(
+ :template_partial => 'apache/vhosts/php/partial.erb',
+ :passing_extension => 'php'
+ )}
+
+ it { should have_apache__vhost__php__safe_mode_bin_resource_count(1) }
+ it { should contain_apache__vhost__php__safe_mode_bin('example.com@/usr/bin/cat').with(
+ :ensure => 'present',
+ :path => '/var/www/vhosts/example.com/bin',
+ )}
+ it { should contain_file('/var/www/vhosts/example.com/bin').with(
+ :ensure => 'directory',
+ :owner => 'apache',
+ :group => '0',
+ :recurse => true,
+ :force => true,
+ :purge => true,
+ )}
+ # go deeper in the catalog and test the produced template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+ DirectoryIndex index.htm index.html index.php
+
+
+ ErrorLog /dev/null
+ CustomLog /dev/null
+
+
+
+ <IfModule mod_fcgid.c>
+ SuexecUserGroup foo bar
+ FcgidMaxRequestsPerProcess 5000
+ FCGIWrapper /var/www/mod_fcgid-starters/example.com/example.com-starter .php
+ AddHandler fcgid-script .php
+ </IfModule>
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+ Options +ExecCGI
+
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine On
+ SecAuditEngine RelevantOnly
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+end
diff --git a/puppet/modules/apache/spec/defines/vhost_php_webapp_spec.rb b/puppet/modules/apache/spec/defines/vhost_php_webapp_spec.rb
new file mode 100644
index 00000000..bdebb14c
--- /dev/null
+++ b/puppet/modules/apache/spec/defines/vhost_php_webapp_spec.rb
@@ -0,0 +1,261 @@
+require File.expand_path(File.join(File.dirname(__FILE__),'../spec_helper'))
+
+describe 'apache::vhost::php::webapp', :type => 'define' do
+ let(:title){ 'example.com' }
+ let(:facts){
+ {
+ :fqdn => 'apache.example.com',
+ :operatingsystem => 'CentOS',
+ :operatingsystemmajrelease => '7',
+ }
+ }
+ describe 'with standard' do
+ let(:params){
+ {
+ :manage_config => false,
+ :template_partial => 'apache/vhosts/php/partial.erb',
+ }
+ }
+ # only test variables that are tuned
+ it { should have_apache__file__rw_resource_count(0) }
+ it { should_not contain_apache__vhost__file__documentrootfile('configurationfile_example.com') }
+ it { should contain_apache__vhost__php__standard('example.com') }
+ # go deeper in the catalog and test the produced template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+ DirectoryIndex index.htm index.html index.php
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+
+ php_admin_flag engine on
+ php_admin_value error_log /var/www/vhosts/example.com/logs/php_error_log
+ php_admin_value open_basedir /var/www/vhosts/example.com/www:/var/www/vhosts/example.com/data:/var/www/upload_tmp_dir/example.com:/var/www/session.save_path/example.com
+ php_admin_flag safe_mode on
+ php_admin_value session.save_path /var/www/session.save_path/example.com
+ php_admin_value upload_tmp_dir /var/www/upload_tmp_dir/example.com
+
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine On
+ SecAuditEngine RelevantOnly
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+ describe 'with mod_fcgid' do
+ let(:params){
+ {
+ :manage_config => false,
+ :template_partial => 'apache/vhosts/php/partial.erb',
+ :run_mode => 'fcgid',
+ :run_uid => 'foo',
+ :run_gid => 'bar',
+ }
+ }
+ # only test variables that are tuned
+ it { should have_apache__file__rw_resource_count(0) }
+ it { should_not contain_apache__vhost__file__documentrootfile('configurationfile_example.com') }
+ it { should contain_apache__vhost__php__standard('example.com') }
+ # go deeper in the catalog and test the produced template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+ DirectoryIndex index.htm index.html index.php
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <IfModule mod_fcgid.c>
+ SuexecUserGroup foo bar
+ FcgidMaxRequestsPerProcess 5000
+ FCGIWrapper /var/www/mod_fcgid-starters/example.com/example.com-starter .php
+ AddHandler fcgid-script .php
+ </IfModule>
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+ Options +ExecCGI
+
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine On
+ SecAuditEngine RelevantOnly
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+ context 'with config file and directories' do
+ describe 'with standard' do
+ let(:params){
+ {
+ :manage_config => true,
+ :managed_directories => [ '/tmp/a', '/tmp/b' ],
+ :config_file => 'config.php',
+ :template_partial => 'apache/vhosts/php/partial.erb',
+ }
+ }
+ # only test variables that are tuned
+ it { should have_apache__file__rw_resource_count(2) }
+ it { should contain_apache__file__rw('/tmp/a').with(
+ :owner => 'apache',
+ :group => 0,
+ )}
+ it { should contain_apache__file__rw('/tmp/b').with(
+ :owner => 'apache',
+ :group => 0,
+ )}
+ it { should contain_apache__vhost__file__documentrootfile('configurationfile_example.com').with(
+ :documentroot => '/var/www/vhosts/example.com/www',
+ :filename => 'config.php',
+ :thedomain => 'example.com',
+ :owner => 'apache',
+ :group => 0,
+ :mode => '0440',
+ ) }
+ it { should contain_apache__vhost__php__standard('example.com') }
+ # go deeper in the catalog and test the produced template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+ DirectoryIndex index.htm index.html index.php
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+
+ php_admin_flag engine on
+ php_admin_value error_log /var/www/vhosts/example.com/logs/php_error_log
+ php_admin_value open_basedir /var/www/vhosts/example.com/www:/var/www/vhosts/example.com/data:/var/www/upload_tmp_dir/example.com:/var/www/session.save_path/example.com
+ php_admin_flag safe_mode on
+ php_admin_value session.save_path /var/www/session.save_path/example.com
+ php_admin_value upload_tmp_dir /var/www/upload_tmp_dir/example.com
+
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine On
+ SecAuditEngine RelevantOnly
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+ describe 'with standard but writable' do
+ let(:params){
+ {
+ :manage_config => true,
+ :config_webwriteable => true,
+ :managed_directories => [ '/tmp/a', '/tmp/b' ],
+ :config_file => 'config.php',
+ :template_partial => 'apache/vhosts/php/partial.erb',
+ }
+ }
+ # only test variables that are tuned
+ it { should have_apache__file__rw_resource_count(2) }
+ it { should contain_apache__file__rw('/tmp/a').with(
+ :owner => 'apache',
+ :group => 0,
+ )}
+ it { should contain_apache__file__rw('/tmp/b').with(
+ :owner => 'apache',
+ :group => 0,
+ )}
+ it { should contain_apache__vhost__file__documentrootfile('configurationfile_example.com').with(
+ :documentroot => '/var/www/vhosts/example.com/www',
+ :filename => 'config.php',
+ :thedomain => 'example.com',
+ :owner => 'apache',
+ :group => 0,
+ :mode => '0660',
+ ) }
+ it { should contain_apache__vhost__php__standard('example.com') }
+ # go deeper in the catalog and test the produced template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+ DirectoryIndex index.htm index.html index.php
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+
+ php_admin_flag engine on
+ php_admin_value error_log /var/www/vhosts/example.com/logs/php_error_log
+ php_admin_value open_basedir /var/www/vhosts/example.com/www:/var/www/vhosts/example.com/data:/var/www/upload_tmp_dir/example.com:/var/www/session.save_path/example.com
+ php_admin_flag safe_mode on
+ php_admin_value session.save_path /var/www/session.save_path/example.com
+ php_admin_value upload_tmp_dir /var/www/upload_tmp_dir/example.com
+
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine On
+ SecAuditEngine RelevantOnly
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+ end
+end
diff --git a/puppet/modules/apache/spec/defines/vhost_php_wordpress_spec.rb b/puppet/modules/apache/spec/defines/vhost_php_wordpress_spec.rb
new file mode 100644
index 00000000..203f9690
--- /dev/null
+++ b/puppet/modules/apache/spec/defines/vhost_php_wordpress_spec.rb
@@ -0,0 +1,171 @@
+require File.expand_path(File.join(File.dirname(__FILE__),'../spec_helper'))
+
+describe 'apache::vhost::php::wordpress', :type => 'define' do
+ let(:title){ 'example.com' }
+ let(:facts){
+ {
+ :fqdn => 'apache.example.com',
+ :operatingsystem => 'CentOS',
+ :operatingsystemmajrelease => '7',
+ }
+ }
+ describe 'with standard' do
+ # only test the differences from the default
+ it { should contain_apache__vhost__php__webapp('example.com').with(
+ :mod_security_rules_to_disable => ["960010", "950018"],
+ :manage_directories => true,
+ :managed_directories => '/var/www/vhosts/example.com/www/wp-content',
+ :template_partial => 'apache/vhosts/php_wordpress/partial.erb',
+ :manage_config => true,
+ :config_webwriteable => false,
+ :config_file => 'wp-config.php',
+ )}
+ # go deeper in the catalog and test the produced template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+ DirectoryIndex index.htm index.html index.php
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride FileInfo
+
+ php_admin_flag engine on
+ php_admin_value error_log /var/www/vhosts/example.com/logs/php_error_log
+ php_admin_value open_basedir /var/www/vhosts/example.com/www:/var/www/vhosts/example.com/data:/var/www/upload_tmp_dir/example.com:/var/www/session.save_path/example.com
+ php_admin_flag safe_mode on
+ php_admin_value session.save_path /var/www/session.save_path/example.com
+ php_admin_value upload_tmp_dir /var/www/upload_tmp_dir/example.com
+
+
+ </Directory>
+
+
+ # fixes: http://git.zx2c4.com/w3-total-fail/tree/w3-total-fail.sh
+ <Directory \"/var/www/vhosts/example.com/www/wp-content/w3tc/dbcache\">
+ Deny From All
+ </Directory>
+
+ # simple wp-login brute force protection
+ # http://www.frameloss.org/2013/04/26/even-easier-brute-force-login-protection-for-wordpress/
+ RewriteEngine On
+ RewriteCond %{HTTP_COOKIE} !359422a82c97336dc082622faf72013a8e857bfd
+ RewriteRule ^/wp-login.php /wordpress-login-576a63fdc98202e7c7283713f2ddfee334bf13ee.php [R,L]
+ <Location /wordpress-login-576a63fdc98202e7c7283713f2ddfee334bf13ee.php>
+ CookieTracking on
+ CookieExpires 30
+ CookieName 359422a82c97336dc082622faf72013a8e857bfd
+ </Location>
+ RewriteRule ^/wordpress-login-576a63fdc98202e7c7283713f2ddfee334bf13ee.php /wp-login.php [NE]
+
+
+ <IfModule mod_security2.c>
+ SecRuleEngine On
+ SecAuditEngine RelevantOnly
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+
+ SecRuleRemoveById \"960010\"
+ SecRuleRemoveById \"950018\"
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+ describe 'with mod_fcgid' do
+ let(:params){
+ {
+ :run_mode => 'fcgid',
+ :run_uid => 'foo',
+ :run_gid => 'bar',
+ }
+ }
+ # only test variables that are tuned
+ it { should contain_apache__vhost__php__webapp('example.com').with(
+ :run_mode => 'fcgid',
+ :run_uid => 'foo',
+ :run_gid => 'bar',
+ :template_partial => 'apache/vhosts/php_wordpress/partial.erb',
+ :mod_security_rules_to_disable => ["960010", "950018"],
+ :manage_directories => true,
+ :managed_directories => '/var/www/vhosts/example.com/www/wp-content',
+ :manage_config => true,
+ :config_webwriteable => false,
+ :config_file => 'wp-config.php',
+ )}
+ # go deeper in the catalog and test the produced template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+ DirectoryIndex index.htm index.html index.php
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <IfModule mod_fcgid.c>
+ SuexecUserGroup foo bar
+ FcgidMaxRequestsPerProcess 5000
+ FCGIWrapper /var/www/mod_fcgid-starters/example.com/example.com-starter .php
+ AddHandler fcgid-script .php
+ </IfModule>
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride FileInfo
+ Options +ExecCGI
+
+
+ </Directory>
+
+
+ # fixes: http://git.zx2c4.com/w3-total-fail/tree/w3-total-fail.sh
+ <Directory \"/var/www/vhosts/example.com/www/wp-content/w3tc/dbcache\">
+ Deny From All
+ </Directory>
+
+ # simple wp-login brute force protection
+ # http://www.frameloss.org/2013/04/26/even-easier-brute-force-login-protection-for-wordpress/
+ RewriteEngine On
+ RewriteCond %{HTTP_COOKIE} !359422a82c97336dc082622faf72013a8e857bfd
+ RewriteRule ^/wp-login.php /wordpress-login-576a63fdc98202e7c7283713f2ddfee334bf13ee.php [R,L]
+ <Location /wordpress-login-576a63fdc98202e7c7283713f2ddfee334bf13ee.php>
+ CookieTracking on
+ CookieExpires 30
+ CookieName 359422a82c97336dc082622faf72013a8e857bfd
+ </Location>
+ RewriteRule ^/wordpress-login-576a63fdc98202e7c7283713f2ddfee334bf13ee.php /wp-login.php [NE]
+
+
+ <IfModule mod_security2.c>
+ SecRuleEngine On
+ SecAuditEngine RelevantOnly
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+
+ SecRuleRemoveById \"960010\"
+ SecRuleRemoveById \"950018\"
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+end
diff --git a/puppet/modules/apache/spec/defines/vhost_spec.rb b/puppet/modules/apache/spec/defines/vhost_spec.rb
new file mode 100644
index 00000000..051ad0d4
--- /dev/null
+++ b/puppet/modules/apache/spec/defines/vhost_spec.rb
@@ -0,0 +1,202 @@
+require File.expand_path(File.join(File.dirname(__FILE__),'../spec_helper'))
+
+describe 'apache::vhost', :type => 'define' do
+ let(:title){ 'example.com' }
+ let(:facts){
+ {
+ :fqdn => 'apache.example.com',
+ :operatingsystem => 'CentOS',
+ :operatingsystemmajrelease => '7',
+ }
+ }
+ let(:pre_condition) {
+ 'include apache'
+ }
+ describe 'with standard' do
+ it { should contain_apache__vhost__template('example.com').with(
+ :ensure => 'present',
+ :do_includes => false,
+ :run_mode => 'normal',
+ :ssl_mode => false,
+ :logmode => 'default',
+ :mod_security => true,
+ :htpasswd_file => 'absent',
+ :htpasswd_path => 'absent',
+ :use_mod_macro => false,
+ )}
+  # go deeper in the catalog and test the produced content from the template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine On
+ SecAuditEngine RelevantOnly
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+ describe 'with params' do
+ let(:params){
+ {
+ :do_includes => true,
+ :ssl_mode => true,
+ :logmode => 'anonym',
+ :mod_security => false,
+ :htpasswd_file => true,
+ }
+ }
+ it { should contain_apache__vhost__template('example.com').with(
+ :ensure => 'present',
+ :path => 'absent',
+ :path_is_webdir => false,
+ :logpath => 'absent',
+ :logmode => 'anonym',
+ :logprefix => '',
+ :domain => 'absent',
+ :domainalias => 'absent',
+ :server_admin => 'absent',
+ :allow_override => 'None',
+ :do_includes => true,
+ :options => 'absent',
+ :additional_options => 'absent',
+ :default_charset => 'absent',
+ :php_settings => {},
+ :php_options => {},
+ :run_mode => 'normal',
+ :run_uid => 'absent',
+ :run_gid => 'absent',
+ :template_partial => 'apache/vhosts/static/partial.erb',
+ :ssl_mode => true,
+ :htpasswd_file => true,
+ :htpasswd_path => 'absent',
+ :ldap_auth => false,
+ :ldap_user => 'any',
+ :mod_security => false,
+ :mod_security_relevantonly => true,
+ :mod_security_rules_to_disable => [],
+ :mod_security_additional_options => 'absent',
+ :use_mod_macro => false,
+ :passing_extension => 'absent',
+ :gempath => 'absent',
+ )}
+  # go deeper in the catalog and test the produced content from the template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+
+
+ ErrorLog /dev/null
+ CustomLog /var/www/vhosts/example.com/logs/access_log noip
+
+
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+ Options +Includes
+ AuthType Basic
+ AuthName \"Access fuer example.com\"
+ AuthUserFile /var/www/htpasswds/example.com
+ require valid-user
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine Off
+ SecAuditEngine Off
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+<VirtualHost *:443 >
+
+ Include include.d/defaults.inc
+ Include include.d/ssl_defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+
+
+ ErrorLog /dev/null
+ CustomLog /var/www/vhosts/example.com/logs/access_log noip
+
+
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+ Options +Includes
+ AuthType Basic
+ AuthName \"Access fuer example.com\"
+ AuthUserFile /var/www/htpasswds/example.com
+ require valid-user
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine Off
+ SecAuditEngine Off
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+ describe 'with params II' do
+ let(:params){
+ {
+ :vhost_mode => 'file',
+ }
+ }
+ it { should_not contain_apache__vhost__template('example.com') }
+ it { should contain_apache__vhost__file('example.com').with(
+ :ensure => 'present',
+ :vhost_source => 'absent',
+ :vhost_destination => 'absent',
+ :do_includes => false,
+ :run_mode => 'normal',
+ :mod_security => true,
+ :htpasswd_file => 'absent',
+ :htpasswd_path => 'absent',
+ :use_mod_macro => false,
+ )}
+ end
+ describe 'with wrong vhost_mode' do
+ let(:params){
+ {
+ :vhost_mode => 'foo',
+ }
+ }
+ it { expect { should compile }.to raise_error(Puppet::Error, /No such vhost_mode: foo defined for example.com\./)
+ }
+ end
+end
diff --git a/puppet/modules/apache/spec/defines/vhost_static_spec.rb b/puppet/modules/apache/spec/defines/vhost_static_spec.rb
new file mode 100644
index 00000000..37891bb5
--- /dev/null
+++ b/puppet/modules/apache/spec/defines/vhost_static_spec.rb
@@ -0,0 +1,54 @@
+require File.expand_path(File.join(File.dirname(__FILE__),'../spec_helper'))
+
+describe 'apache::vhost::static', :type => 'define' do
+ let(:title){ 'example.com' }
+ let(:facts){
+ {
+ :fqdn => 'apache.example.com',
+ :operatingsystem => 'CentOS',
+ :operatingsystemmajrelease => '7',
+ }
+ }
+ let(:pre_condition) {
+ 'include apache'
+ }
+ describe 'with standard' do
+ # only test the relevant options
+ it { should contain_apache__vhost__webdir('example.com').with(
+ :datadir => false,
+ )}
+ it { should contain_apache__vhost('example.com') }
+ # go deeper in the catalog and test the produced template
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine Off
+ SecAuditEngine Off
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+end
diff --git a/puppet/modules/apache/spec/defines/vhost_template_spec.rb b/puppet/modules/apache/spec/defines/vhost_template_spec.rb
new file mode 100644
index 00000000..96fb9ac3
--- /dev/null
+++ b/puppet/modules/apache/spec/defines/vhost_template_spec.rb
@@ -0,0 +1,297 @@
+require File.expand_path(File.join(File.dirname(__FILE__),'../spec_helper'))
+
+describe 'apache::vhost::template', :type => 'define' do
+ let(:title){ 'example.com' }
+ let(:facts){
+ {
+ :fqdn => 'apache.example.com',
+ :operatingsystem => 'CentOS',
+ :operatingsystemmajrelease => '7',
+ }
+ }
+ let(:pre_condition) {
+ 'include apache'
+ }
+ describe 'with standard' do
+ it { should contain_apache__vhost__file('example.com').with(
+ :ensure => 'present',
+ :do_includes => false,
+ :run_mode => 'normal',
+ :ssl_mode => false,
+ :logmode => 'default',
+ :mod_security => true,
+ :htpasswd_file => 'absent',
+ :htpasswd_path => 'absent',
+ :use_mod_macro => false,
+ )}
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log combined
+
+
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine On
+ SecAuditEngine RelevantOnly
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+ describe 'with params' do
+ let(:params){
+ {
+ :do_includes => true,
+ :ssl_mode => true,
+ :logmode => 'anonym',
+ :mod_security => false,
+ :htpasswd_file => true,
+ }
+ }
+ it { should contain_apache__vhost__file('example.com').with(
+ :ensure => 'present',
+ :do_includes => true,
+ :run_mode => 'normal',
+ :ssl_mode => true,
+ :logmode => 'anonym',
+ :mod_security => false,
+ :htpasswd_file => true,
+ :htpasswd_path => 'absent',
+ :use_mod_macro => false,
+ )}
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+
+
+ ErrorLog /dev/null
+ CustomLog /var/www/vhosts/example.com/logs/access_log noip
+
+
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+ Options +Includes
+ AuthType Basic
+ AuthName \"Access fuer example.com\"
+ AuthUserFile /var/www/htpasswds/example.com
+ require valid-user
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine Off
+ SecAuditEngine Off
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+<VirtualHost *:443 >
+
+ Include include.d/defaults.inc
+ Include include.d/ssl_defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+
+
+ ErrorLog /dev/null
+ CustomLog /var/www/vhosts/example.com/logs/access_log noip
+
+
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+ Options +Includes
+ AuthType Basic
+ AuthName \"Access fuer example.com\"
+ AuthUserFile /var/www/htpasswds/example.com
+ require valid-user
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine Off
+ SecAuditEngine Off
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+ describe 'with params II' do
+ let(:params){
+ {
+ :do_includes => true,
+ :ssl_mode => 'force',
+ :logmode => 'semianonym',
+ :mod_security => false,
+ :htpasswd_file => true,
+ }
+ }
+ it { should contain_apache__vhost__file('example.com').with(
+ :ensure => 'present',
+ :do_includes => true,
+ :run_mode => 'normal',
+ :ssl_mode => 'force',
+ :logmode => 'semianonym',
+ :mod_security => false,
+ :htpasswd_file => true,
+ :htpasswd_path => 'absent',
+ :use_mod_macro => false,
+ )}
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:80 >
+
+ Include include.d/defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log noip
+
+
+
+ RewriteEngine On
+ RewriteCond %{HTTPS} !=on
+ RewriteCond %{HTTP:X-Forwarded-Proto} !=https
+ RewriteRule (.*) https://%{SERVER_NAME}$1 [R=permanent,L]
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+ Options +Includes
+ AuthType Basic
+ AuthName \"Access fuer example.com\"
+ AuthUserFile /var/www/htpasswds/example.com
+ require valid-user
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine Off
+ SecAuditEngine Off
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+<VirtualHost *:443 >
+
+ Include include.d/defaults.inc
+ Include include.d/ssl_defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+
+
+ ErrorLog /var/www/vhosts/example.com/logs/error_log
+ CustomLog /var/www/vhosts/example.com/logs/access_log noip
+
+
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+ Options +Includes
+ AuthType Basic
+ AuthName \"Access fuer example.com\"
+ AuthUserFile /var/www/htpasswds/example.com
+ require valid-user
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine Off
+ SecAuditEngine Off
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+ describe 'with params III' do
+ let(:params){
+ {
+ :do_includes => false,
+ :ssl_mode => 'only',
+ :logmode => 'nologs',
+ :mod_security => true,
+ :htpasswd_file => 'absent',
+ }
+ }
+ it { should contain_apache__vhost__file('example.com').with(
+ :ensure => 'present',
+ :do_includes => false,
+ :run_mode => 'normal',
+ :ssl_mode => 'only',
+ :logmode => 'nologs',
+ :mod_security => true,
+ :htpasswd_file => 'absent',
+ :htpasswd_path => 'absent',
+ :use_mod_macro => false,
+ )}
+ it { should contain_apache__vhost__file('example.com').with_content(
+"<VirtualHost *:443 >
+
+ Include include.d/defaults.inc
+ Include include.d/ssl_defaults.inc
+ ServerName example.com
+ DocumentRoot /var/www/vhosts/example.com/www/
+
+
+ ErrorLog /dev/null
+ CustomLog /dev/null
+
+
+
+ <Directory \"/var/www/vhosts/example.com/www/\">
+ AllowOverride None
+
+
+ </Directory>
+
+ <IfModule mod_security2.c>
+ SecRuleEngine On
+ SecAuditEngine RelevantOnly
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir /var/www/vhosts/example.com/logs/
+ SecAuditLog /var/www/vhosts/example.com/logs/mod_security_audit.log
+ SecDebugLog /var/www/vhosts/example.com/logs/mod_security_debug.log
+ </IfModule>
+
+</VirtualHost>
+"
+)}
+ end
+end
diff --git a/puppet/modules/apache/spec/functions/guess_apache_version.rb b/puppet/modules/apache/spec/functions/guess_apache_version.rb
new file mode 100644
index 00000000..b57a7a0f
--- /dev/null
+++ b/puppet/modules/apache/spec/functions/guess_apache_version.rb
@@ -0,0 +1,50 @@
+require File.expand_path(File.join(File.dirname(__FILE__),'../spec_helper'))
+
+describe 'guess_apache_version function' do
+
+ #let(:scope) { PuppetlabsSpec::PuppetInternals.scope }
+
+ it "should exist" do
+ expect(Puppet::Parser::Functions.function("guess_apache_version")).to eq("function_guess_apache_version")
+ end
+
+ context 'on debian 7.8' do
+ let(:facts) do
+ {
+ :operatingsystem => 'Debian',
+ :operatingsystemrelease => '7.8'
+ }
+ end
+ it "should return 2.2" do
+ result = scope.function_guess_apache_version([])
+ expect(result).to(eq('2.2'))
+ end
+ end
+
+ context 'on debian 8.0' do
+ let(:facts) do
+ {
+ :operatingsystem => 'Debian',
+ :operatingsystemrelease => '8.0'
+ }
+ end
+ it "should return 2.4" do
+ result = scope.function_guess_apache_version([])
+ expect(result).to(eq('2.4'))
+ end
+ end
+
+ context 'on ubuntu 15.10' do
+ let(:facts) do
+ {
+ :operatingsystem => 'Ubuntu',
+ :operatingsystemrelease => '15.10'
+ }
+ end
+ it "should return 2.4" do
+ result = scope.function_guess_apache_version([])
+ expect(result).to(eq('2.4'))
+ end
+ end
+
+end
diff --git a/puppet/modules/apache/spec/spec_helper.rb b/puppet/modules/apache/spec/spec_helper.rb
new file mode 100644
index 00000000..381f9720
--- /dev/null
+++ b/puppet/modules/apache/spec/spec_helper.rb
@@ -0,0 +1,13 @@
+require 'puppetlabs_spec_helper/module_spec_helper'
+require 'rake'
+
+fixture_path = File.expand_path(File.join(__FILE__, '..', 'fixtures'))
+
+RSpec.configure do |c|
+ c.module_path = File.join(fixture_path, 'modules')
+ c.manifest_dir = File.join(fixture_path, 'manifests')
+ c.pattern = FileList[c.pattern].exclude(/^spec\/fixtures/)
+end
+
+Puppet::Util::Log.level = :warning
+Puppet::Util::Log.newdestination(:console)
diff --git a/puppet/modules/apache/templates/default/default_index.erb b/puppet/modules/apache/templates/default/default_index.erb
new file mode 100644
index 00000000..b35ecd91
--- /dev/null
+++ b/puppet/modules/apache/templates/default/default_index.erb
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
+<head>
+ <title><%= scope.lookupvar('::hostname') %></title>
+ <meta http-equiv="content-type" content="text/html; charset=utf-8" />
+</head>
+<body>
+ <h1> No page @ this location</h1>
+ <p>
+ <small><em><%= scope.lookupvar('::hostname') %></em></small>
+ </p>
+</body>
+</html>
diff --git a/puppet/modules/apache/templates/include.d/ssl_defaults.inc.erb b/puppet/modules/apache/templates/include.d/ssl_defaults.inc.erb
new file mode 100644
index 00000000..77f8e77a
--- /dev/null
+++ b/puppet/modules/apache/templates/include.d/ssl_defaults.inc.erb
@@ -0,0 +1,78 @@
+# SSL Engine Switch:
+# Enable/Disable SSL for this virtual host.
+SSLEngine on
+
+# SSL Protocol support:
+# List the enabled protocol levels with which clients will be able to
+# connect. Disable SSLv2 access by default:
+SSLProtocol All -SSLv2 -SSLv3
+
+# SSL Cipher Suite:
+# List the ciphers that the client is permitted to negotiate.
+# See the mod_ssl documentation for a complete list.
+SSLCipherSuite "<%= scope.lookupvar('apache::ssl_cipher_suite') %>"
+
+SSLHonorCipherOrder on
+
+# SSL Engine Options:
+# Set various options for the SSL engine.
+# o FakeBasicAuth:
+# Translate the client X.509 into a Basic Authorisation. This means that
+# the standard Auth/DBMAuth methods can be used for access control. The
+# user name is the `one line' version of the client's X.509 certificate.
+# Note that no password is obtained from the user. Every entry in the user
+# file needs this password: `xxj31ZMTZzkVA'.
+# o ExportCertData:
+# This exports two additional environment variables: SSL_CLIENT_CERT and
+# SSL_SERVER_CERT. These contain the PEM-encoded certificates of the
+# server (always existing) and the client (only existing when client
+# authentication is used). This can be used to import the certificates
+# into CGI scripts.
+# o StdEnvVars:
+# This exports the standard SSL/TLS related `SSL_*' environment variables.
+# By default this exportation is switched off for performance reasons,
+# because the extraction step is an expensive operation and is usually
+# useless for serving static content. So one usually enables the
+# exportation for CGI and SSI requests only.
+# o StrictRequire:
+# This denies access when "SSLRequireSSL" or "SSLRequire" applied even
+# under a "Satisfy any" situation, i.e. when it applies access is denied
+# and no other module can change it.
+# o OptRenegotiate:
+# This enables optimized SSL connection renegotiation handling when SSL
+# directives are used in per-directory context.
+#SSLOptions +FakeBasicAuth +ExportCertData +StrictRequire
+<Files ~ "\.(cgi|shtml|phtml|php3?)$">
+ SSLOptions +StdEnvVars
+</Files>
+<Directory "/var/www/cgi-bin">
+ SSLOptions +StdEnvVars
+</Directory>
+
+# SSL Protocol Adjustments:
+# The safe and default but still SSL/TLS standard compliant shutdown
+# approach is that mod_ssl sends the close notify alert but doesn't wait for
+# the close notify alert from client. When you need a different shutdown
+# approach you can use one of the following variables:
+# o ssl-unclean-shutdown:
+# This forces an unclean shutdown when the connection is closed, i.e. no
+# SSL close notify alert is sent or allowed to be received. This violates
+# the SSL/TLS standard but is needed for some brain-dead browsers. Use
+# this when you receive I/O errors because of the standard approach where
+# mod_ssl sends the close notify alert.
+# o ssl-accurate-shutdown:
+# This forces an accurate shutdown when the connection is closed, i.e. an
+# SSL close notify alert is sent and mod_ssl waits for the close notify
+# alert of the client. This is 100% SSL/TLS standard compliant, but in
+# practice often causes hanging connections with brain-dead browsers. Use
+# this only for browsers where you know that their SSL implementation
+# works correctly.
+# Notice: Most problems of broken clients are also related to the HTTP
+# keep-alive facility, so you usually additionally want to disable
+# keep-alive for those clients, too. Use variable "nokeepalive" for this.
+# Similarly, one has to force some clients to use HTTP/1.0 to work around
+# their broken HTTP/1.1 implementation. Use variables "downgrade-1.0" and
+# "force-response-1.0" for this.
+SetEnvIf User-Agent ".*MSIE.*" \
+ nokeepalive ssl-unclean-shutdown \
+ downgrade-1.0 force-response-1.0
diff --git a/puppet/modules/apache/templates/itk_plus/CentOS/00-listen-ssl.conf.erb b/puppet/modules/apache/templates/itk_plus/CentOS/00-listen-ssl.conf.erb
new file mode 100644
index 00000000..83f7beeb
--- /dev/null
+++ b/puppet/modules/apache/templates/itk_plus/CentOS/00-listen-ssl.conf.erb
@@ -0,0 +1,6 @@
+<IfDefine HttpdLocal>
+Listen 127.0.0.1:443
+</IfDefine>
+<IfDefine !HttpdLocal>
+Listen <%= scope.lookupvar('::ipaddress') %>:443
+</IfDefine>
diff --git a/puppet/modules/apache/templates/itk_plus/CentOS/00-listen.conf.erb b/puppet/modules/apache/templates/itk_plus/CentOS/00-listen.conf.erb
new file mode 100644
index 00000000..30b20466
--- /dev/null
+++ b/puppet/modules/apache/templates/itk_plus/CentOS/00-listen.conf.erb
@@ -0,0 +1,8 @@
+<IfDefine HttpdLocal>
+Listen 127.0.0.1:80
+PidFile run/httpdlocal.pid
+</IfDefine>
+<IfDefine !HttpdLocal>
+Listen <%= scope.lookupvar('::ipaddress') %>:80
+PidFile run/httpd.pid
+</IfDefine>
diff --git a/puppet/modules/apache/templates/vhosts/0-default_ssl.conf.erb b/puppet/modules/apache/templates/vhosts/0-default_ssl.conf.erb
new file mode 100644
index 00000000..86e4979f
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/0-default_ssl.conf.erb
@@ -0,0 +1,21 @@
+############################################################
+### This file is managed by PUPPET! ####
+### Only modify in repo or you will lose the changes!  ####
+############################################################
+
+<VirtualHost *:443>
+ Include include.d/defaults.inc
+ Include include.d/ssl_defaults.inc
+ DocumentRoot /var/www/html
+
+ # Use separate log files for the SSL virtual host; note that LogLevel
+ # is not inherited from httpd.conf.
+ ErrorLog logs/ssl_error_log
+ TransferLog logs/ssl_access_log
+ LogLevel warn
+
+<%= scope.function_templatewlv(['apache/vhosts/partials/ssl.erb',
+ {'configuration' => {}}]) %>
+</VirtualHost>
+
+# vim: ts=4 filetype=apache
diff --git a/puppet/modules/apache/templates/vhosts/default.erb b/puppet/modules/apache/templates/vhosts/default.erb
new file mode 100644
index 00000000..1ef8023d
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/default.erb
@@ -0,0 +1,44 @@
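+<%# Renders one or two VirtualHost blocks depending on @ssl_mode: -%>
+<%#   'only'          -> only the *:443 vhost -%>
+<%#   false / 'false' -> only the *:80 vhost -%>
+<%#   anything else   -> both; with 'force' the *:80 vhost additionally gets a -%>
+<%#                      blanket RewriteRule redirect to https. -%>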
+<%
+vhost_parts = case @ssl_mode
+ when 'only' then [:ssl]
+ when false,'false' then [:normal]
+ else [:normal,:ssl]
+end
+vhost_parts.each do |vhost_part| -%>
+<VirtualHost *:<%= vhost_part == :ssl ? '443' : '80' %> >
+
+<%= scope.function_templatewlv(['apache/vhosts/partials/header_default.erb',
+ {'vhost_part' => vhost_part,
+ 'configuration' => @configuration,}]) %>
+
+<%= scope.function_template(['apache/vhosts/partials/logs.erb']) %>
+
+<% if @run_mode.to_s =~ /(proxy\-|static\-)?itk/ -%>
+ <IfModule mpm_itk_module>
+ AssignUserId <%= "#{@run_uid} #{@run_gid}" %>
+ </IfModule>
+
+<% elsif @run_mode.to_s == 'fcgid' -%>
+ <IfModule mod_fcgid.c>
+ SuexecUserGroup <%= "#{@run_uid} #{@run_gid}" %>
+ FcgidMaxRequestsPerProcess 5000
+ FCGIWrapper /var/www/mod_fcgid-starters/<%= @name %>/<%= @name %>-starter .<%= @passing_extension %>
+ AddHandler fcgid-script .<%= @passing_extension %>
+ </IfModule>
+
+<% end -%>
+<% if @ssl_mode == 'force' && vhost_part == :normal -%>
+ RewriteEngine On
+ RewriteCond %{HTTPS} !=on
+ RewriteCond %{HTTP:X-Forwarded-Proto} !=https
+ RewriteRule (.*) https://%{SERVER_NAME}$1 [R=permanent,L]
+<% end -%>
+<%= scope.function_templatewlv([@template_partial, {'vhost_part' => vhost_part } ]) %>
+<% unless @template_partial == 'apache/vhosts/itk_plus/partial.erb' -%>
+<%= scope.function_template(['apache/vhosts/partials/mod_security.erb']) %>
+<% end -%>
+<% unless @additional_options.to_s == 'absent' -%>
+ <%= @additional_options %>
+<% end -%>
+</VirtualHost>
+<% end -%>
diff --git a/puppet/modules/apache/templates/vhosts/gitweb/partial.erb b/puppet/modules/apache/templates/vhosts/gitweb/partial.erb
new file mode 100644
index 00000000..a8475f60
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/gitweb/partial.erb
@@ -0,0 +1,16 @@
+ SetEnv GITWEB_CONFIG <%= @gitweb_config %>
+ DirectoryIndex gitweb.cgi
+ <Directory "<%= @documentroot %>/">
+<% if @options.to_s != 'absent' || @do_includes.to_s == 'true'-%>
+ Options <% unless @options.to_s == 'absent' -%><%= @options %><% end -%><% if @do_includes.to_s == 'true' && !@options.include?('+Includes') -%> +Includes<% end -%><% unless @options.include?('+ExecCGI') -%> +ExecCGI<% end -%>
+<% end -%>
+ AddHandler cgi-script .cgi
+ <Files gitweb.cgi>
+ Options ExecCGI FollowSymLinks
+ SetHandler cgi-script
+ </Files>
+ RewriteEngine on
+  RewriteRule ^[a-zA-Z0-9_-]+\.git/?(\?.)?$ /gitweb.cgi%{REQUEST_URI} [L,PT]
+
+<%= scope.function_template(['apache/vhosts/partials/authentication.erb']) %>
+ </Directory>
diff --git a/puppet/modules/apache/templates/vhosts/itk_plus.erb b/puppet/modules/apache/templates/vhosts/itk_plus.erb
new file mode 100644
index 00000000..b5461968
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/itk_plus.erb
@@ -0,0 +1,6 @@
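+<%# Both IfDefine branches end up in the same file: the HttpdLocal instance -%>
+<%# uses the plain default vhost, while the public instance renders the default -%>
+<%# vhost with the proxying itk_plus partial instead. -%>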
+<IfDefine HttpdLocal>
+<%= scope.function_template(['apache/vhosts/default.erb']) %>
+</IfDefine>
+<IfDefine !HttpdLocal>
+<%= scope.function_templatewlv(['apache/vhosts/default.erb', {'template_partial' => 'apache/vhosts/itk_plus/partial.erb' }]) %>
+</IfDefine>
diff --git a/puppet/modules/apache/templates/vhosts/itk_plus/partial.erb b/puppet/modules/apache/templates/vhosts/itk_plus/partial.erb
new file mode 100644
index 00000000..df045433
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/itk_plus/partial.erb
@@ -0,0 +1,31 @@
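+<%# Proxies requests from the public vhost to the local (HttpdLocal) instance -%>
+<%# on 127.0.0.1. In 'static-itk' mode only URLs matching @passing_extension -%>
+<%# are proxied; everything else is served directly via the <Directory> below. -%>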
+
+ ProxyPreserveHost On
+ ProxyRequests off
+<% if vhost_part == :ssl -%>
+ SSLProxyEngine On
+<% if run_mode.to_s == 'static-itk' -%>
+ ProxyPassMatch ^/(.*\.<%= @passing_extension %>/?.*)$ https://127.0.0.1/$1
+<% else -%>
+ ProxyPass / https://127.0.0.1/
+<% end -%>
+ ProxyPassReverse / https://127.0.0.1/
+<% else -%>
+<% if run_mode.to_s == 'static-itk' -%>
+ ProxyPassMatch ^/(.*\.<%= @passing_extension %>/?.*)$ http://127.0.0.1/$1
+<% else -%>
+ ProxyPass / http://127.0.0.1/
+<% end -%>
+ ProxyPassReverse / http://127.0.0.1/
+<% end -%>
+
+<% if @run_mode.to_s == 'static-itk' && (@ssl_mode.to_s != 'force' || vhost_part == :ssl) -%>
+ <Directory "<%= @documentroot %>/">
+ AllowOverride <%= @allow_override %>
+<% if @options.to_s != 'absent' || @do_includes.to_s == 'true' -%>
+ Options <% unless @options.to_s == 'absent' -%><%= @options %><% end -%><% if @do_includes.to_s == 'true' && !@options.include?('+Includes') -%> +Includes<% end -%>
+<% end -%>
+<%= scope.function_template(['apache/vhosts/partials/authentication.erb']) %>
+ </Directory>
+<% end -%>
+
+
diff --git a/puppet/modules/apache/templates/vhosts/partials/authentication.erb b/puppet/modules/apache/templates/vhosts/partials/authentication.erb
new file mode 100644
index 00000000..ed832210
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/partials/authentication.erb
@@ -0,0 +1,6 @@
+<% unless @htpasswd_file.to_s == 'absent' -%>
+ AuthType Basic
+ AuthName "Access fuer <%= @servername %>"
+ AuthUserFile <%= @real_htpasswd_path %>
+ require valid-user
+<% end -%>
diff --git a/puppet/modules/apache/templates/vhosts/partials/header_default.erb b/puppet/modules/apache/templates/vhosts/partials/header_default.erb
new file mode 100644
index 00000000..cd4d04ca
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/partials/header_default.erb
@@ -0,0 +1,22 @@
+ Include include.d/defaults.inc
+<% if vhost_part == :ssl -%>
+ Include include.d/ssl_defaults.inc
+<%= scope.function_templatewlv(['apache/vhosts/partials/ssl.erb',
+ {'configuration' => configuration}]) %>
+<% end -%>
+ ServerName <%= @servername %>
+<% unless @serveralias.empty? || (@serveralias == 'absent') -%>
+ ServerAlias <%= Array(@serveralias).sort.join(' ') %>
+<% end -%>
+<% unless @server_admin.empty? || (@server_admin == 'absent') -%>
+ ServerAdmin <%= @server_admin %>
+<% end -%>
+<% unless @documentroot == 'really_absent' -%>
+ DocumentRoot <%= @documentroot %>/
+<% end -%>
+<% if @default_charset != 'absent' -%>
+ AddDefaultCharset <%= @default_charset %>
+<% end -%>
+<% if @passing_extension != 'absent' -%>
+ DirectoryIndex index.htm index.html index.<%= @passing_extension %>
+<% end -%>
diff --git a/puppet/modules/apache/templates/vhosts/partials/logs.erb b/puppet/modules/apache/templates/vhosts/partials/logs.erb
new file mode 100644
index 00000000..4fe1b0a6
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/partials/logs.erb
@@ -0,0 +1,18 @@
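+<%# Maps @logmode to ErrorLog/CustomLog destinations: 'nologs' discards both, -%>
+<%# 'noaccess' keeps only the error log, 'semianonym' keeps both but switches -%>
+<%# the access log to the 'noip' format, 'anonym' additionally discards the -%>
+<%# error log, and any other value logs everything with the 'combined' format. -%>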
+<% case @logmode.to_s
+ when 'nologs' -%>
+ ErrorLog /dev/null
+ CustomLog /dev/null %%
+<% when 'noaccess' -%>
+ ErrorLog <%= @logdir %>/<%= @logprefix %>error_log
+ CustomLog /dev/null noip
+<% when 'semianonym' -%>
+ ErrorLog <%= @logdir %>/<%= @logprefix %>error_log
+ CustomLog <%= @logdir %>/<%= @logprefix %>access_log noip
+<% when 'anonym' -%>
+ ErrorLog /dev/null
+ CustomLog <%= @logdir %>/<%= @logprefix %>access_log noip
+<% else -%>
+ ErrorLog <%= @logdir %>/<%= @logprefix %>error_log
+ CustomLog <%= @logdir %>/<%= @logprefix %>access_log combined
+<% end -%>
+
diff --git a/puppet/modules/apache/templates/vhosts/partials/mod_security.erb b/puppet/modules/apache/templates/vhosts/partials/mod_security.erb
new file mode 100644
index 00000000..380e78f1
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/partials/mod_security.erb
@@ -0,0 +1,27 @@
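+<%# Per-vhost mod_security block: @mod_security toggles SecRuleEngine and the -%>
+<%# audit engine (RelevantOnly vs. On is chosen by @mod_security_relevantonly), -%>
+<%# @mod_security_rules_to_disable becomes SecRuleRemoveById lines, and -%>
+<%# @mod_security_additional_options is appended verbatim unless 'absent'. -%>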
+ <IfModule mod_security2.c>
+<% if @mod_security.to_s == 'true' -%>
+ SecRuleEngine On
+<% if @mod_security_relevantonly.to_s == 'true' -%>
+ SecAuditEngine RelevantOnly
+<% else -%>
+ SecAuditEngine On
+<% end -%>
+<% else -%>
+ SecRuleEngine Off
+ SecAuditEngine Off
+<% end -%>
+ SecAuditLogType Concurrent
+ SecAuditLogStorageDir <%= @logdir %>/
+ SecAuditLog <%= @logdir %>/mod_security_audit.log
+ SecDebugLog <%= @logdir %>/mod_security_debug.log
+<% unless (disabled_rules=Array(@mod_security_rules_to_disable)).empty? -%>
+
+<% disabled_rules.each do |rule| -%>
+ SecRuleRemoveById "<%= rule %>"
+<% end -%>
+<% end -%>
+<% unless (s=@mod_security_additional_options).to_s == 'absent' -%>
+
+ <%= s %>
+<% end -%>
+ </IfModule>
diff --git a/puppet/modules/apache/templates/vhosts/partials/php_settings.erb b/puppet/modules/apache/templates/vhosts/partials/php_settings.erb
new file mode 100644
index 00000000..74f6ecf2
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/partials/php_settings.erb
@@ -0,0 +1,20 @@
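+<%# Skipped under fcgid, where the php_admin_* directives of mod_php do not -%>
+<%# apply. Otherwise nil/:undef settings are dropped, boolean-ish values -%>
+<%# (true/false/on/off) are emitted as php_admin_flag and everything else -%>
+<%# as php_admin_value. -%>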
+<% if @run_mode != 'fcgid'
+ @php_settings.reject{|k,v| (v == :undef) || v.nil? }.keys.sort.each do |key|
+ dvalue = @php_settings[key].to_s.downcase
+ munged_value = if dvalue == 'true'
+ 'on'
+ elsif dvalue == 'false'
+ 'off'
+ elsif ['on','off'].include?(dvalue)
+ dvalue
+ else
+ @php_settings[key]
+ end
+
+ if ['on','off' ].include?(munged_value) -%>
+ php_admin_flag <%= key %> <%= munged_value %>
+<% else -%>
+ php_admin_value <%= key %> <%= munged_value %>
+<% end -%>
+<% end -%>
+<% end -%>
diff --git a/puppet/modules/apache/templates/vhosts/partials/ssl.erb b/puppet/modules/apache/templates/vhosts/partials/ssl.erb
new file mode 100644
index 00000000..c9f39333
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/partials/ssl.erb
@@ -0,0 +1,8 @@
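+<%# Certificate paths come from the per-vhost 'configuration' hash when set and -%>
+<%# fall back to the apache::default_ssl_certificate_* values; the chain file is -%>
+<%# only written when one is configured, and an HSTS header is added when -%>
+<%# configuration['hsts'] is set (max-age defaults to 15768000s, ~6 months). -%>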
+ SSLCertificateFile <%= configuration['ssl_certificate_file'] || scope.lookupvar('apache::default_ssl_certificate_file') %>
+ SSLCertificateKeyFile <%= configuration['ssl_certificate_key_file'] || scope.lookupvar('apache::default_ssl_certificate_key_file') %>
+<% if configuration['ssl_certificate_chain_file'] || scope.lookupvar('apache::default_ssl_certificate_chain_file') != 'absent' -%>
+ SSLCertificateChainFile <%= configuration['ssl_certificate_chain_file'] || scope.lookupvar('apache::default_ssl_certificate_chain_file') %>
+<% end -%>
+<% if configuration['hsts'] -%>
+ Header add Strict-Transport-Security "max-age=<%= (configuration['hsts']['age'] || 15768000) rescue 15768000 %>"
+<% end -%>
diff --git a/puppet/modules/apache/templates/vhosts/partials/std_override_options.erb b/puppet/modules/apache/templates/vhosts/partials/std_override_options.erb
new file mode 100644
index 00000000..6d8b74f8
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/partials/std_override_options.erb
@@ -0,0 +1,4 @@
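+<%# Emits AllowOverride plus an Options line; '+Includes' is appended when -%>
+<%# @do_includes is set and '+ExecCGI' when running under fcgid, unless the -%>
+<%# caller already listed them in @options. -%>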
+ AllowOverride <%= @allow_override %>
+<% if @options.to_s != 'absent' || @do_includes.to_s == 'true' || @run_mode == 'fcgid' -%>
+ Options <%- unless @options.to_s == 'absent' -%><%= @options %><% end -%><% if @do_includes.to_s == 'true' && !@options.include?('+Includes') -%> +Includes<% end -%><% if @run_mode == 'fcgid' && !@options.include?('+ExecCGI') -%> +ExecCGI<% end -%>
+<% end -%>
diff --git a/puppet/modules/apache/templates/vhosts/passenger/partial.erb b/puppet/modules/apache/templates/vhosts/passenger/partial.erb
new file mode 100644
index 00000000..c3b63f55
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/passenger/partial.erb
@@ -0,0 +1,7 @@
+ SetEnv GEM_HOME <%= @gempath %>
+ <Directory <%= @documentroot %>/>
+ AllowOverride <%= @allow_override %>
+  Options <%- unless @options.to_s == 'absent' -%><%= @options %><%- end -%><%- unless @options.to_s.include?('MultiViews') -%> -MultiViews<%- end -%>
+
+<%= scope.function_template(['apache/vhosts/partials/authentication.erb']) %>
+ </Directory>
diff --git a/puppet/modules/apache/templates/vhosts/perl/partial.erb b/puppet/modules/apache/templates/vhosts/perl/partial.erb
new file mode 100644
index 00000000..8c1f0a5a
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/perl/partial.erb
@@ -0,0 +1,14 @@
+ <Directory "<%= @documentroot %>/">
+<%= scope.function_template(['apache/vhosts/partials/std_override_options.erb']) %>
+<%= scope.function_template(['apache/vhosts/partials/authentication.erb']) %>
+ </Directory>
+
+<% unless @htpasswd_file.to_s == 'absent' -%>
+ <Directory "<%= @cgi_binpath %>/">
+ AuthType Basic
+ AuthName "Access fuer <%= @servername %>"
+ AuthUserFile <%= @real_htpasswd_path %>
+ require valid-user
+ </Directory>
+<% end -%>
+ ScriptAlias /cgi-bin/ <%= @cgi_binpath %>/
diff --git a/puppet/modules/apache/templates/vhosts/php/partial.erb b/puppet/modules/apache/templates/vhosts/php/partial.erb
new file mode 100644
index 00000000..c19ae7b4
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/php/partial.erb
@@ -0,0 +1,5 @@
+ <Directory "<%= @documentroot %>/">
+<%= scope.function_template(['apache/vhosts/partials/std_override_options.erb']) %>
+<%= scope.function_template(['apache/vhosts/partials/php_settings.erb']) %>
+<%= scope.function_template(['apache/vhosts/partials/authentication.erb']) %>
+ </Directory>
diff --git a/puppet/modules/apache/templates/vhosts/php_drupal/partial.erb b/puppet/modules/apache/templates/vhosts/php_drupal/partial.erb
new file mode 100644
index 00000000..316942fd
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/php_drupal/partial.erb
@@ -0,0 +1,22 @@
+ <Directory "<%= @documentroot %>/">
+<%= scope.function_template(['apache/vhosts/partials/std_override_options.erb']) %>
+<%= scope.function_template(['apache/vhosts/partials/authentication.erb']) %>
+<%= scope.function_template(['apache/vhosts/partials/php_settings.erb']) %>
+ # Protect files and directories from prying eyes.
+ <FilesMatch "\.(engine|inc|info|install|module|profile|po|sh|.*sql|theme|tpl(\.php)?|xtmpl)$|^(code-style\.pl|Entries.*|Repository|Root|Tag|Template)$">
+ Order allow,deny
+ </FilesMatch>
+
+ # Customized error messages.
+ ErrorDocument 404 /index.php
+
+ RewriteEngine on
+ RewriteCond %{REQUEST_FILENAME} !-f
+ RewriteCond %{REQUEST_FILENAME} !-d
+ RewriteRule ^(.*)$ index.php?q=$1 [L,QSA]
+ </Directory>
+ <Directory "<%= @documentroot %>/files/">
+ SetHandler Drupal_Security_Do_Not_Remove_See_SA_2006_006
+ Options None
+ Options +FollowSymLinks
+ </Directory>
diff --git a/puppet/modules/apache/templates/vhosts/php_gallery2/partial.erb b/puppet/modules/apache/templates/vhosts/php_gallery2/partial.erb
new file mode 100644
index 00000000..218c0e71
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/php_gallery2/partial.erb
@@ -0,0 +1,14 @@
+ <Directory "<%= @documentroot %>/">
+<%= scope.function_template(['apache/vhosts/partials/std_override_options.erb']) %>
+<%= scope.function_template(['apache/vhosts/partials/php_settings.erb']) %>
+<%= scope.function_template(['apache/vhosts/partials/authentication.erb']) %>
+
+  # Always rewrite logins
+ # Source: http://gallery.menalto.com/node/30558
+ RewriteEngine On
+ RewriteCond %{HTTPS} !=on
+ RewriteCond %{HTTP:X-Forwarded-Proto} !=https
+ RewriteCond %{HTTP_COOKIE} ^GALLERYSID= [OR]
+ RewriteCond %{QUERY_STRING} subView=core\.UserLogin
+ RewriteRule ^ https://%{HTTP_HOST}%{REQUEST_URI} [NE,R,L]
+ </Directory>
diff --git a/puppet/modules/apache/templates/vhosts/php_joomla/partial.erb b/puppet/modules/apache/templates/vhosts/php_joomla/partial.erb
new file mode 100644
index 00000000..55445bfc
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/php_joomla/partial.erb
@@ -0,0 +1,30 @@
+ <Directory "<%= @documentroot %>/">
+<%= scope.function_template(['apache/vhosts/partials/std_override_options.erb']) %>
+<%= scope.function_template(['apache/vhosts/partials/php_settings.erb']) %>
+<%= scope.function_template(['apache/vhosts/partials/authentication.erb']) %>
+
+ Include include.d/joomla.inc
+ </Directory>
+
+ <Directory "<%= @documentroot %>/administrator/">
+ RewriteEngine on
+
+  # Rewrite URLs for the admin area to https
+ RewriteCond %{REMOTE_ADDR} !^127\.[0-9]+\.[0-9]+\.[0-9]+$
+ RewriteCond %{HTTPS} !=on
+ RewriteCond %{REQUEST_URI} (.*/administrator/.*)
+ RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} [R]
+ </Directory>
+
+ # Deny various directories that
+  # shouldn't be web-accessible
+ <Directory "<%= @documentroot %>/tmp/">
+ Deny From All
+ </Directory>
+ <Directory "<%= @documentroot %>/logs/">
+ Deny From All
+ </Directory>
+ <Directory "<%= @documentroot %>/cli/">
+ Deny From All
+ </Directory>
+
diff --git a/puppet/modules/apache/templates/vhosts/php_mediawiki/partial.erb b/puppet/modules/apache/templates/vhosts/php_mediawiki/partial.erb
new file mode 100644
index 00000000..1ed6ee3e
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/php_mediawiki/partial.erb
@@ -0,0 +1,7 @@
+<% if @run_mode == 'fcgid' -%>
+ RewriteEngine On
+ RewriteCond %{DOCUMENT_ROOT}/%{REQUEST_FILENAME} !-f
+ RewriteCond %{DOCUMENT_ROOT}/%{REQUEST_FILENAME} !-d
+ RewriteRule ^/?index.php/(.*)$ /index.php?title=$1 [PT,L,QSA]
+<% end -%>
+<%= scope.function_template(['apache/vhosts/php/partial.erb']) %>
diff --git a/puppet/modules/apache/templates/vhosts/php_silverstripe/partial.erb b/puppet/modules/apache/templates/vhosts/php_silverstripe/partial.erb
new file mode 100644
index 00000000..75a81931
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/php_silverstripe/partial.erb
@@ -0,0 +1,12 @@
+ # silverstripe
+ RewriteEngine On
+ RewriteCond %{HTTPS} !=on
+ RewriteCond %{HTTP:X-Forwarded-Proto} !=https
+ RewriteRule /(Security|admin)(.*) https://%{HTTP_HOST}/admin$1$2 [L,R,NE]
+
+ <Directory "<%= @documentroot %>/">
+<%= scope.function_template(['apache/vhosts/partials/std_override_options.erb']) %>
+<%= scope.function_template(['apache/vhosts/partials/php_settings.erb']) %>
+<%= scope.function_template(['apache/vhosts/partials/authentication.erb']) %>
+ Include include.d/silverstripe.inc
+ </Directory>
diff --git a/puppet/modules/apache/templates/vhosts/php_typo3/partial.erb b/puppet/modules/apache/templates/vhosts/php_typo3/partial.erb
new file mode 100644
index 00000000..afb756df
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/php_typo3/partial.erb
@@ -0,0 +1,10 @@
+<%= scope.function_template(['apache/vhosts/php/partial.erb']) %>
+ <Directory "<%= @documentroot %>/typo3/">
+ RewriteEngine on
+
+  # Rewrite URLs for the admin area to https
+ RewriteCond %{HTTPS} !=on
+ RewriteCond %{HTTP:X-Forwarded-Proto} !=https
+ RewriteCond %{REQUEST_URI} (.*/typo3/.*)
+ RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} [L,R,NE]
+ </Directory>
diff --git a/puppet/modules/apache/templates/vhosts/php_wordpress/partial.erb b/puppet/modules/apache/templates/vhosts/php_wordpress/partial.erb
new file mode 100644
index 00000000..5e6ebd5e
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/php_wordpress/partial.erb
@@ -0,0 +1,19 @@
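+<%# The login cookie name and the obfuscated login URL below are sha1 digests -%>
+<%# seeded via fqdn_rand with the vhost name, so they stay stable per node and -%>
+<%# vhost while remaining hard to guess. -%>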
+<%= scope.function_template(['apache/vhosts/php/partial.erb']) %>
+
+ # fixes: http://git.zx2c4.com/w3-total-fail/tree/w3-total-fail.sh
+ <Directory "<%= @documentroot %>/wp-content/w3tc/dbcache">
+ Deny From All
+ </Directory>
+
+ # simple wp-login brute force protection
+ # http://www.frameloss.org/2013/04/26/even-easier-brute-force-login-protection-for-wordpress/
+ RewriteEngine On
+ RewriteCond %{HTTP_COOKIE} !<%= cookie = scope.function_sha1([scope.function_fqdn_rand([9999999999999,@name]).to_s + "cookie"]) %>
+ RewriteRule ^/wp-login.php /wordpress-login-<%= tmpuri = scope.function_sha1([scope.function_fqdn_rand([9999999999999,@name]).to_s + "wp-login"]) %>.php [R,L]
+ <Location /wordpress-login-<%= tmpuri %>.php>
+ CookieTracking on
+ CookieExpires 30
+ CookieName <%= cookie %>
+ </Location>
+ RewriteRule ^/wordpress-login-<%= tmpuri %>.php /wp-login.php [NE]
+
diff --git a/puppet/modules/apache/templates/vhosts/proxy/partial.erb b/puppet/modules/apache/templates/vhosts/proxy/partial.erb
new file mode 100644
index 00000000..0eecf820
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/proxy/partial.erb
@@ -0,0 +1,8 @@
+ <Proxy *>
+ Order deny,allow
+ Allow from all
+<%= scope.function_template(['apache/vhosts/partials/authentication.erb']) %>
+ </Proxy>
+ ProxyRequests Off
+ ProxyPass / <%= @options %>/
+ ProxyPassReverse / <%= @options %>/
diff --git a/puppet/modules/apache/templates/vhosts/redirect/partial.erb b/puppet/modules/apache/templates/vhosts/redirect/partial.erb
new file mode 100644
index 00000000..c8d7d11e
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/redirect/partial.erb
@@ -0,0 +1 @@
+ Redirect permanent / https://<%= @options %>
diff --git a/puppet/modules/apache/templates/vhosts/static/partial.erb b/puppet/modules/apache/templates/vhosts/static/partial.erb
new file mode 100644
index 00000000..dc6f11ca
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/static/partial.erb
@@ -0,0 +1,4 @@
+ <Directory "<%= @documentroot %>/">
+<%= scope.function_template(['apache/vhosts/partials/std_override_options.erb']) %>
+<%= scope.function_template(['apache/vhosts/partials/authentication.erb']) %>
+ </Directory>
diff --git a/puppet/modules/apache/templates/vhosts/webdav/partial.erb b/puppet/modules/apache/templates/vhosts/webdav/partial.erb
new file mode 100644
index 00000000..09ce632f
--- /dev/null
+++ b/puppet/modules/apache/templates/vhosts/webdav/partial.erb
@@ -0,0 +1,21 @@
+ DAVLockDB <%= @real_dav_db_dir %>/DAVLock
+ <Directory "<%= @documentroot %>/">
+ Dav on
+ AllowOverride None
+<% if @options.to_s != 'absent' || @do_includes.to_s == 'true' -%>
+ Options <% unless @options.to_s == 'absent' -%><%= @options %><% end -%><% unless @options.include?('Indexes') -%> Indexes<%- end -%>
+
+<% else -%>
+ Options Indexes
+
+<% end -%>
+<%= scope.function_template(['apache/vhosts/partials/authentication.erb']) %>
+<% if @ldap_auth.to_s == 'true' then -%>
+ Include include.d/ldap_auth.inc
+<% unless ldap_user.to_s == 'any' -%>
+ Require ldap-user <%= ldap_user.to_s %>
+<% else -%>
+ Require valid-user
+<% end
+ end -%>
+ </Directory>
diff --git a/puppet/modules/apache/templates/webfiles/autoconfig/config.shtml.erb b/puppet/modules/apache/templates/webfiles/autoconfig/config.shtml.erb
new file mode 100644
index 00000000..3a3d6bb5
--- /dev/null
+++ b/puppet/modules/apache/templates/webfiles/autoconfig/config.shtml.erb
@@ -0,0 +1,58 @@
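+<%# Mozilla/Thunderbird-style mail autoconfig document: the SSI #if/#set pair -%>
+<%# derives DOMAIN from SERVER_NAME at request time, while the ERB variables -%>
+<%# (@provider, @imap_server, @smtp_server, ...) supply provider-wide defaults. -%>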
+<?xml version="1.0" encoding="UTF-8"?>
+<!--#if expr="$SERVER_NAME = /([^.]*\.[^.]*)$/" -->
+ <!--#set var="DOMAIN" value="$1" -->
+<!--#endif -->
+
+<clientConfig version="1.1">
+ <emailProvider id="<%= @provider %>">
+ <domain><!--#echo var="DOMAIN" --></domain>
+ <displayName><%= @display_name || @provider %> Mail (<!--#echo var="DOMAIN" -->)</displayName>
+ <displayShortName><%= @shortname || @provider.split('.').first %></displayShortName>
+ <incomingServer type="imap">
+ <hostname><%= @imap_server || "imap.#{@provider}" %></hostname>
+ <port>143</port>
+ <socketType>STARTTLS</socketType>
+ <authentication>password-cleartext</authentication>
+ <username>%EMAILADDRESS%</username>
+ </incomingServer>
+ <incomingServer type="imap">
+ <hostname><%= @imap_server || "imap.#{@provider}" %></hostname>
+ <port>993</port>
+ <socketType>SSL</socketType>
+ <authentication>password-cleartext</authentication>
+ <username>%EMAILADDRESS%</username>
+ </incomingServer>
+ <incomingServer type="pop3">
+ <hostname><%= @pop_server || "pop.#{@provider}" %></hostname>
+ <port>110</port>
+ <socketType>STARTTLS</socketType>
+ <authentication>password-cleartext</authentication>
+ <username>%EMAILADDRESS%</username>
+ </incomingServer>
+ <incomingServer type="pop3">
+ <hostname><%= @pop_server || "pop.#{@provider}" %></hostname>
+ <port>995</port>
+ <socketType>SSL</socketType>
+ <authentication>password-cleartext</authentication>
+ <username>%EMAILADDRESS%</username>
+ </incomingServer>
+ <outgoingServer type="smtp">
+ <hostname><%= @smtp_server || "smtp.#{@provider}" %></hostname>
+ <port>587</port>
+ <socketType>STARTTLS</socketType>
+ <authentication>password-cleartext</authentication>
+ <username>%EMAILADDRESS%</username>
+ </outgoingServer>
+ <outgoingServer type="smtp">
+ <hostname><%= @smtp_server || "smtp.#{@provider}"%></hostname>
+ <port>465</port>
+ <socketType>SSL</socketType>
+ <authentication>password-cleartext</authentication>
+ <username>%EMAILADDRESS%</username>
+ </outgoingServer>
+ <documentation url="<%= @documentation_url || "http://#{@provider}" %>">
+ <descr lang="de">Allgemeine Beschreibung der Einstellungen</descr>
+ <descr lang="en">Generic settings page</descr>
+ </documentation>
+ </emailProvider>
+</clientConfig>
diff --git a/puppet/modules/apt/.gitignore b/puppet/modules/apt/.gitignore
new file mode 100644
index 00000000..a54aa971
--- /dev/null
+++ b/puppet/modules/apt/.gitignore
@@ -0,0 +1,12 @@
+/pkg/
+/Gemfile.lock
+/vendor/
+/spec/fixtures/manifests/*
+/spec/fixtures/modules/*
+!/spec/fixtures/modules/apt
+!/spec/fixtures/modules/apt/*
+/.vagrant/
+/.bundle/
+/coverage/
+/.idea/
+*.iml
diff --git a/puppet/modules/apt/.gitlab-ci.yml b/puppet/modules/apt/.gitlab-ci.yml
new file mode 100644
index 00000000..f7b8ecad
--- /dev/null
+++ b/puppet/modules/apt/.gitlab-ci.yml
@@ -0,0 +1,12 @@
+before_script:
+ - ruby -v
+ - gem install bundler --no-ri --no-rdoc
+ - bundle install --jobs $(nproc) "${FLAGS[@]}"
+
+# don't fail on lint warnings
+rspec:
+ script:
+ - bundle exec rake lint || /bin/true
+ - bundle exec rake syntax
+ - bundle exec rake validate
+ - bundle exec rake spec
diff --git a/puppet/modules/apt/Gemfile b/puppet/modules/apt/Gemfile
new file mode 100644
index 00000000..8925a904
--- /dev/null
+++ b/puppet/modules/apt/Gemfile
@@ -0,0 +1,13 @@
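+# Puppet and Facter versions can be pinned through several environment variable
+# spellings (PUPPET_VERSION, GEM_PUPPET_VERSION, PUPPET_GEM_VERSION, ...) so
+# that different CI setups can all drive the same Gemfile.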
+source "https://rubygems.org"
+
+group :test do
+ gem "rake"
+ gem "rspec", '< 3.2.0'
+ gem "puppet", ENV['PUPPET_VERSION'] || ENV['GEM_PUPPET_VERSION'] || ENV['PUPPET_GEM_VERSION'] || '~> 3.7.0'
+ gem "facter", ENV['FACTER_VERSION'] || ENV['GEM_FACTER_VERSION'] || ENV['FACTER_GEM_VERSION'] || '~> 2.2.0'
+ gem "rspec-puppet"
+ gem "puppetlabs_spec_helper"
+ gem "metadata-json-lint"
+ gem "rspec-puppet-facts"
+ gem "mocha"
+end
diff --git a/puppet/modules/apt/LICENSE b/puppet/modules/apt/LICENSE
new file mode 100644
index 00000000..94a9ed02
--- /dev/null
+++ b/puppet/modules/apt/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/puppet/modules/apt/README b/puppet/modules/apt/README
new file mode 100644
index 00000000..00db7d8e
--- /dev/null
+++ b/puppet/modules/apt/README
@@ -0,0 +1,602 @@
+
+Overview
+========
+
+This module manages apt on Debian.
+
+It keeps dpkg's and apt's databases current, as well as the keyrings used
+to secure package downloads.
+
+backports.debian.org is added.
+
+/etc/apt/sources.list and /etc/apt/preferences are managed. More
+recent Debian releases are pinned to very low values by default to
+prevent accidental upgrades.
+
+Ubuntu support is lagging behind but not absent either.
+
+! Upgrade Notice !
+
+ * The `disable_update` parameter has been removed. The main apt class
+   already defaults to *not* running `apt-get update` on every run, which
+   made this parameter redundant.
+   Include the `apt::update` class if you want an update to run every time.
+
+ * `apt::upgrade_package` no longer notifies Exec['apt_updated'] automatically,
+   so you now need to include `apt::update` yourself if you rely on that.
+
+ * The apt::codename parameter has been removed. In its place, the
+ debian_codename fact may be overridden via an environment variable. This
+ will affect all other debian_* facts, and achieve the same result.
+
+ FACTER_debian_codename=jessie puppet agent -t
+
+ * If you were using a custom 50unattended-upgrades.${::lsbdistcodename} file in
+   your site_apt, it is no longer supported. You should migrate to passing
+   $blacklisted_packages to the apt::unattended_upgrades class.
+
+ * The apt class is now a parameterized class. If you were previously
+   including this class after setting some variables, you will need to
+   instantiate the class with those variables instead. For example, if you
+   had the following in your manifests:
+
+ $apt_debian_url = 'http://localhost:9999/debian/'
+ $apt_use_next_release = true
+ include apt
+
+ you will need to remove the variables, and the include and instead do
+ the following:
+
+ class { 'apt': debian_url => 'http://localhost:9999/debian/', use_next_release => true }
+
+   Previously, you could manually set $lsbdistcodename to enable forced
+   upgrades. Because this is a top-level facter variable, and newer puppet
+   versions do not let you assign variables to other namespaces, this is no
+   longer possible. To obtain the same functionality, pass the 'codename'
+   parameter to the apt class; this changes the sources.list and preferences
+   files to the codename you set, allowing you to trigger upgrades:
+
+ include apt::dist_upgrade
+ class { 'apt': codename => 'wheezy', notify => Exec['apt_dist-upgrade'] }
+
+ * The apticron class is now a parameterized class. If you were previously
+   including this class, you will need to instantiate the class instead.
+   For example, if you had the following in your manifests:
+
+ $apticron_email = 'foo@example.com'
+ $apticron_notifynew = '1'
+ ... any $apticron_* variables
+ include apticron
+
+ you will need to remove the variables, and the include and instead do the
+ following:
+
+ class { 'apt::apticron': email => 'foo@example.com', notifynew => '1' }
+
+ * The apt::listchanges class is now a parameterized class. If you were
+   previously including this class after setting some variables, you will
+   need to instantiate the class with those variables instead. For example,
+   if you had the following in your manifests:
+
+ $apt_listchanges_email = 'foo@example.com'
+ ... any $apt_listchanges_* variables
+ include apt::listchanges
+
+ you will need to remove the variables, and the include and instead do the
+ following:
+
+ class { 'apt::listchanges': email => 'foo@example.com' }
+
+ * The apt::proxy_client class is now a parameterized class. If you were
+   previously including this class after setting some variables, you will
+   need to instantiate the class with those variables instead. For example,
+   if you had the following in your manifests:
+
+ $apt_proxy = 'http://proxy.domain'
+ $apt_proxy_port = 666
+ include apt::proxy_client
+
+ you will need to remove the variables, and the include and instead do the
+ following:
+
+ class { 'apt::proxy_client': proxy => 'http://proxy.domain', port => '666' }
+
+Requirements
+============
+
+This module needs:
+
+- the lsb-release package should be installed on the server prior to running
+  puppet; otherwise, all of the $::lsb* facts will be empty during runs.
+- the common module: https://gitlab.com/shared-puppet-modules-group/common
+
+By default, on normal hosts, this module sets the configuration option
+DSelect::Clean to 'auto'. On virtual servers, the value is set by default to
+'pre-auto', because virtual servers are usually more space-bound and have better
+recovery mechanisms via the host:
+
+From apt.conf(5), 0.7.2:
+ "Cache Clean mode; this value may be one of always, prompt, auto,
+ pre-auto and never. always and prompt will remove all packages
+ from the cache after upgrading, prompt (the default) does so
+ conditionally. auto removes only those packages which are no
+ longer downloadable (replaced with a new version for
+ instance). pre-auto performs this action before downloading new
+ packages."
+
+To change the default setting for DSelect::Clean, you can create a file named
+"03clean" or "03clean_vserver" in your site_apt module's files directory. You
+can also define this for a specific host by creating a file in a subdirectory of
+the site_apt modules' files directory that is named the same as the
+host. (example: site_apt/files/some.host.com/03clean, or
+site_apt/files/some.host.com/03clean_vserver)
+
+Classes
+=======
+
+apt
+---
+
+The apt class sets up most of the documented functionality. To use functionality
+that is not enabled by default, you must set one of the following parameters.
+
+Example usage:
+
+ class { 'apt': use_next_release => true, debian_url => 'http://localhost:9999/debian/' }
+
+Class parameters:
+
+* use_lts
+
+  If this variable is set to true, the CODENAME-lts sources (such as
+ squeeze-lts) are added.
+
+ By default this is false for backward compatibility with older
+ versions of this module.
+
+* use_volatile
+
+  If this variable is set to true, the CODENAME-updates sources (such as
+ squeeze-updates) are added.
+
+ By default this is false for backward compatibility with older
+ versions of this module.
+
+* include_src
+
+  If this variable is set to true, a deb-src source is added for every
+ added binary archive source.
+
+ By default this is false for backward compatibility with older
+ versions of this module.
+
+* use_next_release
+
+  If this variable is set to true, the sources for the next Debian
+  release are added. The default pinning configuration pins them to very
+  low values.
+
+ By default this is false for backward compatibility with older
+ versions of this module.
+
+* debian_url, security_url, backports_url, volatile_url
+
+  These variables allow you to override the default APT mirrors used for,
+  respectively, the standard Debian archives, the Debian security archive,
+  the official Debian backports and the Debian Volatile archive.
+
+* ubuntu_url
+
+  This variable allows you to override the default APT mirror used for all
+ standard Ubuntu archives (including updates, security, backports).
+
+* repos
+
+  If this variable is set, the default repositories list ("main contrib non-free")
+  is overridden.
+
+* disable_update
+
+ Disable "apt-get update" which is normally triggered by apt::upgrade_package
+ and apt::dist_upgrade.
+
+ Note that nodes can be updated once a day by using
+ APT::Periodic::Update-Package-Lists "1";
+  in e.g. /etc/apt/apt.conf.d/80_apt_update_daily.
+
+* custom_preferences
+
+ For historical reasons (Debian Lenny's version of APT did not support the use
+ of the preferences.d directory for putting fragments of 'preferences'), this
+ module will manage a default generic apt/preferences file with more
+ recent releases pinned to very low values so that any package
+ installation will not accidentally pull in packages from those suites
+ unless you explicitly specify the version number. This file will be
+ complemented with all of the preferences_snippet calls (see below).
+
+  If the default preferences template doesn't suit your needs, you can create a
+  template in your site_apt module and set custom_preferences to its content
+  (e.g. custom_preferences => template('site_apt/preferences')).
+
+ Setting this variable to false before including this class will force the
+ apt/preferences file to be absent:
+
+ class { 'apt': custom_preferences => false }
+
+* custom_sources_list
+
+ By default this module will use a basic apt/sources.list template with
+  a generic Debian mirror. If you need more specific sources, e.g. to
+  change the sections included in the source, you can set this variable
+  to the content that you want to use instead.
+
+ For example, setting this variable will pull in the
+ templates/site_apt/sources.list file:
+
+ class { 'apt': custom_sources_list => template('site_apt/sources.list') }
+
+* custom_key_dir
+
+  If you have different apt-key files that you want added to your
+ apt keyring, you can set this variable to a path in your fileserver
+ where individual key files can be placed. If this is set and keys
+ exist there, this module will 'apt-key add' each key.
+
+  The debian-archive-keyring package is installed and kept at the latest
+  revision (this includes the backports archive keyring).
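+
+As an illustration only (the parameter values below are hypothetical and
+should be adapted to your site), several of the parameters described above
+can be combined in a single declaration:
+
+  class { 'apt':
+    use_volatile   => true,
+    include_src    => true,
+    repos          => 'main contrib',
+    custom_key_dir => 'puppet:///modules/site_apt/keys.d',
+  }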
+
+apt::apticron
+-------------
+
+When you instantiate this class, apticron will be installed, with the following
+defaults, which you are free to change:
+
+ $ensure_version = 'installed',
+ $config = "apt/${::operatingsystem}/apticron_${::lsbdistcodename}.erb",
+ $email = 'root',
+ $diff_only = '1',
+ $listchanges_profile = 'apticron',
+ $system = false,
+ $ipaddressnum = false,
+ $ipaddresses = false,
+ $notifyholds = '0',
+ $notifynew = '0',
+ $customsubject = ''
+
+Example usage:
+
+ class { 'apt::apticron': email => 'foo@example.com', notifynew => '1' }
+
+apt::cron::download
+-------------------
+
+This class sets up cron-apt so that it downloads upgradable packages without
+actually upgrading anything, and sends an email when the output changes.
+
+cron-apt defaults to run at 4 AM. You may want to set the
+$apt_cron_hours variable before you include the class: its value will
+be passed as the "hours" parameter of a cronjob. Example:
+
+ # Run cron-apt every three hours
+ $apt_cron_hours = '*/3'
+
+Note that the default 4 AM cronjob won't be disabled.
+
+apt::cron::dist_upgrade
+-----------------------
+
+This class sets up cron-apt so that it dist-upgrades the system and
+emails when upgrades are performed.
+
+See apt::cron::download above if you need to run cron-apt more often
+than once a day.
+
+apt::dist_upgrade
+-----------------
+
+This class provides the Exec['apt_dist-upgrade'] resource that
+dist-upgrades the system.
+
+This exec is set as refreshonly, so including this class does not
+trigger any action per se: other resources may notify it, and other
+classes may inherit from this one and add to its subscription list
+using the plusignment ('+>') operator. A real-world example can be
+seen in the apt::dist_upgrade::initiator source.
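+
+As a minimal sketch (the file resource and its path below are purely
+illustrative, not part of this module), another resource can notify the
+exec to trigger a dist-upgrade:
+
+  include apt::dist_upgrade
+
+  # hypothetical marker file; a change to its content refreshes the exec
+  file { '/etc/dist_upgrade_trigger':
+    content => 'upgrade generation 1',
+    notify  => Exec['apt_dist-upgrade'],
+  }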
+
+apt::dist_upgrade::initiator
+----------------------------
+
+This class automatically dist-upgrades the system when an initiator
+file's content changes. The initiator file is copied from the first
+available source amongst the following ones, in decreasing priority
+order:
+
+- puppet:///modules/site_apt/${::fqdn}/upgrade_initiator
+- puppet:///modules/site_apt/upgrade_initiator
+- puppet:///modules/apt/upgrade_initiator
+
+This is useful when one does not want to set up a fully automated
+upgrade process but still needs a way to manually trigger full
+upgrades of any number of systems at scheduled times.
+
+Beware: a dist-upgrade is triggered the first time Puppet runs after
+this class has been included. This is actually the single reason why
+this class is not enabled by default.
+
+When this class is included the APT indexes are updated on every
+Puppet run due to the author's lack of Puppet wizardry.
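+
+Usage is a plain include (the upgrade itself is then driven by changes to
+the initiator file shipped from one of the sources listed above):
+
+  include apt::dist_upgrade::initiator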
+
+apt::dselect
+------------
+
+This class, when included, installs dselect and switches it to expert mode to
+suppress superfluous help screens.
+
+apt::listchanges
+----------------
+
+This class, when instantiated, installs apt-listchanges and configures it using
+the following parameterized variables, which can be changed:
+
+ version = 'present'
+ config = "apt/${::operatingsystem}/listchanges_${::lsbrelease}.erb"
+ frontend = 'pager'
+ email = 'root'
+ confirm = 0
+ saveseen = '/var/lib/apt/listchanges.db'
+ which = 'both'
+
+Example usage:
+
+  class { 'apt::listchanges': email => 'foo@example.com' }
+
+apt::proxy_client
+-----------------
+
+This class adds the configuration needed to make apt fetch packages via a
+proxy. The 'proxy' and 'port' class parameters control how it is reached:
+
+Set the 'proxy' class parameter to the URL of the proxy that will be used.
+By default, the proxy will be queried on port 3142, but you can change the
+port number by setting the 'port' class parameter.
+
+Example:
+
+ class { 'apt::proxy_client': proxy => 'http://proxy.domain', port => '666' }
+
+apt::reboot_required_notify
+---------------------------
+
+This class installs a daily cronjob that checks if a package upgrade
+requires the system to be rebooted; if so, cron sends a notification
+email to root.
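+
+Usage is a plain include:
+
+  include apt::reboot_required_notify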
+
+apt::unattended_upgrades
+------------------------
+
+If this class is included, it will install the package 'unattended-upgrades'
+and configure it to upgrade the system daily.
+
+The class has the following parameters that you can use to change the contents
+of the configuration file. The values shown here are the default values:
+
+ * $config_content = undef
+ * $config_template = 'apt/50unattended-upgrades.erb'
+ * $mailonlyonerror = true
+ * $mail_recipient = 'root'
+ * $blacklisted_packages = []
+
+Note that setting $config_content specifies the entire configuration
+contents and thus overrides the other parameters.
+
+Example:
+
+ class { 'apt::unattended_upgrades':
+ config_template => 'site_apt/50unattended-upgrades.jessie',
+ blacklisted_packages => [
+ 'libc6', 'libc6-dev', 'libc6-i686', 'mysql-server', 'redmine', 'nodejs',
+ 'bird'
+ ],
+ }
+
+Defines
+=======
+
+apt::apt_conf
+-------------
+
+Creates a file in the /etc/apt/apt.conf.d directory to easily add configuration
+components. One can use either the 'source' parameter to specify a list of
+static files to include from the puppet fileserver, or the 'content'
+parameter to define content inline or with the help of a template.
+
+Example:
+
+ apt::apt_conf { '80download-only':
+ source => 'puppet:///modules/site_apt/80download-only',
+ }
+
+apt::preferences_snippet
+------------------------
+
+A way to add pinning information to files in /etc/apt/preferences.d/
+
+Example:
+
+ apt::preferences_snippet {
+ 'irssi-plugin-otr':
+ release => 'squeeze-backports',
+ priority => 999;
+ }
+
+ apt::preferences_snippet {
+ 'unstable_fallback':
+ package => '*',
+ release => 'unstable',
+ priority => 1;
+ }
+
+ apt::preferences_snippet {
+ 'ttdnsd':
+ pin => 'origin deb.torproject.org',
+ priority => 999;
+ }
+
+The names of the resources will be used as the names of the files in the
+preferences.d directory, so you should ensure that resource names follow the
+prescribed naming scheme.
+
+From apt_preferences(5):
+ Note that the files in the /etc/apt/preferences.d directory are parsed in
+ alphanumeric ascending order and need to obey the following naming
+ convention: The files have no or "pref" as filename extension and which
+ only contain alphanumeric, hyphen (-), underscore (_) and period (.)
+ characters - otherwise they will be silently ignored.
+
+apt::preseeded_package
+----------------------
+
+This simplifies installation of packages for which you wish to preseed the
+answers to debconf. For example, if you wish to provide a preseed file for the
+locales package, you would place the locales.seed file in
+'site_apt/templates/${::lsbdistcodename}/locales.seeds' and then include the
+following in your manifest:
+
+ apt::preseeded_package { locales: }
+
+You can also specify the content of the seed via the content parameter,
+for example:
+
+ apt::preseeded_package { 'apticron':
+ content => 'apticron apticron/notification string root@example.com',
+ }
+
+apt::sources_list
+-----------------
+
+Creates a file in the /etc/apt/sources.list.d directory to easily add additional
+apt sources. One can use either the 'source' parameter to specify a list of
+static files to include from the puppet fileserver, or the 'content'
+parameter to define content inline or with the help of a template. Ending
+the resource name in '.list' is optional: it will be automatically added to the
+file name if not present in the resource name.
+
+Example:
+
+ apt::sources_list { 'company_internals':
+ source => [ "puppet:///modules/site_apt/${::fqdn}/company_internals.list",
+ 'puppet:///modules/site_apt/company_internals.list' ],
+ }
+
+apt::key
+--------
+
+Deploys a secure apt OpenPGP key. This usually accompanies the
+sources.list snippets above for third party repositories. For example,
+you would do:
+
+ apt::key { 'neurodebian.gpg':
+ ensure => present,
+ source => 'puppet:///modules/site_apt/neurodebian.gpg',
+ }
+
+This deploys the key in the `/etc/apt/trusted.gpg.d` directory, which
+is assumed by secure apt to be binary OpenPGP keys and *not*
+"ascii-armored" or "plain text" OpenPGP key material. For the latter,
+use `apt::key::plain`.
+
+The `.gpg` extension is compulsory for `apt` to pick up the key properly.
+
+apt::key::plain
+---------------
+
+Deploys a secure apt OpenPGP key. This usually accompanies the
+sources.list snippets above for third party repositories. For example,
+you would do:
+
+ apt::key::plain { 'neurodebian.asc':
+ source => 'puppet:///modules/site_apt/neurodebian.asc',
+ }
+
+This deploys the key in the `${apt_base_dir}/keys` directory (as
+opposed to `$custom_key_dir` which deploys it in `keys.d`). The reason
+this exists on top of `$custom_key_dir` is to allow a more
+decentralised distribution of those keys, without having all modules
+throw their keys in the same directory in the manifests.
+
+Note that this model does *not* currently allow keys to be removed!
+Use `apt::key` instead for a more practical, revocable approach, but
+note that it requires binary keys.
+
+apt::upgrade_package
+--------------------
+
+This simplifies upgrades for DSA security announcements or point releases. It
+ensures that the named package is upgraded to the specified version, but only
+if the package is installed; otherwise nothing happens. If the specified
+version is 'latest' (the default), the package is upgraded to the latest
+package revision when it becomes available.
+
+For example, the following upgrades the perl package to version 5.8.8-7etch1
+(if it is installed); it also upgrades the syslog-ng and perl-modules packages
+to their latest versions (again, only if they are installed):
+
+  apt::upgrade_package { 'perl':
+      version => '5.8.8-7etch1';
+    'syslog-ng':
+      version => latest;
+    'perl-modules':
+  }
+
+Resources
+=========
+
+File['apt_config']
+------------------
+
+Use this resource to depend on or add to a completed apt configuration.
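+
+For example, a sketch of a resource that should only be managed once the
+apt configuration is complete (the package name is arbitrary):
+
+  package { 'somepackage':
+    ensure  => installed,
+    require => File['apt_config'],
+  }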
+
+Exec['apt_updated']
+-------------------
+
+After this point the APT indexes are up-to-date.
+This resource is set to `refreshonly => true`, so it is not run on
+every Puppet run. To run it every time, you can include the `apt::update`
+class.
+
+This resource is usually used like this to ensure current packages are
+installed by Package resources:
+
+ include apt::update
+ Package { require => Exec['apt_updated'] }
+
+Note that nodes can be updated once a day by using
+
+ APT::Periodic::Update-Package-Lists "1";
+
+in e.g. /etc/apt/apt.conf.d/80_apt_update_daily.
+
+
+Tests
+=====
+
+To run the puppet rspec tests:
+
+ bundle install --path vendor/bundle
+ bundle exec rake spec
+
+Using different facter/puppet versions:
+
+ FACTER_GEM_VERSION=1.6.10 PUPPET_GEM_VERSION=2.7.23 bundle install --path vendor/bundle
+ bundle exec rake spec
+
+Licensing
+=========
+
+This puppet module is licensed under the GPL version 3 or later. Redistribution
+and modification is encouraged.
+
+The GPL version 3 license text can be found in the "LICENSE" file accompanying
+this puppet module, or at the following URL:
+
+http://www.gnu.org/licenses/gpl-3.0.html
diff --git a/puppet/modules/apt/Rakefile b/puppet/modules/apt/Rakefile
new file mode 100644
index 00000000..85326bb4
--- /dev/null
+++ b/puppet/modules/apt/Rakefile
@@ -0,0 +1,19 @@
+require 'puppetlabs_spec_helper/rake_tasks'
+require 'puppet-lint/tasks/puppet-lint'
+PuppetLint.configuration.send('disable_80chars')
+PuppetLint.configuration.ignore_paths = ["spec/**/*.pp", "pkg/**/*.pp"]
+
+desc "Validate manifests, templates, and ruby files"
+task :validate do
+ Dir['manifests/**/*.pp'].each do |manifest|
+ sh "puppet parser validate --noop #{manifest}"
+ end
+ Dir['spec/**/*.rb','lib/**/*.rb'].each do |ruby_file|
+ sh "ruby -c #{ruby_file}" unless ruby_file =~ /spec\/fixtures/
+ end
+ Dir['templates/**/*.erb'].each do |template|
+ sh "erb -P -x -T '-' #{template} | ruby -c"
+ end
+end
+
+task :test => [:lint, :syntax, :validate, :spec]
diff --git a/puppet/modules/apt/files/02show_upgraded b/puppet/modules/apt/files/02show_upgraded
new file mode 100644
index 00000000..bb127d41
--- /dev/null
+++ b/puppet/modules/apt/files/02show_upgraded
@@ -0,0 +1,4 @@
+// This file is managed by Puppet
+// all local modifications will be overwritten
+
+APT::Get::Show-Upgraded true;
diff --git a/puppet/modules/apt/files/03clean b/puppet/modules/apt/files/03clean
new file mode 100644
index 00000000..3d20924a
--- /dev/null
+++ b/puppet/modules/apt/files/03clean
@@ -0,0 +1,4 @@
+// This file is managed by Puppet
+// all local modifications will be overwritten
+
+DSelect::Clean auto;
diff --git a/puppet/modules/apt/files/03clean_vserver b/puppet/modules/apt/files/03clean_vserver
new file mode 100644
index 00000000..6bb84e58
--- /dev/null
+++ b/puppet/modules/apt/files/03clean_vserver
@@ -0,0 +1,4 @@
+// This file is managed by Puppet
+// all local modifications will be overwritten
+
+DSelect::Clean pre-auto;
diff --git a/puppet/modules/apt/files/upgrade_initiator b/puppet/modules/apt/files/upgrade_initiator
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/puppet/modules/apt/files/upgrade_initiator
@@ -0,0 +1 @@
+
diff --git a/puppet/modules/apt/lib/facter/apt_running.rb b/puppet/modules/apt/lib/facter/apt_running.rb
new file mode 100644
index 00000000..e8f2156e
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/apt_running.rb
@@ -0,0 +1,7 @@
+Facter.add("apt_running") do
+ setcode do
+ #Facter::Util::Resolution.exec('/usr/bin/dpkg -s mysql-server >/dev/null 2>&1 && echo true || echo false')
+ Facter::Util::Resolution.exec('pgrep apt-get >/dev/null 2>&1 && echo true || echo false')
+ end
+end
+
diff --git a/puppet/modules/apt/lib/facter/debian_codename.rb b/puppet/modules/apt/lib/facter/debian_codename.rb
new file mode 100644
index 00000000..254877aa
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/debian_codename.rb
@@ -0,0 +1,42 @@
+begin
+ require 'facter/util/debian'
+rescue LoadError
+ require "#{File.dirname(__FILE__)}/util/debian"
+end
+
+def version_to_codename(version)
+ if Facter::Util::Debian::CODENAMES.has_key?(version)
+ return Facter::Util::Debian::CODENAMES[version]
+ else
+ Facter.warn("Could not determine codename from version '#{version}'")
+ end
+end
+
+Facter.add(:debian_codename) do
+ has_weight 99
+ confine :operatingsystem => 'Debian'
+ setcode do
+ Facter.value('lsbdistcodename')
+ end
+end
+
+Facter.add(:debian_codename) do
+ has_weight 66
+ confine :operatingsystem => 'Debian'
+ setcode do
+ version_to_codename(Facter.value('operatingsystemmajrelease'))
+ end
+end
+
+Facter.add(:debian_codename) do
+ has_weight 33
+ confine :operatingsystem => 'Debian'
+ setcode do
+ debian_version = File.open('/etc/debian_version', &:readline)
+ if debian_version.match(/^\d+/)
+ version_to_codename(debian_version.scan(/^(\d+)/)[0][0])
+ elsif debian_version.match(/^[a-z]+\/(sid|unstable)/)
+ debian_version.scan(/^([a-z]+)\//)[0][0]
+ end
+ end
+end
diff --git a/puppet/modules/apt/lib/facter/debian_lts.rb b/puppet/modules/apt/lib/facter/debian_lts.rb
new file mode 100644
index 00000000..f53a9eb8
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/debian_lts.rb
@@ -0,0 +1,16 @@
+begin
+ require 'facter/util/debian'
+rescue LoadError
+ require "#{File.dirname(__FILE__)}/util/debian"
+end
+
+Facter.add(:debian_lts) do
+ confine :operatingsystem => 'Debian'
+ setcode do
+ if Facter::Util::Debian::LTS.include? Facter.value('debian_codename')
+ true
+ else
+ false
+ end
+ end
+end
diff --git a/puppet/modules/apt/lib/facter/debian_nextcodename.rb b/puppet/modules/apt/lib/facter/debian_nextcodename.rb
new file mode 100644
index 00000000..c4c569b2
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/debian_nextcodename.rb
@@ -0,0 +1,23 @@
+begin
+ require 'facter/util/debian'
+rescue LoadError
+ require "#{File.dirname(__FILE__)}/util/debian"
+end
+
+def debian_codename_to_next(codename)
+ if codename == "sid"
+ return "experimental"
+ else
+ codenames = Facter::Util::Debian::CODENAMES
+ versions = Facter::Util::Debian::CODENAMES.invert
+ current_version = versions[codename]
+ return codenames[(current_version.to_i + 1).to_s]
+ end
+end
+
+Facter.add(:debian_nextcodename) do
+ confine :operatingsystem => 'Debian'
+ setcode do
+ debian_codename_to_next(Facter.value('debian_codename'))
+ end
+end
diff --git a/puppet/modules/apt/lib/facter/debian_nextrelease.rb b/puppet/modules/apt/lib/facter/debian_nextrelease.rb
new file mode 100644
index 00000000..2a9c4f5f
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/debian_nextrelease.rb
@@ -0,0 +1,23 @@
+def debian_release_to_next(release)
+ releases = [
+ 'oldoldoldstable',
+ 'oldoldstable',
+ 'oldstable',
+ 'stable',
+ 'testing',
+ 'unstable',
+ 'experimental',
+ ]
+ if releases.include? release
+ if releases.index(release)+1 < releases.count
+ return releases[releases.index(release)+1]
+ end
+ end
+end
+
+Facter.add(:debian_nextrelease) do
+ confine :operatingsystem => 'Debian'
+ setcode do
+ debian_release_to_next(Facter.value('debian_release'))
+ end
+end
diff --git a/puppet/modules/apt/lib/facter/debian_release.rb b/puppet/modules/apt/lib/facter/debian_release.rb
new file mode 100644
index 00000000..2c334ccd
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/debian_release.rb
@@ -0,0 +1,38 @@
+begin
+ require 'facter/util/debian'
+rescue LoadError
+ require "#{File.dirname(__FILE__)}/util/debian"
+end
+
+def debian_codename_to_release(codename)
+ stable = Facter::Util::Debian::STABLE
+ versions = Facter::Util::Debian::CODENAMES.invert
+ release = nil
+ if codename == "sid"
+ release = "unstable"
+ elsif versions.has_key? codename
+ version = versions[codename].to_i
+ if version == stable
+ release = "stable"
+ elsif version < stable
+ release = "stable"
+ for i in version..stable - 1
+ release = "old" + release
+ end
+ elsif version == stable + 1
+ release = "testing"
+ end
+ end
+ if release.nil?
+ Facter.warn("Could not determine release from codename #{codename}!")
+ end
+ return release
+end
+
+Facter.add(:debian_release) do
+ has_weight 99
+ confine :operatingsystem => 'Debian'
+ setcode do
+ debian_codename_to_release(Facter.value('debian_codename'))
+ end
+end
diff --git a/puppet/modules/apt/lib/facter/ubuntu_codename.rb b/puppet/modules/apt/lib/facter/ubuntu_codename.rb
new file mode 100644
index 00000000..814fd942
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/ubuntu_codename.rb
@@ -0,0 +1,8 @@
+Facter.add(:ubuntu_codename) do
+ confine :operatingsystem => 'Ubuntu'
+ setcode do
+ Facter.value('lsbdistcodename')
+ end
+end
+
+
diff --git a/puppet/modules/apt/lib/facter/ubuntu_nextcodename.rb b/puppet/modules/apt/lib/facter/ubuntu_nextcodename.rb
new file mode 100644
index 00000000..dcd1d426
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/ubuntu_nextcodename.rb
@@ -0,0 +1,20 @@
+begin
+ require 'facter/util/ubuntu'
+rescue LoadError
+ require "#{File.dirname(__FILE__)}/util/ubuntu"
+end
+
+def ubuntu_codename_to_next(codename)
+ codenames = Facter::Util::Ubuntu::CODENAMES
+ i = codenames.index(codename)
+ if i and i+1 < codenames.count
+ return codenames[i+1]
+ end
+end
+
+Facter.add(:ubuntu_nextcodename) do
+ confine :operatingsystem => 'Ubuntu'
+ setcode do
+ ubuntu_codename_to_next(Facter.value('ubuntu_codename'))
+ end
+end
diff --git a/puppet/modules/apt/lib/facter/util/debian.rb b/puppet/modules/apt/lib/facter/util/debian.rb
new file mode 100644
index 00000000..290c17b5
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/util/debian.rb
@@ -0,0 +1,18 @@
+module Facter
+ module Util
+ module Debian
+ STABLE = 8
+ CODENAMES = {
+ "5" => "lenny",
+ "6" => "squeeze",
+ "7" => "wheezy",
+ "8" => "jessie",
+ "9" => "stretch",
+ "10" => "buster",
+ }
+ LTS = [
+ "squeeze",
+ ]
+ end
+ end
+end
diff --git a/puppet/modules/apt/lib/facter/util/ubuntu.rb b/puppet/modules/apt/lib/facter/util/ubuntu.rb
new file mode 100644
index 00000000..52c15e80
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/util/ubuntu.rb
@@ -0,0 +1,21 @@
+module Facter
+ module Util
+ module Ubuntu
+ CODENAMES = [
+ "lucid",
+ "maverick",
+ "natty",
+ "oneiric",
+ "precise",
+ "quantal",
+ "raring",
+ "saucy",
+ "trusty",
+ "utopic",
+ "vivid",
+ "wily",
+ "xenial"
+ ]
+ end
+ end
+end
diff --git a/puppet/modules/apt/manifests/apt_conf.pp b/puppet/modules/apt/manifests/apt_conf.pp
new file mode 100644
index 00000000..949f6157
--- /dev/null
+++ b/puppet/modules/apt/manifests/apt_conf.pp
@@ -0,0 +1,45 @@
+define apt::apt_conf(
+ $ensure = 'present',
+ $source = '',
+ $content = undef,
+ $refresh_apt = true )
+{
+
+ if $source == '' and $content == undef {
+ fail("One of \$source or \$content must be specified for apt_conf ${name}")
+ }
+
+ if $source != '' and $content != undef {
+ fail("Only one of \$source or \$content must specified for apt_conf ${name}")
+ }
+
+ include apt::dot_d_directories
+
+ # One would expect the 'file' resource on apt.conf.d to trigger an
+ # apt-get update when files are added or modified in the directory, but it
+ # apparently doesn't.
+ file { "/etc/apt/apt.conf.d/${name}":
+ ensure => $ensure,
+ owner => root,
+ group => 0,
+ mode => '0644',
+ }
+
+ if $source {
+ File["/etc/apt/apt.conf.d/${name}"] {
+ source => $source,
+ }
+ }
+ else {
+ File["/etc/apt/apt.conf.d/${name}"] {
+ content => $content,
+ }
+ }
+
+ if $refresh_apt {
+ File["/etc/apt/apt.conf.d/${name}"] {
+ notify => Exec['apt_updated'],
+ }
+ }
+
+}
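A minimal usage sketch for the apt::apt_conf define above; the snippet name and the APT option it sets are hypothetical:

    apt::apt_conf { '99norecommends':
      content => "APT::Install-Recommends \"0\";\n",
    }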
diff --git a/puppet/modules/apt/manifests/apticron.pp b/puppet/modules/apt/manifests/apticron.pp
new file mode 100644
index 00000000..9c94f9c9
--- /dev/null
+++ b/puppet/modules/apt/manifests/apticron.pp
@@ -0,0 +1,24 @@
+class apt::apticron(
+ $ensure_version = 'installed',
+ $config = "apt/${::operatingsystem}/apticron_${::debian_codename}.erb",
+ $email = 'root',
+ $diff_only = '1',
+ $listchanges_profile = 'apticron',
+ $system = false,
+ $ipaddressnum = false,
+ $ipaddresses = false,
+ $notifyholds = '0',
+ $notifynew = '0',
+ $customsubject = ''
+) {
+
+ package { 'apticron': ensure => $ensure_version }
+
+ file { '/etc/apticron/apticron.conf':
+ content => template($apt::apticron::config),
+ owner => root,
+ group => root,
+ mode => '0644',
+ require => Package['apticron'];
+ }
+}
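A hedged example of declaring the apt::apticron class above; the mail address is a placeholder:

    class { 'apt::apticron':
      email     => 'admin@example.org',
      diff_only => '1',
    }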
diff --git a/puppet/modules/apt/manifests/cron/base.pp b/puppet/modules/apt/manifests/cron/base.pp
new file mode 100644
index 00000000..39fc3061
--- /dev/null
+++ b/puppet/modules/apt/manifests/cron/base.pp
@@ -0,0 +1,20 @@
+class apt::cron::base {
+
+ package { 'cron-apt': ensure => installed }
+
+ case $apt_cron_hours {
+ '': {}
+ default: {
+ # cron-apt defaults to run every night at 4 o'clock
+ # so we try not to run at the same time.
+ cron { 'apt_cron_every_N_hours':
+ command => 'test -x /usr/sbin/cron-apt && /usr/sbin/cron-apt',
+ user => root,
+ hour => "${apt_cron_hours}",
+ minute => 10,
+ require => Package['cron-apt'],
+ }
+ }
+ }
+
+}
diff --git a/puppet/modules/apt/manifests/cron/dist_upgrade.pp b/puppet/modules/apt/manifests/cron/dist_upgrade.pp
new file mode 100644
index 00000000..74403bb7
--- /dev/null
+++ b/puppet/modules/apt/manifests/cron/dist_upgrade.pp
@@ -0,0 +1,29 @@
+class apt::cron::dist_upgrade inherits apt::cron::base {
+
+ $action = "autoclean -y
+dist-upgrade -y -o APT::Get::Show-Upgraded=true -o 'DPkg::Options::=--force-confold'
+"
+
+ file { '/etc/cron-apt/action.d/3-download':
+ ensure => absent,
+ }
+
+ package { 'apt-listbugs': ensure => absent }
+
+ file { '/etc/cron-apt/action.d/4-dist-upgrade':
+ content => $action,
+ owner => root,
+ group => 0,
+ mode => '0644',
+ require => Package[cron-apt];
+ }
+
+ file { '/etc/cron-apt/config.d/MAILON':
+ content => "MAILON=upgrade\n",
+ owner => root,
+ group => 0,
+ mode => '0644',
+ require => Package[cron-apt];
+ }
+
+}
diff --git a/puppet/modules/apt/manifests/cron/download.pp b/puppet/modules/apt/manifests/cron/download.pp
new file mode 100644
index 00000000..4a19fec1
--- /dev/null
+++ b/puppet/modules/apt/manifests/cron/download.pp
@@ -0,0 +1,27 @@
+class apt::cron::download inherits apt::cron::base {
+
+ $action = "autoclean -y
+dist-upgrade -d -y -o APT::Get::Show-Upgraded=true
+"
+
+ file { '/etc/cron-apt/action.d/4-dist-upgrade':
+ ensure => absent,
+ }
+
+ file { '/etc/cron-apt/action.d/3-download':
+ content => $action,
+ require => Package[cron-apt],
+ owner => root,
+ group => 0,
+ mode => '0644';
+ }
+
+ file { '/etc/cron-apt/config.d/MAILON':
+ content => "MAILON=changes\n",
+ require => Package[cron-apt],
+ owner => root,
+ group => 0,
+ mode => '0644';
+ }
+
+}
diff --git a/puppet/modules/apt/manifests/dist_upgrade.pp b/puppet/modules/apt/manifests/dist_upgrade.pp
new file mode 100644
index 00000000..19c031e0
--- /dev/null
+++ b/puppet/modules/apt/manifests/dist_upgrade.pp
@@ -0,0 +1,9 @@
+class apt::dist_upgrade {
+
+ exec { 'apt_dist-upgrade':
+ command => '/usr/bin/apt-get -q -y -o \'DPkg::Options::=--force-confold\' dist-upgrade',
+ refreshonly => true,
+ before => Exec['apt_updated']
+ }
+
+}
diff --git a/puppet/modules/apt/manifests/dist_upgrade/initiator.pp b/puppet/modules/apt/manifests/dist_upgrade/initiator.pp
new file mode 100644
index 00000000..d2389883
--- /dev/null
+++ b/puppet/modules/apt/manifests/dist_upgrade/initiator.pp
@@ -0,0 +1,23 @@
+class apt::dist_upgrade::initiator inherits apt::dist_upgrade {
+
+ $initiator = 'upgrade_initiator'
+ $initiator_abs = "${apt::apt_base_dir}/${initiator}"
+
+ file { 'apt_upgrade_initiator':
+ mode => '0644',
+ owner => root,
+ group => 0,
+ path => $initiator_abs,
+ checksum => md5,
+ source => [
+ "puppet:///modules/site_apt/${::fqdn}/${initiator}",
+ "puppet:///modules/site_apt/${initiator}",
+ "puppet:///modules/apt/${initiator}",
+ ],
+ }
+
+ Exec['apt_dist-upgrade'] {
+ subscribe +> File['apt_upgrade_initiator'],
+ }
+
+}
diff --git a/puppet/modules/apt/manifests/dot_d_directories.pp b/puppet/modules/apt/manifests/dot_d_directories.pp
new file mode 100644
index 00000000..0ace8630
--- /dev/null
+++ b/puppet/modules/apt/manifests/dot_d_directories.pp
@@ -0,0 +1,15 @@
+class apt::dot_d_directories {
+
+ # watch .d directories and ensure they are present
+ file {
+ '/etc/apt/apt.conf.d':
+ ensure => directory,
+ checksum => mtime,
+ notify => Exec['apt_updated'];
+ '/etc/apt/sources.list.d':
+ ensure => directory,
+ checksum => mtime,
+ notify => Exec['apt_updated'];
+ }
+
+}
diff --git a/puppet/modules/apt/manifests/dselect.pp b/puppet/modules/apt/manifests/dselect.pp
new file mode 100644
index 00000000..2b99a43d
--- /dev/null
+++ b/puppet/modules/apt/manifests/dselect.pp
@@ -0,0 +1,11 @@
+# manage dselect, like
+# suppressing the annoying help texts
+class apt::dselect {
+
+ file_line { 'dselect_expert':
+ path => '/etc/dpkg/dselect.cfg',
+ line => 'expert',
+ }
+
+ package { 'dselect': ensure => installed }
+}
diff --git a/puppet/modules/apt/manifests/init.pp b/puppet/modules/apt/manifests/init.pp
new file mode 100644
index 00000000..4c44af2a
--- /dev/null
+++ b/puppet/modules/apt/manifests/init.pp
@@ -0,0 +1,150 @@
+# apt.pp - common components and defaults for handling apt
+# Copyright (C) 2008 Micah Anderson <micah@riseup.net>
+# Copyright (C) 2007 David Schmitt <david@schmitt.edv-bus.at>
+# See LICENSE for the full license granted to you.
+
+class apt(
+ $use_lts = $apt::params::use_lts,
+ $use_volatile = $apt::params::use_volatile,
+ $use_backports = $apt::params::use_backports,
+ $include_src = $apt::params::include_src,
+ $use_next_release = $apt::params::use_next_release,
+ $debian_url = $apt::params::debian_url,
+ $security_url = $apt::params::security_url,
+ $backports_url = $apt::params::backports_url,
+ $lts_url = $apt::params::lts_url,
+ $volatile_url = $apt::params::volatile_url,
+ $ubuntu_url = $apt::params::ubuntu_url,
+ $repos = $apt::params::repos,
+ $custom_preferences = $apt::params::custom_preferences,
+ $custom_sources_list = '',
+ $custom_key_dir = $apt::params::custom_key_dir
+) inherits apt::params {
+ case $::operatingsystem {
+ 'debian': {
+ $real_repos = $repos ? {
+ 'auto' => 'main contrib non-free',
+ default => $repos,
+ }
+ }
+ 'ubuntu': {
+ $real_repos = $repos ? {
+ 'auto' => 'main restricted universe multiverse',
+ default => $repos,
+ }
+ }
+ }
+
+ package { 'apt':
+ ensure => installed,
+ require => undef,
+ }
+
+ $sources_content = $custom_sources_list ? {
+ '' => template( "apt/${::operatingsystem}/sources.list.erb"),
+ default => $custom_sources_list
+ }
+ file {
+ # include main and security
+ # additional sources should be included via the apt::sources_list define
+ '/etc/apt/sources.list':
+ content => $sources_content,
+ notify => Exec['apt_updated'],
+ owner => root,
+ group => 0,
+ mode => '0644';
+ }
+
+ apt_conf { '02show_upgraded':
+ source => [ "puppet:///modules/site_apt/${::fqdn}/02show_upgraded",
+ 'puppet:///modules/site_apt/02show_upgraded',
+ 'puppet:///modules/apt/02show_upgraded' ]
+ }
+
+ if ( $::virtual == 'vserver' ) {
+ apt_conf { '03clean_vserver':
+ source => [ "puppet:///modules/site_apt/${::fqdn}/03clean_vserver",
+ 'puppet:///modules/site_apt/03clean_vserver',
+ 'puppet:///modules/apt/03clean_vserver' ],
+ alias => '03clean';
+ }
+ }
+ else {
+ apt_conf { '03clean':
+ source => [ "puppet:///modules/site_apt/${::fqdn}/03clean",
+ 'puppet:///modules/site_apt/03clean',
+ 'puppet:///modules/apt/03clean' ]
+ }
+ }
+
+ case $custom_preferences {
+ false: {
+ include apt::preferences::absent
+ }
+ default: {
+ # When squeeze becomes the stable branch, transform this file's header
+ # into a preferences.d file
+ include apt::preferences
+ }
+ }
+
+ include apt::dot_d_directories
+
+ ## This package should really always be current
+ package { 'debian-archive-keyring': ensure => latest }
+
+ # backports uses the normal archive key now
+ package { 'debian-backports-keyring': ensure => absent }
+
+ if ($use_backports and !($::debian_release in ['testing', 'unstable', 'experimental'])) {
+ apt::sources_list {
+ 'backports':
+ content => "deb $backports_url ${::debian_codename}-backports ${apt::real_repos}",
+ }
+ if $include_src {
+ apt::sources_list {
+ 'backports-src':
+ content => "deb-src $backports_url ${::debian_codename}-backports ${apt::real_repos}",
+ }
+ }
+ }
+
+ include common::moduledir
+ common::module_dir { 'apt': }
+ $apt_base_dir = "${common::moduledir::module_dir_path}/apt"
+
+ if $custom_key_dir {
+ file { "${apt_base_dir}/keys.d":
+ source => $custom_key_dir,
+ recurse => true,
+ owner => root,
+ group => root,
+ mode => '0755',
+ }
+ exec { 'custom_keys':
+ command => "find ${apt_base_dir}/keys.d -type f -exec apt-key add '{}' \\;",
+ subscribe => File["${apt_base_dir}/keys.d"],
+ refreshonly => true,
+ notify => Exec[refresh_apt]
+ }
+ if $custom_preferences != false {
+ Exec['custom_keys'] {
+ before => File['apt_config']
+ }
+ }
+ }
+
+ # workaround for preseeded_package component
+ file { [ '/var/cache', '/var/cache/local', '/var/cache/local/preseeding' ]: ensure => directory }
+
+ exec { 'update_apt':
+ command => '/usr/bin/apt-get update',
+ require => [
+ File['/etc/apt/apt.conf.d', '/etc/apt/preferences' ],
+ File['/etc/apt/sources.list'] ],
+ refreshonly => true,
+ # Another semaphore for all packages to reference
+ alias => [ 'apt_updated', 'refresh_apt']
+ }
+
+}
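A minimal sketch of declaring the apt class above with a few of its parameters overridden; the values shown are illustrative, not recommendations:

    class { 'apt':
      use_backports => true,
      include_src   => false,
      repos         => 'main contrib',
    }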
diff --git a/puppet/modules/apt/manifests/key.pp b/puppet/modules/apt/manifests/key.pp
new file mode 100644
index 00000000..cb70ec6a
--- /dev/null
+++ b/puppet/modules/apt/manifests/key.pp
@@ -0,0 +1,13 @@
+define apt::key ($source, $ensure = 'present') {
+ validate_re(
+ $name, '\.gpg$',
+ 'An apt::key resource name must have the .gpg extension',
+ )
+
+ file {
+ "/etc/apt/trusted.gpg.d/${name}":
+ ensure => $ensure,
+ source => $source,
+ notify => Exec['apt_updated'],
+ }
+}
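A usage sketch for the apt::key define above (note the resource title must end in .gpg); the key name and source path are hypothetical:

    apt::key { 'example-archive.gpg':
      source => 'puppet:///modules/site_apt/example-archive.gpg',
    }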
diff --git a/puppet/modules/apt/manifests/key/plain.pp b/puppet/modules/apt/manifests/key/plain.pp
new file mode 100644
index 00000000..dff8b51b
--- /dev/null
+++ b/puppet/modules/apt/manifests/key/plain.pp
@@ -0,0 +1,13 @@
+define apt::key::plain ($source) {
+ file {
+ "${apt::apt_base_dir}/keys/${name}":
+ source => $source;
+ "${apt::apt_base_dir}/keys":
+ ensure => directory;
+ }
+ exec { "apt-key add '${apt::apt_base_dir}/keys/${name}'":
+ subscribe => File["${apt::apt_base_dir}/keys/${name}"],
+ refreshonly => true,
+ notify => Exec['apt_updated'],
+ }
+}
diff --git a/puppet/modules/apt/manifests/listchanges.pp b/puppet/modules/apt/manifests/listchanges.pp
new file mode 100644
index 00000000..e64bb1b7
--- /dev/null
+++ b/puppet/modules/apt/manifests/listchanges.pp
@@ -0,0 +1,19 @@
+class apt::listchanges(
+ $ensure_version = 'installed',
+ $config = "apt/${::operatingsystem}/listchanges_${::debian_codename}.erb",
+ $frontend = 'mail',
+ $email = 'root',
+ $confirm = '0',
+ $saveseen = '/var/lib/apt/listchanges.db',
+ $which = 'both'
+){
+ package { 'apt-listchanges': ensure => $ensure_version }
+
+ file { '/etc/apt/listchanges.conf':
+ content => template($apt::listchanges::config),
+ owner => root,
+ group => root,
+ mode => '0644',
+ require => Package['apt-listchanges'];
+ }
+}
diff --git a/puppet/modules/apt/manifests/params.pp b/puppet/modules/apt/manifests/params.pp
new file mode 100644
index 00000000..28af06eb
--- /dev/null
+++ b/puppet/modules/apt/manifests/params.pp
@@ -0,0 +1,22 @@
+class apt::params () {
+ $use_lts = false
+ $use_volatile = false
+ $use_backports = true
+ $include_src = false
+ $use_next_release = false
+ $debian_url = 'http://httpredir.debian.org/debian/'
+ $security_url = 'http://security.debian.org/'
+ $ubuntu_url = 'http://archive.ubuntu.com/ubuntu'
+ $backports_url = $::debian_codename ? {
+ 'squeeze' => 'http://backports.debian.org/debian-backports/',
+ default => $::operatingsystem ? {
+ 'Ubuntu' => $ubuntu_url,
+ default => $debian_url,
+ }
+ }
+ $lts_url = $debian_url
+ $volatile_url = 'http://volatile.debian.org/debian-volatile/'
+ $repos = 'auto'
+ $custom_preferences = ''
+ $custom_key_dir = false
+}
diff --git a/puppet/modules/apt/manifests/preferences.pp b/puppet/modules/apt/manifests/preferences.pp
new file mode 100644
index 00000000..6982ca05
--- /dev/null
+++ b/puppet/modules/apt/manifests/preferences.pp
@@ -0,0 +1,20 @@
+class apt::preferences {
+
+ $pref_contents = $apt::custom_preferences ? {
+ '' => $::operatingsystem ? {
+ 'debian' => template("apt/${::operatingsystem}/preferences_${::debian_codename}.erb"),
+ 'ubuntu' => template("apt/${::operatingsystem}/preferences_${::ubuntu_codename}.erb"),
+ },
+ default => $apt::custom_preferences
+ }
+
+ file { '/etc/apt/preferences':
+ ensure => present,
+ alias => 'apt_config',
+ # only update together
+ content => $pref_contents,
+ require => File['/etc/apt/sources.list'],
+ owner => root, group => 0, mode => '0644';
+ }
+
+}
diff --git a/puppet/modules/apt/manifests/preferences/absent.pp b/puppet/modules/apt/manifests/preferences/absent.pp
new file mode 100644
index 00000000..f32e0307
--- /dev/null
+++ b/puppet/modules/apt/manifests/preferences/absent.pp
@@ -0,0 +1,7 @@
+class apt::preferences::absent {
+
+ file { '/etc/apt/preferences':
+ ensure => absent,
+ alias => 'apt_config',
+ }
+}
diff --git a/puppet/modules/apt/manifests/preferences_snippet.pp b/puppet/modules/apt/manifests/preferences_snippet.pp
new file mode 100644
index 00000000..b7dba0d8
--- /dev/null
+++ b/puppet/modules/apt/manifests/preferences_snippet.pp
@@ -0,0 +1,59 @@
+define apt::preferences_snippet (
+ $priority = undef,
+ $package = false,
+ $ensure = 'present',
+ $source = '',
+ $release = '',
+ $pin = ''
+) {
+
+ $real_package = $package ? {
+ false => $name,
+ default => $package,
+ }
+
+ if $ensure == 'present' {
+ if $apt::custom_preferences == false {
+ fail('Trying to define a preferences_snippet with $custom_preferences set to false.')
+ }
+
+ if $priority == undef {
+ fail('apt::preferences_snippet requires the \'priority\' argument to be set')
+ }
+
+ if !$pin and !$release {
+ fail('apt::preferences_snippet requires one of the \'pin\' or \'release\' argument to be set')
+ }
+ if $pin and $release {
+ fail('apt::preferences_snippet requires either a \'pin\' or \'release\' argument, not both')
+ }
+ }
+
+ file { "/etc/apt/preferences.d/${name}":
+ ensure => $ensure,
+ owner => root, group => 0, mode => '0644',
+ before => Exec['apt_updated'];
+ }
+
+ case $source {
+ '': {
+ case $release {
+ '': {
+ File["/etc/apt/preferences.d/${name}"]{
+ content => template('apt/preferences_snippet.erb')
+ }
+ }
+ default: {
+ File["/etc/apt/preferences.d/${name}"]{
+ content => template('apt/preferences_snippet_release.erb')
+ }
+ }
+ }
+ }
+ default: {
+ File["/etc/apt/preferences.d/${name}"]{
+ source => $source
+ }
+ }
+ }
+}
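A hedged usage sketch for apt::preferences_snippet as defined above; it assumes $apt::custom_preferences is not false, and the package name, release and priority are illustrative:

    apt::preferences_snippet { 'nginx':
      release  => "${::debian_codename}-backports",
      priority => 999,
    }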
diff --git a/puppet/modules/apt/manifests/preseeded_package.pp b/puppet/modules/apt/manifests/preseeded_package.pp
new file mode 100644
index 00000000..3ef06879
--- /dev/null
+++ b/puppet/modules/apt/manifests/preseeded_package.pp
@@ -0,0 +1,21 @@
+define apt::preseeded_package (
+ $ensure = 'installed',
+ $content = ''
+) {
+ $seedfile = "/var/cache/local/preseeding/${name}.seeds"
+ $real_content = $content ? {
+ '' => template ( "site_apt/${::debian_codename}/${name}.seeds" ),
+ default => $content
+ }
+
+ file { $seedfile:
+ content => $real_content,
+ mode => '0600', owner => root, group => root,
+ }
+
+ package { $name:
+ ensure => $ensure,
+ responsefile => $seedfile,
+ require => File[$seedfile],
+ }
+}
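A usage sketch for the apt::preseeded_package define above; the package and the debconf seed line are placeholders:

    apt::preseeded_package { 'locales':
      content => "locales locales/default_environment_locale select en_US.UTF-8\n",
    }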
diff --git a/puppet/modules/apt/manifests/proxy_client.pp b/puppet/modules/apt/manifests/proxy_client.pp
new file mode 100644
index 00000000..9ba79f23
--- /dev/null
+++ b/puppet/modules/apt/manifests/proxy_client.pp
@@ -0,0 +1,9 @@
+class apt::proxy_client(
+ $proxy = 'http://localhost',
+ $port = '3142',
+){
+
+ apt_conf { '20proxy':
+ content => template('apt/20proxy.erb'),
+ }
+}
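A minimal example of the apt::proxy_client class above, assuming a hypothetical apt-cacher host:

    class { 'apt::proxy_client':
      proxy => 'http://apt-cacher.example.org',
      port  => '3142',
    }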
diff --git a/puppet/modules/apt/manifests/reboot_required_notify.pp b/puppet/modules/apt/manifests/reboot_required_notify.pp
new file mode 100644
index 00000000..722e8a5e
--- /dev/null
+++ b/puppet/modules/apt/manifests/reboot_required_notify.pp
@@ -0,0 +1,21 @@
+class apt::reboot_required_notify {
+
+ # This package installs the script that creates /var/run/reboot-required*.
+ # This script (/usr/share/update-notifier/notify-reboot-required) is
+ # triggered e.g. by kernel packages.
+ package { 'update-notifier-common':
+ ensure => installed,
+ }
+
+ # cron-apt defaults to run every night at 4 o'clock
+ # plus some random time <1h.
+ # so we check if a reboot is required a bit later.
+ cron { 'apt_reboot_required_notify':
+ command => 'if [ -f /var/run/reboot-required ]; then echo "Reboot required\n" ; cat /var/run/reboot-required.pkgs ; fi',
+ user => root,
+ hour => 5,
+ minute => 20,
+ require => Package['update-notifier-common'],
+ }
+
+}
diff --git a/puppet/modules/apt/manifests/sources_list.pp b/puppet/modules/apt/manifests/sources_list.pp
new file mode 100644
index 00000000..0ee068d1
--- /dev/null
+++ b/puppet/modules/apt/manifests/sources_list.pp
@@ -0,0 +1,40 @@
+define apt::sources_list (
+ $ensure = 'present',
+ $source = '',
+ $content = undef
+) {
+
+ if $ensure == 'present' {
+ if $source == '' and $content == undef {
+ fail("One of \$source or \$content must be specified for apt_sources_snippet ${name}")
+ }
+ if $source != '' and $content != undef {
+ fail("Only one of \$source or \$content must specified for apt_sources_snippet ${name}")
+ }
+ }
+
+ include apt::dot_d_directories
+
+ $realname = regsubst($name, '\.list$', '')
+
+ # One would expect the 'file' resource on sources.list.d to trigger an
+ # apt-get update when files are added or modified in the directory, but it
+ # apparently doesn't.
+ file { "/etc/apt/sources.list.d/${realname}.list":
+ ensure => $ensure,
+ owner => root, group => 0, mode => '0644',
+ notify => Exec['apt_updated'],
+ }
+
+ if $source {
+ File["/etc/apt/sources.list.d/${realname}.list"] {
+ source => $source,
+ }
+ }
+ else {
+ File["/etc/apt/sources.list.d/${realname}.list"] {
+ content => $content,
+ }
+ }
+}
+
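A usage sketch for the apt::sources_list define above; the repository URL and suite are hypothetical:

    apt::sources_list { 'example.list':
      content => "deb http://deb.example.org/debian ${::debian_codename} main\n",
    }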
diff --git a/puppet/modules/apt/manifests/unattended_upgrades.pp b/puppet/modules/apt/manifests/unattended_upgrades.pp
new file mode 100644
index 00000000..52d75425
--- /dev/null
+++ b/puppet/modules/apt/manifests/unattended_upgrades.pp
@@ -0,0 +1,34 @@
+class apt::unattended_upgrades (
+ $config_content = undef,
+ $config_template = 'apt/50unattended-upgrades.erb',
+ $mailonlyonerror = true,
+ $mail_recipient = 'root',
+ $blacklisted_packages = [],
+ $ensure_version = present
+) {
+
+ package { 'unattended-upgrades':
+ ensure => $ensure_version
+ }
+
+ # For some reason, this directory is sometimes absent, which causes
+ # unattended-upgrades to crash.
+ file { '/var/log/unattended-upgrades':
+ ensure => directory,
+ owner => 'root',
+ group => 0,
+ mode => '0755',
+ require => Package['unattended-upgrades'],
+ }
+
+ $file_content = $config_content ? {
+ undef => template($config_template),
+ default => $config_content
+ }
+
+ apt_conf { '50unattended-upgrades':
+ content => $file_content,
+ require => Package['unattended-upgrades'],
+ refresh_apt => false
+ }
+}
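A hedged example of declaring the apt::unattended_upgrades class above; the recipient and blacklist entries are placeholders:

    class { 'apt::unattended_upgrades':
      mail_recipient       => 'admin@example.org',
      blacklisted_packages => [ 'libvirt-bin' ],
    }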
diff --git a/puppet/modules/apt/manifests/update.pp b/puppet/modules/apt/manifests/update.pp
new file mode 100644
index 00000000..dde83200
--- /dev/null
+++ b/puppet/modules/apt/manifests/update.pp
@@ -0,0 +1,7 @@
+class apt::update inherits ::apt {
+
+ Exec['update_apt'] {
+ refreshonly => false
+ }
+
+}
diff --git a/puppet/modules/apt/manifests/upgrade_package.pp b/puppet/modules/apt/manifests/upgrade_package.pp
new file mode 100644
index 00000000..30572c96
--- /dev/null
+++ b/puppet/modules/apt/manifests/upgrade_package.pp
@@ -0,0 +1,31 @@
+define apt::upgrade_package (
+ $version = ''
+) {
+
+ $version_suffix = $version ? {
+ '' => '',
+ 'latest' => '',
+ default => "=${version}",
+ }
+
+ if !defined(Package['apt-show-versions']) {
+ package { 'apt-show-versions':
+ ensure => installed,
+ require => undef,
+ }
+ }
+
+ if !defined(Package['dctrl-tools']) {
+ package { 'dctrl-tools':
+ ensure => installed,
+ require => undef,
+ }
+ }
+
+ exec { "apt-get -q -y -o 'DPkg::Options::=--force-confold' install ${name}${version_suffix}":
+ onlyif => [ "grep-status -F Status installed -a -P $name -q", "apt-show-versions -u $name | grep -q upgradeable" ],
+ require => Package['apt-show-versions', 'dctrl-tools'],
+ before => Exec['apt_updated']
+ }
+
+}
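A usage sketch for the apt::upgrade_package define above; the package name and version are illustrative:

    apt::upgrade_package { 'openssl':
      version => 'latest',
    }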
diff --git a/puppet/modules/apt/spec/spec_helper.rb b/puppet/modules/apt/spec/spec_helper.rb
new file mode 100644
index 00000000..21d1a988
--- /dev/null
+++ b/puppet/modules/apt/spec/spec_helper.rb
@@ -0,0 +1,12 @@
+# https://puppetlabs.com/blog/testing-modules-in-the-puppet-forge
+require 'rspec-puppet'
+require 'mocha/api'
+
+RSpec.configure do |c|
+
+ c.module_path = File.expand_path(File.join(File.dirname(__FILE__), '..', '..'))
+ c.color = true
+
+ #Puppet.features.stubs(:root? => true)
+
+end
diff --git a/puppet/modules/apt/spec/unit/custom_facts_spec.rb b/puppet/modules/apt/spec/unit/custom_facts_spec.rb
new file mode 100644
index 00000000..9a28d92e
--- /dev/null
+++ b/puppet/modules/apt/spec/unit/custom_facts_spec.rb
@@ -0,0 +1,86 @@
+require "spec_helper"
+
+describe "Facter::Util::Fact" do
+ before {
+ Facter.clear
+ }
+
+ describe 'custom facts' do
+
+ context 'Debian 7' do
+ before do
+ Facter.fact(:operatingsystem).stubs(:value).returns("Debian")
+ Facter.fact(:operatingsystemrelease).stubs(:value).returns("7.8")
+ Facter.fact(:lsbdistcodename).stubs(:value).returns("wheezy")
+ end
+
+ it "debian_release = oldstable" do
+ expect(Facter.fact(:debian_release).value).to eq('oldstable')
+ end
+
+ it "debian_codename = wheezy" do
+ expect(Facter.fact(:debian_codename).value).to eq('wheezy')
+ end
+
+ it "debian_nextcodename = jessie" do
+ expect(Facter.fact(:debian_nextcodename).value).to eq('jessie')
+ end
+
+ it "debian_nextrelease = stable" do
+ expect(Facter.fact(:debian_nextrelease).value).to eq('stable')
+ end
+ end
+
+ context 'Debian 8' do
+ before do
+ Facter.fact(:operatingsystem).stubs(:value).returns("Debian")
+ Facter.fact(:operatingsystemrelease).stubs(:value).returns("8.0")
+ Facter.fact(:lsbdistcodename).stubs(:value).returns("jessie")
+ end
+
+ it "debian_release = stable" do
+ expect(Facter.fact(:debian_release).value).to eq('stable')
+ end
+
+ it "debian_codename = jessie" do
+ expect(Facter.fact(:debian_codename).value).to eq('jessie')
+ end
+
+ it "debian_nextcodename = stretch" do
+ expect(Facter.fact(:debian_nextcodename).value).to eq('stretch')
+ end
+
+ it "debian_nextrelease = testing" do
+ expect(Facter.fact(:debian_nextrelease).value).to eq('testing')
+ end
+ end
+
+ context 'Ubuntu 15.10' do
+ before do
+ Facter.fact(:operatingsystem).stubs(:value).returns("Ubuntu")
+ Facter.fact(:operatingsystemrelease).stubs(:value).returns("15.10")
+ Facter.fact(:lsbdistcodename).stubs(:value).returns("wily")
+ end
+
+ it "ubuntu_codename = wily" do
+ expect(Facter.fact(:ubuntu_codename).value).to eq('wily')
+ end
+
+ it "ubuntu_nextcodename = xenial" do
+ expect(Facter.fact(:ubuntu_nextcodename).value).to eq('xenial')
+ end
+ end
+ end
+
+ describe "Test 'apt_running' fact" do
+ it "should return true when apt-get is running" do
+ Facter::Util::Resolution.stubs(:exec).with("pgrep apt-get >/dev/null 2>&1 && echo true || echo false").returns("true")
+ expect(Facter.fact(:apt_running).value).to eq('true')
+ end
+ it "should return false when apt-get is not running" do
+ Facter::Util::Resolution.stubs(:exec).with("pgrep apt-get >/dev/null 2>&1 && echo true || echo false").returns("false")
+ expect(Facter.fact(:apt_running).value).to eq('false')
+ end
+ end
+
+end
diff --git a/puppet/modules/apt/templates/20proxy.erb b/puppet/modules/apt/templates/20proxy.erb
new file mode 100644
index 00000000..520e7b1b
--- /dev/null
+++ b/puppet/modules/apt/templates/20proxy.erb
@@ -0,0 +1,5 @@
+// This file is managed by Puppet
+// all local modifications will be overwritten
+
+Acquire::http { Proxy "<%= @proxy %>:<%= @port %>"; };
+Acquire::HTTP::Proxy::bugs.debian.org "DIRECT";
diff --git a/puppet/modules/apt/templates/50unattended-upgrades.erb b/puppet/modules/apt/templates/50unattended-upgrades.erb
new file mode 100644
index 00000000..7c65d102
--- /dev/null
+++ b/puppet/modules/apt/templates/50unattended-upgrades.erb
@@ -0,0 +1,38 @@
+// this file is managed by puppet !
+
+<% if scope.lookupvar('::operatingsystem') == 'Ubuntu' -%>
+Unattended-Upgrade::Allowed-Origins {
+ "${distro_id}:${distro_codename}-security";
+ "${distro_id}:${distro_codename}-updates";
+ "${distro_id}:${distro_codename}-backports";
+<% elsif scope.lookupvar('::operatingsystem') == 'Debian' and scope.lookupvar('::debian_codename') == 'squeeze' -%>
+Unattended-Upgrade::Allowed-Origins {
+ "${distro_id}:<%= scope.lookupvar('::debian_release') %>";
+ "${distro_id}:squeeze-lts";
+<% elsif scope.lookupvar('::operatingsystem') == 'Debian' and scope.lookupvar('::debian_codename') == 'wheezy' -%>
+Unattended-Upgrade::Origins-Pattern {
+ "origin=Debian,archive=<%= scope.lookupvar('::debian_release') %>,label=Debian-Security";
+ "origin=Debian,archive=${distro_codename}-lts";
+<% else -%>
+Unattended-Upgrade::Origins-Pattern {
+ "origin=Debian,codename=${distro_codename},label=Debian";
+ "origin=Debian,codename=${distro_codename},label=Debian-Security";
+<% end -%>
+};
+
+<% if not @blacklisted_packages.empty? -%>
+Unattended-Upgrade::Package-Blacklist {
+<% @blacklisted_packages.each do |pkg| -%>
+ "<%= pkg %>";
+<% end -%>
+};
+<% end -%>
+
+APT::Periodic::Update-Package-Lists "1";
+APT::Periodic::Download-Upgradeable-Packages "1";
+APT::Periodic::Unattended-Upgrade "1";
+
+Unattended-Upgrade::Mail "<%= @mail_recipient -%>";
+<% if @mailonlyonerror -%>
+Unattended-Upgrade::MailOnlyOnError "true";
+<% end -%>
diff --git a/puppet/modules/apt/templates/Debian/apticron_jessie.erb b/puppet/modules/apt/templates/Debian/apticron_jessie.erb
new file mode 120000
index 00000000..a9a3a6fd
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/apticron_jessie.erb
@@ -0,0 +1 @@
+apticron_wheezy.erb \ No newline at end of file
diff --git a/puppet/modules/apt/templates/Debian/apticron_lenny.erb b/puppet/modules/apt/templates/Debian/apticron_lenny.erb
new file mode 100644
index 00000000..86b09977
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/apticron_lenny.erb
@@ -0,0 +1,50 @@
+# apticron.conf
+#
+# set EMAIL to a list of addresses which will be notified of impending updates
+#
+EMAIL="<%= scope.lookupvar('apt::apticron::email') %>"
+
+#
+# Set DIFF_ONLY to "1" to only output the difference of the current run
+# compared to the last run (ie. only new upgrades since the last run). If there
+# are no differences, no output/email will be generated. By default, apticron
+# will output everything that needs to be upgraded.
+#
+DIFF_ONLY="<%= scope.lookupvar('apt::apticron::diff_only') %>"
+
+#
+# Set LISTCHANGES_PROFILE if you would like apticron to invoke apt-listchanges
+# with the --profile option. You should add a corresponding profile to
+# /etc/apt/listchanges.conf
+#
+LISTCHANGES_PROFILE="<%= scope.lookupvar('apt::apticron::listchanges_profile') %>"
+
+#
+# Set SYSTEM if you would like apticron to use something other than the output
+# of "hostname -f" for the system name in the mails it generates
+#
+# SYSTEM="foobar.example.com"
+<% unless (v=scope.lookupvar('apt::apticron::system')).to_s == "false" -%>
+SYSTEM="<%= v %>"
+<% end -%>
+
+#
+# Set IPADDRESSNUM if you would like to configure the maximal number of IP
+# addresses apticron displays. The default is to display 1 address of each
+# family type (inet, inet6), if available.
+#
+# IPADDRESSNUM="1"
+<% unless (v=scope.lookupvar('apt::apticron::ipaddressnum')).to_s == "false" -%>
+IPADDRESSNUM="<%= v %>"
+<% end -%>
+
+#
+# Set IPADDRESSES to a whitespace separated list of reachable addresses for
+# this system. By default, apticron will try to work these out using the
+# "ip" command
+#
+# IPADDRESSES="192.0.2.1 2001:db8:1:2:3::1"
+<% unless (v=scope.lookupvar('apt::apticron::ipaddresses')).to_s == "false" -%>
+IPADDRESSES="<%= v %>"
+<% end -%>
+
diff --git a/puppet/modules/apt/templates/Debian/apticron_sid.erb b/puppet/modules/apt/templates/Debian/apticron_sid.erb
new file mode 120000
index 00000000..a9a3a6fd
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/apticron_sid.erb
@@ -0,0 +1 @@
+apticron_wheezy.erb \ No newline at end of file
diff --git a/puppet/modules/apt/templates/Debian/apticron_squeeze.erb b/puppet/modules/apt/templates/Debian/apticron_squeeze.erb
new file mode 100644
index 00000000..05b7c9b8
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/apticron_squeeze.erb
@@ -0,0 +1,82 @@
+# apticron.conf
+#
+# set EMAIL to a space separated list of addresses which will be notified of
+# impending updates
+#
+EMAIL="<%= scope.lookupvar('apt::apticron::email') %>"
+
+
+#
+# Set DIFF_ONLY to "1" to only output the difference of the current run
+# compared to the last run (ie. only new upgrades since the last run). If there
+# are no differences, no output/email will be generated. By default, apticron
+# will output everything that needs to be upgraded.
+#
+DIFF_ONLY="<%= scope.lookupvar('apt::apticron::diff_only') %>"
+
+#
+# Set LISTCHANGES_PROFILE if you would like apticron to invoke apt-listchanges
+# with the --profile option. You should add a corresponding profile to
+# /etc/apt/listchanges.conf
+#
+LISTCHANGES_PROFILE="<%= scope.lookupvar('apt::apticron::listchanges_profile') %>"
+
+#
+# Set SYSTEM if you would like apticron to use something other than the output
+# of "hostname -f" for the system name in the mails it generates
+#
+# SYSTEM="foobar.example.com"
+<% unless (v=scope.lookupvar('apt::apticron::system')).to_s == "false" -%>
+SYSTEM="<%= v %>"
+<% end -%>
+
+
+#
+# Set IPADDRESSNUM if you would like to configure the maximal number of IP
+# addresses apticron displays. The default is to display 1 address of each
+# family type (inet, inet6), if available.
+#
+# IPADDRESSNUM="1"
+<% unless (v=scope.lookupvar('apt::apticron::ipaddressnum')).to_s == "false" -%>
+IPADDRESSNUM="<%= v %>"
+<% end -%>
+
+
+#
+# Set IPADDRESSES to a whitespace separated list of reachable addresses for
+# this system. By default, apticron will try to work these out using the
+# "ip" command
+#
+# IPADDRESSES="192.0.2.1 2001:db8:1:2:3::1"
+<% unless (v=scope.lookupvar('apt::apticron::ipaddresses')).to_s == "false" -%>
+IPADDRESSES="<%= v %>"
+<% end -%>
+
+
+#
+# Set NOTIFY_HOLDS="0" if you don't want to be notified about new versions of
+# packages on hold in your system. The default behavior is downloading and
+# listing them as any other package.
+#
+# NOTIFY_HOLDS="0"
+NOTIFY_HOLDS="<%= scope.lookupvar('apt::apticron::notifyholds') %>"
+
+#
+# Set NOTIFY_NEW="0" if you don't want to be notified about packages which
+# are not installed in your system. Yes, it's possible! There are some issues
+# related to systems which have mixed stable/unstable sources. In these cases
+# apt-get will consider for example that packages with "Priority:
+# required"/"Essential: yes" in unstable but not in stable should be installed,
+# so they will be listed in dist-upgrade output. Please take a look at
+# http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=531002#44
+#
+# NOTIFY_NEW="0"
+NOTIFY_NEW="<%= scope.lookupvar('apt::apticron::notifynew') %>"
+
+#
+# Set CUSTOM_SUBJECT if you want to replace the default subject used in
+# the notification e-mails. This may help filtering/sorting client-side e-mail.
+#
+# CUSTOM_SUBJECT=""
+CUSTOM_SUBJECT="<%= scope.lookupvar('apt::apticron::customsubject') %>"
+
diff --git a/puppet/modules/apt/templates/Debian/apticron_wheezy.erb b/puppet/modules/apt/templates/Debian/apticron_wheezy.erb
new file mode 100644
index 00000000..655854e6
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/apticron_wheezy.erb
@@ -0,0 +1,80 @@
+# apticron.conf
+#
+# set EMAIL to a space separated list of addresses which will be notified of
+# impending updates
+#
+EMAIL="<%= scope.lookupvar('apt::apticron::email') %>"
+
+#
+# Set DIFF_ONLY to "1" to only output the difference of the current run
+# compared to the last run (ie. only new upgrades since the last run). If there
+# are no differences, no output/email will be generated. By default, apticron
+# will output everything that needs to be upgraded.
+#
+DIFF_ONLY="<%= scope.lookupvar('apt::apticron::diff_only') %>"
+
+#
+# Set LISTCHANGES_PROFILE if you would like apticron to invoke apt-listchanges
+# with the --profile option. You should add a corresponding profile to
+# /etc/apt/listchanges.conf
+#
+LISTCHANGES_PROFILE="<%= scope.lookupvar('apt::apticron::listchanges_profile') %>"
+
+#
+# Set SYSTEM if you would like apticron to use something other than the output
+# of "hostname -f" for the system name in the mails it generates
+#
+# SYSTEM="foobar.example.com"
+<% unless (v=scope.lookupvar('apt::apticron::system')).to_s == "false" -%>
+SYSTEM="<%= v %>"
+<% end -%>
+
+#
+# Set IPADDRESSNUM if you would like to configure the maximal number of IP
+# addresses apticron displays. The default is to display 1 address of each
+# family type (inet, inet6), if available.
+#
+# IPADDRESSNUM="1"
+<% unless (v=scope.lookupvar('apt::apticron::ipaddressnum')).to_s == "false" -%>
+IPADDRESSNUM="<%= v %>"
+<% end -%>
+
+#
+# Set IPADDRESSES to a whitespace separated list of reachable addresses for
+# this system. By default, apticron will try to work these out using the
+# "ip" command
+#
+# IPADDRESSES="192.0.2.1 2001:db8:1:2:3::1"
+<% unless (v=scope.lookupvar('apt::apticron::ipaddresses')).to_s == "false" -%>
+IPADDRESSES=<%= v %>"
+<% end -%>
+
+#
+# Set NOTIFY_HOLDS="0" if you don't want to be notified about new versions of
+# packages on hold in your system. The default behavior is downloading and
+# listing them as any other package.
+#
+# NOTIFY_HOLDS="0"
+NOTIFY_HOLDS="<%= scope.lookupvar('apt::apticron::notifyholds') %>"
+
+#
+# Set NOTIFY_NEW="0" if you don't want to be notified about packages which
+# are not installed in your system. Yes, it's possible! There are some issues
+# related to systems which have mixed stable/unstable sources. In these cases
+# apt-get will consider for example that packages with "Priority:
+# required"/"Essential: yes" in unstable but not in stable should be installed,
+# so they will be listed in dist-upgrade output. Please take a look at
+# http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=531002#44
+#
+# NOTIFY_NEW="0"
+NOTIFY_NEW="<%= scope.lookupvar('apt::apticron::notifynew') %>"
+
+
+#
+# Set CUSTOM_SUBJECT if you want to replace the default subject used in
+# the notification e-mails. This may help filtering/sorting client-side e-mail.
+# If you want to use internal vars please use single quotes here. Ex:
+# ='[apticron] : package update(s)'
+#
+# CUSTOM_SUBJECT=""
+CUSTOM_SUBJECT="<%= scope.lookupvar('apt::apticron::customsubject') %>"
diff --git a/puppet/modules/apt/templates/Debian/listchanges_jessie.erb b/puppet/modules/apt/templates/Debian/listchanges_jessie.erb
new file mode 120000
index 00000000..74ab496d
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/listchanges_jessie.erb
@@ -0,0 +1 @@
+listchanges_lenny.erb \ No newline at end of file
diff --git a/puppet/modules/apt/templates/Debian/listchanges_lenny.erb b/puppet/modules/apt/templates/Debian/listchanges_lenny.erb
new file mode 100644
index 00000000..1025dd0e
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/listchanges_lenny.erb
@@ -0,0 +1,7 @@
+[apt]
+frontend=<%= scope.lookupvar('apt::listchanges::frontend') %>
+email_address=<%= scope.lookupvar('apt::listchanges::email') %>
+confirm=<%= scope.lookupvar('apt::listchanges::confirm') %>
+save_seen=<%= scope.lookupvar('apt::listchanges::saveseen') %>
+which=<%= scope.lookupvar('apt::listchanges::which') %>
+
diff --git a/puppet/modules/apt/templates/Debian/listchanges_sid.erb b/puppet/modules/apt/templates/Debian/listchanges_sid.erb
new file mode 120000
index 00000000..74ab496d
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/listchanges_sid.erb
@@ -0,0 +1 @@
+listchanges_lenny.erb \ No newline at end of file
diff --git a/puppet/modules/apt/templates/Debian/listchanges_squeeze.erb b/puppet/modules/apt/templates/Debian/listchanges_squeeze.erb
new file mode 120000
index 00000000..74ab496d
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/listchanges_squeeze.erb
@@ -0,0 +1 @@
+listchanges_lenny.erb \ No newline at end of file
diff --git a/puppet/modules/apt/templates/Debian/listchanges_wheezy.erb b/puppet/modules/apt/templates/Debian/listchanges_wheezy.erb
new file mode 120000
index 00000000..74ab496d
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/listchanges_wheezy.erb
@@ -0,0 +1 @@
+listchanges_lenny.erb \ No newline at end of file
diff --git a/puppet/modules/apt/templates/Debian/preferences_jessie.erb b/puppet/modules/apt/templates/Debian/preferences_jessie.erb
new file mode 100644
index 00000000..0888abe5
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/preferences_jessie.erb
@@ -0,0 +1,14 @@
+Explanation: Debian <%= codename=scope.lookupvar('::debian_codename') %>
+Package: *
+Pin: release o=Debian,n=<%= codename %>
+Pin-Priority: 990
+
+Explanation: Debian sid
+Package: *
+Pin: release o=Debian,n=sid
+Pin-Priority: 1
+
+Explanation: Debian fallback
+Package: *
+Pin: release o=Debian
+Pin-Priority: -10
diff --git a/puppet/modules/apt/templates/Debian/preferences_lenny.erb b/puppet/modules/apt/templates/Debian/preferences_lenny.erb
new file mode 100644
index 00000000..65001687
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/preferences_lenny.erb
@@ -0,0 +1,25 @@
+Explanation: Debian <%= codename=scope.lookupvar('::debian_codename') %>
+Package: *
+Pin: release o=Debian,a=<%= scope.lookupvar('::debian_release') %>,v=5*
+Pin-Priority: 990
+
+Explanation: Debian backports
+Package: *
+Pin: origin backports.debian.org
+Pin-Priority: 200
+
+Explanation: Debian <%= next_release=scope.lookupvar('::debian_nextrelease') %>
+Package: *
+Pin: release o=Debian,a=<%= next_release %>
+Pin-Priority: 2
+
+Explanation: Debian sid
+Package: *
+Pin: release o=Debian,a=unstable
+Pin-Priority: 1
+
+Explanation: Debian fallback
+Package: *
+Pin: release o=Debian
+Pin-Priority: -10
+
diff --git a/puppet/modules/apt/templates/Debian/preferences_sid.erb b/puppet/modules/apt/templates/Debian/preferences_sid.erb
new file mode 100644
index 00000000..eb185543
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/preferences_sid.erb
@@ -0,0 +1,10 @@
+Explanation: Debian sid
+Package: *
+Pin: release o=Debian,n=sid
+Pin-Priority: 990
+
+Explanation: Debian fallback
+Package: *
+Pin: release o=Debian
+Pin-Priority: -10
+
diff --git a/puppet/modules/apt/templates/Debian/preferences_squeeze.erb b/puppet/modules/apt/templates/Debian/preferences_squeeze.erb
new file mode 100644
index 00000000..885edc73
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/preferences_squeeze.erb
@@ -0,0 +1,30 @@
+Explanation: Debian <%= codename=scope.lookupvar('::debian_codename') %>
+Package: *
+Pin: release o=Debian,n=<%= codename %>
+Pin-Priority: 990
+
+Explanation: Debian <%= codename %>-updates
+Package: *
+Pin: release o=Debian,n=<%= codename %>-updates
+Pin-Priority: 990
+
+Explanation: Debian <%= codename %>-lts
+Package: *
+Pin: release o=Debian,n=<%= codename %>-lts
+Pin-Priority: 990
+
+Explanation: Debian <%= next_codename=scope.lookupvar('::debian_nextcodename') %>
+Package: *
+Pin: release o=Debian,n=<%= next_codename %>
+Pin-Priority: 2
+
+Explanation: Debian sid
+Package: *
+Pin: release o=Debian,n=sid
+Pin-Priority: 1
+
+Explanation: Debian fallback
+Package: *
+Pin: release o=Debian
+Pin-Priority: -10
+
diff --git a/puppet/modules/apt/templates/Debian/preferences_wheezy.erb b/puppet/modules/apt/templates/Debian/preferences_wheezy.erb
new file mode 100644
index 00000000..106108d5
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/preferences_wheezy.erb
@@ -0,0 +1,20 @@
+Explanation: Debian <%= codename=scope.lookupvar('::debian_codename') %>
+Package: *
+Pin: release o=Debian,n=<%= codename %>
+Pin-Priority: 990
+
+Explanation: Debian <%= codename %>-updates
+Package: *
+Pin: release o=Debian,n=<%= codename %>-updates
+Pin-Priority: 990
+
+Explanation: Debian sid
+Package: *
+Pin: release o=Debian,n=sid
+Pin-Priority: 1
+
+Explanation: Debian fallback
+Package: *
+Pin: release o=Debian
+Pin-Priority: -10
+
diff --git a/puppet/modules/apt/templates/Debian/sources.list.erb b/puppet/modules/apt/templates/Debian/sources.list.erb
new file mode 100644
index 00000000..44eea538
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/sources.list.erb
@@ -0,0 +1,76 @@
+# This file is managed by puppet
+# all local modifications will be overwritten
+
+### Debian current: <%= codename=scope.lookupvar('::debian_codename') %>
+
+# basic
+deb <%= debian_url=scope.lookupvar('apt::debian_url') %> <%= codename %> <%= lrepos=scope.lookupvar('apt::real_repos') %>
+<% if include_src=scope.lookupvar('apt::include_src') -%>
+deb-src <%= debian_url %> <%= codename %> <%= lrepos %>
+<% end -%>
+
+# security
+<% if ((release=scope.lookupvar('::debian_release')) == "stable" || release == "oldstable") -%>
+deb <%= security_url=scope.lookupvar('apt::security_url') %> <%= codename %>/updates <%= lrepos %>
+<% if include_src -%>
+deb-src <%= security_url %> <%= codename %>/updates <%= lrepos %>
+<% end -%>
+<% else -%>
+# There is no security support for <%= release %>
+<% end -%>
+
+<% if use_volatile=scope.lookupvar('apt::use_volatile') -%>
+# volatile
+<% if (release == "testing" || release == "unstable" || release == "experimental") -%>
+# There is no volatile archive for <%= release %>
+<% else -%>
+deb <%= debian_url %> <%= codename %>-updates <%= lrepos %>
+<% if include_src -%>
+deb-src <%= debian_url %> <%= codename %>-updates <%= lrepos %>
+<% end
+ end
+ end -%>
+
+<% if use_lts=scope.lookupvar('apt::use_lts') -%>
+# LTS
+<% if release_lts=scope.lookupvar('::debian_lts') == "false" -%>
+# There is no LTS archive for <%= release %>
+<% else -%>
+deb <%= debian_url %> <%= codename %>-lts <%= lrepos %>
+<% if include_src -%>
+deb-src <%= debian_url %> <%= codename %>-lts <%= lrepos %>
+<% end -%>
+<% end -%>
+<% end -%>
+
+<% if next_release=scope.lookupvar('apt::use_next_release') -%>
+### Debian next: <%= next_release=scope.lookupvar('::debian_nextrelease') ; next_codename=scope.lookupvar('::debian_nextcodename') %>
+
+# basic
+deb <%= debian_url %> <%= next_codename %> <%= lrepos %>
+<% if include_src -%>
+deb-src <%= debian_url %> <%= next_codename %> <%= lrepos %>
+<% end -%>
+
+# security
+<% if (next_release == "unstable" || next_release == "experimental") -%>
+# There is no security support for <%= next_release %>
+<% else -%>
+deb <%= security_url %> <%= next_codename %>/updates <%= lrepos %>
+<% if include_src then -%>
+deb-src <%= security_url %> <%= next_codename %>/updates <%= lrepos %>
+<% end
+ end -%>
+
+<% if use_volatile -%>
+# volatile
+<% if (next_release == "testing" || next_release == "unstable" || next_release == "experimental") -%>
+# There is no volatile archive for <%= next_release %>
+<% else -%>
+deb <%= debian_url %> <%= next_codename %>-updates <%= lrepos %>
+<% if include_src -%>
+deb-src <%= debian_url %> <%= next_codename %>-updates <%= lrepos %>
+<% end
+ end
+ end
+ end -%>
diff --git a/puppet/modules/apt/templates/Ubuntu/preferences_lucid.erb b/puppet/modules/apt/templates/Ubuntu/preferences_lucid.erb
new file mode 120000
index 00000000..3debe4fc
--- /dev/null
+++ b/puppet/modules/apt/templates/Ubuntu/preferences_lucid.erb
@@ -0,0 +1 @@
+preferences_maverick.erb \ No newline at end of file
diff --git a/puppet/modules/apt/templates/Ubuntu/preferences_maverick.erb b/puppet/modules/apt/templates/Ubuntu/preferences_maverick.erb
new file mode 100644
index 00000000..8e5481d3
--- /dev/null
+++ b/puppet/modules/apt/templates/Ubuntu/preferences_maverick.erb
@@ -0,0 +1,30 @@
+Explanation: Ubuntu <%= codename=scope.lookupvar('::ubuntu_codename') %> security
+Package: *
+Pin: release o=Ubuntu,a=<%= codename %>-security
+Pin-Priority: 990
+
+Explanation: Ubuntu <%= codename %> updates
+Package: *
+Pin: release o=Ubuntu,a=<%= codename %>-updates
+Pin-Priority: 980
+
+Explanation: Ubuntu <%= codename %>
+Package: *
+Pin: release o=Ubuntu,a=<%= codename %>
+Pin-Priority: 970
+
+Explanation: Ubuntu backports
+Package: *
+Pin: release a=<%= codename %>-backports
+Pin-Priority: 200
+
+Explanation: Ubuntu <%= next_release=scope.lookupvar('::ubuntu_nextcodename') %>
+Package: *
+Pin: release o=Ubuntu,a=<%= next_release %>
+Pin-Priority: 2
+
+Explanation: Ubuntu fallback
+Package: *
+Pin: release o=Ubuntu
+Pin-Priority: -10
+
diff --git a/puppet/modules/apt/templates/Ubuntu/preferences_oneiric.erb b/puppet/modules/apt/templates/Ubuntu/preferences_oneiric.erb
new file mode 120000
index 00000000..3debe4fc
--- /dev/null
+++ b/puppet/modules/apt/templates/Ubuntu/preferences_oneiric.erb
@@ -0,0 +1 @@
+preferences_maverick.erb \ No newline at end of file
diff --git a/puppet/modules/apt/templates/Ubuntu/preferences_precise.erb b/puppet/modules/apt/templates/Ubuntu/preferences_precise.erb
new file mode 120000
index 00000000..3debe4fc
--- /dev/null
+++ b/puppet/modules/apt/templates/Ubuntu/preferences_precise.erb
@@ -0,0 +1 @@
+preferences_maverick.erb \ No newline at end of file
diff --git a/puppet/modules/apt/templates/Ubuntu/preferences_utopic.erb b/puppet/modules/apt/templates/Ubuntu/preferences_utopic.erb
new file mode 120000
index 00000000..3debe4fc
--- /dev/null
+++ b/puppet/modules/apt/templates/Ubuntu/preferences_utopic.erb
@@ -0,0 +1 @@
+preferences_maverick.erb \ No newline at end of file
diff --git a/puppet/modules/apt/templates/Ubuntu/preferences_vivid.erb b/puppet/modules/apt/templates/Ubuntu/preferences_vivid.erb
new file mode 120000
index 00000000..3debe4fc
--- /dev/null
+++ b/puppet/modules/apt/templates/Ubuntu/preferences_vivid.erb
@@ -0,0 +1 @@
+preferences_maverick.erb \ No newline at end of file
diff --git a/puppet/modules/apt/templates/Ubuntu/preferences_wily.erb b/puppet/modules/apt/templates/Ubuntu/preferences_wily.erb
new file mode 120000
index 00000000..3debe4fc
--- /dev/null
+++ b/puppet/modules/apt/templates/Ubuntu/preferences_wily.erb
@@ -0,0 +1 @@
+preferences_maverick.erb \ No newline at end of file
diff --git a/puppet/modules/apt/templates/Ubuntu/preferences_xenial.erb b/puppet/modules/apt/templates/Ubuntu/preferences_xenial.erb
new file mode 120000
index 00000000..3debe4fc
--- /dev/null
+++ b/puppet/modules/apt/templates/Ubuntu/preferences_xenial.erb
@@ -0,0 +1 @@
+preferences_maverick.erb \ No newline at end of file
diff --git a/puppet/modules/apt/templates/Ubuntu/sources.list.erb b/puppet/modules/apt/templates/Ubuntu/sources.list.erb
new file mode 100644
index 00000000..e6d2f643
--- /dev/null
+++ b/puppet/modules/apt/templates/Ubuntu/sources.list.erb
@@ -0,0 +1,22 @@
+# This file is managed by puppet
+# all local modifications will be overwritten
+
+# basic <%= codename=scope.lookupvar('::ubuntu_codename') %>
+deb <%= ubuntu_url=scope.lookupvar('apt::ubuntu_url') %> <%= codename %> <%= lrepos=scope.lookupvar('apt::real_repos') %>
+<% if include_src=scope.lookupvar('apt::include_src') -%>
+deb-src <%= ubuntu_url %> <%= codename %> <%= lrepos %>
+<% end -%>
+
+<% if use_volatile=scope.lookupvar('apt::use_volatile') -%>
+# updates
+deb <%= ubuntu_url %> <%= codename %>-updates <%= lrepos %>
+<% if include_src -%>
+deb-src <%= ubuntu_url %> <%= codename %>-updates <%= lrepos %>
+<% end
+ end -%>
+
+# security support
+deb <%= ubuntu_url %> <%= codename %>-security <%= lrepos %>
+<% if include_src -%>
+deb-src <%= ubuntu_url %> <%= codename %>-security <%= lrepos %>
+<% end -%>
diff --git a/puppet/modules/apt/templates/preferences_snippet.erb b/puppet/modules/apt/templates/preferences_snippet.erb
new file mode 100644
index 00000000..903e73d6
--- /dev/null
+++ b/puppet/modules/apt/templates/preferences_snippet.erb
@@ -0,0 +1,4 @@
+Package: <%= @real_package %>
+Pin: <%= @pin %>
+Pin-Priority: <%= @priority %>
+
diff --git a/puppet/modules/apt/templates/preferences_snippet_release.erb b/puppet/modules/apt/templates/preferences_snippet_release.erb
new file mode 100644
index 00000000..b95d3f81
--- /dev/null
+++ b/puppet/modules/apt/templates/preferences_snippet_release.erb
@@ -0,0 +1,4 @@
+Package: <%= @real_package %>
+Pin: release a=<%= @release %>
+Pin-Priority: <%= @priority %>
+
diff --git a/puppet/modules/bundler/.gitignore b/puppet/modules/bundler/.gitignore
new file mode 100644
index 00000000..1377554e
--- /dev/null
+++ b/puppet/modules/bundler/.gitignore
@@ -0,0 +1 @@
+*.swp
diff --git a/puppet/modules/bundler/LICENSE b/puppet/modules/bundler/LICENSE
new file mode 100644
index 00000000..9cef3784
--- /dev/null
+++ b/puppet/modules/bundler/LICENSE
@@ -0,0 +1,13 @@
+Copyright (c) 2012 Evan Stachowiak
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/puppet/modules/bundler/README.md b/puppet/modules/bundler/README.md
new file mode 100644
index 00000000..2abb1cfc
--- /dev/null
+++ b/puppet/modules/bundler/README.md
@@ -0,0 +1,63 @@
+puppet-bundler - Bundler gem manager for Ruby
+==========================================
+
+This puppet module will install bundler and set config
+variables.
+
+This module supports Ubuntu 10.04 and Debian
+
+Installation
+------------
+
+1. Copy this directory to your puppet master module path (git clone
+https://github.com/evanstachowiak/puppet-bundler bundler)
+
+2. Apply the `bundler` class to any nodes you want bundler installed on:
+
+ class { 'bundler::install': }
+
+ By default this will install bundler with RVM; if you wish to use another
+ method, you can pass any puppet package provider to the class as
+ 'install_method', or just use 'package' if you wish the puppet parser to
+ automatically choose the best method for your platform.
+
+ Examples: class { 'bundler::install': install_method => 'fink' }
+ class { 'bundler::install': install_method => 'gem' }
+ class { 'bundler::install': install_method => 'package' }
+
+3. Set whatever config variables are necessary:
+ bundler::config { 'linecache19':
+ user => ubuntu,
+ config_flag => "--with-ruby-include=/usr/local/rvm/src/ruby-1.9.2-p290",
+ app_dir => your_app_dir,
+ }
+
+
+Contributing
+------------
+
+- fork on github (https://github.com/evanstachowiak/puppet-bundler)
+- send a pull request
+
+Author
+------
+Evan Stachowiak (https://github.com/evanstachowiak)
+
+LICENSE
+-------
+
+ Author:: Evan Stachowiak
+ Copyright:: Copyright (c) 2012 Evan Stachowiak
+ License:: Apache License, Version 2.0
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/puppet/modules/bundler/manifests/config.pp b/puppet/modules/bundler/manifests/config.pp
new file mode 100644
index 00000000..5937a228
--- /dev/null
+++ b/puppet/modules/bundler/manifests/config.pp
@@ -0,0 +1,74 @@
+# Define bundler::config
+#
+# All config settings for candiapp class
+#
+# == Parameters
+#
+# [*user*]
+# App directory owner
+# [*config_flag*]
+# config flag for specific gem compile settings
+# [*app_dir*]
+# App directory where Gemfile is located
+# [*home_dir_base_path*]
+# Home directory of the specified user
+# [*use_rvm*]
+# Sets whether rvm is used. Defaults to true
+# [*rvm_bin*]
+# RVM install location. Defaults to /usr/local/rvm/bin/rvm
+# [*rvm_gem_path*]
+# RVM gem directory. Defaults to /usr/local/rvm/gems
+# [*rvm_gemset*]
+# RVM gemset to use. Defaults to global.
+# [*ruby_version*]
+# Ruby version for RVM purposes.
+# [*bundler_path*]
+# Bundler install directory
+#
+# == Examples
+#
+#
+# == Requires:
+#
+# class { bundler::install: }
+#
+define bundler::config (
+ $user,
+ $config_flag,
+ $app_dir,
+ $home_dir_base_path = $bundler::params::home_dir_base_path,
+ $use_rvm = $bundler::params::use_rvm,
+ $rvm_bin = $bundler::params::rvm_bin,
+ $rvm_gem_path = $bundler::params::rvm_gem_path,
+ $rvm_gemset = $bundler::params::rvm_gemset,
+ $ruby_version = $bundler::ruby_version,
+ $bundler_path = $bundler::params::bundler_path
+) {
+
+ Class['bundler::install'] -> Bundler::Config[$name]
+
+ if $user == 'root' {
+ $home_dir = '/root'
+ }
+ else {
+ $home_dir = "${home_dir_base_path}/${user}"
+ }
+
+ # Must assign to new variables ($bundler_path_rvm, $bundler_bin), otherwise a 'cannot reassign variable' error is thrown
+ if $use_rvm == 'true' {
+ $bundler_path_rvm = "${rvm_gem_path}/${ruby_version}@${rvm_gemset}/bin"
+ $bundler_bin = "${rvm_bin} ${ruby_version} exec ${bundler_path_rvm}/bundle"
+ }
+ else {
+ $bundler_bin = "${bundler_path}/bundle"
+ }
+
+ # Bundler doesn't respect uid. Use /bin/su to override this behavior for users
+ # other than root.
+ exec { "bundler_config_${name}":
+ cwd => $app_dir,
+ command => "/bin/su -c '${bundler_bin} config build.${name} ${config_flag} --gemfile=${app_dir}/Gemfile' ${user}",
+ unless => "/bin/grep -i \"BUNDLE_BUILD__${name}: ${config_flag}\" ${home_dir}/.bundle/config",
+ }
+
+}
diff --git a/puppet/modules/bundler/manifests/install.pp b/puppet/modules/bundler/manifests/install.pp
new file mode 100644
index 00000000..1524de31
--- /dev/null
+++ b/puppet/modules/bundler/manifests/install.pp
@@ -0,0 +1,64 @@
+# Class bundler::install
+#
+# Installs bundler Ruby gem manager
+#
+# == Parameters
+#
+# [*install_method*]
+# How to install bundler, 'rvm' is the default
+# [*ruby_version*]
+# Ruby version that bundler will use.
+#
+# == Examples
+#
+#
+# == Requires:
+#
+# If use_rvm = 'true':
+# include rvm
+#
+class bundler::install (
+ $ruby_version = undef,
+ $ensure = 'present',
+ $install_method = 'rvm',
+ $use_rvm = '',
+ ) inherits bundler::params {
+
+ # deprecation warning
+ if $use_rvm != '' {
+ warning('$use_rvm is deprecated, please use $install_method instead')
+ }
+
+ if ( $install_method == undef ) or ( $install_method == 'package' ) {
+ $provider_method = undef
+ }
+ else {
+ # backwards compatibility
+ if $use_rvm == false {
+ $provider_method = gem
+ }
+ else {
+ $provider_method = $bundler::params::install_method
+ }
+ }
+
+ if $provider_method == 'rvm' {
+ if $ruby_version == undef {
+ fail('When using rvm, you must pass a ruby_version')
+ }
+ else {
+ #Install bundler with correct RVM
+ rvm_gem { 'bundler':
+ ensure => $ensure,
+ ruby_version => $ruby_version,
+ }
+ }
+ }
+ else {
+ package { 'bundler':
+ ensure => $ensure,
+ provider => $provider_method,
+ }
+ }
+
+}
diff --git a/puppet/modules/bundler/manifests/params.pp b/puppet/modules/bundler/manifests/params.pp
new file mode 100644
index 00000000..53ca86e4
--- /dev/null
+++ b/puppet/modules/bundler/manifests/params.pp
@@ -0,0 +1,31 @@
+# Class bundler::params
+#
+# Platform-specific default settings for the bundler module
+#
+# == Parameters
+#
+#
+#
+# == Examples
+#
+#
+# == Requires:
+#
+class bundler::params {
+
+ case $::operatingsystem {
+ ubuntu, debian: {
+ $user = 'root'
+ $home_dir_base_path = '/home'
+ $install_method = 'rvm'
+ $rvm_bin = '/usr/local/rvm/bin/rvm'
+ $rvm_gem_path = '/usr/local/rvm/gems'
+ $rvm_gemset = 'global'
+ $bundler_path = '/usr/bin'
+ }
+ default: {
+ fail("Unsupported platform: ${::operatingsystem}")
+ }
+ }
+
+}
diff --git a/puppet/modules/clamav/files/01-leap.conf b/puppet/modules/clamav/files/01-leap.conf
new file mode 100644
index 00000000..a7e49d17
--- /dev/null
+++ b/puppet/modules/clamav/files/01-leap.conf
@@ -0,0 +1,58 @@
+# If running clamd in "LocalSocket" mode (*NOT* in TCP/IP mode), and
+# either "SOcket Cat" (socat) or the "IO::Socket::UNIX" perl module
+# are installed on the system, and you want to report whether clamd
+# is running or not, uncomment the "clamd_socket" variable below (you
+# will be warned if neither socat nor IO::Socket::UNIX are found, but
+# the script will still run). You will also need to set the correct
+# path to your clamd socket file (if unsure of the path, check the
+# "LocalSocket" setting in your clamd.conf file for socket location).
+clamd_socket="/run/clamav/clamd.ctl"
+
+# If you would like to attempt to restart ClamD if detected not running,
+# uncomment the next 2 lines. Confirm the path to the "clamd_lock" file
+# (usually can be found in the clamd init script) and also enter the clamd
+# start command for your particular distro for the "start_clamd" variable
+# (the sample start command shown below should work for most linux distros).
+# NOTE: these 2 variables are dependent on the "clamd_socket" variable
+# shown above - if not enabled, then the following 2 variables will be
+# ignored, whether enabled or not.
+clamd_lock="/run/clamav/clamd.pid"
+start_clamd="clamdscan --reload"
+
+ss_dbs="
+ junk.ndb
+ phish.ndb
+ rogue.hdb
+ sanesecurity.ftm
+ scam.ndb
+ sigwhitelist.ign2
+ spamattach.hdb
+ spamimg.hdb
+ winnow.attachments.hdb
+ winnow_bad_cw.hdb
+ winnow_extended_malware.hdb
+ winnow_malware.hdb
+ winnow_malware_links.ndb
+ malwarehash.hsb
+ doppelstern.hdb
+ bofhland_cracked_URL.ndb
+ bofhland_malware_attach.hdb
+ bofhland_malware_URL.ndb
+ bofhland_phishing_URL.ndb
+ crdfam.clamav.hdb
+ phishtank.ndb
+ porcupine.ndb
+ spear.ndb
+ spearl.ndb
+"
+
+# ========================
+# SecuriteInfo Database(s)
+# ========================
+# Add or remove database file names between quote marks as needed. To
+# disable any SecuriteInfo database downloads, remove the appropriate
+# lines below. To disable all SecuriteInfo database file downloads,
+# comment all of the following lines.
+si_dbs=""
+
+mbl_dbs="" \ No newline at end of file
diff --git a/puppet/modules/clamav/files/clamav-daemon_default b/puppet/modules/clamav/files/clamav-daemon_default
new file mode 100644
index 00000000..b4cd6a4f
--- /dev/null
+++ b/puppet/modules/clamav/files/clamav-daemon_default
@@ -0,0 +1,8 @@
+# This is a file designed only to set special environment variables,
+# e.g. TMP or TMPDIR. It is sourced from a shell script, so anything
+# put in here must be in variable=value format, suitable for sourcing
+# from a shell script.
+# Examples:
+# export TMPDIR=/dev/shm
+export TMP=/var/tmp
+export TMPDIR=/var/tmp
diff --git a/puppet/modules/clamav/files/clamav-milter_default b/puppet/modules/clamav/files/clamav-milter_default
new file mode 100644
index 00000000..5e33e822
--- /dev/null
+++ b/puppet/modules/clamav/files/clamav-milter_default
@@ -0,0 +1,14 @@
+#
+# clamav-milter init options
+#
+
+## SOCKET_RWGROUP
+# by default, the socket created by the milter has permissions
+# clamav:clamav:755. SOCKET_RWGROUP changes the group and changes the
+# permissions to 775 to give read-write access to that group.
+#
+# If you are using postfix to speak to the milter, you have to give permission
+# to the postfix group to write
+#
+SOCKET_RWGROUP=postfix
+export TMPDIR=/var/tmp
diff --git a/puppet/modules/clamav/manifests/daemon.pp b/puppet/modules/clamav/manifests/daemon.pp
new file mode 100644
index 00000000..2e13a8fb
--- /dev/null
+++ b/puppet/modules/clamav/manifests/daemon.pp
@@ -0,0 +1,91 @@
+# deploy clamav daemon
+class clamav::daemon {
+
+ $domain_hash = hiera('domain')
+ $domain = $domain_hash['full_suffix']
+
+ package { [ 'clamav-daemon', 'arj' ]:
+ ensure => installed;
+ }
+
+ service {
+ 'clamav-daemon':
+ ensure => running,
+ name => clamav-daemon,
+ pattern => '/usr/sbin/clamd',
+ enable => true,
+ hasrestart => true,
+ subscribe => File['/etc/default/clamav-daemon'],
+ require => Package['clamav-daemon'];
+ }
+
+ file {
+ '/var/run/clamav':
+ ensure => directory,
+ mode => '0750',
+ owner => clamav,
+ group => postfix,
+ require => [Package['postfix'], Package['clamav-daemon']];
+
+ '/var/lib/clamav':
+ mode => '0755',
+ owner => clamav,
+ group => clamav,
+ require => Package['clamav-daemon'];
+
+ '/etc/default/clamav-daemon':
+ source => 'puppet:///modules/clamav/clamav-daemon_default',
+ mode => '0644',
+ owner => root,
+ group => root;
+
+ # this file contains additional domains that we want the clamav
+ # phishing process to look for (our domain)
+ '/var/lib/clamav/local.pdb':
+ content => template('clamav/local.pdb.erb'),
+ mode => '0644',
+ owner => clamav,
+ group => clamav,
+ require => Package['clamav-daemon'];
+ }
+
+ file_line {
+ 'clamav_daemon_tmp':
+ path => '/etc/clamav/clamd.conf',
+ line => 'TemporaryDirectory /var/tmp',
+ require => Package['clamav-daemon'],
+ notify => Service['clamav-daemon'];
+
+ 'enable_phishscanurls':
+ path => '/etc/clamav/clamd.conf',
+ match => 'PhishingScanURLs no',
+ line => 'PhishingScanURLs yes',
+ require => Package['clamav-daemon'],
+ notify => Service['clamav-daemon'];
+
+ 'clamav_LogSyslog_true':
+ path => '/etc/clamav/clamd.conf',
+ match => '^LogSyslog false',
+ line => 'LogSyslog true',
+ require => Package['clamav-daemon'],
+ notify => Service['clamav-daemon'];
+
+ 'clamav_MaxThreads':
+ path => '/etc/clamav/clamd.conf',
+ match => 'MaxThreads 20',
+ line => 'MaxThreads 100',
+ require => Package['clamav-daemon'],
+ notify => Service['clamav-daemon'];
+ }
+
+  # blank out any LogFile line so clamd logs to syslog only
+ file_line {
+ 'clamav_LogFile':
+ path => '/etc/clamav/clamd.conf',
+ match => '^LogFile .*',
+ line => '',
+ require => Package['clamav-daemon'],
+ notify => Service['clamav-daemon'];
+ }
+
+}
diff --git a/puppet/modules/clamav/manifests/freshclam.pp b/puppet/modules/clamav/manifests/freshclam.pp
new file mode 100644
index 00000000..80c822a4
--- /dev/null
+++ b/puppet/modules/clamav/manifests/freshclam.pp
@@ -0,0 +1,23 @@
+class clamav::freshclam {
+
+ package { 'clamav-freshclam': ensure => installed }
+
+ service {
+ 'freshclam':
+ ensure => running,
+ enable => true,
+ name => clamav-freshclam,
+ pattern => '/usr/bin/freshclam',
+ hasrestart => true,
+ require => Package['clamav-freshclam'];
+ }
+
+ file_line {
+ 'freshclam_notify':
+ path => '/etc/clamav/freshclam.conf',
+ line => 'NotifyClamd /etc/clamav/clamd.conf',
+ require => Package['clamav-freshclam'],
+ notify => Service['freshclam'];
+ }
+
+}
diff --git a/puppet/modules/clamav/manifests/init.pp b/puppet/modules/clamav/manifests/init.pp
new file mode 100644
index 00000000..de8fb4dc
--- /dev/null
+++ b/puppet/modules/clamav/manifests/init.pp
@@ -0,0 +1,8 @@
+class clamav {
+
+ include clamav::daemon
+ include clamav::milter
+ include clamav::unofficial_sigs
+ include clamav::freshclam
+
+}
diff --git a/puppet/modules/clamav/manifests/milter.pp b/puppet/modules/clamav/manifests/milter.pp
new file mode 100644
index 00000000..e8a85e3f
--- /dev/null
+++ b/puppet/modules/clamav/manifests/milter.pp
@@ -0,0 +1,50 @@
+class clamav::milter {
+
+ $clamav = hiera('clamav')
+ $whitelisted_addresses = $clamav['whitelisted_addresses']
+ $domain_hash = hiera('domain')
+ $domain = $domain_hash['full_suffix']
+
+ package { 'clamav-milter': ensure => installed }
+
+ service {
+ 'clamav-milter':
+ ensure => running,
+ enable => true,
+ name => clamav-milter,
+ pattern => '/usr/sbin/clamav-milter',
+ hasrestart => true,
+ require => Package['clamav-milter'],
+ subscribe => File['/etc/default/clamav-milter'];
+ }
+
+ file {
+ '/run/clamav/milter.ctl':
+ mode => '0666',
+ owner => clamav,
+ group => postfix,
+ require => Class['clamav::daemon'];
+
+ '/etc/clamav/clamav-milter.conf':
+ content => template('clamav/clamav-milter.conf.erb'),
+ mode => '0644',
+ owner => root,
+ group => root,
+ require => Package['clamav-milter'],
+ subscribe => Service['clamav-milter'];
+
+ '/etc/default/clamav-milter':
+ source => 'puppet:///modules/clamav/clamav-milter_default',
+ mode => '0644',
+ owner => root,
+ group => root;
+
+ '/etc/clamav/whitelisted_addresses':
+ content => template('clamav/whitelisted_addresses.erb'),
+ mode => '0644',
+ owner => root,
+ group => root,
+ require => Package['clamav-milter'];
+ }
+
+}
diff --git a/puppet/modules/clamav/manifests/unofficial_sigs.pp b/puppet/modules/clamav/manifests/unofficial_sigs.pp
new file mode 100644
index 00000000..2d849585
--- /dev/null
+++ b/puppet/modules/clamav/manifests/unofficial_sigs.pp
@@ -0,0 +1,23 @@
+class clamav::unofficial_sigs {
+
+ package { 'clamav-unofficial-sigs':
+ ensure => installed
+ }
+
+ ensure_packages(['wget', 'gnupg', 'socat', 'rsync', 'curl'])
+
+ file {
+ '/var/log/clamav-unofficial-sigs.log':
+ ensure => file,
+ owner => clamav,
+ group => clamav,
+ require => Package['clamav-unofficial-sigs'];
+
+ '/etc/clamav-unofficial-sigs.conf.d/01-leap.conf':
+ source => 'puppet:///modules/clamav/01-leap.conf',
+ mode => '0755',
+ owner => root,
+ group => root,
+ require => Package['clamav-unofficial-sigs'];
+ }
+}
diff --git a/puppet/modules/clamav/templates/clamav-milter.conf.erb b/puppet/modules/clamav/templates/clamav-milter.conf.erb
new file mode 100644
index 00000000..9bf7099e
--- /dev/null
+++ b/puppet/modules/clamav/templates/clamav-milter.conf.erb
@@ -0,0 +1,28 @@
+# THIS FILE MANAGED BY PUPPET
+MilterSocket /var/run/clamav/milter.ctl
+FixStaleSocket true
+User clamav
+MilterSocketGroup clamav
+MilterSocketMode 666
+AllowSupplementaryGroups true
+ReadTimeout 120
+Foreground false
+PidFile /var/run/clamav/clamav-milter.pid
+ClamdSocket unix:/var/run/clamav/clamd.ctl
+OnClean Accept
+OnInfected Reject
+OnFail Defer
+AddHeader Replace
+LogSyslog true
+LogFacility LOG_LOCAL6
+LogVerbose yes
+LogInfected Basic
+LogTime true
+LogFileUnlock false
+LogClean Off
+LogRotate true
+SupportMultipleRecipients false
+MaxFileSize 10M
+TemporaryDirectory /var/tmp
+RejectMsg "Message refused due to content violation: %v - contact https://<%= @domain %>/tickets/new if this is in error"
+Whitelist /etc/clamav/whitelisted_addresses
diff --git a/puppet/modules/clamav/templates/local.pdb.erb b/puppet/modules/clamav/templates/local.pdb.erb
new file mode 100644
index 00000000..9ea0584a
--- /dev/null
+++ b/puppet/modules/clamav/templates/local.pdb.erb
@@ -0,0 +1 @@
+H:<%= @domain %>
diff --git a/puppet/modules/clamav/templates/whitelisted_addresses.erb b/puppet/modules/clamav/templates/whitelisted_addresses.erb
new file mode 100644
index 00000000..9e068ec5
--- /dev/null
+++ b/puppet/modules/clamav/templates/whitelisted_addresses.erb
@@ -0,0 +1,5 @@
+<%- if @whitelisted_addresses then -%>
+<% @whitelisted_addresses.each do |name| -%>
+From::<%= name %>
+<% end -%>
+<% end -%>
diff --git a/puppet/modules/common/LICENSE b/puppet/modules/common/LICENSE
new file mode 100644
index 00000000..94a9ed02
--- /dev/null
+++ b/puppet/modules/common/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/puppet/modules/common/README b/puppet/modules/common/README
new file mode 100644
index 00000000..e6df7663
--- /dev/null
+++ b/puppet/modules/common/README
@@ -0,0 +1,44 @@
+Common Module
+-------------
+
+The common module installs various functions that are required by other
+modules. This module should be installed before any of the other modules.
+
+! Upgrade Notice !
+
+The 'append_if_no_such_line' define has been replaced with the 'line' define. If
+you are using 'append_if_no_such_line' anywhere in your manifests, you will need
+to transition to 'line' before upgrading to this version of the common
+module. The 'line' define is a drop-in replacement and essentially equivalent,
+so the transition is quite easy: you should only need to change the name
+in your manifests.
+
+To use this module, follow these directions:
+
+1. Your modules directory will need all the files included in this
+ repository placed under a directory called "common"
+
+2. Add the following line to manifests/site.pp:
+
+ import "modules.pp"
+
+3. Add the following line to manifests/modules.pp:
+
+ import "common"
+
+
+Original author: David Schmitt (mailto:david@dasz.at)
+Copyright:: Copyright (c) 2007-2009 dasz.at OG
+License:: 3-clause BSD
+
+Additional authors:
+Copyright (C) 2007 David Schmitt <david@schmitt.edv-bus.at>
+Copyright 2008-2011, admin(at)immerda.ch
+Copyright 2008, Puzzle ITC GmbH
+ Marcel Härry haerry+puppet(at)puzzle.ch
+ Simon Josi josi+puppet(at)puzzle.ch
+Copyright 2009-2011, Riseup Labs <http://riseuplabs.org>
+ Pietro Ferrari <pietro@riseup.net>
+ Micah Anderson <micah@riseup.net>
+Copyright (C) 2007 Antoine Beaupre <anarcat@koumbit.org>
+Copyright (c) 2011 intrigeri - intrigeri(at)boum.org \ No newline at end of file
diff --git a/puppet/modules/common/lib/puppet/parser/functions/basename.rb b/puppet/modules/common/lib/puppet/parser/functions/basename.rb
new file mode 100644
index 00000000..dc725375
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/basename.rb
@@ -0,0 +1,22 @@
+# This function has two modes of operation:
+#
+# basename(string) : string
+#
+# Returns the last component of the filename given as argument, which must be
+# formed using forward slashes ("/") regardless of the separator used on the
+# local file system.
+#
+# basename(string[]) : string[]
+#
+# Returns an array of strings with the basename of each item from the argument.
+#
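+# Usage sketch from a manifest (paths are illustrative):
+#
+#   $file  = basename('/etc/ssl/certs/ca.pem')          # => 'ca.pem'
+#   $files = basename(['/tmp/a.txt', '/var/log/b.log']) # => ['a.txt', 'b.log']
+#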
+module Puppet::Parser::Functions
+ newfunction(:basename, :type => :rvalue) do |args|
+ if args[0].is_a?(Array)
+      args[0].collect do |a| File.basename(a) end
+ else
+ File.basename(args[0])
+ end
+ end
+end
+
diff --git a/puppet/modules/common/lib/puppet/parser/functions/dirname.rb b/puppet/modules/common/lib/puppet/parser/functions/dirname.rb
new file mode 100644
index 00000000..ea0d50b4
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/dirname.rb
@@ -0,0 +1,22 @@
+# This function has two modes of operation:
+#
+# dirname(string) : string
+#
+# Returns all components of the filename given as argument except the last
+# one. The filename must be formed using forward slashes ("/") regardless of
+# the separator used on the local file system.
+#
+# dirname(string[]) : string[]
+#
+# Returns an array of strings with the dirname of each item from the argument.
+#
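+# Usage sketch from a manifest (paths are illustrative):
+#
+#   $dir  = dirname('/etc/ssl/certs/ca.pem')     # => '/etc/ssl/certs'
+#   $dirs = dirname(['/tmp/a.txt', '/var/x/y'])  # => ['/tmp', '/var/x']
+#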
+module Puppet::Parser::Functions
+ newfunction(:dirname, :type => :rvalue) do |args|
+ if args[0].is_a?(Array)
+      args[0].collect do |a| File.dirname(a) end
+ else
+ File.dirname(args[0])
+ end
+ end
+end
+
diff --git a/puppet/modules/common/lib/puppet/parser/functions/get_default.rb b/puppet/modules/common/lib/puppet/parser/functions/get_default.rb
new file mode 100644
index 00000000..3f4359bd
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/get_default.rb
@@ -0,0 +1,15 @@
+# get_default($value, $default) : $value
+#
+# return $value || $default.
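+#
+# Usage sketch from a manifest ($custom_port and the fallback are illustrative):
+#
+#   $port = get_default($custom_port, '8080')   # first non-empty argument wins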
+module Puppet::Parser::Functions
+ newfunction(:get_default, :type => :rvalue) do |args|
+ value = nil
+ args.each { |x|
+ if ! x.nil? and x.length > 0
+ value = x
+ break
+ end
+ }
+ return value
+ end
+end
diff --git a/puppet/modules/common/lib/puppet/parser/functions/hostname.rb b/puppet/modules/common/lib/puppet/parser/functions/hostname.rb
new file mode 100644
index 00000000..7bc477f2
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/hostname.rb
@@ -0,0 +1,13 @@
+# get a unique array of IP addresses for a hostname
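+#
+# Usage sketch from a manifest (the hostname is illustrative):
+#
+#   $addresses = hostname('mx.example.org')   # => unique array of resolved addresses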
+require 'resolv'
+
+module Puppet::Parser::Functions
+ newfunction(:hostname, :type => :rvalue) do |args|
+ res = Array.new
+ Resolv::DNS.new.each_address(args[0]){ |addr|
+ res << addr
+ }
+ res.uniq
+ end
+end
+
diff --git a/puppet/modules/common/lib/puppet/parser/functions/multi_source_template.rb b/puppet/modules/common/lib/puppet/parser/functions/multi_source_template.rb
new file mode 100644
index 00000000..e0753205
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/multi_source_template.rb
@@ -0,0 +1,29 @@
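+# Renders the first template that can be found from an ordered list of
+# candidate template paths.
+#
+# Usage sketch from a manifest (module and template names are illustrative):
+#
+#   file { '/etc/foo.conf':
+#     content => multi_source_template(
+#       "mymodule/foo.conf.${::fqdn}.erb",
+#       'mymodule/foo.conf.erb'),
+#   }
+#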
+module Puppet::Parser::Functions
+ require 'erb'
+
+ newfunction(:multi_source_template, :type => :rvalue) do |args|
+ contents = nil
+ environment = compiler.environment
+ sources = args
+
+ sources.each do |file|
+ Puppet.debug("Looking for #{file} in #{environment}")
+ if filename = Puppet::Parser::Files.find_template(file, environment.to_s)
+ wrapper = Puppet::Parser::TemplateWrapper.new(self)
+ wrapper.file = file
+
+ begin
+ contents = wrapper.result
+ rescue => detail
+ raise Puppet::ParseError, "Failed to parse template %s: %s" % [file, detail]
+ end
+
+ break
+ end
+ end
+
+ raise Puppet::ParseError, "multi_source_template: No match found for files: #{sources.join(', ')}" if contents == nil
+
+ contents
+ end
+end
diff --git a/puppet/modules/common/lib/puppet/parser/functions/prefix_with.rb b/puppet/modules/common/lib/puppet/parser/functions/prefix_with.rb
new file mode 100644
index 00000000..6e64a4a8
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/prefix_with.rb
@@ -0,0 +1,9 @@
+# prefix arguments 2..n with first argument
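+#
+# Usage sketch from a manifest (values are illustrative):
+#
+#   $dirs = prefix_with('/srv/', 'app', 'data')   # => ['/srv/app', '/srv/data']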
+
+module Puppet::Parser::Functions
+ newfunction(:prefix_with, :type => :rvalue) do |args|
+ prefix = args.shift
+ args.collect {|v| "%s%s" % [prefix, v] }
+ end
+end
+
diff --git a/puppet/modules/common/lib/puppet/parser/functions/re_escape.rb b/puppet/modules/common/lib/puppet/parser/functions/re_escape.rb
new file mode 100644
index 00000000..7bee90a8
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/re_escape.rb
@@ -0,0 +1,7 @@
+# apply ruby regexp escaping to a string
+module Puppet::Parser::Functions
+ newfunction(:re_escape, :type => :rvalue) do |args|
+ Regexp.escape(args[0])
+ end
+end
+
diff --git a/puppet/modules/common/lib/puppet/parser/functions/slash_escape.rb b/puppet/modules/common/lib/puppet/parser/functions/slash_escape.rb
new file mode 100644
index 00000000..04d3b95e
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/slash_escape.rb
@@ -0,0 +1,7 @@
+# escape slashes in a String
+module Puppet::Parser::Functions
+ newfunction(:slash_escape, :type => :rvalue) do |args|
+ args[0].gsub(/\//, '\\/')
+ end
+end
+
diff --git a/puppet/modules/common/lib/puppet/parser/functions/substitute.rb b/puppet/modules/common/lib/puppet/parser/functions/substitute.rb
new file mode 100644
index 00000000..4c97def3
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/substitute.rb
@@ -0,0 +1,20 @@
+# substitute($string, $regex, $replacement) : $string
+# substitute($string[], $regex, $replacement) : $string[]
+#
+# Replace all occurrences of $regex in $string by $replacement.
+# $regex is interpreted as a Ruby regular expression.
+#
+# For long-term portability it is recommended to refrain from using Ruby's
+# extended RE features.
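+#
+# Usage sketch from a manifest (values are illustrative):
+#
+#   $clean  = substitute('foo-bar', '-', '_')        # => 'foo_bar'
+#   $cleans = substitute(['a-b', 'c-d'], '-', '_')   # => ['a_b', 'c_d']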
+module Puppet::Parser::Functions
+ newfunction(:substitute, :type => :rvalue) do |args|
+ if args[0].is_a?(Array)
+ args[0].collect do |val|
+ val.gsub(/#{args[1]}/, args[2])
+ end
+ else
+ args[0].gsub(/#{args[1]}/, args[2])
+ end
+ end
+end
+
diff --git a/puppet/modules/common/lib/puppet/parser/functions/tfile.rb b/puppet/modules/common/lib/puppet/parser/functions/tfile.rb
new file mode 100644
index 00000000..acb6609b
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/tfile.rb
@@ -0,0 +1,19 @@
+Puppet::Parser::Functions::newfunction(
+ :tfile,
+ :type => :rvalue,
+ :doc => "Returns the content of a file. If the file or the path does not
+ yet exist, it will create the path and touch the file."
+) do |args|
+ raise Puppet::ParseError, 'tfile() needs one argument' if args.length != 1
+ path = args.to_a.first
+  unless File.exists?(path)
+    require 'fileutils'
+    dir = File.dirname(path)
+    unless File.directory?(dir)
+      FileUtils.mkdir_p(dir, :mode => 0700)
+    end
+    FileUtils.touch(path)
+  end
+ File.read(path)
+end
diff --git a/puppet/modules/common/manifests/module_dir.pp b/puppet/modules/common/manifests/module_dir.pp
new file mode 100644
index 00000000..2420da94
--- /dev/null
+++ b/puppet/modules/common/manifests/module_dir.pp
@@ -0,0 +1,34 @@
+# common/manifests/module_dir.pp -- create a default directory
+# for storing module specific information
+#
+# Copyright (C) 2007 David Schmitt <david@schmitt.edv-bus.at>
+# See LICENSE for the full license granted to you.
+
+# A module_dir is a storage place for all the stuff a module might want to
+# store. According to the FHS, this should go to /var/lib. Since this is a part
+# of puppet, the full path is /var/lib/puppet/modules/${name}. Every module
+# should prefix its module_dirs with its name.
+#
+# Usage:
+# include common::moduledir
+# module_dir { ["common", "common/dir1", "common/dir2" ]: }
+#
+# You may refer to a file in module_dir by using :
+# file { "${common::moduledir::module_dir_path}/somedir/somefile": }
+define common::module_dir(
+ $owner = root,
+ $group = 0,
+ $mode = 0644
+) {
+ include common::moduledir
+ file {
+ "${common::moduledir::module_dir_path}/${name}":
+ ensure => directory,
+ recurse => true,
+ purge => true,
+ force => true,
+ owner => $owner,
+ group => $group,
+ mode => $mode;
+ }
+}
diff --git a/puppet/modules/common/manifests/module_file.pp b/puppet/modules/common/manifests/module_file.pp
new file mode 100644
index 00000000..c1070bcf
--- /dev/null
+++ b/puppet/modules/common/manifests/module_file.pp
@@ -0,0 +1,37 @@
+# common/manifests/module_file.pp -- use a modules_dir to store module
+# specific files
+#
+# Copyright (C) 2007 David Schmitt <david@schmitt.edv-bus.at>
+# See LICENSE for the full license granted to you.
+
+# Put a file into module-local storage.
+#
+# Usage:
+# common::module_file { "module/file":
+# source => "puppet:///...",
+# mode => 644, # default
+# owner => root, # default
+# group => 0, # default
+# }
+define common::module_file (
+ $ensure = present,
+ $source = undef,
+ $owner = root,
+ $group = 0,
+ $mode = 0644
+){
+ include common::moduledir
+ file {
+ "${common::moduledir::module_dir_path}/${name}":
+ ensure => $ensure,
+ }
+
+ if $ensure != 'absent' {
+ File["${common::moduledir::module_dir_path}/${name}"]{
+ source => $source,
+ owner => $owner,
+ group => $group,
+ mode => $mode,
+ }
+ }
+}
diff --git a/puppet/modules/common/manifests/moduledir.pp b/puppet/modules/common/manifests/moduledir.pp
new file mode 100644
index 00000000..f779085b
--- /dev/null
+++ b/puppet/modules/common/manifests/moduledir.pp
@@ -0,0 +1,18 @@
+# setup root for module_dirs
+class common::moduledir {
+ # Use this variable to reference the base path. Thus you are safe from any
+ # changes.
+ $module_dir_path = '/var/lib/puppet/modules'
+
+ # Module programmers can use /var/lib/puppet/modules/$modulename to save
+ # module-local data, e.g. for constructing config files
+ file{$module_dir_path:
+ ensure => directory,
+ recurse => true,
+ purge => true,
+ force => true,
+ owner => root,
+ group => 0,
+ mode => '0755';
+ }
+}
diff --git a/puppet/modules/common/manifests/moduledir/common.pp b/puppet/modules/common/manifests/moduledir/common.pp
new file mode 100644
index 00000000..e74c601e
--- /dev/null
+++ b/puppet/modules/common/manifests/moduledir/common.pp
@@ -0,0 +1,4 @@
+# setup a common dir
+class common::moduledir::common{
+ common::module_dir{'common': }
+}
diff --git a/puppet/modules/common/spec/spec.opts b/puppet/modules/common/spec/spec.opts
new file mode 100644
index 00000000..91cd6427
--- /dev/null
+++ b/puppet/modules/common/spec/spec.opts
@@ -0,0 +1,6 @@
+--format
+s
+--colour
+--loadby
+mtime
+--backtrace
diff --git a/puppet/modules/common/spec/spec_helper.rb b/puppet/modules/common/spec/spec_helper.rb
new file mode 100644
index 00000000..6ba62e11
--- /dev/null
+++ b/puppet/modules/common/spec/spec_helper.rb
@@ -0,0 +1,16 @@
+require 'pathname'
+dir = Pathname.new(__FILE__).parent
+$LOAD_PATH.unshift(dir, dir + 'lib', dir + '../lib')
+require 'puppet'
+gem 'rspec', '>= 1.2.9'
+require 'spec/autorun'
+
+Dir[File.join(File.dirname(__FILE__), 'support', '*.rb')].each do |support_file|
+ require support_file
+end
+
+# We need this because the RAL uses 'should' as a method. This
+# allows us the same behaviour but with a different method name.
+class Object
+ alias :must :should
+end
diff --git a/puppet/modules/common/spec/unit/parser/functions/tfile.rb b/puppet/modules/common/spec/unit/parser/functions/tfile.rb
new file mode 100644
index 00000000..5c8f636e
--- /dev/null
+++ b/puppet/modules/common/spec/unit/parser/functions/tfile.rb
@@ -0,0 +1,54 @@
+#! /usr/bin/env ruby
+
+require File.dirname(__FILE__) + '/../../../spec_helper'
+require 'mocha'
+
+describe "the tfile function" do
+
+ before :each do
+ @scope = Puppet::Parser::Scope.new
+ end
+
+ it "should exist" do
+ Puppet::Parser::Functions.function("tfile").should == "function_tfile"
+ end
+
+ it "should raise a ParseError if there is less than 1 arguments" do
+ lambda { @scope.function_tfile([]) }.should( raise_error(Puppet::ParseError))
+ end
+
+ it "should raise a ParseError if there is more than 1 arguments" do
+ lambda { @scope.function_tfile(["bar", "gazonk"]) }.should( raise_error(Puppet::ParseError))
+ end
+
+ describe "when executed properly" do
+
+ before :each do
+ File.stubs(:read).with('/some_path/aa').returns("foo1\nfoo2\n")
+ end
+
+ it "should return the content of the file" do
+ File.stubs(:exists?).with('/some_path/aa').returns(true)
+ result = @scope.function_tfile(['/some_path/aa'])
+ result.should == "foo1\nfoo2\n"
+ end
+
+ it "should touch a file if it does not exist" do
+ File.stubs(:exists?).with('/some_path/aa').returns(false)
+ File.stubs(:directory?).with('/some_path').returns(true)
+ FileUtils.expects(:touch).with('/some_path/aa')
+ result = @scope.function_tfile(['/some_path/aa'])
+ result.should == "foo1\nfoo2\n"
+ end
+
+ it "should create the path if it does not exist" do
+ File.stubs(:exists?).with('/some_path/aa').returns(false)
+ File.stubs(:directory?).with('/some_path').returns(false)
+ FileUtils.expects(:mkdir_p).with("/some_path",:mode => 0700)
+ FileUtils.expects(:touch).with('/some_path/aa')
+ result = @scope.function_tfile(['/some_path/aa'])
+ result.should == "foo1\nfoo2\n"
+ end
+ end
+
+end
diff --git a/puppet/modules/concat/CHANGELOG b/puppet/modules/concat/CHANGELOG
new file mode 100644
index 00000000..c506cf1a
--- /dev/null
+++ b/puppet/modules/concat/CHANGELOG
@@ -0,0 +1,29 @@
+KNOWN ISSUES:
+- In 0.24.8 you will see unintended notifies: if you build a file
+ in a run, the next run will also see it as changed. This is due
+ to how 0.24.8 does the purging of unhandled files; this is improved
+ in 0.25.x and we cannot work around it in our code.
+
+CHANGELOG:
+- 2010/02/19 - initial release
+- 2010/03/12 - add support for 0.24.8 and newer
+ - make the location of sort configurable
+ - add the ability to add shell comment based warnings to
+ the top of files
+ - add the ability to create empty files
+- 2010/04/05 - fix parsing of WARN and change code style to match rest
+ of the code
+ - Better and safer boolean handling for warn and force
+ - Don't use hard coded paths in the shell script; set PATH
+ at the top of the script
+ - Use file{} to copy the result and make all fragments owned
+ by root. This means we can change the ownership/group of the
+ resulting file at any time.
+ - You can specify ensure => "/some/other/file" in concat::fragment
+ to include the contents of a symlink into the final file.
+- 2010/04/16 - Add more cleaning of the fragment name - removing / from the $name
+- 2010/05/22 - Improve documentation and show the use of ensure =>
+- 2010/07/14 - Add support for setting the filebucket behavior of files
+- 2010/10/04 - Make the warning message configurable
+- 2010/12/03 - Add flags to make concat work better on Solaris - thanks Jonathan Boyett
+- 2011/02/03 - Make the shell script more portable and add a config option for root group
diff --git a/puppet/modules/concat/LICENSE b/puppet/modules/concat/LICENSE
new file mode 100644
index 00000000..6a9e9a19
--- /dev/null
+++ b/puppet/modules/concat/LICENSE
@@ -0,0 +1,14 @@
+ Copyright 2012 R.I.Pienaar
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/puppet/modules/concat/Modulefile b/puppet/modules/concat/Modulefile
new file mode 100644
index 00000000..d6ab2bb0
--- /dev/null
+++ b/puppet/modules/concat/Modulefile
@@ -0,0 +1,8 @@
+name 'puppet-concat'
+version '0.1.0'
+source 'git://github.com/ripienaar/puppet-concat.git'
+author 'R.I.Pienaar'
+license 'Apache'
+summary 'Concat module'
+description 'Concat module'
+project_page 'http://github.com/ripienaar/puppet-concat'
diff --git a/puppet/modules/concat/README.markdown b/puppet/modules/concat/README.markdown
new file mode 100644
index 00000000..8736d57a
--- /dev/null
+++ b/puppet/modules/concat/README.markdown
@@ -0,0 +1,112 @@
+What is it?
+===========
+
+A Puppet module that can construct files from fragments.
+
+Please see the comments in the various .pp files for details
+as well as posts on my blog at http://www.devco.net/
+
+Released under the Apache 2.0 licence
+
+Usage:
+------
+
+Before you can use any of the concat features you should include the class
+concat::setup somewhere on your node first.
+
+Suppose you want an /etc/motd file that lists all the major modules
+on the machine, and that is maintained automatically even if you
+remove the include lines for other modules. You could use code like
+the example below; a sample /etc/motd would be:
+
+<pre>
+Puppet modules on this server:
+
+ -- Apache
+ -- MySQL
+</pre>
+
+Local sysadmins can also append to the file by editing /etc/motd.local;
+their changes will be incorporated into the puppet-managed motd.
+
+<pre>
+# class to setup basic motd, include on all nodes
+class motd {
+ include concat::setup
+ $motd = "/etc/motd"
+
+ concat{$motd:
+ owner => root,
+ group => root,
+ mode => 644
+ }
+
+ concat::fragment{"motd_header":
+ target => $motd,
+ content => "\nPuppet modules on this server:\n\n",
+ order => 01,
+ }
+
+ # local users on the machine can append to motd by just creating
+ # /etc/motd.local
+ concat::fragment{"motd_local":
+ target => $motd,
+ ensure => "/etc/motd.local",
+ order => 15
+ }
+}
+
+# used by other modules to register themselves in the motd
+define motd::register($content="", $order=10) {
+ if $content == "" {
+ $body = $name
+ } else {
+ $body = $content
+ }
+
+ concat::fragment{"motd_fragment_$name":
+ target => "/etc/motd",
+ content => " -- $body\n"
+ }
+}
+
+# a sample apache module
+class apache {
+ include apache::install, apache::config, apache::service
+
+ motd::register{"Apache": }
+}
+</pre>
+
+Known Issues:
+-------------
+* In 0.24.8 you will see unintended notifies: if you build a file
+ in a run, the next run will also see it as changed. This is due
+ to how 0.24.8 does the purging of unhandled files; this is improved
+ in 0.25.x and we cannot work around it in our code.
+* Since puppet-concat now relies on a fact for the concat directory,
+ you will need to set up pluginsync = true for at least the first run.
+ You have this issue if puppet fails to run on the client and you have
+ a message similar to
+ "err: Failed to apply catalog: Parameter path failed: File
+ paths must be fully qualified, not 'undef' at [...]/concat/manifests/setup.pp:44".
+
+Contributors:
+-------------
+**Paul Elliot**
+
+ * Provided 0.24.8 support, shell warnings and empty file creation support.
+
+**Chad Netzer**
+
+ * Various patches to improve safety of file operations
+ * Symlink support
+
+**David Schmitt**
+
+ * Patch to remove hard coded paths relying on OS path
+ * Patch to use file{} to copy the resulting file to the final destination. This means Puppet client will show diffs and that hopefully we can change file ownerships now
+
+Contact:
+--------
+You can contact me on rip@devco.net or follow my blog at http://www.devco.net. I am also on twitter as ripienaar.
diff --git a/puppet/modules/concat/Rakefile b/puppet/modules/concat/Rakefile
new file mode 100644
index 00000000..764aebd2
--- /dev/null
+++ b/puppet/modules/concat/Rakefile
@@ -0,0 +1,13 @@
+require 'rake'
+require 'rspec/core/rake_task'
+
+task :default => [:spec]
+
+desc "Run all module spec tests (Requires rspec-puppet gem)"
+RSpec::Core::RakeTask.new(:spec)
+
+desc "Build package"
+task :build do
+ system("puppet-module build")
+end
+
diff --git a/puppet/modules/concat/files/concatfragments.sh b/puppet/modules/concat/files/concatfragments.sh
new file mode 100755
index 00000000..c9397975
--- /dev/null
+++ b/puppet/modules/concat/files/concatfragments.sh
@@ -0,0 +1,129 @@
+#!/bin/sh
+
+# Script to concat files to a config file.
+#
+# Given a directory like this:
+# /path/to/conf.d
+# |-- fragments
+# | |-- 00_named.conf
+# | |-- 10_domain.net
+# | `-- zz_footer
+#
+# The script supports a test option that will build the concat file to a temp location and
+# use /usr/bin/cmp to verify if it should be run or not. This would result in the concat happening
+# twice on each run but gives you the option to have an unless option in your execs to inhibit rebuilds.
+#
+# Without the test option and the unless combo your services that depend on the final file would end up
+# restarting on each run, or in other manifest models some changes might get missed.
+#
+# OPTIONS:
+# -o The file to create from the sources
+# -d The directory where the fragments are kept
+# -t Test to find out if a build is needed, basically concats the files to a temp
+# location and compares with what's in the final location; return codes are designed
+# for use with unless on an exec resource
+# -w Add a shell style comment at the top of the created file to warn users that it
+# is generated by puppet
+# -f Enables the creation of empty output files when no fragments are found
+# -n Sort the output numerically rather than the default alpha sort
+#
+# the command:
+#
+# concatfragments.sh -o /path/to/conffile.cfg -d /path/to/conf.d
+#
+# creates /path/to/conf.d/fragments.concat and copies the resulting
+# file to /path/to/conffile.cfg. The files will be sorted alphabetically;
+# pass the -n switch to sort numerically.
+#
+# The script does error checking on the various dirs and files to make
+# sure things don't fail.
+
+OUTFILE=""
+WORKDIR=""
+TEST=""
+FORCE=""
+WARN=""
+SORTARG=""
+
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+
+## Well, if there's ever a bad way to do things, Nexenta has it.
+## http://nexenta.org/projects/site/wiki/Personalities
+unset SUN_PERSONALITY
+
+while getopts "o:s:d:tnw:f" options; do
+ case $options in
+ o ) OUTFILE=$OPTARG;;
+ d ) WORKDIR=$OPTARG;;
+ n ) SORTARG="-n";;
+ w ) WARNMSG="$OPTARG";;
+ f ) FORCE="true";;
+ t ) TEST="true";;
+ * ) echo "Specify output file with -o and fragments directory with -d"
+ exit 1;;
+ esac
+done
+
+# do we have -o?
+if [ x${OUTFILE} = "x" ]; then
+ echo "Please specify an output file with -o"
+ exit 1
+fi
+
+# do we have -d?
+if [ x${WORKDIR} = "x" ]; then
+ echo "Please fragments directory with -d"
+ exit 1
+fi
+
+# can we write to -o?
+if [ -f ${OUTFILE} ]; then
+ if [ ! -w ${OUTFILE} ]; then
+ echo "Cannot write to ${OUTFILE}"
+ exit 1
+ fi
+else
+ if [ ! -w `dirname ${OUTFILE}` ]; then
+ echo "Cannot write to `dirname ${OUTFILE}` to create ${OUTFILE}"
+ exit 1
+ fi
+fi
+
+# do we have a fragments subdir inside the work dir?
+if [ ! -d "${WORKDIR}/fragments" ] && [ ! -x "${WORKDIR}/fragments" ]; then
+ echo "Cannot access the fragments directory"
+ exit 1
+fi
+
+# are there actually any fragments?
+if [ ! "$(ls -A ${WORKDIR}/fragments)" ]; then
+ if [ x${FORCE} = "x" ]; then
+ echo "The fragments directory is empty, cowardly refusing to make empty config files"
+ exit 1
+ fi
+fi
+
+cd ${WORKDIR}
+
+if [ x${WARNMSG} = "x" ]; then
+ : > "fragments.concat"
+else
+ printf '%s\n' "$WARNMSG" > "fragments.concat"
+fi
+
+# find all the files in the fragments directory, sort them numerically and concat to fragments.concat in the working dir
+find fragments/ -type f -follow | sort ${SORTARG} | while read fragfile; do
+ cat "$fragfile" >> "fragments.concat"
+done
+
+if [ x${TEST} = "x" ]; then
+ # This is a real run, copy the file to outfile
+ cp fragments.concat ${OUTFILE}
+ RETVAL=$?
+else
+ # Just compare the result to outfile to help the exec decide
+ cmp ${OUTFILE} fragments.concat
+ RETVAL=$?
+fi
+
+exit $RETVAL
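To make the -t / unless combination from the header comment concrete, a hedged sketch of an exec resource; the paths are placeholders, and in practice the module's init.pp wires this up for you:

    exec { 'concat_/etc/foo.conf':
      command => '/var/lib/puppet/concat/bin/concatfragments.sh -o /var/lib/puppet/concat/_etc_foo.conf/fragments.concat.out -d /var/lib/puppet/concat/_etc_foo.conf',
      unless  => '/var/lib/puppet/concat/bin/concatfragments.sh -o /var/lib/puppet/concat/_etc_foo.conf/fragments.concat.out -d /var/lib/puppet/concat/_etc_foo.conf -t',
    }
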
diff --git a/puppet/modules/concat/files/null/.gitignore b/puppet/modules/concat/files/null/.gitignore
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/puppet/modules/concat/files/null/.gitignore
diff --git a/puppet/modules/concat/lib/facter/concat_basedir.rb b/puppet/modules/concat/lib/facter/concat_basedir.rb
new file mode 100644
index 00000000..02e9c5bf
--- /dev/null
+++ b/puppet/modules/concat/lib/facter/concat_basedir.rb
@@ -0,0 +1,5 @@
+Facter.add("concat_basedir") do
+ setcode do
+ File.join(Puppet[:vardir],"concat")
+ end
+end
diff --git a/puppet/modules/concat/manifests/fragment.pp b/puppet/modules/concat/manifests/fragment.pp
new file mode 100644
index 00000000..943bf671
--- /dev/null
+++ b/puppet/modules/concat/manifests/fragment.pp
@@ -0,0 +1,49 @@
+# Puts a file fragment into a directory previously set up using concat
+#
+# OPTIONS:
+# - target The file that these fragments belong to
+# - content If present puts the content into the file
+# - source If content was not specified, use the source
+# - order By default all files get a 10_ prefix in the directory;
+# you can set it to anything else using this to influence the
+# order of the content in the file
+# - ensure Present/Absent or destination to a file to include another file
+# - mode Mode for the file
+# - owner Owner of the file
+# - group Group of the file
+# - backup Controls the filebucketing behavior of the final file and
+# see File type reference for its use. Defaults to 'puppet'
+define concat::fragment($target, $content='', $source='', $order=10, $ensure = 'present', $mode = '0644', $owner = $::id, $group = $concat::setup::root_group, $backup = 'puppet') {
+ $safe_name = regsubst($name, '/', '_', 'G')
+ $safe_target_name = regsubst($target, '/', '_', 'G')
+ $concatdir = $concat::setup::concatdir
+ $fragdir = "${concatdir}/${safe_target_name}"
+
+ # if content is passed, use that, else if source is passed use that
+ # if neither passed, but $ensure is in symlink form, make a symlink
+ case $content {
+ '': {
+ case $source {
+ '': {
+ case $ensure {
+ '', 'absent', 'present', 'file', 'directory': {
+ crit('No content, source or symlink specified')
+ }
+ }
+ }
+ default: { File{ source => $source } }
+ }
+ }
+ default: { File{ content => $content } }
+ }
+
+ file{"${fragdir}/fragments/${order}_${safe_name}":
+ ensure => $ensure,
+ mode => $mode,
+ owner => $owner,
+ group => $group,
+ backup => $backup,
+ alias => "concat_fragment_${name}",
+ notify => Exec["concat_${target}"]
+ }
+}
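Alongside the content- and ensure-based examples elsewhere in this module's documentation, a fragment can take its body from a file source; a brief, hypothetical example (the target and source paths are invented):

    concat::fragment { 'sshd_site_defaults':
      target => '/etc/ssh/sshd_config',
      source => 'puppet:///modules/site_sshd/defaults.conf',
      order  => 20,
    }
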
diff --git a/puppet/modules/concat/manifests/init.pp b/puppet/modules/concat/manifests/init.pp
new file mode 100644
index 00000000..0b3ed564
--- /dev/null
+++ b/puppet/modules/concat/manifests/init.pp
@@ -0,0 +1,178 @@
+# A system to construct files using fragments from other files or templates.
+#
+# This requires at least puppet 0.25 to work correctly as we use some
+# enhancements in recursive directory management and regular expressions
+# to do the work here.
+#
+# USAGE:
+# The basic use case is as below:
+#
+# concat{"/etc/named.conf":
+# notify => Service["named"]
+# }
+#
+# concat::fragment{"foo.com_config":
+# target => "/etc/named.conf",
+# order => 10,
+# content => template("named_conf_zone.erb")
+# }
+#
+# # add a fragment not managed by puppet so local users
+# # can add content to managed file
+# concat::fragment{"foo.com_user_config":
+# target => "/etc/named.conf",
+# order => 12,
+# ensure => "/etc/named.conf.local"
+# }
+#
+# This will use the template named_conf_zone.erb to build a single
+# bit of config up and put it into the fragments dir. The file
+# will have a number prefix of 10; you can use the order option
+# to control that and thus the order in which the final file gets built.
+#
+# SETUP:
+# The class concat::setup uses the fact concat_basedir to define the variable
+# $concatdir, where all the temporary files and fragments will be
+# durably stored. The fact concat_basedir will be set on the client to
+# <Puppet[:vardir]>/concat, so it works with different setups/flavours
+# of puppet clients.
+# However, since this requires the file lib/facter/concat_basedir.rb to be
+# deployed on the clients, you will have to set "pluginsync = true" on
+# both the master and the client, at least for the first run.
+#
+# There's some regular expression magic to figure out the puppet version but
+# if you're on an older 0.24 version just set $puppetversion = 24
+#
+# Before you can use any of the concat features you should include the
+# class concat::setup somewhere on your node first.
+#
+# DETAIL:
+# We use a helper shell script called concatfragments.sh that gets placed
+# in <Puppet[:vardir]>/concat/bin to do the concatenation. While this might
+# seem more complex than some of the one-liner alternatives you might find on
+# the net we do a lot of error checking and safety checks in the script to avoid
+# problems that might be caused by complex escaping errors etc.
+#
+# LICENSE:
+# Apache Version 2
+#
+# LATEST:
+# http://github.com/ripienaar/puppet-concat/
+#
+# CONTACT:
+# R.I.Pienaar <rip@devco.net>
+# Volcane on freenode
+# @ripienaar on twitter
+# www.devco.net
+
+
+# Sets up so that you can use fragments to build a final config file,
+#
+# OPTIONS:
+# - mode The mode of the final file
+# - owner Who will own the file
+# - group Which group will own the file
+# - force Enables creating empty files if no fragments are present
+# - warn Adds a normal shell style comment at the top of the file indicating
+# that it is built by puppet
+# - backup Controls the filebucketing behavior of the final file and
+# see File type reference for its use. Defaults to 'puppet'
+#
+# ACTIONS:
+# - Creates fragment directories if they didn't exist already
+# - Executes the concatfragments.sh script to build the final file; this script will create
+# directory/fragments.concat. Execution happens only when:
+# * The directory changes
+# * fragments.concat != final destination; this means rebuilds will happen whenever
+# someone changes or deletes the final file. Checking is done using /usr/bin/cmp.
+# * The Exec gets notified by something else - like the concat::fragment define
+# - Copies the file over to the final destination using a file resource
+#
+# ALIASES:
+# - The exec can be notified using Exec["concat_/path/to/file"] or Exec["concat_/path/to/directory"]
+# - The final file can be referenced as File["/path/to/file"] or File["concat_/path/to/file"]
+define concat($mode = '0644', $owner = $::id, $group = $concat::setup::root_group, $warn = false, $force = false, $backup = 'puppet', $gnu = undef, $order='alpha') {
+ $safe_name = regsubst($name, '/', '_', 'G')
+ $concatdir = $concat::setup::concatdir
+ $version = $concat::setup::majorversion
+ $fragdir = "${concatdir}/${safe_name}"
+ $concat_name = 'fragments.concat.out'
+ $default_warn_message = '# This file is managed by Puppet. DO NOT EDIT.'
+
+ case $warn {
+ 'true',true,yes,on: { $warnmsg = $default_warn_message }
+ 'false',false,no,off: { $warnmsg = '' }
+ default: { $warnmsg = $warn }
+ }
+
+ $warnmsg_escaped = regsubst($warnmsg, "'", "'\\\\''", 'G')
+ $warnflag = $warnmsg_escaped ? {
+ '' => '',
+ default => "-w '${warnmsg_escaped}'"
+ }
+
+ case $force {
+ 'true',true,yes,on: { $forceflag = '-f' }
+ 'false',false,no,off: { $forceflag = '' }
+ default: { fail("Improper 'force' value given to concat: ${force}") }
+ }
+
+ case $order {
+ numeric: { $orderflag = '-n' }
+ alpha: { $orderflag = '' }
+ default: { fail("Improper 'order' value given to concat: ${order}") }
+ }
+
+ File{
+ owner => $::id,
+ group => $group,
+ mode => $mode,
+ backup => $backup
+ }
+
+ file{$fragdir:
+ ensure => directory;
+
+ "${fragdir}/fragments":
+ ensure => directory,
+ recurse => true,
+ purge => true,
+ force => true,
+ ignore => ['.svn', '.git', '.gitignore'],
+ source => $version ? {
+ 24 => 'puppet:///concat/null',
+ default => undef,
+ },
+ notify => Exec["concat_${name}"];
+
+ "${fragdir}/fragments.concat":
+ ensure => present;
+
+ "${fragdir}/${concat_name}":
+ ensure => present;
+
+ $name:
+ ensure => present,
+ source => "${fragdir}/${concat_name}",
+ owner => $owner,
+ group => $group,
+ checksum => md5,
+ mode => $mode,
+ alias => "concat_${name}";
+ }
+
+ exec{"concat_${name}":
+ notify => File[$name],
+ subscribe => File[$fragdir],
+ alias => "concat_${fragdir}",
+ require => [ File[$fragdir], File["${fragdir}/fragments"], File["${fragdir}/fragments.concat"] ],
+ unless => "${concat::setup::concatdir}/bin/concatfragments.sh -o ${fragdir}/${concat_name} -d ${fragdir} -t ${warnflag} ${forceflag} ${orderflag}",
+ command => "${concat::setup::concatdir}/bin/concatfragments.sh -o ${fragdir}/${concat_name} -d ${fragdir} ${warnflag} ${forceflag} ${orderflag}",
+ }
+ if $::id == 'root' {
+ Exec["concat_${name}"]{
+ user => root,
+ group => $group,
+ }
+ }
+}
diff --git a/puppet/modules/concat/manifests/setup.pp b/puppet/modules/concat/manifests/setup.pp
new file mode 100644
index 00000000..38aeb964
--- /dev/null
+++ b/puppet/modules/concat/manifests/setup.pp
@@ -0,0 +1,49 @@
+# Sets up the concat system.
+#
+# $concatdir is where the fragments live and is set from the fact concat_basedir.
+# Since puppet should always manage files in $concatdir and they should
+# not be deleted ever, /tmp is not an option.
+#
+# $puppetversion should be either 24 or 25 to enable a 24-compatible
+# mode; in 24 mode you might see phantom notifies, which is a side effect
+# of the method we use to clear the fragments directory.
+#
+# The regular expression below will try to figure out your puppet version
+# but this code will only work in 0.24.8 and newer.
+#
+# It also copies out the concatfragments.sh file to ${concatdir}/bin
+class concat::setup {
+ $id = $::id
+ $root_group = $id ? {
+ root => 0,
+ default => $id
+ }
+
+ if $::concat_basedir {
+ $concatdir = $::concat_basedir
+ } else {
+ fail ("\$concat_basedir not defined. Try running again with pluginsync enabled")
+ }
+
+ $majorversion = regsubst($::puppetversion, '^[0-9]+[.]([0-9]+)[.][0-9]+$', '\1')
+
+ file{"${concatdir}/bin/concatfragments.sh":
+ owner => $id,
+ group => $root_group,
+ mode => '0755',
+ source => $majorversion ? {
+ 24 => 'puppet:///concat/concatfragments.sh',
+ default => 'puppet:///modules/concat/concatfragments.sh'
+ };
+
+ [ $concatdir, "${concatdir}/bin" ]:
+ ensure => directory,
+ owner => $id,
+ group => $root_group,
+ mode => '0750';
+
+ ## Old versions of this module used a different path.
+ '/usr/local/bin/concatfragments.sh':
+ ensure => absent;
+ }
+}
diff --git a/puppet/modules/concat/spec/defines/init_spec.rb b/puppet/modules/concat/spec/defines/init_spec.rb
new file mode 100644
index 00000000..d968a26c
--- /dev/null
+++ b/puppet/modules/concat/spec/defines/init_spec.rb
@@ -0,0 +1,20 @@
+require 'spec_helper'
+
+describe 'concat' do
+ basedir = '/var/lib/puppet/concat'
+ let(:title) { '/etc/foo.bar' }
+ let(:facts) { { :concat_basedir => '/var/lib/puppet/concat' } }
+ let :pre_condition do
+ 'include concat::setup'
+ end
+ it { should contain_file("#{basedir}/_etc_foo.bar").with('ensure' => 'directory') }
+ it { should contain_file("#{basedir}/_etc_foo.bar/fragments").with('ensure' => 'directory') }
+
+ it { should contain_file("#{basedir}/_etc_foo.bar/fragments.concat").with('ensure' => 'present') }
+ it { should contain_file("/etc/foo.bar").with('ensure' => 'present') }
+ it { should contain_exec("concat_/etc/foo.bar").with_command(
+ "#{basedir}/bin/concatfragments.sh "+
+ "-o #{basedir}/_etc_foo.bar/fragments.concat.out "+
+ "-d #{basedir}/_etc_foo.bar ")
+ }
+end
diff --git a/puppet/modules/concat/spec/spec_helper.rb b/puppet/modules/concat/spec/spec_helper.rb
new file mode 100644
index 00000000..e6e9309b
--- /dev/null
+++ b/puppet/modules/concat/spec/spec_helper.rb
@@ -0,0 +1,9 @@
+require 'puppet'
+require 'rspec'
+require 'rspec-puppet'
+
+RSpec.configure do |c|
+ c.module_path = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures/modules/'))
+ # Using an empty site.pp file to avoid: https://github.com/rodjek/rspec-puppet/issues/15
+ c.manifest_dir = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures/manifests'))
+end
diff --git a/puppet/modules/couchdb/.fixtures.yml b/puppet/modules/couchdb/.fixtures.yml
new file mode 100644
index 00000000..50c6c9ac
--- /dev/null
+++ b/puppet/modules/couchdb/.fixtures.yml
@@ -0,0 +1,6 @@
+fixtures:
+ symlinks:
+ couchdb: "#{source_dir}"
+ repositories:
+ stdlib: "https://leap.se/git/puppet_stdlib"
+
diff --git a/puppet/modules/couchdb/Gemfile b/puppet/modules/couchdb/Gemfile
new file mode 100644
index 00000000..1c86e980
--- /dev/null
+++ b/puppet/modules/couchdb/Gemfile
@@ -0,0 +1,11 @@
+source "https://rubygems.org"
+
+group :test do
+ gem "rake"
+ gem "puppet", ENV['PUPPET_VERSION'] || '~> 3.7.0'
+ gem "rspec", '< 3.2.0'
+ gem "rspec-puppet"
+ gem "puppetlabs_spec_helper"
+ gem "metadata-json-lint"
+ gem "rspec-puppet-facts"
+end
diff --git a/puppet/modules/couchdb/README.md b/puppet/modules/couchdb/README.md
new file mode 100644
index 00000000..096221a4
--- /dev/null
+++ b/puppet/modules/couchdb/README.md
@@ -0,0 +1,32 @@
+# Couchdb Puppet module
+
+This module is based on the one from [Camptocamp](http://www.camptocamp.com/).
+
+For more information about couchdb see http://couchdb.apache.org/
+
+# Dependencies
+
+- ruby module from the shared-modules group
+
+# Couchdb debian packages
+
+## Jessie
+
+There are no couchdb packages for jessie, so the only way is
+to configure apt to install couchdb from unstable by adding a
+sources list file to `/etc/apt/sources.list.d`.
+
+## Example usage
+
+This will set up couchdb:
+
+ # needed for wget call, which is unqualified on purpose so we don't force
+ # a location for the wget binary
+ Exec { path => '/usr/bin:/usr/sbin/:/bin:/sbin:/usr/local/bin:/usr/local/sbin' }
+
+ class { 'couchdb':
+ admin_pw => '123'
+ }
+
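For the Jessie note above, one minimal way to add such a sources list entry with a plain file resource; the mirror URL is an assumption, and in practice you would also add apt pinning so that only couchdb is pulled from unstable:

    file { '/etc/apt/sources.list.d/unstable.list':
      ensure  => present,
      content => "deb http://deb.debian.org/debian unstable main\n",
    }
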
diff --git a/puppet/modules/couchdb/Rakefile b/puppet/modules/couchdb/Rakefile
new file mode 100644
index 00000000..85326bb4
--- /dev/null
+++ b/puppet/modules/couchdb/Rakefile
@@ -0,0 +1,19 @@
+require 'puppetlabs_spec_helper/rake_tasks'
+require 'puppet-lint/tasks/puppet-lint'
+PuppetLint.configuration.send('disable_80chars')
+PuppetLint.configuration.ignore_paths = ["spec/**/*.pp", "pkg/**/*.pp"]
+
+desc "Validate manifests, templates, and ruby files"
+task :validate do
+ Dir['manifests/**/*.pp'].each do |manifest|
+ sh "puppet parser validate --noop #{manifest}"
+ end
+ Dir['spec/**/*.rb','lib/**/*.rb'].each do |ruby_file|
+ sh "ruby -c #{ruby_file}" unless ruby_file =~ /spec\/fixtures/
+ end
+ Dir['templates/**/*.erb'].each do |template|
+ sh "erb -P -x -T '-' #{template} | ruby -c"
+ end
+end
+
+task :test => [:lint, :syntax , :validate, :spec]
diff --git a/puppet/modules/couchdb/files/Debian/couchdb b/puppet/modules/couchdb/files/Debian/couchdb
new file mode 100755
index 00000000..ccdfe716
--- /dev/null
+++ b/puppet/modules/couchdb/files/Debian/couchdb
@@ -0,0 +1,160 @@
+#!/bin/sh -e
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+### BEGIN INIT INFO
+# Provides: couchdb
+# Required-Start: $local_fs $remote_fs
+# Required-Stop: $local_fs $remote_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Apache CouchDB init script
+# Description: Apache CouchDB init script for the database server.
+### END INIT INFO
+
+SCRIPT_OK=0
+SCRIPT_ERROR=1
+
+DESCRIPTION="database server"
+NAME=couchdb
+SCRIPT_NAME=`basename $0`
+COUCHDB=/usr/bin/couchdb
+CONFIGURATION_FILE=/etc/default/couchdb
+RUN_DIR=/var/run/couchdb
+LSB_LIBRARY=/lib/lsb/init-functions
+
+if test ! -x $COUCHDB; then
+ exit $SCRIPT_ERROR
+fi
+
+if test -r $CONFIGURATION_FILE; then
+ . $CONFIGURATION_FILE
+fi
+
+log_daemon_msg () {
+ # Dummy function to be replaced by LSB library.
+
+ echo $@
+}
+
+log_end_msg () {
+ # Dummy function to be replaced by LSB library.
+
+ if test "$1" != "0"; then
+ echo "Error with $DESCRIPTION: $NAME"
+ fi
+ return $1
+}
+
+if test -r $LSB_LIBRARY; then
+ . $LSB_LIBRARY
+fi
+
+run_command () {
+ command="$1"
+ if test -n "$COUCHDB_OPTIONS"; then
+ command="$command $COUCHDB_OPTIONS"
+ fi
+ if test -n "$COUCHDB_USER"; then
+ if su $COUCHDB_USER -c "$command"; then
+ return $SCRIPT_OK
+ else
+ return $SCRIPT_ERROR
+ fi
+ else
+ if $command; then
+ return $SCRIPT_OK
+ else
+ return $SCRIPT_ERROR
+ fi
+ fi
+}
+
+start_couchdb () {
+ # Start Apache CouchDB as a background process.
+
+ mkdir -p "$RUN_DIR"
+ chown -R "$COUCHDB_USER" "$RUN_DIR"
+ command="$COUCHDB -b"
+ if test -n "$COUCHDB_STDOUT_FILE"; then
+ command="$command -o $COUCHDB_STDOUT_FILE"
+ fi
+ if test -n "$COUCHDB_STDERR_FILE"; then
+ command="$command -e $COUCHDB_STDERR_FILE"
+ fi
+ if test -n "$COUCHDB_RESPAWN_TIMEOUT"; then
+ command="$command -r $COUCHDB_RESPAWN_TIMEOUT"
+ fi
+ run_command "$command" > /dev/null
+}
+
+stop_couchdb () {
+ # Stop the running Apache CouchDB process.
+
+ run_command "$COUCHDB -d" > /dev/null
+ pkill -u couchdb
+ # always return true even if no remaining couchdb procs got killed
+ /bin/true
+}
+
+display_status () {
+ # Display the status of the running Apache CouchDB process.
+
+ run_command "$COUCHDB -s"
+}
+
+parse_script_option_list () {
+ # Parse arguments passed to the script and take appropriate action.
+
+ case "$1" in
+ start)
+ log_daemon_msg "Starting $DESCRIPTION" $NAME
+ if start_couchdb; then
+ log_end_msg $SCRIPT_OK
+ else
+ log_end_msg $SCRIPT_ERROR
+ fi
+ ;;
+ stop)
+ log_daemon_msg "Stopping $DESCRIPTION" $NAME
+ if stop_couchdb; then
+ log_end_msg $SCRIPT_OK
+ else
+ log_end_msg $SCRIPT_ERROR
+ fi
+ ;;
+ restart|force-reload)
+ log_daemon_msg "Restarting $DESCRIPTION" $NAME
+ if stop_couchdb; then
+ if start_couchdb; then
+ log_end_msg $SCRIPT_OK
+ else
+ log_end_msg $SCRIPT_ERROR
+ fi
+ else
+ log_end_msg $SCRIPT_ERROR
+ fi
+ ;;
+ status)
+ display_status
+ ;;
+ *)
+ cat << EOF >&2
+Usage: $SCRIPT_NAME {start|stop|restart|force-reload|status}
+EOF
+ exit $SCRIPT_ERROR
+ ;;
+ esac
+}
+
+parse_script_option_list $@
diff --git a/puppet/modules/couchdb/files/couch-doc-diff b/puppet/modules/couchdb/files/couch-doc-diff
new file mode 100644
index 00000000..a5907d5e
--- /dev/null
+++ b/puppet/modules/couchdb/files/couch-doc-diff
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# Run a diff between a couch document specified as the first parameter
+# and the second parameter.
+# Diff returns 0 if there is no difference. This way you can tell the data
+# is already on the couch.
+# Both the couch document and the second parameter will be pretty printed
+# before comparison so differences in spaces etc. do not matter.
+# All keys starting with an underscore on the couch such as _id and _rev
+# will be removed before the comparison - we assume we want to compare
+# the real data, not the metadata about the document as we usually do not
+# know or care about what the id and revision will be.
+
+curl -s --netrc-file /etc/couchdb/couchdb.netrc $1 \
+ | python -mjson.tool \
+ | grep -v '^\s*"_' \
+ | diff -w - <(echo $2 | python -mjson.tool)
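A hedged sketch of how the exit-code behaviour can gate an update from Puppet; the database, document id and JSON payload are invented, and the module's own couchdb::document define may wire this differently:

    exec { 'update_example_doc':
      command => "/usr/local/bin/couch-doc-update --db tokens --id example --data '{\"enabled\": true}'",
      unless  => "/usr/local/bin/couch-doc-diff http://127.0.0.1:5984/tokens/example '{\"enabled\": true}'",
      path    => '/bin:/usr/bin:/usr/local/bin',
    }
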
diff --git a/puppet/modules/couchdb/files/couch-doc-update b/puppet/modules/couchdb/files/couch-doc-update
new file mode 100644
index 00000000..a137e7ff
--- /dev/null
+++ b/puppet/modules/couchdb/files/couch-doc-update
@@ -0,0 +1,219 @@
+#!/usr/bin/ruby
+require 'syslog'
+
+#
+# This script will delete or update the values of a particular couchdb document. The benefit of this little script over
+# using a simple curl command for updating a document is this:
+#
+# * exit non-zero status if document was not updated.
+# * updates existing documents easily, taking care of the _rev id for you.
+# * if document doesn't exist, it is created
+#
+# REQUIREMENTS
+#
+# gem 'couchrest'
+#
+# USAGE
+#
+# see the output of
+#
+# couch-doc-update
+#
+# the content of <file> will be merged with the data provided.
+# If you only want the file content use --data '{}'
+#
+# EXAMPLE
+#
+# create a new user:
+# couch-doc-update --db _users --id org.couchdb.user:ca_daemon --data '{"type": "user", "name": "ca_daemon", "roles": ["certs"], "password": "sshhhh"}'
+#
+# update a user:
+# couch-doc-update --db _users --id org.couchdb.user:ca_daemon --data '{"password":"sssshhh"}'
+#
+# To update the _users DB on bigcouch, you must connect to port 5986 instead of the default couchdb port 5984
+#
+# delete a doc:
+# couch-doc-update --delete --db invite_codes --id dfaf0ee65670c16d5a9161dc86f3bff8
+#
+
+begin; require 'rubygems'; rescue LoadError; end # optionally load rubygems
+require 'couchrest'
+
+def main
+ db, id, data, delete = process_options
+
+ result = if delete
+ delete_document(db, id)
+ else
+ set_document(db, id, data)
+ end
+
+ exit 0 if result['ok']
+ raise StandardError.new(result.inspect)
+rescue StandardError => exc
+ db_without_password = db.to_s.sub(/:[^\/]*@/, ':PASSWORD_HIDDEN@')
+ indent = " "
+ log "ERROR: " + exc.to_s
+ log indent + $@[0..4].join("\n#{indent}")
+ log indent + "Failed writing to #{db_without_password}/#{id}"
+ exit 1
+end
+
+def log(message)
+ $stderr.puts message
+ Syslog.open('couch-doc-update') do |logger|
+ logger.log(Syslog::LOG_CRIT, message)
+ end
+end
+
+def process_options
+ #
+ # parse options
+ #
+ host = nil
+ db_name = nil
+ doc_id = nil
+ new_data = nil
+ filename = nil
+ netrc_file = nil
+ delete = false
+ loop do
+ case ARGV[0]
+ when '--host' then ARGV.shift; host = ARGV.shift
+ when '--db' then ARGV.shift; db_name = ARGV.shift
+ when '--id' then ARGV.shift; doc_id = ARGV.shift
+ when '--data' then ARGV.shift; new_data = ARGV.shift
+ when '--file' then ARGV.shift; filename = ARGV.shift
+ when '--netrc-file' then ARGV.shift; netrc_file = ARGV.shift
+ when '--delete' then ARGV.shift; delete = true
+ when /^-/ then usage("Unknown option: #{ARGV[0].inspect}")
+ else break
+ end
+ end
+ usage("Missing required option") unless db_name && doc_id && (new_data || delete)
+
+ unless delete
+ new_data = MultiJson.load(new_data)
+ new_data.merge!(read_file(filename)) if filename
+ end
+ db = CouchRest.database(connection_string(db_name, host, netrc_file))
+ return db, doc_id, new_data, delete
+end
+
+def read_file(filename)
+ data = MultiJson.load( IO.read(filename) )
+ # strip off _id and _rev to avoid conflicts
+ data.delete_if {|k,v| k.start_with? '_'}
+end
+
+ #
+ # update document
+ #
+def set_document(db, id, data)
+ attempt ||= 1
+ doc = get_document(db, id)
+ if doc
+ doc.id ||= id
+ update_document(db, doc, data)
+ else
+ create_document(db, id, data)
+ end
+rescue RestClient::Conflict
+ # retry once, reraise if that does not work
+ raise if attempt > 1
+ attempt += 1
+ retry
+end
+
+COUCH_RESPONSE_OK = { 'ok' => true }
+
+# Deletes document, if exists, with retry
+def delete_document(db, id)
+ attempts ||= 1
+ doc = get_document(db, id)
+ if doc
+ db.delete_doc(doc)
+ else
+ COUCH_RESPONSE_OK
+ end
+rescue RestClient::ExceptionWithResponse => e
+ if attempts < 6 && !e.response.nil? && RETRY_CODES.include?(e.response.code)
+ attempts += 1
+ sleep 10
+ retry
+ else
+ raise e
+ end
+end
+
+def get_document(db, doc_id)
+ begin
+ db.get(doc_id)
+ rescue RestClient::ResourceNotFound
+ nil
+ end
+end
+
+# if the response status code is one of these
+# then retry instead of failing.
+RETRY_CODES = [500, 422].freeze
+
+def update_document(db, doc, data)
+ attempts ||= 1
+ doc.reject! {|k,v| !["_id", "_rev"].include? k}
+ doc.merge! data
+ db.save_doc(doc)
+rescue RestClient::ExceptionWithResponse => e
+ if attempts < 6 && !e.response.nil? && RETRY_CODES.include?(e.response.code)
+ attempts += 1
+ sleep 10
+ retry
+ else
+ raise e
+ end
+end
+
+def create_document(db, doc_id, data)
+ attempts ||= 1
+ data["_id"] = doc_id
+ db.save_doc(data)
+rescue RestClient::ExceptionWithResponse => e
+ if attempts < 6 && !e.response.nil? && RETRY_CODES.include?(e.response.code)
+ attempts += 1
+ sleep 10
+ retry
+ else
+ raise e
+ end
+end
+
+def connection_string(database, host, netrc_file = nil)
+ protocol = "http"
+ #hostname = "127.0.0.1"
+ port = "5984"
+ username = "admin"
+ password = ""
+
+ netrc = File.read(netrc_file || '/etc/couchdb/couchdb.netrc')
+ netrc.scan(/\w+ [\w\.]+/).each do |key_value|
+ key, value = key_value.split ' '
+ case key
+ when "machine" then host ||= value + ':' + port
+ when "login" then username = value
+ when "password" then password = value
+ end
+ end
+
+ host ||= '127.0.0.1:5984'
+
+ "%s://%s:%s@%s/%s" % [protocol, username, password, host, database]
+end
+
+def usage(s)
+ $stderr.puts(s)
+ $stderr.puts("Usage: #{File.basename($0)} --host <host> --db <db> --id <doc_id> --data <json> [--file <file>] [--netrc-file <netrc-file>]")
+ $stderr.puts(" #{File.basename($0)} --host <host> --db <db> --id <doc_id> --delete [--netrc-file <netrc-file>]")
+ exit(2)
+end
+
+main()
diff --git a/puppet/modules/couchdb/files/local.ini b/puppet/modules/couchdb/files/local.ini
new file mode 100644
index 00000000..7365b6c6
--- /dev/null
+++ b/puppet/modules/couchdb/files/local.ini
@@ -0,0 +1,84 @@
+; CouchDB Configuration Settings
+
+; Custom settings should be made in this file. They will override settings
+; in default.ini, but unlike changes made to default.ini, this file won't be
+; overwritten on server upgrade.
+
+[couchdb]
+;max_document_size = 4294967296 ; bytes
+
+[httpd]
+;port = 5984
+;bind_address = 127.0.0.1
+; Options for the MochiWeb HTTP server.
+;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
+; For more socket options, consult Erlang's module 'inet' man page.
+;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
+
+; Uncomment next line to trigger basic-auth popup on unauthorized requests.
+;WWW-Authenticate = Basic realm="administrator"
+
+; Uncomment next line to set the configuration modification whitelist. Only
+; whitelisted values may be changed via the /_config URLs. To allow the admin
+; to change this value over HTTP, remember to include {httpd,config_whitelist}
+; itself. Excluding it from the list would require editing this file to update
+; the whitelist.
+;config_whitelist = [{httpd,config_whitelist}, {log,level}, {etc,etc}]
+
+[httpd_global_handlers]
+;_google = {couch_httpd_proxy, handle_proxy_req, <<"http://www.google.com">>}
+
+[couch_httpd_auth]
+; If you set this to true, you should also uncomment the WWW-Authenticate line
+; above. If you don't configure a WWW-Authenticate header, CouchDB will send
+; Basic realm="server" in order to prevent you getting logged out.
+; require_valid_user = false
+
+[log]
+;level = debug
+
+[os_daemons]
+; For any commands listed here, CouchDB will attempt to ensure that
+; the process remains alive while CouchDB runs as well as shut them
+; down when CouchDB exits.
+;foo = /path/to/command -with args
+
+[daemons]
+; enable SSL support by uncommenting the following line and supply the PEM's below.
+; the default ssl port CouchDB listens on is 6984
+; httpsd = {couch_httpd, start_link, [https]}
+
+[ssl]
+;cert_file = /full/path/to/server_cert.pem
+;key_file = /full/path/to/server_key.pem
+;password = somepassword
+; set to true to validate peer certificates
+verify_ssl_certificates = false
+; Path to file containing PEM encoded CA certificates (trusted
+; certificates used for verifying a peer certificate). May be omitted if
+; you do not want to verify the peer.
+;cacert_file = /full/path/to/cacertf
+; The verification fun (optional); if not specified, the default
+; verification fun will be used.
+;verify_fun = {Module, VerifyFun}
+ssl_certificate_max_depth = 1
+; To enable Virtual Hosts in CouchDB, add a vhost = path directive. All requests to
+; the Virtual Host will be redirected to the path. In the example below all requests
+; to http://example.com/ are redirected to /database.
+; If you run CouchDB on a specific port, include the port number in the vhost:
+; example.com:5984 = /database
+
+[vhosts]
+;example.com = /database/
+
+[update_notification]
+;unique notifier name=/full/path/to/exe -with "cmd line arg"
+
+; To create an admin account uncomment the '[admins]' section below and add a
+; line in the format 'username = password'. When you next start CouchDB, it
+; will change the password to a hash (so that your passwords don't linger
+; around in plain-text files). You can add more admin accounts with more
+; 'username = password' lines. Don't forget to restart CouchDB after
+; changing this.
+[admins]
+;admin = mysecretpassword
diff --git a/puppet/modules/couchdb/lib/facter/couchdb_pwhash_alg.rb b/puppet/modules/couchdb/lib/facter/couchdb_pwhash_alg.rb
new file mode 100644
index 00000000..60ae701a
--- /dev/null
+++ b/puppet/modules/couchdb/lib/facter/couchdb_pwhash_alg.rb
@@ -0,0 +1,43 @@
+require 'facter'
+
+def version_parts ( version )
+ # gives back a hash containing major, minor and patch numbers
+ # of a given version string
+
+ parts = Hash.new
+ first, *rest = version.split(".")
+ parts["major"] = first
+ parts["minor"] = rest[0]
+ parts["patch"] = rest[1]
+ return parts
+end
+
+def couchdb_pwhash_alg
+ # couchdb uses sha1 as pw hash algorithm until v. 1.2,
+ # but pbkdf2 from v.1.3 on.
+ # see http://docs.couchdb.org/en/1.4.x/configuring.html for
+ # details
+
+ couchdb_version = Facter.value(:couchdb_version)
+ version = version_parts(couchdb_version)
+ major = version["major"].to_i
+ alg = case major
+ when 0 then alg = 'n/a'
+ when 1 then
+ minor = version['minor'].to_i
+ if minor < 3
+ alg = 'sha1'
+ else
+ alg = 'pbkdf2'
+ end
+ else
+ alg = 'pbkdf2'
+ end
+ return alg
+end
+
+Facter.add(:couchdb_pwhash_alg) do
+ setcode do
+ couchdb_pwhash_alg
+ end
+end
diff --git a/puppet/modules/couchdb/lib/facter/couchdb_version.rb b/puppet/modules/couchdb/lib/facter/couchdb_version.rb
new file mode 100644
index 00000000..3a721169
--- /dev/null
+++ b/puppet/modules/couchdb/lib/facter/couchdb_version.rb
@@ -0,0 +1,34 @@
+require 'facter'
+
+def deb_installed_version ( name )
+ # returns an empty string if package is not installed,
+ # otherwise the version
+
+ version = `apt-cache policy #{name} | grep Installed 2>&1`
+ version.slice! " Installed: "
+ version.slice! "(none)"
+ return version.strip.chomp
+end
+
+def couchdb_version
+ bigcouch = deb_installed_version("bigcouch")
+ if bigcouch.empty?
+ couchdb = deb_installed_version("couchdb")
+ if couchdb.empty?
+ version = 'n/a'
+ else
+ version = couchdb
+ end
+ else
+ # bigcouch is currently only available in one version (0.4.2),
+ # which includes couchdb 1.1.1
+ version = '1.1.1'
+ end
+ return version
+end
+
+Facter.add(:couchdb_version) do
+ setcode do
+ couchdb_version
+ end
+end
diff --git a/puppet/modules/couchdb/lib/puppet/parser/functions/couchdblookup.rb b/puppet/modules/couchdb/lib/puppet/parser/functions/couchdblookup.rb
new file mode 100644
index 00000000..b9067d2a
--- /dev/null
+++ b/puppet/modules/couchdb/lib/puppet/parser/functions/couchdblookup.rb
@@ -0,0 +1,55 @@
+#
+# A basic function to retrieve data in couchdb
+#
+
+
+module Puppet::Parser::Functions
+ newfunction(:couchdblookup, :type => :rvalue) do |args|
+ require 'json'
+ require 'open-uri'
+
+ raise Puppet::ParseError, ("couchdblookup(): wrong number of arguments (#{args.length}; must be 2 or 3)") unless args.length.between?(2, 3)
+
+ url = args[0]
+ key = args[1]
+ default = args[2] if args.length >= 3
+
+ begin
+ json = JSON.parse(open(URI.parse(url)).read)
+ rescue OpenURI::HTTPError => error
+ raise Puppet::ParseError, "couchdblookup(): fetching URL #{url} failed with status '#{error.message}'"
+ rescue Timeout::Error => error
+ raise Puppet::ParseError, "couchdblookup(): connection to couchdb server timed out: '#{error.message}'"
+ rescue Errno::ECONNREFUSED => error
+ raise Puppet::ParseError, "couchdblookup(): connection to couchdb server failed: '#{error.message}'"
+ rescue JSON::ParserError => error
+ raise Puppet::ParseError, "couchdblookup(): failed to parse JSON received from couchdb: '#{error.message}'"
+ rescue StandardError => error
+ raise Puppet::ParseError, "couchdblookup(): something unexpected happened: '#{error.inspect}'"
+ end
+
+ result = nil
+
+ if json.has_key?("rows")
+
+ if json['rows'].length > 1
+ arr = json['rows'].collect do |x|
+ x[key] if x.is_a?(Hash) and x.has_key?(key)
+ end
+ arr.compact!
+ result = arr unless arr.empty?
+
+ elsif json['rows'].length == 1
+ hash = json['rows'].pop
+ result = hash[key] if hash.is_a?(Hash)
+ end
+
+ elsif json.has_key?(key)
+ result = json[key]
+ end
+
+ result or default or raise Puppet::ParseError, "couchdblookup(): key '#{key}' not found in JSON object !"
+
+ end
+end
+
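An illustrative call from a manifest, fetching a key from a CouchDB document and falling back to a default; the URL and key names are made up:

    $quota = couchdblookup('http://127.0.0.1:5984/config/quotas', 'default_quota', '100MB')
    notify { "default quota is ${quota}": }
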
diff --git a/puppet/modules/couchdb/lib/puppet/parser/functions/pbkdf2.rb b/puppet/modules/couchdb/lib/puppet/parser/functions/pbkdf2.rb
new file mode 100644
index 00000000..46400c9c
--- /dev/null
+++ b/puppet/modules/couchdb/lib/puppet/parser/functions/pbkdf2.rb
@@ -0,0 +1,62 @@
+#
+# pbkdf2.rb
+#
+
+module Puppet::Parser::Functions
+ newfunction(:pbkdf2, :type => :rvalue, :doc => <<-EOS
+This converts a password and a salt (and optional iterations and keylength
+parameters) to a hash containing the salted SHA1 password hash, salt,
+iterations and keylength.
+pbkdf2 is used, for example, for couchdb passwords since v1.3.
+
+Example usage:
+ $pbkdf2 = pbkdf2($::couchdb::admin_pw, $::couchdb::admin_salt)
+ $sha1 = $pbkdf2['sha1']
+EOS
+ ) do |arguments|
+ require 'openssl'
+ require 'base64'
+
+ raise(Puppet::ParseError, "pbkdf2(): Wrong number of arguments " +
+ "passed (#{arguments.size} but we require at least 2)") if arguments.size < 2
+
+ unless arguments.is_a?(Array)
+ raise(Puppet::ParseError, 'pbkdf2(): Requires an ' +
+ "Array argument, you passed: #{arguments.class}")
+ end
+
+ password = arguments[0]
+ salt = arguments[1]
+
+ if arguments.size > 2
+ iterations = arguments[2].to_i
+ else
+ iterations = 1000
+ end
+
+ if arguments.size > 3
+ keylength = arguments[3].to_i
+ else
+ keylength = 20
+ end
+
+ pbkdf2 = OpenSSL::PKCS5::pbkdf2_hmac_sha1(
+ password,
+ salt,
+ iterations,
+ keylength
+ )
+
+ return_hash = Hash.new()
+ # return hex encoded string
+ return_hash['sha1'] = pbkdf2.unpack('H*')[0]
+ return_hash['password'] = password
+ return_hash['salt'] = salt
+ return_hash['iterations'] = iterations
+ return_hash['keylength'] = keylength
+
+ return return_hash
+ end
+end
+
+# vim: set ts=2 sw=2 et :
diff --git a/puppet/modules/couchdb/manifests/add_user.pp b/puppet/modules/couchdb/manifests/add_user.pp
new file mode 100644
index 00000000..29c6a8c8
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/add_user.pp
@@ -0,0 +1,39 @@
+define couchdb::add_user ( $roles, $pw, $salt = '' ) {
+ # Couchdb < 1.2 needs a pre-hashed pw and salt
+ # If you provide a salt, couchdb::add_user will assume that
+ # $pw is prehashed and pass both parameters to couchdb::update
+ # If $salt is empty, couchdb::add_user will assume that the pw
+ # is plaintext and will pass it to couchdb::update
+
+ if $::couchdb::bigcouch == true {
+ $port = 5986
+ } else {
+ $port = 5984
+ }
+
+ if $salt == '' {
+ # unhashed, plaintext pw, no salt. For couchdb >= 1.2
+ $data = "{\"type\": \"user\", \"name\": \"${name}\", \"roles\": ${roles}, \"password\": \"${pw}\"}"
+ } else {
+ # prehashed pw with salt, for couchdb < 1.2
+ # salt and encrypt pw
+ # str_and_salt2sha1 is a function from leap's stdlib module
+ $pw_and_salt = [ $pw, $salt ]
+ $sha = str_and_salt2sha1($pw_and_salt)
+ $data = "{\"type\": \"user\", \"name\": \"${name}\", \"roles\": ${roles}, \"password_sha\": \"${sha}\", \"salt\": \"${salt}\"}"
+ }
+
+ # update the user with the given password unless they already work
+ couchdb::document { "update_user_${name}":
+ host => "127.0.0.1:${port}",
+ db => '_users',
+ id => "org.couchdb.user:${name}",
+ data => $data
+ }
+
+ couchdb::query::setup { $name:
+ user => $name,
+ pw => $pw,
+ }
+
+}
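A minimal usage sketch for couchdb >= 1.2 (plaintext password, no salt); the user name, roles and password are placeholders:

    couchdb::add_user { 'webapp':
      roles => '["certs"]',
      pw    => 'sssshhh',
    }
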
diff --git a/puppet/modules/couchdb/manifests/backup.pp b/puppet/modules/couchdb/manifests/backup.pp
new file mode 100644
index 00000000..a477b5b1
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/backup.pp
@@ -0,0 +1,51 @@
+# configure backup using couchdb-backup.py
+class couchdb::backup {
+
+ include couchdb::params
+
+ # used in ERB templates
+ $bind_address = $couchdb::params::bind_address
+ $port = $couchdb::params::port
+ $backupdir = $couchdb::params::backupdir
+
+ file { $couchdb::params::backupdir:
+ ensure => directory,
+ mode => '0755',
+ require => Package['couchdb'],
+ }
+
+ file { '/usr/local/sbin/couchdb-backup.py':
+ ensure => present,
+ owner => root,
+ group => root,
+ mode => '0755',
+ content => template('couchdb/couchdb-backup.py.erb'),
+ require => File[$couchdb::params::backupdir],
+ }
+
+ cron { 'couchdb-backup':
+ command => '/usr/local/sbin/couchdb-backup.py 2> /dev/null',
+ hour => 3,
+ minute => 0,
+ require => File['/usr/local/sbin/couchdb-backup.py'],
+ }
+
+ case $::operatingsystem {
+ /Debian|Ubuntu/: {
+ # note: python-couchdb >= 0.8 required, which is found in debian wheezy.
+ ensure_packages (['python-couchdb', 'python-simplejson'], {
+ before => File['/usr/local/sbin/couchdb-backup.py']
+ })
+ }
+ /RedHat|CentOS/: {
+ exec {'install python-couchdb using easy_install':
+ command => 'easy_install http://pypi.python.org/packages/2.6/C/CouchDB/CouchDB-0.8-py2.6.egg',
+ creates => '/usr/lib/python2.6/site-packages/CouchDB-0.8-py2.6.egg',
+ }
+ }
+ default: {
+ err('This module has not been written to support your operating system')
+ }
+ }
+
+}
diff --git a/puppet/modules/couchdb/manifests/base.pp b/puppet/modules/couchdb/manifests/base.pp
new file mode 100644
index 00000000..6c7bf25f
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/base.pp
@@ -0,0 +1,124 @@
+# configure couchdb
+class couchdb::base {
+
+ if $::couchdb::bigcouch == true {
+ $couchdb_user = 'bigcouch'
+ include couchdb::bigcouch
+ } else {
+ $couchdb_user = 'couchdb'
+ }
+
+ # we use package{} here because bigcouch.pp overwrites it and
+ # this won't work with ensure_packages()
+ package {'couchdb':
+ ensure => installed
+ }
+
+ service { 'couchdb':
+ ensure => running,
+ hasstatus => true,
+ enable => true,
+ require => Package['couchdb']
+ }
+
+ # todo: make host/port configurable
+ exec { 'wait_for_couchdb':
+ command => 'wget --retry-connrefused --tries 10 --quiet "http://127.0.0.1:5984" -O /dev/null',
+ require => Service['couchdb']
+ }
+
+
+ # couchrest gem is required for couch-doc-update script,
+ # and it needs the ruby-dev package installed to build
+
+ if versioncmp($::operatingsystemrelease, '8') < 0 {
+ $couchrest_version = '1.2'
+ }
+ else {
+ # couchrest v1.2.1 doesn't build with default debian jessie rake version
+ # shipped as debian package (10.3.2)
+ # see https://leap.se/code/issues/7754
+ $couchrest_version = '1.2.0'
+ }
+
+ ensure_packages('ruby-dev')
+ ensure_packages('couchrest', {
+ provider => 'gem',
+ ensure => $couchrest_version,
+ require => Package['ruby-dev']
+ })
+
+ File['/usr/local/bin/couch-doc-update'] -> Couchdb::Update <| |>
+ File['/usr/local/bin/couch-doc-diff'] -> Couchdb::Update <| |>
+
+ Couchdb::Update <| |> -> Couchdb::Document <| |>
+
+ file {
+ '/usr/local/bin/couch-doc-update':
+ source => 'puppet:///modules/couchdb/couch-doc-update',
+ mode => '0755',
+ owner => 'root',
+ group => 'root',
+ require => Package['couchrest'];
+
+ '/usr/local/bin/couch-doc-diff':
+ source => 'puppet:///modules/couchdb/couch-doc-diff',
+ mode => '0755',
+ owner => 'root',
+ group => 'root',
+ require => Package['couchrest'];
+
+ '/etc/couchdb/local.ini':
+ source => [ "puppet:///modules/site_couchdb/${::fqdn}/local.ini",
+ 'puppet:///modules/site_couchdb/local.ini',
+ 'puppet:///modules/couchdb/local.ini' ],
+ notify => Service[couchdb],
+ owner => $couchdb_user,
+ group => $couchdb_user,
+ mode => '0660',
+ require => Package['couchdb'];
+
+ '/etc/couchdb/local.d':
+ ensure => directory,
+ require => Package['couchdb'];
+ }
+
+ $alg = $::couchdb::pwhash_alg
+ $salt = $::couchdb::admin_salt
+ case $alg {
+ 'sha1': {
+ # str_and_salt2sha1 is a function from leap's stdlib module
+ $pw_and_salt = [ $::couchdb::admin_pw, $salt ]
+ $sha1 = str_and_salt2sha1($pw_and_salt)
+ $admin_hash = "-hashed-${sha1},${salt}"
+ }
+ 'pbkdf2': {
+ $pbkdf2 = pbkdf2($::couchdb::admin_pw, $::couchdb::admin_salt, 10)
+ $sha1 = $pbkdf2['sha1']
+ $admin_hash = "-pbkdf2-${sha1},${salt},10"
+ }
+ default: { fail ("Unknown fact couchdb_pwhash_alg ${::couchdb_pwhash_alg} - Exiting.") }
+ }
+
+ file { '/etc/couchdb/local.d/admin.ini':
+ content => template('couchdb/admin.ini.erb'),
+ mode => '0600',
+ owner => $couchdb_user,
+ group => $couchdb_user,
+ notify => Service[couchdb],
+ require => File['/etc/couchdb/local.d'];
+ }
+
+ case $::couchdb::bigcouch {
+ true: { $restart_command = '/etc/init.d/bigcouch restart; sleep 6' }
+ default: { $restart_command = '/etc/init.d/couchdb restart; sleep 6' }
+ }
+
+ exec { 'couchdb_restart':
+ command => $restart_command,
+ path => ['/bin', '/usr/bin',],
+ subscribe => File['/etc/couchdb/local.d/admin.ini',
+ '/etc/couchdb/local.ini'],
+ refreshonly => true
+ }
+}
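For reference, the [admins] entry rendered by the admin.ini template takes one of the two forms built in the case statement above; the values below are illustrative placeholders, not real hashes or salts:

    admin = -hashed-<sha1-of-password-and-salt>,<salt>
    admin = -pbkdf2-<derived-key>,<salt>,10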
diff --git a/puppet/modules/couchdb/manifests/bigcouch.pp b/puppet/modules/couchdb/manifests/bigcouch.pp
new file mode 100644
index 00000000..a97411bf
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/bigcouch.pp
@@ -0,0 +1,51 @@
+class couchdb::bigcouch inherits couchdb::base {
+
+ file {
+ '/opt/bigcouch':
+ ensure => directory,
+ mode => '0755';
+
+ '/etc/couchdb':
+ ensure => directory,
+ mode => '0755',
+ before => Package['couchdb'];
+
+ '/opt/bigcouch/etc':
+ ensure => link,
+ target => '/etc/couchdb',
+ before => Package['couchdb'];
+ }
+
+  # there's no bigcouch in the official debian repo, you need
+  # to set up a repository for that. You can use class
+  # couchdb::bigcouch::package::cloudant for unauthenticated 0.4.0 packages,
+  # or site_apt::leap_repo from the leap_platform repository
+  # for signed 0.4.2 packages
+
+ Package['couchdb'] {
+ name => 'bigcouch'
+ }
+
+ file { '/opt/bigcouch/etc/vm.args':
+ content => template('couchdb/bigcouch/vm.args'),
+ mode => '0640',
+ owner => 'bigcouch',
+ group => 'bigcouch',
+ require => Package['couchdb'],
+ notify => Service[couchdb]
+ }
+
+ file { '/opt/bigcouch/etc/default.ini':
+ content => template('couchdb/bigcouch/default.ini'),
+ mode => '0640',
+ owner => 'bigcouch',
+ group => 'bigcouch',
+ require => Package['couchdb'],
+ notify => Service[couchdb]
+ }
+
+ Service['couchdb'] {
+ name => 'bigcouch'
+ }
+
+}
diff --git a/puppet/modules/couchdb/manifests/bigcouch/add_node.pp b/puppet/modules/couchdb/manifests/bigcouch/add_node.pp
new file mode 100644
index 00000000..ed9db94b
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/bigcouch/add_node.pp
@@ -0,0 +1,8 @@
+define couchdb::bigcouch::add_node {
+
+ couchdb::bigcouch::document { "add_${name}":
+ db => 'nodes',
+ id => "bigcouch@${name}",
+ ensure => 'present'
+ }
+}
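A minimal usage sketch for this define; the node hostname is hypothetical:

    couchdb::bigcouch::add_node { 'couch2.example.org': }

This registers a `bigcouch@couch2.example.org` document in the `nodes` database on the local admin port.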
diff --git a/puppet/modules/couchdb/manifests/bigcouch/debian.pp b/puppet/modules/couchdb/manifests/bigcouch/debian.pp
new file mode 100644
index 00000000..645c6da8
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/bigcouch/debian.pp
@@ -0,0 +1,11 @@
+class couchdb::bigcouch::debian inherits couchdb::debian {
+
+ File['/etc/init.d/couchdb'] {
+ ensure => absent
+ }
+
+ file {'/etc/init.d/bigcouch':
+ ensure => link,
+ target => '/usr/bin/sv'
+ }
+}
diff --git a/puppet/modules/couchdb/manifests/bigcouch/document.pp b/puppet/modules/couchdb/manifests/bigcouch/document.pp
new file mode 100644
index 00000000..13f4ac17
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/bigcouch/document.pp
@@ -0,0 +1,14 @@
+define couchdb::bigcouch::document (
+ $db,
+ $id,
+ $host = '127.0.0.1:5986',
+ $data ='{}',
+ $ensure ='content') {
+ couchdb::document { $name:
+ ensure => $ensure,
+ host => $host,
+ db => $db,
+ id => $id,
+ data => $data
+ }
+}
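A hedged usage sketch; the define simply wraps couchdb::document against the local bigcouch admin port (5986). The db and id values here are made up:

    couchdb::bigcouch::document { 'remove_couch3.example.org':
      db     => 'nodes',
      id     => 'bigcouch@couch3.example.org',
      ensure => 'absent',
    }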
diff --git a/puppet/modules/couchdb/manifests/bigcouch/package/cloudant.pp b/puppet/modules/couchdb/manifests/bigcouch/package/cloudant.pp
new file mode 100644
index 00000000..cfdcf10c
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/bigcouch/package/cloudant.pp
@@ -0,0 +1,35 @@
+class couchdb::bigcouch::package::cloudant (
+ $ensure = 'present'
+) {
+
+ # cloudant's signing key can be fetched from
+ # http://packages.cloudant.com/KEYS, please use the apt module to
+ # distribute it on your servers after verifying its fingerprint
+
+  # cloudant's wheezy repo will fail because in their Release file
+  # (http://packages.cloudant.com/debian/dists/wheezy/Release) they
+  # wrongly mark the packages as squeeze,
+  # so we use their squeeze repo here
+ apt::sources_list {'bigcouch-cloudant.list':
+ ensure => $ensure,
+ content => 'deb http://packages.cloudant.com/debian squeeze main'
+ }
+
+  # right now, cloudant only provides authenticated bigcouch 0.4.2 packages
+  # for squeeze, therefore we need to allow the installation of the
+  # dependencies libicu44 and libssl0.9.8 from squeeze
+
+ if $::lsbdistcodename == 'wheezy' {
+ apt::sources_list {'squeeze.list':
+ ensure => $ensure,
+ content => 'deb http://http.debian.net/debian squeeze main
+deb http://security.debian.org/ squeeze/updates main
+' }
+ apt::preferences_snippet { 'bigcouch_squeeze_deps':
+ ensure => $ensure,
+ package => 'libicu44 libssl0.9.8',
+ priority => '980',
+ pin => 'release o=Debian,n=squeeze'
+ }
+ }
+}
diff --git a/puppet/modules/couchdb/manifests/create_db.pp b/puppet/modules/couchdb/manifests/create_db.pp
new file mode 100644
index 00000000..8a8d1144
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/create_db.pp
@@ -0,0 +1,21 @@
+define couchdb::create_db (
+ $host='127.0.0.1:5984',
+ $admins="{\"names\": [], \"roles\": [] }",
+ $members="{\"names\": [], \"roles\": [] }" )
+{
+
+ couchdb::query { "create_db_${name}":
+ cmd => 'PUT',
+ host => $host,
+ path => $name,
+ unless => "/usr/bin/curl -s -f --netrc-file /etc/couchdb/couchdb.netrc ${host}/${name}"
+ }
+
+ couchdb::document { "${name}_security":
+ db => $name,
+ id => '_security',
+ host => $host,
+ data => "{ \"admins\": ${admins}, \"members\": ${members} }",
+ require => Couchdb::Query["create_db_${name}"]
+ }
+}
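A usage sketch under assumed names (the `tokens` database and `tokens_rw` role are hypothetical); it creates the database if missing and then writes its `_security` document:

    couchdb::create_db { 'tokens':
      members => '{ "names": [], "roles": ["tokens_rw"] }',
    }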
diff --git a/puppet/modules/couchdb/manifests/debian.pp b/puppet/modules/couchdb/manifests/debian.pp
new file mode 100644
index 00000000..b83b227a
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/debian.pp
@@ -0,0 +1,15 @@
+# installs initscript and dependent packages on debian
+class couchdb::debian inherits couchdb::base {
+
+ ensure_packages('libjs-jquery')
+
+ file { '/etc/init.d/couchdb':
+ source => [
+ 'puppet:///modules/site_couchdb/Debian/couchdb',
+ 'puppet:///modules/couchdb/Debian/couchdb' ],
+ mode => '0755',
+ owner => 'root',
+ group => 'root',
+ require => Package['couchdb']
+ }
+}
diff --git a/puppet/modules/couchdb/manifests/deploy_config.pp b/puppet/modules/couchdb/manifests/deploy_config.pp
new file mode 100644
index 00000000..2ce1fd20
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/deploy_config.pp
@@ -0,0 +1,12 @@
+class couchdb::deploy_config {
+
+ file { '/etc/couchdb/local.ini':
+ source => [ "puppet:///modules/site_couchdb/${::fqdn}/local.ini",
+ 'puppet:///modules/site_couchdb/local.ini',
+ 'puppet:///modules/couchdb/local.ini' ],
+ notify => Service[couchdb],
+ owner => couchdb,
+ group => couchdb,
+ mode => '0660'
+ }
+}
diff --git a/puppet/modules/couchdb/manifests/document.pp b/puppet/modules/couchdb/manifests/document.pp
new file mode 100644
index 00000000..6180474b
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/document.pp
@@ -0,0 +1,47 @@
+# Usage:
+# couchdb::document { name:
+#   db => "database", id => "document id",
+#   data => "content",
+#   ensure => {absent,present,*content*}
+# }
+#
+define couchdb::document(
+ $db,
+ $id,
+ $host = '127.0.0.1:5984',
+ $data = '{}',
+ $netrc = '/etc/couchdb/couchdb.netrc',
+ $ensure = 'content') {
+
+ $url = "${host}/${db}/${id}"
+
+ case $ensure {
+ default: { err ( "unknown ensure value '${ensure}'" ) }
+ content: {
+ exec { "couch-doc-update --netrc-file ${netrc} --host ${host} --db ${db} --id ${id} --data \'${data}\'":
+ require => Exec['wait_for_couchdb'],
+ unless => "couch-doc-diff $url '$data'"
+ }
+ }
+
+ present: {
+ couchdb::query { "create_${db}_${id}":
+ cmd => 'PUT',
+ host => $host,
+ path => "${db}/${id}",
+ require => Exec['wait_for_couchdb'],
+ unless => "/usr/bin/curl -s -f --netrc-file ${netrc} ${url}"
+ }
+ }
+
+ absent: {
+ couchdb::query { "destroy_${db}_${id}":
+ cmd => 'DELETE',
+ host => $host,
+ path => "${db}/${id}",
+ require => Exec['wait_for_couchdb'],
+ unless => "/usr/bin/curl -s -f --netrc-file ${netrc} ${url}"
+ }
+ }
+ }
+}
diff --git a/puppet/modules/couchdb/manifests/init.pp b/puppet/modules/couchdb/manifests/init.pp
new file mode 100644
index 00000000..12598ba0
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/init.pp
@@ -0,0 +1,31 @@
+# initial couchdb class
+class couchdb (
+ $admin_pw,
+ $admin_salt = '',
+ $bigcouch = false,
+ $bigcouch_cookie = '',
+ $ednp_port = '9001',
+ $chttpd_bind_address = '0.0.0.0',
+ $pwhash_alg = 'pbkdf2' )
+{
+
+ # stdlib is needed i.e. for ensure_packages()
+ include ::stdlib
+
+ case $::operatingsystem {
+ Debian: {
+ case $::lsbdistcodename {
+ /lenny|squeeze|wheezy|jessie/: {
+ include couchdb::debian
+ if $bigcouch == true {
+ include couchdb::bigcouch::debian
+ }
+ }
+ default: { fail "couchdb not available for ${::operatingsystem}/${::lsbdistcodename}" }
+ }
+ }
+ RedHat: { include couchdb::redhat }
+ }
+
+ ensure_packages('curl')
+}
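A minimal class declaration sketch; the password and salt are placeholders:

    class { 'couchdb':
      admin_pw   => 'example-admin-password',
      admin_salt => 'example-salt',
      bigcouch   => false,
      pwhash_alg => 'pbkdf2',
    }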
diff --git a/puppet/modules/couchdb/manifests/mirror_db.pp b/puppet/modules/couchdb/manifests/mirror_db.pp
new file mode 100644
index 00000000..b07b6749
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/mirror_db.pp
@@ -0,0 +1,21 @@
+define couchdb::mirror_db (
+ $host='127.0.0.1:5984',
+ $from='',
+ $to='',
+ $user='replication',
+ $role='replication'
+ )
+{
+ $source = "${from}/${name}"
+ if $to == '' { $target = $name }
+ else { $target = "${to}/${name}" }
+
+ couchdb::document { "${name}_replication":
+ db => "_replicator",
+ id => "${name}_replication",
+ netrc => "/etc/couchdb/couchdb-${user}.netrc",
+ host => $host,
+ data => "{ \"source\": \"${source}\", \"target\": \"${target}\", \"continuous\": true, \"user_ctx\": { \"name\": \"${user}\", \"roles\": [\"${role}\"] }, \"owner\": \"${user}\" }",
+ require => Couchdb::Query["create_db_${name}"]
+ }
+}
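A hedged sketch of mirroring one database from a remote master (the hostname is hypothetical); it writes a continuous replication document into `_replicator` and expects a matching couchdb::create_db resource for the same database name:

    couchdb::mirror_db { 'identities':
      from => 'http://couch-master.example.org:5984',
      user => 'replication',
      role => 'replication',
    }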
diff --git a/puppet/modules/couchdb/manifests/params.pp b/puppet/modules/couchdb/manifests/params.pp
new file mode 100644
index 00000000..02d5f02e
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/params.pp
@@ -0,0 +1,23 @@
+class couchdb::params {
+
+ $bind_address = $::couchdb_bind_address ? {
+ '' => '127.0.0.1',
+ default => $::couchdb_bind_address,
+ }
+
+ $port = $::couchdb_port ? {
+ '' => '5984',
+ default => $::couchdb_port,
+ }
+
+ $backupdir = $::couchdb_backupdir ? {
+ '' => '/var/backups/couchdb',
+ default => $::couchdb_backupdir,
+ }
+
+ $cert_path = $::couchdb_cert_path ? {
+ "" => '/etc/couchdb',
+ default => $::couchdb_cert_path,
+ }
+
+}
diff --git a/puppet/modules/couchdb/manifests/query.pp b/puppet/modules/couchdb/manifests/query.pp
new file mode 100644
index 00000000..9507ca1e
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/query.pp
@@ -0,0 +1,12 @@
+define couchdb::query (
+ $cmd, $path,
+ $netrc='/etc/couchdb/couchdb.netrc',
+ $host='127.0.0.1:5984',
+ $data = '{}',
+ $unless = undef) {
+
+ exec { "/usr/bin/curl -s --netrc-file ${netrc} -X ${cmd} ${host}/${path} --data \'${data}\'":
+ require => [ Package['curl'], Exec['wait_for_couchdb'] ],
+ unless => $unless
+ }
+}
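For illustration, a sketch that mirrors how create_db.pp uses this define; the `sessions` database name is an assumption:

    couchdb::query { 'create_db_sessions':
      cmd    => 'PUT',
      path   => 'sessions',
      unless => '/usr/bin/curl -s -f --netrc-file /etc/couchdb/couchdb.netrc 127.0.0.1:5984/sessions',
    }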
diff --git a/puppet/modules/couchdb/manifests/query/setup.pp b/puppet/modules/couchdb/manifests/query/setup.pp
new file mode 100644
index 00000000..451eb536
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/query/setup.pp
@@ -0,0 +1,10 @@
+define couchdb::query::setup ($user, $pw, $host='127.0.0.1') {
+
+ file { "/etc/couchdb/couchdb-${user}.netrc":
+ content => "machine ${host} login ${user} password ${pw}",
+ mode => '0600',
+ owner => $::couchdb::base::couchdb_user,
+ group => $::couchdb::base::couchdb_user,
+ require => Package['couchdb'];
+ }
+}
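A usage sketch; it drops a netrc file so later queries by that user can authenticate (the password is a placeholder):

    couchdb::query::setup { 'replication_netrc':
      user => 'replication',
      pw   => 'example-password',
    }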
diff --git a/puppet/modules/couchdb/manifests/redhat.pp b/puppet/modules/couchdb/manifests/redhat.pp
new file mode 100644
index 00000000..defa0a94
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/redhat.pp
@@ -0,0 +1 @@
+class couchdb::redhat inherits couchdb::base {}
diff --git a/puppet/modules/couchdb/manifests/ssl/deploy_cert.pp b/puppet/modules/couchdb/manifests/ssl/deploy_cert.pp
new file mode 100644
index 00000000..d3e743f1
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/ssl/deploy_cert.pp
@@ -0,0 +1,28 @@
+define couchdb::ssl::deploy_cert ($cert, $key) {
+
+ include couchdb::params
+
+ file { 'couchdb_cert_directory':
+ ensure => 'directory',
+ path => $couchdb::params::cert_path,
+ mode => '0600',
+ owner => 'couchdb',
+ group => 'couchdb';
+ }
+
+ file { 'couchdb_cert':
+ path => "${couchdb::params::cert_path}/server_cert.pem",
+ mode => '0644',
+ owner => 'couchdb',
+ group => 'couchdb',
+ content => $cert
+ }
+
+ file { 'couchdb_key':
+ path => "${couchdb::params::cert_path}/server_key.pem",
+ mode => '0600',
+ owner => 'couchdb',
+ group => 'couchdb',
+ content => $key
+ }
+}
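A sketch of deploying an already-generated certificate pair; the hiera keys used here are assumptions:

    couchdb::ssl::deploy_cert { 'couchdb':
      cert => hiera('couchdb_ssl_cert'),
      key  => hiera('couchdb_ssl_key'),
    }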
diff --git a/puppet/modules/couchdb/manifests/ssl/generate_cert.pp b/puppet/modules/couchdb/manifests/ssl/generate_cert.pp
new file mode 100644
index 00000000..a443250e
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/ssl/generate_cert.pp
@@ -0,0 +1,25 @@
+# configures cert for ssl access
+class couchdb::ssl::generate_cert {
+
+ ensure_packages('openssl')
+
+ file { $couchdb::cert_path:
+ ensure => 'directory',
+ mode => '0600',
+ owner => 'couchdb',
+ group => 'couchdb';
+ }
+
+exec { 'generate-certs':
+ command => "/usr/bin/openssl req -new -inform PEM -x509 -nodes -days 150 -subj \
+'/C=ZZ/ST=AutoSign/O=AutoSign/localityName=AutoSign/commonName=${::hostname}/organizationalUnitName=AutoSign/emailAddress=AutoSign/' \
+-newkey rsa:2048 -out ${couchdb::cert_path}/couchdb_cert.pem -keyout ${couchdb::cert_path}/couchdb_key.pem",
+ unless => "/usr/bin/test -f ${couchdb::cert_path}/couchdb_cert.pem &&
+/usr/bin/test -f ${couchdb::params::cert_path}/couchdb_key.pem",
+ require => [
+ File[$couchdb::params::cert_path],
+ Exec['make-install']
+ ],
+ notify => Service['couchdb'],
+ }
+}
diff --git a/puppet/modules/couchdb/manifests/update.pp b/puppet/modules/couchdb/manifests/update.pp
new file mode 100644
index 00000000..b1dba84c
--- /dev/null
+++ b/puppet/modules/couchdb/manifests/update.pp
@@ -0,0 +1,12 @@
+define couchdb::update (
+ $db,
+ $id,
+ $data,
+ $host='127.0.0.1:5984',
+ $unless=undef) {
+
+ exec { "couch-doc-update --host ${host} --db ${db} --id ${id} --data \'${data}\'":
+ require => Exec['wait_for_couchdb'],
+ unless => $unless
+ }
+}
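A hedged example of an in-place document update; the db, id and data values are illustrative, and the data format is whatever the couch-doc-update script expects:

    couchdb::update { 'set_replication_role':
      db   => '_users',
      id   => 'org.couchdb.user:replication',
      data => '{"roles": ["replication"]}',
    }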
diff --git a/puppet/modules/couchdb/spec/classes/couchdb_spec.rb b/puppet/modules/couchdb/spec/classes/couchdb_spec.rb
new file mode 100644
index 00000000..e8e4174e
--- /dev/null
+++ b/puppet/modules/couchdb/spec/classes/couchdb_spec.rb
@@ -0,0 +1,35 @@
+require 'spec_helper'
+
+describe 'couchdb' do
+ context 'given it is a wheezy system' do
+ let(:params) { {:admin_pw => 'foo'} }
+ let(:facts) do
+ {
+ :operatingsystemrelease => '7',
+ :operatingsystem => 'Debian',
+ :lsbdistcodename => 'wheezy',
+ }
+ end
+ it "should install couchrest 1.2" do
+ should contain_package('couchrest').with({
+ 'ensure'=> '1.2',
+ })
+ end
+ end
+ context 'given it is a jessie system' do
+ let(:params) { {:admin_pw => 'foo'} }
+ let(:facts) do
+ {
+ :operatingsystemrelease => '8',
+ :operatingsystem => 'Debian',
+ :lsbdistcodename => 'jessie',
+ }
+ end
+    it "should install couchrest 1.2.0" do
+      should contain_package('couchrest').with({
+        'ensure'=> '1.2.0',
+ })
+ end
+ end
+end
+
diff --git a/puppet/modules/couchdb/spec/fixtures/manifests/site.pp b/puppet/modules/couchdb/spec/fixtures/manifests/site.pp
new file mode 100644
index 00000000..a959fb77
--- /dev/null
+++ b/puppet/modules/couchdb/spec/fixtures/manifests/site.pp
@@ -0,0 +1,8 @@
+# set a default exec path
+# the logoutput exec parameter defaults to "on_error" in puppet 3,
+# but to "false" in puppet 2.7, so we need to set this globally here
+Exec {
+ logoutput => on_failure,
+ path => '/usr/bin:/usr/sbin/:/bin:/sbin:/usr/local/bin:/usr/local/sbin'
+}
+
diff --git a/puppet/modules/couchdb/spec/functions/versioncmp_spec.rb b/puppet/modules/couchdb/spec/functions/versioncmp_spec.rb
new file mode 100644
index 00000000..0a244275
--- /dev/null
+++ b/puppet/modules/couchdb/spec/functions/versioncmp_spec.rb
@@ -0,0 +1,9 @@
+require 'spec_helper'
+
+describe 'versioncmp' do
+ it { should run.with_params('7.2','8').and_return(-1) }
+ it { should run.with_params('7','8').and_return(-1) }
+ it { should run.with_params('8','8').and_return(0) }
+ it { should run.with_params('8.1','8').and_return(1) }
+end
+
diff --git a/puppet/modules/couchdb/spec/spec_helper.rb b/puppet/modules/couchdb/spec/spec_helper.rb
new file mode 100644
index 00000000..b55ede81
--- /dev/null
+++ b/puppet/modules/couchdb/spec/spec_helper.rb
@@ -0,0 +1,9 @@
+require 'rspec-puppet'
+
+fixture_path = File.expand_path(File.join(__FILE__, '..', 'fixtures'))
+
+RSpec.configure do |c|
+ c.module_path = File.join(fixture_path, 'modules')
+ c.manifest_dir = File.join(fixture_path, 'manifests')
+ c.environmentpath = File.join(Dir.pwd, 'spec')
+end
diff --git a/puppet/modules/couchdb/templates/admin.ini.erb b/puppet/modules/couchdb/templates/admin.ini.erb
new file mode 100644
index 00000000..479f8bfc
--- /dev/null
+++ b/puppet/modules/couchdb/templates/admin.ini.erb
@@ -0,0 +1,9 @@
+<%- require 'digest' -%>
+[admins]
+admin = <%= @admin_hash %>
+
+[couchdb]
+<%- # uuid uniquely identifies this couchdb instance. if not set, couchdb will set a random one
+ # but we want a stable one so that this config file doesn't change all the time.
+ # Md5 of hostname and ipaddress seems reasonable, but it could be based on anything. -%>
+uuid = <%= Digest::MD5.hexdigest(Facter.value("hostname") + Facter.value("ipaddress")) %>
diff --git a/puppet/modules/couchdb/templates/bigcouch/default.ini b/puppet/modules/couchdb/templates/bigcouch/default.ini
new file mode 100644
index 00000000..a315ddab
--- /dev/null
+++ b/puppet/modules/couchdb/templates/bigcouch/default.ini
@@ -0,0 +1,172 @@
+[couchdb]
+database_dir = /opt/bigcouch/var/lib
+view_index_dir = /opt/bigcouch/var/lib
+max_document_size = 67108864
+os_process_timeout = 5000
+max_dbs_open = 500
+delayed_commits = false
+
+[cluster]
+; Default number of shards for a new database
+q = 8
+; Default number of copies of each shard
+n = 3
+
+[chttpd]
+port = 5984
+docroot = /opt/bigcouch/share/www
+
+; Options for the MochiWeb HTTP server.
+;server_options = [{backlog, 128}, {acceptor_pool_size, 16}]
+
+; For more socket options, consult Erlang's module 'inet' man page.
+;socket_options = [{recbuf, 262144}, {sndbuf, 262144}, {nodelay, true}]
+
+bind_address = <%= scope.lookupvar('couchdb::chttpd_bind_address') %>
+
+[chttps]
+port = 6984
+
+; cert_file = /full/path/to/server_cert.pem
+; key_file = /full/path/to/server_key.pem
+; password = somepassword
+; also remember to enable the chttps daemon in [daemons] section.
+
+; set to true to validate peer certificates
+verify_ssl_certificates = false
+
+; Path to file containing PEM encoded CA certificates (trusted
+; certificates used for verifying a peer certificate). May be omitted if
+; you do not want to verify the peer.
+;cacert_file = /full/path/to/cacertf
+
+; The verification fun (optional) if not specified, the default
+; verification fun will be used.
+;verify_fun = {Module, VerifyFun}
+ssl_certificate_max_depth = 1
+
+[httpd]
+port = 5986
+bind_address = 127.0.0.1
+authentication_handlers = {couch_httpd_oauth, oauth_authentication_handler}, {couch_httpd_auth, cookie_authentication_handler}, {couch_httpd_auth, default_authentication_handler}
+default_handler = {couch_httpd_db, handle_request}
+secure_rewrites = true
+vhost_global_handlers = _utils, _uuids, _session, _oauth, _users
+allow_jsonp = false
+log_max_chunk_size = 1000000
+
+[ssl]
+port = 6984
+
+[log]
+file = /opt/bigcouch/var/log/bigcouch.log
+level = info
+include_sasl = true
+
+[couch_httpd_auth]
+authentication_db = _users
+authentication_redirect = /_utils/session.html
+require_valid_user = false
+timeout = 43200 ; (default to 12 hours) number of seconds before automatic logout
+auth_cache_size = 50 ; size is number of cache entries
+
+[query_servers]
+javascript = /opt/bigcouch/bin/couchjs /opt/bigcouch/share/couchjs/main.js
+
+[query_server_config]
+reduce_limit = true
+os_process_soft_limit = 100
+
+[daemons]
+view_manager={couch_view, start_link, []}
+external_manager={couch_external_manager, start_link, []}
+query_servers={couch_proc_manager, start_link, []}
+httpd={couch_httpd, start_link, []}
+stats_aggregator={couch_stats_aggregator, start, []}
+stats_collector={couch_stats_collector, start, []}
+uuids={couch_uuids, start, []}
+auth_cache={couch_auth_cache, start_link, []}
+replication_manager={couch_replication_manager, start_link, []}
+vhosts={couch_httpd_vhost, start_link, []}
+os_daemons={couch_os_daemons, start_link, []}
+; Uncomment next line to enable SSL daemon
+; chttpsd = {chttpd, start_link, [https]}
+
+[httpd_global_handlers]
+/ = {couch_httpd_misc_handlers, handle_welcome_req, <<"Welcome">>}
+favicon.ico = {couch_httpd_misc_handlers, handle_favicon_req, "/opt/bigcouch/share/www"}
+
+_utils = {couch_httpd_misc_handlers, handle_utils_dir_req, "/opt/bigcouch/share/www"}
+_all_dbs = {couch_httpd_misc_handlers, handle_all_dbs_req}
+_active_tasks = {couch_httpd_misc_handlers, handle_task_status_req}
+_config = {couch_httpd_misc_handlers, handle_config_req}
+_replicate = {couch_httpd_misc_handlers, handle_replicate_req}
+_uuids = {couch_httpd_misc_handlers, handle_uuids_req}
+_restart = {couch_httpd_misc_handlers, handle_restart_req}
+_stats = {couch_httpd_stats_handlers, handle_stats_req}
+_log = {couch_httpd_misc_handlers, handle_log_req}
+_session = {couch_httpd_auth, handle_session_req}
+_oauth = {couch_httpd_oauth, handle_oauth_req}
+_system = {chttpd_misc, handle_system_req}
+
+[httpd_db_handlers]
+_view_cleanup = {couch_httpd_db, handle_view_cleanup_req}
+_compact = {couch_httpd_db, handle_compact_req}
+_design = {couch_httpd_db, handle_design_req}
+_temp_view = {couch_httpd_view, handle_temp_view_req}
+_changes = {couch_httpd_db, handle_changes_req}
+
+[httpd_design_handlers]
+_view = {couch_httpd_view, handle_view_req}
+_show = {couch_httpd_show, handle_doc_show_req}
+_list = {couch_httpd_show, handle_view_list_req}
+_info = {couch_httpd_db, handle_design_info_req}
+_rewrite = {couch_httpd_rewrite, handle_rewrite_req}
+_update = {couch_httpd_show, handle_doc_update_req}
+
+; enable external as an httpd handler, then link it with commands here.
+; note, this api is still under consideration.
+; [external]
+; mykey = /path/to/mycommand
+
+; Here you can setup commands for CouchDB to manage
+; while it is alive. It will attempt to keep each command
+; alive if it exits.
+; [os_daemons]
+; some_daemon_name = /path/to/script -with args
+
+
+[uuids]
+; Known algorithms:
+; random - 128 bits of random awesome
+; All awesome, all the time.
+; sequential - monotonically increasing ids with random increments
+; First 26 hex characters are random. Last 6 increment in
+; random amounts until an overflow occurs. On overflow, the
+; random prefix is regenerated and the process starts over.
+; utc_random - Time since Jan 1, 1970 UTC with microseconds
+; First 14 characters are the time in hex. Last 18 are random.
+algorithm = sequential
+
+[stats]
+; rate is in milliseconds
+rate = 1000
+; sample intervals are in seconds
+samples = [0, 60, 300, 900]
+
+[attachments]
+compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to disable compression
+compressible_types = text/*, application/javascript, application/json, application/xml
+
+[replicator]
+db = _replicator
+; Maximum replication retry count can be a non-negative integer or "infinity".
+max_replication_retry_count = 10
+max_http_sessions = 20
+max_http_pipeline_size = 50
+; set to true to validate peer certificates
+verify_ssl_certificates = false
+; file containing a list of peer trusted certificates (PEM format)
+; ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
+; maximum peer certificate depth (must be set even if certificate validation is off)
+ssl_certificate_max_depth = 3
diff --git a/puppet/modules/couchdb/templates/bigcouch/vm.args b/puppet/modules/couchdb/templates/bigcouch/vm.args
new file mode 100644
index 00000000..4618a52c
--- /dev/null
+++ b/puppet/modules/couchdb/templates/bigcouch/vm.args
@@ -0,0 +1,32 @@
+# Each node in the system must have a unique name. A name can be short
+# (specified using -sname) or it can by fully qualified (-name). There can be
+# no communication between nodes running with the -sname flag and those running
+# with the -name flag.
+-name bigcouch
+
+# All nodes must share the same magic cookie for distributed Erlang to work.
+# Comment out this line if you synchronized the cookies by other means (using
+# the ~/.erlang.cookie file, for example).
+-setcookie <%= scope.lookupvar('couchdb::bigcouch_cookie') %>
+
+# Tell SASL not to log progress reports
+-sasl errlog_type error
+
+# Use kernel poll functionality if supported by emulator
++K true
+
+# Start a pool of asynchronous IO threads
++A 16
+
+# Comment this line out to enable the interactive Erlang shell on startup
++Bd -noinput
+
+# read config files
+# otherwise /etc/couchdb/local.d/admin.ini mysteriously wouldn't be read
+-couch_ini /etc/couchdb/default.ini /etc/couchdb/local.ini /etc/couchdb/local.d/admin.ini /etc/couchdb/default.ini /etc/couchdb/local.ini /etc/couchdb/local.d/admin.ini
+#
+
+# make firewalling easier, see
+# http://stackoverflow.com/questions/8459949/bigcouch-cluster-connection-issue#comment10467603_8463814
+
+-kernel inet_dist_listen_min <%= scope.lookupvar('couchdb::ednp_port') %> inet_dist_use_interface "{127,0,0,1}"
diff --git a/puppet/modules/couchdb/templates/couchdb-backup.py.erb b/puppet/modules/couchdb/templates/couchdb-backup.py.erb
new file mode 100644
index 00000000..c49df65b
--- /dev/null
+++ b/puppet/modules/couchdb/templates/couchdb-backup.py.erb
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+# file managed by puppet
+
+import os
+import gzip
+import tarfile
+import datetime
+import urllib2
+import simplejson
+import couchdb.tools.dump
+from os.path import join
+
+DB_URL="http://127.0.0.1:5984"
+DUMP_DIR="<%= backupdir %>"
+TODAY=datetime.datetime.today().strftime("%A").lower()
+
+ftar = os.path.join(DUMP_DIR,"%s.tar" % TODAY)
+tmp_ftar = os.path.join(DUMP_DIR,"_%s.tar" % TODAY)
+tar = tarfile.open(tmp_ftar, "w")
+
+databases = simplejson.load(urllib2.urlopen("%s/_all_dbs" % DB_URL))
+
+for db in databases:
+ db_file = os.path.join(DUMP_DIR,"%s.gz" % db)
+ f = gzip.open(db_file, 'wb')
+ couchdb.tools.dump.dump_db(os.path.join(DB_URL,db), output=f)
+ f.close()
+ tar.add(db_file,"%s.gz" % db)
+ os.remove(db_file)
+
+tar.close()
+os.rename(tmp_ftar,ftar)
diff --git a/puppet/modules/git/files/config/CentOS/git-daemon b/puppet/modules/git/files/config/CentOS/git-daemon
new file mode 100644
index 00000000..a9b208c2
--- /dev/null
+++ b/puppet/modules/git/files/config/CentOS/git-daemon
@@ -0,0 +1,26 @@
+# git-daemon config file
+
+# location of the lockfile
+#LOCKFILE=/var/lock/subsys/git-daemon
+
+# which directory to serve
+#GITDIR=/srv/git
+
+# do we serve vhosts?
+# setting this to yes assumes that $GITDIR contains
+# one subdirectory per vhost to serve,
+# each holding that vhost's repos.
+# for example:
+# - /srv/git/git.example.com
+# - /srv/git/git.example.org
+#GITVHOST=no
+
+# the user git-daemon should run with
+#GITUSER=nobody
+
+# options for the daemon
+#OPTIONS="--reuseaddr --verbose --detach"
+
+# location of the daemon
+#GITDAEMON=/usr/bin/git-daemon
+
diff --git a/puppet/modules/git/files/config/CentOS/git-daemon.vhosts b/puppet/modules/git/files/config/CentOS/git-daemon.vhosts
new file mode 100644
index 00000000..62bb9d4b
--- /dev/null
+++ b/puppet/modules/git/files/config/CentOS/git-daemon.vhosts
@@ -0,0 +1,27 @@
+# git-daemon config file
+
+# location of the lockfile
+#LOCKFILE=/var/lock/subsys/git-daemon
+
+# which directory to serve
+#GITDIR=/srv/git
+
+# do we serve vhosts?
+# setting this to yes assumes that $GITDIR contains
+# one subdirectory per vhost to serve,
+# each holding that vhost's repos.
+# for example:
+# - /srv/git/git.example.com
+# - /srv/git/git.example.org
+#GITVHOST=no
+GITVHOST=yes
+
+# the user git-daemon should run with
+#GITUSER=nobody
+
+# options for the daemon
+#OPTIONS="--reuseaddr --verbose --detach"
+
+# location of the daemon
+#GITDAEMON=/usr/bin/git-daemon
+
diff --git a/puppet/modules/git/files/config/Debian/git-daemon b/puppet/modules/git/files/config/Debian/git-daemon
new file mode 100644
index 00000000..b25e1e7f
--- /dev/null
+++ b/puppet/modules/git/files/config/Debian/git-daemon
@@ -0,0 +1,22 @@
+# Defaults for the git-daemon initscript
+
+# Set to yes to start git-daemon
+RUN=yes
+
+# Set to the user and group git-daemon should run as
+USER=nobody
+GROUP=nogroup
+
+# Set the base path and the directory where the repositories are.
+REPOSITORIES="/srv/git"
+
+# Provide a way to have custom setup.
+#
+# Note, when ADVANCED_OPTS is defined the REPOSITORIES setting is ignored,
+# so take good care to specify exactly what git-daemon has to do.
+#
+# Here is an example from the man page:
+#ADVANCED_OPTS="--verbose --export-all \
+# --interpolated-path=/pub/%IP/%D \
+# /pub/192.168.1.200/software \
+# /pub/10.10.220.23/software"
diff --git a/puppet/modules/git/files/init.d/CentOS/git-daemon b/puppet/modules/git/files/init.d/CentOS/git-daemon
new file mode 100644
index 00000000..aed20756
--- /dev/null
+++ b/puppet/modules/git/files/init.d/CentOS/git-daemon
@@ -0,0 +1,75 @@
+#!/bin/bash
+# puppet Init script for running the git-daemon
+#
+# Author: Marcel Haerry <mh+rpms(at)immerda.ch>
+#
+# chkconfig: - 98 02
+#
+# description: Enables the git-daemon to serve various directories. By default it serves /srv/git
+# processname: git-daemon
+# config: /etc/sysconfig/git-daemon
+
+PATH=/usr/bin:/sbin:/bin:/usr/sbin
+export PATH
+
+[ -f /etc/sysconfig/git-daemon ] && . /etc/sysconfig/git-daemon
+lockfile=${LOCKFILE-/var/lock/subsys/git-daemon}
+gitdir=${GITDIR-/srv/git}
+gitvhost=${GITVHOST-no}
+user=${GITUSER-nobody}
+options=${OPTIONS-"--reuseaddr --verbose --detach"}
+gitdaemon=${GITDAEMON-/usr/bin/git-daemon}
+RETVAL=0
+
+gitoptions="--user=${user} ${options}"
+if [ $gitvhost = yes ]; then
+ gitoptions="${gitoptions} --interpolated-path=${gitdir}/%H/%D"
+else
+ gitoptions="${gitoptions} --base-path=${gitdir}"
+fi
+
+# Source function library.
+. /etc/rc.d/init.d/functions
+
+start() {
+ echo -n $"Starting git-daemon: "
+ daemon $gitdaemon $gitoptions
+ RETVAL=$?
+ echo
+ [ $RETVAL = 0 ] && touch ${lockfile}
+ return $RETVAL
+}
+
+stop() {
+ echo -n $"Stopping git-daemon: "
+ killproc $gitdaemon
+ RETVAL=$?
+ echo
+ [ $RETVAL = 0 ] && rm -f ${lockfile}
+}
+
+restart() {
+ stop
+ start
+}
+
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ restart)
+ restart
+ ;;
+ status)
+ status $gitdaemon
+ RETVAL=$?
+ ;;
+ *)
+ echo $"Usage: $0 {start|stop|status|restart}"
+ exit 1
+esac
+
+exit $RETVAL
diff --git a/puppet/modules/git/files/init.d/Debian/git-daemon b/puppet/modules/git/files/init.d/Debian/git-daemon
new file mode 100644
index 00000000..ab57c4a1
--- /dev/null
+++ b/puppet/modules/git/files/init.d/Debian/git-daemon
@@ -0,0 +1,151 @@
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides: git-daemon
+# Required-Start: $network $remote_fs $syslog
+# Required-Stop: $network $remote_fs $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: git-daemon service
+# Description: git-daemon makes git repositories available via the git
+# protocol.
+### END INIT INFO
+
+# Author: Antonio Ospite <ospite@studenti.unina.it>
+#
+# Please remove the "Author" lines above and replace them
+# with your own name if you copy and modify this script.
+
+# Do NOT "set -e"
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/lib/git-core
+DESC="git-daemon service"
+NAME=git-daemon
+DAEMON=/usr/lib/git-core/$NAME
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Fallback options values, we use these when
+# the /etc/default/git-daemon file does not exist
+RUN=no
+USER=git
+GROUP=git
+REPOSITORIES="/srv/git/"
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# If ADVANCED_OPTS is empty, use a default setting
+if [ "x$ADVANCED_OPTS" = "x" ];
+then
+ ADVANCED_OPTS="--base-path=$REPOSITORIES $REPOSITORIES"
+fi
+
+DAEMON_ARGS="--syslog --reuseaddr \
+ --user=$USER --group=$GROUP \
+ $ADVANCED_OPTS"
+
+
+# Load the VERBOSE setting and other rcS variables
+. /lib/init/vars.sh
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.0-6) to ensure that this file is present.
+. /lib/lsb/init-functions
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+ # Return
+ # 0 if daemon has been started
+ # 1 if daemon was already running
+ # 2 if daemon could not be started
+ start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
+ || return 1
+ start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --background --make-pidfile -- \
+ $DAEMON_ARGS \
+ || return 2
+
+ return 0
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+ # Return
+ # 0 if daemon has been stopped
+ # 1 if daemon was already stopped
+ # 2 if daemon could not be stopped
+ # other if a failure occurred
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
+ RETVAL="$?"
+ [ "$RETVAL" = 2 ] && return 2
+ # Wait for children to finish too if this is a daemon that forks
+ # and if the daemon is only ever run from this initscript.
+ # If the above conditions are not satisfied then add some other code
+ # that waits for the process to drop all resources that could be
+ # needed by services started subsequently. A last resort is to
+ # sleep for some time.
+ start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
+ [ "$?" = 2 ] && return 2
+ # Many daemons don't delete their pidfiles when they exit.
+ rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+case "$1" in
+ start)
+ [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case "$?" in
+ 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+ 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+ esac
+ ;;
+ stop)
+ [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+ 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+ esac
+ ;;
+ status)
+ status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
+ ;;
+ restart|force-reload)
+ #
+ # If the "reload" option is implemented then remove the
+ # 'force-reload' alias
+ #
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1)
+ do_start
+ case "$?" in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *)
+ # Failed to stop
+ log_end_msg 1
+ ;;
+ esac
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
+
+:
diff --git a/puppet/modules/git/files/web/gitweb.conf b/puppet/modules/git/files/web/gitweb.conf
new file mode 100644
index 00000000..88226aaa
--- /dev/null
+++ b/puppet/modules/git/files/web/gitweb.conf
@@ -0,0 +1,53 @@
+# The gitweb config file is a fragment of perl code. You can set variables
+# using "our $variable = value"; text from "#" character until the end of a
+# line is ignored. See perlsyn(1) man page for details.
+#
+# See /usr/share/doc/gitweb-*/README and /usr/share/doc/gitweb-*/INSTALL for
+# more details and available configuration variables.
+
+# Set the path to git projects. This is an absolute filesystem path which will
+# be prepended to the project path.
+#our $projectroot = "/var/lib/git";
+
+# Set the list of git base URLs used for URL to where fetch project from, i.e.
+# the full URL is "$git_base_url/$project". By default this is empty
+#our @git_base_url_list = qw(git://git.example.com
+# ssh://git.example.com/var/lib/git);
+
+# Enable the 'blame' blob view, showing the last commit that modified
+# each line in the file. This can be very CPU-intensive. Disabled by default
+#$feature{'blame'}{'default'} = [1];
+#
+# Allow projects to override the default setting via git config file.
+# Example: gitweb.blame = 0|1;
+#$feature{'blame'}{'override'} = 1;
+
+# Disable the 'snapshot' link, providing a compressed archive of any tree. This
+# can potentially generate high traffic if you have large project. Enabled for
+# .tar.gz snapshots by default.
+#
+# Value is a list of formats defined in %known_snapshot_formats that you wish
+# to offer.
+#$feature{'snapshot'}{'default'} = [];
+#
+# Allow projects to override the default setting via git config file.
+# Example: gitweb.snapshot = tbz2,zip; (use "none" to disable)
+#$feature{'snapshot'}{'override'} = 1;
+
+# Disable grep search, which will list the files in currently selected tree
+# containing the given string. This can be potentially CPU-intensive, of
+# course. Enabled by default.
+#$feature{'grep'}{'default'} = [0];
+#
+# Allow projects to override the default setting via git config file.
+# Example: gitweb.grep = 0|1;
+#$feature{'grep'}{'override'} = 1;
+
+# Disable the pickaxe search, which will list the commits that modified a given
+# string in a file. This can be practical and quite faster alternative to
+# 'blame', but still potentially CPU-intensive. Enabled by default.
+#$feature{'pickaxe'}{'default'} = [0];
+#
+# Allow projects to override the default setting via git config file.
+# Example: gitweb.pickaxe = 0|1;
+#$feature{'pickaxe'}{'override'} = 1;
diff --git a/puppet/modules/git/files/xinetd.d/git b/puppet/modules/git/files/xinetd.d/git
new file mode 100644
index 00000000..64c53e8b
--- /dev/null
+++ b/puppet/modules/git/files/xinetd.d/git
@@ -0,0 +1,16 @@
+# default: off
+# description: The git dæmon allows git repositories to be exported using
+# the git:// protocol.
+
+service git
+{
+ disable = no
+ socket_type = stream
+ wait = no
+ user = nobody
+ server = /usr/bin/git-daemon
+ server_args = --base-path=/srv/git --export-all --user-path=public_git --syslog --inetd --verbose
+ log_on_failure += USERID
+# xinetd doesn't do this by default. bug #195265
+ flags = IPv6
+}
diff --git a/puppet/modules/git/files/xinetd.d/git.disabled b/puppet/modules/git/files/xinetd.d/git.disabled
new file mode 100644
index 00000000..dcfae918
--- /dev/null
+++ b/puppet/modules/git/files/xinetd.d/git.disabled
@@ -0,0 +1,16 @@
+# default: off
+# description: The git dæmon allows git repositories to be exported using
+# the git:// protocol.
+
+service git
+{
+ disable = yes
+ socket_type = stream
+ wait = no
+ user = nobody
+ server = /usr/bin/git-daemon
+ server_args = --base-path=/srv/git --export-all --user-path=public_git --syslog --inetd --verbose
+ log_on_failure += USERID
+# xinetd doesn't do this by default. bug #195265
+ flags = IPv6
+}
diff --git a/puppet/modules/git/files/xinetd.d/git.vhosts b/puppet/modules/git/files/xinetd.d/git.vhosts
new file mode 100644
index 00000000..98938206
--- /dev/null
+++ b/puppet/modules/git/files/xinetd.d/git.vhosts
@@ -0,0 +1,16 @@
+# default: off
+# description: The git dæmon allows git repositories to be exported using
+# the git:// protocol.
+
+service git
+{
+ disable = no
+ socket_type = stream
+ wait = no
+ user = nobody
+ server = /usr/bin/git-daemon
+ server_args = --interpolated-path=/srv/git/%H/%D --syslog --inetd --verbose
+ log_on_failure += USERID
+# xinetd doesn't do this by default. bug #195265
+ flags = IPv6
+}
diff --git a/puppet/modules/git/manifests/base.pp b/puppet/modules/git/manifests/base.pp
new file mode 100644
index 00000000..e6188390
--- /dev/null
+++ b/puppet/modules/git/manifests/base.pp
@@ -0,0 +1,7 @@
+class git::base {
+
+ package { 'git':
+ ensure => present,
+ alias => 'git',
+ }
+}
diff --git a/puppet/modules/git/manifests/centos.pp b/puppet/modules/git/manifests/centos.pp
new file mode 100644
index 00000000..96344756
--- /dev/null
+++ b/puppet/modules/git/manifests/centos.pp
@@ -0,0 +1,2 @@
+class git::centos inherits git::base {
+}
diff --git a/puppet/modules/git/manifests/changes.pp b/puppet/modules/git/manifests/changes.pp
new file mode 100644
index 00000000..71112051
--- /dev/null
+++ b/puppet/modules/git/manifests/changes.pp
@@ -0,0 +1,33 @@
+# Usage
+# git::changes { name:
+# cwd => "/path/to/git/"
+# user => "me",
+# ensure => {*assume-unchanged*, tracked}
+# }
+#
+
+define git::changes ( $cwd, $user, $ensure='assume-unchanged' ) {
+
+ case $ensure {
+ default: { err ( "unknown ensure value '${ensure}'" ) }
+
+ assume-unchanged: {
+ exec { "assume-unchanged ${name}":
+ command => "/usr/bin/git update-index --assume-unchanged ${name}",
+ cwd => $cwd,
+ user => $user,
+ unless => "/usr/bin/git ls-files -v | grep '^[ch] ${name}'",
+ }
+ }
+
+ tracked: {
+ exec { "track changes ${name}":
+ command => "/usr/bin/git update-index --no-assume-unchanged ${name}",
+ cwd => $cwd,
+ user => $user,
+ onlyif => "/usr/bin/git ls-files -v | grep '^[ch] ${name}'",
+ }
+ }
+ }
+}
+
diff --git a/puppet/modules/git/manifests/clone.pp b/puppet/modules/git/manifests/clone.pp
new file mode 100644
index 00000000..29f0b2b3
--- /dev/null
+++ b/puppet/modules/git/manifests/clone.pp
@@ -0,0 +1,60 @@
+# submodules: Whether we should initialize and update
+# submodules as well
+# Default: false
+# clone_before: before which resources a cloning should
+# happen. This is relevant in combination
+# with submodules as the exec of submodules
+# requires the `cwd` and you might get a
+# dependency cycle if you manage $projectroot
+# somewhere else.
+define git::clone(
+ $ensure = present,
+ $git_repo,
+ $projectroot,
+ $submodules = false,
+ $clone_before = 'absent',
+ $cloneddir_user='root',
+ $cloneddir_group='0',
+ $cloneddir_restrict_mode=true
+){
+ case $ensure {
+ absent: {
+ exec{"rm -rf $projectroot":
+ onlyif => "test -d $projectroot",
+ }
+ }
+ default: {
+ require ::git
+ exec {"git-clone_${name}":
+ command => "git clone --no-hardlinks ${git_repo} ${projectroot}",
+ creates => "${projectroot}/.git",
+ user => root,
+ notify => Exec["git-clone-chown_${name}"],
+ }
+ if $clone_before != 'absent' {
+ Exec["git-clone_${name}"]{
+ before => $clone_before,
+ }
+ }
+ if $submodules {
+ exec{"git-submodules_${name}":
+ command => "git submodule init && git submodule update",
+ cwd => $projectroot,
+ refreshonly => true,
+ subscribe => Exec["git-clone_${name}"],
+ }
+ }
+ exec {"git-clone-chown_${name}":
+ command => "chown -R ${cloneddir_user}:${cloneddir_group} ${projectroot};chmod -R og-rwx ${projectroot}/.git",
+ refreshonly => true
+ }
+ if $cloneddir_restrict_mode {
+ exec {"git-clone-chmod_${name}":
+ command => "chmod -R o-rwx ${projectroot}",
+ refreshonly => true,
+ subscribe => Exec["git-clone_${name}"],
+ }
+ }
+ }
+ }
+}
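A usage sketch; the repository URL and target path are examples:

    git::clone { 'leap_web':
      git_repo    => 'https://leap.se/git/leap_web.git',
      projectroot => '/srv/leap/web',
      submodules  => true,
    }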
diff --git a/puppet/modules/git/manifests/daemon.pp b/puppet/modules/git/manifests/daemon.pp
new file mode 100644
index 00000000..1e85ff84
--- /dev/null
+++ b/puppet/modules/git/manifests/daemon.pp
@@ -0,0 +1,17 @@
+class git::daemon {
+
+ include git
+
+ case $operatingsystem {
+ centos: { include git::daemon::centos }
+ debian: { include git::daemon::base }
+ }
+
+ if $use_shorewall {
+ include shorewall::rules::gitdaemon
+ }
+
+ if $use_nagios {
+ nagios::service { "git-daemon": check_command => "check_git!${fqdn}"; }
+ }
+}
diff --git a/puppet/modules/git/manifests/daemon/base.pp b/puppet/modules/git/manifests/daemon/base.pp
new file mode 100644
index 00000000..6a03d4fd
--- /dev/null
+++ b/puppet/modules/git/manifests/daemon/base.pp
@@ -0,0 +1,31 @@
+class git::daemon::base inherits git::base {
+
+ file { 'git-daemon_initscript':
+ source => [ "puppet://$server/modules/site_git/init.d/${fqdn}/git-daemon",
+ "puppet://$server/modules/site_git/init.d/${operatingsystem}/git-daemon",
+ "puppet://$server/modules/site_git/init.d/git-daemon",
+ "puppet://$server/modules/git/init.d/${operatingsystem}/git-daemon",
+ "puppet://$server/modules/git/init.d/git-daemon" ],
+ require => Package['git'],
+ path => "/etc/init.d/git-daemon",
+ owner => root, group => 0, mode => 0755;
+ }
+
+ file { 'git-daemon_config':
+ source => [ "puppet://$server/modules/site_git/config/${fqdn}/git-daemon",
+ "puppet://$server/modules/site_git/config/${operatingsystem}/git-daemon",
+ "puppet://$server/modules/site_git/config/git-daemon",
+ "puppet://$server/modules/git/config/${operatingsystem}/git-daemon",
+ "puppet://$server/modules/git/config/git-daemon" ],
+ require => Package['git'],
+ path => "/etc/default/git-daemon",
+ owner => root, group => 0, mode => 0644;
+ }
+
+ service { 'git-daemon':
+ ensure => running,
+ enable => true,
+ hasstatus => true,
+ require => [ File['git-daemon_initscript'], File['git-daemon_config'] ],
+ }
+}
diff --git a/puppet/modules/git/manifests/daemon/centos.pp b/puppet/modules/git/manifests/daemon/centos.pp
new file mode 100644
index 00000000..e276259d
--- /dev/null
+++ b/puppet/modules/git/manifests/daemon/centos.pp
@@ -0,0 +1,19 @@
+class git::daemon::centos inherits git::daemon::base {
+
+ package { 'git-daemon':
+ ensure => installed,
+ require => Package['git'],
+ alias => 'git-daemon',
+ }
+
+ File['git-daemon_initscript'] {
+ path => '/etc/init.d/git-daemon',
+ require +> Package['git-daemon'],
+ }
+
+ File['git-daemon_config'] {
+ path => '/etc/init.d/git-daemon',
+ require +> Package['git-daemon'],
+ }
+
+}
diff --git a/puppet/modules/git/manifests/daemon/disable.pp b/puppet/modules/git/manifests/daemon/disable.pp
new file mode 100644
index 00000000..c044e962
--- /dev/null
+++ b/puppet/modules/git/manifests/daemon/disable.pp
@@ -0,0 +1,33 @@
+class git::daemon::disable inherits git::daemon::base {
+
+ if defined(Package['git-daemon']) {
+ Package['git-daemon'] {
+ ensure => absent,
+ }
+ }
+
+ File['git-daemon_initscript'] {
+ ensure => absent,
+ }
+
+ File['git-daemon_config'] {
+ ensure => absent,
+ }
+
+ Service['git-daemon'] {
+ ensure => stopped,
+ enable => false,
+ require => undef,
+ before => File['git-daemon_initscript'],
+ }
+
+ if $use_shorewall {
+ include shorewall::rules::gitdaemon::absent
+ }
+
+ if $use_nagios {
+ nagios::service { "git-daemon": check_command => "check_git!${fqdn}", ensure => absent; }
+ }
+}
+
+
diff --git a/puppet/modules/git/manifests/daemon/vhosts.pp b/puppet/modules/git/manifests/daemon/vhosts.pp
new file mode 100644
index 00000000..9591330f
--- /dev/null
+++ b/puppet/modules/git/manifests/daemon/vhosts.pp
@@ -0,0 +1,10 @@
+class git::daemon::vhosts inherits git::daemon {
+
+ File['git-daemon_config']{
+ source => [ "puppet://$server/modules/site_git/config/${fqdn}/git-daemon.vhosts",
+ "puppet://$server/modules/site_git/config/${operatingsystem}/git-daemon.vhosts",
+ "puppet://$server/modules/site_git/config/git-daemon.vhosts",
+ "puppet://$server/modules/git/config/${operatingsystem}/git-daemon.vhosts",
+ "puppet://$server/modules/git/config/git-daemon.vhosts" ],
+ }
+}
diff --git a/puppet/modules/git/manifests/debian.pp b/puppet/modules/git/manifests/debian.pp
new file mode 100644
index 00000000..2e63d692
--- /dev/null
+++ b/puppet/modules/git/manifests/debian.pp
@@ -0,0 +1,6 @@
+class git::debian inherits git::base {
+
+ Package['git'] {
+ name => 'git-core',
+ }
+}
diff --git a/puppet/modules/git/manifests/init.pp b/puppet/modules/git/manifests/init.pp
new file mode 100644
index 00000000..4693af75
--- /dev/null
+++ b/puppet/modules/git/manifests/init.pp
@@ -0,0 +1,25 @@
+#
+# git module
+#
+# Copyright 2008, Puzzle ITC
+# Marcel Härry haerry+puppet(at)puzzle.ch
+# Simon Josi josi+puppet(at)puzzle.ch
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of the GNU
+# General Public License version 3 as published by
+# the Free Software Foundation.
+#
+
+class git {
+
+ case $operatingsystem {
+ debian: { include git::debian }
+ centos: { include git::centos }
+ }
+
+ if $use_shorewall {
+ include shorewall::rules::out::git
+ }
+
+}
diff --git a/puppet/modules/git/manifests/svn.pp b/puppet/modules/git/manifests/svn.pp
new file mode 100644
index 00000000..ea934749
--- /dev/null
+++ b/puppet/modules/git/manifests/svn.pp
@@ -0,0 +1,10 @@
+# manifests/svn.pp
+
+class git::svn {
+ include ::git
+ include subversion
+
+ package { 'git-svn':
+ require => [ Package['git'], Package['subversion'] ],
+ }
+}
diff --git a/puppet/modules/git/manifests/web.pp b/puppet/modules/git/manifests/web.pp
new file mode 100644
index 00000000..3cf5139e
--- /dev/null
+++ b/puppet/modules/git/manifests/web.pp
@@ -0,0 +1,20 @@
+class git::web {
+ include git
+
+ package { 'gitweb':
+ ensure => present,
+ require => Package['git'],
+ }
+
+ file { '/etc/gitweb.d':
+ ensure => directory,
+ owner => root, group => 0, mode => 0755;
+ }
+ file { '/etc/gitweb.conf':
+ source => [ "puppet:///modules/site_git/web/${fqdn}/gitweb.conf",
+ "puppet:///modules/site_git/web/gitweb.conf",
+ "puppet:///modules/git/web/gitweb.conf" ],
+ require => Package['gitweb'],
+ owner => root, group => 0, mode => 0644;
+ }
+}
diff --git a/puppet/modules/git/manifests/web/absent.pp b/puppet/modules/git/manifests/web/absent.pp
new file mode 100644
index 00000000..4d0dba33
--- /dev/null
+++ b/puppet/modules/git/manifests/web/absent.pp
@@ -0,0 +1,17 @@
+class git::web::absent {
+
+ package { 'gitweb':
+ ensure => absent,
+ }
+
+ file { '/etc/gitweb.d':
+ ensure => absent,
+ purge => true,
+ force => true,
+ recurse => true,
+ }
+ file { '/etc/gitweb.conf':
+ ensure => absent,
+ }
+}
+
diff --git a/puppet/modules/git/manifests/web/lighttpd.pp b/puppet/modules/git/manifests/web/lighttpd.pp
new file mode 100644
index 00000000..980e23c0
--- /dev/null
+++ b/puppet/modules/git/manifests/web/lighttpd.pp
@@ -0,0 +1,7 @@
+class git::web::lighttpd {
+ include ::lighttpd
+
+ lighttpd::config::file{'lighttpd-gitweb':
+ content => 'global { server.modules += ("mod_rewrite", "mod_redirect", "mod_alias", "mod_setenv", "mod_cgi" ) }',
+ }
+}
diff --git a/puppet/modules/git/manifests/web/repo.pp b/puppet/modules/git/manifests/web/repo.pp
new file mode 100644
index 00000000..da6f74f0
--- /dev/null
+++ b/puppet/modules/git/manifests/web/repo.pp
@@ -0,0 +1,56 @@
+# domain: the domain under which this repo will be available
+# projectroot: where the git repos are located
+# projects_list: which repos to export
+#
+# logmode:
+# - default: Do normal logging including ips
+# - anonym: Don't log ips
+define git::web::repo(
+ $ensure = 'present',
+ $projectroot = 'absent',
+ $projects_list = 'absent',
+ $logmode = 'default',
+ $sitename = 'absent'
+){
+ if ($ensure == 'present') and (($projects_list == 'absent') or ($projectroot == 'absent')){
+    fail("You have to pass \$projects_list and \$projectroot for ${name} if it should be present!")
+ }
+ if $ensure == 'present' { include git::web }
+ $gitweb_url = $name
+  case $sitename {
+ 'absent': { $gitweb_sitename = "${name} git repository" }
+ default: { $gitweb_sitename = $sitename }
+ }
+ $gitweb_config = "/etc/gitweb.d/${name}.conf"
+ file{"${gitweb_config}": }
+ if $ensure == 'present' {
+ File["${gitweb_config}"]{
+ content => template("git/web/config")
+ }
+ } else {
+ File["${gitweb_config}"]{
+ ensure => absent,
+ }
+ }
+ case $gitweb_webserver {
+ 'lighttpd': {
+ git::web::repo::lighttpd{$name:
+ ensure => $ensure,
+ logmode => $logmode,
+ gitweb_url => $gitweb_url,
+ gitweb_config => $gitweb_config,
+ }
+ }
+ 'apache': {
+ apache::vhost::gitweb{$gitweb_url:
+ logmode => $logmode,
+ ensure => $ensure,
+ }
+ }
+ default: {
+ if ($ensure == 'present') {
+ fail("no supported \$gitweb_webserver defined on ${fqdn}, so can't do git::web::repo: ${name}")
+ }
+ }
+ }
+}
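A sketch of exporting a repository collection through gitweb; the domain and paths are hypothetical, and $gitweb_webserver must be set globally (for example to 'lighttpd'):

    git::web::repo { 'git.example.org':
      projectroot   => '/srv/git',
      projects_list => '/srv/git/projects.list',
      sitename      => 'Example git repositories',
    }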
diff --git a/puppet/modules/git/manifests/web/repo/lighttpd.pp b/puppet/modules/git/manifests/web/repo/lighttpd.pp
new file mode 100644
index 00000000..11cee4ce
--- /dev/null
+++ b/puppet/modules/git/manifests/web/repo/lighttpd.pp
@@ -0,0 +1,16 @@
+# logmode:
+# - default: Do normal logging including ips
+# - anonym: Don't log ips
+define git::web::repo::lighttpd(
+ $ensure = 'present',
+ $gitweb_url,
+ $logmode = 'default',
+ $gitweb_config
+){
+ if $ensure == 'present' { include git::web::lighttpd }
+
+ lighttpd::vhost::file{$name:
+ ensure => $ensure,
+ content => template('git/web/lighttpd');
+ }
+}
diff --git a/puppet/modules/git/templates/web/config b/puppet/modules/git/templates/web/config
new file mode 100644
index 00000000..5286f6a6
--- /dev/null
+++ b/puppet/modules/git/templates/web/config
@@ -0,0 +1,31 @@
+# Include the global configuration, if found.
+do "/etc/gitweb.conf" if -e "/etc/gitweb.conf";
+
+# Point to projects.list file generated by gitosis.
+# Here gitosis manages the user "git", who has a
+# home directory of /srv/example.com/git
+$projects_list = "<%= projects_list %>";
+
+# Where the actual repositories are located.
+$projectroot = "<%= projectroot %>";
+
+# By default, gitweb will happily let people browse any repository
+# they guess the name of. This may or may not be what you wanted. I
+# choose to allow gitweb to show only repositories that git-daemon
+# is already sharing anonymously.
+$export_ok = "git-daemon-export-ok";
+
+# Alternatively, you could set these, to allow exactly the things in
+# projects.list, which in this case is the repos with gitweb=yes
+# in gitosis.conf. This means you don't need daemon=yes, but you
+# can't have repositories hidden but browsable if you know the name.
+# And note gitweb already allows downloading the full repository,
+# so you might as well serve git-daemon too.
+# $export_ok = "";
+# $strict_export = "true";
+
+# A list of base urls where all the repositories can be cloned from.
+# Easier than having per-repository cloneurl files.
+@git_base_url_list = ('git://<%= gitweb_url %>');
+
+$GITWEB_SITENAME = "<%= gitweb_sitename %>"
diff --git a/puppet/modules/git/templates/web/lighttpd b/puppet/modules/git/templates/web/lighttpd
new file mode 100644
index 00000000..cf244691
--- /dev/null
+++ b/puppet/modules/git/templates/web/lighttpd
@@ -0,0 +1,21 @@
+$HTTP["host"] == "<%= gitweb_url %>" {
+ url.redirect += (
+ "^$" => "/",
+ )
+
+ <%- if logmode.to_s == 'anonym' -%>
+ accesslog.format = "127.0.0.1 %V %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\""
+ <%- end -%>
+
+ alias.url += (
+ "/static/gitweb.css" => "/var/www/git/static/gitweb.css",
+ "/static/git-logo.png" => "/var/www/git/static/git-logo.png",
+ "/static/git-favicon.png" => "/var/www/git/static/git-favicon.png",
+ "/" => "/var/www/git/gitweb.cgi",
+ )
+
+ setenv.add-environment = (
+ "GITWEB_CONFIG" => "<%= gitweb_config %>"
+ )
+ cgi.assign = ( ".cgi" => "" )
+}
diff --git a/puppet/modules/haveged/manifests/init.pp b/puppet/modules/haveged/manifests/init.pp
new file mode 100644
index 00000000..8f901937
--- /dev/null
+++ b/puppet/modules/haveged/manifests/init.pp
@@ -0,0 +1,16 @@
+class haveged {
+
+ package { 'haveged':
+ ensure => present,
+ }
+
+ service { 'haveged':
+ ensure => running,
+ hasrestart => true,
+ hasstatus => true,
+ enable => true,
+ require => Package['haveged'];
+ }
+
+ include site_check_mk::agent::haveged
+}
diff --git a/puppet/modules/journald/manifests/init.pp b/puppet/modules/journald/manifests/init.pp
new file mode 100644
index 00000000..879baba4
--- /dev/null
+++ b/puppet/modules/journald/manifests/init.pp
@@ -0,0 +1,7 @@
+class journald {
+
+ service { 'systemd-journald':
+ ensure => running,
+ enable => true,
+ }
+}
diff --git a/puppet/modules/leap/manifests/cli/install.pp b/puppet/modules/leap/manifests/cli/install.pp
new file mode 100644
index 00000000..25e87033
--- /dev/null
+++ b/puppet/modules/leap/manifests/cli/install.pp
@@ -0,0 +1,46 @@
+# installs leap_cli on node
+class leap::cli::install ( $source = false ) {
+ if $source {
+ # needed for building leap_cli from source
+ include ::git
+ include ::rubygems
+
+ class { '::ruby':
+ install_dev => true
+ }
+
+ class { 'bundler::install': install_method => 'package' }
+
+ Class[Ruby] ->
+ Class[rubygems] ->
+ Class[bundler::install]
+
+
+ vcsrepo { '/srv/leap/cli':
+ ensure => present,
+ force => true,
+ revision => 'develop',
+ provider => 'git',
+ source => 'https://leap.se/git/leap_cli.git',
+ owner => 'root',
+ group => 'root',
+ notify => Exec['install_leap_cli'],
+ require => Package['git']
+ }
+
+ exec { 'install_leap_cli':
+ command => '/usr/bin/rake build && /usr/bin/rake install',
+ cwd => '/srv/leap/cli',
+ user => 'root',
+ environment => 'USER=root',
+ refreshonly => true,
+ require => [ Class[bundler::install] ]
+ }
+ }
+ else {
+ package { 'leap_cli':
+ ensure => installed,
+ provider => gem
+ }
+ }
+}
diff --git a/puppet/modules/leap/manifests/init.pp b/puppet/modules/leap/manifests/init.pp
new file mode 100644
index 00000000..bbae3781
--- /dev/null
+++ b/puppet/modules/leap/manifests/init.pp
@@ -0,0 +1,3 @@
+class leap {
+
+} \ No newline at end of file
diff --git a/puppet/modules/leap/manifests/logfile.pp b/puppet/modules/leap/manifests/logfile.pp
new file mode 100644
index 00000000..adb3ca8a
--- /dev/null
+++ b/puppet/modules/leap/manifests/logfile.pp
@@ -0,0 +1,34 @@
+#
+# make syslog log to a particular file for a particular process.
+#
+# arguments:
+#
+# * name: what the config files are named (e.g. /etc/rsyslog.d/50-$name.conf)
+# * log: the full path of the log file (defaults to /var/log/leap/$name.log)
+# * process: the syslog tag to filter on (defaults to name)
+#
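+# example (hypothetical resource title, for illustration only):
+#
+#   leap::logfile { 'nickserver': }
+#
+# this would create /etc/rsyslog.d/50-nickserver.conf and rotate
+# /var/log/leap/nickserver.log daily, keeping 5 rotations.
+#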
+define leap::logfile($process = $name, $log = undef) {
+ if $log {
+ $logfile = $log
+ } else {
+ $logfile = "/var/log/leap/${name}.log"
+ }
+
+ rsyslog::snippet { "50-${name}":
+ content => template('leap/rsyslog.erb')
+ }
+
+ augeas {
+ "logrotate_${name}":
+ context => "/files/etc/logrotate.d/${name}/rule",
+ changes => [
+ "set file ${logfile}",
+ 'set rotate 5',
+ 'set schedule daily',
+ 'set compress compress',
+ 'set missingok missingok',
+ 'set ifempty notifempty',
+ 'set copytruncate copytruncate'
+ ]
+ }
+}
diff --git a/puppet/modules/leap/templates/rsyslog.erb b/puppet/modules/leap/templates/rsyslog.erb
new file mode 100644
index 00000000..7bb5316f
--- /dev/null
+++ b/puppet/modules/leap/templates/rsyslog.erb
@@ -0,0 +1,5 @@
+if $programname startswith '<%= @process %>' then {
+ action(type="omfile" file="<%= @logfile %>" template="RSYSLOG_TraditionalFileFormat")
+ stop
+}
+
diff --git a/puppet/modules/leap_mx/manifests/init.pp b/puppet/modules/leap_mx/manifests/init.pp
new file mode 100644
index 00000000..d758e3ab
--- /dev/null
+++ b/puppet/modules/leap_mx/manifests/init.pp
@@ -0,0 +1,119 @@
+# deploy leap mx service
+class leap_mx {
+
+ $leap_mx = hiera('couchdb_leap_mx_user')
+ $couchdb_user = $leap_mx['username']
+ $couchdb_password = $leap_mx['password']
+
+ $couchdb_host = 'localhost'
+ $couchdb_port = '4096'
+
+ $sources = hiera('sources')
+
+ include soledad::common
+
+ #
+ # USER AND GROUP
+ #
+ # Make the user for leap-mx. This user is where all legitimate, non-system
+ # mail is delivered so leap-mx can process it. Previously, we let the system
+ # pick a uid/gid, but we need to know what they are set to in order to set the
+ # virtual_uid_maps and virtual_gid_maps. It's a bit of overkill to write a fact
+ # just for this, so instead we pick arbitrary numbers that seem unlikely to be
+ # used and then use them in the postfix configuration.
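+ # (Illustratively, the postfix side is then expected to map deliveries with
+ # something like virtual_uid_maps/virtual_gid_maps set to static:42424,
+ # though those settings are managed outside this class.)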
+
+ group { 'leap-mx':
+ ensure => present,
+ gid => 42424,
+ allowdupe => false;
+ }
+
+ user { 'leap-mx':
+ ensure => present,
+ comment => 'Leap Mail',
+ allowdupe => false,
+ uid => 42424,
+ gid => 'leap-mx',
+ home => '/var/mail/leap-mx',
+ shell => '/bin/false',
+ managehome => true,
+ require => Group['leap-mx'];
+ }
+
+ file {
+ '/var/mail/leap-mx':
+ ensure => directory,
+ owner => 'leap-mx',
+ group => 'leap-mx',
+ mode => '0755',
+ require => User['leap-mx'];
+
+ '/var/mail/leap-mx/Maildir':
+ ensure => directory,
+ owner => 'leap-mx',
+ group => 'leap-mx',
+ mode => '0700';
+
+ '/var/mail/leap-mx/Maildir/new':
+ ensure => directory,
+ owner => 'leap-mx',
+ group => 'leap-mx',
+ mode => '0700';
+
+ '/var/mail/leap-mx/Maildir/cur':
+ ensure => directory,
+ owner => 'leap-mx',
+ group => 'leap-mx',
+ mode => '0700';
+
+ '/var/mail/leap-mx/Maildir/tmp':
+ ensure => directory,
+ owner => 'leap-mx',
+ group => 'leap-mx',
+ mode => '0700';
+ }
+
+ #
+ # LEAP-MX CONFIG
+ #
+
+ file { '/etc/leap/mx.conf':
+ content => template('leap_mx/mx.conf.erb'),
+ owner => 'leap-mx',
+ group => 'leap-mx',
+ mode => '0600',
+ notify => Service['leap-mx'];
+ }
+
+ leap::logfile { 'leap-mx':
+ log => '/var/log/leap/mx.log',
+ process => 'leap-mx'
+ }
+
+ #
+ # LEAP-MX CODE AND DEPENDENCIES
+ #
+
+ package {
+ $sources['leap-mx']['package']:
+ ensure => $sources['leap-mx']['revision'],
+ require => [
+ Class['site_apt::leap_repo'],
+ User['leap-mx'] ];
+
+ 'leap-keymanager':
+ ensure => latest;
+ }
+
+ #
+ # LEAP-MX DAEMON
+ #
+
+ service { 'leap-mx':
+ ensure => running,
+ enable => true,
+ hasstatus => true,
+ hasrestart => true,
+ require => [ Package['leap-mx'] ];
+ }
+}
diff --git a/puppet/modules/leap_mx/templates/mx.conf.erb b/puppet/modules/leap_mx/templates/mx.conf.erb
new file mode 100644
index 00000000..b54b3a86
--- /dev/null
+++ b/puppet/modules/leap_mx/templates/mx.conf.erb
@@ -0,0 +1,18 @@
+[mail1]
+path=/var/mail/leap-mx/Maildir
+recursive=True
+
+[couchdb]
+user=<%= @couchdb_user %>
+password=<%= @couchdb_password %>
+server=<%= @couchdb_host %>
+port=<%= @couchdb_port %>
+
+[alias map]
+port=4242
+
+[check recipient]
+port=2244
+
+[fingerprint map]
+port=2424
diff --git a/puppet/modules/lsb/manifests/base.pp b/puppet/modules/lsb/manifests/base.pp
new file mode 100644
index 00000000..9dc8d5a4
--- /dev/null
+++ b/puppet/modules/lsb/manifests/base.pp
@@ -0,0 +1,3 @@
+class lsb::base {
+ package{'lsb': ensure => present }
+}
diff --git a/puppet/modules/lsb/manifests/centos.pp b/puppet/modules/lsb/manifests/centos.pp
new file mode 100644
index 00000000..b7006187
--- /dev/null
+++ b/puppet/modules/lsb/manifests/centos.pp
@@ -0,0 +1,5 @@
+class lsb::centos inherits lsb::base {
+ Package['lsb']{
+ name => 'redhat-lsb',
+ }
+}
diff --git a/puppet/modules/lsb/manifests/debian.pp b/puppet/modules/lsb/manifests/debian.pp
new file mode 100644
index 00000000..c32070f3
--- /dev/null
+++ b/puppet/modules/lsb/manifests/debian.pp
@@ -0,0 +1,6 @@
+class lsb::debian inherits lsb::base {
+ Package['lsb']{
+ name => 'lsb-release',
+ require => undef,
+ }
+}
diff --git a/puppet/modules/lsb/manifests/init.pp b/puppet/modules/lsb/manifests/init.pp
new file mode 100644
index 00000000..85b34e1f
--- /dev/null
+++ b/puppet/modules/lsb/manifests/init.pp
@@ -0,0 +1,6 @@
+class lsb {
+ case $::operatingsystem {
+ debian,ubuntu: { include lsb::debian }
+ centos: { include lsb::centos }
+ }
+}
diff --git a/puppet/modules/ntp/.fixtures.yml b/puppet/modules/ntp/.fixtures.yml
new file mode 100644
index 00000000..a4b98014
--- /dev/null
+++ b/puppet/modules/ntp/.fixtures.yml
@@ -0,0 +1,5 @@
+fixtures:
+ repositories:
+ "stdlib": "git://github.com/puppetlabs/puppetlabs-stdlib.git"
+ symlinks:
+ "ntp": "#{source_dir}"
diff --git a/puppet/modules/ntp/.gitignore b/puppet/modules/ntp/.gitignore
new file mode 100644
index 00000000..49cf4650
--- /dev/null
+++ b/puppet/modules/ntp/.gitignore
@@ -0,0 +1,3 @@
+pkg/
+metadata.json
+Gemfile.lock
diff --git a/puppet/modules/ntp/.nodeset.yml b/puppet/modules/ntp/.nodeset.yml
new file mode 100644
index 00000000..cbd0d57b
--- /dev/null
+++ b/puppet/modules/ntp/.nodeset.yml
@@ -0,0 +1,35 @@
+---
+default_set: 'centos-64-x64'
+sets:
+ 'centos-59-x64':
+ nodes:
+ "main.foo.vm":
+ prefab: 'centos-59-x64'
+ 'centos-64-x64':
+ nodes:
+ "main.foo.vm":
+ prefab: 'centos-64-x64'
+ 'fedora-18-x64':
+ nodes:
+ "main.foo.vm":
+ prefab: 'fedora-18-x64'
+ 'debian-607-x64':
+ nodes:
+ "main.foo.vm":
+ prefab: 'debian-607-x64'
+ 'debian-70rc1-x64':
+ nodes:
+ "main.foo.vm":
+ prefab: 'debian-70rc1-x64'
+ 'ubuntu-server-10044-x64':
+ nodes:
+ "main.foo.vm":
+ prefab: 'ubuntu-server-10044-x64'
+ 'ubuntu-server-12042-x64':
+ nodes:
+ "main.foo.vm":
+ prefab: 'ubuntu-server-12042-x64'
+ 'sles-11sp1-x64':
+ nodes:
+ "main.foo.vm":
+ prefab: 'sles-11sp1-x64'
diff --git a/puppet/modules/ntp/.travis.yml b/puppet/modules/ntp/.travis.yml
new file mode 100644
index 00000000..e9f0e84b
--- /dev/null
+++ b/puppet/modules/ntp/.travis.yml
@@ -0,0 +1,40 @@
+---
+branches:
+ only:
+ - master
+language: ruby
+bundler_args: --without development
+script: "bundle exec rake spec SPEC_OPTS='--format documentation'"
+after_success:
+ - git clone -q git://github.com/puppetlabs/ghpublisher.git .forge-releng
+ - .forge-releng/publish
+rvm:
+- 1.8.7
+- 1.9.3
+- 2.0.0
+env:
+ matrix:
+ - PUPPET_GEM_VERSION="~> 2.7.0"
+ - PUPPET_GEM_VERSION="~> 3.0.0"
+ - PUPPET_GEM_VERSION="~> 3.1.0"
+ - PUPPET_GEM_VERSION="~> 3.2.0"
+ global:
+ - PUBLISHER_LOGIN=puppetlabs
+ - secure: |-
+ ZiIkYd9+CdPzpwSjFPnVkCx1FIlipxpbdyD33q94h2Tj5zXjNb1GXizVy0NR
+ kVxGhU5Ld8y9z8DTqKRgCI1Yymg3H//OU++PKLOQj/X5juWVR4URBNPeBOzu
+ IJBDl1MADKA4i1+jAZPpz4mTvTtKS4pWKErgCSmhSfsY1hs7n6c=
+matrix:
+ exclude:
+ - rvm: 1.9.3
+ env: PUPPET_GEM_VERSION="~> 2.7.0"
+ - rvm: 2.0.0
+ env: PUPPET_GEM_VERSION="~> 2.7.0"
+ - rvm: 2.0.0
+ env: PUPPET_GEM_VERSION="~> 3.0.0"
+ - rvm: 2.0.0
+ env: PUPPET_GEM_VERSION="~> 3.1.0"
+ - rvm: 1.8.7
+ env: PUPPET_GEM_VERSION="~> 3.2.0"
+notifications:
+ email: false
diff --git a/puppet/modules/ntp/CHANGELOG b/puppet/modules/ntp/CHANGELOG
new file mode 100644
index 00000000..8be6c4e0
--- /dev/null
+++ b/puppet/modules/ntp/CHANGELOG
@@ -0,0 +1,61 @@
+2013-07-31 - Version 2.0.0
+
+Summary:
+
+The 2.0 release focuses on merging all the distro-specific
+templates into a single reusable template across all platforms.
+
+To aid in that goal we now allow you to change the driftfile,
+ntp keys, and preferred_servers.
+
+Backwards-incompatible changes:
+
+As all the distro-specific templates have been removed and a
+unified one created, you may be missing functionality you
+previously relied on. Please test carefully before rolling
+out globally.
+
+Configuration directives that might be affected:
+- `filegen`
+- `fudge` (for virtual machines)
+- `keys`
+- `logfile`
+- `restrict`
+- `restrictkey`
+- `statistics`
+- `trustedkey`
+
+Features:
+- All templates merged into a single template.
+- NTP Keys support added.
+- Add preferred servers support.
+- Parameters in `ntp` class:
+ - `driftfile`: path for the ntp driftfile.
+ - `keys_enable`: Enable NTP keys feature.
+ - `keys_file`: Path for the NTP keys file.
+ - `keys_trusted`: Which keys to trust.
+ - `keys_controlkey`: Which key to use for the control key.
+ - `keys_requestkey`: Which key to use for the request key.
+ - `preferred_servers`: Array of servers to prefer.
+ - `restrict`: Array of restriction options to apply.
+
+2013-07-15 - Version 1.0.1
+Bugfixes:
+- Fix deprecation warning in `autoupdate` parameter.
+- Correctly quote is_virtual fact.
+
+2013-07-08 - Version 1.0.0
+Features:
+- Completely refactored to split across several classes.
+- rspec-puppet tests rewritten to cover more options.
+- rspec-system tests added.
+- ArchLinux handled via osfamily instead of special casing.
+- parameters in `ntp` class:
+ - `autoupdate`: deprecated in favor of directly setting package_ensure.
+ - `panic`: set to false if you wish to allow large clock skews.
+
+2011-11-10 Dan Bode <dan@puppetlabs.com> - 0.0.4
+Add Amazon Linux as a supported platform
+Add unit tests
+2011-06-16 Jeff McCune <jeff@puppetlabs.com> - 0.0.3
+Initial release under puppetlabs
diff --git a/puppet/modules/ntp/CONTRIBUTING.md b/puppet/modules/ntp/CONTRIBUTING.md
new file mode 100644
index 00000000..a2b1d77b
--- /dev/null
+++ b/puppet/modules/ntp/CONTRIBUTING.md
@@ -0,0 +1,9 @@
+Puppet Labs modules on the Puppet Forge are open projects, and community contributions
+are essential for keeping them great. We can’t access the huge number of platforms and
+myriad of hardware, software, and deployment configurations that Puppet is intended to serve.
+
+We want to keep it as easy as possible to contribute changes so that our modules work
+in your environment. There are a few guidelines that we need contributors to follow so
+that we can have a chance of keeping on top of things.
+
+You can read the complete module contribution guide [on the Puppet Labs wiki.](http://projects.puppetlabs.com/projects/module-site/wiki/Module_contributing)
diff --git a/puppet/modules/ntp/Gemfile b/puppet/modules/ntp/Gemfile
new file mode 100644
index 00000000..4e733308
--- /dev/null
+++ b/puppet/modules/ntp/Gemfile
@@ -0,0 +1,19 @@
+source 'https://rubygems.org'
+
+group :development, :test do
+ gem 'rake', :require => false
+ gem 'puppetlabs_spec_helper', :require => false
+ gem 'rspec-system-puppet', :require => false
+ gem 'puppet-lint', :require => false
+ gem 'serverspec', :require => false
+ gem 'rspec-system-serverspec', :require => false
+ gem 'vagrant-wrapper', :require => false
+end
+
+if puppetversion = ENV['PUPPET_GEM_VERSION']
+ gem 'puppet', puppetversion, :require => false
+else
+ gem 'puppet', :require => false
+end
+
+# vim:ft=ruby
diff --git a/puppet/modules/ntp/LICENSE b/puppet/modules/ntp/LICENSE
new file mode 100644
index 00000000..57bc88a1
--- /dev/null
+++ b/puppet/modules/ntp/LICENSE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/puppet/modules/ntp/Modulefile b/puppet/modules/ntp/Modulefile
new file mode 100644
index 00000000..9610ef67
--- /dev/null
+++ b/puppet/modules/ntp/Modulefile
@@ -0,0 +1,11 @@
+name 'puppetlabs-ntp'
+version '2.0.0-rc1'
+source 'git://github.com/puppetlabs/puppetlabs-ntp'
+author 'Puppet Labs'
+license 'Apache Version 2.0'
+summary 'NTP Module'
+description 'NTP Module for Debian, Ubuntu, CentOS, RHEL, OEL, Fedora, FreeBSD, ArchLinux and Gentoo.'
+project_page 'http://github.com/puppetlabs/puppetlabs-ntp'
+
+## Add dependencies, if any:
+dependency 'puppetlabs/stdlib', '>= 0.1.6'
diff --git a/puppet/modules/ntp/README.markdown b/puppet/modules/ntp/README.markdown
new file mode 100644
index 00000000..3aedd47a
--- /dev/null
+++ b/puppet/modules/ntp/README.markdown
@@ -0,0 +1,215 @@
+#ntp
+
+####Table of Contents
+
+1. [Overview](#overview)
+2. [Module Description - What the module does and why it is useful](#module-description)
+3. [Setup - The basics of getting started with ntp](#setup)
+ * [What ntp affects](#what-ntp-affects)
+ * [Setup requirements](#setup-requirements)
+ * [Beginning with ntp](#beginning-with-ntp)
+4. [Usage - Configuration options and additional functionality](#usage)
+5. [Reference - An under-the-hood peek at what the module is doing and how](#reference)
+6. [Limitations - OS compatibility, etc.](#limitations)
+7. [Development - Guide for contributing to the module](#development)
+
+##Overview
+
+The NTP module installs, configures, and manages the ntp service.
+
+##Module Description
+
+The NTP module handles running NTP across a range of operating systems and
+distributions. Where possible we use the upstream ntp templates so that the
+results closely match what you'd get if you modified the package default conf
+files.
+
+##Setup
+
+###What ntp affects
+
+* ntp package.
+* ntp configuration file.
+* ntp service.
+
+###Beginning with ntp
+
+`include '::ntp'` is enough to get you up and running. If you wish to pass in
+parameters, such as which servers to use, you can use:
+
+```puppet
+class { '::ntp':
+ servers => [ 'ntp1.corp.com', 'ntp2.corp.com' ],
+}
+```
+
+##Usage
+
+All interaction with the ntp module can be done through the main ntp class.
+This means you can simply toggle the options in the ntp class to get at the
+full functionality.
+
+###I just want NTP, what's the minimum I need?
+
+```puppet
+include '::ntp'
+```
+
+###I just want to tweak the servers, nothing else.
+
+```puppet
+class { '::ntp':
+ servers => [ 'ntp1.corp.com', 'ntp2.corp.com' ],
+}
+```
+
+###I'd like to make sure I restrict who can connect as well.
+
+```puppet
+class { '::ntp':
+ servers => [ 'ntp1.corp.com', 'ntp2.corp.com' ],
+ restrict => 'restrict 127.0.0.1',
+}
+```
+
+###I'd like to opt out of having the service controlled, we use another tool for that.
+
+```puppet
+class { '::ntp':
+ servers => [ 'ntp1.corp.com', 'ntp2.corp.com' ],
+ restrict => 'restrict 127.0.0.1',
+ manage_service => false,
+}
+```
+
+###Looks great! But I'd like a different template, we need to do something unique here.
+
+```puppet
+class { '::ntp':
+ servers => [ 'ntp1.corp.com', 'ntp2.corp.com' ],
+ restrict => 'restrict 127.0.0.1',
+ manage_service => false,
+ config_template => 'different/module/custom.template.erb',
+}
+```
+
+##Reference
+
+###Classes
+
+* ntp: Main class, includes all the rest.
+* ntp::install: Handles the packages.
+* ntp::config: Handles the configuration file.
+* ntp::service: Handles the service.
+
+###Parameters
+
+The following parameters are available in the ntp module
+
+####`autoupdate`
+
+Deprecated: This parameter previously determined if the ntp package should be
+automatically updated to the latest version available. It has been replaced by
+`package_ensure`.
+
+####`config`
+
+This sets the file to write ntp configuration into.
+
+####`config_template`
+
+This determines which template puppet should use for the ntp configuration.
+
+####`driftfile`
+
+This sets the location of the driftfile for ntp.
+
+####`keys_controlkey`
+
+Which of the keys is used as the control key.
+
+####`keys_enable`
+
+Whether the ntp keys functionality should be enabled.
+
+####`keys_file`
+
+Location of the keys file.
+
+####`keys_requestkey`
+
+Which of the keys is used as the request key.
+
+####`package_ensure`
+
+This can be set to 'present', 'latest', or a specific version to choose which
+version of the ntp package is installed.
+
+####`package_name`
+
+This determines the name of the package to install.
+
+####`panic`
+
+This determines if ntp should 'panic' in the event of a very large clock skew.
+By default we set this to false on virtual machines, as they don't do a great
+job of keeping time.
+
+####`preferred_servers`
+
+List of ntp servers to prefer. Appends `prefer` to any server in this list
+that also appears in the `servers` list.
+
+####`restrict`
+
+This sets the restrict options in the ntp configuration.
+
+####`servers`
+
+This selects the servers to use for ntp peers.
+
+####`service_enable`
+
+This determines if the service should be enabled at boot.
+
+####`service_ensure`
+
+This determines if the service should be running or not.
+
+####`service_manage`
+
+This selects if puppet should manage the service in the first place.
+
+####`service_name`
+
+This selects the name of the ntp service for puppet to manage.
+
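+As a rough sketch (the key IDs and keys file path below are illustrative, not
+defaults), the keys-related parameters can be combined like this:
+
+```puppet
+class { '::ntp':
+  servers         => [ 'ntp1.corp.com', 'ntp2.corp.com' ],
+  keys_enable     => true,
+  keys_file       => '/etc/ntp/ntp.keys',
+  keys_trusted    => [ '1', '2' ],
+  keys_controlkey => '1',
+  keys_requestkey => '2',
+}
+```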
+
+##Limitations
+
+This module has been built on and tested against Puppet 2.7 and higher.
+
+The module has been tested on:
+
+* RedHat Enterprise Linux 5/6
+* Debian 6/7
+* CentOS 5/6
+* Ubuntu 12.04
+* Gentoo
+* Arch Linux
+* FreeBSD
+
+Testing on other platforms has been light and cannot be guaranteed.
+
+##Development
+
+Puppet Labs modules on the Puppet Forge are open projects, and community
+contributions are essential for keeping them great. We can’t access the
+huge number of platforms and myriad of hardware, software, and deployment
+configurations that Puppet is intended to serve.
+
+We want to keep it as easy as possible to contribute changes so that our
+modules work in your environment. There are a few guidelines that we need
+contributors to follow so that we can have a chance of keeping on top of things.
+
+You can read the complete module contribution guide [on the Puppet Labs wiki.](http://projects.puppetlabs.com/projects/module-site/wiki/Module_contributing)
diff --git a/puppet/modules/ntp/Rakefile b/puppet/modules/ntp/Rakefile
new file mode 100644
index 00000000..bb60173e
--- /dev/null
+++ b/puppet/modules/ntp/Rakefile
@@ -0,0 +1,2 @@
+require 'puppetlabs_spec_helper/rake_tasks'
+require 'rspec-system/rake_task'
diff --git a/puppet/modules/ntp/manifests/config.pp b/puppet/modules/ntp/manifests/config.pp
new file mode 100644
index 00000000..1c8963dc
--- /dev/null
+++ b/puppet/modules/ntp/manifests/config.pp
@@ -0,0 +1,23 @@
+#
+class ntp::config inherits ntp {
+
+ if $keys_enable {
+ $directory = dirname($keys_file)
+ file { $directory:
+ ensure => directory,
+ owner => 0,
+ group => 0,
+ mode => '0755',
+ recurse => true,
+ }
+ }
+
+ file { $config:
+ ensure => file,
+ owner => 0,
+ group => 0,
+ mode => '0644',
+ content => template($config_template),
+ }
+
+}
diff --git a/puppet/modules/ntp/manifests/init.pp b/puppet/modules/ntp/manifests/init.pp
new file mode 100644
index 00000000..be951187
--- /dev/null
+++ b/puppet/modules/ntp/manifests/init.pp
@@ -0,0 +1,58 @@
+class ntp (
+ $autoupdate = $ntp::params::autoupdate,
+ $config = $ntp::params::config,
+ $config_template = $ntp::params::config_template,
+ $driftfile = $ntp::params::driftfile,
+ $keys_enable = $ntp::params::keys_enable,
+ $keys_file = $ntp::params::keys_file,
+ $keys_controlkey = $ntp::params::keys_controlkey,
+ $keys_requestkey = $ntp::params::keys_requestkey,
+ $keys_trusted = $ntp::params::keys_trusted,
+ $package_ensure = $ntp::params::package_ensure,
+ $package_name = $ntp::params::package_name,
+ $panic = $ntp::params::panic,
+ $preferred_servers = $ntp::params::preferred_servers,
+ $restrict = $ntp::params::restrict,
+ $servers = $ntp::params::servers,
+ $service_enable = $ntp::params::service_enable,
+ $service_ensure = $ntp::params::service_ensure,
+ $service_manage = $ntp::params::service_manage,
+ $service_name = $ntp::params::service_name,
+) inherits ntp::params {
+
+ validate_absolute_path($config)
+ validate_string($config_template)
+ validate_absolute_path($driftfile)
+ validate_bool($keys_enable)
+ validate_re($keys_controlkey, ['^\d+$', ''])
+ validate_re($keys_requestkey, ['^\d+$', ''])
+ validate_array($keys_trusted)
+ validate_string($package_ensure)
+ validate_array($package_name)
+ validate_bool($panic)
+ validate_array($preferred_servers)
+ validate_array($restrict)
+ validate_array($servers)
+ validate_bool($service_enable)
+ validate_string($service_ensure)
+ validate_bool($service_manage)
+ validate_string($service_name)
+
+ if $autoupdate {
+ notice('autoupdate parameter has been deprecated and replaced with package_ensure. Set this to latest for the same behavior as autoupdate => true.')
+ }
+
+ include '::ntp::install'
+ include '::ntp::config'
+ include '::ntp::service'
+
+ # Anchor this as per #8040 - this ensures that classes won't float off and
+ # mess everything up. You can read about this at:
+ # http://docs.puppetlabs.com/puppet/2.7/reference/lang_containment.html#known-issues
+ anchor { 'ntp::begin': }
+ anchor { 'ntp::end': }
+
+ Anchor['ntp::begin'] -> Class['::ntp::install'] -> Class['::ntp::config']
+ ~> Class['::ntp::service'] -> Anchor['ntp::end']
+
+}
diff --git a/puppet/modules/ntp/manifests/install.pp b/puppet/modules/ntp/manifests/install.pp
new file mode 100644
index 00000000..098949c3
--- /dev/null
+++ b/puppet/modules/ntp/manifests/install.pp
@@ -0,0 +1,9 @@
+#
+class ntp::install inherits ntp {
+
+ package { 'ntp':
+ ensure => $package_ensure,
+ name => $package_name,
+ }
+
+}
diff --git a/puppet/modules/ntp/manifests/params.pp b/puppet/modules/ntp/manifests/params.pp
new file mode 100644
index 00000000..10a4fb2b
--- /dev/null
+++ b/puppet/modules/ntp/manifests/params.pp
@@ -0,0 +1,116 @@
+class ntp::params {
+
+ $autoupdate = false
+ $config_template = 'ntp/ntp.conf.erb'
+ $keys_enable = false
+ $keys_controlkey = ''
+ $keys_requestkey = ''
+ $keys_trusted = []
+ $package_ensure = 'present'
+ $preferred_servers = []
+ $restrict = [
+ 'restrict default kod nomodify notrap nopeer noquery',
+ 'restrict -6 default kod nomodify notrap nopeer noquery',
+ 'restrict 127.0.0.1',
+ 'restrict -6 ::1',
+ ]
+ $service_enable = true
+ $service_ensure = 'running'
+ $service_manage = true
+
+ # On virtual machines allow large clock skews.
+ $panic = str2bool($::is_virtual) ? {
+ true => false,
+ default => true,
+ }
+
+ case $::osfamily {
+ 'Debian': {
+ $config = '/etc/ntp.conf'
+ $keys_file = '/etc/ntp/keys'
+ $driftfile = '/var/lib/ntp/drift'
+ $package_name = [ 'ntp' ]
+ $service_name = 'ntp'
+ $servers = [
+ '0.debian.pool.ntp.org iburst',
+ '1.debian.pool.ntp.org iburst',
+ '2.debian.pool.ntp.org iburst',
+ '3.debian.pool.ntp.org iburst',
+ ]
+ }
+ 'RedHat': {
+ $config = '/etc/ntp.conf'
+ $driftfile = '/var/lib/ntp/drift'
+ $keys_file = '/etc/ntp/keys'
+ $package_name = [ 'ntp' ]
+ $service_name = 'ntpd'
+ $servers = [
+ '0.centos.pool.ntp.org',
+ '1.centos.pool.ntp.org',
+ '2.centos.pool.ntp.org',
+ ]
+ }
+ 'SuSE': {
+ $config = '/etc/ntp.conf'
+ $driftfile = '/var/lib/ntp/drift/ntp.drift'
+ $keys_file = '/etc/ntp/keys'
+ $package_name = [ 'ntp' ]
+ $service_name = 'ntp'
+ $servers = [
+ '0.opensuse.pool.ntp.org',
+ '1.opensuse.pool.ntp.org',
+ '2.opensuse.pool.ntp.org',
+ '3.opensuse.pool.ntp.org',
+ ]
+ }
+ 'FreeBSD': {
+ $config = '/etc/ntp.conf'
+ $driftfile = '/var/db/ntpd.drift'
+ $keys_file = '/etc/ntp/keys'
+ $package_name = ['net/ntp']
+ $service_name = 'ntpd'
+ $servers = [
+ '0.freebsd.pool.ntp.org iburst maxpoll 9',
+ '1.freebsd.pool.ntp.org iburst maxpoll 9',
+ '2.freebsd.pool.ntp.org iburst maxpoll 9',
+ '3.freebsd.pool.ntp.org iburst maxpoll 9',
+ ]
+ }
+ 'Archlinux': {
+ $config = '/etc/ntp.conf'
+ $driftfile = '/var/lib/ntp/drift'
+ $keys_file = '/etc/ntp/keys'
+ $package_name = [ 'ntp' ]
+ $service_name = 'ntpd'
+ $servers = [
+ '0.pool.ntp.org',
+ '1.pool.ntp.org',
+ '2.pool.ntp.org',
+ ]
+ }
+ 'Linux': {
+ # Account for distributions that don't have $::osfamily specific settings.
+ case $::operatingsystem {
+ 'Gentoo': {
+ $config = '/etc/ntp.conf'
+ $driftfile = '/var/lib/ntp/drift'
+ $keys_file = '/etc/ntp/keys'
+ $package_name = ['net-misc/ntp']
+ $service_name = 'ntpd'
+ $servers = [
+ '0.gentoo.pool.ntp.org',
+ '1.gentoo.pool.ntp.org',
+ '2.gentoo.pool.ntp.org',
+ '3.gentoo.pool.ntp.org',
+ ]
+ }
+ default: {
+ fail("The ${module_name} module is not supported on an ${::operatingsystem} distribution.")
+ }
+ }
+ }
+ default: {
+ fail("The ${module_name} module is not supported on an ${::osfamily} based system.")
+ }
+ }
+}
diff --git a/puppet/modules/ntp/manifests/service.pp b/puppet/modules/ntp/manifests/service.pp
new file mode 100644
index 00000000..3f1ada0b
--- /dev/null
+++ b/puppet/modules/ntp/manifests/service.pp
@@ -0,0 +1,18 @@
+#
+class ntp::service inherits ntp {
+
+ if ! ($service_ensure in [ 'running', 'stopped' ]) {
+ fail('service_ensure parameter must be running or stopped')
+ }
+
+ if $service_manage == true {
+ service { 'ntp':
+ ensure => $service_ensure,
+ enable => $service_enable,
+ name => $service_name,
+ hasstatus => true,
+ hasrestart => true,
+ }
+ }
+
+}
diff --git a/puppet/modules/ntp/spec/classes/ntp_spec.rb b/puppet/modules/ntp/spec/classes/ntp_spec.rb
new file mode 100644
index 00000000..6c636f40
--- /dev/null
+++ b/puppet/modules/ntp/spec/classes/ntp_spec.rb
@@ -0,0 +1,261 @@
+require 'spec_helper'
+
+describe 'ntp' do
+
+ ['Debian', 'RedHat','SuSE', 'FreeBSD', 'Archlinux', 'Gentoo'].each do |system|
+ if system == 'Gentoo'
+ let(:facts) {{ :osfamily => 'Linux', :operatingsystem => system }}
+ else
+ let(:facts) {{ :osfamily => system }}
+ end
+
+ it { should include_class('ntp::install') }
+ it { should include_class('ntp::config') }
+ it { should include_class('ntp::service') }
+
+ describe "ntp::config on #{system}" do
+ it { should contain_file('/etc/ntp.conf').with_owner('0') }
+ it { should contain_file('/etc/ntp.conf').with_group('0') }
+ it { should contain_file('/etc/ntp.conf').with_mode('0644') }
+
+ describe 'allows template to be overridden' do
+ let(:params) {{ :config_template => 'my_ntp/ntp.conf.erb' }}
+ it { should contain_file('/etc/ntp.conf').with({
+ 'content' => /server foobar/})
+ }
+ end
+
+ describe "keys for osfamily #{system}" do
+ context "when enabled" do
+ let(:params) {{
+ :keys_enable => true,
+ :keys_file => '/etc/ntp/ntp.keys',
+ :keys_trusted => ['1', '2', '3'],
+ :keys_controlkey => '2',
+ :keys_requestkey => '3',
+ }}
+
+ it { should contain_file('/etc/ntp').with({
+ 'ensure' => 'directory'})
+ }
+ it { should contain_file('/etc/ntp.conf').with({
+ 'content' => /trustedkey 1 2 3/})
+ }
+ it { should contain_file('/etc/ntp.conf').with({
+ 'content' => /controlkey 2/})
+ }
+ it { should contain_file('/etc/ntp.conf').with({
+ 'content' => /requestkey 3/})
+ }
+ end
+ end
+
+ context "when disabled" do
+ let(:params) {{
+ :keys_enable => false,
+ :keys_file => '/etc/ntp/ntp.keys',
+ :keys_trusted => ['1', '2', '3'],
+ :keys_controlkey => '2',
+ :keys_requestkey => '3',
+ }}
+
+ it { should_not contain_file('/etc/ntp').with({
+ 'ensure' => 'directory'})
+ }
+ it { should_not contain_file('/etc/ntp.conf').with({
+ 'content' => /trustedkey 1 2 3/})
+ }
+ it { should_not contain_file('/etc/ntp.conf').with({
+ 'content' => /controlkey 2/})
+ }
+ it { should_not contain_file('/etc/ntp.conf').with({
+ 'content' => /requestkey 3/})
+ }
+ end
+
+ describe 'preferred servers' do
+ context "when set" do
+ let(:params) {{
+ :servers => ['a', 'b', 'c', 'd'],
+ :preferred_servers => ['a', 'b']
+ }}
+
+ it { should contain_file('/etc/ntp.conf').with({
+ 'content' => /server a prefer\nserver b prefer\nserver c\nserver d/})
+ }
+ end
+ context "when not set" do
+ let(:params) {{
+ :servers => ['a', 'b', 'c', 'd'],
+ :preferred_servers => []
+ }}
+
+ it { should_not contain_file('/etc/ntp.conf').with({
+ 'content' => /server a prefer/})
+ }
+ end
+ end
+
+ describe "ntp::install on #{system}" do
+ let(:params) {{ :package_ensure => 'present', :package_name => ['ntp'], }}
+
+ it { should contain_package('ntp').with(
+ :ensure => 'present',
+ :name => 'ntp'
+ )}
+
+ describe 'should allow package ensure to be overridden' do
+ let(:params) {{ :package_ensure => 'latest', :package_name => ['ntp'] }}
+ it { should contain_package('ntp').with_ensure('latest') }
+ end
+
+ describe 'should allow the package name to be overridden' do
+ let(:params) {{ :package_ensure => 'present', :package_name => ['hambaby'] }}
+ it { should contain_package('ntp').with_name('hambaby') }
+ end
+ end
+
+ describe 'ntp::service' do
+ let(:params) {{
+ :service_manage => true,
+ :service_enable => true,
+ :service_ensure => 'running',
+ :service_name => 'ntp'
+ }}
+
+ describe 'with defaults' do
+ it { should contain_service('ntp').with(
+ :enable => true,
+ :ensure => 'running',
+ :name => 'ntp'
+ )}
+ end
+
+ describe 'service_ensure' do
+ describe 'when overridden' do
+ let(:params) {{ :service_name => 'ntp', :service_ensure => 'stopped' }}
+ it { should contain_service('ntp').with_ensure('stopped') }
+ end
+ end
+
+ describe 'service_manage' do
+ let(:params) {{
+ :service_manage => false,
+ :service_enable => true,
+ :service_ensure => 'running',
+ :service_name => 'ntpd',
+ }}
+
+ it 'when set to false' do
+ should_not contain_service('ntp').with({
+ 'enable' => true,
+ 'ensure' => 'running',
+ 'name' => 'ntpd'
+ })
+ end
+ end
+ end
+ end
+
+ context 'ntp::config' do
+ describe "for operating system Gentoo" do
+ let(:facts) {{ :operatingsystem => 'Gentoo',
+ :osfamily => 'Linux' }}
+
+ it 'uses the NTP pool servers by default' do
+ should contain_file('/etc/ntp.conf').with({
+ 'content' => /server \d.gentoo.pool.ntp.org/,
+ })
+ end
+ end
+ describe "on osfamily Debian" do
+ let(:facts) {{ :osfamily => 'debian' }}
+
+ it 'uses the debian ntp servers by default' do
+ should contain_file('/etc/ntp.conf').with({
+ 'content' => /server \d.debian.pool.ntp.org iburst/,
+ })
+ end
+ end
+
+ describe "on osfamily RedHat" do
+ let(:facts) {{ :osfamily => 'RedHat' }}
+
+ it 'uses the redhat ntp servers by default' do
+ should contain_file('/etc/ntp.conf').with({
+ 'content' => /server \d.centos.pool.ntp.org/,
+ })
+ end
+ end
+
+ describe "on osfamily SuSE" do
+ let(:facts) {{ :osfamily => 'SuSE' }}
+
+ it 'uses the opensuse ntp servers by default' do
+ should contain_file('/etc/ntp.conf').with({
+ 'content' => /server \d.opensuse.pool.ntp.org/,
+ })
+ end
+ end
+
+ describe "on osfamily FreeBSD" do
+ let(:facts) {{ :osfamily => 'FreeBSD' }}
+
+ it 'uses the freebsd ntp servers by default' do
+ should contain_file('/etc/ntp.conf').with({
+ 'content' => /server \d.freebsd.pool.ntp.org iburst maxpoll 9/,
+ })
+ end
+ end
+
+ describe "on osfamily ArchLinux" do
+ let(:facts) {{ :osfamily => 'ArchLinux' }}
+
+ it 'uses the NTP pool servers by default' do
+ should contain_file('/etc/ntp.conf').with({
+ 'content' => /server \d.pool.ntp.org/,
+ })
+ end
+ end
+
+ describe "for operating system family unsupported" do
+ let(:facts) {{
+ :osfamily => 'unsupported',
+ }}
+
+ it { expect{ subject }.to raise_error(
+ /^The ntp module is not supported on an unsupported based system./
+ )}
+ end
+ end
+
+ describe 'for virtual machines' do
+ let(:facts) {{ :osfamily => 'Archlinux',
+ :is_virtual => 'true' }}
+
+ it 'should not use local clock as a time source' do
+ should_not contain_file('/etc/ntp.conf').with({
+ 'content' => /server.*127.127.1.0.*fudge.*127.127.1.0 stratum 10/,
+ })
+ end
+
+ it 'allows large clock skews' do
+ should contain_file('/etc/ntp.conf').with({
+ 'content' => /tinker panic 0/,
+ })
+ end
+ end
+
+ describe 'for physical machines' do
+ let(:facts) {{ :osfamily => 'Archlinux',
+ :is_virtual => 'false' }}
+
+ it 'disallows large clock skews' do
+ should_not contain_file('/etc/ntp.conf').with({
+ 'content' => /tinker panic 0/,
+ })
+ end
+ end
+ end
+
+end
diff --git a/puppet/modules/ntp/spec/fixtures/modules/my_ntp/templates/ntp.conf.erb b/puppet/modules/ntp/spec/fixtures/modules/my_ntp/templates/ntp.conf.erb
new file mode 100644
index 00000000..40cf67c6
--- /dev/null
+++ b/puppet/modules/ntp/spec/fixtures/modules/my_ntp/templates/ntp.conf.erb
@@ -0,0 +1,4 @@
+#my uber ntp config
+#
+
+server foobar
diff --git a/puppet/modules/ntp/spec/spec.opts b/puppet/modules/ntp/spec/spec.opts
new file mode 100644
index 00000000..91cd6427
--- /dev/null
+++ b/puppet/modules/ntp/spec/spec.opts
@@ -0,0 +1,6 @@
+--format
+s
+--colour
+--loadby
+mtime
+--backtrace
diff --git a/puppet/modules/ntp/spec/spec_helper.rb b/puppet/modules/ntp/spec/spec_helper.rb
new file mode 100644
index 00000000..2c6f5664
--- /dev/null
+++ b/puppet/modules/ntp/spec/spec_helper.rb
@@ -0,0 +1 @@
+require 'puppetlabs_spec_helper/module_spec_helper'
diff --git a/puppet/modules/ntp/spec/spec_helper_system.rb b/puppet/modules/ntp/spec/spec_helper_system.rb
new file mode 100644
index 00000000..d5208463
--- /dev/null
+++ b/puppet/modules/ntp/spec/spec_helper_system.rb
@@ -0,0 +1,26 @@
+require 'rspec-system/spec_helper'
+require 'rspec-system-puppet/helpers'
+require 'rspec-system-serverspec/helpers'
+include Serverspec::Helper::RSpecSystem
+include Serverspec::Helper::DetectOS
+include RSpecSystemPuppet::Helpers
+
+RSpec.configure do |c|
+ # Project root
+ proj_root = File.expand_path(File.join(File.dirname(__FILE__), '..'))
+
+ # Enable colour
+ c.tty = true
+
+ c.include RSpecSystemPuppet::Helpers
+
+ # This is where we 'setup' the nodes before running our tests
+ c.before :suite do
+ # Install puppet
+ puppet_install
+
+ # Install modules and dependencies
+ puppet_module_install(:source => proj_root, :module_name => 'ntp')
+ shell('puppet module install puppetlabs-stdlib')
+ end
+end
diff --git a/puppet/modules/ntp/spec/system/basic_spec.rb b/puppet/modules/ntp/spec/system/basic_spec.rb
new file mode 100644
index 00000000..7b717a04
--- /dev/null
+++ b/puppet/modules/ntp/spec/system/basic_spec.rb
@@ -0,0 +1,13 @@
+require 'spec_helper_system'
+
+# Here we put the more basic fundamental tests, ultra obvious stuff.
+describe "basic tests:" do
+ context 'make sure we have copied the module across' do
+ # No point diagnosing any more if the module wasn't copied properly
+ context shell 'ls /etc/puppet/modules/ntp' do
+ its(:stdout) { should =~ /Modulefile/ }
+ its(:stderr) { should be_empty }
+ its(:exit_code) { should be_zero }
+ end
+ end
+end
diff --git a/puppet/modules/ntp/spec/system/class_spec.rb b/puppet/modules/ntp/spec/system/class_spec.rb
new file mode 100644
index 00000000..49dfc641
--- /dev/null
+++ b/puppet/modules/ntp/spec/system/class_spec.rb
@@ -0,0 +1,39 @@
+require 'spec_helper_system'
+
+describe "ntp class:" do
+ context 'should run successfully' do
+ pp = "class { 'ntp': }"
+
+ context puppet_apply(pp) do
+ its(:stderr) { should be_empty }
+ its(:exit_code) { should_not == 1 }
+ its(:refresh) { should be_nil }
+ its(:stderr) { should be_empty }
+ its(:exit_code) { should be_zero }
+ end
+ end
+
+ context 'service_ensure => stopped:' do
+ pp = "class { 'ntp': service_ensure => stopped }"
+
+ context puppet_apply(pp) do
+ its(:stderr) { should be_empty }
+ its(:exit_code) { should_not == 1 }
+ its(:refresh) { should be_nil }
+ its(:stderr) { should be_empty }
+ its(:exit_code) { should be_zero }
+ end
+ end
+
+ context 'service_ensure => running:' do
+ pp = "class { 'ntp': service_ensure => running }"
+
+ context puppet_apply(pp) do |r|
+ its(:stderr) { should be_empty }
+ its(:exit_code) { should_not == 1 }
+ its(:refresh) { should be_nil }
+ its(:stderr) { should be_empty }
+ its(:exit_code) { should be_zero }
+ end
+ end
+end
diff --git a/puppet/modules/ntp/spec/system/ntp_config_spec.rb b/puppet/modules/ntp/spec/system/ntp_config_spec.rb
new file mode 100644
index 00000000..194cdf10
--- /dev/null
+++ b/puppet/modules/ntp/spec/system/ntp_config_spec.rb
@@ -0,0 +1,35 @@
+require 'spec_helper_system'
+
+describe 'ntp::config class' do
+ let(:os) {
+ node.facts['osfamily']
+ }
+
+ puppet_apply(%{
+ class { 'ntp': }
+ })
+
+ case node.facts['osfamily']
+ when 'FreeBSD'
+ line = '0.freebsd.pool.ntp.org iburst maxpoll 9'
+ when 'Debian'
+ line = '0.debian.pool.ntp.org iburst'
+ when 'RedHat'
+ line = '0.centos.pool.ntp.org'
+ when 'SuSE'
+ line = '0.opensuse.pool.ntp.org'
+ when 'Linux'
+ case node.facts['operatingsystem']
+ when 'ArchLinux'
+ line = '0.pool.ntp.org'
+ when 'Gentoo'
+ line = '0.gentoo.pool.ntp.org'
+ end
+ end
+
+ describe file('/etc/ntp.conf') do
+ it { should be_file }
+ it { should contain line }
+ end
+
+end
diff --git a/puppet/modules/ntp/spec/system/ntp_install_spec.rb b/puppet/modules/ntp/spec/system/ntp_install_spec.rb
new file mode 100644
index 00000000..39759c5e
--- /dev/null
+++ b/puppet/modules/ntp/spec/system/ntp_install_spec.rb
@@ -0,0 +1,31 @@
+require 'spec_helper_system'
+
+
+describe 'ntp::install class' do
+ let(:os) {
+ node.facts['osfamily']
+ }
+
+ case node.facts['osfamily']
+ when 'FreeBSD'
+ packagename = 'net/ntp'
+ when 'Linux'
+ case node.facts['operatingsystem']
+ when 'ArchLinux'
+ packagename = 'ntp'
+ when 'Gentoo'
+ packagename = 'net-misc/ntp'
+ end
+ else
+ packagename = 'ntp'
+ end
+
+ puppet_apply(%{
+ class { 'ntp': }
+ })
+
+ describe package(packagename) do
+ it { should be_installed }
+ end
+
+end
diff --git a/puppet/modules/ntp/spec/system/ntp_service_spec.rb b/puppet/modules/ntp/spec/system/ntp_service_spec.rb
new file mode 100644
index 00000000..b97e2a4e
--- /dev/null
+++ b/puppet/modules/ntp/spec/system/ntp_service_spec.rb
@@ -0,0 +1,25 @@
+require 'spec_helper_system'
+
+
+describe 'ntp::service class' do
+ let(:os) {
+ node.facts['osfamily']
+ }
+
+ case node.facts['osfamily']
+ when 'RedHat', 'FreeBSD', 'Linux'
+ servicename = 'ntpd'
+ else
+ servicename = 'ntp'
+ end
+
+ puppet_apply(%{
+ class { 'ntp': }
+ })
+
+ describe service(servicename) do
+ it { should be_enabled }
+ it { should be_running }
+ end
+
+end
diff --git a/puppet/modules/ntp/spec/system/preferred_servers_spec.rb b/puppet/modules/ntp/spec/system/preferred_servers_spec.rb
new file mode 100644
index 00000000..686861bc
--- /dev/null
+++ b/puppet/modules/ntp/spec/system/preferred_servers_spec.rb
@@ -0,0 +1,20 @@
+require 'spec_helper_system'
+
+describe 'preferred servers' do
+ it 'applies cleanly' do
+ puppet_apply(%{
+ class { '::ntp':
+ servers => ['a', 'b', 'c', 'd'],
+ preferred_servers => ['c', 'd'],
+ }
+ })
+ end
+
+ describe file('/etc/ntp.conf') do
+ it { should be_file }
+ it { should contain 'server a' }
+ it { should contain 'server b' }
+ it { should contain 'server c prefer' }
+ it { should contain 'server d prefer' }
+ end
+end
diff --git a/puppet/modules/ntp/spec/system/restrict_spec.rb b/puppet/modules/ntp/spec/system/restrict_spec.rb
new file mode 100644
index 00000000..ae23bc01
--- /dev/null
+++ b/puppet/modules/ntp/spec/system/restrict_spec.rb
@@ -0,0 +1,20 @@
+require 'spec_helper_system'
+
+describe "ntp class with restrict:" do
+ context 'should run successfully' do
+ pp = "class { 'ntp': restrict => ['test restrict']}"
+
+ context puppet_apply(pp) do
+ its(:stderr) { should be_empty }
+ its(:exit_code) { should_not == 1 }
+ its(:refresh) { should be_nil }
+ its(:stderr) { should be_empty }
+ its(:exit_code) { should be_zero }
+ end
+ end
+
+ describe file('/etc/ntp.conf') do
+ it { should contain('test restrict') }
+ end
+
+end
diff --git a/puppet/modules/ntp/spec/unit/puppet/provider/README.markdown b/puppet/modules/ntp/spec/unit/puppet/provider/README.markdown
new file mode 100644
index 00000000..70258502
--- /dev/null
+++ b/puppet/modules/ntp/spec/unit/puppet/provider/README.markdown
@@ -0,0 +1,4 @@
+Provider Specs
+==============
+
+Define specs for your providers under this directory.
diff --git a/puppet/modules/ntp/spec/unit/puppet/type/README.markdown b/puppet/modules/ntp/spec/unit/puppet/type/README.markdown
new file mode 100644
index 00000000..1ee19ac8
--- /dev/null
+++ b/puppet/modules/ntp/spec/unit/puppet/type/README.markdown
@@ -0,0 +1,4 @@
+Resource Type Specs
+===================
+
+Define specs for your resource types in this directory.
diff --git a/puppet/modules/ntp/templates/ntp.conf.erb b/puppet/modules/ntp/templates/ntp.conf.erb
new file mode 100644
index 00000000..94b36755
--- /dev/null
+++ b/puppet/modules/ntp/templates/ntp.conf.erb
@@ -0,0 +1,43 @@
+# ntp.conf: Managed by puppet.
+#
+<% if @panic == false -%>
+# Keep ntpd from panicking in the event of a large clock skew
+# when a VM guest is suspended and resumed.
+tinker panic 0
+<% end -%>
+
+<% if @restrict != [] -%>
+# Permit time synchronization with our time source, but do not
+# permit the source to query or modify the service on this system.
+<% @restrict.flatten.each do |restrict| -%>
+<%= restrict %>
+<% end %>
+<% end -%>
+
+# Servers
+<% [@servers].flatten.each do |server| -%>
+server <%= server %><% if @preferred_servers.include?(server) -%> prefer<% end %>
+<% end -%>
+
+<% if scope.lookupvar('::is_virtual') == "false" -%>
+# Undisciplined Local Clock. This is a fake driver intended for backup
+# and when no outside source of synchronized time is available.
+server 127.127.1.0 # local clock
+fudge 127.127.1.0 stratum 10
+<% end -%>
+
+# Driftfile.
+driftfile <%= @driftfile %>
+
+<% if @keys_enable -%>
+keys <%= @keys_file %>
+<% unless @keys_trusted.empty? -%>
+trustedkey <%= @keys_trusted.join(' ') %>
+<% end -%>
+<% if @keys_requestkey != '' -%>
+requestkey <%= @keys_requestkey %>
+<% end -%>
+<% if @keys_controlkey != '' -%>
+controlkey <%= @keys_controlkey %>
+<% end -%>
+<% end -%>
diff --git a/puppet/modules/ntp/tests/init.pp b/puppet/modules/ntp/tests/init.pp
new file mode 100644
index 00000000..e6d9b537
--- /dev/null
+++ b/puppet/modules/ntp/tests/init.pp
@@ -0,0 +1,11 @@
+node default {
+
+ notify { 'enduser-before': }
+ notify { 'enduser-after': }
+
+ class { 'ntp':
+ require => Notify['enduser-before'],
+ before => Notify['enduser-after'],
+ }
+
+}
diff --git a/puppet/modules/obfsproxy/files/obfsproxy_init b/puppet/modules/obfsproxy/files/obfsproxy_init
new file mode 100755
index 00000000..01c8013a
--- /dev/null
+++ b/puppet/modules/obfsproxy/files/obfsproxy_init
@@ -0,0 +1,93 @@
+#!/bin/sh
+
+### BEGIN INIT INFO
+# Provides: obfsproxy daemon
+# Required-Start: $remote_fs $syslog
+# Required-Stop: $remote_fs $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: obfsproxy daemon
+# Description: obfsproxy daemon
+### END INIT INFO
+
+. /lib/lsb/init-functions
+
+DAEMON=/usr/bin/obfsproxy
+NAME=obfsproxy
+DESC="obfsproxy daemon"
+USER=obfsproxy
+DATDIR=/etc/obfsproxy
+PIDFILE=/var/run/obfsproxy.pid
+CONF=$DATDIR/obfsproxy.conf
+LOGFILE=/var/log/obfsproxy.log
+
+# If the daemon is not there, then exit.
+test -x $DAEMON || exit 0
+
+if [ -f $CONF ] ; then
+ . $CONF
+else
+ echo "Obfsproxy configuration file is missing, aborting..."
+ exit 2
+fi
+
+DAEMONARGS=" --log-min-severity=$LOG --log-file=$LOGFILE --data-dir=$DATDIR \
+ $TRANSPORT $PARAM --dest=$DEST_IP:$DEST_PORT server $BINDADDR:$PORT"
+
+start_obfsproxy() {
+ start-stop-daemon --start --quiet --oknodo -m --pidfile $PIDFILE \
+ -b -c $USER --startas $DAEMON --$DAEMONARGS
+}
+
+stop_obfsproxy() {
+ start-stop-daemon --stop --quiet --oknodo --pidfile $PIDFILE
+}
+
+status_obfsproxy() {
+ status_of_proc -p $PIDFILE $DAEMON $NAME
+}
+
+case $1 in
+ start)
+ if [ -e $PIDFILE ]; then
+ status_obfsproxy
+ if [ $? = "0" ]; then
+ exit
+ fi
+ fi
+ log_begin_msg "Starting $DESC"
+ start_obfsproxy
+ log_end_msg $?
+ ;;
+ stop)
+ if [ -e $PIDFILE ]; then
+ status_obfsproxy
+ if [ $? = "0" ]; then
+ log_begin_msg "Stopping $DESC"
+ stop_obfsproxy
+ rm -f $PIDFILE
+ log_end_msg $?
+ fi
+ else
+ status_obfsproxy
+ fi
+ ;;
+ restart)
+ $0 stop && sleep 2 && $0 start
+ ;;
+ status)
+ status_obfsproxy
+ ;;
+ reload)
+ if [ -e $PIDFILE ]; then
+ start-stop-daemon --stop --signal USR1 --quiet --pidfile $PIDFILE --name $NAME
+ log_success_msg "$DESC reloaded successfully"
+ else
+ log_failure_msg "$PIDFILE does not exist"
+ fi
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|restart|reload|status}"
+ exit 2
+ ;;
+esac
diff --git a/puppet/modules/obfsproxy/files/obfsproxy_logrotate b/puppet/modules/obfsproxy/files/obfsproxy_logrotate
new file mode 100644
index 00000000..e5679d0c
--- /dev/null
+++ b/puppet/modules/obfsproxy/files/obfsproxy_logrotate
@@ -0,0 +1,14 @@
+/var/log/obfsproxy.log {
+ daily
+ missingok
+ rotate 3
+ compress
+ delaycompress
+ notifempty
+ create 600 obfsproxy obfsproxy
+ postrotate
+ if [ -f /var/run/obfsproxy.pid ]; then
+ /etc/init.d/obfsproxy restart > /dev/null
+ fi
+ endscript
+}
diff --git a/puppet/modules/obfsproxy/manifests/init.pp b/puppet/modules/obfsproxy/manifests/init.pp
new file mode 100644
index 00000000..6a3d2c72
--- /dev/null
+++ b/puppet/modules/obfsproxy/manifests/init.pp
@@ -0,0 +1,86 @@
+# deploy obfsproxy service
+class obfsproxy (
+ $transport,
+ $bind_address,
+ $port,
+ $param,
+ $dest_ip,
+ $dest_port,
+ $log_level = 'info'
+){
+
+ $user = 'obfsproxy'
+ $conf = '/etc/obfsproxy/obfsproxy.conf'
+
+ user { $user:
+ ensure => present,
+ system => true,
+ gid => $user,
+ }
+
+ group { $user:
+ ensure => present,
+ system => true,
+ }
+
+ file { '/etc/init.d/obfsproxy':
+ ensure => present,
+ path => '/etc/init.d/obfsproxy',
+ source => 'puppet:///modules/obfsproxy/obfsproxy_init',
+ owner => 'root',
+ group => 'root',
+ mode => '0750',
+ require => File[$conf],
+ }
+
+ file { $conf :
+ ensure => present,
+ path => $conf,
+ owner => 'root',
+ group => 'root',
+ mode => '0600',
+ content => template('obfsproxy/etc_conf.erb'),
+ }
+
+ file { '/etc/obfsproxy':
+ ensure => directory,
+ owner => $user,
+ group => $user,
+ mode => '0700',
+ require => User[$user],
+ }
+
+ file { '/var/log/obfsproxy.log':
+ ensure => present,
+ owner => $user,
+ group => $user,
+ mode => '0640',
+ require => User[$user],
+ }
+
+ file { '/etc/logrotate.d/obfsproxy':
+ ensure => present,
+ source => 'puppet:///modules/obfsproxy/obfsproxy_logrotate',
+ owner => 'root',
+ group => 'root',
+ mode => '0644',
+ require => File['/var/log/obfsproxy.log'],
+ }
+
+ package { 'obfsproxy':
+ ensure => present
+ }
+
+ service { 'obfsproxy':
+ ensure => running,
+ subscribe => File[$conf],
+ require => [
+ Package['obfsproxy'],
+ File['/etc/init.d/obfsproxy'],
+ User[$user],
+ Group[$user]]
+ }
+
+
+}
+
diff --git a/puppet/modules/obfsproxy/templates/etc_conf.erb b/puppet/modules/obfsproxy/templates/etc_conf.erb
new file mode 100644
index 00000000..8959ef78
--- /dev/null
+++ b/puppet/modules/obfsproxy/templates/etc_conf.erb
@@ -0,0 +1,11 @@
+TRANSPORT=<%= @transport %>
+PORT=<%= @port %>
+DEST_IP=<%= @dest_ip %>
+DEST_PORT=<%= @dest_port %>
+<% if @transport == "scramblesuit" -%>
+PARAM=--password=<%= @param %>
+<% else -%>
+PARAM=<%= @param %>
+<% end -%>
+LOG=<%= @log_level %>
+BINDADDR=<%= @bind_address %>
diff --git a/puppet/modules/opendkim/manifests/init.pp b/puppet/modules/opendkim/manifests/init.pp
new file mode 100644
index 00000000..4d4c5312
--- /dev/null
+++ b/puppet/modules/opendkim/manifests/init.pp
@@ -0,0 +1,67 @@
+#
+# I am not sure about what issues might arise with DKIM key sizes
+# larger than 2048. It might or might not be supported. See:
+# http://dkim.org/specs/rfc4871-dkimbase.html#rfc.section.3.3.3
+#
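+# The class pulls its settings from hiera. The lookups below expect data
+# roughly shaped like this (values are illustrative):
+#
+#   domain:
+#     full_suffix: 'example.org'
+#   mx:
+#     dkim:
+#       selector: 'dkim'
+#       public_key: '/etc/dkim/dkim.pub'
+#       private_key: '/etc/dkim/dkim.key'
+#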
+class opendkim {
+
+ $domain_hash = hiera('domain')
+ $domain = $domain_hash['full_suffix']
+ $mx = hiera('mx')
+ $dkim = $mx['dkim']
+ $selector = $dkim['selector']
+ $dkim_cert = $dkim['public_key']
+ $dkim_key = $dkim['private_key']
+
+ ensure_packages(['opendkim', 'libvbr2'])
+
+ # postfix user needs to be in the opendkim group
+ # in order to access the opendkim socket located at:
+ # local:/var/run/opendkim/opendkim.sock
+ user { 'postfix':
+ groups => 'opendkim',
+ require => Package['opendkim'];
+ }
+
+ service { 'opendkim':
+ ensure => running,
+ enable => true,
+ hasstatus => true,
+ hasrestart => true,
+ subscribe => File[$dkim_key];
+ }
+
+ file {
+ '/etc/opendkim.conf':
+ ensure => file,
+ content => template('opendkim/opendkim.conf'),
+ mode => '0644',
+ owner => root,
+ group => root,
+ notify => Service['opendkim'],
+ require => Package['opendkim'];
+
+ '/etc/default/opendkim.conf':
+ ensure => file,
+ content => 'SOCKET="inet:8891@localhost" # listen on loopback on port 8891',
+ mode => '0644',
+ owner => root,
+ group => root,
+ notify => Service['opendkim'],
+ require => Package['opendkim'];
+
+ $dkim_key:
+ ensure => file,
+ mode => '0600',
+ owner => 'opendkim',
+ group => 'opendkim',
+ require => Package['opendkim'];
+
+ $dkim_cert:
+ ensure => file,
+ mode => '0600',
+ owner => 'opendkim',
+ group => 'opendkim',
+ require => Package['opendkim'];
+ }
+}
diff --git a/puppet/modules/opendkim/templates/opendkim.conf b/puppet/modules/opendkim/templates/opendkim.conf
new file mode 100644
index 00000000..5a948229
--- /dev/null
+++ b/puppet/modules/opendkim/templates/opendkim.conf
@@ -0,0 +1,45 @@
+# This is a basic configuration that can easily be adapted to suit a standard
+# installation. For more advanced options, see opendkim.conf(5) and/or
+# /usr/share/doc/opendkim/examples/opendkim.conf.sample.
+
+# Log to syslog
+Syslog yes
+SyslogSuccess yes
+LogWhy no
+# Required to use local socket with MTAs that access the socket as a non-
+# privileged user (e.g. Postfix)
+UMask 002
+
+Domain <%= @domain %>
+SubDomains yes
+
+# set internal hosts to all the known hosts, like mydomains?
+
+# can we generate a larger key and get it in dns?
+KeyFile <%= @dkim_key %>
+
+Selector <%= @selector %>
+
+# Commonly-used options; the commented-out versions show the defaults.
+Canonicalization relaxed
+#Mode sv
+#ADSPDiscard no
+
+SignatureAlgorithm rsa-sha256
+
+# Always oversign From (sign using the actual From and a null From) to prevent
+# malicious insertion of header fields (From and/or others) between the signer
+# and the verifier. From is oversigned by default in the Debian package
+# because it is often the identity key used by reputation systems and thus
+# somewhat security sensitive.
+OversignHeaders From
+
+# List domains to use for RFC 6541 DKIM Authorized Third-Party Signatures
+# (ATPS) (experimental)
+
+#ATPSDomains example.com
+
+RemoveOldSignatures yes
+
+Mode sv
+BaseDirectory /var/tmp
diff --git a/puppet/modules/openvpn/.fixtures.yml b/puppet/modules/openvpn/.fixtures.yml
new file mode 100644
index 00000000..1125ecca
--- /dev/null
+++ b/puppet/modules/openvpn/.fixtures.yml
@@ -0,0 +1,6 @@
+fixtures:
+ repositories:
+ concat: git://github.com/ripienaar/puppet-concat.git
+ symlinks:
+ openvpn: "#{source_dir}"
+
diff --git a/puppet/modules/openvpn/.gitignore b/puppet/modules/openvpn/.gitignore
new file mode 100644
index 00000000..6fd248b3
--- /dev/null
+++ b/puppet/modules/openvpn/.gitignore
@@ -0,0 +1,3 @@
+pkg
+spec/fixtures
+.vagrant
diff --git a/puppet/modules/openvpn/.rvmrc b/puppet/modules/openvpn/.rvmrc
new file mode 100644
index 00000000..6fbfb7f1
--- /dev/null
+++ b/puppet/modules/openvpn/.rvmrc
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+
+# This is an RVM Project .rvmrc file, used to automatically load the ruby
+# development environment upon cd'ing into the directory
+
+# First we specify our desired <ruby>[@<gemset>], the @gemset name is optional,
+# Only full ruby name is supported here, for short names use:
+# echo "rvm use 1.9.3" > .rvmrc
+environment_id="ruby-1.9.3-p194@puppet"
+
+# Uncomment the following lines if you want to verify rvm version per project
+# rvmrc_rvm_version="1.15.8 (stable)" # 1.10.1 seems like a safe start
+# eval "$(echo ${rvm_version}.${rvmrc_rvm_version} | awk -F. '{print "[[ "$1*65536+$2*256+$3" -ge "$4*65536+$5*256+$6" ]]"}' )" || {
+# echo "This .rvmrc file requires at least RVM ${rvmrc_rvm_version}, aborting loading."
+# return 1
+# }
+
+# First we attempt to load the desired environment directly from the environment
+# file. This is very fast and efficient compared to running through the entire
+# CLI and selector. If you want feedback on which environment was used then
+# insert the word 'use' after --create as this triggers verbose mode.
+if [[ -d "${rvm_path:-$HOME/.rvm}/environments"
+ && -s "${rvm_path:-$HOME/.rvm}/environments/$environment_id" ]]
+then
+ \. "${rvm_path:-$HOME/.rvm}/environments/$environment_id"
+ [[ -s "${rvm_path:-$HOME/.rvm}/hooks/after_use" ]] &&
+ \. "${rvm_path:-$HOME/.rvm}/hooks/after_use" || true
+ if [[ $- == *i* ]] # check for interactive shells
+ then echo "Using: $(tput setaf 2)$GEM_HOME$(tput sgr0)" # show the user the ruby and gemset they are using in green
+ else echo "Using: $GEM_HOME" # don't use colors in non-interactive shells
+ fi
+else
+ # If the environment file has not yet been created, use the RVM CLI to select.
+ rvm --create use "$environment_id" || {
+ echo "Failed to create RVM environment '${environment_id}'."
+ return 1
+ }
+fi
diff --git a/puppet/modules/openvpn/.travis.yml b/puppet/modules/openvpn/.travis.yml
new file mode 100644
index 00000000..da5c389d
--- /dev/null
+++ b/puppet/modules/openvpn/.travis.yml
@@ -0,0 +1,29 @@
+language: ruby
+bundler_args: --without development
+rvm:
+ - 1.8.7
+ - 1.9.3
+ - 2.0.0
+script:
+ - "rake lint"
+ - "rake spec SPEC_OPTS='--format documentation'"
+env:
+ - PUPPET_VERSION="~> 2.7.0"
+ - PUPPET_VERSION="~> 3.0.0"
+ - PUPPET_VERSION="~> 3.1.0"
+ - PUPPET_VERSION="~> 3.2.0"
+matrix:
+ exclude:
+ - rvm: 1.9.3
+ env: PUPPET_VERSION="~> 2.7.0"
+ - rvm: 2.0.0
+ env: PUPPET_VERSION="~> 2.7.0"
+ - rvm: 2.0.0
+ env: PUPPET_VERSION="~> 3.0.0"
+ - rvm: 2.0.0
+ env: PUPPET_VERSION="~> 3.1.0"
+notifications:
+ email: false
+ on_success: always
+ on_failure: always
diff --git a/puppet/modules/openvpn/Gemfile b/puppet/modules/openvpn/Gemfile
new file mode 100644
index 00000000..68e10e7d
--- /dev/null
+++ b/puppet/modules/openvpn/Gemfile
@@ -0,0 +1,7 @@
+source :rubygems
+
+puppetversion = ENV['PUPPET_VERSION']
+gem 'puppet', puppetversion, :require => false
+gem 'puppet-lint'
+gem 'rspec-puppet'
+gem 'puppetlabs_spec_helper'
diff --git a/puppet/modules/openvpn/Gemfile.lock b/puppet/modules/openvpn/Gemfile.lock
new file mode 100644
index 00000000..9fce3f98
--- /dev/null
+++ b/puppet/modules/openvpn/Gemfile.lock
@@ -0,0 +1,36 @@
+GEM
+ remote: http://rubygems.org/
+ specs:
+ diff-lcs (1.1.3)
+ facter (1.6.17)
+ hiera (1.0.0)
+ metaclass (0.0.1)
+ mocha (0.13.1)
+ metaclass (~> 0.0.1)
+ puppet (3.0.2)
+ facter (~> 1.6.11)
+ hiera (~> 1.0.0)
+ puppetlabs_spec_helper (0.4.0)
+ mocha (>= 0.10.5)
+ rake
+ rspec (>= 2.9.0)
+ rspec-puppet (>= 0.1.1)
+ rake (10.0.3)
+ rspec (2.12.0)
+ rspec-core (~> 2.12.0)
+ rspec-expectations (~> 2.12.0)
+ rspec-mocks (~> 2.12.0)
+ rspec-core (2.12.2)
+ rspec-expectations (2.12.1)
+ diff-lcs (~> 1.1.3)
+ rspec-mocks (2.12.1)
+ rspec-puppet (0.1.5)
+ rspec
+
+PLATFORMS
+ ruby
+
+DEPENDENCIES
+ puppet
+ puppetlabs_spec_helper
+ rspec-puppet
diff --git a/puppet/modules/openvpn/LICENSE b/puppet/modules/openvpn/LICENSE
new file mode 100644
index 00000000..f433b1a5
--- /dev/null
+++ b/puppet/modules/openvpn/LICENSE
@@ -0,0 +1,177 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
diff --git a/puppet/modules/openvpn/Modulefile b/puppet/modules/openvpn/Modulefile
new file mode 100644
index 00000000..679e7e64
--- /dev/null
+++ b/puppet/modules/openvpn/Modulefile
@@ -0,0 +1,11 @@
+name 'luxflux-openvpn'
+version '2.1.0'
+source 'https://github.com/luxflux/puppet-openvpn'
+author 'luxflux'
+license 'Apache 2.0'
+summary 'OpenVPN server puppet module'
+description 'Puppet module to manage OpenVPN servers'
+project_page 'https://github.com/luxflux/puppet-openvpn'
+
+## Add dependencies, if any:
+dependency 'ripienaar/concat', '0.2.0'
diff --git a/puppet/modules/openvpn/Rakefile b/puppet/modules/openvpn/Rakefile
new file mode 100644
index 00000000..14f1c246
--- /dev/null
+++ b/puppet/modules/openvpn/Rakefile
@@ -0,0 +1,2 @@
+require 'rubygems'
+require 'puppetlabs_spec_helper/rake_tasks'
diff --git a/puppet/modules/openvpn/Readme.markdown b/puppet/modules/openvpn/Readme.markdown
new file mode 100644
index 00000000..6bcf49ea
--- /dev/null
+++ b/puppet/modules/openvpn/Readme.markdown
@@ -0,0 +1,54 @@
+# OpenVPN Puppet module
+
+Puppet module to manage OpenVPN servers
+
+## Features:
+
+* Client-specific rules and access policies
+* Generated client configurations and SSL-Certificates
+* Downloadable client configurations and SSL-Certificates for easy client configuration
+* Support for multiple server instances
+
+Tested on Ubuntu Precise Pangolin, CentOS 6, RedHat 6.
+
+
+## Dependencies
+ - [puppet-concat](https://github.com/ripienaar/puppet-concat)
+
+
+## Example
+
+```puppet
+ # add a server instance
+ openvpn::server { 'winterthur':
+ country => 'CH',
+ province => 'ZH',
+ city => 'Winterthur',
+ organization => 'example.org',
+ email => 'root@example.org',
+ server => '10.200.200.0 255.255.255.0'
+ }
+
+ # define clients
+ openvpn::client { 'client1':
+ server => 'winterthur'
+ }
+ openvpn::client { 'client2':
+ server => 'winterthur'
+ }
+
+ openvpn::client_specific_config { 'client1':
+ server => 'winterthur',
+ ifconfig => '10.200.200.50 255.255.255.0'
+ }
+```
+
+Don't forget the [sysctl](https://github.com/luxflux/puppet-sysctl) directive ```net.ipv4.ip_forward```!
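+
+A minimal sketch of what that can look like (the resource interface shown is
+illustrative; check the API of the sysctl module you actually use):
+
+```puppet
+  # hypothetical sysctl resource enabling IPv4 forwarding
+  sysctl { 'net.ipv4.ip_forward':
+    value => '1',
+  }
+```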
+
+
+## Contributors
+
+These fine folks helped to get this far with this module:
+* [@jlambert121](https://github.com/jlambert121)
+* [@jlk](https://github.com/jlk)
+* [@elisiano](https://github.com/elisiano)
diff --git a/puppet/modules/openvpn/Vagrantfile b/puppet/modules/openvpn/Vagrantfile
new file mode 100644
index 00000000..88875ff8
--- /dev/null
+++ b/puppet/modules/openvpn/Vagrantfile
@@ -0,0 +1,42 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+def server_config(config)
+ config.vm.provision :puppet, :module_path => '..' do |puppet|
+ puppet.manifests_path = "vagrant"
+ puppet.manifest_file = "server.pp"
+ end
+end
+
+def client_config(config)
+ config.vm.provision :puppet, :module_path => '..' do |puppet|
+ puppet.manifests_path = "vagrant"
+ puppet.manifest_file = "client.pp"
+ end
+end
+
+Vagrant::Config.run do |config|
+
+ config.vm.define :server_ubuntu do |c|
+ c.vm.box = 'precise64'
+ server_config c
+ c.vm.network :hostonly, '10.255.255.10'
+ end
+
+ config.vm.define :server_centos do |c|
+ c.vm.box = 'centos63'
+
+ c.vm.provision :shell, :inline => 'if [ ! -f rpmforge-release-0.5.2-2.el6.rf.x86_64.rpm ]; then wget -q http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.2-2.el6.rf.x86_64.rpm; fi'
+ c.vm.provision :shell, :inline => 'yum install -y rpmforge-release-0.5.2-2.el6.rf.x86_64.rpm || exit 0'
+
+ server_config c
+ c.vm.network :hostonly, '10.255.255.11'
+ end
+
+ config.vm.define :client_ubuntu do |c|
+ c.vm.box = 'precise64'
+ client_config c
+ c.vm.network :hostonly, '10.255.255.20'
+ end
+
+end
diff --git a/puppet/modules/openvpn/manifests/client.pp b/puppet/modules/openvpn/manifests/client.pp
new file mode 100644
index 00000000..92c6aa4e
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/client.pp
@@ -0,0 +1,187 @@
+# == Define: openvpn::client
+#
+# This define creates the client certs for a specified openvpn server as well
+# as creating a tarball that can be directly imported into openvpn clients
+#
+#
+# === Parameters
+#
+# [*server*]
+# String. Name of the corresponding openvpn endpoint
+# Required
+#
+# [*compression*]
+# String. Which compression algorithm to use
+# Default: comp-lzo
+# Options: comp-lzo or '' (disable compression)
+#
+# [*dev*]
+# String. Device method
+# Default: tun
+# Options: tun (routed connections), tap (bridged connections)
+#
+# [*mute*]
+# Integer. Set log mute level
+# Default: 20
+#
+# [*mute_replay_warnings*]
+# Boolean. Silence duplicate packet warnings (common on wireless networks)
+# Default: true
+#
+# [*nobind*]
+# Boolean. Whether or not to bind to a specific port number
+# Default: true
+#
+# [*persist_key*]
+# Boolean. Try to retain access to resources that may be unavailable
+# because of privilege downgrades
+# Default: true
+#
+# [*persist_tun*]
+# Boolean. Try to retain access to resources that may be unavailable
+# because of privilege downgrades
+# Default: true
+#
+# [*port*]
+# Integer. The port the openvpn server service is running on
+# Default: 1194
+#
+# [*proto*]
+# String. Which IP protocol to use.
+# Default: tcp
+# Options: tcp or udp
+#
+# [*remote_host*]
+# String. The IP or hostname of the openvpn server service
+# Default: FQDN
+#
+# [*resolv_retry*]
+# Integer/String. How many seconds should the openvpn client try to resolve
+# the server's hostname
+# Default: infinite
+# Options: Integer or infinite
+#
+# [*verb*]
+# Integer. Level of logging verbosity
+# Default: 3
+#
+#
+# === Examples
+#
+# openvpn::client {
+# 'my_user':
+# server => 'contractors',
+# remote_host => 'vpn.mycompany.com'
+# }
+#
+# * Removal:
+# Manual process right now, todo for the future
+#
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+# * John Kinsella <mailto:jlkinsel@gmail.com>
+# * Justin Lambert <mailto:jlambert@letsevenup.com>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+define openvpn::client(
+ $server,
+ $compression = 'comp-lzo',
+ $dev = 'tun',
+ $mute = '20',
+ $mute_replay_warnings = true,
+ $nobind = true,
+ $persist_key = true,
+ $persist_tun = true,
+ $port = '1194',
+ $proto = 'tcp',
+ $remote_host = $::fqdn,
+ $resolv_retry = 'infinite',
+ $verb = '3',
+) {
+
+ Openvpn::Server[$server] ->
+ Openvpn::Client[$name]
+
+ exec {
+ "generate certificate for ${name} in context of ${server}":
+ command => ". ./vars && ./pkitool ${name}",
+ cwd => "/etc/openvpn/${server}/easy-rsa",
+ creates => "/etc/openvpn/${server}/easy-rsa/keys/${name}.crt",
+ provider => 'shell';
+ }
+
+ file {
+ [ "/etc/openvpn/${server}/download-configs/${name}",
+ "/etc/openvpn/${server}/download-configs/${name}/keys"]:
+ ensure => directory;
+
+ "/etc/openvpn/${server}/download-configs/${name}/keys/${name}.crt":
+ ensure => link,
+ target => "/etc/openvpn/${server}/easy-rsa/keys/${name}.crt",
+ require => Exec["generate certificate for ${name} in context of ${server}"];
+
+ "/etc/openvpn/${server}/download-configs/${name}/keys/${name}.key":
+ ensure => link,
+ target => "/etc/openvpn/${server}/easy-rsa/keys/${name}.key",
+ require => Exec["generate certificate for ${name} in context of ${server}"];
+
+ "/etc/openvpn/${server}/download-configs/${name}/keys/ca.crt":
+ ensure => link,
+ target => "/etc/openvpn/${server}/easy-rsa/keys/ca.crt",
+ require => Exec["generate certificate for ${name} in context of ${server}"];
+
+ "/etc/openvpn/${server}/download-configs/${name}/${name}.conf":
+ owner => root,
+ group => root,
+ mode => '0444',
+ content => template('openvpn/client.erb'),
+ notify => Exec["tar the thing ${server} with ${name}"];
+ }
+
+ exec {
+ "tar the thing ${server} with ${name}":
+ cwd => "/etc/openvpn/${server}/download-configs/",
+ command => "/bin/rm ${name}.tar.gz; tar --exclude=\\*.conf.d -chzvf ${name}.tar.gz ${name}",
+ refreshonly => true,
+ require => [ File["/etc/openvpn/${server}/download-configs/${name}/${name}.conf"],
+ File["/etc/openvpn/${server}/download-configs/${name}/keys/ca.crt"],
+ File["/etc/openvpn/${server}/download-configs/${name}/keys/${name}.key"],
+ File["/etc/openvpn/${server}/download-configs/${name}/keys/${name}.crt"]
+ ],
+ notify => Exec["generate ${name}.ovpn in ${server}"];
+ }
+
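+  # Build a standalone .ovpn by inlining ca.crt, the client cert, and the client
+  # key into <ca>/<cert>/<key> blocks of the generated client config.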
+ exec {
+ "generate ${name}.ovpn in ${server}":
+ cwd => "/etc/openvpn/${server}/download-configs/",
+ command => "/bin/rm ${name}.ovpn; cat ${name}/${name}.conf|perl -lne 'if(m|^ca keys/ca.crt|){ chomp(\$ca=`cat ${name}/keys/ca.crt`); print \"<ca>\n\$ca\n</ca>\"} elsif(m|^cert keys/${name}.crt|) { chomp(\$crt=`cat ${name}/keys/${name}.crt`); print \"<cert>\n\$crt\n</cert>\"} elsif(m|^key keys/${name}.key|){ chomp(\$key=`cat ${name}/keys/${name}.key`); print \"<key>\n\$key\n</key>\"} else { print} ' > ${name}.ovpn",
+ refreshonly => true,
+ require => [ File["/etc/openvpn/${server}/download-configs/${name}/${name}.conf"],
+ File["/etc/openvpn/${server}/download-configs/${name}/keys/ca.crt"],
+ File["/etc/openvpn/${server}/download-configs/${name}/keys/${name}.key"],
+ File["/etc/openvpn/${server}/download-configs/${name}/keys/${name}.crt"],
+ ],
+ }
+
+ file { "/etc/openvpn/${server}/download-configs/${name}.ovpn":
+ mode => '0400',
+ require => Exec["generate ${name}.ovpn in ${server}"],
+ }
+}
diff --git a/puppet/modules/openvpn/manifests/client_specific_config.pp b/puppet/modules/openvpn/manifests/client_specific_config.pp
new file mode 100644
index 00000000..4287421a
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/client_specific_config.pp
@@ -0,0 +1,79 @@
+# == Define: openvpn::client_specific_config
+#
+# This define configures options which will be pushed by the server to a
+# specific client only. This feature is explained here:
+# http://openvpn.net/index.php/open-source/documentation/howto.html#policy
+#
+# === Parameters
+#
+# All the parameters are explained in the openvpn documentation:
+# http://openvpn.net/index.php/open-source/documentation/howto.html#policy
+#
+# [*server*]
+# String. Name of the corresponding openvpn endpoint
+# Required
+#
+# [*iroute*]
+# Array. Array of iroute combinations.
+# Default: []
+#
+# [*ifconfig*]
+# String. IP configuration to push to the client.
+# Default: false
+#
+# [*dhcp_options*]
+# Array. DHCP options to push to the client.
+# Default: []
+#
+#
+# === Examples
+#
+# openvpn::client_specific_config {
+# 'vpn_client':
+# server => 'contractors',
+# iroute => ['10.0.1.0 255.255.255.0'],
+# ifconfig => '10.10.10.1 10.10.10.2',
+# dhcp_options => ['DNS 8.8.8.8']
+# }
+#
+# * Removal:
+# Manual process right now, todo for the future
+#
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+define openvpn::client_specific_config(
+ $server,
+ $iroute = [],
+ $ifconfig = false,
+ $dhcp_options = []
+) {
+
+ Openvpn::Server[$server] ->
+ Openvpn::Client[$name] ->
+ Openvpn::Client_specific_config[$name]
+
+ file { "/etc/openvpn/${server}/client-configs/${name}":
+ ensure => present,
+ content => template('openvpn/client_specific_config.erb')
+ }
+
+}
diff --git a/puppet/modules/openvpn/manifests/config.pp b/puppet/modules/openvpn/manifests/config.pp
new file mode 100644
index 00000000..32b32094
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/config.pp
@@ -0,0 +1,52 @@
+# == Class: openvpn::config
+#
+# This class sets up the openvpn environment as well as the default config file
+#
+#
+# === Examples
+#
+# This class should not be directly invoked
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+# * John Kinsella <mailto:jlkinsel@gmail.com>
+# * Justin Lambert <mailto:jlambert@letsevenup.com>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+class openvpn::config {
+
+ if $::osfamily == 'Debian' {
+ include concat::setup
+
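+    # /etc/default/openvpn is assembled from concat fragments: the header comes
+    # from the template below and openvpn::server appends an AUTOSTART line per
+    # server instance.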
+ concat {
+ '/etc/default/openvpn':
+ owner => root,
+ group => root,
+        mode  => '0644',
+ warn => true;
+ }
+
+ concat::fragment {
+ 'openvpn.default.header':
+ content => template('openvpn/etc-default-openvpn.erb'),
+ target => '/etc/default/openvpn',
+ order => 01;
+ }
+ }
+}
diff --git a/puppet/modules/openvpn/manifests/init.pp b/puppet/modules/openvpn/manifests/init.pp
new file mode 100644
index 00000000..7e07f025
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/init.pp
@@ -0,0 +1,43 @@
+# == Class: openvpn
+#
+# This module installs the openvpn service, configures vpn endpoints, generates
+# client certificates, and generates client config files
+#
+#
+# === Examples
+#
+# * Installation:
+# class { 'openvpn': }
+#
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+# * John Kinsella <mailto:jlkinsel@gmail.com>
+# * Justin Lambert <mailto:jlambert@letsevenup.com>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+class openvpn {
+
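+  # Chain the subclasses: params, install, then config; config notifies (~>)
+  # the service class so a configuration change restarts openvpn.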
+ class {'openvpn::params': } ->
+ class {'openvpn::install': } ->
+ class {'openvpn::config': } ~>
+ class {'openvpn::service': } ->
+ Class['openvpn']
+
+}
diff --git a/puppet/modules/openvpn/manifests/install.pp b/puppet/modules/openvpn/manifests/install.pp
new file mode 100644
index 00000000..a230373a
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/install.pp
@@ -0,0 +1,46 @@
+# == Class: openvpn
+#
+# This module installs the openvpn service, configures vpn endpoints, generates
+# client certificates, and generates client config files
+#
+#
+# === Examples
+#
+# This class should not be directly invoked
+#
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+# * John Kinsella <mailto:jlkinsel@gmail.com>
+# * Justin Lambert <mailto:jlambert@letsevenup.com>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+class openvpn::install {
+
+ package {
+ 'openvpn':
+ ensure => installed;
+ }
+
+ file {
+ [ '/etc/openvpn', '/etc/openvpn/keys' ]:
+ ensure => directory,
+ require => Package['openvpn'];
+ }
+}
diff --git a/puppet/modules/openvpn/manifests/params.pp b/puppet/modules/openvpn/manifests/params.pp
new file mode 100644
index 00000000..33495270
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/params.pp
@@ -0,0 +1,37 @@
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+class openvpn::params {
+
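+  # Distribution-specific defaults: the unprivileged group to run as, where the
+  # distribution ships the easy-rsa 2.0 scripts, and whether openssl.cnf needs
+  # to be a symlink to the versioned openssl-1.0.0.cnf.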
+ $group = $::osfamily ? {
+ 'RedHat' => 'nobody',
+ default => 'nogroup'
+ }
+
+ $easyrsa_source = $::osfamily ? {
+ 'RedHat' => $::operatingsystemmajrelease ? {
+ 6 => '/usr/share/openvpn/easy-rsa/2.0',
+ default => '/usr/share/doc/openvpn-2.2.2/easy-rsa/2.0'
+ },
+ default => '/usr/share/doc/openvpn/examples/easy-rsa/2.0'
+ }
+
+ $link_openssl_cnf = $::osfamily ? {
+ /(Debian|RedHat)/ => true,
+ default => false
+ }
+
+}
diff --git a/puppet/modules/openvpn/manifests/server.pp b/puppet/modules/openvpn/manifests/server.pp
new file mode 100644
index 00000000..649048c4
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/server.pp
@@ -0,0 +1,233 @@
+# == Define: openvpn::server
+#
+# This define creates the openvpn server instance and ssl certificates
+#
+#
+# === Parameters
+#
+# [*country*]
+# String. Country to be used for the SSL certificate
+#
+# [*province*]
+# String. Province to be used for the SSL certificate
+#
+# [*city*]
+# String. City to be used for the SSL certificate
+#
+# [*organization*]
+# String. Organization to be used for the SSL certificate
+#
+# [*email*]
+# String. Email address to be used for the SSL certificate
+#
+# [*compression*]
+# String. Which compression algorithm to use
+# Default: comp-lzo
+# Options: comp-lzo or '' (disable compression)
+#
+# [*dev*]
+# String. Device method
+# Default: tun
+# Options: tun (routed connections), tap (bridged connections)
+#
+# [*user*]
+# String. User to drop privileges to after startup
+# Default: nobody
+#
+# [*group*]
+# String. Group to drop privileges to after startup
+# Default: depends on your $::osfamily
+#
+# [*ipp*]
+# Boolean. Persist ifconfig information to a file to retain client IP
+# addresses between sessions
+# Default: false
+#
+# [*local*]
+# String. Interface for openvpn to bind to.
+# Default: $::ipaddress_eth0
+# Options: An IP address or '' to bind to all ip addresses
+#
+# [*logfile*]
+# String. Logfile for this openvpn server
+# Default: false
+# Options: false (syslog) or log file name
+#
+# [*port*]
+# Integer. The port the openvpn server service is running on
+# Default: 1194
+#
+# [*proto*]
+# String. Which IP protocol to use.
+# Default: tcp
+# Options: tcp or udp
+#
+# [*status_log*]
+# String. Logfile for periodic dumps of the vpn service status
+# Default: "${name}/openvpn-status.log"
+#
+# [*server*]
+# String. Network to assign client addresses out of
+# Default: None. Required in tun mode, not in tap mode
+#
+# [*push*]
+# Array. Options to push out to the client. This can include routes, DNS
+# servers, DNS search domains, and many other options.
+# Default: []
+#
+#
+# === Examples
+#
+#   openvpn::server {
+#     'winterthur':
+#       country      => 'CH',
+#       province     => 'ZH',
+#       city         => 'Winterthur',
+#       organization => 'example.org',
+#       email        => 'root@example.org',
+#       server       => '10.200.200.0 255.255.255.0'
+#   }
+#
+# * Removal:
+# Manual process right now, todo for the future
+#
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+# * John Kinsella <mailto:jlkinsel@gmail.com>
+# * Justin Lambert <mailto:jlambert@letsevenup.com>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+define openvpn::server(
+ $country,
+ $province,
+ $city,
+ $organization,
+ $email,
+ $compression = 'comp-lzo',
+ $dev = 'tun0',
+ $user = 'nobody',
+ $group = false,
+ $ipp = false,
+ $ip_pool = [],
+ $local = $::ipaddress_eth0,
+ $logfile = false,
+ $port = '1194',
+ $proto = 'tcp',
+ $status_log = "${name}/openvpn-status.log",
+ $server = '',
+ $push = []
+) {
+
+ include openvpn
+ Class['openvpn::install'] ->
+ Openvpn::Server[$name] ~>
+ Class['openvpn::service']
+
+ $tls_server = $proto ? {
+ /tcp/ => true,
+ default => false
+ }
+
+ $group_to_set = $group ? {
+ false => $openvpn::params::group,
+ default => $group
+ }
+
+ file {
+ ["/etc/openvpn/${name}", "/etc/openvpn/${name}/client-configs", "/etc/openvpn/${name}/download-configs" ]:
+ ensure => directory;
+ }
+
+ exec {
+ "copy easy-rsa to openvpn config folder ${name}":
+ command => "/bin/cp -r ${openvpn::params::easyrsa_source} /etc/openvpn/${name}/easy-rsa",
+ creates => "/etc/openvpn/${name}/easy-rsa",
+ notify => Exec["fix_easyrsa_file_permissions_${name}"],
+ require => File["/etc/openvpn/${name}"];
+ }
+
+ exec {
+ "fix_easyrsa_file_permissions_${name}":
+ refreshonly => true,
+ command => "/bin/chmod 755 /etc/openvpn/${name}/easy-rsa/*";
+ }
+
+ file {
+ "/etc/openvpn/${name}/easy-rsa/vars":
+ ensure => present,
+ content => template('openvpn/vars.erb'),
+ require => Exec["copy easy-rsa to openvpn config folder ${name}"];
+ }
+
+ file {
+ "/etc/openvpn/${name}/easy-rsa/openssl.cnf":
+ require => Exec["copy easy-rsa to openvpn config folder ${name}"];
+ }
+
+ if $openvpn::params::link_openssl_cnf == true {
+ File["/etc/openvpn/${name}/easy-rsa/openssl.cnf"] {
+ ensure => link,
+ target => "/etc/openvpn/${name}/easy-rsa/openssl-1.0.0.cnf"
+ }
+ }
+
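+  # Bootstrap the PKI in order: generate DH parameters, initialise the CA,
+  # then issue the server certificate.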
+ exec {
+ "generate dh param ${name}":
+ command => '. ./vars && ./clean-all && ./build-dh',
+ cwd => "/etc/openvpn/${name}/easy-rsa",
+ creates => "/etc/openvpn/${name}/easy-rsa/keys/dh1024.pem",
+ provider => 'shell',
+ require => File["/etc/openvpn/${name}/easy-rsa/vars"];
+
+ "initca ${name}":
+ command => '. ./vars && ./pkitool --initca',
+ cwd => "/etc/openvpn/${name}/easy-rsa",
+ creates => "/etc/openvpn/${name}/easy-rsa/keys/ca.key",
+ provider => 'shell',
+ require => [ Exec["generate dh param ${name}"], File["/etc/openvpn/${name}/easy-rsa/openssl.cnf"] ];
+
+ "generate server cert ${name}":
+ command => '. ./vars && ./pkitool --server server',
+ cwd => "/etc/openvpn/${name}/easy-rsa",
+ creates => "/etc/openvpn/${name}/easy-rsa/keys/server.key",
+ provider => 'shell',
+ require => Exec["initca ${name}"];
+ }
+
+ file {
+ "/etc/openvpn/${name}/keys":
+ ensure => link,
+ target => "/etc/openvpn/${name}/easy-rsa/keys",
+ require => Exec["copy easy-rsa to openvpn config folder ${name}"];
+ }
+
+ if $::osfamily == 'Debian' {
+ concat::fragment {
+ "openvpn.default.autostart.${name}":
+ content => "AUTOSTART=\"\$AUTOSTART ${name}\"\n",
+ target => '/etc/default/openvpn',
+ order => 10;
+ }
+ }
+
+ file {
+ "/etc/openvpn/${name}.conf":
+ owner => root,
+ group => root,
+ mode => '0444',
+ content => template('openvpn/server.erb');
+ }
+}
diff --git a/puppet/modules/openvpn/manifests/service.pp b/puppet/modules/openvpn/manifests/service.pp
new file mode 100644
index 00000000..54e8db7d
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/service.pp
@@ -0,0 +1,36 @@
+# == Class: openvpn::service
+#
+# This class maintains the openvpn service
+#
+#
+# === Examples
+#
+# This class should not be directly invoked
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+# * John Kinsella <mailto:jlkinsel@gmail.com>
+# * Justin Lambert <mailto:jlambert@letsevenup.com>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+class openvpn::service {
+ service {
+ 'openvpn':
+ ensure => running,
+ enable => true,
+ hasrestart => true,
+ hasstatus => true;
+ }
+}
diff --git a/puppet/modules/openvpn/spec/classes/openvpn_config_spec.rb b/puppet/modules/openvpn/spec/classes/openvpn_config_spec.rb
new file mode 100644
index 00000000..bbb63a77
--- /dev/null
+++ b/puppet/modules/openvpn/spec/classes/openvpn_config_spec.rb
@@ -0,0 +1,15 @@
+require 'spec_helper'
+
+describe 'openvpn::config', :type => :class do
+
+ it { should create_class('openvpn::config') }
+
+ context "on Debian based machines" do
+ let (:facts) { { :osfamily => 'Debian', :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should contain_class('concat::setup') }
+ it { should contain_concat('/etc/default/openvpn') }
+ it { should contain_concat__fragment('openvpn.default.header') }
+ end
+
+end
diff --git a/puppet/modules/openvpn/spec/classes/openvpn_init_spec.rb b/puppet/modules/openvpn/spec/classes/openvpn_init_spec.rb
new file mode 100644
index 00000000..45dcc9bf
--- /dev/null
+++ b/puppet/modules/openvpn/spec/classes/openvpn_init_spec.rb
@@ -0,0 +1,9 @@
+require 'spec_helper'
+
+describe 'openvpn', :type => :class do
+
+ let (:facts) { { :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should create_class('openvpn') }
+
+end
diff --git a/puppet/modules/openvpn/spec/classes/openvpn_install_spec.rb b/puppet/modules/openvpn/spec/classes/openvpn_install_spec.rb
new file mode 100644
index 00000000..cdb31358
--- /dev/null
+++ b/puppet/modules/openvpn/spec/classes/openvpn_install_spec.rb
@@ -0,0 +1,11 @@
+require 'spec_helper'
+
+describe 'openvpn::install', :type => :class do
+
+ it { should create_class('openvpn::install') }
+ it { should contain_package('openvpn') }
+
+ it { should contain_file('/etc/openvpn').with('ensure' => 'directory') }
+ it { should contain_file('/etc/openvpn/keys').with('ensure' => 'directory') }
+
+end
diff --git a/puppet/modules/openvpn/spec/classes/openvpn_service_spec.rb b/puppet/modules/openvpn/spec/classes/openvpn_service_spec.rb
new file mode 100644
index 00000000..f427e7f1
--- /dev/null
+++ b/puppet/modules/openvpn/spec/classes/openvpn_service_spec.rb
@@ -0,0 +1,13 @@
+require 'spec_helper'
+
+describe 'openvpn::service', :type => :class do
+
+ let (:facts) { { :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should create_class('openvpn::service') }
+ it { should contain_service('openvpn').with(
+ 'ensure' => 'running',
+ 'enable' => true
+ ) }
+
+end
diff --git a/puppet/modules/openvpn/spec/defines/openvpn_client_spec.rb b/puppet/modules/openvpn/spec/defines/openvpn_client_spec.rb
new file mode 100644
index 00000000..a4b580e8
--- /dev/null
+++ b/puppet/modules/openvpn/spec/defines/openvpn_client_spec.rb
@@ -0,0 +1,88 @@
+require 'spec_helper'
+
+describe 'openvpn::client', :type => :define do
+ let(:title) { 'test_client' }
+ let(:params) { { 'server' => 'test_server' } }
+ let(:facts) { { :fqdn => 'somehost', :concat_basedir => '/var/lib/puppet/concat' } }
+ let(:pre_condition) do
+ 'openvpn::server { "test_server":
+ country => "CO",
+ province => "ST",
+ city => "Some City",
+ organization => "example.org",
+ email => "testemail@example.org"
+ }'
+ end
+
+ it { should contain_exec('generate certificate for test_client in context of test_server') }
+
+ [ 'test_client', 'test_client/keys'].each do |directory|
+ it { should contain_file("/etc/openvpn/test_server/download-configs/#{directory}") }
+ end
+
+ [ 'test_client.crt', 'test_client.key', 'ca.crt' ].each do |file|
+ it { should contain_file("/etc/openvpn/test_server/download-configs/test_client/keys/#{file}").with(
+ 'ensure' => 'link',
+ 'target' => "/etc/openvpn/test_server/easy-rsa/keys/#{file}"
+ )}
+ end
+
+ it { should contain_exec('tar the thing test_server with test_client').with(
+ 'cwd' => '/etc/openvpn/test_server/download-configs/',
+ 'command' => '/bin/rm test_client.tar.gz; tar --exclude=\*.conf.d -chzvf test_client.tar.gz test_client'
+ ) }
+
+ context "setting the minimum parameters" do
+ let(:params) { { 'server' => 'test_server' } }
+ let(:facts) { { :fqdn => 'somehost', :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^client$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^ca\s+keys\/ca\.crt$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^cert\s+keys\/test_client.crt$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^key\s+keys\/test_client\.key$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^dev\s+tun$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^proto\s+tcp$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^remote\s+somehost\s+1194$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^comp-lzo$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^resolv-retry\s+infinite$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^nobind$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^persist-key$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^persist-tun$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^mute-replay-warnings$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^ns\-cert\-type\s+server$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^verb\s+3$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^mute\s+20$/)}
+ end
+
+ context "setting all of the parameters" do
+ let(:params) { {
+ 'server' => 'test_server',
+ 'compression' => 'comp-something',
+ 'dev' => 'tap',
+ 'mute' => 10,
+ 'mute_replay_warnings' => false,
+ 'nobind' => false,
+ 'persist_key' => false,
+ 'persist_tun' => false,
+ 'port' => '123',
+ 'proto' => 'udp',
+ 'remote_host' => 'somewhere',
+ 'resolv_retry' => '2m',
+ 'verb' => '1'
+ } }
+ let(:facts) { { :fqdn => 'somehost', :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^client$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^ca\s+keys\/ca\.crt$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^cert\s+keys\/test_client.crt$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^key\s+keys\/test_client\.key$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^dev\s+tap$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^proto\s+udp$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^remote\s+somewhere\s+123$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^comp-something$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^resolv-retry\s+2m$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^verb\s+1$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^mute\s+10$/)}
+ end
+
+end
diff --git a/puppet/modules/openvpn/spec/defines/openvpn_client_specific_config_spec.rb b/puppet/modules/openvpn/spec/defines/openvpn_client_specific_config_spec.rb
new file mode 100644
index 00000000..cfdab389
--- /dev/null
+++ b/puppet/modules/openvpn/spec/defines/openvpn_client_specific_config_spec.rb
@@ -0,0 +1,40 @@
+require 'spec_helper'
+
+describe 'openvpn::client_specific_config', :type => :define do
+ let(:title) { 'test_client' }
+ let(:params) { { 'server' => 'test_server' } }
+ let(:facts) { { :fqdn => 'somehost', :concat_basedir => '/var/lib/puppet/concat' } }
+ let(:pre_condition) do
+ [
+ 'openvpn::server { "test_server":
+ country => "CO",
+ province => "ST",
+ city => "Some City",
+ organization => "example.org",
+ email => "testemail@example.org"
+ }',
+ 'openvpn::client { "test_client":
+ server => "test_server"
+ }'
+ ].join
+ end
+
+ it { should contain_file('/etc/openvpn/test_server/client-configs/test_client') }
+
+ describe "setting no paramter at all" do
+ it { should contain_file('/etc/openvpn/test_server/client-configs/test_client').with_content(/\A\n\z/) }
+ end
+
+ describe "setting all parameters" do
+ let(:params) do
+ {:server => 'test_server',
+ :iroute => ['10.0.1.0 255.255.255.0'],
+ :ifconfig => '10.10.10.2 255.255.255.0',
+ :dhcp_options => ['DNS 8.8.8.8']}
+ end
+
+ it { should contain_file('/etc/openvpn/test_server/client-configs/test_client').with_content(/^iroute 10.0.1.0 255.255.255.0$/) }
+ it { should contain_file('/etc/openvpn/test_server/client-configs/test_client').with_content(/^ifconfig-push 10.10.10.2 255.255.255.0$/) }
+ it { should contain_file('/etc/openvpn/test_server/client-configs/test_client').with_content(/^push dhcp-option DNS 8.8.8.8$/) }
+ end
+end
diff --git a/puppet/modules/openvpn/spec/defines/openvpn_server_spec.rb b/puppet/modules/openvpn/spec/defines/openvpn_server_spec.rb
new file mode 100644
index 00000000..467be6aa
--- /dev/null
+++ b/puppet/modules/openvpn/spec/defines/openvpn_server_spec.rb
@@ -0,0 +1,165 @@
+require 'spec_helper'
+
+describe 'openvpn::server', :type => :define do
+
+ let(:title) { 'test_server' }
+
+ context "creating a server with the minimum parameters" do
+ let(:params) { {
+ 'country' => 'CO',
+ 'province' => 'ST',
+ 'city' => 'Some City',
+ 'organization' => 'example.org',
+ 'email' => 'testemail@example.org'
+ } }
+
+ let (:facts) { {
+ :ipaddress_eth0 => '1.2.3.4',
+ :network_eth0 => '1.2.3.0',
+ :netmask_eth0 => '255.255.255.0',
+ :concat_basedir => '/var/lib/puppet/concat',
+ :osfamily => 'anything_else'
+ } }
+
+ # Files associated with a server config
+ it { should contain_file('/etc/openvpn/test_server').with('ensure' => 'directory')}
+ it { should contain_file('/etc/openvpn/test_server/client-configs').with('ensure' => 'directory')}
+ it { should contain_file('/etc/openvpn/test_server/download-configs').with('ensure' => 'directory')}
+ it { should contain_file('/etc/openvpn/test_server/easy-rsa/vars')}
+ it { should contain_file('/etc/openvpn/test_server/easy-rsa/openssl.cnf')}
+ it { should contain_file('/etc/openvpn/test_server/keys').with(
+ 'ensure' => 'link',
+ 'target' => '/etc/openvpn/test_server/easy-rsa/keys'
+ )}
+
+ # Execs to working with certificates
+ it { should contain_exec('copy easy-rsa to openvpn config folder test_server').with(
+ 'command' => '/bin/cp -r /usr/share/doc/openvpn/examples/easy-rsa/2.0 /etc/openvpn/test_server/easy-rsa'
+ )}
+ it { should contain_exec('generate dh param test_server') }
+ it { should contain_exec('initca test_server') }
+ it { should contain_exec('generate server cert test_server') }
+
+ # VPN server config file itself
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^mode\s+server$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^client\-config\-dir\s+\/etc\/openvpn\/test_server\/client\-configs$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^ca\s+\/etc\/openvpn\/test_server\/keys\/ca.crt$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^cert\s+\/etc\/openvpn\/test_server\/keys\/server.crt$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^key\s+\/etc\/openvpn\/test_server\/keys\/server.key$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^dh\s+\/etc\/openvpn\/test_server\/keys\/dh1024.pem$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^proto\s+tcp-server$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^tls-server$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^port\s+1194$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^comp-lzo$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^group\s+nogroup$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^user\s+nobody$/) }
+ it { should_not contain_file('/etc/openvpn/test_server.conf').with_content(/^log\-append\s+test_server\/openvpn\.log$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^status\s+test_server\/openvpn\-status\.log$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^dev\s+tun0$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^local\s+1\.2\.3\.4$/) }
+ it { should_not contain_file('/etc/openvpn/test_server.conf').with_content(/^ifconfig-pool-persist/) }
+ end
+
+ context "creating a server setting all parameters" do
+ let(:params) { {
+ 'country' => 'CO',
+ 'province' => 'ST',
+ 'city' => 'Some City',
+ 'organization' => 'example.org',
+ 'email' => 'testemail@example.org',
+ 'compression' => 'fake_compression',
+ 'port' => '123',
+ 'proto' => 'udp',
+ 'group' => 'someone',
+ 'user' => 'someone',
+ 'logfile' => '/var/log/openvpn/test_server.log',
+ 'status_log' => '/var/log/openvpn/test_server_status.log',
+ 'dev' => 'tun1',
+ 'local' => '2.3.4.5',
+ 'ipp' => true,
+ 'server' => '2.3.4.0 255.255.0.0',
+ 'push' => [ 'dhcp-option DNS 172.31.0.30', 'route 172.31.0.0 255.255.0.0' ]
+ } }
+
+ let (:facts) { {
+ :ipaddress_eth0 => '1.2.3.4',
+ :network_eth0 => '1.2.3.0',
+ :netmask_eth0 => '255.255.255.0',
+ :concat_basedir => '/var/lib/puppet/concat'
+ } }
+
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^mode\s+server$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^client\-config\-dir\s+\/etc\/openvpn\/test_server\/client\-configs$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^ca\s+\/etc\/openvpn\/test_server\/keys\/ca.crt$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^cert\s+\/etc\/openvpn\/test_server\/keys\/server.crt$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^key\s+\/etc\/openvpn\/test_server\/keys\/server.key$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^dh\s+\/etc\/openvpn\/test_server\/keys\/dh1024.pem$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^proto\s+udp$/) }
+ it { should_not contain_file('/etc/openvpn/test_server.conf').with_content(/^proto\s+tls-server$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^port\s+123$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^fake_compression$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^group\s+someone$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^user\s+someone$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^log\-append\s+\/var\/log\/openvpn\/test_server\.log$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^status\s+\/var\/log\/openvpn\/test_server_status\.log$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^dev\s+tun1$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^local\s+2\.3\.4\.5$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^server\s+2\.3\.4\.0\s+255\.255\.0\.0$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^push\s+dhcp-option\s+DNS\s+172\.31\.0\.30$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^push\s+route\s+172\.31\.0\.0\s+255\.255\.0\.0$/) }
+ end
+
+ context "when RedHat based machine" do
+ let(:params) { {
+ 'country' => 'CO',
+ 'province' => 'ST',
+ 'city' => 'Some City',
+ 'organization' => 'example.org',
+ 'email' => 'testemail@example.org'
+ } }
+
+ let(:facts) { { :osfamily => 'RedHat', :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should contain_file('/etc/openvpn/test_server/easy-rsa/openssl.cnf').with(
+ 'ensure' => 'link',
+ 'target' => '/etc/openvpn/test_server/easy-rsa/openssl-1.0.0.cnf'
+ )}
+
+ it { should contain_exec('copy easy-rsa to openvpn config folder test_server').with(
+ 'command' => '/bin/cp -r /usr/share/doc/openvpn-2.2.2/easy-rsa/2.0 /etc/openvpn/test_server/easy-rsa'
+ )}
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^group\s+nobody$/) }
+
+ end
+
+ context "when Debian based machine" do
+ let(:params) { {
+ 'country' => 'CO',
+ 'province' => 'ST',
+ 'city' => 'Some City',
+ 'organization' => 'example.org',
+ 'email' => 'testemail@example.org'
+ } }
+
+ let(:facts) { { :osfamily => 'Debian', :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should contain_file('/etc/openvpn/test_server/easy-rsa/openssl.cnf').with(
+ 'ensure' => 'link',
+ 'target' => '/etc/openvpn/test_server/easy-rsa/openssl-1.0.0.cnf'
+ )}
+
+ it { should contain_exec('copy easy-rsa to openvpn config folder test_server').with(
+ 'command' => '/bin/cp -r /usr/share/doc/openvpn/examples/easy-rsa/2.0 /etc/openvpn/test_server/easy-rsa'
+ )}
+
+ # Configure the VPN to autostart via /etc/default/openvpn
+ it { should contain_concat__fragment('openvpn.default.autostart.test_server').with(
+ 'content' => "AUTOSTART=\"$AUTOSTART test_server\"\n",
+ 'target' => '/etc/default/openvpn'
+ )}
+
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^group\s+nogroup$/) }
+
+ end
+
+end
diff --git a/puppet/modules/openvpn/spec/spec_helper.rb b/puppet/modules/openvpn/spec/spec_helper.rb
new file mode 100644
index 00000000..dc7e9f4a
--- /dev/null
+++ b/puppet/modules/openvpn/spec/spec_helper.rb
@@ -0,0 +1,2 @@
+require 'rubygems'
+require 'puppetlabs_spec_helper/module_spec_helper'
diff --git a/puppet/modules/openvpn/templates/client.erb b/puppet/modules/openvpn/templates/client.erb
new file mode 100644
index 00000000..021ed617
--- /dev/null
+++ b/puppet/modules/openvpn/templates/client.erb
@@ -0,0 +1,26 @@
+client
+ca keys/ca.crt
+cert keys/<%= scope.lookupvar('name') %>.crt
+key keys/<%= scope.lookupvar('name') %>.key
+dev <%= scope.lookupvar('dev') %>
+proto <%= scope.lookupvar('proto') %>
+remote <%= scope.lookupvar('remote_host') %> <%= scope.lookupvar('port') %>
+<% if scope.lookupvar('compression') != '' -%>
+<%= scope.lookupvar('compression') %>
+<% end -%>
+resolv-retry <%= scope.lookupvar('resolv_retry') %>
+<% if scope.lookupvar('nobind') -%>
+nobind
+<% end -%>
+<% if scope.lookupvar('persist_key') -%>
+persist-key
+<% end -%>
+<% if scope.lookupvar('persist_tun') -%>
+persist-tun
+<% end -%>
+<% if scope.lookupvar('mute_replay_warnings') -%>
+mute-replay-warnings
+<% end -%>
+ns-cert-type server
+verb <%= scope.lookupvar('verb') %>
+mute <%= scope.lookupvar('mute') %>
diff --git a/puppet/modules/openvpn/templates/client_specific_config.erb b/puppet/modules/openvpn/templates/client_specific_config.erb
new file mode 100644
index 00000000..62cc0e7a
--- /dev/null
+++ b/puppet/modules/openvpn/templates/client_specific_config.erb
@@ -0,0 +1,10 @@
+<% scope.lookupvar('iroute').each do |route| -%>
+iroute <%= route %>
+<% end -%>
+<% if ifconfig = scope.lookupvar('ifconfig') -%>
+ifconfig-push <%= ifconfig %>
+<% end -%>
+<% scope.lookupvar('dhcp_options').each do |option| -%>
+push dhcp-option <%= option %>
+<% end -%>
+
diff --git a/puppet/modules/openvpn/templates/etc-default-openvpn.erb b/puppet/modules/openvpn/templates/etc-default-openvpn.erb
new file mode 100644
index 00000000..310e462e
--- /dev/null
+++ b/puppet/modules/openvpn/templates/etc-default-openvpn.erb
@@ -0,0 +1,20 @@
+# This is the configuration file for /etc/init.d/openvpn
+
+#
+# Start only these VPNs automatically via init script.
+# Allowed values are "all", "none" or space separated list of
+# names of the VPNs. If empty, "all" is assumed.
+#
+#AUTOSTART="all"
+#AUTOSTART="none"
+#AUTOSTART="home office"
+#
+# Refresh interval (in seconds) of default status files
+# located in /var/run/openvpn.$NAME.status
+# Defaults to 10, 0 disables status file generation
+#
+#STATUSREFRESH=10
+#STATUSREFRESH=0
+# Optional arguments to openvpn's command line
+OPTARGS=""
+AUTOSTART=""
diff --git a/puppet/modules/openvpn/templates/server.erb b/puppet/modules/openvpn/templates/server.erb
new file mode 100644
index 00000000..6ef13263
--- /dev/null
+++ b/puppet/modules/openvpn/templates/server.erb
@@ -0,0 +1,37 @@
+mode server
+client-config-dir /etc/openvpn/<%= scope.lookupvar('name') %>/client-configs
+ca /etc/openvpn/<%= scope.lookupvar('name') %>/keys/ca.crt
+cert /etc/openvpn/<%= scope.lookupvar('name') %>/keys/server.crt
+key /etc/openvpn/<%= scope.lookupvar('name') %>/keys/server.key
+dh /etc/openvpn/<%= scope.lookupvar('name') %>/keys/dh1024.pem
+<% if scope.lookupvar('proto') == 'tcp' -%>
+proto <%= scope.lookupvar('proto') %>-server
+<% else -%>
+proto <%= scope.lookupvar('proto') %>
+<% end -%>
+port <%= scope.lookupvar('port') %>
+<% if scope.lookupvar('tls_server') -%>
+tls-server
+<% end -%>
+<% if scope.lookupvar('compression') != '' -%>
+<%= scope.lookupvar('compression') %>
+<% end -%>
+group <%= scope.lookupvar('group_to_set') %>
+user <%= scope.lookupvar('user') %>
+<% if scope.lookupvar('logfile') -%>
+log-append <%= scope.lookupvar('logfile') %>
+<% end -%>
+status <%= scope.lookupvar('status_log') %>
+dev <%= scope.lookupvar('dev') %>
+<% if scope.lookupvar('local') != '' -%>
+local <%= scope.lookupvar('local') %>
+<% end -%>
+<% if scope.lookupvar('ipp') -%>
+ifconfig-pool-persist <%= scope.lookupvar('name') %>/vpn-ipp.txt
+<% end -%>
+<% if scope.lookupvar('server') != '' -%>
+server <%= scope.lookupvar('server') %>
+<% end -%>
+<% scope.lookupvar('push').each do |item| -%>
+push <%= item %>
+<% end -%>
diff --git a/puppet/modules/openvpn/templates/vars.erb b/puppet/modules/openvpn/templates/vars.erb
new file mode 100644
index 00000000..20448b8b
--- /dev/null
+++ b/puppet/modules/openvpn/templates/vars.erb
@@ -0,0 +1,68 @@
+# easy-rsa parameter settings
+
+# NOTE: If you installed from an RPM,
+# don't edit this file in place in
+# /usr/share/openvpn/easy-rsa --
+# instead, you should copy the whole
+# easy-rsa directory to another location
+# (such as /etc/openvpn) so that your
+# edits will not be wiped out by a future
+# OpenVPN package upgrade.
+
+# This variable should point to
+# the top level of the easy-rsa
+# tree.
+export EASY_RSA="/etc/openvpn/<%= @name %>/easy-rsa"
+
+#
+# This variable should point to
+# the requested executables
+#
+export OPENSSL="openssl"
+export PKCS11TOOL="pkcs11-tool"
+export GREP="grep"
+
+
+# This variable should point to
+# the openssl.cnf file included
+# with easy-rsa.
+export KEY_CONFIG=`$EASY_RSA/whichopensslcnf $EASY_RSA`
+
+# Edit this variable to point to
+# your soon-to-be-created key
+# directory.
+#
+# WARNING: clean-all will do
+# a rm -rf on this directory
+# so make sure you define
+# it correctly!
+export KEY_DIR="$EASY_RSA/keys"
+
+# Issue rm -rf warning
+echo NOTE: If you run ./clean-all, I will be doing a rm -rf on $KEY_DIR
+
+# PKCS11 fixes
+export PKCS11_MODULE_PATH="dummy"
+export PKCS11_PIN="dummy"
+
+# Increase this to 2048 if you
+# are paranoid. This will slow
+# down TLS negotiation performance
+# as well as the one-time DH parms
+# generation process.
+export KEY_SIZE=1024
+
+# In how many days should the root CA key expire?
+export CA_EXPIRE=3650
+
+# In how many days should certificates expire?
+export KEY_EXPIRE=3650
+
+# These are the default values for fields
+# which will be placed in the certificate.
+# Don't leave any of these fields blank.
+export KEY_COUNTRY="<%= @country %>"
+export KEY_PROVINCE="<%= @province %>"
+export KEY_CITY="<%= @city %>"
+export KEY_ORG="<%= @organization %>"
+export KEY_EMAIL="<%= @email %>"
diff --git a/puppet/modules/openvpn/vagrant/client.pp b/puppet/modules/openvpn/vagrant/client.pp
new file mode 100644
index 00000000..7ebeb1d7
--- /dev/null
+++ b/puppet/modules/openvpn/vagrant/client.pp
@@ -0,0 +1,5 @@
+node default {
+
+ package { 'openvpn': ensure => installed; }
+
+}
diff --git a/puppet/modules/openvpn/vagrant/server.pp b/puppet/modules/openvpn/vagrant/server.pp
new file mode 100644
index 00000000..a95def06
--- /dev/null
+++ b/puppet/modules/openvpn/vagrant/server.pp
@@ -0,0 +1,23 @@
+node default {
+ openvpn::server { 'winterthur':
+ country => 'CH',
+ province => 'ZH',
+ city => 'Winterthur',
+ organization => 'example.org',
+ email => 'root@example.org',
+ server => '10.200.200.0 255.255.255.0'
+ }
+
+ openvpn::client { 'client1':
+ server => 'winterthur';
+ }
+
+ openvpn::client_specific_config { 'client1':
+ server => 'winterthur',
+ ifconfig => '10.200.200.100 255.255.255.0'
+ }
+
+ openvpn::client { 'client2':
+ server => 'winterthur';
+ }
+}
diff --git a/README b/puppet/modules/passenger/README
index 549432e2..549432e2 100644
--- a/README
+++ b/puppet/modules/passenger/README
diff --git a/puppet/modules/passenger/files/mod_passenger.conf b/puppet/modules/passenger/files/mod_passenger.conf
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/puppet/modules/passenger/files/mod_passenger.conf
diff --git a/files/munin/passenger_memory_stats b/puppet/modules/passenger/files/munin/passenger_memory_stats
index eb9b2843..eb9b2843 100755
--- a/files/munin/passenger_memory_stats
+++ b/puppet/modules/passenger/files/munin/passenger_memory_stats
diff --git a/files/munin/passenger_stats b/puppet/modules/passenger/files/munin/passenger_stats
index f06e88a0..f06e88a0 100755
--- a/files/munin/passenger_stats
+++ b/puppet/modules/passenger/files/munin/passenger_stats
diff --git a/manifests/apache.pp b/puppet/modules/passenger/manifests/apache.pp
index d4181ffe..d4181ffe 100644
--- a/manifests/apache.pp
+++ b/puppet/modules/passenger/manifests/apache.pp
diff --git a/manifests/apache/base.pp b/puppet/modules/passenger/manifests/apache/base.pp
index 441c9bd5..441c9bd5 100644
--- a/manifests/apache/base.pp
+++ b/puppet/modules/passenger/manifests/apache/base.pp
diff --git a/manifests/apache/centos.pp b/puppet/modules/passenger/manifests/apache/centos.pp
index b7b80e3b..b7b80e3b 100644
--- a/manifests/apache/centos.pp
+++ b/puppet/modules/passenger/manifests/apache/centos.pp
diff --git a/manifests/apache/debian.pp b/puppet/modules/passenger/manifests/apache/debian.pp
index 38eb3fa4..38eb3fa4 100644
--- a/manifests/apache/debian.pp
+++ b/puppet/modules/passenger/manifests/apache/debian.pp
diff --git a/manifests/init.pp b/puppet/modules/passenger/manifests/init.pp
index ed9b8c31..ed9b8c31 100644
--- a/manifests/init.pp
+++ b/puppet/modules/passenger/manifests/init.pp
diff --git a/manifests/munin.pp b/puppet/modules/passenger/manifests/munin.pp
index 36bc53f2..36bc53f2 100644
--- a/manifests/munin.pp
+++ b/puppet/modules/passenger/manifests/munin.pp
diff --git a/puppet/modules/postfwd/files/postfwd_default b/puppet/modules/postfwd/files/postfwd_default
new file mode 100644
index 00000000..83742e40
--- /dev/null
+++ b/puppet/modules/postfwd/files/postfwd_default
@@ -0,0 +1,19 @@
+### This file managed by Puppet
+# Global options for postfwd(8).
+
+# Set to '1' to enable startup (daemon mode)
+STARTUP=1
+
+# Config file
+CONF=/etc/postfix/postfwd.cf
+# IP address to listen on
+INET=127.0.0.1
+# Port to listen on
+PORT=10040
+# run as user postfwd
+RUNAS="postfw"
+# Arguments passed on start (--daemon implied)
+# disable summary and cache-no-size
+#ARGS="--summary=600 --cache=600 --cache-rdomain-only --cache-no-size"
+ARGS="--cache=600 --cache-rdomain-only --no-rulestats"
+
diff --git a/puppet/modules/postfwd/manifests/init.pp b/puppet/modules/postfwd/manifests/init.pp
new file mode 100644
index 00000000..6db3fa52
--- /dev/null
+++ b/puppet/modules/postfwd/manifests/init.pp
@@ -0,0 +1,43 @@
+# This class provides rate-limiting for outgoing SMTP using postfwd.
+# It is configured with limits that seem reasonable for a generic
+# use-case. Each of the following applies to sasl_authenticated users:
+#
+# . 150 recipients at a time
+# . no more than 50 messages in 60 minutes
+# . no more than 250 recipients in 60 minutes.
+#
+# This class could easily be extended with overrides to these rules,
+# per-client maximum sizes, or additional rules.
+class postfwd {
+
+ ensure_packages(['libnet-server-perl', 'libnet-dns-perl', 'postfwd'])
+
+ file {
+ '/etc/default/postfwd':
+ source => 'puppet:///modules/postfwd/postfwd_default',
+ mode => '0644',
+ owner => root,
+ group => root,
+ before => Package['postfwd'];
+
+ '/etc/postfix/postfwd.cf':
+ content => template('postfwd/postfwd.cf.erb'),
+ mode => '0644',
+ owner => root,
+ group => root,
+ require => Package['postfix'],
+ before => Package['postfwd'];
+ }
+
+ service {
+ 'postfwd':
+ ensure => running,
+ name => postfwd,
+ pattern => '/usr/sbin/postfwd',
+ enable => true,
+ hasrestart => true,
+ hasstatus => false,
+ require => [ File['/etc/default/postfwd'],
+ File['/etc/postfix/postfwd.cf']];
+ }
+}
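The postfwd class above exposes no parameters, so using it on an outgoing mail node is a plain include; note that it expects Postfix to be managed elsewhere (the rule file requires Package['postfix']). A minimal sketch of a node declaration -- the node name here is illustrative only, not something this module defines:

    node 'mx1.example.org' {
      # rules come from the template-managed /etc/postfix/postfwd.cf,
      # daemon defaults from /etc/default/postfwd
      include ::postfwd
    }
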
diff --git a/puppet/modules/postfwd/templates/postfwd.cf.erb b/puppet/modules/postfwd/templates/postfwd.cf.erb
new file mode 100644
index 00000000..1c45dd03
--- /dev/null
+++ b/puppet/modules/postfwd/templates/postfwd.cf.erb
@@ -0,0 +1,28 @@
+### This file managed by Puppet
+# Before deploying a rule:
+# 1. Test with an additional "sender==test@domain.org;" in the rule so it
+#    only applies to your test account.
+# 2. When ready to test for all users, use WARN and watch the logs
+#    for a few days to make sure it is working the way you like.
+# 3. When ready to deploy for real, set a proper error code.
+
+## Overrides - add rules like the following example
+# id=exampleuser; sasl_username==exampleuser; action=dunno
+
+## Rules that apply to all senders
+# Recipient Per Message Limit
+# We only receive mail via smtp from sasl authenticated users
+# directly. We want to limit to a lower amount to prevent phished accounts
+# from spamming
+id=RCPTSENDER; recipient_count=150; action=REJECT Too many recipients, please try again. Contact http://<%= @domain %>/tickets/new if this is in error. ERROR:RCPTSENDER
+
+# Message Rate Limit
+# This limits sasl authenticated users to no more than 50/60mins
+# NOTE: sasl_username needs to be set to something or this check will fail
+id=MSGRATE ; sasl_username=!!(^$); action==rate($$sasl_username/100/3600/450 4.7.1 exceeded message rate. Contact http://<%= @domain %>/tickets/new if this is in error. ERROR:MSGRATE)
+
+# Total Recipient Rate Limit
+# This adds up the recipients for all the sasl authenticated users messages
+# and can't exceed more than 250/60min
+# NOTE: sasl_username needs to be set to something or this check will fail
+id=RCPTRATE ; sasl_username=!!(^$); action==rcpt($$sasl_username/500/3600/450 4.7.1 exceeded recipient rate. Contact http://<%= @domain %>/tickets/new if this is in error. ERROR:RCPTRATE)
diff --git a/puppet/modules/resolvconf/manifests/init.pp b/puppet/modules/resolvconf/manifests/init.pp
new file mode 100644
index 00000000..c22c4ea6
--- /dev/null
+++ b/puppet/modules/resolvconf/manifests/init.pp
@@ -0,0 +1,27 @@
+#
+# resolvconf module
+#
+# Copyright 2008, admin(at)immerda.ch
+# Copyright 2008, Puzzle ITC GmbH
+# Marcel Härry haerry+puppet(at)puzzle.ch
+# Simon Josi josi+puppet(at)puzzle.ch
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of the GNU
+# General Public License version 3 as published by
+# the Free Software Foundation.
+#
+
+class resolvconf(
+ $domain = $::domain,
+ $search = $::domain,
+ $nameservers = [ '8.8.8.8' ]
+) {
+ file{'/etc/resolv.conf':
+ content => $::operatingsystem ? {
+ openbsd => template("resolvconf/resolvconf.${::operatingsystem}.erb"),
+ default => template('resolvconf/resolvconf.erb'),
+ },
+ owner => root, group => 0, mode => 0444;
+ }
+}
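A minimal usage sketch for the resolvconf class above; the values shown are illustrative overrides of the defaults ($::domain and 8.8.8.8), not something the module ships:

    class { 'resolvconf':
      domain      => 'example.org',
      search      => 'example.org',
      nameservers => [ '10.0.0.1', '8.8.8.8' ],  # one nameserver line is rendered per entry
    }
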
diff --git a/puppet/modules/resolvconf/templates/resolvconf.OpenBSD.erb b/puppet/modules/resolvconf/templates/resolvconf.OpenBSD.erb
new file mode 100644
index 00000000..48daf279
--- /dev/null
+++ b/puppet/modules/resolvconf/templates/resolvconf.OpenBSD.erb
@@ -0,0 +1,5 @@
+# managed by puppet
+lookup file bind
+<% scope.lookupvar('resolvconf::nameservers').each do |nameserver| -%>
+nameserver <%= nameserver %>
+<% end -%>
diff --git a/puppet/modules/resolvconf/templates/resolvconf.erb b/puppet/modules/resolvconf/templates/resolvconf.erb
new file mode 100644
index 00000000..d8136bfb
--- /dev/null
+++ b/puppet/modules/resolvconf/templates/resolvconf.erb
@@ -0,0 +1,7 @@
+# managed by puppet
+domain <%= scope.lookupvar('resolvconf::domain') %>
+search <%= scope.lookupvar('resolvconf::search') %>
+
+<% scope.lookupvar('resolvconf::nameservers').each do |nameserver| -%>
+nameserver <%= nameserver %>
+<% end -%>
diff --git a/puppet/modules/ruby/manifests/devel.pp b/puppet/modules/ruby/manifests/devel.pp
new file mode 100644
index 00000000..7068a74a
--- /dev/null
+++ b/puppet/modules/ruby/manifests/devel.pp
@@ -0,0 +1,5 @@
+# install ruby header files and rake
+class ruby::devel {
+ include ruby
+ ensure_packages($ruby::ruby_dev)
+}
diff --git a/puppet/modules/ruby/manifests/init.pp b/puppet/modules/ruby/manifests/init.pp
new file mode 100644
index 00000000..8d8ae48e
--- /dev/null
+++ b/puppet/modules/ruby/manifests/init.pp
@@ -0,0 +1,72 @@
+# Class: ruby
+#
+# This class installs Ruby
+#
+# Parameters:
+#
+# version: (default installed)
+# Set the version of Ruby to install
+#
+# Sample Usage:
+#
+# For a standard install using the latest ruby, simply do:
+#
+# class { 'ruby': }
+#
+# On Debian this is equivalent to
+# $ apt-get install ruby
+#
+# To install a specific version of ruby, simply do:
+#
+# class { 'ruby':
+# ruby_version => '1.8.7',
+# }
+#
+# Supported versions: 1.8, 1.8.7, 1.9, 1.9.1, 1.9.3
+#
+# To install the development files, you can do:
+#
+# class { 'ruby': install_dev => true }
+
+class ruby (
+ $ruby_version = '',
+ $version = 'installed',
+ $install_dev = false
+)
+{
+
+ case $::operatingsystem {
+ 'redhat', 'suse': {
+ $ruby_package='ruby'
+ $ruby_dev='ruby-devel'
+ }
+ 'debian', 'ubuntu': {
+ case $ruby_version {
+ '1.8', '1.8.7': {
+ $ruby_package = 'ruby1.8'
+ $ruby_dev = [ 'ruby1.8-dev', 'rake' ]
+ }
+ '1.9.1': {
+ $ruby_package = 'ruby1.9.1'
+ $ruby_dev = [ 'ruby1.9.1-dev', 'rake' ]
+ }
+ '1.9', '1.9.3': {
+ $ruby_package = 'ruby1.9.3'
+ $ruby_dev = [ 'ruby-dev', 'rake' ]
+ }
+ default: {
+ $ruby_package = 'ruby'
+ $ruby_dev = [ 'ruby-dev', 'rake' ]
+ }
+ }
+ }
+ }
+
+ package{ $ruby_package:
+ ensure => $version,
+ }
+
+ if $install_dev {
+ ensure_packages($ruby_dev)
+ }
+}
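The version selection and the development packages can be combined in one declaration; a minimal sketch based on the case statement above (Debian/Ubuntu path shown):

    class { 'ruby':
      ruby_version => '1.9.3',  # resolves to the ruby1.9.3 package
      install_dev  => true,     # additionally installs ruby-dev and rake
    }
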
diff --git a/puppet/modules/ruby/manifests/mysql.pp b/puppet/modules/ruby/manifests/mysql.pp
new file mode 100644
index 00000000..2e894789
--- /dev/null
+++ b/puppet/modules/ruby/manifests/mysql.pp
@@ -0,0 +1,7 @@
+class ruby::mysql {
+ include ruby
+ package{'ruby-mysql':
+ ensure => present,
+ require => Package['ruby'],
+ }
+}
diff --git a/puppet/modules/ruby/manifests/postgres.pp b/puppet/modules/ruby/manifests/postgres.pp
new file mode 100644
index 00000000..ec0e253a
--- /dev/null
+++ b/puppet/modules/ruby/manifests/postgres.pp
@@ -0,0 +1,6 @@
+class ruby::postgres {
+ include ruby
+ package{'ruby-postgres':
+ ensure => installed,
+ }
+}
diff --git a/puppet/modules/ruby/manifests/shadow.pp b/puppet/modules/ruby/manifests/shadow.pp
new file mode 100644
index 00000000..43f1aeab
--- /dev/null
+++ b/puppet/modules/ruby/manifests/shadow.pp
@@ -0,0 +1,6 @@
+class ruby::shadow {
+ case $::operatingsystem {
+ debian,ubuntu: { include ruby::shadow::debian }
+ default: { include ruby::shadow::base }
+ }
+}
diff --git a/puppet/modules/ruby/manifests/shadow/base.pp b/puppet/modules/ruby/manifests/shadow/base.pp
new file mode 100644
index 00000000..af8c5c92
--- /dev/null
+++ b/puppet/modules/ruby/manifests/shadow/base.pp
@@ -0,0 +1,6 @@
+class ruby::shadow::base {
+ require ::ruby
+ package{'ruby-shadow':
+ ensure => installed,
+ }
+}
diff --git a/puppet/modules/ruby/manifests/shadow/debian.pp b/puppet/modules/ruby/manifests/shadow/debian.pp
new file mode 100644
index 00000000..8182b9b1
--- /dev/null
+++ b/puppet/modules/ruby/manifests/shadow/debian.pp
@@ -0,0 +1,8 @@
+class ruby::shadow::debian inherits ruby::shadow::base {
+ Package['ruby-shadow']{
+ name => $::lsbdistcodename ? {
+ 'wheezy' => 'libshadow-ruby1.8',
+ default => 'ruby-shadow',
+ }
+ }
+}
diff --git a/puppet/modules/rubygems/files/gemrc b/puppet/modules/rubygems/files/gemrc
new file mode 100644
index 00000000..040f20ba
--- /dev/null
+++ b/puppet/modules/rubygems/files/gemrc
@@ -0,0 +1,3 @@
+---
+:sources:
+- https://rubygems.org/
diff --git a/puppet/modules/rubygems/manifests/activerecord.pp b/puppet/modules/rubygems/manifests/activerecord.pp
new file mode 100644
index 00000000..131222af
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/activerecord.pp
@@ -0,0 +1,7 @@
+class rubygems::activerecord {
+ require rubygems
+ package{'activerecord':
+ ensure => present,
+ provider => gem,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/activesupport.pp b/puppet/modules/rubygems/manifests/activesupport.pp
new file mode 100644
index 00000000..ae5aee70
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/activesupport.pp
@@ -0,0 +1,7 @@
+class rubygems::activesupport {
+ require rubygems
+ package{'activesupport':
+ ensure => present,
+ provider => gem,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/backports.pp b/puppet/modules/rubygems/manifests/backports.pp
new file mode 100644
index 00000000..4290e340
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/backports.pp
@@ -0,0 +1,7 @@
+class rubygems::backports {
+ require rubygems::devel
+ package{'backports':
+ ensure => present,
+ provider => gem,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/bcrypt.pp b/puppet/modules/rubygems/manifests/bcrypt.pp
new file mode 100644
index 00000000..4c646477
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/bcrypt.pp
@@ -0,0 +1,14 @@
+class rubygems::bcrypt {
+ if ($::osfamily == 'RedHat') and
+ versioncmp($::operatingsystemrelease,'6') > 0 {
+ package{'rubygem-bcrypt':
+ ensure => present,
+ }
+ } else {
+ require rubygems
+ package{'bcrypt-ruby':
+ ensure => present,
+ provider => gem,
+ }
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/brokengem.pp b/puppet/modules/rubygems/manifests/brokengem.pp
new file mode 100644
index 00000000..b3284d97
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/brokengem.pp
@@ -0,0 +1,14 @@
+define rubygems::brokengem($source,$ensure) {
+ exec { "get-gem-$name":
+ command => "/usr/bin/wget --output-document=/tmp/$name.gem $source",
+ creates => "/tmp/$name.gem",
+ before => Package[$name]
+ }
+ package{$name:
+ ensure => $ensure,
+ provider => gem,
+ source => "/tmp/$name.gem"
+ }
+}
+
+# $Id$
diff --git a/puppet/modules/rubygems/manifests/camping.pp b/puppet/modules/rubygems/manifests/camping.pp
new file mode 100644
index 00000000..f79fca13
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/camping.pp
@@ -0,0 +1,7 @@
+class rubygems::camping {
+ require rubygems::rack
+ package{'camping':
+ ensure => present,
+ provider => gem,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/captcha/v_0_1_2.pp b/puppet/modules/rubygems/manifests/captcha/v_0_1_2.pp
new file mode 100644
index 00000000..2a4e7123
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/captcha/v_0_1_2.pp
@@ -0,0 +1,5 @@
+class rubygems::captcha::v_0_1_2 {
+ rubygems::gem{ 'captcha-0.1.2':
+ requiresgcc => true,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/chronic_duration.pp b/puppet/modules/rubygems/manifests/chronic_duration.pp
new file mode 100644
index 00000000..c789eb51
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/chronic_duration.pp
@@ -0,0 +1,5 @@
+class rubygems::chronic_duration {
+ rubygems::gem{'chronic_duration':
+ ensure => present,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/devel.pp b/puppet/modules/rubygems/manifests/devel.pp
new file mode 100644
index 00000000..2f69f892
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/devel.pp
@@ -0,0 +1,6 @@
+class rubygems::devel {
+ include ::rubygems
+ include ruby::devel
+ include gcc
+}
+
diff --git a/puppet/modules/rubygems/manifests/fastercsv.pp b/puppet/modules/rubygems/manifests/fastercsv.pp
new file mode 100644
index 00000000..95ae0212
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/fastercsv.pp
@@ -0,0 +1,6 @@
+class rubygems::fastercsv {
+ rubygems::gem{'fastercsv':
+ ensure => present,
+ source => 'http://rubyforge.org/frs/download.php/43190/fastercsv-1.4.0.gem',
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/gd/v_0_7_4.pp b/puppet/modules/rubygems/manifests/gd/v_0_7_4.pp
new file mode 100644
index 00000000..9027ecb5
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/gd/v_0_7_4.pp
@@ -0,0 +1,5 @@
+class rubygems::gd::v_0_7_4 {
+ rubygems::gem{ 'ruby-gd-0.7.4':
+ buildflags => '--with-freetype',
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/gem.pp b/puppet/modules/rubygems/manifests/gem.pp
new file mode 100644
index 00000000..14b67850
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/gem.pp
@@ -0,0 +1,108 @@
+# Installs gems that are slightly broken.
+# The resource title is expected to be the name of the gem.
+# If you want to install a certain version,
+# you have to append the version to the gem name:
+#
+# install a version of mime-types:
+# rubygems::gem{'mime-types': }
+#
+# install version 0.0.4 of ruby-net-ldap:
+# rubygems::gem{'ruby-net-ldap-0.0.4': }
+#
+# uninstall the polyglot gem (until no version of it remains installed):
+# rubygems::gem{'polyglot': ensure => absent }
+#
+# uninstall ruby-net-ldap version 0.0.3
+# rubygems::gem{'ruby-net-ldap-0.0.3': ensure => absent }
+#
+# You can also set your own buildflags, which will then install
+# the gem in question using the gem command.
+#
+# You can also force the gem command to be used to manage the gem
+# by setting provider to `exec`.
+#
+define rubygems::gem(
+ $ensure = 'present',
+ $source = 'absent',
+ $provider = 'default',
+ $buildflags = 'absent',
+ $requiresgcc = false
+) {
+ require ::rubygems
+ if $requiresgcc or ($buildflags != 'absent') {
+ require ::gcc
+ }
+
+ if $name =~ /\-(\d|\.)+$/ {
+ $real_name = regsubst($name,'^(.*)-(\d|\.)+$','\1')
+ $gem_version = regsubst($name,'^(.*)-(\d+(\d|\.)+)$','\2')
+ } else {
+ $real_name = $name
+ }
+
+ if $source != 'absent' {
+ if $ensure != 'absent' {
+ require rubygems::gem::cachedir
+ exec{"get-gem-$name":
+ command => "/usr/bin/wget -O ${rubygems::gem::cachedir::dir}/$name.gem $source",
+ creates => "${rubygems::gem::cachedir::dir}/$name.gem",
+ }
+ } else {
+ file{"${rubygems::gem::cachedir::dir}/$name.gem":
+ ensure => 'absent';
+ }
+ }
+ }
+
+ if ($buildflags != 'absent') or ($provider == 'exec') {
+ if $gem_version {
+ $gem_version_str = "-v ${gem_version}"
+ $gem_version_check_str = $gem_version
+ } else {
+ $gem_version_check_str = '.*'
+ }
+
+ if $ensure == 'present' {
+ $gem_cmd = 'install'
+ } else {
+ $gem_cmd = 'uninstall -x'
+ }
+
+ if $buildflags != 'absent' {
+ $buildflags_str = "-- --build-flags ${buildflags}"
+ } else {
+ $buildflags_str = ''
+ }
+
+ exec{"manage_gem_${name}":
+ command => "gem ${gem_cmd} ${real_name} ${gem_version_str} ${buildflags_str}",
+ }
+
+ $gem_cmd_check_str = "gem list | egrep -q '^${real_name} \\(${gem_version_check_str}\\)\$'"
+ if $ensure == 'present' {
+ Exec["manage_gem_${name}"]{
+ unless => $gem_cmd_check_str
+ }
+ } else {
+ Exec["manage_gem_${name}"]{
+ onlyif => $gem_cmd_check_str
+ }
+ }
+ } else {
+ package{"$real_name":
+ ensure => $ensure ? {
+ 'absent' => $ensure,
+ default => $gem_version ? {
+ undef => $ensure,
+ default => $gem_version
+ }
+ },
+ provider => gem,
+ }
+ if $source != 'absent' {
+ Package["$name"]{
+ source => "${rubygems::gem::cachedir::dir}/$name.gem"
+ }
+ }
+ }
+}
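The header of this define mentions two code paths that its inline examples do not show: forcing management through the gem command, and passing build flags. A sketch of both, reusing names that already appear in this module (ruby-net-ldap from the header, ruby-gd-0.7.4 from rubygems::gd::v_0_7_4):

    # manage the gem via the gem CLI instead of the package provider
    rubygems::gem { 'ruby-net-ldap-0.0.4':
      provider => 'exec',
    }

    # build flags are appended to the gem install command, which also forces the exec path
    rubygems::gem { 'ruby-gd-0.7.4':
      buildflags => '--with-freetype',
    }
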
diff --git a/puppet/modules/rubygems/manifests/gem/cachedir.pp b/puppet/modules/rubygems/manifests/gem/cachedir.pp
new file mode 100644
index 00000000..3e371e42
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/gem/cachedir.pp
@@ -0,0 +1,4 @@
+class rubygems::gem::cachedir {
+ $dir = '/var/lib/puppet/modules/rubygems_cache'
+ modules_dir{'rubygems_cache': }
+}
diff --git a/puppet/modules/rubygems/manifests/gpgme.pp b/puppet/modules/rubygems/manifests/gpgme.pp
new file mode 100644
index 00000000..e9b04a9a
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/gpgme.pp
@@ -0,0 +1,35 @@
+class rubygems::gpgme{
+ case $::operatingsystem {
+ debian,ubuntu: {
+ case $::lsbdistcodename {
+ 'lenny','squeeze': {
+ # install gpgme as a gem, as the squeeze deb-package is too old
+ # for e.g. the gpg module
+ $provider = 'gem'
+ $packagename = 'ruby-gpgme'
+ }
+ default: {
+ # no need to install gpgme as a gem; the debian package works
+ # fine with the gpg module
+ $provider = 'apt'
+ $packagename = 'libgpgme-ruby'
+ }
+ }
+ }
+ default: {
+ $provider = 'gem'
+ $packagename = 'ruby-gpgme'
+ }
+ }
+
+ if $provider == 'gem' {
+ require rubygems::devel
+ require gpg::gpgme::devel
+ }
+
+ package{'ruby-gpgme':
+ ensure => present,
+ provider => $provider,
+ name => $packagename
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/hiera.pp b/puppet/modules/rubygems/manifests/hiera.pp
new file mode 100644
index 00000000..4c766a15
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/hiera.pp
@@ -0,0 +1,7 @@
+class rubygems::hiera{
+ require ::rubygems
+ package{'hiera':
+ ensure => installed,
+ provider => gem,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/hiera_puppet.pp b/puppet/modules/rubygems/manifests/hiera_puppet.pp
new file mode 100644
index 00000000..319e7d0e
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/hiera_puppet.pp
@@ -0,0 +1,7 @@
+class rubygems::hiera_puppet {
+ require rubygems::hiera
+ package{'hiera-puppet':
+ ensure => installed,
+ provider => gem,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/highline.pp b/puppet/modules/rubygems/manifests/highline.pp
new file mode 100644
index 00000000..e9da09a5
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/highline.pp
@@ -0,0 +1,14 @@
+class rubygems::highline {
+ require rubygems
+ package{'rubygem-highline':
+ ensure => present,
+ }
+
+ case $::operatingsystem {
+ debian,ubuntu: {
+ Package['rubygem-highline']{
+ name => 'ruby-highline'
+ }
+ }
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/init.pp b/puppet/modules/rubygems/manifests/init.pp
new file mode 100644
index 00000000..bca40b9e
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/init.pp
@@ -0,0 +1,31 @@
+#
+# rubygems module
+# original by luke kanies
+# http://github.com/lak
+#
+# Copyright 2008, Puzzle ITC GmbH
+# Marcel Härry haerry+puppet(at)puzzle.ch
+# Simon Josi josi+puppet(at)puzzle.ch
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of the GNU
+# General Public License version 3 as published by
+# the Free Software Foundation.
+#
+
+# manage rubygems basics
+class rubygems {
+ # from Debian 8 onwards this is no longer needed, as it is part of the ruby package
+ if ($::operatingsystem != 'Debian') or (versioncmp($::operatingsystemrelease,'8') < 0) {
+ package{'rubygems':
+ ensure => installed,
+ }
+ }
+ file { '/etc/gemrc':
+ source => [ 'puppet:///modules/site_rubygems/gemrc',
+ 'puppet:///modules/rubygems/gemrc' ],
+ mode => '0644',
+ owner => 'root',
+ group => 'root',
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/ip.pp b/puppet/modules/rubygems/manifests/ip.pp
new file mode 100644
index 00000000..190d869d
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/ip.pp
@@ -0,0 +1,7 @@
+class rubygems::ip {
+ require rubygems
+ package{'ip':
+ ensure => present,
+ provider => gem,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/json/v_1_4_6.pp b/puppet/modules/rubygems/manifests/json/v_1_4_6.pp
new file mode 100644
index 00000000..d0901ba3
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/json/v_1_4_6.pp
@@ -0,0 +1,3 @@
+class rubygems::json::v_1_4_6 {
+ rubygems::gem{ 'json-1.4.6': }
+}
diff --git a/puppet/modules/rubygems/manifests/lockfile.pp b/puppet/modules/rubygems/manifests/lockfile.pp
new file mode 100644
index 00000000..f4ed6b0f
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/lockfile.pp
@@ -0,0 +1,7 @@
+class rubygems::lockfile {
+ require rubygems
+ package{'lockfile':
+ ensure => present,
+ provider => gem,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/mail.pp b/puppet/modules/rubygems/manifests/mail.pp
new file mode 100644
index 00000000..b8b50bbe
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/mail.pp
@@ -0,0 +1,19 @@
+# manage the mail rubygem
+class rubygems::mail {
+ if ($::osfamily == 'RedHat') and
+ versioncmp($::operatingsystemrelease,'6') > 0 {
+ package{'rubygem-mail':
+ ensure => present,
+ }
+ } else {
+ require rubygems::devel
+ package{'mail':
+ ensure => present,
+ provider => gem,
+ }
+
+ if $::rubyversion == '1.8.6' {
+ require rubygems::tlsmail
+ }
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/maildir.pp b/puppet/modules/rubygems/manifests/maildir.pp
new file mode 100644
index 00000000..8773f37a
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/maildir.pp
@@ -0,0 +1,15 @@
+# manage maildir rubygem
+class rubygems::maildir {
+ if ($::osfamily == 'RedHat') and
+ versioncmp($::operatingsystemrelease,'6') > 0 {
+ package{'rubygem-maildir':
+ ensure => present,
+ }
+ } else {
+ require rubygems::devel
+ package{'maildir':
+ ensure => present,
+ provider => gem,
+ }
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/markaby.pp b/puppet/modules/rubygems/manifests/markaby.pp
new file mode 100644
index 00000000..817969e3
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/markaby.pp
@@ -0,0 +1,7 @@
+class rubygems::markaby {
+ require rubygems
+ package{'markaby':
+ ensure => present,
+ provider => gem,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/moneta.pp b/puppet/modules/rubygems/manifests/moneta.pp
new file mode 100644
index 00000000..ea9bb5a6
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/moneta.pp
@@ -0,0 +1,7 @@
+class rubygems::moneta {
+ require rubygems
+ package{'moneta':
+ ensure => present,
+ provider => gem,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/mysql.pp b/puppet/modules/rubygems/manifests/mysql.pp
new file mode 100644
index 00000000..cc0bbbf6
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/mysql.pp
@@ -0,0 +1,5 @@
+class rubygems::mysql {
+ require ::mysql::devel
+ require gcc
+ rubygems::gem{'mysql':}
+}
diff --git a/puppet/modules/rubygems/manifests/net_ldap/v_0_0_4.pp b/puppet/modules/rubygems/manifests/net_ldap/v_0_0_4.pp
new file mode 100644
index 00000000..88e1e7b4
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/net_ldap/v_0_0_4.pp
@@ -0,0 +1,3 @@
+class rubygems::net_ldap::v_0_0_4 {
+ rubygems::gem{ 'ruby-net-ldap-0.0.4': }
+}
diff --git a/puppet/modules/rubygems/manifests/ntlm/v_0_1_1.pp b/puppet/modules/rubygems/manifests/ntlm/v_0_1_1.pp
new file mode 100644
index 00000000..fd6eade3
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/ntlm/v_0_1_1.pp
@@ -0,0 +1,3 @@
+class rubygems::ntlm::v_0_1_1 {
+ rubygems::gem{ 'rubyntlm-0.1.1': }
+}
diff --git a/puppet/modules/rubygems/manifests/open4.pp b/puppet/modules/rubygems/manifests/open4.pp
new file mode 100644
index 00000000..1e3fbb78
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/open4.pp
@@ -0,0 +1,7 @@
+class rubygems::open4 {
+ require rubygems
+ package{'open4':
+ ensure => present,
+ provider => gem,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/pbkdf2.pp b/puppet/modules/rubygems/manifests/pbkdf2.pp
new file mode 100644
index 00000000..b2cf1136
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/pbkdf2.pp
@@ -0,0 +1,8 @@
+class rubygems::pbkdf2{
+ require ::rubygems
+ package{'pbkdf2':
+ ensure => installed,
+ provider => gem,
+ }
+}
+
diff --git a/puppet/modules/rubygems/manifests/postgres.pp b/puppet/modules/rubygems/manifests/postgres.pp
new file mode 100644
index 00000000..8720f4ef
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/postgres.pp
@@ -0,0 +1,11 @@
+class rubygems::postgres {
+ if $::osfamily == 'RedHat' and
+ versioncmp($::operatingsystemrelease,'5') > 0 {
+ package{'rubygem-pg':
+ ensure => installed,
+ }
+ } else {
+ require postgres::devel
+ rubygems::gem{'ruby-pg':}
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/rack.pp b/puppet/modules/rubygems/manifests/rack.pp
new file mode 100644
index 00000000..953ab22b
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/rack.pp
@@ -0,0 +1,7 @@
+class rubygems::rack {
+ require rubygems
+ package{'rack':
+ ensure => present,
+ provider => gem,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/sinatra.pp b/puppet/modules/rubygems/manifests/sinatra.pp
new file mode 100644
index 00000000..327f829f
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/sinatra.pp
@@ -0,0 +1,7 @@
+class rubygems::sinatra {
+ require rubygems
+ package{'sinatra':
+ ensure => present,
+ provider => gem,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/sqlite.pp b/puppet/modules/rubygems/manifests/sqlite.pp
new file mode 100644
index 00000000..6b670152
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/sqlite.pp
@@ -0,0 +1,6 @@
+class rubygems::sqlite {
+ require rubygems::devel
+ package{'rubygem-sqlite3-ruby':
+ ensure => present,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/systemu.pp b/puppet/modules/rubygems/manifests/systemu.pp
new file mode 100644
index 00000000..62a599cf
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/systemu.pp
@@ -0,0 +1,7 @@
+class rubygems::systemu {
+ require rubygems
+ package{'systemu':
+ ensure => present,
+ provider => gem,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/thin.pp b/puppet/modules/rubygems/manifests/thin.pp
new file mode 100644
index 00000000..b2499d81
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/thin.pp
@@ -0,0 +1,7 @@
+class rubygems::thin {
+ require rubygems
+ package{'thin':
+ ensure => present,
+ provider => gem,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/tlsmail.pp b/puppet/modules/rubygems/manifests/tlsmail.pp
new file mode 100644
index 00000000..71aa6158
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/tlsmail.pp
@@ -0,0 +1,7 @@
+class rubygems::tlsmail {
+ require rubygems::devel
+ package{'tlsmail':
+ ensure => present,
+ provider => gem,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/tmail.pp b/puppet/modules/rubygems/manifests/tmail.pp
new file mode 100644
index 00000000..dd7117d9
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/tmail.pp
@@ -0,0 +1,7 @@
+class rubygems::tmail {
+ require rubygems::devel
+ package{'tmail':
+ ensure => present,
+ provider => gem,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/xmlsimple.pp b/puppet/modules/rubygems/manifests/xmlsimple.pp
new file mode 100644
index 00000000..914156b0
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/xmlsimple.pp
@@ -0,0 +1,20 @@
+# xml simple lib
+class rubygems::xmlsimple {
+ package{'rubygem-xml-simple':
+ ensure => present,
+ }
+ case $::operatingsystem {
+ debian,ubuntu: {
+ Package['rubygem-xml-simple']{
+ name => 'libxml-simple-ruby'
+ }
+ }
+ }
+ if $::operatingsystem == 'CentOS' and versioncmp($::operatingsystemrelease, '6') > 0 {
+ # not yet packaged
+ Package['rubygem-xml-simple']{
+ name => 'xml-simple',
+ provider => gem,
+ }
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/xmpp4r.pp b/puppet/modules/rubygems/manifests/xmpp4r.pp
new file mode 100644
index 00000000..068d5825
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/xmpp4r.pp
@@ -0,0 +1,7 @@
+class rubygems::xmpp4r {
+ require ::rubygems
+ package{'xmpp4r':
+ ensure => present,
+ provider => gem,
+ }
+}
diff --git a/puppet/modules/rubygems/manifests/ya2yaml.pp b/puppet/modules/rubygems/manifests/ya2yaml.pp
new file mode 100644
index 00000000..7df362dc
--- /dev/null
+++ b/puppet/modules/rubygems/manifests/ya2yaml.pp
@@ -0,0 +1,7 @@
+class rubygems::ya2yaml {
+ require rubygems
+ package{'ya2yaml':
+ ensure => present,
+ provider => gem,
+ }
+}
diff --git a/puppet/modules/shorewall/LICENSE b/puppet/modules/shorewall/LICENSE
new file mode 100644
index 00000000..94a9ed02
--- /dev/null
+++ b/puppet/modules/shorewall/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/puppet/modules/shorewall/README b/puppet/modules/shorewall/README
new file mode 100644
index 00000000..3a84b3bd
--- /dev/null
+++ b/puppet/modules/shorewall/README
@@ -0,0 +1,219 @@
+modules/shorewall/manifests/init.pp - manage firewalling with shorewall 3.x
+
+Puppet Module for Shorewall
+---------------------------
+This module manages the configuration of Shorewall (http://www.shorewall.net/)
+
+Requirements
+------------
+
+This module requires the augeas module; you can find it here:
+https://labs.riseup.net/code/projects/shared-augeas
+
+Copyright
+---------
+
+Copyright (C) 2007 David Schmitt <david@schmitt.edv-bus.at>
+adapted by immerda project group - admin+puppet(at)immerda.ch
+adapted by Puzzle ITC - haerry+puppet(at)puzzle.ch
+Copyright (c) 2009 Riseup Networks - micah(shift+2)riseup.net
+Copyright (c) 2010 intrigeri - intrigeri(at)boum.org
+See LICENSE for the full license granted to you.
+
+Based on the work of ADNET Ghislain <gadnet@aqueos.com> from AQUEOS
+at https://reductivelabs.com/trac/puppet/wiki/AqueosShorewall
+
+Merged from:
+- git://git.puppet.immerda.ch/module-shorewall.git
+- git://labs.riseup.net/module_shorewall
+
+Todo
+----
+- check if shorewall compiles without errors, otherwise fail!
+
+Configuration
+-------------
+
+If you need to install a specific version of shorewall, rather than the
+default one that 'ensure => present' would install, set the following
+variable and that specific version will be installed instead:
+
+ $shorewall_ensure_version = "4.0.15-1"
+
+The main shorewall.conf is not managed by this module; instead, the default
+file that your operating system provides is used. Any modifications you wish
+to make to it should be done with augeas. For example, to set
+IP_FORWARDING=Yes in shorewall.conf, simply do this:
+
+ augeas { 'enable_ip_forwarding':
+ changes => 'set /files/etc/shorewall/shorewall.conf/IP_FORWARDING Yes',
+ lens => 'Shellvars.lns',
+ incl => '/etc/shorewall/shorewall.conf',
+ notify => Service[shorewall];
+ }
+
+NOTE: this requires the augeas ruby bindings newer than 0.7.3.
+
+If you need to, you can provide an entire shorewall.conf by passing its
+source to the main class:
+
+  class{'shorewall':
+    conf_source => "puppet:///modules/site_shorewall/${::fqdn}/shorewall.conf.${::operatingsystem}",
+  }
+
+NOTE: if you distribute a file, you cannot also use augeas; puppet and augeas
+will fight forever. Also, if you ship your own shorewall.conf, you *must* set
+the following value in it, otherwise this module will not work:
+
+ CONFIG_PATH="/etc/shorewall/puppet:/etc/shorewall:/usr/share/shorewall"
+
+Documentation
+-------------
+
+see also: http://reductivelabs.com/trac/puppet/wiki/Recipes/AqueosShorewall
+
+Torify
+------
+
+The shorewall::rules::torify define can be used to force some outgoing
+TCP traffic through the Tor transparent proxy. The corresponding
+non-TCP traffic is rejected accordingly.
+
+Beware! This define is only part of a torified setup. DNS requests and
+IPv6, amongst others, might leak network activity you would prefer to
+keep private. You really need to read proper documentation about these
+matters before using this feature, e.g.:
+
+ https://www.torproject.org/download/download.html.en#warning
+
+The Tor transparent proxy location defaults to 127.0.0.1:9040 and can
+be configured by setting the $tor_transparent_proxy_host and
+$tor_transparent_proxy_port variables before including the main
+shorewall class.
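+
+For instance, a minimal sketch (the node name is only illustrative, and
+the values shown simply restate the defaults):
+
+  node 'torified-host' {
+    # set these before the shorewall class is included
+    $tor_transparent_proxy_host = '127.0.0.1'
+    $tor_transparent_proxy_port = '9040'
+    include shorewall
+  }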
+
+Example usage follows.
+
+Torify any outgoing TCP traffic originating from user bob or alice and
+aimed at 6.6.6.6 or 7.7.7.7:
+
+ shorewall::rules::torify {
+ 'torify-some-bits':
+ users => [ 'bob', 'alice' ],
+ destinations => [ '6.6.6.6', '7.7.7.7' ];
+ }
+
+Torify any outgoing TCP traffic to 8.8.8.8:
+
+ shorewall::rules::torify {
+ 'torify-to-this-host':
+ destinations => [ '8.8.8.8' ];
+ }
+
+When no destination or user is provided, any outgoing TCP traffic (see
+restrictions below) is torified. In that case the user running the
+Tor client ($tor_user) is whitelisted; this variable defaults to
+"debian-tor" on Debian systems and to "tor" on others. If this does
+not suit your configuration, you need to set the $tor_user variable
+before including the main shorewall class.
+
+When no destination is provided, traffic directed to RFC1918 addresses
+is by default allowed and (obviously) not torified. This behaviour can
+be changed by setting the allow_rfc1918 parameter to false.
+
+Torify any outgoing TCP traffic except connections to RFC1918 addresses:
+
+ shorewall::rules::torify {
+ 'torify-everything-but-lan':
+ }
+
+Torify any outgoing TCP traffic:
+
+ shorewall::rules::torify {
+ 'torify-everything':
+ allow_rfc1918 => false;
+ }
+
+In some cases (e.g. when providing no specific destination or user
+and denying access to RFC1918 addresses) UDP DNS requests may be
+rejected. This is intentional: it does not make sense to leak, via DNS
+requests, network activity that would otherwise be torified. In that
+case you probably want to read proper documentation about such
+matters, enable the Tor DNS resolver and redirect DNS requests through
+it.
+
+Example
+-------
+
+Example from node.pp:
+
+node xy {
+ class{'config::site_shorewall':
+ startup => "0" # create the shorewall ruleset but don't start it
+ }
+ shorewall::rule {
+ 'incoming-ssh': source => 'all', destination => '$FW', action => 'SSH(ACCEPT)', order => 200;
+ 'incoming-puppetmaster': source => 'all', destination => '$FW', action => 'Puppetmaster(ACCEPT)', order => 300;
+ 'incoming-imap': source => 'all', destination => '$FW', action => 'IMAP(ACCEPT)', order => 300;
+ 'incoming-smtp': source => 'all', destination => '$FW', action => 'SMTP(ACCEPT)', order => 300;
+ }
+}
+
+
+class config::site_shorewall($startup = '1') {
+ class{'shorewall':
+ startup => $startup
+ }
+
+ # If you want logging:
+ #shorewall::params {
+ # 'LOG': value => 'debug';
+ #}
+
+ shorewall::zone {'net':
+ type => 'ipv4';
+ }
+
+ shorewall::rule_section { 'NEW':
+ order => 100;
+ }
+
+ shorewall::interface { 'eth0':
+ zone => 'net',
+ rfc1918 => true,
+ options => 'tcpflags,blacklist,nosmurfs';
+ }
+
+ shorewall::policy {
+ 'fw-to-fw':
+ sourcezone => '$FW',
+ destinationzone => '$FW',
+ policy => 'ACCEPT',
+ order => 100;
+ 'fw-to-net':
+ sourcezone => '$FW',
+ destinationzone => 'net',
+ policy => 'ACCEPT',
+ shloglevel => '$LOG',
+ order => 110;
+ 'net-to-fw':
+ sourcezone => 'net',
+ destinationzone => '$FW',
+ policy => 'DROP',
+ shloglevel => '$LOG',
+ order => 120;
+ }
+
+
+ # default Rules : ICMP
+ shorewall::rule {
+ 'allicmp-to-host':
+ source => 'all',
+ destination => '$FW',
+ order => 200,
+ action => 'AllowICMPs(ACCEPT)';
+ }
+}
+
+
diff --git a/puppet/modules/shorewall/files/boilerplate/blacklist.footer b/puppet/modules/shorewall/files/boilerplate/blacklist.footer
new file mode 100644
index 00000000..5e12d1da
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/blacklist.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES BEFORE THIS ONE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/blacklist.header b/puppet/modules/shorewall/files/boilerplate/blacklist.header
new file mode 100644
index 00000000..2392e176
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/blacklist.header
@@ -0,0 +1,10 @@
+#
+# Shorewall version 3.4 - Blacklist File
+#
+# For information about entries in this file, type "man shorewall-blacklist"
+#
+# Please see http://shorewall.net/blacklisting_support.htm for additional
+# information.
+#
+###############################################################################
+#ADDRESS/SUBNET PROTOCOL PORT
diff --git a/puppet/modules/shorewall/files/boilerplate/clear.footer b/puppet/modules/shorewall/files/boilerplate/clear.footer
new file mode 100644
index 00000000..662ac1cc
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/clear.footer
@@ -0,0 +1 @@
+#LAST LINE - ADD YOUR ENTRIES ABOVE THIS ONE - DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/clear.header b/puppet/modules/shorewall/files/boilerplate/clear.header
new file mode 100644
index 00000000..6a39b0b6
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/clear.header
@@ -0,0 +1,13 @@
+#
+# Shorewall version 4 - Clear
+#
+# /etc/shorewall/clear
+#
+# Add commands below that you want to be executed at the beginning of a
+# "shorewall clear" command.
+#
+# See http://shorewall.net/shorewall_extension_scripts.htm for additional
+# information.
+#
+###############################################################################
+#LAST LINE - ADD YOUR ENTRIES ABOVE THIS ONE - DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/continue.footer b/puppet/modules/shorewall/files/boilerplate/continue.footer
new file mode 100644
index 00000000..662ac1cc
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/continue.footer
@@ -0,0 +1 @@
+#LAST LINE - ADD YOUR ENTRIES ABOVE THIS ONE - DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/continue.header b/puppet/modules/shorewall/files/boilerplate/continue.header
new file mode 100644
index 00000000..d2ee48a5
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/continue.header
@@ -0,0 +1,14 @@
+#
+# Shorewall version 4 - Continue File
+#
+# /etc/shorewall/continue
+#
+# Add commands below that you want to be executed after shorewall has
+# cleared any existing Netfilter rules and has enabled existing
+# connections.
+#
+# For additional information, see
+# http://shorewall.net/shorewall_extension_scripts.htm
+#
+###############################################################################
+
diff --git a/puppet/modules/shorewall/files/boilerplate/hosts.footer b/puppet/modules/shorewall/files/boilerplate/hosts.footer
new file mode 100644
index 00000000..dc2fef52
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/hosts.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES BEFORE THIS LINE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/hosts.header b/puppet/modules/shorewall/files/boilerplate/hosts.header
new file mode 100644
index 00000000..e39d6145
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/hosts.header
@@ -0,0 +1,9 @@
+#
+# Shorewall version 3.4 - Hosts file
+#
+# For information about entries in this file, type "man shorewall-hosts"
+#
+# For additional information, see http://shorewall.net/Documentation.htm#Hosts
+#
+###############################################################################
+#ZONE HOST(S) OPTIONS
diff --git a/puppet/modules/shorewall/files/boilerplate/init.footer b/puppet/modules/shorewall/files/boilerplate/init.footer
new file mode 100644
index 00000000..662ac1cc
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/init.footer
@@ -0,0 +1 @@
+#LAST LINE - ADD YOUR ENTRIES ABOVE THIS ONE - DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/init.header b/puppet/modules/shorewall/files/boilerplate/init.header
new file mode 100644
index 00000000..cbb0393e
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/init.header
@@ -0,0 +1,13 @@
+#
+# Shorewall version 4 - Init File
+#
+# /etc/shorewall/init
+#
+# Add commands below that you want to be executed at the beginning of
+# a "shorewall start" or "shorewall restart" command.
+#
+# For additional information, see
+# http://shorewall.net/shorewall_extension_scripts.htm
+#
+###############################################################################
+
diff --git a/puppet/modules/shorewall/files/boilerplate/initdone.footer b/puppet/modules/shorewall/files/boilerplate/initdone.footer
new file mode 100644
index 00000000..662ac1cc
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/initdone.footer
@@ -0,0 +1 @@
+#LAST LINE - ADD YOUR ENTRIES ABOVE THIS ONE - DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/initdone.header b/puppet/modules/shorewall/files/boilerplate/initdone.header
new file mode 100644
index 00000000..9252a3bc
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/initdone.header
@@ -0,0 +1,14 @@
+#
+# Shorewall version 4 - Initdone File
+#
+# /etc/shorewall/initdone
+#
+# Add commands below that you want to be executed during
+# "shorewall start" or "shorewall restart" commands at the point where
+# Shorewall has not yet added any permanent rules to the builtin chains.
+#
+# For additional information, see
+# http://shorewall.net/shorewall_extension_scripts.htm
+#
+###############################################################################
+
diff --git a/puppet/modules/shorewall/files/boilerplate/interfaces.footer b/puppet/modules/shorewall/files/boilerplate/interfaces.footer
new file mode 100644
index 00000000..5e12d1da
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/interfaces.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES BEFORE THIS ONE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/interfaces.header b/puppet/modules/shorewall/files/boilerplate/interfaces.header
new file mode 100644
index 00000000..2027523e
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/interfaces.header
@@ -0,0 +1,10 @@
+#
+# Shorewall version 3.4 - Interfaces File
+#
+# For information about entries in this file, type "man shorewall-interfaces"
+#
+# For additional information, see
+# http://shorewall.net/Documentation.htm#Interfaces
+#
+###############################################################################
+#ZONE INTERFACE BROADCAST OPTIONS
diff --git a/puppet/modules/shorewall/files/boilerplate/maclog.footer b/puppet/modules/shorewall/files/boilerplate/maclog.footer
new file mode 100644
index 00000000..5e12d1da
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/maclog.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES BEFORE THIS ONE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/maclog.header b/puppet/modules/shorewall/files/boilerplate/maclog.header
new file mode 100644
index 00000000..b0c382ab
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/maclog.header
@@ -0,0 +1,14 @@
+#
+# Shorewall version 4 - Maclog File (Added in Shorewall version 3.2.5)
+#
+# /etc/shorewall/maclog
+#
+# Add commands below that you want executed while mac filtering rules are
+# being created. These will be executed once for each interface having
+# 'maclist' specified and it is invoked just before the logging rule is
+# added to the current chain (the name of that chain will be in $CHAIN)
+#
+# See http://shorewall.net/shorewall_extension_scripts.htm for additional
+# information.
+#
+###############################################################################
diff --git a/puppet/modules/shorewall/files/boilerplate/mangle.footer b/puppet/modules/shorewall/files/boilerplate/mangle.footer
new file mode 100644
index 00000000..6bebc05c
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/mangle.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES ABOVE THIS LINE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/mangle.header b/puppet/modules/shorewall/files/boilerplate/mangle.header
new file mode 100644
index 00000000..7a7b12ab
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/mangle.header
@@ -0,0 +1,7 @@
+#
+# Shorewall - Mangle File
+#
+# For additional information, see http://shorewall.net/manpages/shorewall-mangle.html
+#
+#######################################################################################
+#ACTION SOURCE DESTINATION PROTO DSTPORT SRCPORT USER TEST LENGTH TOS CONNBYTES HELPER HEADERS
diff --git a/puppet/modules/shorewall/files/boilerplate/masq.footer b/puppet/modules/shorewall/files/boilerplate/masq.footer
new file mode 100644
index 00000000..6bebc05c
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/masq.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES ABOVE THIS LINE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/masq.header b/puppet/modules/shorewall/files/boilerplate/masq.header
new file mode 100644
index 00000000..f8233210
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/masq.header
@@ -0,0 +1,9 @@
+#
+# Shorewall version 3.4 - Masq file
+#
+# For information about entries in this file, type "man shorewall-masq"
+#
+# For additional information, see http://shorewall.net/Documentation.htm#Masq
+#
+###############################################################################
+#INTERFACE SOURCE ADDRESS PROTO PORT(S) IPSEC MARK
diff --git a/puppet/modules/shorewall/files/boilerplate/nat.footer b/puppet/modules/shorewall/files/boilerplate/nat.footer
new file mode 100644
index 00000000..6bebc05c
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/nat.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES ABOVE THIS LINE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/nat.header b/puppet/modules/shorewall/files/boilerplate/nat.header
new file mode 100644
index 00000000..c2e0d922
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/nat.header
@@ -0,0 +1,9 @@
+#
+# Shorewall version 3.4 - Nat File
+#
+# For information about entries in this file, type "man shorewall-nat"
+#
+# For additional information, see http://shorewall.net/NAT.htm
+#
+###############################################################################
+#EXTERNAL INTERFACE INTERNAL ALL LOCAL
diff --git a/puppet/modules/shorewall/files/boilerplate/params.footer b/puppet/modules/shorewall/files/boilerplate/params.footer
new file mode 100644
index 00000000..662ac1cc
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/params.footer
@@ -0,0 +1 @@
+#LAST LINE - ADD YOUR ENTRIES ABOVE THIS ONE - DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/params.header b/puppet/modules/shorewall/files/boilerplate/params.header
new file mode 100644
index 00000000..b258b0de
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/params.header
@@ -0,0 +1,26 @@
+#
+# Shorewall version 3.4 - Params File
+#
+# /etc/shorewall/params
+#
+# Assign any variables that you need here.
+#
+# It is suggested that variable names begin with an upper case letter
+# to distinguish them from variables used internally within the
+# Shorewall programs
+#
+# Example:
+#
+# NET_IF=eth0
+# NET_BCAST=130.252.100.255
+# NET_OPTIONS=routefilter,norfc1918
+#
+# Example (/etc/shorewall/interfaces record):
+#
+# net $NET_IF $NET_BCAST $NET_OPTIONS
+#
+# The result will be the same as if the record had been written
+#
+# net eth0 130.252.100.255 routefilter,norfc1918
+#
+###############################################################################
diff --git a/puppet/modules/shorewall/files/boilerplate/policy.footer b/puppet/modules/shorewall/files/boilerplate/policy.footer
new file mode 100644
index 00000000..16c86d0e
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/policy.footer
@@ -0,0 +1 @@
+#LAST LINE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/policy.header b/puppet/modules/shorewall/files/boilerplate/policy.header
new file mode 100644
index 00000000..a0c5d5d2
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/policy.header
@@ -0,0 +1,9 @@
+#
+# Shorewall version 3.4 - Policy File
+#
+# For information about entries in this file, type "man shorewall-policy"
+#
+# See http://shorewall.net/Documentation.htm#Policy for additional information.
+#
+###############################################################################
+#SOURCE DEST POLICY LOG LIMIT:BURST
diff --git a/puppet/modules/shorewall/files/boilerplate/providers.footer b/puppet/modules/shorewall/files/boilerplate/providers.footer
new file mode 100644
index 00000000..5e12d1da
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/providers.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES BEFORE THIS ONE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/providers.header b/puppet/modules/shorewall/files/boilerplate/providers.header
new file mode 100644
index 00000000..b4a5990f
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/providers.header
@@ -0,0 +1,9 @@
+#
+# Shorewall version 4 - Providers File
+#
+# For information about entries in this file, type "man shorewall-providers"
+#
+# For additional information, see http://shorewall.net/MultiISP.html
+#
+############################################################################################
+#NAME NUMBER MARK DUPLICATE INTERFACE GATEWAY OPTIONS COPY
diff --git a/puppet/modules/shorewall/files/boilerplate/proxyarp.footer b/puppet/modules/shorewall/files/boilerplate/proxyarp.footer
new file mode 100644
index 00000000..5e12d1da
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/proxyarp.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES BEFORE THIS ONE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/proxyarp.header b/puppet/modules/shorewall/files/boilerplate/proxyarp.header
new file mode 100644
index 00000000..1e168532
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/proxyarp.header
@@ -0,0 +1,9 @@
+#
+# Shorewall version 3.4 - Proxyarp File
+#
+# For information about entries in this file, type "man shorewall-proxyarp"
+#
+# See http://shorewall.net/ProxyARP.htm for additional information.
+#
+###############################################################################
+#ADDRESS INTERFACE EXTERNAL HAVEROUTE PERSISTENT
diff --git a/puppet/modules/shorewall/files/boilerplate/rfc1918.footer b/puppet/modules/shorewall/files/boilerplate/rfc1918.footer
new file mode 100644
index 00000000..e07fdb15
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/rfc1918.footer
@@ -0,0 +1,5 @@
+# The real subnets from RFC1918
+172.16.0.0/12 logdrop # RFC 1918
+192.168.0.0/16 logdrop # RFC 1918
+10.0.0.0/8 logdrop # RFC 1918
+#LAST LINE -- ADD YOUR ENTRIES BEFORE THIS ONE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/rfc1918.header b/puppet/modules/shorewall/files/boilerplate/rfc1918.header
new file mode 100644
index 00000000..8d6a4162
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/rfc1918.header
@@ -0,0 +1,5 @@
+#
+# Shorewall version 3.4 - Rfc1918 File
+#
+###############################################################################
+#SUBNETS TARGET
diff --git a/puppet/modules/shorewall/files/boilerplate/routestopped.footer b/puppet/modules/shorewall/files/boilerplate/routestopped.footer
new file mode 100644
index 00000000..5e12d1da
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/routestopped.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES BEFORE THIS ONE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/routestopped.header b/puppet/modules/shorewall/files/boilerplate/routestopped.header
new file mode 100644
index 00000000..5408aace
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/routestopped.header
@@ -0,0 +1,11 @@
+#
+# Shorewall version 3.4 - Routestopped File
+#
+# For information about entries in this file, type "man shorewall-routestopped"
+#
+# See http://shorewall.net/Documentation.htm#Routestopped and
+# http://shorewall.net/starting_and_stopping_shorewall.htm for additional
+# information.
+#
+###############################################################################
+#INTERFACE HOST(S) OPTIONS
diff --git a/puppet/modules/shorewall/files/boilerplate/rtrules.footer b/puppet/modules/shorewall/files/boilerplate/rtrules.footer
new file mode 100644
index 00000000..5e12d1da
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/rtrules.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES BEFORE THIS ONE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/rtrules.header b/puppet/modules/shorewall/files/boilerplate/rtrules.header
new file mode 100644
index 00000000..fd9b2f48
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/rtrules.header
@@ -0,0 +1,8 @@
+#
+# Shorewall version 4 - route rules File
+#
+# For information about entries in this file, type "man shorewall-rtrules"
+#
+# For additional information, see http://www.shorewall.net/MultiISP.html
+####################################################################################
+# SOURCE DEST PROVIDER PRIORITY MASK
diff --git a/puppet/modules/shorewall/files/boilerplate/rules.footer b/puppet/modules/shorewall/files/boilerplate/rules.footer
new file mode 100644
index 00000000..5e12d1da
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/rules.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES BEFORE THIS ONE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/rules.header b/puppet/modules/shorewall/files/boilerplate/rules.header
new file mode 100644
index 00000000..764358ac
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/rules.header
@@ -0,0 +1,10 @@
+#
+# Shorewall version 3.4 - Rules File
+#
+# For information on the settings in this file, type "man shorewall-rules"
+#
+# See http://shorewall.net/Documentation.htm#Rules for additional information.
+#
+#############################################################################################################
+#ACTION SOURCE DEST PROTO DEST SOURCE ORIGINAL RATE USER/ MARK
+# PORT PORT(S) DEST LIMIT GROUP
diff --git a/puppet/modules/shorewall/files/boilerplate/start.footer b/puppet/modules/shorewall/files/boilerplate/start.footer
new file mode 100644
index 00000000..5e12d1da
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/start.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES BEFORE THIS ONE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/start.header b/puppet/modules/shorewall/files/boilerplate/start.header
new file mode 100644
index 00000000..689dff19
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/start.header
@@ -0,0 +1,12 @@
+#
+# Shorewall version 4 - Start File
+#
+# /etc/shorewall/start
+#
+# Add commands below that you want to be executed after shorewall has
+# been started or restarted.
+#
+# See http://shorewall.net/shorewall_extension_scripts.htm for additional
+# information.
+#
+###############################################################################
diff --git a/puppet/modules/shorewall/files/boilerplate/started.footer b/puppet/modules/shorewall/files/boilerplate/started.footer
new file mode 100644
index 00000000..5e12d1da
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/started.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES BEFORE THIS ONE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/started.header b/puppet/modules/shorewall/files/boilerplate/started.header
new file mode 100644
index 00000000..b7704dba
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/started.header
@@ -0,0 +1,20 @@
+#
+# Shorewall version 4 - Started File
+#
+# /etc/shorewall/started
+#
+# Add commands below that you want to be executed after shorewall has
+# been completely started or restarted. The difference between this
+# extension script and /etc/shorewall/start is that this one is invoked
+# after delayed loading of the blacklist (DELAYBLACKLISTLOAD=Yes) and
+# after the 'shorewall' chain has been created (thus signaling that the
+# firewall is completely up).
+#
+# This script should not change the firewall configuration directly but
+# may do so indirectly by running /sbin/shorewall with the 'nolock'
+# option.
+#
+# See http://shorewall.net/shorewall_extension_scripts.htm for additional
+# information.
+#
+###############################################################################
diff --git a/puppet/modules/shorewall/files/boilerplate/stop.footer b/puppet/modules/shorewall/files/boilerplate/stop.footer
new file mode 100644
index 00000000..5e12d1da
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/stop.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES BEFORE THIS ONE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/stop.header b/puppet/modules/shorewall/files/boilerplate/stop.header
new file mode 100644
index 00000000..0088abe1
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/stop.header
@@ -0,0 +1,13 @@
+#
+# Shorewall version 4 - Stop File
+#
+# /etc/shorewall/stop
+#
+# Add commands below that you want to be executed at the beginning of a
+# "shorewall stop" command.
+#
+# See http://shorewall.net/shorewall_extension_scripts.htm for additional
+# information.
+#
+###############################################################################
+#LAST LINE - ADD YOUR ENTRIES ABOVE THIS ONE - DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/stopped.footer b/puppet/modules/shorewall/files/boilerplate/stopped.footer
new file mode 100644
index 00000000..5e12d1da
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/stopped.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES BEFORE THIS ONE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/stopped.header b/puppet/modules/shorewall/files/boilerplate/stopped.header
new file mode 100644
index 00000000..438e5e05
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/stopped.header
@@ -0,0 +1,13 @@
+#
+# Shorewall version 4 - Stopped File
+#
+# /etc/shorewall/stopped
+#
+# Add commands below that you want to be executed at the completion of a
+# "shorewall stop" command.
+#
+# See http://shorewall.net/shorewall_extension_scripts.htm for additional
+# information.
+#
+###############################################################################
+#LAST LINE - ADD YOUR ENTRIES ABOVE THIS ONE - DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/tcclasses.footer b/puppet/modules/shorewall/files/boilerplate/tcclasses.footer
new file mode 100644
index 00000000..5e12d1da
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/tcclasses.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES BEFORE THIS ONE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/tcclasses.header b/puppet/modules/shorewall/files/boilerplate/tcclasses.header
new file mode 100644
index 00000000..025415ba
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/tcclasses.header
@@ -0,0 +1,9 @@
+#
+# Shorewall version 4 - Tcclasses File
+#
+# For information about entries in this file, type "man shorewall-tcclasses"
+#
+# See http://shorewall.net/traffic_shaping.htm for additional information.
+#
+###############################################################################
+#INTERFACE:CLASS MARK RATE CEIL PRIORITY OPTIONS
diff --git a/puppet/modules/shorewall/files/boilerplate/tcdevices.footer b/puppet/modules/shorewall/files/boilerplate/tcdevices.footer
new file mode 100644
index 00000000..5e12d1da
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/tcdevices.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES BEFORE THIS ONE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/tcdevices.header b/puppet/modules/shorewall/files/boilerplate/tcdevices.header
new file mode 100644
index 00000000..fe7c3d1f
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/tcdevices.header
@@ -0,0 +1,10 @@
+#
+# Shorewall version 4 - Tcdevices File
+#
+# For information about entries in this file, type "man shorewall-tcdevices"
+#
+# See http://shorewall.net/traffic_shaping.htm for additional information.
+#
+###############################################################################
+#NUMBER: IN-BANDWIDTH OUT-BANDWIDTH OPTIONS REDIRECTED
+#INTERFACE INTERFACES
diff --git a/puppet/modules/shorewall/files/boilerplate/tcrules.footer b/puppet/modules/shorewall/files/boilerplate/tcrules.footer
new file mode 100644
index 00000000..5e12d1da
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/tcrules.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES BEFORE THIS ONE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/tcrules.header b/puppet/modules/shorewall/files/boilerplate/tcrules.header
new file mode 100644
index 00000000..e0e7adcf
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/tcrules.header
@@ -0,0 +1,15 @@
+#
+# Shorewall version 4 - Tcrules File
+#
+# For information about entries in this file, type "man shorewall-tcrules"
+#
+# See http://shorewall.net/traffic_shaping.htm for additional information.
+# For usage in selecting among multiple ISPs, see
+# http://shorewall.net/MultiISP.html
+#
+# See http://shorewall.net/PacketMarking.html for a detailed description of
+# the Netfilter/Shorewall packet marking mechanism.
+######################################################################################################################
+#MARK SOURCE DEST PROTO DEST SOURCE USER TEST LENGTH TOS CONNBYTES HELPER
+# PORT(S) PORT(S)
+
diff --git a/puppet/modules/shorewall/files/boilerplate/tunnel.footer b/puppet/modules/shorewall/files/boilerplate/tunnel.footer
new file mode 100644
index 00000000..5e12d1da
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/tunnel.footer
@@ -0,0 +1 @@
+#LAST LINE -- ADD YOUR ENTRIES BEFORE THIS ONE -- DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/tunnel.header b/puppet/modules/shorewall/files/boilerplate/tunnel.header
new file mode 100644
index 00000000..638fd568
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/tunnel.header
@@ -0,0 +1,11 @@
+#
+# Shorewall version 4 - Tunnels File
+#
+# For information about entries in this file, type "man shorewall-tunnels"
+#
+# The manpage is also online at
+# http://www.shorewall.net/manpages/shorewall-tunnels.html
+#
+###############################################################################
+#TYPE ZONE GATEWAY GATEWAY
+# ZONE
diff --git a/puppet/modules/shorewall/files/boilerplate/zones.footer b/puppet/modules/shorewall/files/boilerplate/zones.footer
new file mode 100644
index 00000000..662ac1cc
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/zones.footer
@@ -0,0 +1 @@
+#LAST LINE - ADD YOUR ENTRIES ABOVE THIS ONE - DO NOT REMOVE
diff --git a/puppet/modules/shorewall/files/boilerplate/zones.header b/puppet/modules/shorewall/files/boilerplate/zones.header
new file mode 100644
index 00000000..8b82c2e5
--- /dev/null
+++ b/puppet/modules/shorewall/files/boilerplate/zones.header
@@ -0,0 +1,11 @@
+#
+# Shorewall version 3.4 - Zones File
+#
+# For information about this file, type "man shorewall-zones"
+#
+# For more information, see http://www.shorewall.net/Documentation.htm#Zones
+#
+###############################################################################
+#ZONE TYPE OPTIONS IN OUT
+# OPTIONS OPTIONS
+fw firewall
diff --git a/puppet/modules/shorewall/files/empty/.ignore b/puppet/modules/shorewall/files/empty/.ignore
new file mode 100644
index 00000000..89cb1fe9
--- /dev/null
+++ b/puppet/modules/shorewall/files/empty/.ignore
@@ -0,0 +1 @@
+# file needed for git - don't remove it
diff --git a/puppet/modules/shorewall/manifests/base.pp b/puppet/modules/shorewall/manifests/base.pp
new file mode 100644
index 00000000..7959f018
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/base.pp
@@ -0,0 +1,48 @@
+# base things for shorewall
+class shorewall::base {
+
+ package { 'shorewall':
+ ensure => $shorewall::ensure_version,
+ }
+
+ # This file has to be managed in place, so shorewall can find it
+ file {
+ '/etc/shorewall/shorewall.conf':
+ require => Package[shorewall],
+ notify => Service[shorewall],
+ owner => root,
+ group => 0,
+ mode => '0644';
+ '/etc/shorewall/puppet':
+ ensure => directory,
+ require => Package[shorewall],
+ owner => root,
+ group => 0,
+ mode => '0644';
+ }
+
+ if $shorewall::conf_source {
+ File['/etc/shorewall/shorewall.conf']{
+ source => $shorewall::conf_source,
+ }
+ } else {
+
+ Class['augeas'] -> Class['shorewall::base']
+
+ augeas { 'shorewall_module_config_path':
+ changes => 'set /files/etc/shorewall/shorewall.conf/CONFIG_PATH \'"/etc/shorewall/puppet:/etc/shorewall:/usr/share/shorewall"\'',
+ lens => 'Shellvars.lns',
+ incl => '/etc/shorewall/shorewall.conf',
+ notify => Service['shorewall'],
+ require => Package['shorewall'];
+ }
+ }
+
+ service{'shorewall':
+ ensure => running,
+ enable => true,
+ hasstatus => true,
+ hasrestart => true,
+ require => Package['shorewall'],
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/blacklist.pp b/puppet/modules/shorewall/manifests/blacklist.pp
new file mode 100644
index 00000000..afbe2165
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/blacklist.pp
@@ -0,0 +1,9 @@
+define shorewall::blacklist(
+ $proto = '-',
+ $port = '-',
+ $order='100'
+){
+ shorewall::entry{"blacklist-${order}-${name}":
+ line => "${name} ${proto} ${port}",
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/centos.pp b/puppet/modules/shorewall/manifests/centos.pp
new file mode 100644
index 00000000..f671bc9f
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/centos.pp
@@ -0,0 +1,13 @@
+# things needed on centos
+class shorewall::centos inherits shorewall::base {
+ if $::lsbmajdistrelease > 5 {
+ augeas{'enable_shorewall':
+ context => '/files/etc/sysconfig/shorewall',
+ changes => 'set startup 1',
+ lens => 'Shellvars.lns',
+ incl => '/etc/sysconfig/shorewall',
+ require => Package['shorewall'],
+ notify => Service['shorewall'],
+ }
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/debian.pp b/puppet/modules/shorewall/manifests/debian.pp
new file mode 100644
index 00000000..c7ed6077
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/debian.pp
@@ -0,0 +1,11 @@
+class shorewall::debian inherits shorewall::base {
+ file{'/etc/default/shorewall':
+    content => template('shorewall/debian_default.erb'),
+    require => Package['shorewall'],
+    notify => Service['shorewall'],
+    owner => root, group => 0, mode => '0644';
+ }
+ Service['shorewall']{
+ status => '/sbin/shorewall status'
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/entry.pp b/puppet/modules/shorewall/manifests/entry.pp
new file mode 100644
index 00000000..c8fffc72
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/entry.pp
@@ -0,0 +1,12 @@
+define shorewall::entry(
+ $ensure = present,
+ $line
+){
+ $parts = split($name,'-')
+ concat::fragment{$name:
+ ensure => $ensure,
+ content => "${line}\n",
+ order => $parts[1],
+ target => "/etc/shorewall/puppet/${parts[0]}",
+ }
+}
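
A minimal usage sketch, assuming a hypothetical SSH rule: the title of a shorewall::entry encodes the target file and the concat order as "<file>-<order>-<description>", and everything after the second dash is purely descriptive.

    shorewall::entry { 'rules-240-allow_ssh':
      line => 'ACCEPT net $FW tcp 22',
    }
    # lands in /etc/shorewall/puppet/rules at concat order 240
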
diff --git a/puppet/modules/shorewall/manifests/extension_script.pp b/puppet/modules/shorewall/manifests/extension_script.pp
new file mode 100644
index 00000000..569fcbf8
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/extension_script.pp
@@ -0,0 +1,14 @@
+# See http://shorewall.net/shorewall_extension_scripts.htm
+define shorewall::extension_script($script = '') {
+ case $name {
+ 'init', 'initdone', 'start', 'started', 'stop', 'stopped', 'clear', 'refresh', 'continue', 'maclog': {
+ file { "/etc/shorewall/puppet/${name}":
+ content => "${script}\n",
+ notify => Service[shorewall];
+ }
+ }
+ '', default: {
+ err("${name}: unknown shorewall extension script")
+ }
+ }
+}
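
A minimal usage sketch, assuming a hypothetical logger one-liner as the script body:

    shorewall::extension_script { 'started':
      script => 'logger "shorewall (re)started"',
    }
    # written to /etc/shorewall/puppet/started, which shorewall finds via CONFIG_PATH
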
diff --git a/puppet/modules/shorewall/manifests/gentoo.pp b/puppet/modules/shorewall/manifests/gentoo.pp
new file mode 100644
index 00000000..7b307a4e
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/gentoo.pp
@@ -0,0 +1,5 @@
+class shorewall::gentoo inherits shorewall::base {
+ Package[shorewall]{
+ category => 'net-firewall',
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/host.pp b/puppet/modules/shorewall/manifests/host.pp
new file mode 100644
index 00000000..f4002232
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/host.pp
@@ -0,0 +1,10 @@
+define shorewall::host(
+ $zone,
+ $options = 'tcpflags,blacklist,norfc1918',
+ $order='100'
+){
+ shorewall::entry{"hosts-${order}-${name}":
+ line => "${zone} ${name} ${options}"
+ }
+}
+
diff --git a/puppet/modules/shorewall/manifests/init.pp b/puppet/modules/shorewall/manifests/init.pp
new file mode 100644
index 00000000..a5675646
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/init.pp
@@ -0,0 +1,75 @@
+# Manage shorewall on your system
+class shorewall(
+ $startup = '1',
+ $conf_source = false,
+ $ensure_version = 'present',
+ $tor_transparent_proxy_host = '127.0.0.1',
+ $tor_transparent_proxy_port = '9040',
+ $tor_user = $::operatingsystem ? {
+ 'Debian' => 'debian-tor',
+ default => 'tor'
+ }
+) {
+
+ case $::operatingsystem {
+ gentoo: { include shorewall::gentoo }
+ debian: {
+ include shorewall::debian
+ $dist_tor_user = 'debian-tor'
+ }
+ centos: { include shorewall::centos }
+ ubuntu: {
+ case $::lsbdistcodename {
+ karmic: { include shorewall::ubuntu::karmic }
+ default: { include shorewall::debian }
+ }
+ }
+ default: {
+      notice("unknown operatingsystem: ${::operatingsystem}")
+ include shorewall::base
+ }
+ }
+
+ shorewall::managed_file{
+ [
+ # See http://www.shorewall.net/3.0/Documentation.htm#Zones
+ 'zones',
+ # See http://www.shorewall.net/3.0/Documentation.htm#Interfaces
+ 'interfaces',
+ # See http://www.shorewall.net/3.0/Documentation.htm#Hosts
+ 'hosts',
+ # See http://www.shorewall.net/3.0/Documentation.htm#Policy
+ 'policy',
+ # See http://www.shorewall.net/3.0/Documentation.htm#Rules
+ 'rules',
+ # See http://www.shorewall.net/3.0/Documentation.htm#Masq
+ 'masq',
+ # See http://www.shorewall.net/3.0/Documentation.htm#ProxyArp
+ 'proxyarp',
+ # See http://www.shorewall.net/3.0/Documentation.htm#NAT
+ 'nat',
+ # See http://www.shorewall.net/3.0/Documentation.htm#Blacklist
+ 'blacklist',
+ # See http://www.shorewall.net/3.0/Documentation.htm#rfc1918
+ 'rfc1918',
+ # See http://www.shorewall.net/3.0/Documentation.htm#Routestopped
+ 'routestopped',
+ # See http://www.shorewall.net/3.0/Documentation.htm#Variables
+ 'params',
+ # See http://www.shorewall.net/3.0/traffic_shaping.htm
+ 'tcdevices',
+ # See http://www.shorewall.net/3.0/traffic_shaping.htm
+ 'tcrules',
+ # See http://www.shorewall.net/3.0/traffic_shaping.htm
+ 'tcclasses',
+ # http://www.shorewall.net/manpages/shorewall-providers.html
+ 'providers',
+ # See http://www.shorewall.net/manpages/shorewall-tunnels.html
+ 'tunnel',
+ # See http://www.shorewall.net/MultiISP.html
+ 'rtrules',
+ # See http://www.shorewall.net/manpages/shorewall-mangle.html
+ 'mangle',
+ ]:;
+ }
+}
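
A minimal node-level sketch, assuming a hypothetical site_shorewall module ships a prebuilt shorewall.conf; with conf_source left at its default of false, the augeas CONFIG_PATH tweak from shorewall::base is used instead.

    class { 'shorewall':
      ensure_version => 'present',
      conf_source    => 'puppet:///modules/site_shorewall/shorewall.conf',
      tor_user       => 'debian-tor',
    }
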
diff --git a/puppet/modules/shorewall/manifests/interface.pp b/puppet/modules/shorewall/manifests/interface.pp
new file mode 100644
index 00000000..403ee749
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/interface.pp
@@ -0,0 +1,29 @@
+define shorewall::interface(
+ $zone,
+ $broadcast = 'detect',
+ $options = 'tcpflags,blacklist,routefilter,nosmurfs,logmartians',
+ $add_options = '',
+ $rfc1918 = false,
+ $dhcp = false,
+ $order = 100
+){
+ $added_opts = $add_options ? {
+ '' => '',
+ default => ",${add_options}",
+ }
+
+ $dhcp_opt = $dhcp ? {
+ false => '',
+ default => ',dhcp',
+ }
+
+ $rfc1918_opt = $rfc1918 ? {
+ false => ',norfc1918',
+ default => '',
+ }
+
+ shorewall::entry { "interfaces-${order}-${name}":
+ line => "${zone} ${name} ${broadcast} ${options}${dhcp_opt}${rfc1918_opt}${added_opts}",
+ }
+}
+
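
A minimal sketch, assuming a hypothetical external interface eth0 in the net zone:

    shorewall::interface { 'eth0':
      zone    => 'net',
      rfc1918 => true,  # keeps the ',norfc1918' option out of the line
    }
    # -> "net eth0 detect tcpflags,blacklist,routefilter,nosmurfs,logmartians"
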
diff --git a/puppet/modules/shorewall/manifests/managed_file.pp b/puppet/modules/shorewall/manifests/managed_file.pp
new file mode 100644
index 00000000..d564daa7
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/managed_file.pp
@@ -0,0 +1,17 @@
+define shorewall::managed_file () {
+ concat{ "/etc/shorewall/puppet/${name}":
+ notify => Service['shorewall'],
+ require => File['/etc/shorewall/puppet'],
+    owner => root, group => 0, mode => '0600';
+ }
+ concat::fragment {
+ "${name}-header":
+ source => "puppet:///modules/shorewall/boilerplate/${name}.header",
+ target => "/etc/shorewall/puppet/${name}",
+ order => '000';
+ "${name}-footer":
+ source => "puppet:///modules/shorewall/boilerplate/${name}.footer",
+ target => "/etc/shorewall/puppet/${name}",
+ order => '999';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/mangle.pp b/puppet/modules/shorewall/manifests/mangle.pp
new file mode 100644
index 00000000..e3fd1b3b
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/mangle.pp
@@ -0,0 +1,19 @@
+define shorewall::mangle(
+ $source,
+ $destination,
+ $proto = '-',
+ $destinationport = '-',
+ $sourceport = '-',
+ $user = '-',
+ $test = '-',
+ $length = '-',
+ $tos = '-',
+ $connbytes = '-',
+ $helper = '-',
+ $headers = '-',
+ $order = '100'
+){
+ shorewall::entry{"mangle-${order}-${name}":
+ line => "${name} ${source} ${destination} ${proto} ${destinationport} ${sourceport} ${user} ${test} ${length} ${tos} ${connbytes} ${helper} ${headers}"
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/masq.pp b/puppet/modules/shorewall/manifests/masq.pp
new file mode 100644
index 00000000..fb097e5e
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/masq.pp
@@ -0,0 +1,17 @@
+# mark is new in 3.4.4
+# source (= subnet) = Set of hosts that you wish to masquerade.
+# address = If you specify an address here, SNAT will be used and this will be the source address.
+define shorewall::masq(
+ $interface,
+ $source, $address = '-',
+ $proto = '-',
+ $port = '-',
+ $ipsec = '-',
+ $mark = '',
+ $order='100'
+){
+ shorewall::entry{"masq-${order}-${name}":
+ line => "# ${name}\n${interface} ${source} ${address} ${proto} ${port} ${ipsec} ${mark}"
+ }
+}
+
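
A minimal sketch, assuming a hypothetical LAN masqueraded out of eth0:

    shorewall::masq { 'masq-lan':
      interface => 'eth0',
      source    => '192.168.1.0/24',
    }
    # -> masq entry "eth0 192.168.1.0/24 - - - -" preceded by a "# masq-lan" comment line
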
diff --git a/puppet/modules/shorewall/manifests/nat.pp b/puppet/modules/shorewall/manifests/nat.pp
new file mode 100644
index 00000000..e29b7849
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/nat.pp
@@ -0,0 +1,11 @@
+define shorewall::nat(
+ $interface,
+ $internal,
+ $all = 'no',
+ $local = 'yes',
+ $order='100'
+){
+ shorewall::entry{"nat-${order}-${name}":
+ line => "${name} ${interface} ${internal} ${all} ${local}"
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/params.pp b/puppet/modules/shorewall/manifests/params.pp
new file mode 100644
index 00000000..3bc56630
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/params.pp
@@ -0,0 +1,5 @@
+define shorewall::params($value, $order='100'){
+ shorewall::entry{"params-${order}-${name}":
+ line => "${name}=${value}",
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/policy.pp b/puppet/modules/shorewall/manifests/policy.pp
new file mode 100644
index 00000000..efee05b5
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/policy.pp
@@ -0,0 +1,12 @@
+define shorewall::policy(
+ $sourcezone,
+ $destinationzone,
+ $policy, $shloglevel = '-',
+ $limitburst = '-',
+ $order
+){
+ shorewall::entry{"policy-${order}-${name}":
+ line => "# ${name}\n${sourcezone} ${destinationzone} ${policy} ${shloglevel} ${limitburst}",
+ }
+}
+
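
A minimal sketch of a typical policy pair; the zone names are the usual fw/net placeholders and order is required:

    shorewall::policy {
      'fw-to-net':
        sourcezone      => '$FW',
        destinationzone => 'net',
        policy          => 'ACCEPT',
        order           => 100;
      'all-to-all':
        sourcezone      => 'all',
        destinationzone => 'all',
        policy          => 'DROP',
        shloglevel      => 'info',
        order           => 900;
    }
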
diff --git a/puppet/modules/shorewall/manifests/providers.pp b/puppet/modules/shorewall/manifests/providers.pp
new file mode 100644
index 00000000..a1f8726a
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/providers.pp
@@ -0,0 +1,16 @@
+# manage providers
+define shorewall::providers(
+ $provider = $name,
+ $number = '',
+ $mark = '',
+ $duplicate = 'main',
+ $interface = '',
+ $gateway = '',
+ $options = '',
+ $copy = '',
+ $order = '100'
+){
+ shorewall::entry{"providers-${order}-${name}":
+ line => "# ${name}\n${provider} ${number} ${mark} ${duplicate} ${interface} ${gateway} ${options} ${copy}"
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/proxyarp.pp b/puppet/modules/shorewall/manifests/proxyarp.pp
new file mode 100644
index 00000000..1af554fb
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/proxyarp.pp
@@ -0,0 +1,11 @@
+define shorewall::proxyarp(
+ $interface,
+ $external,
+ $haveroute = yes,
+ $persistent = no,
+ $order='100'
+ ){
+ shorewall::entry{"proxyarp-${order}-${name}":
+ line => "# ${name}\n${name} ${interface} ${external} ${haveroute} ${persistent}"
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rfc1918.pp b/puppet/modules/shorewall/manifests/rfc1918.pp
new file mode 100644
index 00000000..31dce5dc
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rfc1918.pp
@@ -0,0 +1,8 @@
+define shorewall::rfc1918(
+ $action = 'logdrop',
+ $order='100'
+){
+ shorewall::entry{"rfc1918-${order}-${name}":
+ line => "${name} ${action}"
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/routestopped.pp b/puppet/modules/shorewall/manifests/routestopped.pp
new file mode 100644
index 00000000..aca57b51
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/routestopped.pp
@@ -0,0 +1,14 @@
+define shorewall::routestopped(
+ $interface = $name,
+ $host = '-',
+ $options = '',
+ $order='100'
+){
+ $real_interface = $interface ? {
+ '' => $name,
+ default => $interface,
+ }
+ shorewall::entry{"routestopped-${order}-${name}":
+ line => "${real_interface} ${host} ${options}",
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rtrules.pp b/puppet/modules/shorewall/manifests/rtrules.pp
new file mode 100644
index 00000000..3810f26d
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rtrules.pp
@@ -0,0 +1,11 @@
+define shorewall::rtrules(
+ $source = '-',
+ $destination = '-',
+ $provider,
+ $priority = '10000',
+ $mark,
+){
+ shorewall::entry { "rtrules-${mark}-${name}":
+ line => "# ${name}\n${source} ${destination} ${provider} ${priority} ${mark}",
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rule.pp b/puppet/modules/shorewall/manifests/rule.pp
new file mode 100644
index 00000000..2fe91e27
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rule.pp
@@ -0,0 +1,20 @@
+# mark is new in 3.4.4
+define shorewall::rule(
+ $ensure = present,
+ $action,
+ $source,
+ $destination,
+ $proto = '-',
+ $destinationport = '-',
+ $sourceport = '-',
+ $originaldest = '-',
+ $ratelimit = '-',
+ $user = '-',
+ $mark = '',
+ $order
+){
+ shorewall::entry{"rules-${order}-${name}":
+ ensure => $ensure,
+ line => "# ${name}\n${action} ${source} ${destination} ${proto} ${destinationport} ${sourceport} ${originaldest} ${ratelimit} ${user} ${mark}",
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rule_section.pp b/puppet/modules/shorewall/manifests/rule_section.pp
new file mode 100644
index 00000000..82984ca2
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rule_section.pp
@@ -0,0 +1,7 @@
+define shorewall::rule_section(
+ $order
+){
+ shorewall::entry{"rules-${order}-${name}":
+ line => "SECTION ${name}",
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/cobbler.pp b/puppet/modules/shorewall/manifests/rules/cobbler.pp
new file mode 100644
index 00000000..e04e4925
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/cobbler.pp
@@ -0,0 +1,19 @@
+class shorewall::rules::cobbler {
+ shorewall::rule{'net-me-syslog-xmlrpc-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '25150:25151',
+ order => 240,
+ action => 'ACCEPT';
+ }
+ shorewall::rule{'net-me-syslog-xmlrpc-udp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'udp',
+ destinationport => '25150:25151',
+ order => 240,
+ action => 'ACCEPT';
+ }
+ include shorewall::rules::rsync
+}
diff --git a/puppet/modules/shorewall/manifests/rules/dns.pp b/puppet/modules/shorewall/manifests/rules/dns.pp
new file mode 100644
index 00000000..99311cae
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/dns.pp
@@ -0,0 +1,18 @@
+class shorewall::rules::dns {
+ shorewall::rule {
+ 'net-me-tcp_dns':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '53',
+ order => 240,
+ action => 'ACCEPT';
+ 'net-me-udp_dns':
+ source => 'net',
+ destination => '$FW',
+ proto => 'udp',
+ destinationport => '53',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/dns/disable.pp b/puppet/modules/shorewall/manifests/rules/dns/disable.pp
new file mode 100644
index 00000000..36541da4
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/dns/disable.pp
@@ -0,0 +1,5 @@
+class shorewall::rules::dns::disable inherits shorewall::rules::dns {
+ Shorewall::Rule['net-me-tcp_dns', 'net-me-udp_dns']{
+ action => 'DROP',
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/ekeyd.pp b/puppet/modules/shorewall/manifests/rules/ekeyd.pp
new file mode 100644
index 00000000..dbff02fe
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/ekeyd.pp
@@ -0,0 +1,10 @@
+class shorewall::rules::ekeyd {
+ shorewall::rule { 'net-me-tcp_ekeyd':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '8888',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/ftp.pp b/puppet/modules/shorewall/manifests/rules/ftp.pp
new file mode 100644
index 00000000..6d34c78f
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/ftp.pp
@@ -0,0 +1,10 @@
+class shorewall::rules::ftp {
+ shorewall::rule { 'net-me-ftp-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '21',
+ order => 240,
+ action => 'FTP/ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/gitdaemon.pp b/puppet/modules/shorewall/manifests/rules/gitdaemon.pp
new file mode 100644
index 00000000..21372f63
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/gitdaemon.pp
@@ -0,0 +1,10 @@
+class shorewall::rules::gitdaemon {
+ shorewall::rule {'net-me-tcp_gitdaemon':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '9418',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/gitdaemon/absent.pp b/puppet/modules/shorewall/manifests/rules/gitdaemon/absent.pp
new file mode 100644
index 00000000..ade6fba0
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/gitdaemon/absent.pp
@@ -0,0 +1,5 @@
+class shorewall::rules::gitdaemon::absent inherits shorewall::rules::gitdaemon {
+ Shorewall::Rule['net-me-tcp_gitdaemon']{
+ ensure => absent,
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/http.pp b/puppet/modules/shorewall/manifests/rules/http.pp
new file mode 100644
index 00000000..e6a9bdef
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/http.pp
@@ -0,0 +1,10 @@
+class shorewall::rules::http {
+ shorewall::rule { 'net-me-http-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '80',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/http/disable.pp b/puppet/modules/shorewall/manifests/rules/http/disable.pp
new file mode 100644
index 00000000..5d9170ca
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/http/disable.pp
@@ -0,0 +1,5 @@
+class shorewall::rules::http::disable inherits shorewall::rules::http {
+ Shorewall::Rule['net-me-http-tcp']{
+ action => 'DROP',
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/https.pp b/puppet/modules/shorewall/manifests/rules/https.pp
new file mode 100644
index 00000000..cc49d100
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/https.pp
@@ -0,0 +1,10 @@
+class shorewall::rules::https {
+ shorewall::rule { 'net-me-https-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '443',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/identd.pp b/puppet/modules/shorewall/manifests/rules/identd.pp
new file mode 100644
index 00000000..719e581c
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/identd.pp
@@ -0,0 +1,10 @@
+class shorewall::rules::identd {
+ shorewall::rule { 'net-me-identd-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '113',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/imap.pp b/puppet/modules/shorewall/manifests/rules/imap.pp
new file mode 100644
index 00000000..7fbe1818
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/imap.pp
@@ -0,0 +1,11 @@
+class shorewall::rules::imap {
+ shorewall::rule {
+ 'net-me-tcp_imap_s':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '143,993',
+ order => 260,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/ipsec.pp b/puppet/modules/shorewall/manifests/rules/ipsec.pp
new file mode 100644
index 00000000..82adff09
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/ipsec.pp
@@ -0,0 +1,32 @@
+class shorewall::rules::ipsec(
+ $source = 'net'
+) {
+ shorewall::rule {
+ 'net-me-ipsec-udp':
+ source => $shorewall::rules::ipsec::source,
+ destination => '$FW',
+ proto => 'udp',
+ destinationport => '500',
+ order => 240,
+ action => 'ACCEPT';
+ 'me-net-ipsec-udp':
+ source => '$FW',
+ destination => $shorewall::rules::ipsec::source,
+ proto => 'udp',
+ destinationport => '500',
+ order => 240,
+ action => 'ACCEPT';
+ 'net-me-ipsec':
+ source => $shorewall::rules::ipsec::source,
+ destination => '$FW',
+ proto => 'esp',
+ order => 240,
+ action => 'ACCEPT';
+ 'me-net-ipsec':
+ source => '$FW',
+ destination => $shorewall::rules::ipsec::source,
+ proto => 'esp',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/ipsec_nat.pp b/puppet/modules/shorewall/manifests/rules/ipsec_nat.pp
new file mode 100644
index 00000000..6c0d5072
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/ipsec_nat.pp
@@ -0,0 +1,18 @@
+class shorewall::rules::ipsec_nat {
+ shorewall::rule {
+ 'net-me-ipsec-nat-udp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'udp',
+ destinationport => '4500',
+ order => 240,
+ action => 'ACCEPT';
+ 'me-net-ipsec-nat-udp':
+ source => '$FW',
+ destination => 'net',
+ proto => 'udp',
+ destinationport => '4500',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/jabberserver.pp b/puppet/modules/shorewall/manifests/rules/jabberserver.pp
new file mode 100644
index 00000000..3b38b294
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/jabberserver.pp
@@ -0,0 +1,19 @@
+class shorewall::rules::jabberserver {
+ shorewall::rule {
+ 'net-me-tcp_jabber':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '5222,5223,5269',
+ order => 240,
+ action => 'ACCEPT';
+ 'me-net-tcp_jabber_s2s':
+ source => '$FW',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => '5260,5269,5270,5271,5272',
+ order => 240,
+ action => 'ACCEPT';
+ }
+
+}
diff --git a/puppet/modules/shorewall/manifests/rules/jetty.pp b/puppet/modules/shorewall/manifests/rules/jetty.pp
new file mode 100644
index 00000000..4080e7e6
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/jetty.pp
@@ -0,0 +1,12 @@
+class shorewall::rules::jetty {
+ # open jetty port
+ shorewall::rule {
+ 'net-me-jetty-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '8080',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/jetty/http.pp b/puppet/modules/shorewall/manifests/rules/jetty/http.pp
new file mode 100644
index 00000000..4c0652be
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/jetty/http.pp
@@ -0,0 +1,9 @@
+class shorewall::rules::jetty::http {
+ # dnat
+ shorewall::rule {
+ 'dnat-http-to-jetty':
+ destination => "net:${::ipaddress}:8080",
+ destinationport => '80',
+ source => 'net', proto => 'tcp', order => 140, action => 'DNAT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/jetty/ssl.pp b/puppet/modules/shorewall/manifests/rules/jetty/ssl.pp
new file mode 100644
index 00000000..f7517493
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/jetty/ssl.pp
@@ -0,0 +1,11 @@
+class shorewall::rules::jetty::ssl {
+ shorewall::rule {
+ 'net-me-jettyssl-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '8443',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/keyserver.pp b/puppet/modules/shorewall/manifests/rules/keyserver.pp
new file mode 100644
index 00000000..2ade9c1e
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/keyserver.pp
@@ -0,0 +1,11 @@
+class shorewall::rules::keyserver {
+ shorewall::rule {
+ 'net-me-tcp_keyserver':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '11371,11372',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/libvirt/host.pp b/puppet/modules/shorewall/manifests/rules/libvirt/host.pp
new file mode 100644
index 00000000..c2268659
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/libvirt/host.pp
@@ -0,0 +1,79 @@
+class shorewall::rules::libvirt::host (
+ $vmz = 'vmz',
+ $masq_iface = 'eth0',
+ $debproxy_port = 8000,
+ $accept_dhcp = true,
+ $vmz_iface = 'virbr0',
+ ) {
+
+ define shorewall::rule::accept::from_vmz (
+ $proto = '-',
+ $destinationport = '-',
+ $action = 'ACCEPT'
+ ) {
+ shorewall::rule { $name:
+ source => $shorewall::rules::libvirt::host::vmz,
+ destination => '$FW',
+ order => 300,
+ proto => $proto,
+ destinationport => $destinationport,
+ action => $action;
+ }
+ }
+
+ shorewall::policy {
+ 'fw-to-vmz':
+ sourcezone => '$FW',
+ destinationzone => $vmz,
+ policy => 'ACCEPT',
+ order => 110;
+ 'vmz-to-net':
+ sourcezone => $vmz,
+ destinationzone => 'net',
+ policy => 'ACCEPT',
+ order => 200;
+ 'vmz-to-all':
+ sourcezone => $vmz,
+ destinationzone => 'all',
+ policy => 'DROP',
+ shloglevel => 'info',
+ order => 800;
+ }
+
+ shorewall::rule::accept::from_vmz {
+ 'accept_dns_from_vmz':
+ action => 'DNS(ACCEPT)';
+ 'accept_tftp_from_vmz':
+ action => 'TFTP(ACCEPT)';
+ 'accept_puppet_from_vmz':
+ proto => 'tcp',
+ destinationport => '8140',
+ action => 'ACCEPT';
+ }
+
+ if $accept_dhcp {
+ shorewall::mangle { 'CHECKSUM:T':
+ source => '-',
+ destination => $vmz_iface,
+ proto => 'udp',
+ destinationport => '68';
+ }
+ }
+
+ if $debproxy_port {
+ shorewall::rule::accept::from_vmz { 'accept_debproxy_from_vmz':
+ proto => 'tcp',
+ destinationport => $debproxy_port,
+ action => 'ACCEPT';
+ }
+ }
+
+ if $masq_iface {
+ shorewall::masq {
+ "masq-${masq_iface}":
+ interface => $masq_iface,
+ source => '10.0.0.0/8,169.254.0.0/16,172.16.0.0/12,192.168.0.0/16';
+ }
+ }
+
+}
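
A minimal sketch, assuming the vmz zone and its virbr0 interface are declared elsewhere (for example via zones and interfaces entries):

    class { 'shorewall::rules::libvirt::host':
      vmz           => 'vmz',
      masq_iface    => 'eth0',
      debproxy_port => false,  # false skips the apt-proxy rule
    }
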
diff --git a/puppet/modules/shorewall/manifests/rules/managesieve.pp b/puppet/modules/shorewall/manifests/rules/managesieve.pp
new file mode 100644
index 00000000..63fafcb6
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/managesieve.pp
@@ -0,0 +1,11 @@
+class shorewall::rules::managesieve {
+ shorewall::rule {
+ 'net-me-tcp_managesieve':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '2000',
+ order => 260,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/mdns.pp b/puppet/modules/shorewall/manifests/rules/mdns.pp
new file mode 100644
index 00000000..76b1fd90
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/mdns.pp
@@ -0,0 +1,8 @@
+class shorewall::rules::mdns {
+ shorewall::rule { 'net-me-mdns':
+ source => 'net',
+ destination => '$FW',
+ order => 240,
+ action => 'mDNS(ACCEPT)';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/munin.pp b/puppet/modules/shorewall/manifests/rules/munin.pp
new file mode 100644
index 00000000..a20a4e0a
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/munin.pp
@@ -0,0 +1,16 @@
+class shorewall::rules::munin(
+ $munin_port = '4949',
+ $munin_collector = ['127.0.0.1'],
+ $collector_source = 'net'
+){
+ shorewall::params { 'MUNINPORT': value => $munin_port }
+ shorewall::params { 'MUNINCOLLECTOR': value => join(any2array($munin_collector),',') }
+ shorewall::rule{'net-me-munin-tcp':
+ source => "${collector_source}:\$MUNINCOLLECTOR",
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '$MUNINPORT',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
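
A minimal sketch, with placeholder collector addresses from the 192.0.2.0/24 documentation range:

    class { 'shorewall::rules::munin':
      munin_port      => '4949',
      munin_collector => ['192.0.2.10', '192.0.2.20'],
    }
    # sets the $MUNINPORT and $MUNINCOLLECTOR params and accepts only those collectors
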
diff --git a/puppet/modules/shorewall/manifests/rules/mysql.pp b/puppet/modules/shorewall/manifests/rules/mysql.pp
new file mode 100644
index 00000000..0da68a19
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/mysql.pp
@@ -0,0 +1,11 @@
+class shorewall::rules::mysql {
+ shorewall::rule {
+ 'net-me-tcp_mysql':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '3306',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/nfsd.pp b/puppet/modules/shorewall/manifests/rules/nfsd.pp
new file mode 100644
index 00000000..bd509cf2
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/nfsd.pp
@@ -0,0 +1,115 @@
+class shorewall::rules::nfsd {
+ shorewall::rule { 'net-me-portmap-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '111',
+ order => 240,
+ action => 'ACCEPT';
+ }
+ shorewall::rule { 'net-me-portmap-udp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'udp',
+ destinationport => '111',
+ order => 240,
+ action => 'ACCEPT';
+ }
+ shorewall::rule { 'net-me-rpc.statd-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '662',
+ order => 240,
+ action => 'ACCEPT';
+ }
+ shorewall::rule { 'net-me-rpc.statd-udp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'udp',
+ destinationport => '662',
+ order => 240,
+ action => 'ACCEPT';
+ }
+ shorewall::rule { 'me-net-rpc.statd-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '2020',
+ order => 240,
+ action => 'ACCEPT';
+ }
+ shorewall::rule { 'me-net-rpc.statd-udp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'udp',
+ destinationport => '2020',
+ order => 240,
+ action => 'ACCEPT';
+ }
+ shorewall::rule { 'net-me-rpc.lockd-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '32803',
+ order => 240,
+ action => 'ACCEPT';
+ }
+ shorewall::rule { 'net-me-rpc.lockd-udp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'udp',
+ destinationport => '32769',
+ order => 240,
+ action => 'ACCEPT';
+ }
+ shorewall::rule { 'net-me-rpc.mountd-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '892',
+ order => 240,
+ action => 'ACCEPT';
+ }
+ shorewall::rule { 'net-me-rpc.mountd-udp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'udp',
+ destinationport => '892',
+ order => 240,
+ action => 'ACCEPT';
+ }
+ shorewall::rule { 'net-me-rpc.rquotad-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '875',
+ order => 240,
+ action => 'ACCEPT';
+ }
+  shorewall::rule { 'net-me-rpc.rquotad-udp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'udp',
+ destinationport => '875',
+ order => 240,
+ action => 'ACCEPT';
+ }
+ shorewall::rule { 'net-me-rpc.nfsd-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '2049',
+ order => 240,
+ action => 'ACCEPT';
+ }
+ shorewall::rule { 'net-me-rpc.nfsd-udp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'udp',
+ destinationport => '2049',
+ order => 240,
+ action => 'ACCEPT';
+ }
+
+}
diff --git a/puppet/modules/shorewall/manifests/rules/ntp/client.pp b/puppet/modules/shorewall/manifests/rules/ntp/client.pp
new file mode 100644
index 00000000..e0db8d45
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/ntp/client.pp
@@ -0,0 +1,11 @@
+class shorewall::rules::ntp::client {
+ # open ntp udp port to fetch time
+ shorewall::rule {'me-net-udp_ntp':
+ source => '$FW',
+ destination => 'net',
+ proto => 'udp',
+ destinationport => '123',
+ order => 251,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/ntp/server.pp b/puppet/modules/shorewall/manifests/rules/ntp/server.pp
new file mode 100644
index 00000000..ed0968db
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/ntp/server.pp
@@ -0,0 +1,10 @@
+class shorewall::rules::ntp::server {
+ shorewall::rule {'net-me-udp_ntp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'udp',
+ destinationport => '123',
+ order => 241,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/openfire.pp b/puppet/modules/shorewall/manifests/rules/openfire.pp
new file mode 100644
index 00000000..0e6d1d80
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/openfire.pp
@@ -0,0 +1,12 @@
+class shorewall::rules::openfire {
+  include shorewall::rules::jabberserver
+
+ shorewall::rule { 'me-all-openfire-tcp':
+ source => '$FW',
+ destination => 'all',
+ proto => 'tcp',
+ destinationport => '7070,7443,7777',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/ekeyd.pp b/puppet/modules/shorewall/manifests/rules/out/ekeyd.pp
new file mode 100644
index 00000000..8acdaad5
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/ekeyd.pp
@@ -0,0 +1,10 @@
+define shorewall::rules::out::ekeyd($host) {
+ shorewall::rule { "me-${name}-tcp_ekeyd":
+ source => '$FW',
+ destination => "${name}:${host}",
+ proto => 'tcp',
+ destinationport => '8888',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/git.pp b/puppet/modules/shorewall/manifests/rules/out/git.pp
new file mode 100644
index 00000000..cb88da85
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/git.pp
@@ -0,0 +1,10 @@
+class shorewall::rules::out::git {
+ shorewall::rule{'me-net-git-tcp':
+ source => '$FW',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => '9418',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/ibackup.pp b/puppet/modules/shorewall/manifests/rules/out/ibackup.pp
new file mode 100644
index 00000000..856bcdb9
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/ibackup.pp
@@ -0,0 +1,12 @@
+class shorewall::rules::out::ibackup(
+ $backup_host
+){
+ shorewall::rule { 'me-net-tcp_backupssh':
+ source => '$FW',
+ destination => "net:${backup_host}",
+ proto => 'tcp',
+ destinationport => 'ssh',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/imap.pp b/puppet/modules/shorewall/manifests/rules/out/imap.pp
new file mode 100644
index 00000000..f1313d2c
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/imap.pp
@@ -0,0 +1,11 @@
+class shorewall::rules::out::imap {
+ shorewall::rule {
+ 'me-net-tcp_imap_s':
+ source => '$FW',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => '143,993',
+ order => 260,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/irc.pp b/puppet/modules/shorewall/manifests/rules/out/irc.pp
new file mode 100644
index 00000000..9c8590ab
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/irc.pp
@@ -0,0 +1,10 @@
+class shorewall::rules::out::irc {
+ shorewall::rule{'me-net-irc-tcp':
+ source => '$FW',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => '6667',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/ircs.pp b/puppet/modules/shorewall/manifests/rules/out/ircs.pp
new file mode 100644
index 00000000..a71585d8
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/ircs.pp
@@ -0,0 +1,10 @@
+class shorewall::rules::out::ircs {
+ shorewall::rule{'me-net-ircs-tcp':
+ source => '$FW',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => '6669',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/keyserver.pp b/puppet/modules/shorewall/manifests/rules/out/keyserver.pp
new file mode 100644
index 00000000..aa7147e0
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/keyserver.pp
@@ -0,0 +1,11 @@
+class shorewall::rules::out::keyserver {
+ shorewall::rule {
+ 'me-net-tcp_keyserver':
+ source => '$FW',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => '11371,11372',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/managesieve.pp b/puppet/modules/shorewall/manifests/rules/out/managesieve.pp
new file mode 100644
index 00000000..b0e1c3da
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/managesieve.pp
@@ -0,0 +1,11 @@
+class shorewall::rules::out::managesieve {
+ shorewall::rule {
+ 'me-net-tcp_managesieve':
+ source => '$FW',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => '2000',
+ order => 260,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/munin.pp b/puppet/modules/shorewall/manifests/rules/out/munin.pp
new file mode 100644
index 00000000..004a3d5b
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/munin.pp
@@ -0,0 +1,10 @@
+class shorewall::rules::out::munin {
+  shorewall::rule { 'me-net-tcp_muninhost':
+ source => '$FW',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => '4949',
+ order => 340,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/mysql.pp b/puppet/modules/shorewall/manifests/rules/out/mysql.pp
new file mode 100644
index 00000000..1334ba6a
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/mysql.pp
@@ -0,0 +1,11 @@
+class shorewall::rules::out::mysql {
+ shorewall::rule {
+ 'me-net-tcp_mysql':
+ source => '$FW',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => '3306',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/pop3.pp b/puppet/modules/shorewall/manifests/rules/out/pop3.pp
new file mode 100644
index 00000000..ebd4828f
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/pop3.pp
@@ -0,0 +1,11 @@
+class shorewall::rules::out::pop3 {
+ shorewall::rule {
+ 'me-net-tcp_pop3_s':
+ source => '$FW',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => 'pop3,pop3s',
+ order => 260,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/postgres.pp b/puppet/modules/shorewall/manifests/rules/out/postgres.pp
new file mode 100644
index 00000000..a62d75d7
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/postgres.pp
@@ -0,0 +1,11 @@
+class shorewall::rules::out::postgres {
+ shorewall::rule {
+ 'me-net-tcp_postgres':
+ source => '$FW',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => '5432',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/puppet.pp b/puppet/modules/shorewall/manifests/rules/out/puppet.pp
new file mode 100644
index 00000000..cbe8cce7
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/puppet.pp
@@ -0,0 +1,20 @@
+class shorewall::rules::out::puppet(
+ $puppetserver = "puppet.${::domain}",
+ $puppetserver_port = 8140,
+ $puppetserver_signport = 8141
+) {
+ class{'shorewall::rules::puppet':
+ puppetserver => $puppetserver,
+ puppetserver_port => $puppetserver_port,
+ puppetserver_signport => $puppetserver_signport,
+ }
+ # we want to connect to the puppet server
+ shorewall::rule { 'me-net-puppet_tcp':
+ source => '$FW',
+ destination => 'net:$PUPPETSERVER',
+ proto => 'tcp',
+ destinationport => '$PUPPETSERVER_PORT,$PUPPETSERVER_SIGN_PORT',
+ order => 340,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/silc.pp b/puppet/modules/shorewall/manifests/rules/out/silc.pp
new file mode 100644
index 00000000..830df9c3
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/silc.pp
@@ -0,0 +1,19 @@
+class shorewall::rules::out::silc {
+ shorewall::rule{
+ 'me-net-silc-tcp':
+ source => '$FW',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => '706',
+ order => 240,
+ action => 'ACCEPT';
+ 'me-net-silc-udp':
+ source => '$FW',
+ destination => 'net',
+ proto => 'udp',
+ destinationport => '706',
+ order => 240,
+ action => 'ACCEPT';
+
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/smtp.pp b/puppet/modules/shorewall/manifests/rules/out/smtp.pp
new file mode 100644
index 00000000..2cc77cc3
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/smtp.pp
@@ -0,0 +1,11 @@
+class shorewall::rules::out::smtp {
+ shorewall::rule {
+ 'me-net-tcp_smtp':
+ source => '$FW',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => 'smtp',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/ssh.pp b/puppet/modules/shorewall/manifests/rules/out/ssh.pp
new file mode 100644
index 00000000..c18e299b
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/ssh.pp
@@ -0,0 +1,10 @@
+class shorewall::rules::out::ssh {
+ shorewall::rule { 'me-net-tcp_ssh':
+ source => '$FW',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => 'ssh',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/ssh/disable.pp b/puppet/modules/shorewall/manifests/rules/out/ssh/disable.pp
new file mode 100644
index 00000000..223bf73b
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/ssh/disable.pp
@@ -0,0 +1,5 @@
+class shorewall::rules::out::ssh::disable inherits shorewall::rules::out::ssh {
+ Shorewall::Rule['me-net-tcp_ssh']{
+ action => 'DROP',
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/ssh/remove.pp b/puppet/modules/shorewall/manifests/rules/out/ssh/remove.pp
new file mode 100644
index 00000000..bc0acf37
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/ssh/remove.pp
@@ -0,0 +1,5 @@
+class shorewall::rules::out::ssh::remove inherits shorewall::rules::out::ssh {
+ Shorewall::Rule['me-net-tcp_ssh']{
+ ensure => absent,
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/whois.pp b/puppet/modules/shorewall/manifests/rules/out/whois.pp
new file mode 100644
index 00000000..d003d5c1
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/whois.pp
@@ -0,0 +1,11 @@
+class shorewall::rules::out::whois {
+ # open whois tcp port
+ shorewall::rule {'me-net-tcp_whois':
+ source => '$FW',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => '43',
+ order => 251,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/out/xmpp.pp b/puppet/modules/shorewall/manifests/rules/out/xmpp.pp
new file mode 100644
index 00000000..a1b4577c
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/out/xmpp.pp
@@ -0,0 +1,10 @@
+class shorewall::rules::out::xmpp {
+ shorewall::rule{'me-net-xmpp-tcp':
+ source => '$FW',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => '5222',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/pop3.pp b/puppet/modules/shorewall/manifests/rules/pop3.pp
new file mode 100644
index 00000000..25878568
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/pop3.pp
@@ -0,0 +1,11 @@
+class shorewall::rules::pop3 {
+ shorewall::rule {
+ 'net-me-tcp_pop3_s':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => 'pop3,pop3s',
+ order => 260,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/postgres.pp b/puppet/modules/shorewall/manifests/rules/postgres.pp
new file mode 100644
index 00000000..1a22027e
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/postgres.pp
@@ -0,0 +1,10 @@
+class shorewall::rules::postgres {
+ shorewall::rule { 'net-me-tcp_postgres':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '5432',
+ order => 250,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/puppet.pp b/puppet/modules/shorewall/manifests/rules/puppet.pp
new file mode 100644
index 00000000..84e7d813
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/puppet.pp
@@ -0,0 +1,11 @@
+class shorewall::rules::puppet(
+ $puppetserver = "puppet.${::domain}",
+ $puppetserver_port = 8140,
+ $puppetserver_signport = 8141
+){
+ shorewall::params{
+ 'PUPPETSERVER': value => $puppetserver;
+ 'PUPPETSERVER_PORT': value => $puppetserver_port;
+ 'PUPPETSERVER_SIGN_PORT': value => $puppetserver_signport;
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/puppet/master.pp b/puppet/modules/shorewall/manifests/rules/puppet/master.pp
new file mode 100644
index 00000000..925979c3
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/puppet/master.pp
@@ -0,0 +1,10 @@
+class shorewall::rules::puppet::master {
+ shorewall::rule { 'net-me-tcp_puppet-main':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '$PUPPETSERVER_PORT,$PUPPETSERVER_SIGN_PORT',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/rsync.pp b/puppet/modules/shorewall/manifests/rules/rsync.pp
new file mode 100644
index 00000000..144624db
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/rsync.pp
@@ -0,0 +1,10 @@
+class shorewall::rules::rsync {
+ shorewall::rule{'me-net-rsync-tcp':
+ source => '$FW',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => '873',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/silcd.pp b/puppet/modules/shorewall/manifests/rules/silcd.pp
new file mode 100644
index 00000000..91ee4a59
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/silcd.pp
@@ -0,0 +1,19 @@
+class shorewall::rules::silcd {
+ shorewall::rule{
+ 'net-me-silcd-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '706',
+ order => 240,
+ action => 'ACCEPT';
+ 'net-me-silcd-udp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'udp',
+ destinationport => '706',
+ order => 240,
+ action => 'ACCEPT';
+
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/smtp.pp b/puppet/modules/shorewall/manifests/rules/smtp.pp
new file mode 100644
index 00000000..b0389012
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/smtp.pp
@@ -0,0 +1,10 @@
+class shorewall::rules::smtp {
+ shorewall::rule { 'net-me-smtp-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '25',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/smtp/disable.pp b/puppet/modules/shorewall/manifests/rules/smtp/disable.pp
new file mode 100644
index 00000000..cee85b08
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/smtp/disable.pp
@@ -0,0 +1,5 @@
+class shorewall::rules::smtp::disable inherits shorewall::rules::smtp {
+ Shorewall::Rule['net-me-smtp-tcp']{
+ action => 'DROP'
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/smtp_submission.pp b/puppet/modules/shorewall/manifests/rules/smtp_submission.pp
new file mode 100644
index 00000000..dff90f35
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/smtp_submission.pp
@@ -0,0 +1,10 @@
+class shorewall::rules::smtp_submission {
+ shorewall::rule { 'net-me-smtp_submission-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '587',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/smtp_submission/disable.pp b/puppet/modules/shorewall/manifests/rules/smtp_submission/disable.pp
new file mode 100644
index 00000000..9724fe79
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/smtp_submission/disable.pp
@@ -0,0 +1,5 @@
+class shorewall::rules::smtp_submission::disable inherits shorewall::rules::smtp_submission {
+ Shorewall::Rule['net-me-smtp_submission-tcp']{
+ action => 'DROP'
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/smtps.pp b/puppet/modules/shorewall/manifests/rules/smtps.pp
new file mode 100644
index 00000000..48183f74
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/smtps.pp
@@ -0,0 +1,10 @@
+class shorewall::rules::smtps {
+ shorewall::rule {'net-me-smtps-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '465',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/smtps/disable.pp b/puppet/modules/shorewall/manifests/rules/smtps/disable.pp
new file mode 100644
index 00000000..24bd21fb
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/smtps/disable.pp
@@ -0,0 +1,5 @@
+class shorewall::rules::smtps::disable inherits shorewall::rules::smtps {
+ Shorewall::Rule['net-me-smtps-tcp']{
+ action => 'DROP',
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/sobby/instance.pp b/puppet/modules/shorewall/manifests/rules/sobby/instance.pp
new file mode 100644
index 00000000..7151976b
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/sobby/instance.pp
@@ -0,0 +1,11 @@
+define shorewall::rules::sobby::instance( $port ){
+ shorewall::rule {
+ "net-me-tcp_sobby_${name}":
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => $port,
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/ssh.pp b/puppet/modules/shorewall/manifests/rules/ssh.pp
new file mode 100644
index 00000000..3a1b5309
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/ssh.pp
@@ -0,0 +1,13 @@
+class shorewall::rules::ssh(
+ $ports,
+ $source = 'net'
+) {
+ shorewall::rule { 'net-me-tcp_ssh':
+ source => $shorewall::rules::ssh::source,
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => join($shorewall::rules::ssh::ports,','),
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
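
A minimal sketch, assuming SSH also listens on a hypothetical extra port:

    class { 'shorewall::rules::ssh':
      ports => ['22', '2222'],
    }
    # -> destinationport '22,2222' on the net-me-tcp_ssh rule
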
diff --git a/puppet/modules/shorewall/manifests/rules/syslog.pp b/puppet/modules/shorewall/manifests/rules/syslog.pp
new file mode 100644
index 00000000..de802e25
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/syslog.pp
@@ -0,0 +1,12 @@
+class shorewall::rules::syslog {
+ shorewall::rule { 'net-me-syslog-udp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'udp',
+ destinationport => '514',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
+
+
diff --git a/puppet/modules/shorewall/manifests/rules/tftp.pp b/puppet/modules/shorewall/manifests/rules/tftp.pp
new file mode 100644
index 00000000..78877293
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/tftp.pp
@@ -0,0 +1,18 @@
+class shorewall::rules::tftp {
+ shorewall::rule { 'net-me-tftp-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '69',
+ order => 240,
+ action => 'ACCEPT';
+ }
+ shorewall::rule { 'net-me-tftp-udp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'udp',
+ destinationport => '69',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/tinc.pp b/puppet/modules/shorewall/manifests/rules/tinc.pp
new file mode 100644
index 00000000..79cf92e4
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/tinc.pp
@@ -0,0 +1,34 @@
+class shorewall::rules::tinc {
+ shorewall::rule { 'net-me-tinc-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '655',
+ order => 240,
+ action => 'ACCEPT';
+ }
+ shorewall::rule { 'me-net-tinc-tcp':
+ source => '$FW',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => '655',
+ order => 240,
+ action => 'ACCEPT';
+ }
+ shorewall::rule { 'net-me-tinc-udp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'udp',
+ destinationport => '655',
+ order => 240,
+ action => 'ACCEPT';
+ }
+ shorewall::rule { 'me-net-tinc-udp':
+ source => '$FW',
+ destination => 'net',
+ proto => 'udp',
+ destinationport => '655',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/tomcat.pp b/puppet/modules/shorewall/manifests/rules/tomcat.pp
new file mode 100644
index 00000000..3c6f9df0
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/tomcat.pp
@@ -0,0 +1,12 @@
+class shorewall::rules::tomcat {
+ # open tomcat port
+ shorewall::rule {
+ 'net-me-tomcat-tcp':
+ source => 'net',
+ destination => '$FW',
+ proto => 'tcp',
+ destinationport => '8080',
+ order => 240,
+ action => 'ACCEPT';
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/rules/torify.pp b/puppet/modules/shorewall/manifests/rules/torify.pp
new file mode 100644
index 00000000..f6e62d81
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/torify.pp
@@ -0,0 +1,29 @@
+# shorewall::rules::torify
+#
+# Note: shorewall::rules::torify cannot be used more than once with the
+# same user listed in the $users array. The same restriction applies when
+# this define is used multiple times without a $users parameter, since
+# every such use falls back to the same default user ('-').
+#
+# Parameters:
+#
+# - users: every element of this array must be valid in the shorewall
+#   rules USER/GROUP column.
+# - destinations: every element of this array must be valid in the
+#   shorewall rules ORIGINAL DEST column.
+
+define shorewall::rules::torify(
+ $users = ['-'],
+ $destinations = ['-'],
+ $allow_rfc1918 = true
+){
+
+ $originaldest = join($destinations,',')
+
+ shorewall::rules::torify::user {
+ $users:
+ originaldest => $originaldest,
+ allow_rfc1918 => $allow_rfc1918;
+ }
+
+}
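
A minimal sketch, with a hypothetical user and a documentation-range destination; this redirects that user's TCP connections to the destination through the Tor transparent proxy and rejects anything that escapes the redirect:

    shorewall::rules::torify { 'torify-bob':
      users         => ['bob'],
      destinations  => ['203.0.113.7'],
      allow_rfc1918 => false,
    }
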
diff --git a/puppet/modules/shorewall/manifests/rules/torify/allow_tor_transparent_proxy.pp b/puppet/modules/shorewall/manifests/rules/torify/allow_tor_transparent_proxy.pp
new file mode 100644
index 00000000..3c18db69
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/torify/allow_tor_transparent_proxy.pp
@@ -0,0 +1,21 @@
+class shorewall::rules::torify::allow_tor_transparent_proxy {
+
+ $rule = "allow-tor-transparent-proxy"
+
+ if !defined(Shorewall::Rule["$rule"]) {
+ # A weirdness in shorewall forces us to explicitly allow traffic to
+ # net:$tor_transparent_proxy_host:$tor_transparent_proxy_port even
+    # if $FW->$FW traffic is allowed. In any case, this saves us from
+    # special-casing the remote Tor transparent proxy situation.
+ shorewall::rule {
+ "$rule":
+ source => '$FW',
+ destination => "net:${shorewall::tor_transparent_proxy_host}",
+ proto => 'tcp',
+ destinationport => $shorewall::tor_transparent_proxy_port,
+ order => 100,
+ action => 'ACCEPT';
+ }
+ }
+
+}
diff --git a/puppet/modules/shorewall/manifests/rules/torify/allow_tor_user.pp b/puppet/modules/shorewall/manifests/rules/torify/allow_tor_user.pp
new file mode 100644
index 00000000..f44c1f01
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/torify/allow_tor_user.pp
@@ -0,0 +1,15 @@
+class shorewall::rules::torify::allow_tor_user {
+
+ $whitelist_rule = "allow-from-tor-user"
+ if !defined(Shorewall::Rule["$whitelist_rule"]) {
+ shorewall::rule {
+ "$whitelist_rule":
+ source => '$FW',
+ destination => 'all',
+ user => $shorewall::tor_user,
+ order => 101,
+ action => 'ACCEPT';
+ }
+ }
+
+}
diff --git a/puppet/modules/shorewall/manifests/rules/torify/redirect_tcp_to_tor.pp b/puppet/modules/shorewall/manifests/rules/torify/redirect_tcp_to_tor.pp
new file mode 100644
index 00000000..2bee6584
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/torify/redirect_tcp_to_tor.pp
@@ -0,0 +1,40 @@
+define shorewall::rules::torify::redirect_tcp_to_tor(
+ $user = '-',
+ $originaldest = '-'
+){
+
+ # hash the destination as it may contain slashes
+ $originaldest_sha1 = sha1($originaldest)
+ $rule = "redirect-to-tor-user=${user}-to=${originaldest_sha1}"
+
+ if !defined(Shorewall::Rule["$rule"]) {
+
+ $originaldest_real = $originaldest ? {
+ '-' => '!127.0.0.1,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16',
+ default => $originaldest,
+ }
+
+ $user_real = $user ? {
+ '-' => "!${shorewall::tor_user}",
+ default => $user,
+ }
+
+ $destzone = $shorewall::tor_transparent_proxy_host ? {
+ '127.0.0.1' => '$FW',
+ default => 'net'
+ }
+
+ shorewall::rule {
+ "$rule":
+ source => '$FW',
+ destination => "${destzone}:${shorewall::tor_transparent_proxy_host}:${shorewall::tor_transparent_proxy_port}",
+ proto => 'tcp:syn',
+ originaldest => $originaldest_real,
+ user => $user_real,
+ order => 110,
+ action => 'DNAT';
+ }
+
+ }
+
+}
diff --git a/puppet/modules/shorewall/manifests/rules/torify/reject_non_tor.pp b/puppet/modules/shorewall/manifests/rules/torify/reject_non_tor.pp
new file mode 100644
index 00000000..80240ec7
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/torify/reject_non_tor.pp
@@ -0,0 +1,32 @@
+define shorewall::rules::torify::reject_non_tor(
+ $user = '-',
+ $originaldest = '-',
+ $allow_rfc1918 = true
+){
+
+ # hash the destination as it may contain slashes
+ $originaldest_sha1 = sha1($originaldest)
+ $rule = "reject-non-tor-from-${user}-to=${originaldest_sha1}"
+
+ if $originaldest == '-' {
+ $originaldest_real = $allow_rfc1918 ? {
+ false => '!127.0.0.1',
+ default => '!127.0.0.1,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16',
+ }
+ } else {
+ $originaldest_real = $originaldest
+ }
+
+ if !defined(Shorewall::Rule["$rule"]) {
+ shorewall::rule {
+ "$rule":
+ source => '$FW',
+ destination => 'all',
+ originaldest => $originaldest_real,
+ user => $user,
+ order => 120,
+ action => 'REJECT';
+ }
+ }
+
+}
diff --git a/puppet/modules/shorewall/manifests/rules/torify/user.pp b/puppet/modules/shorewall/manifests/rules/torify/user.pp
new file mode 100644
index 00000000..5caccfd6
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/rules/torify/user.pp
@@ -0,0 +1,27 @@
+define shorewall::rules::torify::user(
+ $originaldest = '-',
+ $allow_rfc1918 = true
+){
+
+ $user = $name
+
+ include shorewall::rules::torify::allow_tor_transparent_proxy
+
+ if $originaldest == '-' and $user == '-' {
+ include shorewall::rules::torify::allow_tor_user
+ }
+
+ shorewall::rules::torify::redirect_tcp_to_tor {
+ "redirect-to-tor-user=${user}-to=${originaldest}":
+ user => $user,
+ originaldest => $originaldest
+ }
+
+ shorewall::rules::torify::reject_non_tor {
+ "reject-non-tor-user=${user}-to=${originaldest}":
+ user => "$user",
+ originaldest => $originaldest,
+ allow_rfc1918 => $allow_rfc1918;
+ }
+
+}
diff --git a/puppet/modules/shorewall/manifests/tcclasses.pp b/puppet/modules/shorewall/manifests/tcclasses.pp
new file mode 100644
index 00000000..4e30a556
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/tcclasses.pp
@@ -0,0 +1,12 @@
+define shorewall::tcclasses(
+ $interface,
+ $rate,
+ $ceil,
+ $priority,
+ $options = '',
+ $order = '1'
+){
+ shorewall::entry { "tcclasses-${order}-${name}":
+ line => "# ${name}\n${interface} ${order} ${rate} ${ceil} ${priority} ${options}",
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/tcdevices.pp b/puppet/modules/shorewall/manifests/tcdevices.pp
new file mode 100644
index 00000000..f4e88d80
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/tcdevices.pp
@@ -0,0 +1,11 @@
+define shorewall::tcdevices(
+ $in_bandwidth,
+ $out_bandwidth,
+ $options = '',
+ $redirected_interfaces = '',
+ $order = '100'
+){
+ shorewall::entry { "tcdevices-${order}-${name}":
+ line => "${name} ${in_bandwidth} ${out_bandwidth} ${options} ${redirected_interfaces}",
+ }
+}
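As a hedged illustration only (interface name, bandwidth figures and class numbers are invented, not taken from this commit), the tcclasses and tcdevices defines above could be combined like this:

    # Hypothetical traffic shaping: register eth0 with Shorewall and attach
    # one class to it. Each resource renders an entry into the corresponding
    # Shorewall config file via shorewall::entry.
    shorewall::tcdevices { 'eth0':
      in_bandwidth  => '50mbit',
      out_bandwidth => '10mbit',
    }
    shorewall::tcclasses { 'bulk':
      interface => 'eth0',
      rate      => '5mbit',
      ceil      => '10mbit',
      priority  => '2',
      order     => '2',
    }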
diff --git a/puppet/modules/shorewall/manifests/tcrules.pp b/puppet/modules/shorewall/manifests/tcrules.pp
new file mode 100644
index 00000000..b9ab4a9d
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/tcrules.pp
@@ -0,0 +1,12 @@
+define shorewall::tcrules(
+ $source,
+ $destination,
+ $protocol = 'all',
+ $ports,
+ $client_ports = '',
+ $order = '1'
+){
+ shorewall::entry { "tcrules-${order}-${name}":
+ line => "# ${name}\n${order} ${source} ${destination} ${protocol} ${ports} ${client_ports}",
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/tunnel.pp b/puppet/modules/shorewall/manifests/tunnel.pp
new file mode 100644
index 00000000..2cac9227
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/tunnel.pp
@@ -0,0 +1,11 @@
+define shorewall::tunnel(
+ $tunnel_type,
+ $zone,
+ $gateway = '0.0.0.0/0',
+ $gateway_zones = '',
+ $order = '1'
+) {
+ shorewall::entry { "tunnel-${order}-${name}":
+ line => "# ${name}\n${tunnel_type} ${zone} ${gateway} ${gateway_zones}",
+ }
+}
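A hypothetical example of the tunnel define (tunnel type, port and zone are illustrative; consult the Shorewall tunnels documentation for the exact TYPE syntax):

    # Open the firewall for an OpenVPN endpoint reachable via the 'net' zone;
    # this becomes one line in Shorewall's tunnels file.
    shorewall::tunnel { 'openvpn':
      tunnel_type => 'openvpn:1194',
      zone        => 'net',
    }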
diff --git a/puppet/modules/shorewall/manifests/ubuntu/karmic.pp b/puppet/modules/shorewall/manifests/ubuntu/karmic.pp
new file mode 100644
index 00000000..0df37894
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/ubuntu/karmic.pp
@@ -0,0 +1,5 @@
+class shorewall::ubuntu::karmic inherits shorewall::debian {
+ Package['shorewall']{
+ name => 'shorewall-shell',
+ }
+}
diff --git a/puppet/modules/shorewall/manifests/zone.pp b/puppet/modules/shorewall/manifests/zone.pp
new file mode 100644
index 00000000..81e57711
--- /dev/null
+++ b/puppet/modules/shorewall/manifests/zone.pp
@@ -0,0 +1,14 @@
+define shorewall::zone(
+ $type,
+ $options = '-',
+ $in = '-',
+ $out = '-',
+ $parent = '-',
+ $order = 100
+){
+ $real_name = $parent ? { '-' => $name, default => "${name}:${parent}" }
+ shorewall::entry { "zones-${order}-${name}":
+ line => "${real_name} ${type} ${options} ${in} ${out}"
+ }
+}
+
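To make the $parent handling above concrete, a small invented example (zone names are placeholders):

    # 'net' is a plain ipv4 zone; 'tor' is declared as a child of 'net', so
    # its generated zones line starts with 'tor:net'.
    shorewall::zone { 'net':
      type => 'ipv4',
    }
    shorewall::zone { 'tor':
      type   => 'ipv4',
      parent => 'net',
    }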
diff --git a/puppet/modules/shorewall/templates/debian_default.erb b/puppet/modules/shorewall/templates/debian_default.erb
new file mode 100644
index 00000000..ec64cbe0
--- /dev/null
+++ b/puppet/modules/shorewall/templates/debian_default.erb
@@ -0,0 +1,26 @@
+# prevent startup with the default configuration
+# set the following variable to 1 in order to allow Shorewall to start
+
+# This file is brought to you by puppet
+
+startup=<%= scope.lookupvar('shorewall::startup') == "0" ? '0' : '1' %>
+
+# if your Shorewall configuration requires detection of the ip address of a ppp
+# interface, you must list such interfaces in "wait_interface" to get Shorewall to
+# wait until the interface is configured. Otherwise the script will fail because
+# it won't be able to detect the IP address.
+#
+# Example:
+# wait_interface="ppp0"
+# or
+# wait_interface="ppp0 ppp1"
+# or, if you have defined in /etc/shorewall/params
+# wait_interface=
+
+#
+# Startup options
+#
+
+OPTIONS=""
+
+# EOF
diff --git a/puppet/modules/site_apache/files/conf.d/security b/puppet/modules/site_apache/files/conf.d/security
new file mode 100644
index 00000000..a5ae5bdc
--- /dev/null
+++ b/puppet/modules/site_apache/files/conf.d/security
@@ -0,0 +1,55 @@
+#
+# Disable access to the entire file system except for the directories that
+# are explicitly allowed later.
+#
+# This currently breaks the configurations that come with some web application
+# Debian packages. It will be made the default for the release after lenny.
+#
+#<Directory />
+# AllowOverride None
+# Order Deny,Allow
+# Deny from all
+#</Directory>
+
+
+# Changing the following options will not really affect the security of the
+# server, but might make attacks slightly more difficult in some cases.
+
+#
+# ServerTokens
+# This directive configures what you return as the Server HTTP response
+# Header. The default is 'Full' which sends information about the OS-Type
+# and compiled in modules.
+# Set to one of: Full | OS | Minimal | Minor | Major | Prod
+# where Full conveys the most information, and Prod the least.
+#
+#ServerTokens Minimal
+ServerTokens Prod
+
+#
+# Optionally add a line containing the server version and virtual host
+# name to server-generated pages (internal error documents, FTP directory
+# listings, mod_status and mod_info output etc., but not CGI generated
+# documents or custom error documents).
+# Set to "EMail" to also include a mailto: link to the ServerAdmin.
+# Set to one of: On | Off | EMail
+#
+#ServerSignature Off
+ServerSignature Off
+
+#
+# Allow TRACE method
+#
+# Set to "extended" to also reflect the request body (only for testing and
+# diagnostic purposes).
+#
+# Set to one of: On | Off | extended
+#
+#TraceEnable Off
+TraceEnable On
+
+# Setting this header will prevent other sites from embedding pages from this
+# site as frames. This defends against clickjacking attacks.
+# Requires mod_headers to be enabled.
+#
+Header set X-Frame-Options: "DENY"
diff --git a/puppet/modules/site_apache/files/include.d/ssl_common.inc b/puppet/modules/site_apache/files/include.d/ssl_common.inc
new file mode 100644
index 00000000..2d282c84
--- /dev/null
+++ b/puppet/modules/site_apache/files/include.d/ssl_common.inc
@@ -0,0 +1,7 @@
+SSLEngine on
+SSLProtocol all -SSLv2 -SSLv3
+SSLHonorCipherOrder on
+SSLCompression off
+SSLCipherSuite "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!3DES:!RC4:!MD5:!PSK!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA"
+
+RequestHeader set X_FORWARDED_PROTO 'https'
\ No newline at end of file
diff --git a/puppet/modules/site_apache/manifests/common.pp b/puppet/modules/site_apache/manifests/common.pp
new file mode 100644
index 00000000..8a11759a
--- /dev/null
+++ b/puppet/modules/site_apache/manifests/common.pp
@@ -0,0 +1,30 @@
+# install basic apache modules needed for all services (nagios, webapp)
+class site_apache::common {
+
+ include apache::module::rewrite
+ include apache::module::env
+
+ class { '::apache':
+ no_default_site => true,
+ ssl => true,
+ ssl_cipher_suite => 'HIGH:MEDIUM:!aNULL:!MD5'
+ }
+
+ # needed for the mod_ssl config
+ include apache::module::mime
+
+ # load mods depending on apache version
+ if ( $::lsbdistcodename == 'jessie' ) {
+ # apache >= 2.4, debian jessie
+ # needed for mod_ssl config
+ include apache::module::socache_shmcb
+ # generally needed
+ include apache::module::mpm_prefork
+ } else {
+ # apache < 2.4, debian wheezy
+ # for "Order" directive, i.e. main apache2.conf
+ include apache::module::authz_host
+ }
+
+ include site_apache::common::tls
+}
diff --git a/puppet/modules/site_apache/manifests/common/tls.pp b/puppet/modules/site_apache/manifests/common/tls.pp
new file mode 100644
index 00000000..040868bf
--- /dev/null
+++ b/puppet/modules/site_apache/manifests/common/tls.pp
@@ -0,0 +1,6 @@
+class site_apache::common::tls {
+ # class to setup common SSL configurations
+
+ apache::config::include{ 'ssl_common.inc': }
+
+}
diff --git a/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb b/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb
new file mode 100644
index 00000000..bfa5d04d
--- /dev/null
+++ b/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb
@@ -0,0 +1,48 @@
+<VirtualHost *:80>
+ ServerName <%= @api_domain %>
+ RewriteEngine On
+ RewriteRule ^.*$ https://<%= @api_domain -%>:<%= @api_port -%>%{REQUEST_URI} [R=permanent,L]
+ CustomLog ${APACHE_LOG_DIR}/other_vhosts_access.log common
+</VirtualHost>
+
+Listen 0.0.0.0:<%= @api_port %>
+
+<VirtualHost *:<%= @api_port -%>>
+ ServerName <%= @api_domain %>
+ CustomLog ${APACHE_LOG_DIR}/other_vhosts_access.log common
+
+ SSLCACertificatePath /etc/ssl/certs
+ SSLCertificateKeyFile <%= scope.lookupvar('x509::variables::keys') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.key
+ SSLCertificateFile <%= scope.lookupvar('x509::variables::certs') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.crt
+
+ Include include.d/ssl_common.inc
+
+ <IfModule mod_headers.c>
+<% if @webapp['secure'] -%>
+ Header always set Strict-Transport-Security "max-age=31536000; includeSubDomains"
+<% end -%>
+ Header always unset X-Powered-By
+ Header always unset X-Runtime
+ </IfModule>
+
+ DocumentRoot /srv/leap/webapp/public
+ <% if scope.function_guess_apache_version([]) == '2.4' %>
+ <Directory /srv/leap/webapp/public>
+ AllowOverride None
+ Require all granted
+ </Directory>
+ <% end %>
+
+ # Check for maintenance file and redirect all requests
+ RewriteEngine On
+ RewriteCond %{DOCUMENT_ROOT}/system/maintenance.html -f
+ RewriteCond %{SCRIPT_FILENAME} !maintenance.html
+ RewriteCond %{REQUEST_URI} !/images/maintenance.jpg
+ RewriteRule ^.*$ %{DOCUMENT_ROOT}/system/maintenance.html [L]
+
+ # http://www.modrails.com/documentation/Users%20guide%20Apache.html#_passengerallowencodedslashes_lt_on_off_gt
+ AllowEncodedSlashes on
+ PassengerAllowEncodedSlashes on
+ PassengerFriendlyErrorPages off
+ SetEnv TMPDIR /var/tmp
+</VirtualHost>
diff --git a/puppet/modules/site_apache/templates/vhosts.d/common.conf.erb b/puppet/modules/site_apache/templates/vhosts.d/common.conf.erb
new file mode 100644
index 00000000..bf60e794
--- /dev/null
+++ b/puppet/modules/site_apache/templates/vhosts.d/common.conf.erb
@@ -0,0 +1,76 @@
+<VirtualHost *:80>
+ ServerName <%= @webapp_domain %>
+ ServerAlias <%= @domain_name %>
+ ServerAlias <%= @domain %>
+ ServerAlias www.<%= @domain %>
+ RewriteEngine On
+ RewriteRule ^.*$ https://<%= @webapp_domain -%>%{REQUEST_URI} [R=permanent,L]
+ CustomLog ${APACHE_LOG_DIR}/other_vhosts_access.log common
+</VirtualHost>
+
+<VirtualHost *:443>
+ ServerName <%= @webapp_domain %>
+ ServerAlias <%= @domain_name %>
+ ServerAlias <%= @domain %>
+ ServerAlias www.<%= @domain %>
+ CustomLog ${APACHE_LOG_DIR}/other_vhosts_access.log common
+
+ SSLCACertificatePath /etc/ssl/certs
+ SSLCertificateKeyFile <%= scope.lookupvar('x509::variables::keys') %>/<%= scope.lookupvar('site_config::params::commercial_cert_name') %>.key
+ SSLCertificateFile <%= scope.lookupvar('x509::variables::certs') %>/<%= scope.lookupvar('site_config::params::commercial_cert_name') %>.crt
+
+ Include include.d/ssl_common.inc
+
+ <IfModule mod_headers.c>
+<% if (defined? @services) and (@services.include? 'webapp') and (@webapp['secure']) -%>
+ Header always set Strict-Transport-Security "max-age=31536000; includeSubDomains"
+<% end -%>
+ Header always unset X-Powered-By
+ Header always unset X-Runtime
+ </IfModule>
+
+<% if (defined? @services) and (@services.include? 'webapp') -%>
+ DocumentRoot /srv/leap/webapp/public
+ <% if scope.function_guess_apache_version([]) == '2.4' %>
+ <Directory /srv/leap/webapp/public>
+ AllowOverride None
+ Require all granted
+ </Directory>
+ <% end %>
+
+ RewriteEngine On
+ # Check for maintenance file and redirect all requests
+ RewriteCond %{DOCUMENT_ROOT}/system/maintenance.html -f
+ RewriteCond %{SCRIPT_FILENAME} !maintenance.html
+ RewriteCond %{REQUEST_URI} !/images/maintenance.jpg
+ RewriteRule ^.*$ %{DOCUMENT_ROOT}/system/maintenance.html [L]
+
+ # http://www.modrails.com/documentation/Users%20guide%20Apache.html#_passengerallowencodedslashes_lt_on_off_gt
+ AllowEncodedSlashes on
+ PassengerAllowEncodedSlashes on
+ PassengerFriendlyErrorPages off
+ SetEnv TMPDIR /var/tmp
+
+ # Allow rails assets to be cached for a very long time (since the URLs change whenever the content changes)
+ <Location /assets/>
+ Header unset ETag
+ FileETag None
+ ExpiresActive On
+ ExpiresDefault "access plus 1 year"
+ </Location>
+<% end -%>
+
+
+<% if (defined? @services) and (@services.include? 'monitor') -%>
+ <DirectoryMatch (/usr/share/nagios3/htdocs|/usr/lib/cgi-bin/nagios3|/etc/nagios3/stylesheets|/usr/share/pnp4nagios)>
+ <% if (defined? @services) and (@services.include? 'webapp') -%>
+ PassengerEnabled off
+ <% end -%>
+ AllowOverride all
+ # Nagios won't work with setting this option to "DENY",
+    # as set in conf.d/security (#4169). Therefore we allow
+ # it here, only for nagios.
+ Header set X-Frame-Options: "ALLOW"
+ </DirectoryMatch>
+<% end -%>
+</VirtualHost>
diff --git a/puppet/modules/site_apache/templates/vhosts.d/hidden_service.conf.erb b/puppet/modules/site_apache/templates/vhosts.d/hidden_service.conf.erb
new file mode 100644
index 00000000..232b1577
--- /dev/null
+++ b/puppet/modules/site_apache/templates/vhosts.d/hidden_service.conf.erb
@@ -0,0 +1,55 @@
+<VirtualHost 127.0.0.1:80>
+ ServerName <%= @tor_domain %>
+
+ <IfModule mod_headers.c>
+ Header always unset X-Powered-By
+ Header always unset X-Runtime
+ </IfModule>
+
+<% if (defined? @services) and (@services.include? 'webapp') -%>
+ DocumentRoot /srv/leap/webapp/public
+ <% if scope.function_guess_apache_version([]) == '2.4' %>
+ <Directory /srv/leap/webapp/public>
+ AllowOverride None
+ Require all granted
+ </Directory>
+ <% end %>
+
+ RewriteEngine On
+ # Check for maintenance file and redirect all requests
+ RewriteCond %{DOCUMENT_ROOT}/system/maintenance.html -f
+ RewriteCond %{SCRIPT_FILENAME} !maintenance.html
+ RewriteCond %{REQUEST_URI} !/images/maintenance.jpg
+ RewriteRule ^.*$ %{DOCUMENT_ROOT}/system/maintenance.html [L]
+
+ # http://www.modrails.com/documentation/Users%20guide%20Apache.html#_passengerallowencodedslashes_lt_on_off_gt
+ AllowEncodedSlashes on
+ PassengerAllowEncodedSlashes on
+ PassengerFriendlyErrorPages off
+ SetEnv TMPDIR /var/tmp
+
+ # Allow rails assets to be cached for a very long time (since the URLs change whenever the content changes)
+ <Location /assets/>
+ Header unset ETag
+ FileETag None
+ ExpiresActive On
+ ExpiresDefault "access plus 1 year"
+ </Location>
+<% end -%>
+
+<% if (defined? @services) and (@services.include? 'static') -%>
+ DocumentRoot "/srv/static/root/public"
+ <% if scope.function_guess_apache_version([]) == '2.4' %>
+ <Directory /srv/static/root/public>
+ AllowOverride None
+ Require all granted
+ </Directory>
+ <% end %>
+ AccessFileName .htaccess
+
+ Alias /provider.json /srv/leap/provider.json
+ <Location /provider.json>
+ Header set X-Minimum-Client-Version 0.5
+ </Location>
+<% end -%>
+</VirtualHost>
diff --git a/puppet/modules/site_apt/files/Debian/51unattended-upgrades-leap b/puppet/modules/site_apt/files/Debian/51unattended-upgrades-leap
new file mode 100644
index 00000000..bbaac6a2
--- /dev/null
+++ b/puppet/modules/site_apt/files/Debian/51unattended-upgrades-leap
@@ -0,0 +1,6 @@
+// this file is managed by puppet !
+
+Unattended-Upgrade::Allowed-Origins {
+ "leap.se:stable";
+}
+
diff --git a/puppet/modules/site_apt/files/keys/leap-archive.gpg b/puppet/modules/site_apt/files/keys/leap-archive.gpg
new file mode 100644
index 00000000..dd7f3be6
--- /dev/null
+++ b/puppet/modules/site_apt/files/keys/leap-archive.gpg
Binary files differ
diff --git a/puppet/modules/site_apt/files/keys/leap-experimental-archive.gpg b/puppet/modules/site_apt/files/keys/leap-experimental-archive.gpg
new file mode 100644
index 00000000..5cc9064b
--- /dev/null
+++ b/puppet/modules/site_apt/files/keys/leap-experimental-archive.gpg
Binary files differ
diff --git a/puppet/modules/site_apt/manifests/dist_upgrade.pp b/puppet/modules/site_apt/manifests/dist_upgrade.pp
new file mode 100644
index 00000000..0eb98cea
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/dist_upgrade.pp
@@ -0,0 +1,17 @@
+# upgrade all packages
+class site_apt::dist_upgrade {
+
+ # facter returns 'true' as string
+ # lint:ignore:quoted_booleans
+ if $::apt_running == 'true' {
+ # lint:endignore
+ fail ('apt-get is running in background - Please wait until it finishes. Exiting.')
+ } else {
+ exec{'initial_apt_dist_upgrade':
+ command => "/usr/bin/apt-get -q -y -o 'DPkg::Options::=--force-confold' dist-upgrade",
+ refreshonly => false,
+ timeout => 1200,
+ require => Exec['apt_updated']
+ }
+ }
+}
diff --git a/puppet/modules/site_apt/manifests/init.pp b/puppet/modules/site_apt/manifests/init.pp
new file mode 100644
index 00000000..455425c1
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/init.pp
@@ -0,0 +1,55 @@
+# setup apt on all nodes
+class site_apt {
+
+ $sources = hiera('sources')
+ $apt_config = $sources['apt']
+
+ # debian repo urls
+ $apt_url_basic = $apt_config['basic']
+ $apt_url_security = $apt_config['security']
+ $apt_url_backports = $apt_config['backports']
+
+ # leap repo url
+ $platform_sources = $sources['platform']
+ $apt_url_platform_basic = $platform_sources['apt']['basic']
+
+ # needed on jessie hosts for getting pnp4nagios from testing
+ if ( $::operatingsystemmajrelease == '8' ) {
+ $use_next_release = true
+ } else {
+ $use_next_release = false
+ }
+
+ class { 'apt':
+ custom_key_dir => 'puppet:///modules/site_apt/keys',
+ debian_url => $apt_url_basic,
+ security_url => $apt_url_security,
+ backports_url => $apt_url_backports,
+ use_next_release => $use_next_release
+ }
+
+ # enable http://deb.leap.se debian package repository
+ include site_apt::leap_repo
+
+ apt::apt_conf { '90disable-pdiffs':
+ content => 'Acquire::PDiffs "false";';
+ }
+
+ include ::site_apt::unattended_upgrades
+
+ # not currently used
+ #apt::sources_list { 'secondary.list':
+ # content => template('site_apt/secondary.list');
+ #}
+
+ apt::preferences_snippet { 'leap':
+ priority => 999,
+ package => '*',
+ pin => 'origin "deb.leap.se"'
+ }
+
+ # All packages should be installed after 'update_apt' is called,
+ # which does an 'apt-get update'.
+ Exec['update_apt'] -> Package <||>
+
+}
diff --git a/puppet/modules/site_apt/manifests/leap_repo.pp b/puppet/modules/site_apt/manifests/leap_repo.pp
new file mode 100644
index 00000000..5eedce45
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/leap_repo.pp
@@ -0,0 +1,16 @@
+# install leap deb repo together with leap-keyring package
+# containing the apt signing key
+class site_apt::leap_repo {
+ $platform = hiera_hash('platform')
+ $major_version = $platform['major_version']
+
+ apt::sources_list { 'leap.list':
+ content => "deb ${::site_apt::apt_url_platform_basic} ${::lsbdistcodename} main\n",
+ before => Exec[refresh_apt]
+ }
+
+ package { 'leap-archive-keyring':
+ ensure => latest
+ }
+
+}
diff --git a/puppet/modules/site_apt/manifests/preferences/check_mk.pp b/puppet/modules/site_apt/manifests/preferences/check_mk.pp
new file mode 100644
index 00000000..580e0d3f
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/preferences/check_mk.pp
@@ -0,0 +1,9 @@
+class site_apt::preferences::check_mk {
+
+ apt::preferences_snippet { 'check-mk':
+ package => 'check-mk-*',
+ release => "${::lsbdistcodename}-backports",
+ priority => 999;
+ }
+
+}
diff --git a/puppet/modules/site_apt/manifests/preferences/passenger.pp b/puppet/modules/site_apt/manifests/preferences/passenger.pp
new file mode 100644
index 00000000..8cd41f91
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/preferences/passenger.pp
@@ -0,0 +1,14 @@
+#
+# currently, this is only used by static_site to get passenger v4.
+#
+# UPGRADE: this is not needed for jessie.
+#
+class site_apt::preferences::passenger {
+
+ apt::preferences_snippet { 'passenger':
+ package => 'libapache2-mod-passenger',
+ release => "${::lsbdistcodename}-backports",
+ priority => 999;
+ }
+
+}
diff --git a/puppet/modules/site_apt/manifests/preferences/rsyslog.pp b/puppet/modules/site_apt/manifests/preferences/rsyslog.pp
new file mode 100644
index 00000000..bfeaa7da
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/preferences/rsyslog.pp
@@ -0,0 +1,13 @@
+class site_apt::preferences::rsyslog {
+
+ apt::preferences_snippet {
+ 'rsyslog_anon_depends':
+ package => 'libestr0 librelp0 rsyslog*',
+ priority => '999',
+ pin => 'release a=wheezy-backports',
+ before => Class['rsyslog::install'];
+
+ 'fixed_rsyslog_anon_package':
+ ensure => absent;
+ }
+}
diff --git a/puppet/modules/site_apt/manifests/unattended_upgrades.pp b/puppet/modules/site_apt/manifests/unattended_upgrades.pp
new file mode 100644
index 00000000..42f1f4c6
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/unattended_upgrades.pp
@@ -0,0 +1,20 @@
+# configure unattended upgrades so packages from both Debian and LEAP
+# repos get upgraded unattended
+class site_apt::unattended_upgrades {
+ # override unattended-upgrades package resource to make sure
+ # that it is upgraded on every deploy (#6245)
+
+ # configure upgrades for Debian
+ class { 'apt::unattended_upgrades':
+ ensure_version => latest
+ }
+
+ # configure LEAP upgrades
+ apt::apt_conf { '51unattended-upgrades-leap':
+ source => [
+ "puppet:///modules/site_apt/${::lsbdistid}/51unattended-upgrades-leap"],
+ require => Package['unattended-upgrades'],
+ refresh_apt => false,
+ }
+
+}
diff --git a/puppet/modules/site_apt/templates/jessie/postfix.seeds b/puppet/modules/site_apt/templates/jessie/postfix.seeds
new file mode 100644
index 00000000..1a878ccc
--- /dev/null
+++ b/puppet/modules/site_apt/templates/jessie/postfix.seeds
@@ -0,0 +1 @@
+postfix postfix/main_mailer_type select No configuration
diff --git a/puppet/modules/site_apt/templates/preferences.include_squeeze b/puppet/modules/site_apt/templates/preferences.include_squeeze
new file mode 100644
index 00000000..d6d36b60
--- /dev/null
+++ b/puppet/modules/site_apt/templates/preferences.include_squeeze
@@ -0,0 +1,25 @@
+Explanation: Debian wheezy
+Package: *
+Pin: release o=Debian,n=wheezy
+Pin-Priority: 990
+
+Explanation: Debian wheezy-updates
+Package: *
+Pin: release o=Debian,n=wheezy-updates
+Pin-Priority: 990
+
+Explanation: Debian sid
+Package: *
+Pin: release o=Debian,n=sid
+Pin-Priority: 1
+
+Explanation: Debian squeeze
+Package: *
+Pin: release o=Debian,n=squeeze
+Pin-Priority: 980
+
+Explanation: Debian fallback
+Package: *
+Pin: release o=Debian
+Pin-Priority: -10
+
diff --git a/puppet/modules/site_apt/templates/secondary.list b/puppet/modules/site_apt/templates/secondary.list
new file mode 100644
index 00000000..0c024549
--- /dev/null
+++ b/puppet/modules/site_apt/templates/secondary.list
@@ -0,0 +1,3 @@
+# basic
+deb http://ftp.debian.org/debian/ <%= @lsbdistcodename %> main contrib non-free
+
diff --git a/puppet/modules/site_apt/templates/wheezy/postfix.seeds b/puppet/modules/site_apt/templates/wheezy/postfix.seeds
new file mode 100644
index 00000000..1a878ccc
--- /dev/null
+++ b/puppet/modules/site_apt/templates/wheezy/postfix.seeds
@@ -0,0 +1 @@
+postfix postfix/main_mailer_type select No configuration
diff --git a/puppet/modules/site_check_mk/files/agent/local_checks/all_hosts/run_node_tests.sh b/puppet/modules/site_check_mk/files/agent/local_checks/all_hosts/run_node_tests.sh
new file mode 100644
index 00000000..1dd0afc9
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/local_checks/all_hosts/run_node_tests.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+#
+# runs node tests
+
+/srv/leap/bin/run_tests --checkmk
diff --git a/puppet/modules/site_check_mk/files/agent/local_checks/couchdb/leap_couch_stats.sh b/puppet/modules/site_check_mk/files/agent/local_checks/couchdb/leap_couch_stats.sh
new file mode 100755
index 00000000..c7477b18
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/local_checks/couchdb/leap_couch_stats.sh
@@ -0,0 +1,122 @@
+#!/bin/bash
+#
+# todo:
+# - thresholds
+# - couch response time
+# - make CURL/URL/DBLIST_EXCLUDE vars configurable
+# - move load_nagios_utils() to helper library so we can use it from multiple scripts
+
+start_time=$(date +%s.%N)
+
+CURL='curl -s --netrc-file /etc/couchdb/couchdb.netrc'
+URL='http://127.0.0.1:5984'
+TMPFILE=$(mktemp)
+DBLIST_EXCLUDE='(user-|sessions_|tokens_|_replicator|_users)'
+PREFIX='Couchdb_'
+
+
+load_nagios_utils () {
+ # load the nagios utils
+ # in debian, the package nagios-plugins-common installs utils.sh to /usr/lib/nagios/plugins/utils.sh
+ utilsfn=
+ for d in $PROGPATH /usr/lib/nagios/plugins /usr/lib64/nagios/plugins /usr/local/nagios/libexec /opt/nagios-plugins/libexec . ; do
+ if [ -f "$d/utils.sh" ]; then
+ utilsfn=$d/utils.sh;
+ fi
+ done
+ if [ "$utilsfn" = "" ]; then
+ echo "UNKNOWN - cannot find utils.sh (part of nagios plugins)";
+ exit 3;
+ fi
+ . "$utilsfn";
+ STATE[$STATE_OK]='OK'
+ STATE[$STATE_WARNING]='Warning'
+ STATE[$STATE_CRITICAL]='Critical'
+ STATE[$STATE_UNKNOWN]='Unknown'
+  STATE[$STATE_DEPENDENT]='Dependent'
+}
+
+get_global_stats_perf () {
+ trap "localexit=3" ERR
+ local localexit db_count
+ localexit=0
+
+ # get a list of all dbs
+ $CURL -X GET $URL/_all_dbs | json_pp | egrep -v '(\[|\])' > $TMPFILE
+
+ db_count=$( wc -l < $TMPFILE)
+ excluded_db_count=$( egrep -c "$DBLIST_EXCLUDE" $TMPFILE )
+
+ echo "db_count=$db_count|excluded_db_count=$excluded_db_count"
+ return ${localexit}
+}
+
+db_stats () {
+ trap "localexit=3" ERR
+ local db db_stats doc_count del_doc_count localexit
+ localexit=0
+
+ db="$1"
+ name="$2"
+
+ if [ -z "$name" ]
+ then
+ name="$db"
+ fi
+
+ perf="$perf|${db}_docs=$( $CURL -s -X GET ${URL}/$db | json_pp |grep 'doc_count' | sed 's/[^0-9]//g' )"
+ db_stats=$( $CURL -s -X GET ${URL}/$db | json_pp )
+
+ doc_count=$( echo "$db_stats" | grep 'doc_count' | grep -v 'deleted_doc_count' | sed 's/[^0-9]//g' )
+ del_doc_count=$( echo "$db_stats" | grep 'doc_del_count' | sed 's/[^0-9]//g' )
+
+ # don't divide by zero
+ if [ $del_doc_count -eq 0 ]
+ then
+ del_doc_perc=0
+ else
+ del_doc_perc=$(( del_doc_count * 100 / doc_count ))
+ fi
+
+ bytes=$( echo "$db_stats" | grep disk_size | sed 's/[^0-9]//g' )
+ disk_size=$( echo "scale = 2; $bytes / 1024 / 1024" | bc -l )
+
+ echo -n "${localexit} ${PREFIX}${name}_database ${name}_docs=$doc_count|${name}_deleted_docs=$del_doc_count|${name}_deleted_docs_percentage=${del_doc_perc}%"
+ printf "|${name}_disksize_mb=%02.2fmb ${STATE[localexit]}: database $name\n" "$disk_size"
+
+ return ${localexit}
+}
+
+# main
+
+load_nagios_utils
+
+# per-db stats
+# get a list of all dbs
+$CURL -X GET $URL/_all_dbs | json_pp | egrep -v '(\[|\])' > $TMPFILE
+
+# get list of dbs to check
+dbs=$( egrep -v "${DBLIST_EXCLUDE}" $TMPFILE | tr -d '\n"' | sed 's/,/ /g' )
+
+for db in $dbs
+do
+ db_stats "$db"
+done
+
+# special handling for rotated dbs
+suffix=$(($(date +'%s') / (60*60*24*30)))
+db_stats "sessions_${suffix}" "sessions"
+db_stats "tokens_${suffix}" "tokens"
+
+
+# show global couchdb stats
+global_stats_perf=$(get_global_stats_perf)
+exitcode=$?
+
+end_time=$(date +%s.%N)
+duration=$( echo "scale = 2; $end_time - $start_time" | bc -l )
+
+printf "${exitcode} ${PREFIX}global_stats ${global_stats_perf}|script_duration=%02.2fs ${STATE[exitcode]}: global couchdb status\n" "$duration"
+
+rm "$TMPFILE"
+
diff --git a/puppet/modules/site_check_mk/files/agent/local_checks/mx/check_leap_mx.sh b/puppet/modules/site_check_mk/files/agent/local_checks/mx/check_leap_mx.sh
new file mode 100755
index 00000000..4711e247
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/local_checks/mx/check_leap_mx.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+
+WARN=1
+CRIT=5
+
+# in minutes
+MAXAGE=10
+
+STATUS[0]='OK'
+STATUS[1]='Warning'
+STATUS[2]='Critical'
+CHECKNAME='Leap_MX_Queue'
+
+WATCHDIR='/var/mail/leap-mx/Maildir/new/'
+
+
+total=`find $WATCHDIR -type f -mmin +$MAXAGE | wc -l`
+
+if [ $total -lt $WARN ]
+then
+ exitcode=0
+else
+ if [ $total -le $CRIT ]
+ then
+ exitcode=1
+ else
+ exitcode=2
+ fi
+fi
+
+echo "${exitcode} ${CHECKNAME} stale_files=${total} ${STATUS[exitcode]}: ${total} stale files (>=${MAXAGE} min) in ${WATCHDIR}."
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg
new file mode 100644
index 00000000..0f378a5a
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg
@@ -0,0 +1,28 @@
+/opt/bigcouch/var/log/bigcouch.log nocontext=1
+# ignore requests that are fine
+ I undefined - -.*200$
+ I undefined - -.*201$
+ I 127.0.0.1 undefined.* ok
+ I 127.0.0.1 localhost:5984 .* ok
+ # https://leap.se/code/issues/5246
+ I Shutting down group server
+ # ignore bigcouch conflict errors
+ I Error in process.*{{nocatch,conflict}
+ # ignore "Uncaught error in HTTP request: {exit, normal}" error
+ # it's suppressed in later versions of bigcouch anyhow
+ # see https://leap.se/code/issues/5226
+ I Uncaught error in HTTP request: {exit,normal}
+ I Uncaught error in HTTP request: {exit,
+ # Ignore rexi_EXIT bigcouch error (Bug #6512)
+ I Error in process <[0-9.]+> on node .* with exit value: {{rexi_EXIT,{(killed|noproc|shutdown),\[{couch_db,collect_results
+ # Ignore "Generic server terminating" bigcouch message (Feature #6544)
+ I Generic server <.*> terminating
+ I {error_report,<.*>,
+ I {error_info,
+ C Uncaught error in HTTP request: {error,
+ C Response abnormally terminated: {nodedown,
+ C rexi_DOWN,noproc
+ C rexi_DOWN,noconnection
+ C error
+ C Connection attempt from disallowed node
+ W Apache CouchDB has started
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/leap_mx.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/leap_mx.cfg
new file mode 100644
index 00000000..166d0230
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/leap_mx.cfg
@@ -0,0 +1,4 @@
+/var/log/leap/mx.log
+ W Don't know how to deliver mail
+ W No public key, stopping the processing chain
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/logwatch.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/logwatch.cfg
new file mode 100644
index 00000000..4f16d1bd
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/logwatch.cfg
@@ -0,0 +1,31 @@
+# This file is managed by Puppet. DO NOT EDIT.
+
+# logwatch.cfg
+# This file configures mk_logwatch. Define your logfiles
+# and patterns to be looked for here.
+
+# Name one or more logfiles
+/var/log/messages
+# Patterns are indented with one space and are prefixed with:
+# C: Critical messages
+# W: Warning messages
+# I: ignore these lines (OK)
+# The first match decides. Lines that do not match any pattern
+# are ignored.
+ C Fail event detected on md device
+ I mdadm.*: Rebuild.*event detected
+ W mdadm\[
+ W ata.*hard resetting link
+ W ata.*soft reset failed (.*FIS failed)
+ W device-mapper: thin:.*reached low water mark
+ C device-mapper: thin:.*no free space
+
+/var/log/auth.log
+ W sshd.*Corrupted MAC on input
+
+/var/log/kern.log
+ C panic
+ C Oops
+ W generic protection rip
+ W .*Unrecovered read error - auto reallocate failed
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/openvpn.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/openvpn.cfg
new file mode 100644
index 00000000..d99dcde9
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/openvpn.cfg
@@ -0,0 +1,19 @@
+/var/log/leap/openvpn.log
+# ignore openvpn TLS initialization errors when clients
+# suddenly hang up before properly establishing
+# a TLS connection
+ I ovpn-.*TLS Error: Unroutable control packet received from
+ I ovpn-.*TLS Error: TLS key negotiation failed to occur within 60 seconds \(check your network connectivity\)
+ I ovpn-.*TLS Error: TLS handshake failed
+ I ovpn-.*TLS Error: TLS object -> incoming plaintext read error
+ I ovpn-.*Fatal TLS error \(check_tls_errors_co\), restarting
+ I ovpn-.*TLS_ERROR: BIO read tls_read_plaintext error: error:140890B2:SSL routines:SSL3_GET_CLIENT_CERTIFICATE:no certificate
+ I ovpn-.*TLS_ERROR: BIO read tls_read_plaintext error: error:140890C7:SSL routines:SSL3_GET_CLIENT_CERTIFICATE:peer did not return a certificate
+ I ovpn-.*TLS Error: unknown opcode received from
+ I ovpn-.*Authenticate/Decrypt packet error: packet HMAC authentication failed
+ I ovpn-.*TLS Error: reading acknowledgement record from packet
+ I ovpn-.*TLS Error: session-id not found in packet from
+
+ I ovpn-.*SIGUSR1\[soft,tls-error\] received, client-instance restarting
+ I ovpn-.*VERIFY ERROR: depth=0, error=certificate has expired
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg
new file mode 100644
index 00000000..3af5045b
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg
@@ -0,0 +1,6 @@
+/var/log/soledad.log
+ C WSGI application error
+ C Error
+ C error
+# Removed this line because we determined it was better to ignore it (#6566)
+# W Timing out client:
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/stunnel.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/stunnel.cfg
new file mode 100644
index 00000000..b1e6cf2f
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/stunnel.cfg
@@ -0,0 +1,10 @@
+/var/log/leap/stunnel.log
+# check for stunnel failures
+#
+# these are temporary failures and happen very often, so we
+# ignore them until we have tuned stunnel timeouts/logging,
+# see https://leap.se/code/issues/5218
+ I stunnel:.*Connection reset by peer
+ I stunnel:.*Peer suddenly disconnected
+ I stunnel:.*Connection refused
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog/bigcouch.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/bigcouch.cfg
new file mode 100644
index 00000000..f53f0780
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/bigcouch.cfg
@@ -0,0 +1,5 @@
+# on one-node bigcouch setups, we'll get this msg
+# a lot, so we ignore it here until we fix
+# https://leap.se/code/issues/5244
+ I epmd: got partial packet only on file descriptor
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog/couchdb.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/couchdb.cfg
new file mode 100644
index 00000000..5f8d5b95
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/couchdb.cfg
@@ -0,0 +1,2 @@
+ C /usr/local/bin/couch-doc-update.*failed
+ C /usr/local/bin/couch-doc-update.*ERROR
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog_header.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog_header.cfg
new file mode 100644
index 00000000..f60d752b
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog_header.cfg
@@ -0,0 +1 @@
+/var/log/syslog
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg
new file mode 100644
index 00000000..7daf0cac
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg
@@ -0,0 +1,21 @@
+# some general patterns
+ I Error: Driver 'pcspkr' is already registered, aborting...
+# ignore postfix errors on lost connection (Bug #6476)
+ I postfix/smtpd.*SSL_accept error from.*lost connection
+# ignore postfix too many errors after DATA (#6545)
+ I postfix/smtpd.*too many errors after DATA from
+ C panic
+ C Oops
+ C Error
+# ignore ipv6 icmp errors for now (Bug #6540)
+ I kernel: .*icmpv6_send: no reply to icmp error
+ C error
+ W generic protection rip
+ W .*Unrecovered read error - auto reallocate failed
+# 401 Unauthorized error logged by webapp and possibly other
+# applications
+ C Unauthorized
+# catch abnormal termination of processes (due to segfault/fpe
+# signals etc).
+# see https://github.com/pixelated/pixelated-user-agent/issues/683
+ C systemd.*: main process exited, code=killed, status=
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/webapp.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/webapp.cfg
new file mode 100644
index 00000000..337d9ec6
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/webapp.cfg
@@ -0,0 +1,8 @@
+/var/log/leap/webapp.log
+# check for webapp errors
+ C Completed 500
+# couch connection issues
+ C webapp.*Could not connect to couch database messages due to 401 Unauthorized: {"error":"unauthorized","reason":"You are not a server admin."}
+# ignore RoutingErrors that rails throw when it can't handle a url
+# see https://leap.se/code/issues/5173
+ I webapp.*ActionController::RoutingError
diff --git a/puppet/modules/site_check_mk/files/agent/nagios_plugins/check_unix_open_fds.pl b/puppet/modules/site_check_mk/files/agent/nagios_plugins/check_unix_open_fds.pl
new file mode 100755
index 00000000..06163d49
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/nagios_plugins/check_unix_open_fds.pl
@@ -0,0 +1,322 @@
+#!/usr/bin/perl -w
+
+# check_unix_open_fds Nagios Plugin
+#
+# TComm - Carlos Peris Pla
+#
+# This nagios plugin is free software, and comes with ABSOLUTELY
+# NO WARRANTY. It may be used, redistributed and/or modified under
+# the terms of the GNU General Public Licence (see
+# http://www.fsf.org/licensing/licenses/gpl.txt).
+
+
+# MODULE DECLARATION
+
+use strict;
+use Nagios::Plugin;
+
+
+# FUNCTION DECLARATION
+
+sub CreateNagiosManager ();
+sub CheckArguments ();
+sub PerformCheck ();
+
+
+# CONSTANT DEFINITION
+
+use constant NAME => 'check_unix_open_fds';
+use constant VERSION => '0.1b';
+use constant USAGE => "Usage:\ncheck_unix_open_fds -w <process_threshold,application_threshold> -c <process_threshold,application_threshold>\n".
+ "\t\t[-V <version>]\n";
+use constant BLURB => "This plugin checks, in UNIX systems with the command lsof installed and with its SUID bit activated, the number\n".
+ "of file descriptors opened by an application and its processes.\n";
+use constant LICENSE => "This nagios plugin is free software, and comes with ABSOLUTELY\n".
+ "no WARRANTY. It may be used, redistributed and/or modified under\n".
+ "the terms of the GNU General Public Licence\n".
+ "(see http://www.fsf.org/licensing/licenses/gpl.txt).\n";
+use constant EXAMPLE => "\n\n".
+ "Example:\n".
+ "\n".
+ "check_unix_open_fds -a /usr/local/nagios/bin/ndo2db -w 20,75 -c 25,85\n".
+ "\n".
+ "It returns CRITICAL if number of file descriptors opened by ndo2db is higher than 85,\n".
+ "if not it returns WARNING if number of file descriptors opened by ndo2db is higher \n".
+ "than 75, if not it returns CRITICAL if number of file descriptors opened by any process\n".
+ "of ndo2db is higher than 25, if not it returns WARNING if number of file descriptors \n".
+ "opened by any process of ndo2db is higher than 20.\n".
+                        "In other cases it returns OK if the check has been performed successfully.\n\n";
+
+
+# VARIABLE DEFINITION
+
+my $Nagios;
+my $Error;
+my $PluginResult;
+my $PluginOutput;
+my @WVRange;
+my @CVRange;
+
+
+# MAIN FUNCTION
+
+# Get command line arguments
+$Nagios = &CreateNagiosManager(USAGE, VERSION, BLURB, LICENSE, NAME, EXAMPLE);
+eval {$Nagios->getopts};
+
+if (!$@) {
+ # Command line parsed
+ if (&CheckArguments($Nagios, \$Error, \@WVRange, \@CVRange)) {
+ # Argument checking passed
+ $PluginResult = &PerformCheck($Nagios, \$PluginOutput, \@WVRange, \@CVRange)
+ }
+ else {
+ # Error checking arguments
+ $PluginOutput = $Error;
+ $PluginResult = UNKNOWN;
+ }
+ $Nagios->nagios_exit($PluginResult,$PluginOutput);
+}
+else {
+ # Error parsing command line
+ $Nagios->nagios_exit(UNKNOWN,$@);
+}
+
+
+
+# FUNCTION DEFINITIONS
+
+# Creates and configures a Nagios plugin object
+# Input: strings (usage, version, blurb, license, name and example) to configure argument parsing functionality
+# Return value: reference to a Nagios plugin object
+
+sub CreateNagiosManager() {
+ # Create GetOpt object
+ my $Nagios = Nagios::Plugin->new(usage => $_[0], version => $_[1], blurb => $_[2], license => $_[3], plugin => $_[4], extra => $_[5]);
+
+ # Add argument units
+ $Nagios->add_arg(spec => 'application|a=s',
+ help => 'Application path for which you want to check the number of open file descriptors',
+ required => 1);
+
+ # Add argument warning
+ $Nagios->add_arg(spec => 'warning|w=s',
+ help => "Warning thresholds. Format: <process_threshold,application_threshold>",
+ required => 1);
+ # Add argument critical
+ $Nagios->add_arg(spec => 'critical|c=s',
+ help => "Critical thresholds. Format: <process_threshold,application_threshold>",
+ required => 1);
+
+ # Return value
+ return $Nagios;
+}
+
+
+# Checks argument values and sets some default values
+# Input: Nagios Plugin object
+# Output: reference to Error description string, reference to WVRange ($_[2]), reference to CVRange ($_[3])
+# Return value: True if arguments ok, false if not
+
+sub CheckArguments() {
+ my ($Nagios, $Error, $WVRange, $CVRange) = @_;
+ my $commas;
+ my $units;
+ my $i;
+ my $firstpos;
+ my $secondpos;
+
+ # Check Warning thresholds list
+ $commas = $Nagios->opts->warning =~ tr/,//;
+ if ($commas !=1){
+ ${$Error} = "Invalid Warning list format. One comma is expected.";
+ return 0;
+ }
+ else{
+ $i=0;
+ $firstpos=0;
+ my $warning=$Nagios->opts->warning;
+ while ($warning =~ /[,]/g) {
+ $secondpos=pos $warning;
+ if ($secondpos - $firstpos==1){
+ @{$WVRange}[$i] = "~:";
+ }
+ else{
+ @{$WVRange}[$i] = substr $Nagios->opts->warning, $firstpos, ($secondpos-$firstpos-1);
+ }
+ $firstpos=$secondpos;
+ $i++
+ }
+        if (length($Nagios->opts->warning) - $firstpos==0){ # the comma is the last element of the string
+ @{$WVRange}[$i] = "~:";
+ }
+ else{
+ @{$WVRange}[$i] = substr $Nagios->opts->warning, $firstpos, (length($Nagios->opts->warning)-$firstpos);
+ }
+
+ if (@{$WVRange}[0] !~/^(@?(\d+|(\d+|~):(\d+)?))?$/){
+ ${$Error} = "Invalid Process Warning threshold in ${$WVRange[0]}";
+ return 0;
+ }if (@{$WVRange}[1] !~/^(@?(\d+|(\d+|~):(\d+)?))?$/){
+ ${$Error} = "Invalid Application Warning threshold in ${$WVRange[1]}";
+ return 0;
+ }
+ }
+
+ # Check Critical thresholds list
+ $commas = $Nagios->opts->critical =~ tr/,//;
+ if ($commas !=1){
+ ${$Error} = "Invalid Critical list format. One comma is expected.";
+ return 0;
+ }
+ else{
+ $i=0;
+ $firstpos=0;
+ my $critical=$Nagios->opts->critical;
+ while ($critical =~ /[,]/g) {
+ $secondpos=pos $critical ;
+ if ($secondpos - $firstpos==1){
+ @{$CVRange}[$i] = "~:";
+ }
+ else{
+ @{$CVRange}[$i] =substr $Nagios->opts->critical, $firstpos, ($secondpos-$firstpos-1);
+ }
+ $firstpos=$secondpos;
+ $i++
+ }
+        if (length($Nagios->opts->critical) - $firstpos==0){ # the comma is the last element of the string
+ @{$CVRange}[$i] = "~:";
+ }
+ else{
+ @{$CVRange}[$i] = substr $Nagios->opts->critical, $firstpos, (length($Nagios->opts->critical)-$firstpos);
+ }
+
+ if (@{$CVRange}[0] !~/^(@?(\d+|(\d+|~):(\d+)?))?$/) {
+ ${$Error} = "Invalid Process Critical threshold in @{$CVRange}[0]";
+ return 0;
+ }
+ if (@{$CVRange}[1] !~/^(@?(\d+|(\d+|~):(\d+)?))?$/) {
+ ${$Error} = "Invalid Application Critical threshold in @{$CVRange}[1]";
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+
+# Performs whole check:
+# Input: Nagios Plugin object, reference to Plugin output string, reference to WVRange, reference to CVRange
+# Output: Plugin output string
+# Return value: Plugin return value
+
+sub PerformCheck() {
+ my ($Nagios, $PluginOutput, $WVRange, $CVRange) = @_;
+ my $Application;
+ my @AppNameSplitted;
+ my $ApplicationName;
+ my $PsCommand;
+ my $PsResult;
+ my @PsResultLines;
+ my $ProcLine;
+ my $ProcPid;
+ my $LsofCommand;
+ my $LsofResult;
+ my $ProcCount = 0;
+ my $FDCount = 0;
+ my $ProcFDAvg = 0;
+ my $PerProcMaxFD = 0;
+ my $ProcOKFlag = 0;
+ my $ProcWarningFlag = 0;
+ my $ProcCriticalFlag = 0;
+ my $OKFlag = 0;
+ my $WarningFlag = 0;
+ my $CriticalFlag = 0;
+ my $LastWarningProcFDs = 0;
+ my $LastWarningProc = -1;
+ my $LastCriticalProcFDs = 0;
+ my $LastCriticalProc = -1;
+ my $ProcPluginReturnValue = UNKNOWN;
+ my $AppPluginReturnValue = UNKNOWN;
+ my $PluginReturnValue = UNKNOWN;
+ my $PerformanceData = "";
+ my $PerfdataUnit = "FDs";
+
+ $Application = $Nagios->opts->application;
+ $PsCommand = "ps -eaf | grep $Application";
+ $PsResult = `$PsCommand`;
+ @AppNameSplitted = split(/\//, $Application);
+ $ApplicationName = $AppNameSplitted[$#AppNameSplitted];
+ @PsResultLines = split(/\n/, $PsResult);
+ if ( $#PsResultLines > 1 ) {
+ foreach my $Proc (split(/\n/, $PsResult)) {
+ if ($Proc !~ /check_unix_open_fds/ && $Proc !~ / grep /) {
+ $ProcCount += 1;
+ $ProcPid = (split(/\s+/, $Proc))[1];
+ $LsofCommand = "lsof -p $ProcPid | wc -l";
+ $LsofResult = `$LsofCommand`;
+ $LsofResult = ($LsofResult > 0 ) ? ($LsofResult - 1) : 0;
+ $FDCount += $LsofResult;
+ if ($LsofResult >= $PerProcMaxFD) { $PerProcMaxFD = $LsofResult; }
+ $ProcPluginReturnValue = $Nagios->check_threshold(check => $LsofResult,warning => @{$WVRange}[0],critical => @{$CVRange}[0]);
+ if ($ProcPluginReturnValue eq OK) {
+ $ProcOKFlag = 1;
+ }
+ elsif ($ProcPluginReturnValue eq WARNING) {
+ $ProcWarningFlag = 1;
+ if ($LsofResult >= $LastWarningProcFDs) {
+ $LastWarningProcFDs = $LsofResult;
+ $LastWarningProc = $ProcPid;
+ }
+ }
+ #if ($LsofResult >= $PCT) {
+ elsif ($ProcPluginReturnValue eq CRITICAL) {
+ $ProcCriticalFlag = 1;
+ if ($LsofResult >= $LastCriticalProcFDs) {
+ $LastCriticalProcFDs = $LsofResult;
+ $LastCriticalProc = $ProcPid;
+ }
+ }
+ }
+ }
+ if ($ProcCount) { $ProcFDAvg = int($FDCount / $ProcCount); }
+ $AppPluginReturnValue = $Nagios->check_threshold(check => $FDCount,warning => @{$WVRange}[1],critical => @{$CVRange}[1]);
+ #if ($FDCount >= $TWT) {
+ if ($AppPluginReturnValue eq OK) { $OKFlag = 1; }
+ elsif ($AppPluginReturnValue eq WARNING) { $WarningFlag = 1; }
+ elsif ($AppPluginReturnValue eq CRITICAL) { $CriticalFlag = 1; }
+
+ # PluginReturnValue and PluginOutput
+ if ($CriticalFlag) {
+ $PluginReturnValue = CRITICAL;
+ ${$PluginOutput} .= "$ApplicationName handling $FDCount files (critical threshold set to @{$CVRange}[1])";
+ }
+ elsif ($WarningFlag) {
+ $PluginReturnValue = WARNING;
+ ${$PluginOutput} .= "$ApplicationName handling $FDCount files (warning threshold set to @{$WVRange}[1])";
+ }
+ elsif ($ProcCriticalFlag) {
+ $PluginReturnValue = CRITICAL;
+ ${$PluginOutput} .= "Process ID $LastCriticalProc handling $LastCriticalProcFDs files (critical threshold set to @{$CVRange}[0])";
+ }
+ elsif ($ProcWarningFlag) {
+ $PluginReturnValue = WARNING;
+ ${$PluginOutput} .= "Process ID $LastWarningProc handling $LastWarningProcFDs files (warning threshold set to @{$WVRange}[0])";
+ }
+ elsif ($OKFlag && $ProcOKFlag) {
+ $PluginReturnValue = OK;
+ ${$PluginOutput} .= "$ApplicationName handling $FDCount files";
+ }
+ }
+ else {
+ ${$PluginOutput} .= "No existe la aplicacion $ApplicationName";
+ }
+
+
+ $PerformanceData .= "ProcCount=$ProcCount$PerfdataUnit FDCount=$FDCount$PerfdataUnit ProcFDAvg=$ProcFDAvg$PerfdataUnit PerProcMaxFD=$PerProcMaxFD$PerfdataUnit";
+
+ # Output with performance data:
+ ${$PluginOutput} .= " | $PerformanceData";
+
+ return $PluginReturnValue;
+}
diff --git a/puppet/modules/site_check_mk/files/agent/plugins/mk_logwatch.1.2.4 b/puppet/modules/site_check_mk/files/agent/plugins/mk_logwatch.1.2.4
new file mode 100755
index 00000000..3dbca322
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/plugins/mk_logwatch.1.2.4
@@ -0,0 +1,374 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2010 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Call with -d for debug mode: colored output, no saving of status
+
+import sys, os, re, time
+import glob
+
+if '-d' in sys.argv[1:] or '--debug' in sys.argv[1:]:
+ tty_red = '\033[1;31m'
+ tty_green = '\033[1;32m'
+ tty_yellow = '\033[1;33m'
+ tty_blue = '\033[1;34m'
+ tty_normal = '\033[0m'
+ debug = True
+else:
+ tty_red = ''
+ tty_green = ''
+ tty_yellow = ''
+ tty_blue = ''
+ tty_normal = ''
+ debug = False
+
+# The configuration file and status file are searched
+# in the directory named by the environment variable
+# LOGWATCH_DIR. If that is not set, MK_CONFDIR is used.
+# If that is not set either, the current directory is
+# used.
+logwatch_dir = os.getenv("LOGWATCH_DIR")
+if not logwatch_dir:
+ logwatch_dir = os.getenv("MK_CONFDIR")
+ if not logwatch_dir:
+ logwatch_dir = "."
+
+print "<<<logwatch>>>"
+
+config_filename = logwatch_dir + "/logwatch.cfg"
+status_filename = logwatch_dir + "/logwatch.state"
+config_dir = logwatch_dir + "/logwatch.d/*.cfg"
+
+def is_not_comment(line):
+ if line.lstrip().startswith('#') or \
+ line.strip() == '':
+ return False
+ return True
+
+def parse_filenames(line):
+ return line.split()
+
+def parse_pattern(level, pattern):
+ if level not in [ 'C', 'W', 'I', 'O' ]:
+ raise(Exception("Invalid pattern line '%s'" % line))
+ try:
+ compiled = re.compile(pattern)
+ except:
+ raise(Exception("Invalid regular expression in line '%s'" % line))
+ return (level, compiled)
+
+def read_config():
+ config_lines = [ line.rstrip() for line in filter(is_not_comment, file(config_filename).readlines()) ]
+ # Add config from a logwatch.d folder
+ for config_file in glob.glob(config_dir):
+ config_lines += [ line.rstrip() for line in filter(is_not_comment, file(config_file).readlines()) ]
+
+ have_filenames = False
+ config = []
+
+ for line in config_lines:
+ rewrite = False
+ if line[0].isspace(): # pattern line
+ if not have_filenames:
+ raise Exception("Missing logfile names")
+ level, pattern = line.split(None, 1)
+ if level == 'A':
+ cont_list.append(parse_cont_pattern(pattern))
+ elif level == 'R':
+ rewrite_list.append(pattern)
+ else:
+ level, compiled = parse_pattern(level, pattern)
+ cont_list = [] # List of continuation patterns
+ rewrite_list = [] # List of rewrite patterns
+ patterns.append((level, compiled, cont_list, rewrite_list))
+ else: # filename line
+ patterns = []
+ config.append((parse_filenames(line), patterns))
+ have_filenames = True
+ return config
+
+def parse_cont_pattern(pattern):
+ try:
+ return int(pattern)
+ except:
+ try:
+ return re.compile(pattern)
+ except:
+ if debug:
+ raise
+ raise Exception("Invalid regular expression in line '%s'" % pattern)
+
+# structure of statusfile
+# # LOGFILE OFFSET INODE
+# /var/log/messages|7767698|32455445
+# /var/test/x12134.log|12345|32444355
+def read_status():
+ if debug:
+ return {}
+
+ status = {}
+ for line in file(status_filename):
+ # TODO: Remove variants with spaces. rsplit is
+ # not portable. split fails if logfilename contains
+ # spaces
+ inode = -1
+ try:
+ parts = line.split('|')
+ filename = parts[0]
+ offset = parts[1]
+ if len(parts) >= 3:
+ inode = parts[2]
+
+ except:
+ try:
+ filename, offset = line.rsplit(None, 1)
+ except:
+ filename, offset = line.split(None, 1)
+ status[filename] = int(offset), int(inode)
+ return status
+
+def save_status(status):
+ f = file(status_filename, "w")
+ for filename, (offset, inode) in status.items():
+ f.write("%s|%d|%d\n" % (filename, offset, inode))
+
+pushed_back_line = None
+def next_line(f):
+ global pushed_back_line
+ if pushed_back_line != None:
+ line = pushed_back_line
+ pushed_back_line = None
+ return line
+ else:
+ try:
+ line = f.next()
+ return line
+ except:
+ return None
+
+
+def process_logfile(logfile, patterns):
+ global pushed_back_line
+
+ # Look at which file offset we have finished scanning
+ # the logfile last time. If we have never seen this file
+ # before, we set the offset to -1
+ offset, prev_inode = status.get(logfile, (-1, -1))
+ try:
+ fl = os.open(logfile, os.O_RDONLY)
+ inode = os.fstat(fl)[1] # 1 = st_ino
+ except:
+ if debug:
+ raise
+ print "[[[%s:cannotopen]]]" % logfile
+ return
+
+ print "[[[%s]]]" % logfile
+
+ # Seek to the current end in order to determine file size
+ current_end = os.lseek(fl, 0, 2) # os.SEEK_END not available in Python 2.4
+ status[logfile] = current_end, inode
+
+ # If we have never seen this file before, we just set the
+ # current pointer to the file end. We do not want to make
+ # a fuss about ancient log messages...
+ if offset == -1:
+ if not debug:
+ return
+ else:
+ offset = 0
+
+
+    # If the inode of the logfile has changed it has apparently
+ # been started from new (logfile rotation). At least we must
+ # assume that. In some rare cases (restore of a backup, etc)
+ # we are wrong and resend old log messages
+ if prev_inode >= 0 and inode != prev_inode:
+ offset = 0
+
+ # Our previously stored offset is the current end ->
+ # no new lines in this file
+ if offset == current_end:
+ return # nothing new
+
+ # If our offset is beyond the current end, the logfile has been
+ # truncated or wrapped while keeping the same inode. We assume
+ # that it contains all new data in that case and restart from
+ # offset 0.
+ if offset > current_end:
+ offset = 0
+
+ # now seek to offset where interesting data begins
+ os.lseek(fl, offset, 0) # os.SEEK_SET not available in Python 2.4
+ f = os.fdopen(fl)
+ worst = -1
+ outputtxt = ""
+ lines_parsed = 0
+ start_time = time.time()
+
+ while True:
+ line = next_line(f)
+ if line == None:
+ break # End of file
+
+ lines_parsed += 1
+ # Check if maximum number of new log messages is exceeded
+ if opt_maxlines != None and lines_parsed > opt_maxlines:
+ outputtxt += "%s Maximum number (%d) of new log messages exceeded.\n" % (
+ opt_overflow, opt_maxlines)
+ worst = max(worst, opt_overflow_level)
+ os.lseek(fl, 0, 2) # Seek to end of file, skip all other messages
+ break
+
+ # Check if the maximum processing time (per file) is exceeded. Check only
+ # every 100th line in order to save system calls.
+ if opt_maxtime != None and lines_parsed % 100 == 10 \
+ and time.time() - start_time > opt_maxtime:
+ outputtxt += "%s Maximum parsing time (%.1f sec) of this log file exceeded.\n" % (
+ opt_overflow, opt_maxtime)
+ worst = max(worst, opt_overflow_level)
+ os.lseek(fl, 0, 2) # Seek to end of file, skip all other messages
+ break
+
+ level = "."
+ for lev, pattern, cont_patterns, replacements in patterns:
+ matches = pattern.search(line[:-1])
+ if matches:
+ level = lev
+ levelint = {'C': 2, 'W': 1, 'O': 0, 'I': -1, '.': -1}[lev]
+ worst = max(levelint, worst)
+
+ # Check for continuation lines
+ for cont_pattern in cont_patterns:
+ if type(cont_pattern) == int: # add that many lines
+ for x in range(cont_pattern):
+ cont_line = next_line(f)
+ if cont_line == None: # end of file
+ break
+ line = line[:-1] + "\1" + cont_line
+
+ else: # pattern is regex
+ while True:
+ cont_line = next_line(f)
+ if cont_line == None: # end of file
+ break
+ elif cont_pattern.search(cont_line[:-1]):
+ line = line[:-1] + "\1" + cont_line
+ else:
+ pushed_back_line = cont_line # sorry for stealing this line
+ break
+
+ # Replacement
+ for replace in replacements:
+ line = replace.replace('\\0', line) + "\n"
+ for nr, group in enumerate(matches.groups()):
+ line = line.replace('\\%d' % (nr+1), group)
+
+ break # matching rule found and executed
+
+ color = {'C': tty_red, 'W': tty_yellow, 'O': tty_green, 'I': tty_blue, '.': ''}[level]
+ if debug:
+ line = line.replace("\1", "\nCONT:")
+ if level == "I":
+ level = "."
+ if opt_nocontext and level == '.':
+ continue
+ outputtxt += "%s%s %s%s\n" % (color, level, line[:-1], tty_normal)
+
+ new_offset = os.lseek(fl, 0, 1) # os.SEEK_CUR not available in Python 2.4
+ status[logfile] = new_offset, inode
+
+ # output all lines if at least one warning, error or ok has been found
+ if worst > -1:
+ sys.stdout.write(outputtxt)
+ sys.stdout.flush()
+
+try:
+ config = read_config()
+except Exception, e:
+ if debug:
+ raise
+ print "CANNOT READ CONFIG FILE: %s" % e
+ sys.exit(1)
+
+# Simply ignore errors in the status file. In case of a corrupted status file we begin
+# with an empty status. That keeps the monitoring up and running - even if we might lose a
+# message in that extreme case.
+try:
+ status = read_status()
+except Exception, e:
+ status = {}
+
+
+# The filename line may contain options like 'maxlines=100' or 'maxtime=10'
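+# For illustration only, a logwatch.cfg section using such options might look
+# like this (hypothetical file name and patterns):
+#   /var/log/myapp.log maxlines=1000 maxtime=10 overflow=W nocontext=1
+#    C PANIC
+#    W WARNING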
+for filenames, patterns in config:
+ # Initialize options with default values
+ opt_maxlines = None
+ opt_maxtime = None
+ opt_regex = None
+ opt_overflow = 'C'
+ opt_overflow_level = 2
+ opt_nocontext = False
+ try:
+ options = [ o.split('=', 1) for o in filenames if '=' in o ]
+ for key, value in options:
+ if key == 'maxlines':
+ opt_maxlines = int(value)
+ elif key == 'maxtime':
+ opt_maxtime = float(value)
+ elif key == 'overflow':
+ if value not in [ 'C', 'I', 'W', 'O' ]:
+ raise Exception("Invalid value %s for overflow. Allowed are C, I, O and W" % value)
+ opt_overflow = value
+ opt_overflow_level = {'C':2, 'W':1, 'O':0, 'I':0}[value]
+ elif key == 'regex':
+ opt_regex = re.compile(value)
+ elif key == 'iregex':
+ opt_regex = re.compile(value, re.I)
+ elif key == 'nocontext':
+ opt_nocontext = True
+ else:
+ raise Exception("Invalid option %s" % key)
+ except Exception, e:
+ if debug:
+ raise
+ print "INVALID CONFIGURATION: %s" % e
+ sys.exit(1)
+
+
+ for glob in filenames:
+ if '=' in glob:
+ continue
+ logfiles = [ l.strip() for l in os.popen("ls %s 2>/dev/null" % glob).readlines() ]
+ if opt_regex:
+ logfiles = [ f for f in logfiles if opt_regex.search(f) ]
+ if len(logfiles) == 0:
+ print '[[[%s:missing]]]' % glob
+ else:
+ for logfile in logfiles:
+ process_logfile(logfile, patterns)
+
+if not debug:
+ save_status(status)
diff --git a/puppet/modules/site_check_mk/files/extra_service_conf.mk b/puppet/modules/site_check_mk/files/extra_service_conf.mk
new file mode 100644
index 00000000..c7120a96
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/extra_service_conf.mk
@@ -0,0 +1,14 @@
+# retry 3 times before setting a service into a hard state
+# and send out notification
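+# each rule is a tuple of (value, host list, service pattern list)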
+extra_service_conf["max_check_attempts"] = [
+ ("4", ALL_HOSTS , ALL_SERVICES )
+]
+
+#
+# run check_mk_agent every 4 minutes if it terminates successfully.
+# see https://leap.se/code/issues/6539 for the rationale
+#
+extra_service_conf["normal_check_interval"] = [
+ ("4", ALL_HOSTS , "Check_MK" )
+]
+
diff --git a/puppet/modules/site_check_mk/files/ignored_services.mk b/puppet/modules/site_check_mk/files/ignored_services.mk
new file mode 100644
index 00000000..35dc4433
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/ignored_services.mk
@@ -0,0 +1,3 @@
+ignored_services = [
+ ( ALL_HOSTS, [ "NTP Time" ] )
+]
diff --git a/puppet/modules/site_check_mk/manifests/agent.pp b/puppet/modules/site_check_mk/manifests/agent.pp
new file mode 100644
index 00000000..b95d5d64
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent.pp
@@ -0,0 +1,35 @@
+# installs check-mk agent
+class site_check_mk::agent {
+
+ $ssh_hash = hiera('ssh')
+ $pubkey = $ssh_hash['authorized_keys']['monitor']['key']
+ $type = $ssh_hash['authorized_keys']['monitor']['type']
+
+
+ # /usr/bin/mk-job depends on /usr/bin/time
+ ensure_packages('time')
+
+ class { 'site_apt::preferences::check_mk': } ->
+
+ class { 'check_mk::agent':
+ agent_package_name => 'check-mk-agent',
+ agent_logwatch_package_name => 'check-mk-agent-logwatch',
+ method => 'ssh',
+ authdir => '/root/.ssh',
+ authfile => 'authorized_keys',
+ register_agent => false,
+ require => Package['time']
+ } ->
+
+ class { 'site_check_mk::agent::mrpe': } ->
+ class { 'site_check_mk::agent::logwatch': } ->
+
+ file {
+ [ '/srv/leap/nagios', '/srv/leap/nagios/plugins' ]:
+ ensure => directory;
+ '/usr/lib/check_mk_agent/local/run_node_tests.sh':
+ source => 'puppet:///modules/site_check_mk/agent/local_checks/all_hosts/run_node_tests.sh',
+ mode => '0755';
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/couchdb.pp b/puppet/modules/site_check_mk/manifests/agent/couchdb.pp
new file mode 100644
index 00000000..1554fd3c
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/couchdb.pp
@@ -0,0 +1,34 @@
+# configure logwatch and nagios checks for couchdb (both bigcouch and plain
+# couchdb installations)
+class site_check_mk::agent::couchdb {
+
+ concat::fragment { 'syslog_couchdb':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog/couchdb.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '02';
+ }
+
+ # check different couchdb stats
+ file { '/usr/lib/check_mk_agent/local/leap_couch_stats.sh':
+ source => 'puppet:///modules/site_check_mk/agent/local_checks/couchdb/leap_couch_stats.sh',
+ mode => '0755',
+ require => Package['check_mk-agent']
+ }
+
+ # check open files for bigcouch proc
+ include site_check_mk::agent::package::perl_plugin
+ file { '/srv/leap/nagios/plugins/check_unix_open_fds.pl':
+ source => 'puppet:///modules/site_check_mk/agent/nagios_plugins/check_unix_open_fds.pl',
+ mode => '0755'
+ }
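+ # presumably warn above 28672 and go critical above 30720 open file
+ # descriptors held by the beam process (the -w/-c arguments below)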
+ augeas {
+ 'Couchdb_open_files':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/Couchdb_open_files',
+ 'set Couchdb_open_files \'/srv/leap/nagios/plugins/check_unix_open_fds.pl -a beam -w 28672,28672 -c 30720,30720\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/couchdb/bigcouch.pp b/puppet/modules/site_check_mk/manifests/agent/couchdb/bigcouch.pp
new file mode 100644
index 00000000..82c3ac72
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/couchdb/bigcouch.pp
@@ -0,0 +1,49 @@
+# configure logwatch and nagios checks for bigcouch
+class site_check_mk::agent::couchdb::bigcouch {
+
+ # watch bigcouch logs
+ # currently disabled because bigcouch is too noisy
+ # see https://leap.se/code/issues/7375 for more details
+ # and site_config::remove::files for removing leftovers
+ #file { '/etc/check_mk/logwatch.d/bigcouch.cfg':
+ # source => 'puppet:///modules/site_check_mk/agent/logwatch/bigcouch.cfg',
+ #}
+
+ # check syslog msg from:
+ # - epmd
+ # - /usr/local/bin/couch-doc-update
+ concat::fragment { 'syslog_bigcouch':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog/bigcouch.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '02';
+ }
+
+ # check bigcouch processes
+ augeas {
+ 'Bigcouch_epmd_procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/Bigcouch_epmd_procs',
+ 'set Bigcouch_epmd_procs \'/usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a /opt/bigcouch/erts-5.9.1/bin/epmd\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+ 'Bigcouch_beam_procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/Bigcouch_beam_procs',
+ 'set Bigcouch_beam_procs \'/usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a /opt/bigcouch/erts-5.9.1/bin/beam\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+ augeas {
+ 'Bigcouch_open_files':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/Bigcouch_open_files',
+ 'set Bigcouch_open_files \'/srv/leap/nagios/plugins/check_unix_open_fds.pl -a beam -w 28672,28672 -c 30720,30720\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/couchdb/plain.pp b/puppet/modules/site_check_mk/manifests/agent/couchdb/plain.pp
new file mode 100644
index 00000000..3ec2267b
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/couchdb/plain.pp
@@ -0,0 +1,23 @@
+# configure logwatch and nagios checks for plain single couchdb master
+class site_check_mk::agent::couchdb::plain {
+
+ # remove bigcouch leftovers
+ augeas {
+ 'Bigcouch_epmd_procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => 'rm /files/etc/check_mk/mrpe.cfg/Bigcouch_epmd_procs',
+ require => File['/etc/check_mk/mrpe.cfg'];
+ 'Bigcouch_beam_procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => 'rm /files/etc/check_mk/mrpe.cfg/Bigcouch_beam_procs',
+ require => File['/etc/check_mk/mrpe.cfg'];
+ 'Bigcouch_open_files':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => 'rm /files/etc/check_mk/mrpe.cfg/Bigcouch_open_files',
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/haproxy.pp b/puppet/modules/site_check_mk/manifests/agent/haproxy.pp
new file mode 100644
index 00000000..6d52efba
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/haproxy.pp
@@ -0,0 +1,15 @@
+class site_check_mk::agent::haproxy {
+
+ include site_check_mk::agent::package::nagios_plugins_contrib
+
+ # local nagios plugin checks via mrpe
+ augeas { 'haproxy':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/Haproxy',
+ 'set Haproxy \'/usr/lib/nagios/plugins/check_haproxy -u "http://localhost:8000/haproxy;csv"\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/haveged.pp b/puppet/modules/site_check_mk/manifests/agent/haveged.pp
new file mode 100644
index 00000000..cacbea8c
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/haveged.pp
@@ -0,0 +1,15 @@
+class site_check_mk::agent::haveged {
+
+# check haveged process
+ augeas {
+ 'haveged_proc':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/haveged_proc',
+ 'set haveged_proc \'/usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a /usr/sbin/haveged\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/logwatch.pp b/puppet/modules/site_check_mk/manifests/agent/logwatch.pp
new file mode 100644
index 00000000..423cace2
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/logwatch.pp
@@ -0,0 +1,36 @@
+class site_check_mk::agent::logwatch {
+ # Deploy mk_logwatch 1.2.4 so we can split the config
+ # into multiple config files in /etc/check_mk/logwatch.d
+ # see https://leap.se/code/issues/5135
+
+ file { '/usr/lib/check_mk_agent/plugins/mk_logwatch':
+ source => 'puppet:///modules/site_check_mk/agent/plugins/mk_logwatch.1.2.4',
+ mode => '0755',
+ require => Package['check-mk-agent-logwatch']
+ }
+
+ # only config files that watch a distinct logfile should go in logwatch.d/
+ file { '/etc/check_mk/logwatch.d':
+ ensure => directory,
+ recurse => true,
+ purge => true,
+ require => Package['check-mk-agent-logwatch']
+ }
+
+ # services that share a common logfile (e.g. /var/log/syslog) need to get
+ # concatenated into one file, otherwise the last file sourced will override
+ # the config before it
+ # see mk_logwatch: "logwatch.cfg overwrites config files in logwatch.d",
+ # https://leap.se/code/issues/5155
+
+ # first, we need to deploy a custom logwatch.cfg that doesn't include
+ # a section about /var/log/syslog
+
+ file { '/etc/check_mk/logwatch.cfg':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/logwatch.cfg',
+ require => Package['check_mk-agent-logwatch']
+ }
+
+ include concat::setup
+ include site_check_mk::agent::logwatch::syslog
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/logwatch/syslog.pp b/puppet/modules/site_check_mk/manifests/agent/logwatch/syslog.pp
new file mode 100644
index 00000000..c927780d
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/logwatch/syslog.pp
@@ -0,0 +1,18 @@
+class site_check_mk::agent::logwatch::syslog {
+
+ concat { '/etc/check_mk/logwatch.d/syslog.cfg':
+ warn => true
+ }
+
+ concat::fragment { 'syslog_header':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog_header.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '01';
+ }
+ concat::fragment { 'syslog_tail':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog_tail.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '99';
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/mrpe.pp b/puppet/modules/site_check_mk/manifests/agent/mrpe.pp
new file mode 100644
index 00000000..5e1f087a
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/mrpe.pp
@@ -0,0 +1,24 @@
+class site_check_mk::agent::mrpe {
+ # check_mk can use standard nagios plugins using
+ # a wrapper called mrpe
+ # see http://mathias-kettner.de/checkmk_mrpe.html
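+ # each line in mrpe.cfg maps a check name to a plugin command line, e.g.
+ #   APT /usr/lib/nagios/plugins/check_apt
+ # which is exactly what the augeas resource below maintains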
+
+ package { 'nagios-plugins-basic':
+ ensure => latest,
+ }
+
+ file { '/etc/check_mk/mrpe.cfg':
+ ensure => present,
+ require => Package['check-mk-agent']
+ } ->
+
+ augeas {
+ 'Apt':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/APT',
+ 'set APT \'/usr/lib/nagios/plugins/check_apt\'' ];
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/mx.pp b/puppet/modules/site_check_mk/manifests/agent/mx.pp
new file mode 100644
index 00000000..20cbcade
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/mx.pp
@@ -0,0 +1,27 @@
+# configure check_mk agent checks for the mx service
+class site_check_mk::agent::mx {
+
+ # watch logs
+ file { '/etc/check_mk/logwatch.d/leap_mx.cfg':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/leap_mx.cfg',
+ }
+
+ # local nagios plugin checks via mrpe
+ # removed because leap_cli integrates a check for running mx procs already,
+ # which is also integrated into nagios (called "Mx/Are_MX_daemons_running")
+ augeas {
+ 'Leap_MX_Procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => 'rm /files/etc/check_mk/mrpe.cfg/Leap_MX_Procs',
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+ # check stale files in queue dir
+ file { '/usr/lib/check_mk_agent/local/check_leap_mx.sh':
+ source => 'puppet:///modules/site_check_mk/agent/local_checks/mx/check_leap_mx.sh',
+ mode => '0755',
+ require => Package['check_mk-agent']
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/openvpn.pp b/puppet/modules/site_check_mk/manifests/agent/openvpn.pp
new file mode 100644
index 00000000..0596a497
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/openvpn.pp
@@ -0,0 +1,10 @@
+class site_check_mk::agent::openvpn {
+
+ # check syslog
+ concat::fragment { 'syslog_openpvn':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/openvpn.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '02';
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/package/nagios_plugins_contrib.pp b/puppet/modules/site_check_mk/manifests/agent/package/nagios_plugins_contrib.pp
new file mode 100644
index 00000000..95a60d17
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/package/nagios_plugins_contrib.pp
@@ -0,0 +1,5 @@
+class site_check_mk::agent::package::nagios_plugins_contrib {
+ package { 'nagios-plugins-contrib':
+ ensure => installed,
+ }
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/package/perl_plugin.pp b/puppet/modules/site_check_mk/manifests/agent/package/perl_plugin.pp
new file mode 100644
index 00000000..4feda375
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/package/perl_plugin.pp
@@ -0,0 +1,5 @@
+class site_check_mk::agent::package::perl_plugin {
+ package { 'libnagios-plugin-perl':
+ ensure => installed,
+ }
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/soledad.pp b/puppet/modules/site_check_mk/manifests/agent/soledad.pp
new file mode 100644
index 00000000..f4a3f3a6
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/soledad.pp
@@ -0,0 +1,17 @@
+class site_check_mk::agent::soledad {
+
+ file { '/etc/check_mk/logwatch.d/soledad.cfg':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/soledad.cfg',
+ }
+
+ # local nagios plugin checks via mrpe
+
+ augeas { 'Soledad_Procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/Soledad_Procs',
+ 'set Soledad_Procs \'/usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a "/usr/bin/python /usr/bin/twistd --uid=soledad --gid=soledad --pidfile=/var/run/soledad.pid --logfile=/var/log/soledad.log web --wsgi=leap.soledad.server.application --port=ssl:2323:privateKey=/etc/x509/keys/leap.key:certKey=/etc/x509/certs/leap.crt:sslmethod=SSLv23_METHOD"\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/stunnel.pp b/puppet/modules/site_check_mk/manifests/agent/stunnel.pp
new file mode 100644
index 00000000..7f765771
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/stunnel.pp
@@ -0,0 +1,9 @@
+class site_check_mk::agent::stunnel {
+
+ concat::fragment { 'syslog_stunnel':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/stunnel.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '02';
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/webapp.pp b/puppet/modules/site_check_mk/manifests/agent/webapp.pp
new file mode 100644
index 00000000..9bf3b197
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/webapp.pp
@@ -0,0 +1,15 @@
+class site_check_mk::agent::webapp {
+
+ # remove leftovers of webapp python checks
+ file {
+ [ '/usr/lib/check_mk_agent/local/nagios-webapp_login.py',
+ '/usr/lib/check_mk_agent/local/soledad_sync.py' ]:
+ ensure => absent
+ }
+
+ # watch logs
+ file { '/etc/check_mk/logwatch.d/webapp.cfg':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/webapp.cfg',
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/server.pp b/puppet/modules/site_check_mk/manifests/server.pp
new file mode 100644
index 00000000..7ff9eb4a
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/server.pp
@@ -0,0 +1,103 @@
+# setup check_mk on the monitoring server
+class site_check_mk::server {
+
+ $ssh_hash = hiera('ssh')
+ $pubkey = $ssh_hash['authorized_keys']['monitor']['key']
+ $type = $ssh_hash['authorized_keys']['monitor']['type']
+ $seckey = $ssh_hash['monitor']['private_key']
+
+ $nagios_hiera = hiera_hash('nagios')
+ $hosts = $nagios_hiera['hosts']
+
+ $all_hosts = inline_template ('<% @hosts.keys.sort.each do |key| -%><% if @hosts[key]["environment"] != "disabled" %>"<%= @hosts[key]["domain_internal"] %>", <% end -%><% end -%>')
+ $domains_internal = $nagios_hiera['domains_internal']
+ $environments = $nagios_hiera['environments']
+
+ package { 'check-mk-server':
+ ensure => installed,
+ }
+
+ # we don't use check-mk-multisite, and the jessie version
+ # of this config file breaks with apache 2.4.
+ # until https://gitlab.com/shared-puppet-modules-group/apache/issues/11
+ # is fixed, we need to use a generic file type here
+ #apache::config::global { 'check-mk-multisite.conf':
+ # ensure => absent
+ #}
+
+ file { '/etc/apache2/conf-enabled/check-mk-multisite.conf':
+ ensure => absent,
+ require => Package['check-mk-server'];
+ }
+
+ # override paths to use the system check_mk rather than OMD
+ class { 'check_mk::config':
+ site => '',
+ etc_dir => '/etc',
+ nagios_subdir => 'nagios3',
+ bin_dir => '/usr/bin',
+ host_groups => undef,
+ use_storedconfigs => false,
+ inventory_only_on_changes => false,
+ require => Package['check-mk-server']
+ }
+
+ Exec['check_mk-refresh'] ->
+ Exec['check_mk-refresh-inventory-daily'] ->
+ Exec['check_mk-reload'] ->
+ Service['nagios']
+
+ file {
+ '/etc/check_mk/conf.d/use_ssh.mk':
+ content => template('site_check_mk/use_ssh.mk'),
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/hostgroups.mk':
+ content => template('site_check_mk/hostgroups.mk'),
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/host_contactgroups.mk':
+ content => template('site_check_mk/host_contactgroups.mk'),
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/ignored_services.mk':
+ source => 'puppet:///modules/site_check_mk/ignored_services.mk',
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/extra_service_conf.mk':
+ source => 'puppet:///modules/site_check_mk/extra_service_conf.mk',
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/extra_host_conf.mk':
+ content => template('site_check_mk/extra_host_conf.mk'),
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+
+ '/etc/check_mk/all_hosts_static':
+ content => $all_hosts,
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+
+ '/etc/check_mk/.ssh':
+ ensure => directory,
+ require => Package['check-mk-server'];
+ '/etc/check_mk/.ssh/id_rsa':
+ content => $seckey,
+ owner => 'nagios',
+ mode => '0600',
+ require => Package['check-mk-server'];
+ '/etc/check_mk/.ssh/id_rsa.pub':
+ content => "${type} ${pubkey} monitor",
+ owner => 'nagios',
+ mode => '0644',
+ require => Package['check-mk-server'];
+
+ # check_icmp must be suid root or called by sudo
+ # see https://leap.se/code/issues/5171
+ '/usr/lib/nagios/plugins/check_icmp':
+ mode => '4755',
+ require => Package['nagios-plugins-basic'];
+ }
+
+ include check_mk::agent::local_checks
+}
diff --git a/puppet/modules/site_check_mk/templates/extra_host_conf.mk b/puppet/modules/site_check_mk/templates/extra_host_conf.mk
new file mode 100644
index 00000000..bc27b514
--- /dev/null
+++ b/puppet/modules/site_check_mk/templates/extra_host_conf.mk
@@ -0,0 +1,13 @@
+# retry 3 times before setting a host into a hard state
+# and send out notification
+extra_host_conf["max_check_attempts"] = [
+ ("4", ALL_HOSTS )
+]
+
+# Use hostnames as alias so notification mail subjects
+# are more readable and not so long. The alias defaults to
+# the fqdn of a host if it is not changed.
+extra_host_conf["alias"] = [
+<% @hosts.keys.sort.each do |key| -%> ( "<%= key.strip %>", ["<%= @hosts[key]['domain_internal']%>"]),
+<% end -%>
+]
diff --git a/puppet/modules/site_check_mk/templates/host_contactgroups.mk b/puppet/modules/site_check_mk/templates/host_contactgroups.mk
new file mode 100644
index 00000000..6a534967
--- /dev/null
+++ b/puppet/modules/site_check_mk/templates/host_contactgroups.mk
@@ -0,0 +1,17 @@
+<%
+ contact_groups = []
+ @environments.keys.sort.each do |env_name|
+ hosts = ""
+ @nagios_hosts.keys.sort.each do |hostname|
+ hostdata = @nagios_hosts[hostname]
+ domain_internal = hostdata['domain_internal']
+ if hostdata['environment'] == env_name
+ hosts << '"' + domain_internal + '", '
+ end
+ end
+ contact_groups << ' ( "%s", [%s] )' % [env_name, hosts]
+ end
+%>
+host_contactgroups = [
+<%= contact_groups.join(",\n") %>
+]
diff --git a/puppet/modules/site_check_mk/templates/hostgroups.mk b/puppet/modules/site_check_mk/templates/hostgroups.mk
new file mode 100644
index 00000000..7158dcd1
--- /dev/null
+++ b/puppet/modules/site_check_mk/templates/hostgroups.mk
@@ -0,0 +1,17 @@
+<%
+ host_groups = []
+ @environments.keys.sort.each do |env_name|
+ hosts = ""
+ @nagios_hosts.keys.sort.each do |hostname|
+ hostdata = @nagios_hosts[hostname]
+ domain_internal = hostdata['domain_internal']
+ if hostdata['environment'] == env_name
+ hosts << '"' + domain_internal + '", '
+ end
+ end
+ host_groups << ' ( "%s", [%s] )' % [env_name, hosts]
+ end
+%>
+host_groups = [
+<%= host_groups.join(",\n") %>
+]
diff --git a/puppet/modules/site_check_mk/templates/use_ssh.mk b/puppet/modules/site_check_mk/templates/use_ssh.mk
new file mode 100644
index 00000000..55269536
--- /dev/null
+++ b/puppet/modules/site_check_mk/templates/use_ssh.mk
@@ -0,0 +1,6 @@
+# http://mathias-kettner.de/checkmk_datasource_programs.html
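+# each entry tells the check_mk server to call the agent on a host via ssh
+# instead of the default agent transport; the rendered line looks roughly like
+#   ( "ssh -l root -i /etc/check_mk/.ssh/id_rsa -p <port> <host> check_mk_agent", [ "<host>" ], ),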
+datasource_programs = [
+<% @nagios_hosts.sort.each do |name,config| %>
+ ( "ssh -l root -i /etc/check_mk/.ssh/id_rsa -p <%=config['ssh_port']%> <%=config['domain_internal']%> check_mk_agent", [ "<%=config['domain_internal']%>" ], ),<%- end -%>
+
+]
diff --git a/puppet/modules/site_config/files/xterm-title.sh b/puppet/modules/site_config/files/xterm-title.sh
new file mode 100644
index 00000000..3cff0e3a
--- /dev/null
+++ b/puppet/modules/site_config/files/xterm-title.sh
@@ -0,0 +1,8 @@
+# If this is an xterm set the title to user@host:dir
+case "$TERM" in
+xterm*|rxvt*)
+ PROMPT_COMMAND='echo -ne "\033]0;${USER}@${HOSTNAME}: ${PWD}\007"'
+ ;;
+*)
+ ;;
+esac
diff --git a/puppet/modules/site_config/lib/facter/dhcp_enabled.rb b/puppet/modules/site_config/lib/facter/dhcp_enabled.rb
new file mode 100644
index 00000000..33220da3
--- /dev/null
+++ b/puppet/modules/site_config/lib/facter/dhcp_enabled.rb
@@ -0,0 +1,22 @@
+require 'facter'
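+# Returns true if /etc/network/interfaces (or a file pulled in via a
+# "source" line, followed one level deep) contains an "inet dhcp" stanza.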
+def dhcp_enabled?(ifs, recurse=true)
+ dhcp = false
+ included_ifs = []
+ if FileTest.exists?(ifs)
+ File.open(ifs) do |file|
+ dhcp = file.enum_for(:each_line).any? do |line|
+ if recurse && line =~ /^\s*source\s+([^\s]+)/
+ included_ifs += Dir.glob($1)
+ end
+ line =~ /inet\s+dhcp/
+ end
+ end
+ end
+ dhcp || included_ifs.any? { |ifs| dhcp_enabled?(ifs, false) }
+end
+Facter.add(:dhcp_enabled) do
+ confine :osfamily => 'Debian'
+ setcode do
+ dhcp_enabled?('/etc/network/interfaces')
+ end
+end
diff --git a/puppet/modules/site_config/lib/facter/ip_interface.rb b/puppet/modules/site_config/lib/facter/ip_interface.rb
new file mode 100644
index 00000000..45764bfc
--- /dev/null
+++ b/puppet/modules/site_config/lib/facter/ip_interface.rb
@@ -0,0 +1,13 @@
+require 'facter/util/ip'
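+# For every interface that has an address this adds a fact named
+# "interface_<ip>" whose value is the interface name, e.g. a (hypothetical)
+# interface_192.0.2.10 => eth0, so manifests can look up which interface
+# carries a given address.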
+
+Facter::Util::IP.get_interfaces.each do |interface|
+ ip = Facter.value("ipaddress_#{interface}")
+ if ip != nil
+ Facter.add("interface_" + ip ) do
+ setcode do
+ interface
+ end
+ end
+ end
+end
+
diff --git a/puppet/modules/site_config/manifests/caching_resolver.pp b/puppet/modules/site_config/manifests/caching_resolver.pp
new file mode 100644
index 00000000..8bf465c1
--- /dev/null
+++ b/puppet/modules/site_config/manifests/caching_resolver.pp
@@ -0,0 +1,27 @@
+# deploy local caching resolver
+class site_config::caching_resolver {
+ tag 'leap_base'
+
+ class { 'unbound':
+ root_hints => false,
+ anchor => false,
+ ssl => false,
+ settings => {
+ server => {
+ verbosity => '1',
+ interface => [ '127.0.0.1', '::1' ],
+ port => '53',
+ hide-identity => 'yes',
+ hide-version => 'yes',
+ harden-glue => 'yes',
+ access-control => [ '127.0.0.0/8 allow', '::1 allow' ]
+ }
+ }
+ }
+
+ concat::fragment { 'unbound glob include':
+ target => $unbound::params::config,
+ content => "include: /etc/unbound/unbound.conf.d/*.conf\n\n",
+ order => 10
+ }
+}
diff --git a/puppet/modules/site_config/manifests/default.pp b/puppet/modules/site_config/manifests/default.pp
new file mode 100644
index 00000000..256de1a1
--- /dev/null
+++ b/puppet/modules/site_config/manifests/default.pp
@@ -0,0 +1,71 @@
+# common things to set up on every node
+class site_config::default {
+ tag 'leap_base'
+
+ $services = hiera('services', [])
+ $domain_hash = hiera('domain')
+ include site_config::params
+ include site_config::setup
+
+ # default class, used by all hosts
+
+ include lsb, git
+
+ # configure sysctl parameters
+ include site_config::sysctl
+
+ # configure ssh and include ssh-keys
+ include site_sshd
+
+ # include classes for special environments
+ # e.g. openstack/aws nodes, vagrant nodes
+
+ # keep dhclient from changing resolver information
+ # facter returns 'true' as string
+ # lint:ignore:quoted_booleans
+ if $::dhcp_enabled == 'true' {
+ # lint:endignore
+ include site_config::dhclient
+ }
+
+ # configure /etc/resolv.conf
+ include site_config::resolvconf
+
+ # configure caching, local resolver
+ include site_config::caching_resolver
+
+ # install/configure syslog and core log rotations
+ include site_config::syslog
+
+ # provide a basic level of quality entropy
+ include haveged
+
+ # install/remove base packages
+ include site_config::packages
+
+ # include basic shorewall config
+ include site_shorewall::defaults
+
+ Package['git'] -> Vcsrepo<||>
+
+ # include basic shell config
+ include site_config::shell
+
+ # set up core leap files and directories
+ include site_config::files
+
+ # remove leftovers from previous deploys
+ include site_config::remove
+
+ if ! member($services, 'mx') {
+ include site_postfix::satellite
+ }
+
+ # if class custom exists, include it.
+ # possibility for users to define custom puppet recipes
+ if defined( '::custom') {
+ include ::custom
+ }
+
+ include site_check_mk::agent
+}
diff --git a/puppet/modules/site_config/manifests/dhclient.pp b/puppet/modules/site_config/manifests/dhclient.pp
new file mode 100644
index 00000000..a1f87d41
--- /dev/null
+++ b/puppet/modules/site_config/manifests/dhclient.pp
@@ -0,0 +1,40 @@
+# Unfortunately, there does not seem to be a way to reload the dhclient.conf
+# config file, or a convenient way to disable the modifications to
+# /etc/resolv.conf. So the following makes the functions involved noops and
+# ships a script to kill and restart dhclient. See the debian bugs:
+# #681698, #712796
+class site_config::dhclient {
+
+
+ include site_config::params
+
+ file { '/usr/local/sbin/reload_dhclient':
+ owner => 0,
+ group => 0,
+ mode => '0755',
+ content => template('site_config/reload_dhclient.erb');
+ }
+
+ exec { 'reload_dhclient':
+ refreshonly => true,
+ command => '/usr/local/sbin/reload_dhclient',
+ before => Class['site_config::resolvconf'],
+ require => File['/usr/local/sbin/reload_dhclient'],
+ }
+
+ file { '/etc/dhcp/dhclient-enter-hooks.d':
+ ensure => directory,
+ mode => '0755',
+ owner => 'root',
+ group => 'root',
+ }
+
+ file { '/etc/dhcp/dhclient-enter-hooks.d/disable_resolvconf':
+ content => 'make_resolv_conf() { : ; } ; set_hostname() { : ; }',
+ mode => '0644',
+ owner => 'root',
+ group => 'root',
+ require => File['/etc/dhcp/dhclient-enter-hooks.d'],
+ notify => Exec['reload_dhclient'];
+ }
+}
diff --git a/puppet/modules/site_config/manifests/files.pp b/puppet/modules/site_config/manifests/files.pp
new file mode 100644
index 00000000..d2ef8a98
--- /dev/null
+++ b/puppet/modules/site_config/manifests/files.pp
@@ -0,0 +1,24 @@
+# set up core leap files and directories
+class site_config::files {
+
+ file {
+ '/srv/leap':
+ ensure => directory,
+ owner => 'root',
+ group => 'root',
+ mode => '0711';
+
+ [ '/etc/leap', '/var/lib/leap']:
+ ensure => directory,
+ owner => 'root',
+ group => 'root',
+ mode => '0755';
+
+ '/var/log/leap':
+ ensure => directory,
+ owner => 'root',
+ group => 'adm',
+ mode => '0750';
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/hosts.pp b/puppet/modules/site_config/manifests/hosts.pp
new file mode 100644
index 00000000..878b6af0
--- /dev/null
+++ b/puppet/modules/site_config/manifests/hosts.pp
@@ -0,0 +1,44 @@
+class site_config::hosts() {
+ $hosts = hiera('hosts', false)
+
+ # calculate all the hostname aliases that might be used
+ $hostname = hiera('name')
+ $domain_hash = hiera('domain', {})
+ $dns = hiera('dns', {})
+ if $dns['aliases'] == undef {
+ $dns_aliases = []
+ } else {
+ $dns_aliases = $dns['aliases']
+ }
+ $my_hostnames = unique(concat(
+ [$domain_hash['full'], $hostname, $domain_hash['internal']], $dns_aliases
+ ))
+
+ file { '/etc/hostname':
+ ensure => present,
+ content => $hostname
+ }
+
+ exec { "/bin/hostname ${hostname}":
+ subscribe => [ File['/etc/hostname'], File['/etc/hosts'] ],
+ refreshonly => true;
+ }
+
+ # we depend on reliable hostnames from /etc/hosts for the stunnel services,
+ # so we restart the stunnel service when /etc/hosts is modified.
+ # because this is done at an early stage, the stunnel module may not
+ # have been deployed yet and will not be available for overriding, so
+ # this is handled in an unorthodox manner
+ exec { '/etc/init.d/stunnel4 restart':
+ subscribe => File['/etc/hosts'],
+ refreshonly => true,
+ onlyif => 'test -f /etc/init.d/stunnel4';
+ }
+
+ file { '/etc/hosts':
+ content => template('site_config/hosts'),
+ mode => '0644',
+ owner => root,
+ group => root;
+ }
+}
diff --git a/puppet/modules/site_config/manifests/initial_firewall.pp b/puppet/modules/site_config/manifests/initial_firewall.pp
new file mode 100644
index 00000000..93cfb847
--- /dev/null
+++ b/puppet/modules/site_config/manifests/initial_firewall.pp
@@ -0,0 +1,64 @@
+class site_config::initial_firewall {
+
+ # This class is intended to setup an initial firewall, before shorewall is
+ # configured. The purpose of this is for the rare case where shorewall fails
+ # to start, we should not expose services to the public.
+
+ $ssh_config = hiera('ssh')
+ $ssh_port = $ssh_config['port']
+
+ package { 'iptables':
+ ensure => present
+ }
+
+ file {
+ # This firewall enables ssh access, dns lookups and web lookups (for
+ # package installation) but otherwise restricts all outgoing and incoming
+ # ports
+ '/etc/network/ipv4firewall_up.rules':
+ content => template('site_config/ipv4firewall_up.rules.erb'),
+ owner => root,
+ group => 0,
+ mode => '0644';
+
+ # This firewall denies all ipv6 traffic - we will need to change this
+ # when we begin to support ipv6
+ '/etc/network/ipv6firewall_up.rules':
+ content => template('site_config/ipv6firewall_up.rules.erb'),
+ owner => root,
+ group => 0,
+ mode => '0644';
+
+ # Run the iptables-restore in if-pre-up so that the network is locked down
+ # until the correct interfaces and ips are connected
+ '/etc/network/if-pre-up.d/ipv4tables':
+ content => "#!/bin/sh\n/sbin/iptables-restore < /etc/network/ipv4firewall_up.rules\n",
+ owner => root,
+ group => 0,
+ mode => '0744';
+
+ # Same as above for IPv6
+ '/etc/network/if-pre-up.d/ipv6tables':
+ content => "#!/bin/sh\n/sbin/ip6tables-restore < /etc/network/ipv6firewall_up.rules\n",
+ owner => root,
+ group => 0,
+ mode => '0744';
+ }
+
+ # Immediately setup these firewall rules, but only if shorewall is not running
+ exec {
+ 'default_ipv4_firewall':
+ command => '/sbin/iptables-restore < /etc/network/ipv4firewall_up.rules',
+ logoutput => true,
+ unless => 'test -x /etc/init.d/shorewall && /etc/init.d/shorewall status',
+ subscribe => File['/etc/network/ipv4firewall_up.rules'],
+ require => File['/etc/network/ipv4firewall_up.rules'];
+
+ 'default_ipv6_firewall':
+ command => '/sbin/ip6tables-restore < /etc/network/ipv6firewall_up.rules',
+ logoutput => true,
+ unless => 'test -x /etc/init.d/shorewall6 && /etc/init.d/shorewall6 status',
+ subscribe => File['/etc/network/ipv6firewall_up.rules'],
+ require => File['/etc/network/ipv6firewall_up.rules'];
+ }
+}
diff --git a/puppet/modules/site_config/manifests/packages.pp b/puppet/modules/site_config/manifests/packages.pp
new file mode 100644
index 00000000..140189a4
--- /dev/null
+++ b/puppet/modules/site_config/manifests/packages.pp
@@ -0,0 +1,32 @@
+# install default packages and remove unwanted packages
+class site_config::packages {
+
+
+ # base set of packages that we want to have installed everywhere
+ package { [ 'etckeeper', 'screen', 'less', 'ntp' ]:
+ ensure => installed,
+ }
+
+ # base set of packages that we want to remove everywhere
+ package { [
+ 'acpi', 'build-essential',
+ 'cpp', 'cpp-4.6', 'cpp-4.7', 'cpp-4.8', 'cpp-4.9',
+ 'eject', 'ftp',
+ 'g++', 'g++-4.6', 'g++-4.7', 'g++-4.8', 'g++-4.9',
+ 'gcc', 'gcc-4.6', 'gcc-4.7', 'gcc-4.8', 'gcc-4.9',
+ 'laptop-detect', 'libc6-dev', 'libssl-dev', 'lpr', 'make',
+ 'pppconfig', 'pppoe', 'pump', 'qstat',
+ 'samba-common', 'samba-common-bin', 'smbclient',
+ 'tcl8.5', 'tk8.5', 'os-prober', 'unzip', 'xauth', 'x11-common',
+ 'x11-utils', 'xterm' ]:
+ ensure => purged;
+ }
+
+ # leave a few packages installed on local environments,
+ # e.g. vagrant needs them for mounting shared folders
+ if $::site_config::params::environment != 'local' {
+ package { [ 'nfs-common', 'nfs-kernel-server', 'rpcbind', 'portmap' ]:
+ ensure => purged;
+ }
+ }
+}
diff --git a/puppet/modules/site_config/manifests/packages/build_essential.pp b/puppet/modules/site_config/manifests/packages/build_essential.pp
new file mode 100644
index 00000000..2b3e13b9
--- /dev/null
+++ b/puppet/modules/site_config/manifests/packages/build_essential.pp
@@ -0,0 +1,28 @@
+#
+# include this whenever you want to ensure the build-essential package and related compilers are installed.
+#
+class site_config::packages::build_essential inherits ::site_config::packages {
+
+ # NICKSERVER CODE NOTE: in order to support TLS, libssl-dev must be installed
+ # before EventMachine gem is built/installed.
+ Package[ 'gcc', 'make', 'g++', 'cpp', 'libssl-dev', 'libc6-dev' ] {
+ ensure => present
+ }
+
+ case $::operatingsystemrelease {
+ /^8.*/: {
+ Package[ 'gcc-4.9','g++-4.9', 'cpp-4.9' ] {
+ ensure => present
+ }
+ }
+
+ /^7.*/: {
+ Package[ 'gcc-4.7','g++-4.7', 'cpp-4.7' ] {
+ ensure => present
+ }
+ }
+
+ default: { }
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/packages/gnutls.pp b/puppet/modules/site_config/manifests/packages/gnutls.pp
new file mode 100644
index 00000000..b1f17480
--- /dev/null
+++ b/puppet/modules/site_config/manifests/packages/gnutls.pp
@@ -0,0 +1,5 @@
+class site_config::packages::gnutls {
+
+ package { 'gnutls-bin': ensure => installed }
+
+}
diff --git a/puppet/modules/site_config/manifests/params.pp b/puppet/modules/site_config/manifests/params.pp
new file mode 100644
index 00000000..012b3ce0
--- /dev/null
+++ b/puppet/modules/site_config/manifests/params.pp
@@ -0,0 +1,35 @@
+class site_config::params {
+
+ $ip_address = hiera('ip_address')
+ $ip_address_interface = getvar("interface_${ip_address}")
+ $ec2_local_ipv4_interface = getvar("interface_${::ec2_local_ipv4}")
+ $environment = hiera('environment', undef)
+
+
+ if $environment == 'local' {
+ $interface = 'eth1'
+ include site_config::packages::build_essential
+ }
+ elsif hiera('interface','') != '' {
+ $interface = hiera('interface')
+ }
+ elsif $ip_address_interface != '' {
+ $interface = $ip_address_interface
+ }
+ elsif $ec2_local_ipv4_interface != '' {
+ $interface = $ec2_local_ipv4_interface
+ }
+ elsif $::interfaces =~ /eth0/ {
+ $interface = 'eth0'
+ }
+ else {
+ fail("unable to determine a valid interface, please set a valid interface for this node in nodes/${::hostname}.json")
+ }
+
+ $ca_name = 'leap_ca'
+ $client_ca_name = 'leap_client_ca'
+ $ca_bundle_name = 'leap_ca_bundle'
+ $cert_name = 'leap'
+ $commercial_ca_name = 'leap_commercial_ca'
+ $commercial_cert_name = 'leap_commercial'
+}
diff --git a/puppet/modules/site_config/manifests/remove.pp b/puppet/modules/site_config/manifests/remove.pp
new file mode 100644
index 00000000..443df9c2
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove.pp
@@ -0,0 +1,11 @@
+# remove leftovers from previous deploys
+class site_config::remove {
+ include site_config::remove::files
+
+ case $::operatingsystemrelease {
+ /^8.*/: {
+ include site_config::remove::jessie
+ }
+ default: { }
+ }
+}
diff --git a/puppet/modules/site_config/manifests/remove/bigcouch.pp b/puppet/modules/site_config/manifests/remove/bigcouch.pp
new file mode 100644
index 00000000..3535c3c1
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove/bigcouch.pp
@@ -0,0 +1,42 @@
+# remove bigcouch leftovers from previous installations
+class site_config::remove::bigcouch {
+
+ # Don't use check_mk logwatch to watch bigcouch logs anymore
+ # see https://leap.se/code/issues/7375 for more details
+ file { '/etc/check_mk/logwatch.d/bigcouch.cfg':
+ ensure => absent,
+ notify => [
+ Exec['remove_bigcouch_logwatch_stateline']
+ ]
+ }
+
+ exec { 'remove_bigcouch_logwatch_stateline':
+ command => "sed -i '/bigcouch.log/d' /etc/check_mk/logwatch.state",
+ refreshonly => true,
+ }
+
+ cron { 'compact_all_shards':
+ ensure => absent
+ }
+
+
+ exec { 'kill_bigcouch_stunnel_procs':
+ refreshonly => true,
+ command => '/usr/bin/pkill -f "/usr/bin/stunnel4 /etc/stunnel/(ednp|epmd)_server.conf"'
+ }
+
+ # 'tidy' doesn't notify other resources, so we need to use file here instead
+ # see https://tickets.puppetlabs.com/browse/PUP-6021
+ file {
+ [ '/etc/stunnel/ednp_server.conf', '/etc/stunnel/epmd_server.conf']:
+ ensure => absent,
+ # notifying Service[stunnel] doesn't work here because the config
+ # files contain the pid of the procs to stop/start.
+ # If we remove the config and restart stunnel, then it will only
+ # stop/start the procs for which config files are found and the stale
+ # service will continue to run.
+ # So we simply kill them.
+ notify => Exec['kill_bigcouch_stunnel_procs']
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/remove/files.pp b/puppet/modules/site_config/manifests/remove/files.pp
new file mode 100644
index 00000000..41d6462e
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove/files.pp
@@ -0,0 +1,56 @@
+#
+# Sometimes when we upgrade the platform, we need to ensure that files that
+# the platform previously created will get removed.
+#
+# These file removals don't need to be kept forever: we only need to remove
+# files that are present in the prior platform release.
+#
+# We can assume that every node is upgraded from the previous platform
+# release.
+#
+
+class site_config::remove::files {
+
+ # Platform 0.8 removals
+ tidy {
+ '/etc/default/leap_mx':;
+ '/etc/logrotate.d/mx':;
+ '/etc/rsyslog.d/50-mx.conf':;
+ '/etc/apt/preferences.d/openvpn':;
+ '/etc/apt/sources.list.d/secondary.list.disabled.list':;
+ }
+
+ #
+ # Platform 0.7 removals
+ #
+
+ tidy {
+ '/etc/rsyslog.d/99-tapicero.conf':;
+ '/etc/rsyslog.d/01-webapp.conf':;
+ '/etc/rsyslog.d/50-stunnel.conf':;
+ '/etc/logrotate.d/stunnel':;
+ '/var/log/stunnel4/stunnel.log':;
+ 'leap_mx':
+ path => '/var/log/',
+ recurse => true,
+ matches => ['leap_mx*', 'mx.log.[1-5]', 'mx.log.[6-9](.gz)?',
+ 'mx.log.[0-9][0-9](.gz)?'];
+ '/srv/leap/webapp/public/provider.json':;
+ '/srv/leap/couchdb/designs/tmp_users':
+ recurse => true,
+ rmdirs => true;
+ '/etc/leap/soledad-server.conf':;
+ '/var/log/leap/openvpn.log':;
+ '/etc/rsyslog.d/50-openvpn.conf':;
+ }
+
+ # leap_mx logged to /var/log/leap_mx.log in the past
+ # we need to use a dumb exec here because file_line doesn't
+ # allow removing lines that match a regex in the current version
+ # of stdlib, see https://tickets.puppetlabs.com/browse/MODULES-1903
+ exec { 'rm_old_leap_mx_log_destination':
+ command => "/bin/sed -i '/leap_mx.log/d' /etc/check_mk/logwatch.state",
+ onlyif => "/bin/grep -qe 'leap_mx.log' /etc/check_mk/logwatch.state"
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/remove/jessie.pp b/puppet/modules/site_config/manifests/remove/jessie.pp
new file mode 100644
index 00000000..e9497baf
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove/jessie.pp
@@ -0,0 +1,14 @@
+# remove possible leftovers after upgrading from wheezy to jessie
+class site_config::remove::jessie {
+
+ tidy {
+ '/etc/apt/preferences.d/rsyslog_anon_depends':
+ notify => Exec['apt_updated'];
+ }
+
+ apt::preferences_snippet {
+ [ 'facter', 'obfsproxy', 'python-twisted', 'unbound' ]:
+ ensure => absent;
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/remove/monitoring.pp b/puppet/modules/site_config/manifests/remove/monitoring.pp
new file mode 100644
index 00000000..18e2949b
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove/monitoring.pp
@@ -0,0 +1,13 @@
+# remove leftovers on monitoring nodes
+class site_config::remove::monitoring {
+
+ # Remove check_mk logwatch spoolfiles for
+ # tapicero and bigcouch
+ tidy {
+ 'remove_logwatch_spoolfiles':
+ path => '/var/lib/check_mk/logwatch',
+ recurse => true,
+ matches => [ '*tapicero.log', '*bigcouch.log'];
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/remove/tapicero.pp b/puppet/modules/site_config/manifests/remove/tapicero.pp
new file mode 100644
index 00000000..07c3c6c6
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove/tapicero.pp
@@ -0,0 +1,72 @@
+# remove tapicero leftovers from previous deploys on couchdb nodes
+class site_config::remove::tapicero {
+
+ ensure_packages('curl')
+
+ # remove tapicero couchdb user
+ $couchdb_config = hiera('couch')
+ $couchdb_mode = $couchdb_config['mode']
+
+ if $couchdb_mode == 'multimaster'
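+ # in multimaster (bigcouch) mode the node-local admin interface listens on
+ # port 5986, a plain couchdb listens on 5984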
+ {
+ $port = 5986
+ } else {
+ $port = 5984
+ }
+
+ exec { 'remove_couchdb_user':
+ onlyif => "/usr/bin/curl -s 127.0.0.1:${port}/_users/org.couchdb.user:tapicero | grep -qv 'not_found'",
+ command => "/usr/local/bin/couch-doc-update --host 127.0.0.1:${port} --db _users --id org.couchdb.user:tapicero --delete",
+ require => Package['curl']
+ }
+
+
+ exec { 'kill_tapicero':
+ onlyif => '/usr/bin/test -s /var/run/tapicero.pid',
+ command => '/usr/bin/pkill --pidfile /var/run/tapicero.pid'
+ }
+
+ user { 'tapicero':
+ ensure => absent;
+ }
+
+ group { 'tapicero':
+ ensure => absent,
+ require => User['tapicero'];
+ }
+
+ tidy {
+ '/srv/leap/tapicero':
+ recurse => true,
+ require => [ Exec['kill_tapicero'] ];
+ '/var/lib/leap/tapicero':
+ require => [ Exec['kill_tapicero'] ];
+ '/var/run/tapicero':
+ require => [ Exec['kill_tapicero'] ];
+ '/etc/leap/tapicero.yaml':
+ require => [ Exec['kill_tapicero'] ];
+ '/etc/init.d/tapicero':
+ require => [ Exec['kill_tapicero'] ];
+ 'tapicero_logs':
+ path => '/var/log/leap',
+ recurse => true,
+ matches => 'tapicero*',
+ require => [ Exec['kill_tapicero'] ];
+ '/etc/check_mk/logwatch.d/tapicero.cfg':;
+ }
+
+ # remove local nagios plugin checks via mrpe
+ augeas {
+ 'Tapicero_Procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => 'rm /files/etc/check_mk/mrpe.cfg/Tapicero_Procs',
+ require => File['/etc/check_mk/mrpe.cfg'];
+ 'Tapicero_Heartbeat':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => 'rm Tapicero_Heartbeat',
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/remove/webapp.pp b/puppet/modules/site_config/manifests/remove/webapp.pp
new file mode 100644
index 00000000..58f59815
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove/webapp.pp
@@ -0,0 +1,7 @@
+# remove leftovers on webapp nodes
+class site_config::remove::webapp {
+ tidy {
+ '/etc/apache/sites-enabled/leap_webapp.conf':
+ notify => Service['apache'];
+ }
+}
diff --git a/puppet/modules/site_config/manifests/resolvconf.pp b/puppet/modules/site_config/manifests/resolvconf.pp
new file mode 100644
index 00000000..09f0b405
--- /dev/null
+++ b/puppet/modules/site_config/manifests/resolvconf.pp
@@ -0,0 +1,14 @@
+class site_config::resolvconf {
+
+ $domain_public = $site_config::default::domain_hash['full_suffix']
+
+ class { '::resolvconf':
+ domain => $domain_public,
+ search => $domain_public,
+ nameservers => [
+ '127.0.0.1 # local caching-only, unbound',
+ '85.214.20.141 # Digitalcourage, a german privacy organisation: (https://en.wikipedia.org/wiki/Digitalcourage)',
+ '172.81.176.146 # OpenNIC (https://servers.opennicproject.org/edit.php?srv=ns1.tor.ca.dns.opennic.glue)'
+ ]
+ }
+}
diff --git a/puppet/modules/site_config/manifests/ruby.pp b/puppet/modules/site_config/manifests/ruby.pp
new file mode 100644
index 00000000..5c13233d
--- /dev/null
+++ b/puppet/modules/site_config/manifests/ruby.pp
@@ -0,0 +1,8 @@
+# install ruby, rubygems and bundler
+# configure ruby settings common to all servers
+class site_config::ruby {
+ Class[Ruby] -> Class[rubygems] -> Class[bundler::install]
+ class { '::ruby': }
+ class { 'bundler::install': install_method => 'package' }
+ include rubygems
+}
diff --git a/puppet/modules/site_config/manifests/ruby/dev.pp b/puppet/modules/site_config/manifests/ruby/dev.pp
new file mode 100644
index 00000000..2b0b106d
--- /dev/null
+++ b/puppet/modules/site_config/manifests/ruby/dev.pp
@@ -0,0 +1,8 @@
+# install ruby dev packages needed for building some gems
+class site_config::ruby::dev {
+ include site_config::ruby
+ include ::ruby::devel
+
+ # building gems locally probably requires build-essential and gcc:
+ include site_config::packages::build_essential
+}
diff --git a/puppet/modules/site_config/manifests/setup.pp b/puppet/modules/site_config/manifests/setup.pp
new file mode 100644
index 00000000..82dfe76d
--- /dev/null
+++ b/puppet/modules/site_config/manifests/setup.pp
@@ -0,0 +1,50 @@
+# common things to set up on every node
+# leftover from the past, where we did two puppet runs
+# one after another. We should consolidate this into site_config::default
+# in the future.
+class site_config::setup {
+ tag 'leap_base'
+
+ #
+ # this is applied before each run of site.pp
+ #
+
+ Exec { path => '/usr/bin:/usr/sbin/:/bin:/sbin:/usr/local/bin:/usr/local/sbin' }
+
+ include site_config::params
+
+ include concat::setup
+ include stdlib
+
+ # configure /etc/hosts
+ class { 'site_config::hosts': }
+
+ include site_config::initial_firewall
+
+ include site_apt
+
+ package { 'facter':
+ ensure => latest
+ }
+
+ # if squid_deb_proxy_client is set to true, install and configure
+ # squid_deb_proxy_client for apt caching
+ if hiera('squid_deb_proxy_client', false) {
+ include site_squid_deb_proxy::client
+ }
+
+ # shorewall is installed/half-configured during setup.pp (Bug #3871)
+ # we need to include shorewall::interface{eth0} in setup.pp so
+ # packages can be installed during the main puppet run, even before shorewall
+ # is configured completely
+ if ( $::site_config::params::environment == 'local' ) {
+ include site_config::vagrant
+ }
+
+ # if class site_custom::setup exists, include it.
+ # possibility for users to define custom puppet recipes
+ if defined( '::site_custom::setup') {
+ include ::site_custom::setup
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/shell.pp b/puppet/modules/site_config/manifests/shell.pp
new file mode 100644
index 00000000..5b8c025d
--- /dev/null
+++ b/puppet/modules/site_config/manifests/shell.pp
@@ -0,0 +1,22 @@
+class site_config::shell {
+
+ file {
+ '/etc/profile.d/leap_path.sh':
+ content => 'PATH=$PATH:/srv/leap/bin',
+ mode => '0644',
+ owner => root,
+ group => root;
+ }
+
+ ##
+ ## XTERM TITLE
+ ##
+
+ file { '/etc/profile.d/xterm-title.sh':
+ source => 'puppet:///modules/site_config/xterm-title.sh',
+ owner => root,
+ group => 0,
+ mode => '0644';
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/slow.pp b/puppet/modules/site_config/manifests/slow.pp
new file mode 100644
index 00000000..8e9b7035
--- /dev/null
+++ b/puppet/modules/site_config/manifests/slow.pp
@@ -0,0 +1,10 @@
+# this class is run by default, but can be excluded
+# for testing purposes by calling "leap deploy" with
+# the "--fast" parameter
+class site_config::slow {
+ tag 'leap_slow'
+
+ include site_config::default
+ include apt::update
+ class { 'site_apt::dist_upgrade': }
+}
diff --git a/puppet/modules/site_config/manifests/sysctl.pp b/puppet/modules/site_config/manifests/sysctl.pp
new file mode 100644
index 00000000..99f75123
--- /dev/null
+++ b/puppet/modules/site_config/manifests/sysctl.pp
@@ -0,0 +1,8 @@
+class site_config::sysctl {
+
+ sysctl::config {
+ 'net.ipv4.ip_nonlocal_bind':
+ value => 1,
+ comment => 'Allow applications to bind to an address when link is down (see https://leap.se/code/issues/4506)'
+ }
+}
diff --git a/puppet/modules/site_config/manifests/syslog.pp b/puppet/modules/site_config/manifests/syslog.pp
new file mode 100644
index 00000000..591e0601
--- /dev/null
+++ b/puppet/modules/site_config/manifests/syslog.pp
@@ -0,0 +1,62 @@
+# configure rsyslog on all nodes
+class site_config::syslog {
+
+ # only pin rsyslog packages to backports on wheezy
+ case $::operatingsystemrelease {
+ /^7.*/: {
+ include ::site_apt::preferences::rsyslog
+ }
+ # on jessie+ systems, systemd and journald are enabled,
+ # and journald logs IP addresses, so we need to disable
+ # it until a solution is found (#7863):
+ # https://github.com/systemd/systemd/issues/2447
+ default: {
+ include ::journald
+ augeas {
+ 'disable_journald':
+ incl => '/etc/systemd/journald.conf',
+ lens => 'Puppet.lns',
+ changes => 'set /files/etc/systemd/journald.conf/Journal/Storage \'none\'',
+ notify => Service['systemd-journald'];
+ }
+ }
+ }
+
+ class { '::rsyslog::client':
+ log_remote => false,
+ log_local => true,
+ custom_config => 'site_rsyslog/client.conf.erb'
+ }
+
+ rsyslog::snippet { '00-anonymize_logs':
+ content => '$ModLoad mmanon
+action(type="mmanon" ipv4.bits="32" mode="rewrite")'
+ }
+
+ augeas {
+ 'logrotate_leap_deploy':
+ context => '/files/etc/logrotate.d/leap_deploy/rule',
+ changes => [
+ 'set file /var/log/leap/deploy.log',
+ 'set rotate 5',
+ 'set size 1M',
+ 'set compress compress',
+ 'set missingok missingok',
+ 'set copytruncate copytruncate' ];
+
+ # NOTE:
+ # the puppet_command script requires the option delaycompress
+ # to be set on the summary log file.
+
+ 'logrotate_leap_deploy_summary':
+ context => '/files/etc/logrotate.d/leap_deploy_summary/rule',
+ changes => [
+ 'set file /var/log/leap/deploy-summary.log',
+ 'set rotate 5',
+ 'set size 100k',
+ 'set delaycompress delaycompress',
+ 'set compress compress',
+ 'set missingok missingok',
+ 'set copytruncate copytruncate' ]
+ }
+}
diff --git a/puppet/modules/site_config/manifests/vagrant.pp b/puppet/modules/site_config/manifests/vagrant.pp
new file mode 100644
index 00000000..8f50b305
--- /dev/null
+++ b/puppet/modules/site_config/manifests/vagrant.pp
@@ -0,0 +1,11 @@
+class site_config::vagrant {
+ # class for vagrant nodes
+
+ include site_shorewall::defaults
+ # eth0 on vagrant nodes is the uplink if
+ shorewall::interface { 'eth0':
+ zone => 'net',
+ options => 'tcpflags,blacklist,nosmurfs';
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/x509/ca.pp b/puppet/modules/site_config/manifests/x509/ca.pp
new file mode 100644
index 00000000..2880ecaf
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/ca.pp
@@ -0,0 +1,11 @@
+class site_config::x509::ca {
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $ca = $x509['ca_cert']
+
+ x509::ca { $site_config::params::ca_name:
+ content => $ca
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/ca_bundle.pp b/puppet/modules/site_config/manifests/x509/ca_bundle.pp
new file mode 100644
index 00000000..5808e29e
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/ca_bundle.pp
@@ -0,0 +1,17 @@
+class site_config::x509::ca_bundle {
+
+ # CA bundle -- we want to have the possibility of allowing multiple CAs.
+ # For now, the reason is to transition to using client CA. In the future,
+ # we will want to be able to smoothly phase out one CA and phase in another.
+ # I tried "--capath" for this, but it did not work.
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $ca = $x509['ca_cert']
+ $client_ca = $x509['client_ca_cert']
+
+ x509::ca { $site_config::params::ca_bundle_name:
+ content => "${ca}${client_ca}"
+ }
+}
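Because the bundle is nothing more than the two PEM blocks concatenated, a certificate issued by either CA should verify against it. A sketch only; the file names below are placeholders, since the real path is derived from $site_config::params::ca_bundle_name:

```bash
#!/bin/sh
# Verify an arbitrary certificate against the concatenated CA bundle.
# Both paths are illustrative; substitute the deployed locations.
openssl verify -CAfile /etc/ssl/certs/ca_bundle.crt /tmp/some_cert.pem
```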
diff --git a/puppet/modules/site_config/manifests/x509/cert.pp b/puppet/modules/site_config/manifests/x509/cert.pp
new file mode 100644
index 00000000..7e5a36b9
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/cert.pp
@@ -0,0 +1,12 @@
+class site_config::x509::cert {
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $cert = $x509['cert']
+
+ x509::cert { $site_config::params::cert_name:
+ content => $cert
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/x509/client_ca/ca.pp b/puppet/modules/site_config/manifests/x509/client_ca/ca.pp
new file mode 100644
index 00000000..3fbafa98
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/client_ca/ca.pp
@@ -0,0 +1,16 @@
+class site_config::x509::client_ca::ca {
+
+ ##
+ ## This is for the special CA that is used exclusively for generating
+ ## client certificates by the webapp.
+ ##
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $cert = $x509['client_ca_cert']
+
+ x509::ca { $site_config::params::client_ca_name:
+ content => $cert
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/client_ca/key.pp b/puppet/modules/site_config/manifests/x509/client_ca/key.pp
new file mode 100644
index 00000000..0b537e76
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/client_ca/key.pp
@@ -0,0 +1,16 @@
+class site_config::x509::client_ca::key {
+
+ ##
+ ## This is for the special CA that is used exclusively for generating
+ ## client certificates by the webapp.
+ ##
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $key = $x509['client_ca_key']
+
+ x509::key { $site_config::params::client_ca_name:
+ content => $key
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/commercial/ca.pp b/puppet/modules/site_config/manifests/x509/commercial/ca.pp
new file mode 100644
index 00000000..c76a9dbb
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/commercial/ca.pp
@@ -0,0 +1,11 @@
+class site_config::x509::commercial::ca {
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $ca = $x509['commercial_ca_cert']
+
+ x509::ca { $site_config::params::commercial_ca_name:
+ content => $ca
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/commercial/cert.pp b/puppet/modules/site_config/manifests/x509/commercial/cert.pp
new file mode 100644
index 00000000..9dd6ffcd
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/commercial/cert.pp
@@ -0,0 +1,15 @@
+class site_config::x509::commercial::cert {
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $cert = $x509['commercial_cert']
+ $ca = $x509['commercial_ca_cert']
+
+ $cafile = "${cert}\n${ca}"
+
+ x509::cert { $site_config::params::commercial_cert_name:
+ content => $cafile
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/x509/commercial/key.pp b/puppet/modules/site_config/manifests/x509/commercial/key.pp
new file mode 100644
index 00000000..2be439fd
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/commercial/key.pp
@@ -0,0 +1,11 @@
+class site_config::x509::commercial::key {
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $key = $x509['commercial_key']
+
+ x509::key { $site_config::params::commercial_cert_name:
+ content => $key
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/key.pp b/puppet/modules/site_config/manifests/x509/key.pp
new file mode 100644
index 00000000..448dc6a6
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/key.pp
@@ -0,0 +1,11 @@
+class site_config::x509::key {
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $key = $x509['key']
+
+ x509::key { $site_config::params::cert_name:
+ content => $key
+ }
+}
diff --git a/puppet/modules/site_config/templates/hosts b/puppet/modules/site_config/templates/hosts
new file mode 100644
index 00000000..d62cbc3f
--- /dev/null
+++ b/puppet/modules/site_config/templates/hosts
@@ -0,0 +1,19 @@
+# This file is managed by puppet, any changes will be overwritten!
+
+127.0.0.1 localhost
+127.0.1.1 <%= @my_hostnames.join(' ') %>
+
+<%- if @hosts then -%>
+<% @hosts.keys.sort.each do |name| -%>
+<%- props = @hosts[name] -%>
+<%- aliases = props["aliases"] ? props["aliases"].join(' ') : nil -%>
+<%= [props["ip_address"], props["domain_full"], props["domain_internal"], aliases, name].compact.uniq.join(' ') %>
+<% end -%>
+<% end -%>
+
+# The following lines are desirable for IPv6 capable hosts
+::1 ip6-localhost ip6-loopback
+fe00::0 ip6-localnet
+ff00::0 ip6-mcastprefix
+ff02::1 ip6-allnodes
+ff02::2 ip6-allrouters
diff --git a/puppet/modules/site_config/templates/ipv4firewall_up.rules.erb b/puppet/modules/site_config/templates/ipv4firewall_up.rules.erb
new file mode 100644
index 00000000..b0c2b7ad
--- /dev/null
+++ b/puppet/modules/site_config/templates/ipv4firewall_up.rules.erb
@@ -0,0 +1,14 @@
+# Generated by iptables-save v1.4.14 on Tue Aug 20 14:40:40 2013
+*filter
+:INPUT DROP [0:0]
+:FORWARD DROP [0:0]
+:OUTPUT ACCEPT [0:0]
+-A INPUT -i lo -j ACCEPT
+-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
+-A INPUT -p tcp -m state --state NEW,ESTABLISHED --dport 22 -j ACCEPT
+-A INPUT -p tcp -m state --state NEW,ESTABLISHED --dport <%= @ssh_port %> -j ACCEPT
+-A INPUT -p udp -m udp --sport 53 -j ACCEPT
+-A INPUT -p icmp -m icmp --icmp-type 8 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
+-A INPUT -p icmp -m icmp --icmp-type 0 -m state --state RELATED,ESTABLISHED -j ACCEPT
+-A INPUT -m limit --limit 5/min -j LOG --log-prefix "iptables denied: " --log-level 7
+COMMIT
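Files in this iptables-save format can be parsed without being applied, which is useful after editing the template. A sketch; the rendered file path is an assumption, since the manifest that installs this template is not shown here:

```bash
#!/bin/sh
# Parse the rendered rules without committing them (--test).
iptables-restore --test < /etc/network/ipv4firewall_up.rules
```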
diff --git a/puppet/modules/site_config/templates/ipv6firewall_up.rules.erb b/puppet/modules/site_config/templates/ipv6firewall_up.rules.erb
new file mode 100644
index 00000000..e2c92524
--- /dev/null
+++ b/puppet/modules/site_config/templates/ipv6firewall_up.rules.erb
@@ -0,0 +1,8 @@
+# Generated by ip6tables-save v1.4.20 on Tue Aug 20 12:19:43 2013
+*filter
+:INPUT DROP [24:1980]
+:FORWARD DROP [0:0]
+:OUTPUT DROP [14:8030]
+-A OUTPUT -j REJECT --reject-with icmp6-port-unreachable
+COMMIT
+# Completed on Tue Aug 20 12:19:43 2013
diff --git a/puppet/modules/site_config/templates/reload_dhclient.erb b/puppet/modules/site_config/templates/reload_dhclient.erb
new file mode 100644
index 00000000..075828b7
--- /dev/null
+++ b/puppet/modules/site_config/templates/reload_dhclient.erb
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+# Get the PID
+PIDFILE='/var/run/dhclient.<%= scope.lookupvar('site_config::params::interface') %>.pid'
+
+# Capture how dhclient is currently running so we can relaunch it
+dhclient=`/bin/ps --no-headers --pid $(cat $PIDFILE) -f | /usr/bin/awk '{for(i=8;i<=NF;++i) printf("%s ", $i) }'`
+
+# Kill the current dhclient
+/usr/bin/pkill -F $PIDFILE
+
+# Restart dhclient with the arguments it had previously
+$dhclient
diff --git a/puppet/modules/site_couchdb/files/couchdb_scripts_defaults.conf b/puppet/modules/site_couchdb/files/couchdb_scripts_defaults.conf
new file mode 100644
index 00000000..1565e1a1
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/couchdb_scripts_defaults.conf
@@ -0,0 +1,4 @@
+# space separated list of excluded DBs for dumping
+# sourced by couchdb_dumpall.sh
+EXCLUDE_DBS='sessions tokens'
+
diff --git a/puppet/modules/site_couchdb/files/designs/Readme.md b/puppet/modules/site_couchdb/files/designs/Readme.md
new file mode 100644
index 00000000..983f629f
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/Readme.md
@@ -0,0 +1,14 @@
+This directory contains design documents for the leap platform.
+
+They need to be uploaded to the couch databases in order to query the
+databases in certain ways.
+
+Each subdirectory corresponds to a couch database and contains the design
+documents that need to be added to that particular database.
+
+Here's an example of how to upload the users design document:
+```bash
+HOST="http://localhost:5984"
+curl -X PUT $HOST/users/_design/User --data @users/User.json
+
+```
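Building on the curl example in the Readme above, all design documents in the tree can be uploaded in one pass, since each subdirectory name doubles as the database name. A sketch, with HOST and any required credentials left as assumptions:

```bash
#!/bin/sh
# Upload every design document; db name = subdirectory, design id = file name.
HOST="http://localhost:5984"
for dir in */; do
  db="${dir%/}"
  for doc in "$dir"*.json; do
    name=$(basename "$doc" .json)
    curl -X PUT "$HOST/$db/_design/$name" --data @"$doc"
  done
done
```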
diff --git a/puppet/modules/site_couchdb/files/designs/customers/Customer.json b/puppet/modules/site_couchdb/files/designs/customers/Customer.json
new file mode 100644
index 00000000..1b4bbddd
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/customers/Customer.json
@@ -0,0 +1,18 @@
+{
+ "_id": "_design/Customer",
+ "language": "javascript",
+ "views": {
+ "by_user_id": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Customer') && (doc['user_id'] != null)) {\n emit(doc['user_id'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_braintree_customer_id": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Customer') && (doc['braintree_customer_id'] != null)) {\n emit(doc['braintree_customer_id'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Customer') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "688c401ec0230b75625c176a88fc4a02"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/identities/Identity.json b/puppet/modules/site_couchdb/files/designs/identities/Identity.json
new file mode 100644
index 00000000..b1c567c1
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/identities/Identity.json
@@ -0,0 +1,34 @@
+{
+ "_id": "_design/Identity",
+ "language": "javascript",
+ "views": {
+ "by_address_and_destination": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Identity') && (doc['address'] != null) && (doc['destination'] != null)) {\n emit([doc['address'], doc['destination']], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Identity') {\n emit(doc._id, null);\n }\n }\n"
+ },
+ "cert_fingerprints_by_expiry": {
+ "map": "function(doc) {\n if (doc.type != 'Identity') {\n return;\n }\n if (typeof doc.cert_fingerprints === \"object\") {\n for (fp in doc.cert_fingerprints) {\n if (doc.cert_fingerprints.hasOwnProperty(fp)) {\n emit(doc.cert_fingerprints[fp], fp);\n }\n }\n }\n}\n"
+ },
+ "cert_expiry_by_fingerprint": {
+ "map": "function(doc) {\n if (doc.type != 'Identity') {\n return;\n }\n if (typeof doc.cert_fingerprints === \"object\") {\n for (fp in doc.cert_fingerprints) {\n if (doc.cert_fingerprints.hasOwnProperty(fp)) {\n emit(fp, doc.cert_fingerprints[fp]);\n }\n }\n }\n}\n"
+ },
+ "disabled": {
+ "map": "function(doc) {\n if (doc.type != 'Identity') {\n return;\n }\n if (typeof doc.user_id === \"undefined\") {\n emit(doc._id, 1);\n }\n}\n"
+ },
+ "pgp_key_by_email": {
+ "map": "function(doc) {\n if (doc.type != 'Identity') {\n return;\n }\n if (typeof doc.keys === \"object\") {\n emit(doc.address, doc.keys[\"pgp\"]);\n }\n}\n"
+ },
+ "by_user_id": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Identity') && (doc['user_id'] != null)) {\n emit(doc['user_id'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_address": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Identity') && (doc['address'] != null)) {\n emit(doc['address'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ }
+ },
+ "couchrest-hash": "4a774c3f56122b655a314670403b27e2"
+} \ No newline at end of file
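The views in this design document are read through the standard CouchDB view API. For instance, pgp_key_by_email emits the identity address as the key, so looking up one key looks like the sketch below; the host and the address alice@example.org are placeholders:

```bash
#!/bin/sh
# Query the pgp_key_by_email view of the Identity design document.
HOST="http://localhost:5984"
curl -G "$HOST/identities/_design/Identity/_view/pgp_key_by_email" \
  --data-urlencode 'key="alice@example.org"'
```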
diff --git a/puppet/modules/site_couchdb/files/designs/invite_codes/InviteCode.json b/puppet/modules/site_couchdb/files/designs/invite_codes/InviteCode.json
new file mode 100644
index 00000000..006c1ea1
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/invite_codes/InviteCode.json
@@ -0,0 +1,22 @@
+{
+ "_id": "_design/InviteCode",
+ "language": "javascript",
+ "views": {
+ "by__id": {
+ "map": " function(doc) {\n if ((doc['type'] == 'InviteCode') && (doc['_id'] != null)) {\n emit(doc['_id'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_invite_code": {
+ "map": " function(doc) {\n if ((doc['type'] == 'InviteCode') && (doc['invite_code'] != null)) {\n emit(doc['invite_code'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_invite_count": {
+ "map": " function(doc) {\n if ((doc['type'] == 'InviteCode') && (doc['invite_count'] != null)) {\n emit(doc['invite_count'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'InviteCode') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "83fb8f504520b4a9c7ddbb7928cd0ce3"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/messages/Message.json b/puppet/modules/site_couchdb/files/designs/messages/Message.json
new file mode 100644
index 00000000..6a48fc4d
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/messages/Message.json
@@ -0,0 +1,18 @@
+{
+ "_id": "_design/Message",
+ "language": "javascript",
+ "views": {
+ "by_user_ids_to_show": {
+ "map": "function (doc) {\n if (doc.type === 'Message' && doc.user_ids_to_show && Array.isArray(doc.user_ids_to_show)) {\n doc.user_ids_to_show.forEach(function (userId) {\n emit(userId, 1);\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "by_user_ids_to_show_and_created_at": {
+ "map": "// not using at moment\n// call with something like Message.by_user_ids_to_show_and_created_at.startkey([user_id, start_date]).endkey([user_id,end_date])\nfunction (doc) {\n if (doc.type === 'Message' && doc.user_ids_to_show && Array.isArray(doc.user_ids_to_show)) {\n doc.user_ids_to_show.forEach(function (userId) {\n emit([userId, doc.created_at], 1);\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Message') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "ba80168e51015d2678cad88fc6c5b986"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/sessions/Session.json b/puppet/modules/site_couchdb/files/designs/sessions/Session.json
new file mode 100644
index 00000000..70202780
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/sessions/Session.json
@@ -0,0 +1,8 @@
+{
+ "views": {
+ "by_expires": {
+ "reduce": "_sum",
+ "map": "function(doc) {\n if(typeof doc.expires !== \"undefined\") {\n emit(doc.expires, 1);\n }\n}\n"
+ }
+ }
+}
diff --git a/puppet/modules/site_couchdb/files/designs/shared/docs.json b/puppet/modules/site_couchdb/files/designs/shared/docs.json
new file mode 100644
index 00000000..004180cd
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/shared/docs.json
@@ -0,0 +1,8 @@
+{
+ "_id": "_design/docs",
+ "views": {
+ "get": {
+ "map": "function(doc) {\n if (doc.u1db_rev) {\n var is_tombstone = true;\n var has_conflicts = false;\n if (doc._attachments) {\n if (doc._attachments.u1db_content)\n is_tombstone = false;\n if (doc._attachments.u1db_conflicts)\n has_conflicts = true;\n }\n emit(doc._id,\n {\n \"couch_rev\": doc._rev,\n \"u1db_rev\": doc.u1db_rev,\n \"is_tombstone\": is_tombstone,\n \"has_conflicts\": has_conflicts,\n }\n );\n }\n}\n"
+ }
+ }
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/shared/syncs.json b/puppet/modules/site_couchdb/files/designs/shared/syncs.json
new file mode 100644
index 00000000..bab5622f
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/shared/syncs.json
@@ -0,0 +1,11 @@
+{
+ "_id": "_design/syncs",
+ "updates": {
+ "put": "function(doc, req){\n if (!doc) {\n doc = {}\n doc['_id'] = 'u1db_sync_log';\n doc['syncs'] = [];\n }\n body = JSON.parse(req.body);\n // remove outdated info\n doc['syncs'] = doc['syncs'].filter(\n function (entry) {\n return entry[0] != body['other_replica_uid'];\n }\n );\n // store u1db rev\n doc['syncs'].push([\n body['other_replica_uid'],\n body['other_generation'],\n body['other_transaction_id']\n ]);\n return [doc, 'ok'];\n}\n\n"
+ },
+ "views": {
+ "log": {
+ "map": "function(doc) {\n if (doc._id == 'u1db_sync_log') {\n if (doc.syncs)\n doc.syncs.forEach(function (entry) {\n emit(entry[0],\n {\n 'known_generation': entry[1],\n 'known_transaction_id': entry[2]\n });\n });\n }\n}\n"
+ }
+ }
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/shared/transactions.json b/puppet/modules/site_couchdb/files/designs/shared/transactions.json
new file mode 100644
index 00000000..106ad46c
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/shared/transactions.json
@@ -0,0 +1,13 @@
+{
+ "_id": "_design/transactions",
+ "lists": {
+ "generation": "function(head, req) {\n var row;\n var rows=[];\n // fetch all rows\n while(row = getRow()) {\n rows.push(row);\n }\n if (rows.length > 0)\n send(JSON.stringify({\n \"generation\": rows.length,\n \"doc_id\": rows[rows.length-1]['id'],\n \"transaction_id\": rows[rows.length-1]['value']\n }));\n else\n send(JSON.stringify({\n \"generation\": 0,\n \"doc_id\": \"\",\n \"transaction_id\": \"\",\n }));\n}\n",
+ "trans_id_for_gen": "function(head, req) {\n var row;\n var rows=[];\n var i = 1;\n var gen = 1;\n if (req.query.gen)\n gen = parseInt(req.query['gen']);\n // fetch all rows\n while(row = getRow())\n rows.push(row);\n if (gen <= rows.length)\n send(JSON.stringify({\n \"generation\": gen,\n \"doc_id\": rows[gen-1]['id'],\n \"transaction_id\": rows[gen-1]['value'],\n }));\n else\n send('{}');\n}\n",
+ "whats_changed": "function(head, req) {\n var row;\n var gen = 1;\n var old_gen = 0;\n if (req.query.old_gen)\n old_gen = parseInt(req.query['old_gen']);\n send('{\"transactions\":[\\n');\n // fetch all rows\n while(row = getRow()) {\n if (gen > old_gen) {\n if (gen > old_gen+1)\n send(',\\n');\n send(JSON.stringify({\n \"generation\": gen,\n \"doc_id\": row[\"id\"],\n \"transaction_id\": row[\"value\"]\n }));\n }\n gen++;\n }\n send('\\n]}');\n}\n"
+ },
+ "views": {
+ "log": {
+ "map": "function(doc) {\n if (doc.u1db_transactions)\n doc.u1db_transactions.forEach(function(t) {\n emit(t[0], // use timestamp as key so the results are ordered\n t[1]); // value is the transaction_id\n });\n}\n"
+ }
+ }
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/tickets/Ticket.json b/puppet/modules/site_couchdb/files/designs/tickets/Ticket.json
new file mode 100644
index 00000000..578f632b
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/tickets/Ticket.json
@@ -0,0 +1,50 @@
+{
+ "_id": "_design/Ticket",
+ "language": "javascript",
+ "views": {
+ "by_updated_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['updated_at'] != null)) {\n emit(doc['updated_at'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_created_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['created_at'] != null)) {\n emit(doc['created_at'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_created_by": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['created_by'] != null)) {\n emit(doc['created_by'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_is_open_and_created_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['is_open'] != null) && (doc['created_at'] != null)) {\n emit([doc['is_open'], doc['created_at']], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_is_open_and_updated_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['is_open'] != null) && (doc['updated_at'] != null)) {\n emit([doc['is_open'], doc['updated_at']], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_includes_post_by_and_is_open_and_created_at": {
+ "map": "function(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit([comment.posted_by, doc.is_open, doc.created_at], 1);\n }\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "by_includes_post_by": {
+ "map": "// TODO: This view is only used in tests--should we keep it?\nfunction(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit(comment.posted_by, 1);\n }\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "by_includes_post_by_and_is_open_and_updated_at": {
+ "map": "function(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit([comment.posted_by, doc.is_open, doc.updated_at], 1);\n }\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "by_includes_post_by_and_created_at": {
+ "map": "function(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit([comment.posted_by, doc.created_at], 1);\n }\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "by_includes_post_by_and_updated_at": {
+ "map": "function(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit([comment.posted_by, doc.updated_at], 1);\n }\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Ticket') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "b21eaeea8ea66bfda65581b1b7ce06af"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/tokens/Token.json b/puppet/modules/site_couchdb/files/designs/tokens/Token.json
new file mode 100644
index 00000000..b9025f15
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/tokens/Token.json
@@ -0,0 +1,14 @@
+{
+ "_id": "_design/Token",
+ "language": "javascript",
+ "views": {
+ "by_last_seen_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Token') && (doc['last_seen_at'] != null)) {\n emit(doc['last_seen_at'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Token') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "541dd924551c42a2317b345effbe65cc"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/users/User.json b/puppet/modules/site_couchdb/files/designs/users/User.json
new file mode 100644
index 00000000..8a82cf4a
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/users/User.json
@@ -0,0 +1,22 @@
+{
+ "_id": "_design/User",
+ "language": "javascript",
+ "views": {
+ "by_login": {
+ "map": " function(doc) {\n if ((doc['type'] == 'User') && (doc['login'] != null)) {\n emit(doc['login'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'User') {\n emit(doc._id, null);\n }\n }\n"
+ },
+ "by_created_at_and_one_month_warning_not_sent": {
+ "map": "function (doc) {\n if ((doc['type'] == 'User') && (doc['created_at'] != null) && (doc['one_month_warning_sent'] == null)) {\n emit(doc['created_at'], 1);\n } \n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "by_created_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'User') && (doc['created_at'] != null)) {\n emit(doc['created_at'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ }
+ },
+ "couchrest-hash": "d854607d299887a347e554176cb79e20"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/leap_ca_daemon b/puppet/modules/site_couchdb/files/leap_ca_daemon
new file mode 100755
index 00000000..9a1a0bc7
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/leap_ca_daemon
@@ -0,0 +1,157 @@
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides: leap_ca_daemon
+# Required-Start: $remote_fs $syslog
+# Required-Stop: $remote_fs $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: leap_ca_daemon initscript
+# Description: Controls leap_ca_daemon (see https://github.com/leapcode/leap_ca
+#                    for more information).
+### END INIT INFO
+
+# Author: varac <varac@leap.se>
+#
+
+# Do NOT "set -e"
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="leap_ca_daemon initscript"
+NAME=leap_ca_daemon
+DAEMON=/usr/local/bin/$NAME
+DAEMON_ARGS="run "
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# Load the VERBOSE setting and other rcS variables
+. /lib/init/vars.sh
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+ # Return
+ # 0 if daemon has been started
+ # 1 if daemon was already running
+ # 2 if daemon could not be started
+ start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
+ || return 1
+ start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- \
+ $DAEMON_ARGS \
+ || return 2
+ # Add code here, if necessary, that waits for the process to be ready
+ # to handle requests from services started subsequently which depend
+ # on this one. As a last resort, sleep for some time.
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+ # Return
+ # 0 if daemon has been stopped
+ # 1 if daemon was already stopped
+ # 2 if daemon could not be stopped
+ # other if a failure occurred
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
+ RETVAL="$?"
+ [ "$RETVAL" = 2 ] && return 2
+ # Wait for children to finish too if this is a daemon that forks
+ # and if the daemon is only ever run from this initscript.
+ # If the above conditions are not satisfied then add some other code
+ # that waits for the process to drop all resources that could be
+ # needed by services started subsequently. A last resort is to
+ # sleep for some time.
+ start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
+ [ "$?" = 2 ] && return 2
+ # Many daemons don't delete their pidfiles when they exit.
+ rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+#
+# Function that sends a SIGHUP to the daemon/service
+#
+do_reload() {
+ #
+ # If the daemon can reload its configuration without
+ # restarting (for example, when it is sent a SIGHUP),
+ # then implement that here.
+ #
+ start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name $NAME
+ return 0
+}
+
+case "$1" in
+ start)
+ [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case "$?" in
+ 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+ 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+ esac
+ ;;
+ stop)
+ [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+ 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+ esac
+ ;;
+ status)
+ status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
+ ;;
+ #reload|force-reload)
+ #
+ # If do_reload() is not implemented then leave this commented out
+ # and leave 'force-reload' as an alias for 'restart'.
+ #
+ #log_daemon_msg "Reloading $DESC" "$NAME"
+ #do_reload
+ #log_end_msg $?
+ #;;
+ restart|force-reload)
+ #
+ # If the "reload" option is implemented then remove the
+ # 'force-reload' alias
+ #
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1)
+ do_start
+ case "$?" in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *)
+ # Failed to stop
+ log_end_msg 1
+ ;;
+ esac
+ ;;
+ *)
+ #echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload}" >&2
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
+
+:
diff --git a/puppet/modules/site_couchdb/files/local.ini b/puppet/modules/site_couchdb/files/local.ini
new file mode 100644
index 00000000..b921a927
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/local.ini
@@ -0,0 +1,8 @@
+; Puppet modified file !!
+
+; Custom settings should be made in this file. They will override settings
+; in default.ini, but unlike changes made to default.ini, this file won't be
+; overwritten on server upgrade.
+
+[compactions]
+_default = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "03:00"}, {to, "05:00"}]
diff --git a/puppet/modules/site_couchdb/files/runit_config b/puppet/modules/site_couchdb/files/runit_config
new file mode 100644
index 00000000..169b4832
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/runit_config
@@ -0,0 +1,6 @@
+#!/bin/bash
+exec 2>&1
+export HOME=/home/bigcouch
+ulimit -H -n 32768
+ulimit -S -n 32768
+exec chpst -u bigcouch /opt/bigcouch/bin/bigcouch
diff --git a/puppet/modules/site_couchdb/lib/puppet/parser/functions/rotated_db_name.rb b/puppet/modules/site_couchdb/lib/puppet/parser/functions/rotated_db_name.rb
new file mode 100644
index 00000000..6458ae81
--- /dev/null
+++ b/puppet/modules/site_couchdb/lib/puppet/parser/functions/rotated_db_name.rb
@@ -0,0 +1,24 @@
+module Puppet::Parser::Functions
+ newfunction(:rotated_db_name, :type => :rvalue, :doc => <<-EOS
+This function takes a database name string and returns a database name with the current rotation stamp appended.
+The first argument is the base name of the database. Subsequent arguments may contain these options:
+ * 'next' -- return the db name for the next rotation, not the current one.
+ * 'monthly' -- rotate monthly (default)
+ * 'weekly' -- rotate weekly
+*Examples:*
+ rotated_db_name('tokens') => 'tokens_551'
+ EOS
+ ) do |arguments|
+ if arguments.include?('weekly')
+ rotation_period = 604800 # 1 week
+ else
+ rotation_period = 2592000 # 1 month
+ end
+ suffix = Time.now.utc.to_i / rotation_period
+ if arguments.include?('next')
+ suffix += 1
+ end
+ "#{arguments.first}_#{suffix}"
+ end
+end
+
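The suffix appended by this function is just the current Unix time divided by the rotation period, so the expected database names can be reproduced from a shell when debugging. A sketch mirroring the arithmetic above (2592000 seconds per month, 604800 per week):

```bash
#!/bin/sh
# Reproduce the rotation suffixes used by rotated_db_name().
now=$(date +%s)
monthly=$(( now / 2592000 ))
weekly=$(( now / 604800 ))
echo "current monthly db: tokens_${monthly}"
echo "next monthly db:    tokens_$(( monthly + 1 ))"
echo "current weekly db:  tokens_${weekly}"
```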
diff --git a/puppet/modules/site_couchdb/manifests/add_users.pp b/puppet/modules/site_couchdb/manifests/add_users.pp
new file mode 100644
index 00000000..c905316b
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/add_users.pp
@@ -0,0 +1,57 @@
+# add couchdb users for all services
+class site_couchdb::add_users {
+
+ Class['site_couchdb::create_dbs']
+ -> Class['site_couchdb::add_users']
+
+ # Couchdb users
+
+ ## leap_mx couchdb user
+ ## read: identities
+ ## write access to user-<uuid>
+ couchdb::add_user { $site_couchdb::couchdb_leap_mx_user:
+ roles => '["identities"]',
+ pw => $site_couchdb::couchdb_leap_mx_pw,
+ salt => $site_couchdb::couchdb_leap_mx_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## nickserver couchdb user
+ ## r: identities
+ ## r/w: keycache
+ couchdb::add_user { $site_couchdb::couchdb_nickserver_user:
+ roles => '["identities","keycache"]',
+ pw => $site_couchdb::couchdb_nickserver_pw,
+ salt => $site_couchdb::couchdb_nickserver_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## soledad couchdb user
+ ## r/w: user-<uuid>, shared
+ ## read: tokens
+ couchdb::add_user { $site_couchdb::couchdb_soledad_user:
+ roles => '["tokens"]',
+ pw => $site_couchdb::couchdb_soledad_pw,
+ salt => $site_couchdb::couchdb_soledad_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## webapp couchdb user
+ ## read/write: users, tokens, sessions, tickets, identities, customer
+ couchdb::add_user { $site_couchdb::couchdb_webapp_user:
+ roles => '["tokens","identities","users"]',
+ pw => $site_couchdb::couchdb_webapp_pw,
+ salt => $site_couchdb::couchdb_webapp_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## replication couchdb user
+ ## read/write: all databases for replication
+ couchdb::add_user { $site_couchdb::couchdb_replication_user:
+ roles => '["replication"]',
+ pw => $site_couchdb::couchdb_replication_pw,
+ salt => $site_couchdb::couchdb_replication_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/backup.pp b/puppet/modules/site_couchdb/manifests/backup.pp
new file mode 100644
index 00000000..8b5aa6ea
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/backup.pp
@@ -0,0 +1,23 @@
+class site_couchdb::backup {
+
+ # general backupninja config
+ backupninja::config { 'backupninja_config':
+ usecolors => false,
+ }
+
+ # dump all DBs locally to /var/backups/couchdb once a day
+ backupninja::sh { 'couchdb_backup':
+ command_string => "cd /srv/leap/couchdb/scripts \n./couchdb_dumpall.sh"
+ }
+
+ # Deploy /etc/leap/couchdb_scripts_defaults.conf so we can exclude
+ # some databases
+
+ file { '/etc/leap/couchdb_scripts_defaults.conf':
+ source => 'puppet:///modules/site_couchdb/couchdb_scripts_defaults.conf',
+ mode => '0644',
+ owner => 'root',
+ group => 'root',
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch.pp b/puppet/modules/site_couchdb/manifests/bigcouch.pp
new file mode 100644
index 00000000..2de3d4d0
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/bigcouch.pp
@@ -0,0 +1,50 @@
+# sets up bigcouch on couchdb node
+class site_couchdb::bigcouch {
+
+ $config = $::site_couchdb::couchdb_config['bigcouch']
+ $cookie = $config['cookie']
+ $ednp_port = $config['ednp_port']
+
+ class { 'couchdb':
+ admin_pw => $::site_couchdb::couchdb_admin_pw,
+ admin_salt => $::site_couchdb::couchdb_admin_salt,
+ bigcouch => true,
+ bigcouch_cookie => $cookie,
+ ednp_port => $ednp_port,
+ chttpd_bind_address => '127.0.0.1'
+ }
+
+ #
+  # stunnel must be running correctly before bigcouch dbs can be set up.
+ #
+ Class['site_config::default']
+ -> Class['site_config::resolvconf']
+ -> Class['couchdb::bigcouch::package::cloudant']
+ -> Service['shorewall']
+ -> Exec['refresh_stunnel']
+ -> Class['site_couchdb::setup']
+ -> Class['site_couchdb::bigcouch::add_nodes']
+ -> Class['site_couchdb::bigcouch::settle_cluster']
+ -> Class['site_couchdb::create_dbs']
+
+ include site_couchdb::bigcouch::add_nodes
+ include site_couchdb::bigcouch::settle_cluster
+ include site_couchdb::bigcouch::compaction
+
+ file { '/var/log/bigcouch':
+ ensure => directory
+ }
+
+ file { '/etc/sv/bigcouch/run':
+ ensure => present,
+ source => 'puppet:///modules/site_couchdb/runit_config',
+ owner => root,
+ group => root,
+ mode => '0755',
+ require => Package['couchdb'],
+ notify => Service['couchdb']
+ }
+
+ include site_check_mk::agent::couchdb::bigcouch
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp b/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp
new file mode 100644
index 00000000..c8c43275
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp
@@ -0,0 +1,8 @@
+class site_couchdb::bigcouch::add_nodes {
+ # loop through neighbors array and add nodes
+ $nodes = $::site_couchdb::bigcouch::config['neighbors']
+
+ couchdb::bigcouch::add_node { $nodes:
+ require => Couchdb::Query::Setup['localhost']
+ }
+}
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch/compaction.pp b/puppet/modules/site_couchdb/manifests/bigcouch/compaction.pp
new file mode 100644
index 00000000..84aab4ef
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/bigcouch/compaction.pp
@@ -0,0 +1,8 @@
+class site_couchdb::bigcouch::compaction {
+ cron {
+ 'compact_all_shards':
+ command => '/srv/leap/couchdb/scripts/bigcouch_compact_all_shards.sh >> /var/log/bigcouch/compaction.log',
+ hour => 3,
+ minute => 17;
+ }
+}
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp b/puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp
new file mode 100644
index 00000000..820b5be2
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp
@@ -0,0 +1,11 @@
+class site_couchdb::bigcouch::settle_cluster {
+
+ exec { 'wait_for_couch_nodes':
+ command => '/srv/leap/bin/run_tests --test CouchDB/Are_configured_nodes_online? --retry 12 --wait 10'
+ }
+
+ exec { 'settle_cluster_membership':
+ command => '/srv/leap/bin/run_tests --test CouchDB/Is_cluster_membership_ok? --retry 12 --wait 10',
+ require => Exec['wait_for_couch_nodes']
+ }
+}
diff --git a/puppet/modules/site_couchdb/manifests/create_dbs.pp b/puppet/modules/site_couchdb/manifests/create_dbs.pp
new file mode 100644
index 00000000..a2d1c655
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/create_dbs.pp
@@ -0,0 +1,102 @@
+# creates necessary databases
+class site_couchdb::create_dbs {
+
+ Class['site_couchdb::setup']
+ -> Class['site_couchdb::create_dbs']
+
+ ### customer database
+ ### r/w: webapp,
+ couchdb::create_db { 'customers':
+ members => "{ \"names\": [\"${site_couchdb::couchdb_webapp_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## identities database
+  ## r: nickserver, leap_mx - needs to be restricted with a design document
+ ## r/w: webapp
+ couchdb::create_db { 'identities':
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"identities\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## keycache database
+ ## r/w: nickserver
+ couchdb::create_db { 'keycache':
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"keycache\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## sessions database
+ ## r/w: webapp
+ $sessions_db = rotated_db_name('sessions', 'monthly')
+ couchdb::create_db { $sessions_db:
+ members => "{ \"names\": [\"${site_couchdb::couchdb_webapp_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ $sessions_next_db = rotated_db_name('sessions', 'monthly', 'next')
+ couchdb::create_db { $sessions_next_db:
+ members => "{ \"names\": [\"${site_couchdb::couchdb_webapp_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## shared database
+ ## r/w: soledad
+ couchdb::create_db { 'shared':
+ members => "{ \"names\": [\"${site_couchdb::couchdb_soledad_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tickets database
+ ## r/w: webapp
+ couchdb::create_db { 'tickets':
+ members => "{ \"names\": [\"${site_couchdb::couchdb_webapp_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tokens database
+ ## r: soledad - needs to be restricted with a design document
+ ## r/w: webapp
+ $tokens_db = rotated_db_name('tokens', 'monthly')
+ couchdb::create_db { $tokens_db:
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"tokens\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ $tokens_next_db = rotated_db_name('tokens', 'monthly', 'next')
+ couchdb::create_db { $tokens_next_db:
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"tokens\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## users database
+ ## r/w: webapp
+ couchdb::create_db { 'users':
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"users\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tmp_users database
+ ## r/w: webapp
+ couchdb::create_db { 'tmp_users':
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"users\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## messages db
+ ## store messages to the clients such as payment reminders
+ ## r/w: webapp
+ couchdb::create_db { 'messages':
+ members => "{ \"names\": [\"${site_couchdb::couchdb_webapp_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## invite_codes db
+ ## store invite codes for new signups
+ ## r/w: webapp
+ couchdb::create_db { 'invite_codes':
+ members => "{ \"names\": [\"${site_couchdb::couchdb_webapp_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+}
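The members string handed to each couchdb::create_db resource ends up as that database's _security object, so the effect of this class can be checked directly against the running couch. A sketch that assumes the admin netrc from site_couchdb::setup is in place (curl -n reads it):

```bash
#!/bin/sh
# Inspect the _security objects of a few databases created above.
HOST="http://127.0.0.1:5984"
for db in customers identities shared users; do
  echo "== $db"
  curl -s -n "$HOST/$db/_security"
  echo
done
```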
diff --git a/puppet/modules/site_couchdb/manifests/designs.pp b/puppet/modules/site_couchdb/manifests/designs.pp
new file mode 100644
index 00000000..e5fd94c6
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/designs.pp
@@ -0,0 +1,46 @@
+class site_couchdb::designs {
+
+ Class['site_couchdb::create_dbs']
+ -> Class['site_couchdb::designs']
+
+ file { '/srv/leap/couchdb/designs':
+ ensure => directory,
+ source => 'puppet:///modules/site_couchdb/designs',
+ recurse => true,
+ purge => true,
+ mode => '0755'
+ }
+
+ site_couchdb::upload_design {
+ 'customers': design => 'customers/Customer.json';
+ 'identities': design => 'identities/Identity.json';
+ 'tickets': design => 'tickets/Ticket.json';
+ 'messages': design => 'messages/Message.json';
+ 'users': design => 'users/User.json';
+ 'tmp_users': design => 'users/User.json';
+ 'invite_codes': design => 'invite_codes/InviteCode.json';
+ 'shared_docs':
+ db => 'shared',
+ design => 'shared/docs.json';
+ 'shared_syncs':
+ db => 'shared',
+ design => 'shared/syncs.json';
+ 'shared_transactions':
+ db => 'shared',
+ design => 'shared/transactions.json';
+ }
+
+ $sessions_db = rotated_db_name('sessions', 'monthly')
+ $sessions_next_db = rotated_db_name('sessions', 'monthly', 'next')
+ site_couchdb::upload_design {
+ $sessions_db: design => 'sessions/Session.json';
+ $sessions_next_db: design => 'sessions/Session.json';
+ }
+
+ $tokens_db = rotated_db_name('tokens', 'monthly')
+ $tokens_next_db = rotated_db_name('tokens', 'monthly', 'next')
+ site_couchdb::upload_design {
+ $tokens_db: design => 'tokens/Token.json';
+ $tokens_next_db: design => 'tokens/Token.json';
+ }
+}
diff --git a/puppet/modules/site_couchdb/manifests/init.pp b/puppet/modules/site_couchdb/manifests/init.pp
new file mode 100644
index 00000000..c4fe6277
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/init.pp
@@ -0,0 +1,81 @@
+# entry class for configuring a couchdb/bigcouch node
+# couchdb node
+class site_couchdb {
+ tag 'leap_service'
+
+ $couchdb_config = hiera('couch')
+ $couchdb_users = $couchdb_config['users']
+
+ $couchdb_admin = $couchdb_users['admin']
+ $couchdb_admin_user = $couchdb_admin['username']
+ $couchdb_admin_pw = $couchdb_admin['password']
+ $couchdb_admin_salt = $couchdb_admin['salt']
+
+ $couchdb_leap_mx = $couchdb_users['leap_mx']
+ $couchdb_leap_mx_user = $couchdb_leap_mx['username']
+ $couchdb_leap_mx_pw = $couchdb_leap_mx['password']
+ $couchdb_leap_mx_salt = $couchdb_leap_mx['salt']
+
+ $couchdb_nickserver = $couchdb_users['nickserver']
+ $couchdb_nickserver_user = $couchdb_nickserver['username']
+ $couchdb_nickserver_pw = $couchdb_nickserver['password']
+ $couchdb_nickserver_salt = $couchdb_nickserver['salt']
+
+ $couchdb_soledad = $couchdb_users['soledad']
+ $couchdb_soledad_user = $couchdb_soledad['username']
+ $couchdb_soledad_pw = $couchdb_soledad['password']
+ $couchdb_soledad_salt = $couchdb_soledad['salt']
+
+ $couchdb_webapp = $couchdb_users['webapp']
+ $couchdb_webapp_user = $couchdb_webapp['username']
+ $couchdb_webapp_pw = $couchdb_webapp['password']
+ $couchdb_webapp_salt = $couchdb_webapp['salt']
+
+ $couchdb_replication = $couchdb_users['replication']
+ $couchdb_replication_user = $couchdb_replication['username']
+ $couchdb_replication_pw = $couchdb_replication['password']
+ $couchdb_replication_salt = $couchdb_replication['salt']
+
+ $couchdb_backup = $couchdb_config['backup']
+ $couchdb_mode = $couchdb_config['mode']
+
+ # ensure bigcouch has been purged from the system:
+ # TODO: remove this check in 0.9 release
+ if file('/opt/bigcouch/bin/bigcouch', '/dev/null') != '' {
+ fail 'ERROR: BigCouch appears to be installed. Make sure you have migrated to CouchDB before proceeding. See https://leap.se/upgrade-0-8'
+ }
+
+ include site_couchdb::plain
+
+ Class['site_config::default']
+ -> Service['shorewall']
+ -> Exec['refresh_stunnel']
+ -> Class['couchdb']
+ -> Class['site_couchdb::setup']
+
+ include ::site_config::default
+ include site_stunnel
+
+ include site_couchdb::setup
+ include site_couchdb::create_dbs
+ include site_couchdb::add_users
+ include site_couchdb::designs
+ include site_couchdb::logrotate
+
+ if $couchdb_backup { include site_couchdb::backup }
+
+ include site_check_mk::agent::couchdb
+
+ # remove tapicero leftovers on couchdb nodes
+ include site_config::remove::tapicero
+
+ # Destroy every per-user storage database
+ # where the corresponding user record does not exist.
+ cron { 'cleanup_stale_userdbs':
+ command => '(/bin/date; /srv/leap/couchdb/scripts/cleanup-user-dbs) >> /var/log/leap/couchdb-cleanup.log',
+ user => 'root',
+ hour => 4,
+ minute => 7;
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/logrotate.pp b/puppet/modules/site_couchdb/manifests/logrotate.pp
new file mode 100644
index 00000000..bb8843bb
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/logrotate.pp
@@ -0,0 +1,14 @@
+# configure couchdb logrotation
+class site_couchdb::logrotate {
+
+ augeas {
+ 'logrotate_bigcouch':
+ context => '/files/etc/logrotate.d/bigcouch/rule',
+ changes => [
+ 'set file /opt/bigcouch/var/log/*.log', 'set rotate 7',
+ 'set schedule daily', 'set compress compress',
+ 'set missingok missingok', 'set ifempty notifempty',
+ 'set copytruncate copytruncate' ]
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/mirror.pp b/puppet/modules/site_couchdb/manifests/mirror.pp
new file mode 100644
index 00000000..fb82b897
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/mirror.pp
@@ -0,0 +1,78 @@
+# configure mirroring of couch nodes
+class site_couchdb::mirror {
+
+ Class['site_couchdb::add_users']
+ -> Class['site_couchdb::mirror']
+
+ class { 'couchdb':
+ admin_pw => $site_couchdb::couchdb_admin_pw,
+ admin_salt => $site_couchdb::couchdb_admin_salt,
+ chttpd_bind_address => '127.0.0.1'
+ }
+
+ $masters = $site_couchdb::couchdb_config['replication']['masters']
+ $master_node_names = keys($site_couchdb::couchdb_config['replication']['masters'])
+ $master_node = $masters[$master_node_names[0]]
+ $user = $site_couchdb::couchdb_replication_user
+ $password = $site_couchdb::couchdb_replication_pw
+ $from_host = $master_node['domain_internal']
+ $from_port = $master_node['couch_port']
+ $from = "http://${user}:${password}@${from_host}:${from_port}"
+
+ notice("mirror from: ${from}")
+
+ ### customer database
+ couchdb::mirror_db { 'customers':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## identities database
+ couchdb::mirror_db { 'identities':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## keycache database
+ couchdb::mirror_db { 'keycache':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## sessions database
+ couchdb::mirror_db { 'sessions':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## shared database
+ couchdb::mirror_db { 'shared':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tickets database
+ couchdb::mirror_db { 'tickets':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tokens database
+ couchdb::mirror_db { 'tokens':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## users database
+ couchdb::mirror_db { 'users':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## messages db
+ couchdb::mirror_db { 'messages':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+}
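Replications set up by couchdb::mirror_db appear in CouchDB's _active_tasks feed, which shows whether the mirror is actually pulling from the configured master. A sketch, assuming the admin netrc from site_couchdb::setup is available:

```bash
#!/bin/sh
# List running replication tasks on the mirror node.
curl -s -n http://127.0.0.1:5984/_active_tasks
```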
diff --git a/puppet/modules/site_couchdb/manifests/plain.pp b/puppet/modules/site_couchdb/manifests/plain.pp
new file mode 100644
index 00000000..b40fc100
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/plain.pp
@@ -0,0 +1,14 @@
+# this class sets up a single, plain couchdb node
+class site_couchdb::plain {
+ class { 'couchdb':
+ admin_pw => $site_couchdb::couchdb_admin_pw,
+ admin_salt => $site_couchdb::couchdb_admin_salt,
+ chttpd_bind_address => '127.0.0.1'
+ }
+
+ include site_check_mk::agent::couchdb::plain
+
+ # remove bigcouch leftovers from previous installations
+ include ::site_config::remove::bigcouch
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/setup.pp b/puppet/modules/site_couchdb/manifests/setup.pp
new file mode 100644
index 00000000..710d3c1c
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/setup.pp
@@ -0,0 +1,61 @@
+#
+# An initial setup class. All the other classes depend on this
+#
+class site_couchdb::setup {
+
+ # ensure that we don't have leftovers from previous installations
+ # where we installed the cloudant bigcouch package
+ # https://leap.se/code/issues/4971
+ class { 'couchdb::bigcouch::package::cloudant':
+ ensure => absent
+ }
+
+ $user = $site_couchdb::couchdb_admin_user
+
+ # setup /etc/couchdb/couchdb-admin.netrc for couchdb admin access
+ couchdb::query::setup { 'localhost':
+ user => $user,
+ pw => $site_couchdb::couchdb_admin_pw
+ }
+
+ # We symlink /etc/couchdb/couchdb-admin.netrc to /etc/couchdb/couchdb.netrc
+  # for puppet commands, and to /root/.netrc for couchdb_scripts
+  # (eg. backup), and to make life easier for the admin on the command line
+ # (i.e. using curl/wget without passing credentials)
+ file {
+ '/etc/couchdb/couchdb.netrc':
+ ensure => link,
+ target => "/etc/couchdb/couchdb-${user}.netrc";
+ '/root/.netrc':
+ ensure => link,
+ target => '/etc/couchdb/couchdb.netrc';
+ }
+
+ # setup /etc/couchdb/couchdb-soledad-admin.netrc file for couchdb admin
+ # access, accessible only for the soledad-admin user to create soledad
+ # userdbs
+ if member(hiera('services', []), 'soledad') {
+ file { '/etc/couchdb/couchdb-soledad-admin.netrc':
+ content => "machine localhost login ${user} password ${site_couchdb::couchdb_admin_pw}",
+ mode => '0400',
+ owner => 'soledad-admin',
+ group => 'root',
+ require => [ Package['couchdb'], User['soledad-admin'] ];
+ }
+ }
+
+ # Checkout couchdb_scripts repo
+ file {
+ '/srv/leap/couchdb':
+ ensure => directory
+ }
+
+ vcsrepo { '/srv/leap/couchdb/scripts':
+ ensure => present,
+ provider => git,
+ source => 'https://leap.se/git/couchdb_scripts',
+ revision => 'origin/master',
+ require => File['/srv/leap/couchdb']
+ }
+
+}
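With the netrc symlinks above in place, admin queries against the local couch need no explicit credentials; curl's -n flag reads /root/.netrc. A minimal sketch of the convenience described in the comment:

```bash
#!/bin/sh
# Authenticated admin query using the /root/.netrc symlink created above.
curl -s -n http://127.0.0.1:5984/_all_dbs
```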
diff --git a/puppet/modules/site_couchdb/manifests/upload_design.pp b/puppet/modules/site_couchdb/manifests/upload_design.pp
new file mode 100644
index 00000000..bd73ebf2
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/upload_design.pp
@@ -0,0 +1,14 @@
+# upload a design doc to a db
+define site_couchdb::upload_design($design, $db = $title) {
+ $design_name = regsubst($design, '^.*\/(.*)\.json$', '\1')
+ $id = "_design/${design_name}"
+ $file = "/srv/leap/couchdb/designs/${design}"
+ exec {
+ "upload_design_${name}":
+ command => "/usr/local/bin/couch-doc-update --host 127.0.0.1:5984 --db '${db}' --id '${id}' --data '{}' --file '${file}'",
+ refreshonly => false,
+ loglevel => debug,
+ logoutput => on_failure,
+ require => File['/srv/leap/couchdb/designs'];
+ }
+}
diff --git a/puppet/modules/site_haproxy/files/haproxy-stats.cfg b/puppet/modules/site_haproxy/files/haproxy-stats.cfg
new file mode 100644
index 00000000..e6335ba2
--- /dev/null
+++ b/puppet/modules/site_haproxy/files/haproxy-stats.cfg
@@ -0,0 +1,6 @@
+# provide access to stats for the nagios plugin
+listen stats 127.0.0.1:8000
+ mode http
+ stats enable
+ stats uri /haproxy
+
diff --git a/puppet/modules/site_haproxy/manifests/init.pp b/puppet/modules/site_haproxy/manifests/init.pp
new file mode 100644
index 00000000..b28ce80e
--- /dev/null
+++ b/puppet/modules/site_haproxy/manifests/init.pp
@@ -0,0 +1,41 @@
+class site_haproxy {
+ $haproxy = hiera('haproxy')
+
+ class { 'haproxy':
+ enable => true,
+ manage_service => true,
+ global_options => {
+ 'log' => '127.0.0.1 local0',
+ 'maxconn' => '4096',
+ 'stats' => 'socket /var/run/haproxy.sock user haproxy group haproxy',
+ 'chroot' => '/usr/share/haproxy',
+ 'user' => 'haproxy',
+ 'group' => 'haproxy',
+ 'daemon' => ''
+ },
+ defaults_options => {
+ 'log' => 'global',
+ 'retries' => '3',
+ 'option' => 'redispatch',
+ 'timeout connect' => '4000',
+ 'timeout client' => '20000',
+ 'timeout server' => '20000'
+ }
+ }
+
+ # monitor haproxy
+ concat::fragment { 'stats':
+ target => '/etc/haproxy/haproxy.cfg',
+ order => '90',
+ source => 'puppet:///modules/site_haproxy/haproxy-stats.cfg';
+ }
+
+ # Template uses $haproxy
+ concat::fragment { 'leap_haproxy_webapp_couchdb':
+ target => '/etc/haproxy/haproxy.cfg',
+ order => '20',
+ content => template('site_haproxy/haproxy.cfg.erb'),
+ }
+
+ include site_check_mk::agent::haproxy
+}
diff --git a/puppet/modules/site_haproxy/templates/couch.erb b/puppet/modules/site_haproxy/templates/couch.erb
new file mode 100644
index 00000000..f42e8368
--- /dev/null
+++ b/puppet/modules/site_haproxy/templates/couch.erb
@@ -0,0 +1,32 @@
+frontend couch
+ bind localhost:<%= @listen_port %>
+ mode http
+ option httplog
+ option dontlognull
+ option http-server-close # use client keep-alive, but close server connection.
+ use_backend couch_read if METH_GET
+ default_backend couch_write
+
+backend couch_write
+ mode http
+ balance roundrobin
+ option httpchk GET / # health check using simple get to root
+ option allbackups # balance among all backups, not just one.
+ default-server inter 3000 fastinter 1000 downinter 1000 rise 2 fall 1
+<%- @servers.sort.each do |name,server| -%>
+<%- next unless server['writable'] -%>
+ # <%=name%>
+ server couchdb_<%=server['port']%> <%=server['host']%>:<%=server['port']%> <%='backup' if server['backup']%> weight <%=server['weight']%> check
+<%- end -%>
+
+backend couch_read
+ mode http
+ balance roundrobin
+ option httpchk GET / # health check using simple get to root
+ option allbackups # balance among all backups, not just one.
+ default-server inter 3000 fastinter 1000 downinter 1000 rise 2 fall 1
+<%- @servers.sort.each do |name,server| -%>
+ # <%=name%>
+ server couchdb_<%=server['port']%> <%=server['host']%>:<%=server['port']%> <%='backup' if server['backup']%> weight <%=server['weight']%> check
+<%- end -%>
+
diff --git a/puppet/modules/site_haproxy/templates/haproxy.cfg.erb b/puppet/modules/site_haproxy/templates/haproxy.cfg.erb
new file mode 100644
index 00000000..8311b1a5
--- /dev/null
+++ b/puppet/modules/site_haproxy/templates/haproxy.cfg.erb
@@ -0,0 +1,11 @@
+<%- @haproxy.each do |frontend, options| -%>
+<%- if options['servers'] -%>
+
+##
+## <%= frontend %>
+##
+
+<%= scope.function_templatewlv(["site_haproxy/#{frontend}.erb", options]) %>
+<%- end -%>
+<%- end -%>
+
diff --git a/puppet/modules/site_mx/manifests/init.pp b/puppet/modules/site_mx/manifests/init.pp
new file mode 100644
index 00000000..a9b0198b
--- /dev/null
+++ b/puppet/modules/site_mx/manifests/init.pp
@@ -0,0 +1,20 @@
+class site_mx {
+ tag 'leap_service'
+ Class['site_config::default'] -> Class['site_mx']
+
+ include site_config::default
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include site_config::x509::client_ca::ca
+ include site_config::x509::client_ca::key
+
+ include site_stunnel
+
+ include site_postfix::mx
+ include site_haproxy
+ include site_shorewall::mx
+ include site_shorewall::service::smtp
+ include leap_mx
+ include site_check_mk::agent::mx
+}
diff --git a/puppet/modules/site_nagios/files/configs/Debian/nagios.cfg b/puppet/modules/site_nagios/files/configs/Debian/nagios.cfg
new file mode 100644
index 00000000..62f26f2c
--- /dev/null
+++ b/puppet/modules/site_nagios/files/configs/Debian/nagios.cfg
@@ -0,0 +1,1302 @@
+##############################################################################
+#
+# NAGIOS.CFG - Sample Main Config File for Nagios
+#
+#
+##############################################################################
+
+
+# LOG FILE
+# This is the main log file where service and host events are logged
+# for historical purposes. This should be the first option specified
+# in the config file!!!
+
+log_file=/var/log/nagios3/nagios.log
+
+
+
+# OBJECT CONFIGURATION FILE(S)
+# These are the object configuration files in which you define hosts,
+# host groups, contacts, contact groups, services, etc.
+# You can split your object definitions across several config files
+# if you wish (as shown below), or keep them all in a single config file.
+#cfg_file=/etc/nagios3/commands.cfg
+
+# Check_mk configuration files
+cfg_dir=/etc/nagios3/conf.d/check_mk
+cfg_dir=/etc/nagios3/local
+
+# Puppet-managed configuration files
+cfg_file=/etc/nagios3/nagios_templates.cfg
+cfg_file=/etc/nagios3/nagios_command.cfg
+cfg_file=/etc/nagios3/nagios_contact.cfg
+cfg_file=/etc/nagios3/nagios_contactgroup.cfg
+cfg_file=/etc/nagios3/nagios_host.cfg
+cfg_file=/etc/nagios3/nagios_hostdependency.cfg
+cfg_file=/etc/nagios3/nagios_hostescalation.cfg
+cfg_file=/etc/nagios3/nagios_hostextinfo.cfg
+cfg_file=/etc/nagios3/nagios_hostgroup.cfg
+cfg_file=/etc/nagios3/nagios_hostgroupescalation.cfg
+cfg_file=/etc/nagios3/nagios_service.cfg
+cfg_file=/etc/nagios3/nagios_servicedependency.cfg
+cfg_file=/etc/nagios3/nagios_serviceescalation.cfg
+cfg_file=/etc/nagios3/nagios_serviceextinfo.cfg
+cfg_file=/etc/nagios3/nagios_servicegroup.cfg
+cfg_file=/etc/nagios3/nagios_timeperiod.cfg
+
+# Debian also defaults to using the check commands defined by the debian
+# nagios-plugins package
+cfg_dir=/etc/nagios-plugins/config
+
+
+# OBJECT CACHE FILE
+# This option determines where object definitions are cached when
+# Nagios starts/restarts. The CGIs read object definitions from
+# this cache file (rather than looking at the object config files
+# directly) in order to prevent inconsistencies that can occur
+# when the config files are modified after Nagios starts.
+
+object_cache_file=/var/cache/nagios3/objects.cache
+
+
+
+# PRE-CACHED OBJECT FILE
+# This option determines the location of the precached object file.
+# If you run Nagios with the -p command line option, it will preprocess
+# your object configuration file(s) and write the cached config to this
+# file. You can then start Nagios with the -u option to have it read
+# object definitions from this precached file, rather than the standard
+# object configuration files (see the cfg_file and cfg_dir options above).
+# Using a precached object file can speed up the time needed to (re)start
+# the Nagios process if you've got a large and/or complex configuration.
+# Read the documentation section on optimizing Nagios to find out more
+# about how this feature works.
+
+precached_object_file=/var/lib/nagios3/objects.precache
+
+
+
+# RESOURCE FILE
+# This is an optional resource file that contains $USERx$ macro
+# definitions. Multiple resource files can be specified by using
+# multiple resource_file definitions. The CGIs will not attempt to
+# read the contents of resource files, so information that is
+# considered to be sensitive (usernames, passwords, etc) can be
+# defined as macros in this file and restrictive permissions (600)
+# can be placed on this file.
+
+resource_file=/etc/nagios3/resource.cfg
+
+
+
+# STATUS FILE
+# This is where the current status of all monitored services and
+# hosts is stored. Its contents are read and processed by the CGIs.
+# The contents of the status file are deleted every time Nagios
+# restarts.
+
+status_file=/var/cache/nagios3/status.dat
+
+
+
+# STATUS FILE UPDATE INTERVAL
+# This option determines the frequency (in seconds) that
+# Nagios will periodically dump program, host, and
+# service status data.
+
+status_update_interval=10
+
+
+
+# NAGIOS USER
+# This determines the effective user that Nagios should run as.
+# You can either supply a username or a UID.
+
+nagios_user=nagios
+
+
+
+# NAGIOS GROUP
+# This determines the effective group that Nagios should run as.
+# You can either supply a group name or a GID.
+
+nagios_group=nagios
+
+
+
+# EXTERNAL COMMAND OPTION
+# This option allows you to specify whether or not Nagios should check
+# for external commands (in the command file defined below). By default
+# Nagios will *not* check for external commands, just to be on the
+# cautious side. If you want to be able to use the CGI command interface
+# you will have to enable this.
+# Values: 0 = disable commands, 1 = enable commands
+
+check_external_commands=1
+
+
+
+# EXTERNAL COMMAND CHECK INTERVAL
+# This is the interval at which Nagios should check for external commands.
+# This value works off the interval_length you specify later. If you leave
+# that at its default value of 60 (seconds), a value of 1 here will cause
+# Nagios to check for external commands every minute. If you specify a
+# number followed by an "s" (i.e. 15s), this will be interpreted to mean
+# actual seconds rather than a multiple of the interval_length variable.
+# Note: In addition to reading the external command file at regularly
+# scheduled intervals, Nagios will also check for external commands after
+# event handlers are executed.
+# NOTE: Setting this value to -1 causes Nagios to check the external
+# command file as often as possible.
+
+#command_check_interval=15s
+command_check_interval=-1
+
+
+
+# EXTERNAL COMMAND FILE
+# This is the file that Nagios checks for external command requests.
+# It is also where the command CGI will write commands that are submitted
+# by users, so it must be writeable by the user that the web server
+# is running as (usually 'nobody'). Permissions should be set at the
+# directory level instead of on the file, as the file is deleted every
+# time its contents are processed.
+# Debian Users: In case you didn't read README.Debian yet, _NOW_ is the
+# time to do it.
+
+command_file=/var/lib/nagios3/rw/nagios.cmd
+
+
+
+# EXTERNAL COMMAND BUFFER SLOTS
+# This setting is used to tweak the number of items or "slots" that
+# the Nagios daemon should allocate to the buffer that holds incoming
+# external commands before they are processed. As external commands
+# are processed by the daemon, they are removed from the buffer.
+
+external_command_buffer_slots=4096
+
+
+
+# LOCK FILE
+# This is the lockfile that Nagios will use to store its PID number
+# in when it is running in daemon mode.
+
+lock_file=/var/run/nagios3/nagios3.pid
+
+
+
+# TEMP FILE
+# This is a temporary file that is used as scratch space when Nagios
+# updates the status log, cleans the comment file, etc. This file
+# is created, used, and deleted throughout the time that Nagios is
+# running.
+
+temp_file=/var/cache/nagios3/nagios.tmp
+
+
+
+# TEMP PATH
+# This is path where Nagios can create temp files for service and
+# host check results, etc.
+
+temp_path=/tmp
+
+
+
+# EVENT BROKER OPTIONS
+# Controls what (if any) data gets sent to the event broker.
+# Values: 0 = Broker nothing
+# -1 = Broker everything
+# <other> = See documentation
+
+event_broker_options=-1
+
+
+
+# EVENT BROKER MODULE(S)
+# This directive is used to specify an event broker module that should
+# by loaded by Nagios at startup. Use multiple directives if you want
+# to load more than one module. Arguments that should be passed to
+# the module at startup are separated from the module path by a space.
+#
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+#
+# Do NOT overwrite modules while they are being used by Nagios or Nagios
+# will crash in a fiery display of SEGFAULT glory. This is a bug/limitation
+# either in dlopen(), the kernel, and/or the filesystem. And maybe Nagios...
+#
+# The correct/safe way of updating a module is by using one of these methods:
+# 1. Shutdown Nagios, replace the module file, restart Nagios
+# 2. Delete the original module file, move the new module file into place, restart Nagios
+#
+# Example:
+#
+# broker_module=<modulepath> [moduleargs]
+
+#broker_module=/somewhere/module1.o
+#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
+
+
+
+# LOG ROTATION METHOD
+# This is the log rotation method that Nagios should use to rotate
+# the main log file. Values are as follows..
+# n = None - don't rotate the log
+# h = Hourly rotation (top of the hour)
+# d = Daily rotation (midnight every day)
+# w = Weekly rotation (midnight on Saturday evening)
+# m = Monthly rotation (midnight last day of month)
+
+log_rotation_method=n
+
+
+
+# LOG ARCHIVE PATH
+# This is the directory where archived (rotated) log files should be
+# placed (assuming you've chosen to do log rotation).
+
+log_archive_path=/var/log/nagios3/archives
+
+
+
+# LOGGING OPTIONS
+# If you want messages logged to the syslog facility, as well as the
+# Nagios log file set this option to 1. If not, set it to 0.
+
+use_syslog=0
+
+
+
+# NOTIFICATION LOGGING OPTION
+# If you don't want notifications to be logged, set this value to 0.
+# If notifications should be logged, set the value to 1.
+
+log_notifications=1
+
+
+
+# SERVICE RETRY LOGGING OPTION
+# If you don't want service check retries to be logged, set this value
+# to 0. If retries should be logged, set the value to 1.
+
+log_service_retries=1
+
+
+
+# HOST RETRY LOGGING OPTION
+# If you don't want host check retries to be logged, set this value to
+# 0. If retries should be logged, set the value to 1.
+
+log_host_retries=1
+
+
+
+# EVENT HANDLER LOGGING OPTION
+# If you don't want host and service event handlers to be logged, set
+# this value to 0. If event handlers should be logged, set the value
+# to 1.
+
+log_event_handlers=1
+
+
+
+# INITIAL STATES LOGGING OPTION
+# If you want Nagios to log all initial host and service states to
+# the main log file (the first time the service or host is checked)
+# you can enable this option by setting this value to 1. If you
+# are not using an external application that does long term state
+# statistics reporting, you do not need to enable this option. In
+# this case, set the value to 0.
+
+log_initial_states=0
+
+
+
+# EXTERNAL COMMANDS LOGGING OPTION
+# If you don't want Nagios to log external commands, set this value
+# to 0. If external commands should be logged, set this value to 1.
+# Note: This option does not include logging of passive service
+# checks - see the option below for controlling whether or not
+# passive checks are logged.
+
+log_external_commands=1
+
+
+
+# PASSIVE CHECKS LOGGING OPTION
+# If you don't want Nagios to log passive host and service checks, set
+# this value to 0. If passive checks should be logged, set
+# this value to 1.
+
+log_passive_checks=1
+
+
+
+# GLOBAL HOST AND SERVICE EVENT HANDLERS
+# These options allow you to specify a host and service event handler
+# command that is to be run for every host or service state change.
+# The global event handler is executed immediately prior to the event
+# handler that you have optionally specified in each host or
+# service definition. The command argument is the short name of a
+# command definition that you define in your host configuration file.
+# Read the HTML docs for more information.
+
+#global_host_event_handler=somecommand
+#global_service_event_handler=somecommand
+
+
+
+# SERVICE INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" service checks when it starts monitoring. The
+# default is to use smart delay calculation, which will try to
+# space all service checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)! This is not a
+# good thing for production, but is useful when testing the
+# parallelization functionality.
+# n = None - don't use any delay between checks
+# d = Use a "dumb" delay of 1 second between checks
+# s = Use "smart" inter-check delay calculation
+# x.xx = Use an inter-check delay of x.xx seconds
+
+service_inter_check_delay_method=s
+
+
+
+# MAXIMUM SERVICE CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all services should
+# be completed. Default is 30 minutes.
+
+max_service_check_spread=30
+
+
+
+# SERVICE CHECK INTERLEAVE FACTOR
+# This variable determines how service checks are interleaved.
+# Interleaving the service checks allows for a more even
+# distribution of service checks and reduced load on remote
+# hosts. Setting this value to 1 is equivalent to how versions
+# of Nagios previous to 0.0.5 did service checks. Set this
+# value to s (smart) for automatic calculation of the interleave
+# factor unless you have a specific reason to change it.
+# s = Use "smart" interleave factor calculation
+# x = Use an interleave factor of x, where x is a
+# number greater than or equal to 1.
+
+service_interleave_factor=s
+
+
+
+# HOST INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" host checks when it starts monitoring. The
+# default is to use smart delay calculation, which will try to
+# space all host checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)!
+# n = None - don't use any delay between checks
+# d = Use a "dumb" delay of 1 second between checks
+# s = Use "smart" inter-check delay calculation
+# x.xx = Use an inter-check delay of x.xx seconds
+
+host_inter_check_delay_method=s
+
+
+
+# MAXIMUM HOST CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all hosts should
+# be completed. Default is 30 minutes.
+
+max_host_check_spread=30
+
+
+
+# MAXIMUM CONCURRENT SERVICE CHECKS
+# This option allows you to specify the maximum number of
+# service checks that can be run in parallel at any given time.
+# Specifying a value of 1 for this variable essentially prevents
+# any service checks from being parallelized. A value of 0
+# will not restrict the number of concurrent checks that are
+# being executed.
+
+max_concurrent_checks=0
+
+
+
+# HOST AND SERVICE CHECK REAPER FREQUENCY
+# This is the frequency (in seconds!) that Nagios will process
+# the results of host and service checks.
+
+check_result_reaper_frequency=10
+
+
+
+
+# MAX CHECK RESULT REAPER TIME
+# This is the max amount of time (in seconds) that a single
+# check result reaper event will be allowed to run before
+# returning control back to Nagios so it can perform other
+# duties.
+
+max_check_result_reaper_time=30
+
+
+
+
+# CHECK RESULT PATH
+# This is directory where Nagios stores the results of host and
+# service checks that have not yet been processed.
+#
+# Note: Make sure that only one instance of Nagios has access
+# to this directory!
+
+check_result_path=/var/lib/nagios3/spool/checkresults
+
+
+
+
+# MAX CHECK RESULT FILE AGE
+# This option determines the maximum age (in seconds) which check
+# result files are considered to be valid. Files older than this
+# threshold will be mercilessly deleted without further processing.
+
+max_check_result_file_age=3600
+
+
+
+
+# CACHED HOST CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous host check is considered current.
+# Cached host states (from host checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance in regards to the host check logic.
+# Too high of a value for this option may result in inaccurate host
+# states being used by Nagios, while a lower value may result in a
+# performance hit for host checks. Use a value of 0 to disable host
+# check caching.
+
+cached_host_check_horizon=15
+
+
+
+# CACHED SERVICE CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous service check is considered current.
+# Cached service states (from service checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance in regards to predictive dependency checks.
+# Use a value of 0 to disable service check caching.
+
+cached_service_check_horizon=15
+
+
+
+# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of hosts when it predicts that future dependency logic tests
+# may be needed. These predictive checks can help ensure that your
+# host dependency logic works well.
+# Values:
+# 0 = Disable predictive checks
+# 1 = Enable predictive checks (default)
+
+enable_predictive_host_dependency_checks=1
+
+
+
+# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of services when it predicts that future dependency logic tests
+# may be needed. These predictive checks can help ensure that your
+# service dependency logic works well.
+# Values:
+# 0 = Disable predictive checks
+# 1 = Enable predictive checks (default)
+
+enable_predictive_service_dependency_checks=1
+
+
+
+# SOFT STATE DEPENDENCIES
+# This option determines whether or not Nagios will use soft state
+# information when checking host and service dependencies. Normally
+# Nagios will only use the latest hard host or service state when
+# checking dependencies. If you want it to use the latest state (regardless
+# of whether it's a soft or hard state type), enable this option.
+# Values:
+# 0 = Don't use soft state dependencies (default)
+# 1 = Use soft state dependencies
+
+soft_state_dependencies=0
+
+
+
+# TIME CHANGE ADJUSTMENT THRESHOLDS
+# These options determine when Nagios will react to detected changes
+# in system time (either forward or backwards).
+
+#time_change_threshold=900
+
+
+
+# AUTO-RESCHEDULING OPTION
+# This option determines whether or not Nagios will attempt to
+# automatically reschedule active host and service checks to
+# "smooth" them out over time. This can help balance the load on
+# the monitoring server.
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_reschedule_checks=0
+
+
+
+# AUTO-RESCHEDULING INTERVAL
+# This option determines how often (in seconds) Nagios will
+# attempt to automatically reschedule checks. This option only
+# has an effect if the auto_reschedule_checks option is enabled.
+# Default is 30 seconds.
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_interval=30
+
+
+
+# AUTO-RESCHEDULING WINDOW
+# This option determines the "window" of time (in seconds) that
+# Nagios will look at when automatically rescheduling checks.
+# Only host and service checks that occur in the next X seconds
+# (determined by this variable) will be rescheduled. This option
+# only has an effect if the auto_reschedule_checks option is
+# enabled. Default is 180 seconds (3 minutes).
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_window=180
+
+
+
+# SLEEP TIME
+# This is the number of seconds to sleep between checking for system
+# events and service checks that need to be run.
+
+sleep_time=0.25
+
+
+
+# TIMEOUT VALUES
+# These options control how much time Nagios will allow various
+# types of commands to execute before killing them off. Options
+# are available for controlling maximum time allotted for
+# service checks, host checks, event handlers, notifications, the
+# ocsp command, and performance data commands. All values are in
+# seconds.
+
+service_check_timeout=60
+host_check_timeout=30
+event_handler_timeout=30
+notification_timeout=30
+ocsp_timeout=5
+perfdata_timeout=5
+
+
+
+# RETAIN STATE INFORMATION
+# This setting determines whether or not Nagios will save state
+# information for services and hosts before it shuts down. Upon
+# startup Nagios will reload all saved service and host state
+# information before starting to monitor. This is useful for
+# maintaining long-term data on state statistics, etc, but will
+# slow Nagios down a bit when it (re)starts. Since it's only
+# a one-time penalty, I think it's well worth the additional
+# startup delay.
+
+retain_state_information=1
+
+
+
+# STATE RETENTION FILE
+# This is the file that Nagios should use to store host and
+# service state information before it shuts down. The state
+# information in this file is also read immediately prior to
+# starting to monitor the network when Nagios is restarted.
+# This file is used only if the preserve_state_information
+# variable is set to 1.
+
+state_retention_file=/var/lib/nagios3/retention.dat
+
+
+
+# RETENTION DATA UPDATE INTERVAL
+# This setting determines how often (in minutes) that Nagios
+# will automatically save retention data during normal operation.
+# If you set this value to 0, Nagios will not save retention
+# data at regular interval, but it will still save retention
+# data before shutting down or restarting. If you have disabled
+# state retention, this option has no effect.
+
+retention_update_interval=60
+
+
+
+# USE RETAINED PROGRAM STATE
+# This setting determines whether or not Nagios will set
+# program status variables based on the values saved in the
+# retention file. If you want to use retained program status
+# information, set this value to 1. If not, set this value
+# to 0.
+
+use_retained_program_state=1
+
+
+
+# USE RETAINED SCHEDULING INFO
+# This setting determines whether or not Nagios will retain
+# the scheduling info (next check time) for hosts and services
+# based on the values saved in the retention file. If you
+# If you want to use retained scheduling info, set this
+# value to 1. If not, set this value to 0.
+
+use_retained_scheduling_info=1
+
+
+
+# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
+# The following variables are used to specify specific host and
+# service attributes that should *not* be retained by Nagios during
+# program restarts.
+#
+# The values of the masks are bitwise ORs (sums) of values specified
+# by the "MODATTR_" definitions found in include/common.h.
+# For example, if you do not want the current enabled/disabled state
+# of flap detection and event handlers for hosts to be retained, you
+# would use a value of 24 for the host attribute mask...
+# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
+
+# This mask determines what host attributes are not retained
+retained_host_attribute_mask=0
+
+# This mask determines what service attributes are not retained
+retained_service_attribute_mask=0
+
+# These two masks determine what process attributes are not retained.
+# There are two masks, because some process attributes have host and service
+# options. For example, you can disable active host checks, but leave active
+# service checks enabled.
+retained_process_host_attribute_mask=0
+retained_process_service_attribute_mask=0
+
+# These two masks determine what contact attributes are not retained.
+# There are two masks, because some contact attributes have host and
+# service options. For example, you can disable host notifications for
+# a contact, but leave service notifications enabled for them.
+retained_contact_host_attribute_mask=0
+retained_contact_service_attribute_mask=0
+
+
+
+# INTERVAL LENGTH
+# This is the seconds per unit interval as used in the
+# host/contact/service configuration files. Setting this to 60 means
+# that each interval is one minute long (60 seconds). Other settings
+# have not been tested much, so your mileage is likely to vary...
+
+interval_length=60
+
+
+
+# AGGRESSIVE HOST CHECKING OPTION
+# If you don't want to turn on aggressive host checking features, set
+# this value to 0 (the default). Otherwise set this value to 1 to
+# enable the aggressive check option. Read the docs for more info
+# on what aggressive host check is or check out the source code in
+# base/checks.c
+
+use_aggressive_host_checking=0
+
+
+
+# SERVICE CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# service checks when it initially starts. If this option is
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in. Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of service checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_service_checks=1
+
+
+
+# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# service check results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_service_checks=1
+
+
+
+# HOST CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# host checks when it initially starts. If this option is
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in. Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of host checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_host_checks=1
+
+
+
+# PASSIVE HOST CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# host check results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_host_checks=1
+
+
+
+# NOTIFICATIONS OPTION
+# This determines whether or not Nagios will send out any host or
+# service notifications when it is initially (re)started.
+# Values: 1 = enable notifications, 0 = disable notifications
+
+enable_notifications=1
+
+
+
+# EVENT HANDLER USE OPTION
+# This determines whether or not Nagios will run any host or
+# service event handlers when it is initially (re)started. Unless
+# you're implementing redundant hosts, leave this option enabled.
+# Values: 1 = enable event handlers, 0 = disable event handlers
+
+enable_event_handlers=1
+
+
+
+# PROCESS PERFORMANCE DATA OPTION
+# This determines whether or not Nagios will process performance
+# data returned from service and host checks. If this option is
+# enabled, host performance data will be processed using the
+# host_perfdata_command (defined below) and service performance
+# data will be processed using the service_perfdata_command (also
+# defined below). Read the HTML docs for more information on
+# performance data.
+# Values: 1 = process performance data, 0 = do not process performance data
+
+process_performance_data=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
+# These commands are run after every host and service check is
+# performed. These commands are executed only if the
+# enable_performance_data option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on performance data.
+
+#host_perfdata_command=process-host-perfdata
+#service_perfdata_command=process-service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILES
+# These files are used to store host and service performance data.
+# Performance data is only written to these files if the
+# enable_performance_data option (above) is set to 1.
+
+#host_perfdata_file=/tmp/host-perfdata
+#service_perfdata_file=/tmp/service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
+# These options determine what data is written (and how) to the
+# performance data files. The templates may contain macros, special
+# characters (\t for tab, \r for carriage return, \n for newline)
+# and plain text. A newline is automatically added after each write
+# to the performance data file. Some examples of what you can do are
+# shown below.
+
+#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
+#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE MODES
+# This option determines whether or not the host and service
+# performance data files are opened in write ("w") or append ("a")
+# mode. If you want to use named pipes, you should use the special
+# pipe ("p") mode which avoid blocking at startup, otherwise you will
+# likely want the defult append ("a") mode.
+
+#host_perfdata_file_mode=a
+#service_perfdata_file_mode=a
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
+# These options determine how often (in seconds) the host and service
+# performance data files are processed using the commands defined
+# below. A value of 0 indicates the files should not be periodically
+# processed.
+
+#host_perfdata_file_processing_interval=0
+#service_perfdata_file_processing_interval=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
+# These commands are used to periodically process the host and
+# service performance data files. The interval at which the
+# processing occurs is determined by the options above.
+
+#host_perfdata_file_processing_command=process-host-perfdata-file
+#service_perfdata_file_processing_command=process-service-perfdata-file
+
+
+
+# OBSESS OVER SERVICE CHECKS OPTION
+# This determines whether or not Nagios will obsess over service
+# checks and run the ocsp_command defined below. Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option. Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over services, 0 = do not obsess (default)
+
+obsess_over_services=0
+
+
+
+# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
+# This is the command that is run for every service check that is
+# processed by Nagios. This command is executed only if the
+# obsess_over_services option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ocsp_command=somecommand
+
+
+
+# OBSESS OVER HOST CHECKS OPTION
+# This determines whether or not Nagios will obsess over host
+# checks and run the ochp_command defined below. Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option. Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over hosts, 0 = do not obsess (default)
+
+obsess_over_hosts=0
+
+
+
+# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
+# This is the command that is run for every host check that is
+# processed by Nagios. This command is executed only if the
+# obsess_over_hosts option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ochp_command=somecommand
+
+
+
+# TRANSLATE PASSIVE HOST CHECKS OPTION
+# This determines whether or not Nagios will translate
+# DOWN/UNREACHABLE passive host check results into their proper
+# state for this instance of Nagios. This option is useful
+# if you have distributed or failover monitoring setup. In
+# these cases your other Nagios servers probably have a different
+# "view" of the network, with regards to the parent/child relationship
+# of hosts. If a distributed monitoring server thinks a host
+# is DOWN, it may actually be UNREACHABLE from the point of
+# this Nagios instance. Enabling this option will tell Nagios
+# to translate any DOWN or UNREACHABLE host states it receives
+# passively into the correct state from the view of this server.
+# Values: 1 = perform translation, 0 = do not translate (default)
+
+translate_passive_host_checks=0
+
+
+
+# PASSIVE HOST CHECKS ARE SOFT OPTION
+# This determines whether or not Nagios will treat passive host
+# checks as being HARD or SOFT. By default, a passive host check
+# result will put a host into a HARD state type. This can be changed
+# by enabling this option.
+# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
+
+passive_host_checks_are_soft=0
+
+
+
+# ORPHANED HOST/SERVICE CHECK OPTIONS
+# These options determine whether or not Nagios will periodically
+# check for orphaned host service checks. Since service checks are
+# not rescheduled until the results of their previous execution
+# instance are processed, there exists a possibility that some
+# checks may never get rescheduled. A similar situation exists for
+# host checks, although the exact scheduling details differ a bit
+# from service checks. Orphaned checks seem to be a rare
+# problem and should not happen under normal circumstances.
+# If you have problems with service checks never getting
+# rescheduled, make sure you have orphaned service checks enabled.
+# Values: 1 = enable checks, 0 = disable checks
+
+check_for_orphaned_services=1
+check_for_orphaned_hosts=1
+
+
+
+# SERVICE FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of service results. Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enabled freshness checking, 0 = disable freshness checking
+
+check_service_freshness=1
+
+
+
+# SERVICE FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of service check results. If you have
+# disabled service freshness checking, this option has no effect.
+
+service_freshness_check_interval=60
+
+
+
+# HOST FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of host results. Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enabled freshness checking, 0 = disable freshness checking
+
+check_host_freshness=0
+
+
+
+# HOST FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of host check results. If you have
+# disabled host freshness checking, this option has no effect.
+
+host_freshness_check_interval=60
+
+
+
+
+# ADDITIONAL FRESHNESS THRESHOLD LATENCY
+# This setting determines the number of seconds that Nagios
+# will add to any host and service freshness thresholds that
+# it calculates (those not explicitly specified by the user).
+
+additional_freshness_latency=15
+
+
+
+
+# FLAP DETECTION OPTION
+# This option determines whether or not Nagios will try
+# and detect hosts and services that are "flapping".
+# Flapping occurs when a host or service changes between
+# states too frequently. When Nagios detects that a
+# host or service is flapping, it will temporarily suppress
+# notifications for that host/service until it stops
+# flapping. Flap detection is very experimental, so read
+# the HTML documentation before enabling this feature!
+# Values: 1 = enable flap detection
+# 0 = disable flap detection (default)
+
+enable_flap_detection=1
+
+
+
+# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
+# Read the HTML documentation on flap detection for
+# an explanation of what this option does. This option
+# has no effect if flap detection is disabled.
+
+low_service_flap_threshold=5.0
+high_service_flap_threshold=20.0
+low_host_flap_threshold=5.0
+high_host_flap_threshold=20.0
+
+
+
+# DATE FORMAT OPTION
+# This option determines how short dates are displayed. Valid options
+# include:
+# us (MM-DD-YYYY HH:MM:SS)
+# euro (DD-MM-YYYY HH:MM:SS)
+# iso8601 (YYYY-MM-DD HH:MM:SS)
+# strict-iso8601 (YYYY-MM-DDTHH:MM:SS)
+#
+
+date_format=iso8601
+
+
+
+
+# TIMEZONE OFFSET
+# This option is used to override the default timezone that this
+# instance of Nagios runs in. If not specified, Nagios will use
+# the system configured timezone.
+#
+# NOTE: In order to display the correct timezone in the CGIs, you
+# will also need to alter the Apache directives for the CGI path
+# to include your timezone. Example:
+#
+# <Directory "/usr/local/nagios/sbin/">
+# SetEnv TZ "Australia/Brisbane"
+# ...
+# </Directory>
+
+#use_timezone=US/Mountain
+#use_timezone=Australia/Brisbane
+
+
+
+
+# P1.PL FILE LOCATION
+# This value determines where the p1.pl perl script (used by the
+# embedded Perl interpreter) is located. If you didn't compile
+# Nagios with embedded Perl support, this option has no effect.
+
+p1_file=/usr/lib/nagios3/p1.pl
+
+
+
+# EMBEDDED PERL INTERPRETER OPTION
+# This option determines whether or not the embedded Perl interpreter
+# will be enabled during runtime. This option has no effect if Nagios
+# has not been compiled with support for embedded Perl.
+# Values: 0 = disable interpreter, 1 = enable interpreter
+
+enable_embedded_perl=1
+
+
+
+# EMBEDDED PERL USAGE OPTION
+# This option determines whether or not Nagios will process Perl plugins
+# and scripts with the embedded Perl interpreter if the plugins/scripts
+# do not explicitly indicate whether or not it is okay to do so. Read
+# the HTML documentation on the embedded Perl interpreter for more
+# information on how this option works.
+
+use_embedded_perl_implicitly=1
+
+
+
+# ILLEGAL OBJECT NAME CHARACTERS
+# This option allows you to specify illegal characters that cannot
+# be used in host names, service descriptions, or names of other
+# object types.
+
+illegal_object_name_chars=`~!$%^&*|'"<>?,()=
+
+
+
+# ILLEGAL MACRO OUTPUT CHARACTERS
+# This option allows you to specify illegal characters that are
+# stripped from macros before being used in notifications, event
+# handlers, etc. This DOES NOT affect macros used in service or
+# host check commands.
+# The following macros are stripped of the characters you specify:
+# $HOSTOUTPUT$
+# $HOSTPERFDATA$
+# $HOSTACKAUTHOR$
+# $HOSTACKCOMMENT$
+# $SERVICEOUTPUT$
+# $SERVICEPERFDATA$
+# $SERVICEACKAUTHOR$
+# $SERVICEACKCOMMENT$
+
+illegal_macro_output_chars=`~$&|'"<>
+
+
+
+# REGULAR EXPRESSION MATCHING
+# This option controls whether or not regular expression matching
+# takes place in the object config files. Regular expression
+# matching is used to match host, hostgroup, service, and service
+# group names/descriptions in some fields of various object types.
+# Values: 1 = enable regexp matching, 0 = disable regexp matching
+
+use_regexp_matching=0
+
+
+
+# "TRUE" REGULAR EXPRESSION MATCHING
+# This option controls whether or not "true" regular expression
+# matching takes place in the object config files. This option
+# only has an effect if regular expression matching is enabled
+# (see above). If this option is DISABLED, regular expression
+# matching only occurs if a string contains wildcard characters
+# (* and ?). If the option is ENABLED, regexp matching occurs
+# all the time (which can be annoying).
+# Values: 1 = enable true matching, 0 = disable true matching
+
+use_true_regexp_matching=0
+
+
+
+# ADMINISTRATOR EMAIL/PAGER ADDRESSES
+# The email and pager address of a global administrator (likely you).
+# Nagios never uses these values itself, but you can access them by
+# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
+# commands.
+
+admin_email=root@localhost
+admin_pager=pageroot@localhost
+
+
+
+# DAEMON CORE DUMP OPTION
+# This option determines whether or not Nagios is allowed to create
+# a core dump when it runs as a daemon. Note that it is generally
+# considered bad form to allow this, but it may be useful for
+# debugging purposes. Enabling this option doesn't guarantee that
+# a core file will be produced, but that's just life...
+# Values: 1 - Allow core dumps
+# 0 - Do not allow core dumps (default)
+
+daemon_dumps_core=0
+
+
+
+# LARGE INSTALLATION TWEAKS OPTION
+# This option determines whether or not Nagios will take some shortcuts
+# which can save on memory and CPU usage in large Nagios installations.
+# Read the documentation for more information on the benefits/tradeoffs
+# of enabling this option.
+# Values: 1 - Enabled tweaks
+# 0 - Disable tweaks (default)
+
+use_large_installation_tweaks=0
+
+
+
+# ENABLE ENVIRONMENT MACROS
+# This option determines whether or not Nagios will make all standard
+# macros available as environment variables when host/service checks
+# and system commands (event handlers, notifications, etc.) are
+# executed. Enabling this option can cause performance issues in
+# large installations, as it will consume a bit more memory and (more
+# importantly) consume more CPU.
+# Values: 1 - Enable environment variable macros (default)
+# 0 - Disable environment variable macros
+
+enable_environment_macros=1
+
+
+
+# CHILD PROCESS MEMORY OPTION
+# This option determines whether or not Nagios will free memory in
+# child processes (processes used to execute system commands and host/
+# service checks). If you specify a value here, it will override
+# program defaults.
+# Value: 1 - Free memory in child processes
+# 0 - Do not free memory in child processes
+
+#free_child_process_memory=1
+
+
+
+# CHILD PROCESS FORKING BEHAVIOR
+# This option determines how Nagios will fork child processes
+# (used to execute system commands and host/service checks). Normally
+# child processes are fork()ed twice, which provides a very high level
+# of isolation from problems. Fork()ing once is probably enough and will
+# save a great deal on CPU usage (in large installs), so you might
+# want to consider using this. If you specify a value here, it will
+# override program defaults.
+# Value: 1 - Child processes fork() twice
+# 0 - Child processes fork() just once
+
+#child_processes_fork_twice=1
+
+
+
+# DEBUG LEVEL
+# This option determines how much (if any) debugging information will
+# be written to the debug file. OR values together to log multiple
+# types of information.
+# Values:
+# -1 = Everything
+# 0 = Nothing
+# 1 = Functions
+# 2 = Configuration
+# 4 = Process information
+# 8 = Scheduled events
+# 16 = Host/service checks
+# 32 = Notifications
+# 64 = Event broker
+# 128 = External commands
+# 256 = Commands
+# 512 = Scheduled downtime
+# 1024 = Comments
+# 2048 = Macros
+
+debug_level=0
+
+
+
+# DEBUG VERBOSITY
+# This option determines how verbose the debug log output will be.
+# Values: 0 = Brief output
+# 1 = More detailed
+# 2 = Very detailed
+
+debug_verbosity=1
+
+
+
+# DEBUG FILE
+# This option determines where Nagios should write debugging information.
+
+debug_file=/var/lib/nagios3/nagios.debug
+
+
+
+# MAX DEBUG FILE SIZE
+# This option determines the maximum size (in bytes) of the debug file. If
+# the file grows larger than this size, it will be renamed with a .old
+# extension. If a file already exists with a .old extension it will
+# automatically be deleted. This helps ensure your disk space usage doesn't
+# get out of control when debugging Nagios.
+
+max_debug_file_size=1000000
+
+process_performance_data=1
+service_perfdata_file=/var/lib/nagios3/service-perfdata
+service_perfdata_file_template=DATATYPE::SERVICEPERFDATA\tTIMET::$TIMET$\tHOSTNAME::$HOSTNAME$\tSERVICEDESC::$SERVICEDESC$\tSERVICEPERFDATA::$SERVICEPERFDATA$\tSERVICECHECKCOMMAND::$SERVICECHECKCOMMAND$\tHOSTSTATE::$HOSTSTATE$\tHOSTSTATETYPE::$HOSTSTATETYPE$\tSERVICESTATE::$SERVICESTATE$\tSERVICESTATETYPE::$SERVICESTATETYPE$
+service_perfdata_file_mode=a
+service_perfdata_file_processing_interval=15
+service_perfdata_file_processing_command=process-service-perfdata-file-pnp4nagios-bulk-npcd
+host_perfdata_file=/var/lib/nagios3/host-perfdata
+host_perfdata_file_template=DATATYPE::HOSTPERFDATA\tTIMET::$TIMET$\tHOSTNAME::$HOSTNAME$\tHOSTPERFDATA::$HOSTPERFDATA$\tHOSTCHECKCOMMAND::$HOSTCHECKCOMMAND$\tHOSTSTATE::$HOSTSTATE$\tHOSTSTATETYPE::$HOSTSTATETYPE$
+host_perfdata_file_mode=a
+host_perfdata_file_processing_interval=15
+host_perfdata_file_processing_command=process-host-perfdata-file-pnp4nagios-bulk-npcd
+
diff --git a/puppet/modules/site_nagios/files/plugins/check_last_regex_in_log b/puppet/modules/site_nagios/files/plugins/check_last_regex_in_log
new file mode 100755
index 00000000..47569388
--- /dev/null
+++ b/puppet/modules/site_nagios/files/plugins/check_last_regex_in_log
@@ -0,0 +1,85 @@
+#!/bin/sh
+#
+# depends on nagios-plugins-common for /usr/lib/nagios/plugins/utils.sh
+# this package is installed using leap_platform by the Site_check_mk::Agent::Mrpe
+# class
+
+set -e
+
+usage()
+{
+cat << EOF
+usage: $0 -w <sec> -c <sec> -r <regexp> -f <filename>
+
+OPTIONS:
+ -h Show this message
+ -r <regex> regex to grep for
+ -f <file> logfile to search in
+ -w <sec> warning state after X seconds
+ -c <sec> critical state after x seconds
+
+example: $0 -f /var/log/syslog -r 'tapicero' -w 300 -c 600
+EOF
+}
+
+
+. /usr/lib/nagios/plugins/utils.sh
+
+
+warn=0
+crit=0
+log=''
+regex=''
+
+set -- $(getopt hr:f:w:c: "$@")
+while [ $# -gt 0 ]
+do
+ case "$1" in
+ (-h) usage; exit 0 ;;
+ (-f) log="$2"; shift;;
+ (-r) regex="$2"; shift;;
+ (-w) warn="$2"; shift;;
+ (-c) crit="$2"; shift;;
+ (--) shift; break;;
+ (-*) echo "$0: error - unrecognized option $1" 1>&2; exit 1;;
+ (*) break;;
+ esac
+ shift
+done
+
+[ $warn -eq 0 -o $crit -eq 0 -o -z "$regex" -o -z "$log" ] && ( usage; exit $STATE_UNKNOWN)
+[ -f "$log" ] || (echo "$log doesn't exist"; exit $STATE_UNKNOWN)
+
+lastmsg=$(tac "$log" | grep -i "$regex" | head -1 | sed 's/  / /g' | cut -d' ' -f 1-3)
+
+if [ -z "$lastmsg" ]
+then
+ summary="\"$regex\" in $log was not found"
+ state=$STATE_CRITICAL
+ state_text='CRITICAL'
+ diff_sec=0
+else
+ lastmsg_sec=$(date '+%s' -d "$lastmsg")
+ now_sec=$(date '+%s')
+
+ diff_sec=$(($now_sec - $lastmsg_sec))
+
+ if [ $diff_sec -lt $warn ]; then
+ state=$STATE_OK
+ state_text='OK'
+ elif [ $diff_sec -lt $crit ]; then
+ state=$STATE_WARNING
+ state_text='WARNING'
+ else
+ state=$STATE_CRITICAL
+ state_text='CRITICAL'
+ fi
+
+ summary="Last occurrence of \"$regex\" in $log was $diff_sec sec ago"
+fi
+
+# check_mk_agent output
+# echo "$state Tapicero_Heatbeat sec=$diff_sec;$warn;$crit;0; $state_text - $summary"
+
+echo "${state_text}: $summary | seconds=${diff_sec};$warn;$crit;0;"
+exit $state
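For reference, the plugin can be exercised by hand before wiring it into check_mk; the log path, pattern, and thresholds below are illustrative values only, and the output line follows the final echo above:

    # warn if "tapicero" was last logged more than 300s ago, go critical after 600s
    ./check_last_regex_in_log -f /var/log/syslog -r 'tapicero' -w 300 -c 600
    # typical output:
    #   OK: Last occurrence of "tapicero" in /var/log/syslog was 42 sec ago | seconds=42;300;600;0;
    echo $?   # exit code follows the nagios convention: 0=OK, 1=WARNING, 2=CRITICAL, 3=UNKNOWN
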
diff --git a/puppet/modules/site_nagios/manifests/add_host_services.pp b/puppet/modules/site_nagios/manifests/add_host_services.pp
new file mode 100644
index 00000000..bd968e6f
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/add_host_services.pp
@@ -0,0 +1,32 @@
+define site_nagios::add_host_services (
+ $domain_full_suffix,
+ $domain_internal,
+ $domain_internal_suffix,
+ $ip_address,
+ $services,
+ $ssh_port,
+ $environment,
+ $openvpn_gateway_address='',
+ ) {
+
+ $nagios_hostname = $domain_internal
+
+ # Add Nagios service
+
+ # First, we need to turn the service array into a hash, using a "hash template"
+ # see https://github.com/ashak/puppet-resource-looping
+ $nagios_service_hashpart = {
+ 'hostname' => $nagios_hostname,
+ 'ip_address' => $ip_address,
+ 'openvpn_gw' => $openvpn_gateway_address,
+ 'environment' => $environment
+ }
+ $dynamic_parameters = {
+ 'service' => '%s'
+ }
+ $nagios_servicename = "${nagios_hostname}_%s"
+
+ $nagios_service_hash = create_resources_hash_from($nagios_servicename, $services, $nagios_service_hashpart, $dynamic_parameters)
+
+ create_resources ( site_nagios::add_service, $nagios_service_hash )
+}
diff --git a/puppet/modules/site_nagios/manifests/add_service.pp b/puppet/modules/site_nagios/manifests/add_service.pp
new file mode 100644
index 00000000..72cd038a
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/add_service.pp
@@ -0,0 +1,32 @@
+define site_nagios::add_service (
+ $hostname, $ip_address, $service, $environment, $openvpn_gw = '') {
+
+ $ssh = hiera_hash('ssh')
+ $ssh_port = $ssh['port']
+
+ case $service {
+ 'webapp': {
+ nagios_service {
+ "${name}_ssh":
+ use => 'generic-service',
+ check_command => "check_ssh_port!${ssh_port}",
+ service_description => 'SSH',
+ host_name => $hostname,
+ contact_groups => $environment;
+ "${name}_cert":
+ use => 'generic-service',
+ check_command => 'check_https_cert',
+ service_description => 'Website Certificate',
+ host_name => $hostname,
+ contact_groups => $environment;
+ "${name}_website":
+ use => 'generic-service',
+ check_command => 'check_https',
+ service_description => 'Website',
+ host_name => $hostname,
+ contact_groups => $environment;
+ }
+ }
+ default: {}
+ }
+}
diff --git a/puppet/modules/site_nagios/manifests/init.pp b/puppet/modules/site_nagios/manifests/init.pp
new file mode 100644
index 00000000..f91bfc26
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/init.pp
@@ -0,0 +1,13 @@
+# set up nagios on the monitoring node
+class site_nagios {
+ tag 'leap_service'
+
+ include site_config::default
+
+ Class['site_config::default'] -> Class['site_nagios']
+
+ include site_nagios::server
+
+ # remove leftovers on monitoring nodes
+ include site_config::remove::monitoring
+}
diff --git a/puppet/modules/site_nagios/manifests/plugins.pp b/puppet/modules/site_nagios/manifests/plugins.pp
new file mode 100644
index 00000000..90a01cfb
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/plugins.pp
@@ -0,0 +1,16 @@
+# Deploy generic plugins useful to all nodes
+# nagios::plugin can't be used to deploy a plugin here
+# because it fails with:
+# Could not find dependency Package[nagios-plugins] …
+# at /srv/leap/puppet/modules/nagios/manifests/plugin.pp:18
+class site_nagios::plugins {
+
+ file { [
+ '/usr/local/lib', '/usr/local/lib/nagios',
+ '/usr/local/lib/nagios/plugins' ]:
+ ensure => directory;
+ '/usr/local/lib/nagios/plugins/check_last_regex_in_log':
+ source => 'puppet:///modules/site_nagios/plugins/check_last_regex_in_log',
+ mode => '0755';
+ }
+}
diff --git a/puppet/modules/site_nagios/manifests/server.pp b/puppet/modules/site_nagios/manifests/server.pp
new file mode 100644
index 00000000..6537124d
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server.pp
@@ -0,0 +1,97 @@
+# configures nagios on monitoring node
+# lint:ignore:inherits_across_namespaces
+class site_nagios::server inherits nagios::base {
+# lint:endignore
+
+ $nagios_hiera = hiera('nagios')
+ $nagiosadmin_pw = htpasswd_sha1($nagios_hiera['nagiosadmin_pw'])
+ $nagios_hosts = $nagios_hiera['hosts']
+ $nagios_contacts = hiera('contacts')
+ $environment = $nagios_hiera['environments']
+
+ include nagios::base
+ include nagios::defaults::commands
+ include nagios::defaults::templates
+ include nagios::defaults::timeperiods
+ include nagios::pnp4nagios
+ include nagios::pnp4nagios::popup
+
+ class { 'nagios':
+ # don't manage the apache class from nagios, because we already include
+ # it in site_apache::common
+ httpd => 'absent',
+ allow_external_cmd => true,
+ storeconfigs => false,
+ }
+
+ # Delete nagios config files provided by packages
+ # These don't get parsed by nagios.conf, but are
+ # still irritating duplicates to the real config
+ # files deployed by puppet in /etc/nagios3/
+ file { [
+ '/etc/nagios3/conf.d/contacts_nagios2.cfg',
+ '/etc/nagios3/conf.d/extinfo_nagios2.cfg',
+ '/etc/nagios3/conf.d/generic-host_nagios2.cfg',
+ '/etc/nagios3/conf.d/generic-service_nagios2.cfg',
+ '/etc/nagios3/conf.d/hostgroups_nagios2.cfg',
+ '/etc/nagios3/conf.d/localhost_nagios2.cfg',
+ '/etc/nagios3/conf.d/pnp4nagios.cfg',
+ '/etc/nagios3/conf.d/services_nagios2.cfg',
+ '/etc/nagios3/conf.d/timeperiods_nagios2.cfg' ]:
+ ensure => absent;
+ }
+
+ # deploy apache nagios3 config
+ # until https://gitlab.com/shared-puppet-modules-group/apache/issues/11
+ # is fixed, we need to manually deploy the config file
+ file {
+ '/etc/apache2/conf-available/nagios3.conf':
+ ensure => present,
+ source => 'puppet:///modules/nagios/configs/apache2.conf',
+ require => [ Package['nagios3'], Package['apache2'] ];
+ '/etc/apache2/conf-enabled/nagios3.conf':
+ ensure => link,
+ target => '/etc/apache2/conf-available/nagios3.conf',
+ require => [ Package['nagios3'], Package['apache2'] ];
+ }
+
+ include site_apache::common
+ include site_webapp::common_vhost
+ include apache::module::headers
+
+ File['nagios_htpasswd'] {
+ source => undef,
+ content => "nagiosadmin:${nagiosadmin_pw}",
+ mode => '0640',
+ }
+
+
+ # deploy serverside plugins
+ file { '/usr/lib/nagios/plugins/check_openvpn_server.pl':
+ source => 'puppet:///modules/nagios/plugins/check_openvpn_server.pl',
+ mode => '0755',
+ owner => 'nagios',
+ group => 'nagios',
+ require => Package['nagios-plugins'];
+ }
+
+ create_resources ( site_nagios::add_host_services, $nagios_hosts )
+
+ include site_nagios::server::apache
+ include site_check_mk::server
+ include site_shorewall::monitor
+ include site_nagios::server::icli
+
+ augeas {
+ 'logrotate_nagios':
+ context => '/files/etc/logrotate.d/nagios/rule',
+ changes => [ 'set file /var/log/nagios3/nagios.log', 'set rotate 7',
+ 'set schedule daily', 'set compress compress',
+ 'set missingok missingok', 'set ifempty notifempty',
+ 'set copytruncate copytruncate' ]
+ }
+
+ create_resources ( site_nagios::server::hostgroup, $environment )
+ create_resources ( site_nagios::server::contactgroup, $environment )
+ create_resources ( site_nagios::server::add_contacts, $environment )
+}
diff --git a/puppet/modules/site_nagios/manifests/server/add_contacts.pp b/puppet/modules/site_nagios/manifests/server/add_contacts.pp
new file mode 100644
index 00000000..b5c6f0a5
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/add_contacts.pp
@@ -0,0 +1,18 @@
+# configure a nagios_contact
+define site_nagios::server::add_contacts ($contact_emails) {
+
+ $environment = $name
+
+ nagios_contact {
+ $environment:
+ alias => $environment,
+ service_notification_period => '24x7',
+ host_notification_period => '24x7',
+ service_notification_options => 'w,u,c,r',
+ host_notification_options => 'd,r',
+ service_notification_commands => 'notify-service-by-email',
+ host_notification_commands => 'notify-host-by-email',
+ email => join($contact_emails, ', '),
+ require => Package['nagios']
+ }
+}
diff --git a/puppet/modules/site_nagios/manifests/server/apache.pp b/puppet/modules/site_nagios/manifests/server/apache.pp
new file mode 100644
index 00000000..82962e89
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/apache.pp
@@ -0,0 +1,25 @@
+# set up apache for nagios
+class site_nagios::server::apache {
+
+ include x509::variables
+
+ include site_config::x509::commercial::cert
+ include site_config::x509::commercial::key
+ include site_config::x509::commercial::ca
+
+ include apache::module::authn_file
+ # "AuthUserFile"
+ include apache::module::authz_user
+ # "AuthType Basic"
+ include apache::module::auth_basic
+ # "DirectoryIndex"
+ include apache::module::dir
+ include apache::module::php5
+ include apache::module::cgi
+
+ # apache >= 2.4, debian jessie
+ if ( $::lsbdistcodename == 'jessie' ) {
+ include apache::module::authn_core
+ }
+
+}
diff --git a/puppet/modules/site_nagios/manifests/server/contactgroup.pp b/puppet/modules/site_nagios/manifests/server/contactgroup.pp
new file mode 100644
index 00000000..5e60dd06
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/contactgroup.pp
@@ -0,0 +1,8 @@
+# configure a contactgroup
+define site_nagios::server::contactgroup ($contact_emails) {
+
+ nagios_contactgroup { $name:
+ members => $name,
+ require => Package['nagios']
+ }
+}
diff --git a/puppet/modules/site_nagios/manifests/server/hostgroup.pp b/puppet/modules/site_nagios/manifests/server/hostgroup.pp
new file mode 100644
index 00000000..0692fced
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/hostgroup.pp
@@ -0,0 +1,7 @@
+# create a nagios hostgroup
+define site_nagios::server::hostgroup ($contact_emails) {
+ nagios_hostgroup { $name:
+ ensure => present,
+ require => Package['nagios']
+ }
+}
diff --git a/puppet/modules/site_nagios/manifests/server/icli.pp b/puppet/modules/site_nagios/manifests/server/icli.pp
new file mode 100644
index 00000000..26fba725
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/icli.pp
@@ -0,0 +1,26 @@
+# Install icli package and configure ncli aliases
+class site_nagios::server::icli {
+ $nagios_hiera = hiera('nagios')
+ $environments = $nagios_hiera['environments']
+
+ package { 'icli':
+ ensure => installed;
+ }
+
+ file { '/root/.bashrc':
+ ensure => present;
+ }
+
+ file_line { 'icli aliases':
+ path => '/root/.bashrc',
+ line => 'source /root/.icli_aliases';
+ }
+
+ file { '/root/.icli_aliases':
+ content => template("${module_name}/icli_aliases.erb"),
+ mode => '0644',
+ owner => root,
+ group => 0,
+ require => Package['icli'];
+ }
+} \ No newline at end of file
diff --git a/puppet/modules/site_nagios/templates/icli_aliases.erb b/puppet/modules/site_nagios/templates/icli_aliases.erb
new file mode 100644
index 00000000..bcb2abb0
--- /dev/null
+++ b/puppet/modules/site_nagios/templates/icli_aliases.erb
@@ -0,0 +1,7 @@
+alias ncli='icli -c /var/cache/nagios3/objects.cache -f /var/cache/nagios3/status.dat -F /var/lib/nagios3/rw/nagios.cmd'
+alias ncli_problems='ncli -z '!o,!A''
+
+<% @environments.keys.sort.each do |env_name| %>
+alias ncli_<%= env_name %>='ncli -z '!o,!A' -g <%= env_name %>'
+alias ncli_<%= env_name %>_recheck='ncli -s Check_MK -g <%= env_name %> -a R'
+<% end -%>
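For reference, with a single hypothetical environment named "production", the template above renders roughly to:

    alias ncli='icli -c /var/cache/nagios3/objects.cache -f /var/cache/nagios3/status.dat -F /var/lib/nagios3/rw/nagios.cmd'
    alias ncli_problems='ncli -z '!o,!A''
    alias ncli_production='ncli -z '!o,!A' -g production'
    alias ncli_production_recheck='ncli -s Check_MK -g production -a R'
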
diff --git a/puppet/modules/site_nickserver/manifests/init.pp b/puppet/modules/site_nickserver/manifests/init.pp
new file mode 100644
index 00000000..eb4415e7
--- /dev/null
+++ b/puppet/modules/site_nickserver/manifests/init.pp
@@ -0,0 +1,178 @@
+#
+# TODO: currently, this is dependent on some things that are set up in
+# site_webapp
+#
+# (1) HAProxy -> couchdb
+# (2) Apache
+#
+# It would be good in the future to make nickserver installable independently of
+# site_webapp.
+#
+
+class site_nickserver {
+ tag 'leap_service'
+ Class['site_config::default'] -> Class['site_nickserver']
+
+ include site_config::ruby::dev
+
+ #
+ # VARIABLES
+ #
+
+ $nickserver = hiera('nickserver')
+ $nickserver_domain = $nickserver['domain']
+ $couchdb_user = $nickserver['couchdb_nickserver_user']['username']
+ $couchdb_password = $nickserver['couchdb_nickserver_user']['password']
+
+ # the port that the public connects to (should be 6425)
+ $nickserver_port = $nickserver['port']
+ # the port that nickserver is actually running on
+ $nickserver_local_port = '64250'
+
+ # couchdb is available on localhost via haproxy, which is bound to 4096.
+ $couchdb_host = 'localhost'
+ # See site_webapp/templates/haproxy_couchdb.cfg.erb
+ $couchdb_port = '4096'
+
+ $sources = hiera('sources')
+
+ # temporarily for now:
+ $domain = hiera('domain')
+ $address_domain = $domain['full_suffix']
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+
+ #
+ # USER AND GROUP
+ #
+
+ group { 'nickserver':
+ ensure => present,
+ allowdupe => false;
+ }
+
+ user { 'nickserver':
+ ensure => present,
+ allowdupe => false,
+ gid => 'nickserver',
+ home => '/srv/leap/nickserver',
+ require => Group['nickserver'];
+ }
+
+ vcsrepo { '/srv/leap/nickserver':
+ ensure => present,
+ revision => $sources['nickserver']['revision'],
+ provider => $sources['nickserver']['type'],
+ source => $sources['nickserver']['source'],
+ owner => 'nickserver',
+ group => 'nickserver',
+ require => [ User['nickserver'], Group['nickserver'] ],
+ notify => Exec['nickserver_bundler_update'];
+ }
+
+ exec { 'nickserver_bundler_update':
+ cwd => '/srv/leap/nickserver',
+ command => '/bin/bash -c "/usr/bin/bundle check || /usr/bin/bundle install --path vendor/bundle"',
+ unless => '/usr/bin/bundle check',
+ user => 'nickserver',
+ timeout => 600,
+ require => [
+ Class['bundler::install'], Vcsrepo['/srv/leap/nickserver'],
+ Package['libssl-dev'], Class['site_config::ruby::dev'] ],
+
+ notify => Service['nickserver'];
+ }
+
+ #
+ # NICKSERVER CONFIG
+ #
+
+ file { '/etc/nickserver.yml':
+ content => template('site_nickserver/nickserver.yml.erb'),
+ owner => nickserver,
+ group => nickserver,
+ mode => '0600',
+ notify => Service['nickserver'];
+ }
+
+ #
+ # NICKSERVER DAEMON
+ #
+
+ file {
+ '/usr/bin/nickserver':
+ ensure => link,
+ target => '/srv/leap/nickserver/bin/nickserver',
+ require => Vcsrepo['/srv/leap/nickserver'];
+
+ '/etc/init.d/nickserver':
+ owner => root,
+ group => 0,
+ mode => '0755',
+ source => '/srv/leap/nickserver/dist/debian-init-script',
+ require => Vcsrepo['/srv/leap/nickserver'];
+ }
+
+ # register the init script with systemd on nodes newer than wheezy
+ # see https://leap.se/code/issues/7614
+ case $::operatingsystemrelease {
+ /^7.*/: { }
+ default: {
+ exec { 'register_systemd_nickserver':
+ refreshonly => true,
+ command => '/bin/systemctl enable nickserver',
+ subscribe => File['/etc/init.d/nickserver'],
+ before => Service['nickserver'];
+ }
+ }
+ }
+
+ service { 'nickserver':
+ ensure => running,
+ enable => true,
+ hasrestart => true,
+ hasstatus => true,
+ require => [
+ File['/etc/init.d/nickserver'],
+ File['/usr/bin/nickserver'],
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ];
+ }
+
+ #
+ # FIREWALL
+ # poke a hole in the firewall to allow nickserver requests
+ #
+
+ file { '/etc/shorewall/macro.nickserver':
+ content => "PARAM - - tcp ${nickserver_port}",
+ notify => Service['shorewall'],
+ require => Package['shorewall'];
+ }
+
+ shorewall::rule { 'net2fw-nickserver':
+ source => 'net',
+ destination => '$FW',
+ action => 'nickserver(ACCEPT)',
+ order => 200;
+ }
+
+ #
+ # APACHE REVERSE PROXY
+ # nickserver doesn't speak TLS natively, let Apache handle that.
+ #
+
+ apache::module {
+ 'proxy': ensure => present;
+ 'proxy_http': ensure => present
+ }
+
+ apache::vhost::file {
+ 'nickserver':
+ content => template('site_nickserver/nickserver-proxy.conf.erb')
+ }
+
+}
diff --git a/puppet/modules/site_nickserver/templates/nickserver-proxy.conf.erb b/puppet/modules/site_nickserver/templates/nickserver-proxy.conf.erb
new file mode 100644
index 00000000..8f59fe38
--- /dev/null
+++ b/puppet/modules/site_nickserver/templates/nickserver-proxy.conf.erb
@@ -0,0 +1,19 @@
+#
+# Apache reverse proxy configuration for the Nickserver
+#
+
+Listen 0.0.0.0:<%= @nickserver_port -%>
+
+<VirtualHost *:<%= @nickserver_port -%>>
+ ServerName <%= @nickserver_domain %>
+ ServerAlias <%= @address_domain %>
+
+ SSLCACertificatePath /etc/ssl/certs
+ SSLCertificateKeyFile <%= scope.lookupvar('x509::variables::keys') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.key
+ SSLCertificateFile <%= scope.lookupvar('x509::variables::certs') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.crt
+
+ Include include.d/ssl_common.inc
+
+ ProxyPass / http://localhost:<%= @nickserver_local_port %>/
+ # preserve the Host header in the proxied HTTP request
+ ProxyPreserveHost On
+</VirtualHost>
diff --git a/puppet/modules/site_nickserver/templates/nickserver.yml.erb b/puppet/modules/site_nickserver/templates/nickserver.yml.erb
new file mode 100644
index 00000000..e717cbaa
--- /dev/null
+++ b/puppet/modules/site_nickserver/templates/nickserver.yml.erb
@@ -0,0 +1,19 @@
+#
+# configuration for nickserver.
+#
+
+domain: "<%= @address_domain %>"
+
+couch_host: "<%= @couchdb_host %>"
+couch_port: <%= @couchdb_port %>
+couch_database: "identities"
+couch_user: "<%= @couchdb_user %>"
+couch_password: "<%= @couchdb_password %>"
+
+hkp_url: "https://hkps.pool.sks-keyservers.net:/pks/lookup"
+
+user: "nickserver"
+port: <%= @nickserver_local_port %>
+pid_file: "/var/run/nickserver"
+log_file: "/var/log/nickserver.log"
+
diff --git a/puppet/modules/site_obfsproxy/README b/puppet/modules/site_obfsproxy/README
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/puppet/modules/site_obfsproxy/README
diff --git a/puppet/modules/site_obfsproxy/manifests/init.pp b/puppet/modules/site_obfsproxy/manifests/init.pp
new file mode 100644
index 00000000..2ed5ec9e
--- /dev/null
+++ b/puppet/modules/site_obfsproxy/manifests/init.pp
@@ -0,0 +1,38 @@
+class site_obfsproxy {
+ tag 'leap_service'
+ Class['site_config::default'] -> Class['site_obfsproxy']
+
+ $transport = 'scramblesuit'
+
+ $obfsproxy = hiera('obfsproxy')
+ $scramblesuit = $obfsproxy['scramblesuit']
+ $scram_pass = $scramblesuit['password']
+ $scram_port = $scramblesuit['port']
+ $dest_ip = $obfsproxy['gateway_address']
+ $dest_port = '443'
+
+ if member($::services, 'openvpn') {
+ $openvpn = hiera('openvpn')
+ $bind_address = $openvpn['gateway_address']
+ }
+ elsif member($::services, 'obfsproxy') {
+ $bind_address = hiera('ip_address')
+ }
+
+ include site_config::default
+
+ class { 'obfsproxy':
+ transport => $transport,
+ bind_address => $bind_address,
+ port => $scram_port,
+ param => $scram_pass,
+ dest_ip => $dest_ip,
+ dest_port => $dest_port,
+ }
+
+ include site_shorewall::obfsproxy
+
+}
+
+
+
diff --git a/puppet/modules/site_openvpn/README b/puppet/modules/site_openvpn/README
new file mode 100644
index 00000000..cef5be23
--- /dev/null
+++ b/puppet/modules/site_openvpn/README
@@ -0,0 +1,20 @@
+Place to look when debugging problems
+========================================
+
+Log files:
+
+ openvpn: /var/log/syslog
+ shorewall: /var/log/syslog
+ shorewall startup: /var/log/shorewall-init.log
+
+Check NAT masq:
+
+ iptables -t nat --list-rules
+
+Check interfaces:
+
+ ip addr ls
+
+Scripts:
+
+ /usr/local/bin/add_gateway_ips.sh \ No newline at end of file
diff --git a/puppet/modules/site_openvpn/manifests/dh_key.pp b/puppet/modules/site_openvpn/manifests/dh_key.pp
new file mode 100644
index 00000000..13cc0f5b
--- /dev/null
+++ b/puppet/modules/site_openvpn/manifests/dh_key.pp
@@ -0,0 +1,10 @@
+class site_openvpn::dh_key {
+
+ $x509_config = hiera('x509')
+
+ file { '/etc/openvpn/keys/dh.pem':
+ content => $x509_config['dh'],
+ mode => '0644',
+ }
+
+}
diff --git a/puppet/modules/site_openvpn/manifests/init.pp b/puppet/modules/site_openvpn/manifests/init.pp
new file mode 100644
index 00000000..f1ecefb9
--- /dev/null
+++ b/puppet/modules/site_openvpn/manifests/init.pp
@@ -0,0 +1,238 @@
+#
+# An openvpn gateway can support three modes:
+#
+# (1) limited and unlimited
+# (2) unlimited only
+# (3) limited only
+#
+# The difference is that 'unlimited' gateways only allow client certs that match
+# the 'unlimited_prefix', and 'limited' gateways only allow certs that match the
+# 'limited_prefix'.
+#
+# We potentially create four openvpn config files (thus four daemons):
+#
+# (1) unlimited + tcp => tcp_config.conf
+# (2) unlimited + udp => udp_config.conf
+# (3) limited + tcp => limited_tcp_config.conf
+# (4) limited + udp => limited_udp_config.conf
+#
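+# As an illustrative example (not a default setup): a gateway that enables
+# both 'allow_unlimited' and 'allow_limited' ends up with all four files and
+# four openvpn daemons, while a limited-only gateway gets only the two
+# limited_*_config.conf files and the unlimited ones are tidied away.
+#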
+
+class site_openvpn {
+ tag 'leap_service'
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca_bundle
+
+ include site_config::default
+ Class['site_config::default'] -> Class['site_openvpn']
+
+ include ::site_obfsproxy
+
+ $openvpn = hiera('openvpn')
+ $openvpn_ports = $openvpn['ports']
+ $openvpn_config = $openvpn['configuration']
+
+ if $::ec2_instance_id {
+ $openvpn_gateway_address = $::ipaddress
+ } else {
+ $openvpn_gateway_address = $openvpn['gateway_address']
+ if $openvpn['second_gateway_address'] {
+ $openvpn_second_gateway_address = $openvpn['second_gateway_address']
+ } else {
+ $openvpn_second_gateway_address = undef
+ }
+ }
+
+ $openvpn_allow_unlimited = $openvpn['allow_unlimited']
+ $openvpn_unlimited_prefix = $openvpn['unlimited_prefix']
+ $openvpn_unlimited_tcp_network_prefix = '10.41.0'
+ $openvpn_unlimited_tcp_netmask = '255.255.248.0'
+ $openvpn_unlimited_tcp_cidr = '21'
+ $openvpn_unlimited_udp_network_prefix = '10.42.0'
+ $openvpn_unlimited_udp_netmask = '255.255.248.0'
+ $openvpn_unlimited_udp_cidr = '21'
+
+ if !$::ec2_instance_id {
+ $openvpn_allow_limited = $openvpn['allow_limited']
+ $openvpn_limited_prefix = $openvpn['limited_prefix']
+ $openvpn_rate_limit = $openvpn['rate_limit']
+ $openvpn_limited_tcp_network_prefix = '10.43.0'
+ $openvpn_limited_tcp_netmask = '255.255.248.0'
+ $openvpn_limited_tcp_cidr = '21'
+ $openvpn_limited_udp_network_prefix = '10.44.0'
+ $openvpn_limited_udp_netmask = '255.255.248.0'
+ $openvpn_limited_udp_cidr = '21'
+ }
+
+ # find out the netmask in CIDR format of the primary interface
+ # (thanks to https://blog.kumina.nl/tag/puppet-tips-and-tricks/);
+ # we can do this using an inline_template:
+ $factname_primary_netmask = "netmask_cidr_${::site_config::params::interface}"
+ $primary_netmask = inline_template('<%= scope.lookupvar(@factname_primary_netmask) %>')
+
+ # deploy dh keys
+ include site_openvpn::dh_key
+
+ if $openvpn_allow_unlimited and $openvpn_allow_limited {
+ $unlimited_gateway_address = $openvpn_gateway_address
+ $limited_gateway_address = $openvpn_second_gateway_address
+ } elsif $openvpn_allow_unlimited {
+ $unlimited_gateway_address = $openvpn_gateway_address
+ $limited_gateway_address = undef
+ } elsif $openvpn_allow_limited {
+ $unlimited_gateway_address = undef
+ $limited_gateway_address = $openvpn_gateway_address
+ }
+
+ if $openvpn_allow_unlimited {
+ site_openvpn::server_config { 'tcp_config':
+ port => '1194',
+ proto => 'tcp',
+ local => $unlimited_gateway_address,
+ tls_remote => "\"${openvpn_unlimited_prefix}\"",
+ server => "${openvpn_unlimited_tcp_network_prefix}.0 ${openvpn_unlimited_tcp_netmask}",
+ push => "\"dhcp-option DNS ${openvpn_unlimited_tcp_network_prefix}.1\"",
+ management => '127.0.0.1 1000',
+ config => $openvpn_config
+ }
+ site_openvpn::server_config { 'udp_config':
+ port => '1194',
+ proto => 'udp',
+ local => $unlimited_gateway_address,
+ tls_remote => "\"${openvpn_unlimited_prefix}\"",
+ server => "${openvpn_unlimited_udp_network_prefix}.0 ${openvpn_unlimited_udp_netmask}",
+ push => "\"dhcp-option DNS ${openvpn_unlimited_udp_network_prefix}.1\"",
+ management => '127.0.0.1 1001',
+ config => $openvpn_config
+ }
+ } else {
+ tidy { '/etc/openvpn/tcp_config.conf': }
+ tidy { '/etc/openvpn/udp_config.conf': }
+ }
+
+ if $openvpn_allow_limited {
+ site_openvpn::server_config { 'limited_tcp_config':
+ port => '1194',
+ proto => 'tcp',
+ local => $limited_gateway_address,
+ tls_remote => "\"${openvpn_limited_prefix}\"",
+ server => "${openvpn_limited_tcp_network_prefix}.0 ${openvpn_limited_tcp_netmask}",
+ push => "\"dhcp-option DNS ${openvpn_limited_tcp_network_prefix}.1\"",
+ management => '127.0.0.1 1002',
+ config => $openvpn_config
+ }
+ site_openvpn::server_config { 'limited_udp_config':
+ port => '1194',
+ proto => 'udp',
+ local => $limited_gateway_address,
+ tls_remote => "\"${openvpn_limited_prefix}\"",
+ server => "${openvpn_limited_udp_network_prefix}.0 ${openvpn_limited_udp_netmask}",
+ push => "\"dhcp-option DNS ${openvpn_limited_udp_network_prefix}.1\"",
+ management => '127.0.0.1 1003',
+ config => $openvpn_config
+ }
+ } else {
+ tidy { '/etc/openvpn/limited_tcp_config.conf': }
+ tidy { '/etc/openvpn/limited_udp_config.conf': }
+ }
+
+ file {
+ '/usr/local/bin/add_gateway_ips.sh':
+ content => template('site_openvpn/add_gateway_ips.sh.erb'),
+ mode => '0755';
+ }
+
+ exec { '/usr/local/bin/add_gateway_ips.sh':
+ subscribe => File['/usr/local/bin/add_gateway_ips.sh'],
+ }
+
+ exec { 'restart_openvpn':
+ command => '/etc/init.d/openvpn restart',
+ refreshonly => true,
+ subscribe => [
+ File['/etc/openvpn'],
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca_bundle'] ],
+ require => [
+ Package['openvpn'],
+ File['/etc/openvpn'],
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca_bundle'] ];
+ }
+
+ cron { 'add_gateway_ips.sh':
+ command => '/usr/local/bin/add_gateway_ips.sh',
+ user => 'root',
+ special => 'reboot',
+ }
+
+ # setup the resolver to listen on the vpn IP
+ include site_openvpn::resolver
+
+ include site_shorewall::eip
+
+ package {
+ 'openvpn': ensure => latest
+ }
+
+ service {
+ 'openvpn':
+ ensure => running,
+ hasrestart => true,
+ hasstatus => true,
+ require => [
+ Package['openvpn'],
+ Exec['concat_/etc/default/openvpn'] ];
+ }
+
+ file {
+ '/etc/openvpn':
+ ensure => directory,
+ notify => Exec['restart_openvpn'],
+ require => Package['openvpn'];
+ }
+
+ file {
+ '/etc/openvpn/keys':
+ ensure => directory,
+ require => Package['openvpn'];
+ }
+
+ concat {
+ '/etc/default/openvpn':
+ owner => root,
+ group => root,
+ mode => '0644',
+ warn => true,
+ notify => Service['openvpn'];
+ }
+
+ concat::fragment {
+ 'openvpn.default.header':
+ content => template('openvpn/etc-default-openvpn.erb'),
+ target => '/etc/default/openvpn',
+ order => 01;
+ }
+
+ concat::fragment {
+ "openvpn.default.autostart.${name}":
+ content => 'AUTOSTART=all',
+ target => '/etc/default/openvpn',
+ order => 10;
+ }
+
+ leap::logfile { 'openvpn_tcp': }
+ leap::logfile { 'openvpn_udp': }
+
+ # Because we currently do not support ipv6 and instead block it (so no leaks
+ # happen), we get a large number of these messages, so we ignore them (#6540)
+ rsyslog::snippet { '01-ignore_icmpv6_send':
+ content => ':msg, contains, "icmpv6_send: no reply to icmp error" ~'
+ }
+
+ include site_check_mk::agent::openvpn
+
+}
diff --git a/puppet/modules/site_openvpn/manifests/resolver.pp b/puppet/modules/site_openvpn/manifests/resolver.pp
new file mode 100644
index 00000000..cea0153a
--- /dev/null
+++ b/puppet/modules/site_openvpn/manifests/resolver.pp
@@ -0,0 +1,50 @@
+class site_openvpn::resolver {
+
+ if $site_openvpn::openvpn_allow_unlimited {
+ $ensure_unlimited = 'present'
+ file {
+ '/etc/unbound/unbound.conf.d/vpn_unlimited_udp_resolver.conf':
+ content => "server:\n\tinterface: ${site_openvpn::openvpn_unlimited_udp_network_prefix}.1\n\taccess-control: ${site_openvpn::openvpn_unlimited_udp_network_prefix}.0/${site_openvpn::openvpn_unlimited_udp_cidr} allow\n",
+ owner => root,
+ group => root,
+ mode => '0644',
+ require => [ Class['site_config::caching_resolver'], Service['openvpn'] ],
+ notify => Service['unbound'];
+ '/etc/unbound/unbound.conf.d/vpn_unlimited_tcp_resolver.conf':
+ content => "server:\n\tinterface: ${site_openvpn::openvpn_unlimited_tcp_network_prefix}.1\n\taccess-control: ${site_openvpn::openvpn_unlimited_tcp_network_prefix}.0/${site_openvpn::openvpn_unlimited_tcp_cidr} allow\n",
+ owner => root,
+ group => root,
+ mode => '0644',
+ require => [ Class['site_config::caching_resolver'], Service['openvpn'] ],
+ notify => Service['unbound'];
+ }
+ } else {
+ $ensure_unlimited = 'absent'
+ tidy { '/etc/unbound/unbound.conf.d/vpn_unlimited_udp_resolver.conf': }
+ tidy { '/etc/unbound/unbound.conf.d/vpn_unlimited_tcp_resolver.conf': }
+ }
+
+ if $site_openvpn::openvpn_allow_limited {
+ $ensure_limited = 'present'
+ file {
+ '/etc/unbound/unbound.conf.d/vpn_limited_udp_resolver.conf':
+ content => "server:\n\tinterface: ${site_openvpn::openvpn_limited_udp_network_prefix}.1\n\taccess-control: ${site_openvpn::openvpn_limited_udp_network_prefix}.0/${site_openvpn::openvpn_limited_udp_cidr} allow\n",
+ owner => root,
+ group => root,
+ mode => '0644',
+ require => [ Class['site_config::caching_resolver'], Service['openvpn'] ],
+ notify => Service['unbound'];
+ '/etc/unbound/unbound.conf.d/vpn_limited_tcp_resolver.conf':
+ content => "server\n\tinterface: ${site_openvpn::openvpn_limited_tcp_network_prefix}.1\n\taccess-control: ${site_openvpn::openvpn_limited_tcp_network_prefix}.0/${site_openvpn::openvpn_limited_tcp_cidr} allow\n",
+ owner => root,
+ group => root,
+ mode => '0644',
+ require => [ Class['site_config::caching_resolver'], Service['openvpn'] ],
+ notify => Service['unbound'];
+ }
+ } else {
+ $ensure_limited = 'absent'
+ tidy { '/etc/unbound/unbound.conf.d/vpn_limited_udp_resolver.conf': }
+ tidy { '/etc/unbound/unbound.conf.d/vpn_limited_tcp_resolver.conf': }
+ }
+}
diff --git a/puppet/modules/site_openvpn/manifests/server_config.pp b/puppet/modules/site_openvpn/manifests/server_config.pp
new file mode 100644
index 00000000..15e6fb38
--- /dev/null
+++ b/puppet/modules/site_openvpn/manifests/server_config.pp
@@ -0,0 +1,228 @@
+#
+# Cipher discussion
+# ================================
+#
+# We want to specify explicit values for the crypto options to prevent a MiTM from forcing
+# a weaker cipher. These should be set in both the server and the client ('auth' and 'cipher'
+# MUST be the same on both ends or no data will get transmitted).
+#
+# tls-cipher DHE-RSA-AES128-SHA
+#
+# dkg: For the TLS control channel, we want to make sure we choose a
+# key exchange mechanism that has PFS (meaning probably some form of ephemeral
+# Diffie-Hellman key exchange), and that uses a standard, well-tested cipher
+# (I recommend AES, and 128 bits is probably fine, since there are some known
+# weaknesses in the 192- and 256-bit key schedules). That leaves us with the
+# choice of public key algorithms: /usr/sbin/openvpn --show-tls | grep DHE |
+# grep AES128 | grep GCM.
+#
+# elijah:
+# I could not get any of these working:
+# * openvpn --show-tls | grep GCM
+# * openvpn --show-tls | grep DHE | grep AES128 | grep SHA256
+# so, i went with this:
+# * openvpn --show-tls | grep DHE | grep AES128 | grep -v SHA256 | grep -v GCM
+# Also, i couldn't get any of the elliptical curve algorithms to work. Not sure how
+# our cert generation interacts with the tls-cipher algorithms.
+#
+# note: in my tests, DHE-RSA-AES256-SHA is the one it negotiates if no value is set.
+#
+# auth SHA1
+#
+# dkg: For the HMAC digest used to authenticate packets, we just want SHA256.
+# OpenVPN lists a number of "digests" with names like "RSA-SHA256", but these
+# are legacy and should be avoided.
+#
+# elijah: i am not so sure that the digest algo matters for 'auth' option, because
+# i think an attacker would have to forge the digest in real time, which is still far from
+# a possibility for SHA1. So, i am leaving the default for now (SHA1).
+#
+# cipher AES-128-CBC
+#
+# dkg: For the choice of cipher, we need to select an algorithm and a
+# cipher mode. OpenVPN defaults to Blowfish, which is a fine algorithm - but
+# our control channel is already relying on AES not being broken; if the
+# control channel is cracked, then the key material for the tunnel is exposed,
+# and the choice of algorithm is moot. So it makes more sense to me to rely on
+# the same cipher here: AES128. As for the cipher mode, OFB seems cleaner to
+# me, but CBC is more well-tested, and the OpenVPN man page (at least as of
+# version 2.2.1) says "CBC is recommended and CFB and OFB should be considered
+# advanced modes."
+#
+# note: the default is BF-CBC (blowfish)
+#
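+# To make the discussion above concrete: with the values it recommends (the
+# actual values come from the node's 'openvpn.configuration' property and are
+# passed into this define as $config), the generated /etc/openvpn/*_config.conf
+# would contain lines such as:
+#
+#   tls-cipher DHE-RSA-AES128-SHA
+#   auth SHA1
+#   cipher AES-128-CBC
+#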
+
+define site_openvpn::server_config(
+ $port, $proto, $local, $server, $push,
+ $management, $config, $tls_remote = undef) {
+
+ $openvpn_configname = $name
+ $shortname = regsubst(regsubst($name, '_config', ''), '_', '-')
+ $openvpn_status_filename = "/var/run/openvpn-status-${shortname}"
+
+ concat {
+ "/etc/openvpn/${openvpn_configname}.conf":
+ owner => root,
+ group => root,
+ mode => '0644',
+ warn => true,
+ require => File['/etc/openvpn'],
+ before => Service['openvpn'],
+ notify => Exec['restart_openvpn'];
+ }
+
+ if $tls_remote != undef {
+ openvpn::option {
+ "tls-remote ${openvpn_configname}":
+ key => 'tls-remote',
+ value => $tls_remote,
+ server => $openvpn_configname;
+ }
+ }
+
+ # according to the openvpn man page, tcp-nodelay is "generally a good latency optimization".
+ if $proto == 'tcp' {
+ openvpn::option {
+ "tcp-nodelay ${openvpn_configname}":
+ key => 'tcp-nodelay',
+ server => $openvpn_configname;
+ }
+ } elsif $proto == 'udp' {
+ if $config['fragment'] != 1500 {
+ openvpn::option {
+ "fragment ${openvpn_configname}":
+ key => 'fragment',
+ value => $config['fragment'],
+ server => $openvpn_configname;
+ "mssfix ${openvpn_configname}":
+ key => 'mssfix',
+ server => $openvpn_configname;
+ }
+ }
+ }
+
+ openvpn::option {
+ "ca ${openvpn_configname}":
+ key => 'ca',
+ value => "${x509::variables::local_CAs}/${site_config::params::ca_bundle_name}.crt",
+ server => $openvpn_configname;
+ "cert ${openvpn_configname}":
+ key => 'cert',
+ value => "${x509::variables::certs}/${site_config::params::cert_name}.crt",
+ server => $openvpn_configname;
+ "key ${openvpn_configname}":
+ key => 'key',
+ value => "${x509::variables::keys}/${site_config::params::cert_name}.key",
+ server => $openvpn_configname;
+ "dh ${openvpn_configname}":
+ key => 'dh',
+ value => '/etc/openvpn/keys/dh.pem',
+ server => $openvpn_configname;
+ "tls-cipher ${openvpn_configname}":
+ key => 'tls-cipher',
+ value => $config['tls-cipher'],
+ server => $openvpn_configname;
+ "auth ${openvpn_configname}":
+ key => 'auth',
+ value => $config['auth'],
+ server => $openvpn_configname;
+ "cipher ${openvpn_configname}":
+ key => 'cipher',
+ value => $config['cipher'],
+ server => $openvpn_configname;
+ "dev ${openvpn_configname}":
+ key => 'dev',
+ value => 'tun',
+ server => $openvpn_configname;
+ "tun-ipv6 ${openvpn_configname}":
+ key => 'tun-ipv6',
+ server => $openvpn_configname;
+ "duplicate-cn ${openvpn_configname}":
+ key => 'duplicate-cn',
+ server => $openvpn_configname;
+ "keepalive ${openvpn_configname}":
+ key => 'keepalive',
+ value => $config['keepalive'],
+ server => $openvpn_configname;
+ "local ${openvpn_configname}":
+ key => 'local',
+ value => $local,
+ server => $openvpn_configname;
+ "mute ${openvpn_configname}":
+ key => 'mute',
+ value => '5',
+ server => $openvpn_configname;
+ "mute-replay-warnings ${openvpn_configname}":
+ key => 'mute-replay-warnings',
+ server => $openvpn_configname;
+ "management ${openvpn_configname}":
+ key => 'management',
+ value => $management,
+ server => $openvpn_configname;
+ "proto ${openvpn_configname}":
+ key => 'proto',
+ value => $proto,
+ server => $openvpn_configname;
+ "push1 ${openvpn_configname}":
+ key => 'push',
+ value => $push,
+ server => $openvpn_configname;
+ "push2 ${openvpn_configname}":
+ key => 'push',
+ value => '"redirect-gateway def1"',
+ server => $openvpn_configname;
+ "push-ipv6 ${openvpn_configname}":
+ key => 'push',
+ value => '"route-ipv6 2000::/3"',
+ server => $openvpn_configname;
+ "script-security ${openvpn_configname}":
+ key => 'script-security',
+ value => '1',
+ server => $openvpn_configname;
+ "server ${openvpn_configname}":
+ key => 'server',
+ value => $server,
+ server => $openvpn_configname;
+ "server-ipv6 ${openvpn_configname}":
+ key => 'server-ipv6',
+ value => '2001:db8:123::/64',
+ server => $openvpn_configname;
+ "status ${openvpn_configname}":
+ key => 'status',
+ value => "${openvpn_status_filename} 10",
+ server => $openvpn_configname;
+ "status-version ${openvpn_configname}":
+ key => 'status-version',
+ value => '3',
+ server => $openvpn_configname;
+ "topology ${openvpn_configname}":
+ key => 'topology',
+ value => 'subnet',
+ server => $openvpn_configname;
+ "verb ${openvpn_configname}":
+ key => 'verb',
+ value => '3',
+ server => $openvpn_configname;
+ "log-append /var/log/leap/openvpn_${proto}.log":
+ key => 'log-append',
+ value => "/var/log/leap/openvpn_${proto}.log",
+ server => $openvpn_configname;
+ }
+
+ # register openvpn services with systemd on nodes newer than wheezy
+ # see https://leap.se/code/issues/7798
+ case $::operatingsystemrelease {
+ /^7.*/: { }
+ default: {
+ exec { "enable_systemd_${openvpn_configname}":
+ refreshonly => true,
+ command => "/bin/systemctl enable openvpn@${openvpn_configname}",
+ subscribe => File["/etc/openvpn/${openvpn_configname}.conf"],
+ notify => Service["openvpn@${openvpn_configname}"];
+ }
+ service { "openvpn@${openvpn_configname}":
+ ensure => running
+ }
+ }
+ }
+}
diff --git a/puppet/modules/site_openvpn/templates/add_gateway_ips.sh.erb b/puppet/modules/site_openvpn/templates/add_gateway_ips.sh.erb
new file mode 100644
index 00000000..e76b756b
--- /dev/null
+++ b/puppet/modules/site_openvpn/templates/add_gateway_ips.sh.erb
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+ip addr show dev <%= scope.lookupvar('site_config::params::interface') %> | grep -q <%= @openvpn_gateway_address %>/<%= @primary_netmask %> ||
+ ip addr add <%= @openvpn_gateway_address %>/<%= @primary_netmask %> dev <%= scope.lookupvar('site_config::params::interface') %>
+
+<% if @openvpn_second_gateway_address %>
+ip addr show dev <%= scope.lookupvar('site_config::params::interface') %> | grep -q <%= @openvpn_second_gateway_address %>/<%= @primary_netmask %> ||
+ ip addr add <%= @openvpn_second_gateway_address %>/<%= @primary_netmask %> dev <%= scope.lookupvar('site_config::params::interface') %>
+<% end %>
+
+/bin/echo 1 > /proc/sys/net/ipv4/ip_forward
diff --git a/puppet/modules/site_postfix/files/checks/received_anon b/puppet/modules/site_postfix/files/checks/received_anon
new file mode 100644
index 00000000..9de25e63
--- /dev/null
+++ b/puppet/modules/site_postfix/files/checks/received_anon
@@ -0,0 +1,2 @@
+/^Received: from (.* \([-._[:alnum:]]+ \[[.[:digit:]]{7,15}\]\))([[:space:]]+).*(\(using [.[:alnum:]]+ with cipher [-A-Z0-9]+ \([0-9]+\/[0-9]+ bits\)\))[[:space:]]+\(Client CN "([-._@[:alnum:]]+)", Issuer "[[:print:]]+" \(verified OK\)\)[[:space:]]+by ([.[:alnum:]]+) \(([^)]+)\) with (E?SMTPS?A?) id ([A-F[:digit:]]+).*/
+ REPLACE Received: from [127.0.0.1] (localhost [127.0.0.1])${2}${3}${2}(Authenticated sender: $4)${2}with $7 id $8
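+# Illustrative example (hypothetical hostnames, address, and queue id): a header such as
+#   Received: from mail.example.org (mail.example.org [192.0.2.1]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (Client CN "alice@example.org", Issuer "Example Client CA" (verified OK)) by mx1.example.org (Postfix) with ESMTPSA id ABC123
+# would be rewritten to
+#   Received: from [127.0.0.1] (localhost [127.0.0.1]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (Authenticated sender: alice@example.org) with ESMTPSA id ABC123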
diff --git a/puppet/modules/site_postfix/manifests/debug.pp b/puppet/modules/site_postfix/manifests/debug.pp
new file mode 100644
index 00000000..f370d166
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/debug.pp
@@ -0,0 +1,9 @@
+class site_postfix::debug {
+
+ postfix::config {
+ 'debug_peer_list': value => '127.0.0.1';
+ 'debug_peer_level': value => '1';
+ 'smtpd_tls_loglevel': value => '1';
+ }
+
+}
diff --git a/puppet/modules/site_postfix/manifests/mx.pp b/puppet/modules/site_postfix/manifests/mx.pp
new file mode 100644
index 00000000..c269946b
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx.pp
@@ -0,0 +1,152 @@
+#
+# configure mx node
+#
+class site_postfix::mx {
+
+ $domain_hash = hiera('domain')
+ $domain = $domain_hash['full_suffix']
+ $host_domain = $domain_hash['full']
+ $cert_name = hiera('name')
+ $mynetworks = join(hiera('mynetworks', ''), ' ')
+ $rbls = suffix(prefix(hiera('rbls', []), 'reject_rbl_client '), ',')
+
+ $root_mail_recipient = hiera('contacts')
+ $postfix_smtp_listen = 'all'
+ $postfix_use_postscreen = 'yes'
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::client_ca::ca
+ include site_config::x509::client_ca::key
+
+ postfix::config {
+ 'mynetworks':
+ value => "127.0.0.0/8 [::1]/128 [fe80::]/64 ${mynetworks}";
+ # Note: mydestination should not include @domain, because this is
+ # used in virtual alias maps.
+ 'mydestination':
+ value => "\$myorigin, localhost, localhost.\$mydomain";
+ 'myhostname':
+ value => $host_domain;
+ 'mailbox_size_limit':
+ value => '0';
+ 'home_mailbox':
+ value => '';
+ 'virtual_mailbox_domains':
+ value => 'deliver.local';
+ 'virtual_mailbox_base':
+ value => '/var/mail/leap-mx';
+ 'virtual_mailbox_maps':
+ value => 'static:Maildir/';
+ # Note: the virtual-aliases map takes precedence over the leap-mx
+ # lookup (tcp:localhost)
+ 'virtual_alias_maps':
+ value => 'hash:/etc/postfix/virtual-aliases tcp:localhost:4242';
+ 'luser_relay':
+ value => '';
+ # uid and gid are set to an arbitrary hard-coded value here; this
+ # must match the 'leap-mx' user/group
+ 'virtual_uid_maps':
+ value => 'static:42424';
+ 'virtual_gid_maps':
+ value => 'static:42424';
+ # the following two configs are needed to match users' client cert
+ # fingerprints and enable relaying (#3634). Satellites do not have
+ # these configured.
+ 'smtpd_tls_fingerprint_digest':
+ value => 'sha1';
+ 'relay_clientcerts':
+ value => 'tcp:localhost:2424';
+ # Note: we are setting this here, instead of in site_postfix::mx::smtp_tls
+ # because the satellites need to have a different value
+ 'smtp_tls_security_level':
+ value => 'may';
+ # reject inbound mail to system users
+ # see https://leap.se/code/issues/6829
+ # this blocks *only* mail to system users that don't appear in the
+ # alias map
+ 'local_recipient_maps':
+ value => '$alias_maps';
+ # setup clamav and opendkim on smtpd
+ 'smtpd_milters':
+ value => 'unix:/run/clamav/milter.ctl,inet:localhost:8891';
+ # setup opendkim for smtp (non-smtpd) outgoing mail
+ 'non_smtpd_milters':
+ value => 'inet:localhost:8891';
+ 'milter_default_action':
+ value => 'accept';
+ # Make sure that the right values are set; these could have been set to
+ # different things at install time, depending on the preseed or debconf
+ # options selected (see #7478)
+ 'relay_transport':
+ value => 'relay';
+ 'default_transport':
+ value => 'smtp';
+ 'mailbox_command':
+ value => '';
+ 'header_checks':
+ value => '';
+ 'postscreen_access_list':
+ value => 'permit_mynetworks';
+ 'postscreen_greet_action':
+ value => 'enforce';
+ }
+
+ # Make sure that the cleanup service is not chrooted; otherwise it cannot
+ # access the opendkim milter socket (#8020)
+ exec { 'unset_cleanup_chroot':
+ command => '/usr/sbin/postconf -F "cleanup/unix/chroot=n"',
+ onlyif => '/usr/sbin/postconf -h -F "cleanup/unix/chroot" | egrep -q ^n',
+ notify => Service['postfix'],
+ require => File['/etc/postfix/master.cf']
+ }
+
+ include ::site_postfix::mx::smtpd_checks
+ include ::site_postfix::mx::checks
+ include ::site_postfix::mx::smtp_tls
+ include ::site_postfix::mx::smtpd_tls
+ include ::site_postfix::mx::static_aliases
+ include ::site_postfix::mx::rewrite_openpgp_header
+ include ::site_postfix::mx::received_anon
+ include ::clamav
+ include ::opendkim
+ include ::postfwd
+
+ # greater verbosity for debugging; remove for production
+ #include site_postfix::debug
+
+ case $::operatingsystemrelease {
+ /^7.*/: {
+ $smtpd_relay_restrictions=''
+ }
+ default: {
+ $smtpd_relay_restrictions=" -o smtpd_relay_restrictions=\$smtps_relay_restrictions\n"
+ }
+ }
+
+ $mastercf_tail = "
+smtps inet n - - - - smtpd
+ -o smtpd_tls_wrappermode=yes
+ -o smtpd_tls_security_level=encrypt
+ -o tls_preempt_cipherlist=yes
+${smtpd_relay_restrictions} -o smtpd_recipient_restrictions=\$smtps_recipient_restrictions
+ -o smtpd_helo_restrictions=\$smtps_helo_restrictions
+ -o smtpd_client_restrictions=
+ -o cleanup_service_name=clean_smtps
+clean_smtps unix n - n - 0 cleanup
+ -o header_checks=pcre:/etc/postfix/checks/rewrite_openpgp_headers,pcre:/etc/postfix/checks/received_anon"
+
+ class { 'postfix':
+ preseed => true,
+ root_mail_recipient => $root_mail_recipient,
+ smtp_listen => 'all',
+ mastercf_tail => $mastercf_tail,
+ use_postscreen => 'yes',
+ require => [
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Client_ca::Key'],
+ Class['Site_config::X509::Client_ca::Ca'],
+ User['leap-mx'] ]
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/checks.pp b/puppet/modules/site_postfix/manifests/mx/checks.pp
new file mode 100644
index 00000000..f406ad34
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/checks.pp
@@ -0,0 +1,23 @@
+class site_postfix::mx::checks {
+
+ file {
+ '/etc/postfix/checks':
+ ensure => directory,
+ mode => '0755',
+ owner => root,
+ group => postfix,
+ require => Package['postfix'];
+
+ '/etc/postfix/checks/helo_checks':
+ content => template('site_postfix/checks/helo_access.erb'),
+ mode => '0644',
+ owner => root,
+ group => root;
+ }
+
+ exec {
+ '/usr/sbin/postmap /etc/postfix/checks/helo_checks':
+ refreshonly => true,
+ subscribe => File['/etc/postfix/checks/helo_checks'];
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/received_anon.pp b/puppet/modules/site_postfix/manifests/mx/received_anon.pp
new file mode 100644
index 00000000..51ba3faa
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/received_anon.pp
@@ -0,0 +1,13 @@
+# Anonymize the user's home IP from the email headers (Feature #3866)
+class site_postfix::mx::received_anon {
+
+ package { 'postfix-pcre': ensure => installed, require => Package['postfix'] }
+
+ file { '/etc/postfix/checks/received_anon':
+ source => 'puppet:///modules/site_postfix/checks/received_anon',
+ mode => '0644',
+ owner => root,
+ group => root,
+ notify => Service['postfix']
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/rewrite_openpgp_header.pp b/puppet/modules/site_postfix/manifests/mx/rewrite_openpgp_header.pp
new file mode 100644
index 00000000..71f945b8
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/rewrite_openpgp_header.pp
@@ -0,0 +1,11 @@
+class site_postfix::mx::rewrite_openpgp_header {
+ $mx = hiera('mx')
+ $correct_domain = $mx['key_lookup_domain']
+
+ file { '/etc/postfix/checks/rewrite_openpgp_headers':
+ content => template('site_postfix/checks/rewrite_openpgp_headers.erb'),
+ mode => '0644',
+ owner => root,
+ group => root;
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/smtp_auth.pp b/puppet/modules/site_postfix/manifests/mx/smtp_auth.pp
new file mode 100644
index 00000000..afa70527
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/smtp_auth.pp
@@ -0,0 +1,6 @@
+class site_postfix::mx::smtp_auth {
+
+ postfix::config {
+ 'smtpd_tls_ask_ccert': value => 'yes';
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/smtp_tls.pp b/puppet/modules/site_postfix/manifests/mx/smtp_tls.pp
new file mode 100644
index 00000000..c93c3ba2
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/smtp_tls.pp
@@ -0,0 +1,43 @@
+# configure smtp tls
+class site_postfix::mx::smtp_tls {
+
+ include site_config::x509::ca
+ include x509::variables
+ $cert_name = hiera('name')
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+ include site_config::x509::cert
+ include site_config::x509::key
+
+ # smtp TLS
+ postfix::config {
+ 'smtp_use_tls': value => 'yes';
+ 'smtp_tls_CApath': value => '/etc/ssl/certs/';
+ 'smtp_tls_CAfile': value => $ca_path;
+ 'smtp_tls_cert_file': value => $cert_path;
+ 'smtp_tls_key_file': value => $key_path;
+ 'smtp_tls_loglevel': value => '1';
+ 'smtp_tls_exclude_ciphers':
+ value => 'aNULL, MD5, DES';
+ # upstream default is md5 (since 2.5 and older used it); we force sha1
+ 'smtp_tls_fingerprint_digest':
+ value => 'sha1';
+ 'smtp_tls_session_cache_database':
+ value => "btree:\${data_directory}/smtp_cache";
+ # see issue #4011
+ 'smtp_tls_protocols':
+ value => '!SSLv2, !SSLv3';
+ 'smtp_tls_mandatory_protocols':
+ value => '!SSLv2, !SSLv3';
+ 'tls_ssl_options':
+ value => 'NO_COMPRESSION';
+ # We can switch between the different postfix-internal cipher lists by
+ # setting smtpd_tls_ciphers. For server-to-server connections we leave this
+ # at its default: encryption is opportunistic, many mail servers only
+ # support outdated protocols and ciphers, and if we are too strict about
+ # the required ciphers, connections *will* fall back to plain text. Bad
+ # ciphers are still better than plain-text transmission.
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/smtpd_checks.pp b/puppet/modules/site_postfix/manifests/mx/smtpd_checks.pp
new file mode 100644
index 00000000..291d7ee4
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/smtpd_checks.pp
@@ -0,0 +1,36 @@
+# smtpd checks for incoming mail on smtp port 25 and
+# mail sent via the bitmask client using smtps port 465
+class site_postfix::mx::smtpd_checks {
+
+ postfix::config {
+ 'smtpd_helo_required':
+ value => 'yes';
+ 'checks_dir':
+ value => '$config_directory/checks';
+ 'smtpd_client_restrictions':
+ value => "permit_mynetworks,${site_postfix::mx::rbls},permit";
+ 'smtpd_data_restrictions':
+ value => 'permit_mynetworks, reject_unauth_pipelining, permit';
+ 'smtpd_delay_reject':
+ value => 'yes';
+ 'smtpd_helo_restrictions':
+ value => 'permit_mynetworks, reject_invalid_helo_hostname, reject_non_fqdn_helo_hostname, check_helo_access hash:$checks_dir/helo_checks, permit';
+ 'smtpd_recipient_restrictions':
+ value => 'reject_unknown_recipient_domain, permit_mynetworks, check_recipient_access tcp:localhost:2244, reject_unauth_destination, permit';
+
+ # permit_tls_clientcerts will look up client cert fingerprints via the tcp
+ # lookup on port 2424 (based on what is configured in the relay_clientcerts
+ # parameter, see the site_postfix::mx postfix::config resource) to determine
+ # whether a client is allowed to relay mail through us. This enables us to
+ # disable a user by removing their valid client cert (#3634).
+ 'smtps_recipient_restrictions':
+ value => 'permit_tls_clientcerts, check_recipient_access tcp:localhost:2244, reject_unauth_destination, permit';
+ 'smtps_relay_restrictions':
+ value => 'permit_mynetworks, permit_tls_clientcerts, defer_unauth_destination';
+ 'smtps_helo_restrictions':
+ value => 'permit_mynetworks, check_helo_access hash:$checks_dir/helo_checks, permit';
+ 'smtpd_sender_restrictions':
+ value => 'permit_mynetworks, reject_non_fqdn_sender, reject_unknown_sender_domain, permit';
+ }
+
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/smtpd_tls.pp b/puppet/modules/site_postfix/manifests/mx/smtpd_tls.pp
new file mode 100644
index 00000000..66297f55
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/smtpd_tls.pp
@@ -0,0 +1,69 @@
+# configure smtpd tls
+class site_postfix::mx::smtpd_tls {
+
+ include x509::variables
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::client_ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+
+ postfix::config {
+ 'smtpd_use_tls': value => 'yes';
+ 'smtpd_tls_CAfile': value => $ca_path;
+ 'smtpd_tls_cert_file': value => $cert_path;
+ 'smtpd_tls_key_file': value => $key_path;
+ 'smtpd_tls_ask_ccert': value => 'yes';
+ 'smtpd_tls_received_header':
+ value => 'yes';
+ 'smtpd_tls_security_level':
+ value => 'may';
+ 'smtpd_tls_eecdh_grade':
+ value => 'ultra';
+ 'smtpd_tls_session_cache_database':
+ value => "btree:\${data_directory}/smtpd_scache";
+ # see issue #4011
+ 'smtpd_tls_mandatory_protocols':
+ value => '!SSLv2, !SSLv3';
+ 'smtpd_tls_protocols':
+ value => '!SSLv2, !SSLv3';
+ # For connections from MUAs, TLS is mandatory and the ciphersuite is modified
+ # (MX and SMTP client settings are configured separately).
+ 'smtpd_tls_mandatory_ciphers':
+ value => 'high';
+ 'tls_high_cipherlist':
+ value => 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!3DES:!RC4:!MD5:!PSK!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';
+ }
+
+ # Set up DH parameters
+ # Instead of using the dh parameters created by the leap cli, it is more
+ # secure to generate, for each machine, new parameter files that will only
+ # be used for postfix
+
+ include site_config::packages::gnutls
+
+ # Note: although the parameter name refers to 1024 bits
+ # ("smtpd_tls_dh1024_param_file"), we are generating 2048-bit dh parameters.
+ # Neither Postfix nor OpenSSL actually cares about the size of the prime, so
+ # it can be 2048 bits.
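+ #
+ # As a quick sanity check on a deployed node (an illustrative one-off
+ # command, not managed by puppet), something like
+ #   openssl dhparam -in /etc/postfix/smtpd_tls_dh_param.pem -text -noout | head -n1
+ # should report a 2048-bit DH prime.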
+
+ exec { 'certtool-postfix-gendh':
+ command => 'certtool --generate-dh-params --bits 2048 --outfile /etc/postfix/smtpd_tls_dh_param.pem',
+ user => root,
+ group => root,
+ creates => '/etc/postfix/smtpd_tls_dh_param.pem',
+ require => [ Package['gnutls-bin'], Package['postfix'] ]
+ }
+
+ # Make sure the dh params file has correct ownership and mode
+ file {
+ '/etc/postfix/smtpd_tls_dh_param.pem':
+ owner => root,
+ group => root,
+ mode => '0600',
+ require => Exec['certtool-postfix-gendh'];
+ }
+
+ postfix::config { 'smtpd_tls_dh1024_param_file':
+ value => '/etc/postfix/smtpd_tls_dh_param.pem',
+ require => File['/etc/postfix/smtpd_tls_dh_param.pem']
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/static_aliases.pp b/puppet/modules/site_postfix/manifests/mx/static_aliases.pp
new file mode 100644
index 00000000..9cd7ca02
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/static_aliases.pp
@@ -0,0 +1,88 @@
+#
+# Defines static, hard coded aliases that are not in the database.
+# These aliases take precedence over the database aliases.
+#
+# There are three classes of reserved names:
+#
+# (1) forbidden_usernames:
+# Some usernames are forbidden and cannot be registered.
+# this is defined in node property webapp.forbidden_usernames
+# This is enforced by the webapp.
+#
+# (2) public aliases:
+# Some aliases are for root and are publicly exposed so that anyone
+# can deliver mail to them, for example postmaster.
+# These are implemented in the virtual alias map, which takes
+# precedence over the local alias map.
+#
+# (3) local aliases:
+# Some aliases are only available locally: mail can be delivered
+# to the alias if the mail originates from the local host, or is
+# hostname qualified, but otherwise it will be rejected.
+# These are implemented in the local alias map.
+#
+# The alias for local 'root' is defined elsewhere. In this file, we
+# define the virtual 'root@domain' (which can be overwritten by
+# defining an entry for root in node property mx.aliases).
+#
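+# As an example (assuming a hypothetical provider domain example.org and the
+# defaults defined below): mail to abuse@example.org is aliased to
+# postmaster@example.org, which is aliased to root@example.org, which is in
+# turn delivered to the addresses in the 'contacts' node property.
+#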
+
+class site_postfix::mx::static_aliases {
+
+ $mx = hiera('mx')
+ $root_recipients = hiera('contacts')
+
+ #
+ # LOCAL ALIASES
+ #
+
+ # NOTE: if you remove one of these, they will still appear in the
+ # /etc/aliases file
+ $local_aliases = [
+ 'admin', 'administrator', 'bin', 'cron', 'games', 'ftp', 'lp', 'maildrop',
+ 'mysql', 'news', 'nobody', 'noc', 'postgresql', 'ssladmin', 'sys',
+ 'usenet', 'uucp', 'www', 'www-data', 'leap-mx'
+ ]
+
+ postfix::mailalias {
+ $local_aliases:
+ ensure => present,
+ recipient => 'root'
+ }
+
+ #
+ # PUBLIC ALIASES
+ #
+
+ $public_aliases = $mx['aliases']
+
+ $default_public_aliases = {
+ 'root' => $root_recipients,
+ 'abuse' => 'postmaster',
+ 'arin-admin' => 'root',
+ 'certmaster' => 'hostmaster',
+ 'domainadmin' => 'hostmaster',
+ 'hostmaster' => 'root',
+ 'mailer-daemon' => 'postmaster',
+ 'postmaster' => 'root',
+ 'security' => 'root',
+ 'webmaster' => 'hostmaster',
+ }
+
+ $aliases = merge($default_public_aliases, $public_aliases)
+
+ exec { 'postmap_virtual_aliases':
+ command => '/usr/sbin/postmap /etc/postfix/virtual-aliases',
+ refreshonly => true,
+ user => root,
+ group => root,
+ require => Package['postfix'],
+ subscribe => File['/etc/postfix/virtual-aliases']
+ }
+ file { '/etc/postfix/virtual-aliases':
+ content => template('site_postfix/virtual-aliases.erb'),
+ owner => root,
+ group => root,
+ mode => '0600',
+ require => Package['postfix']
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/satellite.pp b/puppet/modules/site_postfix/manifests/satellite.pp
new file mode 100644
index 00000000..5725e6b8
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/satellite.pp
@@ -0,0 +1,47 @@
+class site_postfix::satellite {
+
+ $root_mail_recipient = hiera('contacts')
+ $mail = hiera('mail')
+ $relayhost = $mail['smarthost']
+ $cert_name = hiera('name')
+
+ class { '::postfix::satellite':
+ relayhost => $relayhost,
+ root_mail_recipient => $root_mail_recipient
+ }
+
+ # There are special conditions under which satellite hosts will not be able
+ # to contact their relayhost:
+ #
+ # 1. They are on openstack/amazon/PC and on the same cluster as the relay
+ # host; the MX lookup for the relay host returns the public IP, which cannot
+ # be contacted.
+ #
+ # 2. The domain used is not in DNS because it is internal, a testing domain,
+ # etc. (e.g. a .local domain cannot be looked up in DNS).
+ #
+ # To resolve this, so the satellite can contact the relayhost, we set
+ # http://www.postfix.org/postconf.5.html#smtp_host_lookup to 'native', which
+ # makes the lookup use the native naming service (nsswitch.conf). That
+ # typically defaults to 'files, dns', so /etc/hosts is consulted first and
+ # DNS only if no entry exists there.
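+ #
+ # For instance (hypothetical name and address): an /etc/hosts entry on the
+ # satellite such as
+ #
+ # 10.5.5.2 mx1.example.org
+ #
+ # lets the 'native' lookup resolve the relayhost to its reachable internal
+ # address rather than the public MX record.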
+ #
+ # NOTE: this makes it impossible to enable DANE support through DNSSEC
+ # with http://www.postfix.org/postconf.5.html#smtp_dns_support_level - but
+ # that parameter is not available until postfix 2.11. If this ends up being
+ # important, we could also make smtp_host_lookup an optional parameter for
+ # providers without dns / local domains.
+
+ postfix::config {
+ 'smtp_host_lookup':
+ value => 'native';
+
+ # Note: we are setting this here, instead of in site_postfix::mx::smtp_tls
+ # because the mx server has to have a different value
+ 'smtp_tls_security_level':
+ value => 'encrypt';
+ }
+
+ include site_postfix::mx::smtp_tls
+
+}
diff --git a/puppet/modules/site_postfix/templates/checks/helo_access.erb b/puppet/modules/site_postfix/templates/checks/helo_access.erb
new file mode 100644
index 00000000..bac2c45a
--- /dev/null
+++ b/puppet/modules/site_postfix/templates/checks/helo_access.erb
@@ -0,0 +1,21 @@
+# THIS FILE IS MANAGED BY PUPPET
+# To make changes to this file, please edit your platform directory under
+# puppet/modules/site_postfix/templates/checks/helo_access.erb and then deploy
+
+# The format of this file is the HELO/EHLO domain followed by an action.
+# The action could be OK to allow it, REJECT to reject it, or a custom
+# status code and message. Any lines that are prefixed by an octothorpe (#)
+# will be considered comments.
+
+# Some examples:
+#
+# Reject anyone that HELO's with foobar:
+# foobar REJECT
+#
+# Allow the switches to skip this check:
+# switch1 OK
+# switch2 OK
+
+# Reject anybody that HELO's as being in our own domain(s)
+# anyone who identifies themselves as us is a virus/spammer
+<%= @domain %> 554 You are not in domain <%= @domain %>
diff --git a/puppet/modules/site_postfix/templates/checks/rewrite_openpgp_headers.erb b/puppet/modules/site_postfix/templates/checks/rewrite_openpgp_headers.erb
new file mode 100644
index 00000000..7af14f7d
--- /dev/null
+++ b/puppet/modules/site_postfix/templates/checks/rewrite_openpgp_headers.erb
@@ -0,0 +1,13 @@
+# THIS FILE IS MANAGED BY PUPPET
+#
+# This will replace the OpenPGP header that the client adds, because it is
+# sometimes incorrect (due to the client not always knowing what the proper URL
+# is for the webapp).
+# e.g. This will rewrite this header:
+# OpenPGP: id=4C0E01CD50E2F653; url="https://leap.se/key/elijah"; preference="signencrypt
+# with this replacement:
+# OpenPGP: id=4C0E01CD50E2F653; url="https://user.leap.se/key/elijah"; preference="signencrypt
+#
+# Note: whitespace in the pattern is represented by [[:space:]] to avoid these warnings from postmap:
+# "record is in "key: value" format; is this an alias file?" and "duplicate entry"
+/^(OpenPGP:[[:space:]]id=[[:alnum:]]+;[[:space:]]url="https:\/\/)<%= @domain %>(\/key\/[[:alpha:]]+";.*)/i REPLACE ${1}<%= @correct_domain %>${2}
diff --git a/puppet/modules/site_postfix/templates/virtual-aliases.erb b/puppet/modules/site_postfix/templates/virtual-aliases.erb
new file mode 100644
index 00000000..8373de97
--- /dev/null
+++ b/puppet/modules/site_postfix/templates/virtual-aliases.erb
@@ -0,0 +1,21 @@
+#
+# This file is managed by puppet.
+#
+# These virtual aliases take precedence over all other aliases.
+#
+
+#
+# enable these virtual domains:
+#
+<%= @domain %> enabled
+<%- @aliases.keys.map {|addr| addr.split('@')[1] }.compact.sort.uniq.each do |virt_domain| -%>
+<%= virt_domain %> enabled
+<%- end %>
+
+#
+# virtual aliases:
+#
+<%- @aliases.keys.sort.each do |from| -%>
+<%- full_address = from =~ /@/ ? from : from + "@" + @domain -%>
+<%= full_address %> <%= [@aliases[from]].flatten.map{|a| a =~ /@/ ? a : a + "@" + @domain}.join(', ') %>
+<%- end -%>
diff --git a/puppet/modules/site_rsyslog/templates/client.conf.erb b/puppet/modules/site_rsyslog/templates/client.conf.erb
new file mode 100644
index 00000000..7f94759d
--- /dev/null
+++ b/puppet/modules/site_rsyslog/templates/client.conf.erb
@@ -0,0 +1,134 @@
+
+# An "In-Memory Queue" is created for remote logging.
+$WorkDirectory <%= scope.lookupvar('rsyslog::spool_dir') -%> # where to place spool files
+$ActionQueueFileName queue # unique name prefix for spool files
+$ActionQueueMaxDiskSpace <%= scope.lookupvar('rsyslog::client::spool_size') -%> # spool space limit (use as much as possible)
+$ActionQueueSaveOnShutdown on # save messages to disk on shutdown
+$ActionQueueType LinkedList # run asynchronously
+$ActionResumeRetryCount -1 # infinite retries if the host is down
+<% if scope.lookupvar('rsyslog::client::log_templates') and ! scope.lookupvar('rsyslog::client::log_templates').empty?-%>
+
+# Define custom logging templates
+<% scope.lookupvar('rsyslog::client::log_templates').flatten.compact.each do |log_template| -%>
+$template <%= log_template['name'] %>,"<%= log_template['template'] %>"
+<% end -%>
+<% end -%>
+<% if scope.lookupvar('rsyslog::client::actionfiletemplate') -%>
+
+# Using specified format for default logging format:
+$ActionFileDefaultTemplate <%= scope.lookupvar('rsyslog::client::actionfiletemplate') %>
+<% else -%>
+
+#Using default format for default logging format:
+$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat
+<% end -%>
+<% if scope.lookupvar('rsyslog::client::ssl') -%>
+
+# Setup SSL connection.
+# CA/Cert
+$DefaultNetStreamDriverCAFile <%= scope.lookupvar('rsyslog::client::ssl_ca') %>
+
+# Connection settings.
+$DefaultNetstreamDriver gtls
+$ActionSendStreamDriverMode 1
+$ActionSendStreamDriverAuthMode anon
+<% end -%>
+<% if scope.lookupvar('rsyslog::client::remote_servers') -%>
+
+<% scope.lookupvar('rsyslog::client::remote_servers').flatten.compact.each do |server| -%>
+<% if server['pattern'] and server['pattern'] != ''-%>
+<% pattern = server['pattern'] -%>
+<% else -%>
+<% pattern = '*.*' -%>
+<% end -%>
+<% if server['protocol'] == 'TCP' or server['protocol'] == 'tcp'-%>
+<% protocol = '@@' -%>
+<% protocol_type = 'TCP' -%>
+<% else -%>
+<% protocol = '@' -%>
+<% protocol_type = 'UDP' -%>
+<% end -%>
+<% if server['host'] and server['host'] != ''-%>
+<% host = server['host'] -%>
+<% else -%>
+<% host = 'localhost' -%>
+<% end -%>
+<% if server['port'] and server['port'] != ''-%>
+<% port = server['port'] -%>
+<% else -%>
+<% port = '514' -%>
+<% end -%>
+<% if server['format'] -%>
+<% format = ";#{server['format']}" -%>
+<% format_type = server['format'] -%>
+<% else -%>
+<% format = '' -%>
+<% format_type = 'the default' -%>
+<% end -%>
+# Sending logs that match <%= pattern %> to <%= host %> via <%= protocol_type %> on <%= port %> using <%=format_type %> format.
+<%= pattern %> <%= protocol %><%= host %>:<%= port %><%= format %>
+<% end -%>
+<% elsif scope.lookupvar('rsyslog::client::log_remote') -%>
+
+# Log to remote syslog server using <%= scope.lookupvar('rsyslog::client::remote_type') %>
+<% if scope.lookupvar('rsyslog::client::remote_type') == 'tcp' -%>
+*.* @@<%= scope.lookupvar('rsyslog::client::server') -%>:<%= scope.lookupvar('rsyslog::client::port') -%>;<%= scope.lookupvar('remote_forward_format') -%>
+<% else -%>
+*.* @<%= scope.lookupvar('rsyslog::client::server') -%>:<%= scope.lookupvar('rsyslog::client::port') -%>;<%= scope.lookupvar('remote_forward_format') -%>
+<% end -%>
+<% end -%>
+<% if scope.lookupvar('rsyslog::client::log_auth_local') or scope.lookupvar('rsyslog::client::log_local') -%>
+
+# Logging locally.
+
+<% if scope.lookupvar('rsyslog::log_style') == 'debian' -%>
+# Log auth messages locally
+.*;auth,authpriv.none;mail.none -/var/log/syslog
+<% elsif scope.lookupvar('rsyslog::log_style') == 'redhat' -%>
+# Log auth messages locally
+auth,authpriv.* /var/log/secure
+<% end -%>
+<% end -%>
+<% if scope.lookupvar('rsyslog::client::log_local') -%>
+<% if scope.lookupvar('rsyslog::log_style') == 'debian' -%>
+# First some standard log files. Log by facility.
+#
+*.*;auth,authpriv.none -/var/log/syslog
+cron.* /var/log/cron.log
+daemon.* -/var/log/daemon.log
+kern.* -/var/log/kern.log
+mail.* -/var/log/mail.log
+user.* -/var/log/user.log
+
+#
+# Some "catch-all" log files.
+#
+*.=debug;\
+ auth,authpriv.none;\
+ news.none;mail.none -/var/log/debug
+*.=info;*.=notice;*.=warn;\
+ auth,authpriv.none;\
+ cron,daemon.none;\
+ mail,news.none -/var/log/messages
+
+# Log anything (except mail) of level info or higher.
+# Don't log private authentication messages!
+*.info;mail.none;authpriv.none;cron.none /var/log/messages
+
+# Log cron stuff
+cron.* /var/log/cron
+
+# Everybody gets emergency messages
+<% if @rsyslog_version and @rsyslog_version.split('.')[0].to_i >= 8 -%>
+*.emerg :omusrmsg:*
+<% else -%>
+*.emerg *
+<% end -%>
+
+# Save boot messages also to boot.log
+local7.* -/var/log/boot.log
+<% end -%>
+<% end -%>
+
+
+
diff --git a/puppet/modules/site_shorewall/files/Debian/shorewall.service b/puppet/modules/site_shorewall/files/Debian/shorewall.service
new file mode 100644
index 00000000..ec250ef1
--- /dev/null
+++ b/puppet/modules/site_shorewall/files/Debian/shorewall.service
@@ -0,0 +1,23 @@
+#
+# The Shoreline Firewall (Shorewall) Packet Filtering Firewall
+#
+# Copyright 2011 Jonathan Underwood <jonathan.underwood@gmail.com>
+# Copyright 2015 Tom Eastep <teastep@shorewall.net>
+#
+[Unit]
+Description=Shorewall IPv4 firewall
+Wants=network-online.target
+After=network-online.target
+Conflicts=iptables.service firewalld.service
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+EnvironmentFile=-/etc/default/shorewall
+StandardOutput=syslog
+ExecStart=/sbin/shorewall $OPTIONS start $STARTOPTIONS
+ExecStop=/sbin/shorewall $OPTIONS stop
+ExecReload=/sbin/shorewall $OPTIONS reload $RELOADOPTIONS
+
+[Install]
+WantedBy=basic.target
diff --git a/puppet/modules/site_shorewall/manifests/defaults.pp b/puppet/modules/site_shorewall/manifests/defaults.pp
new file mode 100644
index 00000000..ceb17868
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/defaults.pp
@@ -0,0 +1,86 @@
+class site_shorewall::defaults {
+
+ include shorewall
+ include site_config::params
+
+ # be safe for development
+ # if ( $::site_config::params::environment == 'local' ) {
+ # $shorewall_startup='0'
+ # }
+
+ # If you want logging:
+ shorewall::params {
+ 'LOG': value => 'debug';
+ }
+
+ shorewall::zone {'net': type => 'ipv4'; }
+
+ # define interfaces
+ shorewall::interface { $site_config::params::interface:
+ zone => 'net',
+ options => 'tcpflags,blacklist,nosmurfs';
+ }
+
+ shorewall::policy {
+ 'fw-to-all':
+ sourcezone => 'fw',
+ destinationzone => 'all',
+ policy => 'ACCEPT',
+ order => 100;
+ 'all-to-all':
+ sourcezone => 'all',
+ destinationzone => 'all',
+ policy => 'DROP',
+ order => 200;
+ }
+
+ shorewall::rule {
+ # ping party
+ 'all2all-ping':
+ source => 'all',
+ destination => 'all',
+ action => 'Ping(ACCEPT)',
+ order => 200;
+ }
+
+ package { 'shorewall-init':
+ ensure => installed
+ }
+
+ include ::systemd
+ file { '/etc/systemd/system/shorewall.service':
+ ensure => file,
+ owner => 'root',
+ group => 'root',
+ mode => '0644',
+ source => 'puppet:///modules/site_shorewall/Debian/shorewall.service',
+ require => Package['shorewall'],
+ notify => Service['shorewall'],
+ } ~>
+ Exec['systemctl-daemon-reload']
+
+ augeas {
+ # stop instead of clear firewall on shutdown
+ 'shorewall_SAFESTOP':
+ changes => 'set /files/etc/shorewall/shorewall.conf/SAFESTOP Yes',
+ lens => 'Shellvars.lns',
+ incl => '/etc/shorewall/shorewall.conf',
+ require => Package['shorewall'],
+ notify => Service['shorewall'];
+ # require that the interface exist
+ 'shorewall_REQUIRE_INTERFACE':
+ changes => 'set /files/etc/shorewall/shorewall.conf/REQUIRE_INTERFACE Yes',
+ lens => 'Shellvars.lns',
+ incl => '/etc/shorewall/shorewall.conf',
+ require => Package['shorewall'],
+ notify => Service['shorewall'];
+ # configure shorewall-init
+ 'shorewall-init':
+ changes => 'set /files/etc/default/shorewall-init/PRODUCTS shorewall',
+ lens => 'Shellvars.lns',
+ incl => '/etc/default/shorewall-init',
+ require => [ Package['shorewall-init'], Service['shorewall'] ]
+ }
+
+ include site_shorewall::sshd
+}
diff --git a/puppet/modules/site_shorewall/manifests/dnat.pp b/puppet/modules/site_shorewall/manifests/dnat.pp
new file mode 100644
index 00000000..a73294cc
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/dnat.pp
@@ -0,0 +1,19 @@
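+# Wrapper around shorewall::rule for DNAT port forwarding.
+#
+# For illustration only, a hypothetical declaration (the address and port
+# values below are made up, not taken from any provider config):
+#
+#   site_shorewall::dnat { 'forward_openvpn':
+#     source          => 'net',
+#     destination     => '$FW:10.0.0.2:1194',
+#     proto           => 'tcp',
+#     destinationport => '443',
+#     originaldest    => '1.2.3.4',
+#   }
+#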
+define site_shorewall::dnat (
+ $source,
+ $destination,
+ $proto,
+ $destinationport,
+ $originaldest ) {
+
+
+ shorewall::rule {
+ "dnat_${name}_${destinationport}":
+ action => 'DNAT',
+ source => $source,
+ destination => $destination,
+ proto => $proto,
+ destinationport => $destinationport,
+ originaldest => $originaldest,
+ order => 200
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/dnat_rule.pp b/puppet/modules/site_shorewall/manifests/dnat_rule.pp
new file mode 100644
index 00000000..f9fbe950
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/dnat_rule.pp
@@ -0,0 +1,50 @@
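+# Adds DNAT rules that redirect an extra public port to the OpenVPN
+# gateway on port 1194. The port to redirect is passed as the resource
+# title ($name).
+#
+# For illustration only, a hypothetical declaration (port chosen arbitrarily):
+#
+#   site_shorewall::dnat_rule { '443': }
+#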
+define site_shorewall::dnat_rule {
+
+ $port = $name
+ if $port != 1194 {
+ if $site_openvpn::openvpn_allow_unlimited {
+ shorewall::rule {
+ "dnat_tcp_port_${port}":
+ action => 'DNAT',
+ source => 'net',
+ destination => "\$FW:${site_openvpn::unlimited_gateway_address}:1194",
+ proto => 'tcp',
+ destinationport => $port,
+ originaldest => $site_openvpn::unlimited_gateway_address,
+ order => 100;
+ }
+ shorewall::rule {
+ "dnat_udp_port_${port}":
+ action => 'DNAT',
+ source => 'net',
+ destination => "\$FW:${site_openvpn::unlimited_gateway_address}:1194",
+ proto => 'udp',
+ destinationport => $port,
+ originaldest => $site_openvpn::unlimited_gateway_address,
+ order => 100;
+ }
+ }
+ if $site_openvpn::openvpn_allow_limited {
+ shorewall::rule {
+ "dnat_free_tcp_port_${port}":
+ action => 'DNAT',
+ source => 'net',
+ destination => "\$FW:${site_openvpn::limited_gateway_address}:1194",
+ proto => 'tcp',
+ destinationport => $port,
+ originaldest => $site_openvpn::unlimited_gateway_address,
+ order => 100;
+ }
+ shorewall::rule {
+ "dnat_free_udp_port_${port}":
+ action => 'DNAT',
+ source => 'net',
+ destination => "\$FW:${site_openvpn::limited_gateway_address}:1194",
+ proto => 'udp',
+ destinationport => $port,
+ originaldest => $site_openvpn::unlimited_gateway_address,
+ order => 100;
+ }
+ }
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/eip.pp b/puppet/modules/site_shorewall/manifests/eip.pp
new file mode 100644
index 00000000..8fbba658
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/eip.pp
@@ -0,0 +1,92 @@
+class site_shorewall::eip {
+
+ include site_shorewall::defaults
+ include site_config::params
+ include site_shorewall::ip_forward
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_eip':
+ content => "PARAM - - tcp 1194
+ PARAM - - udp 1194
+ ",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+ shorewall::interface {
+ 'tun0':
+ zone => 'eip',
+ options => 'tcpflags,blacklist,nosmurfs';
+ 'tun1':
+ zone => 'eip',
+ options => 'tcpflags,blacklist,nosmurfs';
+ 'tun2':
+ zone => 'eip',
+ options => 'tcpflags,blacklist,nosmurfs';
+ 'tun3':
+ zone => 'eip',
+ options => 'tcpflags,blacklist,nosmurfs';
+ }
+
+ shorewall::zone {
+ 'eip':
+ type => 'ipv4';
+ }
+
+ $interface = $site_config::params::interface
+
+ shorewall::masq {
+ "${interface}_unlimited_tcp":
+ interface => $interface,
+ source => "${site_openvpn::openvpn_unlimited_tcp_network_prefix}.0/${site_openvpn::openvpn_unlimited_tcp_cidr}";
+ "${interface}_unlimited_udp":
+ interface => $interface,
+ source => "${site_openvpn::openvpn_unlimited_udp_network_prefix}.0/${site_openvpn::openvpn_unlimited_udp_cidr}";
+ }
+ if ! $::ec2_instance_id {
+ shorewall::masq {
+ "${interface}_limited_tcp":
+ interface => $interface,
+ source => "${site_openvpn::openvpn_limited_tcp_network_prefix}.0/${site_openvpn::openvpn_limited_tcp_cidr}";
+ "${interface}_limited_udp":
+ interface => $interface,
+ source => "${site_openvpn::openvpn_limited_udp_network_prefix}.0/${site_openvpn::openvpn_limited_udp_cidr}";
+ }
+ }
+
+ shorewall::policy {
+ 'eip-to-all':
+ sourcezone => 'eip',
+ destinationzone => 'all',
+ policy => 'ACCEPT',
+ order => 100;
+ }
+
+ shorewall::rule {
+ 'net2fw-openvpn':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_eip(ACCEPT)',
+ order => 200;
+
+ 'block_eip_dns_udp':
+ action => 'REJECT',
+ source => 'eip',
+ destination => 'net',
+ proto => 'udp',
+ destinationport => 'domain',
+ order => 300;
+
+ 'block_eip_dns_tcp':
+ action => 'REJECT',
+ source => 'eip',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => 'domain',
+ order => 301;
+ }
+
+ # create dnat rule for each port
+ site_shorewall::dnat_rule { $site_openvpn::openvpn_ports: }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/ip_forward.pp b/puppet/modules/site_shorewall/manifests/ip_forward.pp
new file mode 100644
index 00000000..d53ee8a5
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/ip_forward.pp
@@ -0,0 +1,10 @@
+class site_shorewall::ip_forward {
+ include augeas
+ augeas { 'enable_ip_forwarding':
+ changes => 'set /files/etc/shorewall/shorewall.conf/IP_FORWARDING Yes',
+ lens => 'Shellvars.lns',
+ incl => '/etc/shorewall/shorewall.conf',
+ notify => Service[shorewall],
+ require => [ Class[augeas], Package[shorewall] ];
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/monitor.pp b/puppet/modules/site_shorewall/manifests/monitor.pp
new file mode 100644
index 00000000..f4ed4f7c
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/monitor.pp
@@ -0,0 +1,8 @@
+class site_shorewall::monitor {
+
+ include site_shorewall::defaults
+ include site_shorewall::service::http
+ include site_shorewall::service::https
+
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/mx.pp b/puppet/modules/site_shorewall/manifests/mx.pp
new file mode 100644
index 00000000..332f164e
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/mx.pp
@@ -0,0 +1,24 @@
+class site_shorewall::mx {
+
+ include site_shorewall::defaults
+
+ $smtpd_ports = '25,465,587'
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_mx':
+ content => "PARAM - - tcp ${smtpd_ports} ",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+
+ shorewall::rule {
+ 'net2fw-mx':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_mx(ACCEPT)',
+ order => 200;
+ }
+
+ include site_shorewall::service::smtp
+}
diff --git a/puppet/modules/site_shorewall/manifests/obfsproxy.pp b/puppet/modules/site_shorewall/manifests/obfsproxy.pp
new file mode 100644
index 00000000..75846705
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/obfsproxy.pp
@@ -0,0 +1,25 @@
+# configure shorewall for obfsproxy
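+#
+# For illustration only, the hiera data this class reads might look like
+# (values are hypothetical, not shipped defaults):
+#
+#   obfsproxy:
+#     scramblesuit:
+#       port: 23050
+#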
+class site_shorewall::obfsproxy {
+
+ include site_shorewall::defaults
+
+ $obfsproxy = hiera('obfsproxy')
+ $scramblesuit = $obfsproxy['scramblesuit']
+ $scram_port = $scramblesuit['port']
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_obfsproxy':
+ content => "PARAM - - tcp ${scram_port} ",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+ shorewall::rule {
+ 'net2fw-obfs':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_obfsproxy(ACCEPT)',
+ order => 200;
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/service/http.pp b/puppet/modules/site_shorewall/manifests/service/http.pp
new file mode 100644
index 00000000..74b874d5
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/service/http.pp
@@ -0,0 +1,13 @@
+class site_shorewall::service::http {
+
+ include site_shorewall::defaults
+
+ shorewall::rule {
+ 'net2fw-http':
+ source => 'net',
+ destination => '$FW',
+ action => 'HTTP(ACCEPT)',
+ order => 200;
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/service/https.pp b/puppet/modules/site_shorewall/manifests/service/https.pp
new file mode 100644
index 00000000..4a8b119c
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/service/https.pp
@@ -0,0 +1,12 @@
+class site_shorewall::service::https {
+
+ include site_shorewall::defaults
+
+ shorewall::rule {
+ 'net2fw-https':
+ source => 'net',
+ destination => '$FW',
+ action => 'HTTPS(ACCEPT)',
+ order => 200;
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/service/smtp.pp b/puppet/modules/site_shorewall/manifests/service/smtp.pp
new file mode 100644
index 00000000..7fbdf14e
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/service/smtp.pp
@@ -0,0 +1,13 @@
+class site_shorewall::service::smtp {
+
+ include site_shorewall::defaults
+
+ shorewall::rule {
+    'fw2net-smtp':
+ source => '$FW',
+ destination => 'net',
+ action => 'SMTP(ACCEPT)',
+ order => 200;
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/service/webapp_api.pp b/puppet/modules/site_shorewall/manifests/service/webapp_api.pp
new file mode 100644
index 00000000..d3a1aeed
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/service/webapp_api.pp
@@ -0,0 +1,23 @@
+# configure shorewall for webapp api
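+#
+# For illustration only, the hiera data this class reads might look like
+# (the port value is an assumption):
+#
+#   api:
+#     port: 4430
+#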
+class site_shorewall::service::webapp_api {
+
+ $api = hiera('api')
+ $api_port = $api['port']
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_webapp_api':
+ content => "PARAM - - tcp ${api_port} ",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+
+ shorewall::rule {
+ 'net2fw-webapp_api':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_webapp_api(ACCEPT)',
+ order => 200;
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/soledad.pp b/puppet/modules/site_shorewall/manifests/soledad.pp
new file mode 100644
index 00000000..518d8689
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/soledad.pp
@@ -0,0 +1,23 @@
+class site_shorewall::soledad {
+
+ $soledad = hiera('soledad')
+ $soledad_port = $soledad['port']
+
+ include site_shorewall::defaults
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_soledad':
+ content => "PARAM - - tcp ${soledad_port}",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+ shorewall::rule {
+ 'net2fw-soledad':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_soledad(ACCEPT)',
+ order => 200;
+ }
+}
+
diff --git a/puppet/modules/site_shorewall/manifests/sshd.pp b/puppet/modules/site_shorewall/manifests/sshd.pp
new file mode 100644
index 00000000..e2332592
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/sshd.pp
@@ -0,0 +1,31 @@
+# configure shorewall for sshd
+class site_shorewall::sshd {
+
+ $ssh_config = hiera('ssh')
+ $ssh_port = $ssh_config['port']
+
+ include shorewall
+
+ # define macro for incoming sshd
+ file { '/etc/shorewall/macro.leap_sshd':
+ content => "PARAM - - tcp ${ssh_port}",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+
+ shorewall::rule {
+ # outside to server
+ 'net2fw-ssh':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_sshd(ACCEPT)',
+ order => 200;
+ }
+
+  # set up a routestopped rule to allow ssh when shorewall is stopped
+ shorewall::routestopped { $site_config::params::interface:
+ options => "- tcp ${ssh_port}"
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/stunnel/client.pp b/puppet/modules/site_shorewall/manifests/stunnel/client.pp
new file mode 100644
index 00000000..9a89a244
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/stunnel/client.pp
@@ -0,0 +1,40 @@
+#
+# Adds some firewall magic to the stunnel.
+#
+# Using DNAT, this firewall rule allows a locally running program
+# to connect to the normal remote IP and remote port of the
+# service on another machine, while the connection is transparently
+# routed through the locally running stunnel client.
+#
+# The network looks like this:
+#
+# From the client's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# consumer app -> localhost:accept_port -> connect:connect_port -> localhost:original_port
+#
+# From the server's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# ?? -> *:accept_port -> localhost:connect_port -> service
+#
+
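+# For illustration only, a hypothetical declaration (ports and host are
+# assumptions, not values taken from the platform):
+#
+#   site_shorewall::stunnel::client { 'couchdb_example':
+#     accept_port   => 4096,
+#     connect       => 'couch1.example.org',
+#     connect_port  => 15984,
+#     original_port => 5984,
+#   }
+#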
+define site_shorewall::stunnel::client(
+ $accept_port,
+ $connect,
+ $connect_port,
+ $original_port) {
+
+ include site_shorewall::defaults
+
+ shorewall::rule {
+ "stunnel_dnat_${name}":
+ action => 'DNAT',
+ source => '$FW',
+ destination => "\$FW:127.0.0.1:${accept_port}",
+ proto => 'tcp',
+ destinationport => $original_port,
+ originaldest => $connect,
+ order => 200
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/stunnel/server.pp b/puppet/modules/site_shorewall/manifests/stunnel/server.pp
new file mode 100644
index 00000000..798cd631
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/stunnel/server.pp
@@ -0,0 +1,22 @@
+#
+# Allow all incoming connections to stunnel server port
+#
+
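+# For illustration only, a hypothetical declaration (the port is an assumption):
+#
+#   site_shorewall::stunnel::server { 'couch_server_example':
+#     port => 15984,
+#   }
+#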
+define site_shorewall::stunnel::server($port) {
+
+ include site_shorewall::defaults
+
+ file { "/etc/shorewall/macro.stunnel_server_${name}":
+ content => "PARAM - - tcp ${port}",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+ shorewall::rule {
+ "net2fw-stunnel-server-${name}":
+ source => 'net',
+ destination => '$FW',
+ action => "stunnel_server_${name}(ACCEPT)",
+ order => 200;
+ }
+
+} \ No newline at end of file
diff --git a/puppet/modules/site_shorewall/manifests/tor.pp b/puppet/modules/site_shorewall/manifests/tor.pp
new file mode 100644
index 00000000..324b4844
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/tor.pp
@@ -0,0 +1,26 @@
+# configure shorewall for tor
+class site_shorewall::tor {
+
+ include site_shorewall::defaults
+ include site_shorewall::ip_forward
+
+ $tor_port = '9001'
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_tor':
+ content => "PARAM - - tcp ${tor_port} ",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+
+ shorewall::rule {
+ 'net2fw-tor':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_tor(ACCEPT)',
+ order => 200;
+ }
+
+ include site_shorewall::service::http
+}
diff --git a/puppet/modules/site_shorewall/manifests/webapp.pp b/puppet/modules/site_shorewall/manifests/webapp.pp
new file mode 100644
index 00000000..a8d2aa5b
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/webapp.pp
@@ -0,0 +1,7 @@
+class site_shorewall::webapp {
+
+ include site_shorewall::defaults
+ include site_shorewall::service::https
+ include site_shorewall::service::http
+ include site_shorewall::service::webapp_api
+}
diff --git a/puppet/modules/site_squid_deb_proxy/manifests/client.pp b/puppet/modules/site_squid_deb_proxy/manifests/client.pp
new file mode 100644
index 00000000..27844270
--- /dev/null
+++ b/puppet/modules/site_squid_deb_proxy/manifests/client.pp
@@ -0,0 +1,5 @@
+class site_squid_deb_proxy::client {
+ include squid_deb_proxy::client
+ include site_shorewall::defaults
+ include shorewall::rules::mdns
+}
diff --git a/puppet/modules/site_sshd/manifests/authorized_keys.pp b/puppet/modules/site_sshd/manifests/authorized_keys.pp
new file mode 100644
index 00000000..a1fde3f6
--- /dev/null
+++ b/puppet/modules/site_sshd/manifests/authorized_keys.pp
@@ -0,0 +1,34 @@
+# We want to purge unmanaged keys from the authorized_keys file so that only
+# keys added in the provider are valid. Any manually added keys will be
+# overridden.
+#
+# In order to do this, we have to use a custom define to deploy the
+# authorized_keys file because puppet's internal resource doesn't allow
+# purging before populating this file.
+#
+# See the following for more information:
+# https://tickets.puppetlabs.com/browse/PUP-1174
+# https://leap.se/code/issues/2990
+# https://leap.se/code/issues/3010
+#
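+# For illustration only, a hypothetical declaration (user name and key data
+# are made up):
+#
+#   site_sshd::authorized_keys { 'exampleadmin':
+#     keys => {
+#       'exampleadmin' => {
+#         'type' => 'ssh-rsa',
+#         'key'  => 'AAAA...',
+#       },
+#     },
+#   }
+#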
+define site_sshd::authorized_keys ($keys, $ensure = 'present', $home = '') {
+ # This line allows default homedir based on $title variable.
+ # If $home is empty, the default is used.
+ $homedir = $home ? {'' => "/home/${title}", default => $home}
+ $owner = $ensure ? {'present' => $title, default => undef }
+ $group = $ensure ? {'present' => $title, default => undef }
+ file {
+ "${homedir}/.ssh":
+ ensure => 'directory',
+ owner => $title,
+ group => $title,
+ mode => '0700';
+ "${homedir}/.ssh/authorized_keys":
+ ensure => $ensure,
+ owner => $owner,
+ group => $group,
+ mode => '0600',
+ require => File["${homedir}/.ssh"],
+ content => template('site_sshd/authorized_keys.erb');
+ }
+}
diff --git a/puppet/modules/site_sshd/manifests/deploy_authorized_keys.pp b/puppet/modules/site_sshd/manifests/deploy_authorized_keys.pp
new file mode 100644
index 00000000..97ca058f
--- /dev/null
+++ b/puppet/modules/site_sshd/manifests/deploy_authorized_keys.pp
@@ -0,0 +1,9 @@
+class site_sshd::deploy_authorized_keys ( $keys ) {
+ tag 'leap_authorized_keys'
+
+ site_sshd::authorized_keys {'root':
+ keys => $keys,
+ home => '/root'
+ }
+
+}
diff --git a/puppet/modules/site_sshd/manifests/init.pp b/puppet/modules/site_sshd/manifests/init.pp
new file mode 100644
index 00000000..a9202da4
--- /dev/null
+++ b/puppet/modules/site_sshd/manifests/init.pp
@@ -0,0 +1,82 @@
+# configures sshd, mosh, authorized keys and known hosts
+class site_sshd {
+ $ssh = hiera_hash('ssh')
+ $ssh_config = $ssh['config']
+ $hosts = hiera('hosts', '')
+
+ ##
+ ## SETUP AUTHORIZED KEYS
+ ##
+
+ $authorized_keys = $ssh['authorized_keys']
+
+ class { 'site_sshd::deploy_authorized_keys':
+ keys => $authorized_keys
+ }
+
+ ##
+ ## SETUP KNOWN HOSTS and SSH_CONFIG
+ ##
+
+ file {
+ '/etc/ssh/ssh_known_hosts':
+ owner => root,
+ group => root,
+ mode => '0644',
+ content => template('site_sshd/ssh_known_hosts.erb');
+
+ '/etc/ssh/ssh_config':
+ owner => root,
+ group => root,
+ mode => '0644',
+ content => template('site_sshd/ssh_config.erb');
+ }
+
+ ##
+ ## OPTIONAL MOSH SUPPORT
+ ##
+
+ $mosh = $ssh['mosh']
+
+ if $mosh['enabled'] {
+ class { 'site_sshd::mosh':
+ ensure => present,
+ ports => $mosh['ports']
+ }
+ }
+ else {
+ class { 'site_sshd::mosh':
+ ensure => absent
+ }
+ }
+
+ # we cannot use the 'hardened' parameter because leap_cli uses an
+ # old net-ssh gem that is incompatible with the included
+ # "KexAlgorithms curve25519-sha256@libssh.org",
+ # see https://leap.se/code/issues/7591
+ # therefore we don't use it here, but include all other options
+ # that would be applied by the 'hardened' parameter
+ # not all options are available on wheezy
+ if ( $::lsbdistcodename == 'wheezy' ) {
+ $tail_additional_options = 'Ciphers aes256-ctr
+MACs hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
+ } else {
+ $tail_additional_options = 'Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
+ }
+
+ ##
+ ## SSHD SERVER CONFIGURATION
+ ##
+ class { '::sshd':
+ manage_nagios => false,
+ ports => [ $ssh['port'] ],
+ use_pam => 'yes',
+ print_motd => 'no',
+ tcp_forwarding => $ssh_config['AllowTcpForwarding'],
+ manage_client => false,
+ use_storedconfigs => false,
+ tail_additional_options => $tail_additional_options,
+ hostkey_type => [ 'rsa', 'dsa', 'ecdsa' ]
+ }
+}
diff --git a/puppet/modules/site_sshd/manifests/mosh.pp b/puppet/modules/site_sshd/manifests/mosh.pp
new file mode 100644
index 00000000..49f56ca0
--- /dev/null
+++ b/puppet/modules/site_sshd/manifests/mosh.pp
@@ -0,0 +1,21 @@
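+# Optionally installs mosh and opens its UDP port range in shorewall.
+#
+# For illustration only, a hypothetical declaration (the port range shown is
+# just the class default):
+#
+#   class { 'site_sshd::mosh':
+#     ensure => present,
+#     ports  => '60000-61000',
+#   }
+#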
+class site_sshd::mosh ( $ensure = present, $ports = '60000-61000' ) {
+
+ package { 'mosh':
+ ensure => $ensure
+ }
+
+ file { '/etc/shorewall/macro.mosh':
+ ensure => $ensure,
+ content => "PARAM - - udp ${ports}",
+ notify => Service['shorewall'],
+ require => Package['shorewall'];
+ }
+
+ shorewall::rule { 'net2fw-mosh':
+ ensure => $ensure,
+ source => 'net',
+ destination => '$FW',
+ action => 'mosh(ACCEPT)',
+ order => 200;
+ }
+}
diff --git a/puppet/modules/site_sshd/templates/authorized_keys.erb b/puppet/modules/site_sshd/templates/authorized_keys.erb
new file mode 100644
index 00000000..51bdc5b3
--- /dev/null
+++ b/puppet/modules/site_sshd/templates/authorized_keys.erb
@@ -0,0 +1,10 @@
+# NOTICE: This file is autogenerated by Puppet
+# all manually added keys will be overridden
+
+<% @keys.sort.each do |user, hash| -%>
+<% if user == 'monitor' -%>
+command="/usr/bin/check_mk_agent",no-port-forwarding,no-x11-forwarding,no-agent-forwarding,no-pty,no-user-rc, <%=hash['type']-%> <%=hash['key']%> <%=user%>
+<% else -%>
+<%=hash['type']-%> <%=hash['key']%> <%=user%>
+<% end -%>
+<% end -%>
diff --git a/puppet/modules/site_sshd/templates/ssh_config.erb b/puppet/modules/site_sshd/templates/ssh_config.erb
new file mode 100644
index 00000000..36c0b6d5
--- /dev/null
+++ b/puppet/modules/site_sshd/templates/ssh_config.erb
@@ -0,0 +1,40 @@
+# This file is generated by Puppet
+# This is the ssh client system-wide configuration file. See
+# ssh_config(5) for more information. This file provides defaults for
+# users, and the values can be changed in per-user configuration files
+# or on the command line.
+
+Host *
+ SendEnv LANG LC_*
+ HashKnownHosts yes
+ GSSAPIAuthentication yes
+ GSSAPIDelegateCredentials no
+<% if scope.lookupvar('::site_config::params::environment') == 'local' -%>
+ #
+ # Vagrant nodes should have strict host key checking
+ # turned off. The problem is that the host key for a vagrant
+ # node is specific to the particular instance of the vagrant
+ # node you have running locally. For this reason, we can't
+ # track the host keys, or your host key for vpn1 would conflict
+ # with my host key for vpn1.
+ #
+ StrictHostKeyChecking no
+<% end -%>
+
+#
+# Tell SSH what host key algorithm we should use. I don't understand why this
+# is needed, since the man page says that "if hostkeys are known for the
+# destination host then [HostKeyAlgorithms default] is modified to prefer
+# their algorithms."
+#
+
+<% @hosts.sort.each do |name, host| -%>
+Host <%= name %> <%= host['domain_full'] %> <%= host['domain_internal'] %> <%= host['ip_address'] %>
+<% if host['host_pub_key'] -%>
+HostKeyAlgorithms <%= host['host_pub_key'].split(" ").first %>
+<% end -%>
+<% if host['port'] -%>
+Port <%= host['port'] %>
+<% end -%>
+
+<% end -%>
diff --git a/puppet/modules/site_sshd/templates/ssh_known_hosts.erb b/puppet/modules/site_sshd/templates/ssh_known_hosts.erb
new file mode 100644
index 00000000..002ab732
--- /dev/null
+++ b/puppet/modules/site_sshd/templates/ssh_known_hosts.erb
@@ -0,0 +1,7 @@
+# This file is generated by Puppet
+
+<% @hosts.sort.each do |name, hash| -%>
+<% if hash['host_pub_key'] -%>
+<%= name%>,<%=hash['domain_full']%>,<%=hash['domain_internal']%>,<%=hash['ip_address']%> <%=hash['host_pub_key']%>
+<% end -%>
+<% end -%>
diff --git a/puppet/modules/site_static/README b/puppet/modules/site_static/README
new file mode 100644
index 00000000..bc719782
--- /dev/null
+++ b/puppet/modules/site_static/README
@@ -0,0 +1,3 @@
+Deploy one or more static websites to a node.
+
+For now, it only supports `amber`-based static sites. Plain HTML and Jekyll support should come in the future.
diff --git a/puppet/modules/site_static/manifests/domain.pp b/puppet/modules/site_static/manifests/domain.pp
new file mode 100644
index 00000000..b26cc9e3
--- /dev/null
+++ b/puppet/modules/site_static/manifests/domain.pp
@@ -0,0 +1,33 @@
+# configure static service for domain
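+#
+# For illustration only, a hypothetical declaration (domain, key and cert
+# contents are placeholders):
+#
+#   site_static::domain { 'static.example.org':
+#     ca_cert  => $ca_pem,
+#     key      => $key_pem,
+#     cert     => $cert_pem,
+#     tls_only => true,
+#   }
+#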
+define site_static::domain (
+ $ca_cert,
+ $key,
+ $cert,
+ $tls_only=true,
+ $locations=undef,
+ $aliases=undef,
+ $apache_config=undef) {
+
+ $domain = $name
+ $base_dir = '/srv/static'
+
+ $cafile = "${cert}\n${ca_cert}"
+
+ if is_hash($locations) {
+ create_resources(site_static::location, $locations)
+ }
+
+ x509::cert { $domain:
+ content => $cafile,
+ notify => Service[apache]
+ }
+ x509::key { $domain:
+ content => $key,
+ notify => Service[apache]
+ }
+
+ apache::vhost::file { $domain:
+ content => template('site_static/apache.conf.erb')
+ }
+
+}
diff --git a/puppet/modules/site_static/manifests/init.pp b/puppet/modules/site_static/manifests/init.pp
new file mode 100644
index 00000000..4a722d62
--- /dev/null
+++ b/puppet/modules/site_static/manifests/init.pp
@@ -0,0 +1,72 @@
+# deploy static service
+class site_static {
+ tag 'leap_service'
+
+ include site_config::default
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca_bundle
+
+ $static = hiera('static')
+ $domains = $static['domains']
+ $formats = $static['formats']
+ $bootstrap = $static['bootstrap_files']
+ $tor = hiera('tor', false)
+
+ if $bootstrap['enabled'] {
+ $bootstrap_domain = $bootstrap['domain']
+ $bootstrap_client = $bootstrap['client_version']
+ file { '/srv/leap/provider.json':
+ content => $bootstrap['provider_json'],
+ owner => 'www-data',
+ group => 'www-data',
+ mode => '0444';
+ }
+ # It is important to always touch provider.json: the client needs to check x-min-client-version header,
+ # but this is only sent when the file has been modified (otherwise 304 is sent by apache). The problem
+ # is that changing min client version won't alter the content of provider.json, so we must touch it.
+ exec { '/bin/touch /srv/leap/provider.json':
+ require => File['/srv/leap/provider.json'];
+ }
+ }
+
+ include apache::module::headers
+ include apache::module::alias
+ include apache::module::expires
+ include apache::module::removeip
+ include apache::module::dir
+ include apache::module::negotiation
+ include site_apache::common
+ include site_config::ruby::dev
+
+ if (member($formats, 'rack')) {
+ include site_apt::preferences::passenger
+ class { 'passenger':
+ use_munin => false,
+ require => Class['site_apt::preferences::passenger']
+ }
+ }
+
+ if (member($formats, 'amber')) {
+ rubygems::gem{'amber-0.3.8':
+ require => Package['zlib1g-dev']
+ }
+
+ package { 'zlib1g-dev':
+ ensure => installed
+ }
+ }
+
+ create_resources(site_static::domain, $domains)
+
+ if $tor {
+ $hidden_service = $tor['hidden_service']
+ if $hidden_service['active'] {
+ include site_webapp::hidden_service
+ }
+ }
+
+ include site_shorewall::defaults
+ include site_shorewall::service::http
+ include site_shorewall::service::https
+}
diff --git a/puppet/modules/site_static/manifests/location.pp b/puppet/modules/site_static/manifests/location.pp
new file mode 100644
index 00000000..d116de2f
--- /dev/null
+++ b/puppet/modules/site_static/manifests/location.pp
@@ -0,0 +1,36 @@
+# configure static service for location
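+#
+# For illustration only, a hypothetical declaration (repository URL and
+# revision are made up):
+#
+#   site_static::location { 'docs':
+#     path   => '/docs',
+#     format => 'amber',
+#     source => {
+#       'type'     => 'git',
+#       'repo'     => 'https://example.org/git/docs-site.git',
+#       'revision' => 'origin/master',
+#     },
+#   }
+#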
+define site_static::location($path, $format, $source) {
+
+ $file_path = "/srv/static/${name}"
+ $allowed_formats = ['amber','rack']
+
+ if $format == undef {
+ fail("static_site location `${path}` is missing `format` field.")
+ }
+
+ if ! member($allowed_formats, $format) {
+ $formats_str = join($allowed_formats, ', ')
+ fail("Unsupported static_site location format `${format}`. Supported formats include ${formats_str}.")
+ }
+
+ if ($format == 'amber') {
+ exec {"amber_build_${name}":
+ cwd => $file_path,
+ command => 'amber rebuild',
+ user => 'www-data',
+ timeout => 600,
+ subscribe => Vcsrepo[$file_path]
+ }
+ }
+
+ vcsrepo { $file_path:
+ ensure => present,
+ force => true,
+ revision => $source['revision'],
+ provider => $source['type'],
+ source => $source['repo'],
+ owner => 'www-data',
+ group => 'www-data'
+ }
+
+}
diff --git a/puppet/modules/site_static/templates/amber.erb b/puppet/modules/site_static/templates/amber.erb
new file mode 100644
index 00000000..694f1136
--- /dev/null
+++ b/puppet/modules/site_static/templates/amber.erb
@@ -0,0 +1,13 @@
+<%- if @location_path != '' -%>
+ AliasMatch ^/[a-z]{2}/<%=@location_path%>(/.+|/|)$ "<%=@directory%>/$1"
+ Alias /<%=@location_path%> "<%=@directory%>/"
+<%- end -%>
+ <Directory "<%=@directory%>/">
+ AllowOverride FileInfo Indexes Options=All,MultiViews
+<% if scope.function_guess_apache_version([]) == '2.4' %>
+ Require all granted
+<% else %>
+ Order deny,allow
+ Allow from all
+<% end %>
+ </Directory>
diff --git a/puppet/modules/site_static/templates/apache.conf.erb b/puppet/modules/site_static/templates/apache.conf.erb
new file mode 100644
index 00000000..6b969d1c
--- /dev/null
+++ b/puppet/modules/site_static/templates/apache.conf.erb
@@ -0,0 +1,88 @@
+<%-
+ ##
+ ## An apache config for static websites.
+ ##
+
+ def location_directory(name, location)
+ if ['amber', 'rack'].include?(location['format'])
+ File.join(@base_dir, name, 'public')
+ else
+ File.join(@base_dir, name)
+ end
+ end
+
+ @document_root = begin
+ root = '/var/www'
+ @locations && @locations.each do |name, location|
+ root = location_directory(name, location) if location['path'] == '/'
+ end
+ root.gsub(%r{^/|/$}, '')
+ end
+
+ bootstrap_domain = scope.lookupvar('site_static::bootstrap_domain')
+ bootstrap_client = scope.lookupvar('site_static::bootstrap_client')
+-%>
+
+<VirtualHost *:80>
+ ServerName <%= @domain %>
+ ServerAlias www.<%= @domain %>
+<%- @aliases && @aliases.each do |domain_alias| -%>
+ ServerAlias <%= domain_alias %>
+<%- end -%>
+<%- if @tls_only -%>
+ RewriteEngine On
+ RewriteRule ^.*$ https://<%= @domain -%>%{REQUEST_URI} [R=permanent,L]
+<%- end -%>
+</VirtualHost>
+
+<VirtualHost *:443>
+ ServerName <%= @domain %>
+ ServerAlias www.<%= @domain %>
+<%- @aliases && @aliases.each do |domain_alias| -%>
+ ServerAlias <%= domain_alias %>
+<%- end -%>
+
+ #RewriteLog "/var/log/apache2/rewrite.log"
+ #RewriteLogLevel 3
+
+ Include include.d/ssl_common.inc
+
+<%- if @tls_only -%>
+ Header always set Strict-Transport-Security: "max-age=15768000;includeSubdomains"
+<%- end -%>
+ Header set X-Frame-Options "deny"
+ Header always unset X-Powered-By
+ Header always unset X-Runtime
+
+ SSLCertificateKeyFile /etc/x509/keys/<%= @domain %>.key
+ SSLCertificateFile /etc/x509/certs/<%= @domain %>.crt
+
+ RequestHeader set X_FORWARDED_PROTO 'https'
+
+ DocumentRoot "/<%= @document_root %>/"
+ AccessFileName .htaccess
+
+<%- if ([@aliases]+[@domain]).flatten.include?(bootstrap_domain) -%>
+ Alias /provider.json /srv/leap/provider.json
+ <Location /provider.json>
+ Header set X-Minimum-Client-Version <%= bootstrap_client['min'] %>
+ </Location>
+<%- end -%>
+
+<%- if @apache_config -%>
+<%= @apache_config.gsub(':percent:','%') %>
+<%- end -%>
+
+<%- @locations && @locations.each do |name, location| -%>
+<%- location_path = location['path'].gsub(%r{^/|/$}, '') -%>
+<%- directory = location_directory(name, location) -%>
+<%- local_vars = {'location_path'=>location_path, 'directory'=>directory, 'location'=>location, 'name'=>name} -%>
+<%- template_path = File.join(File.dirname(__FILE__), location['format']) + '.erb' -%>
+<%- break unless File.exists?(template_path) -%>
+ ##
+ ## <%= name %> (<%= location['format'] %>)
+ ##
+<%= scope.function_templatewlv([template_path, local_vars]) %>
+<%- end -%>
+
+</VirtualHost>
diff --git a/puppet/modules/site_static/templates/rack.erb b/puppet/modules/site_static/templates/rack.erb
new file mode 100644
index 00000000..431778bb
--- /dev/null
+++ b/puppet/modules/site_static/templates/rack.erb
@@ -0,0 +1,19 @@
+ #PassengerLogLevel 1
+ #PassengerAppEnv production
+ #PassengerFriendlyErrorPages on
+<%- if @location_path != '' -%>
+ Alias /<%=@location_path%> "<%=@directory%>"
+ <Location /<%=@location_path%>>
+ PassengerBaseURI /<%=@location_path%>
+ PassengerAppRoot "<%=File.dirname(@directory)%>"
+ </Location>
+<%- end -%>
+ <Directory "<%=@directory%>">
+ Options -MultiViews
+<% if scope.function_guess_apache_version([]) == '2.4' %>
+ Require all granted
+<% else %>
+ Order deny,allow
+ Allow from all
+<% end %>
+ </Directory>
diff --git a/puppet/modules/site_stunnel/manifests/client.pp b/puppet/modules/site_stunnel/manifests/client.pp
new file mode 100644
index 00000000..c9e034f1
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/client.pp
@@ -0,0 +1,64 @@
+#
+# Sets up stunnel and firewall configuration for
+# a single stunnel client
+#
+# As a client, we accept connections on localhost,
+# and connect to a remote $connect:$connect_port
+#
+
+define site_stunnel::client (
+ $accept_port,
+ $connect_port,
+ $connect,
+ $original_port,
+ $verify = '2',
+ $pid = $name,
+ $rndfile = '/var/lib/stunnel4/.rnd',
+ $debuglevel = 'warning' ) {
+
+ $logfile = "/var/log/stunnel4/${name}.log"
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include x509::variables
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+ stunnel::service { $name:
+ accept => "127.0.0.1:${accept_port}",
+ connect => "${connect}:${connect_port}",
+ client => true,
+ cafile => $ca_path,
+ key => $key_path,
+ cert => $cert_path,
+ verify => $verify,
+ pid => "/var/run/stunnel4/${pid}.pid",
+ rndfile => $rndfile,
+ debuglevel => $debuglevel,
+ sslversion => 'TLSv1',
+ syslog => 'no',
+ output => $logfile;
+ }
+
+ # define the log files so that we can purge the
+ # files from /var/log/stunnel4 that are not defined.
+ file {
+ $logfile:;
+ "${logfile}.1.gz":;
+ "${logfile}.2.gz":;
+ "${logfile}.3.gz":;
+ "${logfile}.4.gz":;
+ "${logfile}.5.gz":;
+ }
+
+ site_shorewall::stunnel::client { $name:
+ accept_port => $accept_port,
+ connect => $connect,
+ connect_port => $connect_port,
+ original_port => $original_port
+ }
+
+ include site_check_mk::agent::stunnel
+}
diff --git a/puppet/modules/site_stunnel/manifests/clients.pp b/puppet/modules/site_stunnel/manifests/clients.pp
new file mode 100644
index 00000000..c0958b5f
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/clients.pp
@@ -0,0 +1,23 @@
+#
+# example hiera yaml:
+#
+# stunnel:
+# clients:
+# ednp_clients:
+# thrips_9002:
+# accept_port: 4001
+# connect: thrips.demo.bitmask.i
+# connect_port: 19002
+# epmd_clients:
+# thrips_4369:
+# accept_port: 4000
+# connect: thrips.demo.bitmask.i
+# connect_port: 14369
+#
+# In the above example, this resource definition is called twice, with $name
+# 'ednp_clients' and 'epmd_clients'
+#
+
+define site_stunnel::clients {
+ create_resources(site_stunnel::client, $site_stunnel::clients[$name])
+}
diff --git a/puppet/modules/site_stunnel/manifests/init.pp b/puppet/modules/site_stunnel/manifests/init.pp
new file mode 100644
index 00000000..a874721f
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/init.pp
@@ -0,0 +1,48 @@
+#
+# If you need something to happen after stunnel is started,
+# you can depend on Service['stunnel'] or Class['site_stunnel']
+#
+
+class site_stunnel {
+
+ # include the generic stunnel module
+ # increase the number of open files to allow for 800 connections
+ class { 'stunnel': default_extra => 'ulimit -n 4096' }
+
+ # The stunnel.conf provided by the Debian package is broken by default
+ # so we get rid of it and just define our own. See #549384
+ if !defined(File['/etc/stunnel/stunnel.conf']) {
+ file {
+ # this file is a broken config installed by the package
+ '/etc/stunnel/stunnel.conf':
+ ensure => absent;
+ }
+ }
+
+ $stunnel = hiera('stunnel')
+
+ # add server stunnels
+ create_resources(site_stunnel::servers, $stunnel['servers'])
+
+ # add client stunnels
+ $clients = $stunnel['clients']
+ $client_sections = keys($clients)
+ site_stunnel::clients { $client_sections: }
+
+ # remove any old stunnel logs that are not
+ # defined by this puppet run
+ file {'/var/log/stunnel4': purge => true;}
+
+ # the default is to keep 356 log files for each stunnel.
+ # here we set a more reasonable number.
+ augeas {
+ 'logrotate_stunnel':
+ context => '/files/etc/logrotate.d/stunnel4/rule',
+ changes => [
+ 'set rotate 5',
+ ]
+ }
+
+ include site_stunnel::override_service
+}
+
diff --git a/puppet/modules/site_stunnel/manifests/override_service.pp b/puppet/modules/site_stunnel/manifests/override_service.pp
new file mode 100644
index 00000000..435b9aa0
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/override_service.pp
@@ -0,0 +1,18 @@
+# override stunnel::debian defaults
+#
+# ignore puppet lint error about inheriting from different namespace
+# lint:ignore:inherits_across_namespaces
+class site_stunnel::override_service inherits stunnel::debian {
+# lint:endignore
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+
+ Service[stunnel] {
+ subscribe => [
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ]
+ }
+}
diff --git a/puppet/modules/site_stunnel/manifests/servers.pp b/puppet/modules/site_stunnel/manifests/servers.pp
new file mode 100644
index 00000000..e76d1e9d
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/servers.pp
@@ -0,0 +1,51 @@
+#
+# example hiera yaml:
+#
+# stunnel:
+# servers:
+# couch_server:
+# accept_port: 15984
+# connect_port: 5984
+#
+
+define site_stunnel::servers (
+ $accept_port,
+ $connect_port,
+ $verify = '2',
+ $pid = $name,
+ $rndfile = '/var/lib/stunnel4/.rnd',
+ $debuglevel = '4' ) {
+
+ $logfile = "/var/log/stunnel4/${name}.log"
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include x509::variables
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+ stunnel::service { $name:
+ accept => $accept_port,
+ connect => "127.0.0.1:${connect_port}",
+ client => false,
+ cafile => $ca_path,
+ key => $key_path,
+ cert => $cert_path,
+ verify => $verify,
+ pid => "/var/run/stunnel4/${pid}.pid",
+ rndfile => '/var/lib/stunnel4/.rnd',
+ debuglevel => $debuglevel,
+ sslversion => 'TLSv1',
+ syslog => 'no',
+ output => $logfile;
+ }
+
+ # allow incoming connections on $accept_port
+ site_shorewall::stunnel::server { $name:
+ port => $accept_port
+ }
+
+ include site_check_mk::agent::stunnel
+}
diff --git a/puppet/modules/site_tor/manifests/disable_exit.pp b/puppet/modules/site_tor/manifests/disable_exit.pp
new file mode 100644
index 00000000..078f80ae
--- /dev/null
+++ b/puppet/modules/site_tor/manifests/disable_exit.pp
@@ -0,0 +1,7 @@
+class site_tor::disable_exit {
+ tor::daemon::exit_policy {
+ 'no_exit_at_all':
+ reject => [ '*:*' ];
+ }
+}
+
diff --git a/puppet/modules/site_tor/manifests/init.pp b/puppet/modules/site_tor/manifests/init.pp
new file mode 100644
index 00000000..2207a5a9
--- /dev/null
+++ b/puppet/modules/site_tor/manifests/init.pp
@@ -0,0 +1,45 @@
+class site_tor {
+ tag 'leap_service'
+ Class['site_config::default'] -> Class['site_tor']
+
+ $tor = hiera('tor')
+ $bandwidth_rate = $tor['bandwidth_rate']
+ $tor_type = $tor['type']
+ $nickname = $tor['nickname']
+ $contact_emails = join($tor['contacts'],', ')
+ $family = $tor['family']
+
+ $address = hiera('ip_address')
+
+ $openvpn = hiera('openvpn', undef)
+ if $openvpn {
+ $openvpn_ports = $openvpn['ports']
+ }
+ else {
+ $openvpn_ports = []
+ }
+
+ include site_config::default
+ include tor::daemon
+ tor::daemon::relay { $nickname:
+ port => 9001,
+ address => $address,
+ contact_info => obfuscate_email($contact_emails),
+ bandwidth_rate => $bandwidth_rate,
+ my_family => $family
+ }
+
+ if ( $tor_type == 'exit'){
+ # Only enable the daemon directory if the node isn't also a webapp node
+ # or running openvpn on port 80
+ if ! member($::services, 'webapp') and ! member($openvpn_ports, '80') {
+ tor::daemon::directory { $::hostname: port => 80 }
+ }
+ }
+ else {
+ include site_tor::disable_exit
+ }
+
+ include site_shorewall::tor
+
+}
diff --git a/puppet/modules/site_webapp/files/server-status.conf b/puppet/modules/site_webapp/files/server-status.conf
new file mode 100644
index 00000000..10b2d4ed
--- /dev/null
+++ b/puppet/modules/site_webapp/files/server-status.conf
@@ -0,0 +1,26 @@
+# Keep track of extended status information for each request
+ExtendedStatus On
+
+# Determine if mod_status displays the first 63 characters of a request or
+# the last 63, assuming the request itself is greater than 63 chars.
+# Default: Off
+#SeeRequestTail On
+
+Listen 127.0.0.1:8162
+
+<VirtualHost 127.0.0.1:8162>
+
+<Location /server-status>
+ SetHandler server-status
+ Require all granted
+ Allow from 127.0.0.1
+</Location>
+
+</VirtualHost>
+
+
+<IfModule mod_proxy.c>
+ # Show Proxy LoadBalancer status in mod_status
+ ProxyStatus On
+</IfModule>
+
diff --git a/puppet/modules/site_webapp/manifests/apache.pp b/puppet/modules/site_webapp/manifests/apache.pp
new file mode 100644
index 00000000..80c7b29b
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/apache.pp
@@ -0,0 +1,28 @@
+# configure apache and passenger to serve the webapp
+class site_webapp::apache {
+
+ $web_api = hiera('api')
+ $api_domain = $web_api['domain']
+ $api_port = $web_api['port']
+
+ $web_domain = hiera('domain')
+ $domain_name = $web_domain['name']
+
+ $webapp = hiera('webapp')
+ $webapp_domain = $webapp['domain']
+
+ include site_apache::common
+ include apache::module::headers
+ include apache::module::alias
+ include apache::module::expires
+ include apache::module::removeip
+ include site_webapp::common_vhost
+
+ class { 'passenger': use_munin => false }
+
+ apache::vhost::file {
+ 'api':
+ content => template('site_apache/vhosts.d/api.conf.erb');
+ }
+
+}
diff --git a/puppet/modules/site_webapp/manifests/common_vhost.pp b/puppet/modules/site_webapp/manifests/common_vhost.pp
new file mode 100644
index 00000000..c57aad57
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/common_vhost.pp
@@ -0,0 +1,18 @@
+class site_webapp::common_vhost {
+ # installs x509 cert + key and common config
+ # that both nagios + leap webapp use
+
+ include x509::variables
+ include site_config::x509::commercial::cert
+ include site_config::x509::commercial::key
+ include site_config::x509::commercial::ca
+
+ Class['Site_config::X509::Commercial::Key'] ~> Service[apache]
+ Class['Site_config::X509::Commercial::Cert'] ~> Service[apache]
+ Class['Site_config::X509::Commercial::Ca'] ~> Service[apache]
+
+ apache::vhost::file {
+ 'common':
+ content => template('site_apache/vhosts.d/common.conf.erb')
+ }
+}
diff --git a/puppet/modules/site_webapp/manifests/couchdb.pp b/puppet/modules/site_webapp/manifests/couchdb.pp
new file mode 100644
index 00000000..71450370
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/couchdb.pp
@@ -0,0 +1,52 @@
+class site_webapp::couchdb {
+
+ $webapp = hiera('webapp')
+  # haproxy listener on localhost:4096, see site_webapp::haproxy
+ $couchdb_host = 'localhost'
+ $couchdb_port = '4096'
+ $couchdb_webapp_user = $webapp['couchdb_webapp_user']['username']
+ $couchdb_webapp_password = $webapp['couchdb_webapp_user']['password']
+ $couchdb_admin_user = $webapp['couchdb_admin_user']['username']
+ $couchdb_admin_password = $webapp['couchdb_admin_user']['password']
+
+ include x509::variables
+
+ file {
+ '/srv/leap/webapp/config/couchdb.yml':
+ content => template('site_webapp/couchdb.yml.erb'),
+ owner => 'leap-webapp',
+ group => 'leap-webapp',
+ mode => '0600',
+ require => Vcsrepo['/srv/leap/webapp'];
+
+ # couchdb.admin.yml is a symlink to prevent the vcsrepo resource
+ # from changing its user permissions every time.
+ '/srv/leap/webapp/config/couchdb.admin.yml':
+ ensure => 'link',
+ target => '/etc/leap/couchdb.admin.yml',
+ require => Vcsrepo['/srv/leap/webapp'];
+
+ '/etc/leap/couchdb.admin.yml':
+ content => template('site_webapp/couchdb.admin.yml.erb'),
+ owner => 'root',
+ group => 'root',
+ mode => '0600',
+ require => File['/etc/leap'];
+
+ '/srv/leap/webapp/log':
+ ensure => directory,
+ owner => 'leap-webapp',
+ group => 'leap-webapp',
+ mode => '0755',
+ require => Vcsrepo['/srv/leap/webapp'];
+
+ '/srv/leap/webapp/log/production.log':
+ ensure => present,
+ owner => 'leap-webapp',
+ group => 'leap-webapp',
+ mode => '0666',
+ require => Vcsrepo['/srv/leap/webapp'];
+ }
+
+ include site_stunnel
+}
diff --git a/puppet/modules/site_webapp/manifests/cron.pp b/puppet/modules/site_webapp/manifests/cron.pp
new file mode 100644
index 00000000..70b9da04
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/cron.pp
@@ -0,0 +1,37 @@
+# setup webapp cronjobs
+class site_webapp::cron {
+
+  # cron tasks that need to be performed to clean up the database
+ cron {
+ 'rotate_databases':
+ command => 'cd /srv/leap/webapp && bundle exec rake db:rotate',
+ environment => 'RAILS_ENV=production',
+ user => 'root',
+ hour => [0,6,12,18],
+ minute => 0;
+
+ 'delete_tmp_databases':
+ command => 'cd /srv/leap/webapp && bundle exec rake db:deletetmp',
+ environment => 'RAILS_ENV=production',
+ user => 'root',
+ hour => 1,
+ minute => 1;
+
+ # there is no longer a need to remove expired sessions, since the database
+ # will get destroyed.
+ 'remove_expired_sessions':
+ ensure => absent,
+ command => 'cd /srv/leap/webapp && bundle exec rake cleanup:sessions',
+ environment => 'RAILS_ENV=production',
+ user => 'leap-webapp',
+ hour => 2,
+ minute => 30;
+
+ 'remove_expired_tokens':
+ command => 'cd /srv/leap/webapp && bundle exec rake cleanup:tokens',
+ environment => 'RAILS_ENV=production',
+ user => 'leap-webapp',
+ hour => 3,
+ minute => 0;
+ }
+}
diff --git a/puppet/modules/site_webapp/manifests/hidden_service.pp b/puppet/modules/site_webapp/manifests/hidden_service.pp
new file mode 100644
index 00000000..72a2ce95
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/hidden_service.pp
@@ -0,0 +1,52 @@
+class site_webapp::hidden_service {
+ $tor = hiera('tor')
+ $hidden_service = $tor['hidden_service']
+ $tor_domain = "${hidden_service['address']}.onion"
+
+ include site_apache::common
+ include apache::module::headers
+ include apache::module::alias
+ include apache::module::expires
+ include apache::module::removeip
+
+ include tor::daemon
+ tor::daemon::hidden_service { 'webapp': ports => [ '80 127.0.0.1:80'] }
+
+ file {
+ '/var/lib/tor/webapp/':
+ ensure => directory,
+ owner => 'debian-tor',
+ group => 'debian-tor',
+ mode => '2700';
+
+ '/var/lib/tor/webapp/private_key':
+ ensure => present,
+ source => "/srv/leap/files/nodes/${::hostname}/tor.key",
+ owner => 'debian-tor',
+ group => 'debian-tor',
+ mode => '0600';
+
+ '/var/lib/tor/webapp/hostname':
+ ensure => present,
+ content => $tor_domain,
+ owner => 'debian-tor',
+ group => 'debian-tor',
+ mode => '0600';
+ }
+
+ # it is necessary to zero out the config of the status module
+ # because we are configuring our own version that is unavailable
+ # over the hidden service (see: #7456 and #7776)
+ apache::module { 'status': ensure => present, conf_content => ' ' }
+ # the access_compat module is required to enable Allow directives
+ apache::module { 'access_compat': ensure => present }
+
+ apache::vhost::file {
+ 'hidden_service':
+ content => template('site_apache/vhosts.d/hidden_service.conf.erb');
+ 'server_status':
+ vhost_source => 'modules/site_webapp/server-status.conf';
+ }
+
+ include site_shorewall::tor
+}
diff --git a/puppet/modules/site_webapp/manifests/init.pp b/puppet/modules/site_webapp/manifests/init.pp
new file mode 100644
index 00000000..15925aba
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/init.pp
@@ -0,0 +1,179 @@
+# configure webapp service
+class site_webapp {
+ tag 'leap_service'
+ $definition_files = hiera('definition_files')
+ $provider = $definition_files['provider']
+ $eip_service = $definition_files['eip_service']
+ $soledad_service = $definition_files['soledad_service']
+ $smtp_service = $definition_files['smtp_service']
+ $node_domain = hiera('domain')
+ $provider_domain = $node_domain['full_suffix']
+ $webapp = hiera('webapp')
+ $api_version = $webapp['api_version']
+ $secret_token = $webapp['secret_token']
+ $tor = hiera('tor', false)
+ $sources = hiera('sources')
+
+ Class['site_config::default'] -> Class['site_webapp']
+
+ include site_config::ruby::dev
+ include site_webapp::apache
+ include site_webapp::couchdb
+ include site_haproxy
+ include site_webapp::cron
+ include site_config::default
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include site_config::x509::client_ca::ca
+ include site_config::x509::client_ca::key
+ include site_nickserver
+
+ # remove leftovers from previous installations on webapp nodes
+ include site_config::remove::webapp
+
+ group { 'leap-webapp':
+ ensure => present,
+ allowdupe => false;
+ }
+
+ user { 'leap-webapp':
+ ensure => present,
+ allowdupe => false,
+ gid => 'leap-webapp',
+ groups => 'ssl-cert',
+ home => '/srv/leap/webapp',
+ require => [ Group['leap-webapp'] ];
+ }
+
+ vcsrepo { '/srv/leap/webapp':
+ ensure => present,
+ force => true,
+ revision => $sources['webapp']['revision'],
+ provider => $sources['webapp']['type'],
+ source => $sources['webapp']['source'],
+ owner => 'leap-webapp',
+ group => 'leap-webapp',
+ require => [ User['leap-webapp'], Group['leap-webapp'] ],
+ notify => Exec['bundler_update']
+ }
+
+ exec { 'bundler_update':
+ cwd => '/srv/leap/webapp',
+ command => '/bin/bash -c "/usr/bin/bundle check --path vendor/bundle || /usr/bin/bundle install --path vendor/bundle --without test development debug"',
+ unless => '/usr/bin/bundle check --path vendor/bundle',
+ user => 'leap-webapp',
+ timeout => 600,
+ require => [
+ Class['bundler::install'],
+ Vcsrepo['/srv/leap/webapp'],
+ Class['site_config::ruby::dev'],
+ Service['shorewall'] ],
+ notify => Service['apache'];
+ }
+
+ #
+ # NOTE: in order to support a webapp that is running on a subpath and not the
+ # root of the domain assets:precompile needs to be run with
+ # RAILS_RELATIVE_URL_ROOT=/application-root
+ #
+
+ exec { 'compile_assets':
+ cwd => '/srv/leap/webapp',
+ command => '/bin/bash -c "RAILS_ENV=production /usr/bin/bundle exec rake assets:precompile"',
+ user => 'leap-webapp',
+ logoutput => on_failure,
+ require => Exec['bundler_update'],
+ notify => Service['apache'];
+ }
+
+ file {
+ '/srv/leap/webapp/config/provider':
+ ensure => directory,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0755';
+
+ '/srv/leap/webapp/config/provider/provider.json':
+ content => $provider,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0644';
+
+ '/srv/leap/webapp/public/ca.crt':
+ ensure => link,
+ require => Vcsrepo['/srv/leap/webapp'],
+ target => "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt";
+
+ "/srv/leap/webapp/public/${api_version}":
+ ensure => directory,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0755';
+
+ "/srv/leap/webapp/public/${api_version}/config/":
+ ensure => directory,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0755';
+
+ "/srv/leap/webapp/public/${api_version}/config/eip-service.json":
+ content => $eip_service,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0644';
+
+ "/srv/leap/webapp/public/${api_version}/config/soledad-service.json":
+ content => $soledad_service,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0644';
+
+ "/srv/leap/webapp/public/${api_version}/config/smtp-service.json":
+ content => $smtp_service,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0644';
+ }
+
+ try::file {
+ '/srv/leap/webapp/config/customization':
+ ensure => directory,
+ recurse => true,
+ purge => true,
+ force => true,
+ owner => leap-webapp,
+ group => leap-webapp,
+ mode => 'u=rwX,go=rX',
+ require => Vcsrepo['/srv/leap/webapp'],
+ notify => Exec['compile_assets'],
+ source => $webapp['customization_dir'];
+ }
+
+ git::changes {
+ 'public/favicon.ico':
+ cwd => '/srv/leap/webapp',
+ require => Vcsrepo['/srv/leap/webapp'],
+ user => 'leap-webapp';
+ }
+
+ file {
+ '/srv/leap/webapp/config/config.yml':
+ content => template('site_webapp/config.yml.erb'),
+ owner => leap-webapp,
+ group => leap-webapp,
+ mode => '0600',
+ require => Vcsrepo['/srv/leap/webapp'],
+ notify => Service['apache'];
+ }
+
+ if $tor {
+ $hidden_service = $tor['hidden_service']
+ if $hidden_service['active'] {
+ include site_webapp::hidden_service
+ }
+ }
+
+
+ # needed for the soledad-sync check which is run on the
+ # webapp node
+ include soledad::client
+
+ leap::logfile { 'webapp': }
+
+ include site_shorewall::webapp
+ include site_check_mk::agent::webapp
+}
diff --git a/puppet/modules/site_webapp/templates/config.yml.erb b/puppet/modules/site_webapp/templates/config.yml.erb
new file mode 100644
index 00000000..dd55d3e9
--- /dev/null
+++ b/puppet/modules/site_webapp/templates/config.yml.erb
@@ -0,0 +1,36 @@
+<%
+cert_options = @webapp['client_certificates']
+production = {
+ "admins" => @webapp['admins'],
+ "default_locale" => @webapp['default_locale'],
+ "available_locales" => @webapp['locales'],
+ "domain" => @provider_domain,
+ "force_ssl" => @webapp['secure'],
+ "client_ca_key" => "%s/%s.key" % [scope.lookupvar('x509::variables::keys'), scope.lookupvar('site_config::params::client_ca_name')],
+ "client_ca_cert" => "%s/%s.crt" % [scope.lookupvar('x509::variables::local_CAs'), scope.lookupvar('site_config::params::client_ca_name')],
+ "secret_token" => @secret_token,
+ "client_cert_lifespan" => cert_options['life_span'],
+ "client_cert_bit_size" => cert_options['bit_size'].to_i,
+ "client_cert_hash" => cert_options['digest'],
+ "allow_limited_certs" => @webapp['allow_limited_certs'],
+ "allow_unlimited_certs" => @webapp['allow_unlimited_certs'],
+ "allow_anonymous_certs" => @webapp['allow_anonymous_certs'],
+ "limited_cert_prefix" => cert_options['limited_prefix'],
+ "unlimited_cert_prefix" => cert_options['unlimited_prefix'],
+ "minimum_client_version" => @webapp['client_version']['min'],
+ "default_service_level" => @webapp['default_service_level'],
+ "service_levels" => @webapp['service_levels'],
+ "allow_registration" => @webapp['allow_registration'],
+ "handle_blacklist" => @webapp['forbidden_usernames'],
+ "invite_required" => @webapp['invite_required'],
+ "api_tokens" => @webapp['api_tokens']
+}
+
+if @webapp['engines'] && @webapp['engines'].any?
+ production["engines"] = @webapp['engines']
+end
+-%>
+#
+# This file is generated by puppet. This file inherits from defaults.yml.
+#
+<%= scope.function_sorted_yaml([{"production" => production}]) %>
diff --git a/puppet/modules/site_webapp/templates/couchdb.admin.yml.erb b/puppet/modules/site_webapp/templates/couchdb.admin.yml.erb
new file mode 100644
index 00000000..a0921add
--- /dev/null
+++ b/puppet/modules/site_webapp/templates/couchdb.admin.yml.erb
@@ -0,0 +1,9 @@
+production:
+ prefix: ""
+ protocol: 'http'
+ host: <%= @couchdb_host %>
+ port: <%= @couchdb_port %>
+ auto_update_design_doc: false
+ username: <%= @couchdb_admin_user %>
+ password: <%= @couchdb_admin_password %>
+
diff --git a/puppet/modules/site_webapp/templates/couchdb.yml.erb b/puppet/modules/site_webapp/templates/couchdb.yml.erb
new file mode 100644
index 00000000..2bef0af5
--- /dev/null
+++ b/puppet/modules/site_webapp/templates/couchdb.yml.erb
@@ -0,0 +1,9 @@
+production:
+ prefix: ""
+ protocol: 'http'
+ host: <%= @couchdb_host %>
+ port: <%= @couchdb_port %>
+ auto_update_design_doc: false
+ username: <%= @couchdb_webapp_user %>
+ password: <%= @couchdb_webapp_password %>
+
diff --git a/puppet/modules/soledad/manifests/client.pp b/puppet/modules/soledad/manifests/client.pp
new file mode 100644
index 00000000..e470adeb
--- /dev/null
+++ b/puppet/modules/soledad/manifests/client.pp
@@ -0,0 +1,16 @@
+# setup soledad-client
+# currently needed on webapp node to run the soledad-sync test
+class soledad::client {
+
+ tag 'leap_service'
+ include soledad::common
+
+ package {
+ 'soledad-client':
+ ensure => latest,
+ require => Class['site_apt::leap_repo'];
+ 'python-u1db':
+ ensure => latest;
+ }
+
+}
diff --git a/puppet/modules/soledad/manifests/common.pp b/puppet/modules/soledad/manifests/common.pp
new file mode 100644
index 00000000..8d8339d4
--- /dev/null
+++ b/puppet/modules/soledad/manifests/common.pp
@@ -0,0 +1,8 @@
+# install soledad-common, needed by both soledad-client and soledad-server
+class soledad::common {
+
+ package { 'soledad-common':
+ ensure => latest;
+ }
+
+}
diff --git a/puppet/modules/soledad/manifests/server.pp b/puppet/modules/soledad/manifests/server.pp
new file mode 100644
index 00000000..8674f421
--- /dev/null
+++ b/puppet/modules/soledad/manifests/server.pp
@@ -0,0 +1,104 @@
+# setup soledad-server
+class soledad::server {
+ tag 'leap_service'
+
+ include site_config::default
+ include soledad::common
+
+ $soledad = hiera('soledad')
+ $couchdb_user = $soledad['couchdb_soledad_user']['username']
+ $couchdb_password = $soledad['couchdb_soledad_user']['password']
+ $couchdb_leap_mx_user = $soledad['couchdb_leap_mx_user']['username']
+
+ $couchdb_host = 'localhost'
+ $couchdb_port = '5984'
+
+ $soledad_port = $soledad['port']
+
+ $sources = hiera('sources')
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+
+ #
+ # SOLEDAD CONFIG
+ #
+
+ file {
+ '/etc/soledad':
+ ensure => directory,
+ owner => 'root',
+ group => 'root',
+ mode => '0755';
+ '/etc/soledad/soledad-server.conf':
+ content => template('soledad/soledad-server.conf.erb'),
+ owner => 'soledad',
+ group => 'soledad',
+ mode => '0640',
+ notify => Service['soledad-server'],
+ require => [ User['soledad'], Group['soledad'] ];
+ '/srv/leap/soledad':
+ ensure => directory,
+ owner => 'soledad',
+ group => 'soledad',
+ require => [ User['soledad'], Group['soledad'] ];
+ '/var/lib/soledad':
+ ensure => directory,
+ owner => 'soledad',
+ group => 'soledad',
+ require => [ User['soledad'], Group['soledad'] ];
+ }
+
+ package { $sources['soledad']['package']:
+ ensure => $sources['soledad']['revision'],
+ require => Class['site_apt::leap_repo'];
+ }
+
+ file { '/etc/default/soledad':
+ content => template('soledad/default-soledad.erb'),
+ owner => 'soledad',
+ group => 'soledad',
+ mode => '0600',
+ notify => Service['soledad-server'],
+ require => [ User['soledad'], Group['soledad'] ];
+ }
+
+ service { 'soledad-server':
+ ensure => running,
+ enable => true,
+ hasstatus => true,
+ hasrestart => true,
+ require => [ User['soledad'], Group['soledad'] ],
+ subscribe => [
+ Package['soledad-server'],
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ];
+ }
+
+ include site_shorewall::soledad
+ include site_check_mk::agent::soledad
+
+ # set up users, group and directories for soledad-server
+ # although the soledad users are already created by the
+ # soledad-server package
+ group { 'soledad':
+ ensure => present,
+ system => true,
+ }
+ user {
+ 'soledad':
+ ensure => present,
+ system => true,
+ gid => 'soledad',
+ home => '/srv/leap/soledad',
+ require => Group['soledad'];
+ 'soledad-admin':
+ ensure => present,
+ system => true,
+ gid => 'soledad',
+ home => '/srv/leap/soledad',
+ require => Group['soledad'];
+ }
+}
diff --git a/puppet/modules/soledad/templates/default-soledad.erb b/puppet/modules/soledad/templates/default-soledad.erb
new file mode 100644
index 00000000..32504e38
--- /dev/null
+++ b/puppet/modules/soledad/templates/default-soledad.erb
@@ -0,0 +1,5 @@
+# this file is managed by puppet
+START=yes
+CERT_PATH=<%= scope.lookupvar('x509::variables::certs') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.crt
+PRIVKEY_PATH=<%= scope.lookupvar('x509::variables::keys') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.key
+HTTPS_PORT=<%=@soledad_port%>
diff --git a/puppet/modules/soledad/templates/soledad-server.conf.erb b/puppet/modules/soledad/templates/soledad-server.conf.erb
new file mode 100644
index 00000000..1c6a0d19
--- /dev/null
+++ b/puppet/modules/soledad/templates/soledad-server.conf.erb
@@ -0,0 +1,12 @@
+[soledad-server]
+couch_url = http://<%= @couchdb_user %>:<%= @couchdb_password %>@<%= @couchdb_host %>:<%= @couchdb_port %>
+create_cmd = sudo -u soledad-admin /usr/bin/create-user-db
+admin_netrc = /etc/couchdb/couchdb-soledad-admin.netrc
+
+[database-security]
+members = <%= @couchdb_user %>, <%= @couchdb_leap_mx_user %>
+# not needed, but for documentation:
+# members_roles = replication
+# admins = admin
+# admins_roles = replication
+
diff --git a/puppet/modules/sshd/.fixtures.yml b/puppet/modules/sshd/.fixtures.yml
new file mode 100644
index 00000000..42598a65
--- /dev/null
+++ b/puppet/modules/sshd/.fixtures.yml
@@ -0,0 +1,3 @@
+fixtures:
+ symlinks:
+ sshd: "#{source_dir}" \ No newline at end of file
diff --git a/puppet/modules/sshd/.gitignore b/puppet/modules/sshd/.gitignore
new file mode 100644
index 00000000..5ebb01fb
--- /dev/null
+++ b/puppet/modules/sshd/.gitignore
@@ -0,0 +1,4 @@
+.librarian/*
+.tmp/*
+*.log
+spec/fixtures/*
diff --git a/puppet/modules/sshd/.rspec b/puppet/modules/sshd/.rspec
new file mode 100644
index 00000000..f07c903a
--- /dev/null
+++ b/puppet/modules/sshd/.rspec
@@ -0,0 +1,4 @@
+--format documentation
+--color
+--pattern "spec/*/*_spec.rb"
+#--backtrace
diff --git a/puppet/modules/sshd/.travis.yml b/puppet/modules/sshd/.travis.yml
new file mode 100644
index 00000000..7bd2a2bc
--- /dev/null
+++ b/puppet/modules/sshd/.travis.yml
@@ -0,0 +1,27 @@
+before_install:
+ - gem update --system 2.1.11
+ - gem --version
+rvm:
+ - 1.8.7
+ - 1.9.3
+ - 2.0.0
+script: 'bundle exec rake spec'
+env:
+ - PUPPET_VERSION="~> 2.7.0"
+ - PUPPET_VERSION="~> 3.0.0"
+ - PUPPET_VERSION="~> 3.1.0"
+ - PUPPET_VERSION="~> 3.2.0"
+ - PUPPET_VERSION="~> 3.3.0"
+ - PUPPET_VERSION="~> 3.4.0"
+matrix:
+ exclude:
+ # No support for Ruby 1.9 before Puppet 2.7
+ - rvm: 1.9.3
+ env: PUPPET_VERSION=2.6.0
+ # No support for Ruby 2.0 before Puppet 3.2
+ - rvm: 2.0.0
+ env: PUPPET_VERSION="~> 2.7.0"
+ - rvm: 2.0.0
+ env: PUPPET_VERSION="~> 3.0.0"
+ - rvm: 2.0.0
+ env: PUPPET_VERSION="~> 3.1.0"
diff --git a/puppet/modules/sshd/Gemfile b/puppet/modules/sshd/Gemfile
new file mode 100644
index 00000000..ef74f90e
--- /dev/null
+++ b/puppet/modules/sshd/Gemfile
@@ -0,0 +1,14 @@
+source 'https://rubygems.org'
+
+group :development, :test do
+ gem 'puppet', '>= 2.7.0'
+ gem 'puppet-lint', '>=0.3.2'
+ gem 'puppetlabs_spec_helper', '>=0.2.0'
+ gem 'rake', '>=0.9.2.2'
+ gem 'librarian-puppet', '>=0.9.10'
+ gem 'rspec-system-puppet', :require => false
+ gem 'serverspec', :require => false
+ gem 'rspec-system-serverspec', :require => false
+ gem 'rspec-hiera-puppet'
+ gem 'rspec-puppet', :git => 'https://github.com/rodjek/rspec-puppet.git'
+end \ No newline at end of file
diff --git a/puppet/modules/sshd/Gemfile.lock b/puppet/modules/sshd/Gemfile.lock
new file mode 100644
index 00000000..0c2c58e9
--- /dev/null
+++ b/puppet/modules/sshd/Gemfile.lock
@@ -0,0 +1,116 @@
+GIT
+ remote: https://github.com/rodjek/rspec-puppet.git
+ revision: c44381a240ec420d4ffda7bffc55ee4d9c08d682
+ specs:
+ rspec-puppet (1.0.1)
+ rspec
+
+GEM
+ remote: https://rubygems.org/
+ specs:
+ builder (3.2.2)
+ diff-lcs (1.2.5)
+ excon (0.31.0)
+ facter (1.7.4)
+ fog (1.19.0)
+ builder
+ excon (~> 0.31.0)
+ formatador (~> 0.2.0)
+ mime-types
+ multi_json (~> 1.0)
+ net-scp (~> 1.1)
+ net-ssh (>= 2.1.3)
+ nokogiri (~> 1.5)
+ ruby-hmac
+ formatador (0.2.4)
+ hiera (1.3.1)
+ json_pure
+ hiera-puppet (1.0.0)
+ hiera (~> 1.0)
+ highline (1.6.20)
+ json (1.8.1)
+ json_pure (1.8.1)
+ kwalify (0.7.2)
+ librarian-puppet (0.9.10)
+ json
+ thor (~> 0.15)
+ metaclass (0.0.2)
+ mime-types (1.25.1)
+ mocha (1.0.0)
+ metaclass (~> 0.0.1)
+ multi_json (1.8.4)
+ net-scp (1.1.2)
+ net-ssh (>= 2.6.5)
+ net-ssh (2.7.0)
+ nokogiri (1.5.11)
+ puppet (3.4.2)
+ facter (~> 1.6)
+ hiera (~> 1.0)
+ rgen (~> 0.6.5)
+ puppet-lint (0.3.2)
+ puppetlabs_spec_helper (0.4.1)
+ mocha (>= 0.10.5)
+ rake
+ rspec (>= 2.9.0)
+ rspec-puppet (>= 0.1.1)
+ rake (10.1.1)
+ rbvmomi (1.8.1)
+ builder
+ nokogiri (>= 1.4.1)
+ trollop
+ rgen (0.6.6)
+ rspec (2.14.1)
+ rspec-core (~> 2.14.0)
+ rspec-expectations (~> 2.14.0)
+ rspec-mocks (~> 2.14.0)
+ rspec-core (2.14.7)
+ rspec-expectations (2.14.4)
+ diff-lcs (>= 1.1.3, < 2.0)
+ rspec-hiera-puppet (1.0.0)
+ hiera (>= 1.0)
+ hiera-puppet (>= 1.0)
+ puppet (>= 3.0)
+ rspec
+ rspec-puppet
+ rspec-mocks (2.14.4)
+ rspec-system (2.8.0)
+ fog (~> 1.18)
+ kwalify (~> 0.7.2)
+ mime-types (~> 1.16)
+ net-scp (~> 1.1)
+ net-ssh (~> 2.7)
+ nokogiri (~> 1.5.10)
+ rbvmomi (~> 1.6)
+ rspec (~> 2.14)
+ systemu (~> 2.5)
+ rspec-system-puppet (2.2.1)
+ rspec-system (~> 2.0)
+ rspec-system-serverspec (2.0.1)
+ rspec-system (~> 2.0)
+ serverspec (~> 0.0)
+ specinfra (~> 0.0)
+ ruby-hmac (0.4.0)
+ serverspec (0.14.4)
+ highline
+ net-ssh
+ rspec (>= 2.13.0)
+ specinfra (>= 0.1.0)
+ specinfra (0.4.1)
+ systemu (2.6.0)
+ thor (0.18.1)
+ trollop (2.0)
+
+PLATFORMS
+ ruby
+
+DEPENDENCIES
+ librarian-puppet (>= 0.9.10)
+ puppet (>= 2.7.0)
+ puppet-lint (>= 0.3.2)
+ puppetlabs_spec_helper (>= 0.2.0)
+ rake (>= 0.9.2.2)
+ rspec-hiera-puppet
+ rspec-puppet!
+ rspec-system-puppet
+ rspec-system-serverspec
+ serverspec
diff --git a/puppet/modules/sshd/LICENSE b/puppet/modules/sshd/LICENSE
new file mode 100644
index 00000000..94a9ed02
--- /dev/null
+++ b/puppet/modules/sshd/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/puppet/modules/sshd/Modulefile b/puppet/modules/sshd/Modulefile
new file mode 100644
index 00000000..5e4f92d6
--- /dev/null
+++ b/puppet/modules/sshd/Modulefile
@@ -0,0 +1,10 @@
+name 'puppet-sshd'
+version '0.1.0'
+source 'https://github.com/duritong/puppet-sshd'
+author 'duritong'
+license 'Apache License, Version 2.0'
+summary 'ssh daemon configuration'
+description 'Manages sshd_config'
+project_page 'https://github.com/duritong/puppet-sshd'
+
+dependency 'puppetlabs/stdlib', '>= 2.0.0' \ No newline at end of file
diff --git a/puppet/modules/sshd/Puppetfile b/puppet/modules/sshd/Puppetfile
new file mode 100644
index 00000000..166d3b4d
--- /dev/null
+++ b/puppet/modules/sshd/Puppetfile
@@ -0,0 +1,3 @@
+forge 'http://forge.puppetlabs.com'
+
+mod 'puppetlabs/stdlib', '>=2.0.0' \ No newline at end of file
diff --git a/puppet/modules/sshd/Puppetfile.lock b/puppet/modules/sshd/Puppetfile.lock
new file mode 100644
index 00000000..f9381858
--- /dev/null
+++ b/puppet/modules/sshd/Puppetfile.lock
@@ -0,0 +1,8 @@
+FORGE
+ remote: http://forge.puppetlabs.com
+ specs:
+ puppetlabs/stdlib (4.1.0)
+
+DEPENDENCIES
+ puppetlabs/stdlib (>= 2.0.0)
+
diff --git a/puppet/modules/sshd/README.md b/puppet/modules/sshd/README.md
new file mode 100644
index 00000000..77e4d29b
--- /dev/null
+++ b/puppet/modules/sshd/README.md
@@ -0,0 +1,247 @@
+# Puppet SSH Module
+
+[![Build Status](https://travis-ci.org/duritong/puppet-sshd.png?branch=master)](https://travis-ci.org/duritong/puppet-sshd)
+
+This puppet module manages OpenSSH configuration and services.
+
+**!! Upgrade Notice (05/2015) !!**
+
+The hardened_ssl parameter name was changed to simply 'hardened'.
+
+**!! Upgrade Notice (01/2013) !!**
+
+This module now uses parameterized classes, where it previously used global
+variables. Please watch out before pulling: you will need to change the
+class declarations in your manifests!
+
+
+### Dependencies
+
+This module requires puppet => 2.6, and the following modules are required
+pre-dependencies:
+
+- [puppetlabs/stdlib](https://github.com/puppetlabs/puppetlabs-stdlib) >= 2.x
+
+## OpenSSH Server
+
+On a node where you wish to have an openssh server installed, you should
+include
+
+```puppet
+class { 'sshd': }
+```
+
+on that node. If you need to configure any aspect of sshd_config, you can adjust many parameters:
+
+```puppet
+class { 'sshd':
+ ports => [ 20002 ],
+ permit_root_login => 'no',
+}
+```
+
+See Configurable Variables below for what you can set.
+
+### Nagios
+
+To have nagios checks set up automatically for sshd services, simply set
+`manage_nagios` to `true` for that class. If you want to disable ssh
+nagios checking for a particular node (such as when ssh is firewalled), then you
+can set the class parameter `nagios_check_ssh` to `false` and that node will not be
+monitored.
+
+Nagios will automatically check the ports defined in `ports`, and the
+hostname specified by `nagios_check_ssh_hostname`.
+
+Note that if you need to use some specific logic to decide whether or not to
+create a nagios service check, you should set $manage_nagios to false, and
+use sshd::nagios from within your own manifests. You'll also need to manually
+specify the port to that define. By default, if the $port parameter is not
+specified, it will use the resource name as the port (e.g. if you call it like
+this: `sshd::nagios { '22': }` )
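+
+For example, a minimal sketch of that manual setup (using only the class and
+define parameters documented above):
+
+```puppet
+class { 'sshd':
+  ports         => [ 20002 ],
+  manage_nagios => false,
+}
+
+# the resource title is used as the port when $port is not given
+sshd::nagios { '20002': }
+```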
+
+NOTE: this requires that you are using the shared-nagios puppet module which
+supports the nagios native types via `nagios::service`:
+
+https://gitlab.com/shared-puppet-modules-group/sshd
+
+### Firewall
+
+If you wish to have firewall rules setup automatically for you, using shorewall,
+you will need to set: `use_shorewall => true`. The `ports` that you have
+specified will automatically be used.
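+
+A minimal sketch, assuming the shared-shorewall module mentioned below is
+already deployed on the node:
+
+```puppet
+class { 'sshd':
+  ports         => [ 22, 20002 ],
+  use_shorewall => true,
+}
+```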
+
+NOTE: This requires that you are using the shared-shorewall puppet module:
+git://labs.riseup.net/shared-shorewall
+
+
+### Configurable variables
+
+Configuration of sshd is strict and may not fit all needs; however, there are a
+number of variables that you can consider configuring. The defaults match the
+sshd_config defaults shipped by the distribution.
+
+To set any of these variables, simply pass them as parameters when you declare
+the class, for example:
+
+```puppet
+class {'sshd':
+ listen_address => ['10.0.0.1', '192.168.0.1'],
+ use_pam => yes
+}
+```
+
+If you need to install a version of the ssh daemon or client package other than
+the default one that would be installed by `ensure => installed`, then you can
+set the following variables:
+
+```puppet
+class {'sshd':
+ ensure_version => "1:5.2p2-6"
+}
+```
+
+The following is a list of the currently available variables (a combined example follows the list):
+
+ - `listen_address`
+ specify the addresses sshd should listen on. Set this to `['10.0.0.1', '192.168.0.1']` to have it listen on both addresses, or leave it unset to listen on all addresses. Default: empty -> results in listening on `0.0.0.0`
+ - `allowed_users`
+ list of usernames separated by spaces. Set this for example to `"foobar
+ root"` to ensure that only the users foobar and root may log in. Default: empty
+ -> no restriction is set
+ - `allowed_groups`
+ list of groups separated by spaces. Set this for example to `"wheel sftponly"`
+ to ensure that only users in the groups wheel and sftponly may log in.
+ Default: empty -> no restriction is set. Note: this is applied after
+ `allowed_users`, so take care how the two options interact if you use them
+ together.
+ - `use_pam` whether or not to use PAM for authentication. Values:
+ - `no` (default)
+ - `yes`
+ - `permit_root_login` If you want to allow root logins or not. Valid values:
+ - `yes`
+ - `no`
+ - `without-password` (default)
+ - `forced-commands-only`
+ - `password_authentication`
+ If you want to enable password authentication or not. Valid values:
+ - `yes`
+ - `no` (default)
+ - `kerberos_authentication`
+ If you want the password that is provided by the user to be validated
+ through the Kerberos KDC. To use this option the server needs a Kerberos
+ servtab which allows the verification of the KDC's identity. Valid values:
+ - `yes`
+ - `no` (default)
+ - `kerberos_orlocalpasswd` If password authentication through Kerberos fails, then the password will be validated via any additional local mechanism. Valid values:
+ - `yes` (default)
+ - `no`
+ - `kerberos_ticketcleanup` Destroy the user's ticket cache file on logout? Valid values:
+ - `yes` (default)
+ - `no`
+ - `gssapi_authentication` Authenticate users based on GSSAPI? Valid values:
+ - `yes`
+ - `no` (default)
+ - `gssapi_cleanupcredentials` Destroy user's credential cache on logout? Valid values:
+ - `yes` (default)
+ - `no`
+ - `challenge_response_authentication` If you want to enable ChallengeResponseAuthentication or not. When disabled, s/key passwords are disabled. Valid values:
+ - `yes`
+ - `no` (default)
+ - `tcp_forwarding` If you want to enable TcpForwarding. Valid values:
+ - `yes`
+ - `no` (default)
+ - `x11_forwarding` If you want to enable x11 forwarding. Valid values:
+ - `yes`
+ - `no` (default)
+ - `agent_forwarding` If you want to allow ssh-agent forwarding. Valid values:
+ - `yes`
+ - `no` (default)
+ - `pubkey_authentication` If you want to enable public key authentication. Valid values:
+ - `yes` (default)
+ - `no`
+ - `rsa_authentication` If you want to enable RSA Authentication. Valid values:
+ - `yes`
+ - `no` (default)
+ - `rhosts_rsa_authentication`
+ If you want to enable rhosts RSA Authentication. Valid values:
+ - `yes`
+ - `no` (default)
+ - `hostbased_authentication` If you want to enable `HostbasedAuthentication`. Valid values:
+ - `yes`
+ - `no` (default)
+ - `strict_modes` If you want to set `StrictModes` (check file modes/ownership before accepting login). Valid values:
+ - `yes` (default)
+ - `no`
+ - `permit_empty_passwords`
+ If you want to enable PermitEmptyPasswords to allow empty passwords. Valid
+ values:
+ - `yes`
+ - `no` (default)
+ - `ports` If you want to specify a list of ports other than the default `22`; Default: `[22]`
+ - `authorized_keys_file`
+ Set this to the location of the AuthorizedKeysFile
+ (e.g. `/etc/ssh/authorized_keys/%u`). Default: `AuthorizedKeysFile
+ %h/.ssh/authorized_keys`
+ - `hardened`
+ Use only strong ciphers, MAC, KexAlgorithms, etc.
+ Values:
+ - `no` (default)
+ - `yes`
+ - `print_motd`
+ Show the Message of the day when a user logs in.
+ - `sftp_subsystem`
+ Set a different sftp subsystem than the default one. Might be interesting for
+ sftponly usage. Default: empty -> no change of the default
+ - `head_additional_options`
+ Set this to any additional sshd_config options which aren't listed above. Anything
+ set here will be added to the beginning of the sshd_config file. This option
+ might be useful for defining complicated Match blocks. The string is included
+ verbatim, exactly as defined, so take care! Default: empty -> not added.
+ - `tail_additional_options` Set this to any additional sshd_config options which aren't listed above. Anything set here will be added to the end of the sshd_config file. This option might be useful for defining complicated Match blocks. The string is included verbatim, exactly as defined, so take care! Default: empty -> not added.
+ - `shared_ip` Whether the server uses a shared network IP address. If it does, then we don't want it to export an rsa key for its IP address. Values:
+ - `no` (default)
+ - `yes`
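+
+As a sketch, here is a declaration combining several of the options listed
+above (values follow the yes/no string convention shown in the list):
+
+```puppet
+class { 'sshd':
+  ports                   => [ 22, 20002 ],
+  permit_root_login       => 'no',
+  password_authentication => 'no',
+  allowed_users           => 'foobar root',
+  hardened                => 'yes',
+}
+```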
+
+
+### Defines and functions
+
+Deploy an authorized_keys file with the `authorized_key` define.
+
+Generate a public/private keypair with the ssh_keygen function. For example, the
+following will generate ssh keys and put the different parts of the key into
+variables:
+
+```puppet
+$ssh_keys = ssh_keygen("${$ssh_key_basepath}/backup/keys/${::fqdn}/${backup_host}")
+$public_key = split($ssh_keys[1],' ')
+$sshkey_type = $public_key[0]
+$sshkey = $public_key[1]
+```
+
+## Client
+
+
+On a node where you wish to have the ssh client managed, you can do:
+
+```puppet
+class { 'sshd::client': }
+```
+
+in the node definition. This will install the appropriate package.
+
+## License
+
+ - Copyright 2008-2011, Riseup Labs micah@riseup.net
+ - Copyright 2008, admin(at)immerda.ch
+ - Copyright 2008, Puzzle ITC GmbH
+ - Marcel Härry haerry+puppet(at)puzzle.ch
+ - Simon Josi josi+puppet(at)puzzle.ch
+
+This program is free software; you can redistribute
+it and/or modify it under the terms of the GNU
+General Public License version 3 as published by
+the Free Software Foundation.
+
diff --git a/puppet/modules/sshd/Rakefile b/puppet/modules/sshd/Rakefile
new file mode 100644
index 00000000..e3213518
--- /dev/null
+++ b/puppet/modules/sshd/Rakefile
@@ -0,0 +1,16 @@
+require 'bundler'
+Bundler.require(:rake)
+
+require 'puppetlabs_spec_helper/rake_tasks'
+require 'puppet-lint/tasks/puppet-lint'
+require 'rspec-system/rake_task'
+
+PuppetLint.configuration.log_format = '%{path}:%{linenumber}:%{KIND}: %{message}'
+PuppetLint.configuration.send("disable_80chars")
+
+puppet_module='sshd'
+task :librarian_spec_prep do
+ sh 'librarian-puppet install --path=spec/fixtures/modules/'
+end
+task :spec_prep => :librarian_spec_prep
+task :default => [:spec, :lint]
diff --git a/puppet/modules/sshd/files/autossh.init.d b/puppet/modules/sshd/files/autossh.init.d
new file mode 100644
index 00000000..92bd5f43
--- /dev/null
+++ b/puppet/modules/sshd/files/autossh.init.d
@@ -0,0 +1,164 @@
+#!/bin/sh
+### BEGIN INIT INFO
+# Provides: AutoSSH
+# Required-Start: $local_fs $network $remote_fs $syslog
+# Required-Stop: $local_fs $network $remote_fs $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: start the autossh daemon
+# Description: start the autossh daemon
+### END INIT INFO
+
+# Author: Antoine Beaupré <anarcat@koumbit.org>
+
+# Do NOT "set -e"
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="autossh"
+NAME=autossh
+USER=$NAME
+DAEMON=/usr/bin/autossh
+DAEMON_ARGS="-f"
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+AUTOSSH_PIDFILE=$PIDFILE
+export AUTOSSH_PIDFILE
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Load the VERBOSE setting and other rcS variables
+. /lib/init/vars.sh
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+ # Return
+ # 0 if daemon has been started
+ # 1 if daemon was already running
+ # 2 if daemon could not be started
+ start-stop-daemon --start --quiet --user $USER --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
+ || return 1
+ start-stop-daemon --start --quiet --user $USER --chuid $USER --pidfile $PIDFILE --exec $DAEMON -- \
+ $DAEMON_ARGS \
+ || return 2
+ # The above code will not work for interpreted scripts, use the next
+ # six lines below instead (Ref: #643337, start-stop-daemon(8) )
+ #start-stop-daemon --start --quiet --pidfile $PIDFILE --startas $DAEMON \
+ # --name $NAME --test > /dev/null \
+ # || return 1
+ #start-stop-daemon --start --quiet --pidfile $PIDFILE --startas $DAEMON \
+ # --name $NAME -- $DAEMON_ARGS \
+ # || return 2
+
+ # Add code here, if necessary, that waits for the process to be ready
+ # to handle requests from services started subsequently which depend
+ # on this one. As a last resort, sleep for some time.
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+ # Return
+ # 0 if daemon has been stopped
+ # 1 if daemon was already stopped
+ # 2 if daemon could not be stopped
+ # other if a failure occurred
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --user $USER --name $NAME
+ RETVAL="$?"
+ [ "$RETVAL" = 2 ] && return 2
+ # Wait for children to finish too if this is a daemon that forks
+ # and if the daemon is only ever run from this initscript.
+ # If the above conditions are not satisfied then add some other code
+ # that waits for the process to drop all resources that could be
+ # needed by services started subsequently. A last resort is to
+ # sleep for some time.
+ start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --user $USER --exec $DAEMON
+ [ "$?" = 2 ] && return 2
+ # Many daemons don't delete their pidfiles when they exit.
+ rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+#
+# Function that sends a SIGHUP to the daemon/service
+#
+do_reload() {
+ #
+ # If the daemon can reload its configuration without
+ # restarting (for example, when it is sent a SIGHUP),
+ # then implement that here.
+ #
+ start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name $NAME
+ return 0
+}
+
+case "$1" in
+ start)
+ [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case "$?" in
+ 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+ 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+ esac
+ ;;
+ stop)
+ [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+ 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+ esac
+ ;;
+ status)
+ status_of_proc -p "$PIDFILE" "$DAEMON" "$NAME" && exit 0 || exit $?
+ ;;
+ reload|force-reload)
+ log_daemon_msg "Reloading $DESC" "$NAME"
+ do_reload
+ log_end_msg $?
+ ;;
+ restart)
+ #
+ # If the "reload" option is implemented then remove the
+ # 'force-reload' alias
+ #
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1)
+ do_start
+ case "$?" in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *)
+ # Failed to stop
+ log_end_msg 1
+ ;;
+ esac
+ ;;
+ *)
+ #echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload}" >&2
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
+
+:
diff --git a/puppet/modules/sshd/lib/facter/ssh_version.rb b/puppet/modules/sshd/lib/facter/ssh_version.rb
new file mode 100644
index 00000000..51d8a00f
--- /dev/null
+++ b/puppet/modules/sshd/lib/facter/ssh_version.rb
@@ -0,0 +1,5 @@
+Facter.add("ssh_version") do
+ setcode do
+ ssh_version = Facter::Util::Resolution.exec('ssh -V 2>&1 1>/dev/null').chomp.split(' ')[0].split('_')[1]
+ end
+end
diff --git a/puppet/modules/sshd/lib/puppet/parser/functions/ssh_keygen.rb b/puppet/modules/sshd/lib/puppet/parser/functions/ssh_keygen.rb
new file mode 100644
index 00000000..e304f242
--- /dev/null
+++ b/puppet/modules/sshd/lib/puppet/parser/functions/ssh_keygen.rb
@@ -0,0 +1,30 @@
+Puppet::Parser::Functions::newfunction(:ssh_keygen, :type => :rvalue, :doc =>
+ "Returns an array containing the ssh private and public (in this order) key
+ for a certain private key path.
+ It will generate the keypair if both do not exist. It will also generate
+ the directory hierarchy if required.
+ It accepts only fully qualified paths; everything else will fail.") do |args|
+ raise Puppet::ParseError, "Wrong number of arguments" unless args.to_a.length == 1
+ private_key_path = args.to_a[0]
+ raise Puppet::ParseError, "Only fully qualified paths are accepted (#{private_key_path})" unless private_key_path =~ /^\/.+/
+ public_key_path = "#{private_key_path}.pub"
+ raise Puppet::ParseError, "Either only the private or only the public key exists" if File.exists?(private_key_path) ^ File.exists?(public_key_path)
+ [private_key_path,public_key_path].each do |path|
+ raise Puppet::ParseError, "#{path} is a directory" if File.directory?(path)
+ end
+
+ dir = File.dirname(private_key_path)
+ unless File.directory?(dir)
+ require 'fileutils'
+ FileUtils.mkdir_p(dir, :mode => 0700)
+ end
+ unless [private_key_path,public_key_path].all?{|path| File.exists?(path) }
+ executor = (Facter.value(:puppetversion).to_i < 3) ? Puppet::Util : Puppet::Util::Execution
+ output = executor.execute(
+ ['/usr/bin/ssh-keygen','-t', 'rsa', '-b', '4096',
+ '-f', private_key_path, '-P', '', '-q'])
+ raise Puppet::ParseError, "Something went wrong during key generation! Output: #{output}" unless output.empty?
+ end
+ [File.read(private_key_path),File.read(public_key_path)]
+end
+
diff --git a/puppet/modules/sshd/manifests/autossh.pp b/puppet/modules/sshd/manifests/autossh.pp
new file mode 100644
index 00000000..5650584a
--- /dev/null
+++ b/puppet/modules/sshd/manifests/autossh.pp
@@ -0,0 +1,40 @@
+class sshd::autossh($host,
+ $port = undef, # this should be a remote->local hash
+ $remote_user = undef,
+ $user = 'root',
+ $pidfile = '/var/run/autossh.pid',
+) {
+ if $port {
+ $port_ensure = $port
+ }
+ else {
+ # random port between 10000 and 20000
+ $port_ensure = fqdn_rand(10000) + 10000
+ }
+ if $remote_user {
+ $remote_user_ensure = $remote_user
+ }
+ else {
+ $remote_user_ensure = "host-$fqdn"
+ }
+ file {
+ '/etc/init.d/autossh':
+ mode => '0555',
+ source => 'puppet:///modules/sshd/autossh.init.d';
+ '/etc/default/autossh':
+ mode => '0444',
+ content => "USER=$user\nPIDFILE=$pidfile\nDAEMON_ARGS='-M0 -f -o ServerAliveInterval=15 -o ServerAliveCountMax=4 -q -N -R $port_ensure:localhost:22 $remote_user_ensure@$host'\n";
+ }
+ package { 'autossh':
+ ensure => present,
+ }
+ service { 'autossh':
+ ensure => running,
+ enable => true,
+ subscribe => [
+ File['/etc/init.d/autossh'],
+ File['/etc/default/autossh'],
+ Package['autossh'],
+ ],
+ }
+}
diff --git a/puppet/modules/sshd/manifests/base.pp b/puppet/modules/sshd/manifests/base.pp
new file mode 100644
index 00000000..dda9f26c
--- /dev/null
+++ b/puppet/modules/sshd/manifests/base.pp
@@ -0,0 +1,41 @@
+# The base class to set up the common things.
+# This is a private class and will always be used
+# through the sshd class itself.
+class sshd::base {
+
+ $sshd_config_content = $::operatingsystem ? {
+ 'CentOS' => template("sshd/sshd_config/${::operatingsystem}_${::operatingsystemmajrelease}.erb"),
+ default => $::lsbdistcodename ? {
+ '' => template("sshd/sshd_config/${::operatingsystem}.erb"),
+ default => template("sshd/sshd_config/${::operatingsystem}_${::lsbdistcodename}.erb")
+ }
+ }
+
+ file { 'sshd_config':
+ ensure => present,
+ path => '/etc/ssh/sshd_config',
+ content => $sshd_config_content,
+ notify => Service[sshd],
+ owner => root,
+ group => 0,
+ mode => '0600';
+ }
+
+ # Now add the key, if we've got one
+ case $::sshrsakey {
+ '': { info("no sshrsakey on ${::fqdn}") }
+ default: {
+ # only export sshkey when storedconfigs is enabled
+ if $::sshd::use_storedconfigs {
+ include ::sshd::sshkey
+ }
+ }
+ }
+ service{'sshd':
+ ensure => running,
+ name => 'sshd',
+ enable => true,
+ hasstatus => true,
+ require => File[sshd_config],
+ }
+}
diff --git a/puppet/modules/sshd/manifests/client.pp b/puppet/modules/sshd/manifests/client.pp
new file mode 100644
index 00000000..84dd7abc
--- /dev/null
+++ b/puppet/modules/sshd/manifests/client.pp
@@ -0,0 +1,22 @@
+# manifests/client.pp
+
+class sshd::client(
+ $shared_ip = 'no',
+ $ensure_version = 'installed',
+ $manage_shorewall = false
+) {
+
+ case $::operatingsystem {
+ debian,ubuntu: { include sshd::client::debian }
+ default: {
+ case $::kernel {
+ linux: { include sshd::client::linux }
+ default: { include sshd::client::base }
+ }
+ }
+ }
+
+ if $manage_shorewall{
+ include shorewall::rules::out::ssh
+ }
+}
diff --git a/puppet/modules/sshd/manifests/client/base.pp b/puppet/modules/sshd/manifests/client/base.pp
new file mode 100644
index 00000000..4925c2d0
--- /dev/null
+++ b/puppet/modules/sshd/manifests/client/base.pp
@@ -0,0 +1,15 @@
+class sshd::client::base {
+ # this is needed because the gid might have changed
+ file { '/etc/ssh/ssh_known_hosts':
+ ensure => present,
+ mode => '0644',
+ owner => root,
+ group => 0;
+ }
+
+ # Now collect all server keys
+ case $sshd::client::shared_ip {
+ no: { Sshkey <<||>> }
+ yes: { Sshkey <<| tag == fqdn |>> }
+ }
+}
diff --git a/puppet/modules/sshd/manifests/client/debian.pp b/puppet/modules/sshd/manifests/client/debian.pp
new file mode 100644
index 00000000..2aaf3fb1
--- /dev/null
+++ b/puppet/modules/sshd/manifests/client/debian.pp
@@ -0,0 +1,5 @@
+class sshd::client::debian inherits sshd::client::linux {
+ Package['openssh-clients']{
+ name => 'openssh-client',
+ }
+}
diff --git a/puppet/modules/sshd/manifests/client/linux.pp b/puppet/modules/sshd/manifests/client/linux.pp
new file mode 100644
index 00000000..0c420be2
--- /dev/null
+++ b/puppet/modules/sshd/manifests/client/linux.pp
@@ -0,0 +1,5 @@
+class sshd::client::linux inherits sshd::client::base {
+ package {'openssh-clients':
+ ensure => $sshd::client::ensure_version,
+ }
+}
diff --git a/puppet/modules/sshd/manifests/debian.pp b/puppet/modules/sshd/manifests/debian.pp
new file mode 100644
index 00000000..d827078a
--- /dev/null
+++ b/puppet/modules/sshd/manifests/debian.pp
@@ -0,0 +1,13 @@
+class sshd::debian inherits sshd::linux {
+
+ Package[openssh]{
+ name => 'openssh-server',
+ }
+
+ Service[sshd]{
+ name => 'ssh',
+ pattern => 'sshd',
+ hasstatus => true,
+ hasrestart => true,
+ }
+}
diff --git a/puppet/modules/sshd/manifests/gentoo.pp b/puppet/modules/sshd/manifests/gentoo.pp
new file mode 100644
index 00000000..631f3d19
--- /dev/null
+++ b/puppet/modules/sshd/manifests/gentoo.pp
@@ -0,0 +1,5 @@
+class sshd::gentoo inherits sshd::linux {
+ Package[openssh]{
+ category => 'net-misc',
+ }
+}
diff --git a/puppet/modules/sshd/manifests/init.pp b/puppet/modules/sshd/manifests/init.pp
new file mode 100644
index 00000000..b4157418
--- /dev/null
+++ b/puppet/modules/sshd/manifests/init.pp
@@ -0,0 +1,92 @@
+# manage an sshd installation
+class sshd(
+ $manage_nagios = false,
+ $nagios_check_ssh_hostname = 'absent',
+ $ports = [ 22 ],
+ $shared_ip = 'no',
+ $ensure_version = 'installed',
+ $listen_address = [ '0.0.0.0', '::' ],
+ $allowed_users = '',
+ $allowed_groups = '',
+ $use_pam = 'no',
+ $permit_root_login = 'without-password',
+ $password_authentication = 'no',
+ $kerberos_authentication = 'no',
+ $kerberos_orlocalpasswd = 'yes',
+ $kerberos_ticketcleanup = 'yes',
+ $gssapi_authentication = 'no',
+ $gssapi_cleanupcredentials = 'yes',
+ $tcp_forwarding = 'no',
+ $x11_forwarding = 'no',
+ $agent_forwarding = 'no',
+ $challenge_response_authentication = 'no',
+ $pubkey_authentication = 'yes',
+ $rsa_authentication = 'no',
+ $strict_modes = 'yes',
+ $ignore_rhosts = 'yes',
+ $rhosts_rsa_authentication = 'no',
+ $hostbased_authentication = 'no',
+ $permit_empty_passwords = 'no',
+ $authorized_keys_file = $::osfamily ? {
+ Debian => $::lsbmajdistrelease ? {
+ 6 => '%h/.ssh/authorized_keys',
+ default => '%h/.ssh/authorized_keys %h/.ssh/authorized_keys2',
+ },
+ RedHat => $::operatingsystemmajrelease ? {
+ 5 => '%h/.ssh/authorized_keys',
+ 6 => '%h/.ssh/authorized_keys',
+ default => '%h/.ssh/authorized_keys %h/.ssh/authorized_keys2',
+ },
+ OpenBSD => '%h/.ssh/authorized_keys',
+ default => '%h/.ssh/authorized_keys %h/.ssh/authorized_keys2',
+ },
+ $hardened = 'no',
+ $sftp_subsystem = '',
+ $head_additional_options = '',
+ $tail_additional_options = '',
+ $print_motd = 'yes',
+ $manage_shorewall = false,
+ $shorewall_source = 'net',
+ $sshkey_ipaddress = $::ipaddress,
+ $manage_client = true,
+ $hostkey_type = versioncmp($::ssh_version, '6.5') ? {
+ /(^1|0)/ => [ 'rsa', 'ed25519' ],
+ /-1/ => [ 'rsa', 'dsa' ]
+ },
+ $use_storedconfigs = true
+) {
+
+ validate_bool($manage_shorewall)
+ validate_bool($manage_client)
+ validate_array($listen_address)
+ validate_array($ports)
+
+ if $manage_client {
+ class{'sshd::client':
+ shared_ip => $shared_ip,
+ ensure_version => $ensure_version,
+ manage_shorewall => $manage_shorewall,
+ }
+ }
+
+ case $::operatingsystem {
+ gentoo: { include sshd::gentoo }
+ redhat,centos: { include sshd::redhat }
+ openbsd: { include sshd::openbsd }
+ debian,ubuntu: { include sshd::debian }
+ default: { include sshd::base }
+ }
+
+ if $manage_nagios {
+ sshd::nagios{$ports:
+ check_hostname => $nagios_check_ssh_hostname
+ }
+ }
+
+ if $manage_shorewall {
+ class{'shorewall::rules::ssh':
+ ports => $ports,
+ source => $shorewall_source
+ }
+ }
+}
diff --git a/puppet/modules/sshd/manifests/libssh2.pp b/puppet/modules/sshd/manifests/libssh2.pp
new file mode 100644
index 00000000..403ac7be
--- /dev/null
+++ b/puppet/modules/sshd/manifests/libssh2.pp
@@ -0,0 +1,7 @@
+# manifests/libssh2.pp
+
+class sshd::libssh2 {
+ package{'libssh2':
+ ensure => present,
+ }
+}
diff --git a/puppet/modules/sshd/manifests/libssh2/devel.pp b/puppet/modules/sshd/manifests/libssh2/devel.pp
new file mode 100644
index 00000000..261e34c8
--- /dev/null
+++ b/puppet/modules/sshd/manifests/libssh2/devel.pp
@@ -0,0 +1,7 @@
+# manifests/libssh2/devel.pp
+
+class sshd::libssh2::devel inherits sshd::libssh2 {
+ package{"libssh2-devel.${::architecture}":
+ ensure => installed,
+ }
+}
diff --git a/puppet/modules/sshd/manifests/linux.pp b/puppet/modules/sshd/manifests/linux.pp
new file mode 100644
index 00000000..8628ff5e
--- /dev/null
+++ b/puppet/modules/sshd/manifests/linux.pp
@@ -0,0 +1,8 @@
+class sshd::linux inherits sshd::base {
+ package{'openssh':
+ ensure => $sshd::ensure_version,
+ }
+ File[sshd_config]{
+ require +> Package[openssh],
+ }
+}
diff --git a/puppet/modules/sshd/manifests/nagios.pp b/puppet/modules/sshd/manifests/nagios.pp
new file mode 100644
index 00000000..6921de91
--- /dev/null
+++ b/puppet/modules/sshd/manifests/nagios.pp
@@ -0,0 +1,24 @@
+define sshd::nagios(
+ $port = 'absent',
+ $ensure = 'present',
+ $check_hostname = 'absent'
+) {
+ $real_port = $port ? {
+ 'absent' => $name,
+ default => $port,
+ }
+ case $check_hostname {
+ 'absent': {
+ nagios::service{"ssh_port_${name}":
+ ensure => $ensure,
+ check_command => "check_ssh_port!${real_port}"
+ }
+ }
+ default: {
+ nagios::service{"ssh_port_host_${name}":
+ ensure => $ensure,
+ check_command => "check_ssh_port_host!${real_port}!${check_hostname}"
+ }
+ }
+ }
+}
diff --git a/puppet/modules/sshd/manifests/openbsd.pp b/puppet/modules/sshd/manifests/openbsd.pp
new file mode 100644
index 00000000..cb6dbba6
--- /dev/null
+++ b/puppet/modules/sshd/manifests/openbsd.pp
@@ -0,0 +1,8 @@
+class sshd::openbsd inherits sshd::base {
+ Service[sshd]{
+ restart => '/bin/kill -HUP `/bin/cat /var/run/sshd.pid`',
+ stop => '/bin/kill `/bin/cat /var/run/sshd.pid`',
+ start => '/usr/sbin/sshd',
+ status => '/usr/bin/pgrep -f /usr/sbin/sshd',
+ }
+}
diff --git a/puppet/modules/sshd/manifests/redhat.pp b/puppet/modules/sshd/manifests/redhat.pp
new file mode 100644
index 00000000..d7201774
--- /dev/null
+++ b/puppet/modules/sshd/manifests/redhat.pp
@@ -0,0 +1,5 @@
+class sshd::redhat inherits sshd::linux {
+ Package[openssh]{
+ name => 'openssh-server',
+ }
+}
diff --git a/puppet/modules/sshd/manifests/ssh_authorized_key.pp b/puppet/modules/sshd/manifests/ssh_authorized_key.pp
new file mode 100644
index 00000000..80cb3b70
--- /dev/null
+++ b/puppet/modules/sshd/manifests/ssh_authorized_key.pp
@@ -0,0 +1,85 @@
+# A wrapper around ssh_authorized_key that sets some defaults.
+define sshd::ssh_authorized_key(
+ $ensure = 'present',
+ $type = 'ssh-dss',
+ $key = 'absent',
+ $user = '',
+ $target = undef,
+ $options = 'absent',
+ $override_builtin = undef
+){
+
+ if ($ensure=='present') and ($key=='absent') {
+ fail("You have to set \$key for Sshd::Ssh_authorized_key[${name}]!")
+ }
+
+ $real_user = $user ? {
+ false => $name,
+ '' => $name,
+ default => $user,
+ }
+
+ case $target {
+ undef,'': {
+ case $real_user {
+ 'root': { $real_target = '/root/.ssh/authorized_keys' }
+ default: { $real_target = "/home/${real_user}/.ssh/authorized_keys" }
+ }
+ }
+ default: {
+ $real_target = $target
+ }
+ }
+
+ # The ssh_authorized_key built-in type (in 2.7.23 at least)
+ # will not write an authorized_keys file for a mortal user to
+ # a directory they don't have write permission to: puppet attempts to
+ # create the file as the user specified with the user parameter and fails.
+ # Since ssh will refuse to use authorized_keys files not owned by the
+ # user, or in files/directories that allow other users to write, this
+ # behavior is deliberate in order to prevent typical non-working
+ # configurations. However, it also prevents the case of puppet, running
+ # as root, writing a file owned by a mortal user to a common
+ # authorized_keys directory such as one might specify in sshd_config with
+ # something like
+ # 'AuthorizedKeysFile /etc/ssh/authorized_keys/%u'
+ # So we provide a way to override the built-in and instead just install
+ # via a file resource. There is no additional security risk here; it's
+ # nothing a user can't already do by writing their own file resources, and
+ # we still depend on the filesystem permissions to keep things safe.
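+ #
+ # A hypothetical example of using the override (the user, key and target
+ # below are placeholders, not values taken from this repository):
+ #
+ #   sshd::ssh_authorized_key { 'backup':
+ #     type             => 'ssh-rsa',
+ #     key              => 'AAAAB3Nza...',
+ #     user             => 'backup',
+ #     target           => '/etc/ssh/authorized_keys/backup',
+ #     override_builtin => true,
+ #   }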
+ if $override_builtin {
+ $header = "# HEADER: This file is managed by Puppet.\n"
+
+ if $options == 'absent' {
+ info("not setting any option for ssh_authorized_key: ${name}")
+ $content = "${header}${type} ${key}\n"
+ } else {
+ $content = "${header}${options} ${type} ${key}\n"
+ }
+
+ file { $real_target:
+ ensure => $ensure,
+ content => $content,
+ owner => $real_user,
+ mode => '0600',
+ }
+
+ } else {
+
+ if $options == 'absent' {
+ info("not setting any option for ssh_authorized_key: ${name}")
+ } else {
+ $real_options = $options
+ }
+
+ ssh_authorized_key{$name:
+ ensure => $ensure,
+ type => $type,
+ key => $key,
+ user => $real_user,
+ target => $real_target,
+ options => $real_options,
+ }
+ }
+
+}
diff --git a/puppet/modules/sshd/manifests/sshkey.pp b/puppet/modules/sshd/manifests/sshkey.pp
new file mode 100644
index 00000000..df37a66c
--- /dev/null
+++ b/puppet/modules/sshd/manifests/sshkey.pp
@@ -0,0 +1,21 @@
+# exports this host's ssh host key as sshkey resources
+class sshd::sshkey {
+
+ @@sshkey{$::fqdn:
+ ensure => present,
+ tag => 'fqdn',
+ type => 'ssh-rsa',
+ key => $::sshrsakey,
+ }
+
+ # In case the node uses a shared network address,
+ # we don't define an sshkey resource using an IP address
+ if $sshd::shared_ip == 'no' {
+ @@sshkey{$::sshd::sshkey_ipaddress:
+ ensure => present,
+ tag => 'ipaddress',
+ type => 'ssh-rsa',
+ key => $::sshrsakey,
+ }
+ }
+}
diff --git a/puppet/modules/sshd/spec/classes/client_spec.rb b/puppet/modules/sshd/spec/classes/client_spec.rb
new file mode 100644
index 00000000..bd3e35af
--- /dev/null
+++ b/puppet/modules/sshd/spec/classes/client_spec.rb
@@ -0,0 +1,42 @@
+require 'spec_helper'
+
+describe 'sshd::client' do
+
+ shared_examples "a Linux OS" do
+ it { should contain_file('/etc/ssh/ssh_known_hosts').with(
+ {
+ 'ensure' => 'present',
+ 'owner' => 'root',
+ 'group' => '0',
+ 'mode' => '0644',
+ }
+ )}
+ end
+
+ context "Debian OS" do
+ let :facts do
+ {
+ :operatingsystem => 'Debian',
+ :osfamily => 'Debian',
+ :lsbdistcodename => 'wheezy',
+ }
+ end
+ it_behaves_like "a Linux OS"
+ it { should contain_package('openssh-clients').with({
+ 'name' => 'openssh-client'
+ }) }
+ end
+
+ context "CentOS" do
+ it_behaves_like "a Linux OS" do
+ let :facts do
+ {
+ :operatingsystem => 'CentOS',
+ :osfamily => 'RedHat',
+ :lsbdistcodename => 'Final',
+ }
+ end
+ end
+ end
+
+end \ No newline at end of file
diff --git a/puppet/modules/sshd/spec/classes/init_spec.rb b/puppet/modules/sshd/spec/classes/init_spec.rb
new file mode 100644
index 00000000..e3003d14
--- /dev/null
+++ b/puppet/modules/sshd/spec/classes/init_spec.rb
@@ -0,0 +1,122 @@
+require 'spec_helper'
+
+describe 'sshd' do
+
+ shared_examples "a Linux OS" do
+ it { should compile.with_all_deps }
+ it { should contain_class('sshd') }
+ it { should contain_class('sshd::client') }
+
+ it { should contain_service('sshd').with({
+ :ensure => 'running',
+ :enable => true,
+ :hasstatus => true
+ })}
+
+ it { should contain_file('sshd_config').with(
+ {
+ 'ensure' => 'present',
+ 'owner' => 'root',
+ 'group' => '0',
+ 'mode' => '0600',
+ }
+ )}
+
+ context 'change ssh port' do
+ let(:params){{
+ :ports => [ 22222],
+ }}
+ it { should contain_file(
+ 'sshd_config'
+ ).with_content(/Port 22222/)}
+ end
+ end
+
+ context "Debian OS" do
+ let :facts do
+ {
+ :operatingsystem => 'Debian',
+ :osfamily => 'Debian',
+ :lsbdistcodename => 'wheezy',
+ }
+ end
+ it_behaves_like "a Linux OS"
+ it { should contain_package('openssh') }
+ it { should contain_class('sshd::debian') }
+ it { should contain_service('sshd').with(
+ :hasrestart => true
+ )}
+
+ context "Ubuntu" do
+ let :facts do
+ {
+ :operatingsystem => 'Ubuntu',
+ :lsbdistcodename => 'precise',
+ }
+ end
+ it_behaves_like "a Linux OS"
+ it { should contain_package('openssh') }
+ it { should contain_service('sshd').with({
+ :hasrestart => true
+ })}
+ end
+ end
+
+
+# context "RedHat OS" do
+# it_behaves_like "a Linux OS" do
+# let :facts do
+# {
+# :operatingsystem => 'RedHat',
+# :osfamily => 'RedHat',
+# }
+# end
+# end
+# end
+
+ context "CentOS" do
+ it_behaves_like "a Linux OS" do
+ let :facts do
+ {
+ :operatingsystem => 'CentOS',
+ :osfamily => 'RedHat',
+ :lsbdistcodename => 'Final',
+ }
+ end
+ end
+ end
+
+ context "Gentoo" do
+ let :facts do
+ {
+ :operatingsystem => 'Gentoo',
+ :osfamily => 'Gentoo',
+ }
+ end
+ it_behaves_like "a Linux OS"
+ it { should contain_class('sshd::gentoo') }
+ end
+
+ context "OpenBSD" do
+ let :facts do
+ {
+ :operatingsystem => 'OpenBSD',
+ :osfamily => 'OpenBSD',
+ }
+ end
+ it_behaves_like "a Linux OS"
+ it { should contain_class('sshd::openbsd') }
+ end
+
+# context "FreeBSD" do
+# it_behaves_like "a Linux OS" do
+# let :facts do
+# {
+# :operatingsystem => 'FreeBSD',
+# :osfamily => 'FreeBSD',
+# }
+# end
+# end
+# end
+
+end \ No newline at end of file
diff --git a/puppet/modules/sshd/spec/defines/ssh_authorized_key_spec.rb b/puppet/modules/sshd/spec/defines/ssh_authorized_key_spec.rb
new file mode 100644
index 00000000..c73a91cc
--- /dev/null
+++ b/puppet/modules/sshd/spec/defines/ssh_authorized_key_spec.rb
@@ -0,0 +1,45 @@
+require 'spec_helper'
+
+describe 'sshd::ssh_authorized_key' do
+
+ context 'manage authorized key' do
+ let(:title) { 'foo' }
+ let(:ssh_key) { 'some_secret_ssh_key' }
+
+ let(:params) {{
+ :key => ssh_key,
+ }}
+
+ it { should contain_ssh_authorized_key('foo').with({
+ 'ensure' => 'present',
+ 'type' => 'ssh-dss',
+ 'user' => 'foo',
+ 'target' => '/home/foo/.ssh/authorized_keys',
+ 'key' => ssh_key,
+ })
+ }
+ end
+ context 'manage authorized key with options' do
+ let(:title) { 'foo2' }
+ let(:ssh_key) { 'some_secret_ssh_key' }
+
+ let(:params) {{
+ :key => ssh_key,
+ :options => ['command="/usr/bin/date"',
+ 'no-pty','no-X11-forwarding','no-agent-forwarding',
+ 'no-port-forwarding']
+ }}
+
+ it { should contain_ssh_authorized_key('foo2').with({
+ 'ensure' => 'present',
+ 'type' => 'ssh-dss',
+ 'user' => 'foo2',
+ 'target' => '/home/foo2/.ssh/authorized_keys',
+ 'key' => ssh_key,
+ 'options' => ['command="/usr/bin/date"',
+ 'no-pty','no-X11-forwarding','no-agent-forwarding',
+ 'no-port-forwarding']
+ })
+ }
+ end
+end
diff --git a/puppet/modules/sshd/spec/functions/ssh_keygen_spec.rb b/puppet/modules/sshd/spec/functions/ssh_keygen_spec.rb
new file mode 100644
index 00000000..a6b51173
--- /dev/null
+++ b/puppet/modules/sshd/spec/functions/ssh_keygen_spec.rb
@@ -0,0 +1,116 @@
+#! /usr/bin/env ruby -S rspec
+require 'spec_helper'
+require 'rspec-puppet'
+require 'mocha'
+require 'fileutils'
+
+describe 'ssh_keygen' do
+
+ let(:scope) { PuppetlabsSpec::PuppetInternals.scope }
+
+ it 'should exist' do
+ Puppet::Parser::Functions.function("ssh_keygen").should == "function_ssh_keygen"
+ end
+
+ it 'should raise a ParseError if no argument is passed' do
+ lambda {
+ scope.function_ssh_keygen([])
+ }.should(raise_error(Puppet::ParseError))
+ end
+
+ it 'should raise a ParseError if there is more than one argument' do
+ lambda {
+ scope.function_ssh_keygen(["foo", "bar"])
+ }.should( raise_error(Puppet::ParseError))
+ end
+
+ it 'should raise a ParseError if the argument is not fully qualified' do
+ lambda {
+ scope.function_ssh_keygen(["foo"])
+ }.should( raise_error(Puppet::ParseError))
+ end
+
+ it "should raise a ParseError if the private key path is a directory" do
+ File.stubs(:directory?).with("/some_dir").returns(true)
+ lambda {
+ scope.function_ssh_keygen(["/some_dir"])
+ }.should( raise_error(Puppet::ParseError))
+ end
+
+ it "should raise a ParseError if the public key path is a directory" do
+ File.stubs(:directory?).with("/some_dir.pub").returns(true)
+ lambda {
+ scope.function_ssh_keygen(["/some_dir.pub"])
+ }.should( raise_error(Puppet::ParseError))
+ end
+
+ describe 'when executing properly' do
+ before do
+ File.stubs(:directory?).with('/tmp/a/b/c').returns(false)
+ File.stubs(:directory?).with('/tmp/a/b/c.pub').returns(false)
+ File.stubs(:read).with('/tmp/a/b/c').returns('privatekey')
+ File.stubs(:read).with('/tmp/a/b/c.pub').returns('publickey')
+ end
+
+ it 'should fail if the public but not the private key exists' do
+ File.stubs(:exists?).with('/tmp/a/b/c').returns(true)
+ File.stubs(:exists?).with('/tmp/a/b/c.pub').returns(false)
+ lambda {
+ scope.function_ssh_keygen(['/tmp/a/b/c'])
+ }.should( raise_error(Puppet::ParseError))
+ end
+
+ it "should fail if the private but not the public key exists" do
+ File.stubs(:exists?).with("/tmp/a/b/c").returns(false)
+ File.stubs(:exists?).with("/tmp/a/b/c.pub").returns(true)
+ lambda {
+ scope.function_ssh_keygen(["/tmp/a/b/c"])
+ }.should( raise_error(Puppet::ParseError))
+ end
+
+
+ it "should return an array of size 2 with the right content if the keyfiles exist" do
+ File.stubs(:exists?).with("/tmp/a/b/c").returns(true)
+ File.stubs(:exists?).with("/tmp/a/b/c.pub").returns(true)
+ File.stubs(:directory?).with('/tmp/a/b').returns(true)
+ Puppet::Util.expects(:execute).never
+ result = scope.function_ssh_keygen(['/tmp/a/b/c'])
+ result.length.should == 2
+ result[0].should == 'privatekey'
+ result[1].should == 'publickey'
+ end
+
+ it "should create the directory path if it does not exist" do
+ File.stubs(:exists?).with("/tmp/a/b/c").returns(false)
+ File.stubs(:exists?).with("/tmp/a/b/c.pub").returns(false)
+ File.stubs(:directory?).with("/tmp/a/b").returns(false)
+ FileUtils.expects(:mkdir_p).with("/tmp/a/b", :mode => 0700)
+ Puppet::Util::Execution.expects(:execute).returns("")
+ result = scope.function_ssh_keygen(['/tmp/a/b/c'])
+ result.length.should == 2
+ result[0].should == 'privatekey'
+ result[1].should == 'publickey'
+ end
+
+ it "should generate the key if the keyfiles do not exist" do
+ File.stubs(:exists?).with("/tmp/a/b/c").returns(false)
+ File.stubs(:exists?).with("/tmp/a/b/c.pub").returns(false)
+ File.stubs(:directory?).with("/tmp/a/b").returns(true)
+ Puppet::Util::Execution.expects(:execute).with(['/usr/bin/ssh-keygen','-t', 'rsa', '-b', '4096', '-f', '/tmp/a/b/c', '-P', '', '-q']).returns("")
+ result = scope.function_ssh_keygen(['/tmp/a/b/c'])
+ result.length.should == 2
+ result[0].should == 'privatekey'
+ result[1].should == 'publickey'
+ end
+
+ it "should fail if something goes wrong during generation" do
+ File.stubs(:exists?).with("/tmp/a/b/c").returns(false)
+ File.stubs(:exists?).with("/tmp/a/b/c.pub").returns(false)
+ File.stubs(:directory?).with("/tmp/a/b").returns(true)
+ Puppet::Util::Execution.expects(:execute).with(['/usr/bin/ssh-keygen','-t', 'rsa', '-b', '4096', '-f', '/tmp/a/b/c', '-P', '', '-q']).returns("something is wrong")
+ lambda {
+ scope.function_ssh_keygen(["/tmp/a/b/c"])
+ }.should( raise_error(Puppet::ParseError))
+ end
+ end
+end
diff --git a/puppet/modules/sshd/spec/spec_helper.rb b/puppet/modules/sshd/spec/spec_helper.rb
new file mode 100644
index 00000000..b4123fde
--- /dev/null
+++ b/puppet/modules/sshd/spec/spec_helper.rb
@@ -0,0 +1,21 @@
+dir = File.expand_path(File.dirname(__FILE__))
+$LOAD_PATH.unshift File.join(dir, 'lib')
+require 'puppet'
+require 'rspec'
+require 'puppetlabs_spec_helper/module_spec_helper'
+#require 'rspec-hiera-puppet'
+require 'rspec-puppet/coverage'
+require 'rspec/autorun'
+
+fixture_path = File.expand_path(File.join(__FILE__, '..', 'fixtures'))
+
+RSpec.configure do |c|
+ c.module_path = File.join(fixture_path, 'modules')
+ c.manifest_dir = File.join(fixture_path, 'manifests')
+ c.pattern = "spec/*/*_spec.rb"
+end
+
+Puppet::Util::Log.level = :warning
+Puppet::Util::Log.newdestination(:console)
+
+at_exit { RSpec::Puppet::Coverage.report! } \ No newline at end of file
diff --git a/puppet/modules/sshd/spec/spec_helper_system.rb b/puppet/modules/sshd/spec/spec_helper_system.rb
new file mode 100644
index 00000000..2c6812fc
--- /dev/null
+++ b/puppet/modules/sshd/spec/spec_helper_system.rb
@@ -0,0 +1,25 @@
+require 'rspec-system/spec_helper'
+require 'rspec-system-puppet/helpers'
+require 'rspec-system-serverspec/helpers'
+include Serverspec::Helper::RSpecSystem
+include Serverspec::Helper::DetectOS
+include RSpecSystemPuppet::Helpers
+
+RSpec.configure do |c|
+ # Project root
+ proj_root = File.expand_path(File.join(File.dirname(__FILE__), '..'))
+
+ # Enable colour
+ c.tty = true
+
+ c.include RSpecSystemPuppet::Helpers
+
+ # This is where we 'setup' the nodes before running our tests
+ c.before :suite do
+ # Install puppet
+ puppet_install
+ # Install modules and dependencies
+ puppet_module_install(:source => proj_root, :module_name => 'sshd')
+ shell('puppet module install puppetlabs-stdlib')
+ end
+end
diff --git a/puppet/modules/sshd/templates/sshd_config/CentOS_5.erb b/puppet/modules/sshd/templates/sshd_config/CentOS_5.erb
new file mode 120000
index 00000000..71b767a5
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/CentOS_5.erb
@@ -0,0 +1 @@
+CentOS_6.erb \ No newline at end of file
diff --git a/puppet/modules/sshd/templates/sshd_config/CentOS_6.erb b/puppet/modules/sshd/templates/sshd_config/CentOS_6.erb
new file mode 100644
index 00000000..4593a91a
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/CentOS_6.erb
@@ -0,0 +1,172 @@
+# $OpenBSD: sshd_config,v 1.73 2005/12/06 22:38:28 reyk Exp $
+
+# This is the sshd server system-wide configuration file. See
+# sshd_config(5) for more information.
+
+# This sshd was compiled with PATH=/usr/local/bin:/bin:/usr/bin
+
+# The strategy used for options in the default sshd_config shipped with
+# OpenSSH is to specify options with their default value where
+# possible, but leave them commented. Uncommented options change a
+# default value.
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+# Use these options to restrict which interfaces/protocols sshd will bind to
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+
+# Disable legacy (protocol version 1) support in the server for new
+# installations. In future the default will change to require explicit
+# activation of protocol 1
+Protocol 2
+
+# HostKey for protocol version 1
+#HostKey /etc/ssh/ssh_host_key
+# HostKeys for protocol version 2
+#HostKey /etc/ssh/ssh_host_rsa_key
+#HostKey /etc/ssh/ssh_host_dsa_key
+
+# Lifetime and size of ephemeral version 1 server key
+#KeyRegenerationInterval 1h
+#ServerKeyBits 1024
+
+# Logging
+# obsoletes QuietMode and FascistLogging
+#SyslogFacility AUTH
+SyslogFacility AUTHPRIV
+#LogLevel INFO
+
+# Authentication:
+
+#LoginGraceTime 2m
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+#MaxAuthTries 6
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+#AuthorizedKeysCommand none
+#AuthorizedKeysCommandRunAs nobody
+
+# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+
+# Change to yes if you don't trust ~/.ssh/known_hosts for
+# RhostsRSAAuthentication and HostbasedAuthentication
+#IgnoreUserKnownHosts no
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+
+# To disable tunneled clear text passwords, change to no here!
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to no to disable s/key passwords
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# Kerberos options
+#KerberosAuthentication no
+#KerberosOrLocalPasswd yes
+#KerberosTicketCleanup yes
+#KerberosGetAFSToken no
+#KerberosUseKuserok yes
+
+# GSSAPI options
+#GSSAPIAuthentication no
+#GSSAPICleanupCredentials yes
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+#UsePAM no
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+# Accept locale-related environment variables
+AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
+AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
+AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE
+AcceptEnv XMODIFIERS
+
+#AllowAgentForwarding yes
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+#GatewayPorts no
+#X11Forwarding no
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+#X11DisplayOffset 10
+#X11UseLocalhost yes
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+#PrintLastLog yes
+#TCPKeepAlive yes
+#UseLogin no
+#UsePrivilegeSeparation yes
+#PermitUserEnvironment no
+#Compression delayed
+#ClientAliveInterval 0
+#ClientAliveCountMax 3
+#ShowPatchLevel no
+#UseDNS yes
+#PidFile /var/run/sshd.pid
+#MaxStartups 10:30:100
+#PermitTunnel no
+#ChrootDirectory none
+
+# no default banner path
+#Banner /some/path
+
+# override default of no subsystems
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/libexec/openssh/sftp-server' : s %>
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+<% if (scope.function_versioncmp([scope.lookupvar('::ssh_version'),'6.5'])) >= 0 -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% else -%>
+Ciphers aes256-ctr
+MACs hmac-sha1
+<% end -%>
+<% end -%>
+
+# Example of overriding settings on a per-user basis
+#Match User anoncvs
+# X11Forwarding no
+# AllowTcpForwarding no
+# ForceCommand cvs server
+#
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/CentOS_7.erb b/puppet/modules/sshd/templates/sshd_config/CentOS_7.erb
new file mode 100644
index 00000000..f55fb9d0
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/CentOS_7.erb
@@ -0,0 +1,186 @@
+# $OpenBSD: sshd_config,v 1.90 2013/05/16 04:09:14 dtucker Exp $
+
+# This is the sshd server system-wide configuration file. See
+# sshd_config(5) for more information.
+
+# This sshd was compiled with PATH=/usr/local/bin:/bin:/usr/bin
+
+# The strategy used for options in the default sshd_config shipped with
+# OpenSSH is to specify options with their default value where
+# possible, but leave them commented. Uncommented options change a
+# default value.
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+# If you want to change the port on a SELinux system, you have to tell
+# SELinux about this change.
+# semanage port -a -t ssh_port_t -p tcp #PORTNUMBER
+#
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+
+# The default requires explicit activation of protocol 1
+#Protocol 2
+
+# HostKey for protocol version 1
+#HostKey /etc/ssh/ssh_host_key
+# HostKeys for protocol version 2
+<% scope.lookupvar('::sshd::hostkey_type').to_a.each do |hostkey_type| -%>
+HostKey /etc/ssh/ssh_host_<%=hostkey_type %>_key
+<% end -%>
+
+# Lifetime and size of ephemeral version 1 server key
+#KeyRegenerationInterval 1h
+#ServerKeyBits 1024
+
+# Ciphers and keying
+#RekeyLimit default none
+
+# Logging
+# obsoletes QuietMode and FascistLogging
+#SyslogFacility AUTH
+SyslogFacility AUTHPRIV
+#LogLevel INFO
+
+# Authentication:
+
+#LoginGraceTime 2m
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+#MaxAuthTries 6
+#MaxSessions 10
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+#AuthorizedPrincipalsFile none
+#AuthorizedKeysCommand none
+#AuthorizedKeysCommandRunAs nobody
+
+# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+
+# Change to yes if you don't trust ~/.ssh/known_hosts for
+# RhostsRSAAuthentication and HostbasedAuthentication
+#IgnoreUserKnownHosts no
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+
+# To disable tunneled clear text passwords, change to no here!
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to no to disable s/key passwords
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# Kerberos options
+#KerberosAuthentication no
+#KerberosOrLocalPasswd yes
+#KerberosTicketCleanup yes
+#KerberosGetAFSToken no
+#KerberosUseKuserok yes
+
+# GSSAPI options
+GSSAPIAuthentication no
+GSSAPICleanupCredentials yes
+#GSSAPIStrictAcceptorCheck yes
+#GSSAPIKeyExchange no
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+# WARNING: 'UsePAM no' is not supported in Red Hat Enterprise Linux and may cause several
+# problems.
+#UsePAM no
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+#AllowAgentForwarding yes
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+#GatewayPorts no
+#X11Forwarding no
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+#X11DisplayOffset 10
+#X11UseLocalhost yes
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+#PrintLastLog yes
+#TCPKeepAlive yes
+#UseLogin no
+UsePrivilegeSeparation sandbox # Default for new installations.
+#PermitUserEnvironment no
+#Compression delayed
+#ClientAliveInterval 0
+#ClientAliveCountMax 3
+#ShowPatchLevel no
+#UseDNS yes
+#PidFile /var/run/sshd.pid
+#MaxStartups 10:30:100
+#PermitTunnel no
+#ChrootDirectory none
+#VersionAddendum none
+
+# no default banner path
+#Banner none
+
+# Accept locale-related environment variables
+AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
+AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
+AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE
+AcceptEnv XMODIFIERS
+
+
+# override default of no subsystems
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/libexec/openssh/sftp-server' : s %>
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+# Uncomment this if you want to use .local domain
+#Host *.local
+# CheckHostIP no
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+<% if (scope.function_versioncmp([scope.lookupvar('::ssh_version'),'6.5'])) >= 0 -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% else -%>
+Ciphers aes256-ctr
+MACs hmac-sha1
+<% end -%>
+<% end -%>
+
+# Example of overriding settings on a per-user basis
+#Match User anoncvs
+# X11Forwarding no
+# AllowTcpForwarding no
+# ForceCommand cvs server
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/Debian_jessie.erb b/puppet/modules/sshd/templates/sshd_config/Debian_jessie.erb
new file mode 100644
index 00000000..91dbfff0
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/Debian_jessie.erb
@@ -0,0 +1,124 @@
+# This file is managed by Puppet, all local modifications will be overwritten
+#
+# Package generated configuration file
+# See the sshd_config(5) manpage for details
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+# What ports, IPs and protocols we listen for
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+# Use these options to restrict which interfaces/protocols sshd will bind to
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+Protocol 2
+# HostKeys for protocol version 2
+<% scope.lookupvar('::sshd::hostkey_type').to_a.each do |hostkey_type| -%>
+HostKey /etc/ssh/ssh_host_<%=hostkey_type %>_key
+<% end -%>
+#Privilege Separation is turned on for security
+UsePrivilegeSeparation yes
+
+# Lifetime and size of ephemeral version 1 server key
+KeyRegenerationInterval 3600
+ServerKeyBits 1024
+
+# Logging
+SyslogFacility AUTH
+LogLevel INFO
+
+# Authentication:
+LoginGraceTime 120
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+# For this to work you will also need host keys in /etc/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
+#IgnoreUserKnownHosts yes
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to yes to enable challenge-response passwords (beware issues with
+# some PAM modules and threads)
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# Change to no to disable tunnelled clear text passwords
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# Kerberos options
+KerberosAuthentication <%= scope.lookupvar('::sshd::kerberos_authentication') %>
+#KerberosGetAFSToken no
+KerberosOrLocalPasswd <%= scope.lookupvar('::sshd::kerberos_orlocalpasswd') %>
+KerberosTicketCleanup <%= scope.lookupvar('::sshd::kerberos_ticketcleanup') %>
+
+# GSSAPI options
+GSSAPIAuthentication <%= scope.lookupvar('::sshd::gssapi_authentication') %>
+GSSAPICleanupCredentials <%= scope.lookupvar('::sshd::gssapi_cleanupcredentials') %>
+
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+X11DisplayOffset 10
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+PrintLastLog yes
+TCPKeepAlive yes
+#UseLogin no
+
+#MaxStartups 10:30:60
+#Banner /etc/issue.net
+# do not reveal debian version (default is yes)
+DebianBanner no
+
+# Allow client to pass locale environment variables
+AcceptEnv LANG LC_*
+
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/lib/openssh/sftp-server' : s %>
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+
+AllowAgentForwarding <%= scope.lookupvar('::sshd::agent_forwarding') %>
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% end -%>
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/Debian_sid.erb b/puppet/modules/sshd/templates/sshd_config/Debian_sid.erb
new file mode 100644
index 00000000..91dbfff0
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/Debian_sid.erb
@@ -0,0 +1,124 @@
+# This file is managed by Puppet, all local modifications will be overwritten
+#
+# Package generated configuration file
+# See the sshd_config(5) manpage for details
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+# What ports, IPs and protocols we listen for
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+# Use these options to restrict which interfaces/protocols sshd will bind to
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+Protocol 2
+# HostKeys for protocol version 2
+<% scope.lookupvar('::sshd::hostkey_type').to_a.each do |hostkey_type| -%>
+HostKey /etc/ssh/ssh_host_<%=hostkey_type %>_key
+<% end -%>
+#Privilege Separation is turned on for security
+UsePrivilegeSeparation yes
+
+# Lifetime and size of ephemeral version 1 server key
+KeyRegenerationInterval 3600
+ServerKeyBits 1024
+
+# Logging
+SyslogFacility AUTH
+LogLevel INFO
+
+# Authentication:
+LoginGraceTime 120
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+# For this to work you will also need host keys in /etc/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
+#IgnoreUserKnownHosts yes
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to yes to enable challenge-response passwords (beware issues with
+# some PAM modules and threads)
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# Change to no to disable tunnelled clear text passwords
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# Kerberos options
+KerberosAuthentication <%= scope.lookupvar('::sshd::kerberos_authentication') %>
+#KerberosGetAFSToken no
+KerberosOrLocalPasswd <%= scope.lookupvar('::sshd::kerberos_orlocalpasswd') %>
+KerberosTicketCleanup <%= scope.lookupvar('::sshd::kerberos_ticketcleanup') %>
+
+# GSSAPI options
+GSSAPIAuthentication <%= scope.lookupvar('::sshd::gssapi_authentication') %>
+GSSAPICleanupCredentials <%= scope.lookupvar('::sshd::gssapi_cleanupcredentials') %>
+
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+X11DisplayOffset 10
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+PrintLastLog yes
+TCPKeepAlive yes
+#UseLogin no
+
+#MaxStartups 10:30:60
+#Banner /etc/issue.net
+# do not reveal debian version (default is yes)
+DebianBanner no
+
+# Allow client to pass locale environment variables
+AcceptEnv LANG LC_*
+
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/lib/openssh/sftp-server' : s %>
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+
+AllowAgentForwarding <%= scope.lookupvar('::sshd::agent_forwarding') %>
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% end -%>
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/Debian_squeeze.erb b/puppet/modules/sshd/templates/sshd_config/Debian_squeeze.erb
new file mode 100644
index 00000000..649b320a
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/Debian_squeeze.erb
@@ -0,0 +1,127 @@
+# This file is managed by Puppet, all local modifications will be overwritten
+#
+# Package generated configuration file
+# See the sshd(8) manpage for details
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+# What ports, IPs and protocols we listen for
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+# Use these options to restrict which interfaces/protocols sshd will bind to
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+Protocol 2
+# HostKeys for protocol version 2
+<% scope.lookupvar('::sshd::hostkey_type').to_a.each do |hostkey_type| -%>
+HostKey /etc/ssh/ssh_host_<%=hostkey_type %>_key
+<% end -%>
+
+#Privilege Separation is turned on for security
+UsePrivilegeSeparation yes
+
+# Lifetime and size of ephemeral version 1 server key
+KeyRegenerationInterval 3600
+ServerKeyBits 768
+
+# Logging
+SyslogFacility AUTH
+LogLevel INFO
+
+# Authentication:
+LoginGraceTime 120
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+# For this to work you will also need host keys in /etc/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
+#IgnoreUserKnownHosts yes
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to yes to enable challenge-response passwords (beware issues with
+# some PAM modules and threads)
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# To disable tunneled clear text passwords, change to no here!
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# Kerberos options
+KerberosAuthentication <%= scope.lookupvar('::sshd::kerberos_authentication') %>
+KerberosOrLocalPasswd <%= scope.lookupvar('::sshd::kerberos_orlocalpasswd') %>
+KerberosTicketCleanup <%= scope.lookupvar('::sshd::kerberos_ticketcleanup') %>
+
+# GSSAPI options
+GSSAPIAuthentication <%= scope.lookupvar('::sshd::gssapi_authentication') %>
+GSSAPICleanupCredentials <%= scope.lookupvar('::sshd::gssapi_cleanupcredentials') %>
+
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+X11DisplayOffset 10
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+PrintLastLog yes
+TCPKeepAlive yes
+
+#UseLogin no
+
+#MaxStartups 10:30:60
+#Banner /etc/issue.net
+# do not reveal debian version (default is yes)
+DebianBanner no
+
+# Allow client to pass locale environment variables
+AcceptEnv LANG LC_*
+
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/lib/openssh/sftp-server' : s %>
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+
+AllowAgentForwarding <%= scope.lookupvar('::sshd::agent_forwarding') %>
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+Ciphers aes256-ctr
+MACs hmac-sha2-512
+<% end -%>
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/Debian_wheezy.erb b/puppet/modules/sshd/templates/sshd_config/Debian_wheezy.erb
new file mode 100644
index 00000000..bcb15286
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/Debian_wheezy.erb
@@ -0,0 +1,132 @@
+# This file is managed by Puppet, all local modifications will be overwritten
+#
+# Package generated configuration file
+# See the sshd(8) manpage for details
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+# What ports, IPs and protocols we listen for
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+# Use these options to restrict which interfaces/protocols sshd will bind to
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+Protocol 2
+# HostKeys for protocol version 2
+<% scope.lookupvar('::sshd::hostkey_type').to_a.each do |hostkey_type| -%>
+HostKey /etc/ssh/ssh_host_<%=hostkey_type %>_key
+<% end -%>
+#Privilege Separation is turned on for security
+UsePrivilegeSeparation yes
+
+# Lifetime and size of ephemeral version 1 server key
+KeyRegenerationInterval 3600
+ServerKeyBits 768
+
+# Logging
+SyslogFacility AUTH
+LogLevel INFO
+
+# Authentication:
+LoginGraceTime 120
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+# For this to work you will also need host keys in /etc/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
+#IgnoreUserKnownHosts yes
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to yes to enable challenge-response passwords (beware issues with
+# some PAM modules and threads)
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# To disable tunneled clear text passwords, change to no here!
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# Kerberos options
+KerberosAuthentication <%= scope.lookupvar('::sshd::kerberos_authentication') %>
+KerberosOrLocalPasswd <%= scope.lookupvar('::sshd::kerberos_orlocalpasswd') %>
+KerberosTicketCleanup <%= scope.lookupvar('::sshd::kerberos_ticketcleanup') %>
+
+# GSSAPI options
+GSSAPIAuthentication <%= scope.lookupvar('::sshd::gssapi_authentication') %>
+GSSAPICleanupCredentials <%= scope.lookupvar('::sshd::gssapi_cleanupcredentials') %>
+
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+X11DisplayOffset 10
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+PrintLastLog yes
+TCPKeepAlive yes
+
+#UseLogin no
+
+#MaxStartups 10:30:60
+#Banner /etc/issue.net
+# do not reveal debian version (default is yes)
+DebianBanner no
+
+# Allow client to pass locale environment variables
+AcceptEnv LANG LC_*
+
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/lib/openssh/sftp-server' : s %>
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+
+AllowAgentForwarding <%= scope.lookupvar('::sshd::agent_forwarding') %>
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+<% if (scope.function_versioncmp([scope.lookupvar('::ssh_version'),'6.5'])) >= 0 -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% else -%>
+Ciphers aes256-ctr
+MACs hmac-sha2-512
+<% end -%>
+<% end -%>
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/FreeBSD.erb b/puppet/modules/sshd/templates/sshd_config/FreeBSD.erb
new file mode 100644
index 00000000..5298ade9
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/FreeBSD.erb
@@ -0,0 +1,168 @@
+# $OpenBSD: sshd_config,v 1.81 2009/10/08 14:03:41 markus Exp $
+# $FreeBSD: src/crypto/openssh/sshd_config,v 1.49.2.2.2.1 2010/06/14 02:09:06 kensmith Exp $
+
+# This is the sshd server system-wide configuration file. See
+# sshd_config(5) for more information.
+
+# This sshd was compiled with PATH=/usr/bin:/bin:/usr/sbin:/sbin
+
+# The strategy used for options in the default sshd_config shipped with
+# OpenSSH is to specify options with their default value where
+# possible, but leave them commented. Uncommented options change a
+# default value.
+
+# Note that some of FreeBSD's defaults differ from OpenBSD's, and
+# FreeBSD has a few additional options.
+
+#VersionAddendum FreeBSD-20100308
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+# What ports, IPs and protocols we listen for
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+#AddressFamily any
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+
+# The default requires explicit activation of protocol 1
+Protocol 2
+
+# HostKey for protocol version 1
+#HostKey /etc/ssh/ssh_host_key
+# HostKeys for protocol version 2
+<% scope.lookupvar('::sshd::hostkey_type').to_a.each do |hostkey_type| -%>
+HostKey /etc/ssh/ssh_host_<%=hostkey_type %>_key
+<% end -%>
+
+# Lifetime and size of ephemeral version 1 server key
+#KeyRegenerationInterval 1h
+#ServerKeyBits 1024
+
+# Logging
+# obsoletes QuietMode and FascistLogging
+SyslogFacility AUTH
+LogLevel INFO
+
+# Authentication:
+
+LoginGraceTime 600
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+#MaxAuthTries 6
+#MaxSessions 10
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+
+# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+
+# Change to yes if you don't trust ~/.ssh/known_hosts for
+# RhostsRSAAuthentication and HostbasedAuthentication
+#IgnoreUserKnownHosts no
+# Don't read the user's ~/.rhosts and ~/.shosts files
+#IgnoreRhosts yes
+
+# Change to yes to enable built-in password authentication.
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to no to disable PAM authentication
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# Kerberos options
+KerberosAuthentication <%= scope.lookupvar('::sshd::kerberos_authentication') %>
+KerberosOrLocalPasswd <%= scope.lookupvar('::sshd::kerberos_orlocalpasswd') %>
+KerberosTicketCleanup <%= scope.lookupvar('::sshd::kerberos_ticketcleanup') %>
+
+# GSSAPI options
+GSSAPIAuthentication <%= scope.lookupvar('::sshd::gssapi_authentication') %>
+GSSAPICleanupCredentials <%= scope.lookupvar('::sshd::gssapi_cleanupcredentials') %>
+
+# Set this to 'no' to disable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+AllowAgentForwarding <%= scope.lookupvar('::sshd::agent_forwarding') %>
+
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+
+#GatewayPorts no
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+
+X11DisplayOffset 10
+#X11UseLocalhost yes
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+#PrintLastLog yes
+TCPKeepAlive yes
+#UseLogin no
+#UsePrivilegeSeparation yes
+#PermitUserEnvironment no
+#Compression delayed
+#ClientAliveInterval 0
+#ClientAliveCountMax 3
+#UseDNS yes
+#PidFile /var/run/sshd.pid
+#MaxStartups 10
+#PermitTunnel no
+#ChrootDirectory none
+
+# no default banner path
+#Banner none
+
+# override default of no subsystems
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/libexec/sftp-server' : s %>
+
+# Example of overriding settings on a per-user basis
+#Match User anoncvs
+# X11Forwarding no
+# AllowTcpForwarding no
+# ForceCommand cvs server
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+<% if (scope.function_versioncmp([scope.lookupvar('::ssh_version'),'6.5'])) >= 0 -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% else -%>
+Ciphers aes256-ctr
+MACs hmac-sha1
+<% end -%>
+<% end -%>
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/Gentoo.erb b/puppet/modules/sshd/templates/sshd_config/Gentoo.erb
new file mode 100644
index 00000000..022a26e7
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/Gentoo.erb
@@ -0,0 +1,164 @@
+# $OpenBSD: sshd_config,v 1.75 2007/03/19 01:01:29 djm Exp $
+
+# This is the sshd server system-wide configuration file. See
+# sshd_config(5) for more information.
+
+# This sshd was compiled with PATH=/usr/bin:/bin:/usr/sbin:/sbin
+
+# The strategy used for options in the default sshd_config shipped with
+# OpenSSH is to specify options with their default value where
+# possible, but leave them commented. Uncommented options change a
+# default value.
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+# Use these options to restrict which interfaces/protocols sshd will bind to
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+#AddressFamily any
+
+# Disable legacy (protocol version 1) support in the server for new
+# installations. In future the default will change to require explicit
+# activation of protocol 1
+Protocol 2
+
+# HostKey for protocol version 1
+#HostKey /etc/ssh/ssh_host_key
+# HostKeys for protocol version 2
+#HostKey /etc/ssh/ssh_host_rsa_key
+#HostKey /etc/ssh/ssh_host_dsa_key
+
+# Lifetime and size of ephemeral version 1 server key
+#KeyRegenerationInterval 1h
+#ServerKeyBits 768
+
+# Logging
+# obsoletes QuietMode and FascistLogging
+#SyslogFacility AUTH
+#LogLevel INFO
+
+# Authentication:
+
+#LoginGraceTime 2m
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+#MaxAuthTries 6
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+
+# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+
+# Change to yes if you don't trust ~/.ssh/known_hosts for
+# RhostsRSAAuthentication and HostbasedAuthentication
+#IgnoreUserKnownHosts no
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+
+# To disable tunneled clear text passwords, change to no here!
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to no to disable s/key passwords
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# Kerberos options
+#KerberosAuthentication no
+#KerberosOrLocalPasswd yes
+#KerberosTicketCleanup yes
+#KerberosGetAFSToken no
+
+# GSSAPI options
+#GSSAPIAuthentication no
+#GSSAPICleanupCredentials yes
+#GSSAPIStrictAcceptorCheck yes
+#GSSAPIKeyExchange no
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+
+#GatewayPorts no
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+#X11DisplayOffset 10
+#X11UseLocalhost yes
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+#PrintLastLog yes
+#TCPKeepAlive yes
+#UseLogin no
+#UsePrivilegeSeparation yes
+#PermitUserEnvironment no
+#Compression delayed
+#ClientAliveInterval 0
+#ClientAliveCountMax 3
+#UseDNS yes
+#PidFile /var/run/sshd.pid
+#MaxStartups 10
+#PermitTunnel no
+
+# no default banner path
+#Banner /some/path
+
+# override default of no subsystems
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/lib/misc/sftp-server' : s %>
+
+# Example of overriding settings on a per-user basis
+#Match User anoncvs
+# X11Forwarding no
+# AllowTcpForwarding no
+# ForceCommand cvs server
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+<% if (scope.function_versioncmp([scope.lookupvar('::ssh_version'),'6.5'])) >= 0 -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% else -%>
+Ciphers aes256-ctr
+MACs hmac-sha1
+<% end -%>
+<% end -%>
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
diff --git a/puppet/modules/sshd/templates/sshd_config/OpenBSD.erb b/puppet/modules/sshd/templates/sshd_config/OpenBSD.erb
new file mode 100644
index 00000000..db730300
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/OpenBSD.erb
@@ -0,0 +1,144 @@
+# $OpenBSD: sshd_config,v 1.74 2006/07/19 13:07:10 dtucker Exp $
+
+# This is the sshd server system-wide configuration file. See
+# sshd_config(5) for more information.
+
+# The strategy used for options in the default sshd_config shipped with
+# OpenSSH is to specify options with their default value where
+# possible, but leave them commented. Uncommented options change a
+# default value.
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+# Use these options to restrict which interfaces/protocols sshd will bind to
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+#Protocol 2,1
+#AddressFamily any
+
+# HostKey for protocol version 1
+#HostKey /etc/ssh/ssh_host_key
+# HostKeys for protocol version 2
+#HostKey /etc/ssh/ssh_host_rsa_key
+#HostKey /etc/ssh/ssh_host_dsa_key
+
+# Lifetime and size of ephemeral version 1 server key
+#KeyRegenerationInterval 1h
+#ServerKeyBits 768
+
+# Logging
+# obsoletes QuietMode and FascistLogging
+#SyslogFacility AUTH
+#LogLevel INFO
+
+# Authentication:
+
+#LoginGraceTime 2m
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+#MaxAuthTries 6
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+
+# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+
+# Change to yes if you don't trust ~/.ssh/known_hosts for
+# RhostsRSAAuthentication and HostbasedAuthentication
+#IgnoreUserKnownHosts no
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+
+# To disable tunneled clear text passwords, change to no here!
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to no to disable s/key passwords
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# Kerberos options
+#KerberosAuthentication no
+#KerberosOrLocalPasswd yes
+#KerberosTicketCleanup yes
+#KerberosGetAFSToken no
+
+# GSSAPI options
+#GSSAPIAuthentication no
+#GSSAPICleanupCredentials yes
+
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+
+#GatewayPorts no
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+#X11DisplayOffset 10
+#X11UseLocalhost yes
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+#PrintLastLog yes
+#TCPKeepAlive yes
+#UseLogin no
+#UsePrivilegeSeparation yes
+#PermitUserEnvironment no
+#Compression delayed
+#ClientAliveInterval 0
+#ClientAliveCountMax 3
+#UseDNS yes
+#PidFile /var/run/sshd.pid
+#MaxStartups 10
+#PermitTunnel no
+
+# no default banner path
+#Banner /some/path
+
+# override default of no subsystems
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/libexec/sftp-server' : s %>
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+# Example of overriding settings on a per-user basis
+#Match User anoncvs
+# X11Forwarding no
+# AllowTcpForwarding no
+# ForceCommand cvs server
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+<% if (scope.function_versioncmp([scope.lookupvar('::ssh_version'),'6.5'])) >= 0 -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% else -%>
+Ciphers aes256-ctr
+MACs hmac-sha1
+<% end -%>
+<% end -%>
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/Ubuntu.erb b/puppet/modules/sshd/templates/sshd_config/Ubuntu.erb
new file mode 100644
index 00000000..a326ab87
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/Ubuntu.erb
@@ -0,0 +1,133 @@
+# This file is managed by Puppet, all local modifications will be overwritten
+#
+# Package generated configuration file
+# See the sshd(8) manpage for details
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+# What ports, IPs and protocols we listen for
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+# Use these options to restrict which interfaces/protocols sshd will bind to
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+Protocol 2
+# HostKeys for protocol version 2
+<% scope.lookupvar('::sshd::hostkey_type').to_a.each do |hostkey_type| -%>
+HostKey /etc/ssh/ssh_host_<%=hostkey_type %>_key
+<% end -%>
+
+#Privilege Separation is turned on for security
+UsePrivilegeSeparation yes
+
+# Lifetime and size of ephemeral version 1 server key
+KeyRegenerationInterval 3600
+ServerKeyBits 768
+
+# Logging
+SyslogFacility AUTH
+LogLevel INFO
+
+# Authentication:
+LoginGraceTime 120
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+# For this to work you will also need host keys in /etc/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
+#IgnoreUserKnownHosts yes
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to yes to enable challenge-response passwords (beware issues with
+# some PAM modules and threads)
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# To disable tunneled clear text passwords, change to no here!
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# Kerberos options
+KerberosAuthentication <%= scope.lookupvar('::sshd::kerberos_authentication') %>
+KerberosOrLocalPasswd <%= scope.lookupvar('::sshd::kerberos_orlocalpasswd') %>
+KerberosTicketCleanup <%= scope.lookupvar('::sshd::kerberos_ticketcleanup') %>
+
+# GSSAPI options
+GSSAPIAuthentication <%= scope.lookupvar('::sshd::gssapi_authentication') %>
+GSSAPICleanupCredentials <%= scope.lookupvar('::sshd::gssapi_cleanupcredentials') %>
+
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+X11DisplayOffset 10
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+PrintLastLog yes
+TCPKeepAlive yes
+
+#UseLogin no
+
+#MaxStartups 10:30:60
+#Banner /etc/issue.net
+# do not reveal debian version (default is yes)
+DebianBanner no
+
+# Allow client to pass locale environment variables
+AcceptEnv LANG LC_*
+
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/lib/openssh/sftp-server' : s %>
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+
+AllowAgentForwarding <%= scope.lookupvar('::sshd::agent_forwarding') %>
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+<% if (scope.function_versioncmp([scope.lookupvar('::ssh_version'),'6.5'])) >= 0 -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% else -%>
+Ciphers aes256-ctr
+MACs hmac-sha1
+<% end -%>
+<% end -%>
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/Ubuntu_lucid.erb b/puppet/modules/sshd/templates/sshd_config/Ubuntu_lucid.erb
new file mode 100644
index 00000000..be7c56d0
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/Ubuntu_lucid.erb
@@ -0,0 +1,136 @@
+# Package generated configuration file
+# See the sshd(8) manpage for details
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+# What ports, IPs and protocols we listen for
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+# Use these options to restrict which interfaces/protocols sshd will bind to
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+Protocol 2
+# HostKeys for protocol version 2
+<% scope.lookupvar('::sshd::hostkey_type').to_a.each do |hostkey_type| -%>
+HostKey /etc/ssh/ssh_host_<%=hostkey_type %>_key
+<% end -%>
+
+#Privilege Separation is turned on for security
+UsePrivilegeSeparation yes
+
+# ...but breaks Pam auth via kbdint, so we have to turn it off
+# Use PAM authentication via keyboard-interactive so PAM modules can
+# properly interface with the user (off due to PrivSep)
+#PAMAuthenticationViaKbdInt no
+# Lifetime and size of ephemeral version 1 server key
+KeyRegenerationInterval 3600
+ServerKeyBits 768
+
+# Logging
+SyslogFacility AUTH
+LogLevel INFO
+
+# Authentication:
+LoginGraceTime 600
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+
+# For this to work you will also need host keys in /etc/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+
+# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
+#IgnoreUserKnownHosts yes
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to no to disable s/key passwords
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# To disable tunneled clear text passwords, change to no here!
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# To change Kerberos options
+#KerberosAuthentication no
+#KerberosOrLocalPasswd yes
+#AFSTokenPassing no
+#KerberosTicketCleanup no
+
+# Kerberos TGT Passing does only work with the AFS kaserver
+#KerberosTgtPassing yes
+
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+X11DisplayOffset 10
+KeepAlive yes
+#UseLogin no
+
+#MaxStartups 10:30:60
+#Banner /etc/issue.net
+# do not reveal debian version (default is yes)
+DebianBanner no
+#ReverseMappingCheck yes
+
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/lib/openssh/sftp-server' : s %>
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+HostbasedUsesNameFromPacketOnly yes
+
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+
+AllowAgentForwarding <%= scope.lookupvar('::sshd::agent_forwarding') %>
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+<% if (scope.function_versioncmp([scope.lookupvar('::ssh_version'),'6.5'])) >= 0 -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% else -%>
+Ciphers aes256-ctr
+MACs hmac-sha1
+<% end -%>
+<% end -%>
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/Ubuntu_oneiric.erb b/puppet/modules/sshd/templates/sshd_config/Ubuntu_oneiric.erb
new file mode 120000
index 00000000..ccfb67c8
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/Ubuntu_oneiric.erb
@@ -0,0 +1 @@
+Ubuntu_lucid.erb \ No newline at end of file
diff --git a/puppet/modules/sshd/templates/sshd_config/Ubuntu_precise.erb b/puppet/modules/sshd/templates/sshd_config/Ubuntu_precise.erb
new file mode 120000
index 00000000..6502bfce
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/Ubuntu_precise.erb
@@ -0,0 +1 @@
+Ubuntu.erb \ No newline at end of file
diff --git a/puppet/modules/sshd/templates/sshd_config/XenServer_xenenterprise.erb b/puppet/modules/sshd/templates/sshd_config/XenServer_xenenterprise.erb
new file mode 120000
index 00000000..71b767a5
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/XenServer_xenenterprise.erb
@@ -0,0 +1 @@
+CentOS_6.erb \ No newline at end of file
diff --git a/puppet/modules/templatewlv/Modulefile b/puppet/modules/templatewlv/Modulefile
new file mode 100644
index 00000000..8007a070
--- /dev/null
+++ b/puppet/modules/templatewlv/Modulefile
@@ -0,0 +1,11 @@
+name 'duritong-templatewlv'
+version '0.0.1'
+source 'https://github.com/duritong/puppet-templatewlv.git'
+author 'duritong'
+license 'Apache License, Version 2.0'
+summary 'Template With Local Variables'
+description 'Pass local variables to templates'
+project_page 'https://github.com/duritong/puppet-templatewlv'
+
+## Add dependencies, if any:
+# dependency 'username/name', '>= 1.2.0'
diff --git a/puppet/modules/templatewlv/README.md b/puppet/modules/templatewlv/README.md
new file mode 100644
index 00000000..5ab01e45
--- /dev/null
+++ b/puppet/modules/templatewlv/README.md
@@ -0,0 +1,21 @@
+# templatewlv
+
+## Template With Local Variables
+
+A wrapper around puppet's template function. See
+[the templating docs](http://docs.puppetlabs.com/guides/templating.html) for
+the basic functionality.
+
+Additionally, you can pass a hash as the last argument; its entries are turned
+into local variables that are available inside the template. This lets you
+define variables in one template and pass them down to a template you include
+from the current one. An example:
+
+ scope.function_templatewlv(['sub_template', { 'local_var' => 'value' }])
+
+Note that if multiple templates are specified, their output is all
+concatenated and returned as the output of the function.
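+
+The function can also be called directly from a manifest when declaring a
+resource. A minimal sketch (the module name `mymodule`, the template path and
+the `listen_port` variable are hypothetical):
+
+    file { '/etc/example.conf':
+      content => templatewlv('mymodule/example.conf.erb', { 'listen_port' => '8080' }),
+    }
+
+Inside `example.conf.erb` the value is then reachable as `@listen_port` (or as
+plain `listen_port`), just like a regular template variable.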
+
+# Who - License
+
+duritong - Apache License, Version 2.0
diff --git a/puppet/modules/templatewlv/lib/puppet/parser/functions/templatewlv.rb b/puppet/modules/templatewlv/lib/puppet/parser/functions/templatewlv.rb
new file mode 100644
index 00000000..c9579e2c
--- /dev/null
+++ b/puppet/modules/templatewlv/lib/puppet/parser/functions/templatewlv.rb
@@ -0,0 +1,41 @@
+require File.join(File.dirname(__FILE__),'../templatewrapperwlv')
+Puppet::Parser::Functions::newfunction(:templatewlv, :type => :rvalue, :arity => -2, :doc =>
+ "A wrapper around puppet's template function. See
+ [the templating docs](http://docs.puppetlabs.com/guides/templating.html) for
+ the basic functionality.
+
+  Additionally, you can pass a hash as the last argument; its entries are turned
+  into local variables that are available inside the template. This lets you
+  define variables in one template and pass them down to a template you include
+  from the current one. An example:
+
+ scope.function_templatewlv(['sub_template', { 'local_var' => 'value' }])
+
+ Note that if multiple templates are specified, their output is all
+ concatenated and returned as the output of the function.") do |vals|
+
+ if vals.last.is_a?(Hash)
+ local_vars = vals.last
+ local_vals = vals[0..-2]
+ else
+ local_vars = {}
+ local_vals = vals
+ end
+
+ result = nil
+ local_vals.collect do |file|
+ # Use a wrapper, so the template can't get access to the full
+ # Scope object.
+ debug "Retrieving template #{file}"
+
+ wrapper = Puppet::Parser::TemplateWrapperWlv.new(self,local_vars)
+ wrapper.file = file
+ begin
+ wrapper.result
+ rescue => detail
+ info = detail.backtrace.first.split(':')
+ raise Puppet::ParseError,
+ "Failed to parse template #{file}:\n Filepath: #{info[0]}\n Line: #{info[1]}\n Detail: #{detail}\n"
+ end
+ end.join("")
+end
diff --git a/puppet/modules/templatewlv/lib/puppet/parser/templatewrapperwlv.rb b/puppet/modules/templatewlv/lib/puppet/parser/templatewrapperwlv.rb
new file mode 100644
index 00000000..f1753e18
--- /dev/null
+++ b/puppet/modules/templatewlv/lib/puppet/parser/templatewrapperwlv.rb
@@ -0,0 +1,39 @@
+# A wrapper for templates, that allows you to additionally define
+# local variables
+class Puppet::Parser::TemplateWrapperWlv < Puppet::Parser::TemplateWrapper
+ attr_reader :local_vars
+ def initialize(scope, local_vars)
+ super(scope)
+ @local_vars = local_vars
+ end
+
+ # Should return true if a variable is defined, false if it is not
+ def has_variable?(name)
+ super(name) || local_vars.keys.include?(name.to_s)
+ end
+
+ def method_missing(name, *args)
+ if local_vars.keys.include?(n=name.to_s)
+ local_vars[n]
+ else
+ super(name, *args)
+ end
+ end
+
+ def result(string = nil)
+ # Expose all the variables in our scope as instance variables of the
+ # current object, making it possible to access them without conflict
+ # to the regular methods.
+ benchmark(:debug, "Bound local template variables for #{@__file__}") do
+ local_vars.each do |name, value|
+ if name.kind_of?(String)
+ realname = name.gsub(/[^\w]/, "_")
+ else
+ realname = name
+ end
+ instance_variable_set("@#{realname}", value)
+ end
+ end
+ super(string)
+ end
+end
diff --git a/puppet/modules/try/README.md b/puppet/modules/try/README.md
new file mode 100644
index 00000000..3888661e
--- /dev/null
+++ b/puppet/modules/try/README.md
@@ -0,0 +1,13 @@
+This module provides a "try" wrapper around common resource types.
+
+For example:
+
+ try::file {
+ '/path/to/file':
+ ensure => 'link',
+ target => $target;
+ }
+
+This will work just like `file`, but will silently fail if `$target` is undefined or the file does not exist.
+
+So far, only the `file` type with symlink targets is supported.
diff --git a/puppet/modules/try/manifests/file.pp b/puppet/modules/try/manifests/file.pp
new file mode 100644
index 00000000..2493d343
--- /dev/null
+++ b/puppet/modules/try/manifests/file.pp
@@ -0,0 +1,114 @@
+#
+# Works like the built-in type "file", but gets gracefully ignored if the target/source does not exist or is undefined.
+#
+# Also, if the source or target doesn't exist, and the destination is a git repo, then the file is restored from git.
+#
+# All executable paths are hardcoded to their paths in Debian.
+#
+# known limitations:
+# * this is far too noisy
+# * $restore does not work for directories
+# * only file:// $source is supported
+# * $content is not supported, only $target or $source.
+# * does not auto-require all the parent directories like 'file' does
+#
+define try::file (
+ $ensure = undef,
+ $target = undef,
+ $source = undef,
+ $owner = undef,
+ $group = undef,
+ $recurse = undef,
+ $purge = undef,
+ $force = undef,
+ $mode = undef,
+ $restore = true) {
+
+ # dummy exec to propagate requires:
+ # metaparameter 'require' will get triggered by this dummy exec
+ # so then we just need to depend on this to capture all requires.
+ # exec { $name: command => "/bin/true" }
+
+ exec {
+ "chmod_${name}":
+ command => "/bin/chmod -R ${mode} '${name}'",
+ onlyif => "/usr/bin/test ${mode}",
+ refreshonly => true,
+ loglevel => debug;
+ "chown_${name}":
+ command => "/bin/chown -R ${owner} '${name}'",
+ onlyif => "/usr/bin/test ${owner}",
+ refreshonly => true,
+ loglevel => debug;
+ "chgrp_${name}":
+ command => "/bin/chgrp -R ${group} '${name}'",
+ onlyif => "/usr/bin/test ${group}",
+ refreshonly => true,
+ loglevel => debug;
+ }
+
+ if $target {
+ exec { "symlink_${name}":
+ command => "/bin/ln -s ${target} ${name}",
+ onlyif => "/usr/bin/test -d '${target}'",
+ }
+ } elsif $source {
+ if $ensure == 'directory' {
+ if $purge {
+ exec { "rsync_${name}":
+ command => "/usr/bin/rsync -r --delete '${source}/' '${name}'",
+ onlyif => "/usr/bin/test -d '${source}'",
+ unless => "/usr/bin/diff -rq '${source}' '${name}'",
+ notify => [Exec["chmod_${name}"], Exec["chown_${name}"], Exec["chgrp_${name}"]]
+ }
+ } else {
+ exec { "cp_r_${name}":
+ command => "/bin/cp -r '${source}' '${name}'",
+ onlyif => "/usr/bin/test -d '${source}'",
+ unless => "/usr/bin/diff -rq '${source}' '${name}'",
+ notify => [Exec["chmod_${name}"], Exec["chown_${name}"], Exec["chgrp_${name}"]]
+ }
+ }
+ } else {
+ exec { "cp_${name}":
+ command => "/bin/cp --remove-destination '${source}' '${name}'",
+ onlyif => "/usr/bin/test -e '${source}'",
+ unless => "/usr/bin/test ! -h '${name}' && /usr/bin/diff -q '${source}' '${name}'",
+ notify => [Exec["chmod_${name}"], Exec["chown_${name}"], Exec["chgrp_${name}"]]
+ }
+ }
+ }
+
+ #
+ # if the target/source does not exist (or is undef), and the file happens to be in a git repo,
+ # then restore the file to its original state.
+ #
+
+ if $target {
+ $target_or_source = $target
+ } else {
+ $target_or_source = $source
+ }
+
+ if ($target_or_source == undef) or $restore {
+ $file_basename = basename($name)
+ $file_dirname = dirname($name)
+ $command = "git rev-parse && unlink '${name}'; git checkout -- '${file_basename}' && chown --reference='${file_dirname}' '${name}'; true"
+ debug($command)
+
+ if $target_or_source == undef {
+ exec { "restore_${name}":
+ command => $command,
+ cwd => $file_dirname,
+ loglevel => info;
+ }
+ } else {
+ exec { "restore_${name}":
+ unless => "/usr/bin/test -e '${target_or_source}'",
+ command => $command,
+ cwd => $file_dirname,
+ loglevel => info;
+ }
+ }
+ }
+}
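+
+# Usage sketch (the paths below are hypothetical): copy a file from a local
+# source if that source exists, and otherwise fall back to restoring the
+# destination from the surrounding git repository.
+#
+#   try::file { '/srv/leap/webapp/config/example.yml':
+#     source  => '/etc/leap/local/example.yml',
+#     restore => true,
+#   }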
diff --git a/puppet/modules/try/manifests/init.pp b/puppet/modules/try/manifests/init.pp
new file mode 100644
index 00000000..1d2108c9
--- /dev/null
+++ b/puppet/modules/try/manifests/init.pp
@@ -0,0 +1,3 @@
+class try {
+
+}
diff --git a/puppet/modules/x509/manifests/base.pp b/puppet/modules/x509/manifests/base.pp
new file mode 100644
index 00000000..b88cce64
--- /dev/null
+++ b/puppet/modules/x509/manifests/base.pp
@@ -0,0 +1,45 @@
+class x509::base {
+ include x509::variables
+
+ package { [ 'ssl-cert', 'ca-certificates' ]:
+ ensure => installed;
+ }
+
+ group { 'ssl-cert':
+ ensure => present,
+ system => true,
+ require => Package['ssl-cert'];
+ }
+
+ file {
+ $x509::variables::root:
+ ensure => directory,
+ mode => '0755',
+ owner => root,
+ group => root;
+
+ $x509::variables::keys:
+ ensure => directory,
+ mode => '0750',
+ owner => root,
+ group => ssl-cert;
+
+ $x509::variables::certs:
+ ensure => directory,
+ mode => '0755',
+ owner => root,
+ group => root;
+
+ $x509::variables::local_CAs:
+ ensure => directory,
+ mode => '2775',
+ owner => root,
+ group => root;
+ }
+
+ exec { 'update-ca-certificates':
+ command => '/usr/sbin/update-ca-certificates',
+ refreshonly => true,
+ subscribe => File[$x509::variables::local_CAs]
+ }
+}
diff --git a/puppet/modules/x509/manifests/ca.pp b/puppet/modules/x509/manifests/ca.pp
new file mode 100644
index 00000000..0e068cd3
--- /dev/null
+++ b/puppet/modules/x509/manifests/ca.pp
@@ -0,0 +1,34 @@
+define x509::ca (
+ $content = 'absent',
+ $source = 'absent'
+) {
+ include x509::variables
+ include x509::base
+
+ file { "${x509::variables::local_CAs}/${name}.crt" :
+ ensure => file,
+ mode => '0444',
+ group => 'ssl-cert',
+ require => Package['ca-certificates'],
+ notify => Exec['update-ca-certificates'],
+ }
+ case $content {
+ 'absent': {
+ $real_source = $source ? {
+ 'absent' => [
+ "puppet:///modules/site_x509/CAs/${::fqdn}/${name}.crt",
+ "puppet:///modules/site_x509/CAs/${name}.crt"
+ ],
+        default => "puppet:///${source}",
+ }
+ File["${x509::variables::local_CAs}/${name}.crt"] {
+ source => $real_source
+ }
+ }
+ default: {
+ File["${x509::variables::local_CAs}/${name}.crt"] {
+ content => $content
+ }
+ }
+ }
+}
diff --git a/puppet/modules/x509/manifests/cert.pp b/puppet/modules/x509/manifests/cert.pp
new file mode 100644
index 00000000..0aafb76d
--- /dev/null
+++ b/puppet/modules/x509/manifests/cert.pp
@@ -0,0 +1,34 @@
+define x509::cert (
+ $content = 'absent',
+ $source = 'absent'
+) {
+ include x509::variables
+ include x509::base
+
+ file { "${x509::variables::certs}/${name}.crt":
+ ensure => file,
+ mode => '0444',
+ group => 'ssl-cert',
+ require => Package['ssl-cert']
+ }
+
+ case $content {
+ 'absent': {
+ $real_source = $source ? {
+ 'absent' => [
+ "puppet:///modules/site_x509/certs/${::fqdn}/${name}.crt",
+ "puppet:///modules/site_x509/certs/${name}.crt"
+ ],
+        default => "puppet:///${source}",
+ }
+ File["${x509::variables::certs}/${name}.crt"] {
+ source => $real_source
+ }
+ }
+ default: {
+ File["${x509::variables::certs}/${name}.crt"] {
+ content => $content
+ }
+ }
+ }
+}
diff --git a/puppet/modules/x509/manifests/init.pp b/puppet/modules/x509/manifests/init.pp
new file mode 100644
index 00000000..8283e482
--- /dev/null
+++ b/puppet/modules/x509/manifests/init.pp
@@ -0,0 +1,2 @@
+class x509 {
+}
diff --git a/puppet/modules/x509/manifests/key.pp b/puppet/modules/x509/manifests/key.pp
new file mode 100644
index 00000000..fd7e25fd
--- /dev/null
+++ b/puppet/modules/x509/manifests/key.pp
@@ -0,0 +1,37 @@
+define x509::key (
+ $content = 'absent',
+ $source = 'absent',
+ $owner = 'root',
+ $group = 'ssl-cert'
+) {
+ include x509::variables
+ include x509::base
+
+ file { "${x509::variables::keys}/${name}.key":
+ ensure => file,
+ mode => '0640',
+ owner => $owner,
+ group => $group,
+ require => Package['ssl-cert']
+ }
+
+ case $content {
+ 'absent': {
+ $real_source = $source ? {
+ 'absent' => [
+ "puppet:///modules/site_x509/keys/${::fqdn}/${name}.key",
+ "puppet:///modules/site_x509/keys/${name}.key"
+ ],
+        default => "puppet:///${source}",
+ }
+ File["${x509::variables::keys}/${name}.key"] {
+ source => $real_source
+ }
+ }
+ default: {
+ File["${x509::variables::keys}/${name}.key"] {
+ content => $content
+ }
+ }
+ }
+}
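+
+# Usage sketch (the resource titles and variables are hypothetical): deploy a
+# key/certificate pair together with its issuing CA from inline PEM content.
+#
+#   x509::key  { 'example_org':    content => $key_pem }
+#   x509::cert { 'example_org':    content => $cert_pem }
+#   x509::ca   { 'example_org_ca': content => $ca_pem }
+#
+# When neither content nor source is given, the defines fall back to files
+# under site_x509 on the puppet fileserver, as listed in $real_source above.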
diff --git a/puppet/modules/x509/manifests/variables.pp b/puppet/modules/x509/manifests/variables.pp
new file mode 100644
index 00000000..e6bd2359
--- /dev/null
+++ b/puppet/modules/x509/manifests/variables.pp
@@ -0,0 +1,7 @@
+class x509::variables {
+ $root = '/etc/x509'
+ $certs = "${root}/certs"
+ $keys = "${root}/keys"
+ $x509_chain = "${root}/certs"
+ $local_CAs = '/usr/local/share/ca-certificates'
+}
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 00000000..814c25b1
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,25 @@
+Tests
+---------------------------------
+
+tests/white-box/
+
+ These tests are run on the server as superuser. They are for
+ troubleshooting any problems with the internal setup of the server.
+
+tests/black-box/
+
+  These tests are run on the user's local machine. They are for troubleshooting
+ any external problems with the service exposed by the server.
+
+Additional Files
+---------------------------------
+
+tests/helpers/
+
+ Utility functions made available to all tests.
+
+tests/order.rb
+
+ Configuration file to specify which nodes should be tested in which order.
+
+
diff --git a/tests/helpers/bonafide_helper.rb b/tests/helpers/bonafide_helper.rb
new file mode 100644
index 00000000..5b886228
--- /dev/null
+++ b/tests/helpers/bonafide_helper.rb
@@ -0,0 +1,235 @@
+#
+# Helper for communicating with the provider API: creating, authenticating, and deleting accounts.
+#
+
+class LeapTest
+
+ def assert_tmp_user
+ user = assert_create_user
+ assert_authenticate_user(user)
+ yield user if block_given?
+ assert_delete_user(user)
+ rescue StandardError, MiniTest::Assertion => exc
+ begin
+ assert_delete_user(user)
+ rescue
+ end
+ raise exc
+ end
+
+ #
+ # attempts to create a user account via the API,
+ # returning the user object if successful.
+ #
+ def assert_create_user(username=nil, auth=nil)
+ user = SRP::User.new(username)
+ url = api_url("/users.json")
+ params = user.to_params
+ if auth
+ options = api_options(:auth => auth)
+ else
+ options = api_options
+ if property('webapp.invite_required')
+ @invite_code = generate_invite_code
+ params['user[invite_code]'] = @invite_code
+ end
+ end
+
+ assert_post(url, params, options) do |body|
+ assert response = JSON.parse(body), 'response should be JSON'
+ assert response['ok'], "Creating a user should be successful, got #{response.inspect} instead."
+ user.ok = true
+ user.id = response['id']
+ end
+ return user
+ end
+
+ # TODO: use the api for this instead.
+ def generate_invite_code
+ `cd /srv/leap/webapp/ && sudo -u leap-webapp RAILS_ENV=production bundle exec rake generate_invites[1]`.gsub(/\n/, "")
+ end
+
+ #
+ # attempts to authenticate user. if successful,
+ # user object is updated with id and session token.
+ #
+ def assert_authenticate_user(user)
+ url = api_url("/sessions.json")
+ session = SRP::Session.new(user)
+ params = {'login' => user.username, 'A' => session.aa}
+ assert_post(url, params, api_options) do |body, response|
+ cookie = response['Set-Cookie'].split(';').first
+ assert(response = JSON.parse(body), 'response should be JSON')
+ assert(session.bb = response["B"], 'response should include "B"')
+ url = api_url("/sessions/login.json")
+ params = {'client_auth' => session.m, 'A' => session.aa}
+ assert_put(url, params, api_options('Cookie' => cookie)) do |body|
+ assert(response = JSON.parse(body), 'response should be JSON')
+ assert(response['M2'], 'response should include M2')
+ user.session_token = response['token']
+ user.id = response['id']
+ assert(user.session_token, 'response should include token')
+ assert(user.id, 'response should include user id')
+ end
+ end
+ end
+
+ #
+ # attempts to destroy a user account via the API.
+ #
+ def assert_delete_user(user)
+ if user.is_a? String
+ assert_delete_user_by_login(user)
+ elsif user.is_a? SRP::User
+ assert_delete_srp_user(user)
+ end
+ end
+
+ #
+ # returns true if the identity exists, uses monitor token auth
+ #
+ def identity_exists?(address)
+ url = api_url("/identities/#{URI.encode(address)}.json")
+ options = {:ok_codes => [200, 404]}.merge(
+ api_options(:auth => :monitor)
+ )
+ assert_get(url, nil, options) do |body, response|
+ return response.code == "200"
+ end
+ end
+
+ def upload_public_key(user_id, public_key)
+ url = api_url("/users/#{user_id}.json")
+ params = {"user[public_key]" => public_key}
+ assert_put(url, params, api_options(:auth => :monitor))
+ end
+
+ #
+ # return user document as a Hash. uses monitor token auth
+ #
+ def find_user_by_id(user_id)
+ url = api_url("/users/#{user_id}.json")
+ assert_get(url, nil, api_options(:auth => :monitor)) do |body|
+ return JSON.parse(body)
+ end
+ end
+
+ #
+ # return user document as a Hash. uses monitor token auth
+ # NOTE: this relies on deprecated behavior of the API
+ # and will not work when multi-domain support is added.
+ #
+ def find_user_by_login(login)
+ url = api_url("/users/0.json?login=#{login}")
+ options = {:ok_codes => [200, 404]}.merge(
+ api_options(:auth => :monitor)
+ )
+ assert_get(url, nil, options) do |body, response|
+ if response.code == "200"
+ return JSON.parse(body)
+ else
+ return nil
+ end
+ end
+ end
+
+ private
+
+ def api_url(path)
+ unless path =~ /^\//
+ path = '/' + path
+ end
+ if property('testing.api_uri')
+ return property('testing.api_uri') + path
+ elsif property('api')
+ api = property('api')
+ return "https://%{domain}:%{port}/%{version}#{path}" % {
+ :domain => api['domain'],
+ :port => api['port'],
+ :version => api['version'] || 1
+ }
+ else
+ fail 'This node needs to have either testing.api_url or api.{domain,port} configured.'
+ end
+ end
+
+ #
+ # produces an options hash used for api http requests.
+ #
+ # argument options hash gets added to "headers"
+ # of the http request.
+ #
+ # special :auth key in argument will expand to
+ # add api_token_auth header.
+ #
+ # if you want to try manually:
+ #
+ # export API_URI=`grep api_uri /etc/leap/hiera.yaml | cut -d\" -f2`
+ # export TOKEN=`grep monitor_auth_token /etc/leap/hiera.yaml | awk '{print $2}'`
+ # curl -H "Accept: application/json" -H "Token: $TOKEN" $API_URI
+ #
+ def api_options(options={})
+ # note: must be :headers, not "headers"
+ hsh = {
+ :headers => {
+ "Accept" => "application/json"
+ }
+ }
+ if options[:auth]
+ hsh[:headers].merge!(api_token_auth(options.delete(:auth)))
+ end
+ hsh[:headers].merge!(options)
+ return hsh
+ end
+
+ #
+ # add token authentication to a http request.
+ #
+ # returns a hash suitable for adding to the 'headers' option
+ # of an http function.
+ #
+ def api_token_auth(token)
+ if token.is_a?(Symbol) && property('testing')
+ if token == :monitor
+ token_str = property('testing.monitor_auth_token')
+ else
+ raise ArgumentError.new 'no such token'
+ end
+ else
+ token_str = token
+ end
+ {"Authorization" => "Token token=\"#{token_str}\""}
+ end
+
+ #
+ # not actually used in any test, but useful when
+ # writing new tests.
+ #
+ def assert_delete_user_by_login(login_name)
+ user = find_user_by_login(login_name)
+ url = api_url("/users/#{user['id']}.json")
+ params = {:identities => 'destroy'}
+ delete(url, params, api_options(:auth => :monitor)) do |body, response, error|
+ assert error.nil?, "Error deleting user: #{error}"
+ assert response.code.to_i == 200, "Unable to delete user: HTTP response from API should have code 200, was #{response.code} #{error} #{body}"
+ assert(response = JSON.parse(body), 'Delete response should be JSON')
+ assert(response["success"], 'Deleting user should be a success')
+ end
+ end
+
+ def assert_delete_srp_user(user)
+ if user && user.ok && user.id && user.session_token && !user.deleted
+ url = api_url("users/#{user.id}.json")
+ params = {:identities => 'destroy'}
+ user.deleted = true
+ delete(url, params, api_options(:auth => user.session_token)) do |body, response, error|
+ assert error.nil?, "Error deleting user: #{error}"
+ assert response.code.to_i == 200, "Unable to delete user: HTTP response from API should have code 200, was #{response.code} #{error} #{body}"
+ assert(response = JSON.parse(body), 'Delete response should be JSON')
+ assert(response["success"], 'Deleting user should be a success')
+ end
+ end
+ end
+
+
+end
diff --git a/tests/helpers/client_side_db.py b/tests/helpers/client_side_db.py
new file mode 100644
index 00000000..2f8c220f
--- /dev/null
+++ b/tests/helpers/client_side_db.py
@@ -0,0 +1,167 @@
+import logging
+import os
+import tempfile
+import getpass
+import binascii
+import json
+
+try:
+ import requests
+ import srp._pysrp as srp
+except ImportError:
+ pass
+
+from twisted.internet.defer import inlineCallbacks
+
+from leap.soledad.client import Soledad
+
+
+"""
+Helper functions to give access to client-side Soledad database.
+Copied over from soledad/scripts folder.
+"""
+
+# create a logger
+logger = logging.getLogger(__name__)
+
+# DEBUG: enable debug logs
+# LOG_FORMAT = '%(asctime)s %(message)s'
+# logging.basicConfig(format=LOG_FORMAT, level=logging.DEBUG)
+
+
+safe_unhexlify = lambda x: binascii.unhexlify(x) if (
+ len(x) % 2 == 0) else binascii.unhexlify('0' + x)
+
+
+def _fail(reason):
+ logger.error('Fail: ' + reason)
+ exit(2)
+
+
+def get_soledad_instance(uuid, passphrase, basedir, server_url, cert_file,
+ token):
+ # setup soledad info
+ logger.info('UUID is %s' % uuid)
+ logger.info('Server URL is %s' % server_url)
+ secrets_path = os.path.join(
+ basedir, '%s.secret' % uuid)
+ local_db_path = os.path.join(
+ basedir, '%s.db' % uuid)
+ # instantiate soledad
+ return Soledad(
+ uuid,
+ unicode(passphrase),
+ secrets_path=secrets_path,
+ local_db_path=local_db_path,
+ server_url=server_url,
+ cert_file=cert_file,
+ auth_token=token,
+ defer_encryption=True)
+
+
+def _get_api_info(provider):
+ info = requests.get(
+ 'https://'+provider+'/provider.json', verify=False).json()
+ return info['api_uri'], info['api_version']
+
+
+def _login(username, passphrase, provider, api_uri, api_version):
+ usr = srp.User(username, passphrase, srp.SHA256, srp.NG_1024)
+ auth = None
+ try:
+ auth = _authenticate(api_uri, api_version, usr).json()
+ except requests.exceptions.ConnectionError:
+ _fail('Could not connect to server.')
+ if 'errors' in auth:
+ _fail(str(auth['errors']))
+ return api_uri, api_version, auth
+
+
+def _authenticate(api_uri, api_version, usr):
+ api_url = "%s/%s" % (api_uri, api_version)
+ session = requests.session()
+ uname, A = usr.start_authentication()
+ params = {'login': uname, 'A': binascii.hexlify(A)}
+ init = session.post(
+ api_url + '/sessions', data=params, verify=False).json()
+ if 'errors' in init:
+ _fail('test user not found')
+ M = usr.process_challenge(
+ safe_unhexlify(init['salt']), safe_unhexlify(init['B']))
+ return session.put(api_url + '/sessions/' + uname, verify=False,
+ data={'client_auth': binascii.hexlify(M)})
+
+
+def _get_soledad_info(username, provider, passphrase, basedir):
+ api_uri, api_version = _get_api_info(provider)
+ auth = _login(username, passphrase, provider, api_uri, api_version)
+ # get soledad server url
+ service_url = '%s/%s/config/soledad-service.json' % \
+ (api_uri, api_version)
+ soledad_hosts = requests.get(service_url, verify=False).json()['hosts']
+ hostnames = soledad_hosts.keys()
+ # allow for choosing the host
+ host = hostnames[0]
+ if len(hostnames) > 1:
+ i = 1
+ print "There are many available hosts:"
+ for h in hostnames:
+ print " (%d) %s.%s" % (i, h, provider)
+ i += 1
+ choice = raw_input("Choose a host to use (default: 1): ")
+ if choice != '':
+ host = hostnames[int(choice) - 1]
+ server_url = 'https://%s:%d/user-%s' % \
+ (soledad_hosts[host]['hostname'], soledad_hosts[host]['port'],
+ auth[2]['id'])
+ # get provider ca certificate
+ ca_cert = requests.get('https://%s/ca.crt' % provider, verify=False).text
+ cert_file = os.path.join(basedir, 'ca.crt')
+ with open(cert_file, 'w') as f:
+ f.write(ca_cert)
+ return auth[2]['id'], server_url, cert_file, auth[2]['token']
+
+
+def _get_passphrase(args):
+ passphrase = args.passphrase
+ if passphrase is None:
+ passphrase = getpass.getpass(
+ 'Password for %s@%s: ' % (args.username, args.provider))
+ return passphrase
+
+
+def _get_basedir(args):
+ basedir = args.basedir
+ if basedir is None:
+ basedir = tempfile.mkdtemp()
+ elif not os.path.isdir(basedir):
+ os.mkdir(basedir)
+ logger.info('Using %s as base directory.' % basedir)
+ return basedir
+
+
+@inlineCallbacks
+def _export_key(args, km, fname, private=False):
+ # OpenPGPKey is imported lazily here so the other helpers in this module
+ # keep working when leap.keymanager is not installed; the import path is
+ # an assumption based on the leap.keymanager package layout of this era.
+ from leap.keymanager.openpgp import OpenPGPKey
+ address = args.username + "@" + args.provider
+ pkey = yield km.get_key(
+ address, OpenPGPKey, private=private, fetch_remote=False)
+ with open(args.export_private_key, "w") as f:
+ f.write(pkey.key_data)
+
+
+@inlineCallbacks
+def _export_incoming_messages(soledad, directory):
+ yield soledad.create_index("by-incoming", "bool(incoming)")
+ docs = yield soledad.get_from_index("by-incoming", '1')
+ i = 1
+ for doc in docs:
+ with open(os.path.join(directory, "message_%d.gpg" % i), "w") as f:
+ f.write(doc.content["_enc_json"])
+ i += 1
+
+
+@inlineCallbacks
+def _get_all_docs(soledad):
+ _, docs = yield soledad.get_all_docs()
+ for doc in docs:
+ print json.dumps(doc.content, indent=4)
diff --git a/tests/helpers/couchdb_helper.rb b/tests/helpers/couchdb_helper.rb
new file mode 100644
index 00000000..b9085c1e
--- /dev/null
+++ b/tests/helpers/couchdb_helper.rb
@@ -0,0 +1,142 @@
+class LeapTest
+
+ #
+ # generates couchdb urls (one per stunnel client) for when couchdb is running
+ # remotely and is available via stunnel.
+ #
+ # example properties:
+ #
+ # stunnel:
+ # clients:
+ # couch_client:
+ # couch1_5984:
+ # accept_port: 4000
+ # connect: couch1.bitmask.i
+ # connect_port: 15984
+ #
+ def couchdb_urls_via_stunnel(path="", options=nil)
+ path = path.gsub('"', '%22')
+ if options && options[:username] && options[:password]
+ userpart = "%{username}:%{password}@" % options
+ else
+ userpart = ""
+ end
+ assert_property('stunnel.clients.couch_client').values.collect do |stunnel_conf|
+ assert port = stunnel_conf['accept_port'], 'Field `accept_port` must be present in `stunnel` property.'
+ URLString.new("http://#{userpart}localhost:#{port}#{path}").tap {|url|
+ remote_ip_address = TCPSocket.gethostbyname(stunnel_conf['connect']).last
+ url.memo = "(via stunnel to %s:%s, aka %s)" % [stunnel_conf['connect'], stunnel_conf['connect_port'], remote_ip_address]
+ }
+ end
+ end
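+
+ # With the example properties above, couchdb_urls_via_stunnel("/identities")
+ # would return a single URLString "http://localhost:4000/identities" whose
+ # memo reads something like "(via stunnel to couch1.bitmask.i:15984, aka 10.0.0.1)"
+ # (the resolved IP address is illustrative).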
+
+ #
+ # generates a couchdb url for accessing couchdb via haproxy
+ #
+ # example properties:
+ #
+ # haproxy:
+ # couch:
+ # listen_port: 4096
+ # servers:
+ # panda:
+ # backup: false
+ # host: localhost
+ # port: 4000
+ # weight: 100
+ # writable: true
+ #
+ def couchdb_url_via_haproxy(path="", options=nil)
+ path = path.gsub('"', '%22')
+ if options && options[:username] && options[:password]
+ userpart = "%{username}:%{password}@" % options
+ else
+ userpart = ""
+ end
+ port = assert_property('haproxy.couch.listen_port')
+ return URLString.new("http://#{userpart}localhost:#{port}#{path}").tap { |url|
+ url.memo = '(via haproxy)'
+ }
+ end
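+
+ # With the example properties above, couchdb_url_via_haproxy("/identities")
+ # returns the URLString "http://localhost:4096/identities" with the memo
+ # "(via haproxy)".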
+
+ #
+ # generates a couchdb url for when couchdb is running locally.
+ #
+ # example properties:
+ #
+ # couch:
+ # port: 5984
+ #
+ def couchdb_url_via_localhost(path="", options=nil)
+ path = path.gsub('"', '%22')
+ port = (options && options[:port]) || assert_property('couch.port')
+ if options && options[:username]
+ password = property("couch.users.%{username}.password" % options)
+ userpart = "%s:%s@" % [options[:username], password]
+ else
+ userpart = ""
+ end
+ return URLString.new("http://#{userpart}localhost:#{port}#{path}").tap { |url|
+ url.memo = '(via direct localhost connection)'
+ }
+ end
+
+ #
+ # returns a single url for accessing couchdb
+ #
+ def couchdb_url(path="", options=nil)
+ if property('couch.port')
+ couchdb_url_via_localhost(path, options)
+ elsif property('stunnel.clients.couch_client')
+ couchdb_urls_via_stunnel(path, options).first
+ end
+ end
+
+ #
+ # returns an array of urls for accessing couchdb
+ #
+ def couchdb_urls(path="", options=nil)
+ if property('couch.port')
+ [couchdb_url_via_localhost(path, options)]
+ elsif property('stunnel.clients.couch_client')
+ couchdb_urls_via_stunnel(path, options)
+ end
+ end
+
+ def assert_destroy_user_db(user_id, options=nil)
+ db_name = "user-#{user_id}"
+ url = couchdb_url("/#{db_name}", options)
+ http_options = {:ok_codes => [200, 404]} # ignore missing dbs
+ assert_delete(url, nil, http_options)
+ end
+
+ def assert_create_user_db(user_id, options=nil)
+ db_name = "user-#{user_id}"
+ url = couchdb_url("/#{db_name}", options)
+ http_options = {:ok_codes => [200, 404]} # ignore missing dbs
+ assert_put(url, nil, :format => :json) do |body|
+ assert response = JSON.parse(body), "PUT response should be JSON"
+ assert response["ok"], "PUT response should be OK"
+ end
+ end
+
+ #
+ # returns true if the per-user db created by soledad-server exists.
+ #
+ def user_db_exists?(user_id, options=nil)
+ db_name = "user-#{user_id}"
+ url = couchdb_url("/#{db_name}", options)
+ get(url) do |body, response, error|
+ if response.nil?
+ fail "could not query couchdb #{url}: #{error}\n#{body}"
+ elsif response.code.to_i == 200
+ return true
+ elsif response.code.to_i == 404
+ return false
+ else
+ fail ["could not query couchdb #{url}: expected response code 200 or 404, but got #{response.code}.", error, body].compact.join("\n")
+ end
+ end
+ end
+
+end \ No newline at end of file
diff --git a/tests/helpers/files_helper.rb b/tests/helpers/files_helper.rb
new file mode 100644
index 00000000..d6795889
--- /dev/null
+++ b/tests/helpers/files_helper.rb
@@ -0,0 +1,54 @@
+class LeapTest
+
+ #
+ # Matches the regexp in the file, and returns the first matched string (or fails if no match).
+ #
+ def file_match(filename, regexp)
+ if match = File.read(filename).match(regexp)
+ match.captures.first
+ else
+ fail "Regexp #{regexp.inspect} not found in file #{filename.inspect}."
+ end
+ end
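+
+ # For example, against a hypothetical file containing the line "port: 5984":
+ #
+ #   file_match('/tmp/example.yaml', /port: (\d+)/)  #=> "5984"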
+
+ #
+ # Matches the regexp in the file, and returns an array of matched strings (or fails if no match).
+ #
+ def file_matches(filename, regexp)
+ if match = File.read(filename).match(regexp)
+ match.captures
+ else
+ fail "Regexp #{regexp.inspect} not found in file #{filename.inspect}."
+ end
+ end
+
+ #
+ # checks to make sure the given property path exists in $node (e.g. hiera.yaml)
+ # and returns the value
+ #
+ def assert_property(property)
+ latest = $node
+ property.split('.').each do |segment|
+ latest = latest[segment]
+ fail "Required node property `#{property}` is missing." if latest.nil?
+ end
+ return latest
+ end
+
+ #
+ # a handy function to get the value of a long property path
+ # without needing to individually test the existence of each part
+ # in the tree.
+ #
+ # e.g. property("stunnel.clients.couch_client")
+ #
+ def property(property)
+ latest = $node
+ property.split('.').each do |segment|
+ latest = latest[segment]
+ return nil if latest.nil?
+ end
+ return latest
+ end
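+
+ # For example, with the stunnel properties shown in the couchdb helper:
+ #
+ #   property("stunnel.clients.couch_client")    #=> hash of stunnel client configs
+ #   property("stunnel.clients.missing")         #=> nil
+ #   assert_property("stunnel.clients.missing")  # fails the test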
+
+end \ No newline at end of file
diff --git a/tests/helpers/http_helper.rb b/tests/helpers/http_helper.rb
new file mode 100644
index 00000000..0d0bb7d5
--- /dev/null
+++ b/tests/helpers/http_helper.rb
@@ -0,0 +1,157 @@
+require 'net/http'
+
+class LeapTest
+
+ #
+ # In order to easily provide detailed error messages, it is useful
+ # to append a memo to a url string that details what this url is for
+ # (e.g. stunnel, haproxy, etc).
+ #
+ # If the url passed to assert_get() and friends is a URLString, its memo
+ # field is included in the error message when the request fails.
+ #
+ class URLString < String
+ attr_accessor :memo
+ end
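+
+ # For example:
+ #
+ #   url = URLString.new("http://localhost:4096/")
+ #   url.memo = "(via haproxy)"
+ #   assert_get(url)   # a failure message will include the memo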
+
+ #
+ # aliases for http_send()
+ #
+ def get(url, params=nil, options=nil, &block)
+ http_send("GET", url, params, options, &block)
+ end
+ def delete(url, params=nil, options=nil, &block)
+ http_send("DELETE", url, params, options, &block)
+ end
+ def post(url, params=nil, options=nil, &block)
+ http_send("POST", url, params, options, &block)
+ end
+ def put(url, params=nil, options=nil, &block)
+ http_send("PUT", url, params, options, &block)
+ end
+
+ #
+ # send a GET, DELETE, POST, or PUT
+ # yields |body, response, error|
+ #
+ def http_send(method, url, params=nil, options=nil)
+ options ||= {}
+ response = nil
+
+ # build uri
+ uri = URI(url)
+ if params && (method == 'GET' || method == 'DELETE')
+ uri.query = URI.encode_www_form(params)
+ end
+
+ # build http
+ http = Net::HTTP.new uri.host, uri.port
+ if uri.scheme == 'https'
+ http.verify_mode = OpenSSL::SSL::VERIFY_NONE
+ http.use_ssl = true
+ end
+
+ # build request
+ request = build_request(method, uri, params, options)
+
+ # make http request
+ http.start do |agent|
+ response = agent.request(request)
+ yield response.body, response, nil
+ end
+ rescue => exc
+ yield nil, response, exc
+ end
+
+ #
+ # Aliases for assert_http_send()
+ #
+ def assert_get(url, params=nil, options=nil, &block)
+ assert_http_send("GET", url, params, options, &block)
+ end
+ def assert_delete(url, params=nil, options=nil, &block)
+ assert_http_send("DELETE", url, params, options, &block)
+ end
+ def assert_post(url, params=nil, options=nil, &block)
+ assert_http_send("POST", url, params, options, &block)
+ end
+ def assert_put(url, params=nil, options=nil, &block)
+ assert_http_send("PUT", url, params, options, &block)
+ end
+
+ #
+ # calls http_send, yielding results if successful or failing with
+ # descriptive info otherwise.
+ #
+ # options:
+ # - error_msg: custom error message to display.
+ # - ok_codes: in addition to 2xx, codes in this array will not produce an error.
+ #
+ def assert_http_send(method, url, params=nil, options=nil, &block)
+ options ||= {}
+ error_msg = options[:error_msg] || (url.respond_to?(:memo) ? url.memo : nil)
+ http_send(method, url, params, options) do |body, response, error|
+ if response
+ code = response.code.to_i
+ ok = code >= 200 && code < 300
+ if options[:ok_codes]
+ ok ||= options[:ok_codes].include?(code)
+ end
+ if ok
+ if block
+ yield(body) if block.arity == 1
+ yield(body, response) if block.arity == 2
+ yield(body, response, error) if block.arity == 3
+ end
+ else
+ fail ["Expected success code from #{method} #{url}, but got #{response.code} instead.", error_msg, body].compact.join("\n")
+ end
+ else
+ fail ["Expected a response from #{method} #{url}, but got \"#{error}\" instead.", error_msg, body].compact.join("\n"), error
+ end
+ end
+ end
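+
+ # For illustration, a call that treats 404 as acceptable (the same pattern
+ # used by find_user_by_login in the api helper):
+ #
+ #   assert_get(url, nil, :ok_codes => [200, 404]) do |body, response|
+ #     # response.code is "200" or "404" here; anything else has already failed
+ #   end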
+
+ #
+ # only a warning for now, should be a failure in the future
+ #
+ def assert_auth_fail(url, params)
+ uri = URI(url)
+ get(url, params) do |body, response, error|
+ unless response.code.to_s == "401"
+ warn "Expected a '401 Unauthorized' response, but got #{response.code} instead (GET #{uri.request_uri} with username '#{uri.user}')."
+ return false
+ end
+ end
+ true
+ end
+
+ private
+
+ def build_request(method, uri, params, options)
+ request = case method
+ when "GET" then Net::HTTP::Get.new(uri.request_uri)
+ when "DELETE" then Net::HTTP::Delete.new(uri.request_uri)
+ when "POST" then Net::HTTP::Post.new(uri.request_uri)
+ when "PUT" then Net::HTTP::Put.new(uri.request_uri)
+ end
+ if uri.user
+ request.basic_auth uri.user, uri.password
+ end
+ if params && (method == 'POST' || method == 'PUT')
+ if options[:format] == :json || options[:format] == 'json'
+ request["Content-Type"] = "application/json"
+ request.body = params.to_json
+ else
+ request.set_form_data(params) if params
+ end
+ end
+ if options[:headers]
+ options[:headers].each do |key, value|
+ request[key] = value
+ end
+ end
+ request
+ end
+
+end \ No newline at end of file
diff --git a/tests/helpers/network_helper.rb b/tests/helpers/network_helper.rb
new file mode 100644
index 00000000..713d57aa
--- /dev/null
+++ b/tests/helpers/network_helper.rb
@@ -0,0 +1,79 @@
+class LeapTest
+
+ #
+ # tcp connection helper with timeout
+ #
+ def try_tcp_connect(host, port, timeout = 5)
+ addr = Socket.getaddrinfo(host, nil)
+ sockaddr = Socket.pack_sockaddr_in(port, addr[0][3])
+
+ Socket.new(Socket.const_get(addr[0][0]), Socket::SOCK_STREAM, 0).tap do |socket|
+ socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1)
+ begin
+ socket.connect_nonblock(sockaddr)
+ rescue IO::WaitReadable
+ if IO.select([socket], nil, nil, timeout) == nil
+ raise "Connection timeout"
+ else
+ socket.connect_nonblock(sockaddr)
+ end
+ rescue IO::WaitWritable
+ if IO.select(nil, [socket], nil, timeout) == nil
+ raise "Connection timeout"
+ else
+ socket.connect_nonblock(sockaddr)
+ end
+ end
+ return socket
+ end
+ end
+
+ def try_tcp_write(socket, timeout = 5)
+ begin
+ socket.write_nonblock("\0")
+ rescue IO::WaitReadable
+ if IO.select([socket], nil, nil, timeout) == nil
+ raise "Write timeout"
+ else
+ retry
+ end
+ rescue IO::WaitWritable
+ if IO.select(nil, [socket], nil, timeout) == nil
+ raise "Write timeout"
+ else
+ retry
+ end
+ end
+ end
+
+ def try_tcp_read(socket, timeout = 5)
+ begin
+ socket.read_nonblock(1)
+ rescue IO::WaitReadable
+ if IO.select([socket], nil, nil, timeout) == nil
+ raise "Read timeout"
+ else
+ retry
+ end
+ rescue IO::WaitWritable
+ if IO.select(nil, [socket], nil, timeout) == nil
+ raise "Read timeout"
+ else
+ retry
+ end
+ end
+ end
+
+ def assert_tcp_socket(host, port, msg=nil)
+ begin
+ socket = try_tcp_connect(host, port, 1)
+ #try_tcp_write(socket,1)
+ #try_tcp_read(socket,1)
+ rescue StandardError => exc
+ fail ["Failed to open socket #{host}:#{port}", exc, msg].compact.join("\n")
+ ensure
+ socket.close if socket
+ end
+ end
+
+end \ No newline at end of file
diff --git a/tests/helpers/os_helper.rb b/tests/helpers/os_helper.rb
new file mode 100644
index 00000000..9923d5b1
--- /dev/null
+++ b/tests/helpers/os_helper.rb
@@ -0,0 +1,41 @@
+class LeapTest
+
+ #
+ # works like the pgrep command line tool.
+ # returns an array of hashes like so: [{:pid => "1234", :process => "ls"}]
+ #
+ def pgrep(match)
+ output = `pgrep --full --list-name '#{match}'`
+ output.each_line.map{|line|
+ pid = line.split(' ')[0]
+ process = line.gsub(/(#{pid} |\n)/, '')
+ # filter out pgrep cmd itself
+ # on wheezy hosts, the "process" var contains the whole cmd including all parameters
+ # on jessie hosts, it only contains the first cmd (which is the default shell invoked by 'sh')
+ if process =~ /^sh/
+ nil
+ else
+ {:pid => pid, :process => process}
+ end
+ }.compact
+ end
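+
+ # For example (output is illustrative):
+ #
+ #   pgrep('^/usr/sbin/apache2')
+ #   #=> [{:pid => "1234", :process => "/usr/sbin/apache2 -k start"}]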
+
+ def assert_running(process, options={})
+ processes = pgrep(process)
+ assert processes.any?, "No running process for #{process}"
+ if options[:single]
+ assert processes.length == 1, "More than one process for #{process}"
+ end
+ end
+
+ #
+ # runs the specified command, failing on a non-zero exit status.
+ #
+ def assert_run(command)
+ output = `#{command} 2>&1`
+ if $?.exitstatus != 0
+ fail "Error running `#{command}`:\n#{output}"
+ end
+ end
+
+end \ No newline at end of file
diff --git a/tests/helpers/smtp_helper.rb b/tests/helpers/smtp_helper.rb
new file mode 100644
index 00000000..ea7fb9fa
--- /dev/null
+++ b/tests/helpers/smtp_helper.rb
@@ -0,0 +1,45 @@
+require 'net/smtp'
+
+class LeapTest
+
+ TEST_EMAIL_USER = "test_user_email"
+ TEST_BAD_USER = "test_user_bad"
+
+ MSG_BODY = %(Since it seems that any heart which beats for freedom has the right only to a
+lump of lead, I too claim my share. If you let me live, I shall never stop
+crying for revenge and I shall avenge my brothers. I have finished. If you are
+not cowards, kill me!
+
+--Louise Michel)
+
+ def send_email(recipient, options={})
+ sender = options[:sender] || recipient
+ helo_domain = property('domain.full_suffix')
+ headers = {
+ "Date" => Time.now.utc,
+ "From" => sender,
+ "To" => recipient,
+ "Subject" => "Test Message",
+ "X-LEAP-TEST" => "true"
+ }.merge(options[:headers]||{})
+ message = []
+ headers.each do |key, value|
+ message << "#{key}: #{value}"
+ end
+ message << ""
+ message << MSG_BODY
+ Net::SMTP.start('localhost', 25, helo_domain) do |smtp|
+ smtp.send_message message.join("\n"), recipient, sender
+ end
+ end
+
+ def assert_send_email(recipient, options={})
+ begin
+ send_email(recipient, options)
+ rescue IOError, Net::OpenTimeout,
+ Net::ReadTimeout, Net::SMTPError => e
+ fail "Could not send mail to #{recipient} (#{e})"
+ end
+ end
+
+end \ No newline at end of file
diff --git a/tests/helpers/soledad_sync.py b/tests/helpers/soledad_sync.py
new file mode 100755
index 00000000..f4fc81ae
--- /dev/null
+++ b/tests/helpers/soledad_sync.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+"""
+soledad_sync.py
+
+This script exercises soledad synchronization.
+Its exit code is 0 if the sync took place correctly, 1 otherwise.
+
+It takes 5 arguments:
+
+ uuid: uuid of the user to sync
+ token: a valid session token
+ server: the url of the soledad server we should connect to
+ cert_file: the file containing the certificate for the CA that signed the
+ cert for the soledad server.
+ password: the password for the user to sync
+
+__author__: kali@leap.se
+"""
+import os
+import shutil
+import sys
+import tempfile
+
+# This is needed because the twisted shipped with wheezy is too old
+# to do proper ssl verification.
+os.environ['SKIP_TWISTED_SSL_CHECK'] = '1'
+
+from twisted.internet import defer, reactor
+from twisted.python import log
+
+from client_side_db import get_soledad_instance
+from leap.common.events import flags
+
+flags.set_events_enabled(False)
+
+NUMDOCS = 1
+USAGE = "Usage: %s uuid token server cert_file password" % sys.argv[0]
+
+
+def bail(msg, exitcode):
+ print "[!] %s" % msg
+ sys.exit(exitcode)
+
+
+def create_docs(soledad):
+ """
+ Populates the soledad database with dummy messages, so we can exercise
+ sending payloads during the sync.
+ """
+ deferreds = []
+ for index in xrange(NUMDOCS):
+ deferreds.append(soledad.create_doc({'payload': 'dummy'}))
+ return defer.gatherResults(deferreds)
+
+# main program
+
+if __name__ == '__main__':
+
+ tempdir = tempfile.mkdtemp()
+
+ def rm_tempdir():
+ shutil.rmtree(tempdir)
+
+ if len(sys.argv) < 6:
+ bail(USAGE, 2)
+
+ uuid, token, server, cert_file, passphrase = sys.argv[1:]
+ s = get_soledad_instance(
+ uuid, passphrase, tempdir, server, cert_file, token)
+
+ def onSyncDone(sync_result):
+ print "SYNC_RESULT:", sync_result
+ s.close()
+ rm_tempdir()
+ reactor.stop()
+
+ def log_and_exit(f):
+ log.err(f)
+ rm_tempdir()
+ reactor.stop()
+
+ def start_sync():
+ d = create_docs(s)
+ d.addCallback(lambda _: s.sync())
+ d.addCallback(onSyncDone)
+ d.addErrback(log_and_exit)
+
+ reactor.callWhenRunning(start_sync)
+ reactor.run()
diff --git a/tests/helpers/srp_helper.rb b/tests/helpers/srp_helper.rb
new file mode 100644
index 00000000..b30fa768
--- /dev/null
+++ b/tests/helpers/srp_helper.rb
@@ -0,0 +1,171 @@
+#
+# Here are some very stripped down helper methods for SRP, useful only for
+# testing the client side.
+#
+
+require 'digest'
+require 'openssl'
+require 'securerandom'
+require 'base64'
+
+module SRP
+
+ ##
+ ## UTIL
+ ##
+
+ module Util
+ PRIME_N = <<-EOS.split.join.hex
+115b8b692e0e045692cf280b436735c77a5a9e8a9e7ed56c965f87db5b2a2ece3
+ EOS
+ BIG_PRIME_N = <<-EOS.split.join.hex # 1024 bits modulus (N)
+eeaf0ab9adb38dd69c33f80afa8fc5e86072618775ff3c0b9ea2314c9c25657
+6d674df7496ea81d3383b4813d692c6e0e0d5d8e250b98be48e495c1d6089da
+d15dc7d7b46154d6b6ce8ef4ad69b15d4982559b297bcf1885c529f566660e5
+7ec68edbc3c05726cc02fd4cbf4976eaa9afd5138fe8376435b9fc61d2fc0eb
+06e3
+ EOS
+ GENERATOR = 2 # g
+
+ def hn_xor_hg
+ byte_xor_hex(sha256_int(BIG_PRIME_N), sha256_int(GENERATOR))
+ end
+
+ # a^n (mod m)
+ def modpow(a, n, m = BIG_PRIME_N)
+ r = 1
+ while true
+ r = r * a % m if n[0] == 1
+ n >>= 1
+ return r if n == 0
+ a = a * a % m
+ end
+ end
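+
+ # For example: modpow(2, 10, 1000) #=> 24, since 2**10 = 1024 and
+ # 1024 % 1000 == 24.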
+
+ # Hashes the (long) int args
+ def sha256_int(*args)
+ sha256_hex(*args.map{|a| "%02x" % a})
+ end
+
+ # Hashes the hex args
+ def sha256_hex(*args)
+ h = args.map{|a| a.length.odd? ? "0#{a}" : a }.join('')
+ sha256_str([h].pack('H*'))
+ end
+
+ def sha256_str(s)
+ Digest::SHA2.hexdigest(s)
+ end
+
+ def bigrand(bytes)
+ OpenSSL::Random.random_bytes(bytes).unpack("H*")[0]
+ end
+
+ def multiplier
+ @multiplier ||= calculate_multiplier
+ end
+
+ protected
+
+ def calculate_multiplier
+ sha256_int(BIG_PRIME_N, GENERATOR).hex
+ end
+
+ def byte_xor_hex(a, b)
+ a = [a].pack('H*')
+ b = [b].pack('H*')
+ a.bytes.each_with_index.map do |a_byte, i|
+ (a_byte ^ (b[i].ord || 0)).chr
+ end.join
+ end
+ end
+
+ ##
+ ## SESSION
+ ##
+
+ class Session
+ include SRP::Util
+ attr_accessor :user
+ attr_accessor :bb
+
+ def initialize(user, aa=nil)
+ @user = user
+ @a = bigrand(32).hex
+ end
+
+ def m
+ @m ||= sha256_hex(n_xor_g_long, login_hash, @user.salt.to_s(16), aa, bb, k)
+ end
+
+ def aa
+ @aa ||= modpow(GENERATOR, @a).to_s(16) # A = g^a (mod N)
+ end
+
+ protected
+
+ # client: K = H( (B - kg^x) ^ (a + ux) )
+ def client_secret
+ base = bb.hex
+ base -= modpow(GENERATOR, @user.private_key) * multiplier
+ base = base % BIG_PRIME_N
+ modpow(base, @user.private_key * u.hex + @a)
+ end
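+
+ # For reference, the standard SRP-6a symbols used above: N and g are the
+ # group parameters, a/A the client's ephemeral private/public values, B the
+ # server's public value, x the private key derived from salt and password,
+ # u = H(A, B) the scrambling parameter, and k the multiplier H(N, g).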
+
+ def k
+ @k ||= sha256_int(client_secret)
+ end
+
+ def n_xor_g_long
+ @n_xor_g_long ||= hn_xor_hg.bytes.map{|b| "%02x" % b.ord}.join
+ end
+
+ def login_hash
+ @login_hash ||= sha256_str(@user.username)
+ end
+
+ def u
+ @u ||= sha256_hex(aa, bb)
+ end
+ end
+
+ ##
+ ## Dummy USER
+ ##
+
+ class User
+ include SRP::Util
+
+ attr_accessor :username, :password, :salt, :verifier, :id, :session_token, :ok, :deleted
+
+ def initialize(username=nil)
+ @username = username || "tmp_user_" + SecureRandom.urlsafe_base64(10).downcase.gsub(/[_-]/, '')
+ @password = "password_" + SecureRandom.urlsafe_base64(10)
+ @salt = bigrand(4).hex
+ @verifier = modpow(GENERATOR, private_key)
+ @ok = false
+ @deleted = false
+ end
+
+ def private_key
+ @private_key ||= calculate_private_key
+ end
+
+ def to_params
+ {
+ 'user[login]' => @username,
+ 'user[password_verifier]' => @verifier.to_s(16),
+ 'user[password_salt]' => @salt.to_s(16)
+ }
+ end
+
+ private
+
+ def calculate_private_key
+ shex = '%x' % [@salt]
+ inner = sha256_str([@username, @password].join(':'))
+ sha256_hex(shex, inner).hex
+ end
+ end
+
+end
diff --git a/tests/order.rb b/tests/order.rb
new file mode 100644
index 00000000..14aad9be
--- /dev/null
+++ b/tests/order.rb
@@ -0,0 +1,22 @@
+class LeapCli::Config::Node
+ #
+ # returns a list of node names that should be tested before this node.
+ # make sure to not return ourselves (please no dependency loops!).
+ #
+ # NOTE: this method determines the order that nodes are tested in. To specify
+ # the order of tests on a particular node, each test can call class method
+ # LeapTest.depends_on().
+ #
+ def test_dependencies
+ dependents = LeapCli::Config::ObjectList.new
+
+ # webapp, mx, and soledad depend on couchdb nodes
+ if services.include?('webapp') || services.include?('mx') || services.include?('soledad')
+ if !services.include?('couchdb')
+ dependents.merge! nodes_like_me[:services => 'couchdb']
+ end
+ end
+
+ dependents.keys.delete_if {|name| self.name == name}
+ end
+end \ No newline at end of file
diff --git a/tests/white-box/couchdb.rb b/tests/white-box/couchdb.rb
new file mode 100644
index 00000000..85dc6840
--- /dev/null
+++ b/tests/white-box/couchdb.rb
@@ -0,0 +1,186 @@
+raise SkipTest unless service?(:couchdb)
+
+require 'json'
+
+class CouchDB < LeapTest
+ depends_on "Network"
+
+ def setup
+ end
+
+ def test_00_Are_daemons_running?
+ assert_running 'bin/beam'
+ if multimaster?
+ assert_running 'bin/epmd'
+ end
+ pass
+ end
+
+ #
+ # check to make sure we can get welcome response from local couchdb
+ #
+ def test_01_Is_CouchDB_running?
+ assert_get(couchdb_url) do |body|
+ assert_match /"couchdb":"Welcome"/, body, "Could not get welcome message from #{couchdb_url}. Probably couchdb is not running."
+ end
+ pass
+ end
+
+ #
+ # compare the configured nodes to the nodes that are actually listed in bigcouch
+ #
+ def test_02_Is_cluster_membership_ok?
+ return unless multimaster?
+ url = couchdb_backend_url("/nodes/_all_docs")
+ neighbors = assert_property('couch.bigcouch.neighbors')
+ neighbors << assert_property('domain.full')
+ neighbors.sort!
+ assert_get(url) do |body|
+ response = JSON.parse(body)
+ nodes_in_db = response['rows'].collect{|row| row['id'].sub(/^bigcouch@/, '')}.sort
+ assert_equal neighbors, nodes_in_db, "The couchdb replication node list is wrong (/nodes/_all_docs)"
+ end
+ pass
+ end
+
+ #
+ # all configured nodes are in 'cluster_nodes'
+ # all nodes online and communicating are in 'all_nodes'
+ #
+ # this seems backward to me, so it might be the other way around.
+ #
+ def test_03_Are_configured_nodes_online?
+ return unless multimaster?
+ url = couchdb_url("/_membership", :username => 'admin')
+ assert_get(url) do |body|
+ response = JSON.parse(body)
+ nodes_configured_but_not_available = response['cluster_nodes'] - response['all_nodes']
+ nodes_available_but_not_configured = response['all_nodes'] - response['cluster_nodes']
+ if nodes_configured_but_not_available.any?
+ warn "These nodes are configured but not available:", nodes_configured_but_not_available
+ end
+ if nodes_available_but_not_configured.any?
+ warn "These nodes are available but not configured:", nodes_available_but_not_configured
+ end
+ if response['cluster_nodes'] == response['all_nodes']
+ pass
+ end
+ end
+ end
+
+ def test_04_Do_ACL_users_exist?
+ acl_users = ['_design/_auth', 'leap_mx', 'nickserver', 'soledad', 'webapp', 'replication']
+ url = couchdb_backend_url("/_users/_all_docs", :username => 'admin')
+ assert_get(url) do |body|
+ response = JSON.parse(body)
+ assert_equal acl_users.count, response['total_rows']
+ actual_users = response['rows'].map{|row| row['id'].sub(/^org.couchdb.user:/, '') }
+ assert_equal acl_users.sort, actual_users.sort
+ end
+ pass
+ end
+
+ def test_05_Do_required_databases_exist?
+ dbs_that_should_exist = ["customers","identities","keycache","shared","tickets","users", "tmp_users"]
+ dbs_that_should_exist << "tokens_#{rotation_suffix}"
+ dbs_that_should_exist << "sessions_#{rotation_suffix}"
+ dbs_that_should_exist.each do |db_name|
+ url = couchdb_url("/"+db_name, :username => 'admin')
+ assert_get(url) do |body|
+ assert response = JSON.parse(body)
+ assert_equal db_name, response['db_name']
+ end
+ end
+ pass
+ end
+
+ # The ACL enforcement test is disabled because of a known issue with
+ # bigcouch; failing it would only confuse the user.
+ # see https://leap.se/code/issues/6030 for more details
+ #
+ ## for now, this just prints warnings, since we are failing these tests.
+ ##
+
+ #def test_06_Is_ACL_enforced?
+ # ok = assert_auth_fail(
+ # couchdb_url('/users/_all_docs', :username => 'leap_mx'),
+ # {:limit => 1}
+ # )
+ # ok = assert_auth_fail(
+ # couchdb_url('/users/_all_docs', :username => 'leap_mx'),
+ # {:limit => 1}
+ # ) && ok
+ # pass if ok
+ #end
+
+ def test_07_Can_records_be_created?
+ record = DummyRecord.new
+ url = couchdb_url("/tokens_#{rotation_suffix}", :username => 'admin')
+ assert_post(url, record, :format => :json) do |body|
+ assert response = JSON.parse(body), "POST response should be JSON"
+ assert response["ok"], "POST response should be OK"
+ assert_delete(File.join(url, response["id"]), :rev => response["rev"]) do |body|
+ assert response = JSON.parse(body), "DELETE response should be JSON"
+ assert response["ok"], "DELETE response should be OK"
+ end
+ end
+ pass
+ end
+
+ #
+ # This is not really a "test", just an attempt to make sure that
+ # the mx tests that fire off dummy emails don't fill up the
+ # storage db.
+ #
+ # mx tests can't run this because they don't have access to
+ # the storage db.
+ #
+ # This "test" is responsible for both creating the db if it does not
+ # exist, and destroying if it does.
+ #
+ # Yes, this is super hacky. Properly, we should add something to
+ # the soledad api to support create/delete of user storage dbs.
+ #
+ def test_99_Delete_mail_storage_used_in_mx_tests
+ user = find_user_by_login(TEST_EMAIL_USER)
+ if user
+ if user_db_exists?(user["id"])
+ # keep the test email db from filling up:
+ assert_destroy_user_db(user["id"], :username => 'admin')
+ end
+ # either way, make sure we leave a db for the mx tests:
+ assert_create_user_db(user["id"], :username => 'admin')
+ end
+ silent_pass
+ end
+
+ private
+
+ def multimaster?
+ mode == "multimaster"
+ end
+
+ def mode
+ assert_property('couch.mode')
+ end
+
+ # TODO: admin port is hardcoded for now but should be configurable.
+ def couchdb_backend_url(path="", options={})
+ options = {port: multimaster? && "5986"}.merge options
+ couchdb_url(path, options)
+ end
+
+ def rotation_suffix
+ Time.now.utc.to_i / 2592000 # monthly
+ end
+
+ require 'securerandom'
+ require 'digest/sha2'
+ class DummyRecord < Hash
+ def initialize
+ self['data'] = SecureRandom.urlsafe_base64(32).gsub(/^_*/, '')
+ self['_id'] = Digest::SHA512.hexdigest(self['data'])
+ end
+ end
+
+end
diff --git a/tests/white-box/dummy.rb b/tests/white-box/dummy.rb
new file mode 100644
index 00000000..a3e8ad68
--- /dev/null
+++ b/tests/white-box/dummy.rb
@@ -0,0 +1,71 @@
+# only run in the dummy case where there is no hiera.yaml file.
+raise SkipTest unless $node["dummy"]
+
+class Robot
+ def can_shoot_lasers?
+ "OHAI!"
+ end
+
+ def can_fly?
+ "YES!"
+ end
+end
+
+class TestDummy < LeapTest
+ def setup
+ @robot = Robot.new
+ end
+
+ def test_lasers
+ assert_equal "OHAI!", @robot.can_shoot_lasers?
+ pass
+ end
+
+ def test_fly
+ refute_match /^no/i, @robot.can_fly?
+ pass
+ end
+
+ def test_fail
+ fail "fail"
+ pass
+ end
+
+ def test_01_will_be_skipped
+ skip "test this later"
+ pass
+ end
+
+ def test_socket_failure
+ assert_tcp_socket('localhost', 900000)
+ pass
+ end
+
+ def test_warn
+ block_test do
+ warn "not everything", "is a success or failure"
+ end
+ end
+
+ # used to test extracting the proper caller even when in a block
+ def block_test
+ yield
+ end
+
+ def test_socket_success
+ fork {
+ Socket.tcp_server_loop('localhost', 12345) do |sock, client_addrinfo|
+ begin
+ sock.write('hi')
+ ensure
+ sock.close
+ exit
+ end
+ end
+ }
+ sleep 0.2
+ assert_tcp_socket('localhost', 12345)
+ pass
+ end
+
+end
diff --git a/tests/white-box/mx.rb b/tests/white-box/mx.rb
new file mode 100644
index 00000000..e0cb273a
--- /dev/null
+++ b/tests/white-box/mx.rb
@@ -0,0 +1,267 @@
+raise SkipTest unless service?(:mx)
+
+require 'date'
+require 'json'
+require 'net/smtp'
+
+class Mx < LeapTest
+ depends_on "Network"
+ depends_on "Webapp" if service?(:webapp)
+
+ def setup
+ end
+
+ def test_01_Can_contact_couchdb?
+ dbs = ["identities"]
+ dbs.each do |db_name|
+ couchdb_urls("/"+db_name, couch_url_options).each do |url|
+ assert_get(url) do |body|
+ assert response = JSON.parse(body)
+ assert_equal db_name, response['db_name']
+ end
+ end
+ end
+ pass
+ end
+
+ def test_02_Can_contact_couchdb_via_haproxy?
+ if property('haproxy.couch')
+ url = couchdb_url_via_haproxy("", couch_url_options)
+ assert_get(url) do |body|
+ assert_match /"couchdb":"Welcome"/, body, "Request to #{url} should return couchdb welcome message."
+ end
+ pass
+ end
+ end
+
+ #
+ # this test picks a random identity document, then queries
+ # using the by_address view for that same document again.
+ #
+ def test_03_Can_query_identities_db?
+ ident = pick_random_identity
+ address = ident['address']
+ url_base = %(/identities/_design/Identity/_view/by_address)
+ params = %(?include_docs=true&reduce=false&startkey="#{address}"&endkey="#{address}")
+ assert_get(couchdb_url(url_base+params, couch_url_options)) do |body|
+ assert response = JSON.parse(body)
+ assert record = response['rows'].first
+ assert_equal address, record['doc']['address']
+ pass
+ end
+ end
+
+ def test_04_Are_MX_daemons_running?
+ assert_running '.*/usr/bin/twistd.*mx.tac'
+ assert_running '^/usr/lib/postfix/master$'
+ assert_running '^/usr/sbin/postfwd'
+ assert_running 'postfwd2::cache$'
+ assert_running 'postfwd2::policy$'
+ assert_running '^/usr/sbin/unbound$'
+ assert_running '^/usr/bin/freshclam'
+ assert_running '^/usr/sbin/opendkim'
+ if Dir.glob("/var/lib/clamav/main.{c[vl]d,inc}").size > 0 and Dir.glob("/var/lib/clamav/daily.{c[vl]d,inc}").size > 0
+ assert_running '^/usr/sbin/clamd'
+ assert_running '^/usr/sbin/clamav-milter'
+ else
+ skip "Downloading the clamav signature files (/var/lib/clamav/{daily,main}.{c[vl]d,inc}) is still in progress, so clamd is not running.\nDon't worry, mail delivery will work without clamav. The download should finish soon."
+ end
+ pass
+ end
+
+ #
+ # TODO: test to make sure postmap returned the right result
+ #
+ def test_05_Can_postfix_query_leapmx?
+ ident = pick_random_identity(10, :with_public_key => true)
+ address = ident["address"]
+
+ #
+ # virtual alias map:
+ #
+ # user@domain => 41c29a80a44f4775513c64ac9cab91b9@deliver.local
+ #
+ assert_run("postmap -v -q \"#{address}\" tcp:localhost:4242")
+
+ #
+ # recipient access map:
+ #
+ # user@domain => [OK|REJECT|TEMP_FAIL]
+ #
+ # This map is queried by the mail server before delivery to the mail spool
+ # directory, and should check if the address is able to receive messages.
+ # Examples of reasons for denying delivery would be that the user is over
+ # quota, is not an active user, or has no PGP public key on the server.
+ #
+ # NOTE: in the future, when we support quota, we need to make sure that
+ # we don't randomly pick a user for this test that happens to be over quota.
+ #
+ assert_run("postmap -v -q \"#{address}\" tcp:localhost:2244")
+
+ #
+ # certificate validity map:
+ #
+ # fa:2a:70:1f:d8:16:4e:1a:3b:15:c1:67:00:f0 => [200|500]
+ #
+ # Determines whether a particular SMTP client cert is authorized
+ # to relay mail, based on the fingerprint.
+ #
+ if ident["cert_fingerprints"]
+ # cert_fingerprints appears to map fingerprint => expiry date string, so
+ # compare against the stored value rather than a hardcoded date (assuming
+ # the value is in "%F" format, as the original hardcoded date suggests).
+ not_expired = ident["cert_fingerprints"].select {|key, value|
+ Time.now.utc < DateTime.strptime(value, "%F").to_time.utc
+ }
+ if not_expired.any?
+ fingerprint = not_expired.keys.first
+ assert_run("postmap -v -q #{fingerprint} tcp:localhost:2424")
+ end
+ end
+
+ pass
+ end
+
+ #
+ # The email sent by this test might get bounced back.
+ # In this case, the test will pass, but the bounce message will
+ # get sent to root, so the sysadmin will still figure out pretty
+ # quickly that something is wrong.
+ #
+ def test_06_Can_deliver_email?
+ addr = [TEST_EMAIL_USER, property('domain.full_suffix')].join('@')
+ bad_addr = [TEST_BAD_USER, property('domain.full_suffix')].join('@')
+
+ assert !identity_exists?(bad_addr), "the address #{bad_addr} must not exist."
+ if !identity_exists?(addr)
+ user = assert_create_user(TEST_EMAIL_USER, :monitor)
+ upload_public_key(user.id, TEST_EMAIL_PUBLIC_KEY)
+ end
+ assert identity_exists?(addr), "The identity #{addr} should have been created, but it doesn't exist yet."
+ assert_send_email(addr)
+ assert_raises(Net::SMTPError) do
+ send_email(bad_addr)
+ end
+ pass
+ end
+
+ private
+
+ def couch_url_options
+ {
+ :username => property('couchdb_leap_mx_user.username'),
+ :password => property('couchdb_leap_mx_user.password')
+ }
+ end
+
+ #
+ # returns a random identity record that also has valid address
+ # and destination fields.
+ #
+ # options:
+ #
+ # * :with_public_key -- searches only for identities with public keys
+ #
+ # note to self: for debugging, here is the curl you want:
+ # curl --netrc "127.0.0.1:5984/identities/_design/Identity/_view/by_address?startkey=\"xxxx@leap.se\"&endkey=\"xxxx@leap.se\"&reduce=false&include_docs=true"
+ #
+ def pick_random_identity(tries=5, options={})
+ assert_get(couchdb_url("/identities", couch_url_options)) do |body|
+ assert response = JSON.parse(body)
+ doc_count = response['doc_count'].to_i
+ if doc_count <= 1
+ # the design document counts as one document.
+ skip "There are no identity documents yet."
+ else
+ # try repeatedly to get a valid doc
+ for i in 1..tries
+ offset = rand(doc_count) # pick a random document
+ url = couchdb_url("/identities/_all_docs?include_docs=true&limit=1&skip=#{offset}", couch_url_options)
+ assert_get(url) do |body|
+ assert response = JSON.parse(body)
+ record = response['rows'].first
+ if record['id'] =~ /_design/
+ next
+ elsif record['doc'] && record['doc']['address']
+ next if record['doc']['destination'].nil? || record['doc']['destination'].empty?
+ next if options[:with_public_key] && !record_has_key?(record)
+ return record['doc']
+ else
+ fail "Identity document #{record['id']} is missing an address field. #{record['doc'].inspect}"
+ end
+ end
+ end
+ if options[:with_public_key]
+ skip "Could not find an Identity document with a public key for testing."
+ else
+ fail "Failed to find a valid Identity document (with address and destination)."
+ end
+ end
+ end
+ end
+
+ def record_has_key?(record)
+ !record['doc']['keys'].nil? &&
+ !record['doc']['keys'].empty? &&
+ !record['doc']['keys']['pgp'].nil? &&
+ !record['doc']['keys']['pgp'].empty?
+ end
+
+ TEST_EMAIL_PUBLIC_KEY=<<HERE
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+mI0EVvzIKQEEAN4f8FOGntJGTTD+fFUQS6y/ihn6tYLtyGZZbCOd0t/9kHt/raoR
+xEUks8rCOPMqHX+yeHsvDBtDyZYTvyhtfuWrBUbYGW+QZ4Pdvo+7NyLHPW0dKsCB
+Czrx7pxqpq1oq+LpUFqpSfjJTfYaGVDNXrPK144a7Rox2+MCbgq3twnFABEBAAG0
+EiA8dGVzdF91c2VyX2VtYWlsPoi4BBMBAgAiBQJW/MgpAhsvBgsJCAcDAgYVCAIJ
+CgsEFgIDAQIeAQIXgAAKCRAqYf65XmeSk0orBADUXjEiGnjzyBpXqaiVmJr4MyfP
+IfKTK4a+4qvR+2fseD7hteF98m26i1YRI5omLp4/MnxGSpgKFKIuWIdkEiLg7IJc
+pFZVdoDVufEtzbj9gmOHlnteksbCtuESyB0Hytsba4uS9afcTJdGiPNMHeniI/SY
+UKcCcIrQmpNIoOA5OLiNBFb8yCkBBAC+WMUQ+FC6GQ+pyaWlwTRsBAT4+Tp8w9jD
+7PK4xeEmVZDirP0VkW18UeQEueWJ63ia7wIGf1WyVH1tbvgVyRLsjT2cpKo8c6Ok
+NkhfGfjTnUJPeBNy8734UDIdqZLXJl0z6Z1R0CfOjBqvV25kWUvMkz/NEgZBhE+c
+m3JuZy1k7QARAQABiQE9BBgBAgAJBQJW/MgpAhsuAKgJECph/rleZ5KTnSAEGQEC
+AAYFAlb8yCkACgkQsJSYitQUOv4w1wQAn3atI5EsmRyw6iC6UVWWJv/lKi1Priyt
+DsrdH5xUmHUgp6VU8Pw9Y6G+sv50KLfbVQ1l+8/3B71TjadsOxh+PBPsEyYpK6WX
+TVGy44IDvFWGyOod8tmfcFN9IpU5DmSk/vny9G7RK/nbnta2VnfZOzwm5i3cNkPr
+FGPL1z0K3qs0VwP+M7BXdqBRSFDDBpG1J0TrZioEjvKeOsT/Ul8mbVt7HQpcN93I
+wTO4uky0Woy2nb7SbTQw6wOpU54u7+5dSQ03ltUHg1owy6Y3CMOeFL+e9ALpAZAU
+aMwY7zMFhqlPVZZMfdMLRsdLin67RIM+OJ6A925AM52bEQT1YwkQlP4mvQY=
+=qclE
+-----END PGP PUBLIC KEY BLOCK-----
+HERE
+
+ TEST_EMAIL_PRIVATE_KEY = <<HERE
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+lQHYBFb8yCkBBADeH/BThp7SRk0w/nxVEEusv4oZ+rWC7chmWWwjndLf/ZB7f62q
+EcRFJLPKwjjzKh1/snh7LwwbQ8mWE78obX7lqwVG2BlvkGeD3b6Puzcixz1tHSrA
+gQs68e6caqataKvi6VBaqUn4yU32GhlQzV6zyteOGu0aMdvjAm4Kt7cJxQARAQAB
+AAP8DTFfcE6UG1AioJDU6KZ9oCaGONHLuxmNaArSofDrR/ODA9rLAUlp22N5LEdJ
+46NyOhXrEwHx2aK2k+vbVDbgrP4ZTH7GxIK/2KzmH4zX0fWUNsaRy94Q12lJegXH
+sH2Im8Jjxu16YwGgFNTX1fCPqLB6WdQpf1796s6+/3PnCDcCAOXTCul3N7V5Yl+9
+N2Anupn+qNDXKT/kiKIZLHsMbo7EriGWReG3lLj1cOJPC6Nf0uOEri4ErSjFEadR
+F2TNITsCAPdsZjc5RGppUXyBfxhQkAnZ0r+UT2meCH3g3EVh3W9SBrXNhwipNpW3
+bPzRjUCDtmA8EOvd93oPCZv4/tb50P8B/jC+QIZ3GncP1CFPSVDoIZ7OUU5M1330
+DP77vG1GxeQvYO/hlxL5/KdtTR6m5zlIuooDxUaNJz1w5/oVjlG3NZKpl7QSIDx0
+ZXN0X3VzZXJfZW1haWw+iLgEEwECACIFAlb8yCkCGy8GCwkIBwMCBhUIAgkKCwQW
+AgMBAh4BAheAAAoJECph/rleZ5KTSisEANReMSIaePPIGlepqJWYmvgzJ88h8pMr
+hr7iq9H7Z+x4PuG14X3ybbqLVhEjmiYunj8yfEZKmAoUoi5Yh2QSIuDsglykVlV2
+gNW58S3NuP2CY4eWe16SxsK24RLIHQfK2xtri5L1p9xMl0aI80wd6eIj9JhQpwJw
+itCak0ig4Dk4nQHYBFb8yCkBBAC+WMUQ+FC6GQ+pyaWlwTRsBAT4+Tp8w9jD7PK4
+xeEmVZDirP0VkW18UeQEueWJ63ia7wIGf1WyVH1tbvgVyRLsjT2cpKo8c6OkNkhf
+GfjTnUJPeBNy8734UDIdqZLXJl0z6Z1R0CfOjBqvV25kWUvMkz/NEgZBhE+cm3Ju
+Zy1k7QARAQABAAP9HrUaGvdpqTwVx3cHyXUhId6GzCuuKyaP4mZoGeBCcaQS2vQR
+YtiykwBwX/AlfwSFJmmHKB6EErWIA+QyaEFR/fO56cHD2TY3Ql0BGcuHIx3+9pkp
+biPBZdiiGz7oa6k6GWsbKSksqwV8poSXV7qbn+Bjm2xCM4VnjNZIrFtL7fkCAMOf
+e9yHBFoXfc175bkNXEUXrNS34kv2ODAlx6KyY+PS77D+nprpHpGCnLn77G+xH1Xi
+qvX1Dr/iSQU5Tzsd+tcCAPkYZulaC/9itwme7wIT3ur+mdqMHymsCzv9193iLgjJ
+9t7fARo18yB845hI9Xv7TwRcoyuSpfvuM05rCMRzydsCAOI1MZeKtZSogXVa9QTX
+sVGZeCkrujSVOgsA3w48OLc2OrwZskDfx5QHfeJnumjQLut5qsnZ+1onj9P2dGdn
+JaChe4kBPQQYAQIACQUCVvzIKQIbLgCoCRAqYf65XmeSk50gBBkBAgAGBQJW/Mgp
+AAoJELCUmIrUFDr+MNcEAJ92rSORLJkcsOogulFVlib/5SotT64srQ7K3R+cVJh1
+IKelVPD8PWOhvrL+dCi321UNZfvP9we9U42nbDsYfjwT7BMmKSull01RsuOCA7xV
+hsjqHfLZn3BTfSKVOQ5kpP758vRu0Sv5257WtlZ32Ts8JuYt3DZD6xRjy9c9Ct6r
+NFcD/jOwV3agUUhQwwaRtSdE62YqBI7ynjrE/1JfJm1bex0KXDfdyMEzuLpMtFqM
+tp2+0m00MOsDqVOeLu/uXUkNN5bVB4NaMMumNwjDnhS/nvQC6QGQFGjMGO8zBYap
+T1WWTH3TC0bHS4p+u0SDPjiegPduQDOdmxEE9WMJEJT+Jr0G
+=hvJM
+-----END PGP PRIVATE KEY BLOCK-----
+HERE
+
+end
diff --git a/tests/white-box/network.rb b/tests/white-box/network.rb
new file mode 100644
index 00000000..436fc8a8
--- /dev/null
+++ b/tests/white-box/network.rb
@@ -0,0 +1,90 @@
+require 'socket'
+require 'openssl'
+
+raise SkipTest if $node["dummy"]
+
+class Network < LeapTest
+
+ def setup
+ end
+
+ def test_01_Can_connect_to_internet?
+ assert_get('http://www.google.com/images/srpr/logo11w.png')
+ pass
+ end
+
+ #
+ # example properties:
+ #
+ # stunnel:
+ # ednp_clients:
+ # elk_9002:
+ # accept_port: 4003
+ # connect: elk.dev.bitmask.i
+ # connect_port: 19002
+ # couch_server:
+ # accept: 15984
+ # connect: "127.0.0.1:5984"
+ #
+ def test_02_Is_stunnel_running?
+ ignore unless $node['stunnel']
+ good_stunnel_pids = []
+ release = `facter lsbmajdistrelease`
+ if release.to_i > 7
+ # on jessie, there is only one stunnel proc running instead of 6
+ expected = 1
+ else
+ expected = 6
+ end
+ $node['stunnel']['clients'].each do |stunnel_type, stunnel_configs|
+ stunnel_configs.each do |stunnel_name, stunnel_conf|
+ config_file_name = "/etc/stunnel/#{stunnel_name}.conf"
+ processes = pgrep(config_file_name)
+ assert_equal expected, processes.length, "There should be #{expected} stunnel processes running for `#{config_file_name}`"
+ good_stunnel_pids += processes.map{|ps| ps[:pid]}
+ assert port = stunnel_conf['accept_port'], 'Field `accept_port` must be present in `stunnel` property.'
+ assert_tcp_socket('localhost', port)
+ end
+ end
+ $node['stunnel']['servers'].each do |stunnel_name, stunnel_conf|
+ config_file_name = "/etc/stunnel/#{stunnel_name}.conf"
+ processes = pgrep(config_file_name)
+ assert_equal expected, processes.length, "There should be #{expected} stunnel processes running for `#{config_file_name}`"
+ good_stunnel_pids += processes.map{|ps| ps[:pid]}
+ assert accept_port = stunnel_conf['accept_port'], "Field `accept_port` must be present in property `stunnel.servers.#{stunnel_name}`"
+ assert_tcp_socket('localhost', accept_port)
+ assert connect_port = stunnel_conf['connect_port'], "Field `connect_port` must be present in property `stunnel.servers.#{stunnel_name}`"
+ assert_tcp_socket('localhost', connect_port,
+ "The local connect endpoint for stunnel `#{stunnel_name}` is unavailable.\n"+
+ "This is probably caused by a daemon that died or failed to start on\n"+
+ "port `#{connect_port}`, not stunnel itself.")
+ end
+ all_stunnel_pids = pgrep('/usr/bin/stunnel').collect{|process| process[:pid]}.uniq
+ assert_equal good_stunnel_pids.sort, all_stunnel_pids.sort, "There should not be any extra stunnel processes that are not configured in /etc/stunnel"
+ pass
+ end
+
+ def test_03_Is_shorewall_running?
+ ignore unless File.exists?('/sbin/shorewall')
+ assert_run('/sbin/shorewall status')
+ pass
+ end
+
+ THIRTY_DAYS = 60*60*24*30
+
+ def test_04_Are_server_certificates_valid?
+ cert_paths = ["/etc/x509/certs/leap_commercial.crt", "/etc/x509/certs/leap.crt"]
+ cert_paths.each do |cert_path|
+ if File.exists?(cert_path)
+ cert = OpenSSL::X509::Certificate.new(File.read(cert_path))
+ if Time.now > cert.not_after
+ fail "The certificate #{cert_path} expired on #{cert.not_after}"
+ elsif Time.now + THIRTY_DAYS > cert.not_after
+ fail "The certificate #{cert_path} will expire soon, on #{cert.not_after}"
+ end
+ end
+ end
+ pass
+ end
+
+end
diff --git a/tests/white-box/openvpn.rb b/tests/white-box/openvpn.rb
new file mode 100644
index 00000000..170d4503
--- /dev/null
+++ b/tests/white-box/openvpn.rb
@@ -0,0 +1,16 @@
+raise SkipTest unless service?(:openvpn)
+
+class OpenVPN < LeapTest
+ depends_on "Network"
+
+ def setup
+ end
+
+ def test_01_Are_daemons_running?
+ assert_running '^/usr/sbin/openvpn .* /etc/openvpn/tcp_config.conf$'
+ assert_running '^/usr/sbin/openvpn .* /etc/openvpn/udp_config.conf$'
+ assert_running '^/usr/sbin/unbound$'
+ pass
+ end
+
+end
diff --git a/tests/white-box/soledad.rb b/tests/white-box/soledad.rb
new file mode 100644
index 00000000..d41bee58
--- /dev/null
+++ b/tests/white-box/soledad.rb
@@ -0,0 +1,17 @@
+raise SkipTest unless service?(:soledad)
+
+require 'json'
+
+class Soledad < LeapTest
+ depends_on "Network"
+ depends_on "CouchDB" if service?(:couchdb)
+
+ def setup
+ end
+
+ def test_00_Is_Soledad_running?
+ assert_running '.*/usr/bin/twistd.*--wsgi=leap.soledad.server.application'
+ pass
+ end
+
+end
diff --git a/tests/white-box/webapp.rb b/tests/white-box/webapp.rb
new file mode 100644
index 00000000..68f3dcd2
--- /dev/null
+++ b/tests/white-box/webapp.rb
@@ -0,0 +1,134 @@
+raise SkipTest unless service?(:webapp)
+
+require 'json'
+
+class Webapp < LeapTest
+ depends_on "Network"
+
+ def setup
+ end
+
+ def test_01_Can_contact_couchdb?
+ url = couchdb_url("", url_options)
+ assert_get(url) do |body|
+ assert_match /"couchdb":"Welcome"/, body, "Request to #{url} should return couchdb welcome message."
+ end
+ pass
+ end
+
+ def test_02_Can_contact_couchdb_via_haproxy?
+ if property('haproxy.couch')
+ url = couchdb_url_via_haproxy("", url_options)
+ assert_get(url) do |body|
+ assert_match /"couchdb":"Welcome"/, body, "Request to #{url} should return couchdb welcome message."
+ end
+ pass
+ end
+ end
+
+ def test_03_Are_daemons_running?
+ assert_running '^/usr/sbin/apache2'
+ assert_running '^/usr/bin/ruby /usr/bin/nickserver'
+ pass
+ end
+
+ #
+ # this is technically a black-box test. so, move this when we have support
+ # for black box tests.
+ #
+ def test_04_Can_access_webapp?
+ assert_get('https://' + $node['webapp']['domain'] + '/')
+ pass
+ end
+
+ def test_05_Can_create_and_authenticate_and_delete_user_via_API?
+ if property('webapp.allow_registration')
+ assert_tmp_user
+ pass
+ else
+ skip "New user registrations are disabled."
+ end
+ end
+
+ def test_06_Can_sync_Soledad?
+ return unless property('webapp.allow_registration')
+ soledad_config = property('definition_files.soledad_service')
+ if soledad_config && !soledad_config.empty?
+ soledad_server = pick_soledad_server(soledad_config)
+ if soledad_server
+ assert_tmp_user do |user|
+ command = File.expand_path "../../helpers/soledad_sync.py", __FILE__
+ soledad_url = "https://#{soledad_server}/user-#{user.id}"
+ soledad_cert = "/usr/local/share/ca-certificates/leap_ca.crt"
+ assert_run "#{command} #{user.id} #{user.session_token} #{soledad_url} #{soledad_cert} #{user.password}"
+ assert_user_db_exists(user)
+ pass
+ end
+ end
+ else
+ skip 'No soledad service configuration'
+ end
+ end
+
+ private
+
+ def url_options
+ {
+ :username => property('webapp.couchdb_webapp_user.username'),
+ :password => property('webapp.couchdb_webapp_user.password')
+ }
+ end
+
+ #
+ # pick a random soledad server.
+ # I am not sure why, but using IP address directly does not work.
+ #
+ def pick_soledad_server(soledad_config_json_str)
+ soledad_config = JSON.parse(soledad_config_json_str)
+ host_name = soledad_config['hosts'].keys.shuffle.first
+ if host_name
+ hostname = soledad_config['hosts'][host_name]['hostname']
+ port = soledad_config['hosts'][host_name]['port']
+ return "#{hostname}:#{port}"
+ else
+ return nil
+ end
+ end
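+
+ # For example, given a (hypothetical) soledad-service.json fragment
+ #
+ #   {"hosts": {"couch1": {"hostname": "soledad.example.org", "port": 2323}}}
+ #
+ # this returns "soledad.example.org:2323".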
+
+ #
+ # asserts that the per-user db created by soledad-server exists.
+ # we try three times, and give up after that.
+ #
+ def assert_user_db_exists(user)
+ db_name = "user-#{user.id}"
+ repeatedly_try("/#{db_name}") do |body, response, error|
+ assert false, "Could not find user db `#{db_name}` for test user `#{user.username}`\nuuid=#{user.id}\nHTTP #{response.code} #{error} #{body}"
+ end
+ repeatedly_try("/#{db_name}/_design/docs") do |body, response, error|
+ assert false, "Could not find design docs for user db `#{db_name}` for test user `#{user.username}`\nuuid=#{user.id}\nHTTP #{response.code} #{error} #{body}"
+ end
+ end
+
+ #
+ # tries the URL repeatedly, giving up and yielding the last response if
+ # no try returned a 200 http status code.
+ #
+ def repeatedly_try(url, &block)
+ last_body, last_response, last_error = nil
+ 3.times do
+ sleep 0.2
+ get(couchdb_url(url)) do |body, response, error|
+ last_body, last_response, last_error = body, response, error
+ # After moving to couchdb, the webapp user is not allowed to read user dbs,
+ # but the return code for non-existent databases is 404. See #7674
+ if response.code.to_i == 401
+ return
+ end
+ end
+ sleep 1
+ end
+ yield last_body, last_response, last_error
+ return
+ end
+
+end
diff --git a/vagrant/add-pixelated.sh b/vagrant/add-pixelated.sh
new file mode 100755
index 00000000..f9908947
--- /dev/null
+++ b/vagrant/add-pixelated.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+#
+# adds pixelated-server to the node
+
+. /vagrant/vagrant/vagrant.config
+
+cd "$PROVIDERDIR"
+
+if ! git submodule status files/puppet/modules/pixelated > /dev/null 2>&1; then
+ git submodule add https://github.com/pixelated/puppet-pixelated.git files/puppet/modules/pixelated
+fi
+
+echo '{}' > services/pixelated.json
+[ -d files/puppet/modules/custom/manifests ] || mkdir -p files/puppet/modules/custom/manifests
+echo 'class custom { include ::pixelated}' > files/puppet/modules/custom/manifests/init.pp
+
+$LEAP $OPTS -v 2 deploy
+
+echo '==============================================='
+echo 'testing the platform'
+echo '==============================================='
+
+$LEAP $OPTS -v 2 test --continue
+
+
+echo -e '\n===========================================================================================================\n\n'
+echo -e 'You are now ready to use your vagrant Pixelated provider.\n'
+
+echo -e 'The LEAP webapp is available at https://localhost:4443. Use it to register an account before using the Pixelated Useragent.\n'
+echo -e 'The Pixelated Useragent is available at https://localhost:8080\n'
+
+echo -e 'Please add an exception for both sites in your browser dialog to allow the self-signed certificate.\n'
diff --git a/vagrant/configure-leap.sh b/vagrant/configure-leap.sh
new file mode 100755
index 00000000..9ddee039
--- /dev/null
+++ b/vagrant/configure-leap.sh
@@ -0,0 +1,92 @@
+#!/bin/bash
+
+
+. /vagrant/vagrant/vagrant.config
+
+echo '==============================================='
+echo 'configuring leap'
+echo '==============================================='
+
+# purge $PROVIDERDIR so this script can be run multiple times
+[ -e $PROVIDERDIR ] && rm -rf $PROVIDERDIR
+
+mkdir -p $PROVIDERDIR
+chown ${USER}:${USER} ${PROVIDERDIR}
+cd $PROVIDERDIR
+
+$LEAP $OPTS new --contacts "$contacts" --domain "$provider_domain" --name "$provider_name" --platform=/vagrant .
+echo -e '\n@log = "./deploy.log"' >> Leapfile
+
+if [ ! -e /home/${USER}/.ssh/id_rsa ]; then
+ $SUDO ssh-keygen -f /home/${USER}/.ssh/id_rsa -P ''
+ [ -d /root/.ssh ] || mkdir /root/.ssh
+ cat /home/${USER}/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
+fi
+
+$SUDO mkdir -p ${PROVIDERDIR}/files/nodes/${NODE}
+sh -c "cat /etc/ssh/ssh_host_rsa_key.pub | cut -d' ' -f1,2 >> $PROVIDERDIR/files/nodes/$NODE/${NODE}_ssh.pub"
+chown ${USER}:${USER} ${PROVIDERDIR}/files/nodes/${NODE}/${NODE}_ssh.pub
+
+$LEAP $OPTS add-user --self
+$LEAP $OPTS cert ca
+$LEAP $OPTS cert csr
+$LEAP $OPTS node add $NODE ip_address:"$(facter ipaddress)" couch.mode:plain services:"$services" tags:production
+echo '{ "webapp": { "admins": ["testadmin"] } }' > services/webapp.json
+
+$LEAP $OPTS compile
+
+$GIT init
+$GIT add .
+$GIT commit -m'configured provider'
+
+$LEAP $OPTS node init $NODE
+if [ $? -eq 1 ]; then
+ echo 'node init failed'
+ exit 1
+fi
+
+# the couchrest gem does not currently install on jessie
+# https://leap.se/code/issues/7754
+# the workaround is to install rake as a gem
+gem install rake
+
+$LEAP $OPTS -v 2 deploy
+
+$GIT add .
+$GIT commit -m'initialized and deployed provider'
+
+# Vagrant: leap_mx fails to start on jessie
+# https://leap.se/code/issues/7755
+# Workaround: we stop and start leap-mx after deploy and
+# before testing
+
+service leap-mx stop
+service leap-mx start
+
+
+
+echo '==============================================='
+echo 'testing the platform'
+echo '==============================================='
+
+$LEAP $OPTS -v 2 test --continue
+
+echo '==============================================='
+echo 'setting node to demo-mode'
+echo '==============================================='
+postconf -e default_transport='error: in demo mode'
+
+# add users: testadmin and testuser with passwords "hallo123"
+curl -s -k https://localhost/1/users.json -d "user%5Blogin%5D=testuser&user%5Bpassword_salt%5D=7d4880237a038e0e&user%5Bpassword_verifier%5D=b98dc393afcd16e5a40fb57ce9cddfa6a978b84be326196627c111d426cada898cdaf3a6427e98b27daf4b0ed61d278bc856515aeceb2312e50c8f816659fcaa4460d839a1e2d7ffb867d32ac869962061368141c7571a53443d58dc84ca1fca34776894414c1090a93e296db6cef12c2cc3f7a991b05d49728ed358fd868286"
+curl -s -k https://localhost/1/users.json -d "user%5Blogin%5D=testadmin&user%5Bpassword_salt%5D=ece1c457014d8282&user%5Bpassword_verifier%5D=9654d93ab409edf4ff1543d07e08f321107c3fd00de05c646c637866a94f28b3eb263ea9129dacebb7291b3374cc6f0bf88eb3d231eb3a76eed330a0e8fd2a5c477ed2693694efc1cc23ae83c2ae351a21139701983dd595b6c3225a1bebd2a4e6122f83df87606f1a41152d9890e5a11ac3749b3bfcf4407fc83ef60b4ced68"
+
+echo -e '\n===========================================================================================================\n\n'
+echo -e 'You are now ready to use your local LEAP provider.\n'
+echo 'If you want to use the *Bitmask client* with your provider, please update your /etc/hosts with the following dns overrides:'
+
+$LEAP list --print ip_address,domain.full,dns.aliases | sed 's/^.* //' | sed 's/, null//g' | tr -d '\]\[",'
+
+echo 'Please see https://leap.se/en/docs/platform/tutorials/vagrant#use-the-bitmask-client-to-do-an-initial-soledad-sync for more details on how to use and test your LEAP provider.'
+echo -e "\nIf you don't want to use the Bitmask client, please ignore the above instructions.\n"
+echo -e 'The LEAP webapp is now available at https://localhost:4443\n'
+echo -e 'Please add an exception in your browser dialog to allow the self-signed certificate.\n'
diff --git a/vagrant/install-platform.pp b/vagrant/install-platform.pp
new file mode 100755
index 00000000..223853c1
--- /dev/null
+++ b/vagrant/install-platform.pp
@@ -0,0 +1,15 @@
+class {'apt': }
+Exec['update_apt'] -> Package <||>
+
+# install leap_cli from source, so it will work with the develop
+# branch of leap_platform
+class { '::leap::cli::install':
+ source => true,
+}
+
+file { [ '/srv/leap', '/srv/leap/configuration', '/var/log/leap' ]:
+ ensure => directory
+}
+
+# install prerequisites for configuring the provider
+include ::git
diff --git a/vagrant/vagrant.config b/vagrant/vagrant.config
new file mode 100644
index 00000000..e601488d
--- /dev/null
+++ b/vagrant/vagrant.config
@@ -0,0 +1,22 @@
+# provider config values used by vagrant provision scripts
+provider_domain='example.org'
+provider_name='Leap Example Provider'
+contacts="no-reply@$provider_domain"
+
+# services that get configured
+# note that the "openvpn" service does currently *not* work
+# in a vagrant setup,
+# see https://leap.se/en/docs/platform/troubleshooting/known-issues#Special.Environments
+# to speed things up, don't deploy the monitor service by default
+# services='webapp,mx,couchdb,soledad,monitor'
+services='webapp,mx,couchdb,soledad'
+
+# default vars used by vagrant provision scripts
+OPTS=''
+USER='vagrant'
+NODE='node1'
+SUDO="sudo -u ${USER}"
+PROVIDERDIR="/home/${USER}/leap/configuration"
+LEAP="$SUDO /usr/local/bin/leap"
+GIT="$SUDO git"
+