Diffstat (limited to 'puppet')
-rwxr-xr-xpuppet/bin/apply_on_node.sh30
-rw-r--r--puppet/hiera.yaml15
-rw-r--r--puppet/lib/puppet/parser/functions/create_resources_hash_from.rb116
-rw-r--r--puppet/lib/puppet/parser/functions/sorted_json.rb47
-rw-r--r--puppet/lib/puppet/parser/functions/sorted_yaml.rb400
-rw-r--r--puppet/manifests/site.pp60
-rw-r--r--puppet/modules/apt/.gitignore12
-rw-r--r--puppet/modules/apt/.gitlab-ci.yml12
-rw-r--r--puppet/modules/apt/Gemfile13
-rw-r--r--puppet/modules/apt/LICENSE674
-rw-r--r--puppet/modules/apt/README602
-rw-r--r--puppet/modules/apt/Rakefile19
-rw-r--r--puppet/modules/apt/files/02show_upgraded4
-rw-r--r--puppet/modules/apt/files/03clean4
-rw-r--r--puppet/modules/apt/files/03clean_vserver4
-rw-r--r--puppet/modules/apt/files/upgrade_initiator1
-rw-r--r--puppet/modules/apt/lib/facter/apt_running.rb7
-rw-r--r--puppet/modules/apt/lib/facter/debian_codename.rb42
-rw-r--r--puppet/modules/apt/lib/facter/debian_lts.rb16
-rw-r--r--puppet/modules/apt/lib/facter/debian_nextcodename.rb23
-rw-r--r--puppet/modules/apt/lib/facter/debian_nextrelease.rb23
-rw-r--r--puppet/modules/apt/lib/facter/debian_release.rb38
-rw-r--r--puppet/modules/apt/lib/facter/ubuntu_codename.rb8
-rw-r--r--puppet/modules/apt/lib/facter/ubuntu_nextcodename.rb20
-rw-r--r--puppet/modules/apt/lib/facter/util/debian.rb18
-rw-r--r--puppet/modules/apt/lib/facter/util/ubuntu.rb21
-rw-r--r--puppet/modules/apt/manifests/apt_conf.pp45
-rw-r--r--puppet/modules/apt/manifests/apticron.pp24
-rw-r--r--puppet/modules/apt/manifests/cron/base.pp20
-rw-r--r--puppet/modules/apt/manifests/cron/dist_upgrade.pp29
-rw-r--r--puppet/modules/apt/manifests/cron/download.pp27
-rw-r--r--puppet/modules/apt/manifests/dist_upgrade.pp9
-rw-r--r--puppet/modules/apt/manifests/dist_upgrade/initiator.pp23
-rw-r--r--puppet/modules/apt/manifests/dot_d_directories.pp15
-rw-r--r--puppet/modules/apt/manifests/dselect.pp11
-rw-r--r--puppet/modules/apt/manifests/init.pp150
-rw-r--r--puppet/modules/apt/manifests/key.pp13
-rw-r--r--puppet/modules/apt/manifests/key/plain.pp13
-rw-r--r--puppet/modules/apt/manifests/listchanges.pp19
-rw-r--r--puppet/modules/apt/manifests/params.pp22
-rw-r--r--puppet/modules/apt/manifests/preferences.pp20
-rw-r--r--puppet/modules/apt/manifests/preferences/absent.pp7
-rw-r--r--puppet/modules/apt/manifests/preferences_snippet.pp59
-rw-r--r--puppet/modules/apt/manifests/preseeded_package.pp21
-rw-r--r--puppet/modules/apt/manifests/proxy_client.pp9
-rw-r--r--puppet/modules/apt/manifests/reboot_required_notify.pp21
-rw-r--r--puppet/modules/apt/manifests/sources_list.pp40
-rw-r--r--puppet/modules/apt/manifests/unattended_upgrades.pp34
-rw-r--r--puppet/modules/apt/manifests/update.pp7
-rw-r--r--puppet/modules/apt/manifests/upgrade_package.pp31
-rw-r--r--puppet/modules/apt/spec/spec_helper.rb12
-rw-r--r--puppet/modules/apt/spec/unit/custom_facts_spec.rb86
-rw-r--r--puppet/modules/apt/templates/20proxy.erb5
-rw-r--r--puppet/modules/apt/templates/50unattended-upgrades.erb38
l---------puppet/modules/apt/templates/Debian/apticron_jessie.erb1
-rw-r--r--puppet/modules/apt/templates/Debian/apticron_lenny.erb50
l---------puppet/modules/apt/templates/Debian/apticron_sid.erb1
-rw-r--r--puppet/modules/apt/templates/Debian/apticron_squeeze.erb82
-rw-r--r--puppet/modules/apt/templates/Debian/apticron_wheezy.erb80
l---------puppet/modules/apt/templates/Debian/listchanges_jessie.erb1
-rw-r--r--puppet/modules/apt/templates/Debian/listchanges_lenny.erb7
l---------puppet/modules/apt/templates/Debian/listchanges_sid.erb1
l---------puppet/modules/apt/templates/Debian/listchanges_squeeze.erb1
l---------puppet/modules/apt/templates/Debian/listchanges_wheezy.erb1
-rw-r--r--puppet/modules/apt/templates/Debian/preferences_jessie.erb14
-rw-r--r--puppet/modules/apt/templates/Debian/preferences_lenny.erb25
-rw-r--r--puppet/modules/apt/templates/Debian/preferences_sid.erb10
-rw-r--r--puppet/modules/apt/templates/Debian/preferences_squeeze.erb30
-rw-r--r--puppet/modules/apt/templates/Debian/preferences_wheezy.erb20
-rw-r--r--puppet/modules/apt/templates/Debian/sources.list.erb76
l---------puppet/modules/apt/templates/Ubuntu/preferences_lucid.erb1
-rw-r--r--puppet/modules/apt/templates/Ubuntu/preferences_maverick.erb30
l---------puppet/modules/apt/templates/Ubuntu/preferences_oneiric.erb1
l---------puppet/modules/apt/templates/Ubuntu/preferences_precise.erb1
l---------puppet/modules/apt/templates/Ubuntu/preferences_utopic.erb1
l---------puppet/modules/apt/templates/Ubuntu/preferences_vivid.erb1
l---------puppet/modules/apt/templates/Ubuntu/preferences_wily.erb1
l---------puppet/modules/apt/templates/Ubuntu/preferences_xenial.erb1
-rw-r--r--puppet/modules/apt/templates/Ubuntu/sources.list.erb22
-rw-r--r--puppet/modules/apt/templates/preferences_snippet.erb4
-rw-r--r--puppet/modules/apt/templates/preferences_snippet_release.erb4
-rw-r--r--puppet/modules/clamav/files/01-leap.conf58
-rw-r--r--puppet/modules/clamav/files/clamav-daemon_default8
-rw-r--r--puppet/modules/clamav/files/clamav-milter_default14
-rw-r--r--puppet/modules/clamav/manifests/daemon.pp91
-rw-r--r--puppet/modules/clamav/manifests/freshclam.pp23
-rw-r--r--puppet/modules/clamav/manifests/init.pp8
-rw-r--r--puppet/modules/clamav/manifests/milter.pp50
-rw-r--r--puppet/modules/clamav/manifests/unofficial_sigs.pp23
-rw-r--r--puppet/modules/clamav/templates/clamav-milter.conf.erb28
-rw-r--r--puppet/modules/clamav/templates/local.pdb.erb1
-rw-r--r--puppet/modules/clamav/templates/whitelisted_addresses.erb5
-rw-r--r--puppet/modules/common/LICENSE674
-rw-r--r--puppet/modules/common/README44
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/basename.rb22
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/dirname.rb22
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/get_default.rb15
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/hostname.rb13
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/multi_source_template.rb29
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/prefix_with.rb9
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/re_escape.rb7
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/slash_escape.rb7
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/substitute.rb20
-rw-r--r--puppet/modules/common/lib/puppet/parser/functions/tfile.rb19
-rw-r--r--puppet/modules/common/manifests/module_dir.pp34
-rw-r--r--puppet/modules/common/manifests/module_file.pp37
-rw-r--r--puppet/modules/common/manifests/moduledir.pp18
-rw-r--r--puppet/modules/common/manifests/moduledir/common.pp4
-rw-r--r--puppet/modules/common/spec/spec.opts6
-rw-r--r--puppet/modules/common/spec/spec_helper.rb16
-rw-r--r--puppet/modules/common/spec/unit/parser/functions/tfile.rb54
-rw-r--r--puppet/modules/concat/CHANGELOG29
-rw-r--r--puppet/modules/concat/LICENSE14
-rw-r--r--puppet/modules/concat/Modulefile8
-rw-r--r--puppet/modules/concat/README.markdown112
-rw-r--r--puppet/modules/concat/Rakefile13
-rwxr-xr-xpuppet/modules/concat/files/concatfragments.sh129
-rw-r--r--puppet/modules/concat/files/null/.gitignore0
-rw-r--r--puppet/modules/concat/lib/facter/concat_basedir.rb5
-rw-r--r--puppet/modules/concat/manifests/fragment.pp49
-rw-r--r--puppet/modules/concat/manifests/init.pp178
-rw-r--r--puppet/modules/concat/manifests/setup.pp49
-rw-r--r--puppet/modules/concat/spec/defines/init_spec.rb20
-rw-r--r--puppet/modules/concat/spec/spec_helper.rb9
-rw-r--r--puppet/modules/git/files/config/CentOS/git-daemon26
-rw-r--r--puppet/modules/git/files/config/CentOS/git-daemon.vhosts27
-rw-r--r--puppet/modules/git/files/config/Debian/git-daemon22
-rw-r--r--puppet/modules/git/files/init.d/CentOS/git-daemon75
-rw-r--r--puppet/modules/git/files/init.d/Debian/git-daemon151
-rw-r--r--puppet/modules/git/files/web/gitweb.conf53
-rw-r--r--puppet/modules/git/files/xinetd.d/git16
-rw-r--r--puppet/modules/git/files/xinetd.d/git.disabled16
-rw-r--r--puppet/modules/git/files/xinetd.d/git.vhosts16
-rw-r--r--puppet/modules/git/manifests/base.pp7
-rw-r--r--puppet/modules/git/manifests/centos.pp2
-rw-r--r--puppet/modules/git/manifests/changes.pp33
-rw-r--r--puppet/modules/git/manifests/clone.pp60
-rw-r--r--puppet/modules/git/manifests/daemon.pp17
-rw-r--r--puppet/modules/git/manifests/daemon/base.pp31
-rw-r--r--puppet/modules/git/manifests/daemon/centos.pp19
-rw-r--r--puppet/modules/git/manifests/daemon/disable.pp33
-rw-r--r--puppet/modules/git/manifests/daemon/vhosts.pp10
-rw-r--r--puppet/modules/git/manifests/debian.pp6
-rw-r--r--puppet/modules/git/manifests/init.pp25
-rw-r--r--puppet/modules/git/manifests/svn.pp10
-rw-r--r--puppet/modules/git/manifests/web.pp20
-rw-r--r--puppet/modules/git/manifests/web/absent.pp17
-rw-r--r--puppet/modules/git/manifests/web/lighttpd.pp7
-rw-r--r--puppet/modules/git/manifests/web/repo.pp56
-rw-r--r--puppet/modules/git/manifests/web/repo/lighttpd.pp16
-rw-r--r--puppet/modules/git/templates/web/config31
-rw-r--r--puppet/modules/git/templates/web/lighttpd21
-rw-r--r--puppet/modules/haveged/manifests/init.pp16
-rw-r--r--puppet/modules/journald/manifests/init.pp7
-rw-r--r--puppet/modules/leap/manifests/cli/install.pp46
-rw-r--r--puppet/modules/leap/manifests/init.pp3
-rw-r--r--puppet/modules/leap/manifests/logfile.pp34
-rw-r--r--puppet/modules/leap/templates/rsyslog.erb5
-rw-r--r--puppet/modules/leap_mx/manifests/init.pp119
-rw-r--r--puppet/modules/leap_mx/templates/mx.conf.erb18
-rw-r--r--puppet/modules/lsb/manifests/base.pp3
-rw-r--r--puppet/modules/lsb/manifests/centos.pp5
-rw-r--r--puppet/modules/lsb/manifests/debian.pp6
-rw-r--r--puppet/modules/lsb/manifests/init.pp6
-rw-r--r--puppet/modules/ntp/.fixtures.yml5
-rw-r--r--puppet/modules/ntp/.gitignore3
-rw-r--r--puppet/modules/ntp/.nodeset.yml35
-rw-r--r--puppet/modules/ntp/.travis.yml40
-rw-r--r--puppet/modules/ntp/CHANGELOG61
-rw-r--r--puppet/modules/ntp/CONTRIBUTING.md9
-rw-r--r--puppet/modules/ntp/Gemfile19
-rw-r--r--puppet/modules/ntp/LICENSE202
-rw-r--r--puppet/modules/ntp/Modulefile11
-rw-r--r--puppet/modules/ntp/README.markdown215
-rw-r--r--puppet/modules/ntp/Rakefile2
-rw-r--r--puppet/modules/ntp/manifests/config.pp23
-rw-r--r--puppet/modules/ntp/manifests/init.pp58
-rw-r--r--puppet/modules/ntp/manifests/install.pp9
-rw-r--r--puppet/modules/ntp/manifests/params.pp116
-rw-r--r--puppet/modules/ntp/manifests/service.pp18
-rw-r--r--puppet/modules/ntp/spec/classes/ntp_spec.rb261
-rw-r--r--puppet/modules/ntp/spec/fixtures/modules/my_ntp/templates/ntp.conf.erb4
-rw-r--r--puppet/modules/ntp/spec/spec.opts6
-rw-r--r--puppet/modules/ntp/spec/spec_helper.rb1
-rw-r--r--puppet/modules/ntp/spec/spec_helper_system.rb26
-rw-r--r--puppet/modules/ntp/spec/system/basic_spec.rb13
-rw-r--r--puppet/modules/ntp/spec/system/class_spec.rb39
-rw-r--r--puppet/modules/ntp/spec/system/ntp_config_spec.rb35
-rw-r--r--puppet/modules/ntp/spec/system/ntp_install_spec.rb31
-rw-r--r--puppet/modules/ntp/spec/system/ntp_service_spec.rb25
-rw-r--r--puppet/modules/ntp/spec/system/preferred_servers_spec.rb20
-rw-r--r--puppet/modules/ntp/spec/system/restrict_spec.rb20
-rw-r--r--puppet/modules/ntp/spec/unit/puppet/provider/README.markdown4
-rw-r--r--puppet/modules/ntp/spec/unit/puppet/type/README.markdown4
-rw-r--r--puppet/modules/ntp/templates/ntp.conf.erb43
-rw-r--r--puppet/modules/ntp/tests/init.pp11
-rwxr-xr-xpuppet/modules/obfsproxy/files/obfsproxy_init93
-rw-r--r--puppet/modules/obfsproxy/files/obfsproxy_logrotate14
-rw-r--r--puppet/modules/obfsproxy/manifests/init.pp86
-rw-r--r--puppet/modules/obfsproxy/templates/etc_conf.erb11
-rw-r--r--puppet/modules/opendkim/manifests/init.pp67
-rw-r--r--puppet/modules/opendkim/templates/opendkim.conf45
-rw-r--r--puppet/modules/openvpn/.fixtures.yml6
-rw-r--r--puppet/modules/openvpn/.gitignore3
-rw-r--r--puppet/modules/openvpn/.rvmrc38
-rw-r--r--puppet/modules/openvpn/.travis.yml29
-rw-r--r--puppet/modules/openvpn/Gemfile7
-rw-r--r--puppet/modules/openvpn/Gemfile.lock36
-rw-r--r--puppet/modules/openvpn/LICENSE177
-rw-r--r--puppet/modules/openvpn/Modulefile11
-rw-r--r--puppet/modules/openvpn/Rakefile2
-rw-r--r--puppet/modules/openvpn/Readme.markdown54
-rw-r--r--puppet/modules/openvpn/Vagrantfile42
-rw-r--r--puppet/modules/openvpn/manifests/client.pp187
-rw-r--r--puppet/modules/openvpn/manifests/client_specific_config.pp79
-rw-r--r--puppet/modules/openvpn/manifests/config.pp52
-rw-r--r--puppet/modules/openvpn/manifests/init.pp43
-rw-r--r--puppet/modules/openvpn/manifests/install.pp46
-rw-r--r--puppet/modules/openvpn/manifests/params.pp37
-rw-r--r--puppet/modules/openvpn/manifests/server.pp233
-rw-r--r--puppet/modules/openvpn/manifests/service.pp36
-rw-r--r--puppet/modules/openvpn/spec/classes/openvpn_config_spec.rb15
-rw-r--r--puppet/modules/openvpn/spec/classes/openvpn_init_spec.rb9
-rw-r--r--puppet/modules/openvpn/spec/classes/openvpn_install_spec.rb11
-rw-r--r--puppet/modules/openvpn/spec/classes/openvpn_service_spec.rb13
-rw-r--r--puppet/modules/openvpn/spec/defines/openvpn_client_spec.rb88
-rw-r--r--puppet/modules/openvpn/spec/defines/openvpn_client_specific_config_spec.rb40
-rw-r--r--puppet/modules/openvpn/spec/defines/openvpn_server_spec.rb165
-rw-r--r--puppet/modules/openvpn/spec/spec_helper.rb2
-rw-r--r--puppet/modules/openvpn/templates/client.erb26
-rw-r--r--puppet/modules/openvpn/templates/client_specific_config.erb10
-rw-r--r--puppet/modules/openvpn/templates/etc-default-openvpn.erb20
-rw-r--r--puppet/modules/openvpn/templates/server.erb37
-rw-r--r--puppet/modules/openvpn/templates/vars.erb68
-rw-r--r--puppet/modules/openvpn/vagrant/client.pp5
-rw-r--r--puppet/modules/openvpn/vagrant/server.pp23
-rw-r--r--puppet/modules/postfwd/files/postfwd_default19
-rw-r--r--puppet/modules/postfwd/manifests/init.pp43
-rw-r--r--puppet/modules/postfwd/templates/postfwd.cf.erb28
-rw-r--r--puppet/modules/site_apache/files/conf.d/security55
-rw-r--r--puppet/modules/site_apache/files/include.d/ssl_common.inc7
-rw-r--r--puppet/modules/site_apache/manifests/common.pp30
-rw-r--r--puppet/modules/site_apache/manifests/common/tls.pp6
-rw-r--r--puppet/modules/site_apache/templates/vhosts.d/api.conf.erb48
-rw-r--r--puppet/modules/site_apache/templates/vhosts.d/common.conf.erb76
-rw-r--r--puppet/modules/site_apache/templates/vhosts.d/hidden_service.conf.erb55
-rw-r--r--puppet/modules/site_apt/files/Debian/51unattended-upgrades-leap6
-rw-r--r--puppet/modules/site_apt/files/keys/leap-archive.gpgbin0 -> 20188 bytes
-rw-r--r--puppet/modules/site_apt/files/keys/leap-experimental-archive.gpgbin0 -> 3423 bytes
-rw-r--r--puppet/modules/site_apt/manifests/dist_upgrade.pp17
-rw-r--r--puppet/modules/site_apt/manifests/init.pp55
-rw-r--r--puppet/modules/site_apt/manifests/leap_repo.pp16
-rw-r--r--puppet/modules/site_apt/manifests/preferences/check_mk.pp9
-rw-r--r--puppet/modules/site_apt/manifests/preferences/passenger.pp14
-rw-r--r--puppet/modules/site_apt/manifests/preferences/rsyslog.pp13
-rw-r--r--puppet/modules/site_apt/manifests/unattended_upgrades.pp20
-rw-r--r--puppet/modules/site_apt/templates/jessie/postfix.seeds1
-rw-r--r--puppet/modules/site_apt/templates/preferences.include_squeeze25
-rw-r--r--puppet/modules/site_apt/templates/secondary.list3
-rw-r--r--puppet/modules/site_apt/templates/wheezy/postfix.seeds1
-rw-r--r--puppet/modules/site_check_mk/files/agent/local_checks/all_hosts/run_node_tests.sh5
-rwxr-xr-xpuppet/modules/site_check_mk/files/agent/local_checks/couchdb/leap_couch_stats.sh122
-rwxr-xr-xpuppet/modules/site_check_mk/files/agent/local_checks/mx/check_leap_mx.sh33
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg28
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/leap_mx.cfg4
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/logwatch.cfg31
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/openvpn.cfg19
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg6
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/stunnel.cfg10
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/syslog/bigcouch.cfg5
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/syslog/couchdb.cfg2
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/syslog_header.cfg1
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg21
-rw-r--r--puppet/modules/site_check_mk/files/agent/logwatch/webapp.cfg8
-rwxr-xr-xpuppet/modules/site_check_mk/files/agent/nagios_plugins/check_unix_open_fds.pl322
-rwxr-xr-xpuppet/modules/site_check_mk/files/agent/plugins/mk_logwatch.1.2.4374
-rw-r--r--puppet/modules/site_check_mk/files/extra_service_conf.mk14
-rw-r--r--puppet/modules/site_check_mk/files/ignored_services.mk3
-rw-r--r--puppet/modules/site_check_mk/manifests/agent.pp35
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/couchdb.pp34
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/couchdb/bigcouch.pp49
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/couchdb/plain.pp23
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/haproxy.pp15
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/haveged.pp15
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/logwatch.pp36
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/logwatch/syslog.pp18
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/mrpe.pp24
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/mx.pp27
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/openvpn.pp10
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/package/nagios_plugins_contrib.pp5
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/package/perl_plugin.pp5
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/soledad.pp17
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/stunnel.pp9
-rw-r--r--puppet/modules/site_check_mk/manifests/agent/webapp.pp15
-rw-r--r--puppet/modules/site_check_mk/manifests/server.pp103
-rw-r--r--puppet/modules/site_check_mk/templates/extra_host_conf.mk13
-rw-r--r--puppet/modules/site_check_mk/templates/host_contactgroups.mk17
-rw-r--r--puppet/modules/site_check_mk/templates/hostgroups.mk17
-rw-r--r--puppet/modules/site_check_mk/templates/use_ssh.mk6
-rw-r--r--puppet/modules/site_config/files/xterm-title.sh8
-rw-r--r--puppet/modules/site_config/lib/facter/dhcp_enabled.rb22
-rw-r--r--puppet/modules/site_config/lib/facter/ip_interface.rb13
-rw-r--r--puppet/modules/site_config/manifests/caching_resolver.pp27
-rw-r--r--puppet/modules/site_config/manifests/default.pp71
-rw-r--r--puppet/modules/site_config/manifests/dhclient.pp40
-rw-r--r--puppet/modules/site_config/manifests/files.pp24
-rw-r--r--puppet/modules/site_config/manifests/hosts.pp44
-rw-r--r--puppet/modules/site_config/manifests/initial_firewall.pp64
-rw-r--r--puppet/modules/site_config/manifests/packages.pp32
-rw-r--r--puppet/modules/site_config/manifests/packages/build_essential.pp28
-rw-r--r--puppet/modules/site_config/manifests/packages/gnutls.pp5
-rw-r--r--puppet/modules/site_config/manifests/params.pp35
-rw-r--r--puppet/modules/site_config/manifests/remove.pp11
-rw-r--r--puppet/modules/site_config/manifests/remove/bigcouch.pp42
-rw-r--r--puppet/modules/site_config/manifests/remove/files.pp56
-rw-r--r--puppet/modules/site_config/manifests/remove/jessie.pp14
-rw-r--r--puppet/modules/site_config/manifests/remove/monitoring.pp13
-rw-r--r--puppet/modules/site_config/manifests/remove/tapicero.pp72
-rw-r--r--puppet/modules/site_config/manifests/remove/webapp.pp7
-rw-r--r--puppet/modules/site_config/manifests/resolvconf.pp14
-rw-r--r--puppet/modules/site_config/manifests/ruby.pp8
-rw-r--r--puppet/modules/site_config/manifests/ruby/dev.pp8
-rw-r--r--puppet/modules/site_config/manifests/setup.pp50
-rw-r--r--puppet/modules/site_config/manifests/shell.pp22
-rw-r--r--puppet/modules/site_config/manifests/slow.pp10
-rw-r--r--puppet/modules/site_config/manifests/sysctl.pp8
-rw-r--r--puppet/modules/site_config/manifests/syslog.pp62
-rw-r--r--puppet/modules/site_config/manifests/vagrant.pp11
-rw-r--r--puppet/modules/site_config/manifests/x509/ca.pp11
-rw-r--r--puppet/modules/site_config/manifests/x509/ca_bundle.pp17
-rw-r--r--puppet/modules/site_config/manifests/x509/cert.pp12
-rw-r--r--puppet/modules/site_config/manifests/x509/client_ca/ca.pp16
-rw-r--r--puppet/modules/site_config/manifests/x509/client_ca/key.pp16
-rw-r--r--puppet/modules/site_config/manifests/x509/commercial/ca.pp11
-rw-r--r--puppet/modules/site_config/manifests/x509/commercial/cert.pp15
-rw-r--r--puppet/modules/site_config/manifests/x509/commercial/key.pp11
-rw-r--r--puppet/modules/site_config/manifests/x509/key.pp11
-rw-r--r--puppet/modules/site_config/templates/hosts19
-rw-r--r--puppet/modules/site_config/templates/ipv4firewall_up.rules.erb14
-rw-r--r--puppet/modules/site_config/templates/ipv6firewall_up.rules.erb8
-rw-r--r--puppet/modules/site_config/templates/reload_dhclient.erb13
-rw-r--r--puppet/modules/site_couchdb/files/couchdb_scripts_defaults.conf4
-rw-r--r--puppet/modules/site_couchdb/files/designs/Readme.md14
-rw-r--r--puppet/modules/site_couchdb/files/designs/customers/Customer.json18
-rw-r--r--puppet/modules/site_couchdb/files/designs/identities/Identity.json34
-rw-r--r--puppet/modules/site_couchdb/files/designs/invite_codes/InviteCode.json22
-rw-r--r--puppet/modules/site_couchdb/files/designs/messages/Message.json18
-rw-r--r--puppet/modules/site_couchdb/files/designs/sessions/Session.json8
-rw-r--r--puppet/modules/site_couchdb/files/designs/shared/docs.json8
-rw-r--r--puppet/modules/site_couchdb/files/designs/shared/syncs.json11
-rw-r--r--puppet/modules/site_couchdb/files/designs/shared/transactions.json13
-rw-r--r--puppet/modules/site_couchdb/files/designs/tickets/Ticket.json50
-rw-r--r--puppet/modules/site_couchdb/files/designs/tokens/Token.json14
-rw-r--r--puppet/modules/site_couchdb/files/designs/users/User.json22
-rwxr-xr-xpuppet/modules/site_couchdb/files/leap_ca_daemon157
-rw-r--r--puppet/modules/site_couchdb/files/local.ini8
-rw-r--r--puppet/modules/site_couchdb/files/runit_config6
-rw-r--r--puppet/modules/site_couchdb/lib/puppet/parser/functions/rotated_db_name.rb24
-rw-r--r--puppet/modules/site_couchdb/manifests/add_users.pp57
-rw-r--r--puppet/modules/site_couchdb/manifests/backup.pp23
-rw-r--r--puppet/modules/site_couchdb/manifests/bigcouch.pp50
-rw-r--r--puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp8
-rw-r--r--puppet/modules/site_couchdb/manifests/bigcouch/compaction.pp8
-rw-r--r--puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp11
-rw-r--r--puppet/modules/site_couchdb/manifests/create_dbs.pp102
-rw-r--r--puppet/modules/site_couchdb/manifests/designs.pp46
-rw-r--r--puppet/modules/site_couchdb/manifests/init.pp81
-rw-r--r--puppet/modules/site_couchdb/manifests/logrotate.pp14
-rw-r--r--puppet/modules/site_couchdb/manifests/mirror.pp78
-rw-r--r--puppet/modules/site_couchdb/manifests/plain.pp14
-rw-r--r--puppet/modules/site_couchdb/manifests/setup.pp61
-rw-r--r--puppet/modules/site_couchdb/manifests/upload_design.pp14
-rw-r--r--puppet/modules/site_haproxy/files/haproxy-stats.cfg6
-rw-r--r--puppet/modules/site_haproxy/manifests/init.pp41
-rw-r--r--puppet/modules/site_haproxy/templates/couch.erb32
-rw-r--r--puppet/modules/site_haproxy/templates/haproxy.cfg.erb11
-rw-r--r--puppet/modules/site_mx/manifests/init.pp20
-rw-r--r--puppet/modules/site_nagios/files/configs/Debian/nagios.cfg1302
-rwxr-xr-xpuppet/modules/site_nagios/files/plugins/check_last_regex_in_log85
-rw-r--r--puppet/modules/site_nagios/manifests/add_host_services.pp32
-rw-r--r--puppet/modules/site_nagios/manifests/add_service.pp32
-rw-r--r--puppet/modules/site_nagios/manifests/init.pp13
-rw-r--r--puppet/modules/site_nagios/manifests/plugins.pp16
-rw-r--r--puppet/modules/site_nagios/manifests/server.pp97
-rw-r--r--puppet/modules/site_nagios/manifests/server/add_contacts.pp18
-rw-r--r--puppet/modules/site_nagios/manifests/server/apache.pp25
-rw-r--r--puppet/modules/site_nagios/manifests/server/contactgroup.pp8
-rw-r--r--puppet/modules/site_nagios/manifests/server/hostgroup.pp7
-rw-r--r--puppet/modules/site_nagios/manifests/server/icli.pp26
-rw-r--r--puppet/modules/site_nagios/templates/icli_aliases.erb7
-rw-r--r--puppet/modules/site_nickserver/manifests/init.pp178
-rw-r--r--puppet/modules/site_nickserver/templates/nickserver-proxy.conf.erb19
-rw-r--r--puppet/modules/site_nickserver/templates/nickserver.yml.erb19
-rw-r--r--puppet/modules/site_obfsproxy/README0
-rw-r--r--puppet/modules/site_obfsproxy/manifests/init.pp38
-rw-r--r--puppet/modules/site_openvpn/README20
-rw-r--r--puppet/modules/site_openvpn/manifests/dh_key.pp10
-rw-r--r--puppet/modules/site_openvpn/manifests/init.pp238
-rw-r--r--puppet/modules/site_openvpn/manifests/resolver.pp50
-rw-r--r--puppet/modules/site_openvpn/manifests/server_config.pp228
-rw-r--r--puppet/modules/site_openvpn/templates/add_gateway_ips.sh.erb11
-rw-r--r--puppet/modules/site_postfix/files/checks/received_anon2
-rw-r--r--puppet/modules/site_postfix/manifests/debug.pp9
-rw-r--r--puppet/modules/site_postfix/manifests/mx.pp152
-rw-r--r--puppet/modules/site_postfix/manifests/mx/checks.pp23
-rw-r--r--puppet/modules/site_postfix/manifests/mx/received_anon.pp13
-rw-r--r--puppet/modules/site_postfix/manifests/mx/rewrite_openpgp_header.pp11
-rw-r--r--puppet/modules/site_postfix/manifests/mx/smtp_auth.pp6
-rw-r--r--puppet/modules/site_postfix/manifests/mx/smtp_tls.pp43
-rw-r--r--puppet/modules/site_postfix/manifests/mx/smtpd_checks.pp36
-rw-r--r--puppet/modules/site_postfix/manifests/mx/smtpd_tls.pp69
-rw-r--r--puppet/modules/site_postfix/manifests/mx/static_aliases.pp88
-rw-r--r--puppet/modules/site_postfix/manifests/satellite.pp47
-rw-r--r--puppet/modules/site_postfix/templates/checks/helo_access.erb21
-rw-r--r--puppet/modules/site_postfix/templates/checks/rewrite_openpgp_headers.erb13
-rw-r--r--puppet/modules/site_postfix/templates/virtual-aliases.erb21
-rw-r--r--puppet/modules/site_rsyslog/templates/client.conf.erb134
-rw-r--r--puppet/modules/site_shorewall/files/Debian/shorewall.service23
-rw-r--r--puppet/modules/site_shorewall/manifests/defaults.pp86
-rw-r--r--puppet/modules/site_shorewall/manifests/dnat.pp19
-rw-r--r--puppet/modules/site_shorewall/manifests/dnat_rule.pp50
-rw-r--r--puppet/modules/site_shorewall/manifests/eip.pp92
-rw-r--r--puppet/modules/site_shorewall/manifests/ip_forward.pp10
-rw-r--r--puppet/modules/site_shorewall/manifests/monitor.pp8
-rw-r--r--puppet/modules/site_shorewall/manifests/mx.pp24
-rw-r--r--puppet/modules/site_shorewall/manifests/obfsproxy.pp25
-rw-r--r--puppet/modules/site_shorewall/manifests/service/http.pp13
-rw-r--r--puppet/modules/site_shorewall/manifests/service/https.pp12
-rw-r--r--puppet/modules/site_shorewall/manifests/service/smtp.pp13
-rw-r--r--puppet/modules/site_shorewall/manifests/service/webapp_api.pp23
-rw-r--r--puppet/modules/site_shorewall/manifests/soledad.pp23
-rw-r--r--puppet/modules/site_shorewall/manifests/sshd.pp31
-rw-r--r--puppet/modules/site_shorewall/manifests/stunnel/client.pp40
-rw-r--r--puppet/modules/site_shorewall/manifests/stunnel/server.pp22
-rw-r--r--puppet/modules/site_shorewall/manifests/tor.pp26
-rw-r--r--puppet/modules/site_shorewall/manifests/webapp.pp7
-rw-r--r--puppet/modules/site_squid_deb_proxy/manifests/client.pp5
-rw-r--r--puppet/modules/site_sshd/manifests/authorized_keys.pp34
-rw-r--r--puppet/modules/site_sshd/manifests/deploy_authorized_keys.pp9
-rw-r--r--puppet/modules/site_sshd/manifests/init.pp82
-rw-r--r--puppet/modules/site_sshd/manifests/mosh.pp21
-rw-r--r--puppet/modules/site_sshd/templates/authorized_keys.erb10
-rw-r--r--puppet/modules/site_sshd/templates/ssh_config.erb40
-rw-r--r--puppet/modules/site_sshd/templates/ssh_known_hosts.erb7
-rw-r--r--puppet/modules/site_static/README3
-rw-r--r--puppet/modules/site_static/manifests/domain.pp33
-rw-r--r--puppet/modules/site_static/manifests/init.pp72
-rw-r--r--puppet/modules/site_static/manifests/location.pp36
-rw-r--r--puppet/modules/site_static/templates/amber.erb13
-rw-r--r--puppet/modules/site_static/templates/apache.conf.erb88
-rw-r--r--puppet/modules/site_static/templates/rack.erb19
-rw-r--r--puppet/modules/site_stunnel/manifests/client.pp64
-rw-r--r--puppet/modules/site_stunnel/manifests/clients.pp23
-rw-r--r--puppet/modules/site_stunnel/manifests/init.pp48
-rw-r--r--puppet/modules/site_stunnel/manifests/override_service.pp18
-rw-r--r--puppet/modules/site_stunnel/manifests/servers.pp51
-rw-r--r--puppet/modules/site_tor/manifests/disable_exit.pp7
-rw-r--r--puppet/modules/site_tor/manifests/init.pp45
-rw-r--r--puppet/modules/site_webapp/files/server-status.conf26
-rw-r--r--puppet/modules/site_webapp/manifests/apache.pp28
-rw-r--r--puppet/modules/site_webapp/manifests/common_vhost.pp18
-rw-r--r--puppet/modules/site_webapp/manifests/couchdb.pp52
-rw-r--r--puppet/modules/site_webapp/manifests/cron.pp37
-rw-r--r--puppet/modules/site_webapp/manifests/hidden_service.pp52
-rw-r--r--puppet/modules/site_webapp/manifests/init.pp179
-rw-r--r--puppet/modules/site_webapp/templates/config.yml.erb36
-rw-r--r--puppet/modules/site_webapp/templates/couchdb.admin.yml.erb9
-rw-r--r--puppet/modules/site_webapp/templates/couchdb.yml.erb9
-rw-r--r--puppet/modules/soledad/manifests/client.pp16
-rw-r--r--puppet/modules/soledad/manifests/common.pp8
-rw-r--r--puppet/modules/soledad/manifests/server.pp104
-rw-r--r--puppet/modules/soledad/templates/default-soledad.erb5
-rw-r--r--puppet/modules/soledad/templates/soledad-server.conf.erb12
-rw-r--r--puppet/modules/sshd/.fixtures.yml3
-rw-r--r--puppet/modules/sshd/.gitignore4
-rw-r--r--puppet/modules/sshd/.rspec4
-rw-r--r--puppet/modules/sshd/.travis.yml27
-rw-r--r--puppet/modules/sshd/Gemfile14
-rw-r--r--puppet/modules/sshd/Gemfile.lock116
-rw-r--r--puppet/modules/sshd/LICENSE674
-rw-r--r--puppet/modules/sshd/Modulefile10
-rw-r--r--puppet/modules/sshd/Puppetfile3
-rw-r--r--puppet/modules/sshd/Puppetfile.lock8
-rw-r--r--puppet/modules/sshd/README.md247
-rw-r--r--puppet/modules/sshd/Rakefile16
-rw-r--r--puppet/modules/sshd/files/autossh.init.d164
-rw-r--r--puppet/modules/sshd/lib/facter/ssh_version.rb5
-rw-r--r--puppet/modules/sshd/lib/puppet/parser/functions/ssh_keygen.rb30
-rw-r--r--puppet/modules/sshd/manifests/autossh.pp40
-rw-r--r--puppet/modules/sshd/manifests/base.pp41
-rw-r--r--puppet/modules/sshd/manifests/client.pp22
-rw-r--r--puppet/modules/sshd/manifests/client/base.pp15
-rw-r--r--puppet/modules/sshd/manifests/client/debian.pp5
-rw-r--r--puppet/modules/sshd/manifests/client/linux.pp5
-rw-r--r--puppet/modules/sshd/manifests/debian.pp13
-rw-r--r--puppet/modules/sshd/manifests/gentoo.pp5
-rw-r--r--puppet/modules/sshd/manifests/init.pp92
-rw-r--r--puppet/modules/sshd/manifests/libssh2.pp7
-rw-r--r--puppet/modules/sshd/manifests/libssh2/devel.pp7
-rw-r--r--puppet/modules/sshd/manifests/linux.pp8
-rw-r--r--puppet/modules/sshd/manifests/nagios.pp24
-rw-r--r--puppet/modules/sshd/manifests/openbsd.pp8
-rw-r--r--puppet/modules/sshd/manifests/redhat.pp5
-rw-r--r--puppet/modules/sshd/manifests/ssh_authorized_key.pp85
-rw-r--r--puppet/modules/sshd/manifests/sshkey.pp21
-rw-r--r--puppet/modules/sshd/spec/classes/client_spec.rb42
-rw-r--r--puppet/modules/sshd/spec/classes/init_spec.rb122
-rw-r--r--puppet/modules/sshd/spec/defines/ssh_authorized_key_spec.rb45
-rw-r--r--puppet/modules/sshd/spec/functions/ssh_keygen_spec.rb116
-rw-r--r--puppet/modules/sshd/spec/spec_helper.rb21
-rw-r--r--puppet/modules/sshd/spec/spec_helper_system.rb25
l---------puppet/modules/sshd/templates/sshd_config/CentOS_5.erb1
-rw-r--r--puppet/modules/sshd/templates/sshd_config/CentOS_6.erb172
-rw-r--r--puppet/modules/sshd/templates/sshd_config/CentOS_7.erb186
-rw-r--r--puppet/modules/sshd/templates/sshd_config/Debian_jessie.erb124
-rw-r--r--puppet/modules/sshd/templates/sshd_config/Debian_sid.erb124
-rw-r--r--puppet/modules/sshd/templates/sshd_config/Debian_squeeze.erb127
-rw-r--r--puppet/modules/sshd/templates/sshd_config/Debian_wheezy.erb132
-rw-r--r--puppet/modules/sshd/templates/sshd_config/FreeBSD.erb168
-rw-r--r--puppet/modules/sshd/templates/sshd_config/Gentoo.erb164
-rw-r--r--puppet/modules/sshd/templates/sshd_config/OpenBSD.erb144
-rw-r--r--puppet/modules/sshd/templates/sshd_config/Ubuntu.erb133
-rw-r--r--puppet/modules/sshd/templates/sshd_config/Ubuntu_lucid.erb136
l---------puppet/modules/sshd/templates/sshd_config/Ubuntu_oneiric.erb1
l---------puppet/modules/sshd/templates/sshd_config/Ubuntu_precise.erb1
l---------puppet/modules/sshd/templates/sshd_config/XenServer_xenenterprise.erb1
-rw-r--r--puppet/modules/templatewlv/Modulefile11
-rw-r--r--puppet/modules/templatewlv/README.md21
-rw-r--r--puppet/modules/templatewlv/lib/puppet/parser/functions/templatewlv.rb41
-rw-r--r--puppet/modules/templatewlv/lib/puppet/parser/templatewrapperwlv.rb39
-rw-r--r--puppet/modules/try/README.md13
-rw-r--r--puppet/modules/try/manifests/file.pp114
-rw-r--r--puppet/modules/try/manifests/init.pp3
533 files changed, 23045 insertions, 0 deletions
diff --git a/puppet/bin/apply_on_node.sh b/puppet/bin/apply_on_node.sh
new file mode 100755
index 00000000..09e5b035
--- /dev/null
+++ b/puppet/bin/apply_on_node.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+# Script to use on a node for debugging
+# Usage: ./apply_on_node.sh <puppet parameters>
+#
+# Example: ./apply_on_node.sh --debug --verbose
+
+ROOTDIR='/srv/leap'
+PLATFORM="$ROOTDIR"
+MODULEPATH="$PLATFORM/puppet/modules"
+LOG=/var/log/leap.log
+
+# example tags to use
+#TAGS='--tags=leap_base,leap_service,leap_slow'
+#TAGS='--tags=leap_base,leap_slow'
+#TAGS='--tags=leap_base,leap_service'
+
+#######
+# Setup
+#######
+
+puppet apply -v --confdir $PLATFORM/puppet --libdir $PLATFORM/puppet/lib --modulepath=$MODULEPATH $PLATFORM/puppet/manifests/setup.pp $TAGS "$@" 2>&1 | tee $LOG
+
+#########
+# site.pp
+#########
+
+puppet apply -v --confdir $PLATFORM/puppet --libdir $PLATFORM/puppet/lib --modulepath=$MODULEPATH $PLATFORM/puppet/manifests/site.pp $TAGS "$@" 2>&1 | tee $LOG
+
+
diff --git a/puppet/hiera.yaml b/puppet/hiera.yaml
new file mode 100644
index 00000000..93448e23
--- /dev/null
+++ b/puppet/hiera.yaml
@@ -0,0 +1,15 @@
+---
+:backends:
+ - yaml
+ - puppet
+
+:logger: console
+
+:yaml:
+ :datadir: /etc/leap
+
+:hierarchy:
+ - hiera
+
+:puppet:
+ :datasource: data
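
With this configuration, Hiera reads node data from /etc/leap (the single 'hiera' level resolves to /etc/leap/hiera.yaml) and falls back to data bound in Puppet classes via the puppet backend. A minimal lookup sketch follows; the 'domain' key and the example values are hypothetical, while 'services' is the key that site.pp below actually reads:

    # /etc/leap/hiera.yaml is assumed to contain something like:
    #   services:
    #     - webapp
    #     - couchdb
    #   domain: 'node1.example.org'
    $services = hiera('services', [])   # array lookup with an empty default
    $domain   = hiera('domain')         # plain lookup; fails if the key is missing
    notice("deploying ${::fqdn} (${domain}) with services: ${services}")
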
diff --git a/puppet/lib/puppet/parser/functions/create_resources_hash_from.rb b/puppet/lib/puppet/parser/functions/create_resources_hash_from.rb
new file mode 100644
index 00000000..47d0df9c
--- /dev/null
+++ b/puppet/lib/puppet/parser/functions/create_resources_hash_from.rb
@@ -0,0 +1,116 @@
+#
+# create_resources_hash_from.rb
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Puppet::Parser::Functions
+ newfunction(:create_resources_hash_from, :type => :rvalue, :doc => <<-EOS
+Given:
+ A formatted string (to use as the resource name)
+ An array to loop through (because puppet cannot loop)
+ A hash defining the parameters for a resource
+ And optionally a hash of parameter names to add to the resource and an
+ associated formatted string that should be configured with the current
+ element of the loop array
+
+This function will return a hash of hashes that can be used with the
+create_resources function.
+
+*Examples:*
+ $allowed_hosts = ['10.0.0.0/8', '192.168.0.0/24']
+ $resource_name = "100 allow %s to apache on ports 80"
+ $my_resource_hash = {
+ 'proto' => 'tcp',
+ 'action' => 'accept',
+ 'dport' => 80
+ }
+ $dynamic_parameters = {
+ 'source' => '%s'
+ }
+
+ $created_resource_hash = create_resources_hash_from($resource_name, $allowed_hosts, $my_resource_hash, $dynamic_parameters)
+
+$created_resource_hash would equal:
+ {
+ '100 allow 10.0.0.0/8 to apache on ports 80' => {
+ 'proto' => 'tcp',
+ 'action' => 'accept',
+ 'dport' => 80,
+ 'source' => '10.0.0.0/8'
+ },
+ '100 allow 192.168.0.0/24 to apache on ports 80' => {
+ 'proto' => 'tcp',
+ 'action' => 'accept',
+ 'dport' => 80,
+ 'source' => '192.168.0.0/24'
+ }
+ }
+
+$created_resource_hash could then be used with create_resources
+
+ create_resources(firewall, $created_resource_hash)
+
+To create a bunch of resources in a way that would only otherwise be possible
+with a loop of some description.
+ EOS
+ ) do |arguments|
+
+ raise Puppet::ParseError, "create_resources_hash_from(): Wrong number of arguments " +
+ "given (#{arguments.size} for 3 or 4)" if arguments.size < 3 or arguments.size > 4
+
+ formatted_string = arguments[0]
+
+ unless formatted_string.is_a?(String)
+ raise(Puppet::ParseError, 'create_resources_hash_from(): first argument must be a string')
+ end
+
+ loop_array = arguments[1]
+
+ unless loop_array.is_a?(Array)
+ raise(Puppet::ParseError, 'create_resources_hash_from(): second argument must be an array')
+ end
+
+ resource_hash = arguments[2]
+ unless resource_hash.is_a?(Hash)
+ raise(Puppet::ParseError, 'create_resources_hash_from(): third argument must be a hash')
+ end
+
+ if arguments.size == 4
+ dynamic_parameters = arguments[3]
+ unless dynamic_parameters.is_a?(Hash)
+ raise(Puppet::ParseError, 'create_resources_hash_from(): fourth argument must be a hash')
+ end
+ end
+
+ result = {}
+
+ loop_array.each do |i|
+ my_resource_hash = resource_hash.clone
+ if dynamic_parameters
+ dynamic_parameters.each do |param, value|
+ if my_resource_hash.member?(param)
+ raise(Puppet::ParseError, "create_resources_hash_from(): dynamic_parameter '#{param}' already exists in resource hash")
+ end
+ my_resource_hash[param] = sprintf(value,[i])
+ end
+ end
+ result[sprintf(formatted_string,[i])] = my_resource_hash
+ end
+
+ result
+ end
+end
+
+# vim: set ts=2 sw=2 et :
+# encoding: utf-8
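
Tying the docstring above together, a typical call from a manifest looks roughly like this; 'my_firewall::rule' is a hypothetical defined type, and any resource type accepted by create_resources works the same way:

    $allowed_hosts = ['10.0.0.0/8', '192.168.0.0/24']
    $defaults      = { 'proto' => 'tcp', 'action' => 'accept', 'dport' => 80 }
    $dynamic       = { 'source' => '%s' }   # '%s' is replaced per array element
    $rules = create_resources_hash_from(
      '100 allow %s to apache on port 80',
      $allowed_hosts,
      $defaults,
      $dynamic
    )
    # one resource per host, each with its own 'source' filled in
    create_resources('my_firewall::rule', $rules)
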
diff --git a/puppet/lib/puppet/parser/functions/sorted_json.rb b/puppet/lib/puppet/parser/functions/sorted_json.rb
new file mode 100644
index 00000000..605da00e
--- /dev/null
+++ b/puppet/lib/puppet/parser/functions/sorted_json.rb
@@ -0,0 +1,47 @@
+#
+# Written by Gavin Mogan, from https://gist.github.com/halkeye/2287885
+# Put in the public domain by the author.
+#
+
+require 'json'
+
+def sorted_json(obj)
+ case obj
+ when String, Fixnum, Float, TrueClass, FalseClass, NilClass
+ return obj.to_json
+ when Array
+ arrayRet = []
+ obj.each do |a|
+ arrayRet.push(sorted_json(a))
+ end
+ return "[" << arrayRet.join(',') << "]";
+ when Hash
+ ret = []
+ obj.keys.sort.each do |k|
+ ret.push(k.to_json << ":" << sorted_json(obj[k]))
+ end
+ return "{" << ret.join(",") << "}";
+ else
+ raise Exception, "Unable to handle object of type <%s>" % obj.class.to_s
+ end
+end
+
+module Puppet::Parser::Functions
+ newfunction(:sorted_json, :type => :rvalue, :doc => <<-EOS
+This function takes data and returns it serialized as JSON, with hash keys sorted
+
+*Examples:*
+
+ sorted_json({'key'=>'value'})
+
+Would return: {'key':'value'}
+ EOS
+ ) do |arguments|
+ raise(Puppet::ParseError, "sorted_json(): Wrong number of arguments " +
+ "given (#{arguments.size} for 1)") if arguments.size != 1
+
+ json = arguments[0]
+ return sorted_json(json)
+ end
+end
+
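
The point of sorting the keys is determinism: hash key order is not guaranteed to be stable across runs (notably under Ruby 1.8, which Puppet 2.7/3 still supported), so unsorted JSON could make Puppet rewrite otherwise unchanged files on every agent run. A minimal sketch of the intended use, with a hypothetical path and data:

    $settings = { 'port' => 4000, 'hosts' => ['node1', 'node2'], 'tls' => true }
    file { '/etc/example/settings.json':
      content => sorted_json($settings),
      owner   => 'root',
      mode    => '0644',
    }
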
diff --git a/puppet/lib/puppet/parser/functions/sorted_yaml.rb b/puppet/lib/puppet/parser/functions/sorted_yaml.rb
new file mode 100644
index 00000000..46cd46ce
--- /dev/null
+++ b/puppet/lib/puppet/parser/functions/sorted_yaml.rb
@@ -0,0 +1,400 @@
+# encoding: UTF-8
+#
+# provides sorted_yaml() function, using Ya2YAML.
+# see https://github.com/afunai/ya2yaml
+#
+
+class Ya2YAML
+ #
+ # Author:: Akira FUNAI
+ # Copyright:: Copyright (c) 2006-2010 Akira FUNAI
+ # License:: MIT License
+ #
+
+ def initialize(opts = {})
+ options = opts.dup
+ options[:indent_size] = 2 if options[:indent_size].to_i <= 0
+ options[:minimum_block_length] = 0 if options[:minimum_block_length].to_i <= 0
+ options.update(
+ {
+ :printable_with_syck => true,
+ :escape_b_specific => true,
+ :escape_as_utf8 => true,
+ }
+ ) if options[:syck_compatible]
+
+ @options = options
+ end
+
+ def _ya2yaml(obj)
+ #raise 'set $KCODE to "UTF8".' if (RUBY_VERSION < '1.9.0') && ($KCODE != 'UTF8')
+ if (RUBY_VERSION < '1.9.0')
+ $KCODE = 'UTF8'
+ end
+ '--- ' + emit(obj, 1) + "\n"
+ rescue SystemStackError
+ raise ArgumentError, "ya2yaml can't handle circular references"
+ end
+
+ private
+
+ def emit(obj, level)
+ case obj
+ when Array
+ if (obj.length == 0)
+ '[]'
+ else
+ indent = "\n" + s_indent(level - 1)
+ ###
+ ### NOTE: a minor modification to normal Ya2YAML...
+ ### We want arrays to be output in sorted order, not just
+ ### Hashes.
+ ###
+ #obj.collect {|o|
+ # indent + '- ' + emit(o, level + 1)
+ #}.join('')
+ obj.sort {|a,b| a.to_s <=> b.to_s}.collect {|o|
+ indent + '- ' + emit(o, level + 1)
+ }.join('')
+ end
+ when Hash
+ if (obj.length == 0)
+ '{}'
+ else
+ indent = "\n" + s_indent(level - 1)
+ hash_order = @options[:hash_order]
+ if (hash_order && level == 1)
+ hash_keys = obj.keys.sort {|x, y|
+ x_order = hash_order.index(x) ? hash_order.index(x) : Float::MAX
+ y_order = hash_order.index(y) ? hash_order.index(y) : Float::MAX
+ o = (x_order <=> y_order)
+ (o != 0) ? o : (x.to_s <=> y.to_s)
+ }
+ elsif @options[:preserve_order]
+ hash_keys = obj.keys
+ else
+ hash_keys = obj.keys.sort {|x, y| x.to_s <=> y.to_s }
+ end
+ hash_keys.collect {|k|
+ key = emit(k, level + 1)
+ if (
+ is_one_plain_line?(key) ||
+ key =~ /\A(#{REX_BOOL}|#{REX_FLOAT}|#{REX_INT}|#{REX_NULL})\z/x
+ )
+ indent + key + ': ' + emit(obj[k], level + 1)
+ else
+ indent + '? ' + key +
+ indent + ': ' + emit(obj[k], level + 1)
+ end
+ }.join('')
+ end
+ when NilClass
+ '~'
+ when String
+ emit_string(obj, level)
+ when TrueClass, FalseClass
+ obj.to_s
+ when Fixnum, Bignum, Float
+ obj.to_s
+ when Date
+ obj.to_s
+ when Time
+ offset = obj.gmtoff
+ off_hm = sprintf(
+ '%+.2d:%.2d',
+ (offset / 3600.0).to_i,
+ (offset % 3600.0) / 60
+ )
+ u_sec = (obj.usec != 0) ? sprintf(".%.6d", obj.usec) : ''
+ obj.strftime("%Y-%m-%d %H:%M:%S#{u_sec} #{off_hm}")
+ when Symbol
+ '!ruby/symbol ' + emit_string(obj.to_s, level)
+ when Range
+ '!ruby/range ' + obj.to_s
+ when Regexp
+ '!ruby/regexp ' + obj.inspect
+ else
+ case
+ when obj.is_a?(Struct)
+ struct_members = {}
+ obj.each_pair{|k, v| struct_members[k.to_s] = v }
+ '!ruby/struct:' + obj.class.to_s.sub(/^(Struct::(.+)|.*)$/, '\2') + ' ' +
+ emit(struct_members, level + 1)
+ else
+ # serialized as a generic object
+ object_members = {}
+ obj.instance_variables.each{|k, v|
+ object_members[k.to_s.sub(/^@/, '')] = obj.instance_variable_get(k)
+ }
+ '!ruby/object:' + obj.class.to_s + ' ' +
+ emit(object_members, level + 1)
+ end
+ end
+ end
+
+ def emit_string(str, level)
+ (is_string, is_printable, is_one_line, is_one_plain_line) = string_type(str)
+ if is_string
+ if is_printable
+ if is_one_plain_line
+ emit_simple_string(str, level)
+ else
+ (is_one_line || str.length < @options[:minimum_block_length]) ?
+ emit_quoted_string(str, level) :
+ emit_block_string(str, level)
+ end
+ else
+ emit_quoted_string(str, level)
+ end
+ else
+ emit_base64_binary(str, level)
+ end
+ end
+
+ def emit_simple_string(str, level)
+ str
+ end
+
+ def emit_block_string(str, level)
+ str = normalize_line_break(str)
+
+ indent = s_indent(level)
+ indentation_indicator = (str =~ /\A /) ? indent.size.to_s : ''
+ str =~ /(#{REX_NORMAL_LB}*)\z/
+ chomping_indicator = case $1.length
+ when 0
+ '-'
+ when 1
+ ''
+ else
+ '+'
+ end
+
+ str.chomp!
+ str.gsub!(/#{REX_NORMAL_LB}/) {
+ $1 + indent
+ }
+ '|' + indentation_indicator + chomping_indicator + "\n" + indent + str
+ end
+
+ def emit_quoted_string(str, level)
+ str = yaml_escape(normalize_line_break(str))
+ if (str.length < @options[:minimum_block_length])
+ str.gsub!(/#{REX_NORMAL_LB}/) { ESCAPE_SEQ_LB[$1] }
+ else
+ str.gsub!(/#{REX_NORMAL_LB}$/) { ESCAPE_SEQ_LB[$1] }
+ str.gsub!(/(#{REX_NORMAL_LB}+)(.)/) {
+ trail_c = $3
+ $1 + trail_c.sub(/([\t ])/) { ESCAPE_SEQ_WS[$1] }
+ }
+ indent = s_indent(level)
+ str.gsub!(/#{REX_NORMAL_LB}/) {
+ ESCAPE_SEQ_LB[$1] + "\\\n" + indent
+ }
+ end
+ '"' + str + '"'
+ end
+
+ def emit_base64_binary(str, level)
+ indent = "\n" + s_indent(level)
+ base64 = [str].pack('m')
+ '!binary |' + indent + base64.gsub(/\n(?!\z)/, indent)
+ end
+
+ def string_type(str)
+ if str.respond_to?(:encoding) && (!str.valid_encoding? || str.encoding == Encoding::ASCII_8BIT)
+ return false, false, false, false
+ end
+ (ucs_codes = str.unpack('U*')) rescue (
+ # ArgumentError -> binary data
+ return false, false, false, false
+ )
+ if (
+ @options[:printable_with_syck] &&
+ str =~ /\A#{REX_ANY_LB}* | #{REX_ANY_LB}*\z|#{REX_ANY_LB}{2}\z/
+ )
+ # detour Syck bug
+ return true, false, nil, false
+ end
+ ucs_codes.each {|ucs_code|
+ return true, false, nil, false unless is_printable?(ucs_code)
+ }
+ return true, true, is_one_line?(str), is_one_plain_line?(str)
+ end
+
+ def is_printable?(ucs_code)
+ # YAML 1.1 / 4.1.1.
+ (
+ [0x09, 0x0a, 0x0d, 0x85].include?(ucs_code) ||
+ (ucs_code <= 0x7e && ucs_code >= 0x20) ||
+ (ucs_code <= 0xd7ff && ucs_code >= 0xa0) ||
+ (ucs_code <= 0xfffd && ucs_code >= 0xe000) ||
+ (ucs_code <= 0x10ffff && ucs_code >= 0x10000)
+ ) &&
+ !(
+ # treat LS/PS as non-printable characters
+ @options[:escape_b_specific] &&
+ (ucs_code == 0x2028 || ucs_code == 0x2029)
+ )
+ end
+
+ def is_one_line?(str)
+ str !~ /#{REX_ANY_LB}(?!\z)/
+ end
+
+ def is_one_plain_line?(str)
+ # YAML 1.1 / 4.6.11.
+ str !~ /^([\-\?:,\[\]\{\}\#&\*!\|>'"%@`\s]|---|\.\.\.)/ &&
+ str !~ /[:\#\s\[\]\{\},]/ &&
+ str !~ /#{REX_ANY_LB}/ &&
+ str !~ /^(#{REX_BOOL}|#{REX_FLOAT}|#{REX_INT}|#{REX_MERGE}
+ |#{REX_NULL}|#{REX_TIMESTAMP}|#{REX_VALUE})$/x
+ end
+
+ def s_indent(level)
+ # YAML 1.1 / 4.2.2.
+ ' ' * (level * @options[:indent_size])
+ end
+
+ def normalize_line_break(str)
+ # YAML 1.1 / 4.1.4.
+ str.gsub(/(#{REX_CRLF}|#{REX_CR}|#{REX_NEL})/, "\n")
+ end
+
+ def yaml_escape(str)
+ # YAML 1.1 / 4.1.6.
+ str.gsub(/[^a-zA-Z0-9]/u) {|c|
+ ucs_code, = (c.unpack('U') rescue [??])
+ case
+ when ESCAPE_SEQ[c]
+ ESCAPE_SEQ[c]
+ when is_printable?(ucs_code)
+ c
+ when @options[:escape_as_utf8]
+ c.respond_to?(:bytes) ?
+ c.bytes.collect {|b| '\\x%.2x' % b }.join :
+ '\\x' + c.unpack('H2' * c.size).join('\\x')
+ when ucs_code == 0x2028 || ucs_code == 0x2029
+ ESCAPE_SEQ_LB[c]
+ when ucs_code <= 0x7f
+ sprintf('\\x%.2x', ucs_code)
+ when ucs_code <= 0xffff
+ sprintf('\\u%.4x', ucs_code)
+ else
+ sprintf('\\U%.8x', ucs_code)
+ end
+ }
+ end
+
+ module Constants
+ UCS_0X85 = [0x85].pack('U') # c285@UTF8 Unicode next line
+ UCS_0XA0 = [0xa0].pack('U') # c2a0@UTF8 Unicode non-breaking space
+ UCS_0X2028 = [0x2028].pack('U') # e280a8@UTF8 Unicode line separator
+ UCS_0X2029 = [0x2029].pack('U') # e280a9@UTF8 Unicode paragraph separator
+
+ # non-break characters
+ ESCAPE_SEQ = {
+ "\x00" => '\\0',
+ "\x07" => '\\a',
+ "\x08" => '\\b',
+ "\x0b" => '\\v',
+ "\x0c" => '\\f',
+ "\x1b" => '\\e',
+ "\"" => '\\"',
+ "\\" => '\\\\',
+ }
+
+ # non-breaking space
+ ESCAPE_SEQ_NS = {
+ UCS_0XA0 => '\\_',
+ }
+
+ # white spaces
+ ESCAPE_SEQ_WS = {
+ "\x09" => '\\t',
+ " " => '\\x20',
+ }
+
+ # line breaks
+ ESCAPE_SEQ_LB ={
+ "\x0a" => '\\n',
+ "\x0d" => '\\r',
+ UCS_0X85 => '\\N',
+ UCS_0X2028 => '\\L',
+ UCS_0X2029 => '\\P',
+ }
+
+ # regexps for line breaks
+ REX_LF = Regexp.escape("\x0a")
+ REX_CR = Regexp.escape("\x0d")
+ REX_CRLF = Regexp.escape("\x0d\x0a")
+ REX_NEL = Regexp.escape(UCS_0X85)
+ REX_LS = Regexp.escape(UCS_0X2028)
+ REX_PS = Regexp.escape(UCS_0X2029)
+
+ REX_ANY_LB = /(#{REX_LF}|#{REX_CR}|#{REX_NEL}|#{REX_LS}|#{REX_PS})/
+ REX_NORMAL_LB = /(#{REX_LF}|#{REX_LS}|#{REX_PS})/
+
+ # regexps for language-Independent types for YAML1.1
+ REX_BOOL = /
+ y|Y|yes|Yes|YES|n|N|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF
+ /x
+ REX_FLOAT = /
+ [-+]?([0-9][0-9_]*)?\.[0-9.]*([eE][-+][0-9]+)? # (base 10)
+ |[-+]?[0-9][0-9_]*(:[0-5]?[0-9])+\.[0-9_]* # (base 60)
+ |[-+]?\.(inf|Inf|INF) # (infinity)
+ |\.(nan|NaN|NAN) # (not a number)
+ /x
+ REX_INT = /
+ [-+]?0b[0-1_]+ # (base 2)
+ |[-+]?0[0-7_]+ # (base 8)
+ |[-+]?(0|[1-9][0-9_]*) # (base 10)
+ |[-+]?0x[0-9a-fA-F_]+ # (base 16)
+ |[-+]?[1-9][0-9_]*(:[0-5]?[0-9])+ # (base 60)
+ /x
+ REX_MERGE = /
+ <<
+ /x
+ REX_NULL = /
+ ~ # (canonical)
+ |null|Null|NULL # (English)
+ | # (Empty)
+ /x
+ REX_TIMESTAMP = /
+ [0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] # (ymd)
+ |[0-9][0-9][0-9][0-9] # (year)
+ -[0-9][0-9]? # (month)
+ -[0-9][0-9]? # (day)
+ ([Tt]|[ \t]+)[0-9][0-9]? # (hour)
+ :[0-9][0-9] # (minute)
+ :[0-9][0-9] # (second)
+ (\.[0-9]*)? # (fraction)
+ (([ \t]*)Z|[-+][0-9][0-9]?(:[0-9][0-9])?)? # (time zone)
+ /x
+ REX_VALUE = /
+ =
+ /x
+ end
+
+ include Constants
+end
+
+module Puppet::Parser::Functions
+ newfunction(:sorted_yaml,
+ :type => :rvalue,
+ :doc => "This function outputs yaml, but ensures the keys are sorted."
+ ) do |arguments|
+
+ if arguments.is_a?(Array)
+ if arguments.size != 1
+ raise(Puppet::ParseError, "sorted_yaml(): Wrong number of arguments given (#{arguments.size} for 1)")
+ end
+ yaml = arguments.first
+ else
+ yaml = arguments
+ end
+ return Ya2YAML.new()._ya2yaml(yaml)
+ end
+end
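
This vendored copy deviates from upstream ya2yaml in one respect, called out by the NOTE inside emit(): array elements are emitted in sorted order as well, not just hash keys, so repeated runs produce byte-identical YAML. A minimal sketch of the intended use from a manifest, with a hypothetical path and data:

    $config = {
      'couch_hosts' => ['couch2.example.org', 'couch1.example.org'],
      'port'        => 5984,
    }
    file { '/etc/example/service.yml':
      content => sorted_yaml($config),
    }
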
diff --git a/puppet/manifests/site.pp b/puppet/manifests/site.pp
new file mode 100644
index 00000000..ecda4012
--- /dev/null
+++ b/puppet/manifests/site.pp
@@ -0,0 +1,60 @@
+# set a default exec path
+# the logoutput exec parameter defaults to "on_failure" in puppet 3,
+# but to "false" in puppet 2.7, so we need to set this globally here
+Exec {
+ logoutput => on_failure,
+ path => '/usr/bin:/usr/sbin/:/bin:/sbin:/usr/local/bin:/usr/local/sbin'
+}
+
+Package <| provider == 'apt' |> {
+ install_options => ['--no-install-recommends'],
+}
+
+$services = hiera('services', [])
+$services_str = join($services, ', ')
+notice("Services for ${fqdn}: ${services_str}")
+
+# In the default deployment case, we want to run an 'apt-get dist-upgrade'
+# to ensure the latest packages are installed. This is done by including the
+# class 'site_config::slow' here. However, if you have only changed a small
+# part of the platform and want to skip this slow step, you can do so by
+# using 'leap deploy --fast', which will only apply those resources that are
+# tagged with 'leap_base' or 'leap_service'.
+# See https://leap.se/en/docs/platform/details/under-the-hood#tags
+include site_config::slow
+
+if member($services, 'openvpn') {
+ include site_openvpn
+}
+
+if member($services, 'couchdb') {
+ include site_couchdb
+}
+
+if member($services, 'webapp') {
+ include site_webapp
+}
+
+if member($services, 'soledad') {
+ include soledad::server
+}
+
+if member($services, 'monitor') {
+ include site_nagios
+}
+
+if member($services, 'tor') {
+ include site_tor
+}
+
+if member($services, 'mx') {
+ include site_mx
+}
+
+if member($services, 'static') {
+ include site_static
+}
+
+if member($services, 'obfsproxy') {
+ include site_obfsproxy
+}
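
The comment at the top of site.pp refers to tag-based filtering. A rough sketch of how that mechanism works, with hypothetical class and resource names: a class tags itself, the resources it declares inherit the tag, and a tag-restricted run (such as the ones apply_on_node.sh builds with $TAGS) then skips everything outside the requested tags.

    class site_example::upgrade {
      tag 'leap_slow'                 # every resource declared here inherits this tag
      exec { 'apt_dist_upgrade':
        command => 'apt-get -y dist-upgrade',
      }
    }
    # a "fast" run restricted to the base and service tags skips the class above:
    #   puppet apply --tags=leap_base,leap_service manifests/site.pp
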
diff --git a/puppet/modules/apt/.gitignore b/puppet/modules/apt/.gitignore
new file mode 100644
index 00000000..a54aa971
--- /dev/null
+++ b/puppet/modules/apt/.gitignore
@@ -0,0 +1,12 @@
+/pkg/
+/Gemfile.lock
+/vendor/
+/spec/fixtures/manifests/*
+/spec/fixtures/modules/*
+!/spec/fixtures/modules/apt
+!/spec/fixtures/modules/apt/*
+/.vagrant/
+/.bundle/
+/coverage/
+/.idea/
+*.iml
diff --git a/puppet/modules/apt/.gitlab-ci.yml b/puppet/modules/apt/.gitlab-ci.yml
new file mode 100644
index 00000000..f7b8ecad
--- /dev/null
+++ b/puppet/modules/apt/.gitlab-ci.yml
@@ -0,0 +1,12 @@
+before_script:
+ - ruby -v
+ - gem install bundler --no-ri --no-rdoc
+ - bundle install --jobs $(nproc) "${FLAGS[@]}"
+
+# don't fail on lint warnings
+rspec:
+ script:
+ - bundle exec rake lint || /bin/true
+ - bundle exec rake syntax
+ - bundle exec rake validate
+ - bundle exec rake spec
diff --git a/puppet/modules/apt/Gemfile b/puppet/modules/apt/Gemfile
new file mode 100644
index 00000000..8925a904
--- /dev/null
+++ b/puppet/modules/apt/Gemfile
@@ -0,0 +1,13 @@
+source "https://rubygems.org"
+
+group :test do
+ gem "rake"
+ gem "rspec", '< 3.2.0'
+ gem "puppet", ENV['PUPPET_VERSION'] || ENV['GEM_PUPPET_VERSION'] || ENV['PUPPET_GEM_VERSION'] || '~> 3.7.0'
+ gem "facter", ENV['FACTER_VERSION'] || ENV['GEM_FACTER_VERSION'] || ENV['FACTER_GEM_VERSION'] || '~> 2.2.0'
+ gem "rspec-puppet"
+ gem "puppetlabs_spec_helper"
+ gem "metadata-json-lint"
+ gem "rspec-puppet-facts"
+ gem "mocha"
+end
diff --git a/puppet/modules/apt/LICENSE b/puppet/modules/apt/LICENSE
new file mode 100644
index 00000000..94a9ed02
--- /dev/null
+++ b/puppet/modules/apt/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/puppet/modules/apt/README b/puppet/modules/apt/README
new file mode 100644
index 00000000..00db7d8e
--- /dev/null
+++ b/puppet/modules/apt/README
@@ -0,0 +1,602 @@
+
+Overview
+========
+
+This module manages apt on Debian.
+
+It keeps dpkg's and apt's databases current, as well as the keyrings used
+to secure package downloads.
+
+backports.debian.org is added.
+
+/etc/apt/sources.list and /etc/apt/preferences are managed. More
+recent Debian releases are pinned to very low values by default to
+prevent accidental upgrades.
+
+Ubuntu support is lagging behind but not absent either.
+
+! Upgrade Notice !
+
+ * The `disable_update` parameter has been removed. The main apt class
+   defaults to *not* running `apt-get update` on every run anyway, so the
+   parameter was redundant.
+   Include the `apt::update` class if you want `apt-get update` to run on
+   every Puppet run.
+
+ * The `apt::upgrade_package` define no longer notifies Exec['apt_updated']
+   automatically, so you now need to include `apt::update` yourself.
+
+ * The apt::codename parameter has been removed. In its place, the
+ debian_codename fact may be overridden via an environment variable. This
+ will affect all other debian_* facts, and achieve the same result.
+
+ FACTER_debian_codename=jessie puppet agent -t
+
+ * If you were using a custom 50unattended-upgrades.${::lsbdistcodename} in
+   your site_apt, it is no longer supported. You should migrate to passing
+   $blacklisted_packages to the apt::unattended_upgrades class.
+
+ * The apt class is now a parameterized class. If you were previously setting
+   variables and then including the class, you will need to instantiate the
+   class with those values passed as parameters instead. For example, if you
+   had the following in your manifests:
+
+ $apt_debian_url = 'http://localhost:9999/debian/'
+ $apt_use_next_release = true
+ include apt
+
+   you will need to remove the variables and the include, and instead do
+   the following:
+
+ class { 'apt': debian_url => 'http://localhost:9999/debian/', use_next_release => true }
+
+   Previously, you could manually set $lsbdistcodename to force upgrades, but
+   because this is a top-level facter variable and newer Puppet versions do
+   not let you assign variables in other namespaces, this is no longer
+   possible. The same functionality is now available by passing the
+   'codename' parameter to the apt class, which switches the sources.list and
+   preferences files to the codename you set, allowing you to trigger
+   upgrades:
+
+ include apt::dist_upgrade
+ class { 'apt': codename => 'wheezy', notify => Exec['apt_dist-upgrade'] }
+
+ * The apticron class is now a parameterized class. If you were including
+   this class before, you will need to instantiate the class instead. For
+   example, if you had the following in your manifests:
+
+ $apticron_email = 'foo@example.com'
+ $apticron_notifynew = '1'
+ ... any $apticron_* variables
+ include apticron
+
+   you will need to remove the variables and the include, and instead do the
+   following:
+
+ class { 'apt::apticron': email => 'foo@example.com', notifynew => '1' }
+
+ * The apt::listchanges class is now a parameterized class. If you were
+   previously setting variables and then including the class, you will need
+   to instantiate the class with those values passed as parameters instead.
+   For example, if you had the following in your manifests:
+
+ $apt_listchanges_email = 'foo@example.com'
+ ... any $apt_listchanges_* variables
+ include apt::listchanges
+
+   you will need to remove the variables and the include, and instead do the
+   following:
+
+ class { 'apt::listchanges': email => 'foo@example.com' }
+
+ * The apt::proxy_client class is now a parameterized class. If you were
+   previously setting variables and then including the class, you will need
+   to instantiate the class with those values passed as parameters instead.
+   For example, if you had the following in your manifests:
+
+ $apt_proxy = 'http://proxy.domain'
+ $apt_proxy_port = 666
+ include apt::proxy_client
+
+   you will need to remove the variables and the include, and instead do the
+   following:
+
+ class { 'apt::proxy_client': proxy => 'http://proxy.domain', port => '666' }
+
+Requirements
+============
+
+This module needs:
+
+- the lsb-release package must be installed on the server prior to running
+  Puppet; otherwise, all of the $::lsb* facts will be empty during runs.
+- the common module: https://gitlab.com/shared-puppet-modules-group/common
+
+By default, on normal hosts, this module sets the configuration option
+DSelect::Clean to 'auto'. On virtual servers, the value defaults to
+'pre-auto', because virtual servers are usually more space-constrained and
+have better recovery mechanisms via the host:
+
+From apt.conf(5), 0.7.2:
+ "Cache Clean mode; this value may be one of always, prompt, auto,
+ pre-auto and never. always and prompt will remove all packages
+ from the cache after upgrading, prompt (the default) does so
+ conditionally. auto removes only those packages which are no
+ longer downloadable (replaced with a new version for
+ instance). pre-auto performs this action before downloading new
+ packages."
+
+To change the default setting for DSelect::Clean, create a file named
+"03clean" or "03clean_vserver" in your site_apt module's files directory. You
+can also define this for a specific host by creating a file, named after the
+host, in a subdirectory of the site_apt module's files directory
+(for example: site_apt/files/some.host.com/03clean, or
+site_apt/files/some.host.com/03clean_vserver).
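+
+For example, a site_apt/files/03clean switching the behaviour to "always"
+(the value is illustrative; the format matches the stock files shipped with
+this module) would contain:
+
+  // This file is managed by site_apt
+  DSelect::Clean always;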
+
+Classes
+=======
+
+apt
+---
+
+The apt class sets up most of the documented functionality. To use functionality
+that is not enabled by default, you must set one of the following parameters.
+
+Example usage:
+
+ class { 'apt': use_next_release => true, debian_url => 'http://localhost:9999/debian/' }
+
+Class parameters:
+
+* use_lts
+
+ If this variable is set to true the CODENAME-lts sources (such as
+ squeeze-lts) are added.
+
+ By default this is false for backward compatibility with older
+ versions of this module.
+
+* use_volatile
+
+ If this variable is set to true the CODENAME-updates sources (such as
+ squeeze-updates) are added.
+
+ By default this is false for backward compatibility with older
+ versions of this module.
+
+* include_src
+
+ If this variable is set to true a deb-src source is added for every
+ added binary archive source.
+
+ By default this is false for backward compatibility with older
+ versions of this module.
+
+* use_next_release
+
+ If this variable is set to true the sources for the next Debian
+ release are added. The default pinning configuration pins it to very
+ low values.
+
+ By default this is false for backward compatibility with older
+ versions of this module.
+
+* debian_url, security_url, backports_url, volatile_url
+
+  These variables let you override the default APT mirrors used,
+  respectively, for the standard Debian archives, the Debian security
+  archive, the official Debian backports and the Debian Volatile archive.
+  A combined sketch using some of these parameters follows this list.
+
+* ubuntu_url
+
+  This variable lets you override the default APT mirror used for all
+  standard Ubuntu archives (including updates, security, backports).
+
+* repos
+
+  If this variable is set, it overrides the default repositories list
+  ("main contrib non-free").
+
+* disable_update
+
+  Disables the "apt-get update" that is normally triggered by
+  apt::upgrade_package and apt::dist_upgrade.
+
+  Note that nodes can still be updated once a day by setting
+    APT::Periodic::Update-Package-Lists "1";
+  in e.g. /etc/apt/apt.conf.d/80_apt_update_daily.
+
+* custom_preferences
+
+ For historical reasons (Debian Lenny's version of APT did not support the use
+ of the preferences.d directory for putting fragments of 'preferences'), this
+ module will manage a default generic apt/preferences file with more
+ recent releases pinned to very low values so that any package
+ installation will not accidentally pull in packages from those suites
+ unless you explicitly specify the version number. This file will be
+ complemented with all of the preferences_snippet calls (see below).
+
+  If the default preferences template doesn't suit your needs, you can create
+  a template located in your site_apt module and set custom_preferences to
+  its content (e.g. custom_preferences => template('site_apt/preferences') ).
+
+ Setting this variable to false before including this class will force the
+ apt/preferences file to be absent:
+
+ class { 'apt': custom_preferences => false }
+
+* custom_sources_list
+
+  By default this module will use a basic apt/sources.list template with
+  a generic Debian mirror. If you need more specific sources, e.g. to change
+  the sections included in the source, you can set this variable to the
+  content you want to use instead.
+
+ For example, setting this variable will pull in the
+ templates/site_apt/sources.list file:
+
+ class { 'apt': custom_sources_list => template('site_apt/sources.list') }
+
+* custom_key_dir
+
+  If you have additional apt-key files that you want added to your apt
+  keyring, you can set this variable to a path on your fileserver where
+  individual key files are placed. If this is set and keys exist there,
+  this module will 'apt-key add' each key.
+
+ The debian-archive-keyring package is installed and kept current up to the
+ latest revision (this includes the backports archive keyring).
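+
+A sketch combining some of the parameters documented above (the mirror URL
+and the boolean values are illustrative):
+
+  class { 'apt':
+    debian_url   => 'http://ftp.example.org/debian/',
+    use_volatile => true,
+    include_src  => true,
+  }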
+
+apt::apticron
+-------------
+
+When you instantiate this class, apticron will be installed, with the following
+defaults, which you are free to change:
+
+ $ensure_version = 'installed',
+ $config = "apt/${::operatingsystem}/apticron_${::lsbdistcodename}.erb",
+ $email = 'root',
+ $diff_only = '1',
+ $listchanges_profile = 'apticron',
+ $system = false,
+ $ipaddressnum = false,
+ $ipaddresses = false,
+ $notifyholds = '0',
+ $notifynew = '0',
+ $customsubject = ''
+
+Example usage:
+
+ class { 'apt::apticron': email => 'foo@example.com', notifynew => '1' }
+
+apt::cron::download
+-------------------
+
+This class sets up cron-apt so that it downloads upgradable packages without
+actually upgrading anything, and emails when the output changes.
+
+cron-apt runs at 4 AM by default. You may want to set the
+$apt_cron_hours variable before you include the class: its value will
+be passed as the "hours" parameter of a cronjob. Example:
+
+ # Run cron-apt every three hours
+ $apt_cron_hours = '*/3'
+
+Note that the default 4 AM cronjob won't be disabled.
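+
+Putting this together, a minimal sketch (the schedule is the same
+illustrative value as above):
+
+  # Run cron-apt every three hours
+  $apt_cron_hours = '*/3'
+  include apt::cron::download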
+
+apt::cron::dist_upgrade
+-----------------------
+
+This class sets up cron-apt so that it dist-upgrades the system and
+emails when upgrades are performed.
+
+See apt::cron::download above if you need to run cron-apt more often
+than once a day.
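+
+Assuming the defaults are acceptable, including the class is enough:
+
+  include apt::cron::dist_upgrade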
+
+apt::dist_upgrade
+-----------------
+
+This class provides the Exec['apt_dist-upgrade'] resource that
+dist-upgrades the system.
+
+This exec is set as refreshonly, so including this class does not
+trigger any action per se: other resources may notify it, and other
+classes may inherit from this one and add to its subscription list
+using the plusignment ('+>') operator. A real-world example can be
+seen in the apt::dist_upgrade::initiator source.
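+
+A sketch of the notification pattern (the file resource and its path are
+hypothetical; they only illustrate a resource refreshing the exec):
+
+  include apt::dist_upgrade
+
+  file { '/etc/apt/force_dist_upgrade':
+    content => '2016-01-01',    # hypothetical trigger content
+    notify  => Exec['apt_dist-upgrade'],
+  }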
+
+apt::dist_upgrade::initiator
+----------------------------
+
+This class automatically dist-upgrades the system when an initiator
+file's content changes. The initiator file is copied from the first
+available source amongst the following ones, in decreasing priority
+order:
+
+- puppet:///modules/site_apt/${::fqdn}/upgrade_initiator
+- puppet:///modules/site_apt/upgrade_initiator
+- puppet:///modules/apt/upgrade_initiator
+
+This is useful when one does not want to set up a fully automated
+upgrade process but still needs a way to manually trigger full
+upgrades of any number of systems at scheduled times.
+
+Beware: a dist-upgrade is triggered the first time Puppet runs after
+this class has been included. This is actually the single reason why
+this class is not enabled by default.
+
+When this class is included, the APT indexes are updated on every
+Puppet run due to the author's lack of Puppet wizardry.
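+
+Example usage (the initiator file itself is placed in your site_apt module,
+as listed above):
+
+  include apt::dist_upgrade::initiator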
+
+apt::dselect
+------------
+
+This class, when included, installs dselect and switches it to expert mode to
+suppress superfluous help screens.
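+
+Example usage:
+
+  include apt::dselect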
+
+apt::listchanges
+----------------
+
+This class, when instantiated, installs apt-listchanges and configures it using
+the following parameterized variables, which can be changed:
+
+ version = 'present'
+ config = "apt/${::operatingsystem}/listchanges_${::lsbrelease}.erb"
+ frontend = 'pager'
+ email = 'root'
+ confirm = 0
+ saveseen = '/var/lib/apt/listchanges.db'
+ which = 'both'
+
+Example usage:
+
+  class { 'apt::listchanges': email => 'foo@example.com' }
+
+apt::proxy_client
+-----------------
+
+This class adds the configuration needed for apt to fetch packages via a
+proxy.
+
+Set the 'proxy' class parameter to the URL of the proxy that will be used. By
+default, the proxy will be queried on port 3142, but you can change the port
+number by setting the 'port' class parameter.
+
+Example:
+
+ class { 'apt::proxy_client': proxy => 'http://proxy.domain', port => '666' }
+
+apt::reboot_required_notify
+---------------------------
+
+This class installs a daily cronjob that checks if a package upgrade
+requires the system to be rebooted; if so, cron sends a notification
+email to root.
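+
+Example usage:
+
+  include apt::reboot_required_notify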
+
+apt::unattended_upgrades
+------------------------
+
+If this class is included, it will install the package 'unattended-upgrades'
+and configure it to upgrade the system daily.
+
+The class has the following parameters that you can use to change the contents
+of the configuration file. The values shown here are the default values:
+
+ * $config_content = undef
+ * $config_template = 'apt/50unattended-upgrades.erb'
+ * $mailonlyonerror = true
+ * $mail_recipient = 'root'
+ * $blacklisted_packages = []
+
+Note that $config_content, if set, provides the entire contents of the
+configuration file and therefore makes the other parameters irrelevant.
+
+Example:
+
+ class { 'apt::unattended_upgrades':
+ config_template => 'site_apt/50unattended-upgrades.jessie',
+ blacklisted_packages => [
+ 'libc6', 'libc6-dev', 'libc6-i686', 'mysql-server', 'redmine', 'nodejs',
+ 'bird'
+ ],
+ }
+
+Defines
+=======
+
+apt::apt_conf
+-------------
+
+Creates a file in the apt/apt.conf.d directory to easily add configuration
+components. One can use either the 'source' parameter to specify a list of
+static files to include from the puppet fileserver, or the 'content'
+parameter to define the content inline or with the help of a template.
+
+Example:
+
+ apt::apt_conf { '80download-only':
+ source => 'puppet:///modules/site_apt/80download-only',
+ }
+
+apt::preferences_snippet
+------------------------
+
+A way to add pinning information to files in /etc/apt/preferences.d/
+
+Example:
+
+ apt::preferences_snippet {
+ 'irssi-plugin-otr':
+ release => 'squeeze-backports',
+ priority => 999;
+ }
+
+ apt::preferences_snippet {
+ 'unstable_fallback':
+ package => '*',
+ release => 'unstable',
+ priority => 1;
+ }
+
+ apt::preferences_snippet {
+ 'ttdnsd':
+ pin => 'origin deb.torproject.org',
+ priority => 999;
+ }
+
+The names of the resources will be used as the names of the files in the
+preferences.d directory, so you should ensure that resource names follow the
+naming convention quoted below.
+
+From apt_preferences(5):
+ Note that the files in the /etc/apt/preferences.d directory are parsed in
+ alphanumeric ascending order and need to obey the following naming
+ convention: The files have no or "pref" as filename extension and which
+ only contain alphanumeric, hyphen (-), underscore (_) and period (.)
+ characters - otherwise they will be silently ignored.
+
+apt::preseeded_package
+----------------------
+
+This simplifies installation of packages for which you wish to preseed the
+answers to debconf. For example, if you wish to provide a preseed file for
+the locales package, you would place a locales.seeds file in
+'site_apt/templates/${::lsbdistcodename}/locales.seeds' and then include the
+following in your manifest:
+
+  apt::preseeded_package { 'locales': }
+
+You can also specify the content of the seed via the content parameter,
+for example:
+
+ apt::preseeded_package { 'apticron':
+ content => 'apticron apticron/notification string root@example.com',
+ }
+
+apt::sources_list
+-----------------
+
+Creates a file in the apt/sources.list.d directory to easily add additional
+apt sources. One can use either the 'source' parameter to specify a list of
+static files to include from the puppet fileserver, or the 'content'
+parameter to define the content inline or with the help of a template. Ending
+the resource name in '.list' is optional: it will be automatically appended
+to the file name if not present in the resource name.
+
+Example:
+
+ apt::sources_list { 'company_internals':
+ source => [ "puppet:///modules/site_apt/${::fqdn}/company_internals.list",
+ 'puppet:///modules/site_apt/company_internals.list' ],
+ }
+
+apt::key
+--------
+
+Deploys a secure apt OpenPGP key. This usually accompanies the
+sources.list snippets above for third party repositories. For example,
+you would do:
+
+ apt::key { 'neurodebian.gpg':
+ ensure => present,
+ source => 'puppet:///modules/site_apt/neurodebian.gpg',
+ }
+
+This deploys the key in the `/etc/apt/trusted.gpg.d` directory, where
+secure apt expects binary OpenPGP keys and *not* "ascii-armored" or
+"plain text" OpenPGP key material. For the latter, use `apt::key::plain`.
+
+The `.gpg` extension is required for `apt` to pick up the key properly.
+
+apt::key::plain
+---------------
+
+Deploys a secure apt OpenPGP key. This usually accompanies the
+sources.list snippets above for third party repositories. For example,
+you would do:
+
+ apt::key::plain { 'neurodebian.asc':
+ source => 'puppet:///modules/site_apt/neurodebian.asc',
+ }
+
+This deploys the key in the `${apt_base_dir}/keys` directory (as
+opposed to `$custom_key_dir` which deploys it in `keys.d`). The reason
+this exists on top of `$custom_key_dir` is to allow a more
+decentralised distribution of those keys, without having all modules
+throw their keys in the same directory in the manifests.
+
+Note that this model does *not* currently allow keys to be removed!
+Use `apt::key` instead for a more practical, revocable approach, but
+note that it requires binary keys.
+
+apt::upgrade_package
+--------------------
+
+This simplifies upgrades for DSA security announcements or point-releases.
+It ensures that the named package is upgraded to the version specified, but
+only if the package is already installed; otherwise nothing happens. If the
+specified version is 'latest' (the default), the package is upgraded to the
+latest package revision when it becomes available.
+
+For example, the following upgrades the perl package to version 5.8.8-7etch1
+(if it is installed); it also upgrades the syslog-ng and perl-modules
+packages to their latest versions (again, only if they are installed):
+
+  apt::upgrade_package {
+    'perl':
+      version => '5.8.8-7etch1';
+    'syslog-ng':
+      version => latest;
+    'perl-modules':
+      version => latest;
+  }
+
+Resources
+=========
+
+File['apt_config']
+------------------
+
+Use this resource to depend on, or add to, a completed apt configuration.
+
+Exec['apt_updated']
+-------------------
+
+Once this resource has been refreshed, the APT indexes are up to date.
+It is set to `refreshonly => true`, so it does not run on every Puppet
+run. To run it every time, include the `apt::update` class.
+
+This resource is usually used as follows, to ensure that Package
+resources install current packages:
+
+ include apt::update
+ Package { require => Exec['apt_updated'] }
+
+Note that nodes can be updated once a day by setting
+
+  APT::Periodic::Update-Package-Lists "1";
+
+in e.g. /etc/apt/apt.conf.d/80_apt_update_daily.
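+
+A sketch of deploying that setting through the apt::apt_conf define described
+above (the snippet name mirrors the file name mentioned in the note):
+
+  apt::apt_conf { '80_apt_update_daily':
+    content => 'APT::Periodic::Update-Package-Lists "1";',
+  }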
+
+
+Tests
+=====
+
+To run the puppet rspec tests:
+
+ bundle install --path vendor/bundle
+ bundle exec rake spec
+
+Using different facter/puppet versions:
+
+ FACTER_GEM_VERSION=1.6.10 PUPPET_GEM_VERSION=2.7.23 bundle install --path vendor/bundle
+ bundle exec rake spec
+
+Licensing
+=========
+
+This puppet module is licensed under the GPL version 3 or later. Redistribution
+and modification are encouraged.
+
+The GPL version 3 license text can be found in the "LICENSE" file accompanying
+this puppet module, or at the following URL:
+
+http://www.gnu.org/licenses/gpl-3.0.html
diff --git a/puppet/modules/apt/Rakefile b/puppet/modules/apt/Rakefile
new file mode 100644
index 00000000..85326bb4
--- /dev/null
+++ b/puppet/modules/apt/Rakefile
@@ -0,0 +1,19 @@
+require 'puppetlabs_spec_helper/rake_tasks'
+require 'puppet-lint/tasks/puppet-lint'
+PuppetLint.configuration.send('disable_80chars')
+PuppetLint.configuration.ignore_paths = ["spec/**/*.pp", "pkg/**/*.pp"]
+
+desc "Validate manifests, templates, and ruby files"
+task :validate do
+ Dir['manifests/**/*.pp'].each do |manifest|
+ sh "puppet parser validate --noop #{manifest}"
+ end
+ Dir['spec/**/*.rb','lib/**/*.rb'].each do |ruby_file|
+ sh "ruby -c #{ruby_file}" unless ruby_file =~ /spec\/fixtures/
+ end
+ Dir['templates/**/*.erb'].each do |template|
+ sh "erb -P -x -T '-' #{template} | ruby -c"
+ end
+end
+
+task :test => [:lint, :syntax , :validate, :spec]
diff --git a/puppet/modules/apt/files/02show_upgraded b/puppet/modules/apt/files/02show_upgraded
new file mode 100644
index 00000000..bb127d41
--- /dev/null
+++ b/puppet/modules/apt/files/02show_upgraded
@@ -0,0 +1,4 @@
+// This file is managed by Puppet
+// all local modifications will be overwritten
+
+APT::Get::Show-Upgraded true;
diff --git a/puppet/modules/apt/files/03clean b/puppet/modules/apt/files/03clean
new file mode 100644
index 00000000..3d20924a
--- /dev/null
+++ b/puppet/modules/apt/files/03clean
@@ -0,0 +1,4 @@
+// This file is managed by Puppet
+// all local modifications will be overwritten
+
+DSelect::Clean auto;
diff --git a/puppet/modules/apt/files/03clean_vserver b/puppet/modules/apt/files/03clean_vserver
new file mode 100644
index 00000000..6bb84e58
--- /dev/null
+++ b/puppet/modules/apt/files/03clean_vserver
@@ -0,0 +1,4 @@
+// This file is managed by Puppet
+// all local modifications will be overwritten
+
+DSelect::Clean pre-auto;
diff --git a/puppet/modules/apt/files/upgrade_initiator b/puppet/modules/apt/files/upgrade_initiator
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/puppet/modules/apt/files/upgrade_initiator
@@ -0,0 +1 @@
+
diff --git a/puppet/modules/apt/lib/facter/apt_running.rb b/puppet/modules/apt/lib/facter/apt_running.rb
new file mode 100644
index 00000000..e8f2156e
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/apt_running.rb
@@ -0,0 +1,7 @@
+Facter.add("apt_running") do
+ setcode do
+ #Facter::Util::Resolution.exec('/usr/bin/dpkg -s mysql-server >/dev/null 2>&1 && echo true || echo false')
+ Facter::Util::Resolution.exec('pgrep apt-get >/dev/null 2>&1 && echo true || echo false')
+ end
+end
+
diff --git a/puppet/modules/apt/lib/facter/debian_codename.rb b/puppet/modules/apt/lib/facter/debian_codename.rb
new file mode 100644
index 00000000..254877aa
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/debian_codename.rb
@@ -0,0 +1,42 @@
+begin
+ require 'facter/util/debian'
+rescue LoadError
+ require "#{File.dirname(__FILE__)}/util/debian"
+end
+
+def version_to_codename(version)
+ if Facter::Util::Debian::CODENAMES.has_key?(version)
+ return Facter::Util::Debian::CODENAMES[version]
+ else
+ Facter.warn("Could not determine codename from version '#{version}'")
+ end
+end
+
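+# Three providers, tried in order of decreasing weight: lsbdistcodename,
+# then the major release number mapped through CODENAMES, and finally
+# /etc/debian_version.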
+Facter.add(:debian_codename) do
+ has_weight 99
+ confine :operatingsystem => 'Debian'
+ setcode do
+ Facter.value('lsbdistcodename')
+ end
+end
+
+Facter.add(:debian_codename) do
+ has_weight 66
+ confine :operatingsystem => 'Debian'
+ setcode do
+ version_to_codename(Facter.value('operatingsystemmajrelease'))
+ end
+end
+
+Facter.add(:debian_codename) do
+ has_weight 33
+ confine :operatingsystem => 'Debian'
+ setcode do
+ debian_version = File.open('/etc/debian_version', &:readline)
+ if debian_version.match(/^\d+/)
+ version_to_codename(debian_version.scan(/^(\d+)/)[0][0])
+ elsif debian_version.match(/^[a-z]+\/(sid|unstable)/)
+ debian_version.scan(/^([a-z]+)\//)[0][0]
+ end
+ end
+end
diff --git a/puppet/modules/apt/lib/facter/debian_lts.rb b/puppet/modules/apt/lib/facter/debian_lts.rb
new file mode 100644
index 00000000..f53a9eb8
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/debian_lts.rb
@@ -0,0 +1,16 @@
+begin
+ require 'facter/util/debian'
+rescue LoadError
+ require "#{File.dirname(__FILE__)}/util/debian"
+end
+
+Facter.add(:debian_lts) do
+ confine :operatingsystem => 'Debian'
+ setcode do
+ if Facter::Util::Debian::LTS.include? Facter.value('debian_codename')
+ true
+ else
+ false
+ end
+ end
+end
diff --git a/puppet/modules/apt/lib/facter/debian_nextcodename.rb b/puppet/modules/apt/lib/facter/debian_nextcodename.rb
new file mode 100644
index 00000000..c4c569b2
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/debian_nextcodename.rb
@@ -0,0 +1,23 @@
+begin
+ require 'facter/util/debian'
+rescue LoadError
+ require "#{File.dirname(__FILE__)}/util/debian"
+end
+
+def debian_codename_to_next(codename)
+ if codename == "sid"
+ return "experimental"
+ else
+ codenames = Facter::Util::Debian::CODENAMES
+ versions = Facter::Util::Debian::CODENAMES.invert
+ current_version = versions[codename]
+ return codenames[(current_version.to_i + 1).to_s]
+ end
+end
+
+Facter.add(:debian_nextcodename) do
+ confine :operatingsystem => 'Debian'
+ setcode do
+ debian_codename_to_next(Facter.value('debian_codename'))
+ end
+end
diff --git a/puppet/modules/apt/lib/facter/debian_nextrelease.rb b/puppet/modules/apt/lib/facter/debian_nextrelease.rb
new file mode 100644
index 00000000..2a9c4f5f
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/debian_nextrelease.rb
@@ -0,0 +1,23 @@
+def debian_release_to_next(release)
+ releases = [
+ 'oldoldoldstable',
+ 'oldoldstable',
+ 'oldstable',
+ 'stable',
+ 'testing',
+ 'unstable',
+ 'experimental',
+ ]
+ if releases.include? release
+ if releases.index(release)+1 < releases.count
+ return releases[releases.index(release)+1]
+ end
+ end
+end
+
+Facter.add(:debian_nextrelease) do
+ confine :operatingsystem => 'Debian'
+ setcode do
+ debian_release_to_next(Facter.value('debian_release'))
+ end
+end
diff --git a/puppet/modules/apt/lib/facter/debian_release.rb b/puppet/modules/apt/lib/facter/debian_release.rb
new file mode 100644
index 00000000..2c334ccd
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/debian_release.rb
@@ -0,0 +1,38 @@
+begin
+ require 'facter/util/debian'
+rescue LoadError
+ require "#{File.dirname(__FILE__)}/util/debian"
+end
+
+def debian_codename_to_release(codename)
+ stable = Facter::Util::Debian::STABLE
+ versions = Facter::Util::Debian::CODENAMES.invert
+ release = nil
+ if codename == "sid"
+ release = "unstable"
+ elsif versions.has_key? codename
+ version = versions[codename].to_i
+ if version == stable
+ release = "stable"
+ elsif version < stable
+ release = "stable"
+ for i in version..stable - 1
+ release = "old" + release
+ end
+ elsif version == stable + 1
+ release = "testing"
+ end
+ end
+ if release.nil?
+ Facter.warn("Could not determine release from codename #{codename}!")
+ end
+ return release
+end
+
+Facter.add(:debian_release) do
+ has_weight 99
+ confine :operatingsystem => 'Debian'
+ setcode do
+ debian_codename_to_release(Facter.value('debian_codename'))
+ end
+end
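For reference, a minimal sketch of consuming the custom facts defined above from a Puppet manifest; only the debian_codename/debian_release fact names come from this module, the resource title and messages are hypothetical:

    # Branch on the custom facts provided by this module (illustrative only).
    case $::debian_release {
      'stable', 'oldstable': {
        notify { 'apt_release_info':
          message => "Security archive available for ${::debian_codename} (${::debian_release})",
        }
      }
      default: {
        notify { 'apt_release_info':
          message => "No security archive for ${::debian_release}",
        }
      }
    }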
diff --git a/puppet/modules/apt/lib/facter/ubuntu_codename.rb b/puppet/modules/apt/lib/facter/ubuntu_codename.rb
new file mode 100644
index 00000000..814fd942
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/ubuntu_codename.rb
@@ -0,0 +1,8 @@
+Facter.add(:ubuntu_codename) do
+ confine :operatingsystem => 'Ubuntu'
+ setcode do
+ Facter.value('lsbdistcodename')
+ end
+end
+
+
diff --git a/puppet/modules/apt/lib/facter/ubuntu_nextcodename.rb b/puppet/modules/apt/lib/facter/ubuntu_nextcodename.rb
new file mode 100644
index 00000000..dcd1d426
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/ubuntu_nextcodename.rb
@@ -0,0 +1,20 @@
+begin
+ require 'facter/util/ubuntu'
+rescue LoadError
+ require "#{File.dirname(__FILE__)}/util/ubuntu"
+end
+
+def ubuntu_codename_to_next(codename)
+ codenames = Facter::Util::Ubuntu::CODENAMES
+ i = codenames.index(codename)
+ if i and i+1 < codenames.count
+ return codenames[i+1]
+ end
+end
+
+Facter.add(:ubuntu_nextcodename) do
+ confine :operatingsystem => 'Ubuntu'
+ setcode do
+ ubuntu_codename_to_next(Facter.value('ubuntu_codename'))
+ end
+end
diff --git a/puppet/modules/apt/lib/facter/util/debian.rb b/puppet/modules/apt/lib/facter/util/debian.rb
new file mode 100644
index 00000000..290c17b5
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/util/debian.rb
@@ -0,0 +1,18 @@
+module Facter
+ module Util
+ module Debian
+ STABLE = 8
+ CODENAMES = {
+ "5" => "lenny",
+ "6" => "squeeze",
+ "7" => "wheezy",
+ "8" => "jessie",
+ "9" => "stretch",
+ "10" => "buster",
+ }
+ LTS = [
+ "squeeze",
+ ]
+ end
+ end
+end
diff --git a/puppet/modules/apt/lib/facter/util/ubuntu.rb b/puppet/modules/apt/lib/facter/util/ubuntu.rb
new file mode 100644
index 00000000..52c15e80
--- /dev/null
+++ b/puppet/modules/apt/lib/facter/util/ubuntu.rb
@@ -0,0 +1,21 @@
+module Facter
+ module Util
+ module Ubuntu
+ CODENAMES = [
+ "lucid",
+ "maverick",
+ "natty",
+ "oneiric",
+ "precise",
+ "quantal",
+ "raring",
+ "saucy",
+ "trusty",
+ "utopic",
+ "vivid",
+ "wily",
+ "xenial"
+ ]
+ end
+ end
+end
diff --git a/puppet/modules/apt/manifests/apt_conf.pp b/puppet/modules/apt/manifests/apt_conf.pp
new file mode 100644
index 00000000..949f6157
--- /dev/null
+++ b/puppet/modules/apt/manifests/apt_conf.pp
@@ -0,0 +1,45 @@
+define apt::apt_conf(
+ $ensure = 'present',
+ $source = '',
+ $content = undef,
+ $refresh_apt = true )
+{
+
+ if $source == '' and $content == undef {
+ fail("One of \$source or \$content must be specified for apt_conf ${name}")
+ }
+
+ if $source != '' and $content != undef {
+ fail("Only one of \$source or \$content must specified for apt_conf ${name}")
+ }
+
+ include apt::dot_d_directories
+
+ # One would expect the 'file' resource on sources.list.d to trigger an
+ # apt-get update when files are added or modified in the directory, but it
+ # apparently doesn't.
+ file { "/etc/apt/apt.conf.d/${name}":
+ ensure => $ensure,
+ owner => root,
+ group => 0,
+ mode => '0644',
+ }
+
+ if $source {
+ File["/etc/apt/apt.conf.d/${name}"] {
+ source => $source,
+ }
+ }
+ else {
+ File["/etc/apt/apt.conf.d/${name}"] {
+ content => $content,
+ }
+ }
+
+ if $refresh_apt {
+ File["/etc/apt/apt.conf.d/${name}"] {
+ notify => Exec['apt_updated'],
+ }
+ }
+
+}
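A minimal usage sketch for the apt::apt_conf define above; the snippet name and apt option are examples, not part of the module:

    # Drop a single apt.conf.d snippet; the notify to Exec['apt_updated'] is added by the define (illustrative).
    apt::apt_conf { '99force-ipv4':
      content => "Acquire::ForceIPv4 \"true\";\n",
    }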
diff --git a/puppet/modules/apt/manifests/apticron.pp b/puppet/modules/apt/manifests/apticron.pp
new file mode 100644
index 00000000..9c94f9c9
--- /dev/null
+++ b/puppet/modules/apt/manifests/apticron.pp
@@ -0,0 +1,24 @@
+class apt::apticron(
+ $ensure_version = 'installed',
+ $config = "apt/${::operatingsystem}/apticron_${::debian_codename}.erb",
+ $email = 'root',
+ $diff_only = '1',
+ $listchanges_profile = 'apticron',
+ $system = false,
+ $ipaddressnum = false,
+ $ipaddresses = false,
+ $notifyholds = '0',
+ $notifynew = '0',
+ $customsubject = ''
+) {
+
+ package { 'apticron': ensure => $ensure_version }
+
+ file { '/etc/apticron/apticron.conf':
+ content => template($apt::apticron::config),
+ owner => root,
+ group => root,
+ mode => '0644',
+ require => Package['apticron'];
+ }
+}
diff --git a/puppet/modules/apt/manifests/cron/base.pp b/puppet/modules/apt/manifests/cron/base.pp
new file mode 100644
index 00000000..39fc3061
--- /dev/null
+++ b/puppet/modules/apt/manifests/cron/base.pp
@@ -0,0 +1,20 @@
+class apt::cron::base {
+
+ package { 'cron-apt': ensure => installed }
+
+ case $apt_cron_hours {
+ '': {}
+ default: {
+ # cron-apt defaults to run every night at 4 o'clock
+ # so we try not to run at the same time.
+ cron { 'apt_cron_every_N_hours':
+ command => 'test -x /usr/sbin/cron-apt && /usr/sbin/cron-apt',
+ user => root,
+ hour => "${apt_cron_hours}",
+ minute => 10,
+ require => Package['cron-apt'],
+ }
+ }
+ }
+
+}
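This class reads the top-scope variable $apt_cron_hours; a minimal sketch, assuming the global-variable style the class is written for:

    # Run cron-apt every 12 hours in addition to the packaged nightly run (illustrative).
    $apt_cron_hours = '12'
    include apt::cron::dist_upgrade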
diff --git a/puppet/modules/apt/manifests/cron/dist_upgrade.pp b/puppet/modules/apt/manifests/cron/dist_upgrade.pp
new file mode 100644
index 00000000..74403bb7
--- /dev/null
+++ b/puppet/modules/apt/manifests/cron/dist_upgrade.pp
@@ -0,0 +1,29 @@
+class apt::cron::dist_upgrade inherits apt::cron::base {
+
+ $action = "autoclean -y
+dist-upgrade -y -o APT::Get::Show-Upgraded=true -o 'DPkg::Options::=--force-confold'
+"
+
+ file { '/etc/cron-apt/action.d/3-download':
+ ensure => absent,
+ }
+
+ package { 'apt-listbugs': ensure => absent }
+
+ file { '/etc/cron-apt/action.d/4-dist-upgrade':
+ content => $action,
+ owner => root,
+ group => 0,
+ mode => '0644',
+ require => Package[cron-apt];
+ }
+
+ file { '/etc/cron-apt/config.d/MAILON':
+ content => "MAILON=upgrade\n",
+ owner => root,
+ group => 0,
+ mode => '0644',
+ require => Package[cron-apt];
+ }
+
+}
diff --git a/puppet/modules/apt/manifests/cron/download.pp b/puppet/modules/apt/manifests/cron/download.pp
new file mode 100644
index 00000000..4a19fec1
--- /dev/null
+++ b/puppet/modules/apt/manifests/cron/download.pp
@@ -0,0 +1,27 @@
+class apt::cron::download inherits apt::cron::base {
+
+ $action = "autoclean -y
+dist-upgrade -d -y -o APT::Get::Show-Upgraded=true
+"
+
+ file { '/etc/cron-apt/action.d/4-dist-upgrade':
+ ensure => absent,
+ }
+
+ file { '/etc/cron-apt/action.d/3-download':
+ content => $action,
+ require => Package[cron-apt],
+ owner => root,
+ group => 0,
+ mode => '0644';
+ }
+
+ file { '/etc/cron-apt/config.d/MAILON':
+ content => "MAILON=changes\n",
+ require => Package[cron-apt],
+ owner => root,
+ group => 0,
+ mode => '0644';
+ }
+
+}
diff --git a/puppet/modules/apt/manifests/dist_upgrade.pp b/puppet/modules/apt/manifests/dist_upgrade.pp
new file mode 100644
index 00000000..19c031e0
--- /dev/null
+++ b/puppet/modules/apt/manifests/dist_upgrade.pp
@@ -0,0 +1,9 @@
+class apt::dist_upgrade {
+
+ exec { 'apt_dist-upgrade':
+ command => '/usr/bin/apt-get -q -y -o \'DPkg::Options::=--force-confold\' dist-upgrade',
+ refreshonly => true,
+ before => Exec['apt_updated']
+ }
+
+}
diff --git a/puppet/modules/apt/manifests/dist_upgrade/initiator.pp b/puppet/modules/apt/manifests/dist_upgrade/initiator.pp
new file mode 100644
index 00000000..d2389883
--- /dev/null
+++ b/puppet/modules/apt/manifests/dist_upgrade/initiator.pp
@@ -0,0 +1,23 @@
+class apt::dist_upgrade::initiator inherits apt::dist_upgrade {
+
+ $initiator = 'upgrade_initiator'
+ $initiator_abs = "${apt::apt_base_dir}/${initiator}"
+
+ file { 'apt_upgrade_initiator':
+ mode => '0644',
+ owner => root,
+ group => 0,
+ path => $initiator_abs,
+ checksum => md5,
+ source => [
+ "puppet:///modules/site_apt/${::fqdn}/${initiator}",
+ "puppet:///modules/site_apt/${initiator}",
+ "puppet:///modules/apt/${initiator}",
+ ],
+ }
+
+ Exec['apt_dist-upgrade'] {
+ subscribe +> File['apt_upgrade_initiator'],
+ }
+
+}
diff --git a/puppet/modules/apt/manifests/dot_d_directories.pp b/puppet/modules/apt/manifests/dot_d_directories.pp
new file mode 100644
index 00000000..0ace8630
--- /dev/null
+++ b/puppet/modules/apt/manifests/dot_d_directories.pp
@@ -0,0 +1,15 @@
+class apt::dot_d_directories {
+
+ # watch .d directories and ensure they are present
+ file {
+ '/etc/apt/apt.conf.d':
+ ensure => directory,
+ checksum => mtime,
+ notify => Exec['apt_updated'];
+ '/etc/apt/sources.list.d':
+ ensure => directory,
+ checksum => mtime,
+ notify => Exec['apt_updated'];
+ }
+
+}
diff --git a/puppet/modules/apt/manifests/dselect.pp b/puppet/modules/apt/manifests/dselect.pp
new file mode 100644
index 00000000..2b99a43d
--- /dev/null
+++ b/puppet/modules/apt/manifests/dselect.pp
@@ -0,0 +1,11 @@
+# manage dselect, like
+# suppressing the annoying help texts
+class apt::dselect {
+
+ file_line { 'dselect_expert':
+ path => '/etc/dpkg/dselect.cfg',
+ line => 'expert',
+ }
+
+ package { 'dselect': ensure => installed }
+}
diff --git a/puppet/modules/apt/manifests/init.pp b/puppet/modules/apt/manifests/init.pp
new file mode 100644
index 00000000..4c44af2a
--- /dev/null
+++ b/puppet/modules/apt/manifests/init.pp
@@ -0,0 +1,150 @@
+# apt.pp - common components and defaults for handling apt
+# Copyright (C) 2008 Micah Anderson <micah@riseup.net>
+# Copyright (C) 2007 David Schmitt <david@schmitt.edv-bus.at>
+# See LICENSE for the full license granted to you.
+
+class apt(
+ $use_lts = $apt::params::use_lts,
+ $use_volatile = $apt::params::use_volatile,
+ $use_backports = $apt::params::use_backports,
+ $include_src = $apt::params::include_src,
+ $use_next_release = $apt::params::use_next_release,
+ $debian_url = $apt::params::debian_url,
+ $security_url = $apt::params::security_url,
+ $backports_url = $apt::params::backports_url,
+ $lts_url = $apt::params::lts_url,
+ $volatile_url = $apt::params::volatile_url,
+ $ubuntu_url = $apt::params::ubuntu_url,
+ $repos = $apt::params::repos,
+ $custom_preferences = $apt::params::custom_preferences,
+ $custom_sources_list = '',
+ $custom_key_dir = $apt::params::custom_key_dir
+) inherits apt::params {
+ case $::operatingsystem {
+ 'debian': {
+ $real_repos = $repos ? {
+ 'auto' => 'main contrib non-free',
+ default => $repos,
+ }
+ }
+ 'ubuntu': {
+ $real_repos = $repos ? {
+ 'auto' => 'main restricted universe multiverse',
+ default => $repos,
+ }
+ }
+ }
+
+ package { 'apt':
+ ensure => installed,
+ require => undef,
+ }
+
+ $sources_content = $custom_sources_list ? {
+ '' => template( "apt/${::operatingsystem}/sources.list.erb"),
+ default => $custom_sources_list
+ }
+ file {
+ # include main and security
+ # additional sources should be included via the apt::sources_list define
+ '/etc/apt/sources.list':
+ content => $sources_content,
+ notify => Exec['apt_updated'],
+ owner => root,
+ group => 0,
+ mode => '0644';
+ }
+
+ apt_conf { '02show_upgraded':
+ source => [ "puppet:///modules/site_apt/${::fqdn}/02show_upgraded",
+ 'puppet:///modules/site_apt/02show_upgraded',
+ 'puppet:///modules/apt/02show_upgraded' ]
+ }
+
+ if ( $::virtual == 'vserver' ) {
+ apt_conf { '03clean_vserver':
+ source => [ "puppet:///modules/site_apt/${::fqdn}/03clean_vserver",
+ 'puppet:///modules/site_apt/03clean_vserver',
+ 'puppet:///modules/apt/03clean_vserver' ],
+ alias => '03clean';
+ }
+ }
+ else {
+ apt_conf { '03clean':
+ source => [ "puppet:///modules/site_apt/${::fqdn}/03clean",
+ 'puppet:///modules/site_apt/03clean',
+ 'puppet:///modules/apt/03clean' ]
+ }
+ }
+
+ case $custom_preferences {
+ false: {
+ include apt::preferences::absent
+ }
+ default: {
+ # When squeeze becomes the stable branch, transform this file's header
+ # into a preferences.d file
+ include apt::preferences
+ }
+ }
+
+ include apt::dot_d_directories
+
+ ## This package should really always be current
+ package { 'debian-archive-keyring': ensure => latest }
+
+ # backports uses the normal archive key now
+ package { 'debian-backports-keyring': ensure => absent }
+
+ if ($use_backports and !($::debian_release in ['testing', 'unstable', 'experimental'])) {
+ apt::sources_list {
+ 'backports':
+ content => "deb $backports_url ${::debian_codename}-backports ${apt::real_repos}",
+ }
+ if $include_src {
+ apt::sources_list {
+ 'backports-src':
+ content => "deb-src $backports_url ${::debian_codename}-backports ${apt::real_repos}",
+ }
+ }
+ }
+
+ include common::moduledir
+ common::module_dir { 'apt': }
+ $apt_base_dir = "${common::moduledir::module_dir_path}/apt"
+
+ if $custom_key_dir {
+ file { "${apt_base_dir}/keys.d":
+ source => $custom_key_dir,
+ recurse => true,
+ owner => root,
+ group => root,
+ mode => '0755',
+ }
+ exec { 'custom_keys':
+ command => "find ${apt_base_dir}/keys.d -type f -exec apt-key add '{}' \\;",
+ subscribe => File["${apt_base_dir}/keys.d"],
+ refreshonly => true,
+ notify => Exec[refresh_apt]
+ }
+ if $custom_preferences != false {
+ Exec['custom_keys'] {
+ before => File['apt_config']
+ }
+ }
+ }
+
+ # workaround for preseeded_package component
+ file { [ '/var/cache', '/var/cache/local', '/var/cache/local/preseeding' ]: ensure => directory }
+
+ exec { 'update_apt':
+ command => '/usr/bin/apt-get update',
+ require => [
+ File['/etc/apt/apt.conf.d', '/etc/apt/preferences' ],
+ File['/etc/apt/sources.list'] ],
+ refreshonly => true,
+    # Another semaphore for all packages to reference
+ alias => [ 'apt_updated', 'refresh_apt']
+ }
+
+}
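A minimal declaration sketch for the apt base class above; the parameter values are examples only:

    # Declare the apt class with explicit parameters (illustrative values).
    class { 'apt':
      use_backports => true,
      include_src   => false,
      repos         => 'main contrib',
    }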
diff --git a/puppet/modules/apt/manifests/key.pp b/puppet/modules/apt/manifests/key.pp
new file mode 100644
index 00000000..cb70ec6a
--- /dev/null
+++ b/puppet/modules/apt/manifests/key.pp
@@ -0,0 +1,13 @@
+define apt::key ($source, $ensure = 'present') {
+ validate_re(
+ $name, '\.gpg$',
+ 'An apt::key resource name must have the .gpg extension',
+ )
+
+ file {
+ "/etc/apt/trusted.gpg.d/${name}":
+ ensure => $ensure,
+ source => $source,
+ notify => Exec['apt_updated'],
+ }
+}
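A minimal usage sketch for apt::key; the key name and source path are placeholders (note the enforced .gpg extension):

    # Install a trusted keyring file into /etc/apt/trusted.gpg.d (illustrative).
    apt::key { 'example-archive-keyring.gpg':
      source => 'puppet:///modules/site_apt/example-archive-keyring.gpg',
    }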
diff --git a/puppet/modules/apt/manifests/key/plain.pp b/puppet/modules/apt/manifests/key/plain.pp
new file mode 100644
index 00000000..dff8b51b
--- /dev/null
+++ b/puppet/modules/apt/manifests/key/plain.pp
@@ -0,0 +1,13 @@
+define apt::key::plain ($source) {
+ file {
+ "${apt::apt_base_dir}/keys/${name}":
+ source => $source;
+ "${apt::apt_base_dir}/keys":
+ ensure => directory;
+ }
+ exec { "apt-key add '${apt::apt_base_dir}/keys/${name}'":
+ subscribe => File["${apt::apt_base_dir}/keys/${name}"],
+ refreshonly => true,
+ notify => Exec['apt_updated'],
+ }
+}
diff --git a/puppet/modules/apt/manifests/listchanges.pp b/puppet/modules/apt/manifests/listchanges.pp
new file mode 100644
index 00000000..e64bb1b7
--- /dev/null
+++ b/puppet/modules/apt/manifests/listchanges.pp
@@ -0,0 +1,19 @@
+class apt::listchanges(
+ $ensure_version = 'installed',
+ $config = "apt/${::operatingsystem}/listchanges_${::debian_codename}.erb",
+ $frontend = 'mail',
+ $email = 'root',
+ $confirm = '0',
+ $saveseen = '/var/lib/apt/listchanges.db',
+ $which = 'both'
+){
+ package { 'apt-listchanges': ensure => $ensure_version }
+
+ file { '/etc/apt/listchanges.conf':
+ content => template($apt::listchanges::config),
+ owner => root,
+ group => root,
+ mode => '0644',
+ require => Package['apt-listchanges'];
+ }
+}
diff --git a/puppet/modules/apt/manifests/params.pp b/puppet/modules/apt/manifests/params.pp
new file mode 100644
index 00000000..28af06eb
--- /dev/null
+++ b/puppet/modules/apt/manifests/params.pp
@@ -0,0 +1,22 @@
+class apt::params () {
+ $use_lts = false
+ $use_volatile = false
+ $use_backports = true
+ $include_src = false
+ $use_next_release = false
+ $debian_url = 'http://httpredir.debian.org/debian/'
+ $security_url = 'http://security.debian.org/'
+ $ubuntu_url = 'http://archive.ubuntu.com/ubuntu'
+ $backports_url = $::debian_codename ? {
+ 'squeeze' => 'http://backports.debian.org/debian-backports/',
+ default => $::operatingsystem ? {
+ 'Ubuntu' => $ubuntu_url,
+ default => $debian_url,
+ }
+ }
+ $lts_url = $debian_url
+ $volatile_url = 'http://volatile.debian.org/debian-volatile/'
+ $repos = 'auto'
+ $custom_preferences = ''
+ $custom_key_dir = false
+}
diff --git a/puppet/modules/apt/manifests/preferences.pp b/puppet/modules/apt/manifests/preferences.pp
new file mode 100644
index 00000000..6982ca05
--- /dev/null
+++ b/puppet/modules/apt/manifests/preferences.pp
@@ -0,0 +1,20 @@
+class apt::preferences {
+
+ $pref_contents = $apt::custom_preferences ? {
+ '' => $::operatingsystem ? {
+ 'debian' => template("apt/${::operatingsystem}/preferences_${::debian_codename}.erb"),
+ 'ubuntu' => template("apt/${::operatingsystem}/preferences_${::ubuntu_codename}.erb"),
+ },
+ default => $apt::custom_preferences
+ }
+
+ file { '/etc/apt/preferences':
+ ensure => present,
+ alias => 'apt_config',
+ # only update together
+ content => $pref_contents,
+ require => File['/etc/apt/sources.list'],
+ owner => root, group => 0, mode => '0644';
+ }
+
+}
diff --git a/puppet/modules/apt/manifests/preferences/absent.pp b/puppet/modules/apt/manifests/preferences/absent.pp
new file mode 100644
index 00000000..f32e0307
--- /dev/null
+++ b/puppet/modules/apt/manifests/preferences/absent.pp
@@ -0,0 +1,7 @@
+class apt::preferences::absent {
+
+ file { '/etc/apt/preferences':
+ ensure => absent,
+ alias => 'apt_config',
+ }
+}
diff --git a/puppet/modules/apt/manifests/preferences_snippet.pp b/puppet/modules/apt/manifests/preferences_snippet.pp
new file mode 100644
index 00000000..b7dba0d8
--- /dev/null
+++ b/puppet/modules/apt/manifests/preferences_snippet.pp
@@ -0,0 +1,59 @@
+define apt::preferences_snippet (
+ $priority = undef,
+ $package = false,
+ $ensure = 'present',
+ $source = '',
+ $release = '',
+ $pin = ''
+) {
+
+ $real_package = $package ? {
+ false => $name,
+ default => $package,
+ }
+
+ if $ensure == 'present' {
+ if $apt::custom_preferences == false {
+ fail('Trying to define a preferences_snippet with $custom_preferences set to false.')
+ }
+
+ if $priority == undef {
+ fail('apt::preferences_snippet requires the \'priority\' argument to be set')
+ }
+
+ if !$pin and !$release {
+ fail('apt::preferences_snippet requires one of the \'pin\' or \'release\' argument to be set')
+ }
+ if $pin and $release {
+ fail('apt::preferences_snippet requires either a \'pin\' or \'release\' argument, not both')
+ }
+ }
+
+ file { "/etc/apt/preferences.d/${name}":
+ ensure => $ensure,
+ owner => root, group => 0, mode => '0644',
+ before => Exec['apt_updated'];
+ }
+
+ case $source {
+ '': {
+ case $release {
+ '': {
+ File["/etc/apt/preferences.d/${name}"]{
+ content => template('apt/preferences_snippet.erb')
+ }
+ }
+ default: {
+ File["/etc/apt/preferences.d/${name}"]{
+ content => template('apt/preferences_snippet_release.erb')
+ }
+ }
+ }
+ }
+ default: {
+ File["/etc/apt/preferences.d/${name}"]{
+ source => $source
+ }
+ }
+ }
+}
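A minimal usage sketch for apt::preferences_snippet; it assumes $apt::custom_preferences is not false (as enforced above), and the package, release and priority are examples:

    # Pin a single package to backports via /etc/apt/preferences.d (illustrative).
    apt::preferences_snippet { 'puppet-from-backports':
      package  => 'puppet',
      release  => "${::debian_codename}-backports",
      priority => 999,
    }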
diff --git a/puppet/modules/apt/manifests/preseeded_package.pp b/puppet/modules/apt/manifests/preseeded_package.pp
new file mode 100644
index 00000000..3ef06879
--- /dev/null
+++ b/puppet/modules/apt/manifests/preseeded_package.pp
@@ -0,0 +1,21 @@
+define apt::preseeded_package (
+ $ensure = 'installed',
+ $content = ''
+) {
+ $seedfile = "/var/cache/local/preseeding/${name}.seeds"
+ $real_content = $content ? {
+ '' => template ( "site_apt/${::debian_codename}/${name}.seeds" ),
+ default => $content
+ }
+
+ file { $seedfile:
+ content => $real_content,
+ mode => '0600', owner => root, group => root,
+ }
+
+ package { $name:
+ ensure => $ensure,
+ responsefile => $seedfile,
+ require => File[$seedfile],
+ }
+}
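A minimal usage sketch for apt::preseeded_package; the debconf answer shown is an example for the locales package:

    # Preseed debconf answers before installing the package (illustrative).
    apt::preseeded_package { 'locales':
      content => "locales locales/default_environment_locale select en_US.UTF-8\n",
    }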
diff --git a/puppet/modules/apt/manifests/proxy_client.pp b/puppet/modules/apt/manifests/proxy_client.pp
new file mode 100644
index 00000000..9ba79f23
--- /dev/null
+++ b/puppet/modules/apt/manifests/proxy_client.pp
@@ -0,0 +1,9 @@
+class apt::proxy_client(
+ $proxy = 'http://localhost',
+ $port = '3142',
+){
+
+ apt_conf { '20proxy':
+ content => template('apt/20proxy.erb'),
+ }
+}
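A minimal usage sketch for apt::proxy_client; the proxy host is a placeholder:

    # Point apt at a caching proxy such as apt-cacher-ng (illustrative).
    class { 'apt::proxy_client':
      proxy => 'http://apt-proxy.example.com',
      port  => '3142',
    }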
diff --git a/puppet/modules/apt/manifests/reboot_required_notify.pp b/puppet/modules/apt/manifests/reboot_required_notify.pp
new file mode 100644
index 00000000..722e8a5e
--- /dev/null
+++ b/puppet/modules/apt/manifests/reboot_required_notify.pp
@@ -0,0 +1,21 @@
+class apt::reboot_required_notify {
+
+  # This package installs the script that creates /var/run/reboot-required*.
+ # This script (/usr/share/update-notifier/notify-reboot-required) is
+ # triggered e.g. by kernel packages.
+ package { 'update-notifier-common':
+ ensure => installed,
+ }
+
+ # cron-apt defaults to run every night at 4 o'clock
+ # plus some random time <1h.
+ # so we check if a reboot is required a bit later.
+ cron { 'apt_reboot_required_notify':
+ command => 'if [ -f /var/run/reboot-required ]; then echo "Reboot required\n" ; cat /var/run/reboot-required.pkgs ; fi',
+ user => root,
+ hour => 5,
+ minute => 20,
+ require => Package['update-notifier-common'],
+ }
+
+}
diff --git a/puppet/modules/apt/manifests/sources_list.pp b/puppet/modules/apt/manifests/sources_list.pp
new file mode 100644
index 00000000..0ee068d1
--- /dev/null
+++ b/puppet/modules/apt/manifests/sources_list.pp
@@ -0,0 +1,40 @@
+define apt::sources_list (
+ $ensure = 'present',
+ $source = '',
+ $content = undef
+) {
+
+ if $ensure == 'present' {
+ if $source == '' and $content == undef {
+ fail("One of \$source or \$content must be specified for apt_sources_snippet ${name}")
+ }
+ if $source != '' and $content != undef {
+ fail("Only one of \$source or \$content must specified for apt_sources_snippet ${name}")
+ }
+ }
+
+ include apt::dot_d_directories
+
+ $realname = regsubst($name, '\.list$', '')
+
+ # One would expect the 'file' resource on sources.list.d to trigger an
+ # apt-get update when files are added or modified in the directory, but it
+ # apparently doesn't.
+ file { "/etc/apt/sources.list.d/${realname}.list":
+ ensure => $ensure,
+ owner => root, group => 0, mode => '0644',
+ notify => Exec['apt_updated'],
+ }
+
+ if $source {
+ File["/etc/apt/sources.list.d/${realname}.list"] {
+ source => $source,
+ }
+ }
+ else {
+ File["/etc/apt/sources.list.d/${realname}.list"] {
+ content => $content,
+ }
+ }
+}
+
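A minimal usage sketch for apt::sources_list; the mirror URL and suite are examples:

    # Add an extra sources.list.d entry and refresh apt (illustrative).
    apt::sources_list { 'experimental.list':
      content => "deb http://httpredir.debian.org/debian/ experimental main\n",
    }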
diff --git a/puppet/modules/apt/manifests/unattended_upgrades.pp b/puppet/modules/apt/manifests/unattended_upgrades.pp
new file mode 100644
index 00000000..52d75425
--- /dev/null
+++ b/puppet/modules/apt/manifests/unattended_upgrades.pp
@@ -0,0 +1,34 @@
+class apt::unattended_upgrades (
+ $config_content = undef,
+ $config_template = 'apt/50unattended-upgrades.erb',
+ $mailonlyonerror = true,
+ $mail_recipient = 'root',
+ $blacklisted_packages = [],
+ $ensure_version = present
+) {
+
+ package { 'unattended-upgrades':
+ ensure => $ensure_version
+ }
+
+ # For some reason, this directory is sometimes absent, which causes
+ # unattended-upgrades to crash.
+ file { '/var/log/unattended-upgrades':
+ ensure => directory,
+ owner => 'root',
+ group => 0,
+ mode => '0755',
+ require => Package['unattended-upgrades'],
+ }
+
+ $file_content = $config_content ? {
+ undef => template($config_template),
+ default => $config_content
+ }
+
+ apt_conf { '50unattended-upgrades':
+ content => $file_content,
+ require => Package['unattended-upgrades'],
+ refresh_apt => false
+ }
+}
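A minimal declaration sketch for apt::unattended_upgrades; the mail recipient and blacklist entry are examples:

    # Enable unattended upgrades with a custom mail recipient (illustrative).
    class { 'apt::unattended_upgrades':
      mail_recipient       => 'admin@example.com',
      blacklisted_packages => [ 'mysql-server' ],
    }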
diff --git a/puppet/modules/apt/manifests/update.pp b/puppet/modules/apt/manifests/update.pp
new file mode 100644
index 00000000..dde83200
--- /dev/null
+++ b/puppet/modules/apt/manifests/update.pp
@@ -0,0 +1,7 @@
+class apt::update inherits ::apt {
+
+ Exec['update_apt'] {
+ refreshonly => false
+ }
+
+}
diff --git a/puppet/modules/apt/manifests/upgrade_package.pp b/puppet/modules/apt/manifests/upgrade_package.pp
new file mode 100644
index 00000000..30572c96
--- /dev/null
+++ b/puppet/modules/apt/manifests/upgrade_package.pp
@@ -0,0 +1,31 @@
+define apt::upgrade_package (
+ $version = ''
+) {
+
+ $version_suffix = $version ? {
+ '' => '',
+ 'latest' => '',
+ default => "=${version}",
+ }
+
+ if !defined(Package['apt-show-versions']) {
+ package { 'apt-show-versions':
+ ensure => installed,
+ require => undef,
+ }
+ }
+
+ if !defined(Package['dctrl-tools']) {
+ package { 'dctrl-tools':
+ ensure => installed,
+ require => undef,
+ }
+ }
+
+ exec { "apt-get -q -y -o 'DPkg::Options::=--force-confold' install ${name}${version_suffix}":
+ onlyif => [ "grep-status -F Status installed -a -P $name -q", "apt-show-versions -u $name | grep -q upgradeable" ],
+ require => Package['apt-show-versions', 'dctrl-tools'],
+ before => Exec['apt_updated']
+ }
+
+}
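A minimal usage sketch for apt::upgrade_package; the package name is an example:

    # Upgrade one installed package to its latest available version (illustrative).
    apt::upgrade_package { 'openssl':
      version => 'latest',
    }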
diff --git a/puppet/modules/apt/spec/spec_helper.rb b/puppet/modules/apt/spec/spec_helper.rb
new file mode 100644
index 00000000..21d1a988
--- /dev/null
+++ b/puppet/modules/apt/spec/spec_helper.rb
@@ -0,0 +1,12 @@
+# https://puppetlabs.com/blog/testing-modules-in-the-puppet-forge
+require 'rspec-puppet'
+require 'mocha/api'
+
+RSpec.configure do |c|
+
+ c.module_path = File.expand_path(File.join(File.dirname(__FILE__), '..', '..'))
+ c.color = true
+
+ #Puppet.features.stubs(:root? => true)
+
+end
diff --git a/puppet/modules/apt/spec/unit/custom_facts_spec.rb b/puppet/modules/apt/spec/unit/custom_facts_spec.rb
new file mode 100644
index 00000000..9a28d92e
--- /dev/null
+++ b/puppet/modules/apt/spec/unit/custom_facts_spec.rb
@@ -0,0 +1,86 @@
+require "spec_helper"
+
+describe "Facter::Util::Fact" do
+ before {
+ Facter.clear
+ }
+
+ describe 'custom facts' do
+
+ context 'Debian 7' do
+ before do
+ Facter.fact(:operatingsystem).stubs(:value).returns("Debian")
+ Facter.fact(:operatingsystemrelease).stubs(:value).returns("7.8")
+ Facter.fact(:lsbdistcodename).stubs(:value).returns("wheezy")
+ end
+
+ it "debian_release = oldstable" do
+ expect(Facter.fact(:debian_release).value).to eq('oldstable')
+ end
+
+ it "debian_codename = wheezy" do
+ expect(Facter.fact(:debian_codename).value).to eq('wheezy')
+ end
+
+ it "debian_nextcodename = jessie" do
+ expect(Facter.fact(:debian_nextcodename).value).to eq('jessie')
+ end
+
+ it "debian_nextrelease = stable" do
+ expect(Facter.fact(:debian_nextrelease).value).to eq('stable')
+ end
+ end
+
+ context 'Debian 8' do
+ before do
+ Facter.fact(:operatingsystem).stubs(:value).returns("Debian")
+ Facter.fact(:operatingsystemrelease).stubs(:value).returns("8.0")
+ Facter.fact(:lsbdistcodename).stubs(:value).returns("jessie")
+ end
+
+ it "debian_release = stable" do
+ expect(Facter.fact(:debian_release).value).to eq('stable')
+ end
+
+ it "debian_codename = jessie" do
+ expect(Facter.fact(:debian_codename).value).to eq('jessie')
+ end
+
+ it "debian_nextcodename = stretch" do
+ expect(Facter.fact(:debian_nextcodename).value).to eq('stretch')
+ end
+
+ it "debian_nextrelease = testing" do
+ expect(Facter.fact(:debian_nextrelease).value).to eq('testing')
+ end
+ end
+
+ context 'Ubuntu 15.10' do
+ before do
+ Facter.fact(:operatingsystem).stubs(:value).returns("Ubuntu")
+ Facter.fact(:operatingsystemrelease).stubs(:value).returns("15.10")
+ Facter.fact(:lsbdistcodename).stubs(:value).returns("wily")
+ end
+
+ it "ubuntu_codename = wily" do
+ expect(Facter.fact(:ubuntu_codename).value).to eq('wily')
+ end
+
+ it "ubuntu_nextcodename = xenial" do
+ expect(Facter.fact(:ubuntu_nextcodename).value).to eq('xenial')
+ end
+ end
+ end
+
+ describe "Test 'apt_running' fact" do
+ it "should return true when apt-get is running" do
+ Facter::Util::Resolution.stubs(:exec).with("pgrep apt-get >/dev/null 2>&1 && echo true || echo false").returns("true")
+ expect(Facter.fact(:apt_running).value).to eq('true')
+ end
+ it "should return false when apt-get is not running" do
+ Facter::Util::Resolution.stubs(:exec).with("pgrep apt-get >/dev/null 2>&1 && echo true || echo false").returns("false")
+ expect(Facter.fact(:apt_running).value).to eq('false')
+ end
+ end
+
+end
diff --git a/puppet/modules/apt/templates/20proxy.erb b/puppet/modules/apt/templates/20proxy.erb
new file mode 100644
index 00000000..520e7b1b
--- /dev/null
+++ b/puppet/modules/apt/templates/20proxy.erb
@@ -0,0 +1,5 @@
+// This file is managed by Puppet
+// all local modifications will be overwritten
+
+Acquire::http { Proxy "<%= @proxy %>:<%= @port %>"; };
+Acquire::HTTP::Proxy::bugs.debian.org "DIRECT";
diff --git a/puppet/modules/apt/templates/50unattended-upgrades.erb b/puppet/modules/apt/templates/50unattended-upgrades.erb
new file mode 100644
index 00000000..7c65d102
--- /dev/null
+++ b/puppet/modules/apt/templates/50unattended-upgrades.erb
@@ -0,0 +1,38 @@
+// this file is managed by puppet !
+
+<% if scope.lookupvar('::operatingsystem') == 'Ubuntu' -%>
+Unattended-Upgrade::Allowed-Origins {
+ "${distro_id}:${distro_codename}-security";
+ "${distro_id}:${distro_codename}-updates";
+ "${distro_id}:${distro_codename}-backports";
+<% elsif scope.lookupvar('::operatingsystem') == 'Debian' and scope.lookupvar('::debian_codename') == 'squeeze' -%>
+Unattended-Upgrade::Allowed-Origins {
+ "${distro_id}:<%= scope.lookupvar('::debian_release') %>";
+ "${distro_id}:squeeze-lts";
+<% elsif scope.lookupvar('::operatingsystem') == 'Debian' and scope.lookupvar('::debian_codename') == 'wheezy' -%>
+Unattended-Upgrade::Origins-Pattern {
+ "origin=Debian,archive=<%= scope.lookupvar('::debian_release') %>,label=Debian-Security";
+ "origin=Debian,archive=${distro_codename}-lts";
+<% else -%>
+Unattended-Upgrade::Origins-Pattern {
+ "origin=Debian,codename=${distro_codename},label=Debian";
+ "origin=Debian,codename=${distro_codename},label=Debian-Security";
+<% end -%>
+};
+
+<% if not @blacklisted_packages.empty? -%>
+Unattended-Upgrade::Package-Blacklist {
+<% @blacklisted_packages.each do |pkg| -%>
+ "<%= pkg %>";
+<% end -%>
+};
+<% end -%>
+
+APT::Periodic::Update-Package-Lists "1";
+APT::Periodic::Download-Upgradeable-Packages "1";
+APT::Periodic::Unattended-Upgrade "1";
+
+Unattended-Upgrade::Mail "<%= @mail_recipient -%>";
+<% if @mailonlyonerror -%>
+Unattended-Upgrade::MailOnlyOnError "true";
+<% end -%>
diff --git a/puppet/modules/apt/templates/Debian/apticron_jessie.erb b/puppet/modules/apt/templates/Debian/apticron_jessie.erb
new file mode 120000
index 00000000..a9a3a6fd
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/apticron_jessie.erb
@@ -0,0 +1 @@
+apticron_wheezy.erb
\ No newline at end of file
diff --git a/puppet/modules/apt/templates/Debian/apticron_lenny.erb b/puppet/modules/apt/templates/Debian/apticron_lenny.erb
new file mode 100644
index 00000000..86b09977
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/apticron_lenny.erb
@@ -0,0 +1,50 @@
+# apticron.conf
+#
+# set EMAIL to a list of addresses which will be notified of impending updates
+#
+EMAIL="<%= scope.lookupvar('apt::apticron::email') %>"
+
+#
+# Set DIFF_ONLY to "1" to only output the difference of the current run
+# compared to the last run (ie. only new upgrades since the last run). If there
+# are no differences, no output/email will be generated. By default, apticron
+# will output everything that needs to be upgraded.
+#
+DIFF_ONLY="<%= scope.lookupvar('apt::apticron::diff_only') %>"
+
+#
+# Set LISTCHANGES_PROFILE if you would like apticron to invoke apt-listchanges
+# with the --profile option. You should add a corresponding profile to
+# /etc/apt/listchanges.conf
+#
+LISTCHANGES_PROFILE="<%= scope.lookupvar('apt::apticron::listchanges_profile') %>"
+
+#
+# Set SYSTEM if you would like apticron to use something other than the output
+# of "hostname -f" for the system name in the mails it generates
+#
+# SYSTEM="foobar.example.com"
+<% unless (v=scope.lookupvar('apt::apticron::system')).to_s == "false" -%>
+SYSTEM="<%= v %>"
+<% end -%>
+
+#
+# Set IPADDRESSNUM if you would like to configure the maximal number of IP
+# addresses apticron displays. The default is to display 1 address of each
+# family type (inet, inet6), if available.
+#
+# IPADDRESSNUM="1"
+<% unless (v=scope.lookupvar('apt::apticron::ipaddressnum')).to_s == "false" -%>
+IPADDRESSNUM="<%= v %>"
+<% end -%>
+
+#
+# Set IPADDRESSES to a whitespace separated list of reachable addresses for
+# this system. By default, apticron will try to work these out using the
+# "ip" command
+#
+# IPADDRESSES="192.0.2.1 2001:db8:1:2:3::1"
+<% unless (v=scope.lookupvar('apt::apticron::ipaddresses')).to_s == "false" -%>
+IPADDRESSES="<%= v %>"
+<% end -%>
+
diff --git a/puppet/modules/apt/templates/Debian/apticron_sid.erb b/puppet/modules/apt/templates/Debian/apticron_sid.erb
new file mode 120000
index 00000000..a9a3a6fd
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/apticron_sid.erb
@@ -0,0 +1 @@
+apticron_wheezy.erb
\ No newline at end of file
diff --git a/puppet/modules/apt/templates/Debian/apticron_squeeze.erb b/puppet/modules/apt/templates/Debian/apticron_squeeze.erb
new file mode 100644
index 00000000..05b7c9b8
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/apticron_squeeze.erb
@@ -0,0 +1,82 @@
+# apticron.conf
+#
+# set EMAIL to a space separated list of addresses which will be notified of
+# impending updates
+#
+EMAIL="<%= scope.lookupvar('apt::apticron::email') %>"
+
+
+#
+# Set DIFF_ONLY to "1" to only output the difference of the current run
+# compared to the last run (ie. only new upgrades since the last run). If there
+# are no differences, no output/email will be generated. By default, apticron
+# will output everything that needs to be upgraded.
+#
+DIFF_ONLY="<%= scope.lookupvar('apt::apticron::diff_only') %>"
+
+#
+# Set LISTCHANGES_PROFILE if you would like apticron to invoke apt-listchanges
+# with the --profile option. You should add a corresponding profile to
+# /etc/apt/listchanges.conf
+#
+LISTCHANGES_PROFILE="<%= scope.lookupvar('apt::apticron::listchanges_profile') %>"
+
+#
+# Set SYSTEM if you would like apticron to use something other than the output
+# of "hostname -f" for the system name in the mails it generates
+#
+# SYSTEM="foobar.example.com"
+<% unless (v=scope.lookupvar('apt::apticron::system')).to_s == "false" -%>
+SYSTEM="<%= v %>"
+<% end -%>
+
+
+#
+# Set IPADDRESSNUM if you would like to configure the maximal number of IP
+# addresses apticron displays. The default is to display 1 address of each
+# family type (inet, inet6), if available.
+#
+# IPADDRESSNUM="1"
+<% unless (v=scope.lookupvar('apt::apticron::ipaddressnum')).to_s == "false" -%>
+IPADDRESSNUM="<%= v %>"
+<% end -%>
+
+
+#
+# Set IPADDRESSES to a whitespace separated list of reachable addresses for
+# this system. By default, apticron will try to work these out using the
+# "ip" command
+#
+# IPADDRESSES="192.0.2.1 2001:db8:1:2:3::1"
+<% unless (v=scope.lookupvar('apt::apticron::ipaddresses')).to_s == "false" -%>
+IPADDRESSES="<%= v %>"
+<% end -%>
+
+
+#
+# Set NOTIFY_HOLDS="0" if you don't want to be notified about new versions of
+# packages on hold in your system. The default behavior is downloading and
+# listing them as any other package.
+#
+# NOTIFY_HOLDS="0"
+NOTIFY_HOLDS="<%= scope.lookupvar('apt::apticron::notifyholds') %>"
+
+#
+# Set NOTIFY_NEW="0" if you don't want to be notified about packages which
+# are not installed in your system. Yes, it's possible! There are some issues
+# related to systems which have mixed stable/unstable sources. In these cases
+# apt-get will consider for example that packages with "Priority:
+# required"/"Essential: yes" in unstable but not in stable should be installed,
+# so they will be listed in dist-upgrade output. Please take a look at
+# http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=531002#44
+#
+# NOTIFY_NEW="0"
+NOTIFY_NEW="<%= scope.lookupvar('apt::apticron::notifynew') %>"
+
+#
+# Set CUSTOM_SUBJECT if you want to replace the default subject used in
+# the notification e-mails. This may help filtering/sorting client-side e-mail.
+#
+# CUSTOM_SUBJECT=""
+CUSTOM_SUBJECT="<%= scope.lookupvar('apt::apticron::customsubject') %>"
+
diff --git a/puppet/modules/apt/templates/Debian/apticron_wheezy.erb b/puppet/modules/apt/templates/Debian/apticron_wheezy.erb
new file mode 100644
index 00000000..655854e6
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/apticron_wheezy.erb
@@ -0,0 +1,80 @@
+# apticron.conf
+#
+# set EMAIL to a space separated list of addresses which will be notified of
+# impending updates
+#
+EMAIL="<%= scope.lookupvar('apt::apticron::email') %>"
+
+#
+# Set DIFF_ONLY to "1" to only output the difference of the current run
+# compared to the last run (ie. only new upgrades since the last run). If there
+# are no differences, no output/email will be generated. By default, apticron
+# will output everything that needs to be upgraded.
+#
+DIFF_ONLY="<%= scope.lookupvar('apt::apticron::diff_only') %>"
+
+#
+# Set LISTCHANGES_PROFILE if you would like apticron to invoke apt-listchanges
+# with the --profile option. You should add a corresponding profile to
+# /etc/apt/listchanges.conf
+#
+LISTCHANGES_PROFILE="<%= scope.lookupvar('apt::apticron::listchanges_profile') %>"
+
+#
+# Set SYSTEM if you would like apticron to use something other than the output
+# of "hostname -f" for the system name in the mails it generates
+#
+# SYSTEM="foobar.example.com"
+<% unless (v=scope.lookupvar('apt::apticron::system')).to_s == "false" -%>
+SYSTEM="<%= v %>"
+<% end -%>
+
+#
+# Set IPADDRESSNUM if you would like to configure the maximal number of IP
+# addresses apticron displays. The default is to display 1 address of each
+# family type (inet, inet6), if available.
+#
+# IPADDRESSNUM="1"
+<% unless (v=scope.lookupvar('apt::apticron::ipaddressnum')).to_s == "false" -%>
+IPADDRESSNUM="<%= v %>"
+<% end -%>
+
+#
+# Set IPADDRESSES to a whitespace separated list of reachable addresses for
+# this system. By default, apticron will try to work these out using the
+# "ip" command
+#
+# IPADDRESSES="192.0.2.1 2001:db8:1:2:3::1"
+<% unless (v=scope.lookupvar('apt::apticron::ipaddresses')).to_s == "false" -%>
+IPADDRESSES="<%= v %>"
+<% end -%>
+
+#
+# Set NOTIFY_HOLDS="0" if you don't want to be notified about new versions of
+# packages on hold in your system. The default behavior is downloading and
+# listing them as any other package.
+#
+# NOTIFY_HOLDS="0"
+NOTIFY_HOLDS="<%= scope.lookupvar('apt::apticron::notifyholds') %>"
+
+#
+# Set NOTIFY_NEW="0" if you don't want to be notified about packages which
+# are not installed in your system. Yes, it's possible! There are some issues
+# related to systems which have mixed stable/unstable sources. In these cases
+# apt-get will consider for example that packages with "Priority:
+# required"/"Essential: yes" in unstable but not in stable should be installed,
+# so they will be listed in dist-upgrade output. Please take a look at
+# http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=531002#44
+#
+# NOTIFY_NEW="0"
+NOTIFY_NEW="<%= scope.lookupvar('apt::apticron::notifynew') %>"
+
+
+#
+# Set CUSTOM_SUBJECT if you want to replace the default subject used in
+# the notification e-mails. This may help filtering/sorting client-side e-mail.
+# If you want to use internal vars please use single quotes here. Ex:
+# ='[apticron] : package update(s)'
+#
+# CUSTOM_SUBJECT=""
+CUSTOM_SUBJECT="<%= scope.lookupvar('apt::apticron::customsubject') %>"
diff --git a/puppet/modules/apt/templates/Debian/listchanges_jessie.erb b/puppet/modules/apt/templates/Debian/listchanges_jessie.erb
new file mode 120000
index 00000000..74ab496d
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/listchanges_jessie.erb
@@ -0,0 +1 @@
+listchanges_lenny.erb
\ No newline at end of file
diff --git a/puppet/modules/apt/templates/Debian/listchanges_lenny.erb b/puppet/modules/apt/templates/Debian/listchanges_lenny.erb
new file mode 100644
index 00000000..1025dd0e
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/listchanges_lenny.erb
@@ -0,0 +1,7 @@
+[apt]
+frontend=<%= scope.lookupvar('apt::listchanges::frontend') %>
+email_address=<%= scope.lookupvar('apt::listchanges::email') %>
+confirm=<%= scope.lookupvar('apt::listchanges::confirm') %>
+save_seen=<%= scope.lookupvar('apt::listchanges::saveseen') %>
+which=<%= scope.lookupvar('apt::listchanges::which') %>
+
diff --git a/puppet/modules/apt/templates/Debian/listchanges_sid.erb b/puppet/modules/apt/templates/Debian/listchanges_sid.erb
new file mode 120000
index 00000000..74ab496d
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/listchanges_sid.erb
@@ -0,0 +1 @@
+listchanges_lenny.erb
\ No newline at end of file
diff --git a/puppet/modules/apt/templates/Debian/listchanges_squeeze.erb b/puppet/modules/apt/templates/Debian/listchanges_squeeze.erb
new file mode 120000
index 00000000..74ab496d
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/listchanges_squeeze.erb
@@ -0,0 +1 @@
+listchanges_lenny.erb
\ No newline at end of file
diff --git a/puppet/modules/apt/templates/Debian/listchanges_wheezy.erb b/puppet/modules/apt/templates/Debian/listchanges_wheezy.erb
new file mode 120000
index 00000000..74ab496d
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/listchanges_wheezy.erb
@@ -0,0 +1 @@
+listchanges_lenny.erb
\ No newline at end of file
diff --git a/puppet/modules/apt/templates/Debian/preferences_jessie.erb b/puppet/modules/apt/templates/Debian/preferences_jessie.erb
new file mode 100644
index 00000000..0888abe5
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/preferences_jessie.erb
@@ -0,0 +1,14 @@
+Explanation: Debian <%= codename=scope.lookupvar('::debian_codename') %>
+Package: *
+Pin: release o=Debian,n=<%= codename %>
+Pin-Priority: 990
+
+Explanation: Debian sid
+Package: *
+Pin: release o=Debian,n=sid
+Pin-Priority: 1
+
+Explanation: Debian fallback
+Package: *
+Pin: release o=Debian
+Pin-Priority: -10
diff --git a/puppet/modules/apt/templates/Debian/preferences_lenny.erb b/puppet/modules/apt/templates/Debian/preferences_lenny.erb
new file mode 100644
index 00000000..65001687
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/preferences_lenny.erb
@@ -0,0 +1,25 @@
+Explanation: Debian <%= codename=scope.lookupvar('::debian_codename') %>
+Package: *
+Pin: release o=Debian,a=<%= scope.lookupvar('::debian_release') %>,v=5*
+Pin-Priority: 990
+
+Explanation: Debian backports
+Package: *
+Pin: origin backports.debian.org
+Pin-Priority: 200
+
+Explanation: Debian <%= next_release=scope.lookupvar('::debian_nextrelease') %>
+Package: *
+Pin: release o=Debian,a=<%= next_release %>
+Pin-Priority: 2
+
+Explanation: Debian sid
+Package: *
+Pin: release o=Debian,a=unstable
+Pin-Priority: 1
+
+Explanation: Debian fallback
+Package: *
+Pin: release o=Debian
+Pin-Priority: -10
+
diff --git a/puppet/modules/apt/templates/Debian/preferences_sid.erb b/puppet/modules/apt/templates/Debian/preferences_sid.erb
new file mode 100644
index 00000000..eb185543
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/preferences_sid.erb
@@ -0,0 +1,10 @@
+Explanation: Debian sid
+Package: *
+Pin: release o=Debian,n=sid
+Pin-Priority: 990
+
+Explanation: Debian fallback
+Package: *
+Pin: release o=Debian
+Pin-Priority: -10
+
diff --git a/puppet/modules/apt/templates/Debian/preferences_squeeze.erb b/puppet/modules/apt/templates/Debian/preferences_squeeze.erb
new file mode 100644
index 00000000..885edc73
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/preferences_squeeze.erb
@@ -0,0 +1,30 @@
+Explanation: Debian <%= codename=scope.lookupvar('::debian_codename') %>
+Package: *
+Pin: release o=Debian,n=<%= codename %>
+Pin-Priority: 990
+
+Explanation: Debian <%= codename %>-updates
+Package: *
+Pin: release o=Debian,n=<%= codename %>-updates
+Pin-Priority: 990
+
+Explanation: Debian <%= codename %>-lts
+Package: *
+Pin: release o=Debian,n=<%= codename %>-lts
+Pin-Priority: 990
+
+Explanation: Debian <%= next_codename=scope.lookupvar('::debian_nextcodename') %>
+Package: *
+Pin: release o=Debian,n=<%= next_codename %>
+Pin-Priority: 2
+
+Explanation: Debian sid
+Package: *
+Pin: release o=Debian,n=sid
+Pin-Priority: 1
+
+Explanation: Debian fallback
+Package: *
+Pin: release o=Debian
+Pin-Priority: -10
+
diff --git a/puppet/modules/apt/templates/Debian/preferences_wheezy.erb b/puppet/modules/apt/templates/Debian/preferences_wheezy.erb
new file mode 100644
index 00000000..106108d5
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/preferences_wheezy.erb
@@ -0,0 +1,20 @@
+Explanation: Debian <%= codename=scope.lookupvar('::debian_codename') %>
+Package: *
+Pin: release o=Debian,n=<%= codename %>
+Pin-Priority: 990
+
+Explanation: Debian <%= codename %>-updates
+Package: *
+Pin: release o=Debian,n=<%= codename %>-updates
+Pin-Priority: 990
+
+Explanation: Debian sid
+Package: *
+Pin: release o=Debian,n=sid
+Pin-Priority: 1
+
+Explanation: Debian fallback
+Package: *
+Pin: release o=Debian
+Pin-Priority: -10
+
diff --git a/puppet/modules/apt/templates/Debian/sources.list.erb b/puppet/modules/apt/templates/Debian/sources.list.erb
new file mode 100644
index 00000000..44eea538
--- /dev/null
+++ b/puppet/modules/apt/templates/Debian/sources.list.erb
@@ -0,0 +1,76 @@
+# This file is managed by puppet
+# all local modifications will be overwritten
+
+### Debian current: <%= codename=scope.lookupvar('::debian_codename') %>
+
+# basic
+deb <%= debian_url=scope.lookupvar('apt::debian_url') %> <%= codename %> <%= lrepos=scope.lookupvar('apt::real_repos') %>
+<% if include_src=scope.lookupvar('apt::include_src') -%>
+deb-src <%= debian_url %> <%= codename %> <%= lrepos %>
+<% end -%>
+
+# security
+<% if ((release=scope.lookupvar('::debian_release')) == "stable" || release == "oldstable") -%>
+deb <%= security_url=scope.lookupvar('apt::security_url') %> <%= codename %>/updates <%= lrepos %>
+<% if include_src -%>
+deb-src <%= security_url %> <%= codename %>/updates <%= lrepos %>
+<% end -%>
+<% else -%>
+# There is no security support for <%= release %>
+<% end -%>
+
+<% if use_volatile=scope.lookupvar('apt::use_volatile') -%>
+# volatile
+<% if (release == "testing" || release == "unstable" || release == "experimental") -%>
+# There is no volatile archive for <%= release %>
+<% else -%>
+deb <%= debian_url %> <%= codename %>-updates <%= lrepos %>
+<% if include_src -%>
+deb-src <%= debian_url %> <%= codename %>-updates <%= lrepos %>
+<% end
+ end
+ end -%>
+
+<% if use_lts=scope.lookupvar('apt::use_lts') -%>
+# LTS
+<% if release_lts=scope.lookupvar('::debian_lts') == "false" -%>
+# There is no LTS archive for <%= release %>
+<% else -%>
+deb <%= debian_url %> <%= codename %>-lts <%= lrepos %>
+<% if include_src -%>
+deb-src <%= debian_url %> <%= codename %>-lts <%= lrepos %>
+<% end -%>
+<% end -%>
+<% end -%>
+
+<% if next_release=scope.lookupvar('apt::use_next_release') -%>
+### Debian next: <%= next_release=scope.lookupvar('::debian_nextrelease') ; next_codename=scope.lookupvar('::debian_nextcodename') %>
+
+# basic
+deb <%= debian_url %> <%= next_codename %> <%= lrepos %>
+<% if include_src -%>
+deb-src <%= debian_url %> <%= next_codename %> <%= lrepos %>
+<% end -%>
+
+# security
+<% if (next_release == "unstable" || next_release == "experimental") -%>
+# There is no security support for <%= next_release %>
+<% else -%>
+deb <%= security_url %> <%= next_codename %>/updates <%= lrepos %>
+<% if include_src then -%>
+deb-src <%= security_url %> <%= next_codename %>/updates <%= lrepos %>
+<% end
+ end -%>
+
+<% if use_volatile -%>
+# volatile
+<% if (next_release == "testing" || next_release == "unstable" || next_release == "experimental") -%>
+# There is no volatile archive for <%= next_release %>
+<% else -%>
+deb <%= debian_url %> <%= next_codename %>-updates <%= lrepos %>
+<% if include_src -%>
+deb-src <%= debian_url %> <%= next_codename %>-updates <%= lrepos %>
+<% end
+ end
+ end
+ end -%>
diff --git a/puppet/modules/apt/templates/Ubuntu/preferences_lucid.erb b/puppet/modules/apt/templates/Ubuntu/preferences_lucid.erb
new file mode 120000
index 00000000..3debe4fc
--- /dev/null
+++ b/puppet/modules/apt/templates/Ubuntu/preferences_lucid.erb
@@ -0,0 +1 @@
+preferences_maverick.erb
\ No newline at end of file
diff --git a/puppet/modules/apt/templates/Ubuntu/preferences_maverick.erb b/puppet/modules/apt/templates/Ubuntu/preferences_maverick.erb
new file mode 100644
index 00000000..8e5481d3
--- /dev/null
+++ b/puppet/modules/apt/templates/Ubuntu/preferences_maverick.erb
@@ -0,0 +1,30 @@
+Explanation: Ubuntu <%= codename=scope.lookupvar('::ubuntu_codename') %> security
+Package: *
+Pin: release o=Ubuntu,a=<%= codename %>-security
+Pin-Priority: 990
+
+Explanation: Ubuntu <%= codename %> updates
+Package: *
+Pin: release o=Ubuntu,a=<%= codename %>-updates
+Pin-Priority: 980
+
+Explanation: Ubuntu <%= codename %>
+Package: *
+Pin: release o=Ubuntu,a=<%= codename %>
+Pin-Priority: 970
+
+Explanation: Ubuntu backports
+Package: *
+Pin: release a=<%= codename %>-backports
+Pin-Priority: 200
+
+Explanation: Ubuntu <%= next_release=scope.lookupvar('::ubuntu_nextcodename') %>
+Package: *
+Pin: release o=Ubuntu,a=<%= next_release %>
+Pin-Priority: 2
+
+Explanation: Ubuntu fallback
+Package: *
+Pin: release o=Ubuntu
+Pin-Priority: -10
+
diff --git a/puppet/modules/apt/templates/Ubuntu/preferences_oneiric.erb b/puppet/modules/apt/templates/Ubuntu/preferences_oneiric.erb
new file mode 120000
index 00000000..3debe4fc
--- /dev/null
+++ b/puppet/modules/apt/templates/Ubuntu/preferences_oneiric.erb
@@ -0,0 +1 @@
+preferences_maverick.erb
\ No newline at end of file
diff --git a/puppet/modules/apt/templates/Ubuntu/preferences_precise.erb b/puppet/modules/apt/templates/Ubuntu/preferences_precise.erb
new file mode 120000
index 00000000..3debe4fc
--- /dev/null
+++ b/puppet/modules/apt/templates/Ubuntu/preferences_precise.erb
@@ -0,0 +1 @@
+preferences_maverick.erb
\ No newline at end of file
diff --git a/puppet/modules/apt/templates/Ubuntu/preferences_utopic.erb b/puppet/modules/apt/templates/Ubuntu/preferences_utopic.erb
new file mode 120000
index 00000000..3debe4fc
--- /dev/null
+++ b/puppet/modules/apt/templates/Ubuntu/preferences_utopic.erb
@@ -0,0 +1 @@
+preferences_maverick.erb
\ No newline at end of file
diff --git a/puppet/modules/apt/templates/Ubuntu/preferences_vivid.erb b/puppet/modules/apt/templates/Ubuntu/preferences_vivid.erb
new file mode 120000
index 00000000..3debe4fc
--- /dev/null
+++ b/puppet/modules/apt/templates/Ubuntu/preferences_vivid.erb
@@ -0,0 +1 @@
+preferences_maverick.erb
\ No newline at end of file
diff --git a/puppet/modules/apt/templates/Ubuntu/preferences_wily.erb b/puppet/modules/apt/templates/Ubuntu/preferences_wily.erb
new file mode 120000
index 00000000..3debe4fc
--- /dev/null
+++ b/puppet/modules/apt/templates/Ubuntu/preferences_wily.erb
@@ -0,0 +1 @@
+preferences_maverick.erb
\ No newline at end of file
diff --git a/puppet/modules/apt/templates/Ubuntu/preferences_xenial.erb b/puppet/modules/apt/templates/Ubuntu/preferences_xenial.erb
new file mode 120000
index 00000000..3debe4fc
--- /dev/null
+++ b/puppet/modules/apt/templates/Ubuntu/preferences_xenial.erb
@@ -0,0 +1 @@
+preferences_maverick.erb
\ No newline at end of file
diff --git a/puppet/modules/apt/templates/Ubuntu/sources.list.erb b/puppet/modules/apt/templates/Ubuntu/sources.list.erb
new file mode 100644
index 00000000..e6d2f643
--- /dev/null
+++ b/puppet/modules/apt/templates/Ubuntu/sources.list.erb
@@ -0,0 +1,22 @@
+# This file is managed by puppet
+# all local modifications will be overwritten
+
+# basic <%= codename=scope.lookupvar('::ubuntu_codename') %>
+deb <%= ubuntu_url=scope.lookupvar('apt::ubuntu_url') %> <%= codename %> <%= lrepos=scope.lookupvar('apt::real_repos') %>
+<% if include_src=scope.lookupvar('apt::include_src') -%>
+deb-src <%= ubuntu_url %> <%= codename %> <%= lrepos %>
+<% end -%>
+
+<% if use_volatile=scope.lookupvar('apt::use_volatile') -%>
+# updates
+deb <%= ubuntu_url %> <%= codename %>-updates <%= lrepos %>
+<% if include_src -%>
+deb-src <%= ubuntu_url %> <%= codename %>-updates <%= lrepos %>
+<% end
+ end -%>
+
+# security support
+deb <%= ubuntu_url %> <%= codename %>-security <%= lrepos %>
+<% if include_src -%>
+deb-src <%= ubuntu_url %> <%= codename %>-security <%= lrepos %>
+<% end -%>
diff --git a/puppet/modules/apt/templates/preferences_snippet.erb b/puppet/modules/apt/templates/preferences_snippet.erb
new file mode 100644
index 00000000..903e73d6
--- /dev/null
+++ b/puppet/modules/apt/templates/preferences_snippet.erb
@@ -0,0 +1,4 @@
+Package: <%= @real_package %>
+Pin: <%= @pin %>
+Pin-Priority: <%= @priority %>
+
diff --git a/puppet/modules/apt/templates/preferences_snippet_release.erb b/puppet/modules/apt/templates/preferences_snippet_release.erb
new file mode 100644
index 00000000..b95d3f81
--- /dev/null
+++ b/puppet/modules/apt/templates/preferences_snippet_release.erb
@@ -0,0 +1,4 @@
+Package: <%= @real_package %>
+Pin: release a=<%= @release %>
+Pin-Priority: <%= @priority %>
+
diff --git a/puppet/modules/clamav/files/01-leap.conf b/puppet/modules/clamav/files/01-leap.conf
new file mode 100644
index 00000000..a7e49d17
--- /dev/null
+++ b/puppet/modules/clamav/files/01-leap.conf
@@ -0,0 +1,58 @@
+# If running clamd in "LocalSocket" mode (*NOT* in TCP/IP mode), and
+# either "SOcket Cat" (socat) or the "IO::Socket::UNIX" perl module
+# are installed on the system, and you want to report whether clamd
+# is running or not, uncomment the "clamd_socket" variable below (you
+# will be warned if neither socat nor IO::Socket::UNIX are found, but
+# the script will still run). You will also need to set the correct
+# path to your clamd socket file (if unsure of the path, check the
+# "LocalSocket" setting in your clamd.conf file for socket location).
+clamd_socket="/run/clamav/clamd.ctl"
+
+# If you would like to attempt to restart ClamD if detected not running,
+# uncomment the next 2 lines. Confirm the path to the "clamd_lock" file
+# (usually can be found in the clamd init script) and also enter the clamd
+# start command for your particular distro for the "start_clamd" variable
+# (the sample start command shown below should work for most linux distros).
+# NOTE: these 2 variables are dependent on the "clamd_socket" variable
+# shown above - if not enabled, then the following 2 variables will be
+# ignored, whether enabled or not.
+clamd_lock="/run/clamav/clamd.pid"
+start_clamd="clamdscan --reload"
+
+ss_dbs="
+ junk.ndb
+ phish.ndb
+ rogue.hdb
+ sanesecurity.ftm
+ scam.ndb
+ sigwhitelist.ign2
+ spamattach.hdb
+ spamimg.hdb
+ winnow.attachments.hdb
+ winnow_bad_cw.hdb
+ winnow_extended_malware.hdb
+ winnow_malware.hdb
+ winnow_malware_links.ndb
+ malwarehash.hsb
+ doppelstern.hdb
+ bofhland_cracked_URL.ndb
+ bofhland_malware_attach.hdb
+ bofhland_malware_URL.ndb
+ bofhland_phishing_URL.ndb
+ crdfam.clamav.hdb
+ phishtank.ndb
+ porcupine.ndb
+ spear.ndb
+ spearl.ndb
+"
+
+# ========================
+# SecuriteInfo Database(s)
+# ========================
+# Add or remove database file names between quote marks as needed. To
+# disable any SecuriteInfo database downloads, remove the appropriate
+# lines below. To disable all SecuriteInfo database file downloads,
+# comment all of the following lines.
+si_dbs=""
+
+mbl_dbs="" \ No newline at end of file
diff --git a/puppet/modules/clamav/files/clamav-daemon_default b/puppet/modules/clamav/files/clamav-daemon_default
new file mode 100644
index 00000000..b4cd6a4f
--- /dev/null
+++ b/puppet/modules/clamav/files/clamav-daemon_default
@@ -0,0 +1,8 @@
+# This is a file designed only to set special environment variables
+# eg TMP or TMPDIR. It is sourced from a shell script, so anything
+# put in here must be in variable=value format, suitable for sourcing
+# from a shell script.
+# Examples:
+# export TMPDIR=/dev/shm
+export TMP=/var/tmp
+export TMPDIR=/var/tmp
diff --git a/puppet/modules/clamav/files/clamav-milter_default b/puppet/modules/clamav/files/clamav-milter_default
new file mode 100644
index 00000000..5e33e822
--- /dev/null
+++ b/puppet/modules/clamav/files/clamav-milter_default
@@ -0,0 +1,14 @@
+#
+# clamav-milter init options
+#
+
+## SOCKET_RWGROUP
+# by default, the socket created by the milter has permissions
+# clamav:clamav:755. SOCKET_RWGROUP changes the group and changes the
+# permissions to 775 to give read-write access to that group.
+#
+# If you are using postfix to speak to the milter, you have to give the
+# postfix group permission to write to the socket.
+#
+SOCKET_RWGROUP=postfix
+export TMPDIR=/var/tmp
diff --git a/puppet/modules/clamav/manifests/daemon.pp b/puppet/modules/clamav/manifests/daemon.pp
new file mode 100644
index 00000000..2e13a8fb
--- /dev/null
+++ b/puppet/modules/clamav/manifests/daemon.pp
@@ -0,0 +1,91 @@
+# deploy clamav daemon
+class clamav::daemon {
+
+ $domain_hash = hiera('domain')
+ $domain = $domain_hash['full_suffix']
+
+ package { [ 'clamav-daemon', 'arj' ]:
+ ensure => installed;
+ }
+
+ service {
+ 'clamav-daemon':
+ ensure => running,
+      name       => 'clamav-daemon',
+ pattern => '/usr/sbin/clamd',
+ enable => true,
+ hasrestart => true,
+ subscribe => File['/etc/default/clamav-daemon'],
+ require => Package['clamav-daemon'];
+ }
+
+ file {
+ '/var/run/clamav':
+ ensure => directory,
+ mode => '0750',
+ owner => clamav,
+ group => postfix,
+ require => [Package['postfix'], Package['clamav-daemon']];
+
+ '/var/lib/clamav':
+ mode => '0755',
+ owner => clamav,
+ group => clamav,
+ require => Package['clamav-daemon'];
+
+ '/etc/default/clamav-daemon':
+ source => 'puppet:///modules/clamav/clamav-daemon_default',
+ mode => '0644',
+ owner => root,
+ group => root;
+
+ # this file contains additional domains that we want the clamav
+ # phishing process to look for (our domain)
+ '/var/lib/clamav/local.pdb':
+ content => template('clamav/local.pdb.erb'),
+ mode => '0644',
+ owner => clamav,
+ group => clamav,
+ require => Package['clamav-daemon'];
+ }
+
+ file_line {
+ 'clamav_daemon_tmp':
+ path => '/etc/clamav/clamd.conf',
+ line => 'TemporaryDirectory /var/tmp',
+ require => Package['clamav-daemon'],
+ notify => Service['clamav-daemon'];
+
+ 'enable_phishscanurls':
+ path => '/etc/clamav/clamd.conf',
+ match => 'PhishingScanURLs no',
+ line => 'PhishingScanURLs yes',
+ require => Package['clamav-daemon'],
+ notify => Service['clamav-daemon'];
+
+ 'clamav_LogSyslog_true':
+ path => '/etc/clamav/clamd.conf',
+ match => '^LogSyslog false',
+ line => 'LogSyslog true',
+ require => Package['clamav-daemon'],
+ notify => Service['clamav-daemon'];
+
+ 'clamav_MaxThreads':
+ path => '/etc/clamav/clamd.conf',
+ match => 'MaxThreads 20',
+ line => 'MaxThreads 100',
+ require => Package['clamav-daemon'],
+ notify => Service['clamav-daemon'];
+ }
+
+  # blank out the LogFile line so clamd logs to syslog only
+ file_line {
+ 'clamav_LogFile':
+ path => '/etc/clamav/clamd.conf',
+ match => '^LogFile .*',
+ line => '',
+ require => Package['clamav-daemon'],
+ notify => Service['clamav-daemon'];
+ }
+
+}
diff --git a/puppet/modules/clamav/manifests/freshclam.pp b/puppet/modules/clamav/manifests/freshclam.pp
new file mode 100644
index 00000000..80c822a4
--- /dev/null
+++ b/puppet/modules/clamav/manifests/freshclam.pp
@@ -0,0 +1,23 @@
+class clamav::freshclam {
+
+ package { 'clamav-freshclam': ensure => installed }
+
+ service {
+ 'freshclam':
+ ensure => running,
+ enable => true,
+      name       => 'clamav-freshclam',
+ pattern => '/usr/bin/freshclam',
+ hasrestart => true,
+ require => Package['clamav-freshclam'];
+ }
+
+ file_line {
+ 'freshclam_notify':
+ path => '/etc/clamav/freshclam.conf',
+ line => 'NotifyClamd /etc/clamav/clamd.conf',
+ require => Package['clamav-freshclam'],
+ notify => Service['freshclam'];
+ }
+
+}
diff --git a/puppet/modules/clamav/manifests/init.pp b/puppet/modules/clamav/manifests/init.pp
new file mode 100644
index 00000000..de8fb4dc
--- /dev/null
+++ b/puppet/modules/clamav/manifests/init.pp
@@ -0,0 +1,8 @@
+class clamav {
+
+ include clamav::daemon
+ include clamav::milter
+ include clamav::unofficial_sigs
+ include clamav::freshclam
+
+}
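A wiring sketch for the class above. The node name is illustrative; the hiera keys are taken from the lookups in the daemon and milter subclasses:

    # assumes hiera provides:
    #   domain: { full_suffix: 'example.org' }
    #   clamav: { whitelisted_addresses: ['ticket@example.org'] }
    node 'mx1.example.org' {
      include clamav
    }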
diff --git a/puppet/modules/clamav/manifests/milter.pp b/puppet/modules/clamav/manifests/milter.pp
new file mode 100644
index 00000000..e8a85e3f
--- /dev/null
+++ b/puppet/modules/clamav/manifests/milter.pp
@@ -0,0 +1,50 @@
+class clamav::milter {
+
+ $clamav = hiera('clamav')
+ $whitelisted_addresses = $clamav['whitelisted_addresses']
+ $domain_hash = hiera('domain')
+ $domain = $domain_hash['full_suffix']
+
+ package { 'clamav-milter': ensure => installed }
+
+ service {
+ 'clamav-milter':
+ ensure => running,
+ enable => true,
+      name       => 'clamav-milter',
+ pattern => '/usr/sbin/clamav-milter',
+ hasrestart => true,
+ require => Package['clamav-milter'],
+ subscribe => File['/etc/default/clamav-milter'];
+ }
+
+ file {
+ '/run/clamav/milter.ctl':
+ mode => '0666',
+ owner => clamav,
+ group => postfix,
+ require => Class['clamav::daemon'];
+
+ '/etc/clamav/clamav-milter.conf':
+ content => template('clamav/clamav-milter.conf.erb'),
+ mode => '0644',
+ owner => root,
+ group => root,
+ require => Package['clamav-milter'],
+ subscribe => Service['clamav-milter'];
+
+ '/etc/default/clamav-milter':
+ source => 'puppet:///modules/clamav/clamav-milter_default',
+ mode => '0644',
+ owner => root,
+ group => root;
+
+ '/etc/clamav/whitelisted_addresses':
+ content => template('clamav/whitelisted_addresses.erb'),
+ mode => '0644',
+ owner => root,
+ group => root,
+ require => Package['clamav-milter'];
+ }
+
+}
diff --git a/puppet/modules/clamav/manifests/unofficial_sigs.pp b/puppet/modules/clamav/manifests/unofficial_sigs.pp
new file mode 100644
index 00000000..2d849585
--- /dev/null
+++ b/puppet/modules/clamav/manifests/unofficial_sigs.pp
@@ -0,0 +1,23 @@
+class clamav::unofficial_sigs {
+
+ package { 'clamav-unofficial-sigs':
+ ensure => installed
+ }
+
+ ensure_packages(['wget', 'gnupg', 'socat', 'rsync', 'curl'])
+
+ file {
+ '/var/log/clamav-unofficial-sigs.log':
+ ensure => file,
+ owner => clamav,
+ group => clamav,
+ require => Package['clamav-unofficial-sigs'];
+
+ '/etc/clamav-unofficial-sigs.conf.d/01-leap.conf':
+ source => 'puppet:///modules/clamav/01-leap.conf',
+ mode => '0755',
+ owner => root,
+ group => root,
+ require => Package['clamav-unofficial-sigs'];
+ }
+}
diff --git a/puppet/modules/clamav/templates/clamav-milter.conf.erb b/puppet/modules/clamav/templates/clamav-milter.conf.erb
new file mode 100644
index 00000000..9bf7099e
--- /dev/null
+++ b/puppet/modules/clamav/templates/clamav-milter.conf.erb
@@ -0,0 +1,28 @@
+# THIS FILE MANAGED BY PUPPET
+MilterSocket /var/run/clamav/milter.ctl
+FixStaleSocket true
+User clamav
+MilterSocketGroup clamav
+MilterSocketMode 666
+AllowSupplementaryGroups true
+ReadTimeout 120
+Foreground false
+PidFile /var/run/clamav/clamav-milter.pid
+ClamdSocket unix:/var/run/clamav/clamd.ctl
+OnClean Accept
+OnInfected Reject
+OnFail Defer
+AddHeader Replace
+LogSyslog true
+LogFacility LOG_LOCAL6
+LogVerbose yes
+LogInfected Basic
+LogTime true
+LogFileUnlock false
+LogClean Off
+LogRotate true
+SupportMultipleRecipients false
+MaxFileSize 10M
+TemporaryDirectory /var/tmp
+RejectMsg "Message refused due to content violation: %v - contact https://<%= @domain %>/tickets/new if this is in error"
+Whitelist /etc/clamav/whitelisted_addresses
diff --git a/puppet/modules/clamav/templates/local.pdb.erb b/puppet/modules/clamav/templates/local.pdb.erb
new file mode 100644
index 00000000..9ea0584a
--- /dev/null
+++ b/puppet/modules/clamav/templates/local.pdb.erb
@@ -0,0 +1 @@
+H:<%= @domain %>
diff --git a/puppet/modules/clamav/templates/whitelisted_addresses.erb b/puppet/modules/clamav/templates/whitelisted_addresses.erb
new file mode 100644
index 00000000..9e068ec5
--- /dev/null
+++ b/puppet/modules/clamav/templates/whitelisted_addresses.erb
@@ -0,0 +1,5 @@
+<%- if @whitelisted_addresses then -%>
+<% @whitelisted_addresses.each do |name| -%>
+From::<%= name %>
+<% end -%>
+<% end -%>
diff --git a/puppet/modules/common/LICENSE b/puppet/modules/common/LICENSE
new file mode 100644
index 00000000..94a9ed02
--- /dev/null
+++ b/puppet/modules/common/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/puppet/modules/common/README b/puppet/modules/common/README
new file mode 100644
index 00000000..e6df7663
--- /dev/null
+++ b/puppet/modules/common/README
@@ -0,0 +1,44 @@
+Common Module
+-------------
+
+The common module installs various functions that are required by other
+modules. This module should be installed before any of the other modules.
+
+! Upgrade Notice !
+
+The 'append_if_no_such_line' define has been replaced with the 'line' define. If
+you are using 'append_if_no_such_line' anywhere in your manifests, you will need
+to transition to 'line' before upgrading to this version of the common
+module. The 'line' define is a drop-in replacement and essentially equivalent,
+so the transition is quite easy: you should only need to change the name
+in your manifests.
+
+To use this module, follow these directions:
+
+1. Your modules directory will need all the files included in this
+ repository placed under a directory called "common"
+
+2. Add the following line to manifests/site.pp:
+
+ import "modules.pp"
+
+3. Add the following line to manifests/modules.pp:
+
+ import "common"
+
+
+Original author: David Schmitt (mailto:david@dasz.at)
+Copyright:: Copyright (c) 2007-2009 dasz.at OG
+License:: 3-clause BSD
+
+Additional authors:
+Copyright (C) 2007 David Schmitt <david@schmitt.edv-bus.at>
+Copyright 2008-2011, admin(at)immerda.ch
+Copyright 2008, Puzzle ITC GmbH
+ Marcel Härry haerry+puppet(at)puzzle.ch
+ Simon Josi josi+puppet(at)puzzle.ch
+Copyright 2009-2011, Riseup Labs <http://riseuplabs.org>
+ Pietro Ferrari <pietro@riseup.net>
+ Micah Anderson <micah@riseup.net>
+Copyright (C) 2007 Antoine Beaupre <anarcat@koumbit.org>
+Copyright (c) 2011 intrigeri - intrigeri(at)boum.org \ No newline at end of file
diff --git a/puppet/modules/common/lib/puppet/parser/functions/basename.rb b/puppet/modules/common/lib/puppet/parser/functions/basename.rb
new file mode 100644
index 00000000..dc725375
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/basename.rb
@@ -0,0 +1,22 @@
+# This function has two modes of operation:
+#
+# basename(string) : string
+#
+# Returns the last component of the filename given as argument, which must be
+# formed using forward slashes ("/") regardless of the separator used on the
+# local file system.
+#
+# basename(string[]) : string[]
+#
+# Returns an array of strings with the basename of each item from the argument.
+#
+module Puppet::Parser::Functions
+ newfunction(:basename, :type => :rvalue) do |args|
+ if args[0].is_a?(Array)
+      args[0].collect { |a| File.basename(a) }
+ else
+ File.basename(args[0])
+ end
+ end
+end
+
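A usage sketch from a manifest (mirrors Ruby's File.basename; paths are illustrative):

    $file  = basename('/etc/apt/sources.list')        # => 'sources.list'
    $files = basename(['/a/b.conf', '/c/d.conf'])     # => ['b.conf', 'd.conf']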
diff --git a/puppet/modules/common/lib/puppet/parser/functions/dirname.rb b/puppet/modules/common/lib/puppet/parser/functions/dirname.rb
new file mode 100644
index 00000000..ea0d50b4
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/dirname.rb
@@ -0,0 +1,22 @@
+# This function has two modes of operation:
+#
+# dirname(string) : string
+#
+# Returns all components of the filename given as argument except the last
+# one. The filename must be formed using forward slashes ("/") regardless of
+# the separator used on the local file system.
+#
+# dirname(string[]) : string[]
+#
+# Returns an array of strings with the dirname of each item from the argument.
+#
+module Puppet::Parser::Functions
+ newfunction(:dirname, :type => :rvalue) do |args|
+ if args[0].is_a?(Array)
+      args[0].collect { |a| File.dirname(a) }
+ else
+ File.dirname(args[0])
+ end
+ end
+end
+
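Usage sketch, analogous to basename above:

    $dir  = dirname('/etc/apt/sources.list')          # => '/etc/apt'
    $dirs = dirname(['/a/b.conf', '/c/d.conf'])       # => ['/a', '/c']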
diff --git a/puppet/modules/common/lib/puppet/parser/functions/get_default.rb b/puppet/modules/common/lib/puppet/parser/functions/get_default.rb
new file mode 100644
index 00000000..3f4359bd
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/get_default.rb
@@ -0,0 +1,15 @@
+# get_default($value, $default) : $value
+#
+# Return $value if it is set and non-empty, otherwise $default.
+module Puppet::Parser::Functions
+ newfunction(:get_default, :type => :rvalue) do |args|
+ value = nil
+ args.each { |x|
+ if ! x.nil? and x.length > 0
+ value = x
+ break
+ end
+ }
+ return value
+ end
+end
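Usage sketch; the variable name and fallback are illustrative:

    $real_port = get_default($port, '8080')   # $port if set and non-empty, otherwise '8080'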
diff --git a/puppet/modules/common/lib/puppet/parser/functions/hostname.rb b/puppet/modules/common/lib/puppet/parser/functions/hostname.rb
new file mode 100644
index 00000000..7bc477f2
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/hostname.rb
@@ -0,0 +1,13 @@
+# get a unique array of IP addresses for a hostname
+require 'resolv'
+
+module Puppet::Parser::Functions
+ newfunction(:hostname, :type => :rvalue) do |args|
+ res = Array.new
+ Resolv::DNS.new.each_address(args[0]){ |addr|
+ res << addr
+ }
+ res.uniq
+ end
+end
+
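Usage sketch; despite its name the function resolves a hostname to its addresses, and the result shown is illustrative:

    $addresses = hostname('mail.example.org')   # => e.g. ['192.0.2.10']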
diff --git a/puppet/modules/common/lib/puppet/parser/functions/multi_source_template.rb b/puppet/modules/common/lib/puppet/parser/functions/multi_source_template.rb
new file mode 100644
index 00000000..e0753205
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/multi_source_template.rb
@@ -0,0 +1,29 @@
+module Puppet::Parser::Functions
+ require 'erb'
+
+ newfunction(:multi_source_template, :type => :rvalue) do |args|
+ contents = nil
+ environment = compiler.environment
+ sources = args
+
+ sources.each do |file|
+ Puppet.debug("Looking for #{file} in #{environment}")
+ if filename = Puppet::Parser::Files.find_template(file, environment.to_s)
+ wrapper = Puppet::Parser::TemplateWrapper.new(self)
+ wrapper.file = file
+
+ begin
+ contents = wrapper.result
+ rescue => detail
+ raise Puppet::ParseError, "Failed to parse template %s: %s" % [file, detail]
+ end
+
+ break
+ end
+ end
+
+ raise Puppet::ParseError, "multi_source_template: No match found for files: #{sources.join(', ')}" if contents == nil
+
+ contents
+ end
+end
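Usage sketch: the first template found wins, so list the most specific candidate first. Module and file names here are hypothetical:

    file { '/etc/foo.conf':
      content => multi_source_template(
        "site_foo/foo.conf.${::fqdn}.erb",
        'site_foo/foo.conf.erb',
        'foo/foo.conf.erb'),
    }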
diff --git a/puppet/modules/common/lib/puppet/parser/functions/prefix_with.rb b/puppet/modules/common/lib/puppet/parser/functions/prefix_with.rb
new file mode 100644
index 00000000..6e64a4a8
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/prefix_with.rb
@@ -0,0 +1,9 @@
+# prefix arguments 2..n with first argument
+
+module Puppet::Parser::Functions
+ newfunction(:prefix_with, :type => :rvalue) do |args|
+ prefix = args.shift
+ args.collect {|v| "%s%s" % [prefix, v] }
+ end
+end
+
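Usage sketch, following the "arguments 2..n" calling convention above:

    $devel_packages = prefix_with('lib', 'ssl-dev', 'xml2-dev')
    # => ['libssl-dev', 'libxml2-dev']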
diff --git a/puppet/modules/common/lib/puppet/parser/functions/re_escape.rb b/puppet/modules/common/lib/puppet/parser/functions/re_escape.rb
new file mode 100644
index 00000000..7bee90a8
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/re_escape.rb
@@ -0,0 +1,7 @@
+# apply ruby regexp escaping to a string
+module Puppet::Parser::Functions
+ newfunction(:re_escape, :type => :rvalue) do |args|
+ Regexp.escape(args[0])
+ end
+end
+
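Usage sketch, e.g. before interpolating a literal hostname into a regexp-based parameter:

    $pattern = re_escape('mail.example.org')   # => 'mail\.example\.org'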
diff --git a/puppet/modules/common/lib/puppet/parser/functions/slash_escape.rb b/puppet/modules/common/lib/puppet/parser/functions/slash_escape.rb
new file mode 100644
index 00000000..04d3b95e
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/slash_escape.rb
@@ -0,0 +1,7 @@
+# escape slashes in a String
+module Puppet::Parser::Functions
+ newfunction(:slash_escape, :type => :rvalue) do |args|
+ args[0].gsub(/\//, '\\/')
+ end
+end
+
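Usage sketch, e.g. before interpolating a path into a sed expression:

    $escaped = slash_escape('/var/lib/puppet')   # => '\/var\/lib\/puppet'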
diff --git a/puppet/modules/common/lib/puppet/parser/functions/substitute.rb b/puppet/modules/common/lib/puppet/parser/functions/substitute.rb
new file mode 100644
index 00000000..4c97def3
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/substitute.rb
@@ -0,0 +1,20 @@
+# substitute($string, $regex, $replacement) : $string
+# substitute($string[], $regex, $replacement) : $string[]
+#
+# Replace all occurrences of $regex in $string by $replacement.
+# $regex is interpreted as a Ruby regular expression.
+#
+# For long-term portability it is recommended to refrain from using Ruby's
+# extended RE features.
+module Puppet::Parser::Functions
+ newfunction(:substitute, :type => :rvalue) do |args|
+ if args[0].is_a?(Array)
+ args[0].collect do |val|
+ val.gsub(/#{args[1]}/, args[2])
+ end
+ else
+ args[0].gsub(/#{args[1]}/, args[2])
+ end
+ end
+end
+
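Usage sketch:

    $date = substitute('2011-02-03', '-', '/')        # => '2011/02/03'
    $safe = substitute(['a b', 'c d'], ' ', '_')       # => ['a_b', 'c_d']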
diff --git a/puppet/modules/common/lib/puppet/parser/functions/tfile.rb b/puppet/modules/common/lib/puppet/parser/functions/tfile.rb
new file mode 100644
index 00000000..acb6609b
--- /dev/null
+++ b/puppet/modules/common/lib/puppet/parser/functions/tfile.rb
@@ -0,0 +1,19 @@
+Puppet::Parser::Functions::newfunction(
+ :tfile,
+ :type => :rvalue,
+ :doc => "Returns the content of a file. If the file or the path does not
+ yet exist, it will create the path and touch the file."
+) do |args|
+ raise Puppet::ParseError, 'tfile() needs one argument' if args.length != 1
+ path = args.to_a.first
+ unless File.exists?(path)
+ dir = File.dirname(path)
+ unless File.directory?(dir)
+ require 'fileutils'
+ FileUtils.mkdir_p(dir, :mode => 0700)
+ end
+ require 'fileutils'
+ FileUtils.touch(path)
+ end
+ File.read(path)
+end
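Usage sketch; the path is illustrative and would typically sit under the module_dir tree managed by this module:

    $serial = tfile('/var/lib/puppet/modules/common/serial')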
diff --git a/puppet/modules/common/manifests/module_dir.pp b/puppet/modules/common/manifests/module_dir.pp
new file mode 100644
index 00000000..2420da94
--- /dev/null
+++ b/puppet/modules/common/manifests/module_dir.pp
@@ -0,0 +1,34 @@
+# common/manifests/module_dir.pp -- create a default directory
+# for storing module specific information
+#
+# Copyright (C) 2007 David Schmitt <david@schmitt.edv-bus.at>
+# See LICENSE for the full license granted to you.
+
+# A module_dir is a storage place for all the stuff a module might want to
+# store. According to the FHS, this should go to /var/lib. Since this is a part
+# of puppet, the full path is /var/lib/puppet/modules/${name}. Every module
+# should prefix its module_dirs with its name.
+#
+# Usage:
+# include common::moduledir
+# module_dir { ["common", "common/dir1", "common/dir2" ]: }
+#
+# You may refer to a file in module_dir by using :
+# file { "${common::moduledir::module_dir_path}/somedir/somefile": }
+define common::module_dir(
+ $owner = root,
+ $group = 0,
+ $mode = 0644
+) {
+ include common::moduledir
+ file {
+ "${common::moduledir::module_dir_path}/${name}":
+ ensure => directory,
+ recurse => true,
+ purge => true,
+ force => true,
+ owner => $owner,
+ group => $group,
+ mode => $mode;
+ }
+}
diff --git a/puppet/modules/common/manifests/module_file.pp b/puppet/modules/common/manifests/module_file.pp
new file mode 100644
index 00000000..c1070bcf
--- /dev/null
+++ b/puppet/modules/common/manifests/module_file.pp
@@ -0,0 +1,37 @@
+# common/manifests/module_file.pp -- use a modules_dir to store module
+# specific files
+#
+# Copyright (C) 2007 David Schmitt <david@schmitt.edv-bus.at>
+# See LICENSE for the full license granted to you.
+
+# Put a file into module-local storage.
+#
+# Usage:
+# common::module_file { "module/file":
+# source => "puppet:///...",
+# mode => 644, # default
+# owner => root, # default
+# group => 0, # default
+# }
+define common::module_file (
+ $ensure = present,
+ $source = undef,
+ $owner = root,
+ $group = 0,
+ $mode = 0644
+){
+ include common::moduledir
+ file {
+ "${common::moduledir::module_dir_path}/${name}":
+ ensure => $ensure,
+ }
+
+ if $ensure != 'absent' {
+ File["${common::moduledir::module_dir_path}/${name}"]{
+ source => $source,
+ owner => $owner,
+ group => $group,
+ mode => $mode,
+ }
+ }
+}
diff --git a/puppet/modules/common/manifests/moduledir.pp b/puppet/modules/common/manifests/moduledir.pp
new file mode 100644
index 00000000..f779085b
--- /dev/null
+++ b/puppet/modules/common/manifests/moduledir.pp
@@ -0,0 +1,18 @@
+# setup root for module_dirs
+class common::moduledir {
+ # Use this variable to reference the base path. Thus you are safe from any
+ # changes.
+ $module_dir_path = '/var/lib/puppet/modules'
+
+ # Module programmers can use /var/lib/puppet/modules/$modulename to save
+ # module-local data, e.g. for constructing config files
+ file{$module_dir_path:
+ ensure => directory,
+ recurse => true,
+ purge => true,
+ force => true,
+ owner => root,
+ group => 0,
+ mode => '0755';
+ }
+}
diff --git a/puppet/modules/common/manifests/moduledir/common.pp b/puppet/modules/common/manifests/moduledir/common.pp
new file mode 100644
index 00000000..e74c601e
--- /dev/null
+++ b/puppet/modules/common/manifests/moduledir/common.pp
@@ -0,0 +1,4 @@
+# setup a common dir
+class common::moduledir::common{
+ common::module_dir{'common': }
+}
diff --git a/puppet/modules/common/spec/spec.opts b/puppet/modules/common/spec/spec.opts
new file mode 100644
index 00000000..91cd6427
--- /dev/null
+++ b/puppet/modules/common/spec/spec.opts
@@ -0,0 +1,6 @@
+--format
+s
+--colour
+--loadby
+mtime
+--backtrace
diff --git a/puppet/modules/common/spec/spec_helper.rb b/puppet/modules/common/spec/spec_helper.rb
new file mode 100644
index 00000000..6ba62e11
--- /dev/null
+++ b/puppet/modules/common/spec/spec_helper.rb
@@ -0,0 +1,16 @@
+require 'pathname'
+dir = Pathname.new(__FILE__).parent
+$LOAD_PATH.unshift(dir, dir + 'lib', dir + '../lib')
+require 'puppet'
+gem 'rspec', '>= 1.2.9'
+require 'spec/autorun'
+
+Dir[File.join(File.dirname(__FILE__), 'support', '*.rb')].each do |support_file|
+ require support_file
+end
+
+# We need this because the RAL uses 'should' as a method. This
+# allows us the same behaviour but with a different method name.
+class Object
+ alias :must :should
+end
diff --git a/puppet/modules/common/spec/unit/parser/functions/tfile.rb b/puppet/modules/common/spec/unit/parser/functions/tfile.rb
new file mode 100644
index 00000000..5c8f636e
--- /dev/null
+++ b/puppet/modules/common/spec/unit/parser/functions/tfile.rb
@@ -0,0 +1,54 @@
+#! /usr/bin/env ruby
+
+require File.dirname(__FILE__) + '/../../../spec_helper'
+require 'mocha'
+
+describe "the tfile function" do
+
+ before :each do
+ @scope = Puppet::Parser::Scope.new
+ end
+
+ it "should exist" do
+ Puppet::Parser::Functions.function("tfile").should == "function_tfile"
+ end
+
+ it "should raise a ParseError if there is less than 1 arguments" do
+ lambda { @scope.function_tfile([]) }.should( raise_error(Puppet::ParseError))
+ end
+
+ it "should raise a ParseError if there is more than 1 arguments" do
+ lambda { @scope.function_tfile(["bar", "gazonk"]) }.should( raise_error(Puppet::ParseError))
+ end
+
+ describe "when executed properly" do
+
+ before :each do
+ File.stubs(:read).with('/some_path/aa').returns("foo1\nfoo2\n")
+ end
+
+ it "should return the content of the file" do
+ File.stubs(:exists?).with('/some_path/aa').returns(true)
+ result = @scope.function_tfile(['/some_path/aa'])
+ result.should == "foo1\nfoo2\n"
+ end
+
+ it "should touch a file if it does not exist" do
+ File.stubs(:exists?).with('/some_path/aa').returns(false)
+ File.stubs(:directory?).with('/some_path').returns(true)
+ FileUtils.expects(:touch).with('/some_path/aa')
+ result = @scope.function_tfile(['/some_path/aa'])
+ result.should == "foo1\nfoo2\n"
+ end
+
+ it "should create the path if it does not exist" do
+ File.stubs(:exists?).with('/some_path/aa').returns(false)
+ File.stubs(:directory?).with('/some_path').returns(false)
+ FileUtils.expects(:mkdir_p).with("/some_path",:mode => 0700)
+ FileUtils.expects(:touch).with('/some_path/aa')
+ result = @scope.function_tfile(['/some_path/aa'])
+ result.should == "foo1\nfoo2\n"
+ end
+ end
+
+end
diff --git a/puppet/modules/concat/CHANGELOG b/puppet/modules/concat/CHANGELOG
new file mode 100644
index 00000000..c506cf1a
--- /dev/null
+++ b/puppet/modules/concat/CHANGELOG
@@ -0,0 +1,29 @@
+KNOWN ISSUES:
+- In 0.24.8 you will see unintended notifies; if you build a file
+ in a run, the next run will also see it as changed. This is due
+ to how 0.24.8 does the purging of unhandled files; this is improved
+ in 0.25.x and we cannot work around it in our code.
+
+CHANGELOG:
+- 2010/02/19 - initial release
+- 2010/03/12 - add support for 0.24.8 and newer
+ - make the location of sort configurable
+ - add the ability to add shell comment based warnings to
+ top of files
+ - add the ability to create empty files
+- 2010/04/05 - fix parsing of WARN and change code style to match rest
+ of the code
+ - Better and safer boolean handling for warn and force
+ - Don't use hard coded paths in the shell script, set PATH
+ top of the script
+ - Use file{} to copy the result and make all fragments owned
+ by root. This means we can change the ownership/group of the
+ resulting file at any time.
+ - You can specify ensure => "/some/other/file" in concat::fragment
+ to include the contents of a symlink into the final file.
+- 2010/04/16 - Add more cleaning of the fragment name - removing / from the $name
+- 2010/05/22 - Improve documentation and show the use of ensure =>
+- 2010/07/14 - Add support for setting the filebucket behavior of files
+- 2010/10/04 - Make the warning message configurable
+- 2010/12/03 - Add flags to make concat work better on Solaris - thanks Jonathan Boyett
+- 2011/02/03 - Make the shell script more portable and add a config option for root group
diff --git a/puppet/modules/concat/LICENSE b/puppet/modules/concat/LICENSE
new file mode 100644
index 00000000..6a9e9a19
--- /dev/null
+++ b/puppet/modules/concat/LICENSE
@@ -0,0 +1,14 @@
+ Copyright 2012 R.I.Pienaar
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/puppet/modules/concat/Modulefile b/puppet/modules/concat/Modulefile
new file mode 100644
index 00000000..d6ab2bb0
--- /dev/null
+++ b/puppet/modules/concat/Modulefile
@@ -0,0 +1,8 @@
+name 'puppet-concat'
+version '0.1.0'
+source 'git://github.com/ripienaar/puppet-concat.git'
+author 'R.I.Pienaar'
+license 'Apache'
+summary 'Concat module'
+description 'Concat module'
+project_page 'http://github.com/ripienaar/puppet-concat'
diff --git a/puppet/modules/concat/README.markdown b/puppet/modules/concat/README.markdown
new file mode 100644
index 00000000..8736d57a
--- /dev/null
+++ b/puppet/modules/concat/README.markdown
@@ -0,0 +1,112 @@
+What is it?
+===========
+
+A Puppet module that can construct files from fragments.
+
+Please see the comments in the various .pp files for details
+as well as posts on my blog at http://www.devco.net/
+
+Released under the Apache 2.0 licence
+
+Usage:
+------
+
+Before you can use any of the concat features you should include the class
+concat::setup somewhere on your node first.
+
+Say you wanted an /etc/motd file that listed all the major modules
+on the machine, and that would be maintained automatically even
+if you just remove the include lines for other modules. You could
+use code like the example below; a sample /etc/motd would be:
+
+<pre>
+Puppet modules on this server:
+
+ -- Apache
+ -- MySQL
+</pre>
+
+Local sysadmins can also append to the file by just editing /etc/motd.local;
+their changes will be incorporated into the puppet-managed motd.
+
+<pre>
+# class to setup basic motd, include on all nodes
+class motd {
+ include concat::setup
+ $motd = "/etc/motd"
+
+ concat{$motd:
+ owner => root,
+ group => root,
+ mode => 644
+ }
+
+ concat::fragment{"motd_header":
+ target => $motd,
+ content => "\nPuppet modules on this server:\n\n",
+ order => 01,
+ }
+
+ # local users on the machine can append to motd by just creating
+ # /etc/motd.local
+ concat::fragment{"motd_local":
+ target => $motd,
+ ensure => "/etc/motd.local",
+ order => 15
+ }
+}
+
+# used by other modules to register themselves in the motd
+define motd::register($content="", $order=10) {
+ if $content == "" {
+ $body = $name
+ } else {
+ $body = $content
+ }
+
+ concat::fragment{"motd_fragment_$name":
+ target => "/etc/motd",
+ content => " -- $body\n"
+ }
+}
+
+# a sample apache module
+class apache {
+ include apache::install, apache::config, apache::service
+
+ motd::register{"Apache": }
+}
+</pre>
+
+Known Issues:
+-------------
+* In 0.24.8 you will see unintended notifies; if you build a file
+ in a run, the next run will also see it as changed. This is due
+ to how 0.24.8 does the purging of unhandled files; this is improved
+ in 0.25.x and we cannot work around it in our code.
+* Since puppet-concat now relies on a fact for the concat directory,
+ you will need to set up pluginsync = true for at least the first run.
+ You have this issue if puppet fails to run on the client and you have
+ a message similar to
+ "err: Failed to apply catalog: Parameter path failed: File
+ paths must be fully qualified, not 'undef' at [...]/concat/manifests/setup.pp:44".
+
+Contributors:
+-------------
+**Paul Elliot**
+
+ * Provided 0.24.8 support, shell warnings and empty file creation support.
+
+**Chad Netzer**
+
+ * Various patches to improve safety of file operations
+ * Symlink support
+
+**David Schmitt**
+
+ * Patch to remove hard coded paths relying on OS path
+ * Patch to use file{} to copy the resulting file to the final destination. This means the Puppet client will show diffs and that hopefully we can change file ownerships now
+
+Contact:
+--------
+You can contact me at rip@devco.net or follow my blog at http://www.devco.net. I am also on twitter as ripienaar.
diff --git a/puppet/modules/concat/Rakefile b/puppet/modules/concat/Rakefile
new file mode 100644
index 00000000..764aebd2
--- /dev/null
+++ b/puppet/modules/concat/Rakefile
@@ -0,0 +1,13 @@
+require 'rake'
+require 'rspec/core/rake_task'
+
+task :default => [:spec]
+
+desc "Run all module spec tests (Requires rspec-puppet gem)"
+RSpec::Core::RakeTask.new(:spec)
+
+desc "Build package"
+task :build do
+ system("puppet-module build")
+end
+
diff --git a/puppet/modules/concat/files/concatfragments.sh b/puppet/modules/concat/files/concatfragments.sh
new file mode 100755
index 00000000..c9397975
--- /dev/null
+++ b/puppet/modules/concat/files/concatfragments.sh
@@ -0,0 +1,129 @@
+#!/bin/sh
+
+# Script to concat files to a config file.
+#
+# Given a directory like this:
+# /path/to/conf.d
+# |-- fragments
+# | |-- 00_named.conf
+# | |-- 10_domain.net
+# | `-- zz_footer
+#
+# The script supports a test option that will build the concat file to a temp location and
+# use /usr/bin/cmp to verify if it should be run or not. This would result in the concat happening
+# twice on each run but gives you the option to have an unless option in your execs to inhibit rebuilds.
+#
+# Without the test option and the unless combo your services that depend on the final file would end up
+# restarting on each run, or in other manifest models some changes might get missed.
+#
+# OPTIONS:
+# -o The file to create from the sources
+# -d The directory where the fragments are kept
+# -t Test to find out if a build is needed, basically concats the files to a temp
+# location and compare with what's in the final location, return codes are designed
+# for use with unless on an exec resource
+# -w Add a shell style comment at the top of the created file to warn users that it
+# is generated by puppet
+# -f Enables the creation of empty output files when no fragments are found
+# -n Sort the output numerically rather than the default alpha sort
+#
+# the command:
+#
+# concatfragments.sh -o /path/to/conffile.cfg -d /path/to/conf.d
+#
+# creates /path/to/conf.d/fragments.concat and copies the resulting
+# file to /path/to/conffile.cfg. The files will be sorted alphabetically;
+# pass the -n switch to sort numerically.
+#
+# The script does error checking on the various dirs and files to make
+# sure things don't fail.
+
+OUTFILE=""
+WORKDIR=""
+TEST=""
+FORCE=""
+WARN=""
+SORTARG=""
+
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+
+## Well, if there's ever a bad way to do things, Nexenta has it.
+## http://nexenta.org/projects/site/wiki/Personalities
+unset SUN_PERSONALITY
+
+while getopts "o:s:d:tnw:f" options; do
+ case $options in
+ o ) OUTFILE=$OPTARG;;
+ d ) WORKDIR=$OPTARG;;
+ n ) SORTARG="-n";;
+ w ) WARNMSG="$OPTARG";;
+ f ) FORCE="true";;
+ t ) TEST="true";;
+ * ) echo "Specify output file with -o and fragments directory with -d"
+ exit 1;;
+ esac
+done
+
+# do we have -o?
+if [ x${OUTFILE} = "x" ]; then
+ echo "Please specify an output file with -o"
+ exit 1
+fi
+
+# do we have -d?
+if [ x${WORKDIR} = "x" ]; then
+ echo "Please fragments directory with -d"
+ exit 1
+fi
+
+# can we write to -o?
+if [ -f ${OUTFILE} ]; then
+ if [ ! -w ${OUTFILE} ]; then
+ echo "Cannot write to ${OUTFILE}"
+ exit 1
+ fi
+else
+ if [ ! -w `dirname ${OUTFILE}` ]; then
+ echo "Cannot write to `dirname ${OUTFILE}` to create ${OUTFILE}"
+ exit 1
+ fi
+fi
+
+# do we have a fragments subdir inside the work dir?
+if [ ! -d "${WORKDIR}/fragments" ] && [ ! -x "${WORKDIR}/fragments" ]; then
+ echo "Cannot access the fragments directory"
+ exit 1
+fi
+
+# are there actually any fragments?
+if [ ! "$(ls -A ${WORKDIR}/fragments)" ]; then
+ if [ x${FORCE} = "x" ]; then
+ echo "The fragments directory is empty, cowardly refusing to make empty config files"
+ exit 1
+ fi
+fi
+
+cd ${WORKDIR}
+
+if [ x${WARNMSG} = "x" ]; then
+ : > "fragments.concat"
+else
+ printf '%s\n' "$WARNMSG" > "fragments.concat"
+fi
+
+# find all the files in the fragments directory, sort them numerically and concat to fragments.concat in the working dir
+find fragments/ -type f -follow | sort ${SORTARG} | while read fragfile; do
+ cat "$fragfile" >> "fragments.concat"
+done
+
+if [ x${TEST} = "x" ]; then
+ # This is a real run, copy the file to outfile
+ cp fragments.concat ${OUTFILE}
+ RETVAL=$?
+else
+ # Just compare the result to outfile to help the exec decide
+ cmp ${OUTFILE} fragments.concat
+ RETVAL=$?
+fi
+
+exit $RETVAL
diff --git a/puppet/modules/concat/files/null/.gitignore b/puppet/modules/concat/files/null/.gitignore
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/puppet/modules/concat/files/null/.gitignore
diff --git a/puppet/modules/concat/lib/facter/concat_basedir.rb b/puppet/modules/concat/lib/facter/concat_basedir.rb
new file mode 100644
index 00000000..02e9c5bf
--- /dev/null
+++ b/puppet/modules/concat/lib/facter/concat_basedir.rb
@@ -0,0 +1,5 @@
+Facter.add("concat_basedir") do
+ setcode do
+ File.join(Puppet[:vardir],"concat")
+ end
+end
diff --git a/puppet/modules/concat/manifests/fragment.pp b/puppet/modules/concat/manifests/fragment.pp
new file mode 100644
index 00000000..943bf671
--- /dev/null
+++ b/puppet/modules/concat/manifests/fragment.pp
@@ -0,0 +1,49 @@
+# Puts a file fragment into a directory previously set up using concat
+#
+# OPTIONS:
+# - target The file that these fragments belong to
+# - content If present puts the content into the file
+# - source If content was not specified, use the source
+# - order By default all files get a 10_ prefix in the directory
+# you can set it to anything else using this to influence the
+# order of the content in the file
+# - ensure Present/Absent or destination to a file to include another file
+# - mode Mode for the file
+# - owner Owner of the file
+# - group Group of the file
+# - backup Controls the filebucketing behavior of the final file and
+# see File type reference for its use. Defaults to 'puppet'
+define concat::fragment($target, $content='', $source='', $order=10, $ensure = 'present', $mode = '0644', $owner = $::id, $group = $concat::setup::root_group, $backup = 'puppet') {
+ $safe_name = regsubst($name, '/', '_', 'G')
+ $safe_target_name = regsubst($target, '/', '_', 'G')
+ $concatdir = $concat::setup::concatdir
+ $fragdir = "${concatdir}/${safe_target_name}"
+
+ # if content is passed, use that, else if source is passed use that
+ # if neither passed, but $ensure is in symlink form, make a symlink
+ case $content {
+ '': {
+ case $source {
+ '': {
+ case $ensure {
+ '', 'absent', 'present', 'file', 'directory': {
+ crit('No content, source or symlink specified')
+ }
+ }
+ }
+ default: { File{ source => $source } }
+ }
+ }
+ default: { File{ content => $content } }
+ }
+
+ file{"${fragdir}/fragments/${order}_${safe_name}":
+ ensure => $ensure,
+ mode => $mode,
+ owner => $owner,
+ group => $group,
+ backup => $backup,
+ alias => "concat_fragment_${name}",
+ notify => Exec["concat_${target}"]
+ }
+}
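A short usage sketch for this define; the target path and fragment names are illustrative and assume a concat resource for /etc/foo.conf exists elsewhere:

    concat::fragment { 'foo_header':
      target  => '/etc/foo.conf',
      order   => 01,
      content => "# managed by puppet\n",
    }

    # include an unmanaged local file via the symlink form of ensure
    concat::fragment { 'foo_local':
      target => '/etc/foo.conf',
      order  => 99,
      ensure => '/etc/foo.conf.local',
    }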
diff --git a/puppet/modules/concat/manifests/init.pp b/puppet/modules/concat/manifests/init.pp
new file mode 100644
index 00000000..0b3ed564
--- /dev/null
+++ b/puppet/modules/concat/manifests/init.pp
@@ -0,0 +1,178 @@
+# A system to construct files using fragments from other files or templates.
+#
+# This requires at least puppet 0.25 to work correctly as we use some
+# enhancements in recursive directory management and regular expressions
+# to do the work here.
+#
+# USAGE:
+# The basic use case is as below:
+#
+# concat{"/etc/named.conf":
+# notify => Service["named"]
+# }
+#
+# concat::fragment{"foo.com_config":
+# target => "/etc/named.conf",
+# order => 10,
+# content => template("named_conf_zone.erb")
+# }
+#
+# # add a fragment not managed by puppet so local users
+# # can add content to managed file
+# concat::fragment{"foo.com_user_config":
+# target => "/etc/named.conf",
+# order => 12,
+# ensure => "/etc/named.conf.local"
+# }
+#
+# This will use the template named_conf_zone.erb to build a single
+# bit of config up and put it into the fragments dir. The file
+# will have a number prefix of 10; you can use the order option
+# to control that and thus control the order the final file gets built in.
+#
+# SETUP:
+# The class concat::setup uses the fact concat_basedir to define the variable
+# $concatdir, where all the temporary files and fragments will be
+# durably stored. The fact concat_basedir will be set on the client to
+# <Puppet[:vardir]>/concat, so you will be able to run different setups/flavours
+# of puppet clients.
+# However, since this requires the file lib/facter/concat_basedir.rb to be
+# deployed on the clients, you will have to set "pluginsync = true" on
+# both the master and client, at least for the first run.
+#
+# There's some regular expression magic to figure out the puppet version but
+# if you're on an older 0.24 version just set $puppetversion = 24
+#
+# Before you can use any of the concat features you should include the
+# class concat::setup somewhere on your node first.
+#
+# DETAIL:
+# We use a helper shell script called concatfragments.sh that gets placed
+# in <Puppet[:vardir]>/concat/bin to do the concatenation. While this might
+# seem more complex than some of the one-liner alternatives you might find on
+# the net we do a lot of error checking and safety checks in the script to avoid
+# problems that might be caused by complex escaping errors etc.
+#
+# LICENSE:
+# Apache Version 2
+#
+# LATEST:
+# http://github.com/ripienaar/puppet-concat/
+#
+# CONTACT:
+# R.I.Pienaar <rip@devco.net>
+# Volcane on freenode
+# @ripienaar on twitter
+# www.devco.net
+
+
+# Sets up so that you can use fragments to build a final config file,
+#
+# OPTIONS:
+# - mode The mode of the final file
+# - owner Who will own the file
+# - group Which group will own the file
+# - force Enables creating empty files if no fragments are present
+# - warn Adds a normal shell style comment at the top of the file indicating
+# that it is built by puppet
+# - backup Controls the filebucketing behavior of the final file and
+# see File type reference for its use. Defaults to 'puppet'
+#
+# ACTIONS:
+# - Creates fragment directories if they didn't exist already
+# - Executes the concatfragments.sh script to build the final file; this script will create
+# directory/fragments.concat. Execution happens only when:
+# * The directory changes
+# * fragments.concat != final destination, this means rebuilds will happen whenever
+# someone changes or deletes the final file. Checking is done using /usr/bin/cmp.
+# * The Exec gets notified by something else - like the concat::fragment define
+# - Copies the file over to the final destination using a file resource
+#
+# ALIASES:
+# - The exec can be notified using Exec["concat_/path/to/file"] or Exec["concat_/path/to/directory"]
+# - The final file can be referenced as File["/path/to/file"] or File["concat_/path/to/file"]
+define concat($mode = '0644', $owner = $::id, $group = $concat::setup::root_group, $warn = false, $force = false, $backup = 'puppet', $gnu = undef, $order='alpha') {
+ $safe_name = regsubst($name, '/', '_', 'G')
+ $concatdir = $concat::setup::concatdir
+ $version = $concat::setup::majorversion
+ $fragdir = "${concatdir}/${safe_name}"
+ $concat_name = 'fragments.concat.out'
+ $default_warn_message = '# This file is managed by Puppet. DO NOT EDIT.'
+
+ case $warn {
+ 'true',true,yes,on: { $warnmsg = $default_warn_message }
+ 'false',false,no,off: { $warnmsg = '' }
+ default: { $warnmsg = $warn }
+ }
+
+ $warnmsg_escaped = regsubst($warnmsg, "'", "'\\\\''", 'G')
+ $warnflag = $warnmsg_escaped ? {
+ '' => '',
+ default => "-w '${warnmsg_escaped}'"
+ }
+
+ case $force {
+ 'true',true,yes,on: { $forceflag = '-f' }
+ 'false',false,no,off: { $forceflag = '' }
+ default: { fail("Improper 'force' value given to concat: ${force}") }
+ }
+
+ case $order {
+ numeric: { $orderflag = '-n' }
+ alpha: { $orderflag = '' }
+ default: { fail("Improper 'order' value given to concat: ${order}") }
+ }
+
+ File{
+ owner => $::id,
+ group => $group,
+ mode => $mode,
+ backup => $backup
+ }
+
+ file{$fragdir:
+ ensure => directory;
+
+ "${fragdir}/fragments":
+ ensure => directory,
+ recurse => true,
+ purge => true,
+ force => true,
+ ignore => ['.svn', '.git', '.gitignore'],
+ source => $version ? {
+ 24 => 'puppet:///concat/null',
+ default => undef,
+ },
+ notify => Exec["concat_${name}"];
+
+ "${fragdir}/fragments.concat":
+ ensure => present;
+
+ "${fragdir}/${concat_name}":
+ ensure => present;
+
+ $name:
+ ensure => present,
+ source => "${fragdir}/${concat_name}",
+ owner => $owner,
+ group => $group,
+ checksum => md5,
+ mode => $mode,
+ alias => "concat_${name}";
+ }
+
+ exec{"concat_${name}":
+ notify => File[$name],
+ subscribe => File[$fragdir],
+ alias => "concat_${fragdir}",
+ require => [ File[$fragdir], File["${fragdir}/fragments"], File["${fragdir}/fragments.concat"] ],
+ unless => "${concat::setup::concatdir}/bin/concatfragments.sh -o ${fragdir}/${concat_name} -d ${fragdir} -t ${warnflag} ${forceflag} ${orderflag}",
+ command => "${concat::setup::concatdir}/bin/concatfragments.sh -o ${fragdir}/${concat_name} -d ${fragdir} ${warnflag} ${forceflag} ${orderflag}",
+ }
+ if $::id == 'root' {
+ Exec["concat_${name}"]{
+ user => root,
+ group => $group,
+ }
+ }
+}
diff --git a/puppet/modules/concat/manifests/setup.pp b/puppet/modules/concat/manifests/setup.pp
new file mode 100644
index 00000000..38aeb964
--- /dev/null
+++ b/puppet/modules/concat/manifests/setup.pp
@@ -0,0 +1,49 @@
+# Sets up the concat system.
+#
+# $concatdir is where the fragments live and is set on the fact concat_basedir.
+# Since puppet should always manage files in $concatdir and they should
+# not be deleted ever, /tmp is not an option.
+#
+# $puppetversion should be either 24 or 25 to enable a 24-compatible
+# mode. In 24 mode you might see phantom notifies; this is a side effect
+# of the method we use to clear the fragments directory.
+#
+# The regular expression below will try to figure out your puppet version
+# but this code will only work in 0.24.8 and newer.
+#
+# It also copies out the concatfragments.sh file to ${concatdir}/bin
+class concat::setup {
+ $id = $::id
+ $root_group = $id ? {
+ root => 0,
+ default => $id
+ }
+
+ if $::concat_basedir {
+ $concatdir = $::concat_basedir
+ } else {
+ fail ("\$concat_basedir not defined. Try running again with pluginsync enabled")
+ }
+
+ $majorversion = regsubst($::puppetversion, '^[0-9]+[.]([0-9]+)[.][0-9]+$', '\1')
+
+ file{"${concatdir}/bin/concatfragments.sh":
+ owner => $id,
+ group => $root_group,
+ mode => '0755',
+ source => $majorversion ? {
+ 24 => 'puppet:///concat/concatfragments.sh',
+ default => 'puppet:///modules/concat/concatfragments.sh'
+ };
+
+ [ $concatdir, "${concatdir}/bin" ]:
+ ensure => directory,
+ owner => $id,
+ group => $root_group,
+ mode => '0750';
+
+ ## Old versions of this module used a different path.
+ '/usr/local/bin/concatfragments.sh':
+ ensure => absent;
+ }
+}
diff --git a/puppet/modules/concat/spec/defines/init_spec.rb b/puppet/modules/concat/spec/defines/init_spec.rb
new file mode 100644
index 00000000..d968a26c
--- /dev/null
+++ b/puppet/modules/concat/spec/defines/init_spec.rb
@@ -0,0 +1,20 @@
+require 'spec_helper'
+
+describe 'concat' do
+ basedir = '/var/lib/puppet/concat'
+ let(:title) { '/etc/foo.bar' }
+ let(:facts) { { :concat_basedir => '/var/lib/puppet/concat' } }
+ let :pre_condition do
+ 'include concat::setup'
+ end
+ it { should contain_file("#{basedir}/_etc_foo.bar").with('ensure' => 'directory') }
+ it { should contain_file("#{basedir}/_etc_foo.bar/fragments").with('ensure' => 'directory') }
+
+ it { should contain_file("#{basedir}/_etc_foo.bar/fragments.concat").with('ensure' => 'present') }
+ it { should contain_file("/etc/foo.bar").with('ensure' => 'present') }
+ it { should contain_exec("concat_/etc/foo.bar").with_command(
+ "#{basedir}/bin/concatfragments.sh "+
+ "-o #{basedir}/_etc_foo.bar/fragments.concat.out "+
+ "-d #{basedir}/_etc_foo.bar ")
+ }
+end
diff --git a/puppet/modules/concat/spec/spec_helper.rb b/puppet/modules/concat/spec/spec_helper.rb
new file mode 100644
index 00000000..e6e9309b
--- /dev/null
+++ b/puppet/modules/concat/spec/spec_helper.rb
@@ -0,0 +1,9 @@
+require 'puppet'
+require 'rspec'
+require 'rspec-puppet'
+
+RSpec.configure do |c|
+ c.module_path = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures/modules/'))
+ # Using an empty site.pp file to avoid: https://github.com/rodjek/rspec-puppet/issues/15
+ c.manifest_dir = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures/manifests'))
+end
diff --git a/puppet/modules/git/files/config/CentOS/git-daemon b/puppet/modules/git/files/config/CentOS/git-daemon
new file mode 100644
index 00000000..a9b208c2
--- /dev/null
+++ b/puppet/modules/git/files/config/CentOS/git-daemon
@@ -0,0 +1,26 @@
+# git-daemon config file
+
+# location of the lockfile
+#LOCKFILE=/var/lock/subsys/git-daemon
+
+# which directory to serve
+#GITDIR=/srv/git
+
+# do we serve vhosts?
+# setting this to yes assumes that you
+# have in $GITDIR per vhost to serve
+# a subdirectory containing their repos.
+# for example:
+# - /srv/git/git.example.com
+# - /srv/git/git.example.org
+#GITVHOST=no
+
+# the user git-daemon should run with
+#GITUSER=nobody
+
+# options for the daemon
+#OPTIONS="--reuseaddr --verbose --detach"
+
+# location of the daemon
+#GITDAEMON=/usr/bin/git-daemon
+
diff --git a/puppet/modules/git/files/config/CentOS/git-daemon.vhosts b/puppet/modules/git/files/config/CentOS/git-daemon.vhosts
new file mode 100644
index 00000000..62bb9d4b
--- /dev/null
+++ b/puppet/modules/git/files/config/CentOS/git-daemon.vhosts
@@ -0,0 +1,27 @@
+# git-daemon config file
+
+# location of the lockfile
+#LOCKFILE=/var/lock/subsys/git-daemon
+
+# which directory to serve
+#GITDIR=/srv/git
+
+# do we serve vhosts?
+# setting this to yes assumes that you
+# have in $GITDIR per vhost to serve
+# a subdirectory containing their repos.
+# for example:
+# - /srv/git/git.example.com
+# - /srv/git/git.example.org
+#GITVHOST=no
+GITVHOST=yes
+
+# the user git-daemon should run with
+#GITUSER=nobody
+
+# options for the daemon
+#OPTIONS="--reuseaddr --verbose --detach"
+
+# location of the daemon
+#GITDAEMON=/usr/bin/git-daemon
+
diff --git a/puppet/modules/git/files/config/Debian/git-daemon b/puppet/modules/git/files/config/Debian/git-daemon
new file mode 100644
index 00000000..b25e1e7f
--- /dev/null
+++ b/puppet/modules/git/files/config/Debian/git-daemon
@@ -0,0 +1,22 @@
+# Defaults for the git-daemon initscript
+
+# Set to yes to start git-daemon
+RUN=yes
+
+# Set to the user and group git-daemon should run as
+USER=nobody
+GROUP=nogroup
+
+# Set the base path and the directory where the repositories are.
+REPOSITORIES="/srv/git"
+
+# Provide a way to have custom setup.
+#
+# Note, when ADVANCED_OPTS is defined the REPOSITORIES setting is ignored,
+# so take good care to specify exactly what git-daemon has to do.
+#
+# Here is an example from the man page:
+#ADVANCED_OPTS="--verbose --export-all \
+# --interpolated-path=/pub/%IP/%D \
+# /pub/192.168.1.200/software \
+# /pub/10.10.220.23/software"
diff --git a/puppet/modules/git/files/init.d/CentOS/git-daemon b/puppet/modules/git/files/init.d/CentOS/git-daemon
new file mode 100644
index 00000000..aed20756
--- /dev/null
+++ b/puppet/modules/git/files/init.d/CentOS/git-daemon
@@ -0,0 +1,75 @@
+#!/bin/bash
+# puppet Init script for running the git-daemon
+#
+# Author: Marcel Haerry <mh+rpms(at)immerda.ch>
+#
+# chkconfig: - 98 02
+#
+# description: Enables the git-daemon to serve various directories. By default it serves /srv/git
+# processname: git-daemon
+# config: /etc/sysconfig/git-daemon
+
+PATH=/usr/bin:/sbin:/bin:/usr/sbin
+export PATH
+
+[ -f /etc/sysconfig/git-daemon ] && . /etc/sysconfig/git-daemon
+lockfile=${LOCKFILE-/var/lock/subsys/git-daemon}
+gitdir=${GITDIR-/srv/git}
+gitvhost=${GITVHOST-no}
+user=${GITUSER-nobody}
+options=${OPTIONS-"--reuseaddr --verbose --detach"}
+gitdaemon=${GITDAEMON-/usr/bin/git-daemon}
+RETVAL=0
+
+gitoptions="--user=${user} ${options}"
+if [ $gitvhost = yes ]; then
+ gitoptions="${gitoptions} --interpolated-path=${gitdir}/%H/%D"
+else
+ gitoptions="${gitoptions} --base-path=${gitdir}"
+fi
+
+# Source function library.
+. /etc/rc.d/init.d/functions
+
+start() {
+ echo -n $"Starting git-daemon: "
+ daemon $gitdaemon $gitoptions
+ RETVAL=$?
+ echo
+ [ $RETVAL = 0 ] && touch ${lockfile}
+ return $RETVAL
+}
+
+stop() {
+ echo -n $"Stopping git-daemon: "
+ killproc $gitdaemon
+ RETVAL=$?
+ echo
+ [ $RETVAL = 0 ] && rm -f ${lockfile}
+}
+
+restart() {
+ stop
+ start
+}
+
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ restart)
+ restart
+ ;;
+ status)
+ status $gitdaemon
+ RETVAL=$?
+ ;;
+ *)
+ echo $"Usage: $0 {start|stop|status|restart}"
+ exit 1
+esac
+
+exit $RETVAL
diff --git a/puppet/modules/git/files/init.d/Debian/git-daemon b/puppet/modules/git/files/init.d/Debian/git-daemon
new file mode 100644
index 00000000..ab57c4a1
--- /dev/null
+++ b/puppet/modules/git/files/init.d/Debian/git-daemon
@@ -0,0 +1,151 @@
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides: git-daemon
+# Required-Start: $network $remote_fs $syslog
+# Required-Stop: $network $remote_fs $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: git-daemon service
+# Description: git-daemon makes git repositories available via the git
+# protocol.
+### END INIT INFO
+
+# Author: Antonio Ospite <ospite@studenti.unina.it>
+#
+# Please remove the "Author" lines above and replace them
+# with your own name if you copy and modify this script.
+
+# Do NOT "set -e"
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/lib/git-core
+DESC="git-daemon service"
+NAME=git-daemon
+DAEMON=/usr/lib/git-core/$NAME
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Fallback options values, we use these when
+# the /etc/default/git-daemon file does not exist
+RUN=no
+USER=git
+GROUP=git
+REPOSITORIES="/srv/git/"
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# If ADVANCED_OPTS is empty, use a default setting
+if [ "x$ADVANCED_OPTS" == "x" ];
+then
+ ADVANCED_OPTS="--base-path=$REPOSITORIES $REPOSITORIES"
+fi
+
+DAEMON_ARGS="--syslog --reuseaddr \
+ --user=$USER --group=$GROUP \
+ $ADVANCED_OPTS"
+
+
+# Load the VERBOSE setting and other rcS variables
+. /lib/init/vars.sh
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.0-6) to ensure that this file is present.
+. /lib/lsb/init-functions
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+ # Return
+ # 0 if daemon has been started
+ # 1 if daemon was already running
+ # 2 if daemon could not be started
+ start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
+ || return 1
+ start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --background --make-pidfile -- \
+ $DAEMON_ARGS \
+ || return 2
+
+ return 0
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+ # Return
+ # 0 if daemon has been stopped
+ # 1 if daemon was already stopped
+ # 2 if daemon could not be stopped
+ # other if a failure occurred
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
+ RETVAL="$?"
+ [ "$RETVAL" = 2 ] && return 2
+ # Wait for children to finish too if this is a daemon that forks
+ # and if the daemon is only ever run from this initscript.
+ # If the above conditions are not satisfied then add some other code
+ # that waits for the process to drop all resources that could be
+ # needed by services started subsequently. A last resort is to
+ # sleep for some time.
+ start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
+ [ "$?" = 2 ] && return 2
+ # Many daemons don't delete their pidfiles when they exit.
+ rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+case "$1" in
+ start)
+ [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case "$?" in
+ 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+ 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+ esac
+ ;;
+ stop)
+ [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+ 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+ esac
+ ;;
+ status)
+ status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
+ ;;
+ restart|force-reload)
+ #
+ # If the "reload" option is implemented then remove the
+ # 'force-reload' alias
+ #
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1)
+ do_start
+ case "$?" in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *)
+ # Failed to stop
+ log_end_msg 1
+ ;;
+ esac
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
+
+:
diff --git a/puppet/modules/git/files/web/gitweb.conf b/puppet/modules/git/files/web/gitweb.conf
new file mode 100644
index 00000000..88226aaa
--- /dev/null
+++ b/puppet/modules/git/files/web/gitweb.conf
@@ -0,0 +1,53 @@
+# The gitweb config file is a fragment of perl code. You can set variables
+# using "our $variable = value"; text from "#" character until the end of a
+# line is ignored. See perlsyn(1) man page for details.
+#
+# See /usr/share/doc/gitweb-*/README and /usr/share/doc/gitweb-*/INSTALL for
+# more details and available configuration variables.
+
+# Set the path to git projects. This is an absolute filesystem path which will
+# be prepended to the project path.
+#our $projectroot = "/var/lib/git";
+
+# Set the list of git base URLs used for URL to where fetch project from, i.e.
+# the full URL is "$git_base_url/$project". By default this is empty
+#our @git_base_url_list = qw(git://git.example.com
+# ssh://git.example.com/var/lib/git);
+
+# Enable the 'blame' blob view, showing the last commit that modified
+# each line in the file. This can be very CPU-intensive. Disabled by default
+#$feature{'blame'}{'default'} = [1];
+#
+# Allow projects to override the default setting via git config file.
+# Example: gitweb.blame = 0|1;
+#$feature{'blame'}{'override'} = 1;
+
+# Disable the 'snapshot' link, providing a compressed archive of any tree. This
+# can potentially generate high traffic if you have large projects. Enabled for
+# .tar.gz snapshots by default.
+#
+# Value is a list of formats defined in %known_snapshot_formats that you wish
+# to offer.
+#$feature{'snapshot'}{'default'} = [];
+#
+# Allow projects to override the default setting via git config file.
+# Example: gitweb.snapshot = tbz2,zip; (use "none" to disable)
+#$feature{'snapshot'}{'override'} = 1;
+
+# Disable grep search, which will list the files in currently selected tree
+# containing the given string. This can be potentially CPU-intensive, of
+# course. Enabled by default.
+#$feature{'grep'}{'default'} = [0];
+#
+# Allow projects to override the default setting via git config file.
+# Example: gitweb.grep = 0|1;
+#$feature{'grep'}{'override'} = 1;
+
+# Disable the pickaxe search, which will list the commits that modified a given
+# string in a file. This can be a practical and quite fast alternative to
+# 'blame', but still potentially CPU-intensive. Enabled by default.
+#$feature{'pickaxe'}{'default'} = [0];
+#
+# Allow projects to override the default setting via git config file.
+# Example: gitweb.pickaxe = 0|1;
+#$feature{'pickaxe'}{'override'} = 1;
diff --git a/puppet/modules/git/files/xinetd.d/git b/puppet/modules/git/files/xinetd.d/git
new file mode 100644
index 00000000..64c53e8b
--- /dev/null
+++ b/puppet/modules/git/files/xinetd.d/git
@@ -0,0 +1,16 @@
+# default: off
+# description: The git dæmon allows git repositories to be exported using
+# the git:// protocol.
+
+service git
+{
+ disable = no
+ socket_type = stream
+ wait = no
+ user = nobody
+ server = /usr/bin/git-daemon
+ server_args = --base-path=/srv/git --export-all --user-path=public_git --syslog --inetd --verbose
+ log_on_failure += USERID
+# xinetd doesn't do this by default. bug #195265
+ flags = IPv6
+}
diff --git a/puppet/modules/git/files/xinetd.d/git.disabled b/puppet/modules/git/files/xinetd.d/git.disabled
new file mode 100644
index 00000000..dcfae918
--- /dev/null
+++ b/puppet/modules/git/files/xinetd.d/git.disabled
@@ -0,0 +1,16 @@
+# default: off
+# description: The git dæmon allows git repositories to be exported using
+# the git:// protocol.
+
+service git
+{
+ disable = yes
+ socket_type = stream
+ wait = no
+ user = nobody
+ server = /usr/bin/git-daemon
+ server_args = --base-path=/srv/git --export-all --user-path=public_git --syslog --inetd --verbose
+ log_on_failure += USERID
+# xinetd doesn't do this by default. bug #195265
+ flags = IPv6
+}
diff --git a/puppet/modules/git/files/xinetd.d/git.vhosts b/puppet/modules/git/files/xinetd.d/git.vhosts
new file mode 100644
index 00000000..98938206
--- /dev/null
+++ b/puppet/modules/git/files/xinetd.d/git.vhosts
@@ -0,0 +1,16 @@
+# default: off
+# description: The git dæmon allows git repositories to be exported using
+# the git:// protocol.
+
+service git
+{
+ disable = no
+ socket_type = stream
+ wait = no
+ user = nobody
+ server = /usr/bin/git-daemon
+ server_args = --interpolated-path=/srv/git/%H/%D --syslog --inetd --verbose
+ log_on_failure += USERID
+# xinetd doesn't do this by default. bug #195265
+ flags = IPv6
+}
diff --git a/puppet/modules/git/manifests/base.pp b/puppet/modules/git/manifests/base.pp
new file mode 100644
index 00000000..e6188390
--- /dev/null
+++ b/puppet/modules/git/manifests/base.pp
@@ -0,0 +1,7 @@
+class git::base {
+
+ package { 'git':
+ ensure => present,
+ alias => 'git',
+ }
+}
diff --git a/puppet/modules/git/manifests/centos.pp b/puppet/modules/git/manifests/centos.pp
new file mode 100644
index 00000000..96344756
--- /dev/null
+++ b/puppet/modules/git/manifests/centos.pp
@@ -0,0 +1,2 @@
+class git::centos inherits git::base {
+}
diff --git a/puppet/modules/git/manifests/changes.pp b/puppet/modules/git/manifests/changes.pp
new file mode 100644
index 00000000..71112051
--- /dev/null
+++ b/puppet/modules/git/manifests/changes.pp
@@ -0,0 +1,33 @@
+# Usage
+# git::changes { name:
+# cwd => "/path/to/git/"
+# user => "me",
+# ensure => {*assume-unchanged*, tracked}
+# }
+#
+
+define git::changes ( $cwd, $user, $ensure='assume-unchanged' ) {
+
+ case $ensure {
+ default: { err ( "unknown ensure value '${ensure}'" ) }
+
+ assume-unchanged: {
+ exec { "assume-unchanged ${name}":
+ command => "/usr/bin/git update-index --assume-unchanged ${name}",
+ cwd => $cwd,
+ user => $user,
+ unless => "/usr/bin/git ls-files -v | grep '^[ch] ${name}'",
+ }
+ }
+
+ tracked: {
+ exec { "track changes ${name}":
+ command => "/usr/bin/git update-index --no-assume-unchanged ${name}",
+ cwd => $cwd,
+ user => $user,
+ onlyif => "/usr/bin/git ls-files -v | grep '^[ch] ${name}'",
+ }
+ }
+ }
+}
+
diff --git a/puppet/modules/git/manifests/clone.pp b/puppet/modules/git/manifests/clone.pp
new file mode 100644
index 00000000..29f0b2b3
--- /dev/null
+++ b/puppet/modules/git/manifests/clone.pp
@@ -0,0 +1,60 @@
+# submodules: Whether we should initialize and update
+# submodules as well
+# Default: false
+# clone_before: before which resources a cloning should
+# happen. This is relevant in combination
+# with submodules as the exec of submodules
+# requires the `cwd` and you might get a
+# dependency cycle if you manage $projectroot
+# somewhere else.
+define git::clone(
+ $ensure = present,
+ $git_repo,
+ $projectroot,
+ $submodules = false,
+ $clone_before = 'absent',
+ $cloneddir_user='root',
+ $cloneddir_group='0',
+ $cloneddir_restrict_mode=true
+){
+ case $ensure {
+ absent: {
+ exec{"rm -rf $projectroot":
+ onlyif => "test -d $projectroot",
+ }
+ }
+ default: {
+ require ::git
+ exec {"git-clone_${name}":
+ command => "git clone --no-hardlinks ${git_repo} ${projectroot}",
+ creates => "${projectroot}/.git",
+ user => root,
+ notify => Exec["git-clone-chown_${name}"],
+ }
+ if $clone_before != 'absent' {
+ Exec["git-clone_${name}"]{
+ before => $clone_before,
+ }
+ }
+ if $submodules {
+ exec{"git-submodules_${name}":
+ command => "git submodule init && git submodule update",
+ cwd => $projectroot,
+ refreshonly => true,
+ subscribe => Exec["git-clone_${name}"],
+ }
+ }
+ exec {"git-clone-chown_${name}":
+ command => "chown -R ${cloneddir_user}:${cloneddir_group} ${projectroot};chmod -R og-rwx ${projectroot}/.git",
+ refreshonly => true
+ }
+ if $cloneddir_restrict_mode {
+ exec {"git-clone-chmod_${name}":
+ command => "chmod -R o-rwx ${projectroot}",
+ refreshonly => true,
+ subscribe => Exec["git-clone_${name}"],
+ }
+ }
+ }
+ }
+}
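A minimal sketch of using this define; the repository URL and target path are illustrative:

    git::clone { 'my_app':
      git_repo    => 'git://example.org/my_app.git',
      projectroot => '/srv/my_app',
      submodules  => true,
    }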
diff --git a/puppet/modules/git/manifests/daemon.pp b/puppet/modules/git/manifests/daemon.pp
new file mode 100644
index 00000000..1e85ff84
--- /dev/null
+++ b/puppet/modules/git/manifests/daemon.pp
@@ -0,0 +1,17 @@
+class git::daemon {
+
+ include git
+
+ case $operatingsystem {
+ centos: { include git::daemon::centos }
+ debian: { include git::daemon::base }
+ }
+
+ if $use_shorewall {
+ include shorewall::rules::gitdaemon
+ }
+
+ if $use_nagios {
+ nagios::service { "git-daemon": check_command => "check_git!${fqdn}"; }
+ }
+}
diff --git a/puppet/modules/git/manifests/daemon/base.pp b/puppet/modules/git/manifests/daemon/base.pp
new file mode 100644
index 00000000..6a03d4fd
--- /dev/null
+++ b/puppet/modules/git/manifests/daemon/base.pp
@@ -0,0 +1,31 @@
+class git::daemon::base inherits git::base {
+
+ file { 'git-daemon_initscript':
+ source => [ "puppet://$server/modules/site_git/init.d/${fqdn}/git-daemon",
+ "puppet://$server/modules/site_git/init.d/${operatingsystem}/git-daemon",
+ "puppet://$server/modules/site_git/init.d/git-daemon",
+ "puppet://$server/modules/git/init.d/${operatingsystem}/git-daemon",
+ "puppet://$server/modules/git/init.d/git-daemon" ],
+ require => Package['git'],
+ path => "/etc/init.d/git-daemon",
+ owner => root, group => 0, mode => 0755;
+ }
+
+ file { 'git-daemon_config':
+ source => [ "puppet://$server/modules/site_git/config/${fqdn}/git-daemon",
+ "puppet://$server/modules/site_git/config/${operatingsystem}/git-daemon",
+ "puppet://$server/modules/site_git/config/git-daemon",
+ "puppet://$server/modules/git/config/${operatingsystem}/git-daemon",
+ "puppet://$server/modules/git/config/git-daemon" ],
+ require => Package['git'],
+ path => "/etc/default/git-daemon",
+ owner => root, group => 0, mode => 0644;
+ }
+
+ service { 'git-daemon':
+ ensure => running,
+ enable => true,
+ hasstatus => true,
+ require => [ File['git-daemon_initscript'], File['git-daemon_config'] ],
+ }
+}
diff --git a/puppet/modules/git/manifests/daemon/centos.pp b/puppet/modules/git/manifests/daemon/centos.pp
new file mode 100644
index 00000000..e276259d
--- /dev/null
+++ b/puppet/modules/git/manifests/daemon/centos.pp
@@ -0,0 +1,19 @@
+class git::daemon::centos inherits git::daemon::base {
+
+ package { 'git-daemon':
+ ensure => installed,
+ require => Package['git'],
+ alias => 'git-daemon',
+ }
+
+ File['git-daemon_initscript'] {
+ path => '/etc/init.d/git-daemon',
+ require +> Package['git-daemon'],
+ }
+
+ File['git-daemon_config'] {
+ path => '/etc/init.d/git-daemon',
+ require +> Package['git-daemon'],
+ }
+
+}
diff --git a/puppet/modules/git/manifests/daemon/disable.pp b/puppet/modules/git/manifests/daemon/disable.pp
new file mode 100644
index 00000000..c044e962
--- /dev/null
+++ b/puppet/modules/git/manifests/daemon/disable.pp
@@ -0,0 +1,33 @@
+class git::daemon::disable inherits git::daemon::base {
+
+ if defined(Package['git-daemon']) {
+ Package['git-daemon'] {
+ ensure => absent,
+ }
+ }
+
+ File['git-daemon_initscript'] {
+ ensure => absent,
+ }
+
+ File['git-daemon_config'] {
+ ensure => absent,
+ }
+
+ Service['git-daemon'] {
+ ensure => stopped,
+ enable => false,
+ require => undef,
+ before => File['git-daemon_initscript'],
+ }
+
+ if $use_shorewall {
+ include shorewall::rules::gitdaemon::absent
+ }
+
+ if $use_nagios {
+ nagios::service { "git-daemon": check_command => "check_git!${fqdn}", ensure => absent; }
+ }
+}
+
+
diff --git a/puppet/modules/git/manifests/daemon/vhosts.pp b/puppet/modules/git/manifests/daemon/vhosts.pp
new file mode 100644
index 00000000..9591330f
--- /dev/null
+++ b/puppet/modules/git/manifests/daemon/vhosts.pp
@@ -0,0 +1,10 @@
+class git::daemon::vhosts inherits git::daemon {
+
+ File['git-daemon_config']{
+ source => [ "puppet://$server/modules/site_git/config/${fqdn}/git-daemon.vhosts",
+ "puppet://$server/modules/site_git/config/${operatingsystem}/git-daemon.vhosts",
+ "puppet://$server/modules/site_git/config/git-daemon.vhosts",
+ "puppet://$server/modules/git/config/${operatingsystem}/git-daemon.vhosts",
+ "puppet://$server/modules/git/config/git-daemon.vhosts" ],
+ }
+}
diff --git a/puppet/modules/git/manifests/debian.pp b/puppet/modules/git/manifests/debian.pp
new file mode 100644
index 00000000..2e63d692
--- /dev/null
+++ b/puppet/modules/git/manifests/debian.pp
@@ -0,0 +1,6 @@
+class git::debian inherits git::base {
+
+ Package['git'] {
+ name => 'git-core',
+ }
+}
diff --git a/puppet/modules/git/manifests/init.pp b/puppet/modules/git/manifests/init.pp
new file mode 100644
index 00000000..4693af75
--- /dev/null
+++ b/puppet/modules/git/manifests/init.pp
@@ -0,0 +1,25 @@
+#
+# git module
+#
+# Copyright 2008, Puzzle ITC
+# Marcel Härry haerry+puppet(at)puzzle.ch
+# Simon Josi josi+puppet(at)puzzle.ch
+#
+# This program is free software; you can redistribute
+# it and/or modify it under the terms of the GNU
+# General Public License version 3 as published by
+# the Free Software Foundation.
+#
+
+class git {
+
+ case $operatingsystem {
+ debian: { include git::debian }
+ centos: { include git::centos }
+ }
+
+ if $use_shorewall {
+ include shorewall::rules::out::git
+ }
+
+}
diff --git a/puppet/modules/git/manifests/svn.pp b/puppet/modules/git/manifests/svn.pp
new file mode 100644
index 00000000..ea934749
--- /dev/null
+++ b/puppet/modules/git/manifests/svn.pp
@@ -0,0 +1,10 @@
+# manifests/svn.pp
+
+class git::svn {
+ include ::git
+ include subversion
+
+ package { 'git-svn':
+ require => [ Package['git'], Package['subversion'] ],
+ }
+}
diff --git a/puppet/modules/git/manifests/web.pp b/puppet/modules/git/manifests/web.pp
new file mode 100644
index 00000000..3cf5139e
--- /dev/null
+++ b/puppet/modules/git/manifests/web.pp
@@ -0,0 +1,20 @@
+class git::web {
+ include git
+
+ package { 'gitweb':
+ ensure => present,
+ require => Package['git'],
+ }
+
+ file { '/etc/gitweb.d':
+ ensure => directory,
+ owner => root, group => 0, mode => 0755;
+ }
+ file { '/etc/gitweb.conf':
+ source => [ "puppet:///modules/site_git/web/${fqdn}/gitweb.conf",
+ "puppet:///modules/site_git/web/gitweb.conf",
+ "puppet:///modules/git/web/gitweb.conf" ],
+ require => Package['gitweb'],
+ owner => root, group => 0, mode => 0644;
+ }
+}
diff --git a/puppet/modules/git/manifests/web/absent.pp b/puppet/modules/git/manifests/web/absent.pp
new file mode 100644
index 00000000..4d0dba33
--- /dev/null
+++ b/puppet/modules/git/manifests/web/absent.pp
@@ -0,0 +1,17 @@
+class git::web::absent {
+
+ package { 'gitweb':
+ ensure => absent,
+ }
+
+ file { '/etc/gitweb.d':
+ ensure => absent,
+ purge => true,
+ force => true,
+ recurse => true,
+ }
+ file { '/etc/gitweb.conf':
+ ensure => absent,
+ }
+}
+
diff --git a/puppet/modules/git/manifests/web/lighttpd.pp b/puppet/modules/git/manifests/web/lighttpd.pp
new file mode 100644
index 00000000..980e23c0
--- /dev/null
+++ b/puppet/modules/git/manifests/web/lighttpd.pp
@@ -0,0 +1,7 @@
+class git::web::lighttpd {
+ include ::lighttpd
+
+ lighttpd::config::file{'lighttpd-gitweb':
+ content => 'global { server.modules += ("mod_rewrite", "mod_redirect", "mod_alias", "mod_setenv", "mod_cgi" ) }',
+ }
+}
diff --git a/puppet/modules/git/manifests/web/repo.pp b/puppet/modules/git/manifests/web/repo.pp
new file mode 100644
index 00000000..da6f74f0
--- /dev/null
+++ b/puppet/modules/git/manifests/web/repo.pp
@@ -0,0 +1,56 @@
+# domain: the domain under which this repo will be available
+# projectroot: where the git repos are located
+# projects_list: which repos to export
+#
+# logmode:
+# - default: Do normal logging including ips
+# - anonym: Don't log ips
+define git::web::repo(
+ $ensure = 'present',
+ $projectroot = 'absent',
+ $projects_list = 'absent',
+ $logmode = 'default',
+ $sitename = 'absent'
+){
+ if ($ensure == 'present') and (($projects_list == 'absent') or ($projectroot == 'absent')){
+ fail("You have to pass \$project_list and \$projectroot for ${name} if it should be present!")
+ }
+ if $ensure == 'present' { include git::web }
+ $gitweb_url = $name
+ case $gitweb_sitename {
+ 'absent': { $gitweb_sitename = "${name} git repository" }
+ default: { $gitweb_sitename = $sitename }
+ }
+ $gitweb_config = "/etc/gitweb.d/${name}.conf"
+ file{"${gitweb_config}": }
+ if $ensure == 'present' {
+ File["${gitweb_config}"]{
+ content => template("git/web/config")
+ }
+ } else {
+ File["${gitweb_config}"]{
+ ensure => absent,
+ }
+ }
+ case $gitweb_webserver {
+ 'lighttpd': {
+ git::web::repo::lighttpd{$name:
+ ensure => $ensure,
+ logmode => $logmode,
+ gitweb_url => $gitweb_url,
+ gitweb_config => $gitweb_config,
+ }
+ }
+ 'apache': {
+ apache::vhost::gitweb{$gitweb_url:
+ logmode => $logmode,
+ ensure => $ensure,
+ }
+ }
+ default: {
+ if ($ensure == 'present') {
+ fail("no supported \$gitweb_webserver defined on ${fqdn}, so can't do git::web::repo: ${name}")
+ }
+ }
+ }
+}
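A hedged usage sketch; the hostname and paths are illustrative, and the global $gitweb_webserver (e.g. 'lighttpd') must be set for the vhost branch to apply:

    git::web::repo { 'git.example.org':
      projectroot   => '/srv/git',
      projects_list => '/srv/git/projects.list',
      logmode       => 'anonym',
    }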
diff --git a/puppet/modules/git/manifests/web/repo/lighttpd.pp b/puppet/modules/git/manifests/web/repo/lighttpd.pp
new file mode 100644
index 00000000..11cee4ce
--- /dev/null
+++ b/puppet/modules/git/manifests/web/repo/lighttpd.pp
@@ -0,0 +1,16 @@
+# logmode:
+# - default: Do normal logging including ips
+# - anonym: Don't log ips
+define git::web::repo::lighttpd(
+ $ensure = 'present',
+ $gitweb_url,
+ $logmode = 'default',
+ $gitweb_config
+){
+ if $ensure == 'present' { include git::web::lighttpd }
+
+ lighttpd::vhost::file{$name:
+ ensure => $ensure,
+ content => template('git/web/lighttpd');
+ }
+}
diff --git a/puppet/modules/git/templates/web/config b/puppet/modules/git/templates/web/config
new file mode 100644
index 00000000..5286f6a6
--- /dev/null
+++ b/puppet/modules/git/templates/web/config
@@ -0,0 +1,31 @@
+# Include the global configuration, if found.
+do "/etc/gitweb.conf" if -e "/etc/gitweb.conf";
+
+# Point to projects.list file generated by gitosis.
+# Here gitosis manages the user "git", who has a
+# home directory of /srv/example.com/git
+$projects_list = "<%= projects_list %>";
+
+# Where the actual repositories are located.
+$projectroot = "<%= projectroot %>";
+
+# By default, gitweb will happily let people browse any repository
+# they guess the name of. This may or may not be what you wanted. I
+# choose to allow gitweb to show only repositories that git-daemon
+# is already sharing anonymously.
+$export_ok = "git-daemon-export-ok";
+
+# Alternatively, you could set these, to allow exactly the things in
+# projects.list, which in this case is the repos with gitweb=yes
+# in gitosis.conf. This means you don't need daemon=yes, but you
+# can't have repositories hidden but browsable if you know the name.
+# And note gitweb already allows downloading the full repository,
+# so you might as well serve git-daemon too.
+# $export_ok = "";
+# $strict_export = "true";
+
+# A list of base urls where all the repositories can be cloned from.
+# Easier than having per-repository cloneurl files.
+@git_base_url_list = ('git://<%= gitweb_url %>');
+
+$GITWEB_SITENAME = "<%= gitweb_sitename %>"
diff --git a/puppet/modules/git/templates/web/lighttpd b/puppet/modules/git/templates/web/lighttpd
new file mode 100644
index 00000000..cf244691
--- /dev/null
+++ b/puppet/modules/git/templates/web/lighttpd
@@ -0,0 +1,21 @@
+$HTTP["host"] == "<%= gitweb_url %>" {
+ url.redirect += (
+ "^$" => "/",
+ )
+
+ <%- if logmode.to_s == 'anonym' -%>
+ accesslog.format = "127.0.0.1 %V %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\""
+ <%- end -%>
+
+ alias.url += (
+ "/static/gitweb.css" => "/var/www/git/static/gitweb.css",
+ "/static/git-logo.png" => "/var/www/git/static/git-logo.png",
+ "/static/git-favicon.png" => "/var/www/git/static/git-favicon.png",
+ "/" => "/var/www/git/gitweb.cgi",
+ )
+
+ setenv.add-environment = (
+ "GITWEB_CONFIG" => "<%= gitweb_config %>"
+ )
+ cgi.assign = ( ".cgi" => "" )
+}
diff --git a/puppet/modules/haveged/manifests/init.pp b/puppet/modules/haveged/manifests/init.pp
new file mode 100644
index 00000000..8f901937
--- /dev/null
+++ b/puppet/modules/haveged/manifests/init.pp
@@ -0,0 +1,16 @@
+class haveged {
+
+ package { 'haveged':
+ ensure => present,
+ }
+
+ service { 'haveged':
+ ensure => running,
+ hasrestart => true,
+ hasstatus => true,
+ enable => true,
+ require => Package['haveged'];
+ }
+
+ include site_check_mk::agent::haveged
+}
diff --git a/puppet/modules/journald/manifests/init.pp b/puppet/modules/journald/manifests/init.pp
new file mode 100644
index 00000000..879baba4
--- /dev/null
+++ b/puppet/modules/journald/manifests/init.pp
@@ -0,0 +1,7 @@
+class journald {
+
+ service { 'systemd-journald':
+ ensure => running,
+ enable => true,
+ }
+}
diff --git a/puppet/modules/leap/manifests/cli/install.pp b/puppet/modules/leap/manifests/cli/install.pp
new file mode 100644
index 00000000..25e87033
--- /dev/null
+++ b/puppet/modules/leap/manifests/cli/install.pp
@@ -0,0 +1,46 @@
+# installs leap_cli on node
+class leap::cli::install ( $source = false ) {
+ if $source {
+ # needed for building leap_cli from source
+ include ::git
+ include ::rubygems
+
+ class { '::ruby':
+ install_dev => true
+ }
+
+ class { 'bundler::install': install_method => 'package' }
+
+ Class[Ruby] ->
+ Class[rubygems] ->
+ Class[bundler::install]
+
+
+ vcsrepo { '/srv/leap/cli':
+ ensure => present,
+ force => true,
+ revision => 'develop',
+ provider => 'git',
+ source => 'https://leap.se/git/leap_cli.git',
+ owner => 'root',
+ group => 'root',
+ notify => Exec['install_leap_cli'],
+ require => Package['git']
+ }
+
+ exec { 'install_leap_cli':
+ command => '/usr/bin/rake build && /usr/bin/rake install',
+ cwd => '/srv/leap/cli',
+ user => 'root',
+ environment => 'USER=root',
+ refreshonly => true,
+ require => [ Class[bundler::install] ]
+ }
+ }
+ else {
+ package { 'leap_cli':
+ ensure => installed,
+ provider => gem
+ }
+ }
+}
diff --git a/puppet/modules/leap/manifests/init.pp b/puppet/modules/leap/manifests/init.pp
new file mode 100644
index 00000000..bbae3781
--- /dev/null
+++ b/puppet/modules/leap/manifests/init.pp
@@ -0,0 +1,3 @@
+class leap {
+
+}
\ No newline at end of file
diff --git a/puppet/modules/leap/manifests/logfile.pp b/puppet/modules/leap/manifests/logfile.pp
new file mode 100644
index 00000000..adb3ca8a
--- /dev/null
+++ b/puppet/modules/leap/manifests/logfile.pp
@@ -0,0 +1,34 @@
+#
+# make syslog log to a particular file for a particular process.
+#
+# arguments:
+#
+# * name: what config files are named as (eg. /etc/rsyslog.d/50-$name.conf)
+# * log: the full path of the log file (defaults to /var/log/leap/$name.log)
+# * process: the syslog tag to filter on (defaults to name)
+#
+define leap::logfile($process = $name, $log = undef) {
+ if $log {
+ $logfile = $log
+ } else {
+ $logfile = "/var/log/leap/${name}.log"
+ }
+
+ rsyslog::snippet { "50-${name}":
+ content => template('leap/rsyslog.erb')
+ }
+
+ augeas {
+ "logrotate_${name}":
+ context => "/files/etc/logrotate.d/${name}/rule",
+ changes => [
+ "set file ${logfile}",
+ 'set rotate 5',
+ 'set schedule daily',
+ 'set compress compress',
+ 'set missingok missingok',
+ 'set ifempty notifempty',
+ 'set copytruncate copytruncate'
+ ]
+ }
+}
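A brief sketch of using this define with an illustrative name; it would drop /etc/rsyslog.d/50-myservice.conf and manage a logrotate rule for /var/log/leap/myservice.log:

    leap::logfile { 'myservice':
      process => 'myservice',  # syslog tag to filter on; defaults to the resource name
    }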
diff --git a/puppet/modules/leap/templates/rsyslog.erb b/puppet/modules/leap/templates/rsyslog.erb
new file mode 100644
index 00000000..7bb5316f
--- /dev/null
+++ b/puppet/modules/leap/templates/rsyslog.erb
@@ -0,0 +1,5 @@
+if $programname startswith '<%= @process %>' then {
+ action(type="omfile" file="<%= @logfile %>" template="RSYSLOG_TraditionalFileFormat")
+ stop
+}
+
diff --git a/puppet/modules/leap_mx/manifests/init.pp b/puppet/modules/leap_mx/manifests/init.pp
new file mode 100644
index 00000000..d758e3ab
--- /dev/null
+++ b/puppet/modules/leap_mx/manifests/init.pp
@@ -0,0 +1,119 @@
+# deploy leap mx service
+class leap_mx {
+
+ $leap_mx = hiera('couchdb_leap_mx_user')
+ $couchdb_user = $leap_mx['username']
+ $couchdb_password = $leap_mx['password']
+
+ $couchdb_host = 'localhost'
+ $couchdb_port = '4096'
+
+ $sources = hiera('sources')
+
+ include soledad::common
+
+ #
+ # USER AND GROUP
+ #
+ # Make the user for leap-mx. This user is where all legitimate, non-system
+ # mail is delivered so leap-mx can process it. Previously, we let the system
+ # pick a uid/gid, but we need to know what they are set to in order to set
+ # virtual_uid_maps and virtual_gid_maps. It's a bit overkill to write a fact
+ # just for this, so instead we pick arbitrary numbers that seem unlikely to be
+ # used and then reference them in the postfix configuration (a sketch of that
+ # mapping follows the user resource below).
+
+ group { 'leap-mx':
+ ensure => present,
+ gid => 42424,
+ allowdupe => false;
+ }
+
+ user { 'leap-mx':
+ ensure => present,
+ comment => 'Leap Mail',
+ allowdupe => false,
+ uid => 42424,
+ gid => 'leap-mx',
+ home => '/var/mail/leap-mx',
+ shell => '/bin/false',
+ managehome => true,
+ require => Group['leap-mx'];
+ }
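As noted in the comment above, the fixed uid/gid 42424 is meant to be referenced from the postfix configuration. The sketch below is only an assumption of how that mapping could be expressed: `postfix::config` is a define offered by common community postfix modules and may not match the one this platform uses; the equivalent main.cf settings are shown in the comments.

```puppet
# Hypothetical sketch: map virtual mail deliveries to the fixed leap-mx ids.
# Equivalent main.cf settings:
#   virtual_uid_maps = static:42424
#   virtual_gid_maps = static:42424
postfix::config {
  'virtual_uid_maps': value => 'static:42424';
  'virtual_gid_maps': value => 'static:42424';
}
```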
+
+ file {
+ '/var/mail/leap-mx':
+ ensure => directory,
+ owner => 'leap-mx',
+ group => 'leap-mx',
+ mode => '0755',
+ require => User['leap-mx'];
+
+ '/var/mail/leap-mx/Maildir':
+ ensure => directory,
+ owner => 'leap-mx',
+ group => 'leap-mx',
+ mode => '0700';
+
+ '/var/mail/leap-mx/Maildir/new':
+ ensure => directory,
+ owner => 'leap-mx',
+ group => 'leap-mx',
+ mode => '0700';
+
+ '/var/mail/leap-mx/Maildir/cur':
+ ensure => directory,
+ owner => 'leap-mx',
+ group => 'leap-mx',
+ mode => '0700';
+
+ '/var/mail/leap-mx/Maildir/tmp':
+ ensure => directory,
+ owner => 'leap-mx',
+ group => 'leap-mx',
+ mode => '0700';
+ }
+
+ #
+ # LEAP-MX CONFIG
+ #
+
+ file { '/etc/leap/mx.conf':
+ content => template('leap_mx/mx.conf.erb'),
+ owner => 'leap-mx',
+ group => 'leap-mx',
+ mode => '0600',
+ notify => Service['leap-mx'];
+ }
+
+ leap::logfile { 'leap-mx':
+ log => '/var/log/leap/mx.log',
+ process => 'leap-mx'
+ }
+
+ #
+ # LEAP-MX CODE AND DEPENDENCIES
+ #
+
+ package {
+ $sources['leap-mx']['package']:
+ ensure => $sources['leap-mx']['revision'],
+ require => [
+ Class['site_apt::leap_repo'],
+ User['leap-mx'] ];
+
+ 'leap-keymanager':
+ ensure => latest;
+ }
+
+ #
+ # LEAP-MX DAEMON
+ #
+
+ service { 'leap-mx':
+ ensure => running,
+ enable => true,
+ hasstatus => true,
+ hasrestart => true,
+ require => [ Package['leap-mx'] ];
+ }
+}
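Since the class is driven entirely by hiera lookups, the data it expects has roughly the following shape. This is a sketch inferred from the lookups above; the key names come from the code, the values are placeholders.

```yaml
couchdb_leap_mx_user:
  username: leap_mx
  password: REPLACE_ME
sources:
  leap-mx:
    package: leap-mx
    revision: latest
```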
diff --git a/puppet/modules/leap_mx/templates/mx.conf.erb b/puppet/modules/leap_mx/templates/mx.conf.erb
new file mode 100644
index 00000000..b54b3a86
--- /dev/null
+++ b/puppet/modules/leap_mx/templates/mx.conf.erb
@@ -0,0 +1,18 @@
+[mail1]
+path=/var/mail/leap-mx/Maildir
+recursive=True
+
+[couchdb]
+user=<%= @couchdb_user %>
+password=<%= @couchdb_password %>
+server=<%= @couchdb_host %>
+port=<%= @couchdb_port %>
+
+[alias map]
+port=4242
+
+[check recipient]
+port=2244
+
+[fingerprint map]
+port=2424
diff --git a/puppet/modules/lsb/manifests/base.pp b/puppet/modules/lsb/manifests/base.pp
new file mode 100644
index 00000000..9dc8d5a4
--- /dev/null
+++ b/puppet/modules/lsb/manifests/base.pp
@@ -0,0 +1,3 @@
+class lsb::base {
+ package{'lsb': ensure => present }
+}
diff --git a/puppet/modules/lsb/manifests/centos.pp b/puppet/modules/lsb/manifests/centos.pp
new file mode 100644
index 00000000..b7006187
--- /dev/null
+++ b/puppet/modules/lsb/manifests/centos.pp
@@ -0,0 +1,5 @@
+class lsb::centos inherits lsb::base {
+ Package['lsb']{
+ name => 'redhat-lsb',
+ }
+}
diff --git a/puppet/modules/lsb/manifests/debian.pp b/puppet/modules/lsb/manifests/debian.pp
new file mode 100644
index 00000000..c32070f3
--- /dev/null
+++ b/puppet/modules/lsb/manifests/debian.pp
@@ -0,0 +1,6 @@
+class lsb::debian inherits lsb::base {
+ Package['lsb']{
+ name => 'lsb-release',
+ require => undef,
+ }
+}
diff --git a/puppet/modules/lsb/manifests/init.pp b/puppet/modules/lsb/manifests/init.pp
new file mode 100644
index 00000000..85b34e1f
--- /dev/null
+++ b/puppet/modules/lsb/manifests/init.pp
@@ -0,0 +1,6 @@
+class lsb {
+ case $::operatingsystem {
+ debian,ubuntu: { include lsb::debian }
+ centos: { include lsb::centos }
+ }
+}
diff --git a/puppet/modules/ntp/.fixtures.yml b/puppet/modules/ntp/.fixtures.yml
new file mode 100644
index 00000000..a4b98014
--- /dev/null
+++ b/puppet/modules/ntp/.fixtures.yml
@@ -0,0 +1,5 @@
+fixtures:
+ repositories:
+ "stdlib": "git://github.com/puppetlabs/puppetlabs-stdlib.git"
+ symlinks:
+ "ntp": "#{source_dir}"
diff --git a/puppet/modules/ntp/.gitignore b/puppet/modules/ntp/.gitignore
new file mode 100644
index 00000000..49cf4650
--- /dev/null
+++ b/puppet/modules/ntp/.gitignore
@@ -0,0 +1,3 @@
+pkg/
+metadata.json
+Gemfile.lock
diff --git a/puppet/modules/ntp/.nodeset.yml b/puppet/modules/ntp/.nodeset.yml
new file mode 100644
index 00000000..cbd0d57b
--- /dev/null
+++ b/puppet/modules/ntp/.nodeset.yml
@@ -0,0 +1,35 @@
+---
+default_set: 'centos-64-x64'
+sets:
+ 'centos-59-x64':
+ nodes:
+ "main.foo.vm":
+ prefab: 'centos-59-x64'
+ 'centos-64-x64':
+ nodes:
+ "main.foo.vm":
+ prefab: 'centos-64-x64'
+ 'fedora-18-x64':
+ nodes:
+ "main.foo.vm":
+ prefab: 'fedora-18-x64'
+ 'debian-607-x64':
+ nodes:
+ "main.foo.vm":
+ prefab: 'debian-607-x64'
+ 'debian-70rc1-x64':
+ nodes:
+ "main.foo.vm":
+ prefab: 'debian-70rc1-x64'
+ 'ubuntu-server-10044-x64':
+ nodes:
+ "main.foo.vm":
+ prefab: 'ubuntu-server-10044-x64'
+ 'ubuntu-server-12042-x64':
+ nodes:
+ "main.foo.vm":
+ prefab: 'ubuntu-server-12042-x64'
+ 'sles-11sp1-x64':
+ nodes:
+ "main.foo.vm":
+ prefab: 'sles-11sp1-x64'
diff --git a/puppet/modules/ntp/.travis.yml b/puppet/modules/ntp/.travis.yml
new file mode 100644
index 00000000..e9f0e84b
--- /dev/null
+++ b/puppet/modules/ntp/.travis.yml
@@ -0,0 +1,40 @@
+---
+branches:
+ only:
+ - master
+language: ruby
+bundler_args: --without development
+script: "bundle exec rake spec SPEC_OPTS='--format documentation'"
+after_success:
+ - git clone -q git://github.com/puppetlabs/ghpublisher.git .forge-releng
+ - .forge-releng/publish
+rvm:
+- 1.8.7
+- 1.9.3
+- 2.0.0
+env:
+ matrix:
+ - PUPPET_GEM_VERSION="~> 2.7.0"
+ - PUPPET_GEM_VERSION="~> 3.0.0"
+ - PUPPET_GEM_VERSION="~> 3.1.0"
+ - PUPPET_GEM_VERSION="~> 3.2.0"
+ global:
+ - PUBLISHER_LOGIN=puppetlabs
+ - secure: |-
+ ZiIkYd9+CdPzpwSjFPnVkCx1FIlipxpbdyD33q94h2Tj5zXjNb1GXizVy0NR
+ kVxGhU5Ld8y9z8DTqKRgCI1Yymg3H//OU++PKLOQj/X5juWVR4URBNPeBOzu
+ IJBDl1MADKA4i1+jAZPpz4mTvTtKS4pWKErgCSmhSfsY1hs7n6c=
+matrix:
+ exclude:
+ - rvm: 1.9.3
+ env: PUPPET_GEM_VERSION="~> 2.7.0"
+ - rvm: 2.0.0
+ env: PUPPET_GEM_VERSION="~> 2.7.0"
+ - rvm: 2.0.0
+ env: PUPPET_GEM_VERSION="~> 3.0.0"
+ - rvm: 2.0.0
+ env: PUPPET_GEM_VERSION="~> 3.1.0"
+ - rvm: 1.8.7
+ env: PUPPET_GEM_VERSION="~> 3.2.0"
+notifications:
+ email: false
diff --git a/puppet/modules/ntp/CHANGELOG b/puppet/modules/ntp/CHANGELOG
new file mode 100644
index 00000000..8be6c4e0
--- /dev/null
+++ b/puppet/modules/ntp/CHANGELOG
@@ -0,0 +1,61 @@
+2013-07-31 - Version 2.0.0
+
+Summary:
+
+The 2.0 release focuses on merging all the distro specific
+templates into a single reusable template across all platforms.
+
+To aid in that goal, we now allow you to change the driftfile,
+ntp keys, and preferred_servers.
+
+Backwards-incompatible changes:
+
+As all the distro specific templates have been removed and a
+unified one created, you may be missing functionality you
+previously relied on. Please test carefully before rolling
+out globally.
+
+Configuration directives that might be affected:
+- `filegen`
+- `fudge` (for virtual machines)
+- `keys`
+- `logfile`
+- `restrict`
+- `restrictkey`
+- `statistics`
+- `trustedkey`
+
+Features:
+- All templates merged into a single template.
+- NTP Keys support added.
+- Add preferred servers support.
+- Parameters in `ntp` class:
+ - `driftfile`: path for the ntp driftfile.
+ - `keys_enable`: Enable NTP keys feature.
+ - `keys_file`: Path for the NTP keys file.
+ - `keys_trusted`: Which keys to trust.
+ - `keys_controlkey`: Which key to use for the control key.
+ - `keys_requestkey`: Which key to use for the request key.
+ - `preferred_servers`: Array of servers to prefer.
+ - `restrict`: Array of restriction options to apply.
+
+2013-07-15 - Version 1.0.1
+Bugfixes:
+- Fix deprecation warning in `autoupdate` parameter.
+- Correctly quote is_virtual fact.
+
+2013-07-08 - Version 1.0.0
+Features:
+- Completely refactored to split across several classes.
+- rspec-puppet tests rewritten to cover more options.
+- rspec-system tests added.
+- ArchLinux handled via osfamily instead of special casing.
+- parameters in `ntp` class:
+ - `autoupdate`: deprecated in favor of directly setting package_ensure.
+ - `panic`: set to false if you wish to allow large clock skews.
+
+2011-11-10 Dan Bode <dan@puppetlabs.com> - 0.0.4
+Add Amazon Linux as a supported platform
+Add unit tests
+2011-06-16 Jeff McCune <jeff@puppetlabs.com> - 0.0.3
+Initial release under puppetlabs
diff --git a/puppet/modules/ntp/CONTRIBUTING.md b/puppet/modules/ntp/CONTRIBUTING.md
new file mode 100644
index 00000000..a2b1d77b
--- /dev/null
+++ b/puppet/modules/ntp/CONTRIBUTING.md
@@ -0,0 +1,9 @@
+Puppet Labs modules on the Puppet Forge are open projects, and community contributions
+are essential for keeping them great. We can’t access the huge number of platforms and
+myriad of hardware, software, and deployment configurations that Puppet is intended to serve.
+
+We want to keep it as easy as possible to contribute changes so that our modules work
+in your environment. There are a few guidelines that we need contributors to follow so
+that we can have a chance of keeping on top of things.
+
+You can read the complete module contribution guide [on the Puppet Labs wiki.](http://projects.puppetlabs.com/projects/module-site/wiki/Module_contributing)
diff --git a/puppet/modules/ntp/Gemfile b/puppet/modules/ntp/Gemfile
new file mode 100644
index 00000000..4e733308
--- /dev/null
+++ b/puppet/modules/ntp/Gemfile
@@ -0,0 +1,19 @@
+source 'https://rubygems.org'
+
+group :development, :test do
+ gem 'rake', :require => false
+ gem 'puppetlabs_spec_helper', :require => false
+ gem 'rspec-system-puppet', :require => false
+ gem 'puppet-lint', :require => false
+ gem 'serverspec', :require => false
+ gem 'rspec-system-serverspec', :require => false
+ gem 'vagrant-wrapper', :require => false
+end
+
+if puppetversion = ENV['PUPPET_GEM_VERSION']
+ gem 'puppet', puppetversion, :require => false
+else
+ gem 'puppet', :require => false
+end
+
+# vim:ft=ruby
diff --git a/puppet/modules/ntp/LICENSE b/puppet/modules/ntp/LICENSE
new file mode 100644
index 00000000..57bc88a1
--- /dev/null
+++ b/puppet/modules/ntp/LICENSE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/puppet/modules/ntp/Modulefile b/puppet/modules/ntp/Modulefile
new file mode 100644
index 00000000..9610ef67
--- /dev/null
+++ b/puppet/modules/ntp/Modulefile
@@ -0,0 +1,11 @@
+name 'puppetlabs-ntp'
+version '2.0.0-rc1'
+source 'git://github.com/puppetlabs/puppetlabs-ntp'
+author 'Puppet Labs'
+license 'Apache Version 2.0'
+summary 'NTP Module'
+description 'NTP Module for Debian, Ubuntu, CentOS, RHEL, OEL, Fedora, FreeBSD, ArchLinux and Gentoo.'
+project_page 'http://github.com/puppetlabs/puppetlabs-ntp'
+
+## Add dependencies, if any:
+dependency 'puppetlabs/stdlib', '>= 0.1.6'
diff --git a/puppet/modules/ntp/README.markdown b/puppet/modules/ntp/README.markdown
new file mode 100644
index 00000000..3aedd47a
--- /dev/null
+++ b/puppet/modules/ntp/README.markdown
@@ -0,0 +1,215 @@
+#ntp
+
+####Table of Contents
+
+1. [Overview](#overview)
+2. [Module Description - What the module does and why it is useful](#module-description)
+3. [Setup - The basics of getting started with ntp](#setup)
+ * [What ntp affects](#what-ntp-affects)
+ * [Setup requirements](#setup-requirements)
+ * [Beginning with ntp](#beginning-with-ntp)
+4. [Usage - Configuration options and additional functionality](#usage)
+5. [Reference - An under-the-hood peek at what the module is doing and how](#reference)
+6. [Limitations - OS compatibility, etc.](#limitations)
+7. [Development - Guide for contributing to the module](#development)
+
+##Overview
+
+The NTP module installs, configures, and manages the ntp service.
+
+##Module Description
+
+The NTP module handles running NTP across a range of operating systems and
+distributions. Where possible we use the upstream ntp templates so that the
+results closely match what you'd get if you modified the package default conf
+files.
+
+##Setup
+
+###What ntp affects
+
+* ntp package.
+* ntp configuration file.
+* ntp service.
+
+###Beginning with ntp
+
+`include '::ntp'` is enough to get you up and running. If you wish to pass in
+parameters, such as which servers to use, then you can use:
+
+```puppet
+class { '::ntp':
+ servers => [ 'ntp1.corp.com', 'ntp2.corp.com' ],
+}
+```
+
+##Usage
+
+All interaction with the ntp module can be done through the main ntp class.
+This means you can simply toggle the options in the ntp class to get at the
+full functionality.
+
+###I just want NTP, what's the minimum I need?
+
+```puppet
+include '::ntp'
+```
+
+###I just want to tweak the servers, nothing else.
+
+```puppet
+class { '::ntp':
+ servers => [ 'ntp1.corp.com', 'ntp2.corp.com' ],
+}
+```
+
+###I'd like to make sure I restrict who can connect as well.
+
+```puppet
+class { '::ntp':
+ servers => [ 'ntp1.corp.com', 'ntp2.corp.com' ],
+ restrict => ['restrict 127.0.0.1'],
+}
+```
+
+###I'd like to opt out of having the service controlled, we use another tool for that.
+
+```puppet
+class { '::ntp':
+ servers => [ 'ntp1.corp.com', 'ntp2.corp.com' ],
+ restrict => ['restrict 127.0.0.1'],
+ service_manage => false,
+}
+```
+
+###Looks great! But I'd like a different template, we need to do something unique here.
+
+```puppet
+class { '::ntp':
+ servers => [ 'ntp1.corp.com', 'ntp2.corp.com' ],
+ restrict => ['restrict 127.0.0.1'],
+ service_manage => false,
+ config_template => 'different/module/custom.template.erb',
+}
+```
+
+##Reference
+
+###Classes
+
+* ntp: Main class, includes all the rest.
+* ntp::install: Handles the packages.
+* ntp::config: Handles the configuration file.
+* ntp::service: Handles the service.
+
+###Parameters
+
+The following parameters are available in the ntp module:
+
+####`autoupdate`
+
+Deprecated: This parameter previously determined whether the ntp package should
+be automatically updated to the latest version available. It has been replaced
+by `package_ensure`.
+
+####`config`
+
+This sets the file to write ntp configuration into.
+
+####`config_template`
+
+This determines which template puppet should use for the ntp configuration.
+
+####`driftfile`
+
+This sets the location of the driftfile for ntp.
+
+####`keys_controlkey`
+
+Which of the keys is used as the control key.
+
+####`keys_enable`
+
+Should the ntp keys functionality be enabled.
+
+####`keys_file`
+
+Location of the keys file.
+
+####`keys_requestkey`
+
+Which of the keys is used as the request key.
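
Taken together, the `keys_*` parameters might be combined as in the following sketch; the key ids and the keys file path are illustrative.

```puppet
class { '::ntp':
  keys_enable     => true,
  keys_file       => '/etc/ntp/ntp.keys',
  keys_trusted    => ['1', '2', '3'],
  keys_controlkey => '2',
  keys_requestkey => '3',
}
```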
+
+####`package_ensure`
+
+This can be set to 'present' or 'latest' or a specific version to choose the
+ntp package to be installed.
+
+####`package_name`
+
+This determines the name of the package to install.
+
+####`panic`
+
+This determines if ntp should 'panic' in the event of a very large clock skew.
+By default we set this to false on virtual machines, as they tend not to keep
+time well.
+
+####`preferred_servers`
+
+List of ntp servers to prefer. Appends `prefer` to any server in this list
+that also appears in the `servers` list, as shown in the sketch below.
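
For example (server names are placeholders), the declaration below renders `server ntp1.corp.com prefer` while `ntp2.corp.com` gets a plain `server` line:

```puppet
class { '::ntp':
  servers           => ['ntp1.corp.com', 'ntp2.corp.com'],
  preferred_servers => ['ntp1.corp.com'],
}
```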
+
+####`restrict`
+
+This sets the restrict options in the ntp configuration.
+
+####`servers`
+
+This selects the servers to use for ntp peers.
+
+####`service_enable`
+
+This determines if the service should be enabled at boot.
+
+####`service_ensure`
+
+This determines if the service should be running or not.
+
+####`service_manage`
+
+This selects if puppet should manage the service in the first place.
+
+####`service_name`
+
+This selects the name of the ntp service for puppet to manage.
+
+
+##Limitations
+
+This module has been built on and tested against Puppet 2.7 and higher.
+
+The module has been tested on:
+
+* RedHat Enterprise Linux 5/6
+* Debian 6/7
+* CentOS 5/6
+* Ubuntu 12.04
+* Gentoo
+* Arch Linux
+* FreeBSD
+
+Testing on other platforms has been light, so support for them cannot be guaranteed.
+
+##Development
+
+Puppet Labs modules on the Puppet Forge are open projects, and community
+contributions are essential for keeping them great. We can’t access the
+huge number of platforms and myriad of hardware, software, and deployment
+configurations that Puppet is intended to serve.
+
+We want to keep it as easy as possible to contribute changes so that our
+modules work in your environment. There are a few guidelines that we need
+contributors to follow so that we can have a chance of keeping on top of things.
+
+You can read the complete module contribution guide [on the Puppet Labs wiki.](http://projects.puppetlabs.com/projects/module-site/wiki/Module_contributing)
diff --git a/puppet/modules/ntp/Rakefile b/puppet/modules/ntp/Rakefile
new file mode 100644
index 00000000..bb60173e
--- /dev/null
+++ b/puppet/modules/ntp/Rakefile
@@ -0,0 +1,2 @@
+require 'puppetlabs_spec_helper/rake_tasks'
+require 'rspec-system/rake_task'
diff --git a/puppet/modules/ntp/manifests/config.pp b/puppet/modules/ntp/manifests/config.pp
new file mode 100644
index 00000000..1c8963dc
--- /dev/null
+++ b/puppet/modules/ntp/manifests/config.pp
@@ -0,0 +1,23 @@
+#
+class ntp::config inherits ntp {
+
+ if $keys_enable {
+ $directory = dirname($keys_file)
+ file { $directory:
+ ensure => directory,
+ owner => 0,
+ group => 0,
+ mode => '0755',
+ recurse => true,
+ }
+ }
+
+ file { $config:
+ ensure => file,
+ owner => 0,
+ group => 0,
+ mode => '0644',
+ content => template($config_template),
+ }
+
+}
diff --git a/puppet/modules/ntp/manifests/init.pp b/puppet/modules/ntp/manifests/init.pp
new file mode 100644
index 00000000..be951187
--- /dev/null
+++ b/puppet/modules/ntp/manifests/init.pp
@@ -0,0 +1,58 @@
+class ntp (
+ $autoupdate = $ntp::params::autoupdate,
+ $config = $ntp::params::config,
+ $config_template = $ntp::params::config_template,
+ $driftfile = $ntp::params::driftfile,
+ $keys_enable = $ntp::params::keys_enable,
+ $keys_file = $ntp::params::keys_file,
+ $keys_controlkey = $ntp::params::keys_controlkey,
+ $keys_requestkey = $ntp::params::keys_requestkey,
+ $keys_trusted = $ntp::params::keys_trusted,
+ $package_ensure = $ntp::params::package_ensure,
+ $package_name = $ntp::params::package_name,
+ $panic = $ntp::params::panic,
+ $preferred_servers = $ntp::params::preferred_servers,
+ $restrict = $ntp::params::restrict,
+ $servers = $ntp::params::servers,
+ $service_enable = $ntp::params::service_enable,
+ $service_ensure = $ntp::params::service_ensure,
+ $service_manage = $ntp::params::service_manage,
+ $service_name = $ntp::params::service_name,
+) inherits ntp::params {
+
+ validate_absolute_path($config)
+ validate_string($config_template)
+ validate_absolute_path($driftfile)
+ validate_bool($keys_enable)
+ validate_re($keys_controlkey, ['^\d+$', ''])
+ validate_re($keys_requestkey, ['^\d+$', ''])
+ validate_array($keys_trusted)
+ validate_string($package_ensure)
+ validate_array($package_name)
+ validate_bool($panic)
+ validate_array($preferred_servers)
+ validate_array($restrict)
+ validate_array($servers)
+ validate_bool($service_enable)
+ validate_string($service_ensure)
+ validate_bool($service_manage)
+ validate_string($service_name)
+
+ if $autoupdate {
+ notice('autoupdate parameter has been deprecated and replaced with package_ensure. Set this to latest for the same behavior as autoupdate => true.')
+ }
+
+ include '::ntp::install'
+ include '::ntp::config'
+ include '::ntp::service'
+
+ # Anchor this as per #8040 - this ensures that classes won't float off and
+ # mess everything up. You can read about this at:
+ # http://docs.puppetlabs.com/puppet/2.7/reference/lang_containment.html#known-issues
+ anchor { 'ntp::begin': }
+ anchor { 'ntp::end': }
+
+ Anchor['ntp::begin'] -> Class['::ntp::install'] -> Class['::ntp::config']
+ ~> Class['::ntp::service'] -> Anchor['ntp::end']
+
+}
diff --git a/puppet/modules/ntp/manifests/install.pp b/puppet/modules/ntp/manifests/install.pp
new file mode 100644
index 00000000..098949c3
--- /dev/null
+++ b/puppet/modules/ntp/manifests/install.pp
@@ -0,0 +1,9 @@
+#
+class ntp::install inherits ntp {
+
+ package { 'ntp':
+ ensure => $package_ensure,
+ name => $package_name,
+ }
+
+}
diff --git a/puppet/modules/ntp/manifests/params.pp b/puppet/modules/ntp/manifests/params.pp
new file mode 100644
index 00000000..10a4fb2b
--- /dev/null
+++ b/puppet/modules/ntp/manifests/params.pp
@@ -0,0 +1,116 @@
+class ntp::params {
+
+ $autoupdate = false
+ $config_template = 'ntp/ntp.conf.erb'
+ $keys_enable = false
+ $keys_controlkey = ''
+ $keys_requestkey = ''
+ $keys_trusted = []
+ $package_ensure = 'present'
+ $preferred_servers = []
+ $restrict = [
+ 'restrict default kod nomodify notrap nopeer noquery',
+ 'restrict -6 default kod nomodify notrap nopeer noquery',
+ 'restrict 127.0.0.1',
+ 'restrict -6 ::1',
+ ]
+ $service_enable = true
+ $service_ensure = 'running'
+ $service_manage = true
+
+ # On virtual machines allow large clock skews.
+ $panic = str2bool($::is_virtual) ? {
+ true => false,
+ default => true,
+ }
+
+ case $::osfamily {
+ 'Debian': {
+ $config = '/etc/ntp.conf'
+ $keys_file = '/etc/ntp/keys'
+ $driftfile = '/var/lib/ntp/drift'
+ $package_name = [ 'ntp' ]
+ $service_name = 'ntp'
+ $servers = [
+ '0.debian.pool.ntp.org iburst',
+ '1.debian.pool.ntp.org iburst',
+ '2.debian.pool.ntp.org iburst',
+ '3.debian.pool.ntp.org iburst',
+ ]
+ }
+ 'RedHat': {
+ $config = '/etc/ntp.conf'
+ $driftfile = '/var/lib/ntp/drift'
+ $keys_file = '/etc/ntp/keys'
+ $package_name = [ 'ntp' ]
+ $service_name = 'ntpd'
+ $servers = [
+ '0.centos.pool.ntp.org',
+ '1.centos.pool.ntp.org',
+ '2.centos.pool.ntp.org',
+ ]
+ }
+ 'SuSE': {
+ $config = '/etc/ntp.conf'
+ $driftfile = '/var/lib/ntp/drift/ntp.drift'
+ $keys_file = '/etc/ntp/keys'
+ $package_name = [ 'ntp' ]
+ $service_name = 'ntp'
+ $servers = [
+ '0.opensuse.pool.ntp.org',
+ '1.opensuse.pool.ntp.org',
+ '2.opensuse.pool.ntp.org',
+ '3.opensuse.pool.ntp.org',
+ ]
+ }
+ 'FreeBSD': {
+ $config = '/etc/ntp.conf'
+ $driftfile = '/var/db/ntpd.drift'
+ $keys_file = '/etc/ntp/keys'
+ $package_name = ['net/ntp']
+ $service_name = 'ntpd'
+ $servers = [
+ '0.freebsd.pool.ntp.org iburst maxpoll 9',
+ '1.freebsd.pool.ntp.org iburst maxpoll 9',
+ '2.freebsd.pool.ntp.org iburst maxpoll 9',
+ '3.freebsd.pool.ntp.org iburst maxpoll 9',
+ ]
+ }
+ 'Archlinux': {
+ $config = '/etc/ntp.conf'
+ $driftfile = '/var/lib/ntp/drift'
+ $keys_file = '/etc/ntp/keys'
+ $package_name = [ 'ntp' ]
+ $service_name = 'ntpd'
+ $servers = [
+ '0.pool.ntp.org',
+ '1.pool.ntp.org',
+ '2.pool.ntp.org',
+ ]
+ }
+ 'Linux': {
+ # Account for distributions that don't have $::osfamily specific settings.
+ case $::operatingsystem {
+ 'Gentoo': {
+ $config = '/etc/ntp.conf'
+ $driftfile = '/var/lib/ntp/drift'
+ $keys_file = '/etc/ntp/keys'
+ $package_name = ['net-misc/ntp']
+ $service_name = 'ntpd'
+ $servers = [
+ '0.gentoo.pool.ntp.org',
+ '1.gentoo.pool.ntp.org',
+ '2.gentoo.pool.ntp.org',
+ '3.gentoo.pool.ntp.org',
+ ]
+ }
+ default: {
+ fail("The ${module_name} module is not supported on an ${::operatingsystem} distribution.")
+ }
+ }
+ }
+ default: {
+ fail("The ${module_name} module is not supported on an ${::osfamily} based system.")
+ }
+ }
+}
diff --git a/puppet/modules/ntp/manifests/service.pp b/puppet/modules/ntp/manifests/service.pp
new file mode 100644
index 00000000..3f1ada0b
--- /dev/null
+++ b/puppet/modules/ntp/manifests/service.pp
@@ -0,0 +1,18 @@
+#
+class ntp::service inherits ntp {
+
+ if ! ($service_ensure in [ 'running', 'stopped' ]) {
+ fail('service_ensure parameter must be running or stopped')
+ }
+
+ if $service_manage == true {
+ service { 'ntp':
+ ensure => $service_ensure,
+ enable => $service_enable,
+ name => $service_name,
+ hasstatus => true,
+ hasrestart => true,
+ }
+ }
+
+}
diff --git a/puppet/modules/ntp/spec/classes/ntp_spec.rb b/puppet/modules/ntp/spec/classes/ntp_spec.rb
new file mode 100644
index 00000000..6c636f40
--- /dev/null
+++ b/puppet/modules/ntp/spec/classes/ntp_spec.rb
@@ -0,0 +1,261 @@
+require 'spec_helper'
+
+describe 'ntp' do
+
+ ['Debian', 'RedHat','SuSE', 'FreeBSD', 'Archlinux', 'Gentoo'].each do |system|
+ if system == 'Gentoo'
+ let(:facts) {{ :osfamily => 'Linux', :operatingsystem => system }}
+ else
+ let(:facts) {{ :osfamily => system }}
+ end
+
+ it { should include_class('ntp::install') }
+ it { should include_class('ntp::config') }
+ it { should include_class('ntp::service') }
+
+ describe "ntp::config on #{system}" do
+ it { should contain_file('/etc/ntp.conf').with_owner('0') }
+ it { should contain_file('/etc/ntp.conf').with_group('0') }
+ it { should contain_file('/etc/ntp.conf').with_mode('0644') }
+
+ describe 'allows template to be overridden' do
+ let(:params) {{ :config_template => 'my_ntp/ntp.conf.erb' }}
+ it { should contain_file('/etc/ntp.conf').with({
+ 'content' => /server foobar/})
+ }
+ end
+
+ describe "keys for osfamily #{system}" do
+ context "when enabled" do
+ let(:params) {{
+ :keys_enable => true,
+ :keys_file => '/etc/ntp/ntp.keys',
+ :keys_trusted => ['1', '2', '3'],
+ :keys_controlkey => '2',
+ :keys_requestkey => '3',
+ }}
+
+ it { should contain_file('/etc/ntp').with({
+ 'ensure' => 'directory'})
+ }
+ it { should contain_file('/etc/ntp.conf').with({
+ 'content' => /trustedkey 1 2 3/})
+ }
+ it { should contain_file('/etc/ntp.conf').with({
+ 'content' => /controlkey 2/})
+ }
+ it { should contain_file('/etc/ntp.conf').with({
+ 'content' => /requestkey 3/})
+ }
+ end
+ end
+
+ context "when disabled" do
+ let(:params) {{
+ :keys_enable => false,
+ :keys_file => '/etc/ntp/ntp.keys',
+ :keys_trusted => ['1', '2', '3'],
+ :keys_controlkey => '2',
+ :keys_requestkey => '3',
+ }}
+
+ it { should_not contain_file('/etc/ntp').with({
+ 'ensure' => 'directory'})
+ }
+ it { should_not contain_file('/etc/ntp.conf').with({
+ 'content' => /trustedkey 1 2 3/})
+ }
+ it { should_not contain_file('/etc/ntp.conf').with({
+ 'content' => /controlkey 2/})
+ }
+ it { should_not contain_file('/etc/ntp.conf').with({
+ 'content' => /requestkey 3/})
+ }
+ end
+
+ describe 'preferred servers' do
+ context "when set" do
+ let(:params) {{
+ :servers => ['a', 'b', 'c', 'd'],
+ :preferred_servers => ['a', 'b']
+ }}
+
+ it { should contain_file('/etc/ntp.conf').with({
+ 'content' => /server a prefer\nserver b prefer\nserver c\nserver d/})
+ }
+ end
+ context "when not set" do
+ let(:params) {{
+ :servers => ['a', 'b', 'c', 'd'],
+ :preferred_servers => []
+ }}
+
+ it { should_not contain_file('/etc/ntp.conf').with({
+ 'content' => /server a prefer/})
+ }
+ end
+ end
+
+ describe "ntp::install on #{system}" do
+ let(:params) {{ :package_ensure => 'present', :package_name => ['ntp'], }}
+
+ it { should contain_package('ntp').with(
+ :ensure => 'present',
+ :name => 'ntp'
+ )}
+
+ describe 'should allow package ensure to be overridden' do
+ let(:params) {{ :package_ensure => 'latest', :package_name => ['ntp'] }}
+ it { should contain_package('ntp').with_ensure('latest') }
+ end
+
+ describe 'should allow the package name to be overridden' do
+ let(:params) {{ :package_ensure => 'present', :package_name => ['hambaby'] }}
+ it { should contain_package('ntp').with_name('hambaby') }
+ end
+ end
+
+ describe 'ntp::service' do
+ let(:params) {{
+ :service_manage => true,
+ :service_enable => true,
+ :service_ensure => 'running',
+ :service_name => 'ntp'
+ }}
+
+ describe 'with defaults' do
+ it { should contain_service('ntp').with(
+ :enable => true,
+ :ensure => 'running',
+ :name => 'ntp'
+ )}
+ end
+
+ describe 'service_ensure' do
+ describe 'when overridden' do
+ let(:params) {{ :service_name => 'ntp', :service_ensure => 'stopped' }}
+ it { should contain_service('ntp').with_ensure('stopped') }
+ end
+ end
+
+ describe 'service_manage' do
+ let(:params) {{
+ :service_manage => false,
+ :service_enable => true,
+ :service_ensure => 'running',
+ :service_name => 'ntpd',
+ }}
+
+ it 'when set to false' do
+ should_not contain_service('ntp').with({
+ 'enable' => true,
+ 'ensure' => 'running',
+ 'name' => 'ntpd'
+ })
+ end
+ end
+ end
+ end
+
+ context 'ntp::config' do
+ describe "for operating system Gentoo" do
+ let(:facts) {{ :operatingsystem => 'Gentoo',
+ :osfamily => 'Linux' }}
+
+ it 'uses the NTP pool servers by default' do
+ should contain_file('/etc/ntp.conf').with({
+ 'content' => /server \d.gentoo.pool.ntp.org/,
+ })
+ end
+ end
+ describe "on osfamily Debian" do
+ let(:facts) {{ :osfamily => 'debian' }}
+
+ it 'uses the debian ntp servers by default' do
+ should contain_file('/etc/ntp.conf').with({
+ 'content' => /server \d.debian.pool.ntp.org iburst/,
+ })
+ end
+ end
+
+ describe "on osfamily RedHat" do
+ let(:facts) {{ :osfamily => 'RedHat' }}
+
+ it 'uses the redhat ntp servers by default' do
+ should contain_file('/etc/ntp.conf').with({
+ 'content' => /server \d.centos.pool.ntp.org/,
+ })
+ end
+ end
+
+ describe "on osfamily SuSE" do
+ let(:facts) {{ :osfamily => 'SuSE' }}
+
+ it 'uses the opensuse ntp servers by default' do
+ should contain_file('/etc/ntp.conf').with({
+ 'content' => /server \d.opensuse.pool.ntp.org/,
+ })
+ end
+ end
+
+ describe "on osfamily FreeBSD" do
+ let(:facts) {{ :osfamily => 'FreeBSD' }}
+
+ it 'uses the freebsd ntp servers by default' do
+ should contain_file('/etc/ntp.conf').with({
+ 'content' => /server \d.freebsd.pool.ntp.org iburst maxpoll 9/,
+ })
+ end
+ end
+
+ describe "on osfamily ArchLinux" do
+ let(:facts) {{ :osfamily => 'ArchLinux' }}
+
+ it 'uses the NTP pool servers by default' do
+ should contain_file('/etc/ntp.conf').with({
+ 'content' => /server \d.pool.ntp.org/,
+ })
+ end
+ end
+
+ describe "for operating system family unsupported" do
+ let(:facts) {{
+ :osfamily => 'unsupported',
+ }}
+
+ it { expect{ subject }.to raise_error(
+ /^The ntp module is not supported on an unsupported based system./
+ )}
+ end
+ end
+
+ describe 'for virtual machines' do
+ let(:facts) {{ :osfamily => 'Archlinux',
+ :is_virtual => 'true' }}
+
+ it 'should not use local clock as a time source' do
+ should_not contain_file('/etc/ntp.conf').with({
+ 'content' => /server.*127.127.1.0.*fudge.*127.127.1.0 stratum 10/,
+ })
+ end
+
+ it 'allows large clock skews' do
+ should contain_file('/etc/ntp.conf').with({
+ 'content' => /tinker panic 0/,
+ })
+ end
+ end
+
+ describe 'for physical machines' do
+ let(:facts) {{ :osfamily => 'Archlinux',
+ :is_virtual => 'false' }}
+
+ it 'disallows large clock skews' do
+ should_not contain_file('/etc/ntp.conf').with({
+ 'content' => /tinker panic 0/,
+ })
+ end
+ end
+ end
+
+end
diff --git a/puppet/modules/ntp/spec/fixtures/modules/my_ntp/templates/ntp.conf.erb b/puppet/modules/ntp/spec/fixtures/modules/my_ntp/templates/ntp.conf.erb
new file mode 100644
index 00000000..40cf67c6
--- /dev/null
+++ b/puppet/modules/ntp/spec/fixtures/modules/my_ntp/templates/ntp.conf.erb
@@ -0,0 +1,4 @@
+#my uber ntp config
+#
+
+server foobar
diff --git a/puppet/modules/ntp/spec/spec.opts b/puppet/modules/ntp/spec/spec.opts
new file mode 100644
index 00000000..91cd6427
--- /dev/null
+++ b/puppet/modules/ntp/spec/spec.opts
@@ -0,0 +1,6 @@
+--format
+s
+--colour
+--loadby
+mtime
+--backtrace
diff --git a/puppet/modules/ntp/spec/spec_helper.rb b/puppet/modules/ntp/spec/spec_helper.rb
new file mode 100644
index 00000000..2c6f5664
--- /dev/null
+++ b/puppet/modules/ntp/spec/spec_helper.rb
@@ -0,0 +1 @@
+require 'puppetlabs_spec_helper/module_spec_helper'
diff --git a/puppet/modules/ntp/spec/spec_helper_system.rb b/puppet/modules/ntp/spec/spec_helper_system.rb
new file mode 100644
index 00000000..d5208463
--- /dev/null
+++ b/puppet/modules/ntp/spec/spec_helper_system.rb
@@ -0,0 +1,26 @@
+require 'rspec-system/spec_helper'
+require 'rspec-system-puppet/helpers'
+require 'rspec-system-serverspec/helpers'
+include Serverspec::Helper::RSpecSystem
+include Serverspec::Helper::DetectOS
+include RSpecSystemPuppet::Helpers
+
+RSpec.configure do |c|
+ # Project root
+ proj_root = File.expand_path(File.join(File.dirname(__FILE__), '..'))
+
+ # Enable colour
+ c.tty = true
+
+ c.include RSpecSystemPuppet::Helpers
+
+ # This is where we 'setup' the nodes before running our tests
+ c.before :suite do
+ # Install puppet
+ puppet_install
+
+ # Install modules and dependencies
+ puppet_module_install(:source => proj_root, :module_name => 'ntp')
+ shell('puppet module install puppetlabs-stdlib')
+ end
+end
diff --git a/puppet/modules/ntp/spec/system/basic_spec.rb b/puppet/modules/ntp/spec/system/basic_spec.rb
new file mode 100644
index 00000000..7b717a04
--- /dev/null
+++ b/puppet/modules/ntp/spec/system/basic_spec.rb
@@ -0,0 +1,13 @@
+require 'spec_helper_system'
+
+# Here we put the more basic fundamental tests, ultra obvious stuff.
+describe "basic tests:" do
+ context 'make sure we have copied the module across' do
+ # No point diagnosing any more if the module wasn't copied properly
+ context shell 'ls /etc/puppet/modules/ntp' do
+ its(:stdout) { should =~ /Modulefile/ }
+ its(:stderr) { should be_empty }
+ its(:exit_code) { should be_zero }
+ end
+ end
+end
diff --git a/puppet/modules/ntp/spec/system/class_spec.rb b/puppet/modules/ntp/spec/system/class_spec.rb
new file mode 100644
index 00000000..49dfc641
--- /dev/null
+++ b/puppet/modules/ntp/spec/system/class_spec.rb
@@ -0,0 +1,39 @@
+require 'spec_helper_system'
+
+describe "ntp class:" do
+ context 'should run successfully' do
+ pp = "class { 'ntp': }"
+
+ context puppet_apply(pp) do
+ its(:stderr) { should be_empty }
+ its(:exit_code) { should_not == 1 }
+ its(:refresh) { should be_nil }
+ its(:stderr) { should be_empty }
+ its(:exit_code) { should be_zero }
+ end
+ end
+
+ context 'service_ensure => stopped:' do
+ pp = "class { 'ntp': service_ensure => stopped }"
+
+ context puppet_apply(pp) do
+ its(:stderr) { should be_empty }
+ its(:exit_code) { should_not == 1 }
+ its(:refresh) { should be_nil }
+ its(:stderr) { should be_empty }
+ its(:exit_code) { should be_zero }
+ end
+ end
+
+ context 'service_ensure => running:' do
+ pp = "class { 'ntp': service_ensure => running }"
+
+ context puppet_apply(pp) do |r|
+ its(:stderr) { should be_empty }
+ its(:exit_code) { should_not == 1 }
+ its(:refresh) { should be_nil }
+ its(:stderr) { should be_empty }
+ its(:exit_code) { should be_zero }
+ end
+ end
+end
diff --git a/puppet/modules/ntp/spec/system/ntp_config_spec.rb b/puppet/modules/ntp/spec/system/ntp_config_spec.rb
new file mode 100644
index 00000000..194cdf10
--- /dev/null
+++ b/puppet/modules/ntp/spec/system/ntp_config_spec.rb
@@ -0,0 +1,35 @@
+require 'spec_helper_system'
+
+describe 'ntp::config class' do
+ let(:os) {
+ node.facts['osfamily']
+ }
+
+ puppet_apply(%{
+ class { 'ntp': }
+ })
+
+ case node.facts['osfamily']
+ when 'FreeBSD'
+ line = '0.freebsd.pool.ntp.org iburst maxpoll 9'
+ when 'Debian'
+ line = '0.debian.pool.ntp.org iburst'
+ when 'RedHat'
+ line = '0.centos.pool.ntp.org'
+ when 'SuSE'
+ line = '0.opensuse.pool.ntp.org'
+ when 'Linux'
+ case node.facts['operatingsystem']
+ when 'ArchLinux'
+ line = '0.pool.ntp.org'
+ when 'Gentoo'
+ line = '0.gentoo.pool.ntp.org'
+ end
+ end
+
+ describe file('/etc/ntp.conf') do
+ it { should be_file }
+ it { should contain line }
+ end
+
+end
diff --git a/puppet/modules/ntp/spec/system/ntp_install_spec.rb b/puppet/modules/ntp/spec/system/ntp_install_spec.rb
new file mode 100644
index 00000000..39759c5e
--- /dev/null
+++ b/puppet/modules/ntp/spec/system/ntp_install_spec.rb
@@ -0,0 +1,31 @@
+require 'spec_helper_system'
+
+
+describe 'ntp::install class' do
+ let(:os) {
+ node.facts['osfamily']
+ }
+
+ case node.facts['osfamily']
+ when 'FreeBSD'
+ packagename = 'net/ntp'
+ when 'Linux'
+ case node.facts['operatingsystem']
+ when 'ArchLinux'
+ packagename = 'ntp'
+ when 'Gentoo'
+ packagename = 'net-misc/ntp'
+ end
+ else
+ packagename = 'ntp'
+ end
+
+ puppet_apply(%{
+ class { 'ntp': }
+ })
+
+ describe package(packagename) do
+ it { should be_installed }
+ end
+
+end
diff --git a/puppet/modules/ntp/spec/system/ntp_service_spec.rb b/puppet/modules/ntp/spec/system/ntp_service_spec.rb
new file mode 100644
index 00000000..b97e2a4e
--- /dev/null
+++ b/puppet/modules/ntp/spec/system/ntp_service_spec.rb
@@ -0,0 +1,25 @@
+require 'spec_helper_system'
+
+
+describe 'ntp::service class' do
+ let(:os) {
+ node.facts['osfamily']
+ }
+
+ case node.facts['osfamily']
+ when 'RedHat', 'FreeBSD', 'Linux'
+ servicename = 'ntpd'
+ else
+ servicename = 'ntp'
+ end
+
+ puppet_apply(%{
+ class { 'ntp': }
+ })
+
+ describe service(servicename) do
+ it { should be_enabled }
+ it { should be_running }
+ end
+
+end
diff --git a/puppet/modules/ntp/spec/system/preferred_servers_spec.rb b/puppet/modules/ntp/spec/system/preferred_servers_spec.rb
new file mode 100644
index 00000000..686861bc
--- /dev/null
+++ b/puppet/modules/ntp/spec/system/preferred_servers_spec.rb
@@ -0,0 +1,20 @@
+require 'spec_helper_system'
+
+describe 'preferred servers' do
+ it 'applies cleanly' do
+ puppet_apply(%{
+ class { '::ntp':
+ servers => ['a', 'b', 'c', 'd'],
+ preferred_servers => ['c', 'd'],
+ }
+ })
+ end
+
+ describe file('/etc/ntp.conf') do
+ it { should be_file }
+ it { should contain 'server a' }
+ it { should contain 'server b' }
+ it { should contain 'server c prefer' }
+ it { should contain 'server d prefer' }
+ end
+end
diff --git a/puppet/modules/ntp/spec/system/restrict_spec.rb b/puppet/modules/ntp/spec/system/restrict_spec.rb
new file mode 100644
index 00000000..ae23bc01
--- /dev/null
+++ b/puppet/modules/ntp/spec/system/restrict_spec.rb
@@ -0,0 +1,20 @@
+require 'spec_helper_system'
+
+describe "ntp class with restrict:" do
+ context 'should run successfully' do
+ pp = "class { 'ntp': restrict => ['test restrict']}"
+
+ context puppet_apply(pp) do
+ its(:stderr) { should be_empty }
+ its(:exit_code) { should_not == 1 }
+ its(:refresh) { should be_nil }
+ its(:stderr) { should be_empty }
+ its(:exit_code) { should be_zero }
+ end
+ end
+
+ describe file('/etc/ntp.conf') do
+ it { should contain('test restrict') }
+ end
+
+end
diff --git a/puppet/modules/ntp/spec/unit/puppet/provider/README.markdown b/puppet/modules/ntp/spec/unit/puppet/provider/README.markdown
new file mode 100644
index 00000000..70258502
--- /dev/null
+++ b/puppet/modules/ntp/spec/unit/puppet/provider/README.markdown
@@ -0,0 +1,4 @@
+Provider Specs
+==============
+
+Define specs for your providers under this directory.
diff --git a/puppet/modules/ntp/spec/unit/puppet/type/README.markdown b/puppet/modules/ntp/spec/unit/puppet/type/README.markdown
new file mode 100644
index 00000000..1ee19ac8
--- /dev/null
+++ b/puppet/modules/ntp/spec/unit/puppet/type/README.markdown
@@ -0,0 +1,4 @@
+Resource Type Specs
+===================
+
+Define specs for your resource types in this directory.
diff --git a/puppet/modules/ntp/templates/ntp.conf.erb b/puppet/modules/ntp/templates/ntp.conf.erb
new file mode 100644
index 00000000..94b36755
--- /dev/null
+++ b/puppet/modules/ntp/templates/ntp.conf.erb
@@ -0,0 +1,43 @@
+# ntp.conf: Managed by puppet.
+#
+<% if @panic == false -%>
+# Keep ntpd from panicking in the event of a large clock skew
+# when a VM guest is suspended and resumed.
+tinker panic 0
+<% end -%>
+
+<% if @restrict != [] -%>
+# Permit time synchronization with our time source, but do not
+# permit the source to query or modify the service on this system.
+<% @restrict.flatten.each do |restrict| -%>
+<%= restrict %>
+<% end %>
+<% end -%>
+
+# Servers
+<% [@servers].flatten.each do |server| -%>
+server <%= server %><% if @preferred_servers.include?(server) -%> prefer<% end %>
+<% end -%>
+
+<% if scope.lookupvar('::is_virtual') == "false" -%>
+# Undisciplined Local Clock. This is a fake driver intended for backup
+# and when no outside source of synchronized time is available.
+server 127.127.1.0 # local clock
+fudge 127.127.1.0 stratum 10
+<% end -%>
+
+# Driftfile.
+driftfile <%= @driftfile %>
+
+<% if @keys_enable -%>
+keys <%= @keys_file %>
+<% unless @keys_trusted.empty? -%>
+trustedkey <%= @keys_trusted.join(' ') %>
+<% end -%>
+<% if @keys_requestkey != '' -%>
+requestkey <%= @keys_requestkey %>
+<% end -%>
+<% if @keys_controlkey != '' -%>
+controlkey <%= @keys_controlkey %>
+<% end -%>
+<% end -%>
diff --git a/puppet/modules/ntp/tests/init.pp b/puppet/modules/ntp/tests/init.pp
new file mode 100644
index 00000000..e6d9b537
--- /dev/null
+++ b/puppet/modules/ntp/tests/init.pp
@@ -0,0 +1,11 @@
+node default {
+
+ notify { 'enduser-before': }
+ notify { 'enduser-after': }
+
+ class { 'ntp':
+ require => Notify['enduser-before'],
+ before => Notify['enduser-after'],
+ }
+
+}
diff --git a/puppet/modules/obfsproxy/files/obfsproxy_init b/puppet/modules/obfsproxy/files/obfsproxy_init
new file mode 100755
index 00000000..01c8013a
--- /dev/null
+++ b/puppet/modules/obfsproxy/files/obfsproxy_init
@@ -0,0 +1,93 @@
+#!/bin/sh
+
+### BEGIN INIT INFO
+# Provides: obfsproxy daemon
+# Required-Start: $remote_fs $syslog
+# Required-Stop: $remote_fs $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: obfsproxy daemon
+# Description: obfsproxy daemon
+### END INIT INFO
+
+. /lib/lsb/init-functions
+
+DAEMON=/usr/bin/obfsproxy
+NAME=obfsproxy
+DESC="obfsproxy daemon"
+USER=obfsproxy
+DATDIR=/etc/obfsproxy
+PIDFILE=/var/run/obfsproxy.pid
+CONF=$DATDIR/obfsproxy.conf
+LOGFILE=/var/log/obfsproxy.log
+
+# If the daemon is not there, then exit.
+test -x $DAEMON || exit 0
+
+if [ -f $CONF ] ; then
+ . $CONF
+else
+ echo "Obfsproxy configuration file is missing, aborting..."
+ exit 2
+fi
+
+DAEMONARGS=" --log-min-severity=$LOG --log-file=$LOGFILE --data-dir=$DATDIR \
+ $TRANSPORT $PARAM --dest=$DEST_IP:$DEST_PORT server $BINDADDR:$PORT"
+
+start_obfsproxy() {
+ start-stop-daemon --start --quiet --oknodo -m --pidfile $PIDFILE \
+ -b -c $USER --startas $DAEMON --$DAEMONARGS
+}
+
+stop_obfsproxy() {
+ start-stop-daemon --stop --quiet --oknodo --pidfile $PIDFILE
+}
+
+status_obfsproxy() {
+ status_of_proc -p $PIDFILE $DAEMON $NAME
+}
+
+case $1 in
+ start)
+ if [ -e $PIDFILE ]; then
+ status_obfsproxy
+ if [ $? = "0" ]; then
+ exit
+ fi
+ fi
+ log_begin_msg "Starting $DESC"
+ start_obfsproxy
+ log_end_msg $?
+ ;;
+ stop)
+ if [ -e $PIDFILE ]; then
+ status_obfsproxy
+ if [ $? = "0" ]; then
+ log_begin_msg "Stopping $DESC"
+ stop_obfsproxy
+ rm -f $PIDFILE
+ log_end_msg $?
+ fi
+ else
+ status_obfsproxy
+ fi
+ ;;
+ restart)
+ $0 stop && sleep 2 && $0 start
+ ;;
+ status)
+ status_obfsproxy
+ ;;
+ reload)
+ if [ -e $PIDFILE ]; then
+ start-stop-daemon --stop --signal USR1 --quiet --pidfile $PIDFILE --name $NAME
+ log_success_msg "$DESC reloaded successfully"
+ else
+ log_failure_msg "$PIDFILE does not exist"
+ fi
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|restart|reload|status}"
+ exit 2
+ ;;
+esac
diff --git a/puppet/modules/obfsproxy/files/obfsproxy_logrotate b/puppet/modules/obfsproxy/files/obfsproxy_logrotate
new file mode 100644
index 00000000..e5679d0c
--- /dev/null
+++ b/puppet/modules/obfsproxy/files/obfsproxy_logrotate
@@ -0,0 +1,14 @@
+/var/log/obfsproxy.log {
+ daily
+ missingok
+ rotate 3
+ compress
+ delaycompress
+ notifempty
+ create 600 obfsproxy obfsproxy
+ postrotate
+ if [ -f /var/run/obfsproxy.pid ]; then
+ /etc/init.d/obfsproxy restart > /dev/null
+ fi
+ endscript
+}
diff --git a/puppet/modules/obfsproxy/manifests/init.pp b/puppet/modules/obfsproxy/manifests/init.pp
new file mode 100644
index 00000000..6a3d2c72
--- /dev/null
+++ b/puppet/modules/obfsproxy/manifests/init.pp
@@ -0,0 +1,86 @@
+# deploy obfsproxy service
+class obfsproxy (
+ $transport,
+ $bind_address,
+ $port,
+ $param,
+ $dest_ip,
+ $dest_port,
+ $log_level = 'info'
+){
+
+ $user = 'obfsproxy'
+ $conf = '/etc/obfsproxy/obfsproxy.conf'
+
+ user { $user:
+ ensure => present,
+ system => true,
+ gid => $user,
+ }
+
+ group { $user:
+ ensure => present,
+ system => true,
+ }
+
+ file { '/etc/init.d/obfsproxy':
+ ensure => present,
+ path => '/etc/init.d/obfsproxy',
+ source => 'puppet:///modules/obfsproxy/obfsproxy_init',
+ owner => 'root',
+ group => 'root',
+ mode => '0750',
+ require => File[$conf],
+ }
+
+ file { $conf :
+ ensure => present,
+ path => $conf,
+ owner => 'root',
+ group => 'root',
+ mode => '0600',
+ content => template('obfsproxy/etc_conf.erb'),
+ }
+
+ file { '/etc/obfsproxy':
+ ensure => directory,
+ owner => $user,
+ group => $user,
+ mode => '0700',
+ require => User[$user],
+ }
+
+ file { '/var/log/obfsproxy.log':
+ ensure => present,
+ owner => $user,
+ group => $user,
+ mode => '0640',
+ require => User[$user],
+ }
+
+ file { '/etc/logrotate.d/obfsproxy':
+ ensure => present,
+ source => 'puppet:///modules/obfsproxy/obfsproxy_logrotate',
+ owner => 'root',
+ group => 'root',
+ mode => '0644',
+ require => File['/var/log/obfsproxy.log'],
+ }
+
+ package { 'obfsproxy':
+ ensure => present
+ }
+
+ service { 'obfsproxy':
+ ensure => running,
+ subscribe => File[$conf],
+ require => [
+ Package['obfsproxy'],
+ File['/etc/init.d/obfsproxy'],
+ User[$user],
+ Group[$user]]
+ }
+
+
+}
+
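A hedged sketch of declaring this class follows; every value is illustrative (ports, addresses and the scramblesuit password are placeholders), chosen only to match the parameters defined above and the `etc_conf.erb` template that consumes them.

```puppet
class { 'obfsproxy':
  transport    => 'scramblesuit',
  bind_address => '0.0.0.0',
  port         => 4430,
  param        => 'REPLACE_WITH_SCRAMBLESUIT_PASSWORD',
  dest_ip      => '127.0.0.1',
  dest_port    => 1194,
}
```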
diff --git a/puppet/modules/obfsproxy/templates/etc_conf.erb b/puppet/modules/obfsproxy/templates/etc_conf.erb
new file mode 100644
index 00000000..8959ef78
--- /dev/null
+++ b/puppet/modules/obfsproxy/templates/etc_conf.erb
@@ -0,0 +1,11 @@
+TRANSPORT=<%= @transport %>
+PORT=<%= @port %>
+DEST_IP=<%= @dest_ip %>
+DEST_PORT=<%= @dest_port %>
+<% if @transport == "scramblesuit" -%>
+PARAM=--password=<%= @param %>
+<% else -%>
+PARAM=<%= @param %>
+<% end -%>
+LOG=<%= @log_level %>
+BINDADDR=<%= @bind_address %>
diff --git a/puppet/modules/opendkim/manifests/init.pp b/puppet/modules/opendkim/manifests/init.pp
new file mode 100644
index 00000000..4d4c5312
--- /dev/null
+++ b/puppet/modules/opendkim/manifests/init.pp
@@ -0,0 +1,67 @@
+#
+# I am not sure about what issues might arise with DKIM key sizes
+# larger than 2048. It might or might not be supported. See:
+# http://dkim.org/specs/rfc4871-dkimbase.html#rfc.section.3.3.3
+#
+class opendkim {
+
+ $domain_hash = hiera('domain')
+ $domain = $domain_hash['full_suffix']
+ $mx = hiera('mx')
+ $dkim = $mx['dkim']
+ $selector = $dkim['selector']
+ $dkim_cert = $dkim['public_key']
+ $dkim_key = $dkim['private_key']
+
+ ensure_packages(['opendkim', 'libvbr2'])
+
+ # postfix user needs to be in the opendkim group
+ # in order to access the opendkim socket located at:
+ # local:/var/run/opendkim/opendkim.sock
+ user { 'postfix':
+ groups => 'opendkim',
+ require => Package['opendkim'];
+ }
+
+ service { 'opendkim':
+ ensure => running,
+ enable => true,
+ hasstatus => true,
+ hasrestart => true,
+ subscribe => File[$dkim_key];
+ }
+
+ file {
+ '/etc/opendkim.conf':
+ ensure => file,
+ content => template('opendkim/opendkim.conf'),
+ mode => '0644',
+ owner => root,
+ group => root,
+ notify => Service['opendkim'],
+ require => Package['opendkim'];
+
+ '/etc/default/opendkim.conf':
+ ensure => file,
+ content => 'SOCKET="inet:8891@localhost" # listen on loopback on port 8891',
+ mode => '0644',
+ owner => root,
+ group => root,
+ notify => Service['opendkim'],
+ require => Package['opendkim'];
+
+ $dkim_key:
+ ensure => file,
+ mode => '0600',
+ owner => 'opendkim',
+ group => 'opendkim',
+ require => Package['opendkim'];
+
+ $dkim_cert:
+ ensure => file,
+ mode => '0600',
+ owner => 'opendkim',
+ group => 'opendkim',
+ require => Package['opendkim'];
+ }
+}
diff --git a/puppet/modules/opendkim/templates/opendkim.conf b/puppet/modules/opendkim/templates/opendkim.conf
new file mode 100644
index 00000000..5a948229
--- /dev/null
+++ b/puppet/modules/opendkim/templates/opendkim.conf
@@ -0,0 +1,45 @@
+# This is a basic configuration that can easily be adapted to suit a standard
+# installation. For more advanced options, see opendkim.conf(5) and/or
+# /usr/share/doc/opendkim/examples/opendkim.conf.sample.
+
+# Log to syslog
+Syslog yes
+SyslogSuccess yes
+LogWhy no
+# Required to use local socket with MTAs that access the socket as a non-
+# privileged user (e.g. Postfix)
+UMask 002
+
+Domain <%= @domain %>
+SubDomains yes
+
+# set internal hosts to all the known hosts, like mydomains?
+
+# can we generate a larger key and get it in dns?
+KeyFile <%= @dkim_key %>
+
+Selector <%= @selector %>
+
+# Commonly-used options; the commented-out versions show the defaults.
+Canonicalization relaxed
+#Mode sv
+#ADSPDiscard no
+
+SignatureAlgorithm rsa-sha256
+
+# Always oversign From (sign using actual From and a null From) to prevent
+# malicious addition of header fields (From and/or others) between the signer
+# and the verifier. From is oversigned by default in the Debian package
+# because it is often the identity key used by reputation systems and thus
+# somewhat security sensitive.
+OversignHeaders From
+
+# List domains to use for RFC 6541 DKIM Authorized Third-Party Signatures
+# (ATPS) (experimental)
+
+#ATPSDomains example.com
+
+RemoveOldSignatures yes
+
+Mode sv
+BaseDirectory /var/tmp
diff --git a/puppet/modules/openvpn/.fixtures.yml b/puppet/modules/openvpn/.fixtures.yml
new file mode 100644
index 00000000..1125ecca
--- /dev/null
+++ b/puppet/modules/openvpn/.fixtures.yml
@@ -0,0 +1,6 @@
+fixtures:
+ repositories:
+ concat: git://github.com/ripienaar/puppet-concat.git
+ symlinks:
+ openvpn: "#{source_dir}"
+
diff --git a/puppet/modules/openvpn/.gitignore b/puppet/modules/openvpn/.gitignore
new file mode 100644
index 00000000..6fd248b3
--- /dev/null
+++ b/puppet/modules/openvpn/.gitignore
@@ -0,0 +1,3 @@
+pkg
+spec/fixtures
+.vagrant
diff --git a/puppet/modules/openvpn/.rvmrc b/puppet/modules/openvpn/.rvmrc
new file mode 100644
index 00000000..6fbfb7f1
--- /dev/null
+++ b/puppet/modules/openvpn/.rvmrc
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+
+# This is an RVM Project .rvmrc file, used to automatically load the ruby
+# development environment upon cd'ing into the directory
+
+# First we specify our desired <ruby>[@<gemset>], the @gemset name is optional,
+# Only full ruby name is supported here, for short names use:
+# echo "rvm use 1.9.3" > .rvmrc
+environment_id="ruby-1.9.3-p194@puppet"
+
+# Uncomment the following lines if you want to verify rvm version per project
+# rvmrc_rvm_version="1.15.8 (stable)" # 1.10.1 seems like a safe start
+# eval "$(echo ${rvm_version}.${rvmrc_rvm_version} | awk -F. '{print "[[ "$1*65536+$2*256+$3" -ge "$4*65536+$5*256+$6" ]]"}' )" || {
+# echo "This .rvmrc file requires at least RVM ${rvmrc_rvm_version}, aborting loading."
+# return 1
+# }
+
+# First we attempt to load the desired environment directly from the environment
+# file. This is very fast and efficient compared to running through the entire
+# CLI and selector. If you want feedback on which environment was used then
+# insert the word 'use' after --create as this triggers verbose mode.
+if [[ -d "${rvm_path:-$HOME/.rvm}/environments"
+ && -s "${rvm_path:-$HOME/.rvm}/environments/$environment_id" ]]
+then
+ \. "${rvm_path:-$HOME/.rvm}/environments/$environment_id"
+ [[ -s "${rvm_path:-$HOME/.rvm}/hooks/after_use" ]] &&
+ \. "${rvm_path:-$HOME/.rvm}/hooks/after_use" || true
+ if [[ $- == *i* ]] # check for interactive shells
+ then echo "Using: $(tput setaf 2)$GEM_HOME$(tput sgr0)" # show the user the ruby and gemset they are using in green
+ else echo "Using: $GEM_HOME" # don't use colors in non-interactive shells
+ fi
+else
+ # If the environment file has not yet been created, use the RVM CLI to select.
+ rvm --create use "$environment_id" || {
+ echo "Failed to create RVM environment '${environment_id}'."
+ return 1
+ }
+fi
diff --git a/puppet/modules/openvpn/.travis.yml b/puppet/modules/openvpn/.travis.yml
new file mode 100644
index 00000000..da5c389d
--- /dev/null
+++ b/puppet/modules/openvpn/.travis.yml
@@ -0,0 +1,29 @@
+language: ruby
+bundler_args: --without development
+script: "bundle exec rake spec SPEC_OPTS='--format documentation'"
+rvm:
+ - 1.8.7
+ - 1.9.3
+ - 2.0.0
+script:
+ - "rake lint"
+ - "rake spec SPEC_OPTS='--format documentation'"
+env:
+ - PUPPET_VERSION="~> 2.7.0"
+ - PUPPET_VERSION="~> 3.0.0"
+ - PUPPET_VERSION="~> 3.1.0"
+ - PUPPET_VERSION="~> 3.2.0"
+matrix:
+ exclude:
+ - rvm: 1.9.3
+ env: PUPPET_VERSION="~> 2.7.0"
+ - rvm: 2.0.0
+ env: PUPPET_VERSION="~> 2.7.0"
+ - rvm: 2.0.0
+ env: PUPPET_VERSION="~> 3.0.0"
+ - rvm: 2.0.0
+ env: PUPPET_VERSION="~> 3.1.0"
+notifications:
+ email: false
+ on_success: always
+ on_failure: always
diff --git a/puppet/modules/openvpn/Gemfile b/puppet/modules/openvpn/Gemfile
new file mode 100644
index 00000000..68e10e7d
--- /dev/null
+++ b/puppet/modules/openvpn/Gemfile
@@ -0,0 +1,7 @@
+source :rubygems
+
+puppetversion = ENV['PUPPET_VERSION']
+gem 'puppet', puppetversion, :require => false
+gem 'puppet-lint'
+gem 'rspec-puppet'
+gem 'puppetlabs_spec_helper'
diff --git a/puppet/modules/openvpn/Gemfile.lock b/puppet/modules/openvpn/Gemfile.lock
new file mode 100644
index 00000000..9fce3f98
--- /dev/null
+++ b/puppet/modules/openvpn/Gemfile.lock
@@ -0,0 +1,36 @@
+GEM
+ remote: http://rubygems.org/
+ specs:
+ diff-lcs (1.1.3)
+ facter (1.6.17)
+ hiera (1.0.0)
+ metaclass (0.0.1)
+ mocha (0.13.1)
+ metaclass (~> 0.0.1)
+ puppet (3.0.2)
+ facter (~> 1.6.11)
+ hiera (~> 1.0.0)
+ puppetlabs_spec_helper (0.4.0)
+ mocha (>= 0.10.5)
+ rake
+ rspec (>= 2.9.0)
+ rspec-puppet (>= 0.1.1)
+ rake (10.0.3)
+ rspec (2.12.0)
+ rspec-core (~> 2.12.0)
+ rspec-expectations (~> 2.12.0)
+ rspec-mocks (~> 2.12.0)
+ rspec-core (2.12.2)
+ rspec-expectations (2.12.1)
+ diff-lcs (~> 1.1.3)
+ rspec-mocks (2.12.1)
+ rspec-puppet (0.1.5)
+ rspec
+
+PLATFORMS
+ ruby
+
+DEPENDENCIES
+ puppet
+ puppetlabs_spec_helper
+ rspec-puppet
diff --git a/puppet/modules/openvpn/LICENSE b/puppet/modules/openvpn/LICENSE
new file mode 100644
index 00000000..f433b1a5
--- /dev/null
+++ b/puppet/modules/openvpn/LICENSE
@@ -0,0 +1,177 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
diff --git a/puppet/modules/openvpn/Modulefile b/puppet/modules/openvpn/Modulefile
new file mode 100644
index 00000000..679e7e64
--- /dev/null
+++ b/puppet/modules/openvpn/Modulefile
@@ -0,0 +1,11 @@
+name 'luxflux-openvpn'
+version '2.1.0'
+source 'https://github.com/luxflux/puppet-openvpn'
+author 'luxflux'
+license 'Apache 2.0'
+summary 'OpenVPN server puppet module'
+description 'Puppet module to manage OpenVPN servers'
+project_page 'https://github.com/luxflux/puppet-openvpn'
+
+## Add dependencies, if any:
+dependency 'ripienaar/concat', '0.2.0'
diff --git a/puppet/modules/openvpn/Rakefile b/puppet/modules/openvpn/Rakefile
new file mode 100644
index 00000000..14f1c246
--- /dev/null
+++ b/puppet/modules/openvpn/Rakefile
@@ -0,0 +1,2 @@
+require 'rubygems'
+require 'puppetlabs_spec_helper/rake_tasks'
diff --git a/puppet/modules/openvpn/Readme.markdown b/puppet/modules/openvpn/Readme.markdown
new file mode 100644
index 00000000..6bcf49ea
--- /dev/null
+++ b/puppet/modules/openvpn/Readme.markdown
@@ -0,0 +1,54 @@
+# OpenVPN Puppet module
+
+Puppet module to manage OpenVPN servers
+
+## Features:
+
+* Client-specific rules and access policies
+* Generated client configurations and SSL-Certificates
+* Downloadable client configurations and SSL-Certificates for easy client configuration
+* Support for multiple server instances
+
+Tested on Ubuntu Precise Pangolin, CentOS 6, RedHat 6.
+
+
+## Dependencies
+ - [puppet-concat](https://github.com/ripienaar/puppet-concat)
+
+
+## Example
+
+```puppet
+ # add a server instance
+ openvpn::server { 'winterthur':
+ country => 'CH',
+ province => 'ZH',
+ city => 'Winterthur',
+ organization => 'example.org',
+ email => 'root@example.org',
+ server => '10.200.200.0 255.255.255.0'
+ }
+
+ # define clients
+ openvpn::client { 'client1':
+ server => 'winterthur'
+ }
+ openvpn::client { 'client2':
+ server => 'winterthur'
+ }
+
+ openvpn::client_specific_config { 'client1':
+ server => 'winterthur',
+ ifconfig => '10.200.200.50 255.255.255.0'
+ }
+```
+
+Don't forget the [sysctl](https://github.com/luxflux/puppet-sysctl) directive ```net.ipv4.ip_forward```!
+
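+A minimal sketch of enabling that directive with Puppet, assuming a sysctl
+module that provides a `sysctl` defined type with a `value` parameter (adjust
+the resource to whatever sysctl module you actually deploy):
+
+```puppet
+  # allow the VPN server to forward traffic between clients and the network
+  sysctl { 'net.ipv4.ip_forward':
+    value => '1',
+  }
+```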
+
+## Contributors
+
+These fine folks helped to get this far with this module:
+* [@jlambert121](https://github.com/jlambert121)
+* [@jlk](https://github.com/jlk)
+* [@elisiano](https://github.com/elisiano)
diff --git a/puppet/modules/openvpn/Vagrantfile b/puppet/modules/openvpn/Vagrantfile
new file mode 100644
index 00000000..88875ff8
--- /dev/null
+++ b/puppet/modules/openvpn/Vagrantfile
@@ -0,0 +1,42 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+def server_config(config)
+ config.vm.provision :puppet, :module_path => '..' do |puppet|
+ puppet.manifests_path = "vagrant"
+ puppet.manifest_file = "server.pp"
+ end
+end
+
+def client_config(config)
+ config.vm.provision :puppet, :module_path => '..' do |puppet|
+ puppet.manifests_path = "vagrant"
+ puppet.manifest_file = "client.pp"
+ end
+end
+
+Vagrant::Config.run do |config|
+
+ config.vm.define :server_ubuntu do |c|
+ c.vm.box = 'precise64'
+ server_config c
+ c.vm.network :hostonly, '10.255.255.10'
+ end
+
+ config.vm.define :server_centos do |c|
+ c.vm.box = 'centos63'
+
+ c.vm.provision :shell, :inline => 'if [ ! -f rpmforge-release-0.5.2-2.el6.rf.x86_64.rpm ]; then wget -q http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.2-2.el6.rf.x86_64.rpm; fi'
+ c.vm.provision :shell, :inline => 'yum install -y rpmforge-release-0.5.2-2.el6.rf.x86_64.rpm || exit 0'
+
+ server_config c
+ c.vm.network :hostonly, '10.255.255.11'
+ end
+
+ config.vm.define :client_ubuntu do |c|
+ c.vm.box = 'precise64'
+ client_config c
+ c.vm.network :hostonly, '10.255.255.20'
+ end
+
+end
diff --git a/puppet/modules/openvpn/manifests/client.pp b/puppet/modules/openvpn/manifests/client.pp
new file mode 100644
index 00000000..92c6aa4e
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/client.pp
@@ -0,0 +1,187 @@
+# == Define: openvpn::client
+#
+# This define creates the client certs for a specified openvpn server as well
+# as creating a tarball that can be directly imported into openvpn clients
+#
+#
+# === Parameters
+#
+# [*server*]
+# String. Name of the corresponding openvpn endpoint
+# Required
+#
+# [*compression*]
+#   String. Which compression algorithm to use
+# Default: comp-lzo
+# Options: comp-lzo or '' (disable compression)
+#
+# [*dev*]
+# String. Device method
+# Default: tun
+# Options: tun (routed connections), tap (bridged connections)
+#
+# [*mute*]
+# Integer. Set log mute level
+# Default: 20
+#
+# [*mute_replay_warnings*]
+# Boolean. Silence duplicate packet warnings (common on wireless networks)
+# Default: true
+#
+# [*nobind*]
+# Boolean. Whether or not to bind to a specific port number
+# Default: true
+#
+# [*persist_key*]
+# Boolean. Try to retain access to resources that may be unavailable
+# because of privilege downgrades
+# Default: true
+#
+# [*persist_tun*]
+# Boolean. Try to retain access to resources that may be unavailable
+# because of privilege downgrades
+# Default: true
+#
+# [*port*]
+# Integer. The port the openvpn server service is running on
+# Default: 1194
+#
+# [*proto*]
+# String. What IP protocol is being used.
+# Default: tcp
+# Options: tcp or udp
+#
+# [*remote_host*]
+# String. The IP or hostname of the openvpn server service
+# Default: FQDN
+#
+# [*resolv_retry*]
+#   Integer/String. How many seconds the openvpn client should keep trying to
+#     resolve the server's hostname
+# Default: infinite
+# Options: Integer or infinite
+#
+# [*verb*]
+# Integer. Level of logging verbosity
+# Default: 3
+#
+#
+# === Examples
+#
+# openvpn::client {
+# 'my_user':
+# server => 'contractors',
+# remote_host => 'vpn.mycompany.com'
+# }
+#
+# * Removal:
+# Manual process right now, todo for the future
+#
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+# * John Kinsella <mailto:jlkinsel@gmail.com>
+# * Justin Lambert <mailto:jlambert@letsevenup.com>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+define openvpn::client(
+ $server,
+ $compression = 'comp-lzo',
+ $dev = 'tun',
+ $mute = '20',
+ $mute_replay_warnings = true,
+ $nobind = true,
+ $persist_key = true,
+ $persist_tun = true,
+ $port = '1194',
+ $proto = 'tcp',
+ $remote_host = $::fqdn,
+ $resolv_retry = 'infinite',
+ $verb = '3',
+) {
+
+ Openvpn::Server[$server] ->
+ Openvpn::Client[$name]
+
+ exec {
+ "generate certificate for ${name} in context of ${server}":
+ command => ". ./vars && ./pkitool ${name}",
+ cwd => "/etc/openvpn/${server}/easy-rsa",
+ creates => "/etc/openvpn/${server}/easy-rsa/keys/${name}.crt",
+ provider => 'shell';
+ }
+
+ file {
+ [ "/etc/openvpn/${server}/download-configs/${name}",
+ "/etc/openvpn/${server}/download-configs/${name}/keys"]:
+ ensure => directory;
+
+ "/etc/openvpn/${server}/download-configs/${name}/keys/${name}.crt":
+ ensure => link,
+ target => "/etc/openvpn/${server}/easy-rsa/keys/${name}.crt",
+ require => Exec["generate certificate for ${name} in context of ${server}"];
+
+ "/etc/openvpn/${server}/download-configs/${name}/keys/${name}.key":
+ ensure => link,
+ target => "/etc/openvpn/${server}/easy-rsa/keys/${name}.key",
+ require => Exec["generate certificate for ${name} in context of ${server}"];
+
+ "/etc/openvpn/${server}/download-configs/${name}/keys/ca.crt":
+ ensure => link,
+ target => "/etc/openvpn/${server}/easy-rsa/keys/ca.crt",
+ require => Exec["generate certificate for ${name} in context of ${server}"];
+
+ "/etc/openvpn/${server}/download-configs/${name}/${name}.conf":
+ owner => root,
+ group => root,
+ mode => '0444',
+ content => template('openvpn/client.erb'),
+ notify => Exec["tar the thing ${server} with ${name}"];
+ }
+
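+  # Bundle the rendered client config and its keys into ${name}.tar.gz for
+  # download; runs only when notified by the config file above.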
+ exec {
+ "tar the thing ${server} with ${name}":
+ cwd => "/etc/openvpn/${server}/download-configs/",
+ command => "/bin/rm ${name}.tar.gz; tar --exclude=\\*.conf.d -chzvf ${name}.tar.gz ${name}",
+ refreshonly => true,
+ require => [ File["/etc/openvpn/${server}/download-configs/${name}/${name}.conf"],
+ File["/etc/openvpn/${server}/download-configs/${name}/keys/ca.crt"],
+ File["/etc/openvpn/${server}/download-configs/${name}/keys/${name}.key"],
+ File["/etc/openvpn/${server}/download-configs/${name}/keys/${name}.crt"]
+ ],
+ notify => Exec["generate ${name}.ovpn in ${server}"];
+ }
+
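+  # Build a single-file ${name}.ovpn by inlining ca.crt, the client cert and
+  # key into <ca>/<cert>/<key> blocks of the rendered config.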
+ exec {
+ "generate ${name}.ovpn in ${server}":
+ cwd => "/etc/openvpn/${server}/download-configs/",
+ command => "/bin/rm ${name}.ovpn; cat ${name}/${name}.conf|perl -lne 'if(m|^ca keys/ca.crt|){ chomp(\$ca=`cat ${name}/keys/ca.crt`); print \"<ca>\n\$ca\n</ca>\"} elsif(m|^cert keys/${name}.crt|) { chomp(\$crt=`cat ${name}/keys/${name}.crt`); print \"<cert>\n\$crt\n</cert>\"} elsif(m|^key keys/${name}.key|){ chomp(\$key=`cat ${name}/keys/${name}.key`); print \"<key>\n\$key\n</key>\"} else { print} ' > ${name}.ovpn",
+ refreshonly => true,
+ require => [ File["/etc/openvpn/${server}/download-configs/${name}/${name}.conf"],
+ File["/etc/openvpn/${server}/download-configs/${name}/keys/ca.crt"],
+ File["/etc/openvpn/${server}/download-configs/${name}/keys/${name}.key"],
+ File["/etc/openvpn/${server}/download-configs/${name}/keys/${name}.crt"],
+ ],
+ }
+
+ file { "/etc/openvpn/${server}/download-configs/${name}.ovpn":
+ mode => '0400',
+ require => Exec["generate ${name}.ovpn in ${server}"],
+ }
+}
diff --git a/puppet/modules/openvpn/manifests/client_specific_config.pp b/puppet/modules/openvpn/manifests/client_specific_config.pp
new file mode 100644
index 00000000..4287421a
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/client_specific_config.pp
@@ -0,0 +1,79 @@
+# == Define: openvpn::client_specific_config
+#
+# This define configures options which will be pushed by the server to a
+# specific client only. This feature is explained here:
+# http://openvpn.net/index.php/open-source/documentation/howto.html#policy
+#
+# === Parameters
+#
+# All the parameters are explained in the openvpn documentation:
+# http://openvpn.net/index.php/open-source/documentation/howto.html#policy
+#
+# [*server*]
+# String. Name of the corresponding openvpn endpoint
+# Required
+#
+# [*iroute*]
+# Array. Array of iroute combinations.
+# Default: []
+#
+# [*ifconfig*]
+# String. IP configuration to push to the client.
+# Default: false
+#
+# [*dhcp_options*]
+# Array. DHCP options to push to the client.
+# Default: []
+#
+#
+# === Examples
+#
+# openvpn::client_specific_config {
+# 'vpn_client':
+# server => 'contractors',
+# iroute => ['10.0.1.0 255.255.255.0'],
+# ifconfig => '10.10.10.1 10.10.10.2',
+# dhcp_options => ['DNS 8.8.8.8']
+# }
+#
+# * Removal:
+# Manual process right now, todo for the future
+#
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+define openvpn::client_specific_config(
+ $server,
+ $iroute = [],
+ $ifconfig = false,
+ $dhcp_options = []
+) {
+
+ Openvpn::Server[$server] ->
+ Openvpn::Client[$name] ->
+ Openvpn::Client_specific_config[$name]
+
+ file { "/etc/openvpn/${server}/client-configs/${name}":
+ ensure => present,
+ content => template('openvpn/client_specific_config.erb')
+ }
+
+}
diff --git a/puppet/modules/openvpn/manifests/config.pp b/puppet/modules/openvpn/manifests/config.pp
new file mode 100644
index 00000000..32b32094
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/config.pp
@@ -0,0 +1,52 @@
+# == Class: openvpn::config
+#
+# This class sets up the openvpn environment as well as the default config file
+#
+#
+# === Examples
+#
+# This class should not be directly invoked
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+# * John Kinsella <mailto:jlkinsel@gmail.com>
+# * Justin Lambert <mailto:jlambert@letsevenup.com>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+class openvpn::config {
+
+ if $::osfamily == 'Debian' {
+ include concat::setup
+
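+    # /etc/default/openvpn is assembled from concat fragments; each
+    # openvpn::server instance appends its own AUTOSTART entry.
+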
+ concat {
+ '/etc/default/openvpn':
+ owner => root,
+ group => root,
+ mode => 644,
+ warn => true;
+ }
+
+ concat::fragment {
+ 'openvpn.default.header':
+ content => template('openvpn/etc-default-openvpn.erb'),
+ target => '/etc/default/openvpn',
+ order => 01;
+ }
+ }
+}
diff --git a/puppet/modules/openvpn/manifests/init.pp b/puppet/modules/openvpn/manifests/init.pp
new file mode 100644
index 00000000..7e07f025
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/init.pp
@@ -0,0 +1,43 @@
+# == Class: openvpn
+#
+# This module installs the openvpn service, configures vpn endpoints, generates
+# client certificates, and generates client config files
+#
+#
+# === Examples
+#
+# * Installation:
+# class { 'openvpn': }
+#
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+# * John Kinsella <mailto:jlkinsel@gmail.com>
+# * Justin Lambert <mailto:jlambert@letsevenup.com>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+class openvpn {
+
+ class {'openvpn::params': } ->
+ class {'openvpn::install': } ->
+ class {'openvpn::config': } ~>
+ class {'openvpn::service': } ->
+ Class['openvpn']
+
+}
diff --git a/puppet/modules/openvpn/manifests/install.pp b/puppet/modules/openvpn/manifests/install.pp
new file mode 100644
index 00000000..a230373a
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/install.pp
@@ -0,0 +1,46 @@
+# == Class: openvpn
+#
+# This module installs the openvpn service, configures vpn endpoints, generates
+# client certificates, and generates client config files
+#
+#
+# === Examples
+#
+# This class should not be directly invoked
+#
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+# * John Kinsella <mailto:jlkinsel@gmail.com>
+# * Justin Lambert <mailto:jlambert@letsevenup.com>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+class openvpn::install {
+
+ package {
+ 'openvpn':
+ ensure => installed;
+ }
+
+ file {
+ [ '/etc/openvpn', '/etc/openvpn/keys' ]:
+ ensure => directory,
+ require => Package['openvpn'];
+ }
+}
diff --git a/puppet/modules/openvpn/manifests/params.pp b/puppet/modules/openvpn/manifests/params.pp
new file mode 100644
index 00000000..33495270
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/params.pp
@@ -0,0 +1,37 @@
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+class openvpn::params {
+
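+  # OS-dependent defaults: the unprivileged group to drop to and the path
+  # where the distribution ships the easy-rsa 2.0 scripts.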
+ $group = $::osfamily ? {
+ 'RedHat' => 'nobody',
+ default => 'nogroup'
+ }
+
+ $easyrsa_source = $::osfamily ? {
+ 'RedHat' => $::operatingsystemmajrelease ? {
+ 6 => '/usr/share/openvpn/easy-rsa/2.0',
+ default => '/usr/share/doc/openvpn-2.2.2/easy-rsa/2.0'
+ },
+ default => '/usr/share/doc/openvpn/examples/easy-rsa/2.0'
+ }
+
+ $link_openssl_cnf = $::osfamily ? {
+ /(Debian|RedHat)/ => true,
+ default => false
+ }
+
+}
diff --git a/puppet/modules/openvpn/manifests/server.pp b/puppet/modules/openvpn/manifests/server.pp
new file mode 100644
index 00000000..649048c4
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/server.pp
@@ -0,0 +1,233 @@
+# == Define: openvpn::server
+#
+# This define creates the openvpn server instance and ssl certificates
+#
+#
+# === Parameters
+#
+# [*country*]
+# String. Country to be used for the SSL certificate
+#
+# [*province*]
+# String. Province to be used for the SSL certificate
+#
+# [*city*]
+# String. City to be used for the SSL certificate
+#
+# [*organization*]
+# String. Organization to be used for the SSL certificate
+#
+# [*email*]
+# String. Email address to be used for the SSL certificate
+#
+# [*compression*]
+#   String. Which compression algorithm to use
+# Default: comp-lzo
+# Options: comp-lzo or '' (disable compression)
+#
+# [*dev*]
+# String. Device method
+# Default: tun
+# Options: tun (routed connections), tap (bridged connections)
+#
+# [*user*]
+#   String. User to drop privileges to after startup
+#   Default: nobody
+#
+# [*group*]
+#   String. Group to drop privileges to after startup
+# Default: depends on your $::osfamily
+#
+# [*ipp*]
+# Boolean. Persist ifconfig information to a file to retain client IP
+# addresses between sessions
+# Default: false
+#
+# [*local*]
+# String. Interface for openvpn to bind to.
+# Default: $::ipaddress_eth0
+# Options: An IP address or '' to bind to all ip addresses
+#
+# [*logfile*]
+# String. Logfile for this openvpn server
+# Default: false
+# Options: false (syslog) or log file name
+#
+# [*port*]
+# Integer. The port the openvpn server service is running on
+# Default: 1194
+#
+# [*proto*]
+# String. What IP protocol is being used.
+# Default: tcp
+# Options: tcp or udp
+#
+# [*status_log*]
+# String. Logfile for periodic dumps of the vpn service status
+# Default: "${name}/openvpn-status.log"
+#
+# [*server*]
+# String. Network to assign client addresses out of
+# Default: None. Required in tun mode, not in tap mode
+#
+# [*push*]
+# Array. Options to push out to the client. This can include routes, DNS
+# servers, DNS search domains, and many other options.
+# Default: []
+#
+#
+# === Examples
+#
+#   openvpn::server {
+#     'winterthur':
+#       country      => 'CH',
+#       province     => 'ZH',
+#       city         => 'Winterthur',
+#       organization => 'example.org',
+#       email        => 'root@example.org',
+#       server       => '10.200.200.0 255.255.255.0'
+#   }
+#
+# * Removal:
+# Manual process right now, todo for the future
+#
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+# * John Kinsella <mailto:jlkinsel@gmail.com>
+# * Justin Lambert <mailto:jlambert@letsevenup.com>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+define openvpn::server(
+ $country,
+ $province,
+ $city,
+ $organization,
+ $email,
+ $compression = 'comp-lzo',
+ $dev = 'tun0',
+ $user = 'nobody',
+ $group = false,
+ $ipp = false,
+ $ip_pool = [],
+ $local = $::ipaddress_eth0,
+ $logfile = false,
+ $port = '1194',
+ $proto = 'tcp',
+ $status_log = "${name}/openvpn-status.log",
+ $server = '',
+ $push = []
+) {
+
+ include openvpn
+ Class['openvpn::install'] ->
+ Openvpn::Server[$name] ~>
+ Class['openvpn::service']
+
+ $tls_server = $proto ? {
+ /tcp/ => true,
+ default => false
+ }
+
+ $group_to_set = $group ? {
+ false => $openvpn::params::group,
+ default => $group
+ }
+
+ file {
+ ["/etc/openvpn/${name}", "/etc/openvpn/${name}/client-configs", "/etc/openvpn/${name}/download-configs" ]:
+ ensure => directory;
+ }
+
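+  # Bootstrap the PKI with the easy-rsa 2.0 scripts shipped with openvpn:
+  # copy them into this instance's config dir, then (below) build the DH
+  # parameters, the CA and the server certificate.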
+ exec {
+ "copy easy-rsa to openvpn config folder ${name}":
+ command => "/bin/cp -r ${openvpn::params::easyrsa_source} /etc/openvpn/${name}/easy-rsa",
+ creates => "/etc/openvpn/${name}/easy-rsa",
+ notify => Exec["fix_easyrsa_file_permissions_${name}"],
+ require => File["/etc/openvpn/${name}"];
+ }
+
+ exec {
+ "fix_easyrsa_file_permissions_${name}":
+ refreshonly => true,
+ command => "/bin/chmod 755 /etc/openvpn/${name}/easy-rsa/*";
+ }
+
+ file {
+ "/etc/openvpn/${name}/easy-rsa/vars":
+ ensure => present,
+ content => template('openvpn/vars.erb'),
+ require => Exec["copy easy-rsa to openvpn config folder ${name}"];
+ }
+
+ file {
+ "/etc/openvpn/${name}/easy-rsa/openssl.cnf":
+ require => Exec["copy easy-rsa to openvpn config folder ${name}"];
+ }
+
+ if $openvpn::params::link_openssl_cnf == true {
+ File["/etc/openvpn/${name}/easy-rsa/openssl.cnf"] {
+ ensure => link,
+ target => "/etc/openvpn/${name}/easy-rsa/openssl-1.0.0.cnf"
+ }
+ }
+
+ exec {
+ "generate dh param ${name}":
+ command => '. ./vars && ./clean-all && ./build-dh',
+ cwd => "/etc/openvpn/${name}/easy-rsa",
+ creates => "/etc/openvpn/${name}/easy-rsa/keys/dh1024.pem",
+ provider => 'shell',
+ require => File["/etc/openvpn/${name}/easy-rsa/vars"];
+
+ "initca ${name}":
+ command => '. ./vars && ./pkitool --initca',
+ cwd => "/etc/openvpn/${name}/easy-rsa",
+ creates => "/etc/openvpn/${name}/easy-rsa/keys/ca.key",
+ provider => 'shell',
+ require => [ Exec["generate dh param ${name}"], File["/etc/openvpn/${name}/easy-rsa/openssl.cnf"] ];
+
+ "generate server cert ${name}":
+ command => '. ./vars && ./pkitool --server server',
+ cwd => "/etc/openvpn/${name}/easy-rsa",
+ creates => "/etc/openvpn/${name}/easy-rsa/keys/server.key",
+ provider => 'shell',
+ require => Exec["initca ${name}"];
+ }
+
+ file {
+ "/etc/openvpn/${name}/keys":
+ ensure => link,
+ target => "/etc/openvpn/${name}/easy-rsa/keys",
+ require => Exec["copy easy-rsa to openvpn config folder ${name}"];
+ }
+
+ if $::osfamily == 'Debian' {
+ concat::fragment {
+ "openvpn.default.autostart.${name}":
+ content => "AUTOSTART=\"\$AUTOSTART ${name}\"\n",
+ target => '/etc/default/openvpn',
+ order => 10;
+ }
+ }
+
+ file {
+ "/etc/openvpn/${name}.conf":
+ owner => root,
+ group => root,
+ mode => '0444',
+ content => template('openvpn/server.erb');
+ }
+}
diff --git a/puppet/modules/openvpn/manifests/service.pp b/puppet/modules/openvpn/manifests/service.pp
new file mode 100644
index 00000000..54e8db7d
--- /dev/null
+++ b/puppet/modules/openvpn/manifests/service.pp
@@ -0,0 +1,36 @@
+# == Class: openvpn::service
+#
+# This class maintains the openvpn service
+#
+#
+# === Examples
+#
+# This class should not be directly invoked
+#
+# === Authors
+#
+# * Raffael Schmid <mailto:raffael@yux.ch>
+# * John Kinsella <mailto:jlkinsel@gmail.com>
+# * Justin Lambert <mailto:jlambert@letsevenup.com>
+#
+# === License
+#
+# Copyright 2013 Raffael Schmid, <raffael@yux.ch>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+class openvpn::service {
+ service {
+ 'openvpn':
+ ensure => running,
+ enable => true,
+ hasrestart => true,
+ hasstatus => true;
+ }
+}
diff --git a/puppet/modules/openvpn/spec/classes/openvpn_config_spec.rb b/puppet/modules/openvpn/spec/classes/openvpn_config_spec.rb
new file mode 100644
index 00000000..bbb63a77
--- /dev/null
+++ b/puppet/modules/openvpn/spec/classes/openvpn_config_spec.rb
@@ -0,0 +1,15 @@
+require 'spec_helper'
+
+describe 'openvpn::config', :type => :class do
+
+ it { should create_class('openvpn::config') }
+
+ context "on Debian based machines" do
+ let (:facts) { { :osfamily => 'Debian', :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should contain_class('concat::setup') }
+ it { should contain_concat('/etc/default/openvpn') }
+ it { should contain_concat__fragment('openvpn.default.header') }
+ end
+
+end
diff --git a/puppet/modules/openvpn/spec/classes/openvpn_init_spec.rb b/puppet/modules/openvpn/spec/classes/openvpn_init_spec.rb
new file mode 100644
index 00000000..45dcc9bf
--- /dev/null
+++ b/puppet/modules/openvpn/spec/classes/openvpn_init_spec.rb
@@ -0,0 +1,9 @@
+require 'spec_helper'
+
+describe 'openvpn', :type => :class do
+
+ let (:facts) { { :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should create_class('openvpn') }
+
+end
diff --git a/puppet/modules/openvpn/spec/classes/openvpn_install_spec.rb b/puppet/modules/openvpn/spec/classes/openvpn_install_spec.rb
new file mode 100644
index 00000000..cdb31358
--- /dev/null
+++ b/puppet/modules/openvpn/spec/classes/openvpn_install_spec.rb
@@ -0,0 +1,11 @@
+require 'spec_helper'
+
+describe 'openvpn::install', :type => :class do
+
+ it { should create_class('openvpn::install') }
+ it { should contain_package('openvpn') }
+
+ it { should contain_file('/etc/openvpn').with('ensure' => 'directory') }
+ it { should contain_file('/etc/openvpn/keys').with('ensure' => 'directory') }
+
+end
diff --git a/puppet/modules/openvpn/spec/classes/openvpn_service_spec.rb b/puppet/modules/openvpn/spec/classes/openvpn_service_spec.rb
new file mode 100644
index 00000000..f427e7f1
--- /dev/null
+++ b/puppet/modules/openvpn/spec/classes/openvpn_service_spec.rb
@@ -0,0 +1,13 @@
+require 'spec_helper'
+
+describe 'openvpn::service', :type => :class do
+
+ let (:facts) { { :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should create_class('openvpn::service') }
+ it { should contain_service('openvpn').with(
+ 'ensure' => 'running',
+ 'enable' => true
+ ) }
+
+end
diff --git a/puppet/modules/openvpn/spec/defines/openvpn_client_spec.rb b/puppet/modules/openvpn/spec/defines/openvpn_client_spec.rb
new file mode 100644
index 00000000..a4b580e8
--- /dev/null
+++ b/puppet/modules/openvpn/spec/defines/openvpn_client_spec.rb
@@ -0,0 +1,88 @@
+require 'spec_helper'
+
+describe 'openvpn::client', :type => :define do
+ let(:title) { 'test_client' }
+ let(:params) { { 'server' => 'test_server' } }
+ let(:facts) { { :fqdn => 'somehost', :concat_basedir => '/var/lib/puppet/concat' } }
+ let(:pre_condition) do
+ 'openvpn::server { "test_server":
+ country => "CO",
+ province => "ST",
+ city => "Some City",
+ organization => "example.org",
+ email => "testemail@example.org"
+ }'
+ end
+
+ it { should contain_exec('generate certificate for test_client in context of test_server') }
+
+ [ 'test_client', 'test_client/keys'].each do |directory|
+ it { should contain_file("/etc/openvpn/test_server/download-configs/#{directory}") }
+ end
+
+ [ 'test_client.crt', 'test_client.key', 'ca.crt' ].each do |file|
+ it { should contain_file("/etc/openvpn/test_server/download-configs/test_client/keys/#{file}").with(
+ 'ensure' => 'link',
+ 'target' => "/etc/openvpn/test_server/easy-rsa/keys/#{file}"
+ )}
+ end
+
+ it { should contain_exec('tar the thing test_server with test_client').with(
+ 'cwd' => '/etc/openvpn/test_server/download-configs/',
+ 'command' => '/bin/rm test_client.tar.gz; tar --exclude=\*.conf.d -chzvf test_client.tar.gz test_client'
+ ) }
+
+ context "setting the minimum parameters" do
+ let(:params) { { 'server' => 'test_server' } }
+ let(:facts) { { :fqdn => 'somehost', :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^client$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^ca\s+keys\/ca\.crt$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^cert\s+keys\/test_client.crt$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^key\s+keys\/test_client\.key$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^dev\s+tun$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^proto\s+tcp$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^remote\s+somehost\s+1194$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^comp-lzo$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^resolv-retry\s+infinite$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^nobind$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^persist-key$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^persist-tun$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^mute-replay-warnings$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^ns\-cert\-type\s+server$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^verb\s+3$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^mute\s+20$/)}
+ end
+
+ context "setting all of the parameters" do
+ let(:params) { {
+ 'server' => 'test_server',
+ 'compression' => 'comp-something',
+ 'dev' => 'tap',
+ 'mute' => 10,
+ 'mute_replay_warnings' => false,
+ 'nobind' => false,
+ 'persist_key' => false,
+ 'persist_tun' => false,
+ 'port' => '123',
+ 'proto' => 'udp',
+ 'remote_host' => 'somewhere',
+ 'resolv_retry' => '2m',
+ 'verb' => '1'
+ } }
+ let(:facts) { { :fqdn => 'somehost', :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^client$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^ca\s+keys\/ca\.crt$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^cert\s+keys\/test_client.crt$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^key\s+keys\/test_client\.key$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^dev\s+tap$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^proto\s+udp$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^remote\s+somewhere\s+123$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^comp-something$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^resolv-retry\s+2m$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^verb\s+1$/)}
+ it { should contain_file('/etc/openvpn/test_server/download-configs/test_client/test_client.conf').with_content(/^mute\s+10$/)}
+ end
+
+end
diff --git a/puppet/modules/openvpn/spec/defines/openvpn_client_specific_config_spec.rb b/puppet/modules/openvpn/spec/defines/openvpn_client_specific_config_spec.rb
new file mode 100644
index 00000000..cfdab389
--- /dev/null
+++ b/puppet/modules/openvpn/spec/defines/openvpn_client_specific_config_spec.rb
@@ -0,0 +1,40 @@
+require 'spec_helper'
+
+describe 'openvpn::client_specific_config', :type => :define do
+ let(:title) { 'test_client' }
+ let(:params) { { 'server' => 'test_server' } }
+ let(:facts) { { :fqdn => 'somehost', :concat_basedir => '/var/lib/puppet/concat' } }
+ let(:pre_condition) do
+ [
+ 'openvpn::server { "test_server":
+ country => "CO",
+ province => "ST",
+ city => "Some City",
+ organization => "example.org",
+ email => "testemail@example.org"
+ }',
+ 'openvpn::client { "test_client":
+ server => "test_server"
+ }'
+ ].join
+ end
+
+ it { should contain_file('/etc/openvpn/test_server/client-configs/test_client') }
+
+ describe "setting no paramter at all" do
+ it { should contain_file('/etc/openvpn/test_server/client-configs/test_client').with_content(/\A\n\z/) }
+ end
+
+ describe "setting all parameters" do
+ let(:params) do
+ {:server => 'test_server',
+ :iroute => ['10.0.1.0 255.255.255.0'],
+ :ifconfig => '10.10.10.2 255.255.255.0',
+ :dhcp_options => ['DNS 8.8.8.8']}
+ end
+
+ it { should contain_file('/etc/openvpn/test_server/client-configs/test_client').with_content(/^iroute 10.0.1.0 255.255.255.0$/) }
+ it { should contain_file('/etc/openvpn/test_server/client-configs/test_client').with_content(/^ifconfig-push 10.10.10.2 255.255.255.0$/) }
+ it { should contain_file('/etc/openvpn/test_server/client-configs/test_client').with_content(/^push dhcp-option DNS 8.8.8.8$/) }
+ end
+end
diff --git a/puppet/modules/openvpn/spec/defines/openvpn_server_spec.rb b/puppet/modules/openvpn/spec/defines/openvpn_server_spec.rb
new file mode 100644
index 00000000..467be6aa
--- /dev/null
+++ b/puppet/modules/openvpn/spec/defines/openvpn_server_spec.rb
@@ -0,0 +1,165 @@
+require 'spec_helper'
+
+describe 'openvpn::server', :type => :define do
+
+ let(:title) { 'test_server' }
+
+ context "creating a server with the minimum parameters" do
+ let(:params) { {
+ 'country' => 'CO',
+ 'province' => 'ST',
+ 'city' => 'Some City',
+ 'organization' => 'example.org',
+ 'email' => 'testemail@example.org'
+ } }
+
+ let (:facts) { {
+ :ipaddress_eth0 => '1.2.3.4',
+ :network_eth0 => '1.2.3.0',
+ :netmask_eth0 => '255.255.255.0',
+ :concat_basedir => '/var/lib/puppet/concat',
+ :osfamily => 'anything_else'
+ } }
+
+ # Files associated with a server config
+ it { should contain_file('/etc/openvpn/test_server').with('ensure' => 'directory')}
+ it { should contain_file('/etc/openvpn/test_server/client-configs').with('ensure' => 'directory')}
+ it { should contain_file('/etc/openvpn/test_server/download-configs').with('ensure' => 'directory')}
+ it { should contain_file('/etc/openvpn/test_server/easy-rsa/vars')}
+ it { should contain_file('/etc/openvpn/test_server/easy-rsa/openssl.cnf')}
+ it { should contain_file('/etc/openvpn/test_server/keys').with(
+ 'ensure' => 'link',
+ 'target' => '/etc/openvpn/test_server/easy-rsa/keys'
+ )}
+
+ # Execs to working with certificates
+ it { should contain_exec('copy easy-rsa to openvpn config folder test_server').with(
+ 'command' => '/bin/cp -r /usr/share/doc/openvpn/examples/easy-rsa/2.0 /etc/openvpn/test_server/easy-rsa'
+ )}
+ it { should contain_exec('generate dh param test_server') }
+ it { should contain_exec('initca test_server') }
+ it { should contain_exec('generate server cert test_server') }
+
+ # VPN server config file itself
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^mode\s+server$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^client\-config\-dir\s+\/etc\/openvpn\/test_server\/client\-configs$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^ca\s+\/etc\/openvpn\/test_server\/keys\/ca.crt$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^cert\s+\/etc\/openvpn\/test_server\/keys\/server.crt$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^key\s+\/etc\/openvpn\/test_server\/keys\/server.key$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^dh\s+\/etc\/openvpn\/test_server\/keys\/dh1024.pem$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^proto\s+tcp-server$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^tls-server$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^port\s+1194$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^comp-lzo$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^group\s+nogroup$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^user\s+nobody$/) }
+ it { should_not contain_file('/etc/openvpn/test_server.conf').with_content(/^log\-append\s+test_server\/openvpn\.log$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^status\s+test_server\/openvpn\-status\.log$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^dev\s+tun0$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^local\s+1\.2\.3\.4$/) }
+ it { should_not contain_file('/etc/openvpn/test_server.conf').with_content(/^ifconfig-pool-persist/) }
+ end
+
+ context "creating a server setting all parameters" do
+ let(:params) { {
+ 'country' => 'CO',
+ 'province' => 'ST',
+ 'city' => 'Some City',
+ 'organization' => 'example.org',
+ 'email' => 'testemail@example.org',
+ 'compression' => 'fake_compression',
+ 'port' => '123',
+ 'proto' => 'udp',
+ 'group' => 'someone',
+ 'user' => 'someone',
+ 'logfile' => '/var/log/openvpn/test_server.log',
+ 'status_log' => '/var/log/openvpn/test_server_status.log',
+ 'dev' => 'tun1',
+ 'local' => '2.3.4.5',
+ 'ipp' => true,
+ 'server' => '2.3.4.0 255.255.0.0',
+ 'push' => [ 'dhcp-option DNS 172.31.0.30', 'route 172.31.0.0 255.255.0.0' ]
+ } }
+
+ let (:facts) { {
+ :ipaddress_eth0 => '1.2.3.4',
+ :network_eth0 => '1.2.3.0',
+ :netmask_eth0 => '255.255.255.0',
+ :concat_basedir => '/var/lib/puppet/concat'
+ } }
+
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^mode\s+server$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^client\-config\-dir\s+\/etc\/openvpn\/test_server\/client\-configs$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^ca\s+\/etc\/openvpn\/test_server\/keys\/ca.crt$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^cert\s+\/etc\/openvpn\/test_server\/keys\/server.crt$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^key\s+\/etc\/openvpn\/test_server\/keys\/server.key$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^dh\s+\/etc\/openvpn\/test_server\/keys\/dh1024.pem$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^proto\s+udp$/) }
+ it { should_not contain_file('/etc/openvpn/test_server.conf').with_content(/^proto\s+tls-server$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^port\s+123$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^fake_compression$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^group\s+someone$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^user\s+someone$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^log\-append\s+\/var\/log\/openvpn\/test_server\.log$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^status\s+\/var\/log\/openvpn\/test_server_status\.log$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^dev\s+tun1$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^local\s+2\.3\.4\.5$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^server\s+2\.3\.4\.0\s+255\.255\.0\.0$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^push\s+dhcp-option\s+DNS\s+172\.31\.0\.30$/) }
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^push\s+route\s+172\.31\.0\.0\s+255\.255\.0\.0$/) }
+ end
+
+ context "when RedHat based machine" do
+ let(:params) { {
+ 'country' => 'CO',
+ 'province' => 'ST',
+ 'city' => 'Some City',
+ 'organization' => 'example.org',
+ 'email' => 'testemail@example.org'
+ } }
+
+ let(:facts) { { :osfamily => 'RedHat', :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should contain_file('/etc/openvpn/test_server/easy-rsa/openssl.cnf').with(
+ 'ensure' => 'link',
+ 'target' => '/etc/openvpn/test_server/easy-rsa/openssl-1.0.0.cnf'
+ )}
+
+ it { should contain_exec('copy easy-rsa to openvpn config folder test_server').with(
+ 'command' => '/bin/cp -r /usr/share/doc/openvpn-2.2.2/easy-rsa/2.0 /etc/openvpn/test_server/easy-rsa'
+ )}
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^group\s+nobody$/) }
+
+ end
+
+ context "when Debian based machine" do
+ let(:params) { {
+ 'country' => 'CO',
+ 'province' => 'ST',
+ 'city' => 'Some City',
+ 'organization' => 'example.org',
+ 'email' => 'testemail@example.org'
+ } }
+
+ let(:facts) { { :osfamily => 'Debian', :concat_basedir => '/var/lib/puppet/concat' } }
+
+ it { should contain_file('/etc/openvpn/test_server/easy-rsa/openssl.cnf').with(
+ 'ensure' => 'link',
+ 'target' => '/etc/openvpn/test_server/easy-rsa/openssl-1.0.0.cnf'
+ )}
+
+ it { should contain_exec('copy easy-rsa to openvpn config folder test_server').with(
+ 'command' => '/bin/cp -r /usr/share/doc/openvpn/examples/easy-rsa/2.0 /etc/openvpn/test_server/easy-rsa'
+ )}
+
+    # Configure the VPN session to start automatically via /etc/default/openvpn
+ it { should contain_concat__fragment('openvpn.default.autostart.test_server').with(
+ 'content' => "AUTOSTART=\"$AUTOSTART test_server\"\n",
+ 'target' => '/etc/default/openvpn'
+ )}
+
+ it { should contain_file('/etc/openvpn/test_server.conf').with_content(/^group\s+nogroup$/) }
+
+ end
+
+end
diff --git a/puppet/modules/openvpn/spec/spec_helper.rb b/puppet/modules/openvpn/spec/spec_helper.rb
new file mode 100644
index 00000000..dc7e9f4a
--- /dev/null
+++ b/puppet/modules/openvpn/spec/spec_helper.rb
@@ -0,0 +1,2 @@
+require 'rubygems'
+require 'puppetlabs_spec_helper/module_spec_helper'
diff --git a/puppet/modules/openvpn/templates/client.erb b/puppet/modules/openvpn/templates/client.erb
new file mode 100644
index 00000000..021ed617
--- /dev/null
+++ b/puppet/modules/openvpn/templates/client.erb
@@ -0,0 +1,26 @@
+client
+ca keys/ca.crt
+cert keys/<%= scope.lookupvar('name') %>.crt
+key keys/<%= scope.lookupvar('name') %>.key
+dev <%= scope.lookupvar('dev') %>
+proto <%= scope.lookupvar('proto') %>
+remote <%= scope.lookupvar('remote_host') %> <%= scope.lookupvar('port') %>
+<% if scope.lookupvar('compression') != '' -%>
+<%= scope.lookupvar('compression') %>
+<% end -%>
+resolv-retry <%= scope.lookupvar('resolv_retry') %>
+<% if scope.lookupvar('nobind') -%>
+nobind
+<% end -%>
+<% if scope.lookupvar('persist_key') -%>
+persist-key
+<% end -%>
+<% if scope.lookupvar('persist_tun') -%>
+persist-tun
+<% end -%>
+<% if scope.lookupvar('mute_replay_warnings') -%>
+mute-replay-warnings
+<% end -%>
+ns-cert-type server
+verb <%= scope.lookupvar('verb') %>
+mute <%= scope.lookupvar('mute') %>
diff --git a/puppet/modules/openvpn/templates/client_specific_config.erb b/puppet/modules/openvpn/templates/client_specific_config.erb
new file mode 100644
index 00000000..62cc0e7a
--- /dev/null
+++ b/puppet/modules/openvpn/templates/client_specific_config.erb
@@ -0,0 +1,10 @@
+<% scope.lookupvar('iroute').each do |route| -%>
+iroute <%= route %>
+<% end -%>
+<% if ifconfig = scope.lookupvar('ifconfig') -%>
+ifconfig-push <%= ifconfig %>
+<% end -%>
+<% scope.lookupvar('dhcp_options').each do |option| -%>
+push dhcp-option <%= option %>
+<% end -%>
+
diff --git a/puppet/modules/openvpn/templates/etc-default-openvpn.erb b/puppet/modules/openvpn/templates/etc-default-openvpn.erb
new file mode 100644
index 00000000..310e462e
--- /dev/null
+++ b/puppet/modules/openvpn/templates/etc-default-openvpn.erb
@@ -0,0 +1,20 @@
+# This is the configuration file for /etc/init.d/openvpn
+
+#
+# Start only these VPNs automatically via init script.
+# Allowed values are "all", "none" or space separated list of
+# names of the VPNs. If empty, "all" is assumed.
+#
+#AUTOSTART="all"
+#AUTOSTART="none"
+#AUTOSTART="home office"
+#
+# Refresh interval (in seconds) of default status files
+# located in /var/run/openvpn.$NAME.status
+# Defaults to 10, 0 disables status file generation
+#
+#STATUSREFRESH=10
+#STATUSREFRESH=0
+# Optional arguments to openvpn's command line
+OPTARGS=""
+AUTOSTART=""
diff --git a/puppet/modules/openvpn/templates/server.erb b/puppet/modules/openvpn/templates/server.erb
new file mode 100644
index 00000000..6ef13263
--- /dev/null
+++ b/puppet/modules/openvpn/templates/server.erb
@@ -0,0 +1,37 @@
+mode server
+client-config-dir /etc/openvpn/<%= scope.lookupvar('name') %>/client-configs
+ca /etc/openvpn/<%= scope.lookupvar('name') %>/keys/ca.crt
+cert /etc/openvpn/<%= scope.lookupvar('name') %>/keys/server.crt
+key /etc/openvpn/<%= scope.lookupvar('name') %>/keys/server.key
+dh /etc/openvpn/<%= scope.lookupvar('name') %>/keys/dh1024.pem
+<% if scope.lookupvar('proto') == 'tcp' -%>
+proto <%= scope.lookupvar('proto') %>-server
+<% else -%>
+proto <%= scope.lookupvar('proto') %>
+<% end -%>
+port <%= scope.lookupvar('port') %>
+<% if scope.lookupvar('tls_server') -%>
+tls-server
+<% end -%>
+<% if scope.lookupvar('compression') != '' -%>
+<%= scope.lookupvar('compression') %>
+<% end -%>
+group <%= scope.lookupvar('group_to_set') %>
+user <%= scope.lookupvar('user') %>
+<% if scope.lookupvar('logfile') -%>
+log-append <%= scope.lookupvar('logfile') %>
+<% end -%>
+status <%= scope.lookupvar('status_log') %>
+dev <%= scope.lookupvar('dev') %>
+<% if scope.lookupvar('local') != '' -%>
+local <%= scope.lookupvar('local') %>
+<% end -%>
+<% if scope.lookupvar('ipp') -%>
+ifconfig-pool-persist <%= scope.lookupvar('name') %>/vpn-ipp.txt
+<% end -%>
+<% if scope.lookupvar('server') != '' -%>
+server <%= scope.lookupvar('server') %>
+<% end -%>
+<% scope.lookupvar('push').each do |item| -%>
+push <%= item %>
+<% end -%>
diff --git a/puppet/modules/openvpn/templates/vars.erb b/puppet/modules/openvpn/templates/vars.erb
new file mode 100644
index 00000000..20448b8b
--- /dev/null
+++ b/puppet/modules/openvpn/templates/vars.erb
@@ -0,0 +1,68 @@
+# easy-rsa parameter settings
+
+# NOTE: If you installed from an RPM,
+# don't edit this file in place in
+# /usr/share/openvpn/easy-rsa --
+# instead, you should copy the whole
+# easy-rsa directory to another location
+# (such as /etc/openvpn) so that your
+# edits will not be wiped out by a future
+# OpenVPN package upgrade.
+
+# This variable should point to
+# the top level of the easy-rsa
+# tree.
+export EASY_RSA="/etc/openvpn/<%= @name %>/easy-rsa"
+
+#
+# This variable should point to
+# the requested executables
+#
+export OPENSSL="openssl"
+export PKCS11TOOL="pkcs11-tool"
+export GREP="grep"
+
+
+# This variable should point to
+# the openssl.cnf file included
+# with easy-rsa.
+export KEY_CONFIG=`$EASY_RSA/whichopensslcnf $EASY_RSA`
+
+# Edit this variable to point to
+# your soon-to-be-created key
+# directory.
+#
+# WARNING: clean-all will do
+# a rm -rf on this directory
+# so make sure you define
+# it correctly!
+export KEY_DIR="$EASY_RSA/keys"
+
+# Issue rm -rf warning
+echo NOTE: If you run ./clean-all, I will be doing a rm -rf on $KEY_DIR
+
+# PKCS11 fixes
+export PKCS11_MODULE_PATH="dummy"
+export PKCS11_PIN="dummy"
+
+# Increase this to 2048 if you
+# are paranoid. This will slow
+# down TLS negotiation performance
+# as well as the one-time DH parms
+# generation process.
+export KEY_SIZE=1024
+
+# In how many days should the root CA key expire?
+export CA_EXPIRE=3650
+
+# In how many days should certificates expire?
+export KEY_EXPIRE=3650
+
+# These are the default values for fields
+# which will be placed in the certificate.
+# Don't leave any of these fields blank.
+export KEY_COUNTRY="<%= @country %>"
+export KEY_PROVINCE="<%= @province %>"
+export KEY_CITY="<%= @city %>"
+export KEY_ORG="<%= @organization %>"
+export KEY_EMAIL="<%= @email %>"
diff --git a/puppet/modules/openvpn/vagrant/client.pp b/puppet/modules/openvpn/vagrant/client.pp
new file mode 100644
index 00000000..7ebeb1d7
--- /dev/null
+++ b/puppet/modules/openvpn/vagrant/client.pp
@@ -0,0 +1,5 @@
+node default {
+
+ package { 'openvpn': ensure => installed; }
+
+}
diff --git a/puppet/modules/openvpn/vagrant/server.pp b/puppet/modules/openvpn/vagrant/server.pp
new file mode 100644
index 00000000..a95def06
--- /dev/null
+++ b/puppet/modules/openvpn/vagrant/server.pp
@@ -0,0 +1,23 @@
+node default {
+ openvpn::server { 'winterthur':
+ country => 'CH',
+ province => 'ZH',
+ city => 'Winterthur',
+ organization => 'example.org',
+ email => 'root@example.org',
+ server => '10.200.200.0 255.255.255.0'
+ }
+
+ openvpn::client { 'client1':
+ server => 'winterthur';
+ }
+
+ openvpn::client_specific_config { 'client1':
+ server => 'winterthur',
+ ifconfig => '10.200.200.100 255.255.255.0'
+ }
+
+ openvpn::client { 'client2':
+ server => 'winterthur';
+ }
+}
diff --git a/puppet/modules/postfwd/files/postfwd_default b/puppet/modules/postfwd/files/postfwd_default
new file mode 100644
index 00000000..83742e40
--- /dev/null
+++ b/puppet/modules/postfwd/files/postfwd_default
@@ -0,0 +1,19 @@
+### This file managed by Puppet
+# Global options for postfwd(8).
+
+# Set to '1' to enable startup (daemon mode)
+STARTUP=1
+
+# Config file
+CONF=/etc/postfix/postfwd.cf
+# IP address to listen on
+INET=127.0.0.1
+# Port to listen on
+PORT=10040
+# Run postfwd as this user
+RUNAS="postfw"
+# Arguments passed on start (--daemon implied)
+# disable summary and cache-no-size
+#ARGS="--summary=600 --cache=600 --cache-rdomain-only --cache-no-size"
+ARGS="--cache=600 --cache-rdomain-only --no-rulestats"
+
diff --git a/puppet/modules/postfwd/manifests/init.pp b/puppet/modules/postfwd/manifests/init.pp
new file mode 100644
index 00000000..6db3fa52
--- /dev/null
+++ b/puppet/modules/postfwd/manifests/init.pp
@@ -0,0 +1,43 @@
+# This class provides rate-limiting for outgoing SMTP, using postfwd.
+# It is configured with limits that seem reasonable for a generic
+# use case. Each of the following applies to sasl_authenticated users:
+#
+# . 150 recipients at a time
+# . no more than 50 messages in 60 minutes
+# . no more than 250 recipients in 60 minutes.
+#
+# This class could easily be extended to add overrides to these rules,
+# maximum sizes per client, or additional rules.
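+#
+# A minimal usage sketch (the node name below is hypothetical; the class
+# takes no parameters, so including it is enough):
+#
+#   node 'mx1.example.org' {
+#     include postfwd
+#   }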
+class postfwd {
+
+ ensure_packages(['libnet-server-perl', 'libnet-dns-perl', 'postfwd'])
+
+ file {
+ '/etc/default/postfwd':
+ source => 'puppet:///modules/postfwd/postfwd_default',
+ mode => '0644',
+ owner => root,
+ group => root,
+ before => Package['postfwd'];
+
+ '/etc/postfix/postfwd.cf':
+ content => template('postfwd/postfwd.cf.erb'),
+ mode => '0644',
+ owner => root,
+ group => root,
+ require => Package['postfix'],
+ before => Package['postfwd'];
+ }
+
+ service {
+ 'postfwd':
+ ensure => running,
+ name => postfwd,
+ pattern => '/usr/sbin/postfwd',
+ enable => true,
+ hasrestart => true,
+ hasstatus => false,
+ require => [ File['/etc/default/postfwd'],
+ File['/etc/postfix/postfwd.cf']];
+ }
+}
diff --git a/puppet/modules/postfwd/templates/postfwd.cf.erb b/puppet/modules/postfwd/templates/postfwd.cf.erb
new file mode 100644
index 00000000..1c45dd03
--- /dev/null
+++ b/puppet/modules/postfwd/templates/postfwd.cf.erb
@@ -0,0 +1,28 @@
+### This file managed by Puppet
+# Before deploying a rule (see the illustrative example below):
+# 1. test with an additional "sender==test@domain.org;" in the rule so it
+#    only applies to your test account
+# 2. when ready to test for all users, use the WARN action and watch the logs
+#    for a few days to make sure it is working the way you like
+# 3. when ready to deploy for real, set a proper error code
+
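+## Illustrative example of the workflow above (the rule id and test address
+## are made up; WARN only logs a match instead of rejecting):
+# id=TESTRCPT; sender==test@<%= @domain %>; recipient_count=150; action=WARN would have hit RCPTSENDER
+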
+## Overrides - add them like the following example
+# id=exampleuser; sasl_username==exampleuser; action=dunno
+
+## Rules that apply to all senders
+# Recipient Per Message Limit
+# We only receive mail via smtp from sasl authenticated users
+# directly. We want to limit to a lower amount to prevent phished accounts
+# spamming
+id=RCPTSENDER; recipient_count=150; action=REJECT Too many recipients, please try again. Contact http://<%= @domain %>/tickets/new if this is in error. ERROR:RCPTSENDER
+
+# Message Rate Limit
+# This limits sasl authenticated users to no more than 50 messages per 60 minutes
+# NOTE: sasl_username needs to be set to something or this check will fail
+id=MSGRATE ; sasl_username=!!(^$); action==rate($$sasl_username/100/3600/450 4.7.1 exceeded message rate. Contact http://<%= @domain %>/tickets/new if this is in error. ERROR:MSGRATE)
+
+# Total Recipient Rate Limit
+# This adds up the recipients across all of a sasl authenticated user's
+# messages, which can't exceed 250 per 60 minutes
+# NOTE: sasl_username needs to be set to something or this check will fail
+id=RCPTRATE ; sasl_username=!!(^$); action==rcpt($$sasl_username/500/3600/450 4.7.1 exceeded message rate. Contact http://<%= @domain %>/tickets/new if this is in error. ERROR:RCPTRATE)
diff --git a/puppet/modules/site_apache/files/conf.d/security b/puppet/modules/site_apache/files/conf.d/security
new file mode 100644
index 00000000..a5ae5bdc
--- /dev/null
+++ b/puppet/modules/site_apache/files/conf.d/security
@@ -0,0 +1,55 @@
+#
+# Disable access to the entire file system except for the directories that
+# are explicitly allowed later.
+#
+# This currently breaks the configurations that come with some web application
+# Debian packages. It will be made the default for the release after lenny.
+#
+#<Directory />
+# AllowOverride None
+# Order Deny,Allow
+# Deny from all
+#</Directory>
+
+
+# Changing the following options will not really affect the security of the
+# server, but might make attacks slightly more difficult in some cases.
+
+#
+# ServerTokens
+# This directive configures what you return as the Server HTTP response
+# Header. The default is 'Full' which sends information about the OS-Type
+# and compiled in modules.
+# Set to one of: Full | OS | Minimal | Minor | Major | Prod
+# where Full conveys the most information, and Prod the least.
+#
+#ServerTokens Minimal
+ServerTokens Prod
+
+#
+# Optionally add a line containing the server version and virtual host
+# name to server-generated pages (internal error documents, FTP directory
+# listings, mod_status and mod_info output etc., but not CGI generated
+# documents or custom error documents).
+# Set to "EMail" to also include a mailto: link to the ServerAdmin.
+# Set to one of: On | Off | EMail
+#
+#ServerSignature Off
+ServerSignature Off
+
+#
+# Allow TRACE method
+#
+# Set to "extended" to also reflect the request body (only for testing and
+# diagnostic purposes).
+#
+# Set to one of: On | Off | extended
+#
+#TraceEnable Off
+TraceEnable On
+
+# Setting this header will prevent other sites from embedding pages from this
+# site as frames. This defends against clickjacking attacks.
+# Requires mod_headers to be enabled.
+#
+Header set X-Frame-Options: "DENY"
diff --git a/puppet/modules/site_apache/files/include.d/ssl_common.inc b/puppet/modules/site_apache/files/include.d/ssl_common.inc
new file mode 100644
index 00000000..2d282c84
--- /dev/null
+++ b/puppet/modules/site_apache/files/include.d/ssl_common.inc
@@ -0,0 +1,7 @@
+SSLEngine on
+SSLProtocol all -SSLv2 -SSLv3
+SSLHonorCipherOrder on
+SSLCompression off
+SSLCipherSuite "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!3DES:!RC4:!MD5:!PSK!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA"
+
+RequestHeader set X_FORWARDED_PROTO 'https' \ No newline at end of file
diff --git a/puppet/modules/site_apache/manifests/common.pp b/puppet/modules/site_apache/manifests/common.pp
new file mode 100644
index 00000000..8a11759a
--- /dev/null
+++ b/puppet/modules/site_apache/manifests/common.pp
@@ -0,0 +1,30 @@
+# install basic apache modules needed for all services (nagios, webapp)
+class site_apache::common {
+
+ include apache::module::rewrite
+ include apache::module::env
+
+ class { '::apache':
+ no_default_site => true,
+ ssl => true,
+ ssl_cipher_suite => 'HIGH:MEDIUM:!aNULL:!MD5'
+ }
+
+ # needed for the mod_ssl config
+ include apache::module::mime
+
+ # load mods depending on apache version
+ if ( $::lsbdistcodename == 'jessie' ) {
+ # apache >= 2.4, debian jessie
+ # needed for mod_ssl config
+ include apache::module::socache_shmcb
+ # generally needed
+ include apache::module::mpm_prefork
+ } else {
+ # apache < 2.4, debian wheezy
+ # for "Order" directive, i.e. main apache2.conf
+ include apache::module::authz_host
+ }
+
+ include site_apache::common::tls
+}
diff --git a/puppet/modules/site_apache/manifests/common/tls.pp b/puppet/modules/site_apache/manifests/common/tls.pp
new file mode 100644
index 00000000..040868bf
--- /dev/null
+++ b/puppet/modules/site_apache/manifests/common/tls.pp
@@ -0,0 +1,6 @@
+class site_apache::common::tls {
+  # class to set up common SSL configurations
+
+ apache::config::include{ 'ssl_common.inc': }
+
+}
diff --git a/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb b/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb
new file mode 100644
index 00000000..bfa5d04d
--- /dev/null
+++ b/puppet/modules/site_apache/templates/vhosts.d/api.conf.erb
@@ -0,0 +1,48 @@
+<VirtualHost *:80>
+ ServerName <%= @api_domain %>
+ RewriteEngine On
+ RewriteRule ^.*$ https://<%= @api_domain -%>:<%= @api_port -%>%{REQUEST_URI} [R=permanent,L]
+ CustomLog ${APACHE_LOG_DIR}/other_vhosts_access.log common
+</VirtualHost>
+
+Listen 0.0.0.0:<%= @api_port %>
+
+<VirtualHost *:<%= @api_port -%>>
+ ServerName <%= @api_domain %>
+ CustomLog ${APACHE_LOG_DIR}/other_vhosts_access.log common
+
+ SSLCACertificatePath /etc/ssl/certs
+ SSLCertificateKeyFile <%= scope.lookupvar('x509::variables::keys') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.key
+ SSLCertificateFile <%= scope.lookupvar('x509::variables::certs') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.crt
+
+ Include include.d/ssl_common.inc
+
+ <IfModule mod_headers.c>
+<% if @webapp['secure'] -%>
+ Header always set Strict-Transport-Security "max-age=31536000; includeSubDomains"
+<% end -%>
+ Header always unset X-Powered-By
+ Header always unset X-Runtime
+ </IfModule>
+
+ DocumentRoot /srv/leap/webapp/public
+ <% if scope.function_guess_apache_version([]) == '2.4' %>
+ <Directory /srv/leap/webapp/public>
+ AllowOverride None
+ Require all granted
+ </Directory>
+ <% end %>
+
+ # Check for maintenance file and redirect all requests
+ RewriteEngine On
+ RewriteCond %{DOCUMENT_ROOT}/system/maintenance.html -f
+ RewriteCond %{SCRIPT_FILENAME} !maintenance.html
+ RewriteCond %{REQUEST_URI} !/images/maintenance.jpg
+ RewriteRule ^.*$ %{DOCUMENT_ROOT}/system/maintenance.html [L]
+
+ # http://www.modrails.com/documentation/Users%20guide%20Apache.html#_passengerallowencodedslashes_lt_on_off_gt
+ AllowEncodedSlashes on
+ PassengerAllowEncodedSlashes on
+ PassengerFriendlyErrorPages off
+ SetEnv TMPDIR /var/tmp
+</VirtualHost>
diff --git a/puppet/modules/site_apache/templates/vhosts.d/common.conf.erb b/puppet/modules/site_apache/templates/vhosts.d/common.conf.erb
new file mode 100644
index 00000000..bf60e794
--- /dev/null
+++ b/puppet/modules/site_apache/templates/vhosts.d/common.conf.erb
@@ -0,0 +1,76 @@
+<VirtualHost *:80>
+ ServerName <%= @webapp_domain %>
+ ServerAlias <%= @domain_name %>
+ ServerAlias <%= @domain %>
+ ServerAlias www.<%= @domain %>
+ RewriteEngine On
+ RewriteRule ^.*$ https://<%= @webapp_domain -%>%{REQUEST_URI} [R=permanent,L]
+ CustomLog ${APACHE_LOG_DIR}/other_vhosts_access.log common
+</VirtualHost>
+
+<VirtualHost *:443>
+ ServerName <%= @webapp_domain %>
+ ServerAlias <%= @domain_name %>
+ ServerAlias <%= @domain %>
+ ServerAlias www.<%= @domain %>
+ CustomLog ${APACHE_LOG_DIR}/other_vhosts_access.log common
+
+ SSLCACertificatePath /etc/ssl/certs
+ SSLCertificateKeyFile <%= scope.lookupvar('x509::variables::keys') %>/<%= scope.lookupvar('site_config::params::commercial_cert_name') %>.key
+ SSLCertificateFile <%= scope.lookupvar('x509::variables::certs') %>/<%= scope.lookupvar('site_config::params::commercial_cert_name') %>.crt
+
+ Include include.d/ssl_common.inc
+
+ <IfModule mod_headers.c>
+<% if (defined? @services) and (@services.include? 'webapp') and (@webapp['secure']) -%>
+ Header always set Strict-Transport-Security "max-age=31536000; includeSubDomains"
+<% end -%>
+ Header always unset X-Powered-By
+ Header always unset X-Runtime
+ </IfModule>
+
+<% if (defined? @services) and (@services.include? 'webapp') -%>
+ DocumentRoot /srv/leap/webapp/public
+ <% if scope.function_guess_apache_version([]) == '2.4' %>
+ <Directory /srv/leap/webapp/public>
+ AllowOverride None
+ Require all granted
+ </Directory>
+ <% end %>
+
+ RewriteEngine On
+ # Check for maintenance file and redirect all requests
+ RewriteCond %{DOCUMENT_ROOT}/system/maintenance.html -f
+ RewriteCond %{SCRIPT_FILENAME} !maintenance.html
+ RewriteCond %{REQUEST_URI} !/images/maintenance.jpg
+ RewriteRule ^.*$ %{DOCUMENT_ROOT}/system/maintenance.html [L]
+
+ # http://www.modrails.com/documentation/Users%20guide%20Apache.html#_passengerallowencodedslashes_lt_on_off_gt
+ AllowEncodedSlashes on
+ PassengerAllowEncodedSlashes on
+ PassengerFriendlyErrorPages off
+ SetEnv TMPDIR /var/tmp
+
+ # Allow rails assets to be cached for a very long time (since the URLs change whenever the content changes)
+ <Location /assets/>
+ Header unset ETag
+ FileETag None
+ ExpiresActive On
+ ExpiresDefault "access plus 1 year"
+ </Location>
+<% end -%>
+
+
+<% if (defined? @services) and (@services.include? 'monitor') -%>
+ <DirectoryMatch (/usr/share/nagios3/htdocs|/usr/lib/cgi-bin/nagios3|/etc/nagios3/stylesheets|/usr/share/pnp4nagios)>
+ <% if (defined? @services) and (@services.include? 'webapp') -%>
+ PassengerEnabled off
+ <% end -%>
+ AllowOverride all
+    # Nagios won't work with this option set to "DENY",
+    # as it is in conf.d/security (#4169). Therefore we allow
+    # it here, only for nagios.
+ Header set X-Frame-Options: "ALLOW"
+ </DirectoryMatch>
+<% end -%>
+</VirtualHost>
diff --git a/puppet/modules/site_apache/templates/vhosts.d/hidden_service.conf.erb b/puppet/modules/site_apache/templates/vhosts.d/hidden_service.conf.erb
new file mode 100644
index 00000000..232b1577
--- /dev/null
+++ b/puppet/modules/site_apache/templates/vhosts.d/hidden_service.conf.erb
@@ -0,0 +1,55 @@
+<VirtualHost 127.0.0.1:80>
+ ServerName <%= @tor_domain %>
+
+ <IfModule mod_headers.c>
+ Header always unset X-Powered-By
+ Header always unset X-Runtime
+ </IfModule>
+
+<% if (defined? @services) and (@services.include? 'webapp') -%>
+ DocumentRoot /srv/leap/webapp/public
+ <% if scope.function_guess_apache_version([]) == '2.4' %>
+ <Directory /srv/leap/webapp/public>
+ AllowOverride None
+ Require all granted
+ </Directory>
+ <% end %>
+
+ RewriteEngine On
+ # Check for maintenance file and redirect all requests
+ RewriteCond %{DOCUMENT_ROOT}/system/maintenance.html -f
+ RewriteCond %{SCRIPT_FILENAME} !maintenance.html
+ RewriteCond %{REQUEST_URI} !/images/maintenance.jpg
+ RewriteRule ^.*$ %{DOCUMENT_ROOT}/system/maintenance.html [L]
+
+ # http://www.modrails.com/documentation/Users%20guide%20Apache.html#_passengerallowencodedslashes_lt_on_off_gt
+ AllowEncodedSlashes on
+ PassengerAllowEncodedSlashes on
+ PassengerFriendlyErrorPages off
+ SetEnv TMPDIR /var/tmp
+
+ # Allow rails assets to be cached for a very long time (since the URLs change whenever the content changes)
+ <Location /assets/>
+ Header unset ETag
+ FileETag None
+ ExpiresActive On
+ ExpiresDefault "access plus 1 year"
+ </Location>
+<% end -%>
+
+<% if (defined? @services) and (@services.include? 'static') -%>
+ DocumentRoot "/srv/static/root/public"
+ <% if scope.function_guess_apache_version([]) == '2.4' %>
+ <Directory /srv/static/root/public>
+ AllowOverride None
+ Require all granted
+ </Directory>
+ <% end %>
+ AccessFileName .htaccess
+
+ Alias /provider.json /srv/leap/provider.json
+ <Location /provider.json>
+ Header set X-Minimum-Client-Version 0.5
+ </Location>
+<% end -%>
+</VirtualHost>
diff --git a/puppet/modules/site_apt/files/Debian/51unattended-upgrades-leap b/puppet/modules/site_apt/files/Debian/51unattended-upgrades-leap
new file mode 100644
index 00000000..bbaac6a2
--- /dev/null
+++ b/puppet/modules/site_apt/files/Debian/51unattended-upgrades-leap
@@ -0,0 +1,6 @@
+// this file is managed by puppet !
+
+Unattended-Upgrade::Allowed-Origins {
+ "leap.se:stable";
+}
+
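+// To preview what this origin list would allow to be upgraded, one can run
+// (illustrative): unattended-upgrade --dry-run --debug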
diff --git a/puppet/modules/site_apt/files/keys/leap-archive.gpg b/puppet/modules/site_apt/files/keys/leap-archive.gpg
new file mode 100644
index 00000000..dd7f3be6
--- /dev/null
+++ b/puppet/modules/site_apt/files/keys/leap-archive.gpg
Binary files differ
diff --git a/puppet/modules/site_apt/files/keys/leap-experimental-archive.gpg b/puppet/modules/site_apt/files/keys/leap-experimental-archive.gpg
new file mode 100644
index 00000000..5cc9064b
--- /dev/null
+++ b/puppet/modules/site_apt/files/keys/leap-experimental-archive.gpg
Binary files differ
diff --git a/puppet/modules/site_apt/manifests/dist_upgrade.pp b/puppet/modules/site_apt/manifests/dist_upgrade.pp
new file mode 100644
index 00000000..0eb98cea
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/dist_upgrade.pp
@@ -0,0 +1,17 @@
+# upgrade all packages
+class site_apt::dist_upgrade {
+
+ # facter returns 'true' as string
+ # lint:ignore:quoted_booleans
+ if $::apt_running == 'true' {
+ # lint:endignore
+ fail ('apt-get is running in background - Please wait until it finishes. Exiting.')
+ } else {
+ exec{'initial_apt_dist_upgrade':
+ command => "/usr/bin/apt-get -q -y -o 'DPkg::Options::=--force-confold' dist-upgrade",
+ refreshonly => false,
+ timeout => 1200,
+ require => Exec['apt_updated']
+ }
+ }
+}
diff --git a/puppet/modules/site_apt/manifests/init.pp b/puppet/modules/site_apt/manifests/init.pp
new file mode 100644
index 00000000..455425c1
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/init.pp
@@ -0,0 +1,55 @@
+# set up apt on all nodes
+class site_apt {
+
+ $sources = hiera('sources')
+ $apt_config = $sources['apt']
+
+ # debian repo urls
+ $apt_url_basic = $apt_config['basic']
+ $apt_url_security = $apt_config['security']
+ $apt_url_backports = $apt_config['backports']
+
+ # leap repo url
+ $platform_sources = $sources['platform']
+ $apt_url_platform_basic = $platform_sources['apt']['basic']
+
+ # needed on jessie hosts for getting pnp4nagios from testing
+ if ( $::operatingsystemmajrelease == '8' ) {
+ $use_next_release = true
+ } else {
+ $use_next_release = false
+ }
+
+ class { 'apt':
+ custom_key_dir => 'puppet:///modules/site_apt/keys',
+ debian_url => $apt_url_basic,
+ security_url => $apt_url_security,
+ backports_url => $apt_url_backports,
+ use_next_release => $use_next_release
+ }
+
+ # enable http://deb.leap.se debian package repository
+ include site_apt::leap_repo
+
+ apt::apt_conf { '90disable-pdiffs':
+ content => 'Acquire::PDiffs "false";';
+ }
+
+ include ::site_apt::unattended_upgrades
+
+ # not currently used
+ #apt::sources_list { 'secondary.list':
+ # content => template('site_apt/secondary.list');
+ #}
+
+ apt::preferences_snippet { 'leap':
+ priority => 999,
+ package => '*',
+ pin => 'origin "deb.leap.se"'
+ }
+
+ # All packages should be installed after 'update_apt' is called,
+ # which does an 'apt-get update'.
+ Exec['update_apt'] -> Package <||>
+
+}
diff --git a/puppet/modules/site_apt/manifests/leap_repo.pp b/puppet/modules/site_apt/manifests/leap_repo.pp
new file mode 100644
index 00000000..5eedce45
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/leap_repo.pp
@@ -0,0 +1,16 @@
+# install the LEAP deb repo together with the leap-archive-keyring package
+# containing the apt signing key
+class site_apt::leap_repo {
+ $platform = hiera_hash('platform')
+ $major_version = $platform['major_version']
+
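+  # For reference, the resource below renders a single sources.list line of
+  # the form "deb <apt_url_platform_basic> <codename> main", e.g.
+  # (values illustrative): deb http://deb.leap.se/platform jessie main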
+ apt::sources_list { 'leap.list':
+ content => "deb ${::site_apt::apt_url_platform_basic} ${::lsbdistcodename} main\n",
+ before => Exec[refresh_apt]
+ }
+
+ package { 'leap-archive-keyring':
+ ensure => latest
+ }
+
+}
diff --git a/puppet/modules/site_apt/manifests/preferences/check_mk.pp b/puppet/modules/site_apt/manifests/preferences/check_mk.pp
new file mode 100644
index 00000000..580e0d3f
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/preferences/check_mk.pp
@@ -0,0 +1,9 @@
+class site_apt::preferences::check_mk {
+
+ apt::preferences_snippet { 'check-mk':
+ package => 'check-mk-*',
+ release => "${::lsbdistcodename}-backports",
+ priority => 999;
+ }
+
+}
diff --git a/puppet/modules/site_apt/manifests/preferences/passenger.pp b/puppet/modules/site_apt/manifests/preferences/passenger.pp
new file mode 100644
index 00000000..8cd41f91
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/preferences/passenger.pp
@@ -0,0 +1,14 @@
+#
+# currently, this is only used by static_site to get passenger v4.
+#
+# UPGRADE: this is not needed for jessie.
+#
+class site_apt::preferences::passenger {
+
+ apt::preferences_snippet { 'passenger':
+ package => 'libapache2-mod-passenger',
+ release => "${::lsbdistcodename}-backports",
+ priority => 999;
+ }
+
+}
diff --git a/puppet/modules/site_apt/manifests/preferences/rsyslog.pp b/puppet/modules/site_apt/manifests/preferences/rsyslog.pp
new file mode 100644
index 00000000..bfeaa7da
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/preferences/rsyslog.pp
@@ -0,0 +1,13 @@
+class site_apt::preferences::rsyslog {
+
+ apt::preferences_snippet {
+ 'rsyslog_anon_depends':
+ package => 'libestr0 librelp0 rsyslog*',
+ priority => '999',
+ pin => 'release a=wheezy-backports',
+ before => Class['rsyslog::install'];
+
+ 'fixed_rsyslog_anon_package':
+ ensure => absent;
+ }
+}
diff --git a/puppet/modules/site_apt/manifests/unattended_upgrades.pp b/puppet/modules/site_apt/manifests/unattended_upgrades.pp
new file mode 100644
index 00000000..42f1f4c6
--- /dev/null
+++ b/puppet/modules/site_apt/manifests/unattended_upgrades.pp
@@ -0,0 +1,20 @@
+# configure unattended upgrades so packages from both Debian and LEAP
+# repos get upgraded unattended
+class site_apt::unattended_upgrades {
+ # override unattended-upgrades package resource to make sure
+ # that it is upgraded on every deploy (#6245)
+
+ # configure upgrades for Debian
+ class { 'apt::unattended_upgrades':
+ ensure_version => latest
+ }
+
+ # configure LEAP upgrades
+ apt::apt_conf { '51unattended-upgrades-leap':
+ source => [
+ "puppet:///modules/site_apt/${::lsbdistid}/51unattended-upgrades-leap"],
+ require => Package['unattended-upgrades'],
+ refresh_apt => false,
+ }
+
+}
diff --git a/puppet/modules/site_apt/templates/jessie/postfix.seeds b/puppet/modules/site_apt/templates/jessie/postfix.seeds
new file mode 100644
index 00000000..1a878ccc
--- /dev/null
+++ b/puppet/modules/site_apt/templates/jessie/postfix.seeds
@@ -0,0 +1 @@
+postfix postfix/main_mailer_type select No configuration
diff --git a/puppet/modules/site_apt/templates/preferences.include_squeeze b/puppet/modules/site_apt/templates/preferences.include_squeeze
new file mode 100644
index 00000000..d6d36b60
--- /dev/null
+++ b/puppet/modules/site_apt/templates/preferences.include_squeeze
@@ -0,0 +1,25 @@
+Explanation: Debian wheezy
+Package: *
+Pin: release o=Debian,n=wheezy
+Pin-Priority: 990
+
+Explanation: Debian wheezy-updates
+Package: *
+Pin: release o=Debian,n=wheezy-updates
+Pin-Priority: 990
+
+Explanation: Debian sid
+Package: *
+Pin: release o=Debian,n=sid
+Pin-Priority: 1
+
+Explanation: Debian squeeze
+Package: *
+Pin: release o=Debian,n=squeeze
+Pin-Priority: 980
+
+Explanation: Debian fallback
+Package: *
+Pin: release o=Debian
+Pin-Priority: -10
+
diff --git a/puppet/modules/site_apt/templates/secondary.list b/puppet/modules/site_apt/templates/secondary.list
new file mode 100644
index 00000000..0c024549
--- /dev/null
+++ b/puppet/modules/site_apt/templates/secondary.list
@@ -0,0 +1,3 @@
+# basic
+deb http://ftp.debian.org/debian/ <%= @lsbdistcodename %> main contrib non-free
+
diff --git a/puppet/modules/site_apt/templates/wheezy/postfix.seeds b/puppet/modules/site_apt/templates/wheezy/postfix.seeds
new file mode 100644
index 00000000..1a878ccc
--- /dev/null
+++ b/puppet/modules/site_apt/templates/wheezy/postfix.seeds
@@ -0,0 +1 @@
+postfix postfix/main_mailer_type select No configuration
diff --git a/puppet/modules/site_check_mk/files/agent/local_checks/all_hosts/run_node_tests.sh b/puppet/modules/site_check_mk/files/agent/local_checks/all_hosts/run_node_tests.sh
new file mode 100644
index 00000000..1dd0afc9
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/local_checks/all_hosts/run_node_tests.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+#
+# runs node tests
+
+/srv/leap/bin/run_tests --checkmk
diff --git a/puppet/modules/site_check_mk/files/agent/local_checks/couchdb/leap_couch_stats.sh b/puppet/modules/site_check_mk/files/agent/local_checks/couchdb/leap_couch_stats.sh
new file mode 100755
index 00000000..c7477b18
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/local_checks/couchdb/leap_couch_stats.sh
@@ -0,0 +1,122 @@
+#!/bin/bash
+#
+# todo:
+# - thresholds
+# - couch response time
+# - make CURL/URL/DBLIST_EXCLUDE vars configurable
+# - move load_nagios_utils() to helper library so we can use it from multiple scripts
+
+start_time=$(date +%s.%N)
+
+CURL='curl -s --netrc-file /etc/couchdb/couchdb.netrc'
+URL='http://127.0.0.1:5984'
+TMPFILE=$(mktemp)
+DBLIST_EXCLUDE='(user-|sessions_|tokens_|_replicator|_users)'
+PREFIX='Couchdb_'
+
+
+load_nagios_utils () {
+ # load the nagios utils
+ # in debian, the package nagios-plugins-common installs utils.sh to /usr/lib/nagios/plugins/utils.sh
+ utilsfn=
+ for d in $PROGPATH /usr/lib/nagios/plugins /usr/lib64/nagios/plugins /usr/local/nagios/libexec /opt/nagios-plugins/libexec . ; do
+ if [ -f "$d/utils.sh" ]; then
+ utilsfn=$d/utils.sh;
+ fi
+ done
+ if [ "$utilsfn" = "" ]; then
+ echo "UNKNOWN - cannot find utils.sh (part of nagios plugins)";
+ exit 3;
+ fi
+ . "$utilsfn";
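+  # utils.sh from nagios-plugins defines the numeric exit codes used here
+  # (STATE_OK=0, STATE_WARNING=1, STATE_CRITICAL=2, STATE_UNKNOWN=3,
+  # STATE_DEPENDENT=4); STATE[] maps them back to human-readable names.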
+ STATE[$STATE_OK]='OK'
+ STATE[$STATE_WARNING]='Warning'
+ STATE[$STATE_CRITICAL]='Critical'
+ STATE[$STATE_UNKNOWN]='Unknown'
+  STATE[$STATE_DEPENDENT]='Dependent'
+}
+
+get_global_stats_perf () {
+ trap "localexit=3" ERR
+ local localexit db_count
+ localexit=0
+
+ # get a list of all dbs
+ $CURL -X GET $URL/_all_dbs | json_pp | egrep -v '(\[|\])' > $TMPFILE
+
+ db_count=$( wc -l < $TMPFILE)
+ excluded_db_count=$( egrep -c "$DBLIST_EXCLUDE" $TMPFILE )
+
+ echo "db_count=$db_count|excluded_db_count=$excluded_db_count"
+ return ${localexit}
+}
+
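+# db_stats emits one line per database in check_mk local check format, e.g.
+# (database name and numbers below are made up):
+#   0 Couchdb_identities_database identities_docs=1200|identities_deleted_docs=34|identities_deleted_docs_percentage=2%|identities_disksize_mb=12.40mb OK: database identities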
+db_stats () {
+ trap "localexit=3" ERR
+ local db db_stats doc_count del_doc_count localexit
+ localexit=0
+
+ db="$1"
+ name="$2"
+
+ if [ -z "$name" ]
+ then
+ name="$db"
+ fi
+
+ perf="$perf|${db}_docs=$( $CURL -s -X GET ${URL}/$db | json_pp |grep 'doc_count' | sed 's/[^0-9]//g' )"
+ db_stats=$( $CURL -s -X GET ${URL}/$db | json_pp )
+
+ doc_count=$( echo "$db_stats" | grep 'doc_count' | grep -v 'deleted_doc_count' | sed 's/[^0-9]//g' )
+ del_doc_count=$( echo "$db_stats" | grep 'doc_del_count' | sed 's/[^0-9]//g' )
+
+ # don't divide by zero
+ if [ $del_doc_count -eq 0 ]
+ then
+ del_doc_perc=0
+ else
+ del_doc_perc=$(( del_doc_count * 100 / doc_count ))
+ fi
+
+ bytes=$( echo "$db_stats" | grep disk_size | sed 's/[^0-9]//g' )
+ disk_size=$( echo "scale = 2; $bytes / 1024 / 1024" | bc -l )
+
+ echo -n "${localexit} ${PREFIX}${name}_database ${name}_docs=$doc_count|${name}_deleted_docs=$del_doc_count|${name}_deleted_docs_percentage=${del_doc_perc}%"
+ printf "|${name}_disksize_mb=%02.2fmb ${STATE[localexit]}: database $name\n" "$disk_size"
+
+ return ${localexit}
+}
+
+# main
+
+load_nagios_utils
+
+# per-db stats
+# get a list of all dbs
+$CURL -X GET $URL/_all_dbs | json_pp | egrep -v '(\[|\])' > $TMPFILE
+
+# get list of dbs to check
+dbs=$( egrep -v "${DBLIST_EXCLUDE}" $TMPFILE | tr -d '\n"' | sed 's/,/ /g' )
+
+for db in $dbs
+do
+ db_stats "$db"
+done
+
+# special handling for rotated dbs
+suffix=$(($(date +'%s') / (60*60*24*30)))
+db_stats "sessions_${suffix}" "sessions"
+db_stats "tokens_${suffix}" "tokens"
+
+
+# show global couchdb stats
+global_stats_perf=$(get_global_stats_perf)
+exitcode=$?
+
+end_time=$(date +%s.%N)
+duration=$( echo "scale = 2; $end_time - $start_time" | bc -l )
+
+printf "${exitcode} ${PREFIX}global_stats ${global_stats_perf}|script_duration=%02.2fs ${STATE[exitcode]}: global couchdb status\n" "$duration"
+
+rm "$TMPFILE"
+
diff --git a/puppet/modules/site_check_mk/files/agent/local_checks/mx/check_leap_mx.sh b/puppet/modules/site_check_mk/files/agent/local_checks/mx/check_leap_mx.sh
new file mode 100755
index 00000000..4711e247
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/local_checks/mx/check_leap_mx.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+
+WARN=1
+CRIT=5
+
+# in minutes
+MAXAGE=10
+
+STATUS[0]='OK'
+STATUS[1]='Warning'
+STATUS[2]='Critical'
+CHECKNAME='Leap_MX_Queue'
+
+WATCHDIR='/var/mail/leap-mx/Maildir/new/'
+
+
+total=`find $WATCHDIR -type f -mmin +$MAXAGE | wc -l`
+
+if [ $total -lt $WARN ]
+then
+ exitcode=0
+else
+ if [ $total -le $CRIT ]
+ then
+ exitcode=1
+ else
+ exitcode=2
+ fi
+fi
+
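+# The line below follows the check_mk local check format; e.g. (values
+# illustrative): 0 Leap_MX_Queue stale_files=0 OK: 0 stale files (>=10 min) in /var/mail/leap-mx/Maildir/new/.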
+echo "${exitcode} ${CHECKNAME} stale_files=${total} ${STATUS[exitcode]}: ${total} stale files (>=${MAXAGE} min) in ${WATCHDIR}."
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg
new file mode 100644
index 00000000..0f378a5a
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/bigcouch.cfg
@@ -0,0 +1,28 @@
+/opt/bigcouch/var/log/bigcouch.log nocontext=1
+# ignore requests that are fine
+ I undefined - -.*200$
+ I undefined - -.*201$
+ I 127.0.0.1 undefined.* ok
+ I 127.0.0.1 localhost:5984 .* ok
+ # https://leap.se/code/issues/5246
+ I Shutting down group server
+ # ignore bigcouch conflict errors
+ I Error in process.*{{nocatch,conflict}
+ # ignore "Uncaught error in HTTP request: {exit, normal}" error
+ # it's suppressed in later versions of bigcouch anyhow
+ # see https://leap.se/code/issues/5226
+ I Uncaught error in HTTP request: {exit,normal}
+ I Uncaught error in HTTP request: {exit,
+ # Ignore rexi_EXIT bigcouch error (Bug #6512)
+ I Error in process <[0-9.]+> on node .* with exit value: {{rexi_EXIT,{(killed|noproc|shutdown),\[{couch_db,collect_results
+ # Ignore "Generic server terminating" bigcouch message (Feature #6544)
+ I Generic server <.*> terminating
+ I {error_report,<.*>,
+ I {error_info,
+ C Uncaught error in HTTP request: {error,
+ C Response abnormally terminated: {nodedown,
+ C rexi_DOWN,noproc
+ C rexi_DOWN,noconnection
+ C error
+ C Connection attempt from disallowed node
+ W Apache CouchDB has started
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/leap_mx.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/leap_mx.cfg
new file mode 100644
index 00000000..166d0230
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/leap_mx.cfg
@@ -0,0 +1,4 @@
+/var/log/leap/mx.log
+ W Don't know how to deliver mail
+ W No public key, stopping the processing chain
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/logwatch.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/logwatch.cfg
new file mode 100644
index 00000000..4f16d1bd
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/logwatch.cfg
@@ -0,0 +1,31 @@
+# This file is managed by Puppet. DO NOT EDIT.
+
+# logwatch.cfg
+# This file configures mk_logwatch. Define your logfiles
+# and patterns to be looked for here.
+
+# Name one or more logfiles
+/var/log/messages
+# Patterns are indented with one space and are prefixed with:
+# C: Critical messages
+# W: Warning messages
+# I: ignore these lines (OK)
+# The first match decides. Lines that do not match any pattern
+# are ignored
+ C Fail event detected on md device
+ I mdadm.*: Rebuild.*event detected
+ W mdadm\[
+ W ata.*hard resetting link
+ W ata.*soft reset failed (.*FIS failed)
+ W device-mapper: thin:.*reached low water mark
+ C device-mapper: thin:.*no free space
+
+/var/log/auth.log
+ W sshd.*Corrupted MAC on input
+
+/var/log/kern.log
+ C panic
+ C Oops
+ W generic protection rip
+ W .*Unrecovered read error - auto reallocate failed
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/openvpn.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/openvpn.cfg
new file mode 100644
index 00000000..d99dcde9
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/openvpn.cfg
@@ -0,0 +1,19 @@
+/var/log/leap/openvpn.log
+# ignore openvpn TLS initialization errors when clients
+# suddenly hang up before properly establishing
+# a tls connection
+ I ovpn-.*TLS Error: Unroutable control packet received from
+ I ovpn-.*TLS Error: TLS key negotiation failed to occur within 60 seconds \(check your network connectivity\)
+ I ovpn-.*TLS Error: TLS handshake failed
+ I ovpn-.*TLS Error: TLS object -> incoming plaintext read error
+ I ovpn-.*Fatal TLS error \(check_tls_errors_co\), restarting
+ I ovpn-.*TLS_ERROR: BIO read tls_read_plaintext error: error:140890B2:SSL routines:SSL3_GET_CLIENT_CERTIFICATE:no certificate
+ I ovpn-.*TLS_ERROR: BIO read tls_read_plaintext error: error:140890C7:SSL routines:SSL3_GET_CLIENT_CERTIFICATE:peer did not return a certificate
+ I ovpn-.*TLS Error: unknown opcode received from
+ I ovpn-.*Authenticate/Decrypt packet error: packet HMAC authentication failed
+ I ovpn-.*TLS Error: reading acknowledgement record from packet
+ I ovpn-.*TLS Error: session-id not found in packet from
+
+ I ovpn-.*SIGUSR1\[soft,tls-error\] received, client-instance restarting
+ I ovpn-.*VERIFY ERROR: depth=0, error=certificate has expired
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg
new file mode 100644
index 00000000..3af5045b
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/soledad.cfg
@@ -0,0 +1,6 @@
+/var/log/soledad.log
+ C WSGI application error
+ C Error
+ C error
+# Removed this line because we determined it was better to ignore it (#6566)
+# W Timing out client:
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/stunnel.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/stunnel.cfg
new file mode 100644
index 00000000..b1e6cf2f
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/stunnel.cfg
@@ -0,0 +1,10 @@
+/var/log/leap/stunnel.log
+# check for stunnel failures
+#
+# these are temporary failures and happen very often, so we
+# ignore them until we have tuned stunnel timeouts/logging,
+# see https://leap.se/code/issues/5218
+ I stunnel:.*Connection reset by peer
+ I stunnel:.*Peer suddenly disconnected
+ I stunnel:.*Connection refused
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog/bigcouch.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/bigcouch.cfg
new file mode 100644
index 00000000..f53f0780
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/bigcouch.cfg
@@ -0,0 +1,5 @@
+# on one-node bigcouch setups, we'll get this msg
+# a lot, so we ignore it here until we fix
+# https://leap.se/code/issues/5244
+ I epmd: got partial packet only on file descriptor
+
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog/couchdb.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/couchdb.cfg
new file mode 100644
index 00000000..5f8d5b95
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog/couchdb.cfg
@@ -0,0 +1,2 @@
+ C /usr/local/bin/couch-doc-update.*failed
+ C /usr/local/bin/couch-doc-update.*ERROR
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog_header.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog_header.cfg
new file mode 100644
index 00000000..f60d752b
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog_header.cfg
@@ -0,0 +1 @@
+/var/log/syslog
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg
new file mode 100644
index 00000000..7daf0cac
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/syslog_tail.cfg
@@ -0,0 +1,21 @@
+# some general patterns
+ I Error: Driver 'pcspkr' is already registered, aborting...
+# ignore postfix errors on lost connection (Bug #6476)
+ I postfix/smtpd.*SSL_accept error from.*lost connection
+# ignore postfix too many errors after DATA (#6545)
+ I postfix/smtpd.*too many errors after DATA from
+ C panic
+ C Oops
+ C Error
+# ignore ipv6 icmp errors for now (Bug #6540)
+ I kernel: .*icmpv6_send: no reply to icmp error
+ C error
+ W generic protection rip
+ W .*Unrecovered read error - auto reallocate failed
+# 401 Unauthorized error logged by webapp and possibly other
+# applications
+ C Unauthorized
+# catch abnormal termination of processes (due to segfault/fpe
+# signals etc).
+# see https://github.com/pixelated/pixelated-user-agent/issues/683
+ C systemd.*: main process exited, code=killed, status=
diff --git a/puppet/modules/site_check_mk/files/agent/logwatch/webapp.cfg b/puppet/modules/site_check_mk/files/agent/logwatch/webapp.cfg
new file mode 100644
index 00000000..337d9ec6
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/logwatch/webapp.cfg
@@ -0,0 +1,8 @@
+/var/log/leap/webapp.log
+# check for webapp errors
+ C Completed 500
+# couch connection issues
+ C webapp.*Could not connect to couch database messages due to 401 Unauthorized: {"error":"unauthorized","reason":"You are not a server admin."}
+# ignore RoutingErrors that rails throw when it can't handle a url
+# see https://leap.se/code/issues/5173
+ I webapp.*ActionController::RoutingError
diff --git a/puppet/modules/site_check_mk/files/agent/nagios_plugins/check_unix_open_fds.pl b/puppet/modules/site_check_mk/files/agent/nagios_plugins/check_unix_open_fds.pl
new file mode 100755
index 00000000..06163d49
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/nagios_plugins/check_unix_open_fds.pl
@@ -0,0 +1,322 @@
+#!/usr/bin/perl -w
+
+# check_unix_open_fds Nagios Plugin
+#
+# TComm - Carlos Peris Pla
+#
+# This nagios plugin is free software, and comes with ABSOLUTELY
+# NO WARRANTY. It may be used, redistributed and/or modified under
+# the terms of the GNU General Public Licence (see
+# http://www.fsf.org/licensing/licenses/gpl.txt).
+
+
+# MODULE DECLARATION
+
+use strict;
+use Nagios::Plugin;
+
+
+# FUNCTION DECLARATION
+
+sub CreateNagiosManager ();
+sub CheckArguments ();
+sub PerformCheck ();
+
+
+# CONSTANT DEFINITION
+
+use constant NAME => 'check_unix_open_fds';
+use constant VERSION => '0.1b';
+use constant USAGE => "Usage:\ncheck_unix_open_fds -w <process_threshold,application_threshold> -c <process_threshold,application_threshold>\n".
+ "\t\t[-V <version>]\n";
+use constant BLURB => "This plugin checks, in UNIX systems with the command lsof installed and with its SUID bit activated, the number\n".
+ "of file descriptors opened by an application and its processes.\n";
+use constant LICENSE => "This nagios plugin is free software, and comes with ABSOLUTELY\n".
+ "no WARRANTY. It may be used, redistributed and/or modified under\n".
+ "the terms of the GNU General Public Licence\n".
+ "(see http://www.fsf.org/licensing/licenses/gpl.txt).\n";
+use constant EXAMPLE => "\n\n".
+ "Example:\n".
+ "\n".
+ "check_unix_open_fds -a /usr/local/nagios/bin/ndo2db -w 20,75 -c 25,85\n".
+ "\n".
+ "It returns CRITICAL if number of file descriptors opened by ndo2db is higher than 85,\n".
+ "if not it returns WARNING if number of file descriptors opened by ndo2db is higher \n".
+ "than 75, if not it returns CRITICAL if number of file descriptors opened by any process\n".
+ "of ndo2db is higher than 25, if not it returns WARNING if number of file descriptors \n".
+ "opened by any process of ndo2db is higher than 20.\n".
+                        "In other cases it returns OK if the check has been performed successfully.\n\n";
+
+
+# VARIABLE DEFINITION
+
+my $Nagios;
+my $Error;
+my $PluginResult;
+my $PluginOutput;
+my @WVRange;
+my @CVRange;
+
+
+# MAIN FUNCTION
+
+# Get command line arguments
+$Nagios = &CreateNagiosManager(USAGE, VERSION, BLURB, LICENSE, NAME, EXAMPLE);
+eval {$Nagios->getopts};
+
+if (!$@) {
+ # Command line parsed
+ if (&CheckArguments($Nagios, \$Error, \@WVRange, \@CVRange)) {
+ # Argument checking passed
+ $PluginResult = &PerformCheck($Nagios, \$PluginOutput, \@WVRange, \@CVRange)
+ }
+ else {
+ # Error checking arguments
+ $PluginOutput = $Error;
+ $PluginResult = UNKNOWN;
+ }
+ $Nagios->nagios_exit($PluginResult,$PluginOutput);
+}
+else {
+ # Error parsing command line
+ $Nagios->nagios_exit(UNKNOWN,$@);
+}
+
+
+
+# FUNCTION DEFINITIONS
+
+# Creates and configures a Nagios plugin object
+# Input: strings (usage, version, blurb, license, name and example) to configure argument parsing functionality
+# Return value: reference to a Nagios plugin object
+
+sub CreateNagiosManager() {
+ # Create GetOpt object
+ my $Nagios = Nagios::Plugin->new(usage => $_[0], version => $_[1], blurb => $_[2], license => $_[3], plugin => $_[4], extra => $_[5]);
+
+ # Add argument units
+ $Nagios->add_arg(spec => 'application|a=s',
+ help => 'Application path for which you want to check the number of open file descriptors',
+ required => 1);
+
+ # Add argument warning
+ $Nagios->add_arg(spec => 'warning|w=s',
+ help => "Warning thresholds. Format: <process_threshold,application_threshold>",
+ required => 1);
+ # Add argument critical
+ $Nagios->add_arg(spec => 'critical|c=s',
+ help => "Critical thresholds. Format: <process_threshold,application_threshold>",
+ required => 1);
+
+ # Return value
+ return $Nagios;
+}
+
+
+# Checks argument values and sets some default values
+# Input: Nagios Plugin object
+# Output: reference to Error description string, reference to WVRange, reference to CVRange
+# Return value: True if arguments ok, false if not
+
+sub CheckArguments() {
+ my ($Nagios, $Error, $WVRange, $CVRange) = @_;
+ my $commas;
+ my $units;
+ my $i;
+ my $firstpos;
+ my $secondpos;
+
+ # Check Warning thresholds list
+ $commas = $Nagios->opts->warning =~ tr/,//;
+ if ($commas !=1){
+ ${$Error} = "Invalid Warning list format. One comma is expected.";
+ return 0;
+ }
+ else{
+ $i=0;
+ $firstpos=0;
+ my $warning=$Nagios->opts->warning;
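+        # Walk the comma positions to split the threshold list into its two
+        # fields; an empty field is treated as "no threshold" ("~:").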
+ while ($warning =~ /[,]/g) {
+ $secondpos=pos $warning;
+ if ($secondpos - $firstpos==1){
+ @{$WVRange}[$i] = "~:";
+ }
+ else{
+ @{$WVRange}[$i] = substr $Nagios->opts->warning, $firstpos, ($secondpos-$firstpos-1);
+ }
+ $firstpos=$secondpos;
+ $i++
+ }
+        if (length($Nagios->opts->warning) - $firstpos==0){ # The comma is the last element of the string
+ @{$WVRange}[$i] = "~:";
+ }
+ else{
+ @{$WVRange}[$i] = substr $Nagios->opts->warning, $firstpos, (length($Nagios->opts->warning)-$firstpos);
+ }
+
+        if (@{$WVRange}[0] !~/^(@?(\d+|(\d+|~):(\d+)?))?$/){
+            ${$Error} = "Invalid Process Warning threshold in @{$WVRange}[0]";
+            return 0;
+        }
+        if (@{$WVRange}[1] !~/^(@?(\d+|(\d+|~):(\d+)?))?$/){
+            ${$Error} = "Invalid Application Warning threshold in @{$WVRange}[1]";
+            return 0;
+        }
+ }
+
+ # Check Critical thresholds list
+ $commas = $Nagios->opts->critical =~ tr/,//;
+ if ($commas !=1){
+ ${$Error} = "Invalid Critical list format. One comma is expected.";
+ return 0;
+ }
+ else{
+ $i=0;
+ $firstpos=0;
+ my $critical=$Nagios->opts->critical;
+ while ($critical =~ /[,]/g) {
+ $secondpos=pos $critical ;
+ if ($secondpos - $firstpos==1){
+ @{$CVRange}[$i] = "~:";
+ }
+ else{
+ @{$CVRange}[$i] =substr $Nagios->opts->critical, $firstpos, ($secondpos-$firstpos-1);
+ }
+ $firstpos=$secondpos;
+ $i++
+ }
+        if (length($Nagios->opts->critical) - $firstpos==0){ # The comma is the last element of the string
+ @{$CVRange}[$i] = "~:";
+ }
+ else{
+ @{$CVRange}[$i] = substr $Nagios->opts->critical, $firstpos, (length($Nagios->opts->critical)-$firstpos);
+ }
+
+ if (@{$CVRange}[0] !~/^(@?(\d+|(\d+|~):(\d+)?))?$/) {
+ ${$Error} = "Invalid Process Critical threshold in @{$CVRange}[0]";
+ return 0;
+ }
+ if (@{$CVRange}[1] !~/^(@?(\d+|(\d+|~):(\d+)?))?$/) {
+ ${$Error} = "Invalid Application Critical threshold in @{$CVRange}[1]";
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+
+# Performs whole check:
+# Input: Nagios Plugin object, reference to Plugin output string, reference to WVRange, reference to CVRange
+# Output: Plugin output string
+# Return value: Plugin return value
+
+sub PerformCheck() {
+ my ($Nagios, $PluginOutput, $WVRange, $CVRange) = @_;
+ my $Application;
+ my @AppNameSplitted;
+ my $ApplicationName;
+ my $PsCommand;
+ my $PsResult;
+ my @PsResultLines;
+ my $ProcLine;
+ my $ProcPid;
+ my $LsofCommand;
+ my $LsofResult;
+ my $ProcCount = 0;
+ my $FDCount = 0;
+ my $ProcFDAvg = 0;
+ my $PerProcMaxFD = 0;
+ my $ProcOKFlag = 0;
+ my $ProcWarningFlag = 0;
+ my $ProcCriticalFlag = 0;
+ my $OKFlag = 0;
+ my $WarningFlag = 0;
+ my $CriticalFlag = 0;
+ my $LastWarningProcFDs = 0;
+ my $LastWarningProc = -1;
+ my $LastCriticalProcFDs = 0;
+ my $LastCriticalProc = -1;
+ my $ProcPluginReturnValue = UNKNOWN;
+ my $AppPluginReturnValue = UNKNOWN;
+ my $PluginReturnValue = UNKNOWN;
+ my $PerformanceData = "";
+ my $PerfdataUnit = "FDs";
+
+ $Application = $Nagios->opts->application;
+ $PsCommand = "ps -eaf | grep $Application";
+ $PsResult = `$PsCommand`;
+ @AppNameSplitted = split(/\//, $Application);
+ $ApplicationName = $AppNameSplitted[$#AppNameSplitted];
+ @PsResultLines = split(/\n/, $PsResult);
+ if ( $#PsResultLines > 1 ) {
+ foreach my $Proc (split(/\n/, $PsResult)) {
+ if ($Proc !~ /check_unix_open_fds/ && $Proc !~ / grep /) {
+ $ProcCount += 1;
+ $ProcPid = (split(/\s+/, $Proc))[1];
+ $LsofCommand = "lsof -p $ProcPid | wc -l";
+ $LsofResult = `$LsofCommand`;
+ $LsofResult = ($LsofResult > 0 ) ? ($LsofResult - 1) : 0;
+ $FDCount += $LsofResult;
+ if ($LsofResult >= $PerProcMaxFD) { $PerProcMaxFD = $LsofResult; }
+ $ProcPluginReturnValue = $Nagios->check_threshold(check => $LsofResult,warning => @{$WVRange}[0],critical => @{$CVRange}[0]);
+ if ($ProcPluginReturnValue eq OK) {
+ $ProcOKFlag = 1;
+ }
+ elsif ($ProcPluginReturnValue eq WARNING) {
+ $ProcWarningFlag = 1;
+ if ($LsofResult >= $LastWarningProcFDs) {
+ $LastWarningProcFDs = $LsofResult;
+ $LastWarningProc = $ProcPid;
+ }
+ }
+ #if ($LsofResult >= $PCT) {
+ elsif ($ProcPluginReturnValue eq CRITICAL) {
+ $ProcCriticalFlag = 1;
+ if ($LsofResult >= $LastCriticalProcFDs) {
+ $LastCriticalProcFDs = $LsofResult;
+ $LastCriticalProc = $ProcPid;
+ }
+ }
+ }
+ }
+ if ($ProcCount) { $ProcFDAvg = int($FDCount / $ProcCount); }
+ $AppPluginReturnValue = $Nagios->check_threshold(check => $FDCount,warning => @{$WVRange}[1],critical => @{$CVRange}[1]);
+ #if ($FDCount >= $TWT) {
+ if ($AppPluginReturnValue eq OK) { $OKFlag = 1; }
+ elsif ($AppPluginReturnValue eq WARNING) { $WarningFlag = 1; }
+ elsif ($AppPluginReturnValue eq CRITICAL) { $CriticalFlag = 1; }
+
+ # PluginReturnValue and PluginOutput
+ if ($CriticalFlag) {
+ $PluginReturnValue = CRITICAL;
+ ${$PluginOutput} .= "$ApplicationName handling $FDCount files (critical threshold set to @{$CVRange}[1])";
+ }
+ elsif ($WarningFlag) {
+ $PluginReturnValue = WARNING;
+ ${$PluginOutput} .= "$ApplicationName handling $FDCount files (warning threshold set to @{$WVRange}[1])";
+ }
+ elsif ($ProcCriticalFlag) {
+ $PluginReturnValue = CRITICAL;
+ ${$PluginOutput} .= "Process ID $LastCriticalProc handling $LastCriticalProcFDs files (critical threshold set to @{$CVRange}[0])";
+ }
+ elsif ($ProcWarningFlag) {
+ $PluginReturnValue = WARNING;
+ ${$PluginOutput} .= "Process ID $LastWarningProc handling $LastWarningProcFDs files (warning threshold set to @{$WVRange}[0])";
+ }
+ elsif ($OKFlag && $ProcOKFlag) {
+ $PluginReturnValue = OK;
+ ${$PluginOutput} .= "$ApplicationName handling $FDCount files";
+ }
+ }
+ else {
+		${$PluginOutput} .= "Application $ApplicationName does not exist";
+ }
+
+
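+	# Nagios performance data: space-separated label=value[unit] pairs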
+ $PerformanceData .= "ProcCount=$ProcCount$PerfdataUnit FDCount=$FDCount$PerfdataUnit ProcFDAvg=$ProcFDAvg$PerfdataUnit PerProcMaxFD=$PerProcMaxFD$PerfdataUnit";
+
+ # Output with performance data:
+ ${$PluginOutput} .= " | $PerformanceData";
+
+ return $PluginReturnValue;
+}
diff --git a/puppet/modules/site_check_mk/files/agent/plugins/mk_logwatch.1.2.4 b/puppet/modules/site_check_mk/files/agent/plugins/mk_logwatch.1.2.4
new file mode 100755
index 00000000..3dbca322
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/agent/plugins/mk_logwatch.1.2.4
@@ -0,0 +1,374 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2010 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Call with -d for debug mode: colored output, no saving of status
+
+import sys, os, re, time
+import glob
+
+if '-d' in sys.argv[1:] or '--debug' in sys.argv[1:]:
+ tty_red = '\033[1;31m'
+ tty_green = '\033[1;32m'
+ tty_yellow = '\033[1;33m'
+ tty_blue = '\033[1;34m'
+ tty_normal = '\033[0m'
+ debug = True
+else:
+ tty_red = ''
+ tty_green = ''
+ tty_yellow = ''
+ tty_blue = ''
+ tty_normal = ''
+ debug = False
+
+# The configuration file and status file are searched
+# in the directory named by the environment variable
+# LOGWATCH_DIR. If that is not set, MK_CONFDIR is used.
+# If that is not set either, the current directory is
+# used.
+logwatch_dir = os.getenv("LOGWATCH_DIR")
+if not logwatch_dir:
+ logwatch_dir = os.getenv("MK_CONFDIR")
+ if not logwatch_dir:
+ logwatch_dir = "."
+
+print "<<<logwatch>>>"
+
+config_filename = logwatch_dir + "/logwatch.cfg"
+status_filename = logwatch_dir + "/logwatch.state"
+config_dir = logwatch_dir + "/logwatch.d/*.cfg"
+
+def is_not_comment(line):
+ if line.lstrip().startswith('#') or \
+ line.strip() == '':
+ return False
+ return True
+
+def parse_filenames(line):
+ return line.split()
+
+def parse_pattern(level, pattern):
+ if level not in [ 'C', 'W', 'I', 'O' ]:
+ raise(Exception("Invalid pattern line '%s'" % line))
+ try:
+ compiled = re.compile(pattern)
+ except:
+ raise(Exception("Invalid regular expression in line '%s'" % line))
+ return (level, compiled)
+
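+# Config format: logfile names (plus per-file options such as maxlines=100)
+# start in column one; pattern lines are indented and start with a level
+# letter (C/W/I/O), while 'A' adds a continuation pattern and 'R' a rewrite
+# pattern. For example:
+#   /var/log/syslog maxlines=1000
+#    C panic
+#    W error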
+def read_config():
+ config_lines = [ line.rstrip() for line in filter(is_not_comment, file(config_filename).readlines()) ]
+ # Add config from a logwatch.d folder
+ for config_file in glob.glob(config_dir):
+ config_lines += [ line.rstrip() for line in filter(is_not_comment, file(config_file).readlines()) ]
+
+ have_filenames = False
+ config = []
+
+ for line in config_lines:
+ rewrite = False
+ if line[0].isspace(): # pattern line
+ if not have_filenames:
+ raise Exception("Missing logfile names")
+ level, pattern = line.split(None, 1)
+ if level == 'A':
+ cont_list.append(parse_cont_pattern(pattern))
+ elif level == 'R':
+ rewrite_list.append(pattern)
+ else:
+ level, compiled = parse_pattern(level, pattern)
+ cont_list = [] # List of continuation patterns
+ rewrite_list = [] # List of rewrite patterns
+ patterns.append((level, compiled, cont_list, rewrite_list))
+ else: # filename line
+ patterns = []
+ config.append((parse_filenames(line), patterns))
+ have_filenames = True
+ return config
+
+def parse_cont_pattern(pattern):
+ try:
+ return int(pattern)
+ except:
+ try:
+ return re.compile(pattern)
+ except:
+ if debug:
+ raise
+ raise Exception("Invalid regular expression in line '%s'" % pattern)
+
+# structure of statusfile
+# # LOGFILE OFFSET INODE
+# /var/log/messages|7767698|32455445
+# /var/test/x12134.log|12345|32444355
+def read_status():
+ if debug:
+ return {}
+
+ status = {}
+ for line in file(status_filename):
+ # TODO: Remove variants with spaces. rsplit is
+ # not portable. split fails if logfilename contains
+ # spaces
+ inode = -1
+ try:
+ parts = line.split('|')
+ filename = parts[0]
+ offset = parts[1]
+ if len(parts) >= 3:
+ inode = parts[2]
+
+ except:
+ try:
+ filename, offset = line.rsplit(None, 1)
+ except:
+ filename, offset = line.split(None, 1)
+ status[filename] = int(offset), int(inode)
+ return status
+
+def save_status(status):
+ f = file(status_filename, "w")
+ for filename, (offset, inode) in status.items():
+ f.write("%s|%d|%d\n" % (filename, offset, inode))
+
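+# One-line pushback buffer: when a continuation pattern stops matching,
+# the already-read line is handed back so the main loop processes it again.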
+pushed_back_line = None
+def next_line(f):
+ global pushed_back_line
+ if pushed_back_line != None:
+ line = pushed_back_line
+ pushed_back_line = None
+ return line
+ else:
+ try:
+ line = f.next()
+ return line
+ except:
+ return None
+
+
+def process_logfile(logfile, patterns):
+ global pushed_back_line
+
+ # Look at which file offset we have finished scanning
+ # the logfile last time. If we have never seen this file
+ # before, we set the offset to -1
+ offset, prev_inode = status.get(logfile, (-1, -1))
+ try:
+ fl = os.open(logfile, os.O_RDONLY)
+ inode = os.fstat(fl)[1] # 1 = st_ino
+ except:
+ if debug:
+ raise
+ print "[[[%s:cannotopen]]]" % logfile
+ return
+
+ print "[[[%s]]]" % logfile
+
+ # Seek to the current end in order to determine file size
+ current_end = os.lseek(fl, 0, 2) # os.SEEK_END not available in Python 2.4
+ status[logfile] = current_end, inode
+
+ # If we have never seen this file before, we just set the
+ # current pointer to the file end. We do not want to make
+ # a fuss about ancient log messages...
+ if offset == -1:
+ if not debug:
+ return
+ else:
+ offset = 0
+
+
+    # If the inode of the logfile has changed it has apparently
+ # been started from new (logfile rotation). At least we must
+ # assume that. In some rare cases (restore of a backup, etc)
+ # we are wrong and resend old log messages
+ if prev_inode >= 0 and inode != prev_inode:
+ offset = 0
+
+ # Our previously stored offset is the current end ->
+ # no new lines in this file
+ if offset == current_end:
+ return # nothing new
+
+ # If our offset is beyond the current end, the logfile has been
+ # truncated or wrapped while keeping the same inode. We assume
+ # that it contains all new data in that case and restart from
+ # offset 0.
+ if offset > current_end:
+ offset = 0
+
+ # now seek to offset where interesting data begins
+ os.lseek(fl, offset, 0) # os.SEEK_SET not available in Python 2.4
+ f = os.fdopen(fl)
+ worst = -1
+ outputtxt = ""
+ lines_parsed = 0
+ start_time = time.time()
+
+ while True:
+ line = next_line(f)
+ if line == None:
+ break # End of file
+
+ lines_parsed += 1
+ # Check if maximum number of new log messages is exceeded
+ if opt_maxlines != None and lines_parsed > opt_maxlines:
+ outputtxt += "%s Maximum number (%d) of new log messages exceeded.\n" % (
+ opt_overflow, opt_maxlines)
+ worst = max(worst, opt_overflow_level)
+ os.lseek(fl, 0, 2) # Seek to end of file, skip all other messages
+ break
+
+ # Check if maximum processing time (per file) is exceeded. Check only
+ # every 100'th line in order to save system calls
+ if opt_maxtime != None and lines_parsed % 100 == 10 \
+ and time.time() - start_time > opt_maxtime:
+ outputtxt += "%s Maximum parsing time (%.1f sec) of this log file exceeded.\n" % (
+ opt_overflow, opt_maxtime)
+ worst = max(worst, opt_overflow_level)
+ os.lseek(fl, 0, 2) # Seek to end of file, skip all other messages
+ break
+
+ level = "."
+ for lev, pattern, cont_patterns, replacements in patterns:
+ matches = pattern.search(line[:-1])
+ if matches:
+ level = lev
+ levelint = {'C': 2, 'W': 1, 'O': 0, 'I': -1, '.': -1}[lev]
+ worst = max(levelint, worst)
+
+ # Check for continuation lines
+ for cont_pattern in cont_patterns:
+ if type(cont_pattern) == int: # add that many lines
+ for x in range(cont_pattern):
+ cont_line = next_line(f)
+ if cont_line == None: # end of file
+ break
+ line = line[:-1] + "\1" + cont_line
+
+ else: # pattern is regex
+ while True:
+ cont_line = next_line(f)
+ if cont_line == None: # end of file
+ break
+ elif cont_pattern.search(cont_line[:-1]):
+ line = line[:-1] + "\1" + cont_line
+ else:
+ pushed_back_line = cont_line # sorry for stealing this line
+ break
+
+ # Replacement
+ for replace in replacements:
+ line = replace.replace('\\0', line) + "\n"
+ for nr, group in enumerate(matches.groups()):
+ line = line.replace('\\%d' % (nr+1), group)
+
+ break # matching rule found and executed
+
+ color = {'C': tty_red, 'W': tty_yellow, 'O': tty_green, 'I': tty_blue, '.': ''}[level]
+ if debug:
+ line = line.replace("\1", "\nCONT:")
+ if level == "I":
+ level = "."
+ if opt_nocontext and level == '.':
+ continue
+ outputtxt += "%s%s %s%s\n" % (color, level, line[:-1], tty_normal)
+
+ new_offset = os.lseek(fl, 0, 1) # os.SEEK_CUR not available in Python 2.4
+ status[logfile] = new_offset, inode
+
+ # output all lines if at least one warning, error or ok has been found
+ if worst > -1:
+ sys.stdout.write(outputtxt)
+ sys.stdout.flush()
+
+try:
+ config = read_config()
+except Exception, e:
+ if debug:
+ raise
+ print "CANNOT READ CONFIG FILE: %s" % e
+ sys.exit(1)
+
+# Simply ignore errors in the status file. In case of a corrupted status file we simply begin
+# with an empty status. That keeps the monitoring up and running - even if we might lose a
+# message in the extreme case of a corrupted status file.
+try:
+ status = read_status()
+except Exception, e:
+ status = {}
+
+
+# The filename line may contain options like 'maxlines=100' or 'maxtime=10'
+for filenames, patterns in config:
+ # Initialize options with default values
+ opt_maxlines = None
+ opt_maxtime = None
+ opt_regex = None
+ opt_overflow = 'C'
+ opt_overflow_level = 2
+ opt_nocontext = False
+ try:
+ options = [ o.split('=', 1) for o in filenames if '=' in o ]
+ for key, value in options:
+ if key == 'maxlines':
+ opt_maxlines = int(value)
+ elif key == 'maxtime':
+ opt_maxtime = float(value)
+ elif key == 'overflow':
+ if value not in [ 'C', 'I', 'W', 'O' ]:
+ raise Exception("Invalid value %s for overflow. Allowed are C, I, O and W" % value)
+ opt_overflow = value
+ opt_overflow_level = {'C':2, 'W':1, 'O':0, 'I':0}[value]
+ elif key == 'regex':
+ opt_regex = re.compile(value)
+ elif key == 'iregex':
+ opt_regex = re.compile(value, re.I)
+ elif key == 'nocontext':
+ opt_nocontext = True
+ else:
+ raise Exception("Invalid option %s" % key)
+ except Exception, e:
+ if debug:
+ raise
+ print "INVALID CONFIGURATION: %s" % e
+ sys.exit(1)
+
+
+ for glob in filenames:
+ if '=' in glob:
+ continue
+ logfiles = [ l.strip() for l in os.popen("ls %s 2>/dev/null" % glob).readlines() ]
+ if opt_regex:
+ logfiles = [ f for f in logfiles if opt_regex.search(f) ]
+ if len(logfiles) == 0:
+ print '[[[%s:missing]]]' % glob
+ else:
+ for logfile in logfiles:
+ process_logfile(logfile, patterns)
+
+if not debug:
+ save_status(status)
diff --git a/puppet/modules/site_check_mk/files/extra_service_conf.mk b/puppet/modules/site_check_mk/files/extra_service_conf.mk
new file mode 100644
index 00000000..c7120a96
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/extra_service_conf.mk
@@ -0,0 +1,14 @@
+# retry 3 times before setting a service into a hard state
+# and send out notification
+extra_service_conf["max_check_attempts"] = [
+ ("4", ALL_HOSTS , ALL_SERVICES )
+]
+
+#
+# run check_mk_agent every 4 minutes if it terminates successfully.
+# see https://leap.se/code/issues/6539 for the rationale
+#
+extra_service_conf["normal_check_interval"] = [
+ ("4", ALL_HOSTS , "Check_MK" )
+]
+
diff --git a/puppet/modules/site_check_mk/files/ignored_services.mk b/puppet/modules/site_check_mk/files/ignored_services.mk
new file mode 100644
index 00000000..35dc4433
--- /dev/null
+++ b/puppet/modules/site_check_mk/files/ignored_services.mk
@@ -0,0 +1,3 @@
+ignored_services = [
+ ( ALL_HOSTS, [ "NTP Time" ] )
+]
diff --git a/puppet/modules/site_check_mk/manifests/agent.pp b/puppet/modules/site_check_mk/manifests/agent.pp
new file mode 100644
index 00000000..b95d5d64
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent.pp
@@ -0,0 +1,35 @@
+# installs check-mk agent
+class site_check_mk::agent {
+
+ $ssh_hash = hiera('ssh')
+ $pubkey = $ssh_hash['authorized_keys']['monitor']['key']
+ $type = $ssh_hash['authorized_keys']['monitor']['type']
+
+
+ # /usr/bin/mk-job depends on /usr/bin/time
+ ensure_packages('time')
+
+ class { 'site_apt::preferences::check_mk': } ->
+
+ class { 'check_mk::agent':
+ agent_package_name => 'check-mk-agent',
+ agent_logwatch_package_name => 'check-mk-agent-logwatch',
+ method => 'ssh',
+ authdir => '/root/.ssh',
+ authfile => 'authorized_keys',
+ register_agent => false,
+ require => Package['time']
+ } ->
+
+ class { 'site_check_mk::agent::mrpe': } ->
+ class { 'site_check_mk::agent::logwatch': } ->
+
+ file {
+ [ '/srv/leap/nagios', '/srv/leap/nagios/plugins' ]:
+ ensure => directory;
+ '/usr/lib/check_mk_agent/local/run_node_tests.sh':
+ source => 'puppet:///modules/site_check_mk/agent/local_checks/all_hosts/run_node_tests.sh',
+ mode => '0755';
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/couchdb.pp b/puppet/modules/site_check_mk/manifests/agent/couchdb.pp
new file mode 100644
index 00000000..1554fd3c
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/couchdb.pp
@@ -0,0 +1,34 @@
+# configure logwatch and nagios checks for couchdb (both bigcouch and plain
+# couchdb installations)
+class site_check_mk::agent::couchdb {
+
+ concat::fragment { 'syslog_couchdb':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog/couchdb.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '02';
+ }
+
+ # check different couchdb stats
+ file { '/usr/lib/check_mk_agent/local/leap_couch_stats.sh':
+ source => 'puppet:///modules/site_check_mk/agent/local_checks/couchdb/leap_couch_stats.sh',
+ mode => '0755',
+ require => Package['check_mk-agent']
+ }
+
+ # check open files for bigcouch proc
+ include site_check_mk::agent::package::perl_plugin
+ file { '/srv/leap/nagios/plugins/check_unix_open_fds.pl':
+ source => 'puppet:///modules/site_check_mk/agent/nagios_plugins/check_unix_open_fds.pl',
+ mode => '0755'
+ }
+ augeas {
+ 'Couchdb_open_files':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/Couchdb_open_files',
+ 'set Couchdb_open_files \'/srv/leap/nagios/plugins/check_unix_open_fds.pl -a beam -w 28672,28672 -c 30720,30720\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/couchdb/bigcouch.pp b/puppet/modules/site_check_mk/manifests/agent/couchdb/bigcouch.pp
new file mode 100644
index 00000000..82c3ac72
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/couchdb/bigcouch.pp
@@ -0,0 +1,49 @@
+# configure logwatch and nagios checks for bigcouch
+class site_check_mk::agent::couchdb::bigcouch {
+
+ # watch bigcouch logs
+ # currently disabled because bigcouch is too noisy
+ # see https://leap.se/code/issues/7375 for more details
+ # and site_config::remove_files for removing leftovers
+ #file { '/etc/check_mk/logwatch.d/bigcouch.cfg':
+ # source => 'puppet:///modules/site_check_mk/agent/logwatch/bigcouch.cfg',
+ #}
+
+ # check syslog msg from:
+  # - epmd
+ # - /usr/local/bin/couch-doc-update
+ concat::fragment { 'syslog_bigcouch':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog/bigcouch.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '02';
+ }
+
+ # check bigcouch processes
+ augeas {
+ 'Bigcouch_epmd_procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/Bigcouch_epmd_procs',
+ 'set Bigcouch_epmd_procs \'/usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a /opt/bigcouch/erts-5.9.1/bin/epmd\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+ 'Bigcouch_beam_procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/Bigcouch_beam_procs',
+ 'set Bigcouch_beam_procs \'/usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a /opt/bigcouch/erts-5.9.1/bin/beam\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+ augeas {
+ 'Bigcouch_open_files':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/Bigcouch_open_files',
+ 'set Bigcouch_open_files \'/srv/leap/nagios/plugins/check_unix_open_fds.pl -a beam -w 28672,28672 -c 30720,30720\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/couchdb/plain.pp b/puppet/modules/site_check_mk/manifests/agent/couchdb/plain.pp
new file mode 100644
index 00000000..3ec2267b
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/couchdb/plain.pp
@@ -0,0 +1,23 @@
+# configure logwatch and nagios checks for plain single couchdb master
+class site_check_mk::agent::couchdb::plain {
+
+ # remove bigcouch leftovers
+ augeas {
+ 'Bigcouch_epmd_procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => 'rm /files/etc/check_mk/mrpe.cfg/Bigcouch_epmd_procs',
+ require => File['/etc/check_mk/mrpe.cfg'];
+ 'Bigcouch_beam_procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => 'rm /files/etc/check_mk/mrpe.cfg/Bigcouch_beam_procs',
+ require => File['/etc/check_mk/mrpe.cfg'];
+ 'Bigcouch_open_files':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => 'rm /files/etc/check_mk/mrpe.cfg/Bigcouch_open_files',
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/haproxy.pp b/puppet/modules/site_check_mk/manifests/agent/haproxy.pp
new file mode 100644
index 00000000..6d52efba
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/haproxy.pp
@@ -0,0 +1,15 @@
+class site_check_mk::agent::haproxy {
+
+ include site_check_mk::agent::package::nagios_plugins_contrib
+
+ # local nagios plugin checks via mrpe
+ augeas { 'haproxy':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/Haproxy',
+ 'set Haproxy \'/usr/lib/nagios/plugins/check_haproxy -u "http://localhost:8000/haproxy;csv"\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/haveged.pp b/puppet/modules/site_check_mk/manifests/agent/haveged.pp
new file mode 100644
index 00000000..cacbea8c
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/haveged.pp
@@ -0,0 +1,15 @@
+class site_check_mk::agent::haveged {
+
+  # check haveged process
+ augeas {
+ 'haveged_proc':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/haveged_proc',
+ 'set haveged_proc \'/usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a /usr/sbin/haveged\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/logwatch.pp b/puppet/modules/site_check_mk/manifests/agent/logwatch.pp
new file mode 100644
index 00000000..423cace2
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/logwatch.pp
@@ -0,0 +1,36 @@
+class site_check_mk::agent::logwatch {
+ # Deploy mk_logwatch 1.2.4 so we can split the config
+ # into multiple config files in /etc/check_mk/logwatch.d
+ # see https://leap.se/code/issues/5135
+
+ file { '/usr/lib/check_mk_agent/plugins/mk_logwatch':
+ source => 'puppet:///modules/site_check_mk/agent/plugins/mk_logwatch.1.2.4',
+ mode => '0755',
+ require => Package['check-mk-agent-logwatch']
+ }
+
+ # only config files that watch a distinct logfile should go in logwatch.d/
+ file { '/etc/check_mk/logwatch.d':
+ ensure => directory,
+ recurse => true,
+ purge => true,
+ require => Package['check-mk-agent-logwatch']
+ }
+
+  # services that share a common logfile (e.g. /var/log/syslog) need to get
+  # concatenated into one file, otherwise the last file sourced will override
+  # the previous config
+ # see mk_logwatch: "logwatch.cfg overwrites config files in logwatch.d",
+ # https://leap.se/code/issues/5155
+
+ # first, we need to deploy a custom logwatch.cfg that doesn't include
+ # a section about /var/log/syslog
+
+ file { '/etc/check_mk/logwatch.cfg':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/logwatch.cfg',
+ require => Package['check_mk-agent-logwatch']
+ }
+
+ include concat::setup
+ include site_check_mk::agent::logwatch::syslog
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/logwatch/syslog.pp b/puppet/modules/site_check_mk/manifests/agent/logwatch/syslog.pp
new file mode 100644
index 00000000..c927780d
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/logwatch/syslog.pp
@@ -0,0 +1,18 @@
+class site_check_mk::agent::logwatch::syslog {
+
+ concat { '/etc/check_mk/logwatch.d/syslog.cfg':
+ warn => true
+ }
+
+ concat::fragment { 'syslog_header':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog_header.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '01';
+ }
+ concat::fragment { 'syslog_tail':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/syslog_tail.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '99';
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/mrpe.pp b/puppet/modules/site_check_mk/manifests/agent/mrpe.pp
new file mode 100644
index 00000000..5e1f087a
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/mrpe.pp
@@ -0,0 +1,24 @@
+class site_check_mk::agent::mrpe {
+ # check_mk can use standard nagios plugins using
+ # a wrapper called mrpe
+ # see http://mathias-kettner.de/checkmk_mrpe.html
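+  # each mrpe.cfg entry is a single line of the form
+  # '<ServiceName> <plugin command line>', e.g.:
+  #   APT /usr/lib/nagios/plugins/check_apt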
+
+ package { 'nagios-plugins-basic':
+ ensure => latest,
+ }
+
+ file { '/etc/check_mk/mrpe.cfg':
+ ensure => present,
+ require => Package['check-mk-agent']
+ } ->
+
+ augeas {
+ 'Apt':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/APT',
+ 'set APT \'/usr/lib/nagios/plugins/check_apt\'' ];
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/mx.pp b/puppet/modules/site_check_mk/manifests/agent/mx.pp
new file mode 100644
index 00000000..20cbcade
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/mx.pp
@@ -0,0 +1,27 @@
+# check_mk agent checks for the mx service
+class site_check_mk::agent::mx {
+
+ # watch logs
+ file { '/etc/check_mk/logwatch.d/leap_mx.cfg':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/leap_mx.cfg',
+ }
+
+ # local nagios plugin checks via mrpe
+ # removed because leap_cli integrates a check for running mx procs already,
+ # which is also integrated into nagios (called "Mx/Are_MX_daemons_running")
+ augeas {
+ 'Leap_MX_Procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => 'rm /files/etc/check_mk/mrpe.cfg/Leap_MX_Procs',
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+ # check stale files in queue dir
+ file { '/usr/lib/check_mk_agent/local/check_leap_mx.sh':
+ source => 'puppet:///modules/site_check_mk/agent/local_checks/mx/check_leap_mx.sh',
+ mode => '0755',
+ require => Package['check_mk-agent']
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/openvpn.pp b/puppet/modules/site_check_mk/manifests/agent/openvpn.pp
new file mode 100644
index 00000000..0596a497
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/openvpn.pp
@@ -0,0 +1,10 @@
+class site_check_mk::agent::openvpn {
+
+ # check syslog
+  concat::fragment { 'syslog_openvpn':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/openvpn.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '02';
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/package/nagios_plugins_contrib.pp b/puppet/modules/site_check_mk/manifests/agent/package/nagios_plugins_contrib.pp
new file mode 100644
index 00000000..95a60d17
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/package/nagios_plugins_contrib.pp
@@ -0,0 +1,5 @@
+class site_check_mk::agent::package::nagios_plugins_contrib {
+ package { 'nagios-plugins-contrib':
+ ensure => installed,
+ }
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/package/perl_plugin.pp b/puppet/modules/site_check_mk/manifests/agent/package/perl_plugin.pp
new file mode 100644
index 00000000..4feda375
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/package/perl_plugin.pp
@@ -0,0 +1,5 @@
+class site_check_mk::agent::package::perl_plugin {
+ package { 'libnagios-plugin-perl':
+ ensure => installed,
+ }
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/soledad.pp b/puppet/modules/site_check_mk/manifests/agent/soledad.pp
new file mode 100644
index 00000000..f4a3f3a6
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/soledad.pp
@@ -0,0 +1,17 @@
+class site_check_mk::agent::soledad {
+
+ file { '/etc/check_mk/logwatch.d/soledad.cfg':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/soledad.cfg',
+ }
+
+ # local nagios plugin checks via mrpe
+
+ augeas { 'Soledad_Procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => [
+ 'rm /files/etc/check_mk/mrpe.cfg/Soledad_Procs',
+ 'set Soledad_Procs \'/usr/lib/nagios/plugins/check_procs -w 1:1 -c 1:1 -a "/usr/bin/python /usr/bin/twistd --uid=soledad --gid=soledad --pidfile=/var/run/soledad.pid --logfile=/var/log/soledad.log web --wsgi=leap.soledad.server.application --port=ssl:2323:privateKey=/etc/x509/keys/leap.key:certKey=/etc/x509/certs/leap.crt:sslmethod=SSLv23_METHOD"\'' ],
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/stunnel.pp b/puppet/modules/site_check_mk/manifests/agent/stunnel.pp
new file mode 100644
index 00000000..7f765771
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/stunnel.pp
@@ -0,0 +1,9 @@
+class site_check_mk::agent::stunnel {
+
+ concat::fragment { 'syslog_stunnel':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/stunnel.cfg',
+ target => '/etc/check_mk/logwatch.d/syslog.cfg',
+ order => '02';
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/agent/webapp.pp b/puppet/modules/site_check_mk/manifests/agent/webapp.pp
new file mode 100644
index 00000000..9bf3b197
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/agent/webapp.pp
@@ -0,0 +1,15 @@
+class site_check_mk::agent::webapp {
+
+ # remove leftovers of webapp python checks
+ file {
+ [ '/usr/lib/check_mk_agent/local/nagios-webapp_login.py',
+ '/usr/lib/check_mk_agent/local/soledad_sync.py' ]:
+ ensure => absent
+ }
+
+ # watch logs
+ file { '/etc/check_mk/logwatch.d/webapp.cfg':
+ source => 'puppet:///modules/site_check_mk/agent/logwatch/webapp.cfg',
+ }
+
+}
diff --git a/puppet/modules/site_check_mk/manifests/server.pp b/puppet/modules/site_check_mk/manifests/server.pp
new file mode 100644
index 00000000..7ff9eb4a
--- /dev/null
+++ b/puppet/modules/site_check_mk/manifests/server.pp
@@ -0,0 +1,103 @@
+# setup check_mk on the monitoring server
+class site_check_mk::server {
+
+ $ssh_hash = hiera('ssh')
+ $pubkey = $ssh_hash['authorized_keys']['monitor']['key']
+ $type = $ssh_hash['authorized_keys']['monitor']['type']
+ $seckey = $ssh_hash['monitor']['private_key']
+
+ $nagios_hiera = hiera_hash('nagios')
+ $hosts = $nagios_hiera['hosts']
+
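+  # comma-separated list of quoted internal domains of all hosts that are
+  # not in a disabled environment; written to /etc/check_mk/all_hosts_static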
+ $all_hosts = inline_template ('<% @hosts.keys.sort.each do |key| -%><% if @hosts[key]["environment"] != "disabled" %>"<%= @hosts[key]["domain_internal"] %>", <% end -%><% end -%>')
+ $domains_internal = $nagios_hiera['domains_internal']
+ $environments = $nagios_hiera['environments']
+
+ package { 'check-mk-server':
+ ensure => installed,
+ }
+
+ # we don't use check-mk-multisite, and the jessie version
+ # of this config file breaks with apache 2.4
+  # until https://gitlab.com/shared-puppet-modules-group/apache/issues/11
+  # is fixed, we need to use a generic file resource here
+ #apache::config::global { 'check-mk-multisite.conf':
+ # ensure => absent
+ #}
+
+ file { '/etc/apache2/conf-enabled/check-mk-multisite.conf':
+ ensure => absent,
+ require => Package['check-mk-server'];
+ }
+
+ # override paths to use the system check_mk rather than OMD
+ class { 'check_mk::config':
+ site => '',
+ etc_dir => '/etc',
+ nagios_subdir => 'nagios3',
+ bin_dir => '/usr/bin',
+ host_groups => undef,
+ use_storedconfigs => false,
+ inventory_only_on_changes => false,
+ require => Package['check-mk-server']
+ }
+
+ Exec['check_mk-refresh'] ->
+ Exec['check_mk-refresh-inventory-daily'] ->
+ Exec['check_mk-reload'] ->
+ Service['nagios']
+
+ file {
+ '/etc/check_mk/conf.d/use_ssh.mk':
+ content => template('site_check_mk/use_ssh.mk'),
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/hostgroups.mk':
+ content => template('site_check_mk/hostgroups.mk'),
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/host_contactgroups.mk':
+ content => template('site_check_mk/host_contactgroups.mk'),
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/ignored_services.mk':
+ source => 'puppet:///modules/site_check_mk/ignored_services.mk',
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/extra_service_conf.mk':
+ source => 'puppet:///modules/site_check_mk/extra_service_conf.mk',
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+ '/etc/check_mk/conf.d/extra_host_conf.mk':
+ content => template('site_check_mk/extra_host_conf.mk'),
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+
+ '/etc/check_mk/all_hosts_static':
+ content => $all_hosts,
+ notify => Exec['check_mk-refresh'],
+ require => Package['check-mk-server'];
+
+ '/etc/check_mk/.ssh':
+ ensure => directory,
+ require => Package['check-mk-server'];
+ '/etc/check_mk/.ssh/id_rsa':
+ content => $seckey,
+ owner => 'nagios',
+ mode => '0600',
+ require => Package['check-mk-server'];
+ '/etc/check_mk/.ssh/id_rsa.pub':
+ content => "${type} ${pubkey} monitor",
+ owner => 'nagios',
+ mode => '0644',
+ require => Package['check-mk-server'];
+
+ # check_icmp must be suid root or called by sudo
+ # see https://leap.se/code/issues/5171
+ '/usr/lib/nagios/plugins/check_icmp':
+ mode => '4755',
+ require => Package['nagios-plugins-basic'];
+ }
+
+ include check_mk::agent::local_checks
+}
diff --git a/puppet/modules/site_check_mk/templates/extra_host_conf.mk b/puppet/modules/site_check_mk/templates/extra_host_conf.mk
new file mode 100644
index 00000000..bc27b514
--- /dev/null
+++ b/puppet/modules/site_check_mk/templates/extra_host_conf.mk
@@ -0,0 +1,13 @@
+# retry 3 times before setting a host into a hard state
+# and send out notification
+extra_host_conf["max_check_attempts"] = [
+ ("4", ALL_HOSTS )
+]
+
+# Use hostnames as alias so notification mail subjects
+# are more readable and not so long. The alias defaults to
+# the fqdn of a host if it is not set explicitly.
+extra_host_conf["alias"] = [
+<% @hosts.keys.sort.each do |key| -%> ( "<%= key.strip %>", ["<%= @hosts[key]['domain_internal']%>"]),
+<% end -%>
+]
diff --git a/puppet/modules/site_check_mk/templates/host_contactgroups.mk b/puppet/modules/site_check_mk/templates/host_contactgroups.mk
new file mode 100644
index 00000000..6a534967
--- /dev/null
+++ b/puppet/modules/site_check_mk/templates/host_contactgroups.mk
@@ -0,0 +1,17 @@
+<%
+ contact_groups = []
+ @environments.keys.sort.each do |env_name|
+ hosts = ""
+ @nagios_hosts.keys.sort.each do |hostname|
+ hostdata = @nagios_hosts[hostname]
+ domain_internal = hostdata['domain_internal']
+ if hostdata['environment'] == env_name
+ hosts << '"' + domain_internal + '", '
+ end
+ end
+ contact_groups << ' ( "%s", [%s] )' % [env_name, hosts]
+ end
+%>
+host_contactgroups = [
+<%= contact_groups.join(",\n") %>
+]
diff --git a/puppet/modules/site_check_mk/templates/hostgroups.mk b/puppet/modules/site_check_mk/templates/hostgroups.mk
new file mode 100644
index 00000000..7158dcd1
--- /dev/null
+++ b/puppet/modules/site_check_mk/templates/hostgroups.mk
@@ -0,0 +1,17 @@
+<%
+ host_groups = []
+ @environments.keys.sort.each do |env_name|
+ hosts = ""
+ @nagios_hosts.keys.sort.each do |hostname|
+ hostdata = @nagios_hosts[hostname]
+ domain_internal = hostdata['domain_internal']
+ if hostdata['environment'] == env_name
+ hosts << '"' + domain_internal + '", '
+ end
+ end
+ host_groups << ' ( "%s", [%s] )' % [env_name, hosts]
+ end
+%>
+host_groups = [
+<%= host_groups.join(",\n") %>
+]
diff --git a/puppet/modules/site_check_mk/templates/use_ssh.mk b/puppet/modules/site_check_mk/templates/use_ssh.mk
new file mode 100644
index 00000000..55269536
--- /dev/null
+++ b/puppet/modules/site_check_mk/templates/use_ssh.mk
@@ -0,0 +1,6 @@
+# http://mathias-kettner.de/checkmk_datasource_programs.html
+datasource_programs = [
+<% @nagios_hosts.sort.each do |name,config| %>
+ ( "ssh -l root -i /etc/check_mk/.ssh/id_rsa -p <%=config['ssh_port']%> <%=config['domain_internal']%> check_mk_agent", [ "<%=config['domain_internal']%>" ], ),<%- end -%>
+
+]
diff --git a/puppet/modules/site_config/files/xterm-title.sh b/puppet/modules/site_config/files/xterm-title.sh
new file mode 100644
index 00000000..3cff0e3a
--- /dev/null
+++ b/puppet/modules/site_config/files/xterm-title.sh
@@ -0,0 +1,8 @@
+# If this is an xterm set the title to user@host:dir
+case "$TERM" in
+xterm*|rxvt*)
+ PROMPT_COMMAND='echo -ne "\033]0;${USER}@${HOSTNAME}: ${PWD}\007"'
+ ;;
+*)
+ ;;
+esac
diff --git a/puppet/modules/site_config/lib/facter/dhcp_enabled.rb b/puppet/modules/site_config/lib/facter/dhcp_enabled.rb
new file mode 100644
index 00000000..33220da3
--- /dev/null
+++ b/puppet/modules/site_config/lib/facter/dhcp_enabled.rb
@@ -0,0 +1,22 @@
+require 'facter'
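+# returns true if any interface stanza in the given interfaces file (or, on
+# the first pass, in any file pulled in via a 'source' line) uses 'inet dhcp'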
+def dhcp_enabled?(ifs, recurse=true)
+ dhcp = false
+ included_ifs = []
+ if FileTest.exists?(ifs)
+ File.open(ifs) do |file|
+ dhcp = file.enum_for(:each_line).any? do |line|
+ if recurse && line =~ /^\s*source\s+([^\s]+)/
+ included_ifs += Dir.glob($1)
+ end
+ line =~ /inet\s+dhcp/
+ end
+ end
+ end
+ dhcp || included_ifs.any? { |ifs| dhcp_enabled?(ifs, false) }
+end
+Facter.add(:dhcp_enabled) do
+ confine :osfamily => 'Debian'
+ setcode do
+ dhcp_enabled?('/etc/network/interfaces')
+ end
+end
diff --git a/puppet/modules/site_config/lib/facter/ip_interface.rb b/puppet/modules/site_config/lib/facter/ip_interface.rb
new file mode 100644
index 00000000..45764bfc
--- /dev/null
+++ b/puppet/modules/site_config/lib/facter/ip_interface.rb
@@ -0,0 +1,13 @@
+require 'facter/util/ip'
+
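+# add one fact per configured IP address, named "interface_<ip>", whose
+# value is the name of the interface holding that address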
+Facter::Util::IP.get_interfaces.each do |interface|
+ ip = Facter.value("ipaddress_#{interface}")
+ if ip != nil
+ Facter.add("interface_" + ip ) do
+ setcode do
+ interface
+ end
+ end
+ end
+end
+
diff --git a/puppet/modules/site_config/manifests/caching_resolver.pp b/puppet/modules/site_config/manifests/caching_resolver.pp
new file mode 100644
index 00000000..8bf465c1
--- /dev/null
+++ b/puppet/modules/site_config/manifests/caching_resolver.pp
@@ -0,0 +1,27 @@
+# deploy local caching resolver
+class site_config::caching_resolver {
+ tag 'leap_base'
+
+ class { 'unbound':
+ root_hints => false,
+ anchor => false,
+ ssl => false,
+ settings => {
+ server => {
+ verbosity => '1',
+ interface => [ '127.0.0.1', '::1' ],
+ port => '53',
+ hide-identity => 'yes',
+ hide-version => 'yes',
+ harden-glue => 'yes',
+ access-control => [ '127.0.0.0/8 allow', '::1 allow' ]
+ }
+ }
+ }
+
+ concat::fragment { 'unbound glob include':
+ target => $unbound::params::config,
+ content => "include: /etc/unbound/unbound.conf.d/*.conf\n\n",
+ order => 10
+ }
+}
diff --git a/puppet/modules/site_config/manifests/default.pp b/puppet/modules/site_config/manifests/default.pp
new file mode 100644
index 00000000..256de1a1
--- /dev/null
+++ b/puppet/modules/site_config/manifests/default.pp
@@ -0,0 +1,71 @@
+# common things to set up on every node
+class site_config::default {
+ tag 'leap_base'
+
+ $services = hiera('services', [])
+ $domain_hash = hiera('domain')
+ include site_config::params
+ include site_config::setup
+
+ # default class, used by all hosts
+
+ include lsb, git
+
+ # configure sysctl parameters
+ include site_config::sysctl
+
+ # configure ssh and include ssh-keys
+ include site_sshd
+
+ # include classes for special environments
+  # e.g. openstack/aws nodes, vagrant nodes
+
+ # fix dhclient from changing resolver information
+ # facter returns 'true' as string
+ # lint:ignore:quoted_booleans
+ if $::dhcp_enabled == 'true' {
+ # lint:endignore
+ include site_config::dhclient
+ }
+
+ # configure /etc/resolv.conf
+ include site_config::resolvconf
+
+ # configure caching, local resolver
+ include site_config::caching_resolver
+
+ # install/configure syslog and core log rotations
+ include site_config::syslog
+
+ # provide a basic level of quality entropy
+ include haveged
+
+ # install/remove base packages
+ include site_config::packages
+
+ # include basic shorewall config
+ include site_shorewall::defaults
+
+ Package['git'] -> Vcsrepo<||>
+
+ # include basic shell config
+ include site_config::shell
+
+ # set up core leap files and directories
+ include site_config::files
+
+ # remove leftovers from previous deploys
+ include site_config::remove
+
+ if ! member($services, 'mx') {
+ include site_postfix::satellite
+ }
+
+ # if class custom exists, include it.
+ # possibility for users to define custom puppet recipes
+ if defined( '::custom') {
+ include ::custom
+ }
+
+ include site_check_mk::agent
+}
diff --git a/puppet/modules/site_config/manifests/dhclient.pp b/puppet/modules/site_config/manifests/dhclient.pp
new file mode 100644
index 00000000..a1f87d41
--- /dev/null
+++ b/puppet/modules/site_config/manifests/dhclient.pp
@@ -0,0 +1,40 @@
+# Unfortunately, there does not seem to be a way to reload the dhclient.conf
+# config file, or a convenient way to disable the modifications to
+# /etc/resolv.conf. So the following makes the functions involved noops and
+# ships a script to kill and restart dhclient. See the debian bugs:
+# #681698, #712796
+class site_config::dhclient {
+
+
+ include site_config::params
+
+ file { '/usr/local/sbin/reload_dhclient':
+ owner => 0,
+ group => 0,
+ mode => '0755',
+ content => template('site_config/reload_dhclient.erb');
+ }
+
+ exec { 'reload_dhclient':
+ refreshonly => true,
+ command => '/usr/local/sbin/reload_dhclient',
+ before => Class['site_config::resolvconf'],
+ require => File['/usr/local/sbin/reload_dhclient'],
+ }
+
+ file { '/etc/dhcp/dhclient-enter-hooks.d':
+ ensure => directory,
+ mode => '0755',
+ owner => 'root',
+ group => 'root',
+ }
+
+ file { '/etc/dhcp/dhclient-enter-hooks.d/disable_resolvconf':
+ content => 'make_resolv_conf() { : ; } ; set_hostname() { : ; }',
+ mode => '0644',
+ owner => 'root',
+ group => 'root',
+ require => File['/etc/dhcp/dhclient-enter-hooks.d'],
+ notify => Exec['reload_dhclient'];
+ }
+}
diff --git a/puppet/modules/site_config/manifests/files.pp b/puppet/modules/site_config/manifests/files.pp
new file mode 100644
index 00000000..d2ef8a98
--- /dev/null
+++ b/puppet/modules/site_config/manifests/files.pp
@@ -0,0 +1,24 @@
+# set up core leap files and directories
+class site_config::files {
+
+ file {
+ '/srv/leap':
+ ensure => directory,
+ owner => 'root',
+ group => 'root',
+ mode => '0711';
+
+ [ '/etc/leap', '/var/lib/leap']:
+ ensure => directory,
+ owner => 'root',
+ group => 'root',
+ mode => '0755';
+
+ '/var/log/leap':
+ ensure => directory,
+ owner => 'root',
+ group => 'adm',
+ mode => '0750';
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/hosts.pp b/puppet/modules/site_config/manifests/hosts.pp
new file mode 100644
index 00000000..878b6af0
--- /dev/null
+++ b/puppet/modules/site_config/manifests/hosts.pp
@@ -0,0 +1,44 @@
+class site_config::hosts() {
+ $hosts = hiera('hosts', false)
+
+ # calculate all the hostname aliases that might be used
+ $hostname = hiera('name')
+ $domain_hash = hiera('domain', {})
+ $dns = hiera('dns', {})
+ if $dns['aliases'] == undef {
+ $dns_aliases = []
+ } else {
+ $dns_aliases = $dns['aliases']
+ }
+ $my_hostnames = unique(concat(
+ [$domain_hash['full'], $hostname, $domain_hash['internal']], $dns_aliases
+ ))
+
+ file { '/etc/hostname':
+ ensure => present,
+ content => $hostname
+ }
+
+ exec { "/bin/hostname ${hostname}":
+ subscribe => [ File['/etc/hostname'], File['/etc/hosts'] ],
+ refreshonly => true;
+ }
+
+ # we depend on reliable hostnames from /etc/hosts for the stunnel services
+ # so restart stunnel service when /etc/hosts is modified
+ # because this is done in an early stage, the stunnel module may not
+ # have been deployed and will not be available for overriding, so
+ # this is handled in an unorthodox manner
+ exec { '/etc/init.d/stunnel4 restart':
+ subscribe => File['/etc/hosts'],
+ refreshonly => true,
+ onlyif => 'test -f /etc/init.d/stunnel4';
+ }
+
+ file { '/etc/hosts':
+ content => template('site_config/hosts'),
+ mode => '0644',
+ owner => root,
+ group => root;
+ }
+}
diff --git a/puppet/modules/site_config/manifests/initial_firewall.pp b/puppet/modules/site_config/manifests/initial_firewall.pp
new file mode 100644
index 00000000..93cfb847
--- /dev/null
+++ b/puppet/modules/site_config/manifests/initial_firewall.pp
@@ -0,0 +1,64 @@
+class site_config::initial_firewall {
+
+ # This class is intended to setup an initial firewall, before shorewall is
+ # configured. The purpose of this is for the rare case where shorewall fails
+ # to start, we should not expose services to the public.
+
+ $ssh_config = hiera('ssh')
+ $ssh_port = $ssh_config['port']
+
+ package { 'iptables':
+ ensure => present
+ }
+
+ file {
+ # This firewall enables ssh access, dns lookups and web lookups (for
+ # package installation) but otherwise restricts all outgoing and incoming
+ # ports
+ '/etc/network/ipv4firewall_up.rules':
+ content => template('site_config/ipv4firewall_up.rules.erb'),
+ owner => root,
+ group => 0,
+ mode => '0644';
+
+    # This firewall denies all ipv6 traffic - we will need to change this
+ # when we begin to support ipv6
+ '/etc/network/ipv6firewall_up.rules':
+ content => template('site_config/ipv6firewall_up.rules.erb'),
+ owner => root,
+ group => 0,
+ mode => '0644';
+
+ # Run the iptables-restore in if-pre-up so that the network is locked down
+ # until the correct interfaces and ips are connected
+ '/etc/network/if-pre-up.d/ipv4tables':
+ content => "#!/bin/sh\n/sbin/iptables-restore < /etc/network/ipv4firewall_up.rules\n",
+ owner => root,
+ group => 0,
+ mode => '0744';
+
+ # Same as above for IPv6
+ '/etc/network/if-pre-up.d/ipv6tables':
+ content => "#!/bin/sh\n/sbin/ip6tables-restore < /etc/network/ipv6firewall_up.rules\n",
+ owner => root,
+ group => 0,
+ mode => '0744';
+ }
+
+ # Immediately setup these firewall rules, but only if shorewall is not running
+ exec {
+ 'default_ipv4_firewall':
+ command => '/sbin/iptables-restore < /etc/network/ipv4firewall_up.rules',
+ logoutput => true,
+ unless => 'test -x /etc/init.d/shorewall && /etc/init.d/shorewall status',
+ subscribe => File['/etc/network/ipv4firewall_up.rules'],
+ require => File['/etc/network/ipv4firewall_up.rules'];
+
+ 'default_ipv6_firewall':
+ command => '/sbin/ip6tables-restore < /etc/network/ipv6firewall_up.rules',
+ logoutput => true,
+ unless => 'test -x /etc/init.d/shorewall6 && /etc/init.d/shorewall6 status',
+ subscribe => File['/etc/network/ipv6firewall_up.rules'],
+ require => File['/etc/network/ipv6firewall_up.rules'];
+ }
+}
diff --git a/puppet/modules/site_config/manifests/packages.pp b/puppet/modules/site_config/manifests/packages.pp
new file mode 100644
index 00000000..140189a4
--- /dev/null
+++ b/puppet/modules/site_config/manifests/packages.pp
@@ -0,0 +1,32 @@
+# install default packages and remove unwanted packages
+class site_config::packages {
+
+
+ # base set of packages that we want to have installed everywhere
+ package { [ 'etckeeper', 'screen', 'less', 'ntp' ]:
+ ensure => installed,
+ }
+
+ # base set of packages that we want to remove everywhere
+ package { [
+ 'acpi', 'build-essential',
+ 'cpp', 'cpp-4.6', 'cpp-4.7', 'cpp-4.8', 'cpp-4.9',
+ 'eject', 'ftp',
+ 'g++', 'g++-4.6', 'g++-4.7', 'g++-4.8', 'g++-4.9',
+ 'gcc', 'gcc-4.6', 'gcc-4.7', 'gcc-4.8', 'gcc-4.9',
+ 'laptop-detect', 'libc6-dev', 'libssl-dev', 'lpr', 'make',
+ 'pppconfig', 'pppoe', 'pump', 'qstat',
+ 'samba-common', 'samba-common-bin', 'smbclient',
+ 'tcl8.5', 'tk8.5', 'os-prober', 'unzip', 'xauth', 'x11-common',
+ 'x11-utils', 'xterm' ]:
+ ensure => purged;
+ }
+
+ # leave a few packages installed on local environments
+ # vagrant i.e. needs them for mounting shared folders
+ if $::site_config::params::environment != 'local' {
+ package { [ 'nfs-common', 'nfs-kernel-server', 'rpcbind', 'portmap' ]:
+ ensure => purged;
+ }
+ }
+}
diff --git a/puppet/modules/site_config/manifests/packages/build_essential.pp b/puppet/modules/site_config/manifests/packages/build_essential.pp
new file mode 100644
index 00000000..2b3e13b9
--- /dev/null
+++ b/puppet/modules/site_config/manifests/packages/build_essential.pp
@@ -0,0 +1,28 @@
+#
+# include this whenever you want to ensure build-essential package and related compilers are installed.
+#
+class site_config::packages::build_essential inherits ::site_config::packages {
+
+ # NICKSERVER CODE NOTE: in order to support TLS, libssl-dev must be installed
+ # before EventMachine gem is built/installed.
+ Package[ 'gcc', 'make', 'g++', 'cpp', 'libssl-dev', 'libc6-dev' ] {
+ ensure => present
+ }
+
+ case $::operatingsystemrelease {
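+    # Debian 8 (jessie) ships gcc/g++/cpp 4.9, Debian 7 (wheezy) ships 4.7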
+ /^8.*/: {
+ Package[ 'gcc-4.9','g++-4.9', 'cpp-4.9' ] {
+ ensure => present
+ }
+ }
+
+ /^7.*/: {
+ Package[ 'gcc-4.7','g++-4.7', 'cpp-4.7' ] {
+ ensure => present
+ }
+ }
+
+ default: { }
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/packages/gnutls.pp b/puppet/modules/site_config/manifests/packages/gnutls.pp
new file mode 100644
index 00000000..b1f17480
--- /dev/null
+++ b/puppet/modules/site_config/manifests/packages/gnutls.pp
@@ -0,0 +1,5 @@
+class site_config::packages::gnutls {
+
+ package { 'gnutls-bin': ensure => installed }
+
+}
diff --git a/puppet/modules/site_config/manifests/params.pp b/puppet/modules/site_config/manifests/params.pp
new file mode 100644
index 00000000..012b3ce0
--- /dev/null
+++ b/puppet/modules/site_config/manifests/params.pp
@@ -0,0 +1,35 @@
+class site_config::params {
+
+ $ip_address = hiera('ip_address')
+ $ip_address_interface = getvar("interface_${ip_address}")
+ $ec2_local_ipv4_interface = getvar("interface_${::ec2_local_ipv4}")
+ $environment = hiera('environment', undef)
+
+
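+  # interface detection precedence: local/vagrant environment (eth1),
+  # explicit 'interface' hiera setting, interface holding ip_address,
+  # interface holding the EC2 local IPv4, then eth0 as a last resort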
+ if $environment == 'local' {
+ $interface = 'eth1'
+ include site_config::packages::build_essential
+ }
+ elsif hiera('interface','') != '' {
+ $interface = hiera('interface')
+ }
+ elsif $ip_address_interface != '' {
+ $interface = $ip_address_interface
+ }
+ elsif $ec2_local_ipv4_interface != '' {
+ $interface = $ec2_local_ipv4_interface
+ }
+ elsif $::interfaces =~ /eth0/ {
+ $interface = 'eth0'
+ }
+ else {
+ fail("unable to determine a valid interface, please set a valid interface for this node in nodes/${::hostname}.json")
+ }
+
+ $ca_name = 'leap_ca'
+ $client_ca_name = 'leap_client_ca'
+ $ca_bundle_name = 'leap_ca_bundle'
+ $cert_name = 'leap'
+ $commercial_ca_name = 'leap_commercial_ca'
+ $commercial_cert_name = 'leap_commercial'
+}
diff --git a/puppet/modules/site_config/manifests/remove.pp b/puppet/modules/site_config/manifests/remove.pp
new file mode 100644
index 00000000..443df9c2
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove.pp
@@ -0,0 +1,11 @@
+# remove leftovers from previous deploys
+class site_config::remove {
+ include site_config::remove::files
+
+ case $::operatingsystemrelease {
+ /^8.*/: {
+ include site_config::remove::jessie
+ }
+ default: { }
+ }
+}
diff --git a/puppet/modules/site_config/manifests/remove/bigcouch.pp b/puppet/modules/site_config/manifests/remove/bigcouch.pp
new file mode 100644
index 00000000..3535c3c1
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove/bigcouch.pp
@@ -0,0 +1,42 @@
+# remove bigcouch leftovers from previous installations
+class site_config::remove::bigcouch {
+
+ # Don't use check_mk logwatch to watch bigcouch logs anymore
+ # see https://leap.se/code/issues/7375 for more details
+ file { '/etc/check_mk/logwatch.d/bigcouch.cfg':
+ ensure => absent,
+ notify => [
+ Exec['remove_bigcouch_logwatch_stateline']
+ ]
+ }
+
+ exec { 'remove_bigcouch_logwatch_stateline':
+ command => "sed -i '/bigcouch.log/d' /etc/check_mk/logwatch.state",
+ refreshonly => true,
+ }
+
+ cron { 'compact_all_shards':
+ ensure => absent
+ }
+
+
+ exec { 'kill_bigcouch_stunnel_procs':
+ refreshonly => true,
+ command => '/usr/bin/pkill -f "/usr/bin/stunnel4 /etc/stunnel/(ednp|epmd)_server.conf"'
+ }
+
+ # 'tidy' doesn't notify other resources, so we need to use file here instead
+ # see https://tickets.puppetlabs.com/browse/PUP-6021
+ file {
+ [ '/etc/stunnel/ednp_server.conf', '/etc/stunnel/epmd_server.conf']:
+ ensure => absent,
+ # notifying Service[stunnel] doesn't work here because the config
+ # files contain the pid of the procs to stop/start.
+ # If we remove the config, and restart stunnel then it will only
+ # stop/start the procs for which config files are found and the stale
+ # service will continue to run.
+ # So we simply kill them.
+ notify => Exec['kill_bigcouch_stunnel_procs']
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/remove/files.pp b/puppet/modules/site_config/manifests/remove/files.pp
new file mode 100644
index 00000000..41d6462e
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove/files.pp
@@ -0,0 +1,56 @@
+#
+# Sometimes when we upgrade the platform, we need to ensure that files that
+# the platform previously created will get removed.
+#
+# These file removals don't need to be kept forever: we only need to remove
+# files that are present in the prior platform release.
+#
+# We can assume that every node is upgraded from the previous platform
+# release.
+#
+
+class site_config::remove::files {
+
+ # Platform 0.8 removals
+ tidy {
+ '/etc/default/leap_mx':;
+ '/etc/logrotate.d/mx':;
+ '/etc/rsyslog.d/50-mx.conf':;
+ '/etc/apt/preferences.d/openvpn':;
+ '/etc/apt/sources.list.d/secondary.list.disabled.list':;
+ }
+
+ #
+ # Platform 0.7 removals
+ #
+
+ tidy {
+ '/etc/rsyslog.d/99-tapicero.conf':;
+ '/etc/rsyslog.d/01-webapp.conf':;
+ '/etc/rsyslog.d/50-stunnel.conf':;
+ '/etc/logrotate.d/stunnel':;
+ '/var/log/stunnel4/stunnel.log':;
+ 'leap_mx':
+ path => '/var/log/',
+ recurse => true,
+ matches => ['leap_mx*', 'mx.log.[1-5]', 'mx.log.[6-9](.gz)?',
+ 'mx.log.[0-9][0-9](.gz)?'];
+ '/srv/leap/webapp/public/provider.json':;
+ '/srv/leap/couchdb/designs/tmp_users':
+ recurse => true,
+ rmdirs => true;
+ '/etc/leap/soledad-server.conf':;
+ '/var/log/leap/openvpn.log':;
+ '/etc/rsyslog.d/50-openvpn.conf':;
+ }
+
+  # leap_mx logged to /var/log/leap_mx.log in the past
+ # we need to use a dumb exec here because file_line doesn't
+ # allow removing lines that match a regex in the current version
+ # of stdlib, see https://tickets.puppetlabs.com/browse/MODULES-1903
+ exec { 'rm_old_leap_mx_log_destination':
+ command => "/bin/sed -i '/leap_mx.log/d' /etc/check_mk/logwatch.state",
+ onlyif => "/bin/grep -qe 'leap_mx.log' /etc/check_mk/logwatch.state"
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/remove/jessie.pp b/puppet/modules/site_config/manifests/remove/jessie.pp
new file mode 100644
index 00000000..e9497baf
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove/jessie.pp
@@ -0,0 +1,14 @@
+# remove possible leftovers after upgrading from wheezy to jessie
+class site_config::remove::jessie {
+
+ tidy {
+ '/etc/apt/preferences.d/rsyslog_anon_depends':
+ notify => Exec['apt_updated'];
+ }
+
+ apt::preferences_snippet {
+ [ 'facter', 'obfsproxy', 'python-twisted', 'unbound' ]:
+ ensure => absent;
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/remove/monitoring.pp b/puppet/modules/site_config/manifests/remove/monitoring.pp
new file mode 100644
index 00000000..18e2949b
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove/monitoring.pp
@@ -0,0 +1,13 @@
+# remove leftovers on monitoring nodes
+class site_config::remove::monitoring {
+
+ # Remove check_mk loggwatch spoolfiles for
+ # tapicero and bigcouch
+ tidy {
+ 'remove_logwatch_spoolfiles':
+ path => '/var/lib/check_mk/logwatch',
+ recurse => true,
+ matches => [ '*tapicero.log', '*bigcouch.log'];
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/remove/tapicero.pp b/puppet/modules/site_config/manifests/remove/tapicero.pp
new file mode 100644
index 00000000..07c3c6c6
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove/tapicero.pp
@@ -0,0 +1,72 @@
+# remove tapicero leftovers from previous deploys on couchdb nodes
+class site_config::remove::tapicero {
+
+ ensure_packages('curl')
+
+ # remove tapicero couchdb user
+ $couchdb_config = hiera('couch')
+ $couchdb_mode = $couchdb_config['mode']
+
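+  # bigcouch (multimaster) exposes the node-local API on port 5986,
+  # plain couchdb listens on the standard port 5984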
+ if $couchdb_mode == 'multimaster'
+ {
+ $port = 5986
+ } else {
+ $port = 5984
+ }
+
+ exec { 'remove_couchdb_user':
+ onlyif => "/usr/bin/curl -s 127.0.0.1:${port}/_users/org.couchdb.user:tapicero | grep -qv 'not_found'",
+ command => "/usr/local/bin/couch-doc-update --host 127.0.0.1:${port} --db _users --id org.couchdb.user:tapicero --delete",
+ require => Package['curl']
+ }
+
+
+ exec { 'kill_tapicero':
+ onlyif => '/usr/bin/test -s /var/run/tapicero.pid',
+ command => '/usr/bin/pkill --pidfile /var/run/tapicero.pid'
+ }
+
+ user { 'tapicero':
+ ensure => absent;
+ }
+
+ group { 'tapicero':
+ ensure => absent,
+ require => User['tapicero'];
+ }
+
+ tidy {
+ '/srv/leap/tapicero':
+ recurse => true,
+ require => [ Exec['kill_tapicero'] ];
+ '/var/lib/leap/tapicero':
+ require => [ Exec['kill_tapicero'] ];
+ '/var/run/tapicero':
+ require => [ Exec['kill_tapicero'] ];
+ '/etc/leap/tapicero.yaml':
+ require => [ Exec['kill_tapicero'] ];
+ '/etc/init.d/tapicero':
+ require => [ Exec['kill_tapicero'] ];
+ 'tapicero_logs':
+ path => '/var/log/leap',
+ recurse => true,
+ matches => 'tapicero*',
+ require => [ Exec['kill_tapicero'] ];
+ '/etc/check_mk/logwatch.d/tapicero.cfg':;
+ }
+
+ # remove local nagios plugin checks via mrpe
+ augeas {
+ 'Tapicero_Procs':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => 'rm /files/etc/check_mk/mrpe.cfg/Tapicero_Procs',
+ require => File['/etc/check_mk/mrpe.cfg'];
+ 'Tapicero_Heartbeat':
+ incl => '/etc/check_mk/mrpe.cfg',
+ lens => 'Spacevars.lns',
+ changes => 'rm Tapicero_Heartbeat',
+ require => File['/etc/check_mk/mrpe.cfg'];
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/remove/webapp.pp b/puppet/modules/site_config/manifests/remove/webapp.pp
new file mode 100644
index 00000000..58f59815
--- /dev/null
+++ b/puppet/modules/site_config/manifests/remove/webapp.pp
@@ -0,0 +1,7 @@
+# remove leftovers on webapp nodes
+class site_config::remove::webapp {
+ tidy {
+ '/etc/apache/sites-enabled/leap_webapp.conf':
+ notify => Service['apache'];
+ }
+}
diff --git a/puppet/modules/site_config/manifests/resolvconf.pp b/puppet/modules/site_config/manifests/resolvconf.pp
new file mode 100644
index 00000000..09f0b405
--- /dev/null
+++ b/puppet/modules/site_config/manifests/resolvconf.pp
@@ -0,0 +1,14 @@
+class site_config::resolvconf {
+
+ $domain_public = $site_config::default::domain_hash['full_suffix']
+
+ class { '::resolvconf':
+ domain => $domain_public,
+ search => $domain_public,
+ nameservers => [
+ '127.0.0.1 # local caching-only, unbound',
+ '85.214.20.141 # Digitalcourage, a german privacy organisation: (https://en.wikipedia.org/wiki/Digitalcourage)',
+ '172.81.176.146 # OpenNIC (https://servers.opennicproject.org/edit.php?srv=ns1.tor.ca.dns.opennic.glue)'
+ ]
+ }
+}
diff --git a/puppet/modules/site_config/manifests/ruby.pp b/puppet/modules/site_config/manifests/ruby.pp
new file mode 100644
index 00000000..5c13233d
--- /dev/null
+++ b/puppet/modules/site_config/manifests/ruby.pp
@@ -0,0 +1,8 @@
+# install ruby, rubygems and bundler
+# configure ruby settings common to all servers
+class site_config::ruby {
+ Class[Ruby] -> Class[rubygems] -> Class[bundler::install]
+ class { '::ruby': }
+ class { 'bundler::install': install_method => 'package' }
+ include rubygems
+}
diff --git a/puppet/modules/site_config/manifests/ruby/dev.pp b/puppet/modules/site_config/manifests/ruby/dev.pp
new file mode 100644
index 00000000..2b0b106d
--- /dev/null
+++ b/puppet/modules/site_config/manifests/ruby/dev.pp
@@ -0,0 +1,8 @@
+# install ruby dev packages needed for building some gems
+class site_config::ruby::dev {
+ include site_config::ruby
+ include ::ruby::devel
+
+ # building gems locally probably requires build-essential and gcc:
+ include site_config::packages::build_essential
+}
diff --git a/puppet/modules/site_config/manifests/setup.pp b/puppet/modules/site_config/manifests/setup.pp
new file mode 100644
index 00000000..82dfe76d
--- /dev/null
+++ b/puppet/modules/site_config/manifests/setup.pp
@@ -0,0 +1,50 @@
+# common things to set up on every node
+# This is a leftover from the past, when we did two puppet runs,
+# one after another. We should consolidate this into site_config::default
+# in the future.
+class site_config::setup {
+ tag 'leap_base'
+
+ #
+ # this is applied before each run of site.pp
+ #
+
+ Exec { path => '/usr/bin:/usr/sbin/:/bin:/sbin:/usr/local/bin:/usr/local/sbin' }
+
+ include site_config::params
+
+ include concat::setup
+ include stdlib
+
+ # configure /etc/hosts
+ class { 'site_config::hosts': }
+
+ include site_config::initial_firewall
+
+ include site_apt
+
+ package { 'facter':
+ ensure => latest
+ }
+
+ # if squid_deb_proxy_client is set to true, install and configure
+ # squid_deb_proxy_client for apt caching
+ if hiera('squid_deb_proxy_client', false) {
+ include site_squid_deb_proxy::client
+ }
+
+  # shorewall is installed/half-configured during setup.pp (Bug #3871)
+  # we need to include shorewall::interface{eth0} in setup.pp so
+  # packages can be installed during the main puppet run, even before
+  # shorewall is configured completely
+ if ( $::site_config::params::environment == 'local' ) {
+ include site_config::vagrant
+ }
+
+  # if the class site_custom::setup exists, include it.
+  # this gives users the possibility to define custom puppet recipes
+ if defined( '::site_custom::setup') {
+ include ::site_custom::setup
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/shell.pp b/puppet/modules/site_config/manifests/shell.pp
new file mode 100644
index 00000000..5b8c025d
--- /dev/null
+++ b/puppet/modules/site_config/manifests/shell.pp
@@ -0,0 +1,22 @@
+class site_config::shell {
+
+ file {
+ '/etc/profile.d/leap_path.sh':
+ content => 'PATH=$PATH:/srv/leap/bin',
+ mode => '0644',
+ owner => root,
+ group => root;
+ }
+
+ ##
+ ## XTERM TITLE
+ ##
+
+ file { '/etc/profile.d/xterm-title.sh':
+ source => 'puppet:///modules/site_config/xterm-title.sh',
+ owner => root,
+ group => 0,
+ mode => '0644';
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/slow.pp b/puppet/modules/site_config/manifests/slow.pp
new file mode 100644
index 00000000..8e9b7035
--- /dev/null
+++ b/puppet/modules/site_config/manifests/slow.pp
@@ -0,0 +1,10 @@
+# this class is run by default, but can be excluded
+# for testing purposes by calling "leap deploy" with
+# the "--fast" parameter
+class site_config::slow {
+ tag 'leap_slow'
+
+ include site_config::default
+ include apt::update
+ class { 'site_apt::dist_upgrade': }
+}
diff --git a/puppet/modules/site_config/manifests/sysctl.pp b/puppet/modules/site_config/manifests/sysctl.pp
new file mode 100644
index 00000000..99f75123
--- /dev/null
+++ b/puppet/modules/site_config/manifests/sysctl.pp
@@ -0,0 +1,8 @@
+class site_config::sysctl {
+
+ sysctl::config {
+ 'net.ipv4.ip_nonlocal_bind':
+ value => 1,
+ comment => 'Allow applications to bind to an address when link is down (see https://leap.se/code/issues/4506)'
+ }
+}
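
For reference, the effect of the sysctl::config resource above can be checked (or applied by hand for a quick test) with the stock sysctl CLI; a minimal sketch, not part of the manifest:

```bash
# read the current value; 1 means applications may bind to non-local addresses
sysctl net.ipv4.ip_nonlocal_bind
# set it immediately for a one-off test (the puppet resource keeps it persistent)
sysctl -w net.ipv4.ip_nonlocal_bind=1
```
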
diff --git a/puppet/modules/site_config/manifests/syslog.pp b/puppet/modules/site_config/manifests/syslog.pp
new file mode 100644
index 00000000..591e0601
--- /dev/null
+++ b/puppet/modules/site_config/manifests/syslog.pp
@@ -0,0 +1,62 @@
+# configure rsyslog on all nodes
+class site_config::syslog {
+
+ # only pin rsyslog packages to backports on wheezy
+ case $::operatingsystemrelease {
+ /^7.*/: {
+ include ::site_apt::preferences::rsyslog
+ }
+ # on jessie+ systems, systemd and journald are enabled,
+ # and journald logs IP addresses, so we need to disable
+    # it until a solution is found (#7863):
+ # https://github.com/systemd/systemd/issues/2447
+ default: {
+ include ::journald
+ augeas {
+ 'disable_journald':
+ incl => '/etc/systemd/journald.conf',
+ lens => 'Puppet.lns',
+ changes => 'set /files/etc/systemd/journald.conf/Journal/Storage \'none\'',
+ notify => Service['systemd-journald'];
+ }
+ }
+ }
+
+ class { '::rsyslog::client':
+ log_remote => false,
+ log_local => true,
+ custom_config => 'site_rsyslog/client.conf.erb'
+ }
+
+ rsyslog::snippet { '00-anonymize_logs':
+ content => '$ModLoad mmanon
+action(type="mmanon" ipv4.bits="32" mode="rewrite")'
+ }
+
+ augeas {
+ 'logrotate_leap_deploy':
+ context => '/files/etc/logrotate.d/leap_deploy/rule',
+ changes => [
+ 'set file /var/log/leap/deploy.log',
+ 'set rotate 5',
+ 'set size 1M',
+ 'set compress compress',
+ 'set missingok missingok',
+ 'set copytruncate copytruncate' ];
+
+ # NOTE:
+    # the puppet_command script requires the option delaycompress
+    # to be set on the summary log file.
+
+ 'logrotate_leap_deploy_summary':
+ context => '/files/etc/logrotate.d/leap_deploy_summary/rule',
+ changes => [
+ 'set file /var/log/leap/deploy-summary.log',
+ 'set rotate 5',
+ 'set size 100k',
+ 'set delaycompress delaycompress',
+ 'set compress compress',
+ 'set missingok missingok',
+ 'set copytruncate copytruncate' ]
+ }
+}
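
Assuming the logrotate Augeas lens serializes the rule in the usual stanza form, the first resource above should leave /etc/logrotate.d/leap_deploy looking roughly like this (illustrative rendering, not copied from a real node):

```
/var/log/leap/deploy.log {
  rotate 5
  size 1M
  compress
  missingok
  copytruncate
}
```
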
diff --git a/puppet/modules/site_config/manifests/vagrant.pp b/puppet/modules/site_config/manifests/vagrant.pp
new file mode 100644
index 00000000..8f50b305
--- /dev/null
+++ b/puppet/modules/site_config/manifests/vagrant.pp
@@ -0,0 +1,11 @@
+class site_config::vagrant {
+ # class for vagrant nodes
+
+ include site_shorewall::defaults
+  # eth0 on vagrant nodes is the uplink interface
+ shorewall::interface { 'eth0':
+ zone => 'net',
+ options => 'tcpflags,blacklist,nosmurfs';
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/x509/ca.pp b/puppet/modules/site_config/manifests/x509/ca.pp
new file mode 100644
index 00000000..2880ecaf
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/ca.pp
@@ -0,0 +1,11 @@
+class site_config::x509::ca {
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $ca = $x509['ca_cert']
+
+ x509::ca { $site_config::params::ca_name:
+ content => $ca
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/ca_bundle.pp b/puppet/modules/site_config/manifests/x509/ca_bundle.pp
new file mode 100644
index 00000000..5808e29e
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/ca_bundle.pp
@@ -0,0 +1,17 @@
+class site_config::x509::ca_bundle {
+
+ # CA bundle -- we want to have the possibility of allowing multiple CAs.
+  # For now, the reason is to transition to using a client CA. In the future,
+ # we will want to be able to smoothly phase out one CA and phase in another.
+ # I tried "--capath" for this, but it did not work.
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $ca = $x509['ca_cert']
+ $client_ca = $x509['client_ca_cert']
+
+ x509::ca { $site_config::params::ca_bundle_name:
+ content => "${ca}${client_ca}"
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/cert.pp b/puppet/modules/site_config/manifests/x509/cert.pp
new file mode 100644
index 00000000..7e5a36b9
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/cert.pp
@@ -0,0 +1,12 @@
+class site_config::x509::cert {
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $cert = $x509['cert']
+
+ x509::cert { $site_config::params::cert_name:
+ content => $cert
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/x509/client_ca/ca.pp b/puppet/modules/site_config/manifests/x509/client_ca/ca.pp
new file mode 100644
index 00000000..3fbafa98
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/client_ca/ca.pp
@@ -0,0 +1,16 @@
+class site_config::x509::client_ca::ca {
+
+ ##
+ ## This is for the special CA that is used exclusively for generating
+ ## client certificates by the webapp.
+ ##
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $cert = $x509['client_ca_cert']
+
+ x509::ca { $site_config::params::client_ca_name:
+ content => $cert
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/client_ca/key.pp b/puppet/modules/site_config/manifests/x509/client_ca/key.pp
new file mode 100644
index 00000000..0b537e76
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/client_ca/key.pp
@@ -0,0 +1,16 @@
+class site_config::x509::client_ca::key {
+
+ ##
+ ## This is for the special CA that is used exclusively for generating
+ ## client certificates by the webapp.
+ ##
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $key = $x509['client_ca_key']
+
+ x509::key { $site_config::params::client_ca_name:
+ content => $key
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/commercial/ca.pp b/puppet/modules/site_config/manifests/x509/commercial/ca.pp
new file mode 100644
index 00000000..c76a9dbb
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/commercial/ca.pp
@@ -0,0 +1,11 @@
+class site_config::x509::commercial::ca {
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $ca = $x509['commercial_ca_cert']
+
+ x509::ca { $site_config::params::commercial_ca_name:
+ content => $ca
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/commercial/cert.pp b/puppet/modules/site_config/manifests/x509/commercial/cert.pp
new file mode 100644
index 00000000..9dd6ffcd
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/commercial/cert.pp
@@ -0,0 +1,15 @@
+class site_config::x509::commercial::cert {
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $cert = $x509['commercial_cert']
+ $ca = $x509['commercial_ca_cert']
+
+ $cafile = "${cert}\n${ca}"
+
+ x509::cert { $site_config::params::commercial_cert_name:
+ content => $cafile
+ }
+
+}
diff --git a/puppet/modules/site_config/manifests/x509/commercial/key.pp b/puppet/modules/site_config/manifests/x509/commercial/key.pp
new file mode 100644
index 00000000..2be439fd
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/commercial/key.pp
@@ -0,0 +1,11 @@
+class site_config::x509::commercial::key {
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $key = $x509['commercial_key']
+
+ x509::key { $site_config::params::commercial_cert_name:
+ content => $key
+ }
+}
diff --git a/puppet/modules/site_config/manifests/x509/key.pp b/puppet/modules/site_config/manifests/x509/key.pp
new file mode 100644
index 00000000..448dc6a6
--- /dev/null
+++ b/puppet/modules/site_config/manifests/x509/key.pp
@@ -0,0 +1,11 @@
+class site_config::x509::key {
+
+ include ::site_config::params
+
+ $x509 = hiera('x509')
+ $key = $x509['key']
+
+ x509::key { $site_config::params::cert_name:
+ content => $key
+ }
+}
diff --git a/puppet/modules/site_config/templates/hosts b/puppet/modules/site_config/templates/hosts
new file mode 100644
index 00000000..d62cbc3f
--- /dev/null
+++ b/puppet/modules/site_config/templates/hosts
@@ -0,0 +1,19 @@
+# This file is managed by puppet, any changes will be overwritten!
+
+127.0.0.1 localhost
+127.0.1.1 <%= @my_hostnames.join(' ') %>
+
+<%- if @hosts then -%>
+<% @hosts.keys.sort.each do |name| -%>
+<%- props = @hosts[name] -%>
+<%- aliases = props["aliases"] ? props["aliases"].join(' ') : nil -%>
+<%= [props["ip_address"], props["domain_full"], props["domain_internal"], aliases, name].compact.uniq.join(' ') %>
+<% end -%>
+<% end -%>
+
+# The following lines are desirable for IPv6 capable hosts
+::1 ip6-localhost ip6-loopback
+fe00::0 ip6-localnet
+ff00::0 ip6-mcastprefix
+ff02::1 ip6-allnodes
+ff02::2 ip6-allrouters
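
For illustration, with @my_hostnames = ['node1.example.org', 'node1'] and a single @hosts entry for a couch node (all names and addresses hypothetical), the interesting lines of the rendered file would come out roughly as:

```
127.0.1.1 node1.example.org node1
10.0.1.2 couch1.example.org couch1.example.i couch1
```

The compact.uniq.join call is what drops nil aliases and duplicate names before joining the remaining tokens with spaces.
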
diff --git a/puppet/modules/site_config/templates/ipv4firewall_up.rules.erb b/puppet/modules/site_config/templates/ipv4firewall_up.rules.erb
new file mode 100644
index 00000000..b0c2b7ad
--- /dev/null
+++ b/puppet/modules/site_config/templates/ipv4firewall_up.rules.erb
@@ -0,0 +1,14 @@
+# Generated by iptables-save v1.4.14 on Tue Aug 20 14:40:40 2013
+*filter
+:INPUT DROP [0:0]
+:FORWARD DROP [0:0]
+:OUTPUT ACCEPT [0:0]
+-A INPUT -i lo -j ACCEPT
+-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
+-A INPUT -p tcp -m state --state NEW,ESTABLISHED --dport 22 -j ACCEPT
+-A INPUT -p tcp -m state --state NEW,ESTABLISHED --dport <%= @ssh_port %> -j ACCEPT
+-A INPUT -p udp -m udp --sport 53 -j ACCEPT
+-A INPUT -p icmp -m icmp --icmp-type 8 -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
+-A INPUT -p icmp -m icmp --icmp-type 0 -m state --state RELATED,ESTABLISHED -j ACCEPT
+-A INPUT -m limit --limit 5/min -j LOG --log-prefix "iptables denied: " --log-level 7
+COMMIT
diff --git a/puppet/modules/site_config/templates/ipv6firewall_up.rules.erb b/puppet/modules/site_config/templates/ipv6firewall_up.rules.erb
new file mode 100644
index 00000000..e2c92524
--- /dev/null
+++ b/puppet/modules/site_config/templates/ipv6firewall_up.rules.erb
@@ -0,0 +1,8 @@
+# Generated by ip6tables-save v1.4.20 on Tue Aug 20 12:19:43 2013
+*filter
+:INPUT DROP [24:1980]
+:FORWARD DROP [0:0]
+:OUTPUT DROP [14:8030]
+-A OUTPUT -j REJECT --reject-with icmp6-port-unreachable
+COMMIT
+# Completed on Tue Aug 20 12:19:43 2013
diff --git a/puppet/modules/site_config/templates/reload_dhclient.erb b/puppet/modules/site_config/templates/reload_dhclient.erb
new file mode 100644
index 00000000..075828b7
--- /dev/null
+++ b/puppet/modules/site_config/templates/reload_dhclient.erb
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+# Path to the dhclient PID file for the configured interface
+PIDFILE='/var/run/dhclient.<%= scope.lookupvar('site_config::params::interface') %>.pid'
+
+# Capture how dhclient is currently running so we can relaunch it
+dhclient=`/bin/ps --no-headers --pid $(cat $PIDFILE) -f | /usr/bin/awk '{for(i=8;i<=NF;++i) printf("%s ", $i) }'`
+
+# Kill the current dhclient
+/usr/bin/pkill -F $PIDFILE
+
+# Restart dhclient with the arguments it had previously
+$dhclient
diff --git a/puppet/modules/site_couchdb/files/couchdb_scripts_defaults.conf b/puppet/modules/site_couchdb/files/couchdb_scripts_defaults.conf
new file mode 100644
index 00000000..1565e1a1
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/couchdb_scripts_defaults.conf
@@ -0,0 +1,4 @@
+# space-separated list of excluded DBs for dumping
+# sourced by couchdb_dumpall.sh
+EXCLUDE_DBS='sessions tokens'
+
diff --git a/puppet/modules/site_couchdb/files/designs/Readme.md b/puppet/modules/site_couchdb/files/designs/Readme.md
new file mode 100644
index 00000000..983f629f
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/Readme.md
@@ -0,0 +1,14 @@
+This directory contains design documents for the leap platform.
+
+They need to be uploaded to the corresponding couch databases in order to
+query those databases in certain ways.
+
+Each subdirectory corresponds to a couch database and contains the design
+documents that need to be added to that particular database.
+
+Here's an example of how to upload the users design document:
+```bash
+HOST="http://localhost:5984"
+curl -X PUT $HOST/users/_design/User --data @users/User.json
+
+```
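
By the same pattern, the three design documents for the shared database could be uploaded like this (a sketch reusing the HOST variable from the example above):

```bash
curl -X PUT $HOST/shared/_design/docs --data @shared/docs.json
curl -X PUT $HOST/shared/_design/syncs --data @shared/syncs.json
curl -X PUT $HOST/shared/_design/transactions --data @shared/transactions.json
```
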
diff --git a/puppet/modules/site_couchdb/files/designs/customers/Customer.json b/puppet/modules/site_couchdb/files/designs/customers/Customer.json
new file mode 100644
index 00000000..1b4bbddd
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/customers/Customer.json
@@ -0,0 +1,18 @@
+{
+ "_id": "_design/Customer",
+ "language": "javascript",
+ "views": {
+ "by_user_id": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Customer') && (doc['user_id'] != null)) {\n emit(doc['user_id'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_braintree_customer_id": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Customer') && (doc['braintree_customer_id'] != null)) {\n emit(doc['braintree_customer_id'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Customer') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "688c401ec0230b75625c176a88fc4a02"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/identities/Identity.json b/puppet/modules/site_couchdb/files/designs/identities/Identity.json
new file mode 100644
index 00000000..b1c567c1
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/identities/Identity.json
@@ -0,0 +1,34 @@
+{
+ "_id": "_design/Identity",
+ "language": "javascript",
+ "views": {
+ "by_address_and_destination": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Identity') && (doc['address'] != null) && (doc['destination'] != null)) {\n emit([doc['address'], doc['destination']], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Identity') {\n emit(doc._id, null);\n }\n }\n"
+ },
+ "cert_fingerprints_by_expiry": {
+ "map": "function(doc) {\n if (doc.type != 'Identity') {\n return;\n }\n if (typeof doc.cert_fingerprints === \"object\") {\n for (fp in doc.cert_fingerprints) {\n if (doc.cert_fingerprints.hasOwnProperty(fp)) {\n emit(doc.cert_fingerprints[fp], fp);\n }\n }\n }\n}\n"
+ },
+ "cert_expiry_by_fingerprint": {
+ "map": "function(doc) {\n if (doc.type != 'Identity') {\n return;\n }\n if (typeof doc.cert_fingerprints === \"object\") {\n for (fp in doc.cert_fingerprints) {\n if (doc.cert_fingerprints.hasOwnProperty(fp)) {\n emit(fp, doc.cert_fingerprints[fp]);\n }\n }\n }\n}\n"
+ },
+ "disabled": {
+ "map": "function(doc) {\n if (doc.type != 'Identity') {\n return;\n }\n if (typeof doc.user_id === \"undefined\") {\n emit(doc._id, 1);\n }\n}\n"
+ },
+ "pgp_key_by_email": {
+ "map": "function(doc) {\n if (doc.type != 'Identity') {\n return;\n }\n if (typeof doc.keys === \"object\") {\n emit(doc.address, doc.keys[\"pgp\"]);\n }\n}\n"
+ },
+ "by_user_id": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Identity') && (doc['user_id'] != null)) {\n emit(doc['user_id'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_address": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Identity') && (doc['address'] != null)) {\n emit(doc['address'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ }
+ },
+ "couchrest-hash": "4a774c3f56122b655a314670403b27e2"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/invite_codes/InviteCode.json b/puppet/modules/site_couchdb/files/designs/invite_codes/InviteCode.json
new file mode 100644
index 00000000..006c1ea1
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/invite_codes/InviteCode.json
@@ -0,0 +1,22 @@
+{
+ "_id": "_design/InviteCode",
+ "language": "javascript",
+ "views": {
+ "by__id": {
+ "map": " function(doc) {\n if ((doc['type'] == 'InviteCode') && (doc['_id'] != null)) {\n emit(doc['_id'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_invite_code": {
+ "map": " function(doc) {\n if ((doc['type'] == 'InviteCode') && (doc['invite_code'] != null)) {\n emit(doc['invite_code'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_invite_count": {
+ "map": " function(doc) {\n if ((doc['type'] == 'InviteCode') && (doc['invite_count'] != null)) {\n emit(doc['invite_count'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'InviteCode') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "83fb8f504520b4a9c7ddbb7928cd0ce3"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/messages/Message.json b/puppet/modules/site_couchdb/files/designs/messages/Message.json
new file mode 100644
index 00000000..6a48fc4d
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/messages/Message.json
@@ -0,0 +1,18 @@
+{
+ "_id": "_design/Message",
+ "language": "javascript",
+ "views": {
+ "by_user_ids_to_show": {
+ "map": "function (doc) {\n if (doc.type === 'Message' && doc.user_ids_to_show && Array.isArray(doc.user_ids_to_show)) {\n doc.user_ids_to_show.forEach(function (userId) {\n emit(userId, 1);\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "by_user_ids_to_show_and_created_at": {
+ "map": "// not using at moment\n// call with something like Message.by_user_ids_to_show_and_created_at.startkey([user_id, start_date]).endkey([user_id,end_date])\nfunction (doc) {\n if (doc.type === 'Message' && doc.user_ids_to_show && Array.isArray(doc.user_ids_to_show)) {\n doc.user_ids_to_show.forEach(function (userId) {\n emit([userId, doc.created_at], 1);\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Message') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "ba80168e51015d2678cad88fc6c5b986"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/sessions/Session.json b/puppet/modules/site_couchdb/files/designs/sessions/Session.json
new file mode 100644
index 00000000..70202780
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/sessions/Session.json
@@ -0,0 +1,8 @@
+{
+ "views": {
+ "by_expires": {
+ "reduce": "_sum",
+ "map": "function(doc) {\n if(typeof doc.expires !== \"undefined\") {\n emit(doc.expires, 1);\n }\n}\n"
+ }
+ }
+}
diff --git a/puppet/modules/site_couchdb/files/designs/shared/docs.json b/puppet/modules/site_couchdb/files/designs/shared/docs.json
new file mode 100644
index 00000000..004180cd
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/shared/docs.json
@@ -0,0 +1,8 @@
+{
+ "_id": "_design/docs",
+ "views": {
+ "get": {
+ "map": "function(doc) {\n if (doc.u1db_rev) {\n var is_tombstone = true;\n var has_conflicts = false;\n if (doc._attachments) {\n if (doc._attachments.u1db_content)\n is_tombstone = false;\n if (doc._attachments.u1db_conflicts)\n has_conflicts = true;\n }\n emit(doc._id,\n {\n \"couch_rev\": doc._rev,\n \"u1db_rev\": doc.u1db_rev,\n \"is_tombstone\": is_tombstone,\n \"has_conflicts\": has_conflicts,\n }\n );\n }\n}\n"
+ }
+ }
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/shared/syncs.json b/puppet/modules/site_couchdb/files/designs/shared/syncs.json
new file mode 100644
index 00000000..bab5622f
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/shared/syncs.json
@@ -0,0 +1,11 @@
+{
+ "_id": "_design/syncs",
+ "updates": {
+ "put": "function(doc, req){\n if (!doc) {\n doc = {}\n doc['_id'] = 'u1db_sync_log';\n doc['syncs'] = [];\n }\n body = JSON.parse(req.body);\n // remove outdated info\n doc['syncs'] = doc['syncs'].filter(\n function (entry) {\n return entry[0] != body['other_replica_uid'];\n }\n );\n // store u1db rev\n doc['syncs'].push([\n body['other_replica_uid'],\n body['other_generation'],\n body['other_transaction_id']\n ]);\n return [doc, 'ok'];\n}\n\n"
+ },
+ "views": {
+ "log": {
+ "map": "function(doc) {\n if (doc._id == 'u1db_sync_log') {\n if (doc.syncs)\n doc.syncs.forEach(function (entry) {\n emit(entry[0],\n {\n 'known_generation': entry[1],\n 'known_transaction_id': entry[2]\n });\n });\n }\n}\n"
+ }
+ }
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/shared/transactions.json b/puppet/modules/site_couchdb/files/designs/shared/transactions.json
new file mode 100644
index 00000000..106ad46c
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/shared/transactions.json
@@ -0,0 +1,13 @@
+{
+ "_id": "_design/transactions",
+ "lists": {
+ "generation": "function(head, req) {\n var row;\n var rows=[];\n // fetch all rows\n while(row = getRow()) {\n rows.push(row);\n }\n if (rows.length > 0)\n send(JSON.stringify({\n \"generation\": rows.length,\n \"doc_id\": rows[rows.length-1]['id'],\n \"transaction_id\": rows[rows.length-1]['value']\n }));\n else\n send(JSON.stringify({\n \"generation\": 0,\n \"doc_id\": \"\",\n \"transaction_id\": \"\",\n }));\n}\n",
+ "trans_id_for_gen": "function(head, req) {\n var row;\n var rows=[];\n var i = 1;\n var gen = 1;\n if (req.query.gen)\n gen = parseInt(req.query['gen']);\n // fetch all rows\n while(row = getRow())\n rows.push(row);\n if (gen <= rows.length)\n send(JSON.stringify({\n \"generation\": gen,\n \"doc_id\": rows[gen-1]['id'],\n \"transaction_id\": rows[gen-1]['value'],\n }));\n else\n send('{}');\n}\n",
+ "whats_changed": "function(head, req) {\n var row;\n var gen = 1;\n var old_gen = 0;\n if (req.query.old_gen)\n old_gen = parseInt(req.query['old_gen']);\n send('{\"transactions\":[\\n');\n // fetch all rows\n while(row = getRow()) {\n if (gen > old_gen) {\n if (gen > old_gen+1)\n send(',\\n');\n send(JSON.stringify({\n \"generation\": gen,\n \"doc_id\": row[\"id\"],\n \"transaction_id\": row[\"value\"]\n }));\n }\n gen++;\n }\n send('\\n]}');\n}\n"
+ },
+ "views": {
+ "log": {
+ "map": "function(doc) {\n if (doc.u1db_transactions)\n doc.u1db_transactions.forEach(function(t) {\n emit(t[0], // use timestamp as key so the results are ordered\n t[1]); // value is the transaction_id\n });\n}\n"
+ }
+ }
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/tickets/Ticket.json b/puppet/modules/site_couchdb/files/designs/tickets/Ticket.json
new file mode 100644
index 00000000..578f632b
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/tickets/Ticket.json
@@ -0,0 +1,50 @@
+{
+ "_id": "_design/Ticket",
+ "language": "javascript",
+ "views": {
+ "by_updated_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['updated_at'] != null)) {\n emit(doc['updated_at'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_created_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['created_at'] != null)) {\n emit(doc['created_at'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_created_by": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['created_by'] != null)) {\n emit(doc['created_by'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_is_open_and_created_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['is_open'] != null) && (doc['created_at'] != null)) {\n emit([doc['is_open'], doc['created_at']], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_is_open_and_updated_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Ticket') && (doc['is_open'] != null) && (doc['updated_at'] != null)) {\n emit([doc['is_open'], doc['updated_at']], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "by_includes_post_by_and_is_open_and_created_at": {
+ "map": "function(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit([comment.posted_by, doc.is_open, doc.created_at], 1);\n }\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "by_includes_post_by": {
+ "map": "// TODO: This view is only used in tests--should we keep it?\nfunction(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit(comment.posted_by, 1);\n }\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "by_includes_post_by_and_is_open_and_updated_at": {
+ "map": "function(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit([comment.posted_by, doc.is_open, doc.updated_at], 1);\n }\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "by_includes_post_by_and_created_at": {
+ "map": "function(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit([comment.posted_by, doc.created_at], 1);\n }\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "by_includes_post_by_and_updated_at": {
+ "map": "function(doc) {\n var arr = {}\n if (doc['type'] == 'Ticket' && doc.comments) {\n doc.comments.forEach(function(comment){\n if (comment.posted_by && !arr[comment.posted_by]) {\n //don't add duplicates\n arr[comment.posted_by] = true;\n emit([comment.posted_by, doc.updated_at], 1);\n }\n });\n }\n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Ticket') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "b21eaeea8ea66bfda65581b1b7ce06af"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/tokens/Token.json b/puppet/modules/site_couchdb/files/designs/tokens/Token.json
new file mode 100644
index 00000000..b9025f15
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/tokens/Token.json
@@ -0,0 +1,14 @@
+{
+ "_id": "_design/Token",
+ "language": "javascript",
+ "views": {
+ "by_last_seen_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'Token') && (doc['last_seen_at'] != null)) {\n emit(doc['last_seen_at'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'Token') {\n emit(doc._id, null);\n }\n }\n"
+ }
+ },
+ "couchrest-hash": "541dd924551c42a2317b345effbe65cc"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/designs/users/User.json b/puppet/modules/site_couchdb/files/designs/users/User.json
new file mode 100644
index 00000000..8a82cf4a
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/designs/users/User.json
@@ -0,0 +1,22 @@
+{
+ "_id": "_design/User",
+ "language": "javascript",
+ "views": {
+ "by_login": {
+ "map": " function(doc) {\n if ((doc['type'] == 'User') && (doc['login'] != null)) {\n emit(doc['login'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ },
+ "all": {
+ "map": " function(doc) {\n if (doc['type'] == 'User') {\n emit(doc._id, null);\n }\n }\n"
+ },
+ "by_created_at_and_one_month_warning_not_sent": {
+ "map": "function (doc) {\n if ((doc['type'] == 'User') && (doc['created_at'] != null) && (doc['one_month_warning_sent'] == null)) {\n emit(doc['created_at'], 1);\n } \n}\n",
+ "reduce": " function(key, values, rereduce) {\n return sum(values);\n }\n"
+ },
+ "by_created_at": {
+ "map": " function(doc) {\n if ((doc['type'] == 'User') && (doc['created_at'] != null)) {\n emit(doc['created_at'], 1);\n }\n }\n",
+ "reduce": "_sum"
+ }
+ },
+ "couchrest-hash": "d854607d299887a347e554176cb79e20"
+} \ No newline at end of file
diff --git a/puppet/modules/site_couchdb/files/leap_ca_daemon b/puppet/modules/site_couchdb/files/leap_ca_daemon
new file mode 100755
index 00000000..9a1a0bc7
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/leap_ca_daemon
@@ -0,0 +1,157 @@
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides: leap_ca_daemon
+# Required-Start: $remote_fs $syslog
+# Required-Stop: $remote_fs $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: leap_ca_daemon initscript
+# Description: Controls leap_ca_daemon (see https://github.com/leapcode/leap_ca
+#                    for more information).
+### END INIT INFO
+
+# Author: varac <varac@leap.se>
+#
+
+# Do NOT "set -e"
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="leap_ca_daemon initscript"
+NAME=leap_ca_daemon
+DAEMON=/usr/local/bin/$NAME
+DAEMON_ARGS="run "
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# Load the VERBOSE setting and other rcS variables
+. /lib/init/vars.sh
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+ # Return
+ # 0 if daemon has been started
+ # 1 if daemon was already running
+ # 2 if daemon could not be started
+ start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
+ || return 1
+ start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- \
+ $DAEMON_ARGS \
+ || return 2
+ # Add code here, if necessary, that waits for the process to be ready
+ # to handle requests from services started subsequently which depend
+ # on this one. As a last resort, sleep for some time.
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+ # Return
+ # 0 if daemon has been stopped
+ # 1 if daemon was already stopped
+ # 2 if daemon could not be stopped
+ # other if a failure occurred
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
+ RETVAL="$?"
+ [ "$RETVAL" = 2 ] && return 2
+ # Wait for children to finish too if this is a daemon that forks
+ # and if the daemon is only ever run from this initscript.
+ # If the above conditions are not satisfied then add some other code
+ # that waits for the process to drop all resources that could be
+ # needed by services started subsequently. A last resort is to
+ # sleep for some time.
+ start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
+ [ "$?" = 2 ] && return 2
+ # Many daemons don't delete their pidfiles when they exit.
+ rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+#
+# Function that sends a SIGHUP to the daemon/service
+#
+do_reload() {
+ #
+ # If the daemon can reload its configuration without
+ # restarting (for example, when it is sent a SIGHUP),
+ # then implement that here.
+ #
+ start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name $NAME
+ return 0
+}
+
+case "$1" in
+ start)
+ [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case "$?" in
+ 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+ 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+ esac
+ ;;
+ stop)
+ [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+ 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+ esac
+ ;;
+ status)
+ status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
+ ;;
+ #reload|force-reload)
+ #
+ # If do_reload() is not implemented then leave this commented out
+ # and leave 'force-reload' as an alias for 'restart'.
+ #
+ #log_daemon_msg "Reloading $DESC" "$NAME"
+ #do_reload
+ #log_end_msg $?
+ #;;
+ restart|force-reload)
+ #
+ # If the "reload" option is implemented then remove the
+ # 'force-reload' alias
+ #
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1)
+ do_start
+ case "$?" in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *)
+ # Failed to stop
+ log_end_msg 1
+ ;;
+ esac
+ ;;
+ *)
+ #echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload}" >&2
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
+
+:
diff --git a/puppet/modules/site_couchdb/files/local.ini b/puppet/modules/site_couchdb/files/local.ini
new file mode 100644
index 00000000..b921a927
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/local.ini
@@ -0,0 +1,8 @@
+; Puppet modified file !!
+
+; Custom settings should be made in this file. They will override settings
+; in default.ini, but unlike changes made to default.ini, this file won't be
+; overwritten on server upgrade.
+
+[compactions]
+_default = [{db_fragmentation, "70%"}, {view_fragmentation, "60%"}, {from, "03:00"}, {to, "05:00"}]
diff --git a/puppet/modules/site_couchdb/files/runit_config b/puppet/modules/site_couchdb/files/runit_config
new file mode 100644
index 00000000..169b4832
--- /dev/null
+++ b/puppet/modules/site_couchdb/files/runit_config
@@ -0,0 +1,6 @@
+#!/bin/bash
+exec 2>&1
+export HOME=/home/bigcouch
+ulimit -H -n 32768
+ulimit -S -n 32768
+exec chpst -u bigcouch /opt/bigcouch/bin/bigcouch
diff --git a/puppet/modules/site_couchdb/lib/puppet/parser/functions/rotated_db_name.rb b/puppet/modules/site_couchdb/lib/puppet/parser/functions/rotated_db_name.rb
new file mode 100644
index 00000000..6458ae81
--- /dev/null
+++ b/puppet/modules/site_couchdb/lib/puppet/parser/functions/rotated_db_name.rb
@@ -0,0 +1,24 @@
+module Puppet::Parser::Functions
+ newfunction(:rotated_db_name, :type => :rvalue, :doc => <<-EOS
+This function takes a database name string and returns a database name with the current rotation stamp appended.
+The first argument is the base name of the database. Subsequent arguments may contain these options:
+ * 'next' -- return the db name for the next rotation, not the current one.
+ * 'monthly' -- rotate monthly (default)
+ * 'weekly' -- rotate weekly
+*Examples:*
+ rotated_db_name('tokens') => 'tokens_551'
+ EOS
+ ) do |arguments|
+ if arguments.include?('weekly')
+ rotation_period = 604800 # 1 week
+ else
+ rotation_period = 2592000 # 1 month
+ end
+ suffix = Time.now.utc.to_i / rotation_period
+ if arguments.include?('next')
+ suffix += 1
+ end
+ "#{arguments.first}_#{suffix}"
+ end
+end
+
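
The rotation stamp is simply the number of whole rotation periods (30 days for monthly, 7 days for weekly) elapsed since the Unix epoch, so the 'tokens_551' example corresponds to roughly 551 x 30 days after 1970. The same arithmetic in shell, as a sketch:

```bash
# current monthly suffix: seconds since the epoch divided by 30 days
echo $(( $(date -u +%s) / 2592000 ))
# suffix for the next rotation, as used by the *_next_db names in create_dbs.pp
echo $(( $(date -u +%s) / 2592000 + 1 ))
```
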
diff --git a/puppet/modules/site_couchdb/manifests/add_users.pp b/puppet/modules/site_couchdb/manifests/add_users.pp
new file mode 100644
index 00000000..c905316b
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/add_users.pp
@@ -0,0 +1,57 @@
+# add couchdb users for all services
+class site_couchdb::add_users {
+
+ Class['site_couchdb::create_dbs']
+ -> Class['site_couchdb::add_users']
+
+ # Couchdb users
+
+ ## leap_mx couchdb user
+ ## read: identities
+ ## write access to user-<uuid>
+ couchdb::add_user { $site_couchdb::couchdb_leap_mx_user:
+ roles => '["identities"]',
+ pw => $site_couchdb::couchdb_leap_mx_pw,
+ salt => $site_couchdb::couchdb_leap_mx_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## nickserver couchdb user
+ ## r: identities
+ ## r/w: keycache
+ couchdb::add_user { $site_couchdb::couchdb_nickserver_user:
+ roles => '["identities","keycache"]',
+ pw => $site_couchdb::couchdb_nickserver_pw,
+ salt => $site_couchdb::couchdb_nickserver_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## soledad couchdb user
+ ## r/w: user-<uuid>, shared
+ ## read: tokens
+ couchdb::add_user { $site_couchdb::couchdb_soledad_user:
+ roles => '["tokens"]',
+ pw => $site_couchdb::couchdb_soledad_pw,
+ salt => $site_couchdb::couchdb_soledad_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## webapp couchdb user
+  ## read/write: users, tokens, sessions, tickets, identities, customers
+ couchdb::add_user { $site_couchdb::couchdb_webapp_user:
+ roles => '["tokens","identities","users"]',
+ pw => $site_couchdb::couchdb_webapp_pw,
+ salt => $site_couchdb::couchdb_webapp_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## replication couchdb user
+ ## read/write: all databases for replication
+ couchdb::add_user { $site_couchdb::couchdb_replication_user:
+ roles => '["replication"]',
+ pw => $site_couchdb::couchdb_replication_pw,
+ salt => $site_couchdb::couchdb_replication_salt,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/backup.pp b/puppet/modules/site_couchdb/manifests/backup.pp
new file mode 100644
index 00000000..8b5aa6ea
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/backup.pp
@@ -0,0 +1,23 @@
+class site_couchdb::backup {
+
+ # general backupninja config
+ backupninja::config { 'backupninja_config':
+ usecolors => false,
+ }
+
+ # dump all DBs locally to /var/backups/couchdb once a day
+ backupninja::sh { 'couchdb_backup':
+ command_string => "cd /srv/leap/couchdb/scripts \n./couchdb_dumpall.sh"
+ }
+
+  # Deploy /etc/leap/couchdb_scripts_defaults.conf so we can exclude
+  # some databases from the dump
+
+ file { '/etc/leap/couchdb_scripts_defaults.conf':
+ source => 'puppet:///modules/site_couchdb/couchdb_scripts_defaults.conf',
+ mode => '0644',
+ owner => 'root',
+ group => 'root',
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch.pp b/puppet/modules/site_couchdb/manifests/bigcouch.pp
new file mode 100644
index 00000000..2de3d4d0
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/bigcouch.pp
@@ -0,0 +1,50 @@
+# sets up bigcouch on couchdb node
+class site_couchdb::bigcouch {
+
+ $config = $::site_couchdb::couchdb_config['bigcouch']
+ $cookie = $config['cookie']
+ $ednp_port = $config['ednp_port']
+
+ class { 'couchdb':
+ admin_pw => $::site_couchdb::couchdb_admin_pw,
+ admin_salt => $::site_couchdb::couchdb_admin_salt,
+ bigcouch => true,
+ bigcouch_cookie => $cookie,
+ ednp_port => $ednp_port,
+ chttpd_bind_address => '127.0.0.1'
+ }
+
+ #
+  # stunnel must be running correctly before bigcouch dbs can be set up.
+ #
+ Class['site_config::default']
+ -> Class['site_config::resolvconf']
+ -> Class['couchdb::bigcouch::package::cloudant']
+ -> Service['shorewall']
+ -> Exec['refresh_stunnel']
+ -> Class['site_couchdb::setup']
+ -> Class['site_couchdb::bigcouch::add_nodes']
+ -> Class['site_couchdb::bigcouch::settle_cluster']
+ -> Class['site_couchdb::create_dbs']
+
+ include site_couchdb::bigcouch::add_nodes
+ include site_couchdb::bigcouch::settle_cluster
+ include site_couchdb::bigcouch::compaction
+
+ file { '/var/log/bigcouch':
+ ensure => directory
+ }
+
+ file { '/etc/sv/bigcouch/run':
+ ensure => present,
+ source => 'puppet:///modules/site_couchdb/runit_config',
+ owner => root,
+ group => root,
+ mode => '0755',
+ require => Package['couchdb'],
+ notify => Service['couchdb']
+ }
+
+ include site_check_mk::agent::couchdb::bigcouch
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp b/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp
new file mode 100644
index 00000000..c8c43275
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/bigcouch/add_nodes.pp
@@ -0,0 +1,8 @@
+class site_couchdb::bigcouch::add_nodes {
+ # loop through neighbors array and add nodes
+ $nodes = $::site_couchdb::bigcouch::config['neighbors']
+
+ couchdb::bigcouch::add_node { $nodes:
+ require => Couchdb::Query::Setup['localhost']
+ }
+}
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch/compaction.pp b/puppet/modules/site_couchdb/manifests/bigcouch/compaction.pp
new file mode 100644
index 00000000..84aab4ef
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/bigcouch/compaction.pp
@@ -0,0 +1,8 @@
+class site_couchdb::bigcouch::compaction {
+ cron {
+ 'compact_all_shards':
+ command => '/srv/leap/couchdb/scripts/bigcouch_compact_all_shards.sh >> /var/log/bigcouch/compaction.log',
+ hour => 3,
+ minute => 17;
+ }
+}
diff --git a/puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp b/puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp
new file mode 100644
index 00000000..820b5be2
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/bigcouch/settle_cluster.pp
@@ -0,0 +1,11 @@
+class site_couchdb::bigcouch::settle_cluster {
+
+ exec { 'wait_for_couch_nodes':
+ command => '/srv/leap/bin/run_tests --test CouchDB/Are_configured_nodes_online? --retry 12 --wait 10'
+ }
+
+ exec { 'settle_cluster_membership':
+ command => '/srv/leap/bin/run_tests --test CouchDB/Is_cluster_membership_ok? --retry 12 --wait 10',
+ require => Exec['wait_for_couch_nodes']
+ }
+}
diff --git a/puppet/modules/site_couchdb/manifests/create_dbs.pp b/puppet/modules/site_couchdb/manifests/create_dbs.pp
new file mode 100644
index 00000000..a2d1c655
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/create_dbs.pp
@@ -0,0 +1,102 @@
+# creates the necessary databases
+class site_couchdb::create_dbs {
+
+ Class['site_couchdb::setup']
+ -> Class['site_couchdb::create_dbs']
+
+ ### customer database
+ ### r/w: webapp,
+ couchdb::create_db { 'customers':
+ members => "{ \"names\": [\"${site_couchdb::couchdb_webapp_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## identities database
+  ## r: nickserver, leap_mx - needs to be restricted with a design document
+ ## r/w: webapp
+ couchdb::create_db { 'identities':
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"identities\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## keycache database
+ ## r/w: nickserver
+ couchdb::create_db { 'keycache':
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"keycache\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## sessions database
+ ## r/w: webapp
+ $sessions_db = rotated_db_name('sessions', 'monthly')
+ couchdb::create_db { $sessions_db:
+ members => "{ \"names\": [\"${site_couchdb::couchdb_webapp_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ $sessions_next_db = rotated_db_name('sessions', 'monthly', 'next')
+ couchdb::create_db { $sessions_next_db:
+ members => "{ \"names\": [\"${site_couchdb::couchdb_webapp_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## shared database
+ ## r/w: soledad
+ couchdb::create_db { 'shared':
+ members => "{ \"names\": [\"${site_couchdb::couchdb_soledad_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tickets database
+ ## r/w: webapp
+ couchdb::create_db { 'tickets':
+ members => "{ \"names\": [\"${site_couchdb::couchdb_webapp_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tokens database
+ ## r: soledad - needs to be restricted with a design document
+ ## r/w: webapp
+ $tokens_db = rotated_db_name('tokens', 'monthly')
+ couchdb::create_db { $tokens_db:
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"tokens\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ $tokens_next_db = rotated_db_name('tokens', 'monthly', 'next')
+ couchdb::create_db { $tokens_next_db:
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"tokens\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## users database
+ ## r/w: webapp
+ couchdb::create_db { 'users':
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"users\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tmp_users database
+ ## r/w: webapp
+ couchdb::create_db { 'tmp_users':
+ members => "{ \"names\": [], \"roles\": [\"replication\", \"users\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## messages db
+ ## store messages to the clients such as payment reminders
+ ## r/w: webapp
+ couchdb::create_db { 'messages':
+ members => "{ \"names\": [\"${site_couchdb::couchdb_webapp_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## invite_codes db
+ ## store invite codes for new signups
+ ## r/w: webapp
+ couchdb::create_db { 'invite_codes':
+ members => "{ \"names\": [\"${site_couchdb::couchdb_webapp_user}\"], \"roles\": [\"replication\"] }",
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+}
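
The members string passed to couchdb::create_db above is the members half of CouchDB's per-database _security object; conceptually (a sketch, assuming that is how the couchdb module applies it), creating the customers database boils down to something like:

```bash
curl -X PUT "$HOST/customers"
curl -X PUT "$HOST/customers/_security" \
  --data '{ "admins": { "names": [], "roles": [] },
            "members": { "names": ["webapp_user"], "roles": ["replication"] } }'
```

with webapp_user standing in for the actual $site_couchdb::couchdb_webapp_user value.
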
diff --git a/puppet/modules/site_couchdb/manifests/designs.pp b/puppet/modules/site_couchdb/manifests/designs.pp
new file mode 100644
index 00000000..e5fd94c6
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/designs.pp
@@ -0,0 +1,46 @@
+class site_couchdb::designs {
+
+ Class['site_couchdb::create_dbs']
+ -> Class['site_couchdb::designs']
+
+ file { '/srv/leap/couchdb/designs':
+ ensure => directory,
+ source => 'puppet:///modules/site_couchdb/designs',
+ recurse => true,
+ purge => true,
+ mode => '0755'
+ }
+
+ site_couchdb::upload_design {
+ 'customers': design => 'customers/Customer.json';
+ 'identities': design => 'identities/Identity.json';
+ 'tickets': design => 'tickets/Ticket.json';
+ 'messages': design => 'messages/Message.json';
+ 'users': design => 'users/User.json';
+ 'tmp_users': design => 'users/User.json';
+ 'invite_codes': design => 'invite_codes/InviteCode.json';
+ 'shared_docs':
+ db => 'shared',
+ design => 'shared/docs.json';
+ 'shared_syncs':
+ db => 'shared',
+ design => 'shared/syncs.json';
+ 'shared_transactions':
+ db => 'shared',
+ design => 'shared/transactions.json';
+ }
+
+ $sessions_db = rotated_db_name('sessions', 'monthly')
+ $sessions_next_db = rotated_db_name('sessions', 'monthly', 'next')
+ site_couchdb::upload_design {
+ $sessions_db: design => 'sessions/Session.json';
+ $sessions_next_db: design => 'sessions/Session.json';
+ }
+
+ $tokens_db = rotated_db_name('tokens', 'monthly')
+ $tokens_next_db = rotated_db_name('tokens', 'monthly', 'next')
+ site_couchdb::upload_design {
+ $tokens_db: design => 'tokens/Token.json';
+ $tokens_next_db: design => 'tokens/Token.json';
+ }
+}
diff --git a/puppet/modules/site_couchdb/manifests/init.pp b/puppet/modules/site_couchdb/manifests/init.pp
new file mode 100644
index 00000000..c4fe6277
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/init.pp
@@ -0,0 +1,81 @@
+# entry class for configuring a couchdb/bigcouch node
+class site_couchdb {
+ tag 'leap_service'
+
+ $couchdb_config = hiera('couch')
+ $couchdb_users = $couchdb_config['users']
+
+ $couchdb_admin = $couchdb_users['admin']
+ $couchdb_admin_user = $couchdb_admin['username']
+ $couchdb_admin_pw = $couchdb_admin['password']
+ $couchdb_admin_salt = $couchdb_admin['salt']
+
+ $couchdb_leap_mx = $couchdb_users['leap_mx']
+ $couchdb_leap_mx_user = $couchdb_leap_mx['username']
+ $couchdb_leap_mx_pw = $couchdb_leap_mx['password']
+ $couchdb_leap_mx_salt = $couchdb_leap_mx['salt']
+
+ $couchdb_nickserver = $couchdb_users['nickserver']
+ $couchdb_nickserver_user = $couchdb_nickserver['username']
+ $couchdb_nickserver_pw = $couchdb_nickserver['password']
+ $couchdb_nickserver_salt = $couchdb_nickserver['salt']
+
+ $couchdb_soledad = $couchdb_users['soledad']
+ $couchdb_soledad_user = $couchdb_soledad['username']
+ $couchdb_soledad_pw = $couchdb_soledad['password']
+ $couchdb_soledad_salt = $couchdb_soledad['salt']
+
+ $couchdb_webapp = $couchdb_users['webapp']
+ $couchdb_webapp_user = $couchdb_webapp['username']
+ $couchdb_webapp_pw = $couchdb_webapp['password']
+ $couchdb_webapp_salt = $couchdb_webapp['salt']
+
+ $couchdb_replication = $couchdb_users['replication']
+ $couchdb_replication_user = $couchdb_replication['username']
+ $couchdb_replication_pw = $couchdb_replication['password']
+ $couchdb_replication_salt = $couchdb_replication['salt']
+
+ $couchdb_backup = $couchdb_config['backup']
+ $couchdb_mode = $couchdb_config['mode']
+
+ # ensure bigcouch has been purged from the system:
+ # TODO: remove this check in 0.9 release
+ if file('/opt/bigcouch/bin/bigcouch', '/dev/null') != '' {
+ fail 'ERROR: BigCouch appears to be installed. Make sure you have migrated to CouchDB before proceeding. See https://leap.se/upgrade-0-8'
+ }
+
+ include site_couchdb::plain
+
+ Class['site_config::default']
+ -> Service['shorewall']
+ -> Exec['refresh_stunnel']
+ -> Class['couchdb']
+ -> Class['site_couchdb::setup']
+
+ include ::site_config::default
+ include site_stunnel
+
+ include site_couchdb::setup
+ include site_couchdb::create_dbs
+ include site_couchdb::add_users
+ include site_couchdb::designs
+ include site_couchdb::logrotate
+
+ if $couchdb_backup { include site_couchdb::backup }
+
+ include site_check_mk::agent::couchdb
+
+ # remove tapicero leftovers on couchdb nodes
+ include site_config::remove::tapicero
+
+ # Destroy every per-user storage database
+ # where the corresponding user record does not exist.
+ cron { 'cleanup_stale_userdbs':
+ command => '(/bin/date; /srv/leap/couchdb/scripts/cleanup-user-dbs) >> /var/log/leap/couchdb-cleanup.log',
+ user => 'root',
+ hour => 4,
+ minute => 7;
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/logrotate.pp b/puppet/modules/site_couchdb/manifests/logrotate.pp
new file mode 100644
index 00000000..bb8843bb
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/logrotate.pp
@@ -0,0 +1,14 @@
+# configure couchdb logrotation
+class site_couchdb::logrotate {
+
+ augeas {
+ 'logrotate_bigcouch':
+ context => '/files/etc/logrotate.d/bigcouch/rule',
+ changes => [
+ 'set file /opt/bigcouch/var/log/*.log', 'set rotate 7',
+ 'set schedule daily', 'set compress compress',
+ 'set missingok missingok', 'set ifempty notifempty',
+ 'set copytruncate copytruncate' ]
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/mirror.pp b/puppet/modules/site_couchdb/manifests/mirror.pp
new file mode 100644
index 00000000..fb82b897
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/mirror.pp
@@ -0,0 +1,78 @@
+# configure mirroring of couch nodes
+class site_couchdb::mirror {
+
+ Class['site_couchdb::add_users']
+ -> Class['site_couchdb::mirror']
+
+ class { 'couchdb':
+ admin_pw => $site_couchdb::couchdb_admin_pw,
+ admin_salt => $site_couchdb::couchdb_admin_salt,
+ chttpd_bind_address => '127.0.0.1'
+ }
+
+ $masters = $site_couchdb::couchdb_config['replication']['masters']
+ $master_node_names = keys($site_couchdb::couchdb_config['replication']['masters'])
+ $master_node = $masters[$master_node_names[0]]
+ $user = $site_couchdb::couchdb_replication_user
+ $password = $site_couchdb::couchdb_replication_pw
+ $from_host = $master_node['domain_internal']
+ $from_port = $master_node['couch_port']
+ $from = "http://${user}:${password}@${from_host}:${from_port}"
+
+ notice("mirror from: ${from}")
+
+ ### customer database
+ couchdb::mirror_db { 'customers':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## identities database
+ couchdb::mirror_db { 'identities':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## keycache database
+ couchdb::mirror_db { 'keycache':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## sessions database
+ couchdb::mirror_db { 'sessions':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## shared database
+ couchdb::mirror_db { 'shared':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tickets database
+ couchdb::mirror_db { 'tickets':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## tokens database
+ couchdb::mirror_db { 'tokens':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## users database
+ couchdb::mirror_db { 'users':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+ ## messages db
+ couchdb::mirror_db { 'messages':
+ from => $from,
+ require => Couchdb::Query::Setup['localhost']
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/plain.pp b/puppet/modules/site_couchdb/manifests/plain.pp
new file mode 100644
index 00000000..b40fc100
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/plain.pp
@@ -0,0 +1,14 @@
+# this class sets up a single, plain couchdb node
+class site_couchdb::plain {
+ class { 'couchdb':
+ admin_pw => $site_couchdb::couchdb_admin_pw,
+ admin_salt => $site_couchdb::couchdb_admin_salt,
+ chttpd_bind_address => '127.0.0.1'
+ }
+
+ include site_check_mk::agent::couchdb::plain
+
+ # remove bigcouch leftovers from previous installations
+ include ::site_config::remove::bigcouch
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/setup.pp b/puppet/modules/site_couchdb/manifests/setup.pp
new file mode 100644
index 00000000..710d3c1c
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/setup.pp
@@ -0,0 +1,61 @@
+#
+# An initial setup class. All the other classes depend on this
+#
+class site_couchdb::setup {
+
+ # ensure that we don't have leftovers from previous installations
+ # where we installed the cloudant bigcouch package
+ # https://leap.se/code/issues/4971
+ class { 'couchdb::bigcouch::package::cloudant':
+ ensure => absent
+ }
+
+ $user = $site_couchdb::couchdb_admin_user
+
+ # setup /etc/couchdb/couchdb-admin.netrc for couchdb admin access
+ couchdb::query::setup { 'localhost':
+ user => $user,
+ pw => $site_couchdb::couchdb_admin_pw
+ }
+
+ # We symlink /etc/couchdb/couchdb-admin.netrc to /etc/couchdb/couchdb.netrc
+ # for puppet commands, and to /root/.netrc for couchdb_scripts
+ # (e.g. backup), and to make life easier for the admin on the command line
+ # (i.e. using curl/wget without passing credentials)
+ file {
+ '/etc/couchdb/couchdb.netrc':
+ ensure => link,
+ target => "/etc/couchdb/couchdb-${user}.netrc";
+ '/root/.netrc':
+ ensure => link,
+ target => '/etc/couchdb/couchdb.netrc';
+ }
+
+ # setup /etc/couchdb/couchdb-soledad-admin.netrc file for couchdb admin
+ # access, accessible only for the soledad-admin user to create soledad
+ # userdbs
+ if member(hiera('services', []), 'soledad') {
+ file { '/etc/couchdb/couchdb-soledad-admin.netrc':
+ content => "machine localhost login ${user} password ${site_couchdb::couchdb_admin_pw}",
+ mode => '0400',
+ owner => 'soledad-admin',
+ group => 'root',
+ require => [ Package['couchdb'], User['soledad-admin'] ];
+ }
+ }
+
+ # Checkout couchdb_scripts repo
+ file {
+ '/srv/leap/couchdb':
+ ensure => directory
+ }
+
+ vcsrepo { '/srv/leap/couchdb/scripts':
+ ensure => present,
+ provider => git,
+ source => 'https://leap.se/git/couchdb_scripts',
+ revision => 'origin/master',
+ require => File['/srv/leap/couchdb']
+ }
+
+}
diff --git a/puppet/modules/site_couchdb/manifests/upload_design.pp b/puppet/modules/site_couchdb/manifests/upload_design.pp
new file mode 100644
index 00000000..bd73ebf2
--- /dev/null
+++ b/puppet/modules/site_couchdb/manifests/upload_design.pp
@@ -0,0 +1,14 @@
+# upload a design doc to a db
+define site_couchdb::upload_design($design, $db = $title) {
+ $design_name = regsubst($design, '^.*\/(.*)\.json$', '\1')
+ $id = "_design/${design_name}"
+ $file = "/srv/leap/couchdb/designs/${design}"
+ exec {
+ "upload_design_${name}":
+ command => "/usr/local/bin/couch-doc-update --host 127.0.0.1:5984 --db '${db}' --id '${id}' --data '{}' --file '${file}'",
+ refreshonly => false,
+ loglevel => debug,
+ logoutput => on_failure,
+ require => File['/srv/leap/couchdb/designs'];
+ }
+}
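
A hypothetical usage of this define, assuming a design document shipped at
/srv/leap/couchdb/designs/tokens/Token.json (the file name and database are
illustrative, not taken from this changeset). The regsubst above derives the
design name from the file name, so this would upload the document as
_design/Token into the 'tokens' database:

  site_couchdb::upload_design { 'tokens':
    design => 'tokens/Token.json',
  }
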
diff --git a/puppet/modules/site_haproxy/files/haproxy-stats.cfg b/puppet/modules/site_haproxy/files/haproxy-stats.cfg
new file mode 100644
index 00000000..e6335ba2
--- /dev/null
+++ b/puppet/modules/site_haproxy/files/haproxy-stats.cfg
@@ -0,0 +1,6 @@
+# provide access to stats for the nagios plugin
+listen stats 127.0.0.1:8000
+ mode http
+ stats enable
+ stats uri /haproxy
+
diff --git a/puppet/modules/site_haproxy/manifests/init.pp b/puppet/modules/site_haproxy/manifests/init.pp
new file mode 100644
index 00000000..b28ce80e
--- /dev/null
+++ b/puppet/modules/site_haproxy/manifests/init.pp
@@ -0,0 +1,41 @@
+class site_haproxy {
+ $haproxy = hiera('haproxy')
+
+ class { 'haproxy':
+ enable => true,
+ manage_service => true,
+ global_options => {
+ 'log' => '127.0.0.1 local0',
+ 'maxconn' => '4096',
+ 'stats' => 'socket /var/run/haproxy.sock user haproxy group haproxy',
+ 'chroot' => '/usr/share/haproxy',
+ 'user' => 'haproxy',
+ 'group' => 'haproxy',
+ 'daemon' => ''
+ },
+ defaults_options => {
+ 'log' => 'global',
+ 'retries' => '3',
+ 'option' => 'redispatch',
+ 'timeout connect' => '4000',
+ 'timeout client' => '20000',
+ 'timeout server' => '20000'
+ }
+ }
+
+ # monitor haproxy
+ concat::fragment { 'stats':
+ target => '/etc/haproxy/haproxy.cfg',
+ order => '90',
+ source => 'puppet:///modules/site_haproxy/haproxy-stats.cfg';
+ }
+
+ # Template uses $haproxy
+ concat::fragment { 'leap_haproxy_webapp_couchdb':
+ target => '/etc/haproxy/haproxy.cfg',
+ order => '20',
+ content => template('site_haproxy/haproxy.cfg.erb'),
+ }
+
+ include site_check_mk::agent::haproxy
+}
diff --git a/puppet/modules/site_haproxy/templates/couch.erb b/puppet/modules/site_haproxy/templates/couch.erb
new file mode 100644
index 00000000..f42e8368
--- /dev/null
+++ b/puppet/modules/site_haproxy/templates/couch.erb
@@ -0,0 +1,32 @@
+frontend couch
+ bind localhost:<%= @listen_port %>
+ mode http
+ option httplog
+ option dontlognull
+ option http-server-close # use client keep-alive, but close server connection.
+ use_backend couch_read if METH_GET
+ default_backend couch_write
+
+backend couch_write
+ mode http
+ balance roundrobin
+ option httpchk GET / # health check using simple get to root
+ option allbackups # balance among all backups, not just one.
+ default-server inter 3000 fastinter 1000 downinter 1000 rise 2 fall 1
+<%- @servers.sort.each do |name,server| -%>
+<%- next unless server['writable'] -%>
+ # <%=name%>
+ server couchdb_<%=server['port']%> <%=server['host']%>:<%=server['port']%> <%='backup' if server['backup']%> weight <%=server['weight']%> check
+<%- end -%>
+
+backend couch_read
+ mode http
+ balance roundrobin
+ option httpchk GET / # health check using simple get to root
+ option allbackups # balance among all backups, not just one.
+ default-server inter 3000 fastinter 1000 downinter 1000 rise 2 fall 1
+<%- @servers.sort.each do |name,server| -%>
+ # <%=name%>
+ server couchdb_<%=server['port']%> <%=server['host']%>:<%=server['port']%> <%='backup' if server['backup']%> weight <%=server['weight']%> check
+<%- end -%>
+
diff --git a/puppet/modules/site_haproxy/templates/haproxy.cfg.erb b/puppet/modules/site_haproxy/templates/haproxy.cfg.erb
new file mode 100644
index 00000000..8311b1a5
--- /dev/null
+++ b/puppet/modules/site_haproxy/templates/haproxy.cfg.erb
@@ -0,0 +1,11 @@
+<%- @haproxy.each do |frontend, options| -%>
+<%- if options['servers'] -%>
+
+##
+## <%= frontend %>
+##
+
+<%= scope.function_templatewlv(["site_haproxy/#{frontend}.erb", options]) %>
+<%- end -%>
+<%- end -%>
+
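
For reference, a sketch of the 'haproxy' hiera hash shape that these two
templates assume, written here as a Puppet hash. Only frontends with a
'servers' entry are rendered; the key names are inferred from the
@listen_port, @servers, 'host', 'port', 'weight', 'writable' and 'backup'
references in couch.erb, and the concrete values below are made up:

  $haproxy_example = {
    'couch' => {
      'listen_port' => '4096',
      'servers'     => {
        'couch1' => {
          'host'     => '10.0.1.10',
          'port'     => '5984',
          'weight'   => '10',
          'writable' => true,
        },
        'couch2' => {
          'host'     => '10.0.1.11',
          'port'     => '5984',
          'weight'   => '10',
          'writable' => true,
          'backup'   => true,
        },
      },
    },
  }
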
diff --git a/puppet/modules/site_mx/manifests/init.pp b/puppet/modules/site_mx/manifests/init.pp
new file mode 100644
index 00000000..a9b0198b
--- /dev/null
+++ b/puppet/modules/site_mx/manifests/init.pp
@@ -0,0 +1,20 @@
+class site_mx {
+ tag 'leap_service'
+ Class['site_config::default'] -> Class['site_mx']
+
+ include site_config::default
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include site_config::x509::client_ca::ca
+ include site_config::x509::client_ca::key
+
+ include site_stunnel
+
+ include site_postfix::mx
+ include site_haproxy
+ include site_shorewall::mx
+ include site_shorewall::service::smtp
+ include leap_mx
+ include site_check_mk::agent::mx
+}
diff --git a/puppet/modules/site_nagios/files/configs/Debian/nagios.cfg b/puppet/modules/site_nagios/files/configs/Debian/nagios.cfg
new file mode 100644
index 00000000..62f26f2c
--- /dev/null
+++ b/puppet/modules/site_nagios/files/configs/Debian/nagios.cfg
@@ -0,0 +1,1302 @@
+##############################################################################
+#
+# NAGIOS.CFG - Sample Main Config File for Nagios
+#
+#
+##############################################################################
+
+
+# LOG FILE
+# This is the main log file where service and host events are logged
+# for historical purposes. This should be the first option specified
+# in the config file!!!
+
+log_file=/var/log/nagios3/nagios.log
+
+
+
+# OBJECT CONFIGURATION FILE(S)
+# These are the object configuration files in which you define hosts,
+# host groups, contacts, contact groups, services, etc.
+# You can split your object definitions across several config files
+# if you wish (as shown below), or keep them all in a single config file.
+#cfg_file=/etc/nagios3/commands.cfg
+
+# Check_mk configuration files
+cfg_dir=/etc/nagios3/conf.d/check_mk
+cfg_dir=/etc/nagios3/local
+
+# Puppet-managed configuration files
+cfg_file=/etc/nagios3/nagios_templates.cfg
+cfg_file=/etc/nagios3/nagios_command.cfg
+cfg_file=/etc/nagios3/nagios_contact.cfg
+cfg_file=/etc/nagios3/nagios_contactgroup.cfg
+cfg_file=/etc/nagios3/nagios_host.cfg
+cfg_file=/etc/nagios3/nagios_hostdependency.cfg
+cfg_file=/etc/nagios3/nagios_hostescalation.cfg
+cfg_file=/etc/nagios3/nagios_hostextinfo.cfg
+cfg_file=/etc/nagios3/nagios_hostgroup.cfg
+cfg_file=/etc/nagios3/nagios_hostgroupescalation.cfg
+cfg_file=/etc/nagios3/nagios_service.cfg
+cfg_file=/etc/nagios3/nagios_servicedependency.cfg
+cfg_file=/etc/nagios3/nagios_serviceescalation.cfg
+cfg_file=/etc/nagios3/nagios_serviceextinfo.cfg
+cfg_file=/etc/nagios3/nagios_servicegroup.cfg
+cfg_file=/etc/nagios3/nagios_timeperiod.cfg
+
+# Debian also defaults to using the check commands defined by the debian
+# nagios-plugins package
+cfg_dir=/etc/nagios-plugins/config
+
+
+# OBJECT CACHE FILE
+# This option determines where object definitions are cached when
+# Nagios starts/restarts. The CGIs read object definitions from
+# this cache file (rather than looking at the object config files
+# directly) in order to prevent inconsistencies that can occur
+# when the config files are modified after Nagios starts.
+
+object_cache_file=/var/cache/nagios3/objects.cache
+
+
+
+# PRE-CACHED OBJECT FILE
+# This option determines the location of the precached object file.
+# If you run Nagios with the -p command line option, it will preprocess
+# your object configuration file(s) and write the cached config to this
+# file. You can then start Nagios with the -u option to have it read
+# object definitions from this precached file, rather than the standard
+# object configuration files (see the cfg_file and cfg_dir options above).
+# Using a precached object file can speed up the time needed to (re)start
+# the Nagios process if you've got a large and/or complex configuration.
+# Read the documentation section on optimizing Nagios to find out more
+# about how this feature works.
+
+precached_object_file=/var/lib/nagios3/objects.precache
+
+
+
+# RESOURCE FILE
+# This is an optional resource file that contains $USERx$ macro
+# definitions. Multiple resource files can be specified by using
+# multiple resource_file definitions. The CGIs will not attempt to
+# read the contents of resource files, so information that is
+# considered to be sensitive (usernames, passwords, etc) can be
+# defined as macros in this file and restrictive permissions (600)
+# can be placed on this file.
+
+resource_file=/etc/nagios3/resource.cfg
+
+
+
+# STATUS FILE
+# This is where the current status of all monitored services and
+# hosts is stored. Its contents are read and processed by the CGIs.
+# The contents of the status file are deleted every time Nagios
+# restarts.
+
+status_file=/var/cache/nagios3/status.dat
+
+
+
+# STATUS FILE UPDATE INTERVAL
+# This option determines the frequency (in seconds) that
+# Nagios will periodically dump program, host, and
+# service status data.
+
+status_update_interval=10
+
+
+
+# NAGIOS USER
+# This determines the effective user that Nagios should run as.
+# You can either supply a username or a UID.
+
+nagios_user=nagios
+
+
+
+# NAGIOS GROUP
+# This determines the effective group that Nagios should run as.
+# You can either supply a group name or a GID.
+
+nagios_group=nagios
+
+
+
+# EXTERNAL COMMAND OPTION
+# This option allows you to specify whether or not Nagios should check
+# for external commands (in the command file defined below). By default
+# Nagios will *not* check for external commands, just to be on the
+# cautious side. If you want to be able to use the CGI command interface
+# you will have to enable this.
+# Values: 0 = disable commands, 1 = enable commands
+
+check_external_commands=1
+
+
+
+# EXTERNAL COMMAND CHECK INTERVAL
+# This is the interval at which Nagios should check for external commands.
+# This value works off the interval_length you specify later. If you leave
+# that at its default value of 60 (seconds), a value of 1 here will cause
+# Nagios to check for external commands every minute. If you specify a
+# number followed by an "s" (i.e. 15s), this will be interpreted to mean
+# actual seconds rather than a multiple of the interval_length variable.
+# Note: In addition to reading the external command file at regularly
+# scheduled intervals, Nagios will also check for external commands after
+# event handlers are executed.
+# NOTE: Setting this value to -1 causes Nagios to check the external
+# command file as often as possible.
+
+#command_check_interval=15s
+command_check_interval=-1
+
+
+
+# EXTERNAL COMMAND FILE
+# This is the file that Nagios checks for external command requests.
+# It is also where the command CGI will write commands that are submitted
+# by users, so it must be writeable by the user that the web server
+# is running as (usually 'nobody'). Permissions should be set at the
+# directory level instead of on the file, as the file is deleted every
+# time its contents are processed.
+# Debian Users: In case you didn't read README.Debian yet, _NOW_ is the
+# time to do it.
+
+command_file=/var/lib/nagios3/rw/nagios.cmd
+
+
+
+# EXTERNAL COMMAND BUFFER SLOTS
+# This setting is used to tweak the number of items or "slots" that
+# the Nagios daemon should allocate to the buffer that holds incoming
+# external commands before they are processed. As external commands
+# are processed by the daemon, they are removed from the buffer.
+
+external_command_buffer_slots=4096
+
+
+
+# LOCK FILE
+# This is the lockfile that Nagios will use to store its PID number
+# in when it is running in daemon mode.
+
+lock_file=/var/run/nagios3/nagios3.pid
+
+
+
+# TEMP FILE
+# This is a temporary file that is used as scratch space when Nagios
+# updates the status log, cleans the comment file, etc. This file
+# is created, used, and deleted throughout the time that Nagios is
+# running.
+
+temp_file=/var/cache/nagios3/nagios.tmp
+
+
+
+# TEMP PATH
+# This is the path where Nagios can create temp files for service and
+# host check results, etc.
+
+temp_path=/tmp
+
+
+
+# EVENT BROKER OPTIONS
+# Controls what (if any) data gets sent to the event broker.
+# Values: 0 = Broker nothing
+# -1 = Broker everything
+# <other> = See documentation
+
+event_broker_options=-1
+
+
+
+# EVENT BROKER MODULE(S)
+# This directive is used to specify an event broker module that should
+# be loaded by Nagios at startup. Use multiple directives if you want
+# to load more than one module. Arguments that should be passed to
+# the module at startup are separated from the module path by a space.
+#
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+#
+# Do NOT overwrite modules while they are being used by Nagios or Nagios
+# will crash in a fiery display of SEGFAULT glory. This is a bug/limitation
+# either in dlopen(), the kernel, and/or the filesystem. And maybe Nagios...
+#
+# The correct/safe way of updating a module is by using one of these methods:
+# 1. Shutdown Nagios, replace the module file, restart Nagios
+# 2. Delete the original module file, move the new module file into place, restart Nagios
+#
+# Example:
+#
+# broker_module=<modulepath> [moduleargs]
+
+#broker_module=/somewhere/module1.o
+#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
+
+
+
+# LOG ROTATION METHOD
+# This is the log rotation method that Nagios should use to rotate
+# the main log file. Values are as follows..
+# n = None - don't rotate the log
+# h = Hourly rotation (top of the hour)
+# d = Daily rotation (midnight every day)
+# w = Weekly rotation (midnight on Saturday evening)
+# m = Monthly rotation (midnight last day of month)
+
+log_rotation_method=n
+
+
+
+# LOG ARCHIVE PATH
+# This is the directory where archived (rotated) log files should be
+# placed (assuming you've chosen to do log rotation).
+
+log_archive_path=/var/log/nagios3/archives
+
+
+
+# LOGGING OPTIONS
+# If you want messages logged to the syslog facility, as well as the
+# Nagios log file, set this option to 1. If not, set it to 0.
+
+use_syslog=0
+
+
+
+# NOTIFICATION LOGGING OPTION
+# If you don't want notifications to be logged, set this value to 0.
+# If notifications should be logged, set the value to 1.
+
+log_notifications=1
+
+
+
+# SERVICE RETRY LOGGING OPTION
+# If you don't want service check retries to be logged, set this value
+# to 0. If retries should be logged, set the value to 1.
+
+log_service_retries=1
+
+
+
+# HOST RETRY LOGGING OPTION
+# If you don't want host check retries to be logged, set this value to
+# 0. If retries should be logged, set the value to 1.
+
+log_host_retries=1
+
+
+
+# EVENT HANDLER LOGGING OPTION
+# If you don't want host and service event handlers to be logged, set
+# this value to 0. If event handlers should be logged, set the value
+# to 1.
+
+log_event_handlers=1
+
+
+
+# INITIAL STATES LOGGING OPTION
+# If you want Nagios to log all initial host and service states to
+# the main log file (the first time the service or host is checked)
+# you can enable this option by setting this value to 1. If you
+# are not using an external application that does long term state
+# statistics reporting, you do not need to enable this option. In
+# this case, set the value to 0.
+
+log_initial_states=0
+
+
+
+# EXTERNAL COMMANDS LOGGING OPTION
+# If you don't want Nagios to log external commands, set this value
+# to 0. If external commands should be logged, set this value to 1.
+# Note: This option does not include logging of passive service
+# checks - see the option below for controlling whether or not
+# passive checks are logged.
+
+log_external_commands=1
+
+
+
+# PASSIVE CHECKS LOGGING OPTION
+# If you don't want Nagios to log passive host and service checks, set
+# this value to 0. If passive checks should be logged, set
+# this value to 1.
+
+log_passive_checks=1
+
+
+
+# GLOBAL HOST AND SERVICE EVENT HANDLERS
+# These options allow you to specify a host and service event handler
+# command that is to be run for every host or service state change.
+# The global event handler is executed immediately prior to the event
+# handler that you have optionally specified in each host or
+# service definition. The command argument is the short name of a
+# command definition that you define in your host configuration file.
+# Read the HTML docs for more information.
+
+#global_host_event_handler=somecommand
+#global_service_event_handler=somecommand
+
+
+
+# SERVICE INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" service checks when it starts monitoring. The
+# default is to use smart delay calculation, which will try to
+# space all service checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)! This is not a
+# good thing for production, but is useful when testing the
+# parallelization functionality.
+# n = None - don't use any delay between checks
+# d = Use a "dumb" delay of 1 second between checks
+# s = Use "smart" inter-check delay calculation
+# x.xx = Use an inter-check delay of x.xx seconds
+
+service_inter_check_delay_method=s
+
+
+
+# MAXIMUM SERVICE CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all services should
+# be completed. Default is 30 minutes.
+
+max_service_check_spread=30
+
+
+
+# SERVICE CHECK INTERLEAVE FACTOR
+# This variable determines how service checks are interleaved.
+# Interleaving the service checks allows for a more even
+# distribution of service checks and reduced load on remote
+# hosts. Setting this value to 1 is equivalent to how versions
+# of Nagios previous to 0.0.5 did service checks. Set this
+# value to s (smart) for automatic calculation of the interleave
+# factor unless you have a specific reason to change it.
+# s = Use "smart" interleave factor calculation
+# x = Use an interleave factor of x, where x is a
+# number greater than or equal to 1.
+
+service_interleave_factor=s
+
+
+
+# HOST INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" host checks when it starts monitoring. The
+# default is to use smart delay calculation, which will try to
+# space all host checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)!
+# n = None - don't use any delay between checks
+# d = Use a "dumb" delay of 1 second between checks
+# s = Use "smart" inter-check delay calculation
+# x.xx = Use an inter-check delay of x.xx seconds
+
+host_inter_check_delay_method=s
+
+
+
+# MAXIMUM HOST CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all hosts should
+# be completed. Default is 30 minutes.
+
+max_host_check_spread=30
+
+
+
+# MAXIMUM CONCURRENT SERVICE CHECKS
+# This option allows you to specify the maximum number of
+# service checks that can be run in parallel at any given time.
+# Specifying a value of 1 for this variable essentially prevents
+# any service checks from being parallelized. A value of 0
+# will not restrict the number of concurrent checks that are
+# being executed.
+
+max_concurrent_checks=0
+
+
+
+# HOST AND SERVICE CHECK REAPER FREQUENCY
+# This is the frequency (in seconds!) that Nagios will process
+# the results of host and service checks.
+
+check_result_reaper_frequency=10
+
+
+
+
+# MAX CHECK RESULT REAPER TIME
+# This is the max amount of time (in seconds) that a single
+# check result reaper event will be allowed to run before
+# returning control back to Nagios so it can perform other
+# duties.
+
+max_check_result_reaper_time=30
+
+
+
+
+# CHECK RESULT PATH
+# This is the directory where Nagios stores the results of host and
+# service checks that have not yet been processed.
+#
+# Note: Make sure that only one instance of Nagios has access
+# to this directory!
+
+check_result_path=/var/lib/nagios3/spool/checkresults
+
+
+
+
+# MAX CHECK RESULT FILE AGE
+# This option determines the maximum age (in seconds) which check
+# result files are considered to be valid. Files older than this
+# threshold will be mercilessly deleted without further processing.
+
+max_check_result_file_age=3600
+
+
+
+
+# CACHED HOST CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous host check is considered current.
+# Cached host states (from host checks that were performed more
+# recently that the timeframe specified by this value) can immensely
+# improve performance in regards to the host check logic.
+# Too high of a value for this option may result in inaccurate host
+# states being used by Nagios, while a lower value may result in a
+# performance hit for host checks. Use a value of 0 to disable host
+# check caching.
+
+cached_host_check_horizon=15
+
+
+
+# CACHED SERVICE CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous service check is considered current.
+# Cached service states (from service checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance in regards to predictive dependency checks.
+# Use a value of 0 to disable service check caching.
+
+cached_service_check_horizon=15
+
+
+
+# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of hosts when it predicts that future dependency logic tests
+# may be needed. These predictive checks can help ensure that your
+# host dependency logic works well.
+# Values:
+# 0 = Disable predictive checks
+# 1 = Enable predictive checks (default)
+
+enable_predictive_host_dependency_checks=1
+
+
+
+# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of services when it predicts that future dependency logic tests
+# may be needed. These predictive checks can help ensure that your
+# service dependency logic works well.
+# Values:
+# 0 = Disable predictive checks
+# 1 = Enable predictive checks (default)
+
+enable_predictive_service_dependency_checks=1
+
+
+
+# SOFT STATE DEPENDENCIES
+# This option determines whether or not Nagios will use soft state
+# information when checking host and service dependencies. Normally
+# Nagios will only use the latest hard host or service state when
+# checking dependencies. If you want it to use the latest state (regardless
+# of whether it's a soft or hard state type), enable this option.
+# Values:
+# 0 = Don't use soft state dependencies (default)
+# 1 = Use soft state dependencies
+
+soft_state_dependencies=0
+
+
+
+# TIME CHANGE ADJUSTMENT THRESHOLDS
+# These options determine when Nagios will react to detected changes
+# in system time (either forward or backwards).
+
+#time_change_threshold=900
+
+
+
+# AUTO-RESCHEDULING OPTION
+# This option determines whether or not Nagios will attempt to
+# automatically reschedule active host and service checks to
+# "smooth" them out over time. This can help balance the load on
+# the monitoring server.
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_reschedule_checks=0
+
+
+
+# AUTO-RESCHEDULING INTERVAL
+# This option determines how often (in seconds) Nagios will
+# attempt to automatically reschedule checks. This option only
+# has an effect if the auto_reschedule_checks option is enabled.
+# Default is 30 seconds.
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_interval=30
+
+
+
+# AUTO-RESCHEDULING WINDOW
+# This option determines the "window" of time (in seconds) that
+# Nagios will look at when automatically rescheduling checks.
+# Only host and service checks that occur in the next X seconds
+# (determined by this variable) will be rescheduled. This option
+# only has an effect if the auto_reschedule_checks option is
+# enabled. Default is 180 seconds (3 minutes).
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_window=180
+
+
+
+# SLEEP TIME
+# This is the number of seconds to sleep between checking for system
+# events and service checks that need to be run.
+
+sleep_time=0.25
+
+
+
+# TIMEOUT VALUES
+# These options control how much time Nagios will allow various
+# types of commands to execute before killing them off. Options
+# are available for controlling maximum time allotted for
+# service checks, host checks, event handlers, notifications, the
+# ocsp command, and performance data commands. All values are in
+# seconds.
+
+service_check_timeout=60
+host_check_timeout=30
+event_handler_timeout=30
+notification_timeout=30
+ocsp_timeout=5
+perfdata_timeout=5
+
+
+
+# RETAIN STATE INFORMATION
+# This setting determines whether or not Nagios will save state
+# information for services and hosts before it shuts down. Upon
+# startup Nagios will reload all saved service and host state
+# information before starting to monitor. This is useful for
+# maintaining long-term data on state statistics, etc, but will
+# slow Nagios down a bit when it (re)starts. Since it's only
+# a one-time penalty, I think it's well worth the additional
+# startup delay.
+
+retain_state_information=1
+
+
+
+# STATE RETENTION FILE
+# This is the file that Nagios should use to store host and
+# service state information before it shuts down. The state
+# information in this file is also read immediately prior to
+# starting to monitor the network when Nagios is restarted.
+# This file is used only if the retain_state_information
+# variable is set to 1.
+
+state_retention_file=/var/lib/nagios3/retention.dat
+
+
+
+# RETENTION DATA UPDATE INTERVAL
+# This setting determines how often (in minutes) that Nagios
+# will automatically save retention data during normal operation.
+# If you set this value to 0, Nagios will not save retention
+# data at regular interval, but it will still save retention
+# data before shutting down or restarting. If you have disabled
+# state retention, this option has no effect.
+
+retention_update_interval=60
+
+
+
+# USE RETAINED PROGRAM STATE
+# This setting determines whether or not Nagios will set
+# program status variables based on the values saved in the
+# retention file. If you want to use retained program status
+# information, set this value to 1. If not, set this value
+# to 0.
+
+use_retained_program_state=1
+
+
+
+# USE RETAINED SCHEDULING INFO
+# This setting determines whether or not Nagios will retain
+# the scheduling info (next check time) for hosts and services
+# based on the values saved in the retention file. If you
+# want to use retained scheduling info, set this
+# value to 1. If not, set this value to 0.
+
+use_retained_scheduling_info=1
+
+
+
+# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
+# The following variables are used to specify specific host and
+# service attributes that should *not* be retained by Nagios during
+# program restarts.
+#
+# The values of the masks are bitwise ANDs of values specified
+# by the "MODATTR_" definitions found in include/common.h.
+# For example, if you do not want the current enabled/disabled state
+# of flap detection and event handlers for hosts to be retained, you
+# would use a value of 24 for the host attribute mask...
+# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
+
+# This mask determines what host attributes are not retained
+retained_host_attribute_mask=0
+
+# This mask determines what service attributes are not retained
+retained_service_attribute_mask=0
+
+# These two masks determine what process attributes are not retained.
+# There are two masks, because some process attributes have host and service
+# options. For example, you can disable active host checks, but leave active
+# service checks enabled.
+retained_process_host_attribute_mask=0
+retained_process_service_attribute_mask=0
+
+# These two masks determine what contact attributes are not retained.
+# There are two masks, because some contact attributes have host and
+# service options. For example, you can disable host notifications for
+# a contact, but leave service notifications enabled for them.
+retained_contact_host_attribute_mask=0
+retained_contact_service_attribute_mask=0
+
+
+
+# INTERVAL LENGTH
+# This is the seconds per unit interval as used in the
+# host/contact/service configuration files. Setting this to 60 means
+# that each interval is one minute long (60 seconds). Other settings
+# have not been tested much, so your mileage is likely to vary...
+
+interval_length=60
+
+
+
+# AGGRESSIVE HOST CHECKING OPTION
+# If you don't want to turn on aggressive host checking features, set
+# this value to 0 (the default). Otherwise set this value to 1 to
+# enable the aggressive check option. Read the docs for more info
+# on what aggressive host checking is, or check out the source code in
+# base/checks.c
+
+use_aggressive_host_checking=0
+
+
+
+# SERVICE CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# service checks when it initially starts. If this option is
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in. Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of service checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_service_checks=1
+
+
+
+# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# service check results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_service_checks=1
+
+
+
+# HOST CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# host checks when it initially starts. If this option is
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in. Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of host checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_host_checks=1
+
+
+
+# PASSIVE HOST CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# host check results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_host_checks=1
+
+
+
+# NOTIFICATIONS OPTION
+# This determines whether or not Nagios will send out any host or
+# service notifications when it is initially (re)started.
+# Values: 1 = enable notifications, 0 = disable notifications
+
+enable_notifications=1
+
+
+
+# EVENT HANDLER USE OPTION
+# This determines whether or not Nagios will run any host or
+# service event handlers when it is initially (re)started. Unless
+# you're implementing redundant hosts, leave this option enabled.
+# Values: 1 = enable event handlers, 0 = disable event handlers
+
+enable_event_handlers=1
+
+
+
+# PROCESS PERFORMANCE DATA OPTION
+# This determines whether or not Nagios will process performance
+# data returned from service and host checks. If this option is
+# enabled, host performance data will be processed using the
+# host_perfdata_command (defined below) and service performance
+# data will be processed using the service_perfdata_command (also
+# defined below). Read the HTML docs for more information on
+# performance data.
+# Values: 1 = process performance data, 0 = do not process performance data
+
+process_performance_data=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
+# These commands are run after every host and service check is
+# performed. These commands are executed only if the
+# process_performance_data option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on performance data.
+
+#host_perfdata_command=process-host-perfdata
+#service_perfdata_command=process-service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILES
+# These files are used to store host and service performance data.
+# Performance data is only written to these files if the
+# process_performance_data option (above) is set to 1.
+
+#host_perfdata_file=/tmp/host-perfdata
+#service_perfdata_file=/tmp/service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
+# These options determine what data is written (and how) to the
+# performance data files. The templates may contain macros, special
+# characters (\t for tab, \r for carriage return, \n for newline)
+# and plain text. A newline is automatically added after each write
+# to the performance data file. Some examples of what you can do are
+# shown below.
+
+#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
+#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE MODES
+# This option determines whether or not the host and service
+# performance data files are opened in write ("w") or append ("a")
+# mode. If you want to use named pipes, you should use the special
+# pipe ("p") mode which avoid blocking at startup, otherwise you will
+# likely want the defult append ("a") mode.
+
+#host_perfdata_file_mode=a
+#service_perfdata_file_mode=a
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
+# These options determine how often (in seconds) the host and service
+# performance data files are processed using the commands defined
+# below. A value of 0 indicates the files should not be periodically
+# processed.
+
+#host_perfdata_file_processing_interval=0
+#service_perfdata_file_processing_interval=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
+# These commands are used to periodically process the host and
+# service performance data files. The interval at which the
+# processing occurs is determined by the options above.
+
+#host_perfdata_file_processing_command=process-host-perfdata-file
+#service_perfdata_file_processing_command=process-service-perfdata-file
+
+
+
+# OBSESS OVER SERVICE CHECKS OPTION
+# This determines whether or not Nagios will obsess over service
+# checks and run the ocsp_command defined below. Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option. Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over services, 0 = do not obsess (default)
+
+obsess_over_services=0
+
+
+
+# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
+# This is the command that is run for every service check that is
+# processed by Nagios. This command is executed only if the
+# obsess_over_services option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ocsp_command=somecommand
+
+
+
+# OBSESS OVER HOST CHECKS OPTION
+# This determines whether or not Nagios will obsess over host
+# checks and run the ochp_command defined below. Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option. Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over hosts, 0 = do not obsess (default)
+
+obsess_over_hosts=0
+
+
+
+# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
+# This is the command that is run for every host check that is
+# processed by Nagios. This command is executed only if the
+# obsess_over_hosts option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ochp_command=somecommand
+
+
+
+# TRANSLATE PASSIVE HOST CHECKS OPTION
+# This determines whether or not Nagios will translate
+# DOWN/UNREACHABLE passive host check results into their proper
+# state for this instance of Nagios. This option is useful
+# if you have a distributed or failover monitoring setup. In
+# these cases your other Nagios servers probably have a different
+# "view" of the network, with regards to the parent/child relationship
+# of hosts. If a distributed monitoring server thinks a host
+# is DOWN, it may actually be UNREACHABLE from the point of
+# this Nagios instance. Enabling this option will tell Nagios
+# to translate any DOWN or UNREACHABLE host states it receives
+# passively into the correct state from the view of this server.
+# Values: 1 = perform translation, 0 = do not translate (default)
+
+translate_passive_host_checks=0
+
+
+
+# PASSIVE HOST CHECKS ARE SOFT OPTION
+# This determines whether or not Nagios will treat passive host
+# checks as being HARD or SOFT. By default, a passive host check
+# result will put a host into a HARD state type. This can be changed
+# by enabling this option.
+# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
+
+passive_host_checks_are_soft=0
+
+
+
+# ORPHANED HOST/SERVICE CHECK OPTIONS
+# These options determine whether or not Nagios will periodically
+# check for orphaned host and service checks. Since service checks are
+# not rescheduled until the results of their previous execution
+# instance are processed, there exists a possibility that some
+# checks may never get rescheduled. A similar situation exists for
+# host checks, although the exact scheduling details differ a bit
+# from service checks. Orphaned checks seem to be a rare
+# problem and should not happen under normal circumstances.
+# If you have problems with service checks never getting
+# rescheduled, make sure you have orphaned service checks enabled.
+# Values: 1 = enable checks, 0 = disable checks
+
+check_for_orphaned_services=1
+check_for_orphaned_hosts=1
+
+
+
+# SERVICE FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of service results. Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enable freshness checking, 0 = disable freshness checking
+
+check_service_freshness=1
+
+
+
+# SERVICE FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of service check results. If you have
+# disabled service freshness checking, this option has no effect.
+
+service_freshness_check_interval=60
+
+
+
+# HOST FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of host results. Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enable freshness checking, 0 = disable freshness checking
+
+check_host_freshness=0
+
+
+
+# HOST FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of host check results. If you have
+# disabled host freshness checking, this option has no effect.
+
+host_freshness_check_interval=60
+
+
+
+
+# ADDITIONAL FRESHNESS THRESHOLD LATENCY
+# This setting determines the number of seconds that Nagios
+# will add to any host and service freshness thresholds that
+# it calculates (those not explicitly specified by the user).
+
+additional_freshness_latency=15
+
+
+
+
+# FLAP DETECTION OPTION
+# This option determines whether or not Nagios will try
+# and detect hosts and services that are "flapping".
+# Flapping occurs when a host or service changes between
+# states too frequently. When Nagios detects that a
+# host or service is flapping, it will temporarily suppress
+# notifications for that host/service until it stops
+# flapping. Flap detection is very experimental, so read
+# the HTML documentation before enabling this feature!
+# Values: 1 = enable flap detection
+# 0 = disable flap detection (default)
+
+enable_flap_detection=1
+
+
+
+# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
+# Read the HTML documentation on flap detection for
+# an explanation of what this option does. This option
+# has no effect if flap detection is disabled.
+
+low_service_flap_threshold=5.0
+high_service_flap_threshold=20.0
+low_host_flap_threshold=5.0
+high_host_flap_threshold=20.0
+
+
+
+# DATE FORMAT OPTION
+# This option determines how short dates are displayed. Valid options
+# include:
+# us (MM-DD-YYYY HH:MM:SS)
+# euro (DD-MM-YYYY HH:MM:SS)
+# iso8601 (YYYY-MM-DD HH:MM:SS)
+# strict-iso8601 (YYYY-MM-DDTHH:MM:SS)
+#
+
+date_format=iso8601
+
+
+
+
+# TIMEZONE OFFSET
+# This option is used to override the default timezone that this
+# instance of Nagios runs in. If not specified, Nagios will use
+# the system configured timezone.
+#
+# NOTE: In order to display the correct timezone in the CGIs, you
+# will also need to alter the Apache directives for the CGI path
+# to include your timezone. Example:
+#
+# <Directory "/usr/local/nagios/sbin/">
+# SetEnv TZ "Australia/Brisbane"
+# ...
+# </Directory>
+
+#use_timezone=US/Mountain
+#use_timezone=Australia/Brisbane
+
+
+
+
+# P1.PL FILE LOCATION
+# This value determines where the p1.pl perl script (used by the
+# embedded Perl interpreter) is located. If you didn't compile
+# Nagios with embedded Perl support, this option has no effect.
+
+p1_file=/usr/lib/nagios3/p1.pl
+
+
+
+# EMBEDDED PERL INTERPRETER OPTION
+# This option determines whether or not the embedded Perl interpreter
+# will be enabled during runtime. This option has no effect if Nagios
+# has not been compiled with support for embedded Perl.
+# Values: 0 = disable interpreter, 1 = enable interpreter
+
+enable_embedded_perl=1
+
+
+
+# EMBEDDED PERL USAGE OPTION
+# This option determines whether or not Nagios will process Perl plugins
+# and scripts with the embedded Perl interpreter if the plugins/scripts
+# do not explicitly indicate whether or not it is okay to do so. Read
+# the HTML documentation on the embedded Perl interpreter for more
+# information on how this option works.
+
+use_embedded_perl_implicitly=1
+
+
+
+# ILLEGAL OBJECT NAME CHARACTERS
+# This option allows you to specify illegal characters that cannot
+# be used in host names, service descriptions, or names of other
+# object types.
+
+illegal_object_name_chars=`~!$%^&*|'"<>?,()=
+
+
+
+# ILLEGAL MACRO OUTPUT CHARACTERS
+# This option allows you to specify illegal characters that are
+# stripped from macros before being used in notifications, event
+# handlers, etc. This DOES NOT affect macros used in service or
+# host check commands.
+# The following macros are stripped of the characters you specify:
+# $HOSTOUTPUT$
+# $HOSTPERFDATA$
+# $HOSTACKAUTHOR$
+# $HOSTACKCOMMENT$
+# $SERVICEOUTPUT$
+# $SERVICEPERFDATA$
+# $SERVICEACKAUTHOR$
+# $SERVICEACKCOMMENT$
+
+illegal_macro_output_chars=`~$&|'"<>
+
+
+
+# REGULAR EXPRESSION MATCHING
+# This option controls whether or not regular expression matching
+# takes place in the object config files. Regular expression
+# matching is used to match host, hostgroup, service, and service
+# group names/descriptions in some fields of various object types.
+# Values: 1 = enable regexp matching, 0 = disable regexp matching
+
+use_regexp_matching=0
+
+
+
+# "TRUE" REGULAR EXPRESSION MATCHING
+# This option controls whether or not "true" regular expression
+# matching takes place in the object config files. This option
+# only has an effect if regular expression matching is enabled
+# (see above). If this option is DISABLED, regular expression
+# matching only occurs if a string contains wildcard characters
+# (* and ?). If the option is ENABLED, regexp matching occurs
+# all the time (which can be annoying).
+# Values: 1 = enable true matching, 0 = disable true matching
+
+use_true_regexp_matching=0
+
+
+
+# ADMINISTRATOR EMAIL/PAGER ADDRESSES
+# The email and pager address of a global administrator (likely you).
+# Nagios never uses these values itself, but you can access them by
+# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
+# commands.
+
+admin_email=root@localhost
+admin_pager=pageroot@localhost
+
+
+
+# DAEMON CORE DUMP OPTION
+# This option determines whether or not Nagios is allowed to create
+# a core dump when it runs as a daemon. Note that it is generally
+# considered bad form to allow this, but it may be useful for
+# debugging purposes. Enabling this option doesn't guarantee that
+# a core file will be produced, but that's just life...
+# Values: 1 - Allow core dumps
+# 0 - Do not allow core dumps (default)
+
+daemon_dumps_core=0
+
+
+
+# LARGE INSTALLATION TWEAKS OPTION
+# This option determines whether or not Nagios will take some shortcuts
+# which can save on memory and CPU usage in large Nagios installations.
+# Read the documentation for more information on the benefits/tradeoffs
+# of enabling this option.
+# Values: 1 - Enable tweaks
+# 0 - Disable tweaks (default)
+
+use_large_installation_tweaks=0
+
+
+
+# ENABLE ENVIRONMENT MACROS
+# This option determines whether or not Nagios will make all standard
+# macros available as environment variables when host/service checks
+# and system commands (event handlers, notifications, etc.) are
+# executed. Enabling this option can cause performance issues in
+# large installations, as it will consume a bit more memory and (more
+# importantly) consume more CPU.
+# Values: 1 - Enable environment variable macros (default)
+# 0 - Disable environment variable macros
+
+enable_environment_macros=1
+
+
+
+# CHILD PROCESS MEMORY OPTION
+# This option determines whether or not Nagios will free memory in
+# child processes (processes used to execute system commands and host/
+# service checks). If you specify a value here, it will override
+# program defaults.
+# Value: 1 - Free memory in child processes
+# 0 - Do not free memory in child processes
+
+#free_child_process_memory=1
+
+
+
+# CHILD PROCESS FORKING BEHAVIOR
+# This option determines how Nagios will fork child processes
+# (used to execute system commands and host/service checks). Normally
+# child processes are fork()ed twice, which provides a very high level
+# of isolation from problems. Fork()ing once is probably enough and will
+# save a great deal on CPU usage (in large installs), so you might
+# want to consider using this. If you specify a value here, it will
+# override program defaults.
+# Value: 1 - Child processes fork() twice
+# 0 - Child processes fork() just once
+
+#child_processes_fork_twice=1
+
+
+
+# DEBUG LEVEL
+# This option determines how much (if any) debugging information will
+# be written to the debug file. OR values together to log multiple
+# types of information.
+# Values:
+# -1 = Everything
+# 0 = Nothing
+# 1 = Functions
+# 2 = Configuration
+# 4 = Process information
+# 8 = Scheduled events
+# 16 = Host/service checks
+# 32 = Notifications
+# 64 = Event broker
+# 128 = External commands
+# 256 = Commands
+# 512 = Scheduled downtime
+# 1024 = Comments
+# 2048 = Macros
+
+debug_level=0
+
+
+
+# DEBUG VERBOSITY
+# This option determines how verbose the debug log output will be.
+# Values: 0 = Brief output
+# 1 = More detailed
+# 2 = Very detailed
+
+debug_verbosity=1
+
+
+
+# DEBUG FILE
+# This option determines where Nagios should write debugging information.
+
+debug_file=/var/lib/nagios3/nagios.debug
+
+
+
+# MAX DEBUG FILE SIZE
+# This option determines the maximum size (in bytes) of the debug file. If
+# the file grows larger than this size, it will be renamed with a .old
+# extension. If a file already exists with a .old extension it will
+# automatically be deleted. This helps ensure your disk space usage doesn't
+# get out of control when debugging Nagios.
+
+max_debug_file_size=1000000
+
+process_performance_data=1
+service_perfdata_file=/var/lib/nagios3/service-perfdata
+service_perfdata_file_template=DATATYPE::SERVICEPERFDATA\tTIMET::$TIMET$\tHOSTNAME::$HOSTNAME$\tSERVICEDESC::$SERVICEDESC$\tSERVICEPERFDATA::$SERVICEPERFDATA$\tSERVICECHECKCOMMAND::$SERVICECHECKCOMMAND$\tHOSTSTATE::$HOSTSTATE$\tHOSTSTATETYPE::$HOSTSTATETYPE$\tSERVICESTATE::$SERVICESTATE$\tSERVICESTATETYPE::$SERVICESTATETYPE$
+service_perfdata_file_mode=a
+service_perfdata_file_processing_interval=15
+service_perfdata_file_processing_command=process-service-perfdata-file-pnp4nagios-bulk-npcd
+host_perfdata_file=/var/lib/nagios3/host-perfdata
+host_perfdata_file_template=DATATYPE::HOSTPERFDATA\tTIMET::$TIMET$\tHOSTNAME::$HOSTNAME$\tHOSTPERFDATA::$HOSTPERFDATA$\tHOSTCHECKCOMMAND::$HOSTCHECKCOMMAND$\tHOSTSTATE::$HOSTSTATE$\tHOSTSTATETYPE::$HOSTSTATETYPE$
+host_perfdata_file_mode=a
+host_perfdata_file_processing_interval=15
+host_perfdata_file_processing_command=process-host-perfdata-file-pnp4nagios-bulk-npcd
+
diff --git a/puppet/modules/site_nagios/files/plugins/check_last_regex_in_log b/puppet/modules/site_nagios/files/plugins/check_last_regex_in_log
new file mode 100755
index 00000000..47569388
--- /dev/null
+++ b/puppet/modules/site_nagios/files/plugins/check_last_regex_in_log
@@ -0,0 +1,85 @@
+#!/bin/sh
+#
+# depends on nagios-plugins-common for /usr/lib/nagios/plugins/utils.sh
+# this package is installed by leap_platform via the Site_check_mk::Agent::Mrpe
+# class
+
+set -e
+
+usage()
+{
+cat << EOF
+usage: $0 -w <sec> -c <sec> -r <regexp> -f <filename>
+
+OPTIONS:
+ -h Show this message
+ -r <regex> regex to grep for
+ -f <file> logfile to search in
+ -w <sec> warning state after X seconds
+ -c <sec> critical state after x seconds
+
+example: $0 -f /var/log/syslog -r 'tapicero' -w 300 -c 600
+EOF
+}
+
+
+. /usr/lib/nagios/plugins/utils.sh
+
+
+warn=0
+crit=0
+log=''
+regex=''
+
+set -- $(getopt hr:f:w:c: "$@")
+while [ $# -gt 0 ]
+do
+ case "$1" in
+ (-h) usage; exit 0 ;;
+ (-f) log="$2"; shift;;
+ (-r) regex="$2"; shift;;
+ (-w) warn="$2"; shift;;
+ (-c) crit="$2"; shift;;
+ (--) shift; break;;
+ (-*) echo "$0: error - unrecognized option $1" 1>&2; exit 1;;
+ (*) break;;
+ esac
+ shift
+done
+
+[ $warn -eq 0 -o $crit -eq 0 -o -z "$regex" -o -z "$log" ] && ( usage; exit $STATE_UNKNOWN)
+[ -f "$log" ] || (echo "$log doesn't exist"; exit $STATE_UNKNOWN)
+
+lastmsg=$(tac "$log" | grep -i "$regex" | head -1 | sed 's/  / /g' | cut -d' ' -f 1-3)
+
+if [ -z "$lastmsg" ]
+then
+ summary="\"$regex\" in $log was not found"
+ state=$STATE_CRITICAL
+ state_text='CRITICAL'
+ diff_sec=0
+else
+ lastmsg_sec=$(date '+%s' -d "$lastmsg")
+ now_sec=$(date '+%s')
+
+ diff_sec=$(($now_sec - $lastmsg_sec))
+
+ if [ $diff_sec -lt $warn ]; then
+ state=$STATE_OK
+ state_text='OK'
+ elif [ $diff_sec -lt $crit ]; then
+ state=$STATE_WARNING
+ state_text='WARNING'
+ else
+ state=$STATE_CRITICAL
+ state_text='CRITICAL'
+ fi
+
+ summary="Last occurrence of \"$regex\" in $log was $diff_sec sec ago"
+fi
+
+# check_mk_agent output
+# echo "$state Tapicero_Heatbeat sec=$diff_sec;$warn;$crit;0; $state_text - $summary"
+
+echo "${state_text}: $summary | seconds=${diff_sec};$warn;$crit;0;"
+exit $state
diff --git a/puppet/modules/site_nagios/manifests/add_host_services.pp b/puppet/modules/site_nagios/manifests/add_host_services.pp
new file mode 100644
index 00000000..bd968e6f
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/add_host_services.pp
@@ -0,0 +1,32 @@
+define site_nagios::add_host_services (
+ $domain_full_suffix,
+ $domain_internal,
+ $domain_internal_suffix,
+ $ip_address,
+ $services,
+ $ssh_port,
+ $environment,
+ $openvpn_gateway_address='',
+ ) {
+
+ $nagios_hostname = $domain_internal
+
+ # Add Nagios service
+
+ # First, we need to turn the service array into a hash, using a "hash template" (see the sketch below)
+ # see https://github.com/ashak/puppet-resource-looping
+ $nagios_service_hashpart = {
+ 'hostname' => $nagios_hostname,
+ 'ip_address' => $ip_address,
+ 'openvpn_gw' => $openvpn_gateway_address,
+ 'environment' => $environment
+ }
+ $dynamic_parameters = {
+ 'service' => '%s'
+ }
+ $nagios_servicename = "${nagios_hostname}_%s"
+
+ $nagios_service_hash = create_resources_hash_from($nagios_servicename, $services, $nagios_service_hashpart, $dynamic_parameters)
+
+ create_resources ( site_nagios::add_service, $nagios_service_hash )
+}
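
To illustrate the "hash template" technique used above: for a node offering
a single 'webapp' service, create_resources_hash_from is expected to return
roughly the following hash (host name, address and environment are made up),
which create_resources() then expands into one site_nagios::add_service
resource:

  $example_service_hash = {
    'node1.example.i_webapp' => {
      'hostname'    => 'node1.example.i',
      'ip_address'  => '198.51.100.10',
      'openvpn_gw'  => '',
      'environment' => 'production',
      'service'     => 'webapp',
    },
  }
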
diff --git a/puppet/modules/site_nagios/manifests/add_service.pp b/puppet/modules/site_nagios/manifests/add_service.pp
new file mode 100644
index 00000000..72cd038a
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/add_service.pp
@@ -0,0 +1,32 @@
+define site_nagios::add_service (
+ $hostname, $ip_address, $service, $environment, $openvpn_gw = '') {
+
+ $ssh = hiera_hash('ssh')
+ $ssh_port = $ssh['port']
+
+ case $service {
+ 'webapp': {
+ nagios_service {
+ "${name}_ssh":
+ use => 'generic-service',
+ check_command => "check_ssh_port!${ssh_port}",
+ service_description => 'SSH',
+ host_name => $hostname,
+ contact_groups => $environment;
+ "${name}_cert":
+ use => 'generic-service',
+ check_command => 'check_https_cert',
+ service_description => 'Website Certificate',
+ host_name => $hostname,
+ contact_groups => $environment;
+ "${name}_website":
+ use => 'generic-service',
+ check_command => 'check_https',
+ service_description => 'Website',
+ host_name => $hostname,
+ contact_groups => $environment;
+ }
+ }
+ default: {}
+ }
+}
diff --git a/puppet/modules/site_nagios/manifests/init.pp b/puppet/modules/site_nagios/manifests/init.pp
new file mode 100644
index 00000000..f91bfc26
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/init.pp
@@ -0,0 +1,13 @@
+# setup nagios on monitoring node
+class site_nagios {
+ tag 'leap_service'
+
+ include site_config::default
+
+ Class['site_config::default'] -> Class['site_nagios']
+
+ include site_nagios::server
+
+ # remove leftovers on monitoring nodes
+ include site_config::remove::monitoring
+}
diff --git a/puppet/modules/site_nagios/manifests/plugins.pp b/puppet/modules/site_nagios/manifests/plugins.pp
new file mode 100644
index 00000000..90a01cfb
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/plugins.pp
@@ -0,0 +1,16 @@
+# Deploy generic plugins useful to all nodes
+# nagios::plugin won't work to deploy a plugin
+# because it complains with:
+# Could not find dependency Package[nagios-plugins] …
+# at /srv/leap/puppet/modules/nagios/manifests/plugin.pp:18
+class site_nagios::plugins {
+
+ file { [
+ '/usr/local/lib', '/usr/local/lib/nagios',
+ '/usr/local/lib/nagios/plugins' ]:
+ ensure => directory;
+ '/usr/local/lib/nagios/plugins/check_last_regex_in_log':
+ source => 'puppet:///modules/site_nagios/plugins/check_last_regex_in_log',
+ mode => '0755';
+ }
+}
diff --git a/puppet/modules/site_nagios/manifests/server.pp b/puppet/modules/site_nagios/manifests/server.pp
new file mode 100644
index 00000000..6537124d
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server.pp
@@ -0,0 +1,97 @@
+# configures nagios on monitoring node
+# lint:ignore:inherits_across_namespaces
+class site_nagios::server inherits nagios::base {
+# lint:endignore
+
+ $nagios_hiera = hiera('nagios')
+ $nagiosadmin_pw = htpasswd_sha1($nagios_hiera['nagiosadmin_pw'])
+ $nagios_hosts = $nagios_hiera['hosts']
+ $nagios_contacts = hiera('contacts')
+ $environment = $nagios_hiera['environments']
+
+ include nagios::base
+ include nagios::defaults::commands
+ include nagios::defaults::templates
+ include nagios::defaults::timeperiods
+ include nagios::pnp4nagios
+ include nagios::pnp4nagios::popup
+
+ class { 'nagios':
+ # don't manage the apache class from nagios, because we already
+ # include it in site_apache::common
+ httpd => 'absent',
+ allow_external_cmd => true,
+ storeconfigs => false,
+ }
+
+ # Delete nagios config files provided by packages.
+ # They don't get parsed by nagios.cfg, but are
+ # still irritating duplicates of the real config
+ # files deployed by puppet in /etc/nagios3/
+ file { [
+ '/etc/nagios3/conf.d/contacts_nagios2.cfg',
+ '/etc/nagios3/conf.d/extinfo_nagios2.cfg',
+ '/etc/nagios3/conf.d/generic-host_nagios2.cfg',
+ '/etc/nagios3/conf.d/generic-service_nagios2.cfg',
+ '/etc/nagios3/conf.d/hostgroups_nagios2.cfg',
+ '/etc/nagios3/conf.d/localhost_nagios2.cfg',
+ '/etc/nagios3/conf.d/pnp4nagios.cfg',
+ '/etc/nagios3/conf.d/services_nagios2.cfg',
+ '/etc/nagios3/conf.d/timeperiods_nagios2.cfg' ]:
+ ensure => absent;
+ }
+
+ # deploy apache nagios3 config;
+ # until https://gitlab.com/shared-puppet-modules-group/apache/issues/11
+ # is fixed, we need to deploy the config file manually
+ file {
+ '/etc/apache2/conf-available/nagios3.conf':
+ ensure => present,
+ source => 'puppet:///modules/nagios/configs/apache2.conf',
+ require => [ Package['nagios3'], Package['apache2'] ];
+ '/etc/apache2/conf-enabled/nagios3.conf':
+ ensure => link,
+ target => '/etc/apache2/conf-available/nagios3.conf',
+ require => [ Package['nagios3'], Package['apache2'] ];
+ }
+
+ include site_apache::common
+ include site_webapp::common_vhost
+ include apache::module::headers
+
+ File['nagios_htpasswd'] {
+ source => undef,
+ content => "nagiosadmin:${nagiosadmin_pw}",
+ mode => '0640',
+ }
+
+
+ # deploy serverside plugins
+ file { '/usr/lib/nagios/plugins/check_openvpn_server.pl':
+ source => 'puppet:///modules/nagios/plugins/check_openvpn_server.pl',
+ mode => '0755',
+ owner => 'nagios',
+ group => 'nagios',
+ require => Package['nagios-plugins'];
+ }
+
+ create_resources ( site_nagios::add_host_services, $nagios_hosts )
+
+ include site_nagios::server::apache
+ include site_check_mk::server
+ include site_shorewall::monitor
+ include site_nagios::server::icli
+
+ augeas {
+ 'logrotate_nagios':
+ context => '/files/etc/logrotate.d/nagios/rule',
+ changes => [ 'set file /var/log/nagios3/nagios.log', 'set rotate 7',
+ 'set schedule daily', 'set compress compress',
+ 'set missingok missingok', 'set ifempty notifempty',
+ 'set copytruncate copytruncate' ]
+ }
+
+ create_resources ( site_nagios::server::hostgroup, $environment )
+ create_resources ( site_nagios::server::contactgroup, $environment )
+ create_resources ( site_nagios::server::add_contacts, $environment )
+}
diff --git a/puppet/modules/site_nagios/manifests/server/add_contacts.pp b/puppet/modules/site_nagios/manifests/server/add_contacts.pp
new file mode 100644
index 00000000..b5c6f0a5
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/add_contacts.pp
@@ -0,0 +1,18 @@
+# configure a nagios_contact
+define site_nagios::server::add_contacts ($contact_emails) {
+
+ $environment = $name
+
+ nagios_contact {
+ $environment:
+ alias => $environment,
+ service_notification_period => '24x7',
+ host_notification_period => '24x7',
+ service_notification_options => 'w,u,c,r',
+ host_notification_options => 'd,r',
+ service_notification_commands => 'notify-service-by-email',
+ host_notification_commands => 'notify-host-by-email',
+ email => join($contact_emails, ', '),
+ require => Package['nagios']
+ }
+}
diff --git a/puppet/modules/site_nagios/manifests/server/apache.pp b/puppet/modules/site_nagios/manifests/server/apache.pp
new file mode 100644
index 00000000..82962e89
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/apache.pp
@@ -0,0 +1,25 @@
+# set up apache for nagios
+class site_nagios::server::apache {
+
+ include x509::variables
+
+ include site_config::x509::commercial::cert
+ include site_config::x509::commercial::key
+ include site_config::x509::commercial::ca
+
+ include apache::module::authn_file
+ # "AuthUserFile"
+ include apache::module::authz_user
+ # "AuthType Basic"
+ include apache::module::auth_basic
+ # "DirectoryIndex"
+ include apache::module::dir
+ include apache::module::php5
+ include apache::module::cgi
+
+ # apache >= 2.4, debian jessie
+ if ( $::lsbdistcodename == 'jessie' ) {
+ include apache::module::authn_core
+ }
+
+}
diff --git a/puppet/modules/site_nagios/manifests/server/contactgroup.pp b/puppet/modules/site_nagios/manifests/server/contactgroup.pp
new file mode 100644
index 00000000..5e60dd06
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/contactgroup.pp
@@ -0,0 +1,8 @@
+# configure a contactgroup
+define site_nagios::server::contactgroup ($contact_emails) {
+
+ nagios_contactgroup { $name:
+ members => $name,
+ require => Package['nagios']
+ }
+}
diff --git a/puppet/modules/site_nagios/manifests/server/hostgroup.pp b/puppet/modules/site_nagios/manifests/server/hostgroup.pp
new file mode 100644
index 00000000..0692fced
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/hostgroup.pp
@@ -0,0 +1,7 @@
+# create a nagios hostgroup
+define site_nagios::server::hostgroup ($contact_emails) {
+ nagios_hostgroup { $name:
+ ensure => present,
+ require => Package['nagios']
+ }
+}
diff --git a/puppet/modules/site_nagios/manifests/server/icli.pp b/puppet/modules/site_nagios/manifests/server/icli.pp
new file mode 100644
index 00000000..26fba725
--- /dev/null
+++ b/puppet/modules/site_nagios/manifests/server/icli.pp
@@ -0,0 +1,26 @@
+# Install icli package and configure ncli aliases
+class site_nagios::server::icli {
+ $nagios_hiera = hiera('nagios')
+ $environments = $nagios_hiera['environments']
+
+ package { 'icli':
+ ensure => installed;
+ }
+
+ file { '/root/.bashrc':
+ ensure => present;
+ }
+
+ file_line { 'icli aliases':
+ path => '/root/.bashrc',
+ line => 'source /root/.icli_aliases';
+ }
+
+ file { '/root/.icli_aliases':
+ content => template("${module_name}/icli_aliases.erb"),
+ mode => '0644',
+ owner => root,
+ group => 0,
+ require => Package['icli'];
+ }
+}
\ No newline at end of file
diff --git a/puppet/modules/site_nagios/templates/icli_aliases.erb b/puppet/modules/site_nagios/templates/icli_aliases.erb
new file mode 100644
index 00000000..bcb2abb0
--- /dev/null
+++ b/puppet/modules/site_nagios/templates/icli_aliases.erb
@@ -0,0 +1,7 @@
+alias ncli='icli -c /var/cache/nagios3/objects.cache -f /var/cache/nagios3/status.dat -F /var/lib/nagios3/rw/nagios.cmd'
+alias ncli_problems='ncli -z '!o,!A''
+
+<% @environments.keys.sort.each do |env_name| %>
+alias ncli_<%= env_name %>='ncli -z '!o,!A' -g <%= env_name %>'
+alias ncli_<%= env_name %>_recheck='ncli -s Check_MK -g <%= env_name %> -a R'
+<% end -%>
diff --git a/puppet/modules/site_nickserver/manifests/init.pp b/puppet/modules/site_nickserver/manifests/init.pp
new file mode 100644
index 00000000..eb4415e7
--- /dev/null
+++ b/puppet/modules/site_nickserver/manifests/init.pp
@@ -0,0 +1,178 @@
+#
+# TODO: currently, this is dependent on some things that are set up in
+# site_webapp
+#
+# (1) HAProxy -> couchdb
+# (2) Apache
+#
+# It would be good in the future to make nickserver installable independently of
+# site_webapp.
+#
+
+class site_nickserver {
+ tag 'leap_service'
+ Class['site_config::default'] -> Class['site_nickserver']
+
+ include site_config::ruby::dev
+
+ #
+ # VARIABLES
+ #
+
+ $nickserver = hiera('nickserver')
+ $nickserver_domain = $nickserver['domain']
+ $couchdb_user = $nickserver['couchdb_nickserver_user']['username']
+ $couchdb_password = $nickserver['couchdb_nickserver_user']['password']
+
+ # the port that the public connects to (should be 6425)
+ $nickserver_port = $nickserver['port']
+ # the port that nickserver is actually running on
+ $nickserver_local_port = '64250'
+
+ # couchdb is available on localhost via haproxy, which is bound to 4096.
+ $couchdb_host = 'localhost'
+ # See site_webapp/templates/haproxy_couchdb.cfg.erb
+ $couchdb_port = '4096'
+
+ $sources = hiera('sources')
+
+ # temporarily for now:
+ $domain = hiera('domain')
+ $address_domain = $domain['full_suffix']
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+
+ #
+ # USER AND GROUP
+ #
+
+ group { 'nickserver':
+ ensure => present,
+ allowdupe => false;
+ }
+
+ user { 'nickserver':
+ ensure => present,
+ allowdupe => false,
+ gid => 'nickserver',
+ home => '/srv/leap/nickserver',
+ require => Group['nickserver'];
+ }
+
+ vcsrepo { '/srv/leap/nickserver':
+ ensure => present,
+ revision => $sources['nickserver']['revision'],
+ provider => $sources['nickserver']['type'],
+ source => $sources['nickserver']['source'],
+ owner => 'nickserver',
+ group => 'nickserver',
+ require => [ User['nickserver'], Group['nickserver'] ],
+ notify => Exec['nickserver_bundler_update'];
+ }
+
+ exec { 'nickserver_bundler_update':
+ cwd => '/srv/leap/nickserver',
+ command => '/bin/bash -c "/usr/bin/bundle check || /usr/bin/bundle install --path vendor/bundle"',
+ unless => '/usr/bin/bundle check',
+ user => 'nickserver',
+ timeout => 600,
+ require => [
+ Class['bundler::install'], Vcsrepo['/srv/leap/nickserver'],
+ Package['libssl-dev'], Class['site_config::ruby::dev'] ],
+
+ notify => Service['nickserver'];
+ }
+
+ #
+ # NICKSERVER CONFIG
+ #
+
+ file { '/etc/nickserver.yml':
+ content => template('site_nickserver/nickserver.yml.erb'),
+ owner => nickserver,
+ group => nickserver,
+ mode => '0600',
+ notify => Service['nickserver'];
+ }
+
+ #
+ # NICKSERVER DAEMON
+ #
+
+ file {
+ '/usr/bin/nickserver':
+ ensure => link,
+ target => '/srv/leap/nickserver/bin/nickserver',
+ require => Vcsrepo['/srv/leap/nickserver'];
+
+ '/etc/init.d/nickserver':
+ owner => root,
+ group => 0,
+ mode => '0755',
+ source => '/srv/leap/nickserver/dist/debian-init-script',
+ require => Vcsrepo['/srv/leap/nickserver'];
+ }
+
+ # register initscript at systemd on nodes newer than wheezy
+ # see https://leap.se/code/issues/7614
+ case $::operatingsystemrelease {
+ /^7.*/: { }
+ default: {
+ exec { 'register_systemd_nickserver':
+ refreshonly => true,
+ command => '/bin/systemctl enable nickserver',
+ subscribe => File['/etc/init.d/nickserver'],
+ before => Service['nickserver'];
+ }
+ }
+ }
+
+ service { 'nickserver':
+ ensure => running,
+ enable => true,
+ hasrestart => true,
+ hasstatus => true,
+ require => [
+ File['/etc/init.d/nickserver'],
+ File['/usr/bin/nickserver'],
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ];
+ }
+
+ #
+ # FIREWALL
+ # poke a hole in the firewall to allow nickserver requests
+ #
+
+ file { '/etc/shorewall/macro.nickserver':
+ content => "PARAM - - tcp ${nickserver_port}",
+ notify => Service['shorewall'],
+ require => Package['shorewall'];
+ }
+
+ shorewall::rule { 'net2fw-nickserver':
+ source => 'net',
+ destination => '$FW',
+ action => 'nickserver(ACCEPT)',
+ order => 200;
+ }
+
+ #
+ # APACHE REVERSE PROXY
+# nickserver doesn't speak TLS natively; let Apache handle that.
+ #
+
+ apache::module {
+ 'proxy': ensure => present;
+ 'proxy_http': ensure => present
+ }
+
+ apache::vhost::file {
+ 'nickserver':
+ content => template('site_nickserver/nickserver-proxy.conf.erb')
+ }
+
+}
diff --git a/puppet/modules/site_nickserver/templates/nickserver-proxy.conf.erb b/puppet/modules/site_nickserver/templates/nickserver-proxy.conf.erb
new file mode 100644
index 00000000..8f59fe38
--- /dev/null
+++ b/puppet/modules/site_nickserver/templates/nickserver-proxy.conf.erb
@@ -0,0 +1,19 @@
+#
+# Apache reverse proxy configuration for the Nickserver
+#
+
+Listen 0.0.0.0:<%= @nickserver_port -%>
+
+<VirtualHost *:<%= @nickserver_port -%>>
+ ServerName <%= @nickserver_domain %>
+ ServerAlias <%= @address_domain %>
+
+ SSLCACertificatePath /etc/ssl/certs
+ SSLCertificateKeyFile <%= scope.lookupvar('x509::variables::keys') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.key
+ SSLCertificateFile <%= scope.lookupvar('x509::variables::certs') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.crt
+
+ Include include.d/ssl_common.inc
+
+ ProxyPass / http://localhost:<%= @nickserver_local_port %>/
+ # preserve the Host header in the HTTP request
+ ProxyPreserveHost On
+</VirtualHost>
diff --git a/puppet/modules/site_nickserver/templates/nickserver.yml.erb b/puppet/modules/site_nickserver/templates/nickserver.yml.erb
new file mode 100644
index 00000000..e717cbaa
--- /dev/null
+++ b/puppet/modules/site_nickserver/templates/nickserver.yml.erb
@@ -0,0 +1,19 @@
+#
+# configuration for nickserver.
+#
+
+domain: "<%= @address_domain %>"
+
+couch_host: "<%= @couchdb_host %>"
+couch_port: <%= @couchdb_port %>
+couch_database: "identities"
+couch_user: "<%= @couchdb_user %>"
+couch_password: "<%= @couchdb_password %>"
+
+hkp_url: "https://hkps.pool.sks-keyservers.net:/pks/lookup"
+
+user: "nickserver"
+port: <%= @nickserver_local_port %>
+pid_file: "/var/run/nickserver"
+log_file: "/var/log/nickserver.log"
+
diff --git a/puppet/modules/site_obfsproxy/README b/puppet/modules/site_obfsproxy/README
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/puppet/modules/site_obfsproxy/README
diff --git a/puppet/modules/site_obfsproxy/manifests/init.pp b/puppet/modules/site_obfsproxy/manifests/init.pp
new file mode 100644
index 00000000..2ed5ec9e
--- /dev/null
+++ b/puppet/modules/site_obfsproxy/manifests/init.pp
@@ -0,0 +1,38 @@
+class site_obfsproxy {
+ tag 'leap_service'
+ Class['site_config::default'] -> Class['site_obfsproxy']
+
+ $transport = 'scramblesuit'
+
+ $obfsproxy = hiera('obfsproxy')
+ $scramblesuit = $obfsproxy['scramblesuit']
+ $scram_pass = $scramblesuit['password']
+ $scram_port = $scramblesuit['port']
+ $dest_ip = $obfsproxy['gateway_address']
+ $dest_port = '443'
+
+ if member($::services, 'openvpn') {
+ $openvpn = hiera('openvpn')
+ $bind_address = $openvpn['gateway_address']
+ }
+ elsif member($::services, 'obfsproxy') {
+ $bind_address = hiera('ip_address')
+ }
+
+ include site_config::default
+
+ class { 'obfsproxy':
+ transport => $transport,
+ bind_address => $bind_address,
+ port => $scram_port,
+ param => $scram_pass,
+ dest_ip => $dest_ip,
+ dest_port => $dest_port,
+ }
+
+ include site_shorewall::obfsproxy
+
+}
+
+
+
diff --git a/puppet/modules/site_openvpn/README b/puppet/modules/site_openvpn/README
new file mode 100644
index 00000000..cef5be23
--- /dev/null
+++ b/puppet/modules/site_openvpn/README
@@ -0,0 +1,20 @@
+Place to look when debugging problems
+========================================
+
+Log files:
+
+ openvpn: /var/log/syslog
+ shorewall: /var/log/syslog
+ shorewall startup: /var/log/shorewall-init.log
+
+Check NAT masq:
+
+ iptables -t nat --list-rules
+
+Check interfaces:
+
+ ip addr ls
+
+Scripts:
+
+ /usr/local/bin/add_gateway_ips.sh
\ No newline at end of file
diff --git a/puppet/modules/site_openvpn/manifests/dh_key.pp b/puppet/modules/site_openvpn/manifests/dh_key.pp
new file mode 100644
index 00000000..13cc0f5b
--- /dev/null
+++ b/puppet/modules/site_openvpn/manifests/dh_key.pp
@@ -0,0 +1,10 @@
+class site_openvpn::dh_key {
+
+ $x509_config = hiera('x509')
+
+ file { '/etc/openvpn/keys/dh.pem':
+ content => $x509_config['dh'],
+ mode => '0644',
+ }
+
+}
diff --git a/puppet/modules/site_openvpn/manifests/init.pp b/puppet/modules/site_openvpn/manifests/init.pp
new file mode 100644
index 00000000..f1ecefb9
--- /dev/null
+++ b/puppet/modules/site_openvpn/manifests/init.pp
@@ -0,0 +1,238 @@
+#
+# An openvpn gateway can support three modes:
+#
+# (1) limited and unlimited
+# (2) unlimited only
+# (3) limited only
+#
+# The difference is that 'unlimited' gateways only allow client certs that match
+# the 'unlimited_prefix', and 'limited' gateways only allow certs that match the
+# 'limited_prefix'.
+#
+# We potentially create four openvpn config files (thus four daemons):
+#
+# (1) unlimited + tcp => tcp_config.conf
+# (2) unlimited + udp => udp_config.conf
+# (3) limited + tcp => limited_tcp_config.conf
+# (4) limited + udp => limited_udp_config.conf
+#
+
+class site_openvpn {
+ tag 'leap_service'
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca_bundle
+
+ include site_config::default
+ Class['site_config::default'] -> Class['site_openvpn']
+
+ include ::site_obfsproxy
+
+ $openvpn = hiera('openvpn')
+ $openvpn_ports = $openvpn['ports']
+ $openvpn_config = $openvpn['configuration']
+
+ if $::ec2_instance_id {
+ $openvpn_gateway_address = $::ipaddress
+ } else {
+ $openvpn_gateway_address = $openvpn['gateway_address']
+ if $openvpn['second_gateway_address'] {
+ $openvpn_second_gateway_address = $openvpn['second_gateway_address']
+ } else {
+ $openvpn_second_gateway_address = undef
+ }
+ }
+
+ $openvpn_allow_unlimited = $openvpn['allow_unlimited']
+ $openvpn_unlimited_prefix = $openvpn['unlimited_prefix']
+ $openvpn_unlimited_tcp_network_prefix = '10.41.0'
+ $openvpn_unlimited_tcp_netmask = '255.255.248.0'
+ $openvpn_unlimited_tcp_cidr = '21'
+ $openvpn_unlimited_udp_network_prefix = '10.42.0'
+ $openvpn_unlimited_udp_netmask = '255.255.248.0'
+ $openvpn_unlimited_udp_cidr = '21'
+
+ if !$::ec2_instance_id {
+ $openvpn_allow_limited = $openvpn['allow_limited']
+ $openvpn_limited_prefix = $openvpn['limited_prefix']
+ $openvpn_rate_limit = $openvpn['rate_limit']
+ $openvpn_limited_tcp_network_prefix = '10.43.0'
+ $openvpn_limited_tcp_netmask = '255.255.248.0'
+ $openvpn_limited_tcp_cidr = '21'
+ $openvpn_limited_udp_network_prefix = '10.44.0'
+ $openvpn_limited_udp_netmask = '255.255.248.0'
+ $openvpn_limited_udp_cidr = '21'
+ }
+
+ # find out the netmask in cidr format of the primary IF
+ # thx to https://blog.kumina.nl/tag/puppet-tips-and-tricks/
+ # we can do this using an inline_template:
+ $factname_primary_netmask = "netmask_cidr_${::site_config::params::interface}"
+ $primary_netmask = inline_template('<%= scope.lookupvar(@factname_primary_netmask) %>')
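+ # For example (hypothetical values): if the primary interface is 'eth0',
+ # this resolves the custom fact 'netmask_cidr_eth0', so $primary_netmask
+ # would end up as something like '24'.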
+
+ # deploy dh keys
+ include site_openvpn::dh_key
+
+ if $openvpn_allow_unlimited and $openvpn_allow_limited {
+ $unlimited_gateway_address = $openvpn_gateway_address
+ $limited_gateway_address = $openvpn_second_gateway_address
+ } elsif $openvpn_allow_unlimited {
+ $unlimited_gateway_address = $openvpn_gateway_address
+ $limited_gateway_address = undef
+ } elsif $openvpn_allow_limited {
+ $unlimited_gateway_address = undef
+ $limited_gateway_address = $openvpn_gateway_address
+ }
+
+ if $openvpn_allow_unlimited {
+ site_openvpn::server_config { 'tcp_config':
+ port => '1194',
+ proto => 'tcp',
+ local => $unlimited_gateway_address,
+ tls_remote => "\"${openvpn_unlimited_prefix}\"",
+ server => "${openvpn_unlimited_tcp_network_prefix}.0 ${openvpn_unlimited_tcp_netmask}",
+ push => "\"dhcp-option DNS ${openvpn_unlimited_tcp_network_prefix}.1\"",
+ management => '127.0.0.1 1000',
+ config => $openvpn_config
+ }
+ site_openvpn::server_config { 'udp_config':
+ port => '1194',
+ proto => 'udp',
+ local => $unlimited_gateway_address,
+ tls_remote => "\"${openvpn_unlimited_prefix}\"",
+ server => "${openvpn_unlimited_udp_network_prefix}.0 ${openvpn_unlimited_udp_netmask}",
+ push => "\"dhcp-option DNS ${openvpn_unlimited_udp_network_prefix}.1\"",
+ management => '127.0.0.1 1001',
+ config => $openvpn_config
+ }
+ } else {
+ tidy { '/etc/openvpn/tcp_config.conf': }
+ tidy { '/etc/openvpn/udp_config.conf': }
+ }
+
+ if $openvpn_allow_limited {
+ site_openvpn::server_config { 'limited_tcp_config':
+ port => '1194',
+ proto => 'tcp',
+ local => $limited_gateway_address,
+ tls_remote => "\"${openvpn_limited_prefix}\"",
+ server => "${openvpn_limited_tcp_network_prefix}.0 ${openvpn_limited_tcp_netmask}",
+ push => "\"dhcp-option DNS ${openvpn_limited_tcp_network_prefix}.1\"",
+ management => '127.0.0.1 1002',
+ config => $openvpn_config
+ }
+ site_openvpn::server_config { 'limited_udp_config':
+ port => '1194',
+ proto => 'udp',
+ local => $limited_gateway_address,
+ tls_remote => "\"${openvpn_limited_prefix}\"",
+ server => "${openvpn_limited_udp_network_prefix}.0 ${openvpn_limited_udp_netmask}",
+ push => "\"dhcp-option DNS ${openvpn_limited_udp_network_prefix}.1\"",
+ management => '127.0.0.1 1003',
+ config => $openvpn_config
+ }
+ } else {
+ tidy { '/etc/openvpn/limited_tcp_config.conf': }
+ tidy { '/etc/openvpn/limited_udp_config.conf': }
+ }
+
+ file {
+ '/usr/local/bin/add_gateway_ips.sh':
+ content => template('site_openvpn/add_gateway_ips.sh.erb'),
+ mode => '0755';
+ }
+
+ exec { '/usr/local/bin/add_gateway_ips.sh':
+ subscribe => File['/usr/local/bin/add_gateway_ips.sh'],
+ }
+
+ exec { 'restart_openvpn':
+ command => '/etc/init.d/openvpn restart',
+ refreshonly => true,
+ subscribe => [
+ File['/etc/openvpn'],
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca_bundle'] ],
+ require => [
+ Package['openvpn'],
+ File['/etc/openvpn'],
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca_bundle'] ];
+ }
+
+ cron { 'add_gateway_ips.sh':
+ command => '/usr/local/bin/add_gateway_ips.sh',
+ user => 'root',
+ special => 'reboot',
+ }
+
+ # setup the resolver to listen on the vpn IP
+ include site_openvpn::resolver
+
+ include site_shorewall::eip
+
+ package {
+ 'openvpn': ensure => latest
+ }
+
+ service {
+ 'openvpn':
+ ensure => running,
+ hasrestart => true,
+ hasstatus => true,
+ require => [
+ Package['openvpn'],
+ Exec['concat_/etc/default/openvpn'] ];
+ }
+
+ file {
+ '/etc/openvpn':
+ ensure => directory,
+ notify => Exec['restart_openvpn'],
+ require => Package['openvpn'];
+ }
+
+ file {
+ '/etc/openvpn/keys':
+ ensure => directory,
+ require => Package['openvpn'];
+ }
+
+ concat {
+ '/etc/default/openvpn':
+ owner => root,
+ group => root,
+ mode => 644,
+ warn => true,
+ notify => Service['openvpn'];
+ }
+
+ concat::fragment {
+ 'openvpn.default.header':
+ content => template('openvpn/etc-default-openvpn.erb'),
+ target => '/etc/default/openvpn',
+ order => 01;
+ }
+
+ concat::fragment {
+ "openvpn.default.autostart.${name}":
+ content => 'AUTOSTART=all',
+ target => '/etc/default/openvpn',
+ order => 10;
+ }
+
+ leap::logfile { 'openvpn_tcp': }
+ leap::logfile { 'openvpn_udp': }
+
+ # Because we currently do not support ipv6 and instead block it (so no leaks
+ # happen), we get a large number of these messages, so we ignore them (#6540)
+ rsyslog::snippet { '01-ignore_icmpv6_send':
+ content => ':msg, contains, "icmpv6_send: no reply to icmp error" ~'
+ }
+
+ include site_check_mk::agent::openvpn
+
+}
diff --git a/puppet/modules/site_openvpn/manifests/resolver.pp b/puppet/modules/site_openvpn/manifests/resolver.pp
new file mode 100644
index 00000000..cea0153a
--- /dev/null
+++ b/puppet/modules/site_openvpn/manifests/resolver.pp
@@ -0,0 +1,50 @@
+class site_openvpn::resolver {
+
+ if $site_openvpn::openvpn_allow_unlimited {
+ $ensure_unlimited = 'present'
+ file {
+ '/etc/unbound/unbound.conf.d/vpn_unlimited_udp_resolver.conf':
+ content => "server:\n\tinterface: ${site_openvpn::openvpn_unlimited_udp_network_prefix}.1\n\taccess-control: ${site_openvpn::openvpn_unlimited_udp_network_prefix}.0/${site_openvpn::openvpn_unlimited_udp_cidr} allow\n",
+ owner => root,
+ group => root,
+ mode => '0644',
+ require => [ Class['site_config::caching_resolver'], Service['openvpn'] ],
+ notify => Service['unbound'];
+ '/etc/unbound/unbound.conf.d/vpn_unlimited_tcp_resolver.conf':
+ content => "server:\n\tinterface: ${site_openvpn::openvpn_unlimited_tcp_network_prefix}.1\n\taccess-control: ${site_openvpn::openvpn_unlimited_tcp_network_prefix}.0/${site_openvpn::openvpn_unlimited_tcp_cidr} allow\n",
+ owner => root,
+ group => root,
+ mode => '0644',
+ require => [ Class['site_config::caching_resolver'], Service['openvpn'] ],
+ notify => Service['unbound'];
+ }
+ } else {
+ $ensure_unlimited = 'absent'
+ tidy { '/etc/unbound/unbound.conf.d/vpn_unlimited_udp_resolver.conf': }
+ tidy { '/etc/unbound/unbound.conf.d/vpn_unlimited_tcp_resolver.conf': }
+ }
+
+ if $site_openvpn::openvpn_allow_limited {
+ $ensure_limited = 'present'
+ file {
+ '/etc/unbound/unbound.conf.d/vpn_limited_udp_resolver.conf':
+ content => "server:\n\tinterface: ${site_openvpn::openvpn_limited_udp_network_prefix}.1\n\taccess-control: ${site_openvpn::openvpn_limited_udp_network_prefix}.0/${site_openvpn::openvpn_limited_udp_cidr} allow\n",
+ owner => root,
+ group => root,
+ mode => '0644',
+ require => [ Class['site_config::caching_resolver'], Service['openvpn'] ],
+ notify => Service['unbound'];
+ '/etc/unbound/unbound.conf.d/vpn_limited_tcp_resolver.conf':
+ content => "server\n\tinterface: ${site_openvpn::openvpn_limited_tcp_network_prefix}.1\n\taccess-control: ${site_openvpn::openvpn_limited_tcp_network_prefix}.0/${site_openvpn::openvpn_limited_tcp_cidr} allow\n",
+ owner => root,
+ group => root,
+ mode => '0644',
+ require => [ Class['site_config::caching_resolver'], Service['openvpn'] ],
+ notify => Service['unbound'];
+ }
+ } else {
+ $ensure_limited = 'absent'
+ tidy { '/etc/unbound/unbound.conf.d/vpn_limited_udp_resolver.conf': }
+ tidy { '/etc/unbound/unbound.conf.d/vpn_limited_tcp_resolver.conf': }
+ }
+}
diff --git a/puppet/modules/site_openvpn/manifests/server_config.pp b/puppet/modules/site_openvpn/manifests/server_config.pp
new file mode 100644
index 00000000..15e6fb38
--- /dev/null
+++ b/puppet/modules/site_openvpn/manifests/server_config.pp
@@ -0,0 +1,228 @@
+#
+# Cipher discussion
+# ================================
+#
+# We want to specify explicit values for the crypto options to prevent a MiTM from forcing
+# a weaker cipher. These should be set in both the server and the client ('auth' and 'cipher'
+# MUST be the same on both ends or no data will get transmitted).
+#
+# tls-cipher DHE-RSA-AES128-SHA
+#
+# dkg: For the TLS control channel, we want to make sure we choose a
+# key exchange mechanism that has PFS (meaning probably some form of ephemeral
+# Diffie-Hellman key exchange), and that uses a standard, well-tested cipher
+# (I recommend AES, and 128 bits is probably fine, since there are some known
+# weaknesses in the 192- and 256-bit key schedules). That leaves us with the
+# choice of public key algorithms: /usr/sbin/openvpn --show-tls | grep DHE |
+# grep AES128 | grep GCM.
+#
+# elijah:
+# I could not get any of these working:
+# * openvpn --show-tls | grep GCM
+# * openvpn --show-tls | grep DHE | grep AES128 | grep SHA256
+# so, i went with this:
+# * openvpn --show-tls | grep DHE | grep AES128 | grep -v SHA256 | grep -v GCM
+# Also, i couldn't get any of the elliptic curve algorithms to work. Not sure how
+# our cert generation interacts with the tls-cipher algorithms.
+#
+# note: in my tests, DHE-RSA-AES256-SHA is the one it negotiates if no value is set.
+#
+# auth SHA1
+#
+# dkg: For the HMAC digest used to authenticate packets, we just want SHA256. OpenVPN lists
+# a number of "digests" with names like "RSA-SHA256", but these are legacy and
+# should be avoided.
+#
+# elijah: i am not so sure that the digest algo matters for 'auth' option, because
+# i think an attacker would have to forge the digest in real time, which is still far from
+# a possibility for SHA1. So, i am leaving the default for now (SHA1).
+#
+# cipher AES-128-CBC
+#
+# dkg: For the choice of cipher, we need to select an algorithm and a
+# cipher mode. OpenVPN defaults to Blowfish, which is a fine algorithm - but
+# our control channel is already relying on AES not being broken; if the
+# control channel is cracked, then the key material for the tunnel is exposed,
+# and the choice of algorithm is moot. So it makes more sense to me to rely on
+# the same cipher here: AES128. As for the cipher mode, OFB seems cleaner to
+# me, but CBC is more well-tested, and the OpenVPN man page (at least as of
+# version 2.2.1) says "CBC is recommended and CFB and OFB should be considered
+# advanced modes."
+#
+# note: the default is BF-CBC (blowfish)
+#
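+# As a rough example following the discussion above, a provider's openvpn
+# 'configuration' hash would then end up rendering options like:
+#
+# tls-cipher DHE-RSA-AES128-SHA
+# auth SHA1
+# cipher AES-128-CBC
+#
+# (the actual values come from hiera and may differ per provider)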
+
+define site_openvpn::server_config(
+ $port, $proto, $local, $server, $push,
+ $management, $config, $tls_remote = undef) {
+
+ $openvpn_configname = $name
+ $shortname = regsubst(regsubst($name, '_config', ''), '_', '-')
+ $openvpn_status_filename = "/var/run/openvpn-status-${shortname}"
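+ # e.g. for the 'limited_udp_config' instance, $shortname becomes 'limited-udp'
+ # and the status file is /var/run/openvpn-status-limited-udp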
+
+ concat {
+ "/etc/openvpn/${openvpn_configname}.conf":
+ owner => root,
+ group => root,
+ mode => 644,
+ warn => true,
+ require => File['/etc/openvpn'],
+ before => Service['openvpn'],
+ notify => Exec['restart_openvpn'];
+ }
+
+ if $tls_remote != undef {
+ openvpn::option {
+ "tls-remote ${openvpn_configname}":
+ key => 'tls-remote',
+ value => $tls_remote,
+ server => $openvpn_configname;
+ }
+ }
+
+ # according to the openvpn man page, tcp-nodelay is "generally a good latency optimization".
+ if $proto == 'tcp' {
+ openvpn::option {
+ "tcp-nodelay ${openvpn_configname}":
+ key => 'tcp-nodelay',
+ server => $openvpn_configname;
+ }
+ } elsif $proto == 'udp' {
+ if $config['fragment'] != 1500 {
+ openvpn::option {
+ "fragment ${openvpn_configname}":
+ key => 'fragment',
+ value => $config['fragment'],
+ server => $openvpn_configname;
+ "mssfix ${openvpn_configname}":
+ key => 'mssfix',
+ server => $openvpn_configname;
+ }
+ }
+ }
+
+ openvpn::option {
+ "ca ${openvpn_configname}":
+ key => 'ca',
+ value => "${x509::variables::local_CAs}/${site_config::params::ca_bundle_name}.crt",
+ server => $openvpn_configname;
+ "cert ${openvpn_configname}":
+ key => 'cert',
+ value => "${x509::variables::certs}/${site_config::params::cert_name}.crt",
+ server => $openvpn_configname;
+ "key ${openvpn_configname}":
+ key => 'key',
+ value => "${x509::variables::keys}/${site_config::params::cert_name}.key",
+ server => $openvpn_configname;
+ "dh ${openvpn_configname}":
+ key => 'dh',
+ value => '/etc/openvpn/keys/dh.pem',
+ server => $openvpn_configname;
+ "tls-cipher ${openvpn_configname}":
+ key => 'tls-cipher',
+ value => $config['tls-cipher'],
+ server => $openvpn_configname;
+ "auth ${openvpn_configname}":
+ key => 'auth',
+ value => $config['auth'],
+ server => $openvpn_configname;
+ "cipher ${openvpn_configname}":
+ key => 'cipher',
+ value => $config['cipher'],
+ server => $openvpn_configname;
+ "dev ${openvpn_configname}":
+ key => 'dev',
+ value => 'tun',
+ server => $openvpn_configname;
+ "tun-ipv6 ${openvpn_configname}":
+ key => 'tun-ipv6',
+ server => $openvpn_configname;
+ "duplicate-cn ${openvpn_configname}":
+ key => 'duplicate-cn',
+ server => $openvpn_configname;
+ "keepalive ${openvpn_configname}":
+ key => 'keepalive',
+ value => $config['keepalive'],
+ server => $openvpn_configname;
+ "local ${openvpn_configname}":
+ key => 'local',
+ value => $local,
+ server => $openvpn_configname;
+ "mute ${openvpn_configname}":
+ key => 'mute',
+ value => '5',
+ server => $openvpn_configname;
+ "mute-replay-warnings ${openvpn_configname}":
+ key => 'mute-replay-warnings',
+ server => $openvpn_configname;
+ "management ${openvpn_configname}":
+ key => 'management',
+ value => $management,
+ server => $openvpn_configname;
+ "proto ${openvpn_configname}":
+ key => 'proto',
+ value => $proto,
+ server => $openvpn_configname;
+ "push1 ${openvpn_configname}":
+ key => 'push',
+ value => $push,
+ server => $openvpn_configname;
+ "push2 ${openvpn_configname}":
+ key => 'push',
+ value => '"redirect-gateway def1"',
+ server => $openvpn_configname;
+ "push-ipv6 ${openvpn_configname}":
+ key => 'push',
+ value => '"route-ipv6 2000::/3"',
+ server => $openvpn_configname;
+ "script-security ${openvpn_configname}":
+ key => 'script-security',
+ value => '1',
+ server => $openvpn_configname;
+ "server ${openvpn_configname}":
+ key => 'server',
+ value => $server,
+ server => $openvpn_configname;
+ "server-ipv6 ${openvpn_configname}":
+ key => 'server-ipv6',
+ value => '2001:db8:123::/64',
+ server => $openvpn_configname;
+ "status ${openvpn_configname}":
+ key => 'status',
+ value => "${openvpn_status_filename} 10",
+ server => $openvpn_configname;
+ "status-version ${openvpn_configname}":
+ key => 'status-version',
+ value => '3',
+ server => $openvpn_configname;
+ "topology ${openvpn_configname}":
+ key => 'topology',
+ value => 'subnet',
+ server => $openvpn_configname;
+ "verb ${openvpn_configname}":
+ key => 'verb',
+ value => '3',
+ server => $openvpn_configname;
+ "log-append /var/log/leap/openvpn_${proto}.log":
+ key => 'log-append',
+ value => "/var/log/leap/openvpn_${proto}.log",
+ server => $openvpn_configname;
+ }
+
+ # register openvpn services at systemd on nodes newer than wheezy
+ # see https://leap.se/code/issues/7798
+ case $::operatingsystemrelease {
+ /^7.*/: { }
+ default: {
+ exec { "enable_systemd_${openvpn_configname}":
+ refreshonly => true,
+ command => "/bin/systemctl enable openvpn@${openvpn_configname}",
+ subscribe => File["/etc/openvpn/${openvpn_configname}.conf"],
+ notify => Service["openvpn@${openvpn_configname}"];
+ }
+ service { "openvpn@${openvpn_configname}":
+ ensure => running
+ }
+ }
+ }
+}
diff --git a/puppet/modules/site_openvpn/templates/add_gateway_ips.sh.erb b/puppet/modules/site_openvpn/templates/add_gateway_ips.sh.erb
new file mode 100644
index 00000000..e76b756b
--- /dev/null
+++ b/puppet/modules/site_openvpn/templates/add_gateway_ips.sh.erb
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+ip addr show dev <%= scope.lookupvar('site_config::params::interface') %> | grep -q <%= @openvpn_gateway_address %>/<%= @primary_netmask %> ||
+ ip addr add <%= @openvpn_gateway_address %>/<%= @primary_netmask %> dev <%= scope.lookupvar('site_config::params::interface') %>
+
+<% if @openvpn_second_gateway_address %>
+ip addr show dev <%= scope.lookupvar('site_config::params::interface') %> | grep -q <%= @openvpn_second_gateway_address %>/<%= @primary_netmask %> ||
+ ip addr add <%= @openvpn_second_gateway_address %>/<%= @primary_netmask %> dev <%= scope.lookupvar('site_config::params::interface') %>
+<% end %>
+
+/bin/echo 1 > /proc/sys/net/ipv4/ip_forward
diff --git a/puppet/modules/site_postfix/files/checks/received_anon b/puppet/modules/site_postfix/files/checks/received_anon
new file mode 100644
index 00000000..9de25e63
--- /dev/null
+++ b/puppet/modules/site_postfix/files/checks/received_anon
@@ -0,0 +1,2 @@
+/^Received: from (.* \([-._[:alnum:]]+ \[[.[:digit:]]{7,15}\]\))([[:space:]]+).*(\(using [.[:alnum:]]+ with cipher [-A-Z0-9]+ \([0-9]+\/[0-9]+ bits\)\))[[:space:]]+\(Client CN "([-._@[:alnum:]]+)", Issuer "[[:print:]]+" \(verified OK\)\)[[:space:]]+by ([.[:alnum:]]+) \(([^)]+)\) with (E?SMTPS?A?) id ([A-F[:digit:]]+).*/
+ REPLACE Received: from [127.0.0.1] (localhost [127.0.0.1])${2}${3}${2}(Authenticated sender: $4)${2}with $7 id $8
diff --git a/puppet/modules/site_postfix/manifests/debug.pp b/puppet/modules/site_postfix/manifests/debug.pp
new file mode 100644
index 00000000..f370d166
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/debug.pp
@@ -0,0 +1,9 @@
+class site_postfix::debug {
+
+ postfix::config {
+ 'debug_peer_list': value => '127.0.0.1';
+ 'debug_peer_level': value => '1';
+ 'smtpd_tls_loglevel': value => '1';
+ }
+
+}
diff --git a/puppet/modules/site_postfix/manifests/mx.pp b/puppet/modules/site_postfix/manifests/mx.pp
new file mode 100644
index 00000000..c269946b
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx.pp
@@ -0,0 +1,152 @@
+#
+# configure mx node
+#
+class site_postfix::mx {
+
+ $domain_hash = hiera('domain')
+ $domain = $domain_hash['full_suffix']
+ $host_domain = $domain_hash['full']
+ $cert_name = hiera('name')
+ $mynetworks = join(hiera('mynetworks', ''), ' ')
+ $rbls = suffix(prefix(hiera('rbls', []), 'reject_rbl_client '), ',')
+
+ $root_mail_recipient = hiera('contacts')
+ $postfix_smtp_listen = 'all'
+ $postfix_use_postscreen = 'yes'
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::client_ca::ca
+ include site_config::x509::client_ca::key
+
+ postfix::config {
+ 'mynetworks':
+ value => "127.0.0.0/8 [::1]/128 [fe80::]/64 ${mynetworks}";
+ # Note: mydestination should not include @domain, because this is
+ # used in virtual alias maps.
+ 'mydestination':
+ value => "\$myorigin, localhost, localhost.\$mydomain";
+ 'myhostname':
+ value => $host_domain;
+ 'mailbox_size_limit':
+ value => '0';
+ 'home_mailbox':
+ value => '';
+ 'virtual_mailbox_domains':
+ value => 'deliver.local';
+ 'virtual_mailbox_base':
+ value => '/var/mail/leap-mx';
+ 'virtual_mailbox_maps':
+ value => 'static:Maildir/';
+ # Note: virtual-aliases map will take precedence over leap-mx
+ # lookup (tcp:localhost)
+ 'virtual_alias_maps':
+ value => 'hash:/etc/postfix/virtual-aliases tcp:localhost:4242';
+ 'luser_relay':
+ value => '';
+ # uid and gid are set to an arbitrary hard-coded value here; this
+ # must match the 'leap-mx' user/group
+ 'virtual_uid_maps':
+ value => 'static:42424';
+ 'virtual_gid_maps':
+ value => 'static:42424';
+ # the two following configs are needed for matching user's client cert
+ # fingerprints to enable relaying (#3634). Satellites do not have
+ # these configured.
+ 'smtpd_tls_fingerprint_digest':
+ value => 'sha1';
+ 'relay_clientcerts':
+ value => 'tcp:localhost:2424';
+ # Note: we are setting this here, instead of in site_postfix::mx::smtp_tls
+ # because the satellites need to have a different value
+ 'smtp_tls_security_level':
+ value => 'may';
+ # reject inbound mail to system users
+ # see https://leap.se/code/issues/6829
+ # this blocks *only* mails to system users, that don't appear in the
+ # alias map
+ 'local_recipient_maps':
+ value => '$alias_maps';
+ # setup clamav and opendkim on smtpd
+ 'smtpd_milters':
+ value => 'unix:/run/clamav/milter.ctl,inet:localhost:8891';
+ # setup opendkim for smtp (non-smtpd) outgoing mail
+ 'non_smtpd_milters':
+ value => 'inet:localhost:8891';
+ 'milter_default_action':
+ value => 'accept';
+ # Make sure that the right values are set; these could be set to different
+ # things at install time, depending on the preseed or debconf options
+ # selected (see #7478)
+ 'relay_transport':
+ value => 'relay';
+ 'default_transport':
+ value => 'smtp';
+ 'mailbox_command':
+ value => '';
+ 'header_checks':
+ value => '';
+ 'postscreen_access_list':
+ value => 'permit_mynetworks';
+ 'postscreen_greet_action':
+ value => 'enforce';
+ }
+
+ # Make sure that the cleanup service is not chrooted, otherwise it cannot
+ # access the opendkim milter socket (#8020)
+ exec { 'unset_cleanup_chroot':
+ command => '/usr/sbin/postconf -F "cleanup/unix/chroot=n"',
+ onlyif => '/usr/sbin/postconf -h -F "cleanup/unix/chroot" | egrep -q ^n',
+ notify => Service['postfix'],
+ require => File['/etc/postfix/master.cf']
+ }
+
+ include ::site_postfix::mx::smtpd_checks
+ include ::site_postfix::mx::checks
+ include ::site_postfix::mx::smtp_tls
+ include ::site_postfix::mx::smtpd_tls
+ include ::site_postfix::mx::static_aliases
+ include ::site_postfix::mx::rewrite_openpgp_header
+ include ::site_postfix::mx::received_anon
+ include ::clamav
+ include ::opendkim
+ include ::postfwd
+
+ # greater verbosity for debugging, take out for production
+ #include site_postfix::debug
+
+ case $::operatingsystemrelease {
+ /^7.*/: {
+ $smtpd_relay_restrictions=''
+ }
+ default: {
+ $smtpd_relay_restrictions=" -o smtpd_relay_restrictions=\$smtps_relay_restrictions\n"
+ }
+ }
+
+ $mastercf_tail = "
+smtps inet n - - - - smtpd
+ -o smtpd_tls_wrappermode=yes
+ -o smtpd_tls_security_level=encrypt
+ -o tls_preempt_cipherlist=yes
+${smtpd_relay_restrictions} -o smtpd_recipient_restrictions=\$smtps_recipient_restrictions
+ -o smtpd_helo_restrictions=\$smtps_helo_restrictions
+ -o smtpd_client_restrictions=
+ -o cleanup_service_name=clean_smtps
+clean_smtps unix n - n - 0 cleanup
+ -o header_checks=pcre:/etc/postfix/checks/rewrite_openpgp_headers,pcre:/etc/postfix/checks/received_anon"
+
+ class { 'postfix':
+ preseed => true,
+ root_mail_recipient => $root_mail_recipient,
+ smtp_listen => 'all',
+ mastercf_tail => $mastercf_tail,
+ use_postscreen => 'yes',
+ require => [
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Client_ca::Key'],
+ Class['Site_config::X509::Client_ca::Ca'],
+ User['leap-mx'] ]
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/checks.pp b/puppet/modules/site_postfix/manifests/mx/checks.pp
new file mode 100644
index 00000000..f406ad34
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/checks.pp
@@ -0,0 +1,23 @@
+class site_postfix::mx::checks {
+
+ file {
+ '/etc/postfix/checks':
+ ensure => directory,
+ mode => '0755',
+ owner => root,
+ group => postfix,
+ require => Package['postfix'];
+
+ '/etc/postfix/checks/helo_checks':
+ content => template('site_postfix/checks/helo_access.erb'),
+ mode => '0644',
+ owner => root,
+ group => root;
+ }
+
+ exec {
+ '/usr/sbin/postmap /etc/postfix/checks/helo_checks':
+ refreshonly => true,
+ subscribe => File['/etc/postfix/checks/helo_checks'];
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/received_anon.pp b/puppet/modules/site_postfix/manifests/mx/received_anon.pp
new file mode 100644
index 00000000..51ba3faa
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/received_anon.pp
@@ -0,0 +1,13 @@
+# Anonymize the user's home IP from the email headers (Feature #3866)
+class site_postfix::mx::received_anon {
+
+ package { 'postfix-pcre': ensure => installed, require => Package['postfix'] }
+
+ file { '/etc/postfix/checks/received_anon':
+ source => 'puppet:///modules/site_postfix/checks/received_anon',
+ mode => '0644',
+ owner => root,
+ group => root,
+ notify => Service['postfix']
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/rewrite_openpgp_header.pp b/puppet/modules/site_postfix/manifests/mx/rewrite_openpgp_header.pp
new file mode 100644
index 00000000..71f945b8
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/rewrite_openpgp_header.pp
@@ -0,0 +1,11 @@
+class site_postfix::mx::rewrite_openpgp_header {
+ $mx = hiera('mx')
+ $correct_domain = $mx['key_lookup_domain']
+
+ file { '/etc/postfix/checks/rewrite_openpgp_headers':
+ content => template('site_postfix/checks/rewrite_openpgp_headers.erb'),
+ mode => '0644',
+ owner => root,
+ group => root;
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/smtp_auth.pp b/puppet/modules/site_postfix/manifests/mx/smtp_auth.pp
new file mode 100644
index 00000000..afa70527
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/smtp_auth.pp
@@ -0,0 +1,6 @@
+class site_postfix::mx::smtp_auth {
+
+ postfix::config {
+ 'smtpd_tls_ask_ccert': value => 'yes';
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/smtp_tls.pp b/puppet/modules/site_postfix/manifests/mx/smtp_tls.pp
new file mode 100644
index 00000000..c93c3ba2
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/smtp_tls.pp
@@ -0,0 +1,43 @@
+# configure smtp tls
+class site_postfix::mx::smtp_tls {
+
+ include site_config::x509::ca
+ include x509::variables
+ $cert_name = hiera('name')
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+ include site_config::x509::cert
+ include site_config::x509::key
+
+ # smtp TLS
+ postfix::config {
+ 'smtp_use_tls': value => 'yes';
+ 'smtp_tls_CApath': value => '/etc/ssl/certs/';
+ 'smtp_tls_CAfile': value => $ca_path;
+ 'smtp_tls_cert_file': value => $cert_path;
+ 'smtp_tls_key_file': value => $key_path;
+ 'smtp_tls_loglevel': value => '1';
+ 'smtp_tls_exclude_ciphers':
+ value => 'aNULL, MD5, DES';
+ # upstream default is md5 (since 2.5 and older used it), we force sha1
+ 'smtp_tls_fingerprint_digest':
+ value => 'sha1';
+ 'smtp_tls_session_cache_database':
+ value => "btree:\${data_directory}/smtp_cache";
+ # see issue #4011
+ 'smtp_tls_protocols':
+ value => '!SSLv2, !SSLv3';
+ 'smtp_tls_mandatory_protocols':
+ value => '!SSLv2, !SSLv3';
+ 'tls_ssl_options':
+ value => 'NO_COMPRESSION';
+ # We can switch between the different postfix internal cipher lists by
+ # setting smtpd_tls_ciphers. For server-to-server connections we leave this
+ # at its default: encryption here is opportunistic, many mail servers only
+ # support outdated protocols and ciphers, and if we are too strict about
+ # required ciphers, connections *will* fall back to plain text. Bad ciphers
+ # are still better than plain-text transmission.
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/smtpd_checks.pp b/puppet/modules/site_postfix/manifests/mx/smtpd_checks.pp
new file mode 100644
index 00000000..291d7ee4
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/smtpd_checks.pp
@@ -0,0 +1,36 @@
+# smtpd checks for incoming mail on smtp port 25 and
+# mail sent via the bitmask client using smtps port 465
+class site_postfix::mx::smtpd_checks {
+
+ postfix::config {
+ 'smtpd_helo_required':
+ value => 'yes';
+ 'checks_dir':
+ value => '$config_directory/checks';
+ 'smtpd_client_restrictions':
+ value => "permit_mynetworks,${site_postfix::mx::rbls},permit";
+ 'smtpd_data_restrictions':
+ value => 'permit_mynetworks, reject_unauth_pipelining, permit';
+ 'smtpd_delay_reject':
+ value => 'yes';
+ 'smtpd_helo_restrictions':
+ value => 'permit_mynetworks, reject_invalid_helo_hostname, reject_non_fqdn_helo_hostname, check_helo_access hash:$checks_dir/helo_checks, permit';
+ 'smtpd_recipient_restrictions':
+ value => 'reject_unknown_recipient_domain, permit_mynetworks, check_recipient_access tcp:localhost:2244, reject_unauth_destination, permit';
+
+ # permit_tls_clientcerts will look up client cert fingerprints via the tcp
+ # lookup on port 2424 (based on what is configured in the relay_clientcerts
+ # parameter, see the site_postfix::mx postfix::config resource) to determine
+ # if a client is allowed to relay mail through us. This enables us to
+ # disable a user by removing their valid client cert (#3634)
+ 'smtps_recipient_restrictions':
+ value => 'permit_tls_clientcerts, check_recipient_access tcp:localhost:2244, reject_unauth_destination, permit';
+ 'smtps_relay_restrictions':
+ value => 'permit_mynetworks, permit_tls_clientcerts, defer_unauth_destination';
+ 'smtps_helo_restrictions':
+ value => 'permit_mynetworks, check_helo_access hash:$checks_dir/helo_checks, permit';
+ 'smtpd_sender_restrictions':
+ value => 'permit_mynetworks, reject_non_fqdn_sender, reject_unknown_sender_domain, permit';
+ }
+
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/smtpd_tls.pp b/puppet/modules/site_postfix/manifests/mx/smtpd_tls.pp
new file mode 100644
index 00000000..66297f55
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/smtpd_tls.pp
@@ -0,0 +1,69 @@
+# configure smtpd tls
+class site_postfix::mx::smtpd_tls {
+
+ include x509::variables
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::client_ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+
+ postfix::config {
+ 'smtpd_use_tls': value => 'yes';
+ 'smtpd_tls_CAfile': value => $ca_path;
+ 'smtpd_tls_cert_file': value => $cert_path;
+ 'smtpd_tls_key_file': value => $key_path;
+ 'smtpd_tls_ask_ccert': value => 'yes';
+ 'smtpd_tls_received_header':
+ value => 'yes';
+ 'smtpd_tls_security_level':
+ value => 'may';
+ 'smtpd_tls_eecdh_grade':
+ value => 'ultra';
+ 'smtpd_tls_session_cache_database':
+ value => "btree:\${data_directory}/smtpd_scache";
+ # see issue #4011
+ 'smtpd_tls_mandatory_protocols':
+ value => '!SSLv2, !SSLv3';
+ 'smtpd_tls_protocols':
+ value => '!SSLv2, !SSLv3';
+ # For connections to MUAs, TLS is mandatory and the ciphersuite is modified.
+ # MX and SMTP client configuration
+ 'smtpd_tls_mandatory_ciphers':
+ value => 'high';
+ 'tls_high_cipherlist':
+ value => 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!3DES:!RC4:!MD5:!PSK!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';
+ }
+
+ # Setup DH parameters
+ # Instead of using the dh parameters that are created by leap cli, it is more
+ # secure to generate new parameter files that will only be used for postfix,
+ # for each machine
+
+ include site_config::packages::gnutls
+
+ # Note: the parameter is called smtpd_tls_dh1024_param_file, but we are
+ # generating 2048-bit dh parameters. Neither Postfix nor OpenSSL actually
+ # cares about the size of the prime in "smtpd_tls_dh1024_param_file";
+ # you can make it 2048 bits.
+
+ exec { 'certtool-postfix-gendh':
+ command => 'certtool --generate-dh-params --bits 2048 --outfile /etc/postfix/smtpd_tls_dh_param.pem',
+ user => root,
+ group => root,
+ creates => '/etc/postfix/smtpd_tls_dh_param.pem',
+ require => [ Package['gnutls-bin'], Package['postfix'] ]
+ }
+
+ # Make sure the dh params file has correct ownership and mode
+ file {
+ '/etc/postfix/smtpd_tls_dh_param.pem':
+ owner => root,
+ group => root,
+ mode => '0600',
+ require => Exec['certtool-postfix-gendh'];
+ }
+
+ postfix::config { 'smtpd_tls_dh1024_param_file':
+ value => '/etc/postfix/smtpd_tls_dh_param.pem',
+ require => File['/etc/postfix/smtpd_tls_dh_param.pem']
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/mx/static_aliases.pp b/puppet/modules/site_postfix/manifests/mx/static_aliases.pp
new file mode 100644
index 00000000..9cd7ca02
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/mx/static_aliases.pp
@@ -0,0 +1,88 @@
+#
+# Defines static, hard coded aliases that are not in the database.
+# These aliases take precedence over the database aliases.
+#
+# There are three classes of reserved names:
+#
+# (1) forbidden_usernames:
+# Some usernames are forbidden and cannot be registered.
+# this is defined in node property webapp.forbidden_usernames
+# This is enforced by the webapp.
+#
+# (2) public aliases:
+# Some aliases are for root and are publicly exposed so that anyone
+# can deliver mail to them. For example, postmaster.
+# These are implemented in the virtual alias map, which takes
+# precedence over the local alias map.
+#
+# (3) local aliases:
+# Some aliases are only available locally: mail can be delivered
+# to the alias if the mail originates from the local host, or is
+# hostname qualified, but otherwise it will be rejected.
+# These are implemented in the local alias map.
+#
+# The alias for local 'root' is defined elsewhere. In this file, we
+# define the virtual 'root@domain' (which can be overwritten by
+# defining an entry for root in node property mx.aliases).
+#
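+# As a rough illustration (with a hypothetical domain 'example.org' and the
+# defaults below), the rendered virtual alias map would contain entries like:
+#
+# postmaster@example.org root@example.org
+# abuse@example.org postmaster@example.org
+#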
+
+class site_postfix::mx::static_aliases {
+
+ $mx = hiera('mx')
+ $root_recipients = hiera('contacts')
+
+ #
+ # LOCAL ALIASES
+ #
+
+ # NOTE: if you remove one of these, they will still appear in the
+ # /etc/aliases file
+ $local_aliases = [
+ 'admin', 'administrator', 'bin', 'cron', 'games', 'ftp', 'lp', 'maildrop',
+ 'mysql', 'news', 'nobody', 'noc', 'postgresql', 'ssladmin', 'sys',
+ 'usenet', 'uucp', 'www', 'www-data', 'leap-mx'
+ ]
+
+ postfix::mailalias {
+ $local_aliases:
+ ensure => present,
+ recipient => 'root'
+ }
+
+ #
+ # PUBLIC ALIASES
+ #
+
+ $public_aliases = $mx['aliases']
+
+ $default_public_aliases = {
+ 'root' => $root_recipients,
+ 'abuse' => 'postmaster',
+ 'arin-admin' => 'root',
+ 'certmaster' => 'hostmaster',
+ 'domainadmin' => 'hostmaster',
+ 'hostmaster' => 'root',
+ 'mailer-daemon' => 'postmaster',
+ 'postmaster' => 'root',
+ 'security' => 'root',
+ 'webmaster' => 'hostmaster',
+ }
+
+ $aliases = merge($default_public_aliases, $public_aliases)
+
+ exec { 'postmap_virtual_aliases':
+ command => '/usr/sbin/postmap /etc/postfix/virtual-aliases',
+ refreshonly => true,
+ user => root,
+ group => root,
+ require => Package['postfix'],
+ subscribe => File['/etc/postfix/virtual-aliases']
+ }
+ file { '/etc/postfix/virtual-aliases':
+ content => template('site_postfix/virtual-aliases.erb'),
+ owner => root,
+ group => root,
+ mode => '0600',
+ require => Package['postfix']
+ }
+}
diff --git a/puppet/modules/site_postfix/manifests/satellite.pp b/puppet/modules/site_postfix/manifests/satellite.pp
new file mode 100644
index 00000000..5725e6b8
--- /dev/null
+++ b/puppet/modules/site_postfix/manifests/satellite.pp
@@ -0,0 +1,47 @@
+class site_postfix::satellite {
+
+ $root_mail_recipient = hiera ('contacts')
+ $mail = hiera ('mail')
+ $relayhost = $mail['smarthost']
+ $cert_name = hiera('name')
+
+ class { '::postfix::satellite':
+ relayhost => $relayhost,
+ root_mail_recipient => $root_mail_recipient
+ }
+
+ # There are special conditions under which satellite hosts will not be able
+ # to contact their relayhost:
+ #
+ # 1. They are on openstack/amazon/PC and on the same cluster as the relay
+ # host; the MX lookup for the relay host will return the public IP, which
+ # cannot be contacted.
+ #
+ # 2. A domain is used that is not in DNS because it is internal, a testing
+ # domain, etc. For example, a .local domain cannot be looked up in DNS.
+ #
+ # To resolve this, so the satellite can contact the relayhost, we need to set
+ # http://www.postfix.org/postconf.5.html#smtp_host_lookup to 'native',
+ # which makes the lookup use the native naming service (nsswitch.conf). That
+ # typically defaults to 'files, dns', so /etc/hosts is consulted first, then
+ # DNS if the entry doesn't exist.
+ #
+ # NOTE: this makes it impossible to enable DANE support through DNSSEC
+ # with http://www.postfix.org/postconf.5.html#smtp_dns_support_level - but
+ # that parameter is not available until Postfix 2.11. If this ends up being
+ # important, we could also make this an optional parameter for providers
+ # without dns / local domains.
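+ #
+ # For illustration (hypothetical values): if the relayhost is
+ # 'mx1.example.org' and /etc/hosts contains
+ # 10.0.0.5 mx1.example.org
+ # then with smtp_host_lookup = native the satellite delivers to 10.0.0.5
+ # instead of whatever public address a DNS MX/A lookup would return.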
+
+ postfix::config {
+ 'smtp_host_lookup':
+ value => 'native';
+
+ # Note: we are setting this here, instead of in site_postfix::mx::smtp_tls
+ # because the mx server has to have a different value
+ 'smtp_tls_security_level':
+ value => 'encrypt';
+ }
+
+ include site_postfix::mx::smtp_tls
+
+}
diff --git a/puppet/modules/site_postfix/templates/checks/helo_access.erb b/puppet/modules/site_postfix/templates/checks/helo_access.erb
new file mode 100644
index 00000000..bac2c45a
--- /dev/null
+++ b/puppet/modules/site_postfix/templates/checks/helo_access.erb
@@ -0,0 +1,21 @@
+# THIS FILE IS MANAGED BY PUPPET
+# To make changes to this file, please edit your platform directory under
+# puppet/modules/site_postfix/templates/checks/helo_access.erb and then deploy
+
+# The format of this file is the HELO/EHLO domain followed by an action.
+# The action could be OK to allow it, REJECT to reject it, or a custom
+# status code and message. Any lines that are prefixed by an octothorpe (#)
+# will be considered comments.
+
+# Some examples:
+#
+# Reject anyone that HELO's with foobar:
+# foobar REJECT
+#
+# Allow the switches to skip this check:
+# switch1 OK
+# switch2 OK
+
+# Reject anybody that HELO's as being in our own domain(s)
+# anyone who identifies themselves as us is a virus/spammer
+<%= @domain %> 554 You are not in domain <%= @domain %>
diff --git a/puppet/modules/site_postfix/templates/checks/rewrite_openpgp_headers.erb b/puppet/modules/site_postfix/templates/checks/rewrite_openpgp_headers.erb
new file mode 100644
index 00000000..7af14f7d
--- /dev/null
+++ b/puppet/modules/site_postfix/templates/checks/rewrite_openpgp_headers.erb
@@ -0,0 +1,13 @@
+# THIS FILE IS MANAGED BY PUPPET
+#
+# This will replace the OpenPGP header that the client adds, because it is
+# sometimes incorrect (due to the client not always knowing what the proper URL
+# is for the webapp).
+# e.g. This will rewrite this header:
+# OpenPGP: id=4C0E01CD50E2F653; url="https://leap.se/key/elijah"; preference="signencrypt
+# with this replacement:
+# OpenPGP: id=4C0E01CD50E2F653; url="https://user.leap.se/key/elijah"; preference="signencrypt
+#
+# Note: whitespace in the pattern is represented by [[:space:]] to avoid these warnings from postmap:
+# "record is in "key: value" format; is this an alias file?" and "duplicate entry"
+/^(OpenPGP:[[:space:]]id=[[:alnum:]]+;[[:space:]]url="https:\/\/)<%= @domain %>(\/key\/[[:alpha:]]+";.*)/i REPLACE ${1}<%= @correct_domain %>${2}
diff --git a/puppet/modules/site_postfix/templates/virtual-aliases.erb b/puppet/modules/site_postfix/templates/virtual-aliases.erb
new file mode 100644
index 00000000..8373de97
--- /dev/null
+++ b/puppet/modules/site_postfix/templates/virtual-aliases.erb
@@ -0,0 +1,21 @@
+#
+# This file is managed by puppet.
+#
+# These virtual aliases take precedence over all other aliases.
+#
+
+#
+# enable these virtual domains:
+#
+<%= @domain %> enabled
+<%- @aliases.keys.map {|addr| addr.split('@')[1] }.compact.sort.uniq.each do |virt_domain| -%>
+<%= virt_domain %> enabled
+<%- end %>
+
+#
+# virtual aliases:
+#
+<%- @aliases.keys.sort.each do |from| -%>
+<%- full_address = from =~ /@/ ? from : from + "@" + @domain -%>
+<%= full_address %> <%= [@aliases[from]].flatten.map{|a| a =~ /@/ ? a : a + "@" + @domain}.join(', ') %>
+<%- end -%>
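+
+# A sketch of what the rendered map looks like for a hypothetical domain
+# example.org, showing only a few of the default aliases (illustrative,
+# not generated output):
+#
+#   example.org              enabled
+#   postmaster@example.org   root@example.org
+#   security@example.org     root@example.org
+#   webmaster@example.org    hostmaster@example.org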
diff --git a/puppet/modules/site_rsyslog/templates/client.conf.erb b/puppet/modules/site_rsyslog/templates/client.conf.erb
new file mode 100644
index 00000000..7f94759d
--- /dev/null
+++ b/puppet/modules/site_rsyslog/templates/client.conf.erb
@@ -0,0 +1,134 @@
+
+# An "In-Memory Queue" is created for remote logging.
+$WorkDirectory <%= scope.lookupvar('rsyslog::spool_dir') -%> # where to place spool files
+$ActionQueueFileName queue # unique name prefix for spool files
+$ActionQueueMaxDiskSpace <%= scope.lookupvar('rsyslog::client::spool_size') -%> # spool space limit (use as much as possible)
+$ActionQueueSaveOnShutdown on # save messages to disk on shutdown
+$ActionQueueType LinkedList # run asynchronously
+$ActionResumeRetryCount -1 # infinite retries if host is down
+<% if scope.lookupvar('rsyslog::client::log_templates') and ! scope.lookupvar('rsyslog::client::log_templates').empty?-%>
+
+# Define custom logging templates
+<% scope.lookupvar('rsyslog::client::log_templates').flatten.compact.each do |log_template| -%>
+$template <%= log_template['name'] %>,"<%= log_template['template'] %>"
+<% end -%>
+<% end -%>
+<% if scope.lookupvar('rsyslog::client::actionfiletemplate') -%>
+
+# Using the specified template as the default logging format:
+$ActionFileDefaultTemplate <%= scope.lookupvar('rsyslog::client::actionfiletemplate') %>
+<% else -%>
+
+# Using the built-in default logging format:
+$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat
+<% end -%>
+<% if scope.lookupvar('rsyslog::client::ssl') -%>
+
+# Setup SSL connection.
+# CA/Cert
+$DefaultNetStreamDriverCAFile <%= scope.lookupvar('rsyslog::client::ssl_ca') %>
+
+# Connection settings.
+$DefaultNetstreamDriver gtls
+$ActionSendStreamDriverMode 1
+$ActionSendStreamDriverAuthMode anon
+<% end -%>
+<% if scope.lookupvar('rsyslog::client::remote_servers') -%>
+
+<% scope.lookupvar('rsyslog::client::remote_servers').flatten.compact.each do |server| -%>
+<% if server['pattern'] and server['pattern'] != ''-%>
+<% pattern = server['pattern'] -%>
+<% else -%>
+<% pattern = '*.*' -%>
+<% end -%>
+<% if server['protocol'] == 'TCP' or server['protocol'] == 'tcp'-%>
+<% protocol = '@@' -%>
+<% protocol_type = 'TCP' -%>
+<% else -%>
+<% protocol = '@' -%>
+<% protocol_type = 'UDP' -%>
+<% end -%>
+<% if server['host'] and server['host'] != ''-%>
+<% host = server['host'] -%>
+<% else -%>
+<% host = 'localhost' -%>
+<% end -%>
+<% if server['port'] and server['port'] != ''-%>
+<% port = server['port'] -%>
+<% else -%>
+<% port = '514' -%>
+<% end -%>
+<% if server['format'] -%>
+<% format = ";#{server['format']}" -%>
+<% format_type = server['format'] -%>
+<% else -%>
+<% format = '' -%>
+<% format_type = 'the default' -%>
+<% end -%>
+# Sending logs that match <%= pattern %> to <%= host %> via <%= protocol_type %> on <%= port %> using <%=format_type %> format.
+<%= pattern %> <%= protocol %><%= host %>:<%= port %><%= format %>
+<% end -%>
+<% elsif scope.lookupvar('rsyslog::client::log_remote') -%>
+
+# Log to remote syslog server using <%= scope.lookupvar('rsyslog::client::remote_type') %>
+<% if scope.lookupvar('rsyslog::client::remote_type') == 'tcp' -%>
+*.* @@<%= scope.lookupvar('rsyslog::client::server') -%>:<%= scope.lookupvar('rsyslog::client::port') -%>;<%= scope.lookupvar('remote_forward_format') -%>
+<% else -%>
+*.* @<%= scope.lookupvar('rsyslog::client::server') -%>:<%= scope.lookupvar('rsyslog::client::port') -%>;<%= scope.lookupvar('remote_forward_format') -%>
+<% end -%>
+<% end -%>
+<% if scope.lookupvar('rsyslog::client::log_auth_local') or scope.lookupvar('rsyslog::client::log_local') -%>
+
+# Logging locally.
+
+<% if scope.lookupvar('rsyslog::log_style') == 'debian' -%>
+# Log auth messages locally
+*.*;auth,authpriv.none;mail.none -/var/log/syslog
+<% elsif scope.lookupvar('rsyslog::log_style') == 'redhat' -%>
+# Log auth messages locally
+auth,authpriv.* /var/log/secure
+<% end -%>
+<% end -%>
+<% if scope.lookupvar('rsyslog::client::log_local') -%>
+<% if scope.lookupvar('rsyslog::log_style') == 'debian' -%>
+# First some standard log files. Log by facility.
+#
+*.*;auth,authpriv.none -/var/log/syslog
+cron.* /var/log/cron.log
+daemon.* -/var/log/daemon.log
+kern.* -/var/log/kern.log
+mail.* -/var/log/mail.log
+user.* -/var/log/user.log
+
+#
+# Some "catch-all" log files.
+#
+*.=debug;\
+ auth,authpriv.none;\
+ news.none;mail.none -/var/log/debug
+*.=info;*.=notice;*.=warn;\
+ auth,authpriv.none;\
+ cron,daemon.none;\
+ mail,news.none -/var/log/messages
+
+# Log anything (except mail) of level info or higher.
+# Don't log private authentication messages!
+*.info;mail.none;authpriv.none;cron.none /var/log/messages
+
+# Log cron stuff
+cron.* /var/log/cron
+
+# Everybody gets emergency messages
+<% if @rsyslog_version and @rsyslog_version.split('.')[0].to_i >= 8 -%>
+*.emerg :omusrmsg:*
+<% else -%>
+*.emerg *
+<% end -%>
+
+# Save boot messages also to boot.log
+local7.* -/var/log/boot.log
+<% end -%>
+<% end -%>
+
+
+
diff --git a/puppet/modules/site_shorewall/files/Debian/shorewall.service b/puppet/modules/site_shorewall/files/Debian/shorewall.service
new file mode 100644
index 00000000..ec250ef1
--- /dev/null
+++ b/puppet/modules/site_shorewall/files/Debian/shorewall.service
@@ -0,0 +1,23 @@
+#
+# The Shoreline Firewall (Shorewall) Packet Filtering Firewall
+#
+# Copyright 2011 Jonathan Underwood <jonathan.underwood@gmail.com>
+# Copyright 2015 Tom Eastep <teastep@shorewall.net>
+#
+[Unit]
+Description=Shorewall IPv4 firewall
+Wants=network-online.target
+After=network-online.target
+Conflicts=iptables.service firewalld.service
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+EnvironmentFile=-/etc/default/shorewall
+StandardOutput=syslog
+ExecStart=/sbin/shorewall $OPTIONS start $STARTOPTIONS
+ExecStop=/sbin/shorewall $OPTIONS stop
+ExecReload=/sbin/shorewall $OPTIONS reload $RELOADOPTIONS
+
+[Install]
+WantedBy=basic.target
diff --git a/puppet/modules/site_shorewall/manifests/defaults.pp b/puppet/modules/site_shorewall/manifests/defaults.pp
new file mode 100644
index 00000000..ceb17868
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/defaults.pp
@@ -0,0 +1,86 @@
+class site_shorewall::defaults {
+
+ include shorewall
+ include site_config::params
+
+ # be safe for development
+ # if ( $::site_config::params::environment == 'local' ) {
+ # $shorewall_startup='0'
+ # }
+
+ # If you want logging:
+ shorewall::params {
+ 'LOG': value => 'debug';
+ }
+
+ shorewall::zone {'net': type => 'ipv4'; }
+
+ # define interfaces
+ shorewall::interface { $site_config::params::interface:
+ zone => 'net',
+ options => 'tcpflags,blacklist,nosmurfs';
+ }
+
+ shorewall::policy {
+ 'fw-to-all':
+ sourcezone => 'fw',
+ destinationzone => 'all',
+ policy => 'ACCEPT',
+ order => 100;
+ 'all-to-all':
+ sourcezone => 'all',
+ destinationzone => 'all',
+ policy => 'DROP',
+ order => 200;
+ }
+
+ shorewall::rule {
+ # ping party
+ 'all2all-ping':
+ source => 'all',
+ destination => 'all',
+ action => 'Ping(ACCEPT)',
+ order => 200;
+ }
+
+ package { 'shorewall-init':
+ ensure => installed
+ }
+
+ include ::systemd
+ file { '/etc/systemd/system/shorewall.service':
+ ensure => file,
+ owner => 'root',
+ group => 'root',
+ mode => '0644',
+ source => 'puppet:///modules/site_shorewall/Debian/shorewall.service',
+ require => Package['shorewall'],
+ notify => Service['shorewall'],
+ } ~>
+ Exec['systemctl-daemon-reload']
+
+ augeas {
+ # stop instead of clear firewall on shutdown
+ 'shorewall_SAFESTOP':
+ changes => 'set /files/etc/shorewall/shorewall.conf/SAFESTOP Yes',
+ lens => 'Shellvars.lns',
+ incl => '/etc/shorewall/shorewall.conf',
+ require => Package['shorewall'],
+ notify => Service['shorewall'];
+    # require that the interface exists
+ 'shorewall_REQUIRE_INTERFACE':
+ changes => 'set /files/etc/shorewall/shorewall.conf/REQUIRE_INTERFACE Yes',
+ lens => 'Shellvars.lns',
+ incl => '/etc/shorewall/shorewall.conf',
+ require => Package['shorewall'],
+ notify => Service['shorewall'];
+ # configure shorewall-init
+ 'shorewall-init':
+ changes => 'set /files/etc/default/shorewall-init/PRODUCTS shorewall',
+ lens => 'Shellvars.lns',
+ incl => '/etc/default/shorewall-init',
+ require => [ Package['shorewall-init'], Service['shorewall'] ]
+ }
+
+ include site_shorewall::sshd
+}
diff --git a/puppet/modules/site_shorewall/manifests/dnat.pp b/puppet/modules/site_shorewall/manifests/dnat.pp
new file mode 100644
index 00000000..a73294cc
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/dnat.pp
@@ -0,0 +1,19 @@
+define site_shorewall::dnat (
+ $source,
+ $destination,
+ $proto,
+ $destinationport,
+ $originaldest ) {
+
+
+ shorewall::rule {
+ "dnat_${name}_${destinationport}":
+ action => 'DNAT',
+ source => $source,
+ destination => $destination,
+ proto => $proto,
+ destinationport => $destinationport,
+ originaldest => $originaldest,
+ order => 200
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/dnat_rule.pp b/puppet/modules/site_shorewall/manifests/dnat_rule.pp
new file mode 100644
index 00000000..f9fbe950
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/dnat_rule.pp
@@ -0,0 +1,50 @@
+define site_shorewall::dnat_rule {
+
+ $port = $name
+ if $port != 1194 {
+ if $site_openvpn::openvpn_allow_unlimited {
+ shorewall::rule {
+ "dnat_tcp_port_${port}":
+ action => 'DNAT',
+ source => 'net',
+ destination => "\$FW:${site_openvpn::unlimited_gateway_address}:1194",
+ proto => 'tcp',
+ destinationport => $port,
+ originaldest => $site_openvpn::unlimited_gateway_address,
+ order => 100;
+ }
+ shorewall::rule {
+ "dnat_udp_port_${port}":
+ action => 'DNAT',
+ source => 'net',
+ destination => "\$FW:${site_openvpn::unlimited_gateway_address}:1194",
+ proto => 'udp',
+ destinationport => $port,
+ originaldest => $site_openvpn::unlimited_gateway_address,
+ order => 100;
+ }
+ }
+ if $site_openvpn::openvpn_allow_limited {
+ shorewall::rule {
+ "dnat_free_tcp_port_${port}":
+ action => 'DNAT',
+ source => 'net',
+ destination => "\$FW:${site_openvpn::limited_gateway_address}:1194",
+ proto => 'tcp',
+ destinationport => $port,
+ originaldest => $site_openvpn::unlimited_gateway_address,
+ order => 100;
+ }
+ shorewall::rule {
+ "dnat_free_udp_port_${port}":
+ action => 'DNAT',
+ source => 'net',
+ destination => "\$FW:${site_openvpn::limited_gateway_address}:1194",
+ proto => 'udp',
+ destinationport => $port,
+ originaldest => $site_openvpn::unlimited_gateway_address,
+ order => 100;
+ }
+ }
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/eip.pp b/puppet/modules/site_shorewall/manifests/eip.pp
new file mode 100644
index 00000000..8fbba658
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/eip.pp
@@ -0,0 +1,92 @@
+class site_shorewall::eip {
+
+ include site_shorewall::defaults
+ include site_config::params
+ include site_shorewall::ip_forward
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_eip':
+ content => "PARAM - - tcp 1194
+ PARAM - - udp 1194
+ ",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+ shorewall::interface {
+ 'tun0':
+ zone => 'eip',
+ options => 'tcpflags,blacklist,nosmurfs';
+ 'tun1':
+ zone => 'eip',
+ options => 'tcpflags,blacklist,nosmurfs';
+ 'tun2':
+ zone => 'eip',
+ options => 'tcpflags,blacklist,nosmurfs';
+ 'tun3':
+ zone => 'eip',
+ options => 'tcpflags,blacklist,nosmurfs';
+ }
+
+ shorewall::zone {
+ 'eip':
+ type => 'ipv4';
+ }
+
+ $interface = $site_config::params::interface
+
+ shorewall::masq {
+ "${interface}_unlimited_tcp":
+ interface => $interface,
+ source => "${site_openvpn::openvpn_unlimited_tcp_network_prefix}.0/${site_openvpn::openvpn_unlimited_tcp_cidr}";
+ "${interface}_unlimited_udp":
+ interface => $interface,
+ source => "${site_openvpn::openvpn_unlimited_udp_network_prefix}.0/${site_openvpn::openvpn_unlimited_udp_cidr}";
+ }
+ if ! $::ec2_instance_id {
+ shorewall::masq {
+ "${interface}_limited_tcp":
+ interface => $interface,
+ source => "${site_openvpn::openvpn_limited_tcp_network_prefix}.0/${site_openvpn::openvpn_limited_tcp_cidr}";
+ "${interface}_limited_udp":
+ interface => $interface,
+ source => "${site_openvpn::openvpn_limited_udp_network_prefix}.0/${site_openvpn::openvpn_limited_udp_cidr}";
+ }
+ }
+
+ shorewall::policy {
+ 'eip-to-all':
+ sourcezone => 'eip',
+ destinationzone => 'all',
+ policy => 'ACCEPT',
+ order => 100;
+ }
+
+ shorewall::rule {
+ 'net2fw-openvpn':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_eip(ACCEPT)',
+ order => 200;
+
+ 'block_eip_dns_udp':
+ action => 'REJECT',
+ source => 'eip',
+ destination => 'net',
+ proto => 'udp',
+ destinationport => 'domain',
+ order => 300;
+
+ 'block_eip_dns_tcp':
+ action => 'REJECT',
+ source => 'eip',
+ destination => 'net',
+ proto => 'tcp',
+ destinationport => 'domain',
+ order => 301;
+ }
+
+ # create dnat rule for each port
+ site_shorewall::dnat_rule { $site_openvpn::openvpn_ports: }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/ip_forward.pp b/puppet/modules/site_shorewall/manifests/ip_forward.pp
new file mode 100644
index 00000000..d53ee8a5
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/ip_forward.pp
@@ -0,0 +1,10 @@
+class site_shorewall::ip_forward {
+ include augeas
+ augeas { 'enable_ip_forwarding':
+ changes => 'set /files/etc/shorewall/shorewall.conf/IP_FORWARDING Yes',
+ lens => 'Shellvars.lns',
+ incl => '/etc/shorewall/shorewall.conf',
+ notify => Service[shorewall],
+ require => [ Class[augeas], Package[shorewall] ];
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/monitor.pp b/puppet/modules/site_shorewall/manifests/monitor.pp
new file mode 100644
index 00000000..f4ed4f7c
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/monitor.pp
@@ -0,0 +1,8 @@
+class site_shorewall::monitor {
+
+ include site_shorewall::defaults
+ include site_shorewall::service::http
+ include site_shorewall::service::https
+
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/mx.pp b/puppet/modules/site_shorewall/manifests/mx.pp
new file mode 100644
index 00000000..332f164e
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/mx.pp
@@ -0,0 +1,24 @@
+class site_shorewall::mx {
+
+ include site_shorewall::defaults
+
+ $smtpd_ports = '25,465,587'
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_mx':
+ content => "PARAM - - tcp ${smtpd_ports} ",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+
+ shorewall::rule {
+ 'net2fw-mx':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_mx(ACCEPT)',
+ order => 200;
+ }
+
+ include site_shorewall::service::smtp
+}
diff --git a/puppet/modules/site_shorewall/manifests/obfsproxy.pp b/puppet/modules/site_shorewall/manifests/obfsproxy.pp
new file mode 100644
index 00000000..75846705
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/obfsproxy.pp
@@ -0,0 +1,25 @@
+# configure shorewall for obfsproxy
+class site_shorewall::obfsproxy {
+
+ include site_shorewall::defaults
+
+ $obfsproxy = hiera('obfsproxy')
+ $scramblesuit = $obfsproxy['scramblesuit']
+ $scram_port = $scramblesuit['port']
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_obfsproxy':
+ content => "PARAM - - tcp ${scram_port} ",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+ shorewall::rule {
+ 'net2fw-obfs':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_obfsproxy(ACCEPT)',
+ order => 200;
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/service/http.pp b/puppet/modules/site_shorewall/manifests/service/http.pp
new file mode 100644
index 00000000..74b874d5
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/service/http.pp
@@ -0,0 +1,13 @@
+class site_shorewall::service::http {
+
+ include site_shorewall::defaults
+
+ shorewall::rule {
+ 'net2fw-http':
+ source => 'net',
+ destination => '$FW',
+ action => 'HTTP(ACCEPT)',
+ order => 200;
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/service/https.pp b/puppet/modules/site_shorewall/manifests/service/https.pp
new file mode 100644
index 00000000..4a8b119c
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/service/https.pp
@@ -0,0 +1,12 @@
+class site_shorewall::service::https {
+
+ include site_shorewall::defaults
+
+ shorewall::rule {
+ 'net2fw-https':
+ source => 'net',
+ destination => '$FW',
+ action => 'HTTPS(ACCEPT)',
+ order => 200;
+ }
+}
diff --git a/puppet/modules/site_shorewall/manifests/service/smtp.pp b/puppet/modules/site_shorewall/manifests/service/smtp.pp
new file mode 100644
index 00000000..7fbdf14e
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/service/smtp.pp
@@ -0,0 +1,13 @@
+class site_shorewall::service::smtp {
+
+ include site_shorewall::defaults
+
+ shorewall::rule {
+    'fw2net-smtp':
+ source => '$FW',
+ destination => 'net',
+ action => 'SMTP(ACCEPT)',
+ order => 200;
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/service/webapp_api.pp b/puppet/modules/site_shorewall/manifests/service/webapp_api.pp
new file mode 100644
index 00000000..d3a1aeed
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/service/webapp_api.pp
@@ -0,0 +1,23 @@
+# configure shorewall for webapp api
+class site_shorewall::service::webapp_api {
+
+ $api = hiera('api')
+ $api_port = $api['port']
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_webapp_api':
+ content => "PARAM - - tcp ${api_port} ",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+
+ shorewall::rule {
+ 'net2fw-webapp_api':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_webapp_api(ACCEPT)',
+ order => 200;
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/soledad.pp b/puppet/modules/site_shorewall/manifests/soledad.pp
new file mode 100644
index 00000000..518d8689
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/soledad.pp
@@ -0,0 +1,23 @@
+class site_shorewall::soledad {
+
+ $soledad = hiera('soledad')
+ $soledad_port = $soledad['port']
+
+ include site_shorewall::defaults
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_soledad':
+ content => "PARAM - - tcp ${soledad_port}",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+ shorewall::rule {
+ 'net2fw-soledad':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_soledad(ACCEPT)',
+ order => 200;
+ }
+}
+
diff --git a/puppet/modules/site_shorewall/manifests/sshd.pp b/puppet/modules/site_shorewall/manifests/sshd.pp
new file mode 100644
index 00000000..e2332592
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/sshd.pp
@@ -0,0 +1,31 @@
+# configure shorewall for sshd
+class site_shorewall::sshd {
+
+ $ssh_config = hiera('ssh')
+ $ssh_port = $ssh_config['port']
+
+ include shorewall
+
+ # define macro for incoming sshd
+ file { '/etc/shorewall/macro.leap_sshd':
+ content => "PARAM - - tcp ${ssh_port}",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+
+ shorewall::rule {
+ # outside to server
+ 'net2fw-ssh':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_sshd(ACCEPT)',
+ order => 200;
+ }
+
+ # setup a routestopped rule to allow ssh when shorewall is stopped
+ shorewall::routestopped { $site_config::params::interface:
+ options => "- tcp ${ssh_port}"
+ }
+
+}
diff --git a/puppet/modules/site_shorewall/manifests/stunnel/client.pp b/puppet/modules/site_shorewall/manifests/stunnel/client.pp
new file mode 100644
index 00000000..9a89a244
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/stunnel/client.pp
@@ -0,0 +1,40 @@
+#
+# Adds some firewall magic to the stunnel.
+#
+# Using DNAT, this firewall rule allows a locally running program to connect
+# to the normal remote IP and remote port of the service on another machine,
+# while having the connection transparently routed through the locally
+# running stunnel client.
+#
+# The network looks like this:
+#
+# From the client's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# consumer app -> localhost:accept_port -> connect:connect_port -> localhost:original_port
+#
+# From the server's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# ?? -> *:accept_port -> localhost:connect_port -> service
+#
+
+define site_shorewall::stunnel::client(
+ $accept_port,
+ $connect,
+ $connect_port,
+ $original_port) {
+
+ include site_shorewall::defaults
+
+ shorewall::rule {
+ "stunnel_dnat_${name}":
+ action => 'DNAT',
+ source => '$FW',
+ destination => "\$FW:127.0.0.1:${accept_port}",
+ proto => 'tcp',
+ destinationport => $original_port,
+ originaldest => $connect,
+ order => 200
+ }
+}
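+
+# A usage sketch (the title and all parameter values are illustrative only):
+#
+#   site_shorewall::stunnel::client { 'couchdb':
+#     accept_port   => 4096,
+#     connect       => 'couch1.example.i',
+#     connect_port  => 15984,
+#     original_port => 5984,
+#   }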
diff --git a/puppet/modules/site_shorewall/manifests/stunnel/server.pp b/puppet/modules/site_shorewall/manifests/stunnel/server.pp
new file mode 100644
index 00000000..798cd631
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/stunnel/server.pp
@@ -0,0 +1,22 @@
+#
+# Allow all incoming connections to stunnel server port
+#
+
+define site_shorewall::stunnel::server($port) {
+
+ include site_shorewall::defaults
+
+ file { "/etc/shorewall/macro.stunnel_server_${name}":
+ content => "PARAM - - tcp ${port}",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+ shorewall::rule {
+ "net2fw-stunnel-server-${name}":
+ source => 'net',
+ destination => '$FW',
+ action => "stunnel_server_${name}(ACCEPT)",
+ order => 200;
+ }
+
+} \ No newline at end of file
diff --git a/puppet/modules/site_shorewall/manifests/tor.pp b/puppet/modules/site_shorewall/manifests/tor.pp
new file mode 100644
index 00000000..324b4844
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/tor.pp
@@ -0,0 +1,26 @@
+# configure shorewall for tor
+class site_shorewall::tor {
+
+ include site_shorewall::defaults
+ include site_shorewall::ip_forward
+
+ $tor_port = '9001'
+
+ # define macro for incoming services
+ file { '/etc/shorewall/macro.leap_tor':
+ content => "PARAM - - tcp ${tor_port} ",
+ notify => Service['shorewall'],
+ require => Package['shorewall']
+ }
+
+
+ shorewall::rule {
+ 'net2fw-tor':
+ source => 'net',
+ destination => '$FW',
+ action => 'leap_tor(ACCEPT)',
+ order => 200;
+ }
+
+ include site_shorewall::service::http
+}
diff --git a/puppet/modules/site_shorewall/manifests/webapp.pp b/puppet/modules/site_shorewall/manifests/webapp.pp
new file mode 100644
index 00000000..a8d2aa5b
--- /dev/null
+++ b/puppet/modules/site_shorewall/manifests/webapp.pp
@@ -0,0 +1,7 @@
+class site_shorewall::webapp {
+
+ include site_shorewall::defaults
+ include site_shorewall::service::https
+ include site_shorewall::service::http
+ include site_shorewall::service::webapp_api
+}
diff --git a/puppet/modules/site_squid_deb_proxy/manifests/client.pp b/puppet/modules/site_squid_deb_proxy/manifests/client.pp
new file mode 100644
index 00000000..27844270
--- /dev/null
+++ b/puppet/modules/site_squid_deb_proxy/manifests/client.pp
@@ -0,0 +1,5 @@
+class site_squid_deb_proxy::client {
+ include squid_deb_proxy::client
+ include site_shorewall::defaults
+ include shorewall::rules::mdns
+}
diff --git a/puppet/modules/site_sshd/manifests/authorized_keys.pp b/puppet/modules/site_sshd/manifests/authorized_keys.pp
new file mode 100644
index 00000000..a1fde3f6
--- /dev/null
+++ b/puppet/modules/site_sshd/manifests/authorized_keys.pp
@@ -0,0 +1,34 @@
+# We want to purge unmanaged keys from the authorized_keys file so that only
+# keys added in the provider are valid. Any manually added keys will be
+# overridden.
+#
+# In order to do this, we have to use a custom define to deploy the
+# authorized_keys file because puppet's internal resource doesn't allow
+# purging before populating this file.
+#
+# See the following for more information:
+# https://tickets.puppetlabs.com/browse/PUP-1174
+# https://leap.se/code/issues/2990
+# https://leap.se/code/issues/3010
+#
+define site_sshd::authorized_keys ($keys, $ensure = 'present', $home = '') {
+  # Derive a default homedir from the $title variable:
+  # if $home is empty, /home/$title is used.
+ $homedir = $home ? {'' => "/home/${title}", default => $home}
+ $owner = $ensure ? {'present' => $title, default => undef }
+ $group = $ensure ? {'present' => $title, default => undef }
+ file {
+ "${homedir}/.ssh":
+ ensure => 'directory',
+ owner => $title,
+ group => $title,
+ mode => '0700';
+ "${homedir}/.ssh/authorized_keys":
+ ensure => $ensure,
+ owner => $owner,
+ group => $group,
+ mode => '0600',
+ require => File["${homedir}/.ssh"],
+ content => template('site_sshd/authorized_keys.erb');
+ }
+}
diff --git a/puppet/modules/site_sshd/manifests/deploy_authorized_keys.pp b/puppet/modules/site_sshd/manifests/deploy_authorized_keys.pp
new file mode 100644
index 00000000..97ca058f
--- /dev/null
+++ b/puppet/modules/site_sshd/manifests/deploy_authorized_keys.pp
@@ -0,0 +1,9 @@
+class site_sshd::deploy_authorized_keys ( $keys ) {
+ tag 'leap_authorized_keys'
+
+ site_sshd::authorized_keys {'root':
+ keys => $keys,
+ home => '/root'
+ }
+
+}
diff --git a/puppet/modules/site_sshd/manifests/init.pp b/puppet/modules/site_sshd/manifests/init.pp
new file mode 100644
index 00000000..a9202da4
--- /dev/null
+++ b/puppet/modules/site_sshd/manifests/init.pp
@@ -0,0 +1,82 @@
+# configures sshd, mosh, authorized keys and known hosts
+class site_sshd {
+ $ssh = hiera_hash('ssh')
+ $ssh_config = $ssh['config']
+ $hosts = hiera('hosts', '')
+
+ ##
+ ## SETUP AUTHORIZED KEYS
+ ##
+
+ $authorized_keys = $ssh['authorized_keys']
+
+ class { 'site_sshd::deploy_authorized_keys':
+ keys => $authorized_keys
+ }
+
+ ##
+ ## SETUP KNOWN HOSTS and SSH_CONFIG
+ ##
+
+ file {
+ '/etc/ssh/ssh_known_hosts':
+ owner => root,
+ group => root,
+ mode => '0644',
+ content => template('site_sshd/ssh_known_hosts.erb');
+
+ '/etc/ssh/ssh_config':
+ owner => root,
+ group => root,
+ mode => '0644',
+ content => template('site_sshd/ssh_config.erb');
+ }
+
+ ##
+ ## OPTIONAL MOSH SUPPORT
+ ##
+
+ $mosh = $ssh['mosh']
+
+ if $mosh['enabled'] {
+ class { 'site_sshd::mosh':
+ ensure => present,
+ ports => $mosh['ports']
+ }
+ }
+ else {
+ class { 'site_sshd::mosh':
+ ensure => absent
+ }
+ }
+
+  # We cannot use the 'hardened' parameter because leap_cli uses an old
+  # net-ssh gem that is incompatible with the included
+  # "KexAlgorithms curve25519-sha256@libssh.org"
+  # (see https://leap.se/code/issues/7591).
+  # Therefore we don't use it here, but include all the other options that
+  # the 'hardened' parameter would apply. Not all options are available on
+  # wheezy.
+ if ( $::lsbdistcodename == 'wheezy' ) {
+ $tail_additional_options = 'Ciphers aes256-ctr
+MACs hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
+ } else {
+ $tail_additional_options = 'Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
+ }
+
+ ##
+ ## SSHD SERVER CONFIGURATION
+ ##
+ class { '::sshd':
+ manage_nagios => false,
+ ports => [ $ssh['port'] ],
+ use_pam => 'yes',
+ print_motd => 'no',
+ tcp_forwarding => $ssh_config['AllowTcpForwarding'],
+ manage_client => false,
+ use_storedconfigs => false,
+ tail_additional_options => $tail_additional_options,
+ hostkey_type => [ 'rsa', 'dsa', 'ecdsa' ]
+ }
+}
diff --git a/puppet/modules/site_sshd/manifests/mosh.pp b/puppet/modules/site_sshd/manifests/mosh.pp
new file mode 100644
index 00000000..49f56ca0
--- /dev/null
+++ b/puppet/modules/site_sshd/manifests/mosh.pp
@@ -0,0 +1,21 @@
+class site_sshd::mosh ( $ensure = present, $ports = '60000-61000' ) {
+
+ package { 'mosh':
+ ensure => $ensure
+ }
+
+ file { '/etc/shorewall/macro.mosh':
+ ensure => $ensure,
+ content => "PARAM - - udp ${ports}",
+ notify => Service['shorewall'],
+ require => Package['shorewall'];
+ }
+
+ shorewall::rule { 'net2fw-mosh':
+ ensure => $ensure,
+ source => 'net',
+ destination => '$FW',
+ action => 'mosh(ACCEPT)',
+ order => 200;
+ }
+}
diff --git a/puppet/modules/site_sshd/templates/authorized_keys.erb b/puppet/modules/site_sshd/templates/authorized_keys.erb
new file mode 100644
index 00000000..51bdc5b3
--- /dev/null
+++ b/puppet/modules/site_sshd/templates/authorized_keys.erb
@@ -0,0 +1,10 @@
+# NOTICE: This file is autogenerated by Puppet
+# all manually added keys will be overridden
+
+<% @keys.sort.each do |user, hash| -%>
+<% if user == 'monitor' -%>
+command="/usr/bin/check_mk_agent",no-port-forwarding,no-x11-forwarding,no-agent-forwarding,no-pty,no-user-rc, <%=hash['type']-%> <%=hash['key']%> <%=user%>
+<% else -%>
+<%=hash['type']-%> <%=hash['key']%> <%=user%>
+<% end -%>
+<% end -%>
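+
+# A sketch of the rendered result for a hypothetical user 'alice' with an
+# ssh-rsa key (illustrative only):
+#
+#   ssh-rsa AAAAB3Nza... alice
+#
+# The 'monitor' user additionally gets the forced check_mk_agent command and
+# the no-*-forwarding restrictions from the first branch above.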
diff --git a/puppet/modules/site_sshd/templates/ssh_config.erb b/puppet/modules/site_sshd/templates/ssh_config.erb
new file mode 100644
index 00000000..36c0b6d5
--- /dev/null
+++ b/puppet/modules/site_sshd/templates/ssh_config.erb
@@ -0,0 +1,40 @@
+# This file is generated by Puppet
+# This is the ssh client system-wide configuration file. See
+# ssh_config(5) for more information. This file provides defaults for
+# users, and the values can be changed in per-user configuration files
+# or on the command line.
+
+Host *
+ SendEnv LANG LC_*
+ HashKnownHosts yes
+ GSSAPIAuthentication yes
+ GSSAPIDelegateCredentials no
+<% if scope.lookupvar('::site_config::params::environment') == 'local' -%>
+ #
+ # Vagrant nodes should have strict host key checking
+ # turned off. The problem is that the host key for a vagrant
+ # node is specific to the particular instance of the vagrant
+ # node you have running locally. For this reason, we can't
+ # track the host keys, or your host key for vpn1 would conflict
+ # with my host key for vpn1.
+ #
+ StrictHostKeyChecking no
+<% end -%>
+
+#
+# Tell SSH what host key algorithm we should use. I don't understand why this
+# is needed, since the man page says that "if hostkeys are known for the
+# destination host then [HostKeyAlgorithms default] is modified to prefer
+# their algorithms."
+#
+
+<% @hosts.sort.each do |name, host| -%>
+Host <%= name %> <%= host['domain_full'] %> <%= host['domain_internal'] %> <%= host['ip_address'] %>
+<% if host['host_pub_key'] -%>
+HostKeyAlgorithms <%= host['host_pub_key'].split(" ").first %>
+<% end -%>
+<% if host['port'] -%>
+Port <%= host['port'] %>
+<% end -%>
+
+<% end -%>
diff --git a/puppet/modules/site_sshd/templates/ssh_known_hosts.erb b/puppet/modules/site_sshd/templates/ssh_known_hosts.erb
new file mode 100644
index 00000000..002ab732
--- /dev/null
+++ b/puppet/modules/site_sshd/templates/ssh_known_hosts.erb
@@ -0,0 +1,7 @@
+# This file is generated by Puppet
+
+<% @hosts.sort.each do |name, hash| -%>
+<% if hash['host_pub_key'] -%>
+<%= name%>,<%=hash['domain_full']%>,<%=hash['domain_internal']%>,<%=hash['ip_address']%> <%=hash['host_pub_key']%>
+<% end -%>
+<% end -%>
diff --git a/puppet/modules/site_static/README b/puppet/modules/site_static/README
new file mode 100644
index 00000000..bc719782
--- /dev/null
+++ b/puppet/modules/site_static/README
@@ -0,0 +1,3 @@
+Deploy one or more static websites to a node.
+
+For now, it only supports `amber` based static sites; plain html and jekyll should be supported in the future.
diff --git a/puppet/modules/site_static/manifests/domain.pp b/puppet/modules/site_static/manifests/domain.pp
new file mode 100644
index 00000000..b26cc9e3
--- /dev/null
+++ b/puppet/modules/site_static/manifests/domain.pp
@@ -0,0 +1,33 @@
+# configure static service for domain
+define site_static::domain (
+ $ca_cert,
+ $key,
+ $cert,
+ $tls_only=true,
+ $locations=undef,
+ $aliases=undef,
+ $apache_config=undef) {
+
+ $domain = $name
+ $base_dir = '/srv/static'
+
+ $cafile = "${cert}\n${ca_cert}"
+
+ if is_hash($locations) {
+ create_resources(site_static::location, $locations)
+ }
+
+ x509::cert { $domain:
+ content => $cafile,
+ notify => Service[apache]
+ }
+ x509::key { $domain:
+ content => $key,
+ notify => Service[apache]
+ }
+
+ apache::vhost::file { $domain:
+ content => template('site_static/apache.conf.erb')
+ }
+
+}
diff --git a/puppet/modules/site_static/manifests/init.pp b/puppet/modules/site_static/manifests/init.pp
new file mode 100644
index 00000000..4a722d62
--- /dev/null
+++ b/puppet/modules/site_static/manifests/init.pp
@@ -0,0 +1,72 @@
+# deploy static service
+class site_static {
+ tag 'leap_service'
+
+ include site_config::default
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca_bundle
+
+ $static = hiera('static')
+ $domains = $static['domains']
+ $formats = $static['formats']
+ $bootstrap = $static['bootstrap_files']
+ $tor = hiera('tor', false)
+
+ if $bootstrap['enabled'] {
+ $bootstrap_domain = $bootstrap['domain']
+ $bootstrap_client = $bootstrap['client_version']
+ file { '/srv/leap/provider.json':
+ content => $bootstrap['provider_json'],
+ owner => 'www-data',
+ group => 'www-data',
+ mode => '0444';
+ }
+    # It is important to always touch provider.json: the client needs to check
+    # the X-Minimum-Client-Version header, but apache only sends it when the
+    # file has been modified (otherwise it responds with 304). Changing the
+    # minimum client version does not alter the content of provider.json, so
+    # we must touch it.
+ exec { '/bin/touch /srv/leap/provider.json':
+ require => File['/srv/leap/provider.json'];
+ }
+ }
+
+ include apache::module::headers
+ include apache::module::alias
+ include apache::module::expires
+ include apache::module::removeip
+ include apache::module::dir
+ include apache::module::negotiation
+ include site_apache::common
+ include site_config::ruby::dev
+
+ if (member($formats, 'rack')) {
+ include site_apt::preferences::passenger
+ class { 'passenger':
+ use_munin => false,
+ require => Class['site_apt::preferences::passenger']
+ }
+ }
+
+ if (member($formats, 'amber')) {
+ rubygems::gem{'amber-0.3.8':
+ require => Package['zlib1g-dev']
+ }
+
+ package { 'zlib1g-dev':
+ ensure => installed
+ }
+ }
+
+ create_resources(site_static::domain, $domains)
+
+ if $tor {
+ $hidden_service = $tor['hidden_service']
+ if $hidden_service['active'] {
+ include site_webapp::hidden_service
+ }
+ }
+
+ include site_shorewall::defaults
+ include site_shorewall::service::http
+ include site_shorewall::service::https
+}
diff --git a/puppet/modules/site_static/manifests/location.pp b/puppet/modules/site_static/manifests/location.pp
new file mode 100644
index 00000000..d116de2f
--- /dev/null
+++ b/puppet/modules/site_static/manifests/location.pp
@@ -0,0 +1,36 @@
+# configure static service for location
+define site_static::location($path, $format, $source) {
+
+ $file_path = "/srv/static/${name}"
+ $allowed_formats = ['amber','rack']
+
+ if $format == undef {
+ fail("static_site location `${path}` is missing `format` field.")
+ }
+
+ if ! member($allowed_formats, $format) {
+ $formats_str = join($allowed_formats, ', ')
+ fail("Unsupported static_site location format `${format}`. Supported formats include ${formats_str}.")
+ }
+
+ if ($format == 'amber') {
+ exec {"amber_build_${name}":
+ cwd => $file_path,
+ command => 'amber rebuild',
+ user => 'www-data',
+ timeout => 600,
+ subscribe => Vcsrepo[$file_path]
+ }
+ }
+
+ vcsrepo { $file_path:
+ ensure => present,
+ force => true,
+ revision => $source['revision'],
+ provider => $source['type'],
+ source => $source['repo'],
+ owner => 'www-data',
+ group => 'www-data'
+ }
+
+}
diff --git a/puppet/modules/site_static/templates/amber.erb b/puppet/modules/site_static/templates/amber.erb
new file mode 100644
index 00000000..694f1136
--- /dev/null
+++ b/puppet/modules/site_static/templates/amber.erb
@@ -0,0 +1,13 @@
+<%- if @location_path != '' -%>
+ AliasMatch ^/[a-z]{2}/<%=@location_path%>(/.+|/|)$ "<%=@directory%>/$1"
+ Alias /<%=@location_path%> "<%=@directory%>/"
+<%- end -%>
+ <Directory "<%=@directory%>/">
+ AllowOverride FileInfo Indexes Options=All,MultiViews
+<% if scope.function_guess_apache_version([]) == '2.4' %>
+ Require all granted
+<% else %>
+ Order deny,allow
+ Allow from all
+<% end %>
+ </Directory>
diff --git a/puppet/modules/site_static/templates/apache.conf.erb b/puppet/modules/site_static/templates/apache.conf.erb
new file mode 100644
index 00000000..6b969d1c
--- /dev/null
+++ b/puppet/modules/site_static/templates/apache.conf.erb
@@ -0,0 +1,88 @@
+<%-
+ ##
+ ## An apache config for static websites.
+ ##
+
+ def location_directory(name, location)
+ if ['amber', 'rack'].include?(location['format'])
+ File.join(@base_dir, name, 'public')
+ else
+ File.join(@base_dir, name)
+ end
+ end
+
+ @document_root = begin
+ root = '/var/www'
+ @locations && @locations.each do |name, location|
+ root = location_directory(name, location) if location['path'] == '/'
+ end
+ root.gsub(%r{^/|/$}, '')
+ end
+
+ bootstrap_domain = scope.lookupvar('site_static::bootstrap_domain')
+ bootstrap_client = scope.lookupvar('site_static::bootstrap_client')
+-%>
+
+<VirtualHost *:80>
+ ServerName <%= @domain %>
+ ServerAlias www.<%= @domain %>
+<%- @aliases && @aliases.each do |domain_alias| -%>
+ ServerAlias <%= domain_alias %>
+<%- end -%>
+<%- if @tls_only -%>
+ RewriteEngine On
+ RewriteRule ^.*$ https://<%= @domain -%>%{REQUEST_URI} [R=permanent,L]
+<%- end -%>
+</VirtualHost>
+
+<VirtualHost *:443>
+ ServerName <%= @domain %>
+ ServerAlias www.<%= @domain %>
+<%- @aliases && @aliases.each do |domain_alias| -%>
+ ServerAlias <%= domain_alias %>
+<%- end -%>
+
+ #RewriteLog "/var/log/apache2/rewrite.log"
+ #RewriteLogLevel 3
+
+ Include include.d/ssl_common.inc
+
+<%- if @tls_only -%>
+ Header always set Strict-Transport-Security: "max-age=15768000;includeSubdomains"
+<%- end -%>
+ Header set X-Frame-Options "deny"
+ Header always unset X-Powered-By
+ Header always unset X-Runtime
+
+ SSLCertificateKeyFile /etc/x509/keys/<%= @domain %>.key
+ SSLCertificateFile /etc/x509/certs/<%= @domain %>.crt
+
+ RequestHeader set X_FORWARDED_PROTO 'https'
+
+ DocumentRoot "/<%= @document_root %>/"
+ AccessFileName .htaccess
+
+<%- if ([@aliases]+[@domain]).flatten.include?(bootstrap_domain) -%>
+ Alias /provider.json /srv/leap/provider.json
+ <Location /provider.json>
+ Header set X-Minimum-Client-Version <%= bootstrap_client['min'] %>
+ </Location>
+<%- end -%>
+
+<%- if @apache_config -%>
+<%= @apache_config.gsub(':percent:','%') %>
+<%- end -%>
+
+<%- @locations && @locations.each do |name, location| -%>
+<%- location_path = location['path'].gsub(%r{^/|/$}, '') -%>
+<%- directory = location_directory(name, location) -%>
+<%- local_vars = {'location_path'=>location_path, 'directory'=>directory, 'location'=>location, 'name'=>name} -%>
+<%- template_path = File.join(File.dirname(__FILE__), location['format']) + '.erb' -%>
+<%- break unless File.exists?(template_path) -%>
+ ##
+ ## <%= name %> (<%= location['format'] %>)
+ ##
+<%= scope.function_templatewlv([template_path, local_vars]) %>
+<%- end -%>
+
+</VirtualHost>
diff --git a/puppet/modules/site_static/templates/rack.erb b/puppet/modules/site_static/templates/rack.erb
new file mode 100644
index 00000000..431778bb
--- /dev/null
+++ b/puppet/modules/site_static/templates/rack.erb
@@ -0,0 +1,19 @@
+ #PassengerLogLevel 1
+ #PassengerAppEnv production
+ #PassengerFriendlyErrorPages on
+<%- if @location_path != '' -%>
+ Alias /<%=@location_path%> "<%=@directory%>"
+ <Location /<%=@location_path%>>
+ PassengerBaseURI /<%=@location_path%>
+ PassengerAppRoot "<%=File.dirname(@directory)%>"
+ </Location>
+<%- end -%>
+ <Directory "<%=@directory%>">
+ Options -MultiViews
+<% if scope.function_guess_apache_version([]) == '2.4' %>
+ Require all granted
+<% else %>
+ Order deny,allow
+ Allow from all
+<% end %>
+ </Directory>
diff --git a/puppet/modules/site_stunnel/manifests/client.pp b/puppet/modules/site_stunnel/manifests/client.pp
new file mode 100644
index 00000000..c9e034f1
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/client.pp
@@ -0,0 +1,64 @@
+#
+# Sets up stunnel and firewall configuration for
+# a single stunnel client
+#
+# As a client, we accept connections on localhost,
+# and connect to a remote $connect:$connect_port
+#
+
+define site_stunnel::client (
+ $accept_port,
+ $connect_port,
+ $connect,
+ $original_port,
+ $verify = '2',
+ $pid = $name,
+ $rndfile = '/var/lib/stunnel4/.rnd',
+ $debuglevel = 'warning' ) {
+
+ $logfile = "/var/log/stunnel4/${name}.log"
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include x509::variables
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+ stunnel::service { $name:
+ accept => "127.0.0.1:${accept_port}",
+ connect => "${connect}:${connect_port}",
+ client => true,
+ cafile => $ca_path,
+ key => $key_path,
+ cert => $cert_path,
+ verify => $verify,
+ pid => "/var/run/stunnel4/${pid}.pid",
+ rndfile => $rndfile,
+ debuglevel => $debuglevel,
+ sslversion => 'TLSv1',
+ syslog => 'no',
+ output => $logfile;
+ }
+
+  # declare the log files so that the purge of /var/log/stunnel4
+  # (see init.pp) does not remove the ones belonging to this tunnel.
+ file {
+ $logfile:;
+ "${logfile}.1.gz":;
+ "${logfile}.2.gz":;
+ "${logfile}.3.gz":;
+ "${logfile}.4.gz":;
+ "${logfile}.5.gz":;
+ }
+
+ site_shorewall::stunnel::client { $name:
+ accept_port => $accept_port,
+ connect => $connect,
+ connect_port => $connect_port,
+ original_port => $original_port
+ }
+
+ include site_check_mk::agent::stunnel
+}
diff --git a/puppet/modules/site_stunnel/manifests/clients.pp b/puppet/modules/site_stunnel/manifests/clients.pp
new file mode 100644
index 00000000..c0958b5f
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/clients.pp
@@ -0,0 +1,23 @@
+#
+# example hiera yaml:
+#
+# stunnel:
+# clients:
+# ednp_clients:
+# thrips_9002:
+# accept_port: 4001
+# connect: thrips.demo.bitmask.i
+# connect_port: 19002
+# epmd_clients:
+# thrips_4369:
+# accept_port: 4000
+# connect: thrips.demo.bitmask.i
+# connect_port: 14369
+#
+# In the above example, this resource definition is called twice, with $name
+# 'ednp_clients' and 'epmd_clients'
+#
+
+define site_stunnel::clients {
+ create_resources(site_stunnel::client, $site_stunnel::clients[$name])
+}
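+
+# With the example hiera data above, `site_stunnel::clients { 'epmd_clients': }`
+# expands to roughly the following (a sketch; any other keys present in the
+# hash, such as original_port, are passed through as parameters as well):
+#
+#   site_stunnel::client { 'thrips_4369':
+#     accept_port  => 4000,
+#     connect      => 'thrips.demo.bitmask.i',
+#     connect_port => 14369,
+#   }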
diff --git a/puppet/modules/site_stunnel/manifests/init.pp b/puppet/modules/site_stunnel/manifests/init.pp
new file mode 100644
index 00000000..a874721f
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/init.pp
@@ -0,0 +1,48 @@
+#
+# If you need something to happen after stunnel is started,
+# you can depend on Service['stunnel'] or Class['site_stunnel']
+#
+
+class site_stunnel {
+
+ # include the generic stunnel module
+ # increase the number of open files to allow for 800 connections
+ class { 'stunnel': default_extra => 'ulimit -n 4096' }
+
+ # The stunnel.conf provided by the Debian package is broken by default
+ # so we get rid of it and just define our own. See #549384
+ if !defined(File['/etc/stunnel/stunnel.conf']) {
+ file {
+ # this file is a broken config installed by the package
+ '/etc/stunnel/stunnel.conf':
+ ensure => absent;
+ }
+ }
+
+ $stunnel = hiera('stunnel')
+
+ # add server stunnels
+ create_resources(site_stunnel::servers, $stunnel['servers'])
+
+ # add client stunnels
+ $clients = $stunnel['clients']
+ $client_sections = keys($clients)
+ site_stunnel::clients { $client_sections: }
+
+ # remove any old stunnel logs that are not
+ # defined by this puppet run
+ file {'/var/log/stunnel4': purge => true;}
+
+ # the default is to keep 356 log files for each stunnel.
+ # here we set a more reasonable number.
+ augeas {
+ 'logrotate_stunnel':
+ context => '/files/etc/logrotate.d/stunnel4/rule',
+ changes => [
+ 'set rotate 5',
+ ]
+ }
+
+ include site_stunnel::override_service
+}
+
diff --git a/puppet/modules/site_stunnel/manifests/override_service.pp b/puppet/modules/site_stunnel/manifests/override_service.pp
new file mode 100644
index 00000000..435b9aa0
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/override_service.pp
@@ -0,0 +1,18 @@
+# override stunnel::debian defaults
+#
+# ignore puppet lint error about inheriting from different namespace
+# lint:ignore:inherits_across_namespaces
+class site_stunnel::override_service inherits stunnel::debian {
+# lint:endignore
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+
+ Service[stunnel] {
+ subscribe => [
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ]
+ }
+}
diff --git a/puppet/modules/site_stunnel/manifests/servers.pp b/puppet/modules/site_stunnel/manifests/servers.pp
new file mode 100644
index 00000000..e76d1e9d
--- /dev/null
+++ b/puppet/modules/site_stunnel/manifests/servers.pp
@@ -0,0 +1,51 @@
+#
+# example hiera yaml:
+#
+# stunnel:
+# servers:
+# couch_server:
+# accept_port: 15984
+# connect_port: 5984
+#
+
+define site_stunnel::servers (
+ $accept_port,
+ $connect_port,
+ $verify = '2',
+ $pid = $name,
+ $rndfile = '/var/lib/stunnel4/.rnd',
+ $debuglevel = '4' ) {
+
+ $logfile = "/var/log/stunnel4/${name}.log"
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include x509::variables
+ $ca_path = "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt"
+ $cert_path = "${x509::variables::certs}/${site_config::params::cert_name}.crt"
+ $key_path = "${x509::variables::keys}/${site_config::params::cert_name}.key"
+
+ stunnel::service { $name:
+ accept => $accept_port,
+ connect => "127.0.0.1:${connect_port}",
+ client => false,
+ cafile => $ca_path,
+ key => $key_path,
+ cert => $cert_path,
+ verify => $verify,
+ pid => "/var/run/stunnel4/${pid}.pid",
+ rndfile => '/var/lib/stunnel4/.rnd',
+ debuglevel => $debuglevel,
+ sslversion => 'TLSv1',
+ syslog => 'no',
+ output => $logfile;
+ }
+
+ # allow incoming connections on $accept_port
+ site_shorewall::stunnel::server { $name:
+ port => $accept_port
+ }
+
+ include site_check_mk::agent::stunnel
+}
diff --git a/puppet/modules/site_tor/manifests/disable_exit.pp b/puppet/modules/site_tor/manifests/disable_exit.pp
new file mode 100644
index 00000000..078f80ae
--- /dev/null
+++ b/puppet/modules/site_tor/manifests/disable_exit.pp
@@ -0,0 +1,7 @@
+class site_tor::disable_exit {
+ tor::daemon::exit_policy {
+ 'no_exit_at_all':
+ reject => [ '*:*' ];
+ }
+}
+
diff --git a/puppet/modules/site_tor/manifests/init.pp b/puppet/modules/site_tor/manifests/init.pp
new file mode 100644
index 00000000..2207a5a9
--- /dev/null
+++ b/puppet/modules/site_tor/manifests/init.pp
@@ -0,0 +1,45 @@
+class site_tor {
+ tag 'leap_service'
+ Class['site_config::default'] -> Class['site_tor']
+
+ $tor = hiera('tor')
+ $bandwidth_rate = $tor['bandwidth_rate']
+ $tor_type = $tor['type']
+ $nickname = $tor['nickname']
+ $contact_emails = join($tor['contacts'],', ')
+ $family = $tor['family']
+
+ $address = hiera('ip_address')
+
+ $openvpn = hiera('openvpn', undef)
+ if $openvpn {
+ $openvpn_ports = $openvpn['ports']
+ }
+ else {
+ $openvpn_ports = []
+ }
+
+ include site_config::default
+ include tor::daemon
+ tor::daemon::relay { $nickname:
+ port => 9001,
+ address => $address,
+ contact_info => obfuscate_email($contact_emails),
+ bandwidth_rate => $bandwidth_rate,
+ my_family => $family
+ }
+
+ if ( $tor_type == 'exit'){
+ # Only enable the daemon directory if the node isn't also a webapp node
+ # or running openvpn on port 80
+ if ! member($::services, 'webapp') and ! member($openvpn_ports, '80') {
+ tor::daemon::directory { $::hostname: port => 80 }
+ }
+ }
+ else {
+ include site_tor::disable_exit
+ }
+
+ include site_shorewall::tor
+
+}
diff --git a/puppet/modules/site_webapp/files/server-status.conf b/puppet/modules/site_webapp/files/server-status.conf
new file mode 100644
index 00000000..10b2d4ed
--- /dev/null
+++ b/puppet/modules/site_webapp/files/server-status.conf
@@ -0,0 +1,26 @@
+# Keep track of extended status information for each request
+ExtendedStatus On
+
+# Determine if mod_status displays the first 63 characters of a request or
+# the last 63, assuming the request itself is greater than 63 chars.
+# Default: Off
+#SeeRequestTail On
+
+Listen 127.0.0.1:8162
+
+<VirtualHost 127.0.0.1:8162>
+
+<Location /server-status>
+ SetHandler server-status
+ Require all granted
+ Allow from 127.0.0.1
+</Location>
+
+</VirtualHost>
+
+
+<IfModule mod_proxy.c>
+ # Show Proxy LoadBalancer status in mod_status
+ ProxyStatus On
+</IfModule>
+
diff --git a/puppet/modules/site_webapp/manifests/apache.pp b/puppet/modules/site_webapp/manifests/apache.pp
new file mode 100644
index 00000000..80c7b29b
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/apache.pp
@@ -0,0 +1,28 @@
+# configure apache and passenger to serve the webapp
+class site_webapp::apache {
+
+ $web_api = hiera('api')
+ $api_domain = $web_api['domain']
+ $api_port = $web_api['port']
+
+ $web_domain = hiera('domain')
+ $domain_name = $web_domain['name']
+
+ $webapp = hiera('webapp')
+ $webapp_domain = $webapp['domain']
+
+ include site_apache::common
+ include apache::module::headers
+ include apache::module::alias
+ include apache::module::expires
+ include apache::module::removeip
+ include site_webapp::common_vhost
+
+ class { 'passenger': use_munin => false }
+
+ apache::vhost::file {
+ 'api':
+ content => template('site_apache/vhosts.d/api.conf.erb');
+ }
+
+}
diff --git a/puppet/modules/site_webapp/manifests/common_vhost.pp b/puppet/modules/site_webapp/manifests/common_vhost.pp
new file mode 100644
index 00000000..c57aad57
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/common_vhost.pp
@@ -0,0 +1,18 @@
+class site_webapp::common_vhost {
+ # installs x509 cert + key and common config
+ # that both nagios + leap webapp use
+
+ include x509::variables
+ include site_config::x509::commercial::cert
+ include site_config::x509::commercial::key
+ include site_config::x509::commercial::ca
+
+ Class['Site_config::X509::Commercial::Key'] ~> Service[apache]
+ Class['Site_config::X509::Commercial::Cert'] ~> Service[apache]
+ Class['Site_config::X509::Commercial::Ca'] ~> Service[apache]
+
+ apache::vhost::file {
+ 'common':
+ content => template('site_apache/vhosts.d/common.conf.erb')
+ }
+}
diff --git a/puppet/modules/site_webapp/manifests/couchdb.pp b/puppet/modules/site_webapp/manifests/couchdb.pp
new file mode 100644
index 00000000..71450370
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/couchdb.pp
@@ -0,0 +1,52 @@
+class site_webapp::couchdb {
+
+ $webapp = hiera('webapp')
+ # haproxy listener on port localhost:4096, see site_webapp::haproxy
+ $couchdb_host = 'localhost'
+ $couchdb_port = '4096'
+ $couchdb_webapp_user = $webapp['couchdb_webapp_user']['username']
+ $couchdb_webapp_password = $webapp['couchdb_webapp_user']['password']
+ $couchdb_admin_user = $webapp['couchdb_admin_user']['username']
+ $couchdb_admin_password = $webapp['couchdb_admin_user']['password']
+
+ include x509::variables
+
+ file {
+ '/srv/leap/webapp/config/couchdb.yml':
+ content => template('site_webapp/couchdb.yml.erb'),
+ owner => 'leap-webapp',
+ group => 'leap-webapp',
+ mode => '0600',
+ require => Vcsrepo['/srv/leap/webapp'];
+
+ # couchdb.admin.yml is a symlink to prevent the vcsrepo resource
+ # from changing its user permissions every time.
+ '/srv/leap/webapp/config/couchdb.admin.yml':
+ ensure => 'link',
+ target => '/etc/leap/couchdb.admin.yml',
+ require => Vcsrepo['/srv/leap/webapp'];
+
+ '/etc/leap/couchdb.admin.yml':
+ content => template('site_webapp/couchdb.admin.yml.erb'),
+ owner => 'root',
+ group => 'root',
+ mode => '0600',
+ require => File['/etc/leap'];
+
+ '/srv/leap/webapp/log':
+ ensure => directory,
+ owner => 'leap-webapp',
+ group => 'leap-webapp',
+ mode => '0755',
+ require => Vcsrepo['/srv/leap/webapp'];
+
+ '/srv/leap/webapp/log/production.log':
+ ensure => present,
+ owner => 'leap-webapp',
+ group => 'leap-webapp',
+ mode => '0666',
+ require => Vcsrepo['/srv/leap/webapp'];
+ }
+
+ include site_stunnel
+}
diff --git a/puppet/modules/site_webapp/manifests/cron.pp b/puppet/modules/site_webapp/manifests/cron.pp
new file mode 100644
index 00000000..70b9da04
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/cron.pp
@@ -0,0 +1,37 @@
+# setup webapp cronjobs
+class site_webapp::cron {
+
+ # cron tasks that need to be performed to cleanup the database
+ cron {
+ 'rotate_databases':
+ command => 'cd /srv/leap/webapp && bundle exec rake db:rotate',
+ environment => 'RAILS_ENV=production',
+ user => 'root',
+ hour => [0,6,12,18],
+ minute => 0;
+
+ 'delete_tmp_databases':
+ command => 'cd /srv/leap/webapp && bundle exec rake db:deletetmp',
+ environment => 'RAILS_ENV=production',
+ user => 'root',
+ hour => 1,
+ minute => 1;
+
+ # there is no longer a need to remove expired sessions, since the database
+ # will get destroyed.
+ 'remove_expired_sessions':
+ ensure => absent,
+ command => 'cd /srv/leap/webapp && bundle exec rake cleanup:sessions',
+ environment => 'RAILS_ENV=production',
+ user => 'leap-webapp',
+ hour => 2,
+ minute => 30;
+
+ 'remove_expired_tokens':
+ command => 'cd /srv/leap/webapp && bundle exec rake cleanup:tokens',
+ environment => 'RAILS_ENV=production',
+ user => 'leap-webapp',
+ hour => 3,
+ minute => 0;
+ }
+}
diff --git a/puppet/modules/site_webapp/manifests/hidden_service.pp b/puppet/modules/site_webapp/manifests/hidden_service.pp
new file mode 100644
index 00000000..72a2ce95
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/hidden_service.pp
@@ -0,0 +1,52 @@
+class site_webapp::hidden_service {
+ $tor = hiera('tor')
+ $hidden_service = $tor['hidden_service']
+ $tor_domain = "${hidden_service['address']}.onion"
+
+ include site_apache::common
+ include apache::module::headers
+ include apache::module::alias
+ include apache::module::expires
+ include apache::module::removeip
+
+ include tor::daemon
+ tor::daemon::hidden_service { 'webapp': ports => [ '80 127.0.0.1:80'] }
+
+ file {
+ '/var/lib/tor/webapp/':
+ ensure => directory,
+ owner => 'debian-tor',
+ group => 'debian-tor',
+ mode => '2700';
+
+ '/var/lib/tor/webapp/private_key':
+ ensure => present,
+ source => "/srv/leap/files/nodes/${::hostname}/tor.key",
+ owner => 'debian-tor',
+ group => 'debian-tor',
+ mode => '0600';
+
+ '/var/lib/tor/webapp/hostname':
+ ensure => present,
+ content => $tor_domain,
+ owner => 'debian-tor',
+ group => 'debian-tor',
+ mode => '0600';
+ }
+
+ # it is necessary to zero out the config of the status module
+ # because we are configuring our own version that is unavailable
+ # over the hidden service (see: #7456 and #7776)
+ apache::module { 'status': ensure => present, conf_content => ' ' }
+ # the access_compat module is required to enable Allow directives
+ apache::module { 'access_compat': ensure => present }
+
+ apache::vhost::file {
+ 'hidden_service':
+ content => template('site_apache/vhosts.d/hidden_service.conf.erb');
+ 'server_status':
+ vhost_source => 'modules/site_webapp/server-status.conf';
+ }
+
+ include site_shorewall::tor
+}
diff --git a/puppet/modules/site_webapp/manifests/init.pp b/puppet/modules/site_webapp/manifests/init.pp
new file mode 100644
index 00000000..15925aba
--- /dev/null
+++ b/puppet/modules/site_webapp/manifests/init.pp
@@ -0,0 +1,179 @@
+# configure webapp service
+class site_webapp {
+ tag 'leap_service'
+ $definition_files = hiera('definition_files')
+ $provider = $definition_files['provider']
+ $eip_service = $definition_files['eip_service']
+ $soledad_service = $definition_files['soledad_service']
+ $smtp_service = $definition_files['smtp_service']
+ $node_domain = hiera('domain')
+ $provider_domain = $node_domain['full_suffix']
+ $webapp = hiera('webapp')
+ $api_version = $webapp['api_version']
+ $secret_token = $webapp['secret_token']
+ $tor = hiera('tor', false)
+ $sources = hiera('sources')
+
+ Class['site_config::default'] -> Class['site_webapp']
+
+ include site_config::ruby::dev
+ include site_webapp::apache
+ include site_webapp::couchdb
+ include site_haproxy
+ include site_webapp::cron
+ include site_config::default
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+ include site_config::x509::client_ca::ca
+ include site_config::x509::client_ca::key
+ include site_nickserver
+
+ # remove leftovers from previous installations on webapp nodes
+ include site_config::remove::webapp
+
+ group { 'leap-webapp':
+ ensure => present,
+ allowdupe => false;
+ }
+
+ user { 'leap-webapp':
+ ensure => present,
+ allowdupe => false,
+ gid => 'leap-webapp',
+ groups => 'ssl-cert',
+ home => '/srv/leap/webapp',
+ require => [ Group['leap-webapp'] ];
+ }
+
+ vcsrepo { '/srv/leap/webapp':
+ ensure => present,
+ force => true,
+ revision => $sources['webapp']['revision'],
+ provider => $sources['webapp']['type'],
+ source => $sources['webapp']['source'],
+ owner => 'leap-webapp',
+ group => 'leap-webapp',
+ require => [ User['leap-webapp'], Group['leap-webapp'] ],
+ notify => Exec['bundler_update']
+ }
+
+ exec { 'bundler_update':
+ cwd => '/srv/leap/webapp',
+ command => '/bin/bash -c "/usr/bin/bundle check --path vendor/bundle || /usr/bin/bundle install --path vendor/bundle --without test development debug"',
+ unless => '/usr/bin/bundle check --path vendor/bundle',
+ user => 'leap-webapp',
+ timeout => 600,
+ require => [
+ Class['bundler::install'],
+ Vcsrepo['/srv/leap/webapp'],
+ Class['site_config::ruby::dev'],
+ Service['shorewall'] ],
+ notify => Service['apache'];
+ }
+
+ #
+  # NOTE: in order to support a webapp that is running on a subpath and not at
+  # the root of the domain, assets:precompile needs to be run with
+  # RAILS_RELATIVE_URL_ROOT=/application-root
+ #
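+  #
+  # For illustration only (an assumption, not enabled here), the precompile
+  # command would then look roughly like:
+  #   RAILS_RELATIVE_URL_ROOT=/application-root RAILS_ENV=production \
+  #     bundle exec rake assets:precompile
+  #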
+
+ exec { 'compile_assets':
+ cwd => '/srv/leap/webapp',
+ command => '/bin/bash -c "RAILS_ENV=production /usr/bin/bundle exec rake assets:precompile"',
+ user => 'leap-webapp',
+ logoutput => on_failure,
+ require => Exec['bundler_update'],
+ notify => Service['apache'];
+ }
+
+ file {
+ '/srv/leap/webapp/config/provider':
+ ensure => directory,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0755';
+
+ '/srv/leap/webapp/config/provider/provider.json':
+ content => $provider,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0644';
+
+ '/srv/leap/webapp/public/ca.crt':
+ ensure => link,
+ require => Vcsrepo['/srv/leap/webapp'],
+ target => "${x509::variables::local_CAs}/${site_config::params::ca_name}.crt";
+
+ "/srv/leap/webapp/public/${api_version}":
+ ensure => directory,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0755';
+
+ "/srv/leap/webapp/public/${api_version}/config/":
+ ensure => directory,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0755';
+
+ "/srv/leap/webapp/public/${api_version}/config/eip-service.json":
+ content => $eip_service,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0644';
+
+ "/srv/leap/webapp/public/${api_version}/config/soledad-service.json":
+ content => $soledad_service,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0644';
+
+ "/srv/leap/webapp/public/${api_version}/config/smtp-service.json":
+ content => $smtp_service,
+ require => Vcsrepo['/srv/leap/webapp'],
+ owner => leap-webapp, group => leap-webapp, mode => '0644';
+ }
+
+ try::file {
+ '/srv/leap/webapp/config/customization':
+ ensure => directory,
+ recurse => true,
+ purge => true,
+ force => true,
+ owner => leap-webapp,
+ group => leap-webapp,
+ mode => 'u=rwX,go=rX',
+ require => Vcsrepo['/srv/leap/webapp'],
+ notify => Exec['compile_assets'],
+ source => $webapp['customization_dir'];
+ }
+
+ git::changes {
+ 'public/favicon.ico':
+ cwd => '/srv/leap/webapp',
+ require => Vcsrepo['/srv/leap/webapp'],
+ user => 'leap-webapp';
+ }
+
+ file {
+ '/srv/leap/webapp/config/config.yml':
+ content => template('site_webapp/config.yml.erb'),
+ owner => leap-webapp,
+ group => leap-webapp,
+ mode => '0600',
+ require => Vcsrepo['/srv/leap/webapp'],
+ notify => Service['apache'];
+ }
+
+ if $tor {
+ $hidden_service = $tor['hidden_service']
+ if $hidden_service['active'] {
+ include site_webapp::hidden_service
+ }
+ }
+
+
+ # needed for the soledad-sync check which is run on the
+ # webapp node
+ include soledad::client
+
+ leap::logfile { 'webapp': }
+
+ include site_shorewall::webapp
+ include site_check_mk::agent::webapp
+}
diff --git a/puppet/modules/site_webapp/templates/config.yml.erb b/puppet/modules/site_webapp/templates/config.yml.erb
new file mode 100644
index 00000000..dd55d3e9
--- /dev/null
+++ b/puppet/modules/site_webapp/templates/config.yml.erb
@@ -0,0 +1,36 @@
+<%
+cert_options = @webapp['client_certificates']
+production = {
+ "admins" => @webapp['admins'],
+ "default_locale" => @webapp['default_locale'],
+ "available_locales" => @webapp['locales'],
+ "domain" => @provider_domain,
+ "force_ssl" => @webapp['secure'],
+ "client_ca_key" => "%s/%s.key" % [scope.lookupvar('x509::variables::keys'), scope.lookupvar('site_config::params::client_ca_name')],
+ "client_ca_cert" => "%s/%s.crt" % [scope.lookupvar('x509::variables::local_CAs'), scope.lookupvar('site_config::params::client_ca_name')],
+ "secret_token" => @secret_token,
+ "client_cert_lifespan" => cert_options['life_span'],
+ "client_cert_bit_size" => cert_options['bit_size'].to_i,
+ "client_cert_hash" => cert_options['digest'],
+ "allow_limited_certs" => @webapp['allow_limited_certs'],
+ "allow_unlimited_certs" => @webapp['allow_unlimited_certs'],
+ "allow_anonymous_certs" => @webapp['allow_anonymous_certs'],
+ "limited_cert_prefix" => cert_options['limited_prefix'],
+ "unlimited_cert_prefix" => cert_options['unlimited_prefix'],
+ "minimum_client_version" => @webapp['client_version']['min'],
+ "default_service_level" => @webapp['default_service_level'],
+ "service_levels" => @webapp['service_levels'],
+ "allow_registration" => @webapp['allow_registration'],
+ "handle_blacklist" => @webapp['forbidden_usernames'],
+ "invite_required" => @webapp['invite_required'],
+ "api_tokens" => @webapp['api_tokens']
+}
+
+if @webapp['engines'] && @webapp['engines'].any?
+ production["engines"] = @webapp['engines']
+end
+-%>
+#
+# This file is generated by puppet. This file inherits from defaults.yml.
+#
+<%= scope.function_sorted_yaml([{"production" => production}]) %>
diff --git a/puppet/modules/site_webapp/templates/couchdb.admin.yml.erb b/puppet/modules/site_webapp/templates/couchdb.admin.yml.erb
new file mode 100644
index 00000000..a0921add
--- /dev/null
+++ b/puppet/modules/site_webapp/templates/couchdb.admin.yml.erb
@@ -0,0 +1,9 @@
+production:
+ prefix: ""
+ protocol: 'http'
+ host: <%= @couchdb_host %>
+ port: <%= @couchdb_port %>
+ auto_update_design_doc: false
+ username: <%= @couchdb_admin_user %>
+ password: <%= @couchdb_admin_password %>
+
diff --git a/puppet/modules/site_webapp/templates/couchdb.yml.erb b/puppet/modules/site_webapp/templates/couchdb.yml.erb
new file mode 100644
index 00000000..2bef0af5
--- /dev/null
+++ b/puppet/modules/site_webapp/templates/couchdb.yml.erb
@@ -0,0 +1,9 @@
+production:
+ prefix: ""
+ protocol: 'http'
+ host: <%= @couchdb_host %>
+ port: <%= @couchdb_port %>
+ auto_update_design_doc: false
+ username: <%= @couchdb_webapp_user %>
+ password: <%= @couchdb_webapp_password %>
+
diff --git a/puppet/modules/soledad/manifests/client.pp b/puppet/modules/soledad/manifests/client.pp
new file mode 100644
index 00000000..e470adeb
--- /dev/null
+++ b/puppet/modules/soledad/manifests/client.pp
@@ -0,0 +1,16 @@
+# setup soledad-client
+# currently needed on webapp node to run the soledad-sync test
+class soledad::client {
+
+ tag 'leap_service'
+ include soledad::common
+
+ package {
+ 'soledad-client':
+ ensure => latest,
+ require => Class['site_apt::leap_repo'];
+ 'python-u1db':
+ ensure => latest;
+ }
+
+}
diff --git a/puppet/modules/soledad/manifests/common.pp b/puppet/modules/soledad/manifests/common.pp
new file mode 100644
index 00000000..8d8339d4
--- /dev/null
+++ b/puppet/modules/soledad/manifests/common.pp
@@ -0,0 +1,8 @@
+# install soledad-common, needed by both soledad-client and soledad-server
+class soledad::common {
+
+ package { 'soledad-common':
+ ensure => latest;
+ }
+
+}
diff --git a/puppet/modules/soledad/manifests/server.pp b/puppet/modules/soledad/manifests/server.pp
new file mode 100644
index 00000000..8674f421
--- /dev/null
+++ b/puppet/modules/soledad/manifests/server.pp
@@ -0,0 +1,104 @@
+# setup soledad-server
+class soledad::server {
+ tag 'leap_service'
+
+ include site_config::default
+ include soledad::common
+
+ $soledad = hiera('soledad')
+ $couchdb_user = $soledad['couchdb_soledad_user']['username']
+ $couchdb_password = $soledad['couchdb_soledad_user']['password']
+ $couchdb_leap_mx_user = $soledad['couchdb_leap_mx_user']['username']
+
+ $couchdb_host = 'localhost'
+ $couchdb_port = '5984'
+
+ $soledad_port = $soledad['port']
+
+ $sources = hiera('sources')
+
+ include site_config::x509::cert
+ include site_config::x509::key
+ include site_config::x509::ca
+
+ #
+ # SOLEDAD CONFIG
+ #
+
+ file {
+ '/etc/soledad':
+ ensure => directory,
+ owner => 'root',
+ group => 'root',
+ mode => '0755';
+ '/etc/soledad/soledad-server.conf':
+ content => template('soledad/soledad-server.conf.erb'),
+ owner => 'soledad',
+ group => 'soledad',
+ mode => '0640',
+ notify => Service['soledad-server'],
+ require => [ User['soledad'], Group['soledad'] ];
+ '/srv/leap/soledad':
+ ensure => directory,
+ owner => 'soledad',
+ group => 'soledad',
+ require => [ User['soledad'], Group['soledad'] ];
+ '/var/lib/soledad':
+ ensure => directory,
+ owner => 'soledad',
+ group => 'soledad',
+ require => [ User['soledad'], Group['soledad'] ];
+ }
+
+ package { $sources['soledad']['package']:
+ ensure => $sources['soledad']['revision'],
+ require => Class['site_apt::leap_repo'];
+ }
+
+ file { '/etc/default/soledad':
+ content => template('soledad/default-soledad.erb'),
+ owner => 'soledad',
+ group => 'soledad',
+ mode => '0600',
+ notify => Service['soledad-server'],
+ require => [ User['soledad'], Group['soledad'] ];
+ }
+
+ service { 'soledad-server':
+ ensure => running,
+ enable => true,
+ hasstatus => true,
+ hasrestart => true,
+ require => [ User['soledad'], Group['soledad'] ],
+ subscribe => [
+ Package['soledad-server'],
+ Class['Site_config::X509::Key'],
+ Class['Site_config::X509::Cert'],
+ Class['Site_config::X509::Ca'] ];
+ }
+
+ include site_shorewall::soledad
+ include site_check_mk::agent::soledad
+
+ # set up users, group and directories for soledad-server
+ # although the soledad users are already created by the
+ # soledad-server package
+ group { 'soledad':
+ ensure => present,
+ system => true,
+ }
+ user {
+ 'soledad':
+ ensure => present,
+ system => true,
+ gid => 'soledad',
+ home => '/srv/leap/soledad',
+ require => Group['soledad'];
+ 'soledad-admin':
+ ensure => present,
+ system => true,
+ gid => 'soledad',
+ home => '/srv/leap/soledad',
+ require => Group['soledad'];
+ }
+}
diff --git a/puppet/modules/soledad/templates/default-soledad.erb b/puppet/modules/soledad/templates/default-soledad.erb
new file mode 100644
index 00000000..32504e38
--- /dev/null
+++ b/puppet/modules/soledad/templates/default-soledad.erb
@@ -0,0 +1,5 @@
+# this file is managed by puppet
+START=yes
+CERT_PATH=<%= scope.lookupvar('x509::variables::certs') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.crt
+PRIVKEY_PATH=<%= scope.lookupvar('x509::variables::keys') %>/<%= scope.lookupvar('site_config::params::cert_name') %>.key
+HTTPS_PORT=<%=@soledad_port%>
diff --git a/puppet/modules/soledad/templates/soledad-server.conf.erb b/puppet/modules/soledad/templates/soledad-server.conf.erb
new file mode 100644
index 00000000..1c6a0d19
--- /dev/null
+++ b/puppet/modules/soledad/templates/soledad-server.conf.erb
@@ -0,0 +1,12 @@
+[soledad-server]
+couch_url = http://<%= @couchdb_user %>:<%= @couchdb_password %>@<%= @couchdb_host %>:<%= @couchdb_port %>
+create_cmd = sudo -u soledad-admin /usr/bin/create-user-db
+admin_netrc = /etc/couchdb/couchdb-soledad-admin.netrc
+
+[database-security]
+members = <%= @couchdb_user %>, <%= @couchdb_leap_mx_user %>
+# not needed, but for documentation:
+# members_roles = replication
+# admins = admin
+# admins_roles = replication
+
diff --git a/puppet/modules/sshd/.fixtures.yml b/puppet/modules/sshd/.fixtures.yml
new file mode 100644
index 00000000..42598a65
--- /dev/null
+++ b/puppet/modules/sshd/.fixtures.yml
@@ -0,0 +1,3 @@
+fixtures:
+ symlinks:
+ sshd: "#{source_dir}" \ No newline at end of file
diff --git a/puppet/modules/sshd/.gitignore b/puppet/modules/sshd/.gitignore
new file mode 100644
index 00000000..5ebb01fb
--- /dev/null
+++ b/puppet/modules/sshd/.gitignore
@@ -0,0 +1,4 @@
+.librarian/*
+.tmp/*
+*.log
+spec/fixtures/*
diff --git a/puppet/modules/sshd/.rspec b/puppet/modules/sshd/.rspec
new file mode 100644
index 00000000..f07c903a
--- /dev/null
+++ b/puppet/modules/sshd/.rspec
@@ -0,0 +1,4 @@
+--format documentation
+--color
+--pattern "spec/*/*_spec.rb"
+#--backtrace
diff --git a/puppet/modules/sshd/.travis.yml b/puppet/modules/sshd/.travis.yml
new file mode 100644
index 00000000..7bd2a2bc
--- /dev/null
+++ b/puppet/modules/sshd/.travis.yml
@@ -0,0 +1,27 @@
+before_install:
+ - gem update --system 2.1.11
+ - gem --version
+rvm:
+ - 1.8.7
+ - 1.9.3
+ - 2.0.0
+script: 'bundle exec rake spec'
+env:
+ - PUPPET_VERSION="~> 2.7.0"
+ - PUPPET_VERSION="~> 3.0.0"
+ - PUPPET_VERSION="~> 3.1.0"
+ - PUPPET_VERSION="~> 3.2.0"
+ - PUPPET_VERSION="~> 3.3.0"
+ - PUPPET_VERSION="~> 3.4.0"
+matrix:
+ exclude:
+ # No support for Ruby 1.9 before Puppet 2.7
+ - rvm: 1.9.3
+ env: PUPPET_VERSION=2.6.0
+ # No support for Ruby 2.0 before Puppet 3.2
+ - rvm: 2.0.0
+ env: PUPPET_VERSION="~> 2.7.0"
+ - rvm: 2.0.0
+ env: PUPPET_VERSION="~> 3.0.0"
+ - rvm: 2.0.0
+ env: PUPPET_VERSION="~> 3.1.0"
diff --git a/puppet/modules/sshd/Gemfile b/puppet/modules/sshd/Gemfile
new file mode 100644
index 00000000..ef74f90e
--- /dev/null
+++ b/puppet/modules/sshd/Gemfile
@@ -0,0 +1,14 @@
+source 'https://rubygems.org'
+
+group :development, :test do
+ gem 'puppet', '>= 2.7.0'
+ gem 'puppet-lint', '>=0.3.2'
+ gem 'puppetlabs_spec_helper', '>=0.2.0'
+ gem 'rake', '>=0.9.2.2'
+ gem 'librarian-puppet', '>=0.9.10'
+ gem 'rspec-system-puppet', :require => false
+ gem 'serverspec', :require => false
+ gem 'rspec-system-serverspec', :require => false
+ gem 'rspec-hiera-puppet'
+ gem 'rspec-puppet', :git => 'https://github.com/rodjek/rspec-puppet.git'
+end \ No newline at end of file
diff --git a/puppet/modules/sshd/Gemfile.lock b/puppet/modules/sshd/Gemfile.lock
new file mode 100644
index 00000000..0c2c58e9
--- /dev/null
+++ b/puppet/modules/sshd/Gemfile.lock
@@ -0,0 +1,116 @@
+GIT
+ remote: https://github.com/rodjek/rspec-puppet.git
+ revision: c44381a240ec420d4ffda7bffc55ee4d9c08d682
+ specs:
+ rspec-puppet (1.0.1)
+ rspec
+
+GEM
+ remote: https://rubygems.org/
+ specs:
+ builder (3.2.2)
+ diff-lcs (1.2.5)
+ excon (0.31.0)
+ facter (1.7.4)
+ fog (1.19.0)
+ builder
+ excon (~> 0.31.0)
+ formatador (~> 0.2.0)
+ mime-types
+ multi_json (~> 1.0)
+ net-scp (~> 1.1)
+ net-ssh (>= 2.1.3)
+ nokogiri (~> 1.5)
+ ruby-hmac
+ formatador (0.2.4)
+ hiera (1.3.1)
+ json_pure
+ hiera-puppet (1.0.0)
+ hiera (~> 1.0)
+ highline (1.6.20)
+ json (1.8.1)
+ json_pure (1.8.1)
+ kwalify (0.7.2)
+ librarian-puppet (0.9.10)
+ json
+ thor (~> 0.15)
+ metaclass (0.0.2)
+ mime-types (1.25.1)
+ mocha (1.0.0)
+ metaclass (~> 0.0.1)
+ multi_json (1.8.4)
+ net-scp (1.1.2)
+ net-ssh (>= 2.6.5)
+ net-ssh (2.7.0)
+ nokogiri (1.5.11)
+ puppet (3.4.2)
+ facter (~> 1.6)
+ hiera (~> 1.0)
+ rgen (~> 0.6.5)
+ puppet-lint (0.3.2)
+ puppetlabs_spec_helper (0.4.1)
+ mocha (>= 0.10.5)
+ rake
+ rspec (>= 2.9.0)
+ rspec-puppet (>= 0.1.1)
+ rake (10.1.1)
+ rbvmomi (1.8.1)
+ builder
+ nokogiri (>= 1.4.1)
+ trollop
+ rgen (0.6.6)
+ rspec (2.14.1)
+ rspec-core (~> 2.14.0)
+ rspec-expectations (~> 2.14.0)
+ rspec-mocks (~> 2.14.0)
+ rspec-core (2.14.7)
+ rspec-expectations (2.14.4)
+ diff-lcs (>= 1.1.3, < 2.0)
+ rspec-hiera-puppet (1.0.0)
+ hiera (>= 1.0)
+ hiera-puppet (>= 1.0)
+ puppet (>= 3.0)
+ rspec
+ rspec-puppet
+ rspec-mocks (2.14.4)
+ rspec-system (2.8.0)
+ fog (~> 1.18)
+ kwalify (~> 0.7.2)
+ mime-types (~> 1.16)
+ net-scp (~> 1.1)
+ net-ssh (~> 2.7)
+ nokogiri (~> 1.5.10)
+ rbvmomi (~> 1.6)
+ rspec (~> 2.14)
+ systemu (~> 2.5)
+ rspec-system-puppet (2.2.1)
+ rspec-system (~> 2.0)
+ rspec-system-serverspec (2.0.1)
+ rspec-system (~> 2.0)
+ serverspec (~> 0.0)
+ specinfra (~> 0.0)
+ ruby-hmac (0.4.0)
+ serverspec (0.14.4)
+ highline
+ net-ssh
+ rspec (>= 2.13.0)
+ specinfra (>= 0.1.0)
+ specinfra (0.4.1)
+ systemu (2.6.0)
+ thor (0.18.1)
+ trollop (2.0)
+
+PLATFORMS
+ ruby
+
+DEPENDENCIES
+ librarian-puppet (>= 0.9.10)
+ puppet (>= 2.7.0)
+ puppet-lint (>= 0.3.2)
+ puppetlabs_spec_helper (>= 0.2.0)
+ rake (>= 0.9.2.2)
+ rspec-hiera-puppet
+ rspec-puppet!
+ rspec-system-puppet
+ rspec-system-serverspec
+ serverspec
diff --git a/puppet/modules/sshd/LICENSE b/puppet/modules/sshd/LICENSE
new file mode 100644
index 00000000..94a9ed02
--- /dev/null
+++ b/puppet/modules/sshd/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/puppet/modules/sshd/Modulefile b/puppet/modules/sshd/Modulefile
new file mode 100644
index 00000000..5e4f92d6
--- /dev/null
+++ b/puppet/modules/sshd/Modulefile
@@ -0,0 +1,10 @@
+name 'puppet-sshd'
+version '0.1.0'
+source 'https://github.com/duritong/puppet-sshd'
+author 'duritong'
+license 'Apache License, Version 2.0'
+summary 'ssh daemon configuration'
+description 'Manages sshd_config'
+project_page 'https://github.com/duritong/puppet-sshd'
+
+dependency 'puppetlabs/stdlib', '>= 2.0.0' \ No newline at end of file
diff --git a/puppet/modules/sshd/Puppetfile b/puppet/modules/sshd/Puppetfile
new file mode 100644
index 00000000..166d3b4d
--- /dev/null
+++ b/puppet/modules/sshd/Puppetfile
@@ -0,0 +1,3 @@
+forge 'http://forge.puppetlabs.com'
+
+mod 'puppetlabs/stdlib', '>=2.0.0' \ No newline at end of file
diff --git a/puppet/modules/sshd/Puppetfile.lock b/puppet/modules/sshd/Puppetfile.lock
new file mode 100644
index 00000000..f9381858
--- /dev/null
+++ b/puppet/modules/sshd/Puppetfile.lock
@@ -0,0 +1,8 @@
+FORGE
+ remote: http://forge.puppetlabs.com
+ specs:
+ puppetlabs/stdlib (4.1.0)
+
+DEPENDENCIES
+ puppetlabs/stdlib (>= 2.0.0)
+
diff --git a/puppet/modules/sshd/README.md b/puppet/modules/sshd/README.md
new file mode 100644
index 00000000..77e4d29b
--- /dev/null
+++ b/puppet/modules/sshd/README.md
@@ -0,0 +1,247 @@
+# Puppet SSH Module
+
+[![Build Status](https://travis-ci.org/duritong/puppet-sshd.png?branch=master)](https://travis-ci.org/duritong/puppet-sshd)
+
+This puppet module manages OpenSSH configuration and services.
+
+**!! Upgrade Notice (05/2015) !!**
+
+The hardened_ssl parameter name was changed to simply 'hardened'.
+
+**!! Upgrade Notice (01/2013) !!**
+
+This module now uses parameterized classes, where it used global variables
+before. So please watch out before pulling: you need to change the
+class declarations in your manifests!
+
+
+### Dependencies
+
+This module requires puppet >= 2.6, and the following modules are required as
+pre-dependencies:
+
+- [puppetlabs/stdlib](https://github.com/puppetlabs/puppetlabs-stdlib) >= 2.x
+
+## OpenSSH Server
+
+On a node where you wish to have an openssh server installed, you should
+include
+
+```puppet
+class { 'sshd': }
+```
+
+on that node. If you need to configure any aspect of sshd_config, you can adjust many parameters when declaring the class:
+
+```puppet
+class { 'sshd':
+ ports => [ 20002 ],
+ permit_root_login => 'no',
+}
+```
+
+See Configurable Variables below for what you can set.
+
+### Nagios
+
+To have nagios checks set up automatically for sshd services, simply set
+`manage_nagios` to `true` for that class. If you want to disable ssh
+nagios checking for a particular node (such as when ssh is firewalled), then you
+can set the class parameter `nagios_check_ssh` to `false` and that node will not be
+monitored.
+
+Nagios will automatically check the ports defined in `ports`, and the
+hostname specified by `nagios_check_ssh_hostname`.
+
+Note that if you need to use some specific logic to decide whether or not to
+create a nagios service check, you should set `$manage_nagios` to `false` and
+use `sshd::nagios` from within your own manifests. You can pass the port to
+that define explicitly; if the `$port` parameter is not specified, the
+resource name is used as the port (e.g. if you call it like this:
+`sshd::nagios { '22': }`).
+
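+As a rough sketch (the port and hostname below are placeholders, not defaults
+of this module), a manually wired check for a non-standard port could look
+like this:
+
+```puppet
+class { 'sshd':
+  ports         => [ 20002 ],
+  manage_nagios => false,
+}
+
+# the port defaults to the resource name; check_hostname is optional
+sshd::nagios { '20002':
+  check_hostname => 'ssh.example.org',
+}
+```
+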
+NOTE: this requires that you are using the shared-nagios puppet module which
+supports the nagios native types via `nagios::service`:
+
+https://gitlab.com/shared-puppet-modules-group/sshd
+
+### Firewall
+
+If you wish to have firewall rules set up automatically for you using shorewall,
+you will need to set `manage_shorewall => true`. The `ports` that you have
+specified will automatically be used.
+
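+For example, a minimal sketch (the extra port is a placeholder):
+
+```puppet
+class { 'sshd':
+  ports            => [ 22, 20002 ],
+  manage_shorewall => true,
+}
+```
+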
+NOTE: This requires that you are using the shared-shorewall puppet module:
+git://labs.riseup.net/shared-shorewall
+
+
+### Configurable variables
+
+Configuration of sshd is strict and may not fit all needs; however, there are a
+number of variables that you can consider configuring. The defaults match the
+defaults of the distribution-shipped sshd_config file.
+
+To set any of these variables, simply pass them as parameters in the class
+declaration, for example:
+
+```puppet
+class { 'sshd':
+  listen_address => ['10.0.0.1', '192.168.0.1'],
+  use_pam        => 'yes',
+}
+```
+
+If you need to install a version of the ssh daemon or client package other than
+the default one that would be installed by `ensure => installed`, then you can
+set the following variables:
+
+```puppet
+class {'sshd':
+ ensure_version => "1:5.2p2-6"
+}
+```
+
+The following is a list of the currently available variables:
+
+ - `listen_address`
+   specify the addresses sshd should listen on. Set this to `['10.0.0.1', '192.168.0.1']` to have it listen on both addresses, or leave it unset to listen on all. Default: empty -> results in listening on `0.0.0.0`
+ - `allowed_users`
+   list of usernames separated by spaces. Set this, for example, to `"foobar
+   root"` to ensure that only the users foobar and root may log in. Default:
+   empty -> no restriction is set
+ - `allowed_groups`
+   list of groups separated by spaces. Set this, for example, to `"wheel sftponly"`
+   to ensure that only users in the groups wheel and sftponly may log in.
+   Default: empty -> no restriction is set. Note: this is set after
+   `allowed_users`; take care of the behaviour if you use these two options
+   together.
+ - `use_pam` whether to use PAM for authentication. Values:
+ - `no` (default)
+ - `yes`
+ - `permit_root_login` If you want to allow root logins or not. Valid values:
+ - `yes`
+ - `no`
+ - `without-password` (default)
+ - `forced-commands-only`
+ - `password_authentication`
+ If you want to enable password authentication or not. Valid values:
+ - `yes`
+ - `no` (default)
+ - `kerberos_authentication`
+ If you want the password that is provided by the user to be validated
+ through the Kerberos KDC. To use this option the server needs a Kerberos
+ servtab which allows the verification of the KDC's identity. Valid values:
+ - `yes`
+ - `no` (default)
+ - `kerberos_orlocalpasswd` If password authentication through Kerberos fails, then the password will be validated via any additional local mechanism. Valid values:
+ - `yes` (default)
+ - `no`
+ - `kerberos_ticketcleanup` Destroy the user's ticket cache file on logout? Valid values:
+ - `yes` (default)
+ - `no`
+ - `gssapi_authentication` Authenticate users based on GSSAPI? Valid values:
+ - `yes`
+ - `no` (default)
+ - `gssapi_cleanupcredentials` Destroy user's credential cache on logout? Valid values:
+ - `yes` (default)
+ - `no`
+ - `challenge_response_authentication` If you want to enable ChallengeResponseAuthentication or not. When disabled, s/key passwords are disabled. Valid values:
+ - `yes`
+ - `no` (default)
+ - `tcp_forwarding` If you want to enable TcpForwarding. Valid values:
+ - `yes`
+ - `no` (default)
+ - `x11_forwarding` If you want to enable x11 forwarding. Valid values:
+ - `yes`
+ - `no` (default)
+ - `agent_forwarding` If you want to allow ssh-agent forwarding. Valid values:
+ - `yes`
+ - `no` (default)
+ - `pubkey_authentication` If you want to enable public key authentication. Valid values:
+ - `yes` (default)
+ - `no`
+ - `rsa_authentication` If you want to enable RSA Authentication. Valid values:
+ - `yes`
+ - `no` (default)
+ - `rhosts_rsa_authentication`
+ If you want to enable rhosts RSA Authentication. Valid values:
+ - `yes`
+ - `no` (default)
+ - `hostbased_authentication` If you want to enable `HostbasedAuthentication`. Valid values:
+ - `yes`
+ - `no` (default)
+ - `strict_modes` If you want to set `StrictModes` (check file modes/ownership before accepting login). Valid values:
+ - `yes` (default)
+ - `no`
+ - `permit_empty_passwords`
+   If you want to enable PermitEmptyPasswords to allow empty passwords. Valid
+ Values:
+ - `yes`
+ - `no` (default)
+ - `ports` A list of ports to listen on, if you want something other than the default `22`. Default: `[22]`
+ - `authorized_keys_file`
+ Set this to the location of the AuthorizedKeysFile
+ (e.g. `/etc/ssh/authorized_keys/%u`). Default: `AuthorizedKeysFile
+ %h/.ssh/authorized_keys`
+ - `hardened`
+   Use only strong ciphers, MACs, KexAlgorithms, etc.
+ Values:
+ - `no` (default)
+ - `yes`
+ - `print_motd`
+ Show the Message of the day when a user logs in.
+ - `sftp_subsystem`
+   Set a different sftp subsystem than the default one. Might be interesting for
+ sftponly usage. Default: empty -> no change of the default
+ - `head_additional_options`
+   Set this to any additional sshd options which aren't listed above. Anything
+   set here will be added to the beginning of the sshd_config file. This option
+   might be useful for defining complicated Match blocks. The string is
+   included verbatim, exactly as defined, so take care! Default: empty -> not added.
+ - `tail_additional_options` Set this to any additional sshd options which aren't listed above. Anything set here will be added to the end of the sshd_config file. This option might be useful for defining complicated Match blocks (see the sketch after this list). The string is included verbatim, exactly as defined, so take care! Default: empty -> not added.
+ - `shared_ip` Whether the server uses a shared network IP address. If it does, then we don't want it to export an rsa key for its IP address. Values:
+ - `no` (default)
+ - `yes`
+
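+The Match-block sketch referenced above might look like the following (the
+group name and path are placeholders, not defaults of this module):
+
+```puppet
+class { 'sshd':
+  tail_additional_options => "Match Group sftponly
+  ChrootDirectory /srv/sftp/%u
+  ForceCommand internal-sftp",
+}
+```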
+
+### Defines and functions
+
+Deploy an authorized_keys file with the `sshd::ssh_authorized_key` define.
+
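+A minimal sketch (the user name and key material are placeholders):
+
+```puppet
+sshd::ssh_authorized_key { 'exampleuser':
+  type => 'ssh-rsa',
+  key  => 'AAAA...placeholder-public-key...',
+  user => 'exampleuser',
+}
+```
+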
+Generate a public/private keypair with the ssh_keygen function. For example, the
+following will generate ssh keys and put the different parts of the key into
+variables:
+
+```puppet
+$ssh_keys = ssh_keygen("${ssh_key_basepath}/backup/keys/${::fqdn}/${backup_host}")
+$public_key = split($ssh_keys[1], ' ')
+$sshkey_type = $public_key[0]
+$sshkey = $public_key[1]
+```
+
+## Client
+
+
+On a node where you wish to have the ssh client managed, you can do:
+
+```puppet
+class { 'sshd::client': }
+```
+
+in the node definition. This will install the appropriate package.
+
+## License
+
+ - Copyright 2008-2011, Riseup Labs micah@riseup.net
+ - Copyright 2008, admin(at)immerda.ch
+ - Copyright 2008, Puzzle ITC GmbH
+ - Marcel Härry haerry+puppet(at)puzzle.ch
+ - Simon Josi josi+puppet(at)puzzle.ch
+
+This program is free software; you can redistribute
+it and/or modify it under the terms of the GNU
+General Public License version 3 as published by
+the Free Software Foundation.
+
diff --git a/puppet/modules/sshd/Rakefile b/puppet/modules/sshd/Rakefile
new file mode 100644
index 00000000..e3213518
--- /dev/null
+++ b/puppet/modules/sshd/Rakefile
@@ -0,0 +1,16 @@
+require 'bundler'
+Bundler.require(:rake)
+
+require 'puppetlabs_spec_helper/rake_tasks'
+require 'puppet-lint/tasks/puppet-lint'
+require 'rspec-system/rake_task'
+
+PuppetLint.configuration.log_format = '%{path}:%{linenumber}:%{KIND}: %{message}'
+PuppetLint.configuration.send("disable_80chars")
+
+puppet_module='sshd'
+task :librarian_spec_prep do
+ sh 'librarian-puppet install --path=spec/fixtures/modules/'
+end
+task :spec_prep => :librarian_spec_prep
+task :default => [:spec, :lint]
diff --git a/puppet/modules/sshd/files/autossh.init.d b/puppet/modules/sshd/files/autossh.init.d
new file mode 100644
index 00000000..92bd5f43
--- /dev/null
+++ b/puppet/modules/sshd/files/autossh.init.d
@@ -0,0 +1,164 @@
+#!/bin/sh
+### BEGIN INIT INFO
+# Provides: AutoSSH
+# Required-Start: $local_fs $network $remote_fs $syslog
+# Required-Stop: $local_fs $network $remote_fs $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: start the autossh daemon
+# Description: start the autossh daemon
+### END INIT INFO
+
+# Author: Antoine Beaupré <anarcat@koumbit.org>
+
+# Do NOT "set -e"
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="autossh"
+NAME=autossh
+USER=$NAME
+DAEMON=/usr/bin/autossh
+DAEMON_ARGS="-f"
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+AUTOSSH_PIDFILE=$PIDFILE
+export AUTOSSH_PIDFILE
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Load the VERBOSE setting and other rcS variables
+. /lib/init/vars.sh
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+ # Return
+ # 0 if daemon has been started
+ # 1 if daemon was already running
+ # 2 if daemon could not be started
+ start-stop-daemon --start --quiet --user $USER --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
+ || return 1
+ start-stop-daemon --start --quiet --user $USER --chuid $USER --pidfile $PIDFILE --exec $DAEMON -- \
+ $DAEMON_ARGS \
+ || return 2
+ # The above code will not work for interpreted scripts, use the next
+ # six lines below instead (Ref: #643337, start-stop-daemon(8) )
+ #start-stop-daemon --start --quiet --pidfile $PIDFILE --startas $DAEMON \
+ # --name $NAME --test > /dev/null \
+ # || return 1
+ #start-stop-daemon --start --quiet --pidfile $PIDFILE --startas $DAEMON \
+ # --name $NAME -- $DAEMON_ARGS \
+ # || return 2
+
+ # Add code here, if necessary, that waits for the process to be ready
+ # to handle requests from services started subsequently which depend
+ # on this one. As a last resort, sleep for some time.
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+ # Return
+ # 0 if daemon has been stopped
+ # 1 if daemon was already stopped
+ # 2 if daemon could not be stopped
+ # other if a failure occurred
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --user $USER --name $NAME
+ RETVAL="$?"
+ [ "$RETVAL" = 2 ] && return 2
+ # Wait for children to finish too if this is a daemon that forks
+ # and if the daemon is only ever run from this initscript.
+ # If the above conditions are not satisfied then add some other code
+ # that waits for the process to drop all resources that could be
+ # needed by services started subsequently. A last resort is to
+ # sleep for some time.
+ start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --user $USER --exec $DAEMON
+ [ "$?" = 2 ] && return 2
+ # Many daemons don't delete their pidfiles when they exit.
+ rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+#
+# Function that sends a SIGHUP to the daemon/service
+#
+do_reload() {
+ #
+ # If the daemon can reload its configuration without
+ # restarting (for example, when it is sent a SIGHUP),
+ # then implement that here.
+ #
+ start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name $NAME
+ return 0
+}
+
+case "$1" in
+ start)
+ [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case "$?" in
+ 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+ 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+ esac
+ ;;
+ stop)
+ [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+ 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+ esac
+ ;;
+ status)
+ status_of_proc -p "$PIDFILE" "$DAEMON" "$NAME" && exit 0 || exit $?
+ ;;
+ reload|force-reload)
+ log_daemon_msg "Reloading $DESC" "$NAME"
+ do_reload
+ log_end_msg $?
+ ;;
+ restart)
+ #
+ # If the "reload" option is implemented then remove the
+ # 'force-reload' alias
+ #
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1)
+ do_start
+ case "$?" in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *)
+ # Failed to stop
+ log_end_msg 1
+ ;;
+ esac
+ ;;
+ *)
+ #echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload}" >&2
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
+
+:
diff --git a/puppet/modules/sshd/lib/facter/ssh_version.rb b/puppet/modules/sshd/lib/facter/ssh_version.rb
new file mode 100644
index 00000000..51d8a00f
--- /dev/null
+++ b/puppet/modules/sshd/lib/facter/ssh_version.rb
@@ -0,0 +1,5 @@
+Facter.add("ssh_version") do
+ setcode do
+ ssh_version = Facter::Util::Resolution.exec('ssh -V 2>&1 1>/dev/null').chomp.split(' ')[0].split('_')[1]
+ end
+end
diff --git a/puppet/modules/sshd/lib/puppet/parser/functions/ssh_keygen.rb b/puppet/modules/sshd/lib/puppet/parser/functions/ssh_keygen.rb
new file mode 100644
index 00000000..e304f242
--- /dev/null
+++ b/puppet/modules/sshd/lib/puppet/parser/functions/ssh_keygen.rb
@@ -0,0 +1,30 @@
+Puppet::Parser::Functions::newfunction(:ssh_keygen, :type => :rvalue, :doc =>
+ "Returns an array containing the ssh private and public (in this order) key
+ for a certain private key path.
+ It will generate the keypair if both do not exist. It will also generate
+ the directory hierarchy if required.
+ It accepts only fully qualified paths, everything else will fail.") do |args|
+ raise Puppet::ParseError, "Wrong number of arguments" unless args.to_a.length == 1
+ private_key_path = args.to_a[0]
+ raise Puppet::ParseError, "Only fully qualified paths are accepted (#{private_key_path})" unless private_key_path =~ /^\/.+/
+ public_key_path = "#{private_key_path}.pub"
+ raise Puppet::ParseError, "Either only the private or only the public key exists" if File.exists?(private_key_path) ^ File.exists?(public_key_path)
+ [private_key_path,public_key_path].each do |path|
+ raise Puppet::ParseError, "#{path} is a directory" if File.directory?(path)
+ end
+
+ dir = File.dirname(private_key_path)
+ unless File.directory?(dir)
+ require 'fileutils'
+ FileUtils.mkdir_p(dir, :mode => 0700)
+ end
+ unless [private_key_path,public_key_path].all?{|path| File.exists?(path) }
+ executor = (Facter.value(:puppetversion).to_i < 3) ? Puppet::Util : Puppet::Util::Execution
+ output = executor.execute(
+ ['/usr/bin/ssh-keygen','-t', 'rsa', '-b', '4096',
+ '-f', private_key_path, '-P', '', '-q'])
+ raise Puppet::ParseError, "Something went wrong during key generation! Output: #{output}" unless output.empty?
+ end
+ [File.read(private_key_path),File.read(public_key_path)]
+end
+
diff --git a/puppet/modules/sshd/manifests/autossh.pp b/puppet/modules/sshd/manifests/autossh.pp
new file mode 100644
index 00000000..5650584a
--- /dev/null
+++ b/puppet/modules/sshd/manifests/autossh.pp
@@ -0,0 +1,40 @@
+class sshd::autossh($host,
+ $port = undef, # this should be a remote->local hash
+ $remote_user = undef,
+ $user = 'root',
+ $pidfile = '/var/run/autossh.pid',
+) {
+ if $port {
+ $port_ensure = $port
+ }
+ else {
+ # random port between 10000 and 20000
+ $port_ensure = fqdn_rand(10000) + 10000
+ }
+ if $remote_user {
+ $remote_user_ensure = $remote_user
+ }
+ else {
+ $remote_user_ensure = "host-$fqdn"
+ }
+ file {
+ '/etc/init.d/autossh':
+ mode => '0555',
+ source => 'puppet:///modules/sshd/autossh.init.d';
+ '/etc/default/autossh':
+ mode => '0444',
+ content => "USER=$user\nPIDFILE=$pidfile\nDAEMON_ARGS='-M0 -f -o ServerAliveInterval=15 -o ServerAliveCountMax=4 -q -N -R $port_ensure:localhost:22 $remote_user_ensure@$host'\n";
+ }
+ package { 'autossh':
+ ensure => present,
+ }
+ service { 'autossh':
+ ensure => running,
+ enable => true,
+ subscribe => [
+ File['/etc/init.d/autossh'],
+ File['/etc/default/autossh'],
+ Package['autossh'],
+ ],
+ }
+}
diff --git a/puppet/modules/sshd/manifests/base.pp b/puppet/modules/sshd/manifests/base.pp
new file mode 100644
index 00000000..dda9f26c
--- /dev/null
+++ b/puppet/modules/sshd/manifests/base.pp
@@ -0,0 +1,41 @@
+# The base class to set up the common things.
+# This is a private class and will always be used
+# through the sshd class itself.
+class sshd::base {
+
+ $sshd_config_content = $::operatingsystem ? {
+ 'CentOS' => template("sshd/sshd_config/${::operatingsystem}_${::operatingsystemmajrelease}.erb"),
+ default => $::lsbdistcodename ? {
+ '' => template("sshd/sshd_config/${::operatingsystem}.erb"),
+ default => template("sshd/sshd_config/${::operatingsystem}_${::lsbdistcodename}.erb")
+ }
+ }
+
+ file { 'sshd_config':
+ ensure => present,
+ path => '/etc/ssh/sshd_config',
+ content => $sshd_config_content,
+ notify => Service[sshd],
+ owner => root,
+ group => 0,
+ mode => '0600';
+ }
+
+ # Now add the key, if we've got one
+ case $::sshrsakey {
+ '': { info("no sshrsakey on ${::fqdn}") }
+ default: {
+ # only export sshkey when storedconfigs is enabled
+ if $::sshd::use_storedconfigs {
+ include ::sshd::sshkey
+ }
+ }
+ }
+ service{'sshd':
+ ensure => running,
+ name => 'sshd',
+ enable => true,
+ hasstatus => true,
+ require => File[sshd_config],
+ }
+}
diff --git a/puppet/modules/sshd/manifests/client.pp b/puppet/modules/sshd/manifests/client.pp
new file mode 100644
index 00000000..84dd7abc
--- /dev/null
+++ b/puppet/modules/sshd/manifests/client.pp
@@ -0,0 +1,22 @@
+# manifests/client.pp
+
+class sshd::client(
+ $shared_ip = 'no',
+ $ensure_version = 'installed',
+ $manage_shorewall = false
+) {
+
+ case $::operatingsystem {
+ debian,ubuntu: { include sshd::client::debian }
+ default: {
+ case $::kernel {
+ linux: { include sshd::client::linux }
+ default: { include sshd::client::base }
+ }
+ }
+ }
+
+ if $manage_shorewall{
+ include shorewall::rules::out::ssh
+ }
+}
diff --git a/puppet/modules/sshd/manifests/client/base.pp b/puppet/modules/sshd/manifests/client/base.pp
new file mode 100644
index 00000000..4925c2d0
--- /dev/null
+++ b/puppet/modules/sshd/manifests/client/base.pp
@@ -0,0 +1,15 @@
+class sshd::client::base {
+ # this is needed because the gid might have changed
+ file { '/etc/ssh/ssh_known_hosts':
+ ensure => present,
+ mode => '0644',
+ owner => root,
+ group => 0;
+ }
+
+ # Now collect all server keys
+ case $sshd::client::shared_ip {
+ no: { Sshkey <<||>> }
+ yes: { Sshkey <<| tag == fqdn |>> }
+ }
+}
diff --git a/puppet/modules/sshd/manifests/client/debian.pp b/puppet/modules/sshd/manifests/client/debian.pp
new file mode 100644
index 00000000..2aaf3fb1
--- /dev/null
+++ b/puppet/modules/sshd/manifests/client/debian.pp
@@ -0,0 +1,5 @@
+class sshd::client::debian inherits sshd::client::linux {
+ Package['openssh-clients']{
+ name => 'openssh-client',
+ }
+}
diff --git a/puppet/modules/sshd/manifests/client/linux.pp b/puppet/modules/sshd/manifests/client/linux.pp
new file mode 100644
index 00000000..0c420be2
--- /dev/null
+++ b/puppet/modules/sshd/manifests/client/linux.pp
@@ -0,0 +1,5 @@
+class sshd::client::linux inherits sshd::client::base {
+ package {'openssh-clients':
+ ensure => $sshd::client::ensure_version,
+ }
+}
diff --git a/puppet/modules/sshd/manifests/debian.pp b/puppet/modules/sshd/manifests/debian.pp
new file mode 100644
index 00000000..d827078a
--- /dev/null
+++ b/puppet/modules/sshd/manifests/debian.pp
@@ -0,0 +1,13 @@
+class sshd::debian inherits sshd::linux {
+
+ Package[openssh]{
+ name => 'openssh-server',
+ }
+
+ Service[sshd]{
+ name => 'ssh',
+ pattern => 'sshd',
+ hasstatus => true,
+ hasrestart => true,
+ }
+}
diff --git a/puppet/modules/sshd/manifests/gentoo.pp b/puppet/modules/sshd/manifests/gentoo.pp
new file mode 100644
index 00000000..631f3d19
--- /dev/null
+++ b/puppet/modules/sshd/manifests/gentoo.pp
@@ -0,0 +1,5 @@
+class sshd::gentoo inherits sshd::linux {
+ Package[openssh]{
+ category => 'net-misc',
+ }
+}
diff --git a/puppet/modules/sshd/manifests/init.pp b/puppet/modules/sshd/manifests/init.pp
new file mode 100644
index 00000000..b4157418
--- /dev/null
+++ b/puppet/modules/sshd/manifests/init.pp
@@ -0,0 +1,92 @@
+# manage an sshd installation
+class sshd(
+ $manage_nagios = false,
+ $nagios_check_ssh_hostname = 'absent',
+ $ports = [ 22 ],
+ $shared_ip = 'no',
+ $ensure_version = 'installed',
+ $listen_address = [ '0.0.0.0', '::' ],
+ $allowed_users = '',
+ $allowed_groups = '',
+ $use_pam = 'no',
+ $permit_root_login = 'without-password',
+ $password_authentication = 'no',
+ $kerberos_authentication = 'no',
+ $kerberos_orlocalpasswd = 'yes',
+ $kerberos_ticketcleanup = 'yes',
+ $gssapi_authentication = 'no',
+ $gssapi_cleanupcredentials = 'yes',
+ $tcp_forwarding = 'no',
+ $x11_forwarding = 'no',
+ $agent_forwarding = 'no',
+ $challenge_response_authentication = 'no',
+ $pubkey_authentication = 'yes',
+ $rsa_authentication = 'no',
+ $strict_modes = 'yes',
+ $ignore_rhosts = 'yes',
+ $rhosts_rsa_authentication = 'no',
+ $hostbased_authentication = 'no',
+ $permit_empty_passwords = 'no',
+ $authorized_keys_file = $::osfamily ? {
+ Debian => $::lsbmajdistrelease ? {
+ 6 => '%h/.ssh/authorized_keys',
+ default => '%h/.ssh/authorized_keys %h/.ssh/authorized_keys2',
+ },
+ RedHat => $::operatingsystemmajrelease ? {
+ 5 => '%h/.ssh/authorized_keys',
+ 6 => '%h/.ssh/authorized_keys',
+ default => '%h/.ssh/authorized_keys %h/.ssh/authorized_keys2',
+ },
+ OpenBSD => '%h/.ssh/authorized_keys',
+ default => '%h/.ssh/authorized_keys %h/.ssh/authorized_keys2',
+ },
+ $hardened = 'no',
+ $sftp_subsystem = '',
+ $head_additional_options = '',
+ $tail_additional_options = '',
+ $print_motd = 'yes',
+ $manage_shorewall = false,
+ $shorewall_source = 'net',
+ $sshkey_ipaddress = $::ipaddress,
+ $manage_client = true,
+ $hostkey_type = versioncmp($::ssh_version, '6.5') ? {
+ /(^1|0)/ => [ 'rsa', 'ed25519' ],
+ /-1/ => [ 'rsa', 'dsa' ]
+ },
+ $use_storedconfigs = true
+) {
+
+ validate_bool($manage_shorewall)
+ validate_bool($manage_client)
+ validate_array($listen_address)
+ validate_array($ports)
+
+ if $manage_client {
+ class{'sshd::client':
+ shared_ip => $shared_ip,
+ ensure_version => $ensure_version,
+ manage_shorewall => $manage_shorewall,
+ }
+ }
+
+ case $::operatingsystem {
+ gentoo: { include sshd::gentoo }
+ redhat,centos: { include sshd::redhat }
+ openbsd: { include sshd::openbsd }
+ debian,ubuntu: { include sshd::debian }
+ default: { include sshd::base }
+ }
+
+ if $manage_nagios {
+ sshd::nagios{$ports:
+ check_hostname => $nagios_check_ssh_hostname
+ }
+ }
+
+ if $manage_shorewall {
+ class{'shorewall::rules::ssh':
+ ports => $ports,
+ source => $shorewall_source
+ }
+ }
+}
diff --git a/puppet/modules/sshd/manifests/libssh2.pp b/puppet/modules/sshd/manifests/libssh2.pp
new file mode 100644
index 00000000..403ac7be
--- /dev/null
+++ b/puppet/modules/sshd/manifests/libssh2.pp
@@ -0,0 +1,7 @@
+# manifests/libssh2.pp
+
+class sshd::libssh2 {
+ package{'libssh2':
+ ensure => present,
+ }
+}
diff --git a/puppet/modules/sshd/manifests/libssh2/devel.pp b/puppet/modules/sshd/manifests/libssh2/devel.pp
new file mode 100644
index 00000000..261e34c8
--- /dev/null
+++ b/puppet/modules/sshd/manifests/libssh2/devel.pp
@@ -0,0 +1,7 @@
+# manifests/libssh2/devel.pp
+
+class sshd::libssh2::devel inherits sshd::libssh2 {
+ package{"libssh2-devel.${::architecture}":
+ ensure => installed,
+ }
+}
diff --git a/puppet/modules/sshd/manifests/linux.pp b/puppet/modules/sshd/manifests/linux.pp
new file mode 100644
index 00000000..8628ff5e
--- /dev/null
+++ b/puppet/modules/sshd/manifests/linux.pp
@@ -0,0 +1,8 @@
+class sshd::linux inherits sshd::base {
+ package{'openssh':
+ ensure => $sshd::ensure_version,
+ }
+ File[sshd_config]{
+ require +> Package[openssh],
+ }
+}
diff --git a/puppet/modules/sshd/manifests/nagios.pp b/puppet/modules/sshd/manifests/nagios.pp
new file mode 100644
index 00000000..6921de91
--- /dev/null
+++ b/puppet/modules/sshd/manifests/nagios.pp
@@ -0,0 +1,24 @@
+define sshd::nagios(
+ $port = 'absent',
+ $ensure = 'present',
+ $check_hostname = 'absent'
+) {
+ $real_port = $port ? {
+ 'absent' => $name,
+ default => $port,
+ }
+ case $check_hostname {
+ 'absent': {
+ nagios::service{"ssh_port_${name}":
+ ensure => $ensure,
+ check_command => "check_ssh_port!${real_port}"
+ }
+ }
+ default: {
+ nagios::service{"ssh_port_host_${name}":
+ ensure => $ensure,
+ check_command => "check_ssh_port_host!${real_port}!${check_hostname}"
+ }
+ }
+ }
+}
diff --git a/puppet/modules/sshd/manifests/openbsd.pp b/puppet/modules/sshd/manifests/openbsd.pp
new file mode 100644
index 00000000..cb6dbba6
--- /dev/null
+++ b/puppet/modules/sshd/manifests/openbsd.pp
@@ -0,0 +1,8 @@
+class sshd::openbsd inherits sshd::base {
+ Service[sshd]{
+ restart => '/bin/kill -HUP `/bin/cat /var/run/sshd.pid`',
+ stop => '/bin/kill `/bin/cat /var/run/sshd.pid`',
+ start => '/usr/sbin/sshd',
+ status => '/usr/bin/pgrep -f /usr/sbin/sshd',
+ }
+}
diff --git a/puppet/modules/sshd/manifests/redhat.pp b/puppet/modules/sshd/manifests/redhat.pp
new file mode 100644
index 00000000..d7201774
--- /dev/null
+++ b/puppet/modules/sshd/manifests/redhat.pp
@@ -0,0 +1,5 @@
+class sshd::redhat inherits sshd::linux {
+ Package[openssh]{
+ name => 'openssh-server',
+ }
+}
diff --git a/puppet/modules/sshd/manifests/ssh_authorized_key.pp b/puppet/modules/sshd/manifests/ssh_authorized_key.pp
new file mode 100644
index 00000000..80cb3b70
--- /dev/null
+++ b/puppet/modules/sshd/manifests/ssh_authorized_key.pp
@@ -0,0 +1,85 @@
+# wrapper to have some defaults.
+define sshd::ssh_authorized_key(
+ $ensure = 'present',
+ $type = 'ssh-dss',
+ $key = 'absent',
+ $user = '',
+ $target = undef,
+ $options = 'absent',
+ $override_builtin = undef
+){
+
+ if ($ensure=='present') and ($key=='absent') {
+ fail("You have to set \$key for Sshd::Ssh_authorized_key[${name}]!")
+ }
+
+ $real_user = $user ? {
+ false => $name,
+ '' => $name,
+ default => $user,
+ }
+
+ case $target {
+ undef,'': {
+ case $real_user {
+ 'root': { $real_target = '/root/.ssh/authorized_keys' }
+ default: { $real_target = "/home/${real_user}/.ssh/authorized_keys" }
+ }
+ }
+ default: {
+ $real_target = $target
+ }
+ }
+
+  # The ssh_authorized_key built-in type (in 2.7.23 at least)
+  # will not write an authorized_keys file for a mortal user to
+  # a directory they don't have write permission to: puppet attempts to
+  # create the file as the user specified with the user parameter and fails.
+ # Since ssh will refuse to use authorized_keys files not owned by the
+ # user, or in files/directories that allow other users to write, this
+ # behavior is deliberate in order to prevent typical non-working
+ # configurations. However, it also prevents the case of puppet, running
+ # as root, writing a file owned by a mortal user to a common
+ # authorized_keys directory such as one might specify in sshd_config with
+ # something like
+ # 'AuthorizedKeysFile /etc/ssh/authorized_keys/%u'
+ # So we provide a way to override the built-in and instead just install
+ # via a file resource. There is no additional security risk here, it's
+ # nothing a user can't already do by writing their own file resources,
+ # we still depend on the filesystem permissions to keep things safe.
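+  #
+  # Example (hypothetical values), installing a key into a shared
+  # authorized_keys directory via the file resource:
+  #   sshd::ssh_authorized_key { 'backup':
+  #     key              => 'AAAA...placeholder...',
+  #     override_builtin => true,
+  #     target           => '/etc/ssh/authorized_keys/backup',
+  #   }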
+ if $override_builtin {
+ $header = "# HEADER: This file is managed by Puppet.\n"
+
+ if $options == 'absent' {
+ info("not setting any option for ssh_authorized_key: ${name}")
+ $content = "${header}${type} ${key}\n"
+ } else {
+ $content = "${header}${options} ${type} ${key}\n"
+ }
+
+ file { $real_target:
+ ensure => $ensure,
+ content => $content,
+ owner => $real_user,
+ mode => '0600',
+ }
+
+ } else {
+
+ if $options == 'absent' {
+ info("not setting any option for ssh_authorized_key: ${name}")
+ } else {
+ $real_options = $options
+ }
+
+ ssh_authorized_key{$name:
+ ensure => $ensure,
+ type => $type,
+ key => $key,
+ user => $real_user,
+ target => $real_target,
+ options => $real_options,
+ }
+ }
+
+}
diff --git a/puppet/modules/sshd/manifests/sshkey.pp b/puppet/modules/sshd/manifests/sshkey.pp
new file mode 100644
index 00000000..df37a66c
--- /dev/null
+++ b/puppet/modules/sshd/manifests/sshkey.pp
@@ -0,0 +1,21 @@
+# exports the node's ssh host key as sshkey resources
+class sshd::sshkey {
+
+ @@sshkey{$::fqdn:
+ ensure => present,
+ tag => 'fqdn',
+ type => 'ssh-rsa',
+ key => $::sshrsakey,
+ }
+
+  # In case the node uses a shared network address,
+ # we don't define a sshkey resource using an IP address
+ if $sshd::shared_ip == 'no' {
+ @@sshkey{$::sshd::sshkey_ipaddress:
+ ensure => present,
+ tag => 'ipaddress',
+ type => 'ssh-rsa',
+ key => $::sshrsakey,
+ }
+ }
+}
diff --git a/puppet/modules/sshd/spec/classes/client_spec.rb b/puppet/modules/sshd/spec/classes/client_spec.rb
new file mode 100644
index 00000000..bd3e35af
--- /dev/null
+++ b/puppet/modules/sshd/spec/classes/client_spec.rb
@@ -0,0 +1,42 @@
+require 'spec_helper'
+
+describe 'sshd::client' do
+
+ shared_examples "a Linux OS" do
+ it { should contain_file('/etc/ssh/ssh_known_hosts').with(
+ {
+ 'ensure' => 'present',
+ 'owner' => 'root',
+ 'group' => '0',
+ 'mode' => '0644',
+ }
+ )}
+ end
+
+ context "Debian OS" do
+ let :facts do
+ {
+ :operatingsystem => 'Debian',
+ :osfamily => 'Debian',
+ :lsbdistcodename => 'wheezy',
+ }
+ end
+ it_behaves_like "a Linux OS"
+ it { should contain_package('openssh-clients').with({
+ 'name' => 'openssh-client'
+ }) }
+ end
+
+ context "CentOS" do
+ it_behaves_like "a Linux OS" do
+ let :facts do
+ {
+ :operatingsystem => 'CentOS',
+ :osfamily => 'RedHat',
+ :lsbdistcodename => 'Final',
+ }
+ end
+ end
+ end
+
+end \ No newline at end of file
diff --git a/puppet/modules/sshd/spec/classes/init_spec.rb b/puppet/modules/sshd/spec/classes/init_spec.rb
new file mode 100644
index 00000000..e3003d14
--- /dev/null
+++ b/puppet/modules/sshd/spec/classes/init_spec.rb
@@ -0,0 +1,122 @@
+require 'spec_helper'
+
+describe 'sshd' do
+
+ shared_examples "a Linux OS" do
+ it { should compile.with_all_deps }
+ it { should contain_class('sshd') }
+ it { should contain_class('sshd::client') }
+
+ it { should contain_service('sshd').with({
+ :ensure => 'running',
+ :enable => true,
+ :hasstatus => true
+ })}
+
+ it { should contain_file('sshd_config').with(
+ {
+ 'ensure' => 'present',
+ 'owner' => 'root',
+ 'group' => '0',
+ 'mode' => '0600',
+ }
+ )}
+
+ context 'change ssh port' do
+ let(:params){{
+ :ports => [ 22222],
+ }}
+ it { should contain_file(
+ 'sshd_config'
+ ).with_content(/Port 22222/)}
+ end
+ end
+
+ context "Debian OS" do
+ let :facts do
+ {
+ :operatingsystem => 'Debian',
+ :osfamily => 'Debian',
+ :lsbdistcodename => 'wheezy',
+ }
+ end
+ it_behaves_like "a Linux OS"
+ it { should contain_package('openssh') }
+ it { should contain_class('sshd::debian') }
+ it { should contain_service('sshd').with(
+ :hasrestart => true
+ )}
+
+ context "Ubuntu" do
+ let :facts do
+ {
+ :operatingsystem => 'Ubuntu',
+ :lsbdistcodename => 'precise',
+ }
+ end
+ it_behaves_like "a Linux OS"
+ it { should contain_package('openssh') }
+ it { should contain_service('sshd').with({
+ :hasrestart => true
+ })}
+ end
+ end
+
+
+# context "RedHat OS" do
+# it_behaves_like "a Linux OS" do
+# let :facts do
+# {
+# :operatingsystem => 'RedHat',
+# :osfamily => 'RedHat',
+# }
+# end
+# end
+# end
+
+ context "CentOS" do
+ it_behaves_like "a Linux OS" do
+ let :facts do
+ {
+ :operatingsystem => 'CentOS',
+ :osfamily => 'RedHat',
+ :lsbdistcodename => 'Final',
+ }
+ end
+ end
+ end
+
+ context "Gentoo" do
+ let :facts do
+ {
+ :operatingsystem => 'Gentoo',
+ :osfamily => 'Gentoo',
+ }
+ end
+ it_behaves_like "a Linux OS"
+ it { should contain_class('sshd::gentoo') }
+ end
+
+ context "OpenBSD" do
+ let :facts do
+ {
+ :operatingsystem => 'OpenBSD',
+ :osfamily => 'OpenBSD',
+ }
+ end
+ it_behaves_like "a Linux OS"
+ it { should contain_class('sshd::openbsd') }
+ end
+
+# context "FreeBSD" do
+# it_behaves_like "a Linux OS" do
+# let :facts do
+# {
+# :operatingsystem => 'FreeBSD',
+# :osfamily => 'FreeBSD',
+# }
+# end
+# end
+# end
+
+end \ No newline at end of file
diff --git a/puppet/modules/sshd/spec/defines/ssh_authorized_key_spec.rb b/puppet/modules/sshd/spec/defines/ssh_authorized_key_spec.rb
new file mode 100644
index 00000000..c73a91cc
--- /dev/null
+++ b/puppet/modules/sshd/spec/defines/ssh_authorized_key_spec.rb
@@ -0,0 +1,45 @@
+require 'spec_helper'
+
+describe 'sshd::ssh_authorized_key' do
+
+ context 'manage authorized key' do
+ let(:title) { 'foo' }
+ let(:ssh_key) { 'some_secret_ssh_key' }
+
+ let(:params) {{
+ :key => ssh_key,
+ }}
+
+ it { should contain_ssh_authorized_key('foo').with({
+ 'ensure' => 'present',
+ 'type' => 'ssh-dss',
+ 'user' => 'foo',
+ 'target' => '/home/foo/.ssh/authorized_keys',
+ 'key' => ssh_key,
+ })
+ }
+ end
+  context 'manage authorized key with options' do
+ let(:title) { 'foo2' }
+ let(:ssh_key) { 'some_secret_ssh_key' }
+
+ let(:params) {{
+ :key => ssh_key,
+ :options => ['command="/usr/bin/date"',
+ 'no-pty','no-X11-forwarding','no-agent-forwarding',
+ 'no-port-forwarding']
+ }}
+
+ it { should contain_ssh_authorized_key('foo2').with({
+ 'ensure' => 'present',
+ 'type' => 'ssh-dss',
+ 'user' => 'foo2',
+ 'target' => '/home/foo2/.ssh/authorized_keys',
+ 'key' => ssh_key,
+ 'options' => ['command="/usr/bin/date"',
+ 'no-pty','no-X11-forwarding','no-agent-forwarding',
+ 'no-port-forwarding']
+ })
+ }
+ end
+end
diff --git a/puppet/modules/sshd/spec/functions/ssh_keygen_spec.rb b/puppet/modules/sshd/spec/functions/ssh_keygen_spec.rb
new file mode 100644
index 00000000..a6b51173
--- /dev/null
+++ b/puppet/modules/sshd/spec/functions/ssh_keygen_spec.rb
@@ -0,0 +1,116 @@
+#! /usr/bin/env ruby -S rspec
+require 'spec_helper'
+require 'rspec-puppet'
+require 'mocha'
+require 'fileutils'
+
+describe 'ssh_keygen' do
+
+ let(:scope) { PuppetlabsSpec::PuppetInternals.scope }
+
+ it 'should exist' do
+ Puppet::Parser::Functions.function("ssh_keygen").should == "function_ssh_keygen"
+ end
+
+ it 'should raise a ParseError if no argument is passed' do
+ lambda {
+ scope.function_ssh_keygen([])
+ }.should(raise_error(Puppet::ParseError))
+ end
+
+ it 'should raise a ParseError if there is more than 1 arguments' do
+ lambda {
+ scope.function_ssh_keygen(["foo", "bar"])
+ }.should( raise_error(Puppet::ParseError))
+ end
+
+ it 'should raise a ParseError if the argument is not fully qualified' do
+ lambda {
+ scope.function_ssh_keygen(["foo"])
+ }.should( raise_error(Puppet::ParseError))
+ end
+
+ it "should raise a ParseError if the private key path is a directory" do
+ File.stubs(:directory?).with("/some_dir").returns(true)
+ lambda {
+ scope.function_ssh_keygen(["/some_dir"])
+ }.should( raise_error(Puppet::ParseError))
+ end
+
+ it "should raise a ParseError if the public key path is a directory" do
+ File.stubs(:directory?).with("/some_dir.pub").returns(true)
+ lambda {
+ scope.function_ssh_keygen(["/some_dir.pub"])
+ }.should( raise_error(Puppet::ParseError))
+ end
+
+ describe 'when executing properly' do
+ before do
+ File.stubs(:directory?).with('/tmp/a/b/c').returns(false)
+ File.stubs(:directory?).with('/tmp/a/b/c.pub').returns(false)
+ File.stubs(:read).with('/tmp/a/b/c').returns('privatekey')
+ File.stubs(:read).with('/tmp/a/b/c.pub').returns('publickey')
+ end
+
+ it 'should fail if the public but not the private key exists' do
+ File.stubs(:exists?).with('/tmp/a/b/c').returns(true)
+ File.stubs(:exists?).with('/tmp/a/b/c.pub').returns(false)
+ lambda {
+ scope.function_ssh_keygen(['/tmp/a/b/c'])
+ }.should( raise_error(Puppet::ParseError))
+ end
+
+ it "should fail if the private but not the public key exists" do
+ File.stubs(:exists?).with("/tmp/a/b/c").returns(false)
+ File.stubs(:exists?).with("/tmp/a/b/c.pub").returns(true)
+ lambda {
+ scope.function_ssh_keygen(["/tmp/a/b/c"])
+ }.should( raise_error(Puppet::ParseError))
+ end
+
+
+ it "should return an array of size 2 with the right conent if the keyfiles exists" do
+ File.stubs(:exists?).with("/tmp/a/b/c").returns(true)
+ File.stubs(:exists?).with("/tmp/a/b/c.pub").returns(true)
+ File.stubs(:directory?).with('/tmp/a/b').returns(true)
+ Puppet::Util.expects(:execute).never
+ result = scope.function_ssh_keygen(['/tmp/a/b/c'])
+ result.length.should == 2
+ result[0].should == 'privatekey'
+ result[1].should == 'publickey'
+ end
+
+ it "should create the directory path if it does not exist" do
+ File.stubs(:exists?).with("/tmp/a/b/c").returns(false)
+ File.stubs(:exists?).with("/tmp/a/b/c.pub").returns(false)
+ File.stubs(:directory?).with("/tmp/a/b").returns(false)
+ FileUtils.expects(:mkdir_p).with("/tmp/a/b", :mode => 0700)
+ Puppet::Util::Execution.expects(:execute).returns("")
+ result = scope.function_ssh_keygen(['/tmp/a/b/c'])
+ result.length.should == 2
+ result[0].should == 'privatekey'
+ result[1].should == 'publickey'
+ end
+
+ it "should generate the key if the keyfiles do not exist" do
+ File.stubs(:exists?).with("/tmp/a/b/c").returns(false)
+ File.stubs(:exists?).with("/tmp/a/b/c.pub").returns(false)
+ File.stubs(:directory?).with("/tmp/a/b").returns(true)
+ Puppet::Util::Execution.expects(:execute).with(['/usr/bin/ssh-keygen','-t', 'rsa', '-b', '4096', '-f', '/tmp/a/b/c', '-P', '', '-q']).returns("")
+ result = scope.function_ssh_keygen(['/tmp/a/b/c'])
+ result.length.should == 2
+ result[0].should == 'privatekey'
+ result[1].should == 'publickey'
+ end
+
+ it "should fail if something goes wrong during generation" do
+ File.stubs(:exists?).with("/tmp/a/b/c").returns(false)
+ File.stubs(:exists?).with("/tmp/a/b/c.pub").returns(false)
+ File.stubs(:directory?).with("/tmp/a/b").returns(true)
+ Puppet::Util::Execution.expects(:execute).with(['/usr/bin/ssh-keygen','-t', 'rsa', '-b', '4096', '-f', '/tmp/a/b/c', '-P', '', '-q']).returns("something is wrong")
+ lambda {
+ scope.function_ssh_keygen(["/tmp/a/b/c"])
+ }.should( raise_error(Puppet::ParseError))
+ end
+ end
+end
diff --git a/puppet/modules/sshd/spec/spec_helper.rb b/puppet/modules/sshd/spec/spec_helper.rb
new file mode 100644
index 00000000..b4123fde
--- /dev/null
+++ b/puppet/modules/sshd/spec/spec_helper.rb
@@ -0,0 +1,21 @@
+dir = File.expand_path(File.dirname(__FILE__))
+$LOAD_PATH.unshift File.join(dir, 'lib')
+require 'puppet'
+require 'rspec'
+require 'puppetlabs_spec_helper/module_spec_helper'
+#require 'rspec-hiera-puppet'
+require 'rspec-puppet/coverage'
+require 'rspec/autorun'
+
+fixture_path = File.expand_path(File.join(__FILE__, '..', 'fixtures'))
+
+RSpec.configure do |c|
+ c.module_path = File.join(fixture_path, 'modules')
+ c.manifest_dir = File.join(fixture_path, 'manifests')
+ c.pattern = "spec/*/*_spec.rb"
+end
+
+Puppet::Util::Log.level = :warning
+Puppet::Util::Log.newdestination(:console)
+
+at_exit { RSpec::Puppet::Coverage.report! } \ No newline at end of file
diff --git a/puppet/modules/sshd/spec/spec_helper_system.rb b/puppet/modules/sshd/spec/spec_helper_system.rb
new file mode 100644
index 00000000..2c6812fc
--- /dev/null
+++ b/puppet/modules/sshd/spec/spec_helper_system.rb
@@ -0,0 +1,25 @@
+require 'rspec-system/spec_helper'
+require 'rspec-system-puppet/helpers'
+require 'rspec-system-serverspec/helpers'
+include Serverspec::Helper::RSpecSystem
+include Serverspec::Helper::DetectOS
+include RSpecSystemPuppet::Helpers
+
+RSpec.configure do |c|
+ # Project root
+ proj_root = File.expand_path(File.join(File.dirname(__FILE__), '..'))
+
+ # Enable colour
+ c.tty = true
+
+ c.include RSpecSystemPuppet::Helpers
+
+ # This is where we 'setup' the nodes before running our tests
+ c.before :suite do
+ # Install puppet
+ puppet_install
+ # Install modules and dependencies
+ puppet_module_install(:source => proj_root, :module_name => 'sshd')
+ shell('puppet module install puppetlabs-stdlib')
+ end
+end
diff --git a/puppet/modules/sshd/templates/sshd_config/CentOS_5.erb b/puppet/modules/sshd/templates/sshd_config/CentOS_5.erb
new file mode 120000
index 00000000..71b767a5
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/CentOS_5.erb
@@ -0,0 +1 @@
+CentOS_6.erb \ No newline at end of file
diff --git a/puppet/modules/sshd/templates/sshd_config/CentOS_6.erb b/puppet/modules/sshd/templates/sshd_config/CentOS_6.erb
new file mode 100644
index 00000000..4593a91a
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/CentOS_6.erb
@@ -0,0 +1,172 @@
+# $OpenBSD: sshd_config,v 1.73 2005/12/06 22:38:28 reyk Exp $
+
+# This is the sshd server system-wide configuration file. See
+# sshd_config(5) for more information.
+
+# This sshd was compiled with PATH=/usr/local/bin:/bin:/usr/bin
+
+# The strategy used for options in the default sshd_config shipped with
+# OpenSSH is to specify options with their default value where
+# possible, but leave them commented. Uncommented options change a
+# default value.
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+# Use these options to restrict which interfaces/protocols sshd will bind to
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+
+# Disable legacy (protocol version 1) support in the server for new
+# installations. In future the default will change to require explicit
+# activation of protocol 1
+Protocol 2
+
+# HostKey for protocol version 1
+#HostKey /etc/ssh/ssh_host_key
+# HostKeys for protocol version 2
+#HostKey /etc/ssh/ssh_host_rsa_key
+#HostKey /etc/ssh/ssh_host_dsa_key
+
+# Lifetime and size of ephemeral version 1 server key
+#KeyRegenerationInterval 1h
+#ServerKeyBits 1024
+
+# Logging
+# obsoletes QuietMode and FascistLogging
+#SyslogFacility AUTH
+SyslogFacility AUTHPRIV
+#LogLevel INFO
+
+# Authentication:
+
+#LoginGraceTime 2m
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+#MaxAuthTries 6
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+#AuthorizedKeysCommand none
+#AuthorizedKeysCommandRunAs nobody
+
+# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+
+# Change to yes if you don't trust ~/.ssh/known_hosts for
+# RhostsRSAAuthentication and HostbasedAuthentication
+#IgnoreUserKnownHosts no
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+
+# To disable tunneled clear text passwords, change to no here!
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to no to disable s/key passwords
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# Kerberos options
+#KerberosAuthentication no
+#KerberosOrLocalPasswd yes
+#KerberosTicketCleanup yes
+#KerberosGetAFSToken no
+#KerberosUseKuserok yes
+
+# GSSAPI options
+#GSSAPIAuthentication no
+#GSSAPICleanupCredentials yes
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+#UsePAM no
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+# Accept locale-related environment variables
+AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
+AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
+AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE
+AcceptEnv XMODIFIERS
+
+#AllowAgentForwarding yes
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+#GatewayPorts no
+#X11Forwarding no
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+#X11DisplayOffset 10
+#X11UseLocalhost yes
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+#PrintLastLog yes
+#TCPKeepAlive yes
+#UseLogin no
+#UsePrivilegeSeparation yes
+#PermitUserEnvironment no
+#Compression delayed
+#ClientAliveInterval 0
+#ClientAliveCountMax 3
+#ShowPatchLevel no
+#UseDNS yes
+#PidFile /var/run/sshd.pid
+#MaxStartups 10:30:100
+#PermitTunnel no
+#ChrootDirectory none
+
+# no default banner path
+#Banner /some/path
+
+# override default of no subsystems
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/libexec/openssh/sftp-server' : s %>
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+<% if (scope.function_versioncmp([scope.lookupvar('::ssh_version'),'6.5'])) >= 0 -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% else -%>
+Ciphers aes256-ctr
+MACs hmac-sha1
+<% end -%>
+<% end -%>
+
+# Example of overriding settings on a per-user basis
+#Match User anoncvs
+# X11Forwarding no
+# AllowTcpForwarding no
+# ForceCommand cvs server
+#
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/CentOS_7.erb b/puppet/modules/sshd/templates/sshd_config/CentOS_7.erb
new file mode 100644
index 00000000..f55fb9d0
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/CentOS_7.erb
@@ -0,0 +1,186 @@
+# $OpenBSD: sshd_config,v 1.90 2013/05/16 04:09:14 dtucker Exp $
+
+# This is the sshd server system-wide configuration file. See
+# sshd_config(5) for more information.
+
+# This sshd was compiled with PATH=/usr/local/bin:/bin:/usr/bin
+
+# The strategy used for options in the default sshd_config shipped with
+# OpenSSH is to specify options with their default value where
+# possible, but leave them commented. Uncommented options change a
+# default value.
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+# If you want to change the port on a SELinux system, you have to tell
+# SELinux about this change.
+# semanage port -a -t ssh_port_t -p tcp #PORTNUMBER
+#
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+
+# The default requires explicit activation of protocol 1
+#Protocol 2
+
+# HostKey for protocol version 1
+#HostKey /etc/ssh/ssh_host_key
+# HostKeys for protocol version 2
+<% scope.lookupvar('::sshd::hostkey_type').to_a.each do |hostkey_type| -%>
+HostKey /etc/ssh/ssh_host_<%=hostkey_type %>_key
+<% end -%>
+
+# Lifetime and size of ephemeral version 1 server key
+#KeyRegenerationInterval 1h
+#ServerKeyBits 1024
+
+# Ciphers and keying
+#RekeyLimit default none
+
+# Logging
+# obsoletes QuietMode and FascistLogging
+#SyslogFacility AUTH
+SyslogFacility AUTHPRIV
+#LogLevel INFO
+
+# Authentication:
+
+#LoginGraceTime 2m
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+#MaxAuthTries 6
+#MaxSessions 10
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+#AuthorizedPrincipalsFile none
+#AuthorizedKeysCommand none
+#AuthorizedKeysCommandRunAs nobody
+
+# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+
+# Change to yes if you don't trust ~/.ssh/known_hosts for
+# RhostsRSAAuthentication and HostbasedAuthentication
+#IgnoreUserKnownHosts no
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+
+# To disable tunneled clear text passwords, change to no here!
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to no to disable s/key passwords
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# Kerberos options
+#KerberosAuthentication no
+#KerberosOrLocalPasswd yes
+#KerberosTicketCleanup yes
+#KerberosGetAFSToken no
+#KerberosUseKuserok yes
+
+# GSSAPI options
+GSSAPIAuthentication no
+GSSAPICleanupCredentials yes
+#GSSAPIStrictAcceptorCheck yes
+#GSSAPIKeyExchange no
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+# WARNING: 'UsePAM no' is not supported in Red Hat Enterprise Linux and may cause several
+# problems.
+#UsePAM no
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+#AllowAgentForwarding yes
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+#GatewayPorts no
+#X11Forwarding no
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+#X11DisplayOffset 10
+#X11UseLocalhost yes
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+#PrintLastLog yes
+#TCPKeepAlive yes
+#UseLogin no
+UsePrivilegeSeparation sandbox # Default for new installations.
+#PermitUserEnvironment no
+#Compression delayed
+#ClientAliveInterval 0
+#ClientAliveCountMax 3
+#ShowPatchLevel no
+#UseDNS yes
+#PidFile /var/run/sshd.pid
+#MaxStartups 10:30:100
+#PermitTunnel no
+#ChrootDirectory none
+#VersionAddendum none
+
+# no default banner path
+#Banner none
+
+# Accept locale-related environment variables
+AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
+AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
+AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE
+AcceptEnv XMODIFIERS
+
+
+# override default of no subsystems
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/libexec/openssh/sftp-server' : s %>
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+# Uncomment this if you want to use .local domain
+#Host *.local
+# CheckHostIP no
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+<% if (scope.function_versioncmp([scope.lookupvar('::ssh_version'),'6.5'])) >= 0 -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% else -%>
+Ciphers aes256-ctr
+MACs hmac-sha1
+<% end -%>
+<% end -%>
+
+# Example of overriding settings on a per-user basis
+#Match User anoncvs
+# X11Forwarding no
+# AllowTcpForwarding no
+# ForceCommand cvs server
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/Debian_jessie.erb b/puppet/modules/sshd/templates/sshd_config/Debian_jessie.erb
new file mode 100644
index 00000000..91dbfff0
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/Debian_jessie.erb
@@ -0,0 +1,124 @@
+# This file is managed by Puppet, all local modifications will be overwritten
+#
+# Package generated configuration file
+# See the sshd_config(5) manpage for details
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+# What ports, IPs and protocols we listen for
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+# Use these options to restrict which interfaces/protocols sshd will bind to
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+Protocol 2
+# HostKeys for protocol version 2
+<% scope.lookupvar('::sshd::hostkey_type').to_a.each do |hostkey_type| -%>
+HostKey /etc/ssh/ssh_host_<%=hostkey_type %>_key
+<% end -%>
+#Privilege Separation is turned on for security
+UsePrivilegeSeparation yes
+
+# Lifetime and size of ephemeral version 1 server key
+KeyRegenerationInterval 3600
+ServerKeyBits 1024
+
+# Logging
+SyslogFacility AUTH
+LogLevel INFO
+
+# Authentication:
+LoginGraceTime 120
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+# For this to work you will also need host keys in /etc/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
+#IgnoreUserKnownHosts yes
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to yes to enable challenge-response passwords (beware issues with
+# some PAM modules and threads)
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# Change to no to disable tunnelled clear text passwords
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# Kerberos options
+KerberosAuthentication <%= scope.lookupvar('::sshd::kerberos_authentication') %>
+#KerberosGetAFSToken no
+KerberosOrLocalPasswd <%= scope.lookupvar('::sshd::kerberos_orlocalpasswd') %>
+KerberosTicketCleanup <%= scope.lookupvar('::sshd::kerberos_ticketcleanup') %>
+
+# GSSAPI options
+GSSAPIAuthentication <%= scope.lookupvar('::sshd::gssapi_authentication') %>
+GSSAPICleanupCredentials <%= scope.lookupvar('::sshd::gssapi_cleanupcredentials') %>
+
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+X11DisplayOffset 10
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+PrintLastLog yes
+TCPKeepAlive yes
+#UseLogin no
+
+#MaxStartups 10:30:60
+#Banner /etc/issue.net
+# do not reveal debian version (default is yes)
+DebianBanner no
+
+# Allow client to pass locale environment variables
+AcceptEnv LANG LC_*
+
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/lib/openssh/sftp-server' : s %>
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+
+AllowAgentForwarding <%= scope.lookupvar('::sshd::agent_forwarding') %>
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% end -%>
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/Debian_sid.erb b/puppet/modules/sshd/templates/sshd_config/Debian_sid.erb
new file mode 100644
index 00000000..91dbfff0
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/Debian_sid.erb
@@ -0,0 +1,124 @@
+# This file is managed by Puppet, all local modifications will be overwritten
+#
+# Package generated configuration file
+# See the sshd_config(5) manpage for details
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+# What ports, IPs and protocols we listen for
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+# Use these options to restrict which interfaces/protocols sshd will bind to
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+Protocol 2
+# HostKeys for protocol version 2
+<% scope.lookupvar('::sshd::hostkey_type').to_a.each do |hostkey_type| -%>
+HostKey /etc/ssh/ssh_host_<%=hostkey_type %>_key
+<% end -%>
+#Privilege Separation is turned on for security
+UsePrivilegeSeparation yes
+
+# Lifetime and size of ephemeral version 1 server key
+KeyRegenerationInterval 3600
+ServerKeyBits 1024
+
+# Logging
+SyslogFacility AUTH
+LogLevel INFO
+
+# Authentication:
+LoginGraceTime 120
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+# For this to work you will also need host keys in /etc/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
+#IgnoreUserKnownHosts yes
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to yes to enable challenge-response passwords (beware issues with
+# some PAM modules and threads)
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# Change to no to disable tunnelled clear text passwords
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# Kerberos options
+KerberosAuthentication <%= scope.lookupvar('::sshd::kerberos_authentication') %>
+#KerberosGetAFSToken no
+KerberosOrLocalPasswd <%= scope.lookupvar('::sshd::kerberos_orlocalpasswd') %>
+KerberosTicketCleanup <%= scope.lookupvar('::sshd::kerberos_ticketcleanup') %>
+
+# GSSAPI options
+GSSAPIAuthentication <%= scope.lookupvar('::sshd::gssapi_authentication') %>
+GSSAPICleanupCredentials <%= scope.lookupvar('::sshd::gssapi_cleanupcredentials') %>
+
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+X11DisplayOffset 10
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+PrintLastLog yes
+TCPKeepAlive yes
+#UseLogin no
+
+#MaxStartups 10:30:60
+#Banner /etc/issue.net
+# do not reveal debian version (default is yes)
+DebianBanner no
+
+# Allow client to pass locale environment variables
+AcceptEnv LANG LC_*
+
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/lib/openssh/sftp-server' : s %>
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+
+AllowAgentForwarding <%= scope.lookupvar('::sshd::agent_forwarding') %>
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% end -%>
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/Debian_squeeze.erb b/puppet/modules/sshd/templates/sshd_config/Debian_squeeze.erb
new file mode 100644
index 00000000..649b320a
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/Debian_squeeze.erb
@@ -0,0 +1,127 @@
+# This file is managed by Puppet, all local modifications will be overwritten
+#
+# Package generated configuration file
+# See the sshd(8) manpage for details
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+# What ports, IPs and protocols we listen for
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+# Use these options to restrict which interfaces/protocols sshd will bind to
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+Protocol 2
+# HostKeys for protocol version 2
+<% scope.lookupvar('::sshd::hostkey_type').to_a.each do |hostkey_type| -%>
+HostKey /etc/ssh/ssh_host_<%=hostkey_type %>_key
+<% end -%>
+
+#Privilege Separation is turned on for security
+UsePrivilegeSeparation yes
+
+# Lifetime and size of ephemeral version 1 server key
+KeyRegenerationInterval 3600
+ServerKeyBits 768
+
+# Logging
+SyslogFacility AUTH
+LogLevel INFO
+
+# Authentication:
+LoginGraceTime 120
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+# For this to work you will also need host keys in /etc/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
+#IgnoreUserKnownHosts yes
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to yes to enable challenge-response passwords (beware issues with
+# some PAM modules and threads)
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# To disable tunneled clear text passwords, change to no here!
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# Kerberos options
+KerberosAuthentication <%= scope.lookupvar('::sshd::kerberos_authentication') %>
+KerberosOrLocalPasswd <%= scope.lookupvar('::sshd::kerberos_orlocalpasswd') %>
+KerberosTicketCleanup <%= scope.lookupvar('::sshd::kerberos_ticketcleanup') %>
+
+# GSSAPI options
+GSSAPIAuthentication <%= scope.lookupvar('::sshd::gssapi_authentication') %>
+GSSAPICleanupCredentials <%= scope.lookupvar('::sshd::gssapi_cleanupcredentials') %>
+
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+X11DisplayOffset 10
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+PrintLastLog yes
+TCPKeepAlive yes
+
+#UseLogin no
+
+#MaxStartups 10:30:60
+#Banner /etc/issue.net
+# do not reveal debian version (default is yes)
+DebianBanner no
+
+# Allow client to pass locale environment variables
+AcceptEnv LANG LC_*
+
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/lib/openssh/sftp-server' : s %>
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+
+AllowAgentForwarding <%= scope.lookupvar('::sshd::agent_forwarding') %>
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+Ciphers aes256-ctr
+MACs hmac-sha2-512
+<% end -%>
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/Debian_wheezy.erb b/puppet/modules/sshd/templates/sshd_config/Debian_wheezy.erb
new file mode 100644
index 00000000..bcb15286
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/Debian_wheezy.erb
@@ -0,0 +1,132 @@
+# This file is managed by Puppet, all local modifications will be overwritten
+#
+# Package generated configuration file
+# See the sshd(8) manpage for details
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+# What ports, IPs and protocols we listen for
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+# Use these options to restrict which interfaces/protocols sshd will bind to
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+Protocol 2
+# HostKeys for protocol version 2
+<% scope.lookupvar('::sshd::hostkey_type').to_a.each do |hostkey_type| -%>
+HostKey /etc/ssh/ssh_host_<%=hostkey_type %>_key
+<% end -%>
+#Privilege Separation is turned on for security
+UsePrivilegeSeparation yes
+
+# Lifetime and size of ephemeral version 1 server key
+KeyRegenerationInterval 3600
+ServerKeyBits 768
+
+# Logging
+SyslogFacility AUTH
+LogLevel INFO
+
+# Authentication:
+LoginGraceTime 120
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+# For this to work you will also need host keys in /etc/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
+#IgnoreUserKnownHosts yes
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to yes to enable challenge-response passwords (beware issues with
+# some PAM modules and threads)
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# To disable tunneled clear text passwords, change to no here!
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# Kerberos options
+KerberosAuthentication <%= scope.lookupvar('::sshd::kerberos_authentication') %>
+KerberosOrLocalPasswd <%= scope.lookupvar('::sshd::kerberos_orlocalpasswd') %>
+KerberosTicketCleanup <%= scope.lookupvar('::sshd::kerberos_ticketcleanup') %>
+
+# GSSAPI options
+GSSAPIAuthentication <%= scope.lookupvar('::sshd::gssapi_authentication') %>
+GSSAPICleanupCredentials <%= scope.lookupvar('::sshd::gssapi_cleanupcredentials') %>
+
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+X11DisplayOffset 10
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+PrintLastLog yes
+TCPKeepAlive yes
+
+#UseLogin no
+
+#MaxStartups 10:30:60
+#Banner /etc/issue.net
+# do not reveal debian version (default is yes)
+DebianBanner no
+
+# Allow client to pass locale environment variables
+AcceptEnv LANG LC_*
+
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/lib/openssh/sftp-server' : s %>
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+
+AllowAgentForwarding <%= scope.lookupvar('::sshd::agent_forwarding') %>
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+<% if (scope.function_versioncmp([scope.lookupvar('::ssh_version'),'6.5'])) >= 0 -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% else -%>
+Ciphers aes256-ctr
+MACs hmac-sha2-512
+<% end -%>
+<% end -%>
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/FreeBSD.erb b/puppet/modules/sshd/templates/sshd_config/FreeBSD.erb
new file mode 100644
index 00000000..5298ade9
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/FreeBSD.erb
@@ -0,0 +1,168 @@
+# $OpenBSD: sshd_config,v 1.81 2009/10/08 14:03:41 markus Exp $
+# $FreeBSD: src/crypto/openssh/sshd_config,v 1.49.2.2.2.1 2010/06/14 02:09:06 kensmith Exp $
+
+# This is the sshd server system-wide configuration file. See
+# sshd_config(5) for more information.
+
+# This sshd was compiled with PATH=/usr/bin:/bin:/usr/sbin:/sbin
+
+# The strategy used for options in the default sshd_config shipped with
+# OpenSSH is to specify options with their default value where
+# possible, but leave them commented. Uncommented options change a
+# default value.
+
+# Note that some of FreeBSD's defaults differ from OpenBSD's, and
+# FreeBSD has a few additional options.
+
+#VersionAddendum FreeBSD-20100308
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+# What ports, IPs and protocols we listen for
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+#AddressFamily any
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+
+# The default requires explicit activation of protocol 1
+Protocol 2
+
+# HostKey for protocol version 1
+#HostKey /etc/ssh/ssh_host_key
+# HostKeys for protocol version 2
+<% scope.lookupvar('::sshd::hostkey_type').to_a.each do |hostkey_type| -%>
+HostKey /etc/ssh/ssh_host_<%=hostkey_type %>_key
+<% end -%>
+
+# Lifetime and size of ephemeral version 1 server key
+#KeyRegenerationInterval 1h
+#ServerKeyBits 1024
+
+# Logging
+# obsoletes QuietMode and FascistLogging
+SyslogFacility AUTH
+LogLevel INFO
+
+# Authentication:
+
+LoginGraceTime 600
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+#MaxAuthTries 6
+#MaxSessions 10
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+
+# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+
+# Change to yes if you don't trust ~/.ssh/known_hosts for
+# RhostsRSAAuthentication and HostbasedAuthentication
+#IgnoreUserKnownHosts no
+# Don't read the user's ~/.rhosts and ~/.shosts files
+#IgnoreRhosts yes
+
+# Change to yes to enable built-in password authentication.
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to no to disable PAM authentication
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# Kerberos options
+KerberosAuthentication <%= scope.lookupvar('::sshd::kerberos_authentication') %>
+KerberosOrLocalPasswd <%= scope.lookupvar('::sshd::kerberos_orlocalpasswd') %>
+KerberosTicketCleanup <%= scope.lookupvar('::sshd::kerberos_ticketcleanup') %>
+
+# GSSAPI options
+GSSAPIAuthentication <%= scope.lookupvar('::sshd::gssapi_authentication') %>
+GSSAPICleanupCredentials <%= scope.lookupvar('::sshd::gssapi_cleanupcredentials') %>
+
+# Set this to 'no' to disable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+AllowAgentForwarding <%= scope.lookupvar('::sshd::agent_forwarding') %>
+
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+
+#GatewayPorts no
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+
+X11DisplayOffset 10
+#X11UseLocalhost yes
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+#PrintLastLog yes
+TCPKeepAlive yes
+#UseLogin no
+#UsePrivilegeSeparation yes
+#PermitUserEnvironment no
+#Compression delayed
+#ClientAliveInterval 0
+#ClientAliveCountMax 3
+#UseDNS yes
+#PidFile /var/run/sshd.pid
+#MaxStartups 10
+#PermitTunnel no
+#ChrootDirectory none
+
+# no default banner path
+#Banner none
+
+# override default of no subsystems
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/libexec/sftp-server' : s %>
+
+# Example of overriding settings on a per-user basis
+#Match User anoncvs
+# X11Forwarding no
+# AllowTcpForwarding no
+# ForceCommand cvs server
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+<% if (scope.function_versioncmp([scope.lookupvar('::ssh_version'),'6.5'])) >= 0 -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% else -%>
+Ciphers aes256-ctr
+MACs hmac-sha1
+<% end -%>
+<% end -%>
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/Gentoo.erb b/puppet/modules/sshd/templates/sshd_config/Gentoo.erb
new file mode 100644
index 00000000..022a26e7
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/Gentoo.erb
@@ -0,0 +1,164 @@
+# $OpenBSD: sshd_config,v 1.75 2007/03/19 01:01:29 djm Exp $
+
+# This is the sshd server system-wide configuration file. See
+# sshd_config(5) for more information.
+
+# This sshd was compiled with PATH=/usr/bin:/bin:/usr/sbin:/sbin
+
+# The strategy used for options in the default sshd_config shipped with
+# OpenSSH is to specify options with their default value where
+# possible, but leave them commented. Uncommented options change a
+# default value.
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+# Use these options to restrict which interfaces/protocols sshd will bind to
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+#AddressFamily any
+
+# Disable legacy (protocol version 1) support in the server for new
+# installations. In future the default will change to require explicit
+# activation of protocol 1
+Protocol 2
+
+# HostKey for protocol version 1
+#HostKey /etc/ssh/ssh_host_key
+# HostKeys for protocol version 2
+#HostKey /etc/ssh/ssh_host_rsa_key
+#HostKey /etc/ssh/ssh_host_dsa_key
+
+# Lifetime and size of ephemeral version 1 server key
+#KeyRegenerationInterval 1h
+#ServerKeyBits 768
+
+# Logging
+# obsoletes QuietMode and FascistLogging
+#SyslogFacility AUTH
+#LogLevel INFO
+
+# Authentication:
+
+#LoginGraceTime 2m
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+#MaxAuthTries 6
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+
+# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+
+# Change to yes if you don't trust ~/.ssh/known_hosts for
+# RhostsRSAAuthentication and HostbasedAuthentication
+#IgnoreUserKnownHosts no
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+
+# To disable tunneled clear text passwords, change to no here!
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to no to disable s/key passwords
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# Kerberos options
+#KerberosAuthentication no
+#KerberosOrLocalPasswd yes
+#KerberosTicketCleanup yes
+#KerberosGetAFSToken no
+
+# GSSAPI options
+#GSSAPIAuthentication no
+#GSSAPICleanupCredentials yes
+#GSSAPIStrictAcceptorCheck yes
+#GSSAPIKeyExchange no
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+
+#GatewayPorts no
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+#X11DisplayOffset 10
+#X11UseLocalhost yes
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+#PrintLastLog yes
+#TCPKeepAlive yes
+#UseLogin no
+#UsePrivilegeSeparation yes
+#PermitUserEnvironment no
+#Compression delayed
+#ClientAliveInterval 0
+#ClientAliveCountMax 3
+#UseDNS yes
+#PidFile /var/run/sshd.pid
+#MaxStartups 10
+#PermitTunnel no
+
+# no default banner path
+#Banner /some/path
+
+# override default of no subsystems
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/lib/misc/sftp-server' : s %>
+
+# Example of overriding settings on a per-user basis
+#Match User anoncvs
+# X11Forwarding no
+# AllowTcpForwarding no
+# ForceCommand cvs server
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+<% if (scope.function_versioncmp([scope.lookupvar('::ssh_version'),'6.5'])) >= 0 -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% else -%>
+Ciphers aes256-ctr
+MACs hmac-sha1
+<% end -%>
+<% end -%>
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
diff --git a/puppet/modules/sshd/templates/sshd_config/OpenBSD.erb b/puppet/modules/sshd/templates/sshd_config/OpenBSD.erb
new file mode 100644
index 00000000..db730300
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/OpenBSD.erb
@@ -0,0 +1,144 @@
+# $OpenBSD: sshd_config,v 1.74 2006/07/19 13:07:10 dtucker Exp $
+
+# This is the sshd server system-wide configuration file. See
+# sshd_config(5) for more information.
+
+# The strategy used for options in the default sshd_config shipped with
+# OpenSSH is to specify options with their default value where
+# possible, but leave them commented. Uncommented options change a
+# default value.
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+# Use these options to restrict which interfaces/protocols sshd will bind to
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+#Protocol 2,1
+#AddressFamily any
+
+# HostKey for protocol version 1
+#HostKey /etc/ssh/ssh_host_key
+# HostKeys for protocol version 2
+#HostKey /etc/ssh/ssh_host_rsa_key
+#HostKey /etc/ssh/ssh_host_dsa_key
+
+# Lifetime and size of ephemeral version 1 server key
+#KeyRegenerationInterval 1h
+#ServerKeyBits 768
+
+# Logging
+# obsoletes QuietMode and FascistLogging
+#SyslogFacility AUTH
+#LogLevel INFO
+
+# Authentication:
+
+#LoginGraceTime 2m
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+#MaxAuthTries 6
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+
+# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+
+# Change to yes if you don't trust ~/.ssh/known_hosts for
+# RhostsRSAAuthentication and HostbasedAuthentication
+#IgnoreUserKnownHosts no
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+
+# To disable tunneled clear text passwords, change to no here!
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to no to disable s/key passwords
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# Kerberos options
+#KerberosAuthentication no
+#KerberosOrLocalPasswd yes
+#KerberosTicketCleanup yes
+#KerberosGetAFSToken no
+
+# GSSAPI options
+#GSSAPIAuthentication no
+#GSSAPICleanupCredentials yes
+
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+
+#GatewayPorts no
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+#X11DisplayOffset 10
+#X11UseLocalhost yes
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+#PrintLastLog yes
+#TCPKeepAlive yes
+#UseLogin no
+#UsePrivilegeSeparation yes
+#PermitUserEnvironment no
+#Compression delayed
+#ClientAliveInterval 0
+#ClientAliveCountMax 3
+#UseDNS yes
+#PidFile /var/run/sshd.pid
+#MaxStartups 10
+#PermitTunnel no
+
+# no default banner path
+#Banner /some/path
+
+# override default of no subsystems
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/libexec/sftp-server' : s %>
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+# Example of overriding settings on a per-user basis
+#Match User anoncvs
+# X11Forwarding no
+# AllowTcpForwarding no
+# ForceCommand cvs server
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+<% if (scope.function_versioncmp([scope.lookupvar('::ssh_version'),'6.5'])) >= 0 -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% else -%>
+Ciphers aes256-ctr
+MACs hmac-sha1
+<% end -%>
+<% end -%>
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/Ubuntu.erb b/puppet/modules/sshd/templates/sshd_config/Ubuntu.erb
new file mode 100644
index 00000000..a326ab87
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/Ubuntu.erb
@@ -0,0 +1,133 @@
+# This file is managed by Puppet, all local modifications will be overwritten
+#
+# Package generated configuration file
+# See the sshd(8) manpage for details
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+# What ports, IPs and protocols we listen for
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+# Use these options to restrict which interfaces/protocols sshd will bind to
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+Protocol 2
+# HostKeys for protocol version 2
+<% scope.lookupvar('::sshd::hostkey_type').to_a.each do |hostkey_type| -%>
+HostKey /etc/ssh/ssh_host_<%=hostkey_type %>_key
+<% end -%>
+
+#Privilege Separation is turned on for security
+UsePrivilegeSeparation yes
+
+# Lifetime and size of ephemeral version 1 server key
+KeyRegenerationInterval 3600
+ServerKeyBits 768
+
+# Logging
+SyslogFacility AUTH
+LogLevel INFO
+
+# Authentication:
+LoginGraceTime 120
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+# For this to work you will also need host keys in /etc/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
+#IgnoreUserKnownHosts yes
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to yes to enable challenge-response passwords (beware issues with
+# some PAM modules and threads)
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# To disable tunneled clear text passwords, change to no here!
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# Kerberos options
+KerberosAuthentication <%= scope.lookupvar('::sshd::kerberos_authentication') %>
+KerberosOrLocalPasswd <%= scope.lookupvar('::sshd::kerberos_orlocalpasswd') %>
+KerberosTicketCleanup <%= scope.lookupvar('::sshd::kerberos_ticketcleanup') %>
+
+# GSSAPI options
+GSSAPIAuthentication <%= scope.lookupvar('::sshd::gssapi_authentication') %>
+GSSAPICleanupCredentials <%= scope.lookupvar('::sshd::gssapi_cleanupcredentials') %>
+
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+X11DisplayOffset 10
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+PrintLastLog yes
+TCPKeepAlive yes
+
+#UseLogin no
+
+#MaxStartups 10:30:60
+#Banner /etc/issue.net
+# do not reveal debian version (default is yes)
+DebianBanner no
+
+# Allow client to pass locale environment variables
+AcceptEnv LANG LC_*
+
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/lib/openssh/sftp-server' : s %>
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+
+AllowAgentForwarding <%= scope.lookupvar('::sshd::agent_forwarding') %>
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+<% if (scope.function_versioncmp([scope.lookupvar('::ssh_version'),'6.5'])) >= 0 -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% else -%>
+Ciphers aes256-ctr
+MACs hmac-sha1
+<% end -%>
+<% end -%>
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/Ubuntu_lucid.erb b/puppet/modules/sshd/templates/sshd_config/Ubuntu_lucid.erb
new file mode 100644
index 00000000..be7c56d0
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/Ubuntu_lucid.erb
@@ -0,0 +1,136 @@
+# Package generated configuration file
+# See the sshd(8) manpage for details
+
+<% unless (s=scope.lookupvar('::sshd::head_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
+
+# What ports, IPs and protocols we listen for
+<% scope.lookupvar('::sshd::ports').to_a.each do |port| -%>
+<% if port == 'off' -%>
+#Port -- disabled by puppet
+<% else -%>
+Port <%= port %>
+<% end -%>
+<% end -%>
+
+# Use these options to restrict which interfaces/protocols sshd will bind to
+<% scope.lookupvar('::sshd::listen_address').to_a.each do |address| -%>
+ListenAddress <%= address %>
+<% end -%>
+Protocol 2
+# HostKeys for protocol version 2
+<% scope.lookupvar('::sshd::hostkey_type').to_a.each do |hostkey_type| -%>
+HostKey /etc/ssh/ssh_host_<%=hostkey_type %>_key
+<% end -%>
+
+#Privilege Separation is turned on for security
+UsePrivilegeSeparation yes
+
+# ...but breaks Pam auth via kbdint, so we have to turn it off
+# Use PAM authentication via keyboard-interactive so PAM modules can
+# properly interface with the user (off due to PrivSep)
+#PAMAuthenticationViaKbdInt no
+# Lifetime and size of ephemeral version 1 server key
+KeyRegenerationInterval 3600
+ServerKeyBits 768
+
+# Logging
+SyslogFacility AUTH
+LogLevel INFO
+
+# Authentication:
+LoginGraceTime 600
+PermitRootLogin <%= scope.lookupvar('::sshd::permit_root_login') %>
+
+StrictModes <%= scope.lookupvar('::sshd::strict_modes') %>
+
+RSAAuthentication <%= scope.lookupvar('::sshd::rsa_authentication') %>
+
+PubkeyAuthentication <%= scope.lookupvar('::sshd::pubkey_authentication') %>
+
+AuthorizedKeysFile <%= scope.lookupvar('::sshd::authorized_keys_file') %>
+
+# For this to work you will also need host keys in /etc/ssh_known_hosts
+RhostsRSAAuthentication <%= scope.lookupvar('::sshd::rhosts_rsa_authentication') %>
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts <%= scope.lookupvar('::sshd::ignore_rhosts') %>
+
+# similar for protocol version 2
+HostbasedAuthentication <%= scope.lookupvar('::sshd::hostbased_authentication') %>
+
+# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
+#IgnoreUserKnownHosts yes
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords <%= scope.lookupvar('::sshd::permit_empty_passwords') %>
+
+# Change to no to disable s/key passwords
+ChallengeResponseAuthentication <%= scope.lookupvar('::sshd::challenge_response_authentication') %>
+
+# To disable tunneled clear text passwords, change to no here!
+PasswordAuthentication <%= scope.lookupvar('::sshd::password_authentication') %>
+
+# To change Kerberos options
+#KerberosAuthentication no
+#KerberosOrLocalPasswd yes
+#AFSTokenPassing no
+#KerberosTicketCleanup no
+
+# Kerberos TGT Passing does only work with the AFS kaserver
+#KerberosTgtPassing yes
+
+X11Forwarding <%= scope.lookupvar('::sshd::x11_forwarding') %>
+X11DisplayOffset 10
+KeepAlive yes
+#UseLogin no
+
+#MaxStartups 10:30:60
+#Banner /etc/issue.net
+# do not reveal debian version (default is yes)
+DebianBanner no
+#ReverseMappingCheck yes
+
+Subsystem sftp <%= (s=scope.lookupvar('::sshd::sftp_subsystem')).empty? ? '/usr/lib/openssh/sftp-server' : s %>
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM <%= scope.lookupvar('::sshd::use_pam') %>
+
+HostbasedUsesNameFromPacketOnly yes
+
+AllowTcpForwarding <%= scope.lookupvar('::sshd::tcp_forwarding') %>
+
+AllowAgentForwarding <%= scope.lookupvar('::sshd::agent_forwarding') %>
+
+<% unless (s=scope.lookupvar('::sshd::allowed_users')).empty? -%>
+AllowUsers <%= s %>
+<% end -%>
+<% unless (s=scope.lookupvar('::sshd::allowed_groups')).empty? -%>
+AllowGroups <%= s %>
+<%- end -%>
+
+PrintMotd <%= scope.lookupvar('::sshd::print_motd') %>
+
+<% if scope.lookupvar('::sshd::hardened') == 'yes' -%>
+<% if (scope.function_versioncmp([scope.lookupvar('::ssh_version'),'6.5'])) >= 0 -%>
+KexAlgorithms curve25519-sha256@libssh.org
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com
+<% else -%>
+Ciphers aes256-ctr
+MACs hmac-sha1
+<% end -%>
+<% end -%>
+
+<% unless (s=scope.lookupvar('::sshd::tail_additional_options')).empty? -%>
+<%= s %>
+<% end -%>
diff --git a/puppet/modules/sshd/templates/sshd_config/Ubuntu_oneiric.erb b/puppet/modules/sshd/templates/sshd_config/Ubuntu_oneiric.erb
new file mode 120000
index 00000000..ccfb67c8
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/Ubuntu_oneiric.erb
@@ -0,0 +1 @@
+Ubuntu_lucid.erb \ No newline at end of file
diff --git a/puppet/modules/sshd/templates/sshd_config/Ubuntu_precise.erb b/puppet/modules/sshd/templates/sshd_config/Ubuntu_precise.erb
new file mode 120000
index 00000000..6502bfce
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/Ubuntu_precise.erb
@@ -0,0 +1 @@
+Ubuntu.erb \ No newline at end of file
diff --git a/puppet/modules/sshd/templates/sshd_config/XenServer_xenenterprise.erb b/puppet/modules/sshd/templates/sshd_config/XenServer_xenenterprise.erb
new file mode 120000
index 00000000..71b767a5
--- /dev/null
+++ b/puppet/modules/sshd/templates/sshd_config/XenServer_xenenterprise.erb
@@ -0,0 +1 @@
+CentOS_6.erb \ No newline at end of file
diff --git a/puppet/modules/templatewlv/Modulefile b/puppet/modules/templatewlv/Modulefile
new file mode 100644
index 00000000..8007a070
--- /dev/null
+++ b/puppet/modules/templatewlv/Modulefile
@@ -0,0 +1,11 @@
+name 'duritong-templatewlv'
+version '0.0.1'
+source 'https://github.com/duritong/puppet-templatewlv.git'
+author 'duritong'
+license 'Apache License, Version 2.0'
+summary 'Template With Local Variables'
+description 'Pass local variables to templates'
+project_page 'https://github.com/duritong/puppet-templatewlv'
+
+## Add dependencies, if any:
+# dependency 'username/name', '>= 1.2.0'
diff --git a/puppet/modules/templatewlv/README.md b/puppet/modules/templatewlv/README.md
new file mode 100644
index 00000000..5ab01e45
--- /dev/null
+++ b/puppet/modules/templatewlv/README.md
@@ -0,0 +1,21 @@
+# templatewlv
+
+## Template With Local Variables
+
+A wrapper around puppet's template function. See
+[the templating docs](http://docs.puppetlabs.com/guides/templating.html) for
+the basic functionality.
+
+Additionally, you can pass a hash as the last argument; it will be turned into
+local variables that are available to the template itself. This allows you to
+define variables in a template and pass them down to a template you include
+from the current template. An example:
+
+ scope.function_templatewlv(['sub_template', { 'local_var' => 'value' }])
+
+Note that if multiple templates are specified, their output is all
+concatenated and returned as the output of the function.
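+
+For instance, from a Puppet manifest you could use it in place of the stock
+`template` function like this (a sketch; the module name, template path and
+variable name are hypothetical):
+
+    file { '/etc/example.conf':
+      content => templatewlv('mymodule/example.conf.erb', { 'local_var' => 'value' }),
+    }
+
+Inside `example.conf.erb` the value is then reachable as `local_var` (or
+`@local_var`), and can be handed down to a sub-template via
+`scope.function_templatewlv` as shown above.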
+
+# Who - License
+
+duritong - Apache License, Version 2.0
diff --git a/puppet/modules/templatewlv/lib/puppet/parser/functions/templatewlv.rb b/puppet/modules/templatewlv/lib/puppet/parser/functions/templatewlv.rb
new file mode 100644
index 00000000..c9579e2c
--- /dev/null
+++ b/puppet/modules/templatewlv/lib/puppet/parser/functions/templatewlv.rb
@@ -0,0 +1,41 @@
+require File.join(File.dirname(__FILE__),'../templatewrapperwlv')
+Puppet::Parser::Functions::newfunction(:templatewlv, :type => :rvalue, :arity => -2, :doc =>
+ "A wrapper around puppet's template function. See
+ [the templating docs](http://docs.puppetlabs.com/guides/templating.html) for
+ the basic functionality.
+
+  Additionally, you can pass a hash as the last argument; it will be turned into
+  local variables that are available to the template itself. This allows you to
+  define variables in a template and pass them down to a template you include
+  from the current template. An example:
+
+ scope.function_templatewlv(['sub_template', { 'local_var' => 'value' }])
+
+ Note that if multiple templates are specified, their output is all
+ concatenated and returned as the output of the function.") do |vals|
+
+ if vals.last.is_a?(Hash)
+ local_vars = vals.last
+ local_vals = vals[0..-2]
+ else
+ local_vars = {}
+ local_vals = vals
+ end
+
+ result = nil
+ local_vals.collect do |file|
+ # Use a wrapper, so the template can't get access to the full
+ # Scope object.
+ debug "Retrieving template #{file}"
+
+ wrapper = Puppet::Parser::TemplateWrapperWlv.new(self,local_vars)
+ wrapper.file = file
+ begin
+ wrapper.result
+ rescue => detail
+ info = detail.backtrace.first.split(':')
+ raise Puppet::ParseError,
+ "Failed to parse template #{file}:\n Filepath: #{info[0]}\n Line: #{info[1]}\n Detail: #{detail}\n"
+ end
+ end.join("")
+end
diff --git a/puppet/modules/templatewlv/lib/puppet/parser/templatewrapperwlv.rb b/puppet/modules/templatewlv/lib/puppet/parser/templatewrapperwlv.rb
new file mode 100644
index 00000000..f1753e18
--- /dev/null
+++ b/puppet/modules/templatewlv/lib/puppet/parser/templatewrapperwlv.rb
@@ -0,0 +1,39 @@
+# A wrapper for templates that additionally allows you to define
+# local variables.
+class Puppet::Parser::TemplateWrapperWlv < Puppet::Parser::TemplateWrapper
+ attr_reader :local_vars
+ def initialize(scope, local_vars)
+ super(scope)
+ @local_vars = local_vars
+ end
+
+ # Should return true if a variable is defined, false if it is not
+ def has_variable?(name)
+ super(name) || local_vars.keys.include?(name.to_s)
+ end
+
+ def method_missing(name, *args)
+ if local_vars.keys.include?(n=name.to_s)
+ local_vars[n]
+ else
+ super(name, *args)
+ end
+ end
+
+ def result(string = nil)
+    # Expose the additional local variables as instance variables of the
+    # current object, making it possible to access them without
+    # conflicting with the regular methods.
+ benchmark(:debug, "Bound local template variables for #{@__file__}") do
+ local_vars.each do |name, value|
+ if name.kind_of?(String)
+ realname = name.gsub(/[^\w]/, "_")
+ else
+ realname = name
+ end
+ instance_variable_set("@#{realname}", value)
+ end
+ end
+ super(string)
+ end
+end
diff --git a/puppet/modules/try/README.md b/puppet/modules/try/README.md
new file mode 100644
index 00000000..3888661e
--- /dev/null
+++ b/puppet/modules/try/README.md
@@ -0,0 +1,13 @@
+This module provides a "try" wrapper around common resource types.
+
+For example:
+
+ try::file {
+ '/path/to/file':
+ ensure => 'link',
+ target => $target;
+ }
+
+This will work just like `file`, but will silently fail if `$target` is undefined or the file does not exist.
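+
+For instance, if `$maybe_missing_dir` below is undef, or names a path that
+does not exist, the resource is silently ignored rather than failing the run
+(a sketch; the path and variable name are hypothetical):
+
+    try::file {
+      '/srv/app/current':
+        ensure => 'link',
+        target => $maybe_missing_dir;
+    }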
+
+So far, only the `file` type works, and only with symlinks.
diff --git a/puppet/modules/try/manifests/file.pp b/puppet/modules/try/manifests/file.pp
new file mode 100644
index 00000000..2493d343
--- /dev/null
+++ b/puppet/modules/try/manifests/file.pp
@@ -0,0 +1,114 @@
+#
+# Works like the built-in type "file", but gets gracefully ignored if the target/source does not exist or is undefined.
+#
+# Also, if the source or target doesn't exist, and the destination is a git repo, then the file is restored from git.
+#
+# All executable paths are hardcoded to their locations in Debian.
+#
+# known limitations:
+# * this is far too noisy
+# * $restore does not work for directories
+# * only file:// $source is supported
+# * $content is not supported, only $target or $source.
+# * does not auto-require all the parent directories like 'file' does
+#
+define try::file (
+ $ensure = undef,
+ $target = undef,
+ $source = undef,
+ $owner = undef,
+ $group = undef,
+ $recurse = undef,
+ $purge = undef,
+ $force = undef,
+ $mode = undef,
+ $restore = true) {
+
+ # dummy exec to propagate requires:
+ # metaparameter 'require' will get triggered by this dummy exec
+ # so then we just need to depend on this to capture all requires.
+ # exec { $name: command => "/bin/true" }
+
+ exec {
+ "chmod_${name}":
+ command => "/bin/chmod -R ${mode} '${name}'",
+ onlyif => "/usr/bin/test ${mode}",
+ refreshonly => true,
+ loglevel => debug;
+ "chown_${name}":
+ command => "/bin/chown -R ${owner} '${name}'",
+ onlyif => "/usr/bin/test ${owner}",
+ refreshonly => true,
+ loglevel => debug;
+ "chgrp_${name}":
+ command => "/bin/chgrp -R ${group} '${name}'",
+ onlyif => "/usr/bin/test ${group}",
+ refreshonly => true,
+ loglevel => debug;
+ }
+
+ if $target {
+ exec { "symlink_${name}":
+ command => "/bin/ln -s ${target} ${name}",
+ onlyif => "/usr/bin/test -d '${target}'",
+ }
+ } elsif $source {
+ if $ensure == 'directory' {
+ if $purge {
+ exec { "rsync_${name}":
+ command => "/usr/bin/rsync -r --delete '${source}/' '${name}'",
+ onlyif => "/usr/bin/test -d '${source}'",
+ unless => "/usr/bin/diff -rq '${source}' '${name}'",
+ notify => [Exec["chmod_${name}"], Exec["chown_${name}"], Exec["chgrp_${name}"]]
+ }
+ } else {
+ exec { "cp_r_${name}":
+ command => "/bin/cp -r '${source}' '${name}'",
+ onlyif => "/usr/bin/test -d '${source}'",
+ unless => "/usr/bin/diff -rq '${source}' '${name}'",
+ notify => [Exec["chmod_${name}"], Exec["chown_${name}"], Exec["chgrp_${name}"]]
+ }
+ }
+ } else {
+ exec { "cp_${name}":
+ command => "/bin/cp --remove-destination '${source}' '${name}'",
+ onlyif => "/usr/bin/test -e '${source}'",
+ unless => "/usr/bin/test ! -h '${name}' && /usr/bin/diff -q '${source}' '${name}'",
+ notify => [Exec["chmod_${name}"], Exec["chown_${name}"], Exec["chgrp_${name}"]]
+ }
+ }
+ }
+
+ #
+ # if the target/source does not exist (or is undef), and the file happens to be in a git repo,
+ # then restore the file to its original state.
+ #
+
+ if $target {
+ $target_or_source = $target
+ } else {
+ $target_or_source = $source
+ }
+
+ if ($target_or_source == undef) or $restore {
+ $file_basename = basename($name)
+ $file_dirname = dirname($name)
+ $command = "git rev-parse && unlink '${name}'; git checkout -- '${file_basename}' && chown --reference='${file_dirname}' '${name}'; true"
+ debug($command)
+
+ if $target_or_source == undef {
+ exec { "restore_${name}":
+ command => $command,
+ cwd => $file_dirname,
+ loglevel => info;
+ }
+ } else {
+ exec { "restore_${name}":
+ unless => "/usr/bin/test -e '${target_or_source}'",
+ command => $command,
+ cwd => $file_dirname,
+ loglevel => info;
+ }
+ }
+ }
+}
diff --git a/puppet/modules/try/manifests/init.pp b/puppet/modules/try/manifests/init.pp
new file mode 100644
index 00000000..1d2108c9
--- /dev/null
+++ b/puppet/modules/try/manifests/init.pp
@@ -0,0 +1,3 @@
+class try {
+
+}