Diffstat (limited to 'lib')
-rw-r--r--  lib/leap_cli/commands/README          |  11
-rw-r--r--  lib/leap_cli/commands/ca.rb           | 541
-rw-r--r--  lib/leap_cli/commands/clean.rb        |  16
-rw-r--r--  lib/leap_cli/commands/compile.rb      | 531
-rw-r--r--  lib/leap_cli/commands/db.rb           |  86
-rw-r--r--  lib/leap_cli/commands/deploy.rb       | 374
-rw-r--r--  lib/leap_cli/commands/env.rb          |  76
-rw-r--r--  lib/leap_cli/commands/facts.rb        | 100
-rw-r--r--  lib/leap_cli/commands/info.rb         |  15
-rw-r--r--  lib/leap_cli/commands/inspect.rb      | 144
-rw-r--r--  lib/leap_cli/commands/list.rb         | 132
-rw-r--r--  lib/leap_cli/commands/node.rb         | 188
-rw-r--r--  lib/leap_cli/commands/node_init.rb    | 169
-rw-r--r--  lib/leap_cli/commands/ssh.rb          | 225
-rw-r--r--  lib/leap_cli/commands/test.rb         |  74
-rw-r--r--  lib/leap_cli/commands/user.rb         | 136
-rw-r--r--  lib/leap_cli/commands/util.rb         |  50
-rw-r--r--  lib/leap_cli/commands/vagrant.rb      | 180
-rw-r--r--  lib/leap_cli/macros.rb                |  16
-rw-r--r--  lib/leap_cli/macros/core.rb           |  92
-rw-r--r--  lib/leap_cli/macros/files.rb          | 124
-rw-r--r--  lib/leap_cli/macros/haproxy.rb        |  73
-rw-r--r--  lib/leap_cli/macros/hosts.rb          |  90
-rw-r--r--  lib/leap_cli/macros/keys.rb           |  97
-rw-r--r--  lib/leap_cli/macros/nodes.rb          |  88
-rw-r--r--  lib/leap_cli/macros/provider.rb       |  90
-rw-r--r--  lib/leap_cli/macros/secrets.rb        |  39
-rw-r--r--  lib/leap_cli/macros/stunnel.rb        | 106
-rw-r--r--  lib/puppet/provider/vcsrepo.rb        |  42
-rw-r--r--  lib/puppet/provider/vcsrepo/bzr.rb    |  93
-rw-r--r--  lib/puppet/provider/vcsrepo/cvs.rb    | 135
-rw-r--r--  lib/puppet/provider/vcsrepo/dummy.rb  |  12
-rw-r--r--  lib/puppet/provider/vcsrepo/git.rb    | 483
-rw-r--r--  lib/puppet/provider/vcsrepo/hg.rb     | 130
-rw-r--r--  lib/puppet/provider/vcsrepo/p4.rb     | 278
-rw-r--r--  lib/puppet/provider/vcsrepo/svn.rb    | 139
-rw-r--r--  lib/puppet/type/vcsrepo.rb            | 248
37 files changed, 3863 insertions, 1560 deletions
diff --git a/lib/leap_cli/commands/README b/lib/leap_cli/commands/README
new file mode 100644
index 00000000..bec78179
--- /dev/null
+++ b/lib/leap_cli/commands/README
@@ -0,0 +1,11 @@
+This directory contains ruby source files that define the available sub-
+commands of the `leap` executable.
+
+For example, the command:
+
+ leap compile
+
+lives in lib/leap_cli/commands/compile.rb
+
+These files use a DSL (called GLI) for defining command suites.
+See https://github.com/davetron5000/gli for more information.
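+
+As a rough sketch (the exact API is documented by GLI), a command definition
+looks something like this:
+
+  desc 'Compile generated files.'
+  command :compile do |c|
+    c.action do |global_options, options, args|
+      # ... command implementation ...
+    end
+  end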
diff --git a/lib/leap_cli/commands/ca.rb b/lib/leap_cli/commands/ca.rb
new file mode 100644
index 00000000..1b311eee
--- /dev/null
+++ b/lib/leap_cli/commands/ca.rb
@@ -0,0 +1,541 @@
+autoload :OpenSSL, 'openssl'
+autoload :CertificateAuthority, 'certificate_authority'
+autoload :Date, 'date'
+require 'digest/md5'
+
+module LeapCli; module Commands
+
+ desc "Manage X.509 certificates"
+ command :cert do |cert|
+
+ cert.desc 'Creates two Certificate Authorities (one for validating servers and one for validating clients).'
+ cert.long_desc 'To see what values are used in the generation of the certificates (like name and key size), run `leap inspect provider` and look for the "ca" property. To see the details of the created certs, run `leap inspect <file>`.'
+ cert.command :ca do |ca|
+ ca.action do |global_options,options,args|
+ assert_config! 'provider.ca.name'
+ generate_new_certificate_authority(:ca_key, :ca_cert, provider.ca.name)
+ generate_new_certificate_authority(:client_ca_key, :client_ca_cert, provider.ca.name + ' (client certificates only!)')
+ end
+ end
+
+ cert.desc 'Creates or renews an X.509 certificate/key pair for a single node or all nodes, but only if needed.'
+ cert.long_desc 'This command will generate a new certificate for a node if some value in the node has changed ' +
+ 'that is included in the certificate (like hostname or IP address), or if the old certificate will be expiring soon. ' +
+ 'Sometimes, you might want to force the generation of a new certificate, ' +
+ 'such as in the cases where you have changed a CA parameter for server certificates, like bit size or digest hash. ' +
+ 'In this case, use --force. If <node-filter> is empty, this command will apply to all nodes.'
+ cert.arg_name 'FILTER'
+ cert.command :update do |update|
+ update.switch 'force', :desc => 'Always generate new certificates', :negatable => false
+ update.action do |global_options,options,args|
+ update_certificates(manager.filter!(args), options)
+ end
+ end
+
+ cert.desc 'Creates a Diffie-Hellman parameter file, needed for forward secret OpenVPN ciphers.' # (needed for server-side of some TLS connections)
+ cert.command :dh do |dh|
+ dh.action do |global_options,options,args|
+ long_running do
+ if cmd_exists?('certtool')
+ log 0, 'Generating DH parameters (takes a long time)...'
+ output = assert_run!('certtool --generate-dh-params --sec-param high')
+ output.sub! /.*(-----BEGIN DH PARAMETERS-----.*-----END DH PARAMETERS-----).*/m, '\1'
+ output << "\n"
+ write_file!(:dh_params, output)
+ else
+ log 0, 'Generating DH parameters (takes a REALLY long time)...'
+ output = OpenSSL::PKey::DH.generate(3248).to_pem
+ write_file!(:dh_params, output)
+ end
+ end
+ end
+ end
+
+ #
+ # hints:
+ #
+ # inspect CSR:
+ # openssl req -noout -text -in files/cert/x.csr
+ #
+ # generate CSR with openssl to see how it compares:
+ # openssl req -sha256 -nodes -newkey rsa:2048 -keyout example.key -out example.csr
+ #
+ # validate a CSR:
+ # http://certlogik.com/decoder/
+ #
+ # nice details about CSRs:
+ # http://www.redkestrel.co.uk/Articles/CSR.html
+ #
+ cert.desc "Creates a CSR for use in buying a commercial X.509 certificate."
+ cert.long_desc "Unless specified, the CSR is created for the provider's primary domain. "+
+ "The properties used for this CSR come from `provider.ca.server_certificates`, "+
+ "but may be overridden here."
+ cert.command :csr do |csr|
+ csr.flag 'domain', :arg_name => 'DOMAIN', :desc => 'Specify what domain to create the CSR for.'
+ csr.flag ['organization', 'O'], :arg_name => 'ORGANIZATION', :desc => "Override default O in distinguished name."
+ csr.flag ['unit', 'OU'], :arg_name => 'UNIT', :desc => "Set OU in distinguished name."
+ csr.flag 'email', :arg_name => 'EMAIL', :desc => "Set emailAddress in distinguished name."
+ csr.flag ['locality', 'L'], :arg_name => 'LOCALITY', :desc => "Set L in distinguished name."
+ csr.flag ['state', 'ST'], :arg_name => 'STATE', :desc => "Set ST in distinguished name."
+ csr.flag ['country', 'C'], :arg_name => 'COUNTRY', :desc => "Set C in distinguished name."
+ csr.flag :bits, :arg_name => 'BITS', :desc => "Override default certificate bit length"
+ csr.flag :digest, :arg_name => 'DIGEST', :desc => "Override default signature digest"
+ csr.action do |global_options,options,args|
+ assert_config! 'provider.domain'
+ assert_config! 'provider.name'
+ assert_config! 'provider.default_language'
+ assert_config! 'provider.ca.server_certificates.bit_size'
+ assert_config! 'provider.ca.server_certificates.digest'
+ domain = options[:domain] || provider.domain
+
+ unless global_options[:force]
+ assert_files_missing! [:commercial_key, domain], [:commercial_csr, domain],
+ :msg => 'If you really want to create a new key and CSR, remove these files first or run with --force.'
+ end
+
+ server_certificates = provider.ca.server_certificates
+
+ # RSA key
+ keypair = CertificateAuthority::MemoryKeyMaterial.new
+ bit_size = (options[:bits] || server_certificates.bit_size).to_i
+ log :generating, "%s bit RSA key" % bit_size do
+ keypair.generate_key(bit_size)
+ write_file! [:commercial_key, domain], keypair.private_key.to_pem
+ end
+
+ # CSR
+ dn = CertificateAuthority::DistinguishedName.new
+ dn.common_name = domain
+ dn.organization = options[:organization] || provider.name[provider.default_language]
+ dn.ou = options[:unit] # optional
+ dn.email_address = options[:email] # optional
+ dn.country = options[:country] || server_certificates['country'] # optional
+ dn.state = options[:state] || server_certificates['state'] # optional
+ dn.locality = options[:locality] || server_certificates['locality'] # optional
+
+ digest = options[:digest] || server_certificates.digest
+ log :generating, "CSR with #{digest} digest and #{print_dn(dn)}" do
+ csr = create_csr(dn, keypair, digest)
+ request = csr.to_x509_csr
+ write_file! [:commercial_csr, domain], csr.to_pem
+ end
+
+ # Sign using our own CA, for use in testing but hopefully not production.
+ # It is not that commercial CAs are so secure, it is just that signing your own certs is
+ # a total drag for the user because they must click through dire warnings.
+ #if options[:sign]
+ log :generating, "self-signed x509 server certificate for testing purposes" do
+ cert = csr.to_cert
+ cert.serial_number.number = cert_serial_number(domain)
+ cert.not_before = yesterday
+ cert.not_after = yesterday.advance(:years => 1)
+ cert.parent = ca_root
+ cert.sign! domain_test_signing_profile
+ write_file! [:commercial_cert, domain], cert.to_pem
+ log "please replace this file with the real certificate you get from a CA using #{Path.relative_path([:commercial_csr, domain])}"
+ end
+ #end
+
+ # FAKE CA
+ unless file_exists? :commercial_ca_cert
+ log :using, "generated CA in place of commercial CA for testing purposes" do
+ write_file! :commercial_ca_cert, read_file!(:ca_cert)
+ log "please also replace this file with the CA cert from the commercial authority you use."
+ end
+ end
+ end
+ end
+ end
+
+ protected
+
+ #
+ # will generate new certificates for the specified nodes, if needed.
+ #
+ def update_certificates(nodes, options={})
+ assert_files_exist! :ca_cert, :ca_key, :msg => 'Run `leap cert ca` to create them'
+ assert_config! 'provider.ca.server_certificates.bit_size'
+ assert_config! 'provider.ca.server_certificates.digest'
+ assert_config! 'provider.ca.server_certificates.life_span'
+ assert_config! 'common.x509.use'
+
+ nodes.each_node do |node|
+ warn_if_commercial_cert_will_soon_expire(node)
+ if !node.x509.use
+ remove_file!([:node_x509_key, node.name])
+ remove_file!([:node_x509_cert, node.name])
+ elsif options[:force] || cert_needs_updating?(node)
+ generate_cert_for_node(node)
+ end
+ end
+ end
+
+ private
+
+ def generate_new_certificate_authority(key_file, cert_file, common_name)
+ assert_files_missing! key_file, cert_file
+ assert_config! 'provider.ca.name'
+ assert_config! 'provider.ca.bit_size'
+ assert_config! 'provider.ca.life_span'
+
+ root = CertificateAuthority::Certificate.new
+
+ # set subject
+ root.subject.common_name = common_name
+ possible = ['country', 'state', 'locality', 'organization', 'organizational_unit', 'email_address']
+ provider.ca.keys.each do |key|
+ if possible.include?(key)
+ root.subject.send(key + '=', provider.ca[key])
+ end
+ end
+
+ # set expiration
+ root.not_before = yesterday
+ root.not_after = yesterday_advance(provider.ca.life_span)
+
+ # generate private key
+ root.serial_number.number = 1
+ root.key_material.generate_key(provider.ca.bit_size)
+
+ # sign self
+ root.signing_entity = true
+ root.parent = root
+ root.sign!(ca_root_signing_profile)
+
+ # save
+ write_file!(key_file, root.key_material.private_key.to_pem)
+ write_file!(cert_file, root.to_pem)
+ end
+
+ #
+ # returns true if the certs associated with +node+ need to be regenerated.
+ #
+ def cert_needs_updating?(node)
+ if !file_exists?([:node_x509_cert, node.name], [:node_x509_key, node.name])
+ return true
+ else
+ cert = load_certificate_file([:node_x509_cert, node.name])
+ if !created_by_authority?(cert, ca_root)
+ log :updating, "cert for node '#{node.name}' because it was signed by an old CA root cert."
+ return true
+ end
+ if cert.not_after < Time.now.advance(:months => 2)
+ log :updating, "cert for node '#{node.name}' because it will expire soon"
+ return true
+ end
+ if cert.subject.common_name != node.domain.full
+ log :updating, "cert for node '#{node.name}' because domain.full has changed (was #{cert.subject.common_name}, now #{node.domain.full})"
+ return true
+ end
+ cert.openssl_body.extensions.each do |ext|
+ if ext.oid == "subjectAltName"
+ ips = []
+ dns_names = []
+ ext.value.split(",").each do |value|
+ value.strip!
+ ips << $1 if value =~ /^IP Address:(.*)$/
+ dns_names << $1 if value =~ /^DNS:(.*)$/
+ end
+ dns_names.sort!
+ if ips.first != node.ip_address
+ log :updating, "cert for node '#{node.name}' because ip_address has changed (from #{ips.first} to #{node.ip_address})"
+ return true
+ elsif dns_names != dns_names_for_node(node)
+ log :updating, "cert for node '#{node.name}' because domain name aliases have changed\n from: #{dns_names.inspect}\n to: #{dns_names_for_node(node).inspect})"
+ return true
+ end
+ end
+ end
+ end
+ return false
+ end
+
+ def created_by_authority?(cert, ca)
+ authority_key_id = cert.extensions["authorityKeyIdentifier"].identifier.sub(/^keyid:/, '')
+ authority_key_id == public_key_id_for_ca(ca)
+ end
+
+ # calculate the "key id" for a root CA, that matches the value
+ # Authority Key Identifier in the x509 extensions of a cert.
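+ # (returns the SHA1 digest of the DER-encoded RSA public key, i.e. modulus and
+ # exponent, formatted as colon-separated uppercase hex pairs; the result is
+ # memoized per CA cert object.)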
+ def public_key_id_for_ca(ca_cert)
+ @ca_key_ids ||= {}
+ @ca_key_ids[ca_cert.object_id] ||= begin
+ pubkey = ca_cert.key_material.public_key
+ seq = OpenSSL::ASN1::Sequence([
+ OpenSSL::ASN1::Integer.new(pubkey.n),
+ OpenSSL::ASN1::Integer.new(pubkey.e)
+ ])
+ Digest::SHA1.hexdigest(seq.to_der).upcase.scan(/../).join(':')
+ end
+ end
+
+ def warn_if_commercial_cert_will_soon_expire(node)
+ dns_names_for_node(node).each do |domain|
+ if file_exists?([:commercial_cert, domain])
+ cert = load_certificate_file([:commercial_cert, domain])
+ path = Path.relative_path([:commercial_cert, domain])
+ if cert.not_after < Time.now.utc
+ log :error, "the commercial certificate '#{path}' has EXPIRED! " +
+ "You should renew it with `leap cert csr --domain #{domain}`."
+ elsif cert.not_after < Time.now.advance(:months => 2)
+ log :warning, "the commercial certificate '#{path}' will expire soon (#{cert.not_after}). "+
+ "You should renew it with `leap cert csr --domain #{domain}`."
+ end
+ end
+ end
+ end
+
+ def generate_cert_for_node(node)
+ return if node.x509.use == false
+
+ cert = CertificateAuthority::Certificate.new
+
+ # set subject
+ cert.subject.common_name = node.domain.full
+ cert.serial_number.number = cert_serial_number(node.domain.full)
+
+ # set expiration
+ cert.not_before = yesterday
+ cert.not_after = yesterday_advance(provider.ca.server_certificates.life_span)
+
+ # generate key
+ cert.key_material.generate_key(provider.ca.server_certificates.bit_size)
+
+ # sign
+ cert.parent = ca_root
+ cert.sign!(server_signing_profile(node))
+
+ # save
+ write_file!([:node_x509_key, node.name], cert.key_material.private_key.to_pem)
+ write_file!([:node_x509_cert, node.name], cert.to_pem)
+ end
+
+ #
+ # yields client key and cert suitable for testing
+ #
+ def generate_test_client_cert(prefix=nil)
+ cert = CertificateAuthority::Certificate.new
+ cert.serial_number.number = cert_serial_number(provider.domain)
+ cert.subject.common_name = [prefix, random_common_name(provider.domain)].join
+ cert.not_before = yesterday
+ cert.not_after = yesterday.advance(:years => 1)
+ cert.key_material.generate_key(1024) # just for testing, remember!
+ cert.parent = client_ca_root
+ cert.sign! client_test_signing_profile
+ yield cert.key_material.private_key.to_pem, cert.to_pem
+ end
+
+ #
+ # creates a CSR and returns it,
+ # with the correct extReq attribute so that the CA
+ # doesn't generate certs with extensions we don't want.
+ #
+ def create_csr(dn, keypair, digest)
+ csr = CertificateAuthority::SigningRequest.new
+ csr.distinguished_name = dn
+ csr.key_material = keypair
+ csr.digest = digest
+
+ # define extensions manually (library doesn't support setting these on CSRs)
+ extensions = []
+ extensions << CertificateAuthority::Extensions::BasicConstraints.new.tap {|basic|
+ basic.ca = false
+ }
+ extensions << CertificateAuthority::Extensions::KeyUsage.new.tap {|keyusage|
+ keyusage.usage = ["digitalSignature", "keyEncipherment"]
+ }
+ extensions << CertificateAuthority::Extensions::ExtendedKeyUsage.new.tap {|extkeyusage|
+ extkeyusage.usage = [ "serverAuth"]
+ }
+
+ # convert extensions to attribute 'extReq'
+ # aka "Requested Extensions"
+ factory = OpenSSL::X509::ExtensionFactory.new
+ attrval = OpenSSL::ASN1::Set([OpenSSL::ASN1::Sequence(
+ extensions.map{|e| factory.create_ext(e.openssl_identifier, e.to_s, e.critical)}
+ )])
+ attrs = [
+ OpenSSL::X509::Attribute.new("extReq", attrval),
+ ]
+ csr.attributes = attrs
+
+ return csr
+ end
+
+ def ca_root
+ @ca_root ||= begin
+ load_certificate_file(:ca_cert, :ca_key)
+ end
+ end
+
+ def client_ca_root
+ @client_ca_root ||= begin
+ load_certificate_file(:client_ca_cert, :client_ca_key)
+ end
+ end
+
+ def load_certificate_file(crt_file, key_file=nil, password=nil)
+ crt = read_file!(crt_file)
+ openssl_cert = OpenSSL::X509::Certificate.new(crt)
+ cert = CertificateAuthority::Certificate.from_openssl(openssl_cert)
+ if key_file
+ key = read_file!(key_file)
+ cert.key_material.private_key = OpenSSL::PKey::RSA.new(key, password)
+ end
+ return cert
+ end
+
+ def ca_root_signing_profile
+ {
+ "extensions" => {
+ "basicConstraints" => {"ca" => true},
+ "keyUsage" => {
+ "usage" => ["critical", "keyCertSign"]
+ },
+ "extendedKeyUsage" => {
+ "usage" => []
+ }
+ }
+ }
+ end
+
+ #
+ # For keyusage, openvpn server certs can have keyEncipherment or keyAgreement.
+ # Web browsers seem to break without keyEncipherment.
+ # For now, I am using digitalSignature + keyEncipherment
+ #
+ # * digitalSignature -- for (EC)DHE cipher suites
+ # "The digitalSignature bit is asserted when the subject public key is used
+ # with a digital signature mechanism to support security services other
+ # than certificate signing (bit 5), or CRL signing (bit 6). Digital
+ # signature mechanisms are often used for entity authentication and data
+ # origin authentication with integrity."
+ #
+ # * keyEncipherment ==> for plain RSA cipher suites
+ # "The keyEncipherment bit is asserted when the subject public key is used for
+ # key transport. For example, when an RSA key is to be used for key management,
+ # then this bit is set."
+ #
+ # * keyAgreement ==> for use with DH, not RSA.
+ # "The keyAgreement bit is asserted when the subject public key is used for key
+ # agreement. For example, when a Diffie-Hellman key is to be used for key
+ # management, then this bit is set."
+ #
+ # digest options: SHA512, SHA256, SHA1
+ #
+ def server_signing_profile(node)
+ {
+ "digest" => provider.ca.server_certificates.digest,
+ "extensions" => {
+ "keyUsage" => {
+ "usage" => ["digitalSignature", "keyEncipherment"]
+ },
+ "extendedKeyUsage" => {
+ "usage" => ["serverAuth", "clientAuth"]
+ },
+ "subjectAltName" => {
+ "ips" => [node.ip_address],
+ "dns_names" => dns_names_for_node(node)
+ }
+ }
+ }
+ end
+
+ #
+ # This is used when signing the main cert for the provider's domain
+ # with our own CA (for testing purposes). Typically, this cert would
+ # be purchased from a commercial CA, and not signed this way.
+ #
+ def domain_test_signing_profile
+ {
+ "digest" => "SHA256",
+ "extensions" => {
+ "keyUsage" => {
+ "usage" => ["digitalSignature", "keyEncipherment"]
+ },
+ "extendedKeyUsage" => {
+ "usage" => ["serverAuth"]
+ }
+ }
+ }
+ end
+
+ #
+ # This is used when signing a dummy client certificate that is only to be
+ # used for testing.
+ #
+ def client_test_signing_profile
+ {
+ "digest" => "SHA256",
+ "extensions" => {
+ "keyUsage" => {
+ "usage" => ["digitalSignature"]
+ },
+ "extendedKeyUsage" => {
+ "usage" => ["clientAuth"]
+ }
+ }
+ }
+ end
+
+ def dns_names_for_node(node)
+ names = [node.domain.internal, node.domain.full]
+ if node['dns'] && node.dns['aliases'] && node.dns.aliases.any?
+ names += node.dns.aliases
+ end
+ names.compact!
+ names.sort!
+ names.uniq!
+ return names
+ end
+
+ #
+ # For cert serial numbers, we need a non-colliding number less than 160 bits.
+ # md5 will do nicely, since there is no need for a secure hash, just a short one.
+ # (md5 is 128 bits)
+ #
+ def cert_serial_number(domain_name)
+ Digest::MD5.hexdigest("#{domain_name} -- #{Time.now}").to_i(16)
+ end
+
+ #
+ # for the random common name, we need a text string that will be unique across all certs.
+ # ruby 1.8 doesn't have a built-in uuid generator, or we would use SecureRandom.uuid
+ #
+ def random_common_name(domain_name)
+ cert_serial_number(domain_name).to_s(36)
+ end
+
+ # prints CertificateAuthority::DistinguishedName fields
+ def print_dn(dn)
+ fields = {}
+ [:common_name, :locality, :state, :country, :organization, :organizational_unit, :email_address].each do |attr|
+ fields[attr] = dn.send(attr) if dn.send(attr)
+ end
+ fields.inspect
+ end
+
+ ##
+ ## TIME HELPERS
+ ##
+ ## note: we use 'yesterday' instead of 'today', because times are in UTC, and some people on the planet
+ ## are behind UTC.
+ ##
+
+ def yesterday
+ t = Time.now - 24*60*60 # one day, in seconds
+ Time.utc t.year, t.month, t.day
+ end
+
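+ #
+ # parses a time span string of the form "<number> <unit>", such as "10 years"
+ # (the format used by ca.life_span and ca.server_certificates.life_span),
+ # and returns `yesterday` advanced by that amount.
+ #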
+ def yesterday_advance(string)
+ number, unit = string.split(' ')
+ unless ['years', 'months', 'days', 'hours', 'minutes'].include? unit
+ bail!("The time property '#{string}' is missing a unit (one of: years, months, days, hours, minutes).")
+ end
+ unless number.to_i.to_s == number
+ bail!("The time property '#{string}' is missing a number.")
+ end
+ yesterday.advance(unit.to_sym => number.to_i)
+ end
+
+end; end
diff --git a/lib/leap_cli/commands/clean.rb b/lib/leap_cli/commands/clean.rb
new file mode 100644
index 00000000..a9afff53
--- /dev/null
+++ b/lib/leap_cli/commands/clean.rb
@@ -0,0 +1,16 @@
+module LeapCli
+ module Commands
+
+ desc 'Removes all files generated with the "compile" command.'
+ command :clean do |c|
+ c.action do |global_options,options,args|
+ Dir.glob(path([:hiera, '*'])).each do |file|
+ remove_file! file
+ end
+ remove_file! path(:authorized_keys)
+ remove_file! path(:known_hosts)
+ end
+ end
+
+ end
+end
\ No newline at end of file
diff --git a/lib/leap_cli/commands/compile.rb b/lib/leap_cli/commands/compile.rb
new file mode 100644
index 00000000..f9079279
--- /dev/null
+++ b/lib/leap_cli/commands/compile.rb
@@ -0,0 +1,531 @@
+require 'socket'
+
+module LeapCli
+ module Commands
+
+ desc "Compile generated files."
+ command [:compile, :c] do |c|
+ c.desc 'Compiles node configuration files into hiera files used for deployment.'
+ c.arg_name 'ENVIRONMENT', :optional => true
+ c.command :all do |all|
+ all.action do |global_options,options,args|
+ environment = args.first
+ compile_command(environment)
+ end
+ end
+
+ c.desc "Prints a DNS zone file for your provider."
+ c.command :zone do |zone|
+ zone.action do |global_options, options, args|
+ compile_command(nil)
+ compile_zone_file(global_options[:yes] || global_options[:force])
+ end
+ end
+
+ c.desc "Print entries suitable for an /etc/hosts file, useful for testing your provider."
+ c.command :hosts do |hosts|
+ hosts.action do |global_options, options, args|
+ compile_command(nil)
+ compile_hosts_file
+ end
+ end
+
+ c.desc "Compile provider.json bootstrap files for your provider."
+ c.command 'provider.json' do |provider|
+ provider.action do |global_options, options, args|
+ compile_command(nil)
+ compile_provider_json
+ end
+ end
+
+ c.desc "Prints a list of firewall rules. These rules are already "+
+ "implemented on each node, but you might want the list of all "+
+ "rules in case you also have a restrictive network firewall."
+ c.command :firewall do |zone|
+ zone.action do |global_options, options, args|
+ compile_command(nil)
+ compile_firewall
+ end
+ end
+
+ c.default_command :all
+ end
+
+ protected
+
+ def compile_command(environment)
+ if !LeapCli.leapfile.environment.nil? && !environment.nil? && environment != LeapCli.leapfile.environment
+ bail! "You cannot specify an ENVIRONMENT argument while the environment is pinned."
+ end
+ if environment
+ if manager.environment_names.include?(environment)
+ compile_hiera_files(manager.filter([environment]), false)
+ else
+ bail! "There is no environment named `#{environment}`."
+ end
+ else
+ clean_export = LeapCli.leapfile.environment.nil?
+ compile_hiera_files(manager.filter, clean_export)
+ end
+ if file_exists?(:static_web_readme)
+ compile_provider_json(environment)
+ end
+ end
+
+ #
+ # a "clean" export of secrets will also remove keys that are no longer used,
+ # but this should not be done if we are not examining all possible nodes.
+ #
+ def compile_hiera_files(nodes, clean_export)
+ update_certificates(nodes) # \ must come first so that output will
+ update_compiled_ssh_configs # / get included in compiled hiera files.
+ sanity_check(nodes)
+ manager.export_nodes(nodes)
+ manager.export_secrets(clean_export)
+ end
+
+ def update_compiled_ssh_configs
+ generate_monitor_ssh_keys
+ update_authorized_keys
+ update_known_hosts
+ end
+
+ def sanity_check(nodes)
+ # confirm that every node has a unique ip address
+ ips = {}
+ nodes.pick_fields('ip_address').each do |name, ip_address|
+ if ips.key?(ip_address)
+ bail! {
+ log(:fatal_error, "Every node must have its own IP address.") {
+ log "Nodes `#{name}` and `#{ips[ip_address]}` are both configured with `#{ip_address}`."
+ }
+ }
+ else
+ ips[ip_address] = name
+ end
+ end
+ # confirm that the IP address of this machine is not also used for a node.
+ Socket.ip_address_list.each do |addrinfo|
+ if !addrinfo.ipv4_private? && ips.key?(addrinfo.ip_address)
+ ip = addrinfo.ip_address
+ name = ips[ip]
+ bail! {
+ log(:fatal_error, "Something is very wrong. The `leap` command must only be run on your sysadmin machine, not on a provider node.") {
+ log "This machine has the same IP address (#{ip}) as node `#{name}`."
+ }
+ }
+ end
+ end
+ end
+
+ ##
+ ## SSH
+ ##
+
+ #
+ # generates a ssh key pair that is used only by remote monitors
+ # to connect to nodes and run certain allowed commands.
+ #
+ # every node has the public monitor key added to their authorized
+ # keys, and every monitor node has a copy of the private monitor key.
+ #
+ def generate_monitor_ssh_keys
+ priv_key_file = path(:monitor_priv_key)
+ pub_key_file = path(:monitor_pub_key)
+ unless file_exists?(priv_key_file, pub_key_file)
+ ensure_dir(File.dirname(priv_key_file))
+ ensure_dir(File.dirname(pub_key_file))
+ cmd = %(ssh-keygen -N '' -C 'monitor' -t rsa -b 4096 -f '%s') % priv_key_file
+ assert_run! cmd
+ if file_exists?(priv_key_file, pub_key_file)
+ log :created, priv_key_file
+ log :created, pub_key_file
+ else
+ log :failed, 'to create monitor ssh keys'
+ end
+ end
+ end
+
+ #
+ # Compiles the authorized keys file, which gets installed on every node during `leap node init`.
+ # Afterwards, puppet installs an authorized keys file that is generated differently
+ # (see authorized_keys() in macros.rb)
+ #
+ def update_authorized_keys
+ buffer = StringIO.new
+ keys = Dir.glob(path([:user_ssh, '*']))
+ if keys.empty?
+ bail! "You must have at least one public SSH user key configured in order to proceed. See `leap help add-user`."
+ end
+ if file_exists?(path(:monitor_pub_key))
+ keys << path(:monitor_pub_key)
+ end
+ keys.sort.each do |keyfile|
+ ssh_type, ssh_key = File.read(keyfile).strip.split(" ")
+ buffer << ssh_type
+ buffer << " "
+ buffer << ssh_key
+ buffer << " "
+ buffer << Path.relative_path(keyfile)
+ buffer << "\n"
+ end
+ write_file!(:authorized_keys, buffer.string)
+ end
+
+ #
+ # generates the known_hosts file.
+ #
+ # we do a 'late' binding on the hostnames and ip part of the ssh pub key record in order to allow
+ # for the possibility that the hostnames or ip has changed in the node configuration.
+ #
+ def update_known_hosts
+ buffer = StringIO.new
+ buffer << "#\n"
+ buffer << "# This file is automatically generated by the command `leap`. You should NOT modify this file.\n"
+ buffer << "# Instead, rerun `leap node init` on whatever node is causing SSH problems.\n"
+ buffer << "#\n"
+ manager.nodes.keys.sort.each do |node_name|
+ node = manager.nodes[node_name]
+ hostnames = [node.name, node.domain.internal, node.domain.full, node.ip_address].join(',')
+ pub_key = read_file([:node_ssh_pub_key,node.name])
+ if pub_key
+ buffer << [hostnames, pub_key].join(' ')
+ buffer << "\n"
+ end
+ end
+ write_file!(:known_hosts, buffer.string)
+ end
+
+ ##
+ ## provider.json
+ ##
+
+ #
+ # generates static provider.json files that can put into place
+ # (e.g. https://domain/provider.json) for the cases where the
+ # webapp domain does not match the provider's domain.
+ #
+ def compile_provider_json(environments=nil)
+ webapp_nodes = manager.nodes[:services => 'webapp']
+ write_file!(:static_web_readme, STATIC_WEB_README)
+ environments = Array(environments || manager.environment_names)
+ environments.each do |env|
+ node = webapp_nodes[:environment => env].values.first
+ if node
+ env ||= 'default'
+ write_file!(
+ [:static_web_provider_json, env],
+ node['definition_files']['provider']
+ )
+ write_file!(
+ [:static_web_htaccess, env],
+ HTACCESS_FILE % {:min_version => manager.env(env).provider.client_version['min']}
+ )
+ end
+ end
+ end
+
+ HTACCESS_FILE = %[
+<Files provider.json>
+ Header set X-Minimum-Client-Version %{min_version}
+</Files>
+]
+
+ STATIC_WEB_README = %[
+This directory contains statically rendered copies of the `provider.json` file
+used by the client to "bootstrap" configure itself for use with your service
+provider.
+
+There is a separate provider.json file for each environment, although you
+should only need 'production/provider.json' or, if you have no environments
+configured, 'default/provider.json'.
+
+To clarify, this is the public `provider.json` file used by the client, not the
+`provider.json` file that is used to configure the provider.
+
+The provider.json file must be available at `https://domain/provider.json`
+(unless this provider is included in the list of providers which are pre-
+seeded in the client).
+
+This provider.json file can be served correctly in one of three ways:
+
+(1) If the property webapp.domain is not configured, then the web app will be
+ installed at https://domain/ and it will handle serving the provider.json file.
+
+(2) If one or more nodes have the 'static' service configured for the provider's
+ domain, then these 'static' nodes will correctly serve provider.json.
+
+(3) Otherwise, you must copy the provider.json file to your web
+ server and make it available at '/provider.json'. The example htaccess
+ file shows what header options should be sent by the web server
+ with the response.
+
+This directory is needed for method (3), but not for methods (1) or (2).
+
+This directory has been created by the command `leap compile provider.json`.
+Once created, it will be kept up to date every time you compile. You may safely
+remove this directory if you don't use it.
+]
+
+ ##
+ ##
+ ## ZONE FILE
+ ##
+
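+ #
+ # strips the provider's domain (and its leading dot) from a fully qualified
+ # hostname, e.g. "vpn1.example.org" becomes "vpn1" when the provider domain
+ # is "example.org".
+ #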
+ def relative_hostname(fqdn, provider)
+ @domain_regexp ||= /\.?#{Regexp.escape(provider.domain)}$/
+ fqdn.sub(@domain_regexp, '')
+ end
+
+ #
+ # serial is any number less than 2^32 (4294967296)
+ #
+ def compile_zone_file(force=false)
+ # note: we use the default provider for all nodes, because we use it
+ # to generate hostnames that are relative to the default domain.
+ provider = manager.env('default').provider
+ hosts_seen = {}
+ lines = []
+
+ #
+ # header
+ #
+ lines << ZONE_HEADER % {
+ :domain => provider.domain,
+ :ns => provider.domain,
+ :contact => provider.contacts.default.first.sub('@','.'),
+ :serial => generate_zone_serial
+ }
+
+ #
+ # common records
+ #
+ lines << ORIGIN_HEADER
+ # 'A' records for primary domain
+ manager.nodes[:environment => '!local'].each_node do |node|
+ if node.dns['aliases'] && node.dns.aliases.include?(provider.domain)
+ lines << ["@", "IN A #{node.ip_address}"]
+ end
+ end
+
+ # NS records
+ if provider['dns'] && provider.dns['nameservers']
+ unless provider.dns.nameservers.is_a?(Array)
+ # TODO: remove me once we have JSON schema working
+ bail! {log :error, 'dns.nameservers must be an array' }
+ end
+ provider.dns.nameservers.each do |ns|
+ lines << ["@", "IN NS #{ns}."]
+ end
+ elsif !force
+ log :warning, "Property dns.nameservers is not configured in provider.json." do
+ log "This will produce a zone file without any NS records."
+ log "Use --force to skip this warning."
+ end
+ return unless agree("Continue? ")
+ end
+
+ # environment records
+ manager.environment_names.each do |env|
+ next if env == 'local'
+ nodes = manager.nodes[:environment => env]
+ next unless nodes.any?
+ spf = nil
+ dkim = nil
+ lines << ENV_HEADER % (env.nil? ? 'default' : env)
+ nodes.each_node do |node|
+ if node.dns.public
+ lines << [relative_hostname(node.domain.full, provider), "IN A #{node.ip_address}"]
+ end
+ if node.dns['aliases']
+ node.dns.aliases.each do |host_alias|
+ if host_alias != node.domain.full && host_alias != provider.domain
+ lines << [relative_hostname(host_alias, provider), "IN A #{node.ip_address}"]
+ end
+ end
+ end
+ if node.services.include? 'mx'
+ mx_domain = relative_hostname(node.domain.full_suffix, provider)
+ lines << [mx_domain, "IN MX 10 #{relative_hostname(node.domain.full, provider)}"]
+ spf ||= [mx_domain, spf_record(node)]
+ dkim ||= dkim_record(node, provider)
+ end
+ end
+ lines << spf if spf
+ lines << dkim if dkim
+ end
+
+ # print the lines
+ max_width = lines.inject(0) {|max, line| line.is_a?(Array) ? [max, line[0].length].max : max}
+ max_width = [max_width, 24].min
+ lines.each do |host, line|
+ if line.nil?
+ puts(host)
+ else
+ host = '@' if host == ''
+ puts("%-#{max_width}s %s" % [host, line])
+ end
+ end
+ end
+
+ #
+ # outputs entries suitable for an /etc/hosts file
+ #
+ def compile_hosts_file
+ manager.environment_names.each do |env|
+ nodes = manager.nodes[:environment => env]
+ next unless nodes.any?
+ puts
+ puts "## environment '#{env || 'default'}'"
+ nodes.each do |name, node|
+ puts "%s %s" % [
+ node.ip_address,
+ [name, node.get('domain.full'), node.get('dns.aliases')].compact.join(' ')
+ ]
+ end
+ end
+ end
+
+ private
+
+ #
+ # allow mail from any mx node, plus the webapp nodes.
+ #
+ # TODO: ipv6
+ #
+ def spf_record(node)
+ ips = node.nodes_like_me['services' => 'webapp'].values.collect {|n|
+ "ip4:" + n.ip_address
+ }
+ # TXT strings may not be longer than 255 characters, although
+ # you can chain multiple strings together.
+ strings = "v=spf1 MX #{ips.join(' ')} -all".scan(/.{1,255}/).join('" "')
+ %(IN TXT "#{strings}")
+ end
+
+ #
+ # for example:
+ #
+ # selector._domainkey IN TXT "v=DKIM1;h=sha256;k=rsa;s=email;p=MIGfMA0GCSq...GSIb3DQ"
+ #
+ # specification: http://dkim.org/specs/rfc4871-dkimbase.html#rfc.section.7.4
+ #
+ def dkim_record(node, provider)
+ # PEM encoded public key (base64), without the ---PUBLIC KEY--- armor parts.
+ assert_files_exist! :dkim_pub_key
+ dkim_pub_key = Path.named_path(:dkim_pub_key)
+ public_key = File.readlines(dkim_pub_key).grep(/^[^\-]+/).map(&:strip).join
+
+ host = relative_hostname(
+ node.mx.dkim.selector + "._domainkey." + node.domain.full_suffix,
+ provider)
+
+ attrs = [
+ "v=DKIM1",
+ "h=sha256",
+ "k=rsa",
+ "s=email",
+ "p=" + public_key
+ ]
+
+ return [host, "IN TXT " + txt_wrap(attrs.join(';'))]
+ end
+
+ #
+ # DNS TXT records cannot be longer than 255 characters.
+ #
+ # However, multiple responses will be concatenated together.
+ # It looks like this:
+ #
+ # IN TXT "v=spf1 .... first" "second string..."
+ #
+ def txt_wrap(str)
+ '"' + str.scan(/.{1,255}/).join('" "') + '"'
+ end
+
+ #
+ # For zone serial number, we want something that will be
+ # different each time you deploy but also will be greater
+ # than any prior likely serial that was prefixed by the
+ # year, such as 2016040600.
+ #
+ # so, we use time_t of right now, modified with first
+ # digit incremented by one.
+ #
+ # this will work until Time.at(2**32 - 1_000_000_000)
+ # aka 2074-05-31 04:41:36 UTC.
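+ #
+ # for example, a deploy at 2016-04-06 00:00:00 UTC (time_t 1459900800)
+ # yields the serial 2459900800, which is greater than a date-prefixed
+ # serial like 2016040600.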
+ #
+ def generate_zone_serial
+ Time.now.utc.to_i + 1_000_000_000
+ end
+
+ ENV_HEADER = %[
+;;
+;; ENVIRONMENT %s
+;;
+
+]
+
+ ZONE_HEADER = %[
+;;
+;; BIND data file for %{domain}
+;;
+
+$TTL 600
+$ORIGIN %{domain}.
+
+@ IN SOA %{ns}. %{contact}. (
+ %{serial} ; serial
+ 7200 ; refresh (2 hours)
+ 3600 ; retry (1 hour)
+ 1209600 ; expire (14 days)
+ 600 ) ; minimum (10 minutes)
+;
+]
+
+ ORIGIN_HEADER = %[
+;;
+;; ZONE ORIGIN
+;;
+
+]
+
+ ##
+ ## FIREWALL
+ ##
+
+ public
+
+ def compile_firewall
+ manager.nodes.each_node(&:evaluate)
+
+ rules = [["ALLOW TO", "PORTS", "ALLOW FROM"]]
+ manager.nodes[:environment => '!local'].values.each do |node|
+ next unless node['firewall']
+ node.firewall.each do |name, rule|
+ if rule.is_a? Hash
+ rules << add_rule(rule)
+ elsif rule.is_a? Array
+ rule.each do |r|
+ rules << add_rule(r)
+ end
+ end
+ end
+ end
+
+ max_to = rules.inject(0) {|max, r| [max, r[0].length].max}
+ max_port = rules.inject(0) {|max, r| [max, r[1].length].max}
+ max_from = rules.inject(0) {|max, r| [max, r[2].length].max}
+ rules.each do |rule|
+ puts "%-#{max_to}s %-#{max_port}s %-#{max_from}s" % rule
+ end
+ end
+
+ private
+
+ def add_rule(rule)
+ [rule["to"], [rule["port"]].compact.join(','), rule["from"]]
+ end
+
+ end
+end
\ No newline at end of file
diff --git a/lib/leap_cli/commands/db.rb b/lib/leap_cli/commands/db.rb
new file mode 100644
index 00000000..5307ac4d
--- /dev/null
+++ b/lib/leap_cli/commands/db.rb
@@ -0,0 +1,86 @@
+module LeapCli; module Commands
+
+ desc 'Database commands.'
+ command :db do |db|
+ db.desc 'Destroy one or more databases. If present, limit to FILTER nodes. For example `leap db destroy --db sessions,tokens testing`.'
+ db.arg_name 'FILTER', :optional => true
+ db.command :destroy do |destroy|
+ destroy.flag :db, :arg_name => "DATABASES", :desc => 'Comma separated list of databases to destroy (no space). Use "--db all" to destroy all databases.', :optional => true
+ destroy.flag :user, :arg_name => "USERS", :desc => 'Comma separated list of usernames. The storage databases for these user(s) will be destroyed.', :optional => true
+ destroy.action do |global_options,options,args|
+ dbs = (options[:db]||"").split(',')
+ users = (options[:user]||"").split(',')
+ if dbs.empty? && users.empty?
+ bail!('Either --db or --user is required.')
+ end
+ nodes = manager.filter(args)
+ if nodes.any?
+ nodes = nodes[:services => 'couchdb']
+ end
+ unless nodes.any?
+ bail! 'No db nodes selected.'
+ end
+ if users.any?
+ unless global_options[:yes]
+ say 'You are about to permanently destroy user databases for [%s] for nodes [%s].' % [users.join(', '), nodes.keys.join(', ')]
+ bail! unless agree("Continue? ")
+ end
+ destroy_user_dbs(nodes, users)
+ elsif dbs.any?
+ unless global_options[:yes]
+ if dbs.include?('all')
+ say 'You are about to permanently destroy all database data for nodes [%s].' % nodes.keys.join(', ')
+ else
+ say 'You are about to permanently destroy databases [%s] for nodes [%s].' % [dbs.join(', '), nodes.keys.join(', ')]
+ end
+ bail! unless agree("Continue? ")
+ end
+ if dbs.include?('all')
+ destroy_all_dbs(nodes)
+ else
+ destroy_dbs(nodes, dbs)
+ end
+ say 'You must run `leap deploy` in order to create the databases again.'
+ end
+ end
+ end
+ end
+
+ private
+
+ def destroy_all_dbs(nodes)
+ ssh_connect(nodes) do |ssh|
+ ssh.run('/etc/init.d/bigcouch stop && test ! -z "$(ls /opt/bigcouch/var/lib/ 2> /dev/null)" && rm -r /opt/bigcouch/var/lib/* && echo "All DBs destroyed" || echo "DBs already destroyed"')
+ end
+ end
+
+ def destroy_dbs(nodes, dbs)
+ nodes.each_node do |node|
+ ssh_connect(node) do |ssh|
+ dbs.each do |db|
+ ssh.run(DESTROY_DB_COMMAND % {:db => db})
+ end
+ end
+ end
+ end
+
+ def destroy_user_dbs(nodes, users)
+ nodes.each_node do |node|
+ ssh_connect(node) do |ssh|
+ users.each do |user|
+ ssh.run(DESTROY_USER_DB_COMMAND % {:user => user})
+ end
+ end
+ end
+ end
+
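+ # this shell snippet is run on each couchdb node: it first checks that the
+ # database exists (HTTP 200 from the local couch API on port 5984) and only
+ # then issues the DELETE.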
+ DESTROY_DB_COMMAND = %{
+if [ 200 = `curl -ns -w "%%{http_code}" -X GET "127.0.0.1:5984/%{db}" -o /dev/null` ]; then
+ echo "Result from DELETE /%{db}:" `curl -ns -X DELETE "127.0.0.1:5984/%{db}"`;
+else
+ echo "Skipping db '%{db}': it does not exist or has already been deleted.";
+fi
+}
+
+ DESTROY_USER_DB_COMMAND = %{/srv/leap/couchdb/scripts/destroy-user-db --username %{user}}
+end; end
diff --git a/lib/leap_cli/commands/deploy.rb b/lib/leap_cli/commands/deploy.rb
new file mode 100644
index 00000000..9dd190ab
--- /dev/null
+++ b/lib/leap_cli/commands/deploy.rb
@@ -0,0 +1,374 @@
+require 'etc'
+
+module LeapCli
+ module Commands
+
+ desc 'Apply recipes to a node or set of nodes.'
+ long_desc 'The FILTER can be the name of a node, service, or tag.'
+ arg_name 'FILTER'
+ command [:deploy, :d] do |c|
+
+ c.switch :fast, :desc => 'Makes the deploy command faster by skipping some slow steps. A "fast" deploy can be used safely if you recently completed a normal deploy.',
+ :negatable => false
+
+ c.switch :sync, :desc => "Sync files, but don't actually apply recipes.", :negatable => false
+
+ c.switch :force, :desc => 'Deploy even if there is a lockfile.', :negatable => false
+
+ c.switch :downgrade, :desc => 'Allows deploy to run with an older platform version.', :negatable => false
+
+ c.switch :dev, :desc => "Development mode: don't run 'git submodule update' before deploy.", :negatable => false
+
+ c.flag :tags, :desc => 'Specify tags to pass through to puppet (overriding the default).',
+ :arg_name => 'TAG[,TAG]'
+
+ c.flag :port, :desc => 'Override the default SSH port.',
+ :arg_name => 'PORT'
+
+ c.flag :ip, :desc => 'Override the default SSH IP address.',
+ :arg_name => 'IPADDRESS'
+
+ c.action do |global,options,args|
+
+ if options[:dev] != true
+ init_submodules
+ end
+
+ nodes = manager.filter!(args, :disabled => false)
+ if nodes.size > 1
+ say "Deploying to these nodes: #{nodes.keys.join(', ')}"
+ if !global[:yes] && !agree("Continue? ")
+ quit! "OK. Bye."
+ end
+ end
+
+ environments = nodes.field('environment').uniq
+ if environments.empty?
+ environments = [nil]
+ end
+ environments.each do |env|
+ check_platform_pinning(env, global)
+ end
+
+ # compile hiera files for all the nodes in every environment that is
+ # being deployed and only those environments.
+ compile_hiera_files(manager.filter(environments), false)
+
+ ssh_connect(nodes, connect_options(options)) do |ssh|
+ ssh.leap.log :checking, 'node' do
+ ssh.leap.check_for_no_deploy
+ ssh.leap.assert_initialized
+ end
+ ssh.leap.log :synching, "configuration files" do
+ sync_hiera_config(ssh)
+ sync_support_files(ssh)
+ end
+ ssh.leap.log :synching, "puppet manifests" do
+ sync_puppet_files(ssh)
+ end
+ unless options[:sync]
+ ssh.leap.log :applying, "puppet" do
+ ssh.puppet.apply(:verbosity => [LeapCli.log_level,5].min,
+ :tags => tags(options),
+ :force => options[:force],
+ :info => deploy_info,
+ :downgrade => options[:downgrade]
+ )
+ end
+ end
+ end
+ if !Util.exit_status.nil? && Util.exit_status != 0
+ log :warning, "puppet did not finish successfully."
+ end
+ end
+ end
+
+ desc 'Display recent deployment history for a set of nodes.'
+ long_desc 'The FILTER can be the name of a node, service, or tag.'
+ arg_name 'FILTER'
+ command [:history, :h] do |c|
+ c.flag :port, :desc => 'Override the default SSH port.',
+ :arg_name => 'PORT'
+ c.flag :ip, :desc => 'Override the default SSH IP address.',
+ :arg_name => 'IPADDRESS'
+ c.switch :last, :desc => 'Show last deploy only',
+ :negatable => false
+ c.action do |global,options,args|
+ if options[:last] == true
+ lines = 1
+ else
+ lines = 10
+ end
+ nodes = manager.filter!(args)
+ ssh_connect(nodes, connect_options(options)) do |ssh|
+ ssh.leap.history(lines)
+ end
+ end
+ end
+
+ private
+
+ def forcible_prompt(forced, msg, prompt)
+ say(msg)
+ if forced
+ log :warning, "continuing anyway because of --force"
+ else
+ say "hint: use --force to skip this prompt."
+ quit!("OK. Bye.") unless agree(prompt)
+ end
+ end
+
+ #
+ # The currently activated provider.json could have loaded some pinning
+ # information for the platform. If this is the case, refuse to deploy
+ # if there is a mismatch.
+ #
+ # For example:
+ #
+ # "platform": {
+ # "branch": "develop"
+ # "version": "1.0..99"
+ # "commit": "e1d6280e0a8c565b7fb1a4ed3969ea6fea31a5e2..HEAD"
+ # }
+ #
+ def check_platform_pinning(environment, global_options)
+ provider = manager.env(environment).provider
+ return unless provider['platform']
+
+ if environment.nil? || environment == 'default'
+ provider_json = 'provider.json'
+ else
+ provider_json = 'provider.' + environment + '.json'
+ end
+
+ # can we have json schema verification already?
+ unless provider.platform.is_a? Hash
+ bail!("`platform` attribute in #{provider_json} must be a hash (was %s)." % provider.platform.inspect)
+ end
+
+ # check version
+ if provider.platform['version']
+ if !Leap::Platform.version_in_range?(provider.platform.version)
+ forcible_prompt(
+ global_options[:force],
+ "The platform is pinned to a version range of '#{provider.platform.version}' "+
+ "by the `platform.version` property in #{provider_json}, but the platform "+
+ "(#{Path.platform}) has version #{Leap::Platform.version}.",
+ "Do you really want to deploy from the wrong version? "
+ )
+ end
+ end
+
+ # check branch
+ if provider.platform['branch']
+ if !is_git_directory?(Path.platform)
+ forcible_prompt(
+ global_options[:force],
+ "The platform is pinned to a particular branch by the `platform.branch` property "+
+ "in #{provider_json}, but the platform directory (#{Path.platform}) is not a git repository.",
+ "Do you really want to deploy anyway? "
+ )
+ end
+ unless provider.platform.branch == current_git_branch(Path.platform)
+ forcible_prompt(
+ global_options[:force],
+ "The platform is pinned to branch '#{provider.platform.branch}' by the `platform.branch` property "+
+ "in #{provider_json}, but the current branch is '#{current_git_branch(Path.platform)}' " +
+ "(for directory '#{Path.platform}')",
+ "Do you really want to deploy from the wrong branch? "
+ )
+ end
+ end
+
+ # check commit
+ if provider.platform['commit']
+ if !is_git_directory?(Path.platform)
+ forcible_prompt(
+ global_options[:force],
+ "The platform is pinned to a particular commit range by the `platform.commit` property "+
+ "in #{provider_json}, but the platform directory (#{Path.platform}) is not a git repository.",
+ "Do you really want to deploy anyway? "
+ )
+ end
+ current_commit = current_git_commit(Path.platform)
+ Dir.chdir(Path.platform) do
+ commit_range = assert_run!("git log --pretty='format:%H' '#{provider.platform.commit}'",
+ "The platform is pinned to a particular commit range by the `platform.commit` property "+
+ "in #{provider_json}, but git was not able to find commits in the range specified "+
+ "(#{provider.platform.commit}).")
+ commit_range = commit_range.split("\n")
+ if !commit_range.include?(current_commit) &&
+ provider.platform.commit.split('..').first != current_commit
+ forcible_prompt(
+ global_options[:force],
+ "The platform is pinned via the `platform.commit` property in #{provider_json} " +
+ "to a commit in the range #{provider.platform.commit}, but the current HEAD " +
+ "(#{current_commit}) is not in that range.",
+ "Do you really want to deploy from the wrong commit? "
+ )
+ end
+ end
+ end
+ end
+
+ def sync_hiera_config(ssh)
+ ssh.rsync.update do |server|
+ node = manager.node(server.host)
+ hiera_file = Path.relative_path([:hiera, node.name])
+ ssh.leap.log hiera_file + ' -> ' + node.name + ':' + Leap::Platform.hiera_path
+ {
+ :source => hiera_file,
+ :dest => Leap::Platform.hiera_path,
+ :flags => "-rltp --chmod=u+rX,go-rwx"
+ }
+ end
+ end
+
+ #
+ # sync various support files.
+ #
+ def sync_support_files(ssh)
+ dest_dir = Leap::Platform.files_dir
+ custom_files = build_custom_file_list
+ ssh.rsync.update do |server|
+ node = manager.node(server.host)
+ files_to_sync = node.file_paths.collect {|path| Path.relative_path(path, Path.provider) }
+ files_to_sync += custom_files
+ if files_to_sync.any?
+ ssh.leap.log(files_to_sync.join(', ') + ' -> ' + node.name + ':' + dest_dir)
+ {
+ :chdir => Path.named_path(:files_dir),
+ :source => ".",
+ :dest => dest_dir,
+ :excludes => "*",
+ :includes => calculate_includes_from_files(files_to_sync, '/files'),
+ :flags => "-rltp --chmod=u+rX,go-rwx --relative --delete --delete-excluded --copy-links"
+ }
+ else
+ nil
+ end
+ end
+ end
+
+ def sync_puppet_files(ssh)
+ ssh.rsync.update do |server|
+ ssh.leap.log(Path.platform + '/[bin,tests,puppet] -> ' + server.host + ':' + Leap::Platform.leap_dir)
+ {
+ :dest => Leap::Platform.leap_dir,
+ :source => '.',
+ :chdir => Path.platform,
+ :excludes => '*',
+ :includes => ['/bin', '/bin/**', '/puppet', '/puppet/**', '/tests', '/tests/**'],
+ :flags => "-rlt --relative --delete --copy-links"
+ }
+ end
+ end
+
+ #
+ # ensure submodules are up to date, if the platform is a git
+ # repository.
+ #
+ def init_submodules
+ return unless is_git_directory?(Path.platform)
+ Dir.chdir Path.platform do
+ assert_run! "git submodule sync"
+ statuses = assert_run! "git submodule status"
+ statuses.strip.split("\n").each do |status_line|
+ if status_line =~ /^[\+-]/
+ submodule = status_line.split(' ')[1]
+ log "Updating submodule #{submodule}"
+ assert_run! "git submodule update --init #{submodule}"
+ end
+ end
+ end
+ end
+
+ #
+ # converts an array of file paths into an array
+ # suitable for --include of rsync
+ #
+ # if set, `prefix` is stripped off.
+ #
+ def calculate_includes_from_files(files, prefix=nil)
+ return nil unless files and files.any?
+
+ # prepend '/' (kind of like ^ for rsync)
+ includes = files.collect {|file| file =~ /^\// ? file : '/' + file }
+
+ # include all sub files of specified directories
+ includes.size.times do |i|
+ if includes[i] =~ /\/$/
+ includes << includes[i] + '**'
+ end
+ end
+
+ # include all parent directories (required because of --exclude '*')
+ includes.size.times do |i|
+ path = File.dirname(includes[i])
+ while(path != '/')
+ includes << path unless includes.include?(path)
+ path = File.dirname(path)
+ end
+ end
+
+ if prefix
+ includes.map! {|path| path.sub(/^#{Regexp.escape(prefix)}\//, '/')}
+ end
+
+ return includes
+ end
+
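+ #
+ # returns the comma-separated list of puppet tags to apply: the --tags option
+ # if given, otherwise the platform defaults, with 'leap_slow' appended unless
+ # --fast was used.
+ #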
+ def tags(options)
+ if options[:tags]
+ tags = options[:tags].split(',')
+ else
+ tags = Leap::Platform.default_puppet_tags.dup
+ end
+ tags << 'leap_slow' unless options[:fast]
+ tags.join(',')
+ end
+
+ #
+ # a provider might have various customization files that should be sync'ed to the server.
+ # this method builds that list of files to sync.
+ #
+ def build_custom_file_list
+ custom_files = []
+ Leap::Platform.paths.keys.grep(/^custom_/).each do |path|
+ if file_exists?(path)
+ relative_path = Path.relative_path(path, Path.provider)
+ if dir_exists?(path)
+ custom_files << relative_path + '/' # rsync needs trailing slash
+ else
+ custom_files << relative_path
+ end
+ end
+ end
+ return custom_files
+ end
+
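+ #
+ # builds a one-line summary of the deploy: the local user, the platform
+ # version (plus git branch and short commit when not on master), and the
+ # leap_cli version. this gets passed along to the puppet apply step.
+ #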
+ def deploy_info
+ info = []
+ info << "user: %s" % Etc.getpwuid(Process.euid).name
+ if is_git_directory?(Path.platform) && current_git_branch(Path.platform) != 'master'
+ info << "platform: %s (%s %s)" % [
+ Leap::Platform.version,
+ current_git_branch(Path.platform),
+ current_git_commit(Path.platform)[0..4]
+ ]
+ else
+ info << "platform: %s" % Leap::Platform.version
+ end
+ if is_git_directory?(LEAP_CLI_BASE_DIR)
+ info << "leap_cli: %s (%s %s)" % [
+ LeapCli::VERSION,
+ current_git_branch(LEAP_CLI_BASE_DIR),
+ current_git_commit(LEAP_CLI_BASE_DIR)[0..4]
+ ]
+ else
+ info << "leap_cli: %s" % LeapCli::VERSION
+ end
+ info.join(', ')
+ end
+ end
+end
diff --git a/lib/leap_cli/commands/env.rb b/lib/leap_cli/commands/env.rb
new file mode 100644
index 00000000..80be2174
--- /dev/null
+++ b/lib/leap_cli/commands/env.rb
@@ -0,0 +1,76 @@
+module LeapCli
+ module Commands
+
+ desc "Manipulate and query environment information."
+ long_desc "The 'environment' node property can be used to isolate sets of nodes into entirely separate environments. "+
+ "A node in one environment will never interact with a node from another environment. "+
+ "Environment pinning works by modifying your ~/.leaprc file and is dependent on the "+
+ "absolute file path of your provider directory (pins don't apply if you move the directory)"
+ command [:env, :e] do |c|
+ c.desc "List the available environments. The pinned environment, if any, will be marked with '*'. Will also set the pin if run with an environment argument."
+ c.arg_name 'ENVIRONMENT', :optional => true
+ c.command :ls do |ls|
+ ls.action do |global_options, options, args|
+ environment = get_env_from_args(args)
+ if environment
+ pin(environment)
+ LeapCli.leapfile.load
+ end
+ print_envs
+ end
+ end
+
+ c.desc 'Pin the environment to ENVIRONMENT. All subsequent commands will only apply to nodes in this environment.'
+ c.arg_name 'ENVIRONMENT'
+ c.command :pin do |pin|
+ pin.action do |global_options,options,args|
+ environment = get_env_from_args(args)
+ if environment
+ pin(environment)
+ else
+ bail! "There is no environment `#{environment}`"
+ end
+ end
+ end
+
+ c.desc "Unpin the environment. All subsequent commands will apply to all nodes."
+ c.command :unpin do |unpin|
+ unpin.action do |global_options, options, args|
+ LeapCli.leapfile.unset('environment')
+ log 0, :saved, "~/.leaprc, removing environment property."
+ end
+ end
+
+ c.default_command :ls
+ end
+
+ protected
+
+ def get_env_from_args(args)
+ environment = args.first
+ if environment == 'default' || (environment && manager.environment_names.include?(environment))
+ return environment
+ else
+ return nil
+ end
+ end
+
+ def pin(environment)
+ LeapCli.leapfile.set('environment', environment)
+ log 0, :saved, "~/.leaprc with environment set to #{environment}."
+ end
+
+ def print_envs
+ envs = ["default"] + manager.environment_names.compact.sort
+ envs.each do |env|
+ if env
+ if LeapCli.leapfile.environment == env
+ puts "* #{env}"
+ else
+ puts " #{env}"
+ end
+ end
+ end
+ end
+ end
+end
\ No newline at end of file
diff --git a/lib/leap_cli/commands/facts.rb b/lib/leap_cli/commands/facts.rb
new file mode 100644
index 00000000..11329ccc
--- /dev/null
+++ b/lib/leap_cli/commands/facts.rb
@@ -0,0 +1,100 @@
+#
+# Gather facter facts
+#
+
+module LeapCli; module Commands
+
+ desc 'Gather information on nodes.'
+ command :facts do |facts|
+ facts.desc 'Query servers to update facts.json.'
+ facts.long_desc "Queries every node included in FILTER and saves the important information to facts.json"
+ facts.arg_name 'FILTER'
+ facts.command :update do |update|
+ update.action do |global_options,options,args|
+ update_facts(global_options, options, args)
+ end
+ end
+ end
+
+ protected
+
+ def facter_cmd
+ 'facter --json ' + Leap::Platform.facts.join(' ')
+ end
+
+ def remove_node_facts(name)
+ if file_exists?(:facts)
+ update_facts_file({name => nil})
+ end
+ end
+
+ def update_node_facts(name, facts)
+ update_facts_file({name => facts})
+ end
+
+ def rename_node_facts(old_name, new_name)
+ if file_exists?(:facts)
+ facts = JSON.parse(read_file(:facts) || '{}')
+ facts[new_name] = facts[old_name]
+ facts[old_name] = nil
+ update_facts_file(facts, true)
+ end
+ end
+
+ #
+ # if overwrite = true, then ignore existing facts.json.
+ #
+ def update_facts_file(new_facts, overwrite=false)
+ replace_file!(:facts) do |content|
+ if overwrite || content.nil? || content.empty?
+ old_facts = {}
+ else
+ old_facts = manager.facts
+ end
+ facts = old_facts.merge(new_facts)
+ facts.each do |name, value|
+ if value.is_a? String
+ if value == ""
+ value = nil
+ else
+ value = JSON.parse(value) rescue JSON::ParserError
+ end
+ end
+ if value.is_a? Hash
+ value.delete_if {|key,v| v.nil?}
+ end
+ facts[name] = value
+ end
+ facts.delete_if do |name, value|
+ value.nil? || value.empty?
+ end
+ if facts.empty?
+ "{}\n"
+ else
+ JSON.sorted_generate(facts) + "\n"
+ end
+ end
+ end
+
+ private
+
+ def update_facts(global_options, options, args)
+ nodes = manager.filter(args, :local => false, :disabled => false)
+ new_facts = {}
+ ssh_connect(nodes) do |ssh|
+ ssh.leap.run_with_progress(facter_cmd) do |response|
+ node = manager.node(response[:host])
+ if node
+ new_facts[node.name] = response[:data].strip
+ else
+ log :warning, 'Could not find node for hostname %s' % response[:host]
+ end
+ end
+ end
+ # only overwrite the entire facts file if we are gathering facts
+ # for all nodes in all environments.
+ overwrite_existing = args.empty? && LeapCli.leapfile.environment.nil?
+ update_facts_file(new_facts, overwrite_existing)
+ end
+
+end; end
\ No newline at end of file
diff --git a/lib/leap_cli/commands/info.rb b/lib/leap_cli/commands/info.rb
new file mode 100644
index 00000000..52225a94
--- /dev/null
+++ b/lib/leap_cli/commands/info.rb
@@ -0,0 +1,15 @@
+module LeapCli; module Commands
+
+ desc 'Prints information regarding facts, history, and running processes for a node or nodes.'
+ long_desc 'The FILTER can be the name of a node, service, or tag.'
+ arg_name 'FILTER'
+ command [:info] do |c|
+ c.action do |global,options,args|
+ nodes = manager.filter!(args)
+ ssh_connect(nodes, connect_options(options)) do |ssh|
+ ssh.leap.debug
+ end
+ end
+ end
+
+end; end
diff --git a/lib/leap_cli/commands/inspect.rb b/lib/leap_cli/commands/inspect.rb
new file mode 100644
index 00000000..20654fa7
--- /dev/null
+++ b/lib/leap_cli/commands/inspect.rb
@@ -0,0 +1,144 @@
+module LeapCli; module Commands
+
+ desc 'Prints details about a file. Alternately, the argument FILE can be the name of a node, service or tag.'
+ arg_name 'FILE'
+ command [:inspect, :i] do |c|
+ c.switch 'base', :desc => 'Inspect the FILE from the provider_base (i.e. without local inheritance).', :negatable => false
+ c.action do |global_options,options,args|
+ object = args.first
+ assert! object, 'A file path or node/service/tag name is required'
+ method = inspection_method(object)
+ if method && defined?(method)
+ self.send(method, object, options)
+ else
+ log "Sorry, I don't know how to inspect that."
+ end
+ end
+ end
+
+ private
+
+ FTYPE_MAP = {
+ "PEM certificate" => :inspect_x509_cert,
+ "PEM RSA private key" => :inspect_x509_key,
+ "OpenSSH RSA public key" => :inspect_ssh_pub_key,
+ "PEM certificate request" => :inspect_x509_csr
+ }
+
+ def inspection_method(object)
+ if File.exists?(object)
+ ftype = `file #{object}`.split(':').last.strip
+ log 2, "file is of type '#{ftype}'"
+ if FTYPE_MAP[ftype]
+ FTYPE_MAP[ftype]
+ elsif File.extname(object) == ".json"
+ full_path = File.expand_path(object, Dir.pwd)
+ if path_match?(:node_config, full_path)
+ :inspect_node
+ elsif path_match?(:service_config, full_path)
+ :inspect_service
+ elsif path_match?(:tag_config, full_path)
+ :inspect_tag
+ elsif path_match?(:provider_config, full_path) || path_match?(:provider_env_config, full_path)
+ :inspect_provider
+ elsif path_match?(:common_config, full_path)
+ :inspect_common
+ else
+ nil
+ end
+ end
+ elsif manager.nodes[object]
+ :inspect_node
+ elsif manager.services[object]
+ :inspect_service
+ elsif manager.tags[object]
+ :inspect_tag
+ elsif object == "common"
+ :inspect_common
+ elsif object == "provider"
+ :inspect_provider
+ else
+ nil
+ end
+ end
+
+ #
+ # inspectors
+ #
+
+ def inspect_x509_key(file_path, options)
+ assert_bin! 'openssl'
+ puts assert_run! 'openssl rsa -in %s -text -check' % file_path
+ end
+
+ def inspect_x509_cert(file_path, options)
+ assert_bin! 'openssl'
+ puts assert_run! 'openssl x509 -in %s -text -noout' % file_path
+ log 0, :"SHA256 fingerprint", X509.fingerprint("SHA256", file_path)
+ end
+
+ def inspect_x509_csr(file_path, options)
+ assert_bin! 'openssl'
+ puts assert_run! 'openssl req -text -noout -verify -in %s' % file_path
+ end
+
+ #def inspect_ssh_pub_key(file_path)
+ #end
+
+ def inspect_node(arg, options)
+ inspect_json manager.nodes[name(arg)]
+ end
+
+ def inspect_service(arg, options)
+ if options[:base]
+ inspect_json manager.base_services[name(arg)]
+ else
+ inspect_json manager.services[name(arg)]
+ end
+ end
+
+ def inspect_tag(arg, options)
+ if options[:base]
+ inspect_json manager.base_tags[name(arg)]
+ else
+ inspect_json manager.tags[name(arg)]
+ end
+ end
+
+ def inspect_provider(arg, options)
+ if options[:base]
+ inspect_json manager.base_provider
+ elsif arg =~ /provider\.(.*)\.json/
+ inspect_json manager.env($1).provider
+ else
+ inspect_json manager.provider
+ end
+ end
+
+ def inspect_common(arg, options)
+ if options[:base]
+ inspect_json manager.base_common
+ else
+ inspect_json manager.common
+ end
+ end
+
+ #
+ # helpers
+ #
+
+ def name(arg)
+ File.basename(arg).sub(/\.json$/, '')
+ end
+
+ def inspect_json(config)
+ if config
+ puts JSON.sorted_generate(config)
+ end
+ end
+
+ def path_match?(path_symbol, path)
+ Dir.glob(Path.named_path([path_symbol, '*'])).include?(path)
+ end
+
+end; end
diff --git a/lib/leap_cli/commands/list.rb b/lib/leap_cli/commands/list.rb
new file mode 100644
index 00000000..aa425432
--- /dev/null
+++ b/lib/leap_cli/commands/list.rb
@@ -0,0 +1,132 @@
+require 'command_line_reporter'
+
+module LeapCli; module Commands
+
+ desc 'List nodes and their classifications'
+ long_desc 'Prints out a listing of nodes, services, or tags. ' +
+ 'If present, the FILTER can be a list of names of nodes, services, or tags. ' +
+ 'If the name is prefixed with +, this acts like an AND condition. ' +
+ "For example:\n\n" +
+ "`leap list node1 node2` matches all nodes named \"node1\" OR \"node2\"\n\n" +
+ "`leap list openvpn +local` matches all nodes with service \"openvpn\" AND tag \"local\""
+
+ arg_name 'FILTER', :optional => true
+ command [:list,:ls] do |c|
+ c.flag 'print', :desc => 'What attributes to print (optional)'
+ c.switch 'disabled', :desc => 'Include disabled nodes in the list.', :negatable => false
+ c.action do |global_options,options,args|
+ # don't rely on default manager(), because we want to pass custom options to load()
+ manager = LeapCli::Config::Manager.new
+ if global_options[:color]
+ colors = ['cyan', 'white']
+ else
+ colors = [nil, nil]
+ end
+ puts
+ manager.load(:include_disabled => options['disabled'], :continue_on_error => true)
+ if options['print']
+ print_node_properties(manager.filter(args), options['print'])
+ else
+ if args.any?
+ NodeTable.new(manager.filter(args), colors).run
+ else
+ environment = LeapCli.leapfile.environment || '_all_'
+ TagTable.new('SERVICES', manager.env(environment).services, colors).run
+ TagTable.new('TAGS', manager.env(environment).tags, colors).run
+ NodeTable.new(manager.filter(), colors).run
+ end
+ end
+ end
+ end
+
+ private
+
+ def self.print_node_properties(nodes, properties)
+ properties = properties.split(',')
+ max_width = nodes.keys.inject(0) {|max,i| [i.size,max].max}
+ nodes.each_node do |node|
+ value = properties.collect{|prop|
+ prop_value = node[prop]
+ if prop_value.nil?
+ "null"
+ elsif prop_value == ""
+ "empty"
+ elsif prop_value.is_a? LeapCli::Config::Object
+ node[prop].dump_json(:format => :compact) # TODO: add option of getting pre-evaluation values.
+ else
+ prop_value.to_s
+ end
+ }.join(', ')
+ printf("%#{max_width}s %s\n", node.name, value)
+ end
+ puts
+ end
+
+ class TagTable
+ include CommandLineReporter
+ def initialize(heading, tag_list, colors)
+ @heading = heading
+ @tag_list = tag_list
+ @colors = colors
+ end
+ def run
+ tags = @tag_list.keys.select{|tag| tag !~ /^_/}.sort # sorted list of tags, excluding _partials
+ max_width = [20, (tags+[@heading]).inject(0) {|max,i| [i.size,max].max}].max
+ table :border => false do
+ row :color => @colors[0] do
+ column @heading, :align => 'right', :width => max_width
+ column "NODES", :width => HighLine::SystemExtensions.terminal_size.first - max_width - 2, :padding => 2
+ end
+ tags.each do |tag|
+ next if @tag_list[tag].node_list.empty?
+ row :color => @colors[1] do
+ column tag
+ column @tag_list[tag].node_list.keys.sort.join(', ')
+ end
+ end
+ end
+ vertical_spacing
+ end
+ end
+
+ #
+ # might be handy: HighLine::SystemExtensions.terminal_size.first
+ #
+ class NodeTable
+ include CommandLineReporter
+ def initialize(node_list, colors)
+ @node_list = node_list
+ @colors = colors
+ end
+ def run
+ rows = @node_list.keys.sort.collect do |node_name|
+ [node_name, @node_list[node_name].services.sort.join(', '), @node_list[node_name].tags.sort.join(', ')]
+ end
+ unless rows.any?
+ puts Paint["no results", :red]
+ puts
+ return
+ end
+ padding = 2
+ max_node_width = [20, (rows.map{|i|i[0]} + ["NODES"] ).inject(0) {|max,i| [i.size,max].max}].max
+ max_service_width = (rows.map{|i|i[1]} + ["SERVICES"]).inject(0) {|max,i| [i.size+padding+padding,max].max}
+ max_tag_width = (rows.map{|i|i[2]} + ["TAGS"] ).inject(0) {|max,i| [i.size,max].max}
+ table :border => false do
+ row :color => @colors[0] do
+ column "NODES", :align => 'right', :width => max_node_width
+ column "SERVICES", :width => max_service_width, :padding => 2
+ column "TAGS", :width => max_tag_width
+ end
+ rows.each do |r|
+ row :color => @colors[1] do
+ column r[0]
+ column r[1]
+ column r[2]
+ end
+ end
+ end
+ vertical_spacing
+ end
+ end
+
+end; end
diff --git a/lib/leap_cli/commands/node.rb b/lib/leap_cli/commands/node.rb
new file mode 100644
index 00000000..a23661b3
--- /dev/null
+++ b/lib/leap_cli/commands/node.rb
@@ -0,0 +1,188 @@
+#
+# fyi: the `node init` command lives in node_init.rb,
+# but all other `node x` commands live here.
+#
+
+autoload :IPAddr, 'ipaddr'
+
+module LeapCli; module Commands
+
+ ##
+ ## COMMANDS
+ ##
+
+ desc 'Node management'
+ command [:node, :n] do |node|
+ node.desc 'Create a new configuration file for a node named NAME.'
+ node.long_desc ["If specified, the optional argument SEED can be used to seed values in the node configuration file.",
+ "The format is property_name:value.",
+ "For example: `leap node add web1 ip_address:1.2.3.4 services:webapp`.",
+ "To set nested properties, property name can contain '.', like so: `leap node add web1 ssh.port:44`",
+ "Separeate multiple values for a single property with a comma, like so: `leap node add mynode services:webapp,dns`"].join("\n\n")
+ node.arg_name 'NAME [SEED]' # , :optional => false, :multiple => false
+ node.command :add do |add|
+ add.switch :local, :desc => 'Make a local testing node (by automatically assigning the next available local IP address). Local nodes are run as virtual machines on your computer.', :negatable => false
+ add.action do |global_options,options,args|
+ # argument sanity checks
+ name = args.first
+ assert_valid_node_name!(name, options[:local])
+ assert_files_missing! [:node_config, name]
+
+ # create and seed new node
+ node = Config::Node.new(manager.env)
+ if options[:local]
+ node['ip_address'] = pick_next_vagrant_ip_address
+ end
+ seed_node_data_from_cmd_line(node, args[1..-1])
+ seed_node_data_from_template(node)
+ validate_ip_address(node)
+ begin
+ node['name'] = name
+ json = node.dump_json(:exclude => ['name'])
+ write_file!([:node_config, name], json + "\n")
+ if file_exists? :ca_cert, :ca_key
+ generate_cert_for_node(manager.reload_node!(node))
+ end
+ rescue LeapCli::ConfigError => exc
+ remove_node_files(name)
+ end
+ end
+ end
+
+ node.desc 'Renames a node file, and all its related files.'
+ node.arg_name 'OLD_NAME NEW_NAME'
+ node.command :mv do |mv|
+ mv.action do |global_options,options,args|
+ node = get_node_from_args(args, include_disabled: true)
+ new_name = args.last
+ assert_valid_node_name!(new_name, node.vagrant?)
+ ensure_dir [:node_files_dir, new_name]
+ Leap::Platform.node_files.each do |path|
+ rename_file! [path, node.name], [path, new_name]
+ end
+ remove_directory! [:node_files_dir, node.name]
+ rename_node_facts(node.name, new_name)
+ end
+ end
+
+ node.desc 'Removes all the files related to the node named NAME.'
+ node.arg_name 'NAME' #:optional => false #, :multiple => false
+ node.command :rm do |rm|
+ rm.action do |global_options,options,args|
+ node = get_node_from_args(args, include_disabled: true)
+ remove_node_files(node.name)
+ if node.vagrant?
+ vagrant_command("destroy --force", [node.name])
+ end
+ remove_node_facts(node.name)
+ end
+ end
+ end
+
+ ##
+ ## PUBLIC HELPERS
+ ##
+
+ def get_node_from_args(args, options={})
+ node_name = args.first
+ node = manager.node(node_name)
+ if node.nil? && options[:include_disabled]
+ node = manager.disabled_node(node_name)
+ end
+ assert!(node, "Node '#{node_name}' not found.")
+ node
+ end
+
+ def seed_node_data_from_cmd_line(node, args)
+ args.each do |seed|
+ key, value = seed.split(':', 2)
+ value = format_seed_value(value)
+ assert! key =~ /^[0-9a-z\._]+$/, "illegal characters used in property '#{key}'"
+ if key =~ /\./
+ key_parts = key.split('.')
+ final_key = key_parts.pop
+ current_object = node
+ key_parts.each do |key_part|
+ current_object[key_part] ||= Config::Object.new
+ current_object = current_object[key_part]
+ end
+ current_object[final_key] = value
+ else
+ node[key] = value
+ end
+ end
+ end
+
+ #
+ # load "new node template" information into the `node`, modifying `node`.
+ # values in the template will not override existing node values.
+ #
+ def seed_node_data_from_template(node)
+ node.inherit_from!(manager.template('common'))
+ [node['services']].flatten.each do |service|
+ if service
+ template = manager.template(service)
+ if template
+ node.inherit_from!(template)
+ end
+ end
+ end
+ end
+
+ def remove_node_files(node_name)
+ (Leap::Platform.node_files + [:node_files_dir]).each do |path|
+ remove_file! [path, node_name]
+ end
+ end
+
+ #
+ # conversions:
+ #
+ # "x,y,z" => ["x","y","z"]
+ #
+ # "22" => 22
+ #
+ # "5.1" => 5.1
+ #
+ def format_seed_value(v)
+ if v =~ /,/
+ v = v.split(',')
+ v.map! do |i|
+ i = i.to_i if i.to_i.to_s == i
+ i = i.to_f if i.to_f.to_s == i
+ i
+ end
+ else
+ v = v.to_i if v.to_i.to_s == v
+ v = v.to_f if v.to_f.to_s == v
+ end
+ return v
+ end
+
+ def validate_ip_address(node)
+ if node['ip_address'] == "REQUIRED"
+ bail! do
+ log :error, "ip_address is not set. Specify with `leap node add NAME ip_address:ADDRESS`."
+ end
+ end
+ IPAddr.new(node['ip_address'])
+ rescue ArgumentError
+ bail! do
+ if node['ip_address']
+ log :invalid, "ip_address #{node['ip_address'].inspect}"
+ else
+ log :missing, "ip_address"
+ end
+ end
+ end
+
+ def assert_valid_node_name!(name, local=false)
+ assert! name, 'No <node-name> specified.'
+ if local
+ assert! name =~ /^[0-9a-z]+$/, "illegal characters used in node name '#{name}' (note: Vagrant does not allow hyphens or underscores)"
+ else
+ assert! name =~ /^[0-9a-z-]+$/, "illegal characters used in node name '#{name}' (note: Linux does not allow underscores)"
+ end
+ end
+
+end; end
diff --git a/lib/leap_cli/commands/node_init.rb b/lib/leap_cli/commands/node_init.rb
new file mode 100644
index 00000000..33f6288d
--- /dev/null
+++ b/lib/leap_cli/commands/node_init.rb
@@ -0,0 +1,169 @@
+#
+# Node initialization.
+# Most of the fun stuff is in tasks.rb.
+#
+
+module LeapCli; module Commands
+
+ desc 'Node management'
+ command :node do |node|
+ node.desc 'Bootstraps a node or nodes, setting up SSH keys and installing prerequisite packages'
+ node.long_desc "This command prepares a server to be used with the LEAP Platform by saving the server's SSH host key, " +
+ "copying the authorized_keys file, installing packages that are required for deploying, and registering important facts. " +
+ "Node init must be run before deploying to a server, and the server must be running and available via the network. " +
+ "This command only needs to be run once, but there is no harm in running it multiple times."
+ node.arg_name 'FILTER'
+ node.command :init do |init|
+ init.switch 'echo', :desc => 'If set, passwords are visible as you type them (default is hidden)', :negatable => false
+ init.flag :port, :desc => 'Override the default SSH port.', :arg_name => 'PORT'
+ init.flag :ip, :desc => 'Override the default SSH IP address.', :arg_name => 'IPADDRESS'
+
+ init.action do |global,options,args|
+ assert! args.any?, 'You must specify a FILTER'
+ finished = []
+ manager.filter!(args).each_node do |node|
+ is_node_alive(node, options)
+ save_public_host_key(node, global, options) unless node.vagrant?
+ update_compiled_ssh_configs
+ ssh_connect_options = connect_options(options).merge({:bootstrap => true, :echo => options[:echo]})
+ ssh_connect(node, ssh_connect_options) do |ssh|
+ if node.vagrant?
+ ssh.install_insecure_vagrant_key
+ end
+ ssh.install_authorized_keys
+ ssh.install_prerequisites
+ unless node.vagrant?
+ ssh.leap.log(:checking, "SSH host keys") do
+ ssh.leap.capture(get_ssh_keys_cmd) do |response|
+ update_local_ssh_host_keys(node, response[:data]) if response[:exitcode] == 0
+ end
+ end
+ end
+ ssh.leap.log(:updating, "facts") do
+ ssh.leap.capture(facter_cmd) do |response|
+ if response[:exitcode] == 0
+ update_node_facts(node.name, response[:data])
+ else
+ log :failed, "to run facter on #{node.name}"
+ end
+ end
+ end
+ end
+ finished << node.name
+ end
+ log :completed, "initialization of nodes #{finished.join(', ')}"
+ end
+ end
+ end
+
+ private
+
+ ##
+ ## PRIVATE HELPERS
+ ##
+
+ def is_node_alive(node, options)
+ address = options[:ip] || node.ip_address
+ port = options[:port] || node.ssh.port
+ log :connecting, "to node #{node.name}"
+ assert_run! "nc -zw3 #{address} #{port}",
+ "Failed to reach #{node.name} (address #{address}, port #{port}). You can override the configured IP address and port with --ip or --port."
+ end
+
+ #
+ # saves the public ssh host key for node into the provider directory.
+ #
+ # see `man sshd` for the format of known_hosts
+ #
+ def save_public_host_key(node, global, options)
+ log :fetching, "public SSH host key for #{node.name}"
+ address = options[:ip] || node.ip_address
+ port = options[:port] || node.ssh.port
+ host_keys = get_public_keys_for_ip(address, port)
+ pub_key_path = Path.named_path([:node_ssh_pub_key, node.name])
+
+ if Path.exists?(pub_key_path)
+ if host_keys.include? SshKey.load(pub_key_path)
+ log :trusted, "- Public SSH host key for #{node.name} matches previously saved key", :indent => 1
+ else
+ bail! do
+ log :error, "The public SSH host keys we just fetched for #{node.name} doesn't match what we have saved previously.", :indent => 1
+ log "Delete the file #{pub_key_path} if you really want to remove the trusted SSH host key.", :indent => 2
+ end
+ end
+ else
+ known_key = host_keys.detect{|k|k.in_known_hosts?(node.name, node.ip_address, node.domain.name)}
+ if known_key
+ log :trusted, "- Public SSH host key for #{node.name} is trusted (key found in your ~/.ssh/known_hosts)"
+ else
+ public_key = SshKey.pick_best_key(host_keys)
+ if public_key.nil?
+ bail!("We got back #{host_keys.size} host keys from #{node.name}, but we can't support any of them.")
+ else
+ say(" This is the SSH host key you got back from node \"#{node.name}\"")
+ say(" Type -- #{public_key.bits} bit #{public_key.type.upcase}")
+ say(" Fingerprint -- " + public_key.fingerprint)
+ say(" Public Key -- " + public_key.key)
+ if !global[:yes] && !agree(" Is this correct? ")
+ bail!
+ else
+ known_key = public_key
+ end
+ end
+ end
+ puts
+ write_file! [:node_ssh_pub_key, node.name], known_key.to_s
+ end
+ end
+
+ #
+ # Get the public host keys for a host using ssh-keyscan.
+ # Return an array of SshKey objects, one for each key.
+ #
+ def get_public_keys_for_ip(address, port=22)
+ assert_bin!('ssh-keyscan')
+ output = assert_run! "ssh-keyscan -p #{port} #{address}", "Could not get the public host key from #{address}:#{port}. Maybe sshd is not running?"
+ if output.empty?
+ bail! :failed, "ssh-keyscan returned empty output."
+ end
+
+ if output =~ /No route to host/
+ bail! :failed, 'ssh-keyscan: no route to %s' % address
+ else
+ keys = SshKey.parse_keys(output)
+ if keys.empty?
+ bail! "ssh-keyscan got zero host keys back (that we understand)! Output was: #{output}"
+ else
+ return keys
+ end
+ end
+ end
+
+ # run on the server to generate a string suitable for passing to SshKey.parse_keys()
+ def get_ssh_keys_cmd
+ "/bin/grep ^HostKey /etc/ssh/sshd_config | /usr/bin/awk '{print $2 \".pub\"}' | /usr/bin/xargs /bin/cat"
+ end
+
+ #
+ # Sometimes the ssh host keys on the server will be better than what we have
+ # stored locally. In these cases, ask the user if they want to upgrade.
+ #
+ def update_local_ssh_host_keys(node, remote_keys_string)
+ remote_keys = SshKey.parse_keys(remote_keys_string)
+ return unless remote_keys.any?
+ current_key = SshKey.load(Path.named_path([:node_ssh_pub_key, node.name]))
+ best_key = SshKey.pick_best_key(remote_keys)
+ return unless best_key && current_key
+ if current_key != best_key
+ say(" One of the SSH host keys for node '#{node.name}' is better than what you currently have trusted.")
+ say(" Current key: #{current_key.summary}")
+ say(" Better key: #{best_key.summary}")
+ if agree(" Do you want to use the better key? ")
+ write_file! [:node_ssh_pub_key, node.name], best_key.to_s
+ end
+ else
+ log(3, "current host key does not need updating")
+ end
+ end
+
+end; end
diff --git a/lib/leap_cli/commands/ssh.rb b/lib/leap_cli/commands/ssh.rb
new file mode 100644
index 00000000..3887618e
--- /dev/null
+++ b/lib/leap_cli/commands/ssh.rb
@@ -0,0 +1,225 @@
+module LeapCli; module Commands
+
+ desc 'Log in to the specified node with an interactive shell.'
+ arg_name 'NAME' #, :optional => false, :multiple => false
+ command :ssh do |c|
+ c.flag 'ssh', :desc => "Pass through raw options to ssh (e.g. `--ssh '-F ~/sshconfig'`)."
+ c.flag 'port', :arg_name => 'SSH_PORT', :desc => 'Override default SSH port used when trying to connect to the server. Same as `--ssh "-p SSH_PORT"`.'
+ c.action do |global_options,options,args|
+ exec_ssh(:ssh, options, args)
+ end
+ end
+
+ desc 'Log in to the specified node with an interactive shell using mosh (requires node to have mosh.enabled set to true).'
+ arg_name 'NAME'
+ command :mosh do |c|
+ c.flag 'ssh', :desc => "Pass through raw options to ssh (e.g. `--ssh '-F ~/sshconfig'`)."
+ c.flag 'port', :arg_name => 'SSH_PORT', :desc => 'Override default SSH port used when trying to connect to the server. Same as `--ssh "-p SSH_PORT"`.'
+ c.action do |global_options,options,args|
+ exec_ssh(:mosh, options, args)
+ end
+ end
+
+ desc 'Creates an SSH port forward (tunnel) to the node NAME. REMOTE_PORT is the port on the remote node that the tunnel will connect to. LOCAL_PORT is the optional port on your local machine. For example: `leap tunnel couch1:5984`.'
+ arg_name '[LOCAL_PORT:]NAME:REMOTE_PORT'
+ command :tunnel do |c|
+ c.flag 'ssh', :desc => "Pass through raw options to ssh (e.g. --ssh '-F ~/sshconfig')."
+ c.flag 'port', :arg_name => 'SSH_PORT', :desc => 'Override default SSH port used when trying to connect to the server. Same as `--ssh "-p SSH_PORT"`.'
+ c.action do |global_options,options,args|
+ local_port, node, remote_port = parse_tunnel_arg(args.first)
+ unless node.ssh.config.AllowTcpForwarding == "yes"
+ log :warning, "It looks like TCP forwarding is not enabled. "+
+ "The tunnel command requires that the node property ssh.config.AllowTcpForwarding "+
+ "be set to 'yes'. Add this property to #{node.name}.json, deploy, and then try tunnel again."
+ end
+ options[:ssh] = [options[:ssh], "-N -L 127.0.0.1:#{local_port}:0.0.0.0:#{remote_port}"].join(' ')
+ log("Forward port localhost:#{local_port} to #{node.name}:#{remote_port}")
+ if is_port_available?(local_port)
+ exec_ssh(:ssh, options, [node.name])
+ end
+ end
+ end
+
+ desc 'Secure copy from FILE1 to FILE2. Files are specified as NODE_NAME:FILE_PATH. For local paths, omit "NODE_NAME:".'
+ arg_name 'FILE1 FILE2'
+ command :scp do |c|
+ c.switch :r, :desc => 'Copy recursively'
+ c.action do |global_options, options, args|
+ if args.size != 2
+ bail!('You must specify both FILE1 and FILE2')
+ end
+ from, to = args
+ if (from !~ /:/ && to !~ /:/) || (from =~ /:/ && to =~ /:/)
+ bail!('One FILE must be remote and the other local.')
+ end
+ src_node_name = src_file_path = src_node = nil
+ dst_node_name = dst_file_path = dst_node = nil
+ if from =~ /:/
+ src_node_name, src_file_path = from.split(':')
+ src_node = get_node_from_args([src_node_name], :include_disabled => true)
+ dst_file_path = to
+ else
+ dst_node_name, dst_file_path = to.split(':')
+ dst_node = get_node_from_args([dst_node_name], :include_disabled => true)
+ src_file_path = from
+ end
+ exec_scp(options, src_node, src_file_path, dst_node, dst_file_path)
+ end
+ end
+
+ protected
+
+ #
+ # allow for ssh overrides of all commands that use ssh_connect
+ #
+ def connect_options(options)
+ connect_options = {:ssh_options=>{}}
+ if options[:port]
+ connect_options[:ssh_options][:port] = options[:port]
+ end
+ if options[:ip]
+ connect_options[:ssh_options][:host_name] = options[:ip]
+ end
+ return connect_options
+ end
+
+ def ssh_config_help_message
+ puts ""
+ puts "Are 'too many authentication failures' getting you down?"
+ puts "Then we have the solution for you! Add something like this to your ~/.ssh/config file:"
+ puts " Host *.#{manager.provider.domain}"
+ puts " IdentityFile ~/.ssh/id_rsa"
+ puts " IdentitiesOnly=yes"
+ puts "(replace `id_rsa` with the actual private key filename that you use for this provider)"
+ end
+
+ require 'socket'
+ def is_port_available?(port)
+ TCPServer.open('127.0.0.1', port) {}
+ true
+ rescue Errno::EACCES
+ bail!("You don't have permission to bind to port #{port}.")
+ rescue Errno::EADDRINUSE
+ bail!("Local port #{port} is already in use. Specify LOCAL_PORT to pick another.")
+ rescue Exception => exc
+ bail!(exc.to_s)
+ end
+
+ private
+
+ def exec_ssh(cmd, cli_options, args)
+ node = get_node_from_args(args, :include_disabled => true)
+ port = node.ssh.port
+ options = ssh_config(node)
+ username = 'root'
+ if LeapCli.log_level >= 3
+ options << "-vv"
+ elsif LeapCli.log_level >= 2
+ options << "-v"
+ end
+ if cli_options[:port]
+ port = cli_options[:port]
+ end
+ if cli_options[:ssh]
+ options << cli_options[:ssh]
+ end
+ ssh = "ssh -l #{username} -p #{port} #{options.join(' ')}"
+ if cmd == :ssh
+ command = "#{ssh} #{node.domain.full}"
+ elsif cmd == :mosh
+ command = "MOSH_TITLE_NOPREFIX=1 mosh --ssh \"#{ssh}\" #{node.domain.full}"
+ end
+ log 2, command
+
+ # exec the shell command in a subprocess
+ pid = fork { exec "#{command}" }
+
+ Signal.trap("SIGINT") do
+ Process.kill("KILL", pid)
+ Process.wait(pid)
+ exit(0)
+ end
+
+ # wait for shell to exit so we can grab the exit status
+ _, status = Process.waitpid2(pid)
+
+ if status.exitstatus == 255
+ ssh_config_help_message
+ elsif status.exitstatus != 0
+ exit(status.exitstatus)
+ end
+ end
+
+ def exec_scp(cli_options, src_node, src_file_path, dst_node, dst_file_path)
+ node = src_node || dst_node
+ options = ssh_config(node)
+ port = node.ssh.port
+ username = 'root'
+ options << "-r" if cli_options[:r]
+ scp = "scp -P #{port} #{options.join(' ')}"
+ if src_node
+ command = "#{scp} #{username}@#{src_node.domain.full}:#{src_file_path} #{dst_file_path}"
+ elsif dst_node
+ command = "#{scp} #{src_file_path} #{username}@#{dst_node.domain.full}:#{dst_file_path}"
+ end
+ log 2, command
+
+ # exec the shell command in a subprocess
+ pid = fork { exec "#{command}" }
+
+ Signal.trap("SIGINT") do
+ Process.kill("KILL", pid)
+ Process.wait(pid)
+ exit(0)
+ end
+
+ # wait for shell to exit so we can grab the exit status
+ _, status = Process.waitpid2(pid)
+ exit(status.exitstatus)
+ end
+
+ #
+ # SSH command line -o options. See `man ssh_config`
+ #
+ # NOTES:
+ #
+ # The option 'HostKeyAlias=#{node.name}' is oddly incompatible with ports in the
+ # known_hosts file, so we must not use it; otherwise, non-standard ports break.
+ #
+ def ssh_config(node)
+ options = [
+ "-o 'HostName=#{node.ip_address}'",
+ "-o 'GlobalKnownHostsFile=#{path(:known_hosts)}'",
+ "-o 'UserKnownHostsFile=/dev/null'"
+ ]
+ if node.vagrant?
+ options << "-i #{vagrant_ssh_key_file}" # use the universal vagrant insecure key
+ options << "-o IdentitiesOnly=yes" # force the use of the insecure vagrant key
+ options << "-o 'StrictHostKeyChecking=no'" # blindly accept host key and don't save it
+ # (since userknownhostsfile is /dev/null)
+ else
+ options << "-o 'StrictHostKeyChecking=yes'"
+ end
+ if !node.supported_ssh_host_key_algorithms.empty?
+ options << "-o 'HostKeyAlgorithms=#{node.supported_ssh_host_key_algorithms}'"
+ end
+ return options
+ end
+
+ def parse_tunnel_arg(arg)
+ if arg.count(':') == 1
+ node_name, remote = arg.split(':')
+ local = nil
+ elsif arg.count(':') == 2
+ local, node_name, remote = arg.split(':')
+ else
+ bail!('Argument NAME:REMOTE_PORT required.')
+ end
+ node = get_node_from_args([node_name], :include_disabled => true)
+ remote = remote.to_i
+ local = local || remote
+ local = local.to_i
+ return [local, node, remote]
+ end
+
+end; end \ No newline at end of file
diff --git a/lib/leap_cli/commands/test.rb b/lib/leap_cli/commands/test.rb
new file mode 100644
index 00000000..73207b31
--- /dev/null
+++ b/lib/leap_cli/commands/test.rb
@@ -0,0 +1,74 @@
+module LeapCli; module Commands
+
+ desc 'Run tests.'
+ command [:test, :t] do |test|
+ test.desc 'Run the test suite on FILTER nodes.'
+ test.arg_name 'FILTER', :optional => true
+ test.command :run do |run|
+ run.switch 'continue', :desc => 'Continue over errors and failures (default is --no-continue).', :negatable => true
+ run.action do |global_options,options,args|
+ test_order = File.join(Path.platform, 'tests/order.rb')
+ if File.exists?(test_order)
+ require test_order
+ end
+ manager.filter!(args).names_in_test_dependency_order.each do |node_name|
+ node = manager.nodes[node_name]
+ begin
+ ssh_connect(node) do |ssh|
+ ssh.run(test_cmd(options))
+ end
+ rescue Capistrano::CommandError => exc
+ if options[:continue]
+ exit_status(1)
+ else
+ bail!
+ end
+ end
+ end
+ end
+ end
+
+ test.desc 'Creates files needed to run tests.'
+ test.command :init do |init|
+ init.action do |global_options,options,args|
+ generate_test_client_openvpn_configs
+ end
+ end
+
+ test.default_command :run
+ end
+
+ private
+
+ def test_cmd(options)
+ if options[:continue]
+ "#{Leap::Platform.leap_dir}/bin/run_tests --continue"
+ else
+ "#{Leap::Platform.leap_dir}/bin/run_tests"
+ end
+ end
+
+ #
+ # generates a whole bunch of openvpn configs that can be used to connect to different openvpn gateways
+ #
+ def generate_test_client_openvpn_configs
+ assert_config! 'provider.ca.client_certificates.unlimited_prefix'
+ assert_config! 'provider.ca.client_certificates.limited_prefix'
+ template = read_file! Path.find_file(:test_client_openvpn_template)
+ manager.environment_names.each do |env|
+ vpn_nodes = manager.nodes[:environment => env][:services => 'openvpn']['openvpn.allow_limited' => true]
+ if vpn_nodes.any?
+ generate_test_client_cert(provider.ca.client_certificates.limited_prefix) do |key, cert|
+ write_file! [:test_openvpn_config, [env, 'limited'].compact.join('_')], Util.erb_eval(template, binding)
+ end
+ end
+ vpn_nodes = manager.nodes[:environment => env][:services => 'openvpn']['openvpn.allow_unlimited' => true]
+ if vpn_nodes.any?
+ generate_test_client_cert(provider.ca.client_certificates.unlimited_prefix) do |key, cert|
+ write_file! [:test_openvpn_config, [env, 'unlimited'].compact.join('_')], Util.erb_eval(template, binding)
+ end
+ end
+ end
+ end
+
+end; end
diff --git a/lib/leap_cli/commands/user.rb b/lib/leap_cli/commands/user.rb
new file mode 100644
index 00000000..b842e854
--- /dev/null
+++ b/lib/leap_cli/commands/user.rb
@@ -0,0 +1,136 @@
+
+#
+# perhaps we want to verify that the key files are actually the key files we expect.
+# we could use 'file' for this:
+#
+# > file ~/.gnupg/00440025.asc
+# ~/.gnupg/00440025.asc: PGP public key block
+#
+# > file ~/.ssh/id_rsa.pub
+# ~/.ssh/id_rsa.pub: OpenSSH RSA public key
+#
+
+module LeapCli
+ module Commands
+
+ desc 'Adds a new trusted sysadmin by adding public keys to the "users" directory.'
+ arg_name 'USERNAME' #, :optional => false, :multiple => false
+ command :'add-user' do |c|
+
+ c.switch 'self', :desc => 'Add yourself as a trusted sysadmin by choosing among the public keys available for the current user.', :negatable => false
+ c.flag 'ssh-pub-key', :desc => 'SSH public key file for this new user'
+ c.flag 'pgp-pub-key', :desc => 'OpenPGP public key file for this new user'
+
+ c.action do |global_options,options,args|
+ username = args.first
+ if !username.any?
+ if options[:self]
+ username ||= `whoami`.strip
+ else
+ help! "Either USERNAME argument or --self flag is required."
+ end
+ end
+ if Leap::Platform.reserved_usernames.include? username
+ bail! %(The username "#{username}" is reserved. Sorry, pick another.)
+ end
+
+ ssh_pub_key = nil
+ pgp_pub_key = nil
+
+ if options['ssh-pub-key']
+ ssh_pub_key = read_file!(options['ssh-pub-key'])
+ end
+ if options['pgp-pub-key']
+ pgp_pub_key = read_file!(options['pgp-pub-key'])
+ end
+
+ if options[:self]
+ ssh_pub_key ||= pick_ssh_key.to_s
+ pgp_pub_key ||= pick_pgp_key
+ end
+
+ assert!(ssh_pub_key, 'Sorry, could not find SSH public key.')
+
+ if ssh_pub_key
+ write_file!([:user_ssh, username], ssh_pub_key)
+ end
+ if pgp_pub_key
+ write_file!([:user_pgp, username], pgp_pub_key)
+ end
+
+ update_authorized_keys
+ end
+ end
+
+ #
+ # let the user choose among the ssh public keys that we encounter, or just pick the key if there is only one.
+ #
+ def pick_ssh_key
+ ssh_keys = []
+ Dir.glob("#{ENV['HOME']}/.ssh/*.pub").each do |keyfile|
+ ssh_keys << SshKey.load(keyfile)
+ end
+
+ if `which ssh-add`.strip.any?
+ `ssh-add -L 2> /dev/null`.split("\n").compact.each do |line|
+ key = SshKey.load(line)
+ if key
+ key.comment = 'ssh-agent'
+ ssh_keys << key unless ssh_keys.include?(key)
+ end
+ end
+ end
+ ssh_keys.compact!
+
+ assert! ssh_keys.any?, 'Sorry, could not find any SSH public key for you. Have you run ssh-keygen?'
+
+ if ssh_keys.length > 1
+ key_index = numbered_choice_menu('Choose your SSH public key', ssh_keys.collect(&:summary)) do |line, i|
+ say("#{i+1}. #{line}")
+ end
+ else
+ key_index = 0
+ end
+
+ return ssh_keys[key_index]
+ end
+
+ #
+ # let the user choose among the gpg public keys that we encounter, or just pick the key if there is only one.
+ #
+ def pick_pgp_key
+ begin
+ require 'gpgme'
+ rescue LoadError
+ log "Skipping OpenPGP setup because gpgme is not installed."
+ return
+ end
+
+ secret_keys = GPGME::Key.find(:secret)
+ if secret_keys.empty?
+ log "Skipping OpenPGP setup because I could not find any OpenPGP keys for you"
+ return nil
+ end
+
+ secret_keys.select!{|key| !key.expired}
+
+ if secret_keys.length > 1
+ key_index = numbered_choice_menu('Choose your OpenPGP public key', secret_keys) do |key, i|
+ key_info = key.to_s.split("\n")[0..1].map{|line| line.sub(/^\s*(sec|uid)\s*/,'')}.join(' -- ')
+ say("#{i+1}. #{key_info}")
+ end
+ else
+ key_index = 0
+ end
+
+ key_id = secret_keys[key_index].sha
+
+ # can't use this, it includes signatures:
+ #puts GPGME::Key.export(key_id, :armor => true, :export_options => :export_minimal)
+
+ # export with signatures removed:
+ return `gpg --armor --export-options export-minimal --export #{key_id}`.strip
+ end
+
+ end
+end
diff --git a/lib/leap_cli/commands/util.rb b/lib/leap_cli/commands/util.rb
new file mode 100644
index 00000000..c1da570e
--- /dev/null
+++ b/lib/leap_cli/commands/util.rb
@@ -0,0 +1,50 @@
+module LeapCli; module Commands
+
+ extend self
+ extend LeapCli::Util
+ extend LeapCli::Util::RemoteCommand
+
+ def path(name)
+ Path.named_path(name)
+ end
+
+ #
+ # keeps prompting the user for a numbered choice, until they pick a good one or bail out.
+ #
+ # the block is yielded each item and its index, and is responsible for rendering the choices.
+ #
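+ # For example (a hypothetical sketch, mirroring how pick_ssh_key uses it):
+ #
+ #   index = numbered_choice_menu('Choose your SSH public key', summaries) do |line, i|
+ #     say("#{i+1}. #{line}")
+ #   end
+ #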
+ def numbered_choice_menu(msg, items, &block)
+ while true
+ say("\n" + msg + ':')
+ items.each_with_index &block
+ say("q. quit")
+ index = ask("number 1-#{items.length}> ")
+ if index.empty?
+ next
+ elsif index =~ /q/
+ bail!
+ else
+ i = index.to_i - 1
+ if i < 0 || i >= items.length
+ bail!
+ else
+ return i
+ end
+ end
+ end
+ end
+
+
+ def parse_node_list(nodes)
+ if nodes.is_a? Config::Object
+ Config::ObjectList.new(nodes)
+ elsif nodes.is_a? Config::ObjectList
+ nodes
+ elsif nodes.is_a? String
+ manager.filter!(nodes)
+ else
+ bail! "argument error"
+ end
+ end
+
+end; end
diff --git a/lib/leap_cli/commands/vagrant.rb b/lib/leap_cli/commands/vagrant.rb
new file mode 100644
index 00000000..9fdd48e3
--- /dev/null
+++ b/lib/leap_cli/commands/vagrant.rb
@@ -0,0 +1,180 @@
+autoload :IPAddr, 'ipaddr'
+require 'fileutils'
+
+module LeapCli; module Commands
+
+ desc "Manage local virtual machines."
+ long_desc "This command provides a convient way to manage Vagrant-based virtual machines. If FILTER argument is missing, the command runs on all local virtual machines. The Vagrantfile is automatically generated in 'test/Vagrantfile'. If you want to run vagrant commands manually, cd to 'test'."
+ command [:local, :l] do |local|
+ local.desc 'Starts up the virtual machine(s)'
+ local.arg_name 'FILTER', :optional => true #, :multiple => false
+ local.command :start do |start|
+ start.flag(:basebox,
+ :desc => "The basebox to use. This value is passed to vagrant as the "+
+ "`config.vm.box` option. The value here should be the name of an installed box or a "+
+ "shorthand name of a box in HashiCorp's Atlas.",
+ :arg_name => 'BASEBOX',
+ :default_value => 'LEAP/jessie'
+ )
+ start.action do |global_options,options,args|
+ vagrant_command(["up", "sandbox on"], args, options)
+ end
+ end
+
+ local.desc 'Shuts down the virtual machine(s)'
+ local.arg_name 'FILTER', :optional => true #, :multiple => false
+ local.command :stop do |stop|
+ stop.action do |global_options,options,args|
+ if global_options[:yes]
+ vagrant_command("halt --force", args)
+ else
+ vagrant_command("halt", args)
+ end
+ end
+ end
+
+ local.desc 'Destroys the virtual machine(s), reclaiming the disk space'
+ local.arg_name 'FILTER', :optional => true #, :multiple => false
+ local.command :destroy do |destroy|
+ destroy.action do |global_options,options,args|
+ if global_options[:yes]
+ vagrant_command("destroy --force", args)
+ else
+ vagrant_command("destroy", args)
+ end
+ end
+ end
+
+ local.desc 'Print the status of local virtual machine(s)'
+ local.arg_name 'FILTER', :optional => true #, :multiple => false
+ local.command :status do |status|
+ status.action do |global_options,options,args|
+ vagrant_command("status", args)
+ end
+ end
+
+ local.desc 'Saves the current state of the virtual machine as a new snapshot'
+ local.arg_name 'FILTER', :optional => true #, :multiple => false
+ local.command :save do |status|
+ status.action do |global_options,options,args|
+ vagrant_command("sandbox commit", args)
+ end
+ end
+
+ local.desc 'Resets virtual machine(s) to the last saved snapshot'
+ local.arg_name 'FILTER', :optional => true #, :multiple => false
+ local.command :reset do |reset|
+ reset.action do |global_options,options,args|
+ vagrant_command("sandbox rollback", args)
+ end
+ end
+ end
+
+ public
+
+ #
+ # returns the path to a vagrant ssh private key file.
+ #
+ # if the vagrant.key file is owned by root or ourselves, then
+ # we need to make sure that it is owned by us and not world readable.
+ #
+ def vagrant_ssh_key_file
+ file_path = Path.vagrant_ssh_priv_key_file
+ Util.assert_files_exist! file_path
+ uid = File.new(file_path).stat.uid
+ if uid == 0 || uid == Process.euid
+ FileUtils.install file_path, '/tmp/vagrant.key', :mode => 0600
+ file_path = '/tmp/vagrant.key'
+ end
+ return file_path
+ end
+
+ protected
+
+ def vagrant_command(cmds, args, options={})
+ vagrant_setup(options)
+ cmds = cmds.to_a
+ if args.empty?
+ nodes = [""]
+ else
+ nodes = manager.filter(args)[:environment => "local"].field(:name)
+ end
+ if nodes.any?
+ vagrant_dir = File.dirname(Path.named_path(:vagrantfile))
+ exec = ["cd #{vagrant_dir}"]
+ cmds.each do |cmd|
+ nodes.each do |node|
+ exec << "vagrant #{cmd} #{node}"
+ end
+ end
+ execute exec.join('; ')
+ else
+ bail! "No nodes found. This command only works on nodes with ip_address in the network #{LeapCli.leapfile.vagrant_network}"
+ end
+ end
+
+ private
+
+ def vagrant_setup(options)
+ assert_bin! 'vagrant', 'Vagrant is required for running local virtual machines. Run "sudo apt-get install vagrant".'
+ assert! (vagrant_version >= Gem::Version.new('1.1')), 'Vagrant version >= 1.1 is required for running local virtual machines. Please upgrade.'
+
+ unless assert_run!('vagrant plugin list | grep sahara | cat').chars.any?
+ log :installing, "vagrant plugin 'sahara'"
+ assert_run! 'vagrant plugin install sahara'
+ end
+ create_vagrant_file(options)
+ end
+
+ def vagrant_version
+ @vagrant_version ||= Gem::Version.new(assert_run!('vagrant --version').split(' ')[1])
+ end
+
+ def execute(cmd)
+ log 2, :run, cmd
+ exec cmd
+ end
+
+ def create_vagrant_file(options)
+ lines = []
+
+ basebox = options[:basebox] || 'LEAP/jessie'
+ # override basebox with custom setting from Leapfile or ~/.leaprc
+ basebox = leapfile.vagrant_basebox || basebox
+
+ lines << %[Vagrant.configure("2") do |config|]
+ manager.each_node do |node|
+ if node.vagrant?
+ lines << %[ config.vm.define :#{node.name} do |config|]
+ lines << %[ config.vm.box = "#{basebox}"]
+ lines << %[ config.vm.network :private_network, ip: "#{node.ip_address}"]
+ lines << %[ config.vm.provider "virtualbox" do |v|]
+ lines << %[ v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]]
+ lines << %[ v.name = "#{node.name}"]
+ lines << %[ v.memory = 1536]
+ lines << %[ end]
+ lines << %[ config.vm.provider "libvirt" do |v|]
+ lines << %[ v.memory = 1536]
+ lines << %[ end]
+ lines << %[ #{leapfile.custom_vagrant_vm_line}] if leapfile.custom_vagrant_vm_line
+ lines << %[ end]
+ end
+ end
+
+ lines << %[end]
+ lines << ""
+ write_file! :vagrantfile, lines.join("\n")
+ end
+
+ def pick_next_vagrant_ip_address
+ taken_ips = manager.nodes[:environment => "local"].field(:ip_address)
+ if taken_ips.any?
+ highest_ip = taken_ips.map{|ip| IPAddr.new(ip)}.max
+ new_ip = highest_ip.succ
+ else
+ new_ip = IPAddr.new(LeapCli.leapfile.vagrant_network).succ.succ
+ end
+ return new_ip.to_s
+ end
+
+end; end
diff --git a/lib/leap_cli/macros.rb b/lib/leap_cli/macros.rb
new file mode 100644
index 00000000..fdb9a94e
--- /dev/null
+++ b/lib/leap_cli/macros.rb
@@ -0,0 +1,16 @@
+#
+# MACROS
+#
+# The methods in these files are available in the context of a .json configuration file.
+# (The module LeapCli::Macro is included in Config::Object)
+#
+
+require_relative 'macros/core'
+require_relative 'macros/files'
+require_relative 'macros/haproxy'
+require_relative 'macros/hosts'
+require_relative 'macros/keys'
+require_relative 'macros/nodes'
+require_relative 'macros/secrets'
+require_relative 'macros/stunnel'
+require_relative 'macros/provider'
diff --git a/lib/leap_cli/macros/core.rb b/lib/leap_cli/macros/core.rb
new file mode 100644
index 00000000..873da358
--- /dev/null
+++ b/lib/leap_cli/macros/core.rb
@@ -0,0 +1,92 @@
+# encoding: utf-8
+
+module LeapCli
+ module Macro
+
+ #
+ # Creates a hash from the ssh key info in users directory, for use in
+ # updating authorized_keys file. Additionally, the 'monitor' public key is
+ # included, which is used by the monitor nodes to run particular commands
+ # remotely.
+ #
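+ # For illustration, the returned hash might look like this (hypothetical
+ # usernames, truncated key material):
+ #
+ #   {"alice"   => {"type" => "ssh-rsa", "key" => "AAAAB3Nza..."},
+ #    "monitor" => {"type" => "ssh-rsa", "key" => "AAAAB3Nza..."}}
+ #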
+ def authorized_keys
+ hash = {}
+ keys = Dir.glob(Path.named_path([:user_ssh, '*']))
+ keys.sort.each do |keyfile|
+ ssh_type, ssh_key = File.read(keyfile, :encoding => 'UTF-8').strip.split(" ")
+ name = File.basename(File.dirname(keyfile))
+ until hash[name].nil?
+ i ||= 1; name = "#{name}#{i+=1}"
+ end
+ hash[name] = {
+ "type" => ssh_type,
+ "key" => ssh_key
+ }
+ end
+ ssh_type, ssh_key = File.read(Path.named_path(:monitor_pub_key), :encoding => 'UTF-8').strip.split(" ")
+ hash[Leap::Platform.monitor_username] = {
+ "type" => ssh_type,
+ "key" => ssh_key
+ }
+ hash
+ end
+
+ def assert(assertion)
+ if instance_eval(assertion)
+ true
+ else
+ raise AssertionFailed.new(assertion), assertion, caller
+ end
+ end
+
+ def error(msg)
+ raise ConfigError.new(@node, msg), msg, caller
+ end
+
+ #
+ # applies a JSON partial to this node
+ #
+ def apply_partial(partial_path)
+ if env.partials[partial_path]
+ self.deep_merge!(env.partials[partial_path])
+ else
+ raise ArgumentError.new(
+ "No such partial `%s`. Available partials include:\n%s" %
+ [partial_path, env.partials.keys.join(", ")]
+ )
+ end
+ end
+
+ #
+ # If at first you don't succeed, then it is time to give up.
+ #
+ # try{} returns nil if the block raises NoMethodError or ArgumentError.
+ #
+ # You can wrap something that might fail in `try`, like so.
+ #
+ # "= try{ nodes[:services => 'tor'].first.ip_address } "
+ #
+ def try(&block)
+ yield
+ rescue NoMethodError
+ rescue ArgumentError
+ nil
+ end
+
+ protected
+
+ #
+ # returns a node list, if argument is not already one
+ #
+ def listify(node_list)
+ if node_list.is_a? Config::ObjectList
+ node_list
+ elsif node_list.is_a? Config::Object
+ Config::ObjectList.new(node_list)
+ else
+ raise ArgumentError, 'argument must be a node or node list, not a `%s`' % node_list.class, caller
+ end
+ end
+
+ end
+end
diff --git a/lib/leap_cli/macros/files.rb b/lib/leap_cli/macros/files.rb
new file mode 100644
index 00000000..04c94edf
--- /dev/null
+++ b/lib/leap_cli/macros/files.rb
@@ -0,0 +1,124 @@
+# encoding: utf-8
+
+##
+## FILES
+##
+
+module LeapCli
+ module Macro
+
+ #
+ # inserts the contents of a file
+ #
+ def file(filename, options={})
+ if filename.is_a? Symbol
+ filename = [filename, @node.name]
+ end
+ filepath = Path.find_file(filename)
+ if filepath
+ if filepath =~ /\.erb$/
+ return ERB.new(File.read(filepath, :encoding => 'UTF-8'), nil, '%<>').result(binding)
+ else
+ return File.read(filepath, :encoding => 'UTF-8')
+ end
+ else
+ raise FileMissing.new(Path.named_path(filename), options)
+ end
+ end
+
+ #
+ # like #file, but allow missing files
+ #
+ def try_file(filename)
+ return file(filename)
+ rescue FileMissing
+ return nil
+ end
+
+ #
+ # returns the location of a file that is stored on the local
+ # host, under PROVIDER_DIR/files.
+ #
+ def local_file_path(path, options={})
+ if path.is_a? Symbol
+ path = [path, @node.name]
+ elsif path.is_a? String
+ # ensure it is prefixed with files/
+ unless path =~ /^files\//
+ path = "files/" + path
+ end
+ end
+ local_path = Path.find_file(path)
+ if local_path.nil?
+ if options[:missing]
+ raise FileMissing.new(Path.named_path(path), options)
+ elsif block_given?
+ yield
+ return local_file_path(path, options) # try again.
+ else
+ Util::log 2, :skipping, "local_file_path(\"#{path}\") because there is no such file."
+ return nil
+ end
+ else
+ return local_path
+ end
+ end
+
+ #
+ # Returns the location of a file once it is deployed via rsync to a
+ # remote server. An internal list of discovered file paths is saved, in
+ # order to rsync these files when needed.
+ #
+ # If the file does not exist, nil is returned.
+ #
+ # If there is a block given and the file does not actually exist, the
+ # block will be yielded to give an opportunity for some code to create the
+ # file.
+ #
+ # For example:
+ #
+ # file_path(:dkim_priv_key) {generate_dkim_key}
+ #
+ # notes:
+ #
+ # * argument 'path' is relative to Path.provider/files or
+ # Path.provider_base/files
+ # * the path returned by this method is absolute
+ # * the path stored for use later by rsync is relative to Path.provider
+ # * if the path does not exist locally, but exists in provider_base,
+ # then the default file from provider_base is copied locally. this
+ # is required for rsync to work correctly.
+ #
+ def remote_file_path(path, options={}, &block)
+ local_path = local_file_path(path, options, &block)
+
+ return nil if local_path.nil?
+
+ # if file is under Path.provider_base, we must copy the default file to
+ # Path.provider in order for rsync to be able to sync the file.
+ if local_path =~ /^#{Regexp.escape(Path.provider_base)}/
+ local_provider_path = local_path.sub(/^#{Regexp.escape(Path.provider_base)}/, Path.provider)
+ FileUtils.mkdir_p File.dirname(local_provider_path), :mode => 0700
+ FileUtils.install local_path, local_provider_path, :mode => 0600
+ Util.log :created, Path.relative_path(local_provider_path)
+ local_path = local_provider_path
+ end
+
+ # ensure directories end with /, important for building rsync command
+ if File.directory?(local_path) && local_path !~ /\/$/
+ local_path += '/'
+ end
+
+ relative_path = Path.relative_path(local_path)
+ relative_path.sub!(/^files\//, '') # remove "files/" prefix
+ @node.file_paths << relative_path
+ return File.join(Leap::Platform.files_dir, relative_path)
+ end
+
+ # deprecated
+ def file_path(path, options={})
+ return remote_file_path(path, options)
+ end
+
+ end
+end \ No newline at end of file
diff --git a/lib/leap_cli/macros/haproxy.rb b/lib/leap_cli/macros/haproxy.rb
new file mode 100644
index 00000000..602ae726
--- /dev/null
+++ b/lib/leap_cli/macros/haproxy.rb
@@ -0,0 +1,73 @@
+# encoding: utf-8
+
+##
+## HAPROXY
+##
+
+module LeapCli
+ module Macro
+
+ #
+ # creates a hash suitable for configuring haproxy. the key is the node name of the server we are proxying to.
+ #
+ # * node_list - a hash of nodes for the haproxy servers
+ # * stunnel_clients - contains the mappings to local ports for each server node.
+ # * non_stunnel_port - in case self is included in node_list, the port to connect to.
+ #
+ # A weight of 100 is used for nodes in the same location,
+ # 10 otherwise.
+ #
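+ # For illustration, a hypothetical result for a single couchdb node in the
+ # same location (stunnel accept port 4000) might look like:
+ #
+ #   {"couch1" => {"backup" => false, "host" => "localhost",
+ #                 "port" => 4000, "weight" => 100, "writable" => true}}
+ #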
+ def haproxy_servers(node_list, stunnel_clients, non_stunnel_port=nil)
+ default_weight = 10
+ local_weight = 100
+
+ # record the hosts_file
+ hostnames(node_list)
+
+ # create a simple map for node name -> local stunnel accept port
+ accept_ports = stunnel_clients.inject({}) do |hsh, stunnel_entry|
+ name = stunnel_entry.first.sub /_[0-9]+$/, ''
+ hsh[name] = stunnel_entry.last['accept_port']
+ hsh
+ end
+
+ # if one of the nodes in the node list is ourself, then there will not be a stunnel to it,
+ # but we need to include it anyway in the haproxy config.
+ if node_list[self.name] && non_stunnel_port
+ accept_ports[self.name] = non_stunnel_port
+ end
+
+ # create the first pass of the servers hash
+ servers = node_list.values.inject(Config::ObjectList.new) do |hsh, node|
+ # make sure we have a port to talk to
+ unless accept_ports[node.name]
+ error "haproxy needs a local port to talk to when connecting to #{node.name}"
+ end
+ weight = default_weight
+ try {
+ weight = local_weight if self.location.name == node.location.name
+ }
+ hsh[node.name] = Config::Object[
+ 'backup', false,
+ 'host', 'localhost',
+ 'port', accept_ports[node.name],
+ 'weight', weight
+ ]
+ if node.services.include?('couchdb')
+ hsh[node.name]['writable'] = node.couch.mode != 'mirror'
+ end
+ hsh
+ end
+
+ # if there are some local servers, make the others backup
+ if servers.detect{|k,v| v.weight == local_weight}
+ servers.each do |k,server|
+ server['backup'] = server['weight'] == default_weight
+ end
+ end
+
+ return servers
+ end
+
+ end
+end
diff --git a/lib/leap_cli/macros/hosts.rb b/lib/leap_cli/macros/hosts.rb
new file mode 100644
index 00000000..963857ae
--- /dev/null
+++ b/lib/leap_cli/macros/hosts.rb
@@ -0,0 +1,90 @@
+# encoding: utf-8
+
+module LeapCli
+ module Macro
+
+ ##
+ ## IPs
+ ##
+
+ #
+ # returns a simple array of all the IPs for the specified node list
+ #
+ def host_ips(node_list)
+ if self.vagrant?
+ node_list = node_list['environment' => 'local']
+ else
+ node_list = node_list['environment' => '!local']
+ end
+ node_list.map {|name, n|
+ [n.ip_address, (manager.facts[name]||{})['ec2_public_ipv4']]
+ }.flatten.compact.uniq
+ end
+
+ ##
+ ## HOSTS
+ ##
+
+ #
+ # records the list of hosts that are encountered for this node
+ #
+ def hostnames(nodes)
+ @referenced_nodes ||= Config::ObjectList.new
+ nodes = listify(nodes)
+ nodes.each_node do |node|
+ @referenced_nodes[node.name] ||= node
+ end
+ return nodes.values.collect {|node| node.domain.name}
+ end
+
+ #
+ # Generates entries needed for updating /etc/hosts on a node (as a hash).
+ #
+ # Argument `nodes` can be nil or a list of nodes. If nil, only include the
+ # IPs of the other nodes this @node has encountered (plus all mx nodes).
+ #
+ # Also, for virtual machines, we use the local address if this @node is in
+ # the same location as the node in question.
+ #
+ # We include the ssh public key for each host, so that the hash can also
+ # be used to generate the /etc/ssh/known_hosts
+ #
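+ # For illustration, a hypothetical entry in the returned hash might be:
+ #
+ #   {"web1" => {"ip_address" => "1.2.3.4",
+ #               "domain_internal" => "web1.example.i",
+ #               "domain_full"     => "web1.example.org",
+ #               "port"            => 22,
+ #               "host_pub_key"    => "ssh-rsa AAAA..."}}
+ #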
+ def hosts_file(nodes=nil)
+ if nodes.nil?
+ if @referenced_nodes && @referenced_nodes.any?
+ nodes = @referenced_nodes
+ nodes = nodes.merge(nodes_like_me[:services => 'mx']) # all nodes always need to communicate with mx nodes.
+ end
+ end
+ return {} unless nodes
+ hosts = {}
+ my_location = @node['location'] ? @node['location']['name'] : nil
+ nodes.each_node do |node|
+ hosts[node.name] = {
+ 'ip_address' => node.ip_address,
+ 'domain_internal' => node.domain.internal,
+ 'domain_full' => node.domain.full,
+ 'port' => node.ssh.port
+ }
+ if node.dns['aliases'] && node.dns['aliases'].any?
+ # include aliases, but without domain.full
+ hosts[node.name]['aliases'] = node.dns['aliases'] - [node.domain.full]
+ end
+ node_location = node['location'] ? node['location']['name'] : nil
+ if my_location == node_location
+ if facts = @node.manager.facts[node.name]
+ if facts['ec2_public_ipv4']
+ hosts[node.name]['ip_address'] = facts['ec2_public_ipv4']
+ end
+ end
+ end
+ host_pub_key = Util::read_file([:node_ssh_pub_key,node.name])
+ if host_pub_key
+ hosts[node.name]['host_pub_key'] = host_pub_key
+ end
+ end
+ hosts
+ end
+
+ end
+end \ No newline at end of file
diff --git a/lib/leap_cli/macros/keys.rb b/lib/leap_cli/macros/keys.rb
new file mode 100644
index 00000000..e7a75cfb
--- /dev/null
+++ b/lib/leap_cli/macros/keys.rb
@@ -0,0 +1,97 @@
+# encoding: utf-8
+
+#
+# Macro for dealing with cryptographic keys
+#
+
+module LeapCli
+ module Macro
+
+ #
+ # return a fingerprint for a key or certificate
+ #
+ def fingerprint(filename, options={})
+ options[:mode] ||= :x509
+ if options[:mode] == :x509
+ "SHA256: " + X509.fingerprint("SHA256", Path.named_path(filename))
+ elsif options[:mode] == :rsa
+ key = OpenSSL::PKey::RSA.new(File.read(filename))
+ Digest::SHA1.new.hexdigest(key.to_der)
+ end
+ end
+
+ ##
+ ## TOR
+ ##
+
+ #
+ # return the path to the tor public key
+ # generating key if it is missing
+ #
+ def tor_public_key_path(path_name, key_type)
+ file_path(path_name) { generate_tor_key(key_type) }
+ end
+
+ #
+ # return the path to the tor private key
+ # generating key if it is missing
+ #
+ def tor_private_key_path(path_name, key_type)
+ file_path(path_name) { generate_tor_key(key_type) }
+ end
+
+ #
+ # Generates a onion_address from a public RSA key file.
+ #
+ # path_name is the named path of the Tor public key.
+ #
+ # Basically, an onion address is nothing more than a base32 encoding
+ # of the first 10 bytes of a sha1 digest of the public key.
+ #
+ # Additionally, Tor ignores the 22 byte header of the public key
+ # before taking the sha1 digest.
+ #
+ def onion_address(path_name)
+ require 'base32'
+ require 'base64'
+ require 'openssl'
+ path = Path.find_file([path_name, self.name])
+ if path && File.exists?(path)
+ public_key_str = File.readlines(path).grep(/^[^-]/).join
+ public_key = Base64.decode64(public_key_str)
+ public_key = public_key.slice(22..-1) # Tor ignores the 22 byte SPKI header
+ sha1sum = Digest::SHA1.new.digest(public_key)
+ Base32.encode(sha1sum.slice(0,10)).downcase
+ else
+ LeapCli.log :warning, 'Tor public key file "%s" does not exist' % Path.named_path([path_name, self.name])
+ end
+ end
+
+ def generate_dkim_key(bit_size=2048)
+ LeapCli.log :generating, "%s bit RSA DKIM key" % bit_size do
+ private_key = OpenSSL::PKey::RSA.new(bit_size)
+ public_key = private_key.public_key
+ LeapCli::Util.write_file! :dkim_priv_key, private_key.to_pem
+ LeapCli::Util.write_file! :dkim_pub_key, public_key.to_pem
+ end
+ end
+
+ private
+
+ def generate_tor_key(key_type)
+ if key_type == 'RSA'
+ require 'certificate_authority'
+ keypair = CertificateAuthority::MemoryKeyMaterial.new
+ bit_size = 1024
+ LeapCli.log :generating, "%s bit RSA Tor key" % bit_size do
+ keypair.generate_key(bit_size)
+ LeapCli::Util.write_file! [:node_tor_priv_key, self.name], keypair.private_key.to_pem
+ LeapCli::Util.write_file! [:node_tor_pub_key, self.name], keypair.public_key.to_pem
+ end
+ else
+ LeapCli.bail! 'tor.key.type of %s is not yet supported' % key_type
+ end
+ end
+
+ end
+end
diff --git a/lib/leap_cli/macros/nodes.rb b/lib/leap_cli/macros/nodes.rb
new file mode 100644
index 00000000..0e23831d
--- /dev/null
+++ b/lib/leap_cli/macros/nodes.rb
@@ -0,0 +1,88 @@
+# encoding: utf-8
+
+##
+## node related macros
+##
+
+module LeapCli
+ module Macro
+
+ #
+ # the list of all the nodes
+ #
+ def nodes
+ env.nodes
+ end
+
+ #
+ # simple alias for global.provider
+ #
+ def provider
+ env.provider
+ end
+
+ #
+ # returns a list of nodes that match the same environment
+ #
+ # if @node.environment is not set, we return other nodes
+ # where environment is not set.
+ #
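+ # For example (hypothetical usage from a node .json file):
+ #
+ #   "couchdb_hosts": "= hostnames(nodes_like_me[:services => 'couchdb'])"
+ #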
+ def nodes_like_me
+ nodes[:environment => @node.environment]
+ end
+
+ #
+ # returns a list of nodes that match the location name
+ # and environment of @node.
+ #
+ def nodes_near_me
+ if @node['location'] && @node['location']['name']
+ nodes_like_me['location.name' => @node.location.name]
+ else
+ nodes_like_me['location' => nil]
+ end
+ end
+
+ #
+ #
+ # picks a node out from the node list in such a way that:
+ #
+ # (1) which nodes picked which nodes is saved in secrets.json
+ # (2) when other nodes call this macro with the same node list, they are guaranteed to get a different node
+ # (3) if all the nodes in node_list have already been picked, a random node from the list is reused.
+ #
+ # if the node_list is empty, an exception is raised.
+ # if node_list size is 1, then that node is returned and nothing is
+ # memorized via the secrets.json file.
+ #
+ # `label` is needed to distinguish between pools of nodes for different purposes.
+ #
+ # TODO: more evenly balance after all the nodes have been picked.
+ #
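+ # A hypothetical usage from a node .json file might look like:
+ #
+ #   "dns_resolver": "= pick_node(:resolver, nodes_like_me[:services => 'dns']).domain.full"
+ #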
+ def pick_node(label, node_list)
+ if node_list.any?
+ if node_list.size == 1
+ return node_list.values.first
+ else
+ secrets_key = "pick_node(:#{label},#{node_list.keys.sort.join(',')})"
+ secrets_value = @manager.secrets.retrieve(secrets_key, @node.environment) || {}
+ secrets_value[@node.name] ||= begin
+ node_to_pick = nil
+ node_list.each_node do |node|
+ next if secrets_value.values.include?(node.name)
+ node_to_pick = node.name
+ end
+ node_to_pick ||= secrets_value.values.shuffle.first # all picked already, so pick a random one.
+ node_to_pick
+ end
+ picked_node_name = secrets_value[@node.name]
+ @manager.secrets.set(secrets_key, secrets_value, @node.environment)
+ return node_list[picked_node_name]
+ end
+ else
+ raise ArgumentError.new('pick_node(node_list): node_list cannot be empty')
+ end
+ end
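+
+  # Hypothetical usage: every node evaluating this gets assigned one of the
+  # nodes running service "b", and the assignment survives redeploys because
+  # it is memorized in secrets.json:
+  #
+  #   pick_node(:b_for_a, nodes_like_me[:services => 'b'])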
+
+ end
+end \ No newline at end of file
diff --git a/lib/leap_cli/macros/provider.rb b/lib/leap_cli/macros/provider.rb
new file mode 100644
index 00000000..4e74da01
--- /dev/null
+++ b/lib/leap_cli/macros/provider.rb
@@ -0,0 +1,90 @@
+#
+# These macros are intended only for use in provider.json, although they are
+# currently loaded in all .json contexts.
+#
+
+module LeapCli
+ module Macro
+
+ #
+ # returns an array of the service names, including only those services that
+ # are enabled for this environment.
+ #
+ def enabled_services
+ manager.env(self.environment).services[:service_type => :user_service].field(:name).select { |service|
+ manager.nodes[:environment => self.environment][:services => service].any?
+ }
+ end
+
+ #
+ # The webapp will not work unless the service level configuration is precisely defined.
+ # Here, we take what the sysadmin has specified in provider.json and clean it up to
+ # ensure it is OK.
+ #
+ # It would be better to add support for JSON schema.
+ #
+ def service_levels()
+ levels = {}
+ provider.service.levels.each do |name, level|
+ if name =~ /^[0-9]+$/
+ name = name.to_i
+ end
+ levels[name] = level_cleanup(name, level.clone)
+ end
+ levels
+ end
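+
+  # Illustrative sketch (values are hypothetical): given this in provider.json
+  #
+  #   "service": {
+  #     "levels": {
+  #       "1": {"name": "free", "description": "Free account",
+  #             "bandwidth": "limited", "services": ["mx"]}
+  #     }
+  #   }
+  #
+  # service_levels() returns the level keyed by the integer 1, with "services"
+  # remapped to the names the webapp expects (here, ["email"]).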
+
+ private
+
+ def print_warning(name, msg)
+ if self.environment
+ provider_str = "provider.json or %s" % ['provider', self.environment, 'json'].join('.')
+ else
+ provider_str = "provider.json"
+ end
+ LeapCli::log :warning, "In #{provider_str}, you have an incorrect definition for service level '#{name}':" do
+ LeapCli::log msg
+ end
+ end
+
+ def level_cleanup(name, level)
+ unless level['name']
+ print_warning(name, 'required field "name" is missing')
+ end
+ unless level['description']
+ print_warning(name, 'required field "description" is missing')
+ end
+ unless level['bandwidth'].nil? || level['bandwidth'] == 'limited'
+ print_warning(name, 'field "bandwidth" must be nil or "limited"')
+ end
+ unless level['rate'].nil? || level['rate'].is_a?(Hash)
+ print_warning(name, 'field "rate" must be nil or a hash (e.g. {"USD":10, "EUR":10})')
+ end
+ possible_services = enabled_services
+ if level['services']
+ level['services'].each do |service|
+ unless possible_services.include? service
+ print_warning(name, "the service '#{service}' does not exist or there are no nodes that provide this service.")
+ LeapCli::Util::bail!
+ end
+ end
+ else
+ level['services'] = possible_services
+ end
+ level['services'] = remap_services(level['services'])
+ level
+ end
+
+ #
+ # the service names that the webapp uses and that leap_platform uses are different. ugh.
+ #
+ SERVICE_MAP = {
+ "mx" => "email",
+ "openvpn" => "eip"
+ }
+ def remap_services(services)
+ services.map {|srv| SERVICE_MAP[srv]}
+ end
+
+ end
+end
diff --git a/lib/leap_cli/macros/secrets.rb b/lib/leap_cli/macros/secrets.rb
new file mode 100644
index 00000000..8d1feb55
--- /dev/null
+++ b/lib/leap_cli/macros/secrets.rb
@@ -0,0 +1,39 @@
+# encoding: utf-8
+
+require 'base32'
+
+module LeapCli
+ module Macro
+
+ #
+ # inserts a named secret, generating it if needed.
+ #
+ # manager.export_secrets should be called later to capture any newly generated secrets.
+ #
+ # +length+ is the character length of the generated password.
+ #
+ def secret(name, length=32)
+ manager.secrets.set(name, @node.environment) { Util::Secret.generate(length) }
+ end
+
+ # inserts a base32-encoded secret, generating it if needed.
+ def base32_secret(name, length=20)
+ manager.secrets.set(name, @node.environment) { Base32.encode(Util::Secret.generate(length)) }
+ end
+
+ # inserts a random number from the given range, generating it if needed (used, for example, to pick a random obfsproxy port)
+ def rand_range(name, range)
+ manager.secrets.set(name, @node.environment) { rand(range) }
+ end
+
+ #
+ # inserts a hexadecimal secret string, generating it if needed.
+ #
+ # +bit_length+ is the number of bits in the secret (i.e. the resulting hex string will be bit_length/4 characters long)
+ #
+ def hex_secret(name, bit_length=128)
+ manager.secrets.set(name, @node.environment) { Util::Secret.generate_hex(bit_length) }
+ end
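+
+  # For example, the default hex_secret(name) yields a 32-character hex string
+  # (128/4), and secret(name) yields a 32-character password. Hypothetical use
+  # from a node .json property:
+  #
+  #   "couch": {"password": "= secret :couch_password"}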
+
+ end
+end \ No newline at end of file
diff --git a/lib/leap_cli/macros/stunnel.rb b/lib/leap_cli/macros/stunnel.rb
new file mode 100644
index 00000000..821bda38
--- /dev/null
+++ b/lib/leap_cli/macros/stunnel.rb
@@ -0,0 +1,106 @@
+##
+## STUNNEL
+##
+
+#
+# About stunnel
+# --------------------------
+#
+# The network looks like this:
+#
+# From the client's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# consumer app -> localhost:accept_port -> connect:connect_port -> ??
+#
+# From the server's perspective:
+#
+# |------- stunnel client --------------| |---------- stunnel server -----------------------|
+# ?? -> *:accept_port -> localhost:connect_port -> service
+#
+
+module LeapCli
+ module Macro
+
+ #
+ # stunnel configuration for the client side.
+ #
+ # +node_list+ is a ObjectList of nodes running stunnel servers.
+ #
+ # +port+ is the real port of the ultimate service running on the servers
+ # that the client wants to connect to.
+ #
+ # * accept_port is the port on localhost to which local clients
+ # can connect. it is auto generated serially.
+ #
+ # * connect_port is the port on the stunnel server to connect to.
+ # it is auto generated from the +port+ argument.
+ #
+ # generates an entry appropriate to be passed directly to
+ # create_resources(stunnel::service, hiera('..'), defaults)
+ #
+ # local ports are automatically generated, starting at 4000
+ # and incrementing in sorted order (by node name).
+ #
+ def stunnel_client(node_list, port, options={})
+ @next_stunnel_port ||= 4000
+ node_list = listify(node_list)
+ hostnames(node_list) # record the hosts
+ result = Config::ObjectList.new
+ node_list.each_node do |node|
+ if node.name != self.name || options[:include_self]
+ s_port = stunnel_port(port)
+ result["#{node.name}_#{port}"] = Config::Object[
+ 'accept_port', @next_stunnel_port,
+ 'connect', node.domain.internal,
+ 'connect_port', s_port,
+ 'original_port', port
+ ]
+ manager.connections.add(:from => @node.ip_address, :to => node.ip_address, :port => s_port)
+ @next_stunnel_port += 1
+ end
+ end
+ result
+ end
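+
+  # Sketch of one generated entry (node name and service port hypothetical):
+  # stunnel_client(nodes_like_me[:services => 'couchdb'], 5984) could yield,
+  # for a server node named "couch1":
+  #
+  #   "couch1_5984" => {
+  #     "accept_port"   => 4000,      # next free local port, counted up from 4000
+  #     "connect"       => couch1's internal domain (node.domain.internal),
+  #     "connect_port"  => 15984,     # stunnel_port(5984)
+  #     "original_port" => 5984
+  #   }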
+
+ #
+ # generates a stunnel server entry.
+ #
+ # +port+ is the real port of the targeted service.
+ #
+ # * `accept_port` is the publicly bound port
+ # * `connect_port` is the port that the local service is running on.
+ #
+ def stunnel_server(port)
+ {
+ "accept_port" => stunnel_port(port),
+ "connect_port" => port
+ }
+ end
+
+ #
+ # lists the ips that connect to this node, on particular ports.
+ #
+ def stunnel_firewall
+ manager.connections.select {|connection|
+ connection['to'] == @node.ip_address
+ }
+ end
+
+ private
+
+ #
+ # maps a real port to a stunnel port (used as the connect_port in the client config
+ # and the accept_port in the server config)
+ #
+ def stunnel_port(port)
+ port = port.to_i
+ if port < 50000
+ return port + 10000
+ else
+ return port - 10000
+ end
+ end
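+
+  # e.g. stunnel_port(5984) => 15984 and stunnel_port(60000) => 50000.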
+
+ end
+end \ No newline at end of file
diff --git a/lib/puppet/provider/vcsrepo.rb b/lib/puppet/provider/vcsrepo.rb
deleted file mode 100644
index 8793e632..00000000
--- a/lib/puppet/provider/vcsrepo.rb
+++ /dev/null
@@ -1,42 +0,0 @@
-require 'tmpdir'
-require 'digest/md5'
-require 'fileutils'
-
-# Abstract
-class Puppet::Provider::Vcsrepo < Puppet::Provider
-
- private
-
- def set_ownership
- owner = @resource.value(:owner) || nil
- group = @resource.value(:group) || nil
- FileUtils.chown_R(owner, group, @resource.value(:path))
- end
-
- def path_exists?
- File.directory?(@resource.value(:path))
- end
-
- def path_empty?
- # Path is empty if the only entries are '.' and '..'
- d = Dir.new(@resource.value(:path))
- d.read # should return '.'
- d.read # should return '..'
- d.read.nil?
- end
-
- # Note: We don't rely on Dir.chdir's behavior of automatically returning the
- # value of the last statement -- for easier stubbing.
- def at_path(&block) #:nodoc:
- value = nil
- Dir.chdir(@resource.value(:path)) do
- value = yield
- end
- value
- end
-
- def tempdir
- @tempdir ||= File.join(Dir.tmpdir, 'vcsrepo-' + Digest::MD5.hexdigest(@resource.value(:path)))
- end
-
-end
diff --git a/lib/puppet/provider/vcsrepo/bzr.rb b/lib/puppet/provider/vcsrepo/bzr.rb
deleted file mode 100644
index 797d84d2..00000000
--- a/lib/puppet/provider/vcsrepo/bzr.rb
+++ /dev/null
@@ -1,93 +0,0 @@
-require File.join(File.dirname(__FILE__), '..', 'vcsrepo')
-
-Puppet::Type.type(:vcsrepo).provide(:bzr, :parent => Puppet::Provider::Vcsrepo) do
- desc "Supports Bazaar repositories"
-
- commands :bzr => 'bzr'
- has_features :reference_tracking
-
- def create
- if !@resource.value(:source)
- create_repository(@resource.value(:path))
- else
- clone_repository(@resource.value(:revision))
- end
- end
-
- def working_copy_exists?
- File.directory?(File.join(@resource.value(:path), '.bzr'))
- end
-
- def exists?
- working_copy_exists?
- end
-
- def destroy
- FileUtils.rm_rf(@resource.value(:path))
- end
-
- def revision
- at_path do
- current_revid = bzr('version-info')[/^revision-id:\s+(\S+)/, 1]
- desired = @resource.value(:revision)
- begin
- desired_revid = bzr('revision-info', desired).strip.split(/\s+/).last
- rescue Puppet::ExecutionFailure
- # Possible revid available during update (but definitely not current)
- desired_revid = nil
- end
- if current_revid == desired_revid
- desired
- else
- current_revid
- end
- end
- end
-
- def revision=(desired)
- at_path do
- begin
- bzr('update', '-r', desired)
- rescue Puppet::ExecutionFailure
- bzr('update', '-r', desired, ':parent')
- end
- end
- update_owner
- end
-
- def latest
- at_path do
- bzr('version-info', ':parent')[/^revision-id:\s+(\S+)/, 1]
- end
- end
-
- def latest?
- at_path do
- return self.revision == self.latest
- end
- end
-
- private
-
- def create_repository(path)
- bzr('init', path)
- update_owner
- end
-
- def clone_repository(revision)
- args = ['branch']
- if revision
- args.push('-r', revision)
- end
- args.push(@resource.value(:source),
- @resource.value(:path))
- bzr(*args)
- update_owner
- end
-
- def update_owner
- if @resource.value(:owner) or @resource.value(:group)
- set_ownership
- end
- end
-end
diff --git a/lib/puppet/provider/vcsrepo/cvs.rb b/lib/puppet/provider/vcsrepo/cvs.rb
deleted file mode 100644
index 7a8f6ef3..00000000
--- a/lib/puppet/provider/vcsrepo/cvs.rb
+++ /dev/null
@@ -1,135 +0,0 @@
-require File.join(File.dirname(__FILE__), '..', 'vcsrepo')
-
-Puppet::Type.type(:vcsrepo).provide(:cvs, :parent => Puppet::Provider::Vcsrepo) do
- desc "Supports CVS repositories/workspaces"
-
- commands :cvs => 'cvs'
- has_features :gzip_compression, :reference_tracking, :modules, :cvs_rsh, :user
-
- def create
- if !@resource.value(:source)
- create_repository(@resource.value(:path))
- else
- checkout_repository
- end
- update_owner
- end
-
- def exists?
- if @resource.value(:source)
- directory = File.join(@resource.value(:path), 'CVS')
- else
- directory = File.join(@resource.value(:path), 'CVSROOT')
- end
- File.directory?(directory)
- end
-
- def working_copy_exists?
- File.directory?(File.join(@resource.value(:path), 'CVS'))
- end
-
- def destroy
- FileUtils.rm_rf(@resource.value(:path))
- end
-
- def latest?
- Puppet.debug "Checking for updates because 'ensure => latest'"
- at_path do
- # We cannot use -P to prune empty dirs, otherwise
- # CVS would report those as "missing", regardless
- # if they have contents or updates.
- is_current = (runcvs('-nq', 'update', '-d').strip == "")
- if (!is_current) then Puppet.debug "There are updates available on the checkout's current branch/tag." end
- return is_current
- end
- end
-
- def latest
- # CVS does not have a concept like commit-IDs or change
- # sets, so we can only have the current branch name (or the
- # requested one, if that differs) as the "latest" revision.
- should = @resource.value(:revision)
- current = self.revision
- return should != current ? should : current
- end
-
- def revision
- if !@rev
- if File.exist?(tag_file)
- contents = File.read(tag_file).strip
- # Note: Doesn't differentiate between N and T entries
- @rev = contents[1..-1]
- else
- @rev = 'HEAD'
- end
- Puppet.debug "Checkout is on branch/tag '#{@rev}'"
- end
- return @rev
- end
-
- def revision=(desired)
- at_path do
- runcvs('update', '-dr', desired, '.')
- update_owner
- @rev = desired
- end
- end
-
- private
-
- def tag_file
- File.join(@resource.value(:path), 'CVS', 'Tag')
- end
-
- def checkout_repository
- dirname, basename = File.split(@resource.value(:path))
- Dir.chdir(dirname) do
- args = ['-d', @resource.value(:source)]
- if @resource.value(:compression)
- args.push('-z', @resource.value(:compression))
- end
- args.push('checkout')
- if @resource.value(:revision)
- args.push('-r', @resource.value(:revision))
- end
- args.push('-d', basename, module_name)
- runcvs(*args)
- end
- end
-
- # When the source:
- # * Starts with ':' (eg, :pserver:...)
- def module_name
- if (m = @resource.value(:module))
- m
- elsif (source = @resource.value(:source))
- source[0, 1] == ':' ? File.basename(source) : '.'
- end
- end
-
- def create_repository(path)
- runcvs('-d', path, 'init')
- end
-
- def update_owner
- if @resource.value(:owner) or @resource.value(:group)
- set_ownership
- end
- end
-
- def runcvs(*args)
- if @resource.value(:cvs_rsh)
- Puppet.debug "Using CVS_RSH = " + @resource.value(:cvs_rsh)
- e = { :CVS_RSH => @resource.value(:cvs_rsh) }
- else
- e = {}
- end
-
- if @resource.value(:user) and @resource.value(:user) != Facter['id'].value
- Puppet.debug "Running as user " + @resource.value(:user)
- Puppet::Util::Execution.execute([:cvs, *args], :uid => @resource.value(:user), :custom_environment => e)
- else
- Puppet::Util::Execution.execute([:cvs, *args], :custom_environment => e)
- end
- end
-end
diff --git a/lib/puppet/provider/vcsrepo/dummy.rb b/lib/puppet/provider/vcsrepo/dummy.rb
deleted file mode 100644
index 27bfbbed..00000000
--- a/lib/puppet/provider/vcsrepo/dummy.rb
+++ /dev/null
@@ -1,12 +0,0 @@
-require File.join(File.dirname(__FILE__), '..', 'vcsrepo')
-
-Puppet::Type.type(:vcsrepo).provide(:dummy, :parent => Puppet::Provider::Vcsrepo) do
- desc "Dummy default provider"
-
- defaultfor :feature => :posix
-
- def working_copy_exists?
- providers = @resource.class.providers.map{|x| x.to_s}.sort.reject{|x| x == "dummy"}.join(", ") rescue "none"
- raise("vcsrepo resource must have a provider, available: #{providers}")
- end
-end
diff --git a/lib/puppet/provider/vcsrepo/git.rb b/lib/puppet/provider/vcsrepo/git.rb
deleted file mode 100644
index 9d18b474..00000000
--- a/lib/puppet/provider/vcsrepo/git.rb
+++ /dev/null
@@ -1,483 +0,0 @@
-require File.join(File.dirname(__FILE__), '..', 'vcsrepo')
-
-Puppet::Type.type(:vcsrepo).provide(:git, :parent => Puppet::Provider::Vcsrepo) do
- desc "Supports Git repositories"
-
- has_command(:git, 'git') do
- environment({ 'HOME' => ENV['HOME'] })
- end
-
- has_features :bare_repositories, :reference_tracking, :ssh_identity, :multiple_remotes, :user, :depth, :branch, :submodules
-
- def create
- if @resource.value(:revision) and ensure_bare_or_mirror?
- fail("Cannot set a revision (#{@resource.value(:revision)}) on a bare repository")
- end
- if !@resource.value(:source)
- if @resource.value(:ensure) == :mirror
- fail("Cannot init repository with mirror option, try bare instead")
- end
-
- init_repository(@resource.value(:path))
- else
- clone_repository(default_url, @resource.value(:path))
- update_remotes
-
- if @resource.value(:revision)
- checkout
- end
- if !ensure_bare_or_mirror? && @resource.value(:submodules) == :true
- update_submodules
- end
-
- end
- update_owner_and_excludes
- end
-
- def destroy
- FileUtils.rm_rf(@resource.value(:path))
- end
-
- # Checks to see if the current revision is equal to the revision on the
- # remote (whether on a branch, tag, or reference)
- #
- # @return [Boolean] Returns true if the repo is on the latest revision
- def latest?
- return revision == latest_revision
- end
-
- # Just gives the `should` value that we should be setting the repo to if
- # latest? returns false
- #
- # @return [String] Returns the target sha/tag/branch
- def latest
- if not @resource.value(:revision) and branch = on_branch?
- return branch
- else
- return @resource.value(:revision)
- end
- end
-
- # Get the current revision of the repo (tag/branch/sha)
- #
- # @return [String] Returns the branch/tag if the current sha matches the
- # remote; otherwise returns the current sha.
- def revision
- #HEAD is the default, but lets just be explicit here.
- get_revision('HEAD')
- end
-
- # Is passed the desired reference, whether a tag, rev, or branch. Should
- # handle transitions from a rev/branch/tag to a rev/branch/tag. Detached
- # heads should be treated like bare revisions.
- #
- # @param [String] desired The desired revision to which the repo should be
- # set.
- def revision=(desired)
- #just checkout tags and shas; fetch has already happened so they should be updated.
- checkout(desired)
- #branches require more work.
- if local_branch_revision?(desired)
- #reset instead of pull to avoid merge conflicts. assuming remote is
- #updated and authoritative.
- #TODO might be worthwhile to have an allow_local_changes param to decide
- #whether to reset or pull when we're ensuring latest.
- if @resource.value(:source)
- at_path { git_with_identity('reset', '--hard', "#{@resource.value(:remote)}/#{desired}") }
- else
- at_path { git_with_identity('reset', '--hard', "#{desired}") }
- end
- end
- #TODO Would this ever reach here if it is bare?
- if !ensure_bare_or_mirror? && @resource.value(:submodules) == :true
- update_submodules
- end
- update_owner_and_excludes
- end
-
- def bare_exists?
- bare_git_config_exists? && !working_copy_exists?
- end
-
- def ensure_bare_or_mirror?
- [:bare, :mirror].include? @resource.value(:ensure)
- end
-
- # If :source is set to a hash (for supporting multiple remotes),
- # we search for the URL for :remote. If it doesn't exist,
- # we throw an error. If :source is just a string, we use that
- # value for the default URL.
- def default_url
- if @resource.value(:source).is_a?(Hash)
- if @resource.value(:source).has_key?(@resource.value(:remote))
- @resource.value(:source)[@resource.value(:remote)]
- else
- fail("You must specify the URL for #{@resource.value(:remote)} in the :source hash")
- end
- else
- @resource.value(:source)
- end
- end
-
- def working_copy_exists?
- if @resource.value(:source) and File.exists?(File.join(@resource.value(:path), '.git', 'config'))
- File.readlines(File.join(@resource.value(:path), '.git', 'config')).grep(/#{Regexp.escape(default_url)}/).any?
- else
- File.directory?(File.join(@resource.value(:path), '.git'))
- end
- end
-
- def exists?
- working_copy_exists? || bare_exists?
- end
-
- def update_remote_url(remote_name, remote_url)
- do_update = false
- current = git_with_identity('config', '-l')
-
- unless remote_url.nil?
- # Check if remote exists at all, regardless of URL.
- # If remote doesn't exist, add it
- if not current.include? "remote.#{remote_name}.url"
- git_with_identity('remote','add', remote_name, remote_url)
- return true
-
- # If remote exists, but URL doesn't match, update URL
- elsif not current.include? "remote.#{remote_name}.url=#{remote_url}"
- git_with_identity('remote','set-url', remote_name, remote_url)
- return true
- else
- return false
- end
- end
-
- end
-
- def update_remotes
- do_update = false
-
- # If supplied source is a hash of remote name and remote url pairs, then
- # we loop around the hash. Otherwise, we assume single url specified
- # in source property
- if @resource.value(:source).is_a?(Hash)
- @resource.value(:source).keys.sort.each do |remote_name|
- remote_url = @resource.value(:source)[remote_name]
- at_path { do_update |= update_remote_url(remote_name, remote_url) }
- end
- else
- at_path { do_update |= update_remote_url(@resource.value(:remote), @resource.value(:source)) }
- end
-
- # If at least one remote was added or updated, then we must
- # call the 'git remote update' command
- if do_update == true
- at_path { git_with_identity('remote','update') }
- end
-
- end
-
- def update_references
- at_path do
- update_remotes
- git_with_identity('fetch', @resource.value(:remote))
- git_with_identity('fetch', '--tags', @resource.value(:remote))
- update_owner_and_excludes
- end
- end
-
- private
-
- def valid_repo?
- Dir.chdir(@resource.value(:path)){ system('git rev-parse > /dev/null 2>&1')}
- end
-
- def bare_git_config_exists?
- File.exist?(File.join(@resource.value(:path), 'config')) && valid_repo?
- end
-
- # @!visibility private
- def clone_repository(source, path)
- check_force
- args = ['clone']
- if @resource.value(:depth) and @resource.value(:depth).to_i > 0
- args.push('--depth', @resource.value(:depth).to_s)
- if @resource.value(:revision)
- args.push('--branch', @resource.value(:revision).to_s)
- end
- end
- if @resource.value(:branch)
- args.push('--branch', @resource.value(:branch).to_s)
- end
-
- case @resource.value(:ensure)
- when :bare then args << '--bare'
- when :mirror then args << '--mirror'
- end
-
- if @resource.value(:remote) != 'origin'
- args.push('--origin', @resource.value(:remote))
- end
- if !working_copy_exists?
- args.push(source, path)
- Dir.chdir("/") do
- git_with_identity(*args)
- end
- else
- notice "Repo has already been cloned"
- end
- end
-
- # @!visibility private
- def check_force
- if path_exists? and not path_empty?
- if @resource.value(:force) && !valid_repo?
- notice "Removing %s to replace with vcsrepo." % @resource.value(:path)
- destroy
- else
- raise Puppet::Error, "Could not create repository (non-repository at path)"
- end
- end
- end
-
- # @!visibility private
- def init_repository(path)
- check_force
- if @resource.value(:ensure) == :bare && working_copy_exists?
- convert_working_copy_to_bare
- elsif @resource.value(:ensure) == :present && bare_exists?
- convert_bare_to_working_copy
- else
- # normal init
- FileUtils.mkdir(@resource.value(:path))
- FileUtils.chown(@resource.value(:user), nil, @resource.value(:path)) if @resource.value(:user)
- args = ['init']
- if @resource.value(:ensure) == :bare
- args << '--bare'
- end
- at_path do
- git_with_identity(*args)
- end
- end
- end
-
- # Convert working copy to bare
- #
- # Moves:
- # <path>/.git
- # to:
- # <path>/
- # @!visibility private
- def convert_working_copy_to_bare
- notice "Converting working copy repository to bare repository"
- FileUtils.mv(File.join(@resource.value(:path), '.git'), tempdir)
- FileUtils.rm_rf(@resource.value(:path))
- FileUtils.mv(tempdir, @resource.value(:path))
- end
-
- # Convert bare to working copy
- #
- # Moves:
- # <path>/
- # to:
- # <path>/.git
- # @!visibility private
- def convert_bare_to_working_copy
- notice "Converting bare repository to working copy repository"
- FileUtils.mv(@resource.value(:path), tempdir)
- FileUtils.mkdir(@resource.value(:path))
- FileUtils.mv(tempdir, File.join(@resource.value(:path), '.git'))
- if commits_in?(File.join(@resource.value(:path), '.git'))
- reset('HEAD')
- git_with_identity('checkout', '--force')
- update_owner_and_excludes
- end
- end
-
- # @!visibility private
- def commits_in?(dot_git)
- Dir.glob(File.join(dot_git, 'objects/info/*'), File::FNM_DOTMATCH) do |e|
- return true unless %w(. ..).include?(File::basename(e))
- end
- false
- end
-
- # Will checkout a rev/branch/tag using the locally cached versions. Does not
- # handle upstream branch changes
- # @!visibility private
- def checkout(revision = @resource.value(:revision))
- if !local_branch_revision?(revision) && remote_branch_revision?(revision)
- #non-locally existant branches (perhaps switching to a branch that has never been checked out)
- at_path { git_with_identity('checkout', '--force', '-b', revision, '--track', "#{@resource.value(:remote)}/#{revision}") }
- else
- #tags, locally existant branches (perhaps outdated), and shas
- at_path { git_with_identity('checkout', '--force', revision) }
- end
- end
-
- # @!visibility private
- def reset(desired)
- at_path do
- git_with_identity('reset', '--hard', desired)
- end
- end
-
- # @!visibility private
- def update_submodules
- at_path do
- git_with_identity('submodule', 'update', '--init', '--recursive')
- end
- end
-
- # Determines if the branch exists at the upstream but has not yet been locally committed
- # @!visibility private
- def remote_branch_revision?(revision = @resource.value(:revision))
- # git < 1.6 returns '#{@resource.value(:remote)}/#{revision}'
- # git 1.6+ returns 'remotes/#{@resource.value(:remote)}/#{revision}'
- branch = at_path { branches.grep /(remotes\/)?#{@resource.value(:remote)}\/#{revision}$/ }
- branch unless branch.empty?
- end
-
- # Determines if the branch is already cached locally
- # @!visibility private
- def local_branch_revision?(revision = @resource.value(:revision))
- at_path { branches.include?(revision) }
- end
-
- # @!visibility private
- def tag_revision?(revision = @resource.value(:revision))
- at_path { tags.include?(revision) }
- end
-
- # @!visibility private
- def branches
- at_path { git_with_identity('branch', '-a') }.gsub('*', ' ').split(/\n/).map { |line| line.strip }
- end
-
- # git < 2.4 returns 'detached from'
- # git 2.4+ returns 'HEAD detached at'
- # @!visibility private
- def on_branch?
- at_path {
- matches = git_with_identity('branch', '-a').match /\*\s+(.*)/
- matches[1] unless matches[1].match /(\(detached from|\(HEAD detached at|\(no branch)/
- }
- end
-
- # @!visibility private
- def tags
- at_path { git_with_identity('tag', '-l') }.split(/\n/).map { |line| line.strip }
- end
-
- # @!visibility private
- def set_excludes
- # Excludes may be an Array or a String.
- at_path do
- open('.git/info/exclude', 'w') do |f|
- if @resource.value(:excludes).respond_to?(:each)
- @resource.value(:excludes).each { |ex| f.puts ex }
- else
- f.puts @resource.value(:excludes)
- end
- end
- end
- end
-
- # Finds the latest revision or sha of the current branch if on a branch, or
- # of HEAD otherwise.
- # @note Calls create which can forcibly destroy and re-clone the repo if
- # force => true
- # @see get_revision
- #
- # @!visibility private
- # @return [String] Returns the output of get_revision
- def latest_revision
- #TODO Why is create called here anyway?
- create if @resource.value(:force) && working_copy_exists?
- create if !working_copy_exists?
-
- if branch = on_branch?
- return get_revision("#{@resource.value(:remote)}/#{branch}")
- else
- return get_revision
- end
- end
-
- # Returns the current revision given if the revision is a tag or branch and
- # matches the current sha. If the current sha does not match the sha of a tag
- # or branch, then it will just return the sha (ie, is not in sync)
- #
- # @!visibility private
- #
- # @param [String] rev The revision of which to check if it is current
- # @return [String] Returns the tag/branch of the current repo if it's up to
- # date; otherwise returns the sha of the requested revision.
- def get_revision(rev = 'HEAD')
- if @resource.value(:source)
- update_references
- else
- status = at_path { git_with_identity('status')}
- is_it_new = status =~ /Initial commit/
- if is_it_new
- status =~ /On branch (.*)/
- branch = $1
- return branch
- end
- end
- current = at_path { git_with_identity('rev-parse', rev).strip }
- if @resource.value(:revision)
- if tag_revision?
- # git-rev-parse will give you the hash of the tag object itself rather
- # than the commit it points to by default. Using tag^0 will return the
- # actual commit.
- canonical = at_path { git_with_identity('rev-parse', "#{@resource.value(:revision)}^0").strip }
- elsif local_branch_revision?
- canonical = at_path { git_with_identity('rev-parse', @resource.value(:revision)).strip }
- elsif remote_branch_revision?
- canonical = at_path { git_with_identity('rev-parse', "#{@resource.value(:remote)}/#{@resource.value(:revision)}").strip }
- else
- #look for a sha (could match invalid shas)
- canonical = at_path { git_with_identity('rev-parse', '--revs-only', @resource.value(:revision)).strip }
- end
- fail("#{@resource.value(:revision)} is not a local or remote ref") if canonical.nil? or canonical.empty?
- current = @resource.value(:revision) if current == canonical
- end
- return current
- end
-
- # @!visibility private
- def update_owner_and_excludes
- if @resource.value(:owner) or @resource.value(:group)
- set_ownership
- end
- if @resource.value(:excludes)
- set_excludes
- end
- end
-
- # @!visibility private
- def git_with_identity(*args)
- if @resource.value(:identity)
- Tempfile.open('git-helper', Puppet[:statedir]) do |f|
- f.puts '#!/bin/sh'
- f.puts 'export SSH_AUTH_SOCKET='
- f.puts "exec ssh -oStrictHostKeyChecking=no -oPasswordAuthentication=no -oKbdInteractiveAuthentication=no -oChallengeResponseAuthentication=no -oConnectTimeout=120 -i #{@resource.value(:identity)} $*"
- f.close
-
- FileUtils.chmod(0755, f.path)
- env_save = ENV['GIT_SSH']
- ENV['GIT_SSH'] = f.path
-
- ret = git(*args)
-
- ENV['GIT_SSH'] = env_save
-
- return ret
- end
- elsif @resource.value(:user) and @resource.value(:user) != Facter['id'].value
- env = Etc.getpwnam(@resource.value(:user))
- Puppet::Util::Execution.execute("git #{args.join(' ')}", :uid => @resource.value(:user), :failonfail => true, :custom_environment => {'HOME' => env['dir']})
- else
- git(*args)
- end
- end
-end
diff --git a/lib/puppet/provider/vcsrepo/hg.rb b/lib/puppet/provider/vcsrepo/hg.rb
deleted file mode 100644
index 294c2a97..00000000
--- a/lib/puppet/provider/vcsrepo/hg.rb
+++ /dev/null
@@ -1,130 +0,0 @@
-require File.join(File.dirname(__FILE__), '..', 'vcsrepo')
-
-Puppet::Type.type(:vcsrepo).provide(:hg, :parent => Puppet::Provider::Vcsrepo) do
- desc "Supports Mercurial repositories"
-
- commands :hg => 'hg'
-
- has_features :reference_tracking, :ssh_identity, :user, :basic_auth
-
- def create
- if !@resource.value(:source)
- create_repository(@resource.value(:path))
- else
- clone_repository(@resource.value(:revision))
- end
- update_owner
- end
-
- def working_copy_exists?
- File.directory?(File.join(@resource.value(:path), '.hg'))
- end
-
- def exists?
- working_copy_exists?
- end
-
- def destroy
- FileUtils.rm_rf(@resource.value(:path))
- end
-
- def latest?
- at_path do
- return self.revision == self.latest
- end
- end
-
- def latest
- at_path do
- begin
- hg_wrapper('incoming', '--branch', '.', '--newest-first', '--limit', '1', { :remote => true })[/^changeset:\s+(?:-?\d+):(\S+)/m, 1]
- rescue Puppet::ExecutionFailure
- # If there are no new changesets, return the current nodeid
- self.revision
- end
- end
- end
-
- def revision
- at_path do
- current = hg_wrapper('parents')[/^changeset:\s+(?:-?\d+):(\S+)/m, 1]
- desired = @resource.value(:revision)
- if desired
- # Return the tag name if it maps to the current nodeid
- mapped = hg_wrapper('tags')[/^#{Regexp.quote(desired)}\s+\d+:(\S+)/m, 1]
- if current == mapped
- desired
- else
- current
- end
- else
- current
- end
- end
- end
-
- def revision=(desired)
- at_path do
- begin
- hg_wrapper('pull', { :remote => true })
- rescue
- end
- begin
- hg_wrapper('merge')
- rescue Puppet::ExecutionFailure
- # If there's nothing to merge, just skip
- end
- hg_wrapper('update', '--clean', '-r', desired)
- end
- update_owner
- end
-
- private
-
- def create_repository(path)
- hg_wrapper('init', path)
- end
-
- def clone_repository(revision)
- args = ['clone']
- if revision
- args.push('-u', revision)
- end
- args.push(@resource.value(:source),
- @resource.value(:path))
- args.push({ :remote => true })
- hg_wrapper(*args)
- end
-
- def update_owner
- if @resource.value(:owner) or @resource.value(:group)
- set_ownership
- end
- end
-
- def hg_wrapper(*args)
- options = { :remote => false }
- if args.length > 0 and args[-1].is_a? Hash
- options.merge!(args.pop)
- end
-
- if @resource.value(:basic_auth_username) && @resource.value(:basic_auth_password)
- args += [
- "--config", "\"auth.x.prefix=#{@resource.value(:source)}\"",
- "--config", "\"auth.x.username=#{@resource.value(:basic_auth_username)}\"",
- "--config", "\"auth.x.password=#{@resource.value(:basic_auth_password)}\"",
- "--config", "\"auth.x.schemes=http https\""
- ]
- end
-
- if options[:remote] and @resource.value(:identity)
- args += ["--ssh", "ssh -oStrictHostKeyChecking=no -oPasswordAuthentication=no -oKbdInteractiveAuthentication=no -oChallengeResponseAuthentication=no -i #{@resource.value(:identity)}"]
- end
- if @resource.value(:user) and @resource.value(:user) != Facter['id'].value
- args.map! { |a| if a =~ /\s/ then "'#{a}'" else a end } # Adds quotes to arguments with whitespaces.
- Puppet::Util::Execution.execute("hg #{args.join(' ')}", :uid => @resource.value(:user), :failonfail => true)
- else
- hg(*args)
- end
- end
-end
diff --git a/lib/puppet/provider/vcsrepo/p4.rb b/lib/puppet/provider/vcsrepo/p4.rb
deleted file mode 100644
index b429bcbb..00000000
--- a/lib/puppet/provider/vcsrepo/p4.rb
+++ /dev/null
@@ -1,278 +0,0 @@
-require File.join(File.dirname(__FILE__), '..', 'vcsrepo')
-
-Puppet::Type.type(:vcsrepo).provide(:p4, :parent => Puppet::Provider::Vcsrepo) do
- desc "Supports Perforce depots"
-
- has_features :filesystem_types, :reference_tracking, :p4config
-
- def create
- # create or update client
- create_client(client_name)
-
- # if source provided, sync client
- source = @resource.value(:source)
- if source
- revision = @resource.value(:revision)
- sync_client(source, revision)
- end
-
- update_owner
- end
-
- def working_copy_exists?
- # Check if the server is there, or raise error
- p4(['info'], {:marshal => false})
-
- # Check if workspace is setup
- args = ['where']
- args.push(@resource.value(:path) + "...")
- hash = p4(args, {:raise => false})
-
- return (hash['code'] != "error")
- end
-
- def exists?
- working_copy_exists?
- end
-
- def destroy
- args = ['client']
- args.push('-d', '-f')
- args.push(client_name)
- p4(args)
- FileUtils.rm_rf(@resource.value(:path))
- end
-
- def latest?
- rev = self.revision
- if rev
- (rev >= self.latest)
- else
- true
- end
- end
-
- def latest
- args = ['changes']
- args.push('-m1', @resource.value(:source))
- hash = p4(args)
-
- return hash['change'].to_i
- end
-
- def revision
- args = ['cstat']
- args.push(@resource.value(:source))
- hash = p4(args, {:marshal => false})
- hash = marshal_cstat(hash)
-
- revision = 0
- if hash && hash['code'] != 'error'
- hash['data'].each do |c|
- if c['status'] == 'have'
- change = c['change'].to_i
- revision = change if change > revision
- end
- end
- end
- return revision
- end
-
- def revision=(desired)
- sync_client(@resource.value(:source), desired)
- update_owner
- end
-
- private
-
- def update_owner
- if @resource.value(:owner) or @resource.value(:group)
- set_ownership
- end
- end
-
- # Sync the client workspace files to head or specified revision.
- # Params:
- # +source+:: Depot path to sync
- # +revision+:: Perforce change list to sync to (optional)
- def sync_client(source, revision)
- Puppet.debug "Syncing: #{source}"
- args = ['sync']
- if revision
- args.push(source + "@#{revision}")
- else
- args.push(source)
- end
- p4(args)
- end
-
- # Returns the name of the Perforce client workspace
- def client_name
- p4config = @resource.value(:p4config)
-
- # default (generated) client name
- path = @resource.value(:path)
- host = Facter.value('hostname')
- default = "puppet-" + Digest::MD5.hexdigest(path + host)
-
- # check config for client name
- set_client = nil
- if p4config && File.file?(p4config)
- open(p4config) do |f|
- m = f.grep(/^P4CLIENT=/).pop
- p = /^P4CLIENT=(.*)$/
- set_client = p.match(m)[1] if m
- end
- end
-
- return set_client || ENV['P4CLIENT'] || default
- end
-
- # Create (or update) a client workspace spec.
- # If a client name is not provided then a hash based on the path is used.
- # Params:
- # +client+:: Name of client workspace
- # +path+:: The Root location of the Perforce client workspace
- def create_client(client)
- Puppet.debug "Creating client: #{client}"
-
- # fetch client spec
- hash = parse_client(client)
- hash['Root'] = @resource.value(:path)
- hash['Description'] = "Generated by Puppet VCSrepo"
-
- # check if source is a Stream
- source = @resource.value(:source)
- if source
- parts = source.split(/\//)
- if parts && parts.length >= 4
- source = "//" + parts[2] + "/" + parts[3]
- streams = p4(['streams', source], {:raise => false})
- if streams['code'] == "stat"
- hash['Stream'] = streams['Stream']
- notice "Streams" + streams['Stream'].inspect
- end
- end
- end
-
- # save client spec
- save_client(hash)
- end
-
-
- # Fetches a client workspace spec from Perforce and returns a hash map representation.
- # Params:
- # +client+:: name of the client workspace
- def parse_client(client)
- args = ['client']
- args.push('-o', client)
- hash = p4(args)
-
- return hash
- end
-
-
- # Saves the client workspace spec from the given hash
- # Params:
- # +hash+:: hash map of client spec
- def save_client(hash)
- spec = String.new
- view = "\nView:\n"
-
- hash.keys.sort.each do |k|
- v = hash[k]
- next if( k == "code" )
- if(k.to_s =~ /View/ )
- view += "\t#{v}\n"
- else
- spec += "#{k.to_s}: #{v.to_s}\n"
- end
- end
- spec += view
-
- args = ['client']
- args.push('-i')
- p4(args, {:input => spec, :marshal => false})
- end
-
- # Sets Perforce Configuration environment.
- # P4CLIENT generated, but overwritten if defined in config.
- def config
- p4config = @resource.value(:p4config)
-
- cfg = Hash.new
- cfg.store 'P4CONFIG', p4config if p4config
- cfg.store 'P4CLIENT', client_name
- return cfg
- end
-
- def p4(args, options = {})
- # Merge custom options with defaults
- opts = {
- :raise => true, # Raise errors
- :marshal => true, # Marshal output
- }.merge(options)
-
- cmd = ['p4']
- cmd.push '-R' if opts[:marshal]
- cmd.push args
- cmd_str = cmd.respond_to?(:join) ? cmd.join(' ') : cmd
-
- Puppet.debug "environment: #{config}"
- Puppet.debug "command: #{cmd_str}"
-
- hash = Hash.new
- Open3.popen3(config, cmd_str) do |i, o, e, t|
- # Send input stream if provided
- if(opts[:input])
- Puppet.debug "input:\n" + opts[:input]
- i.write opts[:input]
- i.close
- end
-
- if(opts[:marshal])
- hash = Marshal.load(o)
- else
- hash['data'] = o.read
- end
-
- # Raise errors, Perforce or Exec
- if(opts[:raise] && !e.eof && t.value != 0)
- raise Puppet::Error, "\nP4: #{e.read}"
- end
- if(opts[:raise] && hash['code'] == 'error' && t.value != 0)
- raise Puppet::Error, "\nP4: #{hash['data']}"
- end
- end
-
- Puppet.debug "hash: #{hash}\n"
- return hash
- end
-
- # helper method as cstat does not Marshal
- def marshal_cstat(hash)
- data = hash['data']
- code = 'error'
-
- list = Array.new
- change = Hash.new
- data.each_line do |l|
- p = /^\.\.\. (.*) (.*)$/
- m = p.match(l)
- if m
- change[m[1]] = m[2]
- if m[1] == 'status'
- code = 'stat'
- list.push change
- change = Hash.new
- end
- end
- end
-
- hash = Hash.new
- hash.store 'code', code
- hash.store 'data', list
- return hash
- end
-
-end
diff --git a/lib/puppet/provider/vcsrepo/svn.rb b/lib/puppet/provider/vcsrepo/svn.rb
deleted file mode 100644
index fccfaa5a..00000000
--- a/lib/puppet/provider/vcsrepo/svn.rb
+++ /dev/null
@@ -1,139 +0,0 @@
-require File.join(File.dirname(__FILE__), '..', 'vcsrepo')
-
-Puppet::Type.type(:vcsrepo).provide(:svn, :parent => Puppet::Provider::Vcsrepo) do
- desc "Supports Subversion repositories"
-
- commands :svn => 'svn',
- :svnadmin => 'svnadmin',
- :svnlook => 'svnlook'
-
- has_features :filesystem_types, :reference_tracking, :basic_auth, :configuration, :conflict, :depth
-
- def create
- if !@resource.value(:source)
- create_repository(@resource.value(:path))
- else
- checkout_repository(@resource.value(:source),
- @resource.value(:path),
- @resource.value(:revision),
- @resource.value(:depth))
- end
- update_owner
- end
-
- def working_copy_exists?
- if File.directory?(@resource.value(:path))
- # :path is an svn checkout
- return true if File.directory?(File.join(@resource.value(:path), '.svn'))
- if File.file?(File.join(@resource.value(:path), 'format'))
- # :path is an svn server
- return true if svnlook('uuid', @resource.value(:path))
- end
- end
- false
- end
-
- def exists?
- working_copy_exists?
- end
-
- def destroy
- FileUtils.rm_rf(@resource.value(:path))
- end
-
- def latest?
- at_path do
- (self.revision >= self.latest) and (@resource.value(:source) == self.sourceurl)
- end
- end
-
- def buildargs
- args = ['--non-interactive']
- if @resource.value(:basic_auth_username) && @resource.value(:basic_auth_password)
- args.push('--username', @resource.value(:basic_auth_username))
- args.push('--password', @resource.value(:basic_auth_password))
- args.push('--no-auth-cache')
- end
-
- if @resource.value(:force)
- args.push('--force')
- end
-
- if @resource.value(:configuration)
- args.push('--config-dir', @resource.value(:configuration))
- end
-
- if @resource.value(:trust_server_cert) != :false
- args.push('--trust-server-cert')
- end
-
- args
- end
-
- def latest
- args = buildargs.push('info', '-r', 'HEAD')
- at_path do
- svn(*args)[/^Revision:\s+(\d+)/m, 1]
- end
- end
-
- def sourceurl
- args = buildargs.push('info')
- at_path do
- svn(*args)[/^URL:\s+(\S+)/m, 1]
- end
- end
-
- def revision
- args = buildargs.push('info')
- at_path do
- svn(*args)[/^Revision:\s+(\d+)/m, 1]
- end
- end
-
- def revision=(desired)
- args = if @resource.value(:source)
- buildargs.push('switch', '-r', desired, @resource.value(:source))
- else
- buildargs.push('update', '-r', desired)
- end
-
- if @resource.value(:conflict)
- args.push('--accept', @resource.value(:conflict))
- end
-
- at_path do
- svn(*args)
- end
- update_owner
- end
-
- private
-
- def checkout_repository(source, path, revision, depth)
- args = buildargs.push('checkout')
- if revision
- args.push('-r', revision)
- end
- if depth
- args.push('--depth', depth)
- end
- args.push(source, path)
- svn(*args)
- end
-
- def create_repository(path)
- args = ['create']
- if @resource.value(:fstype)
- args.push('--fs-type', @resource.value(:fstype))
- end
- args << path
- svnadmin(*args)
- end
-
- def update_owner
- if @resource.value(:owner) or @resource.value(:group)
- set_ownership
- end
- end
-end
diff --git a/lib/puppet/type/vcsrepo.rb b/lib/puppet/type/vcsrepo.rb
deleted file mode 100644
index e2ef0b7e..00000000
--- a/lib/puppet/type/vcsrepo.rb
+++ /dev/null
@@ -1,248 +0,0 @@
-require 'pathname'
-
-Puppet::Type.newtype(:vcsrepo) do
- desc "A local version control repository"
-
- feature :gzip_compression,
- "The provider supports explicit GZip compression levels"
- feature :basic_auth,
- "The provider supports HTTP Basic Authentication"
- feature :bare_repositories,
- "The provider differentiates between bare repositories
- and those with working copies",
- :methods => [:bare_exists?, :working_copy_exists?]
-
- feature :filesystem_types,
- "The provider supports different filesystem types"
-
- feature :reference_tracking,
- "The provider supports tracking revision references that can change
- over time (eg, some VCS tags and branch names)"
-
- feature :ssh_identity,
- "The provider supports a configurable SSH identity file"
-
- feature :user,
- "The provider can run as a different user"
-
- feature :modules,
- "The repository contains modules that can be chosen of"
-
- feature :multiple_remotes,
- "The repository tracks multiple remote repositories"
-
- feature :configuration,
- "The configuration directory to use"
-
- feature :cvs_rsh,
- "The provider understands the CVS_RSH environment variable"
-
- feature :depth,
- "The provider can do shallow clones or set scope limit"
-
- feature :branch,
- "The name of the branch"
-
- feature :p4config,
- "The provider understands Perforce Configuration"
-
- feature :submodules,
- "The repository contains submodules which can be optionally initialized"
-
- feature :conflict,
- "The provider supports automatic conflict resolution"
-
- ensurable do
- attr_accessor :latest
-
- def insync?(is)
- @should ||= []
-
- case should
- when :present
- return true unless [:absent, :purged, :held].include?(is)
- when :latest
- if is == :latest
- return true
- else
- return false
- end
- when :bare
- return is == :bare
- when :mirror
- return is == :mirror
- end
- end
-
- newvalue :present do
- notice "Creating repository from present"
- provider.create
- end
-
- newvalue :bare, :required_features => [:bare_repositories] do
- if !provider.exists?
- provider.create
- end
- end
-
- newvalue :mirror, :required_features => [:bare_repositories] do
- if !provider.exists?
- provider.create
- end
- end
-
- newvalue :absent do
- provider.destroy
- end
-
- newvalue :latest, :required_features => [:reference_tracking] do
- if provider.exists? && !@resource.value(:force)
- if provider.respond_to?(:update_references)
- provider.update_references
- end
- if provider.respond_to?(:latest?)
- reference = provider.latest || provider.revision
- else
- reference = resource.value(:revision) || provider.revision
- end
- notice "Updating to latest '#{reference}' revision"
- provider.revision = reference
- else
- notice "Creating repository from latest"
- provider.create
- end
- end
-
- def retrieve
- prov = @resource.provider
- if prov
- if prov.working_copy_exists?
- (@should.include?(:latest) && prov.latest?) ? :latest : :present
- elsif prov.class.feature?(:bare_repositories) and prov.bare_exists?
- :bare
- else
- :absent
- end
- else
- raise Puppet::Error, "Could not find provider"
- end
- end
-
- end
-
- newparam :path do
- desc "Absolute path to repository"
- isnamevar
- validate do |value|
- path = Pathname.new(value)
- unless path.absolute?
- raise ArgumentError, "Path must be absolute: #{path}"
- end
- end
- end
-
- newparam :source do
- desc "The source URI for the repository"
- end
-
- newparam :fstype, :required_features => [:filesystem_types] do
- desc "Filesystem type"
- end
-
- newproperty :revision do
- desc "The revision of the repository"
- newvalue(/^\S+$/)
- end
-
- newparam :owner do
- desc "The user/uid that owns the repository files"
- end
-
- newparam :group do
- desc "The group/gid that owns the repository files"
- end
-
- newparam :user do
- desc "The user to run for repository operations"
- end
-
- newparam :excludes do
- desc "Files to be excluded from the repository"
- end
-
- newparam :force do
- desc "Force repository creation, destroying any files on the path in the process."
- newvalues(:true, :false)
- defaultto false
- end
-
- newparam :compression, :required_features => [:gzip_compression] do
- desc "Compression level"
- validate do |amount|
- unless Integer(amount).between?(0, 6)
- raise ArgumentError, "Unsupported compression level: #{amount} (expected 0-6)"
- end
- end
- end
-
- newparam :basic_auth_username, :required_features => [:basic_auth] do
- desc "HTTP Basic Auth username"
- end
-
- newparam :basic_auth_password, :required_features => [:basic_auth] do
- desc "HTTP Basic Auth password"
- end
-
- newparam :identity, :required_features => [:ssh_identity] do
- desc "SSH identity file"
- end
-
- newparam :module, :required_features => [:modules] do
- desc "The repository module to manage"
- end
-
- newparam :remote, :required_features => [:multiple_remotes] do
- desc "The remote repository to track"
- defaultto "origin"
- end
-
- newparam :configuration, :required_features => [:configuration] do
- desc "The configuration directory to use"
- end
-
- newparam :cvs_rsh, :required_features => [:cvs_rsh] do
- desc "The value to be used for the CVS_RSH environment variable."
- end
-
- newparam :depth, :required_features => [:depth] do
- desc "The value to be used to do a shallow clone."
- end
-
- newparam :branch, :required_features => [:branch] do
- desc "The name of the branch to clone."
- end
-
- newparam :p4config, :required_features => [:p4config] do
- desc "The Perforce P4CONFIG environment."
- end
-
- newparam :submodules, :required_features => [:submodules] do
- desc "Initialize and update each submodule in the repository."
- newvalues(:true, :false)
- defaultto true
- end
-
- newparam :conflict do
- desc "The action to take if conflicts exist between repository and working copy"
- end
-
- newparam :trust_server_cert do
- desc "Trust server certificate"
- newvalues(:true, :false)
- defaultto :false
- end
-
- autorequire(:package) do
- ['git', 'git-core', 'mercurial']
- end
-end