diff --git a/acceptance/tests/modules/install/with_modulepath.rb b/acceptance/tests/modules/install/with_modulepath.rb new file mode 100644 index 000000000..1637ea5a8 --- /dev/null +++ b/acceptance/tests/modules/install/with_modulepath.rb @@ -0,0 +1,37 @@ +begin test_name "puppet module install (with modulepath)" + +step 'Setup' +require 'resolv'; ip = Resolv.getaddress('forge-dev.puppetlabs.lan') +apply_manifest_on master, "host { 'forge.puppetlabs.com': ip => '#{ip}' }" +apply_manifest_on master, "file { ['/etc/puppet/modules2']: ensure => directory, recurse => true, purge => true, force => true }" + +step "Install a module with relative modulepath" +on master, "cd /etc/puppet/modules2 && puppet module install pmtacceptance-nginx --modulepath=." do + assert_output <<-OUTPUT + Preparing to install into /etc/puppet/modules2 ... + Downloading from http://forge.puppetlabs.com ... + Installing -- do not interrupt ... + /etc/puppet/modules2 + └── pmtacceptance-nginx (\e[0;36mv0.0.1\e[0m) + OUTPUT +end +on master, '[ -d /etc/puppet/modules2/nginx ]' +apply_manifest_on master, "file { ['/etc/puppet/modules2']: ensure => directory, recurse => true, purge => true, force => true }" + +step "Install a module with absolute modulepath" +on master, puppet('module install pmtacceptance-nginx --modulepath=/etc/puppet/modules2') do + assert_output <<-OUTPUT + Preparing to install into /etc/puppet/modules2 ... + Downloading from http://forge.puppetlabs.com ... + Installing -- do not interrupt ... + /etc/puppet/modules2 + └── pmtacceptance-nginx (\e[0;36mv0.0.1\e[0m) + OUTPUT +end +on master, '[ -d /etc/puppet/modules2/nginx ]' +apply_manifest_on master, "file { ['/etc/puppet/modules2']: ensure => directory, recurse => true, purge => true, force => true }" + +ensure step "Teardown" +apply_manifest_on master, "host { 'forge.puppetlabs.com': ensure => absent }" +apply_manifest_on master, "file { ['/etc/puppet/modules2']: ensure => directory, recurse => true, purge => true, force => true }" +end diff --git a/acceptance/tests/modules/list/with_modulepath.rb b/acceptance/tests/modules/list/with_modulepath.rb new file mode 100644 index 000000000..02726db19 --- /dev/null +++ b/acceptance/tests/modules/list/with_modulepath.rb @@ -0,0 +1,76 @@ +begin test_name "puppet module list (with modulepath)" + +step "Setup" +apply_manifest_on master, <<-PP +file { + [ + '/etc/puppet/modules2', + '/etc/puppet/modules2/crakorn', + '/etc/puppet/modules2/appleseed', + '/etc/puppet/modules2/thelock', + ]: ensure => directory, + recurse => true, + purge => true, + force => true; + '/etc/puppet/modules2/crakorn/metadata.json': + content => '{ + "name": "jimmy/crakorn", + "version": "0.4.0", + "source": "", + "author": "jimmy", + "license": "MIT", + "dependencies": [] + }'; + '/etc/puppet/modules2/appleseed/metadata.json': + content => '{ + "name": "jimmy/appleseed", + "version": "1.1.0", + "source": "", + "author": "jimmy", + "license": "MIT", + "dependencies": [ + { "name": "jimmy/crakorn", "version_requirement": "0.4.0" } + ] + }'; + '/etc/puppet/modules2/thelock/metadata.json': + content => '{ + "name": "jimmy/thelock", + "version": "1.0.0", + "source": "", + "author": "jimmy", + "license": "MIT", + "dependencies": [ + { "name": "jimmy/appleseed", "version_requirement": "1.x" } + ] + }'; +} +PP +on master, '[ -d /etc/puppet/modules2/crakorn ]' +on master, '[ -d /etc/puppet/modules2/appleseed ]' +on master, '[ -d /etc/puppet/modules2/thelock ]' + +step "List the installed modules with relative modulepath" +on master, 'cd 
/etc/puppet/modules2 && puppet module list --modulepath=.' do + assert_equal '', stderr + assert_equal <<-STDOUT, stdout +/etc/puppet/modules2 +├── jimmy-appleseed (\e[0;36mv1.1.0\e[0m) +├── jimmy-crakorn (\e[0;36mv0.4.0\e[0m) +└── jimmy-thelock (\e[0;36mv1.0.0\e[0m) +STDOUT +end + +step "List the installed modules with absolute modulepath" +on master, puppet('module list --modulepath=/etc/puppet/modules2') do + assert_equal '', stderr + assert_equal <<-STDOUT, stdout +/etc/puppet/modules2 +├── jimmy-appleseed (\e[0;36mv1.1.0\e[0m) +├── jimmy-crakorn (\e[0;36mv0.4.0\e[0m) +└── jimmy-thelock (\e[0;36mv1.0.0\e[0m) +STDOUT +end + +ensure step "Teardown" +apply_manifest_on master, "file { ['/etc/puppet/modules2']: ensure => directory, recurse => true, purge => true, force => true }" +end diff --git a/acceptance/tests/modules/uninstall/with_modulepath.rb b/acceptance/tests/modules/uninstall/with_modulepath.rb new file mode 100644 index 000000000..e26ba1c62 --- /dev/null +++ b/acceptance/tests/modules/uninstall/with_modulepath.rb @@ -0,0 +1,54 @@ +begin test_name "puppet module uninstall (with modulepath)" + +step "Setup" +apply_manifest_on master, <<-PP +file { + [ + '/etc/puppet/modules2', + '/etc/puppet/modules2/crakorn', + '/etc/puppet/modules2/absolute', + ]: ensure => directory; + '/etc/puppet/modules2/crakorn/metadata.json': + content => '{ + "name": "jimmy/crakorn", + "version": "0.4.0", + "source": "", + "author": "jimmy", + "license": "MIT", + "dependencies": [] + }'; + '/etc/puppet/modules2/absolute/metadata.json': + content => '{ + "name": "jimmy/absolute", + "version": "0.4.0", + "source": "", + "author": "jimmy", + "license": "MIT", + "dependencies": [] + }'; +} +PP +on master, '[ -d /etc/puppet/modules2/crakorn ]' +on master, '[ -d /etc/puppet/modules2/absolute ]' + +step "Try to uninstall the module jimmy-crakorn using relative modulepath" +on master, 'cd /etc/puppet/modules2 && puppet module uninstall jimmy-crakorn --modulepath=.' do + assert_output <<-OUTPUT + Preparing to uninstall 'jimmy-crakorn' ... + Removed 'jimmy-crakorn' (\e[0;36mv0.4.0\e[0m) from /etc/puppet/modules2 + OUTPUT +end +on master, '[ ! -d /etc/puppet/modules2/crakorn ]' + +step "Try to uninstall the module jimmy-absolute using an absolute modulepath" +on master, 'cd /etc/puppet/modules2 && puppet module uninstall jimmy-absolute --modulepath=/etc/puppet/modules2' do + assert_output <<-OUTPUT + Preparing to uninstall 'jimmy-absolute' ... + Removed 'jimmy-absolute' (\e[0;36mv0.4.0\e[0m) from /etc/puppet/modules2 + OUTPUT +end +on master, '[ ! 
-d /etc/puppet/modules2/absolute ]' + +ensure step "Teardown" +apply_manifest_on master, "file { ['/etc/puppet/modules2']: ensure => directory, recurse => true, purge => true, force => true }" +end diff --git a/acceptance/tests/resource/scheduled_task/should_create.rb b/acceptance/tests/resource/scheduled_task/should_create.rb new file mode 100644 index 000000000..507273964 --- /dev/null +++ b/acceptance/tests/resource/scheduled_task/should_create.rb @@ -0,0 +1,33 @@ +test_name "should create a scheduled task" + +name = "pl#{rand(999999).to_i}" +confine :to, :platform => 'windows' + +agents.each do |agent| + # query only supports /tn parameter on Vista and later + query_cmd = "schtasks.exe /query /v /fo list /tn #{name}" + on agents, facter('kernelmajversion') do + query_cmd = "schtasks.exe /query /v /fo list | grep -q #{name}" if stdout.chomp.to_f < 6.0 + end + + step "create the task" + args = ['ensure=present', + 'command=c:\\\\windows\\\\system32\\\\notepad.exe', + 'arguments="foo bar baz"', + 'working_dir=c:\\\\windows'] + on agent, puppet_resource('scheduled_task', name, args) + + step "verify the task exists" + on agent, query_cmd + + step "verify task properties" + on agent, puppet_resource('scheduled_task', name) do + assert_match(/command\s*=>\s*'c:\\windows\\system32\\notepad.exe'/, stdout) + assert_match(/arguments\s*=>\s*'foo bar baz'/, stdout) + assert_match(/enabled\s*=>\s*'true'/, stdout) + assert_match(/working_dir\s*=>\s*'c:\\windows'/, stdout) + end + + step "delete the task" + on agent, "schtasks.exe /delete /tn #{name} /f" +end diff --git a/acceptance/tests/resource/scheduled_task/should_destroy.rb b/acceptance/tests/resource/scheduled_task/should_destroy.rb new file mode 100644 index 000000000..e61caa45b --- /dev/null +++ b/acceptance/tests/resource/scheduled_task/should_destroy.rb @@ -0,0 +1,27 @@ +test_name "should delete a scheduled task" + +name = "pl#{rand(999999).to_i}" +confine :to, :platform => 'windows' + +agents.each do |agent| + # Have to use /v1 parameter for Vista and later, older versions + # don't accept the parameter + version = '/v1' + # query only supports /tn parameter on Vista and later + query_cmd = "schtasks.exe /query /v /fo list /tn #{name}" + on agents, facter('kernelmajversion') do + if stdout.chomp.to_f < 6.0 + version = '' + query_cmd = "schtasks.exe /query /v /fo list | grep -vq #{name}" + end + end + + step "create the task" + on agent, "schtasks.exe /create #{version} /tn #{name} /tr c:\\\\windows\\\\system32\\\\notepad.exe /sc daily /ru system" + + step "delete the task" + on agent, puppet_resource('scheduled_task', name, 'ensure=absent') + + step "verify the task was deleted" + on agent, query_cmd +end diff --git a/acceptance/tests/resource/scheduled_task/should_modify.rb b/acceptance/tests/resource/scheduled_task/should_modify.rb new file mode 100644 index 000000000..35d9d3ef7 --- /dev/null +++ b/acceptance/tests/resource/scheduled_task/should_modify.rb @@ -0,0 +1,28 @@ +test_name "should modify a scheduled task" + +name = "pl#{rand(999999).to_i}" +confine :to, :platform => 'windows' + +agents.each do |agent| + # Have to use /v1 parameter for Vista and later, older versions + # don't accept the parameter + version = '/v1' + on agents, facter('kernelmajversion') do + version = '' if stdout.chomp.to_f < 6.0 + end + + step "create the task" + on agent, "schtasks.exe /create #{version} /tn #{name} /tr c:\\\\windows\\\\system32\\\\notepad.exe /sc daily /ru system" + + step "modify the task" + on agent, puppet_resource('scheduled_task', 
name, ['ensure=present', 'command=c:\\\\windows\\\\system32\\\\notepad2.exe', "arguments=args-#{name}"]) + + step "verify the arguments were updated" + on agent, puppet_resource('scheduled_task', name) do + assert_match(/command\s*=>\s*'c:\\windows\\system32\\notepad2.exe'/, stdout) + assert_match(/arguments\s*=>\s*'args-#{name}'/, stdout) + end + + step "delete the task" + on agent, "schtasks.exe /delete /tn #{name} /f" +end diff --git a/acceptance/tests/resource/scheduled_task/should_query.rb b/acceptance/tests/resource/scheduled_task/should_query.rb new file mode 100644 index 000000000..a28054a6a --- /dev/null +++ b/acceptance/tests/resource/scheduled_task/should_query.rb @@ -0,0 +1,24 @@ +test_name "test that we can query and find a scheduled task that exists." + +name = "pl#{rand(999999).to_i}" +confine :to, :platform => 'windows' + +agents.each do |agent| + # Have to use /v1 parameter for Vista and later, older versions + # don't accept the parameter + version = '/v1' + on agents, facter('kernelmajversion') do + version = '' if stdout.chomp.to_f < 6.0 + end + + step "create the task" + on agent, "schtasks.exe /create #{version} /tn #{name} /tr c:\\\\windows\\\\system32\\\\notepad.exe /sc daily /ru system" + + step "query for the task and verify it was found" + on agent, puppet_resource('scheduled_task', name) do + fail_test "didn't find the scheduled_task #{name}" unless stdout.include? 'present' + end + + step "delete the task" + on agent, "schtasks.exe /delete /tn #{name} /f" +end diff --git a/acceptance/tests/ticket_13489_service_refresh.pp b/acceptance/tests/ticket_13489_service_refresh.pp new file mode 100644 index 000000000..ba3e48600 --- /dev/null +++ b/acceptance/tests/ticket_13489_service_refresh.pp @@ -0,0 +1,20 @@ +test_name "#13489: refresh service" + +confine :to, :platform => 'windows' + +manifest = <<MANIFEST +service { 'BITS': + ensure => 'running', +} + +exec { 'hello': + command => "cmd /c echo hello", + path => $::path, + logoutput => true, +} + +Exec['hello'] ~> Service['BITS'] +MANIFEST + +step "Refresh service" +apply_manifest_on(agents, manifest) diff --git a/conf/osx/createpackage.sh b/conf/osx/createpackage.sh index 0c153f25a..0f63ec8fa 100755 --- a/conf/osx/createpackage.sh +++ b/conf/osx/createpackage.sh @@ -1,185 +1,187 @@ #!/bin/bash # # Script to build an "old style" not flat pkg out of the puppet repository. # # Author: Nigel Kersten (nigelk@google.com) # # Last Updated: 2008-07-31 # # Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License INSTALLRB="install.rb" BINDIR="/usr/bin" SBINDIR="/usr/sbin" SITELIBDIR="/usr/lib/ruby/site_ruby/1.8" PACKAGEMAKER="/Developer/usr/bin/packagemaker" PROTO_PLIST="PackageInfo.plist" PREFLIGHT="preflight" function find_installer() { # we walk up three directories to make this executable from the root, # root/conf or root/conf/osx if [ -f "./${INSTALLRB}" ]; then installer="$(pwd)/${INSTALLRB}" elif [ -f "../${INSTALLRB}" ]; then installer="$(pwd)/../${INSTALLRB}" elif [ -f "../../${INSTALLRB}" ]; then installer="$(pwd)/../../${INSTALLRB}" else installer="" fi } function find_puppet_root() { puppet_root=$(dirname "${installer}") } function install_puppet() { echo "Installing Puppet to ${pkgroot}" "${installer}" --destdir="${pkgroot}" --bindir="${BINDIR}" --sbindir="${SBINDIR}" --sitelibdir="${SITELIBDIR}" mkdir -p ${pkgroot}/var/lib/puppet chown -R root:admin "${pkgroot}" + chmod -R go-w "${pkgroot}" } function install_docs() { echo "Installing docs to ${pkgroot}" docdir="${pkgroot}/usr/share/doc/puppet" mkdir -p "${docdir}" for docfile in CHANGELOG CHANGELOG.old COPYING LICENSE README README.queueing README.rst; do install -m 0644 "${puppet_root}/${docfile}" "${docdir}" done chown -R root:wheel "${docdir}" chmod 0755 "${docdir}" } function get_puppet_version() { puppet_version=$(RUBYLIB="${pkgroot}/${SITELIBDIR}:${RUBYLIB}" ruby -e "require 'puppet'; puts Puppet.version") } function prepare_package() { # As we can't specify to follow symlinks from the command line, we have # to go through the hassle of creating an Info.plist file for packagemaker # to look at for package creation and substitue the version strings out. # Major/Minor versions can only be integers, so we have "0" and "245" for # puppet version 0.24.5 # Note too that for 10.5 compatibility this Info.plist *must* be set to # follow symlinks. VER1=$(echo ${puppet_version} | awk -F "." '{print $1}') VER2=$(echo ${puppet_version} | awk -F "." '{print $2}') VER3=$(echo ${puppet_version} | awk -F "." '{print $3}') major_version="${VER1}" minor_version="${VER2}${VER3}" cp "${puppet_root}/conf/osx/${PROTO_PLIST}" "${pkgtemp}" sed -i '' "s/{SHORTVERSION}/${puppet_version}/g" "${pkgtemp}/${PROTO_PLIST}" sed -i '' "s/{MAJORVERSION}/${major_version}/g" "${pkgtemp}/${PROTO_PLIST}" sed -i '' "s/{MINORVERSION}/${minor_version}/g" "${pkgtemp}/${PROTO_PLIST}" # We need to create a preflight script to remove traces of previous # puppet installs due to limitations in Apple's pkg format. mkdir "${pkgtemp}/scripts" cp "${puppet_root}/conf/osx/${PREFLIGHT}" "${pkgtemp}/scripts" # substitute in the sitelibdir specified above on the assumption that this # is where any previous puppet install exists that should be cleaned out. sed -i '' "s|{SITELIBDIR}|${SITELIBDIR}|g" "${pkgtemp}/scripts/${PREFLIGHT}" # substitute in the bindir sepcified on the assumption that this is where # any old executables that have moved from bindir->sbindir should be # cleaned out from. sed -i '' "s|{BINDIR}|${BINDIR}|g" "${pkgtemp}/scripts/${PREFLIGHT}" chmod 0755 "${pkgtemp}/scripts/${PREFLIGHT}" } function create_package() { rm -fr "$(pwd)/puppet-${puppet_version}.pkg" echo "Building package" echo "Note that packagemaker is reknowned for spurious errors. Don't panic." 
- "${PACKAGEMAKER}" --root "${pkgroot}" \ + "${PACKAGEMAKER}" --verbose --no-recommend --no-relocate \ + --root "${pkgroot}" \ --info "${pkgtemp}/${PROTO_PLIST}" \ --scripts ${pkgtemp}/scripts \ --out "$(pwd)/puppet-${puppet_version}.pkg" if [ $? -ne 0 ]; then echo "There was a problem building the package." cleanup_and_exit 1 exit 1 else echo "The package has been built at:" echo "$(pwd)/puppet-${puppet_version}.pkg" fi } function cleanup_and_exit() { if [ -d "${pkgroot}" ]; then rm -fr "${pkgroot}" fi if [ -d "${pkgtemp}" ]; then rm -fr "${pkgtemp}" fi exit $1 } # Program entry point function main() { if [ $(whoami) != "root" ]; then echo "This script needs to be run as root via su or sudo." cleanup_and_exit 1 fi find_installer if [ ! "${installer}" ]; then echo "Unable to find ${INSTALLRB}" cleanup_and_exit 1 fi find_puppet_root if [ ! "${puppet_root}" ]; then echo "Unable to find puppet repository root." cleanup_and_exit 1 fi pkgroot=$(mktemp -d -t puppetpkg) if [ ! "${pkgroot}" ]; then echo "Unable to create temporary package root." cleanup_and_exit 1 fi pkgtemp=$(mktemp -d -t puppettmp) if [ ! "${pkgtemp}" ]; then echo "Unable to create temporary package root." cleanup_and_exit 1 fi install_puppet install_docs get_puppet_version if [ ! "${puppet_version}" ]; then echo "Unable to retrieve puppet version" cleanup_and_exit 1 fi prepare_package create_package cleanup_and_exit 0 } main "$@" diff --git a/lib/puppet/application/agent.rb b/lib/puppet/application/agent.rb index 5a7a3114e..a0aef3bda 100644 --- a/lib/puppet/application/agent.rb +++ b/lib/puppet/application/agent.rb @@ -1,469 +1,471 @@ require 'puppet/application' class Puppet::Application::Agent < Puppet::Application run_mode :agent attr_accessor :args, :agent, :daemon, :host def app_defaults super.merge({ :catalog_terminus => :rest, :node_terminus => :rest, :facts_terminus => :facter, }) end def preinit # Do an initial trap, so that cancels don't get a stack trace. Signal.trap(:INT) do $stderr.puts "Cancelling startup" exit(0) end { :waitforcert => nil, :detailed_exitcodes => false, :verbose => false, :debug => false, :centrallogs => false, :setdest => false, :enable => false, :disable => false, :client => true, :fqdn => nil, :serve => [], :digest => :MD5, :graph => true, :fingerprint => false, }.each do |opt,val| options[opt] = val end @args = {} require 'puppet/daemon' @daemon = Puppet::Daemon.new @daemon.argv = ARGV.dup end option("--centrallogging") option("--disable [MESSAGE]") do |message| options[:disable] = true options[:disable_message] = message end option("--enable") option("--debug","-d") option("--fqdn FQDN","-f") option("--test","-t") option("--verbose","-v") option("--fingerprint") option("--digest DIGEST") option("--no-client") do |arg| options[:client] = false end option("--detailed-exitcodes") do |arg| options[:detailed_exitcodes] = true end option("--logdest DEST", "-l DEST") do |arg| begin Puppet::Util::Log.newdestination(arg) options[:setdest] = true rescue => detail Puppet.log_exception(detail) end end option("--waitforcert WAITFORCERT", "-w") do |arg| options[:waitforcert] = arg.to_i end option("--port PORT","-p") do |arg| @args[:Port] = arg end def help <<-HELP puppet-agent(8) -- The puppet agent daemon ======== SYNOPSIS -------- Retrieves the client configuration from the puppet master and applies it to the local host. This service may be run as a daemon, run periodically using cron (or something similar), or run interactively for testing purposes. 
USAGE ----- puppet agent [--certname <name>] [-D|--daemonize|--no-daemonize] [-d|--debug] [--detailed-exitcodes] [--digest <digest>] [--disable [message]] [--enable] [--fingerprint] [-h|--help] [-l|--logdest syslog|<file>|console] [--no-client] [--noop] [-o|--onetime] [-t|--test] [-v|--verbose] [-V|--version] [-w|--waitforcert <seconds>] DESCRIPTION ----------- This is the main puppet client. Its job is to retrieve the local machine's configuration from a remote server and apply it. In order to successfully communicate with the remote server, the client must have a certificate signed by a certificate authority that the server trusts; the recommended method for this, at the moment, is to run a certificate authority as part of the puppet server (which is the default). The client will connect and request a signed certificate, and will continue connecting until it receives one. Once the client has a signed certificate, it will retrieve its configuration and apply it. USAGE NOTES ----------- 'puppet agent' does its best to find a compromise between interactive use and daemon use. Run with no arguments and no configuration, it will go into the background, attempt to get a signed certificate, and retrieve and apply its configuration every 30 minutes. Some flags are meant specifically for interactive use -- in particular, 'test', 'tags' or 'fingerprint' are useful. 'test' enables verbose logging, causes the daemon to stay in the foreground, exits if the server's configuration is invalid (this happens if, for instance, you've left a syntax error on the server), and exits after running the configuration once (rather than hanging around as a long-running process). 'tags' allows you to specify what portions of a configuration you want to apply. Puppet elements are tagged with all of the class or definition names that contain them, and you can use the 'tags' flag to specify one of these names, causing only configuration elements contained within that class or definition to be applied. This is very useful when you are testing new configurations -- for instance, if you are just starting to manage 'ntpd', you would put all of the new elements into an 'ntpd' class, and call puppet with '--tags ntpd', which would only apply that small portion of the configuration during your testing, rather than applying the whole thing. 'fingerprint' is a one-time flag. In this mode 'puppet agent' will run once and display on the console (and in the log) the current certificate (or certificate request) fingerprint. Providing the '--digest' option allows to use a different digest algorithm to generate the fingerprint. The main use is to verify that before signing a certificate request on the master, the certificate request the master received is the same as the one the client sent (to prevent against man-in-the-middle attacks when signing certificates). OPTIONS ------- Note that any configuration parameter that's valid in the configuration file is also a valid long argument. For example, 'server' is a valid configuration parameter, so you can specify '--server <servername>' as an argument. See the configuration file documentation at http://docs.puppetlabs.com/references/stable/configuration.html for the full list of acceptable parameters. A commented list of all configuration options can also be generated by running puppet agent with '--genconfig'. * --certname: Set the certname (unique ID) of the client. 
The master reads this unique identifying string, which is usually set to the node's fully-qualified domain name, to determine which configurations the node will receive. Use this option to debug setup problems or implement unusual node identification schemes. * --daemonize: Send the process into the background. This is the default. * --no-daemonize: Do not send the process into the background. * --debug: Enable full debugging. * --detailed-exitcodes: Provide transaction information via exit codes. If this is enabled, an exit code of '2' means there were changes, an exit code of '4' means there were failures during the transaction, and an exit code of '6' means there were both changes and failures. * --digest: Change the certificate fingerprinting digest algorithm. The default is MD5. Valid values depends on the version of OpenSSL installed, but should always at least contain MD5, MD2, SHA1 and SHA256. * --disable: Disable working on the local system. This puts a lock file in place, causing 'puppet agent' not to work on the system until the lock file is removed. This is useful if you are testing a configuration and do not want the central configuration to override the local state until everything is tested and committed. Disable can also take an optional message that will be reported by the 'puppet agent' at the next disabled run. 'puppet agent' uses the same lock file while it is running, so no more than one 'puppet agent' process is working at a time. 'puppet agent' exits after executing this. * --enable: Enable working on the local system. This removes any lock file, causing 'puppet agent' to start managing the local system again (although it will continue to use its normal scheduling, so it might not start for another half hour). 'puppet agent' exits after executing this. * --fingerprint: Display the current certificate or certificate signing request fingerprint and then exit. Use the '--digest' option to change the digest algorithm used. * --help: Print this help message * --logdest: Where to send messages. Choose between syslog, the console, and a log file. Defaults to sending messages to syslog, or the console if debugging or verbosity is enabled. * --no-client: Do not create a config client. This will cause the daemon to run without ever checking for its configuration automatically, and only makes sense when puppet agent is being run with listen = true in puppet.conf or was started with the `--listen` option. * --noop: Use 'noop' mode where the daemon runs in a no-op or dry-run mode. This is useful for seeing what changes Puppet will make without actually executing the changes. * --onetime: Run the configuration once. Runs a single (normally daemonized) Puppet run. Useful for interactively running puppet agent when used in conjunction with the --no-daemonize option. * --test: Enable the most common options used for testing. These are 'onetime', 'verbose', 'ignorecache', 'no-daemonize', 'no-usecacheonfailure', 'detailed-exit-codes', 'no-splay', and 'show_diff'. * --verbose: Turn on verbose reporting. * --version: Print the puppet version number and exit. * --waitforcert: This option only matters for daemons that do not yet have certificates and it is enabled by default, with a value of 120 (seconds). This causes 'puppet agent' to connect to the server every 2 minutes and ask it to sign a certificate request. This is useful for the initial setup of a puppet client. You can turn off waiting for certificates by specifying a time of 0. 
EXAMPLE ------- $ puppet agent --server puppet.domain.com DIAGNOSTICS ----------- Puppet agent accepts the following signals: * SIGHUP: Restart the puppet agent daemon. * SIGINT and SIGTERM: Shut down the puppet agent daemon. * SIGUSR1: Immediately retrieve and apply configurations from the puppet master. +* SIGUSR2: + Close file descriptors for log files and reopen them. Used with logrotate. AUTHOR ------ Luke Kanies COPYRIGHT --------- Copyright (c) 2011 Puppet Labs, LLC Licensed under the Apache 2.0 License HELP end def run_command return fingerprint if options[:fingerprint] return onetime if Puppet[:onetime] main end def fingerprint unless cert = host.certificate || host.certificate_request $stderr.puts "Fingerprint asked but no certificate nor certificate request have yet been issued" exit(1) return end unless fingerprint = cert.fingerprint(options[:digest]) raise ArgumentError, "Could not get fingerprint for digest '#{options[:digest]}'" end puts fingerprint end def onetime unless options[:client] $stderr.puts "onetime is specified but there is no client" exit(43) return end @daemon.set_signal_traps begin @agent.should_fork = false exitstatus = @agent.run rescue => detail Puppet.log_exception(detail) end @daemon.stop(:exit => false) if not exitstatus exit(1) elsif options[:detailed_exitcodes] then exit(exitstatus) else exit(0) end end def main Puppet.notice "Starting Puppet client version #{Puppet.version}" @daemon.start end # Enable all of the most common test options. def setup_test Puppet.settings.handlearg("--ignorecache") Puppet.settings.handlearg("--no-usecacheonfailure") Puppet.settings.handlearg("--no-splay") Puppet.settings.handlearg("--show_diff") Puppet.settings.handlearg("--no-daemonize") options[:verbose] = true Puppet[:onetime] = true options[:detailed_exitcodes] = true end def enable_disable_client(agent) if options[:enable] agent.enable elsif options[:disable] agent.disable(options[:disable_message] || 'reason not specified') end exit(0) end def setup_listen unless FileTest.exists?(Puppet[:rest_authconfig]) Puppet.err "Will not start without authorization file #{Puppet[:rest_authconfig]}" exit(14) end require 'puppet/network/server' # No REST handlers yet. server = Puppet::Network::Server.new(:port => Puppet[:puppetport]) @daemon.server = server end def setup_host @host = Puppet::SSL::Host.new waitforcert = options[:waitforcert] || (Puppet[:onetime] ? 0 : Puppet[:waitforcert]) cert = @host.wait_for_cert(waitforcert) unless options[:fingerprint] end def setup_agent # We need tomake the client either way, we just don't start it # if --no-client is set. require 'puppet/agent' require 'puppet/configurer' @agent = Puppet::Agent.new(Puppet::Configurer) enable_disable_client(@agent) if options[:enable] or options[:disable] @daemon.agent = agent if options[:client] # It'd be nice to daemonize later, but we have to daemonize before the # waitforcert happens. @daemon.daemonize if Puppet[:daemonize] setup_host @objects = [] # This has to go after the certs are dealt with. if Puppet[:listen] unless Puppet[:onetime] setup_listen else Puppet.notice "Ignoring --listen on onetime run" end end end def setup setup_test if options[:test] setup_logs exit(Puppet.settings.print_configs ? 0 : 1) if Puppet.settings.print_configs? 
args[:Server] = Puppet[:server] if options[:fqdn] args[:FQDN] = options[:fqdn] Puppet[:certname] = options[:fqdn] end if options[:centrallogs] logdest = args[:Server] logdest += ":" + args[:Port] if args.include?(:Port) Puppet::Util::Log.newdestination(logdest) end Puppet.settings.use :main, :agent, :ssl # Always ignoreimport for agent. It really shouldn't even try to import, # but this is just a temporary band-aid. Puppet[:ignoreimport] = true # We need to specify a ca location for all of the SSL-related i # indirected classes to work; in fingerprint mode we just need # access to the local files and we don't need a ca. Puppet::SSL::Host.ca_location = options[:fingerprint] ? :none : :remote Puppet::Transaction::Report.indirection.terminus_class = :rest # we want the last report to be persisted locally Puppet::Transaction::Report.indirection.cache_class = :yaml Puppet::Resource::Catalog.indirection.cache_class = :yaml unless options[:fingerprint] setup_agent else setup_host end end end diff --git a/lib/puppet/application/master.rb b/lib/puppet/application/master.rb index f44a75e2a..f299711d2 100644 --- a/lib/puppet/application/master.rb +++ b/lib/puppet/application/master.rb @@ -1,237 +1,239 @@ require 'puppet/application' class Puppet::Application::Master < Puppet::Application run_mode :master option("--debug", "-d") option("--verbose", "-v") # internal option, only to be used by ext/rack/config.ru option("--rack") option("--compile host", "-c host") do |arg| options[:node] = arg end option("--logdest DEST", "-l DEST") do |arg| begin Puppet::Util::Log.newdestination(arg) options[:setdest] = true rescue => detail Puppet.log_exception(detail) end end option("--parseonly") do puts "--parseonly has been removed. Please use 'puppet parser validate <manifest>'" exit 1 end def help <<-HELP puppet-master(8) -- The puppet master daemon ======== SYNOPSIS -------- The central puppet server. Functions as a certificate authority by default. USAGE ----- puppet master [-D|--daemonize|--no-daemonize] [-d|--debug] [-h|--help] [-l|--logdest <file>|console|syslog] [-v|--verbose] [-V|--version] [--compile <node-name>] DESCRIPTION ----------- This command starts an instance of puppet master, running as a daemon and using Ruby's built-in Webrick webserver. Puppet master can also be managed by other application servers; when this is the case, this executable is not used. OPTIONS ------- Note that any configuration parameter that's valid in the configuration file is also a valid long argument. For example, 'ssldir' is a valid configuration parameter, so you can specify '--ssldir <directory>' as an argument. See the configuration file documentation at http://docs.puppetlabs.com/references/stable/configuration.html for the full list of acceptable parameters. A commented list of all configuration options can also be generated by running puppet master with '--genconfig'. * --daemonize: Send the process into the background. This is the default. * --no-daemonize: Do not send the process into the background. * --debug: Enable full debugging. * --help: Print this help message. * --logdest: Where to send messages. Choose between syslog, the console, and a log file. Defaults to sending messages to syslog, or the console if debugging or verbosity is enabled. * --verbose: Enable verbosity. * --version: Print the puppet version number and exit. * --compile: Compile a catalogue and output it in JSON from the puppet master. Uses facts contained in the $vardir/yaml/ directory to compile the catalog. 
EXAMPLE ------- puppet master DIAGNOSTICS ----------- When running as a standalone daemon, puppet master accepts the following signals: * SIGHUP: Restart the puppet master server. * SIGINT and SIGTERM: Shut down the puppet master server. +* SIGUSR2: + Close file descriptors for log files and reopen them. Used with logrotate. AUTHOR ------ Luke Kanies COPYRIGHT --------- Copyright (c) 2011 Puppet Labs, LLC Licensed under the Apache 2.0 License HELP end def app_defaults() super.merge :facts_terminus => 'yaml' end def preinit Signal.trap(:INT) do $stderr.puts "Cancelling startup" exit(0) end # Create this first-off, so we have ARGV require 'puppet/daemon' @daemon = Puppet::Daemon.new @daemon.argv = ARGV.dup end def run_command if options[:node] compile else main end end def compile Puppet::Util::Log.newdestination :console raise ArgumentError, "Cannot render compiled catalogs without pson support" unless Puppet.features.pson? begin unless catalog = Puppet::Resource::Catalog.indirection.find(options[:node]) raise "Could not compile catalog for #{options[:node]}" end jj catalog.to_resource rescue => detail $stderr.puts detail exit(30) end exit(0) end def main require 'etc' require 'puppet/file_serving/content' require 'puppet/file_serving/metadata' # Make sure we've got a localhost ssl cert Puppet::SSL::Host.localhost # And now configure our server to *only* hit the CA for data, because that's # all it will have write access to. Puppet::SSL::Host.ca_location = :only if Puppet::SSL::CertificateAuthority.ca? if Puppet.features.root? begin Puppet::Util.chuser rescue => detail Puppet.log_exception(detail, "Could not change user to #{Puppet[:user]}: #{detail}") exit(39) end end unless options[:rack] require 'puppet/network/server' @daemon.server = Puppet::Network::Server.new() @daemon.daemonize if Puppet[:daemonize] else require 'puppet/network/http/rack' @app = Puppet::Network::HTTP::Rack.new() end Puppet.notice "Starting Puppet master version #{Puppet.version}" unless options[:rack] @daemon.start else return @app end end def setup raise Puppet::Error.new("Puppet master is not supported on Microsoft Windows") if Puppet.features.microsoft_windows? # Handle the logging settings. if options[:debug] or options[:verbose] if options[:debug] Puppet::Util::Log.level = :debug else Puppet::Util::Log.level = :info end unless Puppet[:daemonize] or options[:rack] Puppet::Util::Log.newdestination(:console) options[:setdest] = true end end Puppet::Util::Log.newdestination(:syslog) unless options[:setdest] exit(Puppet.settings.print_configs ? 0 : 1) if Puppet.settings.print_configs? Puppet.settings.use :main, :master, :ssl, :metrics # Configure all of the SSL stuff. if Puppet::SSL::CertificateAuthority.ca? Puppet::SSL::Host.ca_location = :local Puppet.settings.use :ca Puppet::SSL::CertificateAuthority.instance else Puppet::SSL::Host.ca_location = :none end end end diff --git a/lib/puppet/file_bucket/dipper.rb b/lib/puppet/file_bucket/dipper.rb index a4cc9ed32..2433a0ea0 100644 --- a/lib/puppet/file_bucket/dipper.rb +++ b/lib/puppet/file_bucket/dipper.rb @@ -1,108 +1,108 @@ +require 'pathname' require 'puppet/file_bucket' require 'puppet/file_bucket/file' require 'puppet/indirector/request' class Puppet::FileBucket::Dipper # This is a transitional implementation that uses REST # to access remote filebucket files. 
attr_accessor :name # Create our bucket client def initialize(hash = {}) # Emulate the XMLRPC client server = hash[:Server] port = hash[:Port] || Puppet[:masterport] environment = Puppet[:environment] if hash.include?(:Path) @local_path = hash[:Path] @rest_path = nil else @local_path = nil @rest_path = "https://#{server}:#{port}/#{environment}/file_bucket_file/" end end def local? !! @local_path end # Back up a file to our bucket def backup(file) raise(ArgumentError, "File #{file} does not exist") unless ::File.exist?(file) contents = IO.binread(file) begin file_bucket_file = Puppet::FileBucket::File.new(contents, :bucket_path => @local_path) files_original_path = absolutize_path(file) dest_path = "#{@rest_path}#{file_bucket_file.name}/#{files_original_path}" file_bucket_path = "#{@rest_path}#{file_bucket_file.checksum_type}/#{file_bucket_file.checksum_data}/#{files_original_path}" # Make a HEAD request for the file so that we don't waste time # uploading it if it already exists in the bucket. unless Puppet::FileBucket::File.indirection.head(file_bucket_path) Puppet::FileBucket::File.indirection.save(file_bucket_file, dest_path) end return file_bucket_file.checksum_data rescue => detail message = "Could not back up #{file}: #{detail}" Puppet.log_exception(detail, message) raise Puppet::Error, message end end # Retrieve a file by sum. def getfile(sum) source_path = "#{@rest_path}md5/#{sum}" file_bucket_file = Puppet::FileBucket::File.indirection.find(source_path, :bucket_path => @local_path) raise Puppet::Error, "File not found" unless file_bucket_file file_bucket_file.to_s end # Restore the file def restore(file,sum) restore = true if FileTest.exists?(file) cursum = Digest::MD5.hexdigest(IO.binread(file)) # if the checksum has changed... # this might be extra effort if cursum == sum restore = false end end if restore if newcontents = getfile(sum) tmp = "" newsum = Digest::MD5.hexdigest(newcontents) changed = nil if FileTest.exists?(file) and ! FileTest.writable?(file) changed = ::File.stat(file).mode ::File.chmod(changed | 0200, file) end ::File.open(file, ::File::WRONLY|::File::TRUNC|::File::CREAT) { |of| of.binmode of.print(newcontents) } ::File.chmod(changed, file) if changed else Puppet.err "Could not find file with checksum #{sum}" return nil end return newsum else return nil end end private def absolutize_path( path ) - require 'pathname' Pathname.new(path).realpath end end diff --git a/lib/puppet/interface/action_manager.rb b/lib/puppet/interface/action_manager.rb index 21fe508c0..a275587b3 100644 --- a/lib/puppet/interface/action_manager.rb +++ b/lib/puppet/interface/action_manager.rb @@ -1,75 +1,74 @@ require 'puppet/interface/action' +require 'puppet/interface/action_builder' module Puppet::Interface::ActionManager # Declare that this app can take a specific action, and provide # the code to do so. def action(name, &block) - require 'puppet/interface/action_builder' - @actions ||= {} Puppet.warning "Redefining action #{name} for #{self}" if action?(name) action = Puppet::Interface::ActionBuilder.build(self, name, &block) if action.default and current = get_default_action raise "Actions #{current.name} and #{name} cannot both be default" end @actions[action.name] = action end # This is the short-form of an action definition; it doesn't use the # builder, just creates the action directly from the block. 
def script(name, &block) @actions ||= {} raise "Action #{name} already defined for #{self}" if action?(name) @actions[name] = Puppet::Interface::Action.new(self, name, :when_invoked => block) end def actions @actions ||= {} result = @actions.keys if self.is_a?(Class) and superclass.respond_to?(:actions) result += superclass.actions elsif self.class.respond_to?(:actions) result += self.class.actions end # We need to uniq the result, because we duplicate actions when they are # fetched to ensure that they have the correct bindings; they shadow the # parent, and uniq implements that. --daniel 2011-06-01 result.uniq.sort end def get_action(name) @actions ||= {} result = @actions[name.to_sym] if result.nil? if self.is_a?(Class) and superclass.respond_to?(:get_action) found = superclass.get_action(name) elsif self.class.respond_to?(:get_action) found = self.class.get_action(name) end if found then # This is not the nicest way to make action equivalent to the Ruby # Method object, rather than UnboundMethod, but it will do for now, # and we only have to make this change in *one* place. --daniel 2011-04-12 result = @actions[name.to_sym] = found.__dup_and_rebind_to(self) end end return result end def get_default_action default = actions.map {|x| get_action(x) }.select {|x| x.default } if default.length > 1 raise "The actions #{default.map(&:name).join(", ")} cannot all be default" end default.first end def action?(name) actions.include?(name.to_sym) end end diff --git a/lib/puppet/module_tool.rb b/lib/puppet/module_tool.rb index 0ee3433a2..acfae25dd 100644 --- a/lib/puppet/module_tool.rb +++ b/lib/puppet/module_tool.rb @@ -1,116 +1,117 @@ # encoding: UTF-8 # Load standard libraries require 'pathname' require 'fileutils' require 'puppet/util/colors' # Define tool module Puppet module ModuleTool extend Puppet::Util::Colors # Directory and names that should not be checksummed. ARTIFACTS = ['pkg', /^\./, /^~/, /^#/, 'coverage', 'metadata.json', 'REVISION'] FULL_MODULE_NAME_PATTERN = /\A([^-\/|.]+)[-|\/](.+)\z/ REPOSITORY_URL = Puppet.settings[:module_repository] # Is this a directory that shouldn't be checksummed? # # TODO: Should this be part of Checksums? # TODO: Rename this method to reflect it's purpose? # TODO: Shouldn't this be used when building packages too? def self.artifact?(path) case File.basename(path) when *ARTIFACTS true else false end end # Return the +username+ and +modname+ for a given +full_module_name+, or raise an # ArgumentError if the argument isn't parseable. def self.username_and_modname_from(full_module_name) if matcher = full_module_name.match(FULL_MODULE_NAME_PATTERN) return matcher.captures else raise ArgumentError, "Not a valid full name: #{full_module_name}" end end def self.find_module_root(path) for dir in [path, Dir.pwd].compact if File.exist?(File.join(dir, 'Modulefile')) return dir end end raise ArgumentError, "Could not find a valid module at #{path ? path.inspect : 'current directory'}" end # Builds a formatted tree from a list of node hashes containing +:text+ # and +:dependencies+ keys. def self.format_tree(nodes, level = 0) str = '' nodes.each_with_index do |node, i| last_node = nodes.length - 1 == i deps = node[:dependencies] || [] str << (indent = " " * level) str << (last_node ? "└" : "├") str << "─" str << (deps.empty? ? 
"─" : "┬") str << " #{node[:text]}\n" branch = format_tree(deps, level + 1) branch.gsub!(/^#{indent} /, indent + '│') unless last_node str << branch end return str end def self.build_tree(mods, dir) mods.each do |mod| version_string = mod[:version][:vstring].sub(/^(?!v)/, 'v') if mod[:action] == :upgrade previous_version = mod[:previous_version].sub(/^(?!v)/, 'v') version_string = "#{previous_version} -> #{version_string}" end mod[:text] = "#{mod[:module]} (#{colorize(:cyan, version_string)})" mod[:text] += " [#{mod[:path]}]" unless mod[:path] == dir build_tree(mod[:dependencies], dir) end end def self.set_option_defaults(options) sep = File::PATH_SEPARATOR if options[:target_dir] options[:target_dir] = File.expand_path(options[:target_dir]) end prepend_target_dir = !! options[:target_dir] options[:modulepath] ||= Puppet.settings[:modulepath] options[:environment] ||= Puppet.settings[:environment] options[:modulepath] = "#{options[:target_dir]}#{sep}#{options[:modulepath]}" if prepend_target_dir Puppet[:modulepath] = options[:modulepath] Puppet[:environment] = options[:environment] options[:target_dir] = options[:modulepath].split(sep).first + options[:target_dir] = File.expand_path(options[:target_dir]) end end end # Load remaining libraries require 'puppet/module_tool/errors' require 'puppet/module_tool/applications' require 'puppet/module_tool/checksums' require 'puppet/module_tool/contents_description' require 'puppet/module_tool/dependency' require 'puppet/module_tool/metadata' require 'puppet/module_tool/modulefile' require 'puppet/module_tool/skeleton' require 'puppet/forge/cache' require 'puppet/forge' diff --git a/lib/puppet/module_tool/applications/unpacker.rb b/lib/puppet/module_tool/applications/unpacker.rb index e9b0b50d1..3ed6058f3 100644 --- a/lib/puppet/module_tool/applications/unpacker.rb +++ b/lib/puppet/module_tool/applications/unpacker.rb @@ -1,48 +1,67 @@ require 'pathname' require 'tmpdir' module Puppet::ModuleTool module Applications class Unpacker < Application def initialize(filename, options = {}) @filename = Pathname.new(filename) parsed = parse_filename(filename) super(options) @module_dir = Pathname.new(options[:target_dir]) + parsed[:dir_name] end def run extract_module_to_install_dir # Return the Pathname object representing the directory where the # module release archive was unpacked the to, and the module release # name. @module_dir end + # Obtain a suitable temporary path for building and unpacking tarballs + # + # @return [Pathname] path to temporary build location + def build_dir + Puppet::Forge::Cache.base_path + "tmp-unpacker-#{Digest::SHA1.hexdigest(@filename.basename.to_s)}" + end + private def extract_module_to_install_dir delete_existing_installation_or_abort! - build_dir = Puppet::Forge::Cache.base_path + "tmp-unpacker-#{Digest::SHA1.hexdigest(@filename.basename.to_s)}" build_dir.mkpath begin - unless system "tar xzf #{@filename} -C #{build_dir}" - raise RuntimeError, "Could not extract contents of module archive." + begin + if Facter.value('operatingsystem') == "Solaris" + # Solaris tar is not as safe and works differently, so we prefer + # gnutar instead. + if Puppet::Util.which('gtar') + Puppet::Util.execute("gtar xzf #{@filename} -C #{build_dir}") + else + raise RuntimeError, "Cannot find the command 'gtar'. Make sure GNU tar is installed, and is in your PATH." 
+ end + else + Puppet::Util.execute("tar xzf #{@filename} -C #{build_dir}") + end + rescue Puppet::ExecutionFailure => e + raise RuntimeError, "Could not extract contents of module archive: #{e.message}" end + # grab the first directory extracted = build_dir.children.detect { |c| c.directory? } FileUtils.mv extracted, @module_dir ensure build_dir.rmtree end end def delete_existing_installation_or_abort! return unless @module_dir.exist? FileUtils.rm_rf(@module_dir, :secure => true) end end end end diff --git a/lib/puppet/parser/functions/fqdn_rand.rb b/lib/puppet/parser/functions/fqdn_rand.rb index 3b1df6d95..916338e98 100644 --- a/lib/puppet/parser/functions/fqdn_rand.rb +++ b/lib/puppet/parser/functions/fqdn_rand.rb @@ -1,12 +1,13 @@ +require 'digest/md5' + Puppet::Parser::Functions::newfunction(:fqdn_rand, :type => :rvalue, :doc => "Generates random numbers based on the node's fqdn. Generated random values will be a range from 0 up to and excluding n, where n is the first parameter. The second argument specifies a number to add to the seed and is optional, for example: $random_number = fqdn_rand(30) $random_number_seed = fqdn_rand(30,30)") do |args| - require 'digest/md5' max = args.shift.to_i srand(Digest::MD5.hexdigest([self['::fqdn'],args].join(':')).hex) rand(max).to_s end diff --git a/lib/puppet/parser/functions/md5.rb b/lib/puppet/parser/functions/md5.rb index f7a4f7222..864aae048 100644 --- a/lib/puppet/parser/functions/md5.rb +++ b/lib/puppet/parser/functions/md5.rb @@ -1,5 +1,5 @@ -Puppet::Parser::Functions::newfunction(:md5, :type => :rvalue, :doc => "Returns a MD5 hash value from a provided string.") do |args| - require 'md5' +require 'md5' +Puppet::Parser::Functions::newfunction(:md5, :type => :rvalue, :doc => "Returns a MD5 hash value from a provided string.") do |args| Digest::MD5.hexdigest(args[0]) end diff --git a/lib/puppet/parser/functions/sha1.rb b/lib/puppet/parser/functions/sha1.rb index 1e7d5abe4..c52df4d28 100644 --- a/lib/puppet/parser/functions/sha1.rb +++ b/lib/puppet/parser/functions/sha1.rb @@ -1,5 +1,5 @@ -Puppet::Parser::Functions::newfunction(:sha1, :type => :rvalue, :doc => "Returns a SHA1 hash value from a provided string.") do |args| - require 'digest/sha1' +require 'digest/sha1' +Puppet::Parser::Functions::newfunction(:sha1, :type => :rvalue, :doc => "Returns a SHA1 hash value from a provided string.") do |args| Digest::SHA1.hexdigest(args[0]) end diff --git a/lib/puppet/parser/functions/template.rb b/lib/puppet/parser/functions/template.rb index 8105e2cac..5e4b00e1e 100644 --- a/lib/puppet/parser/functions/template.rb +++ b/lib/puppet/parser/functions/template.rb @@ -1,25 +1,23 @@ Puppet::Parser::Functions::newfunction(:template, :type => :rvalue, :doc => "Evaluate a template and return its value. See [the templating docs](http://docs.puppetlabs.com/guides/templating.html) for more information. Note that if multiple templates are specified, their output is all concatenated and returned as the output of the function.") do |vals| - require 'erb' - vals.collect do |file| # Use a wrapper, so the template can't get access to the full # Scope object. 
debug "Retrieving template #{file}" wrapper = Puppet::Parser::TemplateWrapper.new(self) wrapper.file = file begin wrapper.result rescue => detail info = detail.backtrace.first.split(':') raise Puppet::ParseError, "Failed to parse template #{file}:\n Filepath: #{info[0]}\n Line: #{info[1]}\n Detail: #{detail}\n" end end.join("") end diff --git a/lib/puppet/provider/augeas/augeas.rb b/lib/puppet/provider/augeas/augeas.rb index 48651982d..9f3bb0b82 100644 --- a/lib/puppet/provider/augeas/augeas.rb +++ b/lib/puppet/provider/augeas/augeas.rb @@ -1,459 +1,477 @@ # # Copyright 2011 Bryan Kearney # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. require 'augeas' if Puppet.features.augeas? require 'strscan' require 'puppet/util' require 'puppet/util/diff' require 'puppet/util/package' Puppet::Type.type(:augeas).provide(:augeas) do include Puppet::Util include Puppet::Util::Diff include Puppet::Util::Package confine :true => Puppet.features.augeas? has_features :parse_commands, :need_to_run?,:execute_changes SAVE_NOOP = "noop" SAVE_OVERWRITE = "overwrite" SAVE_NEWFILE = "newfile" SAVE_BACKUP = "backup" COMMANDS = { "set" => [ :path, :string ], "setm" => [ :path, :string, :string ], "rm" => [ :path ], "clear" => [ :path ], "mv" => [ :path, :path ], "insert" => [ :string, :string, :path ], "get" => [ :path, :comparator, :string ], "defvar" => [ :string, :path ], "defnode" => [ :string, :path, :string ], "match" => [ :path, :glob ], "size" => [:comparator, :int], "include" => [:string], "not_include" => [:string], "==" => [:glob], "!=" => [:glob] } COMMANDS["ins"] = COMMANDS["insert"] COMMANDS["remove"] = COMMANDS["rm"] COMMANDS["move"] = COMMANDS["mv"] attr_accessor :aug # Extracts an 2 dimensional array of commands which are in the # form of command path value. # The input can be # - A string with one command # - A string with many commands per line # - An array of strings. def parse_commands(data) context = resource[:context] # Add a trailing / if it is not there if (context.length > 0) context << "/" if context[-1, 1] != "/" end data = data.split($/) if data.is_a?(String) data = data.flatten args = [] data.each do |line| line.strip! next if line.nil? || line.empty? argline = [] sc = StringScanner.new(line) cmd = sc.scan(/\w+|==|!=/) formals = COMMANDS[cmd] fail("Unknown command #{cmd}") unless formals argline << cmd narg = 0 formals.each do |f| sc.skip(/\s+/) narg += 1 if f == :path start = sc.pos nbracket = 0 inSingleTick = false inDoubleTick = false begin sc.skip(/([^\]\[\s\\'"]|\\.)+/) ch = sc.getch nbracket += 1 if ch == "[" nbracket -= 1 if ch == "]" inSingleTick = !inSingleTick if ch == "'" inDoubleTick = !inDoubleTick if ch == "\"" fail("unmatched [") if nbracket < 0 end until ((nbracket == 0 && !inSingleTick && !inDoubleTick && (ch =~ /\s/)) || sc.eos?) len = sc.pos - start len -= 1 unless sc.eos? unless p = sc.string[start, len] fail("missing path argument #{narg} for #{cmd}") end # Rip off any ticks if they are there. 
p = p[1, (p.size - 2)] if p[0,1] == "'" || p[0,1] == "\"" p.chomp!("/") if p[0,1] != '$' && p[0,1] != "/" argline << context + p else argline << p end elsif f == :string delim = sc.peek(1) if delim == "'" || delim == "\"" sc.getch argline << sc.scan(/([^\\#{delim}]|(\\.))*/) sc.getch else argline << sc.scan(/[^\s]+/) end fail("missing string argument #{narg} for #{cmd}") unless argline[-1] elsif f == :comparator argline << sc.scan(/(==|!=|=~|<|<=|>|>=)/) unless argline[-1] puts sc.rest fail("invalid comparator for command #{cmd}") end elsif f == :int argline << sc.scan(/\d+/).to_i elsif f== :glob argline << sc.rest end end args << argline end args end def open_augeas unless @aug flags = Augeas::NONE flags = Augeas::TYPE_CHECK if resource[:type_check] == :true if resource[:incl] flags |= Augeas::NO_MODL_AUTOLOAD else flags |= Augeas::NO_LOAD end root = resource[:root] - load_path = resource[:load_path] + load_path = get_load_path(resource) debug("Opening augeas with root #{root}, lens path #{load_path}, flags #{flags}") @aug = Augeas::open(root, load_path,flags) debug("Augeas version #{get_augeas_version} is installed") if versioncmp(get_augeas_version, "0.3.6") >= 0 # Optimize loading if the context is given and it's a simple path, # requires the glob function from Augeas 0.8.2 or up glob_avail = !aug.match("/augeas/version/pathx/functions/glob").empty? opt_ctx = resource[:context].match("^/files/[^'\"\\[\\]]+$") if resource[:context] restricted = false if resource[:incl] aug.set("/augeas/load/Xfm/lens", resource[:lens]) aug.set("/augeas/load/Xfm/incl", resource[:incl]) elsif glob_avail and opt_ctx restricted = true # Optimize loading if the context is given, requires the glob function # from Augeas 0.8.2 or up ctx_path = resource[:context].sub(/^\/files(.*?)\/?$/, '\1/') load_path = "/augeas/load/*['%s' !~ glob(incl) + regexp('/.*')]" % ctx_path if aug.match(load_path).size < aug.match("/augeas/load/*").size aug.rm(load_path) restricted = true else # This will occur if the context is less specific than any glob debug("Unable to optimize files loaded by context path, no glob matches") end end aug.load print_load_errors(:warning => restricted) end @aug end def close_augeas if @aug @aug.close debug("Closed the augeas connection") @aug = nil end end # Used by the need_to_run? method to process get filters. Returns # true if there is a match, false if otherwise # Assumes a syntax of get /files/path [COMPARATOR] value def process_get(cmd_array) return_value = false #validate and tear apart the command fail ("Invalid command: #{cmd_array.join(" ")}") if cmd_array.length < 4 cmd = cmd_array.shift path = cmd_array.shift comparator = cmd_array.shift arg = cmd_array.join(" ") #check the value in augeas result = @aug.get(path) || '' case comparator when "!=" return_value = (result != arg) when "=~" regex = Regexp.new(arg) return_value = (result =~ regex) else return_value = (result.send(comparator, arg)) end !!return_value end # Used by the need_to_run? method to process match filters. 
Returns # true if there is a match, false if otherwise def process_match(cmd_array) return_value = false #validate and tear apart the command fail("Invalid command: #{cmd_array.join(" ")}") if cmd_array.length < 3 cmd = cmd_array.shift path = cmd_array.shift # Need to break apart the clause clause_array = parse_commands(cmd_array.shift)[0] verb = clause_array.shift #Get the values from augeas result = @aug.match(path) || [] fail("Error trying to match path '#{path}'") if (result == -1) # Now do the work case verb when "size" fail("Invalid command: #{cmd_array.join(" ")}") if clause_array.length != 2 comparator = clause_array.shift arg = clause_array.shift case comparator when "!=" return_value = !(result.size.send(:==, arg)) else return_value = (result.size.send(comparator, arg)) end when "include" arg = clause_array.shift return_value = result.include?(arg) when "not_include" arg = clause_array.shift return_value = !result.include?(arg) when "==" begin arg = clause_array.shift new_array = eval arg return_value = (result == new_array) rescue fail("Invalid array in command: #{cmd_array.join(" ")}") end when "!=" begin arg = clause_array.shift new_array = eval arg return_value = (result != new_array) rescue fail("Invalid array in command: #{cmd_array.join(" ")}") end end !!return_value end + # Generate lens load paths from user given paths and local pluginsync dir + def get_load_path(resource) + load_path = [] + + # Permits colon separated strings or arrays + if resource[:load_path] + load_path = [resource[:load_path]].flatten + load_path.map! { |path| path.split(/:/) } + load_path.flatten! + end + + if File.exists?("#{Puppet[:libdir]}/augeas/lenses") + load_path << "#{Puppet[:libdir]}/augeas/lenses" + end + + load_path.join(":") + end + def get_augeas_version @aug.get("/augeas/version") || "" end def set_augeas_save_mode(mode) @aug.set("/augeas/save", mode) end def print_load_errors(args={}) errors = @aug.match("/augeas//error") unless errors.empty? if args[:warning] warning("Loading failed for one or more files, see debug for /augeas//error output") else debug("Loading failed for one or more files, output from /augeas//error:") end end print_errors(errors) end def print_put_errors errors = @aug.match("/augeas//error[. = 'put_failed']") debug("Put failed on one or more files, output from /augeas//error:") unless errors.empty? print_errors(errors) end def print_errors(errors) errors.each do |errnode| @aug.match("#{errnode}/*").each do |subnode| subvalue = @aug.get(subnode) debug("#{subnode} = #{subvalue}") end end end # Determines if augeas acutally needs to run. def need_to_run? force = resource[:force] return_value = true begin open_augeas filter = resource[:onlyif] unless filter == "" cmd_array = parse_commands(filter)[0] command = cmd_array[0]; begin case command when "get"; return_value = process_get(cmd_array) when "match"; return_value = process_match(cmd_array) end rescue SystemExit,NoMemoryError raise rescue Exception => e fail("Error sending command '#{command}' with params #{cmd_array[1..-1].inspect}/#{e.message}") end end unless force # If we have a verison of augeas which is at least 0.3.6 then we # can make the changes now and see if changes were made. 
if return_value and versioncmp(get_augeas_version, "0.3.6") >= 0 debug("Will attempt to save and only run if files changed") # Execute in NEWFILE mode so we can show a diff set_augeas_save_mode(SAVE_NEWFILE) do_execute_changes save_result = @aug.save unless save_result print_put_errors fail("Save failed with return code #{save_result}, see debug") end saved_files = @aug.match("/augeas/events/saved") if saved_files.size > 0 root = resource[:root].sub(/^\/$/, "") saved_files.map! {|key| @aug.get(key).sub(/^\/files/, root) } saved_files.uniq.each do |saved_file| if Puppet[:show_diff] notice "\n" + diff(saved_file, saved_file + ".augnew") end File.delete(saved_file + ".augnew") end debug("Files changed, should execute") return_value = true else debug("Skipping because no files were changed") return_value = false end end end ensure if not return_value or resource.noop? or not save_result close_augeas end end return_value end def execute_changes # Workaround Augeas bug where changing the save mode doesn't trigger a # reload of the previously saved file(s) when we call Augeas#load @aug.match("/augeas/events/saved").each do |file| @aug.rm("/augeas#{@aug.get(file)}/mtime") end # Reload augeas, and execute the changes for real set_augeas_save_mode(SAVE_OVERWRITE) if versioncmp(get_augeas_version, "0.3.6") >= 0 @aug.load do_execute_changes unless @aug.save print_put_errors fail("Save failed with return code #{success}, see debug") end :executed ensure close_augeas end # Actually execute the augeas changes. def do_execute_changes commands = parse_commands(resource[:changes]) commands.each do |cmd_array| fail("invalid command #{cmd_array.join[" "]}") if cmd_array.length < 2 command = cmd_array[0] cmd_array.shift begin case command when "set" debug("sending command '#{command}' with params #{cmd_array.inspect}") rv = aug.set(cmd_array[0], cmd_array[1]) fail("Error sending command '#{command}' with params #{cmd_array.inspect}") if (!rv) when "setm" debug("sending command '#{command}' with params #{cmd_array.inspect}") rv = aug.setm(cmd_array[0], cmd_array[1], cmd_array[2]) fail("Error sending command '#{command}' with params #{cmd_array.inspect}") if (rv == -1) when "rm", "remove" debug("sending command '#{command}' with params #{cmd_array.inspect}") rv = aug.rm(cmd_array[0]) fail("Error sending command '#{command}' with params #{cmd_array.inspect}") if (rv == -1) when "clear" debug("sending command '#{command}' with params #{cmd_array.inspect}") rv = aug.clear(cmd_array[0]) fail("Error sending command '#{command}' with params #{cmd_array.inspect}") if (!rv) when "insert", "ins" label = cmd_array[0] where = cmd_array[1] path = cmd_array[2] case where when "before"; before = true when "after"; before = false else fail("Invalid value '#{where}' for where param") end debug("sending command '#{command}' with params #{[label, where, path].inspect}") rv = aug.insert(path, label, before) fail("Error sending command '#{command}' with params #{cmd_array.inspect}") if (rv == -1) when "defvar" debug("sending command '#{command}' with params #{cmd_array.inspect}") rv = aug.defvar(cmd_array[0], cmd_array[1]) fail("Error sending command '#{command}' with params #{cmd_array.inspect}") if (!rv) when "defnode" debug("sending command '#{command}' with params #{cmd_array.inspect}") rv = aug.defnode(cmd_array[0], cmd_array[1], cmd_array[2]) fail("Error sending command '#{command}' with params #{cmd_array.inspect}") if (!rv) when "mv", "move" debug("sending command '#{command}' with params #{cmd_array.inspect}") rv = 
aug.mv(cmd_array[0], cmd_array[1]) fail("Error sending command '#{command}' with params #{cmd_array.inspect}") if (rv == -1) else fail("Command '#{command}' is not supported") end rescue SystemExit,NoMemoryError raise rescue Exception => e fail("Error sending command '#{command}' with params #{cmd_array.inspect}/#{e.message}") end end end end diff --git a/lib/puppet/provider/package/msi.rb b/lib/puppet/provider/package/msi.rb index 86b5eacdc..575d4562a 100644 --- a/lib/puppet/provider/package/msi.rb +++ b/lib/puppet/provider/package/msi.rb @@ -1,95 +1,141 @@ require 'puppet/provider/package' Puppet::Type.type(:package).provide(:msi, :parent => Puppet::Provider::Package) do desc "Windows package management by installing and removing MSIs. This provider requires a `source` attribute, and will accept paths to local - files or files on mapped drives. - - This provider cannot uninstall arbitrary MSI packages; it can only uninstall - packages which were originally installed by Puppet." + files, mapped drives, or UNC paths." confine :operatingsystem => :windows defaultfor :operatingsystem => :windows + has_feature :installable + has_feature :uninstallable has_feature :install_options - # This is just here to make sure we can find it, and fail if we - # can't. Unfortunately, we need to do "special" quoting of the - # install options or msiexec.exe won't know what to do with them, if - # the value contains a space. - commands :msiexec => "msiexec.exe" + class MsiPackage + extend Enumerable - def self.instances - Dir.entries(installed_listing_dir).reject {|d| d == '.' or d == '..'}.collect do |name| - new(:name => File.basename(name, '.yml'), :provider => :msi, :ensure => :installed) + # From msi.h + INSTALLSTATE_DEFAULT = 5 # product is installed for the current user + INSTALLUILEVEL_NONE = 2 # completely silent installation + + def self.installer + require 'win32ole' + WIN32OLE.new("WindowsInstaller.Installer") + end + + def self.each(&block) + inst = installer + inst.UILevel = INSTALLUILEVEL_NONE + + inst.Products.each do |guid| + # products may be advertised, installed in a different user + # context, etc, we only want to know about products currently + # installed in our context. + next unless inst.ProductState(guid) == INSTALLSTATE_DEFAULT + + package = { + :name => inst.ProductInfo(guid, 'ProductName'), + # although packages have a version, the provider isn't versionable, + # so we can't return a version + # :ensure => inst.ProductInfo(guid, 'VersionString'), + :ensure => :installed, + :provider => :msi, + :productcode => guid, + :packagecode => inst.ProductInfo(guid, 'PackageCode') + } + + yield package + end end end + # Get an array of provider instances for currently installed packages + def self.instances + MsiPackage.enum_for.map { |package| new(package) } + end + + # Find first package whose PackageCode, e.g. {B2BE95D2-CD2C-46D6-8D27-35D150E58EC9}, + # matches the resource name (case-insensitively due to hex) or the ProductName matches + # the resource name. The ProductName is not guaranteed to be unique, but the PackageCode + # should be if the package is authored correctly. 
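# A small self-contained sketch of the matching rule spelled out in the
# comment above: a resource name matches a package when it equals the
# PackageCode GUID case-insensitively (GUIDs are hex, so case carries no
# meaning) or equals the ProductName exactly. The hash below uses the GUID
# from the comment plus a made-up product name.
def msi_match?(resource_name, package)
  resource_name.casecmp(package[:packagecode]) == 0 ||
    resource_name == package[:name]
end

package = { :packagecode => '{B2BE95D2-CD2C-46D6-8D27-35D150E58EC9}',
            :name        => 'Example Product' }

msi_match?('{b2be95d2-cd2c-46d6-8d27-35d150e58ec9}', package) # => true
msi_match?('Example Product', package)                        # => true
msi_match?('Other Product', package)                          # => false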
def query - {:name => resource[:name], :ensure => :installed} if FileTest.exists?(state_file) + MsiPackage.enum_for.find do |package| + resource[:name].casecmp(package[:packagecode]) == 0 || resource[:name] == package[:name] + end end def install - properties_for_command = nil - if resource[:install_options] - properties_for_command = resource[:install_options].collect do |k,v| - property = shell_quote k - value = shell_quote v - - "#{property}=#{value}" - end - end + fail("The source parameter is required when using the MSI provider.") unless resource[:source] # Unfortunately, we can't use the msiexec method defined earlier, # because of the special quoting we need to do around the MSI # properties to use. - execute ['msiexec.exe', '/qn', '/norestart', '/i', shell_quote(msi_source), properties_for_command].flatten.compact.join(' ') - - File.open(state_file, 'w') do |f| - metadata = { - 'name' => resource[:name], - 'install_options' => resource[:install_options], - 'source' => msi_source - } + command = ['msiexec.exe', '/qn', '/norestart', '/i', shell_quote(resource[:source]), install_options].flatten.compact.join(' ') + execute(command, :combine => true) - f.puts(YAML.dump(metadata)) - end + check_result(exit_status) end def uninstall - msiexec '/qn', '/norestart', '/x', msi_source + fail("The productcode property is missing.") unless properties[:productcode] - File.delete state_file - end + command = ['msiexec.exe', '/qn', '/norestart', '/x', properties[:productcode]].flatten.compact.join(' ') + execute(command, :combine => true) - def validate_source(value) - fail("The source parameter cannot be empty when using the MSI provider.") if value.empty? + check_result(exit_status) end - private - - def msi_source - resource[:source] ||= YAML.load_file(state_file)['source'] rescue nil - - fail("The source parameter is required when using the MSI provider.") unless resource[:source] + def exit_status + $CHILD_STATUS.exitstatus + end - resource[:source] + # http://msdn.microsoft.com/en-us/library/windows/desktop/aa368542(v=vs.85).aspx + ERROR_SUCCESS = 0 + ERROR_SUCCESS_REBOOT_INITIATED = 1641 + ERROR_SUCCESS_REBOOT_REQUIRED = 3010 + + # (Un)install may "fail" because the package requested a reboot, the system requested a + # reboot, or something else entirely. Reboot requests mean the package was installed + # successfully, but we warn since we don't have a good reboot strategy. + def check_result(hr) + operation = resource[:ensure] == :absent ? 'uninstall' : 'install' + + case hr + when ERROR_SUCCESS + # yeah + when 194 + warning("The package requested a reboot to finish the operation.") + when ERROR_SUCCESS_REBOOT_INITIATED + warning("The package #{operation}ed successfully and the system is rebooting now.") + when ERROR_SUCCESS_REBOOT_REQUIRED + warning("The package #{operation}ed successfully, but the system must be rebooted.") + else + raise Puppet::Util::Windows::Error.new("Failed to #{operation}", hr) + end end - def self.installed_listing_dir - listing_dir = File.join(Puppet[:vardir], 'db', 'package', 'msi') + def validate_source(value) + fail("The source parameter cannot be empty when using the MSI provider.") if value.empty? + end - FileUtils.mkdir_p listing_dir unless File.directory? 
listing_dir + def install_options + # properties is a string delimited by spaces, so each key value must be quoted + properties_for_command = nil + if resource[:install_options] + properties_for_command = resource[:install_options].collect do |k,v| + property = shell_quote k + value = shell_quote v - listing_dir - end + "#{property}=#{value}" + end + end - def state_file - File.join(self.class.installed_listing_dir, "#{resource[:name]}.yml") + properties_for_command end def shell_quote(value) value.include?(' ') ? %Q["#{value.gsub(/"/, '\"')}"] : value end end diff --git a/lib/puppet/provider/scheduled_task/win32_taskscheduler.rb b/lib/puppet/provider/scheduled_task/win32_taskscheduler.rb index 3e8f2bc9b..b9491294d 100644 --- a/lib/puppet/provider/scheduled_task/win32_taskscheduler.rb +++ b/lib/puppet/provider/scheduled_task/win32_taskscheduler.rb @@ -1,564 +1,565 @@ require 'puppet/parameter' if Puppet.features.microsoft_windows? require 'win32/taskscheduler' require 'puppet/util/adsi' end Puppet::Type.type(:scheduled_task).provide(:win32_taskscheduler) do desc %q{This provider uses the win32-taskscheduler gem to manage scheduled tasks on Windows. Puppet requires version 0.2.1 or later of the win32-taskscheduler gem; previous versions can cause "Could not evaluate: The operation completed successfully" errors.} defaultfor :operatingsystem => :windows confine :operatingsystem => :windows def self.instances Win32::TaskScheduler.new.tasks.collect do |job_file| job_title = File.basename(job_file, '.job') new( :provider => :win32_taskscheduler, :name => job_title ) end end def exists? Win32::TaskScheduler.new.exists? resource[:name] end def task return @task if @task @task ||= Win32::TaskScheduler.new @task.activate(resource[:name] + '.job') if exists? @task end def clear_task @task = nil @triggers = nil end def enabled task.flags & Win32::TaskScheduler::DISABLED == 0 ? :true : :false end def command task.application_name end def arguments task.parameters end def working_dir task.working_directory end def user account = task.account_information return 'system' if account == '' account end def trigger return @triggers if @triggers @triggers = [] task.trigger_count.times do |i| trigger = begin task.trigger(i) rescue Win32::TaskScheduler::Error => e # Win32::TaskScheduler can't handle all of the # trigger types Windows uses, so we need to skip the # unhandled types to prevent "puppet resource" from # blowing up. 
nil end next unless trigger and scheduler_trigger_types.include?(trigger['trigger_type']) puppet_trigger = {} case trigger['trigger_type'] when Win32::TaskScheduler::TASK_TIME_TRIGGER_DAILY puppet_trigger['schedule'] = 'daily' puppet_trigger['every'] = trigger['type']['days_interval'].to_s when Win32::TaskScheduler::TASK_TIME_TRIGGER_WEEKLY puppet_trigger['schedule'] = 'weekly' puppet_trigger['every'] = trigger['type']['weeks_interval'].to_s puppet_trigger['on'] = days_of_week_from_bitfield(trigger['type']['days_of_week']) when Win32::TaskScheduler::TASK_TIME_TRIGGER_MONTHLYDATE puppet_trigger['schedule'] = 'monthly' puppet_trigger['months'] = months_from_bitfield(trigger['type']['months']) puppet_trigger['on'] = days_from_bitfield(trigger['type']['days']) when Win32::TaskScheduler::TASK_TIME_TRIGGER_MONTHLYDOW puppet_trigger['schedule'] = 'monthly' puppet_trigger['months'] = months_from_bitfield(trigger['type']['months']) puppet_trigger['which_occurrence'] = occurrence_constant_to_name(trigger['type']['weeks']) puppet_trigger['day_of_week'] = days_of_week_from_bitfield(trigger['type']['days_of_week']) when Win32::TaskScheduler::TASK_TIME_TRIGGER_ONCE puppet_trigger['schedule'] = 'once' end puppet_trigger['start_date'] = self.class.normalized_date("#{trigger['start_year']}-#{trigger['start_month']}-#{trigger['start_day']}") puppet_trigger['start_time'] = self.class.normalized_time("#{trigger['start_hour']}:#{trigger['start_minute']}") puppet_trigger['enabled'] = trigger['flags'] & Win32::TaskScheduler::TASK_TRIGGER_FLAG_DISABLED == 0 puppet_trigger['index'] = i @triggers << puppet_trigger end @triggers = @triggers[0] if @triggers.length == 1 @triggers end def user_insync?(current, should) return false unless current # Win32::TaskScheduler can return the 'SYSTEM' account as the # empty string. current = 'system' if current == '' # By comparing account SIDs we don't have to worry about case # sensitivity, or canonicalization of the account name. Puppet::Util::ADSI.sid_for_account(current) == Puppet::Util::ADSI.sid_for_account(should[0]) end def trigger_insync?(current, should) should = [should] unless should.is_a?(Array) current = [current] unless current.is_a?(Array) return false unless current.length == should.length current_in_sync = current.all? do |c| should.any? {|s| triggers_same?(c, s)} end should_in_sync = should.all? do |s| current.any? {|c| triggers_same?(c,s)} end current_in_sync && should_in_sync end def command=(value) task.application_name = value end def arguments=(value) task.parameters = value end def working_dir=(value) task.working_directory = value end def enabled=(value) if value == :true task.flags = task.flags & ~Win32::TaskScheduler::DISABLED else task.flags = task.flags | Win32::TaskScheduler::DISABLED end end def trigger=(value) desired_triggers = value.is_a?(Array) ? value : [value] current_triggers = trigger.is_a?(Array) ? 
trigger : [trigger] extra_triggers = [] desired_to_search = desired_triggers.dup current_triggers.each do |current| if found = desired_to_search.find {|desired| triggers_same?(current, desired)} desired_to_search.delete(found) else extra_triggers << current['index'] end end needed_triggers = [] current_to_search = current_triggers.dup desired_triggers.each do |desired| if found = current_to_search.find {|current| triggers_same?(current, desired)} current_to_search.delete(found) else needed_triggers << desired end end extra_triggers.reverse_each do |index| task.delete_trigger(index) end needed_triggers.each do |trigger_hash| # Even though this is an assignment, the API for # Win32::TaskScheduler ends up appending this trigger to the # list of triggers for the task, while #add_trigger is only able # to replace existing triggers. *shrug* task.trigger = translate_hash_to_trigger(trigger_hash) end end def user=(value) self.fail("Invalid user: #{value}") unless Puppet::Util::ADSI.sid_for_account(value) if value.to_s.downcase != 'system' task.set_account_information(value, resource[:password]) else # Win32::TaskScheduler treats a nil/empty username & password as # requesting the SYSTEM account. task.set_account_information(nil, nil) end end def create clear_task @task = Win32::TaskScheduler.new(resource[:name], dummy_time_trigger) self.command = resource[:command] [:arguments, :working_dir, :enabled, :trigger, :user].each do |prop| send("#{prop}=", resource[prop]) if resource[prop] end end def destroy Win32::TaskScheduler.new.delete(resource[:name] + '.job') end def flush unless resource[:ensure] == :absent self.fail('Parameter command is required.') unless resource[:command] task.save + @task = nil end end def triggers_same?(current_trigger, desired_trigger) return false unless current_trigger['schedule'] == desired_trigger['schedule'] return false if current_trigger.has_key?('enabled') && !current_trigger['enabled'] desired = desired_trigger.dup desired['every'] ||= current_trigger['every'] if current_trigger.has_key?('every') desired['months'] ||= current_trigger['months'] if current_trigger.has_key?('months') desired['on'] ||= current_trigger['on'] if current_trigger.has_key?('on') desired['day_of_week'] ||= current_trigger['day_of_week'] if current_trigger.has_key?('day_of_week') translate_hash_to_trigger(current_trigger) == translate_hash_to_trigger(desired) end def self.normalized_date(date_string) date = Date.parse("#{date_string}") "#{date.year}-#{date.month}-#{date.day}" end def self.normalized_time(time_string) Time.parse("#{time_string}").strftime('%H:%M') end def dummy_time_trigger now = Time.now { 'flags' => 0, 'random_minutes_interval' => 0, 'end_day' => 0, "end_year" => 0, "trigger_type" => 0, "minutes_interval" => 0, "end_month" => 0, "minutes_duration" => 0, 'start_year' => now.year, 'start_month' => now.month, 'start_day' => now.day, 'start_hour' => now.hour, 'start_minute' => now.min, 'trigger_type' => Win32::TaskScheduler::ONCE, } end def translate_hash_to_trigger(puppet_trigger, user_provided_input=false) trigger = dummy_time_trigger if user_provided_input self.fail "'enabled' is read-only on triggers" if puppet_trigger.has_key?('enabled') self.fail "'index' is read-only on triggers" if puppet_trigger.has_key?('index') end puppet_trigger.delete('index') if puppet_trigger.delete('enabled') == false trigger['flags'] |= Win32::TaskScheduler::TASK_TRIGGER_FLAG_DISABLED else trigger['flags'] &= ~Win32::TaskScheduler::TASK_TRIGGER_FLAG_DISABLED end extra_keys = 
puppet_trigger.keys.sort - ['schedule', 'start_date', 'start_time', 'every', 'months', 'on', 'which_occurrence', 'day_of_week'] self.fail "Unknown trigger option(s): #{Puppet::Parameter.format_value_for_display(extra_keys)}" unless extra_keys.empty? self.fail "Must specify 'start_time' when defining a trigger" unless puppet_trigger['start_time'] case puppet_trigger['schedule'] when 'daily' trigger['trigger_type'] = Win32::TaskScheduler::DAILY trigger['type'] = { 'days_interval' => Integer(puppet_trigger['every'] || 1) } when 'weekly' trigger['trigger_type'] = Win32::TaskScheduler::WEEKLY trigger['type'] = { 'weeks_interval' => Integer(puppet_trigger['every'] || 1) } trigger['type']['days_of_week'] = if puppet_trigger['day_of_week'] bitfield_from_days_of_week(puppet_trigger['day_of_week']) else scheduler_days_of_week.inject(0) {|day_flags,day| day_flags |= day} end when 'monthly' trigger['type'] = { 'months' => bitfield_from_months(puppet_trigger['months'] || (1..12).to_a), } if puppet_trigger.keys.include?('on') if puppet_trigger.has_key?('day_of_week') or puppet_trigger.has_key?('which_occurrence') self.fail "Neither 'day_of_week' nor 'which_occurrence' can be specified when creating a monthly date-based trigger" end trigger['trigger_type'] = Win32::TaskScheduler::MONTHLYDATE trigger['type']['days'] = bitfield_from_days(puppet_trigger['on']) elsif puppet_trigger.keys.include?('which_occurrence') or puppet_trigger.keys.include?('day_of_week') self.fail 'which_occurrence cannot be specified as an array' if puppet_trigger['which_occurrence'].is_a?(Array) %w{day_of_week which_occurrence}.each do |field| self.fail "#{field} must be specified when creating a monthly day-of-week based trigger" unless puppet_trigger.has_key?(field) end trigger['trigger_type'] = Win32::TaskScheduler::MONTHLYDOW trigger['type']['weeks'] = occurrence_name_to_constant(puppet_trigger['which_occurrence']) trigger['type']['days_of_week'] = bitfield_from_days_of_week(puppet_trigger['day_of_week']) else self.fail "Don't know how to create a 'monthly' schedule with the options: #{puppet_trigger.keys.sort.join(', ')}" end when 'once' self.fail "Must specify 'start_date' when defining a one-time trigger" unless puppet_trigger['start_date'] trigger['trigger_type'] = Win32::TaskScheduler::ONCE else self.fail "Unknown schedule type: #{puppet_trigger["schedule"].inspect}" end if start_date = puppet_trigger['start_date'] start_date = Date.parse(start_date) self.fail "start_date must be on or after 1753-01-01" unless start_date >= Date.new(1753, 1, 1) trigger['start_year'] = start_date.year trigger['start_month'] = start_date.month trigger['start_day'] = start_date.day end start_time = Time.parse(puppet_trigger['start_time']) trigger['start_hour'] = start_time.hour trigger['start_minute'] = start_time.min trigger end def validate_trigger(value) value = [value] unless value.is_a?(Array) # translate_hash_to_trigger handles the same validation that we # would be doing here at the individual trigger level. 
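# A self-contained sketch of the start_date/start_time handling performed by
# translate_hash_to_trigger above: the date must fall on or after 1753-01-01
# and is split into year/month/day fields, while the time contributes the
# hour and minute. The example values are arbitrary.
require 'date'
require 'time'

def start_fields(start_date, start_time)
  date = Date.parse(start_date)
  raise ArgumentError, "start_date must be on or after 1753-01-01" unless date >= Date.new(1753, 1, 1)
  time = Time.parse(start_time)
  { 'start_year' => date.year, 'start_month' => date.month, 'start_day' => date.day,
    'start_hour' => time.hour, 'start_minute' => time.min }
end

# start_fields("2012-03-04", "14:30")
# => {"start_year"=>2012, "start_month"=>3, "start_day"=>4,
#     "start_hour"=>14, "start_minute"=>30}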
value.each {|t| translate_hash_to_trigger(t, true)} true end private def bitfield_from_months(months) bitfield = 0 months = [months] unless months.is_a?(Array) months.each do |month| integer_month = Integer(month) rescue nil self.fail 'Month must be specified as an integer in the range 1-12' unless integer_month == month.to_f and integer_month.between?(1,12) bitfield |= scheduler_months[integer_month - 1] end bitfield end def bitfield_from_days(days) bitfield = 0 days = [days] unless days.is_a?(Array) days.each do |day| # The special "day" of 'last' is represented by day "number" # 32. 'last' has the special meaning of "the last day of the # month", no matter how many days there are in the month. day = 32 if day == 'last' integer_day = Integer(day) self.fail "Day must be specified as an integer in the range 1-31, or as 'last'" unless integer_day = day.to_f and integer_day.between?(1,32) bitfield |= 1 << integer_day - 1 end bitfield end def bitfield_from_days_of_week(days_of_week) bitfield = 0 days_of_week = [days_of_week] unless days_of_week.is_a?(Array) days_of_week.each do |day_of_week| bitfield |= day_of_week_name_to_constant(day_of_week) end bitfield end def months_from_bitfield(bitfield) months = [] scheduler_months.each do |month| if bitfield & month != 0 months << month_constant_to_number(month) end end months end def days_from_bitfield(bitfield) days = [] i = 0 while bitfield > 0 if bitfield & 1 > 0 # Day 32 has the special meaning of "the last day of the # month", no matter how many days there are in the month. days << (i == 31 ? 'last' : i + 1) end bitfield = bitfield >> 1 i += 1 end days end def days_of_week_from_bitfield(bitfield) days_of_week = [] scheduler_days_of_week.each do |day_of_week| if bitfield & day_of_week != 0 days_of_week << day_of_week_constant_to_name(day_of_week) end end days_of_week end def scheduler_trigger_types [ Win32::TaskScheduler::TASK_TIME_TRIGGER_DAILY, Win32::TaskScheduler::TASK_TIME_TRIGGER_WEEKLY, Win32::TaskScheduler::TASK_TIME_TRIGGER_MONTHLYDATE, Win32::TaskScheduler::TASK_TIME_TRIGGER_MONTHLYDOW, Win32::TaskScheduler::TASK_TIME_TRIGGER_ONCE ] end def scheduler_days_of_week [ Win32::TaskScheduler::SUNDAY, Win32::TaskScheduler::MONDAY, Win32::TaskScheduler::TUESDAY, Win32::TaskScheduler::WEDNESDAY, Win32::TaskScheduler::THURSDAY, Win32::TaskScheduler::FRIDAY, Win32::TaskScheduler::SATURDAY ] end def scheduler_months [ Win32::TaskScheduler::JANUARY, Win32::TaskScheduler::FEBRUARY, Win32::TaskScheduler::MARCH, Win32::TaskScheduler::APRIL, Win32::TaskScheduler::MAY, Win32::TaskScheduler::JUNE, Win32::TaskScheduler::JULY, Win32::TaskScheduler::AUGUST, Win32::TaskScheduler::SEPTEMBER, Win32::TaskScheduler::OCTOBER, Win32::TaskScheduler::NOVEMBER, Win32::TaskScheduler::DECEMBER ] end def scheduler_occurrences [ Win32::TaskScheduler::FIRST_WEEK, Win32::TaskScheduler::SECOND_WEEK, Win32::TaskScheduler::THIRD_WEEK, Win32::TaskScheduler::FOURTH_WEEK, Win32::TaskScheduler::LAST_WEEK ] end def day_of_week_constant_to_name(constant) case constant when Win32::TaskScheduler::SUNDAY; 'sun' when Win32::TaskScheduler::MONDAY; 'mon' when Win32::TaskScheduler::TUESDAY; 'tues' when Win32::TaskScheduler::WEDNESDAY; 'wed' when Win32::TaskScheduler::THURSDAY; 'thurs' when Win32::TaskScheduler::FRIDAY; 'fri' when Win32::TaskScheduler::SATURDAY; 'sat' end end def day_of_week_name_to_constant(name) case name when 'sun'; Win32::TaskScheduler::SUNDAY when 'mon'; Win32::TaskScheduler::MONDAY when 'tues'; Win32::TaskScheduler::TUESDAY when 'wed'; 
Win32::TaskScheduler::WEDNESDAY when 'thurs'; Win32::TaskScheduler::THURSDAY when 'fri'; Win32::TaskScheduler::FRIDAY when 'sat'; Win32::TaskScheduler::SATURDAY end end def month_constant_to_number(constant) month_num = 1 while constant >> month_num - 1 > 1 month_num += 1 end month_num end def occurrence_constant_to_name(constant) case constant when Win32::TaskScheduler::FIRST_WEEK; 'first' when Win32::TaskScheduler::SECOND_WEEK; 'second' when Win32::TaskScheduler::THIRD_WEEK; 'third' when Win32::TaskScheduler::FOURTH_WEEK; 'fourth' when Win32::TaskScheduler::LAST_WEEK; 'last' end end def occurrence_name_to_constant(name) case name when 'first'; Win32::TaskScheduler::FIRST_WEEK when 'second'; Win32::TaskScheduler::SECOND_WEEK when 'third'; Win32::TaskScheduler::THIRD_WEEK when 'fourth'; Win32::TaskScheduler::FOURTH_WEEK when 'last'; Win32::TaskScheduler::LAST_WEEK end end end diff --git a/lib/puppet/provider/service/windows.rb b/lib/puppet/provider/service/windows.rb index 717e585fd..d8ba69961 100644 --- a/lib/puppet/provider/service/windows.rb +++ b/lib/puppet/provider/service/windows.rb @@ -1,111 +1,113 @@ # Windows Service Control Manager (SCM) provider require 'win32/service' if Puppet.features.microsoft_windows? Puppet::Type.type(:service).provide :windows do desc <<-EOT Support for Windows Service Control Manager (SCM). This provider can start, stop, enable, and disable services, and the SCM provides working status methods for all services. Control of service groups (dependencies) is not yet supported, nor is running services as a specific user. EOT defaultfor :operatingsystem => :windows confine :operatingsystem => :windows has_feature :refreshable + commands :net => 'net.exe' + def enable w32ss = Win32::Service.configure( 'service_name' => @resource[:name], 'start_type' => Win32::Service::SERVICE_AUTO_START ) raise Puppet::Error.new("Win32 service enable of #{@resource[:name]} failed" ) if( w32ss.nil? ) rescue Win32::Service::Error => detail raise Puppet::Error.new("Cannot enable #{@resource[:name]}, error was: #{detail}" ) end def disable w32ss = Win32::Service.configure( 'service_name' => @resource[:name], 'start_type' => Win32::Service::SERVICE_DISABLED ) raise Puppet::Error.new("Win32 service disable of #{@resource[:name]} failed" ) if( w32ss.nil? ) rescue Win32::Service::Error => detail raise Puppet::Error.new("Cannot disable #{@resource[:name]}, error was: #{detail}" ) end def manual_start w32ss = Win32::Service.configure( 'service_name' => @resource[:name], 'start_type' => Win32::Service::SERVICE_DEMAND_START ) raise Puppet::Error.new("Win32 service manual enable of #{@resource[:name]} failed" ) if( w32ss.nil? ) rescue Win32::Service::Error => detail raise Puppet::Error.new("Cannot enable #{@resource[:name]} for manual start, error was: #{detail}" ) end def enabled? w32ss = Win32::Service.config_info( @resource[:name] ) raise Puppet::Error.new("Win32 service query of #{@resource[:name]} failed" ) unless( !w32ss.nil? 
&& w32ss.instance_of?( Struct::ServiceConfigInfo ) ) debug("Service #{@resource[:name]} start type is #{w32ss.start_type}") case w32ss.start_type when Win32::Service.get_start_type(Win32::Service::SERVICE_AUTO_START), Win32::Service.get_start_type(Win32::Service::SERVICE_BOOT_START), Win32::Service.get_start_type(Win32::Service::SERVICE_SYSTEM_START) :true when Win32::Service.get_start_type(Win32::Service::SERVICE_DEMAND_START) :manual when Win32::Service.get_start_type(Win32::Service::SERVICE_DISABLED) :false else raise Puppet::Error.new("Unknown start type: #{w32ss.start_type}") end rescue Win32::Service::Error => detail raise Puppet::Error.new("Cannot get start type for #{@resource[:name]}, error was: #{detail}" ) end def start if enabled? == :false # If disabled and not managing enable, respect disabled and fail. if @resource[:enable].nil? raise Puppet::Error, "Will not start disabled service #{@resource[:name]} without managing enable. Specify 'enable => false' to override." # Otherwise start. If enable => false, we will later sync enable and # disable the service again. elsif @resource[:enable] == :true enable else manual_start end end - Win32::Service.start( @resource[:name] ) - rescue Win32::Service::Error => detail + net(:start, @resource[:name]) + rescue Puppet::ExecutionFailure => detail raise Puppet::Error.new("Cannot start #{@resource[:name]}, error was: #{detail}" ) end def stop - Win32::Service.stop( @resource[:name] ) - rescue Win32::Service::Error => detail + net(:stop, @resource[:name]) + rescue Puppet::ExecutionFailure => detail raise Puppet::Error.new("Cannot stop #{@resource[:name]}, error was: #{detail}" ) end def restart self.stop self.start end def status w32ss = Win32::Service.status( @resource[:name] ) raise Puppet::Error.new("Win32 service query of #{@resource[:name]} failed" ) unless( !w32ss.nil? && w32ss.instance_of?( Struct::ServiceStatus ) ) state = case w32ss.current_state when "stopped", "pause pending", "stop pending", "paused" then :stopped when "running", "continue pending", "start pending" then :running else raise Puppet::Error.new("Unknown service state '#{w32ss.current_state}' for service '#{@resource[:name]}'") end debug("Service #{@resource[:name]} is #{w32ss.current_state}") return state rescue Win32::Service::Error => detail raise Puppet::Error.new("Cannot get status of #{@resource[:name]}, error was: #{detail}" ) end # returns all providers for all existing services and startup state def self.instances Win32::Service.services.collect { |s| new(:name => s.service_name) } end end diff --git a/lib/puppet/rails/benchmark.rb b/lib/puppet/rails/benchmark.rb index e1e92bb79..741b6d5bd 100644 --- a/lib/puppet/rails/benchmark.rb +++ b/lib/puppet/rails/benchmark.rb @@ -1,63 +1,63 @@ require 'benchmark' +require 'yaml' + module Puppet::Rails::Benchmark $benchmarks = {:accumulated => {}} def time_debug? Puppet::Rails::TIME_DEBUG end def railsmark(message) result = nil seconds = Benchmark.realtime { result = yield } Puppet.debug(message + " in %0.2f seconds" % seconds) $benchmarks[message] = seconds if time_debug? result end def debug_benchmark(message) return yield unless Puppet::Rails::TIME_DEBUG railsmark(message) { yield } end # Collect partial benchmarks to be logged when they're # all done. # These are always low-level debugging so we only # print them if time_debug is enabled. def accumulate_benchmark(message, label) return yield unless time_debug? 
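# A minimal standalone sketch of the accumulation pattern used by
# accumulate_benchmark above: each labelled block's wall-clock time is summed
# into a hash keyed by message and label, and the block's own result is
# returned. The message and label names are made up for illustration.
require 'benchmark'

marks = Hash.new { |h, k| h[k] = Hash.new(0) }

def accumulate(marks, message, label)
  result = nil
  marks[message][label] += Benchmark.realtime { result = yield }
  result
end

accumulate(marks, "resource storage", :save) { sleep 0.01 }
accumulate(marks, "resource storage", :save) { sleep 0.01 }
# marks["resource storage"][:save] now holds the two sleeps' summed seconds.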
$benchmarks[:accumulated][message] ||= Hash.new(0) $benchmarks[:accumulated][message][label] += Benchmark.realtime { yield } end # Log the accumulated marks. def log_accumulated_marks(message) return unless time_debug? return if $benchmarks[:accumulated].empty? or $benchmarks[:accumulated][message].nil? or $benchmarks[:accumulated][message].empty? $benchmarks[:accumulated][message].each do |label, value| Puppet.debug(message + ("(#{label})") + (" in %0.2f seconds" % value)) end end def write_benchmarks return unless time_debug? branch = %x{git branch}.split("\n").find { |l| l =~ /^\*/ }.sub("* ", '') file = "/tmp/time_debugging.yaml" - require 'yaml' - if FileTest.exist?(file) data = YAML.load_file(file) else data = {} end data[branch] = $benchmarks Puppet::Util.replace_file(file, 0644) { |f| f.print YAML.dump(data) } end end diff --git a/lib/puppet/resource/catalog.rb b/lib/puppet/resource/catalog.rb index 2f139bd7e..25232d947 100644 --- a/lib/puppet/resource/catalog.rb +++ b/lib/puppet/resource/catalog.rb @@ -1,611 +1,612 @@ require 'puppet/node' require 'puppet/indirector' require 'puppet/simple_graph' require 'puppet/transaction' require 'puppet/util/pson' require 'puppet/util/tagging' # This class models a node catalog. It is the thing # meant to be passed from server to client, and it contains all # of the information in the catalog, including the resources # and the relationships between them. class Puppet::Resource::Catalog < Puppet::SimpleGraph class DuplicateResourceError < Puppet::Error; end extend Puppet::Indirector indirects :catalog, :terminus_setting => :catalog_terminus include Puppet::Util::Tagging extend Puppet::Util::Pson # The host name this is a catalog for. attr_accessor :name # The catalog version. Used for testing whether a catalog # is up to date. attr_accessor :version # How long this catalog took to retrieve. Used for reporting stats. attr_accessor :retrieval_duration # Whether this is a host catalog, which behaves very differently. # In particular, reports are sent, graphs are made, and state is # stored in the state database. If this is set incorrectly, then you often # end up in infinite loops, because catalogs are used to make things # that the host catalog needs. attr_accessor :host_config # Whether this catalog was retrieved from the cache, which affects # whether it is written back out again. attr_accessor :from_cache # Some metadata to help us compile and generally respond to the current state. attr_accessor :client_version, :server_version # The environment for this catalog attr_accessor :environment # Add classes to our class list. def add_class(*classes) classes.each do |klass| @classes << klass end # Add the class names as tags, too. tag(*classes) end def title_key_for_ref( ref ) ref =~ /^([-\w:]+)\[(.*)\]$/m [$1, $2] end # Add a resource to our graph and to our resource table. 
# This is actually a relatively complicated method, because it handles multiple # aspects of Catalog behaviour: # * Add the resource to the resource table # * Add the resource to the resource graph # * Add the resource to the relationship graph # * Add any aliases that make sense for the resource (e.g., name != title) def add_resource(*resource) add_resource(*resource[0..-2]) if resource.length > 1 resource = resource.pop raise ArgumentError, "Can only add objects that respond to :ref, not instances of #{resource.class}" unless resource.respond_to?(:ref) fail_on_duplicate_type_and_title(resource) title_key = title_key_for_ref(resource.ref) @transient_resources << resource if applying? @resource_table[title_key] = resource # If the name and title differ, set up an alias if resource.respond_to?(:name) and resource.respond_to?(:title) and resource.respond_to?(:isomorphic?) and resource.name != resource.title self.alias(resource, resource.uniqueness_key) if resource.isomorphic? end resource.catalog = self if resource.respond_to?(:catalog=) add_vertex(resource) @relationship_graph.add_vertex(resource) if @relationship_graph end # Create an alias for a resource. def alias(resource, key) resource.ref =~ /^(.+)\[/ class_name = $1 || resource.class.name newref = [class_name, key].flatten if key.is_a? String ref_string = "#{class_name}[#{key}]" return if ref_string == resource.ref end # LAK:NOTE It's important that we directly compare the references, # because sometimes an alias is created before the resource is # added to the catalog, so comparing inside the below if block # isn't sufficient. if existing = @resource_table[newref] return if existing == resource resource_declaration = " at #{resource.file}:#{resource.line}" if resource.file and resource.line existing_declaration = " at #{existing.file}:#{existing.line}" if existing.file and existing.line msg = "Cannot alias #{resource.ref} to #{key.inspect}#{resource_declaration}; resource #{newref.inspect} already declared#{existing_declaration}" raise ArgumentError, msg end @resource_table[newref] = resource @aliases[resource.ref] ||= [] @aliases[resource.ref] << newref end # Apply our catalog to the local host. Valid options # are: # :tags - set the tags that restrict what resources run # during the transaction # :ignoreschedules - tell the transaction to ignore schedules # when determining the resources to run def apply(options = {}) @applying = true Puppet::Util::Storage.load if host_config? transaction = Puppet::Transaction.new(self, options[:report]) register_report = options[:report].nil? transaction.tags = options[:tags] if options[:tags] transaction.ignoreschedules = true if options[:ignoreschedules] transaction.for_network_device = options[:network_device] transaction.add_times :config_retrieval => self.retrieval_duration || 0 begin Puppet::Util::Log.newdestination(transaction.report) if register_report begin transaction.evaluate ensure Puppet::Util::Log.close(transaction.report) if register_report end rescue Puppet::Error => detail Puppet.log_exception(detail, "Could not apply complete catalog: #{detail}") rescue => detail Puppet.log_exception(detail, "Got an uncaught exception of type #{detail.class}: #{detail}") ensure # Don't try to store state unless we're a host config # too recursive. Puppet::Util::Storage.store if host_config? end yield transaction if block_given? return transaction ensure @applying = false end # Are we in the middle of applying the catalog? def applying? 
@applying end def clear(remove_resources = true) super() # We have to do this so that the resources clean themselves up. @resource_table.values.each { |resource| resource.remove } if remove_resources @resource_table.clear if @relationship_graph @relationship_graph.clear @relationship_graph = nil end end def classes @classes.dup end # Create a new resource and register it in the catalog. def create_resource(type, options) unless klass = Puppet::Type.type(type) raise ArgumentError, "Unknown resource type #{type}" end return unless resource = klass.new(options) add_resource(resource) resource end # Make sure all of our resources are "finished". def finalize make_default_resources @resource_table.values.each { |resource| resource.finish } write_graph(:resources) end def host_config? host_config end def initialize(name = nil) super() @name = name if name @classes = [] @resource_table = {} @transient_resources = [] @applying = false @relationship_graph = nil @host_config = true @aliases = {} if block_given? yield(self) finalize end end # Make the default objects necessary for function. def make_default_resources # We have to add the resources to the catalog, or else they won't get cleaned up after # the transaction. # First create the default scheduling objects Puppet::Type.type(:schedule).mkdefaultschedules.each { |res| add_resource(res) unless resource(res.ref) } # And filebuckets if bucket = Puppet::Type.type(:filebucket).mkdefaultbucket add_resource(bucket) unless resource(bucket.ref) end end # Create a graph of all of the relationships in our catalog. def relationship_graph unless @relationship_graph # It's important that we assign the graph immediately, because # the debug messages below use the relationships in the # relationship graph to determine the path to the resources # spitting out the messages. If this is not set, # then we get into an infinite loop. @relationship_graph = Puppet::SimpleGraph.new # First create the dependency graph self.vertices.each do |vertex| @relationship_graph.add_vertex vertex vertex.builddepends.each do |edge| @relationship_graph.add_edge(edge) end end # Lastly, add in any autorequires @relationship_graph.vertices.each do |vertex| vertex.autorequire(self).each do |edge| unless @relationship_graph.edge?(edge.source, edge.target) # don't let automatic relationships conflict with manual ones. unless @relationship_graph.edge?(edge.target, edge.source) vertex.debug "Autorequiring #{edge.source}" @relationship_graph.add_edge(edge) else vertex.debug "Skipping automatic relationship with #{(edge.source == vertex ? edge.target : edge.source)}" end end end end @relationship_graph.write_graph(:relationships) if host_config? # Then splice in the container information splice!(@relationship_graph) @relationship_graph.write_graph(:expanded_relationships) if host_config? 
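# An illustrative sketch of the check applied to autorequired edges above: an
# automatic relationship is added only when no manual edge already exists
# between the two resources in either direction. The edge store here is a
# plain Set of [source, target] pairs standing in for the relationship graph.
require 'set'

edges = Set.new([["File[/a]", "File[/a/b]"]])

def add_auto_edge(edges, source, target)
  return :skipped if edges.include?([source, target]) ||
                     edges.include?([target, source])
  edges << [source, target]
  :added
end

add_auto_edge(edges, "File[/a]", "File[/a/b]")     # => :skipped (manual edge exists)
add_auto_edge(edges, "File[/a/b]", "File[/a/b/c]") # => :added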
end @relationship_graph end # Impose our container information on another graph by using it # to replace any container vertices X with a pair of verticies # { admissible_X and completed_X } such that that # # 0) completed_X depends on admissible_X # 1) contents of X each depend on admissible_X # 2) completed_X depends on each on the contents of X # 3) everything which depended on X depens on completed_X # 4) admissible_X depends on everything X depended on # 5) the containers and their edges must be removed # # Note that this requires attention to the possible case of containers # which contain or depend on other containers, but has the advantage # that the number of new edges created scales linearly with the number # of contained verticies regardless of how containers are related; # alternatives such as replacing container-edges with content-edges # scale as the product of the number of external dependences, which is # to say geometrically in the case of nested / chained containers. # Default_label = { :callback => :refresh, :event => :ALL_EVENTS } def splice!(other) stage_class = Puppet::Type.type(:stage) whit_class = Puppet::Type.type(:whit) component_class = Puppet::Type.type(:component) containers = vertices.find_all { |v| (v.is_a?(component_class) or v.is_a?(stage_class)) and vertex?(v) } # # These two hashes comprise the aforementioned attention to the possible # case of containers that contain / depend on other containers; they map # containers to their sentinels but pass other verticies through. Thus we # can "do the right thing" for references to other verticies that may or # may not be containers. # admissible = Hash.new { |h,k| k } completed = Hash.new { |h,k| k } containers.each { |x| admissible[x] = whit_class.new(:name => "admissible_#{x.ref}", :catalog => self) completed[x] = whit_class.new(:name => "completed_#{x.ref}", :catalog => self) } # # Implement the six requierments listed above # containers.each { |x| contents = adjacent(x, :direction => :out) other.add_edge(admissible[x],completed[x]) if contents.empty? # (0) contents.each { |v| other.add_edge(admissible[x],admissible[v],Default_label) # (1) other.add_edge(completed[v], completed[x], Default_label) # (2) } # (3) & (5) other.adjacent(x,:direction => :in,:type => :edges).each { |e| other.add_edge(completed[e.source],admissible[x],e.label) other.remove_edge! e } # (4) & (5) other.adjacent(x,:direction => :out,:type => :edges).each { |e| other.add_edge(completed[x],admissible[e.target],e.label) other.remove_edge! e } } containers.each { |x| other.remove_vertex! x } # (5) end # Remove the resource from our catalog. Notice that we also call # 'remove' on the resource, at least until resource classes no longer maintain # references to the resource instances. def remove_resource(*resources) resources.each do |resource| - @resource_table.delete(resource.ref) + title_key = title_key_for_ref(resource.ref) + @resource_table.delete(title_key) if aliases = @aliases[resource.ref] aliases.each { |res_alias| @resource_table.delete(res_alias) } @aliases.delete(resource.ref) end remove_vertex!(resource) if vertex?(resource) @relationship_graph.remove_vertex!(resource) if @relationship_graph and @relationship_graph.vertex?(resource) resource.remove end end # Look a resource up by its reference (e.g., File[/etc/passwd]). def resource(type, title = nil) # Always create a resource reference, so that it always canonizes how we # are referring to them. 
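# A minimal sketch of the reference-splitting helper (title_key_for_ref) that
# the remove_resource change above now relies on: a ref string such as
# "File[/etc/passwd]" is split into its type and title so it can be used as a
# resource-table key.
def title_key_for_ref(ref)
  ref =~ /^([-\w:]+)\[(.*)\]$/m
  [$1, $2]
end

title_key_for_ref("File[/etc/passwd]") # => ["File", "/etc/passwd"]
title_key_for_ref("Class[Foo::Bar]")   # => ["Class", "Foo::Bar"]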
if title res = Puppet::Resource.new(type, title) else # If they didn't provide a title, then we expect the first # argument to be of the form 'Class[name]', which our # Reference class canonizes for us. res = Puppet::Resource.new(nil, type) end title_key = [res.type, res.title.to_s] uniqueness_key = [res.type, res.uniqueness_key].flatten @resource_table[title_key] || @resource_table[uniqueness_key] end def resource_refs resource_keys.collect{ |type, name| name.is_a?( String ) ? "#{type}[#{name}]" : nil}.compact end def resource_keys @resource_table.keys end def resources @resource_table.values.uniq end def self.from_pson(data) result = new(data['name']) if tags = data['tags'] result.tag(*tags) end if version = data['version'] result.version = version end if environment = data['environment'] result.environment = environment end if resources = data['resources'] resources = PSON.parse(resources) if resources.is_a?(String) resources.each do |res| resource_from_pson(result, res) end end if edges = data['edges'] edges = PSON.parse(edges) if edges.is_a?(String) edges.each do |edge| edge_from_pson(result, edge) end end if classes = data['classes'] result.add_class(*classes) end result end def self.edge_from_pson(result, edge) # If no type information was presented, we manually find # the class. edge = Puppet::Relationship.from_pson(edge) if edge.is_a?(Hash) unless source = result.resource(edge.source) raise ArgumentError, "Could not convert from pson: Could not find relationship source #{edge.source.inspect}" end edge.source = source unless target = result.resource(edge.target) raise ArgumentError, "Could not convert from pson: Could not find relationship target #{edge.target.inspect}" end edge.target = target result.add_edge(edge) end def self.resource_from_pson(result, res) res = Puppet::Resource.from_pson(res) if res.is_a? Hash result.add_resource(res) end PSON.register_document_type('Catalog',self) def to_pson_data_hash { 'document_type' => 'Catalog', 'data' => { 'tags' => tags, 'name' => name, 'version' => version, 'environment' => environment.to_s, 'resources' => vertices.collect { |v| v.to_pson_data_hash }, 'edges' => edges. collect { |e| e.to_pson_data_hash }, 'classes' => classes }, 'metadata' => { 'api_version' => 1 } } end def to_pson(*args) to_pson_data_hash.to_pson(*args) end # Convert our catalog into a RAL catalog. def to_ral to_catalog :to_ral end # Convert our catalog into a catalog of Puppet::Resource instances. def to_resource to_catalog :to_resource end # filter out the catalog, applying +block+ to each resource. # If the block result is false, the resource will # be kept otherwise it will be skipped def filter(&block) to_catalog :to_resource, &block end # Store the classes in the classfile. def write_class_file ::File.open(Puppet[:classfile], "w") do |f| f.puts classes.join("\n") end rescue => detail Puppet.err "Could not create class file #{Puppet[:classfile]}: #{detail}" end # Store the list of resources we manage def write_resource_file ::File.open(Puppet[:resourcefile], "w") do |f| to_print = resources.map do |resource| next unless resource.managed? if resource.name_var "#{resource.type}[#{resource[resource.name_var]}]" else "#{resource.ref.downcase}" end end.compact f.puts to_print.join("\n") end rescue => detail Puppet.err "Could not create resource file #{Puppet[:resourcefile]}: #{detail}" end # Produce the graph files if requested. def write_graph(name) # We only want to graph the main host catalog. return unless host_config? 
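# A small sketch of the line format produced by write_resource_file above:
# managed resources that have a namevar are written as Type[namevalue], and
# everything else falls back to the downcased ref. The hashes below are
# illustrative stand-ins for real RAL resources.
resources = [
  { :type => "File", :name_var_value => "/etc/motd", :ref => "File[/etc/motd]" },
  { :type => "Notify", :name_var_value => nil,       :ref => "Notify[hello]" }
]

lines = resources.map do |r|
  r[:name_var_value] ? "#{r[:type]}[#{r[:name_var_value]}]" : r[:ref].downcase
end
# => ["File[/etc/motd]", "notify[hello]"]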
super end private # Verify that the given resource isn't declared elsewhere. def fail_on_duplicate_type_and_title(resource) # Short-curcuit the common case, return unless existing_resource = @resource_table[title_key_for_ref(resource.ref)] # If we've gotten this far, it's a real conflict msg = "Duplicate declaration: #{resource.ref} is already declared" msg << " in file #{existing_resource.file} at line #{existing_resource.line}" if existing_resource.file and existing_resource.line msg << "; cannot redeclare" if resource.line or resource.file raise DuplicateResourceError.new(msg) end # An abstracted method for converting one catalog into another type of catalog. # This pretty much just converts all of the resources from one class to another, using # a conversion method. def to_catalog(convert) result = self.class.new(self.name) result.version = self.version result.environment = self.environment map = {} vertices.each do |resource| next if virtual_not_exported?(resource) next if block_given? and yield resource #This is hackity hack for 1094 #Aliases aren't working in the ral catalog because the current instance of the resource #has a reference to the catalog being converted. . . So, give it a reference to the new one #problem solved. . . if resource.class == Puppet::Resource resource = resource.dup resource.catalog = result elsif resource.is_a?(Puppet::Parser::Resource) resource = resource.to_resource resource.catalog = result end if resource.is_a?(Puppet::Resource) and convert.to_s == "to_resource" newres = resource else newres = resource.send(convert) end # We can't guarantee that resources don't munge their names # (like files do with trailing slashes), so we have to keep track # of what a resource got converted to. map[resource.ref] = newres result.add_resource newres end message = convert.to_s.gsub "_", " " edges.each do |edge| # Skip edges between virtual resources. next if virtual_not_exported?(edge.source) next if block_given? and yield edge.source next if virtual_not_exported?(edge.target) next if block_given? and yield edge.target unless source = map[edge.source.ref] raise Puppet::DevError, "Could not find resource #{edge.source.ref} when converting #{message} resources" end unless target = map[edge.target.ref] raise Puppet::DevError, "Could not find resource #{edge.target.ref} when converting #{message} resources" end result.add_edge(source, target, edge.label) end map.clear result.add_class(*self.classes) result.tag(*self.tags) result end def virtual_not_exported?(resource) resource.respond_to?(:virtual?) and resource.virtual? and (resource.respond_to?(:exported?) and not resource.exported?) end end diff --git a/lib/puppet/resource/type_collection.rb b/lib/puppet/resource/type_collection.rb index 8306b778d..0c02c4eaa 100644 --- a/lib/puppet/resource/type_collection.rb +++ b/lib/puppet/resource/type_collection.rb @@ -1,215 +1,216 @@ +require 'puppet/parser/type_loader' + class Puppet::Resource::TypeCollection attr_reader :environment attr_accessor :parse_failed def clear @hostclasses.clear @definitions.clear @nodes.clear @watched_files.clear end def initialize(env) @environment = env.is_a?(String) ? 
Puppet::Node::Environment.new(env) : env @hostclasses = {} @definitions = {} @nodes = {} # So we can keep a list and match the first-defined regex @node_list = [] @watched_files = {} end def import_ast(ast, modname) ast.instantiate(modname).each do |instance| add(instance) end end def inspect "TypeCollection" + { :hostclasses => @hostclasses.keys, :definitions => @definitions.keys, :nodes => @nodes.keys }.inspect end def <<(thing) add(thing) self end def add(instance) if instance.type == :hostclass and other = @hostclasses[instance.name] and other.type == :hostclass other.merge(instance) return other end method = "add_#{instance.type}" send(method, instance) instance.resource_type_collection = self instance end def add_hostclass(instance) dupe_check(instance, @hostclasses) { |dupe| "Class '#{instance.name}' is already defined#{dupe.error_context}; cannot redefine" } dupe_check(instance, @definitions) { |dupe| "Definition '#{instance.name}' is already defined#{dupe.error_context}; cannot be redefined as a class" } @hostclasses[instance.name] = instance instance end def hostclass(name) @hostclasses[munge_name(name)] end def add_node(instance) dupe_check(instance, @nodes) { |dupe| "Node '#{instance.name}' is already defined#{dupe.error_context}; cannot redefine" } @node_list << instance @nodes[instance.name] = instance instance end def loader - require 'puppet/parser/type_loader' @loader ||= Puppet::Parser::TypeLoader.new(environment) end def node(name) name = munge_name(name) if node = @nodes[name] return node end @node_list.each do |node| next unless node.name_is_regex? return node if node.match(name) end nil end def node_exists?(name) @nodes[munge_name(name)] end def nodes? @nodes.length > 0 end def add_definition(instance) dupe_check(instance, @hostclasses) { |dupe| "'#{instance.name}' is already defined#{dupe.error_context} as a class; cannot redefine as a definition" } dupe_check(instance, @definitions) { |dupe| "Definition '#{instance.name}' is already defined#{dupe.error_context}; cannot be redefined" } @definitions[instance.name] = instance end def definition(name) @definitions[munge_name(name)] end def find_node(namespaces, name) @nodes[munge_name(name)] end def find_hostclass(namespaces, name, options = {}) find_or_load(namespaces, name, :hostclass, options) end def find_definition(namespaces, name) find_or_load(namespaces, name, :definition) end [:hostclasses, :nodes, :definitions].each do |m| define_method(m) do instance_variable_get("@#{m}").dup end end def require_reparse? @parse_failed || stale? end def stale? @watched_files.values.detect { |file| file.changed? } end def version return @version if defined?(@version) if environment[:config_version] == "" @version = Time.now.to_i return @version end @version = Puppet::Util::Execution.execute([environment[:config_version]]).strip rescue Puppet::ExecutionFailure => e raise Puppet::ParseError, "Unable to set config_version: #{e.message}" end def watch_file(file) @watched_files[file] = Puppet::Util::LoadedFile.new(file) end def watching_file?(file) @watched_files.include?(file) end private # Return a list of all possible fully-qualified names that might be # meant by the given name, in the context of namespaces. def resolve_namespaces(namespaces, name) name = name.downcase if name =~ /^::/ # name is explicitly fully qualified, so just return it, sans # initial "::". return [name.sub(/^::/, '')] end if name == "" # The name "" has special meaning--it always refers to a "main" # hostclass which contains all toplevel resources. 
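# A standalone sketch of the search order produced by resolve_namespaces (the
# loop that follows): for a namespace such as "foo::bar" and a name "baz",
# candidates are generated from the innermost nesting outwards, ending with
# the toplevel name. The namespace and name here are arbitrary examples.
def candidate_names(namespace, name)
  result = []
  parts = namespace.downcase.split("::")
  until parts.empty?
    result << "#{parts.join('::')}::#{name.downcase}"
    parts.pop
  end
  result << name.downcase
  result.uniq
end

candidate_names("foo::bar", "baz")
# => ["foo::bar::baz", "foo::baz", "baz"]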
return [""] end namespaces = [namespaces] unless namespaces.is_a?(Array) namespaces = namespaces.collect { |ns| ns.downcase } result = [] namespaces.each do |namespace| ary = namespace.split("::") # Search each namespace nesting in innermost-to-outermost order. while ary.length > 0 result << "#{ary.join("::")}::#{name}" ary.pop end # Finally, search the toplevel namespace. result << name end return result.uniq end # Resolve namespaces and find the given object. Autoload it if # necessary. def find_or_load(namespaces, name, type, options = {}) searchspace = options[:assume_fqname] ? [name].flatten : resolve_namespaces(namespaces, name) searchspace.each do |fqname| if result = send(type, fqname) || loader.try_load_fqname(type, fqname) return result end end # Nothing found. return nil end def munge_name(name) name.to_s.downcase end def dupe_check(instance, hash) return unless dupe = hash[instance.name] message = yield dupe instance.fail Puppet::ParseError, message end end diff --git a/lib/puppet/ssl/base.rb b/lib/puppet/ssl/base.rb index 668134f86..d7533a511 100644 --- a/lib/puppet/ssl/base.rb +++ b/lib/puppet/ssl/base.rb @@ -1,79 +1,78 @@ +require 'openssl' require 'puppet/ssl' # The base class for wrapping SSL instances. class Puppet::SSL::Base # For now, use the YAML separator. SEPARATOR = "\n---\n" def self.from_multiple_s(text) text.split(SEPARATOR).collect { |inst| from_s(inst) } end def self.to_multiple_s(instances) instances.collect { |inst| inst.to_s }.join(SEPARATOR) end def self.wraps(klass) @wrapped_class = klass end def self.wrapped_class raise(Puppet::DevError, "#{self} has not declared what class it wraps") unless defined?(@wrapped_class) @wrapped_class end attr_accessor :name, :content # Is this file for the CA? def ca? name == Puppet::SSL::Host.ca_name end def generate raise Puppet::DevError, "#{self.class} did not override 'generate'" end def initialize(name) @name = name.to_s.downcase end # Read content from disk appropriately. def read(path) @content = wrapped_class.new(File.read(path)) end # Convert our thing to pem. def to_s return "" unless content content.to_pem end # Provide the full text of the thing we're dealing with. def to_text return "" unless content content.to_text end def fingerprint(md = :SHA256) - require 'openssl' - # ruby 1.8.x openssl digest constants are string # but in 1.9.x they are symbols mds = md.to_s.upcase if OpenSSL::Digest.constants.include?(mds) md = mds elsif OpenSSL::Digest.constants.include?(mds.to_sym) md = mds.to_sym else raise ArgumentError, "#{md} is not a valid digest algorithm for fingerprinting certificate #{name}" end OpenSSL::Digest.const_get(md).hexdigest(content.to_der).scan(/../).join(':').upcase end private def wrapped_class self.class.wrapped_class end end diff --git a/lib/puppet/type/augeas.rb b/lib/puppet/type/augeas.rb index cc2ca6b1d..ef075c326 100644 --- a/lib/puppet/type/augeas.rb +++ b/lib/puppet/type/augeas.rb @@ -1,218 +1,218 @@ # # Copyright 2011 Bryan Kearney # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
Puppet::Type.newtype(:augeas) do include Puppet::Util feature :parse_commands, "Parse the command string" feature :need_to_run?, "If the command should run" feature :execute_changes, "Actually make the changes" @doc = <<-EOT Apply a change or an array of changes to the filesystem using the augeas tool. Requires: - [Augeas](http://www.augeas.net) - The ruby-augeas bindings Sample usage with a string: augeas{"test1" : context => "/files/etc/sysconfig/firstboot", changes => "set RUN_FIRSTBOOT YES", onlyif => "match other_value size > 0", } Sample usage with an array and custom lenses: augeas{"jboss_conf": context => "/files", changes => [ "set etc/jbossas/jbossas.conf/JBOSS_IP $ipaddress", "set etc/jbossas/jbossas.conf/JAVA_HOME /usr", ], load_path => "$/usr/share/jbossas/lenses", } EOT newparam (:name) do desc "The name of this task. Used for uniqueness." isnamevar end newparam (:context) do desc "Optional context path. This value is prepended to the paths of all changes if the path is relative. If the `incl` parameter is set, defaults to `/files + incl`; otherwise, defaults to the empty string." defaultto "" munge do |value| if value.empty? and resource[:incl] "/files" + resource[:incl] else value end end end newparam (:onlyif) do desc "Optional augeas command and comparisons to control the execution of this type. Supported onlyif syntax: * `get ` * `match size ` * `match include ` * `match not_include ` * `match == ` * `match != ` where: * `AUGEAS_PATH` is a valid path scoped by the context * `MATCH_PATH` is a valid match synatx scoped by the context * `COMPARATOR` is one of `>, >=, !=, ==, <=,` or `<` * `STRING` is a string * `INT` is a number * `AN_ARRAY` is in the form `['a string', 'another']`" defaultto "" end newparam(:changes) do desc "The changes which should be applied to the filesystem. This can be a command or an array of commands. The following commands are supported: `set ` : Sets the value `VALUE` at loction `PATH` `setm ` : Sets multiple nodes (matching `SUB` relative to `PATH`) to `VALUE` `rm ` : Removes the node at location `PATH` `remove ` : Synonym for `rm` `clear ` : Sets the node at `PATH` to `NULL`, creating it if needed `ins