diff --git a/lib/puppet/configurer.rb b/lib/puppet/configurer.rb index 3d6e8557c..5581917a1 100644 --- a/lib/puppet/configurer.rb +++ b/lib/puppet/configurer.rb @@ -1,250 +1,246 @@ # The client for interacting with the puppetmaster config server. require 'sync' require 'timeout' require 'puppet/network/http_pool' require 'puppet/util' class Puppet::Configurer require 'puppet/configurer/fact_handler' require 'puppet/configurer/plugin_handler' include Puppet::Configurer::FactHandler include Puppet::Configurer::PluginHandler # For benchmarking include Puppet::Util attr_reader :compile_time # Provide more helpful strings to the logging that the Agent does def self.to_s "Puppet configuration client" end class << self # Puppetd should only have one instance running, and we need a way # to retrieve it. attr_accessor :instance include Puppet::Util end # How to lock instances of this class. def self.lockfile_path Puppet[:puppetdlockfile] end def clear @catalog.clear(true) if @catalog @catalog = nil end def execute_postrun_command execute_from_setting(:postrun_command) end def execute_prerun_command execute_from_setting(:prerun_command) end # Initialize and load storage def dostorage Puppet::Util::Storage.load @compile_time ||= Puppet::Util::Storage.cache(:configuration)[:compile_time] rescue => detail puts detail.backtrace if Puppet[:trace] Puppet.err "Corrupt state file #{Puppet[:statefile]}: #{detail}" begin ::File.unlink(Puppet[:statefile]) retry rescue => detail raise Puppet::Error.new("Cannot remove #{Puppet[:statefile]}: #{detail}") end end # Just so we can specify that we are "the" instance. def initialize Puppet.settings.use(:main, :ssl, :agent) self.class.instance = self @running = false @splayed = false end # Prepare for catalog retrieval. Downloads everything necessary, etc. def prepare(options) dostorage download_plugins unless options[:skip_plugin_download] download_fact_plugins unless options[:skip_plugin_download] end # Get the remote catalog, yo. Returns nil if no catalog can be found. def retrieve_catalog(fact_options) fact_options ||= {} # First try it with no cache, then with the cache. unless (Puppet[:use_cached_catalog] and result = retrieve_catalog_from_cache(fact_options)) or result = retrieve_new_catalog(fact_options) if ! Puppet[:usecacheonfailure] Puppet.warning "Not using cache on failed catalog" return nil end result = retrieve_catalog_from_cache(fact_options) end return nil unless result convert_catalog(result, @duration) end # Convert a plain resource catalog into our full host catalog. def convert_catalog(result, duration) catalog = result.to_ral catalog.finalize catalog.retrieval_duration = duration catalog.write_class_file catalog end # Retrieve (optionally) and apply a catalog. If a catalog is passed in # the options, then apply that one, otherwise retrieve it. def retrieve_and_apply_catalog(options, fact_options) unless catalog = (options.delete(:catalog) || retrieve_catalog(fact_options)) Puppet.err "Could not retrieve catalog; skipping run" return end report = options[:report] report.configuration_version = catalog.version benchmark(:notice, "Finished catalog run") do catalog.apply(options) end report.finalize_report report end # The code that actually runs the catalog. # This just passes any options on to the catalog, # which accepts :tags and :ignoreschedules. 
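As a reading aid for the run method that follows in this hunk, here is a condensed control-flow sketch. It is hand-written and heavily simplified: every helper it calls (prepare_run, apply_catalog and so on) is a hypothetical stand-in rather than a Puppet API, and the only behavioural change the hunk makes to this flow is deleting the Puppet::Network::HttpPool.clear_http_instances call from the cleanup block.

# Condensed, illustrative skeleton only; the stub methods below are
# hypothetical stand-ins for Puppet::Configurer's real helpers.
class RunFlowSketch
  def new_report;            {}       end
  def prepare_run(_o);       true     end  # storage, plugin/fact download
  def prerun_command_ok?;    true     end
  def postrun_command_ok?;   true     end
  def apply_catalog(_o, _r); :applied end  # retrieve + apply the catalog
  def forget_autoload_state; true     end  # clears thread-local module dirs
  def close_and_send(_r);    true     end  # close log destination, send report

  def run(options = {})
    report = options[:report] ||= new_report
    begin
      prepare_run(options)
      begin
        return nil unless prerun_command_ok?
        apply_catalog(options, report)
      rescue SystemExit, NoMemoryError
        raise                              # never swallow fatal errors
      rescue => detail
        warn "Failed to apply catalog: #{detail}"
        return nil
      ensure
        postrun_command_ok? or return nil  # postrun always fires
      end
    ensure
      # The hunk deletes the HttpPool.clear_http_instances call that used to
      # sit in this cleanup block; connections are no longer pooled.
      forget_autoload_state
    end
  ensure
    close_and_send(report)                 # report is always closed and sent
  end
end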
def run(options = {}) options[:report] ||= Puppet::Transaction::Report.new("apply") report = options[:report] Puppet::Util::Log.newdestination(report) begin prepare(options) if Puppet::Resource::Catalog.indirection.terminus_class == :rest # This is a bit complicated. We need the serialized and escaped facts, # and we need to know which format they're encoded in. Thus, we # get a hash with both of these pieces of information. fact_options = facts_for_uploading end # set report host name now that we have the fact report.host = Puppet[:node_name_value] begin execute_prerun_command or return nil retrieve_and_apply_catalog(options, fact_options) rescue SystemExit,NoMemoryError raise rescue => detail puts detail.backtrace if Puppet[:trace] Puppet.err "Failed to apply catalog: #{detail}" return nil ensure execute_postrun_command or return nil end ensure # Make sure we forget the retained module_directories of any autoload # we might have used. Thread.current[:env_module_directories] = nil - - # Now close all of our existing http connections, since there's no - # reason to leave them lying open. - Puppet::Network::HttpPool.clear_http_instances end ensure Puppet::Util::Log.close(report) send_report(report) end def send_report(report) puts report.summary if Puppet[:summarize] save_last_run_summary(report) Puppet::Transaction::Report.indirection.save(report) if Puppet[:report] rescue => detail puts detail.backtrace if Puppet[:trace] Puppet.err "Could not send report: #{detail}" end def save_last_run_summary(report) Puppet::Util::FileLocking.writelock(Puppet[:lastrunfile], 0660) do |file| file.print YAML.dump(report.raw_summary) end rescue => detail puts detail.backtrace if Puppet[:trace] Puppet.err "Could not save last run local report: #{detail}" end private def self.timeout timeout = Puppet[:configtimeout] case timeout when String if timeout =~ /^\d+$/ timeout = Integer(timeout) else raise ArgumentError, "Configuration timeout must be an integer" end when Integer # nothing else raise ArgumentError, "Configuration timeout must be an integer" end timeout end def execute_from_setting(setting) return true if (command = Puppet[setting]) == "" begin Puppet::Util.execute([command]) true rescue => detail puts detail.backtrace if Puppet[:trace] Puppet.err "Could not run command from #{setting}: #{detail}" false end end def retrieve_catalog_from_cache(fact_options) result = nil @duration = thinmark do result = Puppet::Resource::Catalog.indirection.find(Puppet[:node_name_value], fact_options.merge(:ignore_terminus => true)) end Puppet.notice "Using cached catalog" result rescue => detail puts detail.backtrace if Puppet[:trace] Puppet.err "Could not retrieve catalog from cache: #{detail}" return nil end def retrieve_new_catalog(fact_options) result = nil @duration = thinmark do result = Puppet::Resource::Catalog.indirection.find(Puppet[:node_name_value], fact_options.merge(:ignore_cache => true)) end result rescue SystemExit,NoMemoryError raise rescue Exception => detail puts detail.backtrace if Puppet[:trace] Puppet.err "Could not retrieve catalog from remote server: #{detail}" return nil end end diff --git a/lib/puppet/network/client.rb b/lib/puppet/network/client.rb index c56b21393..f9c4c5fea 100644 --- a/lib/puppet/network/client.rb +++ b/lib/puppet/network/client.rb @@ -1,179 +1,174 @@ # the available clients require 'puppet' require 'puppet/network/xmlrpc/client' require 'puppet/util/subclass_loader' require 'puppet/util/methodhelper' require 'puppet/sslcertificates/support' require 
'puppet/network/handler' require 'net/http' # Some versions of ruby don't have this method defined, which basically causes # us to never use ssl. Yay. class Net::HTTP def use_ssl? if defined?(@use_ssl) @use_ssl else false end end # JJM: This is a "backport" of sorts to older ruby versions which # do not have this accessor. See #896 for more information. attr_accessor :enable_post_connection_check unless Net::HTTP.method_defined? "enable_post_connection_check" end # The base class for all of the clients. Many clients just directly # call methods, but some of them need to do some extra work or # provide a different interface. class Puppet::Network::Client Client = self include Puppet::Util extend Puppet::Util::SubclassLoader include Puppet::Util::MethodHelper # This handles reading in the key and such-like. include Puppet::SSLCertificates::Support attr_accessor :schedule, :lastrun, :local, :stopping attr_reader :driver # Set up subclass loading handle_subclasses :client, "puppet/network/client" # Determine what clients look for when being passed an object for local # client/server stuff. E.g., you could call Client::CA.new(:CA => ca). def self.drivername @drivername ||= self.name end # Figure out the handler for our client. def self.handler @handler ||= Puppet::Network::Handler.handler(self.name) end # The class that handles xmlrpc interaction for us. def self.xmlrpc_client @xmlrpc_client ||= Puppet::Network::XMLRPCClient.handler_class(self.handler) end # Create our client. def initialize(hash) # to whom do we connect? @server = nil if hash.include?(:Cache) @cache = hash[:Cache] else @cache = true end driverparam = self.class.drivername if hash.include?(:Server) args = {:Server => hash[:Server]} @server = hash[:Server] args[:Port] = hash[:Port] || Puppet[:masterport] @driver = self.class.xmlrpc_client.new(args) self.read_cert - # We have to start the HTTP connection manually before we start - # sending it requests or keep-alive won't work. Note that with #1010, - # we don't currently actually want keep-alive. - @driver.start if @driver.respond_to? :start and Puppet::Network::HttpPool.keep_alive? - @local = false elsif hash.include?(driverparam) @driver = hash[driverparam] if @driver == true @driver = self.class.handler.new end @local = true else raise Puppet::Network::ClientError, "#{self.class} must be passed a Server or #{driverparam}" end end # Are we a local client? def local? if @local true else false end end # Make sure we set the driver up when we read the cert in. def recycle_connection @driver.recycle_connection if @driver.respond_to?(:recycle_connection) end # A wrapper method to run and then store the last run time def runnow if self.stopping Puppet.notice "In shutdown progress; skipping run" return end begin self.run self.lastrun = Time.now.to_i rescue => detail puts detail.backtrace if Puppet[:trace] Puppet.err "Could not run #{self.class}: #{detail}" end end def run raise Puppet::DevError, "Client type #{self.class} did not override run" end def scheduled? if sched = self.schedule return sched.match?(self.lastrun) else return true end end def shutdown if self.stopping Puppet.notice "Already in shutdown" else self.stopping = true Puppet::Util::Storage.store if self.respond_to? :running? and self.running? rmpidfile end end # Start listening for events. We're pretty much just listening for # timer events here. def start # Create our timer. Puppet will handle observing it and such. timer = Puppet.newtimer( :interval => Puppet[:runinterval], :tolerance => 1, :start? 
=> true ) do begin self.runnow if self.scheduled? rescue => detail puts detail.backtrace if Puppet[:trace] Puppet.err "Could not run client; got otherwise uncaught exception: #{detail}" end end # Run once before we start following the timer self.runnow end require 'puppet/network/client/proxy' end diff --git a/lib/puppet/network/http_pool.rb b/lib/puppet/network/http_pool.rb index 7d227b4d4..d4ec48d22 100644 --- a/lib/puppet/network/http_pool.rb +++ b/lib/puppet/network/http_pool.rb @@ -1,104 +1,49 @@ require 'puppet/ssl/host' require 'net/https' require 'puppet/util/cacher' module Puppet::Network; end -# Manage Net::HTTP instances for keep-alive. module Puppet::Network::HttpPool - class << self - include Puppet::Util::Cacher - - private - - cached_attr(:http_cache) { Hash.new } - end - # Use the global localhost instance. def self.ssl_host Puppet::SSL::Host.localhost end - # 2008/03/23 - # LAK:WARNING: Enabling this has a high propability of - # causing corrupt files and who knows what else. See #1010. - HTTP_KEEP_ALIVE = false - - def self.keep_alive? - HTTP_KEEP_ALIVE - end - - # Clear our http cache, closing all connections. - def self.clear_http_instances - http_cache.each do |name, connection| - connection.finish if connection.started? - end - Puppet::Util::Cacher.expire - end - - # Make sure we set the driver up when we read the cert in. - def self.read_cert - if val = super # This calls read_cert from the Puppet::SSLCertificates::Support module. - # Clear out all of our connections, since they previously had no cert and now they - # should have them. - clear_http_instances - return val - else - return false - end - end - # Use cert information from a Puppet client to set up the http object. def self.cert_setup(http) # Just no-op if we don't have certs. return false unless FileTest.exist?(Puppet[:hostcert]) and FileTest.exist?(Puppet[:localcacert]) http.cert_store = ssl_host.ssl_store http.ca_file = Puppet[:localcacert] http.cert = ssl_host.certificate.content http.verify_mode = OpenSSL::SSL::VERIFY_PEER http.key = ssl_host.key.content end # Retrieve a cached http instance if caching is enabled, else return # a new one. def self.http_instance(host, port, reset = false) - # We overwrite the uninitialized @http here with a cached one. - key = "#{host}:#{port}" - - # Return our cached instance if we've got a cache, as long as we're not - # resetting the instance. - if keep_alive? - return http_cache[key] if ! reset and http_cache[key] - - # Clean up old connections if we have them. - if http = http_cache[key] - http_cache.delete(key) - http.finish if http.started? - end - end - args = [host, port] if Puppet[:http_proxy_host] == "none" args << nil << nil else args << Puppet[:http_proxy_host] << Puppet[:http_proxy_port] end http = Net::HTTP.new(*args) # Pop open the http client a little; older versions of Net::HTTP(s) didn't # give us a reader for ca_file... Grr... class << http; attr_accessor :ca_file; end http.use_ssl = true # Use configured timeout (#1176) http.read_timeout = Puppet[:configtimeout] http.open_timeout = Puppet[:configtimeout] cert_setup(http) - http_cache[key] = http if keep_alive? - http end end diff --git a/spec/unit/network/client_spec.rb b/spec/unit/network/client_spec.rb deleted file mode 100755 index 102a053c0..000000000 --- a/spec/unit/network/client_spec.rb +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env rspec -# -# Created by Luke Kanies on 2008-3-24. -# Copyright (c) 2008. All rights reserved. 
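Stepping back to the lib/puppet/network/http_pool.rb hunk above: with the keep-alive cache and clear_http_instances gone, http_instance now hands back a fresh, unstarted Net::HTTP on every call, configured with SSL, timeouts and (when certs exist on disk) cert_setup, and the caller owns its lifetime. A minimal caller-side sketch, not code from this patch; it assumes a Puppet 2.x load path, the host, port and request path are illustrative, and error handling is elided.

# With pooling gone, the caller opens and closes the connection itself.
require 'puppet'
require 'puppet/network/http_pool'

http = Puppet::Network::HttpPool.http_instance("puppet.example.com", 8140)
begin
  http.start unless http.started?   # nothing starts the connection for us
  response = http.get("/production/certificate/ca")
  puts response.code
ensure
  http.finish if http.started?      # and nothing finishes it either
end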
- -require 'spec_helper' - -require 'puppet/network/client' - -describe Puppet::Network::Client do - before do - Puppet.settings.stubs(:use).returns(true) - Puppet::Network::HttpPool.stubs(:cert_setup) - end - - describe "when keep-alive is enabled" do - before do - Puppet::Network::HttpPool.stubs(:keep_alive?).returns true - end - it "should start the http client up on creation" do - http = mock 'http' - http.stub_everything - http.expects(:start) - Net::HTTP.stubs(:new).returns http - - # Pick a random subclass... - Puppet::Network::Client.runner.new :Server => Puppet[:server] - end - end - - describe "when keep-alive is disabled" do - before do - Puppet::Network::HttpPool.stubs(:keep_alive?).returns false - end - it "should not start the http client up on creation" do - http = mock 'http' - http.stub_everything - http.expects(:start).never - Net::HTTP.stubs(:new).returns http - - # Pick a random subclass... - Puppet::Network::Client.runner.new :Server => Puppet[:server] - end - end -end diff --git a/spec/unit/network/http_pool_spec.rb b/spec/unit/network/http_pool_spec.rb index 32c7a90fe..1961f3029 100755 --- a/spec/unit/network/http_pool_spec.rb +++ b/spec/unit/network/http_pool_spec.rb @@ -1,206 +1,139 @@ #!/usr/bin/env rspec # # Created by Luke Kanies on 2007-11-26. # Copyright (c) 2007. All rights reserved. require 'spec_helper' require 'puppet/network/http_pool' describe Puppet::Network::HttpPool do after do - Puppet::Util::Cacher.expire - Puppet::Network::HttpPool.clear_http_instances Puppet::Network::HttpPool.instance_variable_set("@ssl_host", nil) end - it "should have keep-alive disabled" do - Puppet::Network::HttpPool::HTTP_KEEP_ALIVE.should be_false - end - it "should use the global SSL::Host instance to get its certificate information" do host = mock 'host' Puppet::SSL::Host.expects(:localhost).with.returns host Puppet::Network::HttpPool.ssl_host.should equal(host) end describe "when managing http instances" do def stub_settings(settings) settings.each do |param, value| Puppet.settings.stubs(:value).with(param).returns(value) end end before do # All of the cert stuff is tested elsewhere Puppet::Network::HttpPool.stubs(:cert_setup) end it "should return an http instance created with the passed host and port" do http = stub 'http', :use_ssl= => nil, :read_timeout= => nil, :open_timeout= => nil, :started? 
=> false Net::HTTP.expects(:new).with("me", 54321, nil, nil).returns(http) Puppet::Network::HttpPool.http_instance("me", 54321).should equal(http) end it "should enable ssl on the http instance" do Puppet::Network::HttpPool.http_instance("me", 54321).instance_variable_get("@use_ssl").should be_true end it "should set the read timeout" do Puppet::Network::HttpPool.http_instance("me", 54321).read_timeout.should == 120 end it "should set the open timeout" do Puppet::Network::HttpPool.http_instance("me", 54321).open_timeout.should == 120 end it "should create the http instance with the proxy host and port set if the http_proxy is not set to 'none'" do stub_settings :http_proxy_host => "myhost", :http_proxy_port => 432, :configtimeout => 120 Puppet::Network::HttpPool.http_instance("me", 54321).open_timeout.should == 120 end - describe "and http keep-alive is enabled" do - before do - Puppet::Network::HttpPool.stubs(:keep_alive?).returns true - end - - it "should cache http instances" do - stub_settings :http_proxy_host => "myhost", :http_proxy_port => 432, :configtimeout => 120 - old = Puppet::Network::HttpPool.http_instance("me", 54321) - Puppet::Network::HttpPool.http_instance("me", 54321).should equal(old) - end - - it "should have a mechanism for getting a new http instance instead of the cached instance" do - stub_settings :http_proxy_host => "myhost", :http_proxy_port => 432, :configtimeout => 120 - old = Puppet::Network::HttpPool.http_instance("me", 54321) - Puppet::Network::HttpPool.http_instance("me", 54321, true).should_not equal(old) - end - - it "should close existing, open connections when requesting a new connection" do - stub_settings :http_proxy_host => "myhost", :http_proxy_port => 432, :configtimeout => 120 - old = Puppet::Network::HttpPool.http_instance("me", 54321) - old.expects(:started?).returns(true) - old.expects(:finish) - Puppet::Network::HttpPool.http_instance("me", 54321, true) - end - - it "should have a mechanism for clearing the http cache", :fails_on_windows => true do - stub_settings :http_proxy_host => "myhost", :http_proxy_port => 432, :configtimeout => 120 - old = Puppet::Network::HttpPool.http_instance("me", 54321) - Puppet::Network::HttpPool.http_instance("me", 54321).should equal(old) - old = Puppet::Network::HttpPool.http_instance("me", 54321) - Puppet::Network::HttpPool.clear_http_instances - Puppet::Network::HttpPool.http_instance("me", 54321).should_not equal(old) - end - - it "should close open http connections when clearing the cache" do - stub_settings :http_proxy_host => "myhost", :http_proxy_port => 432, :configtimeout => 120 - one = Puppet::Network::HttpPool.http_instance("me", 54321) - one.expects(:started?).returns(true) - one.expects(:finish).returns(true) - Puppet::Network::HttpPool.clear_http_instances - end - - it "should not close unopened http connections when clearing the cache" do - stub_settings :http_proxy_host => "myhost", :http_proxy_port => 432, :configtimeout => 120 - one = Puppet::Network::HttpPool.http_instance("me", 54321) - one.expects(:started?).returns(false) - one.expects(:finish).never - Puppet::Network::HttpPool.clear_http_instances - end - end - - describe "and http keep-alive is disabled" do - before do - Puppet::Network::HttpPool.stubs(:keep_alive?).returns false - end - - it "should not cache http instances" do - stub_settings :http_proxy_host => "myhost", :http_proxy_port => 432, :configtimeout => 120 - old = Puppet::Network::HttpPool.http_instance("me", 54321) - Puppet::Network::HttpPool.http_instance("me", 
54321).should_not equal(old) - end - end - - after do - Puppet::Network::HttpPool.clear_http_instances + it "should not cache http instances" do + stub_settings :http_proxy_host => "myhost", :http_proxy_port => 432, :configtimeout => 120 + old = Puppet::Network::HttpPool.http_instance("me", 54321) + Puppet::Network::HttpPool.http_instance("me", 54321).should_not equal(old) end end describe "when adding certificate information to http instances" do before do @http = mock 'http' [:cert_store=, :verify_mode=, :ca_file=, :cert=, :key=].each { |m| @http.stubs(m) } @store = stub 'store' @cert = stub 'cert', :content => "real_cert" @key = stub 'key', :content => "real_key" @host = stub 'host', :certificate => @cert, :key => @key, :ssl_store => @store Puppet[:confdir] = "/sometthing/else" Puppet.settings.stubs(:value).returns "/some/file" Puppet.settings.stubs(:value).with(:hostcert).returns "/host/cert" Puppet.settings.stubs(:value).with(:localcacert).returns "/local/ca/cert" FileTest.stubs(:exist?).with("/host/cert").returns true FileTest.stubs(:exist?).with("/local/ca/cert").returns true Puppet::Network::HttpPool.stubs(:ssl_host).returns @host end after do Puppet.settings.clear end it "should do nothing if no host certificate is on disk" do FileTest.expects(:exist?).with("/host/cert").returns false @http.expects(:cert=).never Puppet::Network::HttpPool.cert_setup(@http) end it "should do nothing if no local certificate is on disk" do FileTest.expects(:exist?).with("/local/ca/cert").returns false @http.expects(:cert=).never Puppet::Network::HttpPool.cert_setup(@http) end it "should add a certificate store from the ssl host" do @http.expects(:cert_store=).with(@store) Puppet::Network::HttpPool.cert_setup(@http) end it "should add the client certificate" do @http.expects(:cert=).with("real_cert") Puppet::Network::HttpPool.cert_setup(@http) end it "should add the client key" do @http.expects(:key=).with("real_key") Puppet::Network::HttpPool.cert_setup(@http) end it "should set the verify mode to OpenSSL::SSL::VERIFY_PEER" do @http.expects(:verify_mode=).with(OpenSSL::SSL::VERIFY_PEER) Puppet::Network::HttpPool.cert_setup(@http) end it "should set the ca file" do Puppet.settings.stubs(:value).returns "/some/file" FileTest.stubs(:exist?).with(Puppet[:hostcert]).returns true Puppet.settings.stubs(:value).with(:localcacert).returns "/ca/cert/file" FileTest.stubs(:exist?).with("/ca/cert/file").returns true @http.expects(:ca_file=).with("/ca/cert/file") Puppet::Network::HttpPool.cert_setup(@http) end it "should set up certificate information when creating http instances" do Puppet::Network::HttpPool.expects(:cert_setup).with { |i| i.is_a?(Net::HTTP) } Puppet::Network::HttpPool.http_instance("one", "two") end end end diff --git a/test/network/server/webrick.rb b/test/network/server/webrick.rb index 624147b6c..9eed5d862 100755 --- a/test/network/server/webrick.rb +++ b/test/network/server/webrick.rb @@ -1,128 +1,127 @@ #!/usr/bin/env ruby require File.expand_path(File.dirname(__FILE__) + '/../../lib/puppettest') require 'puppettest' require 'puppet/network/http_server/webrick' require 'mocha' class TestWebrickServer < Test::Unit::TestCase include PuppetTest::ServerTest def setup Puppet::Util::SUIDManager.stubs(:asuser).yields super end def teardown super - Puppet::Network::HttpPool.clear_http_instances end # Make sure we can create a server, and that it knows how to create its # certs by default. 
def test_basics server = nil assert_raise(Puppet::Error, "server succeeded with no cert") do server = Puppet::Network::HTTPServer::WEBrick.new( :Port => @@port, :Handlers => { :Status => nil } ) end assert_nothing_raised("Could not create simple server") do server = Puppet::Network::HTTPServer::WEBrick.new( :Port => @@port, :Handlers => { :CA => {}, # so that certs autogenerate :Status => nil } ) end assert(server, "did not create server") assert(server.cert, "did not retrieve cert") end # test that we can connect to the server # we have to use fork here, because we apparently can't use threads # to talk to other threads def test_connect_with_fork Puppet[:autosign] = true serverpid, server = mk_status_server # create a status client, and verify it can talk client = mk_status_client assert(client.cert, "did not get cert for client") retval = nil assert_nothing_raised("Could not connect to server") { retval = client.status } assert_equal(1, retval) end def mk_status_client client = nil assert_nothing_raised { client = Puppet::Network::Client.status.new( :Server => "localhost", :Port => @@port ) } client end def mk_status_server server = nil Puppet[:certdnsnames] = "localhost" assert_nothing_raised { server = Puppet::Network::HTTPServer::WEBrick.new( :Port => @@port, :Handlers => { :CA => {}, # so that certs autogenerate :Status => nil } ) } pid = fork { Puppet.run_mode.stubs(:master?).returns true assert_nothing_raised { trap(:INT) { server.shutdown } server.start } } @@tmppids << pid [pid, server] end def kill_and_wait(pid, file) %x{kill -INT #{pid} 2>/dev/null} count = 0 while count < 30 && File::exist?(file) count += 1 sleep(1) end assert(count < 30, "Killing server #{pid} failed") end end diff --git a/test/ral/type/filesources.rb b/test/ral/type/filesources.rb index 3363aafb3..f39d53907 100755 --- a/test/ral/type/filesources.rb +++ b/test/ral/type/filesources.rb @@ -1,507 +1,506 @@ #!/usr/bin/env ruby require File.expand_path(File.dirname(__FILE__) + '/../../lib/puppettest') require 'puppettest' require 'puppettest/support/utils' require 'cgi' require 'fileutils' require 'mocha' class TestFileSources < Test::Unit::TestCase include PuppetTest::Support::Utils include PuppetTest::FileTesting def setup super if defined?(@port) @port += 1 else @port = 12345 end @file = Puppet::Type.type(:file) Puppet[:filetimeout] = -1 Puppet::Util::SUIDManager.stubs(:asuser).yields Facter.stubs(:to_hash).returns({}) end def teardown super - Puppet::Network::HttpPool.clear_http_instances end def use_storage initstorage rescue system("rm -rf #{Puppet[:statefile]}") end def initstorage Puppet::Util::Storage.init Puppet::Util::Storage.load end # Make a simple recursive tree. 
def mk_sourcetree source = tempfile sourcefile = File.join(source, "file") Dir.mkdir source File.open(sourcefile, "w") { |f| f.puts "yay" } dest = tempfile destfile = File.join(dest, "file") return source, dest, sourcefile, destfile end def recursive_source_test(fromdir, todir) initstorage tofile = nil trans = nil tofile = Puppet::Type.type(:file).new( :path => todir, :recurse => true, :backup => false, :source => fromdir ) catalog = mk_catalog(tofile) catalog.apply assert(FileTest.exists?(todir), "Created dir #{todir} does not exist") end def run_complex_sources(networked = false) path = tempfile # first create the source directory FileUtils.mkdir_p path # okay, let's create a directory structure fromdir = File.join(path,"fromdir") Dir.mkdir(fromdir) FileUtils.cd(fromdir) { File.open("one", "w") { |f| f.puts "onefile"} File.open("two", "w") { |f| f.puts "twofile"} } todir = File.join(path, "todir") source = fromdir source = "puppet://localhost/#{networked}#{fromdir}" if networked recursive_source_test(source, todir) [fromdir,todir, File.join(todir, "one"), File.join(todir, "two")] end def test_complex_sources_twice fromdir, todir, one, two = run_complex_sources assert_trees_equal(fromdir,todir) recursive_source_test(fromdir, todir) assert_trees_equal(fromdir,todir) # Now remove the whole tree and try it again. [one, two].each do |f| File.unlink(f) end Dir.rmdir(todir) recursive_source_test(fromdir, todir) assert_trees_equal(fromdir,todir) end def test_sources_with_deleted_destfiles fromdir, todir, one, two = run_complex_sources assert(FileTest.exists?(todir)) # then delete a file File.unlink(two) # and run recursive_source_test(fromdir, todir) assert(FileTest.exists?(two), "Deleted file was not recopied") # and make sure they're still equal assert_trees_equal(fromdir,todir) end def test_sources_with_readonly_destfiles fromdir, todir, one, two = run_complex_sources assert(FileTest.exists?(todir)) File.chmod(0600, one) recursive_source_test(fromdir, todir) # and make sure they're still equal assert_trees_equal(fromdir,todir) # Now try it with the directory being read-only File.chmod(0111, todir) recursive_source_test(fromdir, todir) # and make sure they're still equal assert_trees_equal(fromdir,todir) end def test_sources_with_modified_dest_files fromdir, todir, one, two = run_complex_sources assert(FileTest.exists?(todir)) # Modify a dest file File.open(two, "w") { |f| f.puts "something else" } recursive_source_test(fromdir, todir) # and make sure they're still equal assert_trees_equal(fromdir,todir) end def test_sources_with_added_destfiles fromdir, todir = run_complex_sources assert(FileTest.exists?(todir)) # and finally, add some new files add_random_files(todir) recursive_source_test(fromdir, todir) fromtree = file_list(fromdir) totree = file_list(todir) assert(fromtree != totree, "Trees are incorrectly equal") # then remove our new files FileUtils.cd(todir) { %x{find . 
2>/dev/null}.chomp.split(/\n/).each { |file| if file =~ /file[0-9]+/ FileUtils.rm_rf(file) end } } # and make sure they're still equal assert_trees_equal(fromdir,todir) end # Make sure added files get correctly caught during recursion def test_RecursionWithAddedFiles basedir = tempfile Dir.mkdir(basedir) @@tmpfiles << basedir file1 = File.join(basedir, "file1") file2 = File.join(basedir, "file2") subdir1 = File.join(basedir, "subdir1") file3 = File.join(subdir1, "file") File.open(file1, "w") { |f| f.puts "yay" } rootobj = nil assert_nothing_raised { rootobj = Puppet::Type.type(:file).new( :name => basedir, :recurse => true, :check => %w{type owner}, :mode => 0755 ) } assert_apply(rootobj) assert_equal(0755, filemode(file1)) File.open(file2, "w") { |f| f.puts "rah" } assert_apply(rootobj) assert_equal(0755, filemode(file2)) Dir.mkdir(subdir1) File.open(file3, "w") { |f| f.puts "foo" } assert_apply(rootobj) assert_equal(0755, filemode(file3)) end def mkfileserverconf(mounts) file = tempfile File.open(file, "w") { |f| mounts.each { |path, name| f.puts "[#{name}]\n\tpath #{path}\n\tallow *\n" } } @@tmpfiles << file file end def test_unmountedNetworkSources server = nil mounts = { "/" => "root", "/noexistokay" => "noexist" } fileserverconf = mkfileserverconf(mounts) Puppet[:autosign] = true Puppet[:masterport] = @port Puppet[:certdnsnames] = "localhost" serverpid = nil assert_nothing_raised("Could not start on port #{@port}") { server = Puppet::Network::HTTPServer::WEBrick.new( :Port => @port, :Handlers => { :CA => {}, # so that certs autogenerate :FileServer => { :Config => fileserverconf } } ) } serverpid = fork { assert_nothing_raised { #trap(:INT) { server.shutdown; Kernel.exit! } trap(:INT) { server.shutdown } server.start } } @@tmppids << serverpid sleep(1) name = File.join(tmpdir, "nosourcefile") file = Puppet::Type.type(:file).new( :source => "puppet://localhost/noexist/file", :name => name ) assert_raise Puppet::Error do file.retrieve end comp = mk_catalog(file) comp.apply assert(!FileTest.exists?(name), "File with no source exists anyway") end def test_sourcepaths files = [] 3.times { files << tempfile } to = tempfile File.open(files[-1], "w") { |f| f.puts "yee-haw" } file = nil assert_nothing_raised { file = Puppet::Type.type(:file).new( :name => to, :source => files ) } comp = mk_catalog(file) assert_events([:file_created], comp) assert(File.exists?(to), "File does not exist") txt = nil File.open(to) { |f| txt = f.read.chomp } assert_equal("yee-haw", txt, "Contents do not match") end # Make sure that source-copying updates the checksum on the same run def test_sourcebeatsensure source = tempfile dest = tempfile File.open(source, "w") { |f| f.puts "yay" } file = nil assert_nothing_raised { file = Puppet::Type.type(:file).new( :name => dest, :ensure => "file", :source => source ) } file.retrieve assert_events([:file_created], file) file.retrieve assert_events([], file) assert_events([], file) end def test_sourcewithlinks source = tempfile link = tempfile dest = tempfile File.open(source, "w") { |f| f.puts "yay" } File.symlink(source, link) file = Puppet::Type.type(:file).new(:name => dest, :source => link) catalog = mk_catalog(file) # Default to managing links catalog.apply assert(FileTest.symlink?(dest), "Did not create link") # Now follow the links file[:links] = :follow catalog.apply assert(FileTest.file?(dest), "Destination is not a file") end # Make sure files aren't replaced when replace is false, but otherwise # are. 
def test_replace dest = tempfile file = Puppet::Type.newfile( :path => dest, :content => "foobar", :recurse => true ) assert_apply(file) File.open(dest, "w") { |f| f.puts "yayness" } file[:replace] = false assert_apply(file) # Make sure it doesn't change. assert_equal("yayness\n", File.read(dest), "File got replaced when :replace was false") file[:replace] = true assert_apply(file) # Make sure it changes. assert_equal("foobar", File.read(dest), "File was not replaced when :replace was true") end def test_sourceselect dest = tempfile sources = [] 2.times { |i| i = i + 1 source = tempfile sources << source file = File.join(source, "file#{i}") Dir.mkdir(source) File.open(file, "w") { |f| f.print "yay" } } file1 = File.join(dest, "file1") file2 = File.join(dest, "file2") file3 = File.join(dest, "file3") # Now make different files with the same name in each source dir sources.each_with_index do |source, i| File.open(File.join(source, "file3"), "w") { |f| f.print i.to_s } end obj = Puppet::Type.newfile( :path => dest, :recurse => true, :source => sources) assert_equal(:first, obj[:sourceselect], "sourceselect has the wrong default") # First, make sure we default to just copying file1 assert_apply(obj) assert(FileTest.exists?(file1), "File from source 1 was not copied") assert(! FileTest.exists?(file2), "File from source 2 was copied") assert(FileTest.exists?(file3), "File from source 1 was not copied") assert_equal("0", File.read(file3), "file3 got wrong contents") # Now reset sourceselect assert_nothing_raised do obj[:sourceselect] = :all end File.unlink(file1) File.unlink(file3) Puppet.err :yay assert_apply(obj) assert(FileTest.exists?(file1), "File from source 1 was not copied") assert(FileTest.exists?(file2), "File from source 2 was copied") assert(FileTest.exists?(file3), "File from source 1 was not copied") assert_equal("0", File.read(file3), "file3 got wrong contents") end def test_recursive_sourceselect dest = tempfile source1 = tempfile source2 = tempfile files = [] [source1, source2, File.join(source1, "subdir"), File.join(source2, "subdir")].each_with_index do |dir, i| Dir.mkdir(dir) # Make a single file in each directory file = File.join(dir, "file#{i}") File.open(file, "w") { |f| f.puts "yay#{i}"} # Now make a second one in each directory file = File.join(dir, "second-file#{i}") File.open(file, "w") { |f| f.puts "yaysecond-#{i}"} files << file end obj = Puppet::Type.newfile(:path => dest, :source => [source1, source2], :sourceselect => :all, :recurse => true) assert_apply(obj) ["file0", "file1", "second-file0", "second-file1", "subdir/file2", "subdir/second-file2", "subdir/file3", "subdir/second-file3"].each do |file| path = File.join(dest, file) assert(FileTest.exists?(path), "did not create #{file}") assert_equal("yay#{File.basename(file).sub("file", '')}\n", File.read(path), "file was not copied correctly") end end # #594 def test_purging_missing_remote_files source = tempfile dest = tempfile s1 = File.join(source, "file1") s2 = File.join(source, "file2") d1 = File.join(dest, "file1") d2 = File.join(dest, "file2") Dir.mkdir(source) [s1, s2].each { |name| File.open(name, "w") { |file| file.puts "something" } } # We have to add a second parameter, because that's the only way to expose the "bug". 
file = Puppet::Type.newfile(:path => dest, :source => source, :recurse => true, :purge => true, :mode => "755") assert_apply(file) assert(FileTest.exists?(d1), "File1 was not copied") assert(FileTest.exists?(d2), "File2 was not copied") File.unlink(s2) assert_apply(file) assert(FileTest.exists?(d1), "File1 was not kept") assert(! FileTest.exists?(d2), "File2 was not purged") end end
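To close, a quick console-style check of the contract this patch leaves behind, mirroring the reworked http_pool spec above. It is a hand-written sketch, assuming a working Puppet 2.x checkout on the load path; the host and port are illustrative.

require 'puppet'
require 'puppet/network/http_pool'

a = Puppet::Network::HttpPool.http_instance("puppet.example.com", 8140)
b = Puppet::Network::HttpPool.http_instance("puppet.example.com", 8140)

puts a.equal?(b)   # => false: no pooling, every call builds a new instance
puts a.started?    # => false: instances come back unstarted
puts Puppet::Network::HttpPool.respond_to?(:clear_http_instances)
                   # => false: the cache-flush API is gone along with the cache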