diff --git a/benchmarks/evaluations/benchmarker.rb b/benchmarks/evaluations/benchmarker.rb
index 968786501..3bf94d6dd 100644
--- a/benchmarks/evaluations/benchmarker.rb
+++ b/benchmarks/evaluations/benchmarker.rb
@@ -1,134 +1,140 @@
 require 'erb'
 require 'ostruct'
 require 'fileutils'
 require 'json'
 
 class Benchmarker
   include FileUtils
 
   def initialize(target, size)
     @target = target
     @size = size
     @micro_benchmarks = {}
     @parsecount = 100
     @evalcount = 100
   end
 
   def setup
     require 'puppet'
     require 'puppet/pops'
     config = File.join(@target, 'puppet.conf')
     Puppet.initialize_settings(['--config', config])
     manifests = File.join('benchmarks', 'evaluations', 'manifests')
     Dir.foreach(manifests) do |f|
       if f =~ /^(.*)\.pp$/
         @micro_benchmarks[$1] = File.read(File.join(manifests, f))
       end
     end
     # Run / Evaluate the common puppet logic
     @env = Puppet.lookup(:environments).get('benchmarking')
     @node = Puppet::Node.new("testing", :environment => @env)
     @parser = Puppet::Pops::Parser::EvaluatingParser::Transitional.new
     @compiler = Puppet::Parser::Compiler.new(@node)
     @scope = @compiler.topscope
 
     # Perform a portion of what a compile does (just enough to evaluate the site.pp logic)
     @compiler.catalog.environment_instance = @compiler.environment
     @compiler.send(:evaluate_main)
 
     # Then pretend we are running as part of a compilation
     Puppet.push_context(@compiler.context_overrides, "Benchmark masquerading as compiler configured context")
   end
 
-  def run(args = {:detail=>'all'})
-    details = args[:detail]
+  def run(args = {})
+    details = args[:detail] || 'all'
     measurements = []
     @micro_benchmarks.each do |name, source|
       # skip if all but the wanted if a single benchmark is wanted
-      next unless details == 'all' || details == name
-      measurements << Benchmark.measure("#{name} parse") do
-        1..@parsecount.times { @parser.parse_string(source, name) }
+      next unless details == 'all' || match = details.match(/#{name}(?:[\._\s](parse|eval))?$/)
+      # if name ends with .parse or .eval only do that part, else do both parts
+      ending = match ? match[1] : nil # parse, eval or nil ending
+      unless ending == 'eval'
+        measurements << Benchmark.measure("#{name} parse") do
+          1..@parsecount.times { @parser.parse_string(source, name) }
+        end
       end
-      model = @parser.parse_string(source, name)
-      measurements << Benchmark.measure("#{name} eval") do
-        1..@evalcount.times do
-          begin
-            # Run each in a local scope
-            scope_memo = @scope.ephemeral_level
-            @scope.new_ephemeral(true)
-            @parser.evaluate(@scope, model)
-          ensure
-            # Toss the created local scope
-            @scope.unset_ephemeral_var(scope_memo)
+      unless ending == 'parse'
+        model = @parser.parse_string(source, name)
+        measurements << Benchmark.measure("#{name} eval") do
+          1..@evalcount.times do
+            begin
+              # Run each in a local scope
+              scope_memo = @scope.ephemeral_level
+              @scope.new_ephemeral(true)
+              @parser.evaluate(@scope, model)
+            ensure
+              # Toss the created local scope
+              @scope.unset_ephemeral_var(scope_memo)
+            end
           end
         end
       end
     end
     measurements
   end
 
   def generate
     environment = File.join(@target, 'environments', 'benchmarking')
     templates = File.join('benchmarks', 'evaluations')
 
     mkdir_p(File.join(environment, 'modules'))
     mkdir_p(File.join(environment, 'manifests'))
 
     render(File.join(templates, 'site.pp.erb'),
            File.join(environment, 'manifests', 'site.pp'),{})
 
     render(File.join(templates, 'puppet.conf.erb'),
            File.join(@target, 'puppet.conf'),
            :location => @target)
 
     # Generate one module with a 3x function and a 4x function (namespaces)
     module_name = "module1"
     module_base = File.join(environment, 'modules', module_name)
     manifests = File.join(module_base, 'manifests')
     mkdir_p(manifests)
     functions_3x = File.join(module_base, 'lib', 'puppet', 'parser', 'functions')
     functions_4x = File.join(module_base, 'lib', 'puppet', 'functions')
     mkdir_p(functions_3x)
     mkdir_p(functions_4x)
 
     File.open(File.join(module_base, 'metadata.json'), 'w') do |f|
       JSON.dump({
         "types" => [],
         "source" => "",
         "author" => "Evaluations Benchmark",
         "license" => "Apache 2.0",
         "version" => "1.0.0",
         "description" => "Evaluations Benchmark module 1",
         "summary" => "Module with supporting logic for evaluations benchmark",
         "dependencies" => [],
       }, f)
     end
 
     render(File.join(templates, 'module', 'init.pp.erb'),
            File.join(manifests, 'init.pp'),
            :name => module_name)
 
     render(File.join(templates, 'module', 'func3.rb.erb'),
            File.join(functions_3x, 'func3.rb'),
            :name => module_name)
 
     # namespaced function
     mkdir_p(File.join(functions_4x, module_name))
     render(File.join(templates, 'module', 'module1_func4.rb.erb'),
            File.join(functions_4x, module_name, 'func4.rb'),
            :name => module_name)
 
     # non namespaced
     render(File.join(templates, 'module', 'func4.rb.erb'),
            File.join(functions_4x, 'func4.rb'),
            :name => module_name)
   end
 
   def render(erb_file, output_file, bindings)
     site = ERB.new(File.read(erb_file))
     File.open(output_file, 'w') do |fh|
       fh.write(site.result(OpenStruct.new(bindings).instance_eval { binding }))
     end
   end
 end
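For orientation, the detail matching that Benchmarker#run now performs accepts either 'all', a bare micro-benchmark name, or a name qualified with parse/eval. A minimal standalone sketch of that selection logic follows; 'module1' stands in for a hypothetical micro-benchmark name (the real names come from the .pp files under benchmarks/evaluations/manifests):

    # Sketch only: mirrors the `next unless ... details.match(...)` selection above.
    name = 'module1' # hypothetical micro benchmark name

    ['all', 'module1', 'module1.parse', 'module1 eval', 'other'].each do |details|
      match    = details.match(/#{name}(?:[\._\s](parse|eval))?$/)
      selected = details == 'all' || !match.nil?
      ending   = match ? match[1] : nil # 'parse', 'eval' or nil (both parts)
      puts "#{details.inspect}: selected=#{selected}, ending=#{ending.inspect}"
    end
    # 'all' and 'module1' run both parts, 'module1.parse' only parsing,
    # 'module1 eval' only evaluation, and 'other' is skipped.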
diff --git a/benchmarks/evaluations/benchmarker_task.rb b/benchmarks/evaluations/benchmarker_task.rb
new file mode 100644
index 000000000..d38ed1088
--- /dev/null
+++ b/benchmarks/evaluations/benchmarker_task.rb
@@ -0,0 +1,11 @@
+# Helper class that is used by the Rake task generator.
+# Currently it only supports defining the arguments that are passed to run.
+# (The rake task generator always passes :warm_up_runs as an Integer when profiling.)
+# Benchmarks that want arguments for regular runs must specify them
+# as an Array of symbols.
+#
+class BenchmarkerTask
+  def self.run_args
+    [:detail]
+  end
+end
\ No newline at end of file
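As the tasks/benchmark.rake change below shows, this helper is discovered purely by file name (benchmarks/<scenario>/benchmarker_task.rb) and is only asked for run_args. As a hedged illustration, a different scenario that wanted to expose its own task arguments could ship something like this (the :iterations argument is invented for the example):

    # Hypothetical benchmarks/<scenario>/benchmarker_task.rb for another scenario.
    class BenchmarkerTask
      def self.run_args
        # These symbols become rake task arguments for the scenario's :run and
        # :profile tasks and are passed through to Benchmarker#run.
        [:detail, :iterations]
      end
    end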
diff --git a/tasks/benchmark.rake b/tasks/benchmark.rake
index e42d850da..69c0fc27c 100644
--- a/tasks/benchmark.rake
+++ b/tasks/benchmark.rake
@@ -1,137 +1,144 @@
 require 'benchmark'
 require 'tmpdir'
 require 'csv'
 
 namespace :benchmark do
   def generate_scenario_tasks(location, name)
     desc File.read(File.join(location, 'description'))
     task name => "#{name}:run"
+    # Load a BenchmarkerTask to handle config of the benchmark
+    task_handler_file = File.expand_path(File.join(location, 'benchmarker_task.rb'))
+    if File.exist?(task_handler_file)
+      require task_handler_file
+      run_args = BenchmarkerTask.run_args
+    else
+      run_args = []
+    end
 
     namespace name do
       task :setup do
         ENV['ITERATIONS'] ||= '10'
         ENV['SIZE'] ||= '100'
         ENV['TARGET'] ||= Dir.mktmpdir(name)
         ENV['TARGET'] = File.expand_path(ENV['TARGET'])
 
         mkdir_p(ENV['TARGET'])
 
         require File.expand_path(File.join(location, 'benchmarker.rb'))
 
         @benchmark = Benchmarker.new(ENV['TARGET'], ENV['SIZE'].to_i)
       end
 
       task :generate => :setup do
         @benchmark.generate
         @benchmark.setup
       end
 
       desc "Run the #{name} scenario."
-      task :run => :generate do
+      task :run, [*run_args] => :generate do |_, args|
        format = if RUBY_VERSION =~ /^1\.8/
                   Benchmark::FMTSTR
                 else
                   Benchmark::FORMAT
                 end
 
-        report = []
         details = []
 
         Benchmark.benchmark(Benchmark::CAPTION, 10, format, "> total:", "> avg:") do |b|
           times = []
           ENV['ITERATIONS'].to_i.times do |i|
             start_time = Time.now.to_i
             times << b.report("Run #{i + 1}") do
-              details << @benchmark.run
+              details << @benchmark.run(args)
             end
             report << [to_millis(start_time), to_millis(times.last.real), 200, true, name]
           end
 
           sum = times.inject(Benchmark::Tms.new, &:+)
 
           [sum, sum / times.length]
         end
 
         write_csv("#{name}.samples",
                   %w{timestamp elapsed responsecode success name},
                   report)
 
         # report details, if any were produced
         if details[0].is_a?(Array) && details[0][0].is_a?(Benchmark::Tms)
           # assume all entries are Tms if the first is
           # turn each into a hash of label => tms (since labels are lost when doing arithmetic on Tms)
           hashed = details.reduce([]) do |memo, measures|
             memo << measures.reduce({}) {|memo2, measure| memo2[measure.label] = measure; memo2}
             memo
           end
           # sum across all hashes
           result = {}
           hashed_totals = hashed.reduce {|memo, h| memo.merge(h) {|k, old, new| old + new }}
           # average the totals
           hashed_totals.keys.each {|k| hashed_totals[k] /= details.length }
           min_width = 14
           max_width = (hashed_totals.keys.map(&:length) << min_width).max
           puts "\n"
           puts sprintf("%2$*1$s %3$s", -max_width, 'Details (avg)', "      user     system      total        real")
           puts "-" * (46 + max_width)
           hashed_totals.sort.each {|k,v| puts sprintf("%2$*1$s %3$s", -max_width, k, v.format) }
         end
       end
 
       desc "Profile a single run of the #{name} scenario."
-      task :profile, [:warm_up_runs, :detail] => :generate do |_, args|
+      task :profile, [:warm_up_runs, *run_args] => :generate do |_, args|
         warm_up_runs = (args[:warm_up_runs] || '0').to_i
         warm_up_runs.times do
           @benchmark.run(args)
         end
 
         require 'ruby-prof'
 
         result = RubyProf.profile do
           @benchmark.run(args)
         end
 
         printer = RubyProf::CallTreePrinter.new(result)
         File.open(File.join("callgrind.#{name}.#{Time.now.to_i}.trace"), "w") do |f|
           printer.print(f)
         end
       end
 
       def to_millis(seconds)
         (seconds * 1000).round
       end
 
       def write_csv(file, header, data)
         CSV.open(file, 'w') do |csv|
           csv << header
           data.each do |line|
             csv << line
           end
         end
       end
     end
   end
 
   scenarios = []
   Dir.glob('benchmarks/*') do |location|
     name = File.basename(location)
     scenarios << name
     generate_scenario_tasks(location, File.basename(location))
   end
 
   namespace :all do
     desc "Profile all of the scenarios. (#{scenarios.join(', ')})"
     task :profile do
       scenarios.each do |name|
         sh "rake benchmark:#{name}:profile"
       end
     end
 
     desc "Run all of the scenarios. (#{scenarios.join(', ')})"
     task :run do
       scenarios.each do |name|
         sh "rake benchmark:#{name}:run"
       end
     end
   end
 end
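Taken together, and assuming the existing scenario layout, the new argument plumbing is exercised roughly like this; 'evaluations' is the scenario directory and 'module1' / 'module1.parse' are example detail values whose exact names depend on the manifests in that scenario:

    rake benchmark:evaluations:run                       # every micro benchmark, parse and eval
    rake benchmark:evaluations:run[module1]              # one benchmark, both parts
    rake benchmark:evaluations:run[module1.parse]        # only the parse measurement
    rake benchmark:evaluations:profile[5,module1.eval]   # 5 warm-up runs, then profile eval only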