diff --git a/benchmarks/evaluations/benchmarker.rb b/benchmarks/evaluations/benchmarker.rb new file mode 100644 index 000000000..782c5bd5e --- /dev/null +++ b/benchmarks/evaluations/benchmarker.rb @@ -0,0 +1,86 @@ +require 'erb' +require 'ostruct' +require 'fileutils' +require 'json' + +class Benchmarker + include FileUtils + + def initialize(target, size) + @target = target + @size = size + @micro_benchmarks = {} + @parsecount = 100 + @evalcount = 100 + end + + def setup + require 'puppet' + require 'puppet/pops' + config = File.join(@target, 'puppet.conf') + Puppet.initialize_settings(['--config', config]) + manifests = File.join('benchmarks', 'evaluations', 'manifests') + Dir.foreach(manifests) do |f| + if f =~ /^(.*)\.pp$/ + @micro_benchmarks[$1] = File.read(File.join(manifests, f)) + end + end + # Run / Evaluate the common puppet logic + @env = Puppet.lookup(:environments).get('benchmarking') + @node = Puppet::Node.new("testing", :environment => @env) + @parser = Puppet::Pops::Parser::EvaluatingParser::Transitional.new + @compiler = Puppet::Parser::Compiler.new(@node) + @scope = @compiler.topscope + + # Perform a portion of what a compile does (just enough to evaluate the site.pp logic) + @compiler.catalog.environment_instance = @compiler.environment + @compiler.send(:evaluate_main) + end + + def run + measurements = [] + @micro_benchmarks.each do |name, source| + measurements << Benchmark.measure("#{name} parse") do + 1..@parsecount.times { @parser.parse_string(source, name) } + end + model = @parser.parse_string(source, name) + measurements << Benchmark.measure("#{name} eval") do + 1..@evalcount.times do + begin + # Run each in a local scope + scope_memo = @scope.ephemeral_level + @scope.new_ephemeral(true) + @parser.evaluate(@scope, model) + ensure + # Toss the created local scope + @scope.unset_ephemeral_var(scope_memo) + end + end + end + end + measurements + end + + def generate + environment = File.join(@target, 'environments', 'benchmarking') + 
templates = File.join('benchmarks', 'evaluations') + + mkdir_p(File.join(environment, 'modules')) + mkdir_p(File.join(environment, 'manifests')) + + render(File.join(templates, 'site.pp.erb'), + File.join(environment, 'manifests', 'site.pp'),{}) + + render(File.join(templates, 'puppet.conf.erb'), + File.join(@target, 'puppet.conf'), + :location => @target) + end + + def render(erb_file, output_file, bindings) + site = ERB.new(File.read(erb_file)) + File.open(output_file, 'w') do |fh| + fh.write(site.result(OpenStruct.new(bindings).instance_eval { binding })) + end + end + +end diff --git a/benchmarks/evaluations/description b/benchmarks/evaluations/description new file mode 100644 index 000000000..812037e6d --- /dev/null +++ b/benchmarks/evaluations/description @@ -0,0 +1,13 @@ +Benchmark scenario: evaluates a select set of time critical expressions +Benchmark target: measuring individual use cases of evaluation +Parser: Future + +Evaluations: +* fcall_3x - calls sprintf 20x times +* fcall_4x - calls assert_type 20x times (is heavier than sprintf; there is no similar simple 4x function) +* interpolation - does 20x interpolations of varying length +* var_absolute - references a top scope variable 20x times with absolute reference +* var_relative - references a top scope variable 20x times with non absolute reference +* var_class_absolute - references a class variable 20x times with absolute reference +* var_class_relative - references a class variable 20x times with non absolute reference + diff --git a/benchmarks/evaluations/manifests/fcall_3x.pp b/benchmarks/evaluations/manifests/fcall_3x.pp new file mode 100644 index 000000000..3b2e079b1 --- /dev/null +++ b/benchmarks/evaluations/manifests/fcall_3x.pp @@ -0,0 +1,6 @@ +$tmp = [ +sprintf('%s',a), sprintf('%s',a), sprintf('%s',a), sprintf('%s',a), sprintf('%s',a), +sprintf('%s',a), sprintf('%s',a), sprintf('%s',a), sprintf('%s',a), sprintf('%s',a), +sprintf('%s',a), sprintf('%s',a), sprintf('%s',a), sprintf('%s',a), 
sprintf('%s',a), +sprintf('%s',a), sprintf('%s',a), sprintf('%s',a), sprintf('%s',a), sprintf('%s',a), +] diff --git a/benchmarks/evaluations/manifests/fcall_4x.pp b/benchmarks/evaluations/manifests/fcall_4x.pp new file mode 100644 index 000000000..0be757281 --- /dev/null +++ b/benchmarks/evaluations/manifests/fcall_4x.pp @@ -0,0 +1,6 @@ +$tmp = [ +assert_type(Integer,1), assert_type(Integer,1), assert_type(Integer,1), assert_type(Integer,1), assert_type(Integer,1), +assert_type(Integer,1), assert_type(Integer,1), assert_type(Integer,1), assert_type(Integer,1), assert_type(Integer,1), +assert_type(Integer,1), assert_type(Integer,1), assert_type(Integer,1), assert_type(Integer,1), assert_type(Integer,1), +assert_type(Integer,1), assert_type(Integer,1), assert_type(Integer,1), assert_type(Integer,1), assert_type(Integer,1), +] diff --git a/benchmarks/evaluations/manifests/interpolation.pp b/benchmarks/evaluations/manifests/interpolation.pp new file mode 100644 index 000000000..817770146 --- /dev/null +++ b/benchmarks/evaluations/manifests/interpolation.pp @@ -0,0 +1,11 @@ +$tmp = [ "...$x...", + "...$x...$x", + "...$x...$x...", + "...$x...$x...$x...", + "...$x...$x...$x...$x", + "...$x...$x...$x...$x...", + "...$x...$x...$x...$x...$x", + "...$x...$x...$x...$x...$x...", + "...$x...$x...$x...$x...$x...$x", + "...$x...$x...$x...$x...$x...$x...", +] \ No newline at end of file diff --git a/benchmarks/evaluations/manifests/var_absolute.pp b/benchmarks/evaluations/manifests/var_absolute.pp new file mode 100644 index 000000000..ed4731817 --- /dev/null +++ b/benchmarks/evaluations/manifests/var_absolute.pp @@ -0,0 +1,3 @@ +$tmp = [ $::x, $::x, $::x, $::x, $::x, $::x, $::x, $::x, $::x, $::x, + $::x, $::x, $::x, $::x, $::x, $::x, $::x, $::x, $::x, $::x, +] diff --git a/benchmarks/evaluations/manifests/var_class_absolute.pp b/benchmarks/evaluations/manifests/var_class_absolute.pp new file mode 100644 index 000000000..e86940f6e --- /dev/null +++ 
b/benchmarks/evaluations/manifests/var_class_absolute.pp @@ -0,0 +1,5 @@ +$tmp = [ $::testing::param_a, $::testing::param_a, $::testing::param_a, $::testing::param_a, $::testing::param_a, + $::testing::param_a, $::testing::param_a, $::testing::param_a, $::testing::param_a, $::testing::param_a, + $::testing::param_a, $::testing::param_a, $::testing::param_a, $::testing::param_a, $::testing::param_a, + $::testing::param_a, $::testing::param_a, $::testing::param_a, $::testing::param_a, $::testing::param_a, +] diff --git a/benchmarks/evaluations/manifests/var_class_relative.pp b/benchmarks/evaluations/manifests/var_class_relative.pp new file mode 100644 index 000000000..3048db6ad --- /dev/null +++ b/benchmarks/evaluations/manifests/var_class_relative.pp @@ -0,0 +1,5 @@ +$tmp = [ $testing::param_a, $testing::param_a, $testing::param_a, $testing::param_a, $testing::param_a, + $testing::param_a, $testing::param_a, $testing::param_a, $testing::param_a, $testing::param_a, + $testing::param_a, $testing::param_a, $testing::param_a, $testing::param_a, $testing::param_a, + $testing::param_a, $testing::param_a, $testing::param_a, $testing::param_a, $testing::param_a, +] diff --git a/benchmarks/evaluations/manifests/var_relative.pp b/benchmarks/evaluations/manifests/var_relative.pp new file mode 100644 index 000000000..3fba7b6ef --- /dev/null +++ b/benchmarks/evaluations/manifests/var_relative.pp @@ -0,0 +1,3 @@ +$tmp = [$x, $x, $x, $x, $x, $x, $x, $x, $x, $x, + $x, $x, $x, $x, $x, $x, $x, $x, $x, $x, +] diff --git a/benchmarks/evaluations/puppet.conf.erb b/benchmarks/evaluations/puppet.conf.erb new file mode 100644 index 000000000..d9f1db9df --- /dev/null +++ b/benchmarks/evaluations/puppet.conf.erb @@ -0,0 +1,7 @@ +confdir = <%= location %> +vardir = <%= location %> +environmentpath = <%= File.join(location, 'environments') %> +environment_timeout = '0' +parser = future +strict_variables = true + diff --git a/benchmarks/evaluations/site.pp.erb 
b/benchmarks/evaluations/site.pp.erb new file mode 100644 index 000000000..05cc8d8c5 --- /dev/null +++ b/benchmarks/evaluations/site.pp.erb @@ -0,0 +1,10 @@ +# Common setup done once for all micro benchmarks +# +class testing { + $param_a = 10 + $param_b = 20 +} +include testing +$x = 'aaaaaaaa' + + diff --git a/tasks/benchmark.rake b/tasks/benchmark.rake index 7456c5c0a..99bca2182 100644 --- a/tasks/benchmark.rake +++ b/tasks/benchmark.rake @@ -1,109 +1,132 @@ require 'benchmark' require 'tmpdir' require 'csv' namespace :benchmark do def generate_scenario_tasks(location, name) desc File.read(File.join(location, 'description')) task name => "#{name}:run" namespace name do task :setup do ENV['ITERATIONS'] ||= '10' ENV['SIZE'] ||= '100' ENV['TARGET'] ||= Dir.mktmpdir(name) ENV['TARGET'] = File.expand_path(ENV['TARGET']) mkdir_p(ENV['TARGET']) require File.expand_path(File.join(location, 'benchmarker.rb')) @benchmark = Benchmarker.new(ENV['TARGET'], ENV['SIZE'].to_i) end task :generate => :setup do @benchmark.generate @benchmark.setup end desc "Run the #{name} scenario." 
task :run => :generate do format = if RUBY_VERSION =~ /^1\.8/ Benchmark::FMTSTR else Benchmark::FORMAT end report = [] + details = [] Benchmark.benchmark(Benchmark::CAPTION, 10, format, "> total:", "> avg:") do |b| times = [] ENV['ITERATIONS'].to_i.times do |i| start_time = Time.now.to_i times << b.report("Run #{i + 1}") do - @benchmark.run + details << @benchmark.run end report << [to_millis(start_time), to_millis(times.last.real), 200, true, name] end sum = times.inject(Benchmark::Tms.new, &:+) [sum, sum / times.length] end write_csv("#{name}.samples", %w{timestamp elapsed responsecode success name}, report) + + # report details, if any were produced + if details[0].is_a?(Array) && details[0][0].is_a?(Benchmark::Tms) + # assume all entries are Tms if the first is + # turn each into a hash of label => tms (since labels are lost when doing arithmetic on Tms) + hashed = details.reduce([]) do |memo, measures| + memo << measures.reduce({}) {|memo2, measure| memo2[measure.label] = measure; memo2} + memo + end + # sum across all hashes + result = {} + + hashed_totals = hashed.reduce {|memo, h| memo.merge(h) {|k, old, new| old + new }} + # average the totals + hashed_totals.keys.each {|k| hashed_totals[k] /= details.length } + min_width = 14 + max_width = (hashed_totals.keys.map(&:length) << min_width).max + puts "\n" + puts sprintf("%2$*1$s %3$s", -max_width, 'Details (avg)', " user system total real") + puts "-" * (46 + max_width) + hashed_totals.sort.each {|k,v| puts sprintf("%2$*1$s %3$s", -max_width, k, v.format) } + end end desc "Profile a single run of the #{name} scenario." 
task :profile => :generate do require 'ruby-prof' result = RubyProf.profile do @benchmark.run end printer = RubyProf::CallTreePrinter.new(result) File.open(File.join("callgrind.#{name}.#{Time.now.to_i}.trace"), "w") do |f| printer.print(f) end end def to_millis(seconds) (seconds * 1000).round end def write_csv(file, header, data) CSV.open(file, 'w') do |csv| csv << header data.each do |line| csv << line end end end end end scenarios = [] Dir.glob('benchmarks/*') do |location| name = File.basename(location) scenarios << name generate_scenario_tasks(location, File.basename(location)) end namespace :all do desc "Profile all of the scenarios. (#{scenarios.join(', ')})" task :profile do scenarios.each do |name| sh "rake benchmark:#{name}:profile" end end desc "Run all of the scenarios. (#{scenarios.join(', ')})" task :run do scenarios.each do |name| sh "rake benchmark:#{name}:run" end end end end