png_conform 0.1.1 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,204 @@
+ # frozen_string_literal: true
+
+ require_relative "png_conform_runner"
+ require_relative "pngcheck_runner"
+ require_relative "metrics_collector"
+ require_relative "report_generator"
+ require "yaml"
+
+ # Main orchestrator for benchmark execution.
+ #
+ # Coordinates file discovery, tool execution, metrics collection,
+ # and report generation for comparing PNG validation tools.
+ class BenchmarkRunner
+   attr_reader :config, :metrics_collector, :runners
+
+   def initialize(config = {})
+     @config = deep_merge(default_config, config)
+     @metrics_collector = MetricsCollector.new
+     @runners = initialize_runners
+   end
+
+   # Run the complete benchmark suite.
+   #
+   # @return [Hash] Benchmark results
+   def run
+     puts "Initializing benchmark..."
+     validate_tools
+
+     files = discover_files
+     puts "Found #{files.size} PNG files to test"
+
+     return { error: "No files found matching pattern" } if files.empty?
+
+     puts "\nRunning benchmarks..."
+     puts " Iterations: #{config[:iterations]}"
+     puts " Warmup runs: #{config[:warmup_runs]}"
+     puts ""
+
+     run_benchmarks(files)
+
+     puts "\nGenerating report..."
+     generate_report
+   end
+
+   private
+
+   def default_config
+     {
+       test_files: {
+         pattern: "spec/fixtures/pngsuite/**/*.png",
+         exclude: [],
+         limit: nil,
+       },
+       iterations: 3,
+       warmup_runs: 1,
+       timeout: 30,
+       tools: {
+         png_conform: { enabled: true, options: {} },
+         pngcheck: { enabled: true, options: {} },
+       },
+       output: {
+         format: "text",
+         file: nil,
+         verbose: true,
+       },
+     }
+   end
+
+   def initialize_runners
+     runners = {}
+
+     if config[:tools][:png_conform][:enabled]
+       runners[:png_conform] = PngConformRunner.new(
+         config[:tools][:png_conform][:options],
+       )
+     end
+
+     if config[:tools][:pngcheck][:enabled]
+       runners[:pngcheck] = PngcheckRunner.new(
+         config[:tools][:pngcheck][:options],
+       )
+     end
+
+     runners
+   end
+
+   def validate_tools
+     puts "Checking tool availability..."
+
+     runners.each do |name, runner|
+       if runner.available?
+         puts " ✓ #{name} is available"
+       else
+         puts " ✗ #{name} is NOT available"
+         if name == :pngcheck
+           puts " Install with: brew install pngcheck (macOS) or apt-get install pngcheck (Linux)"
+         end
+       end
+     end
+
+     puts ""
+   end
+
+   def discover_files
+     pattern = config.dig(:test_files, :pattern)
+     exclude = config.dig(:test_files, :exclude) || []
+     limit = config.dig(:test_files, :limit)
+
+     files = Dir.glob(pattern).select { |f| File.file?(f) }
+
+     # Apply exclusions
+     exclude.each do |exclude_pattern|
+       files.reject! { |f| File.fnmatch?(exclude_pattern, f) }
+     end
+
+     # Apply limit if specified
+     files = files.first(limit) if limit
+
+     files.sort
+   end
+
+   def run_benchmarks(files)
+     total_iterations = config[:iterations] + config[:warmup_runs]
+     total_runs = files.size * runners.size * total_iterations
+     current_run = 0
+
+     runners.each do |tool_name, runner|
+       next unless runner.available?
+
+       puts "Benchmarking #{tool_name}..."
+
+       files.each do |file|
+         # Warmup runs (not recorded)
+         config[:warmup_runs].times do
+           current_run += 1
+           if config[:output][:verbose]
+             print_progress("Warmup", current_run, total_runs)
+           end
+           runner.run(file)
+         end
+
+         # Actual benchmark runs (recorded)
+         config[:iterations].times do |_iteration|
+           current_run += 1
+           if config[:output][:verbose]
+             print_progress(tool_name, current_run, total_runs)
+           end
+
+           metrics = runner.run(file)
+           metrics_collector.record_run(
+             tool_name.to_s,
+             file,
+             metrics,
+           )
+         end
+       end
+
+       puts "" if config[:output][:verbose]
+     end
+   end
+
+   def print_progress(label, current, total)
+     pct = (current.to_f / total * 100).round(1)
+     bar_width = 40
+     filled = (bar_width * current / total).to_i
+     bar = "=" * filled + " " * (bar_width - filled)
+
+     print "\r [#{bar}] #{pct}% (#{current}/#{total}) #{label} "
+     $stdout.flush
+   end
+
+   def generate_report
+     report_gen = ReportGenerator.new(metrics_collector, config[:output])
+     report = report_gen.generate(config[:output][:format])
+
+     if config[:output][:file]
+       File.write(config[:output][:file], report)
+       puts "Report saved to: #{config[:output][:file]}"
+     else
+       puts "\n"
+       puts report
+     end
+
+     {
+       summary: metrics_collector.summary,
+       report: report,
+     }
+   end
+
+   # Deep merge two hashes
+   def deep_merge(hash1, hash2)
+     result = hash1.dup
+     hash2.each do |key, value|
+       result[key] = if result[key].is_a?(Hash) && value.is_a?(Hash)
+                       deep_merge(result[key], value)
+                     else
+                       value
+                     end
+     end
+     result
+   end
+ end
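
For orientation, a minimal invocation of BenchmarkRunner might look like the sketch below. This is illustrative only, not shipped code; the config keys mirror default_config above, and the override values are arbitrary.

    # Hypothetical usage sketch; config keys mirror default_config.
    runner = BenchmarkRunner.new(
      test_files: { limit: 10 },                  # cap the file list for a quick pass
      iterations: 5,                              # recorded runs per file per tool
      output: { format: "text", verbose: false },
    )
    results = runner.run                          # prints progress, then the report
    p results[:summary]                           # nil when no files matched the pattern

Because deep_merge recurses into nested hashes, a partial override such as test_files: { limit: 10 } keeps the default pattern and exclude values.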
@@ -0,0 +1,193 @@
+ # frozen_string_literal: true
+
+ # Collects and analyzes performance metrics from benchmark runs.
+ #
+ # Tracks execution time, memory usage, throughput, and calculates
+ # statistical measures for comparison between tools.
+ class MetricsCollector
+   attr_reader :runs
+
+   def initialize
+     @runs = []
+   end
+
+   # Record a single benchmark run.
+   #
+   # @param tool [String] Name of the tool
+   # @param file [String] File path
+   # @param metrics [Hash] Performance metrics
+   def record_run(tool, file, metrics)
+     @runs << {
+       tool: tool,
+       file: file,
+       execution_time: metrics[:execution_time],
+       memory_used: metrics[:memory_used],
+       peak_memory: metrics[:peak_memory],
+       success: metrics[:result][:success],
+       exit_code: metrics[:result][:exit_code],
+       timed_out: metrics[:result][:timed_out],
+       timestamp: Time.now,
+     }
+   end
+
+   # Get all runs for a specific tool.
+   #
+   # @param tool [String] Tool name
+   # @return [Array<Hash>] Runs for the tool
+   def runs_for_tool(tool)
+     @runs.select { |run| run[:tool] == tool }
+   end
+
+   # Calculate statistics for a tool.
+   #
+   # @param tool [String] Tool name
+   # @return [Hash] Statistical measures
+   def calculate_statistics(tool)
+     tool_runs = runs_for_tool(tool)
+     return nil if tool_runs.empty?
+
+     execution_times = tool_runs.map { |r| r[:execution_time] }.compact
+     memory_values = tool_runs.map { |r| r[:peak_memory] }.compact
+
+     {
+       tool: tool,
+       total_runs: tool_runs.size,
+       successful_runs: tool_runs.count { |r| r[:success] },
+       failed_runs: tool_runs.count { |r| !r[:success] },
+       timeouts: tool_runs.count { |r| r[:timed_out] },
+       execution_time: calculate_stats(execution_times),
+       memory: calculate_stats(memory_values),
+       throughput: calculate_throughput(tool_runs),
+     }
+   end
+
+   # Compare two tools.
+   #
+   # @param tool1 [String] First tool name
+   # @param tool2 [String] Second tool name
+   # @return [Hash] Comparison results
+   def compare_tools(tool1, tool2)
+     stats1 = calculate_statistics(tool1)
+     stats2 = calculate_statistics(tool2)
+
+     return nil if stats1.nil? || stats2.nil?
+
+     time1 = stats1[:execution_time][:mean]
+     time2 = stats2[:execution_time][:mean]
+     mem1 = stats1[:memory][:mean]
+     mem2 = stats2[:memory][:mean]
+
+     faster_tool = time1 < time2 ? tool1 : tool2
+     time_diff_pct = ((time1 - time2).abs / [time1, time2].min * 100).round(1)
+     time_multiplier = ([time1, time2].max / [time1, time2].min).round(2)
+
+     memory_efficient = mem1 < mem2 ? tool1 : tool2
+     mem_diff_pct = ((mem1 - mem2).abs / [mem1, mem2].min * 100).round(1)
+     mem_multiplier = ([mem1, mem2].max / [mem1, mem2].min).round(2)
+
+     {
+       tool1: tool1,
+       tool2: tool2,
+       faster_tool: faster_tool,
+       time_difference_percent: time_diff_pct,
+       time_multiplier: time_multiplier,
+       memory_efficient_tool: memory_efficient,
+       memory_difference_percent: mem_diff_pct,
+       memory_multiplier: mem_multiplier,
+       stats: {
+         tool1 => stats1,
+         tool2 => stats2,
+       },
+     }
+   end
+
+   # Export raw data for external analysis.
+   #
+   # @return [Array<Hash>] All run data
+   def export_raw_data
+     @runs
+   end
+
+   # Get summary statistics across all tools.
+   #
+   # @return [Hash] Summary data
+   def summary
+     tools = @runs.map { |r| r[:tool] }.uniq
+
+     {
+       total_runs: @runs.size,
+       tools: tools,
+       files_tested: @runs.map { |r| r[:file] }.uniq.size,
+       tool_statistics: tools.map { |tool| calculate_statistics(tool) }.compact,
+     }
+   end
+
+   private
+
+   # Calculate statistical measures for a dataset.
+   #
+   # @param values [Array<Numeric>] Data values
+   # @return [Hash] Statistical measures
+   def calculate_stats(values)
+     return nil if values.empty?
+
+     sorted = values.sort
+     size = values.size
+
+     {
+       mean: (values.sum / size.to_f).round(3),
+       median: calculate_median(sorted),
+       std_dev: calculate_std_dev(values).round(3),
+       min: sorted.first.round(3),
+       max: sorted.last.round(3),
+       count: size,
+     }
+   end
+
+   # Calculate median value.
+   #
+   # @param sorted_values [Array<Numeric>] Sorted array
+   # @return [Float] Median value
+   def calculate_median(sorted_values)
+     size = sorted_values.size
+     mid = size / 2
+
+     if size.even?
+       ((sorted_values[mid - 1] + sorted_values[mid]) / 2.0).round(3)
+     else
+       sorted_values[mid].round(3)
+     end
+   end
+
+   # Calculate standard deviation.
+   #
+   # @param values [Array<Numeric>] Data values
+   # @return [Float] Standard deviation
+   def calculate_std_dev(values)
+     return 0.0 if values.size <= 1
+
+     mean = values.sum / values.size.to_f
+     variance = values.sum { |v| (v - mean)**2 } / values.size.to_f
+     Math.sqrt(variance)
+   end
+
+   # Calculate throughput metrics.
+   #
+   # @param runs [Array<Hash>] Run data
+   # @return [Hash] Throughput metrics
+   def calculate_throughput(runs)
+     successful = runs.select { |r| r[:success] }
+     return nil if successful.empty?
+
+     # Total execution time, converted from milliseconds to seconds
+     total_time = successful.sum { |r| r[:execution_time] } / 1000.0
+     files_count = successful.size
+
+     {
+       files_per_second: (files_count / total_time).round(2),
+       avg_time_per_file: (total_time / files_count * 1000).round(3), # milliseconds
+     }
+   end
+ end
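
The collector can be exercised on its own, which makes the statistics easy to sanity-check. A hedged sketch with invented numbers follows; the metrics hash matches the shape record_run reads, and execution_time is in milliseconds, as the / 1000.0 in calculate_throughput implies.

    # Standalone sketch with synthetic data; all values are invented.
    collector = MetricsCollector.new
    [12.0, 15.0, 11.0].each do |ms|
      collector.record_run("png_conform", "a.png", {
        execution_time: ms, # milliseconds
        memory_used: 1_024,
        peak_memory: 2_048,
        result: { success: true, exit_code: 0, timed_out: false },
      })
    end
    stats = collector.calculate_statistics("png_conform")
    stats[:execution_time]
    # => { mean: 12.667, median: 12.0, std_dev: 1.7, min: 11.0, max: 15.0, count: 3 }

Note that calculate_std_dev divides by n rather than n - 1, so it reports the population standard deviation rather than the sample estimate.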
@@ -0,0 +1,68 @@
+ # frozen_string_literal: true
+
+ require_relative "tool_runner"
+
+ # Runner for the png_conform validation tool.
+ #
+ # Executes png_conform CLI and collects performance metrics.
+ class PngConformRunner < ToolRunner
+   def initialize(options = {})
+     # Use the exe/png_conform in the project root
+     command = File.expand_path("../../exe/png_conform", __dir__)
+     super("png_conform", command, options)
+   end
+
+   # Run png_conform on a single file.
+   #
+   # @param file_path [String] Path to the PNG file
+   # @return [Hash] Performance metrics and validation results
+   def run(file_path)
+     return error_result("File not found: #{file_path}") unless File.exist?(file_path)
+
+     measure_performance do
+       cli_options = build_cli_options
+       cmd = "#{command} check #{cli_options} #{file_path}"
+       result = execute_command(cmd)
+
+       {
+         file: file_path,
+         tool: name,
+         success: result[:exit_code].zero?,
+         exit_code: result[:exit_code],
+         stdout: result[:stdout],
+         stderr: result[:stderr],
+         timed_out: result[:timed_out],
+       }
+     end
+   end
+
+   private
+
+   def build_cli_options
+     cli_opts = options[:cli_options] || []
+
+     # Add --quiet by default to reduce output parsing overhead
+     cli_opts << "--quiet" unless cli_opts.include?("--quiet") ||
+                                  cli_opts.include?("-q") ||
+                                  cli_opts.include?("--verbose")
+
+     cli_opts.join(" ")
+   end
+
+   def error_result(message)
+     {
+       execution_time: 0,
+       memory_used: 0,
+       peak_memory: 0,
+       result: {
+         file: nil,
+         tool: name,
+         success: false,
+         exit_code: -1,
+         stdout: "",
+         stderr: message,
+         timed_out: false,
+       },
+     }
+   end
+ end
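
One caveat worth flagging: the cmd string above interpolates file_path into a shell command without escaping, and PngcheckRunner below does the same. PngSuite fixture names are safe, but a path containing spaces or shell metacharacters would break the command. A hypothetical hardened variant using Ruby's stdlib Shellwords (not what the released code does):

    require "shellwords"

    # Hypothetical escaping sketch; the released runners interpolate unescaped.
    command   = File.expand_path("../../exe/png_conform", __dir__)
    file_path = "spec/fixtures/my file (1).png" # spaces break the unescaped form
    cmd = "#{command} check --quiet #{Shellwords.escape(file_path)}"
    puts cmd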
@@ -0,0 +1,67 @@
+ # frozen_string_literal: true
+
+ require_relative "tool_runner"
+
+ # Runner for the pngcheck validation tool.
+ #
+ # Executes the system pngcheck binary and collects performance metrics.
+ class PngcheckRunner < ToolRunner
+   def initialize(options = {})
+     super("pngcheck", "pngcheck", options)
+   end
+
+   # Run pngcheck on a single file.
+   #
+   # @param file_path [String] Path to the PNG file
+   # @return [Hash] Performance metrics and validation results
+   def run(file_path)
+     return error_result("File not found: #{file_path}") unless File.exist?(file_path)
+     return error_result("pngcheck not found in PATH") unless available?
+
+     measure_performance do
+       cli_options = build_cli_options
+       cmd = "pngcheck #{cli_options} #{file_path}"
+       result = execute_command(cmd)
+
+       {
+         file: file_path,
+         tool: name,
+         success: result[:exit_code].zero?,
+         exit_code: result[:exit_code],
+         stdout: result[:stdout],
+         stderr: result[:stderr],
+         timed_out: result[:timed_out],
+       }
+     end
+   end
+
+   private
+
+   def build_cli_options
+     cli_opts = options[:cli_options] || []
+
+     # Add -q (quiet) by default for consistent comparison
+     cli_opts << "-q" unless cli_opts.include?("-q") ||
+                             cli_opts.include?("-v") ||
+                             cli_opts.include?("-vv")
+
+     cli_opts.join(" ")
+   end
+
+   def error_result(message)
+     {
+       execution_time: 0,
+       memory_used: 0,
+       peak_memory: 0,
+       result: {
+         file: nil,
+         tool: name,
+         success: false,
+         exit_code: -1,
+         stdout: "",
+         stderr: message,
+         timed_out: false,
+       },
+     }
+   end
+ end
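
The shared ToolRunner base class is not included in this diff, so the interface both runners depend on (name, command, options, available?, measure_performance, execute_command) has to be inferred from usage. The sketch below shows one plausible shape, assuming execution_time is reported in milliseconds (as the MetricsCollector throughput math implies); none of it is confirmed by the diff.

    require "open3"

    # Hypothetical sketch of the ToolRunner base class, inferred from how
    # PngConformRunner and PngcheckRunner use it. The real class may differ.
    class ToolRunner
      attr_reader :name, :command, :options

      def initialize(name, command, options = {})
        @name = name
        @command = command
        @options = options
      end

      # True if the command is on PATH or is a local executable file.
      def available?
        system("command -v #{command} > /dev/null 2>&1") ||
          (File.exist?(command) && File.executable?(command))
      end

      # Time the block and package its return value under :result.
      # Milliseconds match MetricsCollector's assumptions; memory sampling
      # is stubbed out because it cannot be inferred from this diff.
      def measure_performance
        start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
        result = yield
        elapsed_ms = (Process.clock_gettime(Process::CLOCK_MONOTONIC) - start) * 1000.0
        {
          execution_time: elapsed_ms,
          memory_used: 0,
          peak_memory: 0,
          result: result,
        }
      end

      # Run a shell command and capture its output. The real class presumably
      # enforces config[:timeout]; timeout handling is elided in this sketch.
      def execute_command(cmd)
        stdout, stderr, status = Open3.capture3(cmd)
        { stdout: stdout, stderr: stderr, exit_code: status.exitstatus, timed_out: false }
      end
    end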