png_conform 0.1.1 → 0.1.3

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Files changed (64)
  1. checksums.yaml +4 -4
  2. data/.rubocop_todo.yml +82 -42
  3. data/Gemfile +2 -0
  4. data/README.adoc +3 -2
  5. data/benchmarks/README.adoc +570 -0
  6. data/benchmarks/config/default.yml +35 -0
  7. data/benchmarks/config/full.yml +32 -0
  8. data/benchmarks/config/quick.yml +32 -0
  9. data/benchmarks/direct_validation.rb +18 -0
  10. data/benchmarks/lib/benchmark_runner.rb +204 -0
  11. data/benchmarks/lib/metrics_collector.rb +193 -0
  12. data/benchmarks/lib/png_conform_runner.rb +68 -0
  13. data/benchmarks/lib/pngcheck_runner.rb +67 -0
  14. data/benchmarks/lib/report_generator.rb +301 -0
  15. data/benchmarks/lib/tool_runner.rb +104 -0
  16. data/benchmarks/profile_loading.rb +12 -0
  17. data/benchmarks/profile_validation.rb +18 -0
  18. data/benchmarks/results/.gitkeep +0 -0
  19. data/benchmarks/run_benchmark.rb +159 -0
  20. data/config/validation_profiles.yml +105 -0
  21. data/docs/CHUNK_TYPES.adoc +42 -0
  22. data/examples/README.md +282 -0
  23. data/lib/png_conform/analyzers/comparison_analyzer.rb +41 -7
  24. data/lib/png_conform/analyzers/metrics_analyzer.rb +6 -9
  25. data/lib/png_conform/analyzers/optimization_analyzer.rb +30 -24
  26. data/lib/png_conform/analyzers/resolution_analyzer.rb +31 -32
  27. data/lib/png_conform/cli.rb +12 -0
  28. data/lib/png_conform/commands/check_command.rb +118 -52
  29. data/lib/png_conform/configuration.rb +147 -0
  30. data/lib/png_conform/container.rb +113 -0
  31. data/lib/png_conform/models/decoded_chunk_data.rb +33 -0
  32. data/lib/png_conform/models/validation_result.rb +30 -4
  33. data/lib/png_conform/pipelines/pipeline_result.rb +39 -0
  34. data/lib/png_conform/pipelines/stages/analysis_stage.rb +35 -0
  35. data/lib/png_conform/pipelines/stages/base_stage.rb +23 -0
  36. data/lib/png_conform/pipelines/stages/chunk_validation_stage.rb +74 -0
  37. data/lib/png_conform/pipelines/stages/sequence_validation_stage.rb +77 -0
  38. data/lib/png_conform/pipelines/stages/signature_validation_stage.rb +41 -0
  39. data/lib/png_conform/pipelines/validation_pipeline.rb +90 -0
  40. data/lib/png_conform/readers/full_load_reader.rb +13 -4
  41. data/lib/png_conform/readers/streaming_reader.rb +27 -2
  42. data/lib/png_conform/reporters/color_reporter.rb +17 -14
  43. data/lib/png_conform/reporters/reporter_factory.rb +18 -11
  44. data/lib/png_conform/reporters/visual_elements.rb +22 -16
  45. data/lib/png_conform/services/analysis_manager.rb +120 -0
  46. data/lib/png_conform/services/chunk_processor.rb +195 -0
  47. data/lib/png_conform/services/file_signature.rb +226 -0
  48. data/lib/png_conform/services/file_strategy.rb +78 -0
  49. data/lib/png_conform/services/lru_cache.rb +170 -0
  50. data/lib/png_conform/services/parallel_validator.rb +118 -0
  51. data/lib/png_conform/services/profile_manager.rb +41 -12
  52. data/lib/png_conform/services/result_builder.rb +299 -0
  53. data/lib/png_conform/services/validation_cache.rb +210 -0
  54. data/lib/png_conform/services/validation_orchestrator.rb +188 -0
  55. data/lib/png_conform/services/validation_service.rb +82 -321
  56. data/lib/png_conform/services/validator_pool.rb +142 -0
  57. data/lib/png_conform/utils/colorizer.rb +149 -0
  58. data/lib/png_conform/validators/ancillary/idot_validator.rb +102 -0
  59. data/lib/png_conform/validators/chunk_registry.rb +143 -128
  60. data/lib/png_conform/validators/streaming_idat_validator.rb +123 -0
  61. data/lib/png_conform/version.rb +1 -1
  62. data/lib/png_conform.rb +7 -46
  63. data/png_conform.gemspec +1 -0
  64. metadata +55 -2
data/benchmarks/lib/report_generator.rb
@@ -0,0 +1,301 @@
+ # frozen_string_literal: true
+
+ require "json"
+ require "csv"
+
+ # Generates benchmark reports in multiple formats.
+ #
+ # Supports text, JSON, CSV, and Markdown output formats with
+ # detailed performance comparisons and statistics.
+ class ReportGenerator
+   attr_reader :metrics_collector, :config
+
+   def initialize(metrics_collector, config = {})
+     @metrics_collector = metrics_collector
+     @config = config
+   end
+
+   # Generate report in specified format.
+   #
+   # @param format [String] Output format: text, json, csv, markdown
+   # @return [String] Formatted report
+   def generate(format = "text")
+     case format.to_s.downcase
+     when "json"
+       generate_json
+     when "csv"
+       generate_csv
+     when "markdown", "md"
+       generate_markdown
+     else
+       generate_text
+     end
+   end
+
+   private
+
+   # Generate text report with tables and colors.
+   def generate_text
+     summary = metrics_collector.summary
+     tools = summary[:tools]
+
+     return "No benchmark data available.\n" if tools.empty?
+
+     output = []
+     output << "=" * 80
+     output << "PNG Validation Tool Benchmark Comparison"
+     output << "=" * 80
+     output << ""
+     output << "Configuration:"
+     output << " Files tested: #{summary[:files_tested]} PNG files"
+     output << " Total runs: #{summary[:total_runs]}"
+     output << ""
+
+     # Tool availability
+     output << "Tools:"
+     tools.each do |tool|
+       stats = metrics_collector.calculate_statistics(tool)
+       output << " #{tool}: #{stats[:successful_runs]}/#{stats[:total_runs]} successful"
+     end
+     output << ""
+
+     # Performance comparison (if we have 2 tools)
+     if tools.size == 2
+       comparison = metrics_collector.compare_tools(tools[0], tools[1])
+       output << "-" * 80
+       output << "PERFORMANCE SUMMARY"
+       output << "-" * 80
+       output << ""
+       output << format_comparison_table(comparison)
+       output << ""
+       output << format_winner_summary(comparison)
+       output << ""
+     end
+
+     # Detailed statistics per tool
+     output << "-" * 80
+     output << "DETAILED STATISTICS"
+     output << "-" * 80
+     output << ""
+
+     tools.each do |tool|
+       stats = metrics_collector.calculate_statistics(tool)
+       output << format_tool_statistics(stats)
+       output << ""
+     end
+
+     output.join("\n")
+   end
+
+   # Generate JSON report.
+   def generate_json
+     summary = metrics_collector.summary
+     tools = summary[:tools]
+
+     data = {
+       benchmark_info: {
+         timestamp: Time.now.iso8601,
+         files_tested: summary[:files_tested],
+         total_runs: summary[:total_runs],
+         tools: tools,
+       },
+       tool_statistics: summary[:tool_statistics],
+       raw_data: config[:include_raw_data] ? metrics_collector.export_raw_data : nil,
+     }.compact
+
+     # Add comparison if we have 2 tools
+     if tools.size == 2
+       data[:comparison] = metrics_collector.compare_tools(tools[0], tools[1])
+     end
+
+     JSON.pretty_generate(data)
+   end
+
+   # Generate CSV report.
+   def generate_csv
+     runs = metrics_collector.export_raw_data
+
+     CSV.generate do |csv|
+       csv << ["Tool", "File", "Execution Time (ms)", "Peak Memory (MB)",
+               "Success", "Exit Code", "Timed Out", "Timestamp"]
+
+       runs.each do |run|
+         csv << [
+           run[:tool],
+           run[:file],
+           run[:execution_time],
+           run[:peak_memory],
+           run[:success],
+           run[:exit_code],
+           run[:timed_out],
+           run[:timestamp].iso8601,
+         ]
+       end
+     end
+   end
+
+   # Generate Markdown report.
+   def generate_markdown
+     summary = metrics_collector.summary
+     tools = summary[:tools]
+
+     return "# No benchmark data available\n" if tools.empty?
+
+     output = []
+     output << "# PNG Validation Tool Benchmark Comparison"
+     output << ""
+     output << "## Configuration"
+     output << ""
+     output << "- **Files tested**: #{summary[:files_tested]} PNG files"
+     output << "- **Total runs**: #{summary[:total_runs]}"
+     output << "- **Tools**: #{tools.join(', ')}"
+     output << ""
+
+     # Performance comparison
+     if tools.size == 2
+       comparison = metrics_collector.compare_tools(tools[0], tools[1])
+       output << "## Performance Summary"
+       output << ""
+       output << format_markdown_comparison(comparison)
+       output << ""
+     end
+
+     # Statistics per tool
+     output << "## Detailed Statistics"
+     output << ""
+
+     tools.each do |tool|
+       stats = metrics_collector.calculate_statistics(tool)
+       output << format_markdown_statistics(stats)
+       output << ""
+     end
+
+     output.join("\n")
+   end
+
+   # Format comparison table for text output.
+   def format_comparison_table(comparison)
+     lines = []
+     lines << sprintf("%-15s %12s %12s %12s %8s",
+                      "Tool", "Avg Time", "Files/sec", "Peak Memory", "Winner")
+     lines << "-" * 80
+
+     [comparison[:tool1], comparison[:tool2]].each do |tool|
+       stats = comparison[:stats][tool]
+       is_winner = tool == comparison[:faster_tool]
+
+       # Handle nil throughput gracefully
+       files_per_sec = stats[:throughput]&.[](:files_per_second) || 0.0
+
+       lines << sprintf("%-15s %10.1fms %10.1f/s %10.1f MB %8s",
+                        tool,
+                        stats[:execution_time][:mean],
+                        files_per_sec,
+                        stats[:memory][:mean],
+                        is_winner ? "✓" : "")
+     end
+
+     lines.join("\n")
+   end
+
+   # Format winner summary.
+   def format_winner_summary(comparison)
+     lines = []
+     lines << "Performance Difference:"
+     lines << " #{comparison[:faster_tool]} is #{comparison[:time_multiplier]}x faster " \
+              "(#{comparison[:time_difference_percent]}% faster)"
+     lines << " #{comparison[:memory_efficient_tool]} uses #{comparison[:memory_multiplier]}x less memory " \
+              "(#{comparison[:memory_difference_percent]}% less)"
+     lines.join("\n")
+   end
+
+   # Format tool statistics for text output.
+   def format_tool_statistics(stats)
+     lines = []
+     lines << "#{stats[:tool]}:"
+     lines << " Runs: #{stats[:successful_runs]}/#{stats[:total_runs]} successful"
+     lines << " Timeouts: #{stats[:timeouts]}" if stats[:timeouts].positive?
+     lines << ""
+     lines << " Execution Time:"
+     lines << " Mean: #{stats[:execution_time][:mean]}ms"
+     lines << " Median: #{stats[:execution_time][:median]}ms"
+     lines << " Std Dev: #{stats[:execution_time][:std_dev]}ms"
+     lines << " Min: #{stats[:execution_time][:min]}ms"
+     lines << " Max: #{stats[:execution_time][:max]}ms"
+     lines << ""
+     lines << " Memory Usage:"
+     lines << " Mean: #{stats[:memory][:mean]} MB"
+     lines << " Median: #{stats[:memory][:median]} MB"
+     lines << " Min: #{stats[:memory][:min]} MB"
+     lines << " Max: #{stats[:memory][:max]} MB"
+
+     # Handle nil throughput gracefully
+     if stats[:throughput]
+       lines << ""
+       lines << " Throughput:"
+       lines << " Files/sec: #{stats[:throughput][:files_per_second]}"
+       lines << " Time/file: #{stats[:throughput][:avg_time_per_file]}ms"
+     end
+
+     lines.join("\n")
+   end
+
+   # Format comparison for markdown.
+   def format_markdown_comparison(comparison)
+     lines = []
+     lines << "| Metric | #{comparison[:tool1]} | #{comparison[:tool2]} | Winner |"
+     lines << "|--------|----------|----------|--------|"
+
+     stats1 = comparison[:stats][comparison[:tool1]]
+     stats2 = comparison[:stats][comparison[:tool2]]
+
+     # Handle nil throughput gracefully
+     fps1 = stats1[:throughput]&.[](:files_per_second) || "N/A"
+     fps2 = stats2[:throughput]&.[](:files_per_second) || "N/A"
+
+     lines << "| Avg Time | #{stats1[:execution_time][:mean]}ms | " \
+              "#{stats2[:execution_time][:mean]}ms | " \
+              "#{comparison[:faster_tool]} |"
+     lines << "| Files/sec | #{fps1} | #{fps2} | " \
+              "#{comparison[:faster_tool]} |"
+     lines << "| Peak Memory | #{stats1[:memory][:mean]} MB | " \
+              "#{stats2[:memory][:mean]} MB | " \
+              "#{comparison[:memory_efficient_tool]} |"
+     lines << ""
+     lines << "**Performance:** #{comparison[:faster_tool]} is " \
+              "#{comparison[:time_multiplier]}x faster " \
+              "(#{comparison[:time_difference_percent]}% improvement)"
+     lines << ""
+     lines << "**Memory:** #{comparison[:memory_efficient_tool]} uses " \
+              "#{comparison[:memory_multiplier]}x less memory " \
+              "(#{comparison[:memory_difference_percent]}% improvement)"
+
+     lines.join("\n")
+   end
+
+   # Format statistics for markdown.
+   def format_markdown_statistics(stats)
+     lines = []
+     lines << "### #{stats[:tool]}"
+     lines << ""
+     lines << "- **Successful runs**: #{stats[:successful_runs]}/#{stats[:total_runs]}"
+     lines << "- **Timeouts**: #{stats[:timeouts]}" if stats[:timeouts].positive?
+     lines << ""
+     lines << "**Execution Time:**"
+     lines << "- Mean: #{stats[:execution_time][:mean]}ms"
+     lines << "- Median: #{stats[:execution_time][:median]}ms"
+     lines << "- Range: #{stats[:execution_time][:min]}ms - #{stats[:execution_time][:max]}ms"
+     lines << ""
+     lines << "**Memory Usage:**"
+     lines << "- Mean: #{stats[:memory][:mean]} MB"
+     lines << "- Range: #{stats[:memory][:min]} MB - #{stats[:memory][:max]} MB"
+
+     # Handle nil throughput gracefully
+     if stats[:throughput]
+       lines << ""
+       lines << "**Throughput:** #{stats[:throughput][:files_per_second]} files/sec"
+     end
+
+     lines.join("\n")
+   end
+ end
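ReportGenerator only relies on the collector interface exercised above (summary, calculate_statistics, compare_tools, export_raw_data), so it can be driven in isolation. The sketch below is illustrative only and is not part of the gem's benchmark suite; the stub collector and its numbers are invented purely to show the hash shapes the formatter expects.

```ruby
# Hypothetical usage sketch: a stub collector exposing the methods
# ReportGenerator calls. All statistics values here are made up.
require_relative "report_generator" # assumes this file sits next to report_generator.rb

class StubCollector
  def summary
    { tools: ["png_conform"], files_tested: 3, total_runs: 3,
      tool_statistics: { "png_conform" => calculate_statistics("png_conform") } }
  end

  def calculate_statistics(tool)
    { tool: tool, successful_runs: 3, total_runs: 3, timeouts: 0,
      execution_time: { mean: 12.4, median: 11.9, std_dev: 1.2, min: 11.0, max: 14.1 },
      memory: { mean: 24.0, median: 23.8, min: 23.1, max: 25.2 },
      throughput: { files_per_second: 80.6, avg_time_per_file: 12.4 } }
  end

  def export_raw_data
    []
  end
end

generator = ReportGenerator.new(StubCollector.new)
puts generator.generate("markdown")
```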
data/benchmarks/lib/tool_runner.rb
@@ -0,0 +1,104 @@
+ # frozen_string_literal: true
+
+ # Base class for running validation tools and measuring their performance.
+ #
+ # Provides a common interface for executing different PNG validation tools
+ # (png_conform, pngcheck) and collecting performance metrics.
+ class ToolRunner
+   attr_reader :name, :command, :options
+
+   # @param name [String] The name of the tool
+   # @param command [String] The command to execute
+   # @param options [Hash] Tool-specific options
+   def initialize(name, command, options = {})
+     @name = name
+     @command = command
+     @options = options
+   end
+
+   # Check if the tool is available on the system.
+   #
+   # @return [Boolean] True if tool is available
+   def available?
+     system("which #{command} > /dev/null 2>&1")
+   end
+
+   # Run the tool on a single file and measure performance.
+   #
+   # @param file_path [String] Path to the PNG file
+   # @return [Hash] Performance metrics and results
+   def run(file_path)
+     raise NotImplementedError, "Subclasses must implement #run"
+   end
+
+   # Run the tool on multiple files.
+   #
+   # @param file_paths [Array<String>] Paths to PNG files
+   # @return [Array<Hash>] Array of performance metrics
+   def run_batch(file_paths)
+     file_paths.map { |path| run(path) }
+   end
+
+   protected
+
+   # Measure execution time and memory usage.
+   #
+   # @yield Block to measure
+   # @return [Hash] Performance metrics
+   def measure_performance
+     start_time = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+     start_memory = get_memory_usage
+
+     result = yield
+
+     end_time = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+     end_memory = get_memory_usage
+
+     {
+       execution_time: ((end_time - start_time) * 1000).round(3), # milliseconds
+       memory_used: end_memory - start_memory,
+       peak_memory: end_memory,
+       result: result,
+     }
+   end
+
+   # Get current memory usage in MB.
+   #
+   # @return [Float] Memory usage in megabytes
+   def get_memory_usage
+     # Get RSS (Resident Set Size) in KB, convert to MB
+     rss_kb = `ps -o rss= -p #{Process.pid}`.strip.to_i
+     (rss_kb / 1024.0).round(2)
+   end
+
+   # Execute a command and capture output.
+   #
+   # @param cmd [String] Command to execute
+   # @param timeout [Integer] Timeout in seconds
+   # @return [Hash] Command result with stdout, stderr, and exit status
+   def execute_command(cmd, timeout: 30)
+     require "open3"
+     require "timeout"
+
+     stdout, stderr, status = nil
+     begin
+       Timeout.timeout(timeout) do
+         stdout, stderr, status = Open3.capture3(cmd)
+       end
+     rescue Timeout::Error
+       return {
+         stdout: "",
+         stderr: "Command timed out after #{timeout} seconds",
+         exit_code: -1,
+         timed_out: true,
+       }
+     end
+
+     {
+       stdout: stdout,
+       stderr: stderr,
+       exit_code: status.exitstatus,
+       timed_out: false,
+     }
+   end
+ end
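The gem ships concrete runners (png_conform_runner.rb and pngcheck_runner.rb) that are not shown in this section. Purely to illustrate how the protected helpers compose, a hypothetical subclass might implement #run as below; the returned keys mirror the columns the CSV report writes, but the real runners may well differ.

```ruby
# Hypothetical subclass sketch, not the gem's actual runner implementation.
require_relative "tool_runner"

class ExampleRunner < ToolRunner
  def run(file_path)
    # Wrap the external command in measure_performance to capture timing and RSS.
    metrics = measure_performance do
      execute_command("#{command} #{file_path}", timeout: options[:timeout] || 30)
    end

    cmd = metrics[:result]
    {
      tool: name,
      file: file_path,
      execution_time: metrics[:execution_time],
      peak_memory: metrics[:peak_memory],
      success: cmd[:exit_code] == 0,
      exit_code: cmd[:exit_code],
      timed_out: cmd[:timed_out],
      timestamp: Time.now,
    }
  end
end

runner = ExampleRunner.new("pngcheck", "pngcheck")
p runner.run("spec/fixtures/pngsuite/compression/z00n2c08.png") if runner.available?
```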
data/benchmarks/profile_loading.rb
@@ -0,0 +1,12 @@
+ #!/usr/bin/env ruby
+ # frozen_string_literal: true
+
+ require "ruby-prof"
+
+ # Profile just the loading of png_conform
+ result = RubyProf::Profile.profile do
+   require_relative "../lib/png_conform"
+ end
+
+ printer = RubyProf::FlatPrinter.new(result)
+ printer.print($stdout, min_percent: 2)
data/benchmarks/profile_validation.rb
@@ -0,0 +1,18 @@
+ #!/usr/bin/env ruby
+ # frozen_string_literal: true
+
+ require "ruby-prof"
+ require_relative "../lib/png_conform"
+
+ # Pre-load everything
+ PngConform::Services::ValidationService
+
+ # Now profile just the validation work
+ file = ARGV[0] || "spec/fixtures/pngsuite/compression/z00n2c08.png"
+
+ result = RubyProf.profile do
+   PngConform::Services::ValidationService.validate_file(file)
+ end
+
+ printer = RubyProf::FlatPrinter.new(result)
+ printer.print($stdout, min_percent: 1)
data/benchmarks/results/.gitkeep
File without changes
data/benchmarks/run_benchmark.rb
@@ -0,0 +1,159 @@
+ #!/usr/bin/env ruby
+ # frozen_string_literal: true
+
+ require "optparse"
+ require "yaml"
+ require_relative "lib/benchmark_runner"
+
+ # Parse command-line options
+ options = {
+   config_file: nil,
+   pattern: nil,
+   format: nil,
+   output: nil,
+   iterations: nil,
+   warmup: nil,
+   limit: nil,
+   tools: [],
+   verbose: true,
+ }
+
+ OptionParser.new do |opts|
+   opts.banner = "Usage: run_benchmark.rb [options]"
+
+   opts.separator ""
+   opts.separator "Configuration:"
+
+   opts.on("-c", "--config FILE", "Load configuration from YAML file") do |file|
+     options[:config_file] = file
+   end
+
+   opts.separator ""
+   opts.separator "File Selection:"
+
+   opts.on("-p", "--pattern PATTERN",
+           "File pattern (e.g., 'spec/fixtures/**/*.png')") do |pattern|
+     options[:pattern] = pattern
+   end
+
+   opts.on("-l", "--limit N", Integer, "Limit number of files to test") do |n|
+     options[:limit] = n
+   end
+
+   opts.separator ""
+   opts.separator "Execution:"
+
+   opts.on("-i", "--iterations N", Integer,
+           "Number of iterations per file (default: 3)") do |n|
+     options[:iterations] = n
+   end
+
+   opts.on("-w", "--warmup N", Integer,
+           "Number of warmup runs (default: 1)") do |n|
+     options[:warmup] = n
+   end
+
+   opts.on("-t", "--tool TOOL",
+           "Enable specific tool (png_conform, pngcheck)") do |tool|
+     options[:tools] << tool.to_sym
+   end
+
+   opts.separator ""
+   opts.separator "Output:"
+
+   opts.on("-f", "--format FORMAT",
+           "Output format: text, json, csv, markdown (default: text)") do |format|
+     options[:format] = format
+   end
+
+   opts.on("-o", "--output FILE",
+           "Write report to file instead of stdout") do |file|
+     options[:output] = file
+   end
+
+   opts.on("-q", "--quiet", "Suppress progress output") do
+     options[:verbose] = false
+   end
+
+   opts.separator ""
+   opts.separator "Other:"
+
+   opts.on("-h", "--help", "Show this help message") do
+     puts opts
+     exit
+   end
+
+   opts.on("-v", "--version", "Show version") do
+     puts "PNG Benchmark Runner v1.0.0"
+     exit
+   end
+ end.parse!
+
+ # Load configuration
+ config = {}
+
+ if options[:config_file]
+   unless File.exist?(options[:config_file])
+     puts "Error: Configuration file not found: #{options[:config_file]}"
+     exit 1
+   end
+
+   config = YAML.load_file(options[:config_file], symbolize_names: true)
+ end
+
+ # Apply command-line overrides
+ if options[:pattern]
+   config[:test_files] ||= {}
+   config[:test_files][:pattern] = options[:pattern]
+ end
+
+ if options[:limit]
+   config[:test_files] ||= {}
+   config[:test_files][:limit] = options[:limit]
+ end
+
+ config[:iterations] = options[:iterations] if options[:iterations]
+ config[:warmup_runs] = options[:warmup] if options[:warmup]
+
+ if options[:format]
+   config[:output] ||= {}
+   config[:output][:format] = options[:format]
+ end
+
+ if options[:output]
+   config[:output] ||= {}
+   config[:output][:file] = options[:output]
+ end
+
+ config[:output] ||= {}
+ config[:output][:verbose] = options[:verbose]
+
+ # Configure tools if specified
+ if options[:tools].any?
+   config[:tools] ||= {}
+
+   # Disable all tools by default if specific tools are requested
+   config[:tools][:png_conform] = { enabled: false, options: {} }
+   config[:tools][:pngcheck] = { enabled: false, options: {} }
+
+   # Enable requested tools
+   options[:tools].each do |tool|
+     if config[:tools].key?(tool)
+       config[:tools][tool][:enabled] = true
+     else
+       puts "Warning: Unknown tool '#{tool}', ignoring"
+     end
+   end
+ end
+
+ # Run the benchmark
+ begin
+   runner = BenchmarkRunner.new(config)
+   runner.run
+
+   exit 0
+ rescue StandardError => e
+   puts "\nError: #{e.message}"
+   puts e.backtrace.join("\n") if ENV["DEBUG"]
+   exit 1
+ end
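The overrides above all funnel into a single symbol-keyed hash that is handed to BenchmarkRunner.new. As a rough guide to the resulting shape, the sketch below uses the key names taken from the override code; the values are invented for illustration and are not the contents of the shipped benchmarks/config files.

```ruby
# Illustrative only: roughly what reaches BenchmarkRunner.new after the
# CLI overrides are applied. Values are invented for this example.
config = {
  test_files: { pattern: "spec/fixtures/**/*.png", limit: 50 },
  iterations: 3,
  warmup_runs: 1,
  output: { format: "markdown", file: "results/report.md", verbose: true },
  tools: {
    png_conform: { enabled: true, options: {} },
    pngcheck: { enabled: false, options: {} },
  },
}
```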
data/config/validation_profiles.yml
@@ -0,0 +1,105 @@
+ # PNG Conform Validation Profiles
+ #
+ # This file defines validation profiles for PNG files. Each profile specifies
+ # which chunks are required, optional, or prohibited.
+ #
+ # Profile structure:
+ #   name: Display name
+ #   description: Human-readable description
+ #   required_chunks: List of chunk types that MUST be present
+ #   optional_chunks: List of chunk types that MAY be present (use "*" for all)
+ #   prohibited_chunks: List of chunk types that MUST NOT be present
+ #
+
+ minimal:
+   name: "Minimal validation"
+   description: "Only critical chunks required"
+   required_chunks:
+     - IHDR
+     - IDAT
+     - IEND
+   optional_chunks: "*"
+   prohibited_chunks: []
+
+ web:
+   name: "Web-optimized"
+   description: "Recommended for web images"
+   required_chunks:
+     - IHDR
+     - IDAT
+     - IEND
+     - gAMA
+     - sRGB
+   optional_chunks:
+     - tRNS
+     - bKGD
+     - tEXt
+     - iTXt
+     - zTXt
+     - pHYs
+   prohibited_chunks: []
+
+ print:
+   name: "Print-ready"
+   description: "For high-quality print output"
+   required_chunks:
+     - IHDR
+     - IDAT
+     - IEND
+     - pHYs
+   optional_chunks:
+     - gAMA
+     - cHRM
+     - sRGB
+     - iCCP
+     - tRNS
+     - bKGD
+   prohibited_chunks: []
+
+ archive:
+   name: "Archive quality"
+   description: "For long-term image storage"
+   required_chunks:
+     - IHDR
+     - IDAT
+     - IEND
+   optional_chunks:
+     - gAMA
+     - cHRM
+     - sRGB
+     - iCCP
+     - tEXt
+     - iTXt
+     - zTXt
+     - tIME
+   prohibited_chunks: []
+
+ strict:
+   name: "Strict PNG specification"
+   description: "Full compliance with PNG specification"
+   required_chunks:
+     - IHDR
+     - IDAT
+     - IEND
+   optional_chunks:
+     - PLTE
+     - gAMA
+     - cHRM
+     - sRGB
+     - iCCP
+     - tRNS
+     - bKGD
+     - hIST
+     - tEXt
+     - zTXt
+     - iTXt
+     - pHYs
+     - sPLT
+     - sBIT
+     - oFFs
+     - pCAL
+     - sCAL
+     - tIME
+     - cICP
+     - mDCv
+   prohibited_chunks: []
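How these profiles are consumed is the job of ProfileManager (data/lib/png_conform/services/profile_manager.rb, not shown in this section). Purely to illustrate the semantics described in the header comment, a profile entry could be checked against a file's chunk list along these lines; this is a sketch, not the gem's API, and the YAML path is assumed relative to the gem's data directory.

```ruby
# Illustrative sketch only, not the gem's ProfileManager API.
require "yaml"

profiles = YAML.load_file("config/validation_profiles.yml")
profile = profiles.fetch("web")

chunks_in_file = %w[IHDR gAMA sRGB IDAT IEND]

missing    = profile["required_chunks"] - chunks_in_file
prohibited = chunks_in_file & profile["prohibited_chunks"]
unexpected =
  if profile["optional_chunks"] == "*"
    []
  else
    chunks_in_file - profile["required_chunks"] - profile["optional_chunks"]
  end

puts({ missing: missing, prohibited: prohibited, unexpected: unexpected }.inspect)
```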