png_conform 0.1.1 → 0.1.2

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,301 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "json"
4
+ require "csv"
+ require "time" # Time#iso8601 comes from the "time" stdlib extension
5
+
6
+ # Generates benchmark reports in multiple formats.
7
+ #
8
+ # Supports text, JSON, CSV, and Markdown output formats with
9
+ # detailed performance comparisons and statistics.
10
+ class ReportGenerator
11
+ attr_reader :metrics_collector, :config
12
+
13
+ def initialize(metrics_collector, config = {})
14
+ @metrics_collector = metrics_collector
15
+ @config = config
16
+ end
17
+
18
+ # Generate report in specified format.
19
+ #
20
+ # @param format [String] Output format: text, json, csv, markdown
21
+ # @return [String] Formatted report
22
+ def generate(format = "text")
23
+ case format.to_s.downcase
24
+ when "json"
25
+ generate_json
26
+ when "csv"
27
+ generate_csv
28
+ when "markdown", "md"
29
+ generate_markdown
30
+ else
31
+ generate_text
32
+ end
33
+ end
34
+
35
+ private
36
+
37
+ # Generate text report with tables and colors.
38
+ def generate_text
39
+ summary = metrics_collector.summary
40
+ tools = summary[:tools]
41
+
42
+ return "No benchmark data available.\n" if tools.empty?
43
+
44
+ output = []
45
+ output << "=" * 80
46
+ output << "PNG Validation Tool Benchmark Comparison"
47
+ output << "=" * 80
48
+ output << ""
49
+ output << "Configuration:"
50
+ output << " Files tested: #{summary[:files_tested]} PNG files"
51
+ output << " Total runs: #{summary[:total_runs]}"
52
+ output << ""
53
+
54
+ # Tool availability
55
+ output << "Tools:"
56
+ tools.each do |tool|
57
+ stats = metrics_collector.calculate_statistics(tool)
58
+ output << " #{tool}: #{stats[:successful_runs]}/#{stats[:total_runs]} successful"
59
+ end
60
+ output << ""
61
+
62
+ # Performance comparison (if we have 2 tools)
63
+ if tools.size == 2
64
+ comparison = metrics_collector.compare_tools(tools[0], tools[1])
65
+ output << "-" * 80
66
+ output << "PERFORMANCE SUMMARY"
67
+ output << "-" * 80
68
+ output << ""
69
+ output << format_comparison_table(comparison)
70
+ output << ""
71
+ output << format_winner_summary(comparison)
72
+ output << ""
73
+ end
74
+
75
+ # Detailed statistics per tool
76
+ output << "-" * 80
77
+ output << "DETAILED STATISTICS"
78
+ output << "-" * 80
79
+ output << ""
80
+
81
+ tools.each do |tool|
82
+ stats = metrics_collector.calculate_statistics(tool)
83
+ output << format_tool_statistics(stats)
84
+ output << ""
85
+ end
86
+
87
+ output.join("\n")
88
+ end
89
+
90
+ # Generate JSON report.
91
+ def generate_json
92
+ summary = metrics_collector.summary
93
+ tools = summary[:tools]
94
+
95
+ data = {
96
+ benchmark_info: {
97
+ timestamp: Time.now.iso8601,
98
+ files_tested: summary[:files_tested],
99
+ total_runs: summary[:total_runs],
100
+ tools: tools,
101
+ },
102
+ tool_statistics: summary[:tool_statistics],
103
+ raw_data: config[:include_raw_data] ? metrics_collector.export_raw_data : nil,
104
+ }.compact
105
+
106
+ # Add comparison if we have 2 tools
107
+ if tools.size == 2
108
+ data[:comparison] = metrics_collector.compare_tools(tools[0], tools[1])
109
+ end
110
+
111
+ JSON.pretty_generate(data)
112
+ end
113
+
114
+ # Generate CSV report.
115
+ def generate_csv
116
+ runs = metrics_collector.export_raw_data
117
+
118
+ CSV.generate do |csv|
119
+ csv << ["Tool", "File", "Execution Time (ms)", "Peak Memory (MB)",
120
+ "Success", "Exit Code", "Timed Out", "Timestamp"]
121
+
122
+ runs.each do |run|
123
+ csv << [
124
+ run[:tool],
125
+ run[:file],
126
+ run[:execution_time],
127
+ run[:peak_memory],
128
+ run[:success],
129
+ run[:exit_code],
130
+ run[:timed_out],
131
+ run[:timestamp].iso8601,
132
+ ]
133
+ end
134
+ end
135
+ end
136
+
137
+ # Generate Markdown report.
138
+ def generate_markdown
139
+ summary = metrics_collector.summary
140
+ tools = summary[:tools]
141
+
142
+ return "# No benchmark data available\n" if tools.empty?
143
+
144
+ output = []
145
+ output << "# PNG Validation Tool Benchmark Comparison"
146
+ output << ""
147
+ output << "## Configuration"
148
+ output << ""
149
+ output << "- **Files tested**: #{summary[:files_tested]} PNG files"
150
+ output << "- **Total runs**: #{summary[:total_runs]}"
151
+ output << "- **Tools**: #{tools.join(', ')}"
152
+ output << ""
153
+
154
+ # Performance comparison
155
+ if tools.size == 2
156
+ comparison = metrics_collector.compare_tools(tools[0], tools[1])
157
+ output << "## Performance Summary"
158
+ output << ""
159
+ output << format_markdown_comparison(comparison)
160
+ output << ""
161
+ end
162
+
163
+ # Statistics per tool
164
+ output << "## Detailed Statistics"
165
+ output << ""
166
+
167
+ tools.each do |tool|
168
+ stats = metrics_collector.calculate_statistics(tool)
169
+ output << format_markdown_statistics(stats)
170
+ output << ""
171
+ end
172
+
173
+ output.join("\n")
174
+ end
175
+
176
+ # Format comparison table for text output.
177
+ def format_comparison_table(comparison)
178
+ lines = []
179
+ lines << sprintf("%-15s %12s %12s %12s %8s",
180
+ "Tool", "Avg Time", "Files/sec", "Peak Memory", "Winner")
181
+ lines << "-" * 80
182
+
183
+ [comparison[:tool1], comparison[:tool2]].each do |tool|
184
+ stats = comparison[:stats][tool]
185
+ is_winner = tool == comparison[:faster_tool]
186
+
187
+ # Handle nil throughput gracefully
188
+ files_per_sec = stats.dig(:throughput, :files_per_second) || 0.0
189
+
190
+ lines << sprintf("%-15s %10.1fms %10.1f/s %9.1f MB %8s",
191
+ tool,
192
+ stats[:execution_time][:mean],
193
+ files_per_sec,
194
+ stats[:memory][:mean],
195
+ is_winner ? "✓" : "")
196
+ end
197
+
198
+ lines.join("\n")
199
+ end
200
+
201
+ # Format winner summary.
202
+ def format_winner_summary(comparison)
203
+ lines = []
204
+ lines << "Performance Difference:"
205
+ lines << " #{comparison[:faster_tool]} is #{comparison[:time_multiplier]}x faster " \
206
+ "(#{comparison[:time_difference_percent]}% faster)"
207
+ lines << " #{comparison[:memory_efficient_tool]} uses #{comparison[:memory_multiplier]}x less memory " \
208
+ "(#{comparison[:memory_difference_percent]}% less)"
209
+ lines.join("\n")
210
+ end
211
+
212
+ # Format tool statistics for text output.
213
+ def format_tool_statistics(stats)
214
+ lines = []
215
+ lines << "#{stats[:tool]}:"
216
+ lines << " Runs: #{stats[:successful_runs]}/#{stats[:total_runs]} successful"
217
+ lines << " Timeouts: #{stats[:timeouts]}" if stats[:timeouts].positive?
218
+ lines << ""
219
+ lines << " Execution Time:"
220
+ lines << " Mean: #{stats[:execution_time][:mean]}ms"
221
+ lines << " Median: #{stats[:execution_time][:median]}ms"
222
+ lines << " Std Dev: #{stats[:execution_time][:std_dev]}ms"
223
+ lines << " Min: #{stats[:execution_time][:min]}ms"
224
+ lines << " Max: #{stats[:execution_time][:max]}ms"
225
+ lines << ""
226
+ lines << " Memory Usage:"
227
+ lines << " Mean: #{stats[:memory][:mean]} MB"
228
+ lines << " Median: #{stats[:memory][:median]} MB"
229
+ lines << " Min: #{stats[:memory][:min]} MB"
230
+ lines << " Max: #{stats[:memory][:max]} MB"
231
+
232
+ # Handle nil throughput gracefully
233
+ if stats[:throughput]
234
+ lines << ""
235
+ lines << " Throughput:"
236
+ lines << " Files/sec: #{stats[:throughput][:files_per_second]}"
237
+ lines << " Time/file: #{stats[:throughput][:avg_time_per_file]}ms"
238
+ end
239
+
240
+ lines.join("\n")
241
+ end
242
+
243
+ # Format comparison for markdown.
244
+ def format_markdown_comparison(comparison)
245
+ lines = []
246
+ lines << "| Metric | #{comparison[:tool1]} | #{comparison[:tool2]} | Winner |"
247
+ lines << "|--------|----------|----------|--------|"
248
+
249
+ stats1 = comparison[:stats][comparison[:tool1]]
250
+ stats2 = comparison[:stats][comparison[:tool2]]
251
+
252
+ # Handle nil throughput gracefully
253
+ fps1 = stats1.dig(:throughput, :files_per_second) || "N/A"
254
+ fps2 = stats2.dig(:throughput, :files_per_second) || "N/A"
255
+
256
+ lines << "| Avg Time | #{stats1[:execution_time][:mean]}ms | " \
257
+ "#{stats2[:execution_time][:mean]}ms | " \
258
+ "#{comparison[:faster_tool]} |"
259
+ lines << "| Files/sec | #{fps1} | #{fps2} | " \
260
+ "#{comparison[:faster_tool]} |"
261
+ lines << "| Peak Memory | #{stats1[:memory][:mean]} MB | " \
262
+ "#{stats2[:memory][:mean]} MB | " \
263
+ "#{comparison[:memory_efficient_tool]} |"
264
+ lines << ""
265
+ lines << "**Performance:** #{comparison[:faster_tool]} is " \
266
+ "#{comparison[:time_multiplier]}x faster " \
267
+ "(#{comparison[:time_difference_percent]}% improvement)"
268
+ lines << ""
269
+ lines << "**Memory:** #{comparison[:memory_efficient_tool]} uses " \
270
+ "#{comparison[:memory_multiplier]}x less memory " \
271
+ "(#{comparison[:memory_difference_percent]}% improvement)"
272
+
273
+ lines.join("\n")
274
+ end
275
+
276
+ # Format statistics for markdown.
277
+ def format_markdown_statistics(stats)
278
+ lines = []
279
+ lines << "### #{stats[:tool]}"
280
+ lines << ""
281
+ lines << "- **Successful runs**: #{stats[:successful_runs]}/#{stats[:total_runs]}"
282
+ lines << "- **Timeouts**: #{stats[:timeouts]}" if stats[:timeouts].positive?
283
+ lines << ""
284
+ lines << "**Execution Time:**"
285
+ lines << "- Mean: #{stats[:execution_time][:mean]}ms"
286
+ lines << "- Median: #{stats[:execution_time][:median]}ms"
287
+ lines << "- Range: #{stats[:execution_time][:min]}ms - #{stats[:execution_time][:max]}ms"
288
+ lines << ""
289
+ lines << "**Memory Usage:**"
290
+ lines << "- Mean: #{stats[:memory][:mean]} MB"
291
+ lines << "- Range: #{stats[:memory][:min]} MB - #{stats[:memory][:max]} MB"
292
+
293
+ # Handle nil throughput gracefully
294
+ if stats[:throughput]
295
+ lines << ""
296
+ lines << "**Throughput:** #{stats[:throughput][:files_per_second]} files/sec"
297
+ end
298
+
299
+ lines.join("\n")
300
+ end
301
+ end
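
As a usage sketch (not code from the package): `ReportGenerator` only needs a collector that responds to the methods it calls, namely `summary`, `calculate_statistics`, `compare_tools`, and `export_raw_data`. The `MetricsCollector` name below is assumed.

```ruby
# Sketch only: MetricsCollector is an assumed collaborator; any object
# responding to #summary, #calculate_statistics, #compare_tools, and
# #export_raw_data will do.
collector = MetricsCollector.new
# ... the benchmark harness records runs into the collector here ...

generator = ReportGenerator.new(collector, { include_raw_data: true })
puts generator.generate("markdown")           # also "text", "json", "csv"
File.write("report.json", generator.generate("json"))
```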
@@ -0,0 +1,104 @@
1
+ # frozen_string_literal: true
2
+
3
+ # Base class for running validation tools and measuring their performance.
4
+ #
5
+ # Provides a common interface for executing different PNG validation tools
6
+ # (png_conform, pngcheck) and collecting performance metrics.
7
+ class ToolRunner
8
+ attr_reader :name, :command, :options
9
+
10
+ # @param name [String] The name of the tool
11
+ # @param command [String] The command to execute
12
+ # @param options [Hash] Tool-specific options
13
+ def initialize(name, command, options = {})
14
+ @name = name
15
+ @command = command
16
+ @options = options
17
+ end
18
+
19
+ # Check if the tool is available on the system.
20
+ #
21
+ # @return [Boolean] True if tool is available
22
+ def available?
23
+ system("which #{command} > /dev/null 2>&1")
24
+ end
25
+
26
+ # Run the tool on a single file and measure performance.
27
+ #
28
+ # @param file_path [String] Path to the PNG file
29
+ # @return [Hash] Performance metrics and results
30
+ def run(file_path)
31
+ raise NotImplementedError, "Subclasses must implement #run"
32
+ end
33
+
34
+ # Run the tool on multiple files.
35
+ #
36
+ # @param file_paths [Array<String>] Paths to PNG files
37
+ # @return [Array<Hash>] Array of performance metrics
38
+ def run_batch(file_paths)
39
+ file_paths.map { |path| run(path) }
40
+ end
41
+
42
+ protected
43
+
44
+ # Measure execution time and memory usage.
45
+ #
46
+ # @yield Block to measure
47
+ # @return [Hash] Performance metrics
48
+ def measure_performance
49
+ start_time = Process.clock_gettime(Process::CLOCK_MONOTONIC)
50
+ start_memory = get_memory_usage
51
+
52
+ result = yield
53
+
54
+ end_time = Process.clock_gettime(Process::CLOCK_MONOTONIC)
55
+ end_memory = get_memory_usage
56
+
57
+ {
58
+ execution_time: ((end_time - start_time) * 1000).round(3), # milliseconds
59
+ memory_used: end_memory - start_memory,
60
+ peak_memory: end_memory,
61
+ result: result,
62
+ }
63
+ end
64
+
65
+ # Get current memory usage of this Ruby process in MB.
+ # (Memory of external tools run via execute_command is not captured here.)
66
+ #
67
+ # @return [Float] Memory usage in megabytes
68
+ def get_memory_usage
69
+ # Get RSS (Resident Set Size) in KB, convert to MB
70
+ rss_kb = `ps -o rss= -p #{Process.pid}`.strip.to_i
71
+ (rss_kb / 1024.0).round(2)
72
+ end
73
+
74
+ # Execute a command and capture output.
75
+ #
76
+ # @param cmd [String] Command to execute
77
+ # @param timeout [Integer] Timeout in seconds
78
+ # @return [Hash] Command result with stdout, stderr, and exit status
79
+ def execute_command(cmd, timeout: 30)
80
+ require "open3"
81
+ require "timeout"
82
+
83
+ stdout = stderr = status = nil
84
+ begin
85
+ # NOTE: Timeout interrupts this thread but does not kill the spawned
+ # process; a hung child can outlive the timeout.
+ Timeout.timeout(timeout) do
86
+ stdout, stderr, status = Open3.capture3(cmd)
87
+ end
88
+ rescue Timeout::Error
89
+ return {
90
+ stdout: "",
91
+ stderr: "Command timed out after #{timeout} seconds",
92
+ exit_code: -1,
93
+ timed_out: true,
94
+ }
95
+ end
96
+
97
+ {
98
+ stdout: stdout,
99
+ stderr: stderr,
100
+ exit_code: status.exitstatus,
101
+ timed_out: false,
102
+ }
103
+ end
104
+ end
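
`ToolRunner#run` is abstract; a concrete runner wires the protected helpers together. A minimal sketch follows; the `PngcheckRunner` name is hypothetical, and the result keys are chosen to match the fields the CSV report reads (`tool`, `file`, `execution_time`, `peak_memory`, `success`, `exit_code`, `timed_out`, `timestamp`).

```ruby
# Hypothetical subclass, not part of the package.
class PngcheckRunner < ToolRunner
  def run(file_path)
    metrics = measure_performance do
      execute_command("#{command} #{file_path}", timeout: options[:timeout] || 30)
    end

    cmd = metrics[:result]
    {
      tool: name,
      file: file_path,
      execution_time: metrics[:execution_time],
      peak_memory: metrics[:peak_memory],
      success: cmd[:exit_code].zero?,
      exit_code: cmd[:exit_code],
      timed_out: cmd[:timed_out],
      timestamp: Time.now,
    }
  end
end

runner = PngcheckRunner.new("pngcheck", "pngcheck")
p runner.run("test.png") if runner.available?
```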
@@ -0,0 +1,12 @@
1
+ #!/usr/bin/env ruby
2
+ # frozen_string_literal: true
3
+
4
+ require "ruby-prof"
5
+
6
+ # Profile just the loading of png_conform
7
+ result = RubyProf::Profile.profile do
8
+ require_relative "../lib/png_conform"
9
+ end
10
+
11
+ printer = RubyProf::FlatPrinter.new(result)
12
+ printer.print($stdout, min_percent: 2)
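
Wall time is only one of ruby-prof's measure modes; allocation counts are often more telling for load cost. A sketch of the same idea, assuming a ruby-prof version (1.x) that accepts `measure_mode:`:

```ruby
#!/usr/bin/env ruby
# frozen_string_literal: true

require "ruby-prof"

# Count object allocations during library load instead of wall time.
profile = RubyProf::Profile.new(measure_mode: RubyProf::ALLOCATIONS)
profile.start
require_relative "../lib/png_conform"
result = profile.stop

RubyProf::FlatPrinter.new(result).print($stdout, min_percent: 2)
```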
@@ -0,0 +1,18 @@
1
+ #!/usr/bin/env ruby
2
+ # frozen_string_literal: true
3
+
4
+ require "ruby-prof"
5
+ require_relative "../lib/png_conform"
6
+
7
+ # Pre-load everything
8
+ PngConform::Services::ValidationService
9
+
10
+ # Now profile just the validation work
11
+ file = ARGV[0] || "spec/fixtures/pngsuite/compression/z00n2c08.png"
12
+
13
+ result = RubyProf.profile do
14
+ PngConform::Services::ValidationService.validate_file(file)
15
+ end
16
+
17
+ printer = RubyProf::FlatPrinter.new(result)
18
+ printer.print($stdout, min_percent: 1)
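
`FlatPrinter` is one of several printers bundled with ruby-prof; the same `result` object from the script above can be rendered as a call graph instead. A sketch:

```ruby
# Plain-text call graph on stdout.
RubyProf::GraphPrinter.new(result).print($stdout, min_percent: 1)

# Self-contained HTML call graph, easier to share.
File.open("profile.html", "w") do |f|
  RubyProf::GraphHtmlPrinter.new(result).print(f, min_percent: 1)
end
```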
File without changes
@@ -0,0 +1,159 @@
1
+ #!/usr/bin/env ruby
2
+ # frozen_string_literal: true
3
+
4
+ require "optparse"
5
+ require "yaml"
6
+ require_relative "lib/benchmark_runner"
7
+
8
+ # Parse command-line options
9
+ options = {
10
+ config_file: nil,
11
+ pattern: nil,
12
+ format: nil,
13
+ output: nil,
14
+ iterations: nil,
15
+ warmup: nil,
16
+ limit: nil,
17
+ tools: [],
18
+ verbose: true,
19
+ }
20
+
21
+ OptionParser.new do |opts|
22
+ opts.banner = "Usage: run_benchmark.rb [options]"
23
+
24
+ opts.separator ""
25
+ opts.separator "Configuration:"
26
+
27
+ opts.on("-c", "--config FILE", "Load configuration from YAML file") do |file|
28
+ options[:config_file] = file
29
+ end
30
+
31
+ opts.separator ""
32
+ opts.separator "File Selection:"
33
+
34
+ opts.on("-p", "--pattern PATTERN",
35
+ "File pattern (e.g., 'spec/fixtures/**/*.png')") do |pattern|
36
+ options[:pattern] = pattern
37
+ end
38
+
39
+ opts.on("-l", "--limit N", Integer, "Limit number of files to test") do |n|
40
+ options[:limit] = n
41
+ end
42
+
43
+ opts.separator ""
44
+ opts.separator "Execution:"
45
+
46
+ opts.on("-i", "--iterations N", Integer,
47
+ "Number of iterations per file (default: 3)") do |n|
48
+ options[:iterations] = n
49
+ end
50
+
51
+ opts.on("-w", "--warmup N", Integer,
52
+ "Number of warmup runs (default: 1)") do |n|
53
+ options[:warmup] = n
54
+ end
55
+
56
+ opts.on("-t", "--tool TOOL",
57
+ "Enable specific tool (png_conform, pngcheck)") do |tool|
58
+ options[:tools] << tool.to_sym
59
+ end
60
+
61
+ opts.separator ""
62
+ opts.separator "Output:"
63
+
64
+ opts.on("-f", "--format FORMAT",
65
+ "Output format: text, json, csv, markdown (default: text)") do |format|
66
+ options[:format] = format
67
+ end
68
+
69
+ opts.on("-o", "--output FILE",
70
+ "Write report to file instead of stdout") do |file|
71
+ options[:output] = file
72
+ end
73
+
74
+ opts.on("-q", "--quiet", "Suppress progress output") do
75
+ options[:verbose] = false
76
+ end
77
+
78
+ opts.separator ""
79
+ opts.separator "Other:"
80
+
81
+ opts.on("-h", "--help", "Show this help message") do
82
+ puts opts
83
+ exit
84
+ end
85
+
86
+ opts.on("-v", "--version", "Show version") do
87
+ puts "PNG Benchmark Runner v1.0.0"
88
+ exit
89
+ end
90
+ end.parse!
91
+
92
+ # Load configuration
93
+ config = {}
94
+
95
+ if options[:config_file]
96
+ unless File.exist?(options[:config_file])
97
+ puts "Error: Configuration file not found: #{options[:config_file]}"
98
+ exit 1
99
+ end
100
+
101
+ config = YAML.load_file(options[:config_file], symbolize_names: true)
102
+ end
103
+
104
+ # Apply command-line overrides
105
+ if options[:pattern]
106
+ config[:test_files] ||= {}
107
+ config[:test_files][:pattern] = options[:pattern]
108
+ end
109
+
110
+ if options[:limit]
111
+ config[:test_files] ||= {}
112
+ config[:test_files][:limit] = options[:limit]
113
+ end
114
+
115
+ config[:iterations] = options[:iterations] if options[:iterations]
116
+ config[:warmup_runs] = options[:warmup] if options[:warmup]
117
+
118
+ if options[:format]
119
+ config[:output] ||= {}
120
+ config[:output][:format] = options[:format]
121
+ end
122
+
123
+ if options[:output]
124
+ config[:output] ||= {}
125
+ config[:output][:file] = options[:output]
126
+ end
127
+
128
+ config[:output] ||= {}
129
+ config[:output][:verbose] = options[:verbose]
130
+
131
+ # Configure tools if specified
132
+ if options[:tools].any?
133
+ config[:tools] ||= {}
134
+
135
+ # Disable all tools by default if specific tools are requested
136
+ config[:tools][:png_conform] = { enabled: false, options: {} }
137
+ config[:tools][:pngcheck] = { enabled: false, options: {} }
138
+
139
+ # Enable requested tools
140
+ options[:tools].each do |tool|
141
+ if config[:tools].key?(tool)
142
+ config[:tools][tool][:enabled] = true
143
+ else
144
+ puts "Warning: Unknown tool '#{tool}', ignoring"
145
+ end
146
+ end
147
+ end
148
+
149
+ # Run the benchmark
150
+ begin
151
+ runner = BenchmarkRunner.new(config)
152
+ runner.run
153
+
154
+ exit 0
155
+ rescue StandardError => e
156
+ puts "\nError: #{e.message}"
157
+ puts e.backtrace.join("\n") if ENV["DEBUG"]
158
+ exit 1
159
+ end
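
For reference, a YAML config consistent with the keys the script reads (`test_files`, `iterations`, `warmup_runs`, `tools`, `output`). This is inferred from the override handling above, not a file shipped with the package; command-line flags override whatever the file sets.

```yaml
# Values are illustrative; key names mirror run_benchmark.rb's overrides.
test_files:
  pattern: "spec/fixtures/**/*.png"
  limit: 100
iterations: 3
warmup_runs: 1
tools:
  png_conform:
    enabled: true
    options: {}
  pngcheck:
    enabled: true
    options: {}
output:
  format: markdown
  file: benchmark_report.md
  verbose: true
```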
@@ -240,6 +240,48 @@ Complete reference of all PNG, MNG, JNG, and APNG chunk types validated by PngCo
240
240
  **Size**: 1 byte
241
241
  **Values**: 0=cross-fuse, 1=diverging-fuse
242
242
 
243
+ == Apple Extensions
244
+
245
+ === iDOT (Apple Display Optimization)
246
+
247
+ **Purpose**: Apple-specific display optimization data for Retina displays
248
+ **Size**: 28 bytes
249
+ **Optional**: Only present in screenshots and images saved from macOS/iOS devices
250
+
251
+ **Contents**:
252
+
253
+ * Display scale factor (4 bytes, little-endian)
254
+ * Pixel format information (4 bytes, little-endian)
255
+ * Color space information (4 bytes, little-endian)
256
+ * Backing scale factor (4 bytes, little-endian)
257
+ * Flags (4 bytes, little-endian)
258
+ * Reserved field 1 (4 bytes, little-endian)
259
+ * Reserved field 2 (4 bytes, little-endian)
260
+
261
+ **Validation**:
262
+
263
+ * Chunk must be exactly 28 bytes
264
+ * Only one iDOT chunk allowed per file
265
+ * Must appear before IDAT chunk
266
+ * CRC must be valid
267
+
268
+ **Usage**:
269
+
270
+ The iDOT chunk is automatically added by macOS and iOS when saving screenshots or images through system APIs. It contains display optimization data for:
271
+
272
+ * Retina display rendering
273
+ * Multi-core decoding performance
274
+ * Display color space information
275
+ * Backing store scale factors
276
+
277
+ This chunk is safe to ignore for standard PNG decoders, as it follows the ancillary chunk naming convention (lowercase first letter).
278
+
279
+ **References**:
280
+
281
+ * Apple proprietary format
282
+ * Found in PNG files generated by macOS 10.7+ and iOS 5+
283
+ * Commonly seen in screenshot files
284
+
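
Given the layout above (seven 4-byte little-endian fields, 28 bytes total), the chunk body decodes with a single `unpack`. A sketch, with field names taken from the list above; this is illustrative code, not taken from the package:

```ruby
# data: the 28-byte iDOT chunk body (length, type, and CRC already stripped).
def parse_idot(data)
  raise ArgumentError, "iDOT must be exactly 28 bytes" unless data.bytesize == 28

  # "V7" = seven 32-bit unsigned little-endian integers.
  scale, pixel_format, color_space, backing_scale, flags, r1, r2 = data.unpack("V7")

  { display_scale: scale, pixel_format: pixel_format, color_space: color_space,
    backing_scale: backing_scale, flags: flags, reserved: [r1, r2] }
end
```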
243
285
  == APNG (Animated PNG) Chunks
244
286
 
245
287
  === acTL (Animation Control)