smart_message 0.0.8 → 0.0.9

This diff represents the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in the public registry.
Files changed (80)
  1. checksums.yaml +4 -4
  2. data/.gitignore +1 -0
  3. data/.irbrc +24 -0
  4. data/CHANGELOG.md +96 -0
  5. data/Gemfile.lock +6 -1
  6. data/README.md +289 -15
  7. data/docs/README.md +3 -1
  8. data/docs/addressing.md +119 -13
  9. data/docs/architecture.md +68 -0
  10. data/docs/dead_letter_queue.md +673 -0
  11. data/docs/dispatcher.md +87 -0
  12. data/docs/examples.md +59 -1
  13. data/docs/getting-started.md +8 -1
  14. data/docs/logging.md +382 -326
  15. data/docs/message_filtering.md +451 -0
  16. data/examples/01_point_to_point_orders.rb +54 -53
  17. data/examples/02_publish_subscribe_events.rb +14 -10
  18. data/examples/03_many_to_many_chat.rb +16 -8
  19. data/examples/04_redis_smart_home_iot.rb +20 -10
  20. data/examples/05_proc_handlers.rb +12 -11
  21. data/examples/06_custom_logger_example.rb +95 -100
  22. data/examples/07_error_handling_scenarios.rb +4 -2
  23. data/examples/08_entity_addressing_basic.rb +18 -6
  24. data/examples/08_entity_addressing_with_filtering.rb +27 -9
  25. data/examples/09_dead_letter_queue_demo.rb +559 -0
  26. data/examples/09_regex_filtering_microservices.rb +407 -0
  27. data/examples/10_header_block_configuration.rb +263 -0
  28. data/examples/11_global_configuration_example.rb +219 -0
  29. data/examples/README.md +102 -0
  30. data/examples/dead_letters.jsonl +12 -0
  31. data/examples/performance_metrics/benchmark_results_ractor_20250818_205603.json +135 -0
  32. data/examples/performance_metrics/benchmark_results_ractor_20250818_205831.json +135 -0
  33. data/examples/performance_metrics/benchmark_results_test_20250818_204942.json +130 -0
  34. data/examples/performance_metrics/benchmark_results_threadpool_20250818_204942.json +130 -0
  35. data/examples/performance_metrics/benchmark_results_threadpool_20250818_204959.json +130 -0
  36. data/examples/performance_metrics/benchmark_results_threadpool_20250818_205044.json +130 -0
  37. data/examples/performance_metrics/benchmark_results_threadpool_20250818_205109.json +130 -0
  38. data/examples/performance_metrics/benchmark_results_threadpool_20250818_205252.json +130 -0
  39. data/examples/performance_metrics/benchmark_results_unknown_20250819_172852.json +130 -0
  40. data/examples/performance_metrics/compare_benchmarks.rb +519 -0
  41. data/examples/performance_metrics/dead_letters.jsonl +3100 -0
  42. data/examples/performance_metrics/performance_benchmark.rb +344 -0
  43. data/examples/show_logger.rb +367 -0
  44. data/examples/show_me.rb +145 -0
  45. data/examples/temp.txt +94 -0
  46. data/examples/tmux_chat/bot_agent.rb +4 -2
  47. data/examples/tmux_chat/human_agent.rb +4 -2
  48. data/examples/tmux_chat/room_monitor.rb +4 -2
  49. data/examples/tmux_chat/shared_chat_system.rb +6 -3
  50. data/lib/smart_message/addressing.rb +259 -0
  51. data/lib/smart_message/base.rb +121 -599
  52. data/lib/smart_message/circuit_breaker.rb +2 -1
  53. data/lib/smart_message/configuration.rb +199 -0
  54. data/lib/smart_message/dead_letter_queue.rb +27 -10
  55. data/lib/smart_message/dispatcher.rb +90 -49
  56. data/lib/smart_message/header.rb +5 -0
  57. data/lib/smart_message/logger/base.rb +21 -1
  58. data/lib/smart_message/logger/default.rb +88 -138
  59. data/lib/smart_message/logger/lumberjack.rb +324 -0
  60. data/lib/smart_message/logger/null.rb +81 -0
  61. data/lib/smart_message/logger.rb +17 -9
  62. data/lib/smart_message/messaging.rb +100 -0
  63. data/lib/smart_message/plugins.rb +132 -0
  64. data/lib/smart_message/serializer/base.rb +25 -8
  65. data/lib/smart_message/serializer/json.rb +5 -4
  66. data/lib/smart_message/subscription.rb +193 -0
  67. data/lib/smart_message/transport/base.rb +72 -41
  68. data/lib/smart_message/transport/memory_transport.rb +7 -5
  69. data/lib/smart_message/transport/redis_transport.rb +15 -45
  70. data/lib/smart_message/transport/stdout_transport.rb +18 -8
  71. data/lib/smart_message/transport.rb +1 -34
  72. data/lib/smart_message/utilities.rb +142 -0
  73. data/lib/smart_message/version.rb +1 -1
  74. data/lib/smart_message/versioning.rb +85 -0
  75. data/lib/smart_message/wrapper.rb.bak +132 -0
  76. data/lib/smart_message.rb +74 -28
  77. data/smart_message.gemspec +3 -0
  78. metadata +76 -3
  79. data/lib/smart_message/serializer.rb +0 -10
  80. data/lib/smart_message/wrapper.rb +0 -43
data/examples/performance_metrics/compare_benchmarks.rb
@@ -0,0 +1,519 @@
+ #!/usr/bin/env ruby
+ # compare_benchmarks.rb
+ #
+ # Compares benchmark results between multiple subjects/implementations
+ # Usage: ruby compare_benchmarks.rb subject1 subject2 [subject3 ...]
+ # Will find and compare JSON files for each subject
+
+ require 'json'
+ require 'optparse'
+ require 'fileutils'
+
+ # Try to load visualization gems, but don't fail if they're not available
+ begin
+   require 'unicode_plot'
+   GRAPHICS_AVAILABLE = true
+ rescue LoadError
+   GRAPHICS_AVAILABLE = false
+   puts "šŸ“Š Note: Install 'unicode_plot' gem for graphical visualizations"
+   puts " Run: gem install unicode_plot"
+   puts ""
+ end
+
+ class BenchmarkComparator
+   def initialize(subjects)
+     @subjects = subjects
+     @subject_data = {}
+     @subject_files = {}
+
+     load_subject_data
+   end
+
+   def compare
+     puts "šŸ”„ SMARTMESSAGE PERFORMANCE COMPARISON"
+     puts "=" * 80
+
+     @subjects.each_with_index do |subject, index|
+       puts "Subject #{index + 1}: #{subject.upcase} (#{@subject_files[subject]})"
+     end
+     puts "=" * 80
+
+     compare_overall_performance
+     compare_scenario_performance
+     compare_resource_usage
+     generate_summary
+
+     # Add graphical visualizations if available
+     if GRAPHICS_AVAILABLE
+       generate_visualizations
+     end
+   end
+
+   private
+
+   def load_subject_data
+     @subjects.each do |subject|
+       # Find the most recent JSON file for this subject
+       pattern = "benchmark_results_#{subject}_*.json"
+       matching_files = Dir.glob(pattern).sort_by { |f| File.mtime(f) }
+
+       if matching_files.empty?
+         puts "āŒ No benchmark files found for subject: #{subject}"
+         puts " Looking for pattern: #{pattern}"
+         exit 1
+       end
+
+       latest_file = matching_files.last
+       @subject_files[subject] = latest_file
+
+       begin
+         @subject_data[subject] = JSON.parse(File.read(latest_file))
+       rescue JSON::ParserError => e
+         puts "āŒ Error parsing JSON file #{latest_file}: #{e.message}"
+         exit 1
+       rescue => e
+         puts "āŒ Error reading file #{latest_file}: #{e.message}"
+         exit 1
+       end
+     end
+   end
+
+   def compare_overall_performance
+     puts "\nšŸ“Š OVERALL PERFORMANCE COMPARISON"
+     puts "-" * 60
+
+     # Calculate overall metrics for each subject
+     overall_metrics = {}
+     @subjects.each do |subject|
+       data = @subject_data[subject]
+       runtime = data['overall_stats']['total_runtime']
+       total_processed = total_messages_processed(data)
+       throughput = total_processed / runtime
+
+       overall_metrics[subject] = {
+         runtime: runtime,
+         total_processed: total_processed,
+         throughput: throughput,
+         memory: data['overall_stats']['memory_used_mb']
+       }
+     end
+
+     # Runtime comparison
+     puts "\nRuntime (seconds):"
+     sorted_by_runtime = overall_metrics.sort_by { |_, metrics| metrics[:runtime] }
+     sorted_by_runtime.each_with_index do |(subject, metrics), index|
+       ranking = index == 0 ? "šŸ†" : " "
+       puts " #{ranking} #{subject.ljust(15)}: #{metrics[:runtime].round(3)}s"
+     end
+
+     # Throughput comparison
+     puts "\nThroughput (messages/second):"
+     sorted_by_throughput = overall_metrics.sort_by { |_, metrics| -metrics[:throughput] }
+     sorted_by_throughput.each_with_index do |(subject, metrics), index|
+       ranking = index == 0 ? "šŸ†" : " "
+       puts " #{ranking} #{subject.ljust(15)}: #{metrics[:throughput].round(1)} msg/sec"
+     end
+
+     # Memory usage comparison
+     puts "\nMemory Usage (MB):"
+     sorted_by_memory = overall_metrics.sort_by { |_, metrics| metrics[:memory] }
+     sorted_by_memory.each_with_index do |(subject, metrics), index|
+       ranking = index == 0 ? "šŸ†" : " "
+       puts " #{ranking} #{subject.ljust(15)}: #{metrics[:memory].round(2)} MB"
+     end
+   end
+
+   def compare_scenario_performance
+     puts "\nšŸ“ˆ SCENARIO-BY-SCENARIO COMPARISON"
+     puts "-" * 60
+
+     # Get all scenarios from the first subject
+     scenarios = @subject_data[@subjects.first]['scenario_results'].keys
+
+     scenarios.each do |scenario|
+       first_subject_data = @subject_data[@subjects.first]['scenario_results'][scenario]
+       puts "\n#{first_subject_data['name']}:"
+
+       # Collect metrics for all subjects
+       scenario_metrics = {}
+       @subjects.each do |subject|
+         data = @subject_data[subject]['scenario_results'][scenario]
+         scenario_metrics[subject] = {
+           throughput: data['throughput']['messages_per_second'],
+           time: data['times']['total'],
+           errors: data['errors'] || 0,
+           processed: data['messages_processed']
+         }
+       end
+
+       # Throughput comparison
+       puts " Throughput (msg/sec):"
+       sorted_by_throughput = scenario_metrics.sort_by { |_, metrics| -metrics[:throughput] }
+       sorted_by_throughput.each_with_index do |(subject, metrics), index|
+         ranking = index == 0 ? "šŸ†" : " "
+         puts " #{ranking} #{subject.ljust(12)}: #{metrics[:throughput].round(1)}"
+       end
+
+       # Processing time comparison
+       puts " Processing Time (seconds):"
+       sorted_by_time = scenario_metrics.sort_by { |_, metrics| metrics[:time] }
+       sorted_by_time.each_with_index do |(subject, metrics), index|
+         ranking = index == 0 ? "šŸ†" : " "
+         puts " #{ranking} #{subject.ljust(12)}: #{metrics[:time].round(3)}"
+       end
+
+       # Error comparison (if any errors exist)
+       total_errors = scenario_metrics.values.sum { |m| m[:errors] }
+       if total_errors > 0
+         puts " Errors:"
+         scenario_metrics.each do |subject, metrics|
+           puts " #{subject.ljust(15)}: #{metrics[:errors]}"
+         end
+       end
+     end
+   end
+
+   def compare_resource_usage
+     puts "\nšŸ’¾ RESOURCE USAGE COMPARISON"
+     puts "-" * 60
+
+     puts "Memory Usage:"
+     @subjects.each do |subject|
+       data = @subject_data[subject]
+       memory = data['overall_stats']['memory_used_mb']
+       puts " #{subject.ljust(15)}: #{memory.round(2)} MB"
+     end
+
+     puts "\nPlatform Information:"
+     @subjects.each_with_index do |subject, index|
+       platform_info = @subject_data[subject]['benchmark_info']
+       puts " #{subject.upcase}:"
+       puts " Ruby Version: #{platform_info['ruby_version']}"
+       puts " Platform: #{platform_info['platform']}"
+       puts " Processors: #{platform_info['processors'] || platform_info['processor_count']}"
+       puts " Timestamp: #{platform_info['timestamp']}"
+       puts "" unless index == @subjects.length - 1
+     end
+   end
+
+   def generate_summary
+     puts "\nšŸ† PERFORMANCE SUMMARY"
+     puts "=" * 80
+
+     # Calculate overall rankings
+     overall_metrics = {}
+     @subjects.each do |subject|
+       data = @subject_data[subject]
+       runtime = data['overall_stats']['total_runtime']
+       total_processed = total_messages_processed(data)
+       throughput = total_processed / runtime
+       memory = data['overall_stats']['memory_used_mb']
+
+       overall_metrics[subject] = {
+         throughput: throughput,
+         runtime: runtime,
+         memory: memory,
+         total_processed: total_processed
+       }
+     end
+
+     # Overall winners by category
+     puts "\nšŸ„‡ CATEGORY WINNERS:"
+
+     best_throughput = overall_metrics.max_by { |_, metrics| metrics[:throughput] }
+     fastest_runtime = overall_metrics.min_by { |_, metrics| metrics[:runtime] }
+     lowest_memory = overall_metrics.min_by { |_, metrics| metrics[:memory] }
+
+     puts " šŸš€ Highest Throughput: #{best_throughput[0].upcase} (#{best_throughput[1][:throughput].round(1)} msg/sec)"
+     puts " ⚔ Fastest Runtime: #{fastest_runtime[0].upcase} (#{fastest_runtime[1][:runtime].round(3)}s)"
+     puts " šŸ’¾ Lowest Memory: #{lowest_memory[0].upcase} (#{lowest_memory[1][:memory].round(2)} MB)"
+
+     # Scenario-by-scenario winners
+     puts "\nšŸ“Š SCENARIO WINNERS:"
+     scenarios = @subject_data[@subjects.first]['scenario_results'].keys
+     scenarios.each do |scenario|
+       scenario_metrics = {}
+       @subjects.each do |subject|
+         data = @subject_data[subject]['scenario_results'][scenario]
+         scenario_metrics[subject] = data['throughput']['messages_per_second']
+       end
+
+       winner = scenario_metrics.max_by { |_, throughput| throughput }
+       scenario_name = @subject_data[@subjects.first]['scenario_results'][scenario]['name']
+       puts " #{scenario_name.ljust(20)}: #{winner[0].upcase} (#{winner[1].round(1)} msg/sec)"
+     end
+
+     # Overall recommendation
+     puts "\nšŸ’” RECOMMENDATIONS:"
+
+     # Find the best overall performer (combining throughput and memory efficiency)
+     performance_scores = overall_metrics.map do |subject, metrics|
+       # Normalize scores (higher throughput is better, lower memory is better)
+       max_throughput = overall_metrics.values.map { |m| m[:throughput] }.max
+       min_memory = overall_metrics.values.map { |m| m[:memory] }.min
+
+       throughput_score = metrics[:throughput] / max_throughput
+       memory_score = min_memory / metrics[:memory]
+       combined_score = (throughput_score + memory_score) / 2
+
+       [subject, combined_score]
+     end
+
+     best_overall = performance_scores.max_by { |_, score| score }
+
+     puts " āœ… Best Overall Performance: #{best_overall[0].upcase}"
+     puts " - Balanced combination of throughput and memory efficiency"
+
+     # Generate optimization suggestions
+     puts "\nšŸ”§ OPTIMIZATION SUGGESTIONS:"
+     puts " • Test with larger message volumes to validate scalability"
+     puts " • Profile CPU utilization patterns during peak loads"
+     puts " • Consider workload-specific optimizations based on scenario winners"
+
+     # Check for significant memory differences
+     memory_values = overall_metrics.values.map { |m| m[:memory] }
+     if memory_values.max > memory_values.min * 1.5
+       puts " • Investigate memory usage patterns - significant differences detected"
+     end
+
+     # Check for error patterns
+     total_errors_by_subject = {}
+     @subjects.each do |subject|
+       total_errors = @subject_data[subject]['scenario_results'].values.sum { |s| s['errors'] || 0 }
+       total_errors_by_subject[subject] = total_errors
+     end
+
+     if total_errors_by_subject.values.any? { |errors| errors > 0 }
+       puts " • Address error handling - some subjects had processing errors"
+     end
+   end
+
+   def total_messages_processed(data)
+     data['scenario_results'].values.sum { |scenario| scenario['messages_processed'] }
+   end
+
+   def generate_visualizations
+     puts "\n" + "=" * 80
+     puts "šŸ“Š GRAPHICAL VISUALIZATIONS"
+     puts "=" * 80
+
+     # Show color legend for subjects
+     puts "\nšŸŽØ Color Legend:"
+     @subjects.each_with_index do |subject, idx|
+       color_name = get_subject_color_name(idx)
+       puts " #{subject.upcase.ljust(15)}: #{color_name}"
+     end
+
+     visualize_overall_throughput
+     visualize_scenario_throughput
+     visualize_memory_usage
+     visualize_processing_times
+   end
+
+   private
+
+   def get_subject_color(index)
+     colors = [:blue, :red, :green, :yellow, :cyan, :magenta]
+     colors[index % colors.length]
+   end
+
+   def get_subject_color_name(index)
+     color_names = ["Blue", "Red", "Green", "Yellow", "Cyan", "Magenta"]
+     color_names[index % color_names.length]
+   end
+
+   def visualize_overall_throughput
+     puts "\nšŸ“ˆ Overall Throughput Comparison"
+     puts "-" * 60
+
+     # Collect overall throughput data with colors
+     labels = []
+     values = []
+     colors = []
+
+     @subjects.each_with_index do |subject, idx|
+       data = @subject_data[subject]
+       runtime = data['overall_stats']['total_runtime']
+       total_processed = total_messages_processed(data)
+       throughput = total_processed / runtime
+
+       labels << subject.upcase
+       values << throughput
+       colors << get_subject_color(idx)
+     end
+
+     # Create individual barplots for each subject with their own color
+     # Then combine them into a single visualization
+     max_value = values.max
+
+     puts "\n Overall Throughput (messages/second)"
+     puts " " + "─" * 60
+
+     labels.each_with_index do |label, idx|
+       bar_length = ((values[idx] / max_value) * 50).to_i
+       bar = "ā–ˆ" * bar_length
+       color = colors[idx]
+
+       # Apply color to the bar using ANSI codes
+       colored_bar = colorize_bar(bar, color)
+
+       puts sprintf(" %-12s │%s %.1f", label, colored_bar, values[idx])
+     end
+     puts " " + "─" * 60
+     puts " 0" + " " * 45 + sprintf("%.0f", max_value)
+   end
+
+   def colorize_bar(text, color)
+     color_codes = {
+       blue: "\e[34m",
+       red: "\e[31m",
+       green: "\e[32m",
+       yellow: "\e[33m",
+       cyan: "\e[36m",
+       magenta: "\e[35m"
+     }
+     reset = "\e[0m"
+
+     "#{color_codes[color]}#{text}#{reset}"
+   end
+
+   def visualize_scenario_throughput
+     puts "\nšŸ“Š Scenario Throughput Comparison"
+     puts "-" * 60
+
+     scenarios = @subject_data[@subjects.first]['scenario_results'].keys
+
+     scenarios.each do |scenario|
+       scenario_name = @subject_data[@subjects.first]['scenario_results'][scenario]['name']
+
+       # Collect data for this scenario
+       labels = []
+       values = []
+       colors = []
+
+       @subjects.each_with_index do |subject, idx|
+         data = @subject_data[subject]['scenario_results'][scenario]
+         labels << subject.upcase
+         values << data['throughput']['messages_per_second']
+         colors << get_subject_color(idx)
+       end
+
+       # Create colored bar chart
+       max_value = values.max
+
+       puts "\n #{scenario_name} - Throughput (msg/sec)"
+       puts " " + "─" * 60
+
+       labels.each_with_index do |label, idx|
+         bar_length = ((values[idx] / max_value) * 45).to_i
+         bar = "ā–ˆ" * bar_length
+         colored_bar = colorize_bar(bar, colors[idx])
+
+         puts sprintf(" %-12s │%s %.1f", label, colored_bar, values[idx])
+       end
+       puts " " + "─" * 60
+     end
+   end
+
+   def visualize_memory_usage
+     puts "\nšŸ’¾ Memory Usage Comparison"
+     puts "-" * 60
+
+     # Collect memory data with colors
+     labels = []
+     values = []
+     colors = []
+
+     @subjects.each_with_index do |subject, idx|
+       data = @subject_data[subject]
+       labels << subject.upcase
+       values << data['overall_stats']['memory_used_mb']
+       colors << get_subject_color(idx)
+     end
+
+     # Create colored bar chart
+     max_value = values.max
+
+     puts "\n Memory Usage (MB)"
+     puts " " + "─" * 60
+
+     labels.each_with_index do |label, idx|
+       bar_length = ((values[idx] / max_value) * 50).to_i
+       bar = "ā–ˆ" * bar_length
+       colored_bar = colorize_bar(bar, colors[idx])
+
+       puts sprintf(" %-12s │%s %.2f MB", label, colored_bar, values[idx])
+     end
+     puts " " + "─" * 60
+     puts " 0" + " " * 45 + sprintf("%.0f MB", max_value)
+   end
+
+   def visualize_processing_times
+     puts "\nā±ļø Processing Time Comparison by Scenario"
+     puts "-" * 60
+
+     scenarios = @subject_data[@subjects.first]['scenario_results'].keys
+
+     # For each scenario, show processing times
+     scenarios.each do |scenario|
+       scenario_name = @subject_data[@subjects.first]['scenario_results'][scenario]['name']
+
+       # Collect data for this scenario
+       labels = []
+       values = []
+       colors = []
+
+       @subjects.each_with_index do |subject, idx|
+         data = @subject_data[subject]['scenario_results'][scenario]
+         labels << subject.upcase
+         values << data['times']['total']
+         colors << get_subject_color(idx)
+       end
+
+       # Create colored bar chart
+       max_value = values.max
+
+       puts "\n #{scenario_name} - Processing Time (seconds)"
+       puts " " + "─" * 60
+
+       labels.each_with_index do |label, idx|
+         bar_length = ((values[idx] / max_value) * 45).to_i
+         bar = "ā–ˆ" * bar_length
+         colored_bar = colorize_bar(bar, colors[idx])
+
+         puts sprintf(" %-12s │%s %.3f", label, colored_bar, values[idx])
+       end
+       puts " " + "─" * 60
+     end
+   end
+ end
+
+ # Command line interface
+ if __FILE__ == $0
+   if ARGV.length < 2
+     puts "Usage: ruby compare_benchmarks.rb subject1 subject2 [subject3 ...]"
+     puts ""
+     puts "Compares benchmark results between multiple subjects/implementations."
+     puts "Will automatically find the most recent JSON file for each subject."
+     puts ""
+     puts "Example:"
+     puts " ruby compare_benchmarks.rb threadpool ractor"
+     puts " ruby compare_benchmarks.rb threadpool ractor async_queue"
+     puts ""
+     puts "Available benchmark files:"
+     Dir.glob("benchmark_results_*.json").group_by { |f| f.match(/benchmark_results_(.+?)_\d+_\d+\.json/)[1] rescue "unknown" }.each do |subject, files|
+       puts " #{subject}: #{files.length} file(s), latest: #{files.sort_by { |f| File.mtime(f) }.last}"
+     end
+     exit 1
+   end
+
+   subjects = ARGV
+
+   begin
+     comparator = BenchmarkComparator.new(subjects)
+     comparator.compare
+   rescue => e
+     puts "Error: #{e.message}"
+     exit 1
+   end
+ end
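
For reference, the script above assumes each benchmark_results_<subject>_<timestamp>.json file exposes the keys it reads: 'benchmark_info', 'overall_stats', and a 'scenario_results' hash. The sketch below generates a compatible sample file; the scenario key, its 'name', and all numeric values are illustrative placeholders, not figures taken from the actual benchmark output in this release.

    #!/usr/bin/env ruby
    # make_sample_results.rb - writes a minimal benchmark results file that
    # compare_benchmarks.rb can read. Field names mirror the keys the comparator
    # accesses; every value here is a placeholder.
    require 'json'
    require 'time'

    sample = {
      'benchmark_info' => {
        'ruby_version' => RUBY_VERSION,
        'platform'     => RUBY_PLATFORM,
        'processors'   => 8,                  # the script also accepts 'processor_count'
        'timestamp'    => Time.now.iso8601
      },
      'overall_stats' => {
        'total_runtime'  => 12.345,           # seconds
        'memory_used_mb' => 42.0
      },
      'scenario_results' => {
        'point_to_point' => {                 # hypothetical scenario key
          'name'               => 'Point-to-Point',
          'messages_processed' => 10_000,
          'errors'             => 0,
          'times'              => { 'total' => 3.21 },
          'throughput'         => { 'messages_per_second' => 3115.3 }
        }
      }
    }

    File.write("benchmark_results_sample_#{Time.now.strftime('%Y%m%d_%H%M%S')}.json",
               JSON.pretty_generate(sample))

Generating files like this for two subject names and then running ruby compare_benchmarks.rb subjectA subjectB is a quick smoke test of the comparison and visualization output without running the full performance_benchmark.rb suite.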