decision_agent 0.1.2 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. checksums.yaml +4 -4
  2. data/README.md +212 -35
  3. data/bin/decision_agent +3 -8
  4. data/lib/decision_agent/ab_testing/ab_test.rb +197 -0
  5. data/lib/decision_agent/ab_testing/ab_test_assignment.rb +76 -0
  6. data/lib/decision_agent/ab_testing/ab_test_manager.rb +317 -0
  7. data/lib/decision_agent/ab_testing/ab_testing_agent.rb +152 -0
  8. data/lib/decision_agent/ab_testing/storage/activerecord_adapter.rb +155 -0
  9. data/lib/decision_agent/ab_testing/storage/adapter.rb +67 -0
  10. data/lib/decision_agent/ab_testing/storage/memory_adapter.rb +116 -0
  11. data/lib/decision_agent/agent.rb +19 -26
  12. data/lib/decision_agent/audit/null_adapter.rb +1 -2
  13. data/lib/decision_agent/decision.rb +3 -1
  14. data/lib/decision_agent/dsl/condition_evaluator.rb +4 -3
  15. data/lib/decision_agent/dsl/rule_parser.rb +4 -6
  16. data/lib/decision_agent/dsl/schema_validator.rb +27 -31
  17. data/lib/decision_agent/errors.rb +11 -8
  18. data/lib/decision_agent/evaluation.rb +3 -1
  19. data/lib/decision_agent/evaluation_validator.rb +78 -0
  20. data/lib/decision_agent/evaluators/json_rule_evaluator.rb +26 -0
  21. data/lib/decision_agent/evaluators/static_evaluator.rb +2 -6
  22. data/lib/decision_agent/monitoring/alert_manager.rb +282 -0
  23. data/lib/decision_agent/monitoring/dashboard/public/dashboard.css +381 -0
  24. data/lib/decision_agent/monitoring/dashboard/public/dashboard.js +471 -0
  25. data/lib/decision_agent/monitoring/dashboard/public/index.html +161 -0
  26. data/lib/decision_agent/monitoring/dashboard_server.rb +340 -0
  27. data/lib/decision_agent/monitoring/metrics_collector.rb +423 -0
  28. data/lib/decision_agent/monitoring/monitored_agent.rb +71 -0
  29. data/lib/decision_agent/monitoring/prometheus_exporter.rb +247 -0
  30. data/lib/decision_agent/monitoring/storage/activerecord_adapter.rb +253 -0
  31. data/lib/decision_agent/monitoring/storage/base_adapter.rb +90 -0
  32. data/lib/decision_agent/monitoring/storage/memory_adapter.rb +222 -0
  33. data/lib/decision_agent/replay/replay.rb +12 -22
  34. data/lib/decision_agent/scoring/base.rb +1 -1
  35. data/lib/decision_agent/scoring/consensus.rb +5 -5
  36. data/lib/decision_agent/scoring/weighted_average.rb +1 -1
  37. data/lib/decision_agent/version.rb +1 -1
  38. data/lib/decision_agent/versioning/activerecord_adapter.rb +69 -33
  39. data/lib/decision_agent/versioning/adapter.rb +1 -3
  40. data/lib/decision_agent/versioning/file_storage_adapter.rb +143 -35
  41. data/lib/decision_agent/versioning/version_manager.rb +4 -12
  42. data/lib/decision_agent/web/public/index.html +1 -1
  43. data/lib/decision_agent/web/server.rb +19 -24
  44. data/lib/decision_agent.rb +14 -0
  45. data/lib/generators/decision_agent/install/install_generator.rb +42 -5
  46. data/lib/generators/decision_agent/install/templates/ab_test_assignment_model.rb +45 -0
  47. data/lib/generators/decision_agent/install/templates/ab_test_model.rb +54 -0
  48. data/lib/generators/decision_agent/install/templates/ab_testing_migration.rb +43 -0
  49. data/lib/generators/decision_agent/install/templates/ab_testing_tasks.rake +189 -0
  50. data/lib/generators/decision_agent/install/templates/decision_agent_tasks.rake +114 -0
  51. data/lib/generators/decision_agent/install/templates/decision_log.rb +57 -0
  52. data/lib/generators/decision_agent/install/templates/error_metric.rb +53 -0
  53. data/lib/generators/decision_agent/install/templates/evaluation_metric.rb +43 -0
  54. data/lib/generators/decision_agent/install/templates/migration.rb +17 -6
  55. data/lib/generators/decision_agent/install/templates/monitoring_migration.rb +109 -0
  56. data/lib/generators/decision_agent/install/templates/performance_metric.rb +76 -0
  57. data/lib/generators/decision_agent/install/templates/rule.rb +3 -3
  58. data/lib/generators/decision_agent/install/templates/rule_version.rb +13 -7
  59. data/spec/ab_testing/ab_test_manager_spec.rb +330 -0
  60. data/spec/ab_testing/ab_test_spec.rb +270 -0
  61. data/spec/activerecord_thread_safety_spec.rb +553 -0
  62. data/spec/agent_spec.rb +13 -13
  63. data/spec/api_contract_spec.rb +16 -16
  64. data/spec/audit_adapters_spec.rb +3 -3
  65. data/spec/comprehensive_edge_cases_spec.rb +86 -86
  66. data/spec/dsl_validation_spec.rb +83 -83
  67. data/spec/edge_cases_spec.rb +23 -23
  68. data/spec/examples/feedback_aware_evaluator_spec.rb +7 -7
  69. data/spec/examples.txt +612 -0
  70. data/spec/issue_verification_spec.rb +759 -0
  71. data/spec/json_rule_evaluator_spec.rb +15 -15
  72. data/spec/monitoring/alert_manager_spec.rb +378 -0
  73. data/spec/monitoring/metrics_collector_spec.rb +281 -0
  74. data/spec/monitoring/monitored_agent_spec.rb +222 -0
  75. data/spec/monitoring/prometheus_exporter_spec.rb +242 -0
  76. data/spec/monitoring/storage/activerecord_adapter_spec.rb +346 -0
  77. data/spec/monitoring/storage/memory_adapter_spec.rb +247 -0
  78. data/spec/replay_edge_cases_spec.rb +58 -58
  79. data/spec/replay_spec.rb +11 -11
  80. data/spec/rfc8785_canonicalization_spec.rb +215 -0
  81. data/spec/scoring_spec.rb +1 -1
  82. data/spec/spec_helper.rb +9 -0
  83. data/spec/thread_safety_spec.rb +482 -0
  84. data/spec/thread_safety_spec.rb.broken +878 -0
  85. data/spec/versioning_spec.rb +141 -37
  86. data/spec/web_ui_rack_spec.rb +135 -0
  87. metadata +93 -6
data/lib/decision_agent/monitoring/prometheus_exporter.rb
@@ -0,0 +1,247 @@
+ require "monitor"
+
+ module DecisionAgent
+   module Monitoring
+     # Prometheus-compatible metrics exporter
+     class PrometheusExporter
+       include MonitorMixin
+
+       CONTENT_TYPE = "text/plain; version=0.0.4".freeze
+
+       def initialize(metrics_collector:, namespace: "decision_agent")
+         super()
+         @metrics_collector = metrics_collector
+         @namespace = namespace
+         @custom_metrics = {}
+         freeze_config
+       end
+
+       # Export metrics in Prometheus format
+       def export
+         synchronize do
+           lines = []
+
+           # Add header
+           lines << "# DecisionAgent Metrics Export"
+           lines << "# Timestamp: #{Time.now.utc.iso8601}"
+           lines << ""
+
+           # Decision metrics
+           lines.concat(export_decision_metrics)
+
+           # Performance metrics
+           lines.concat(export_performance_metrics)
+
+           # Error metrics
+           lines.concat(export_error_metrics)
+
+           # Custom KPI metrics
+           lines.concat(export_custom_metrics)
+
+           # System info
+           lines.concat(export_system_metrics)
+
+           lines.join("\n")
+         end
+       end
+
+       # Register custom KPI
+       def register_kpi(name:, value:, labels: {}, help: nil)
+         synchronize do
+           metric_name = sanitize_name(name)
+           @custom_metrics[metric_name] = {
+             value: value,
+             labels: labels,
+             help: help || "Custom KPI: #{name}",
+             timestamp: Time.now.utc
+           }
+         end
+       end
+
+       # Get metrics in hash format
+       def metrics_hash
+         synchronize do
+           stats = @metrics_collector.statistics
+
+           {
+             decisions: {
+               total: counter_metric("decisions_total", stats.dig(:decisions, :total) || 0),
+               avg_confidence: gauge_metric("decision_confidence_avg", stats.dig(:decisions, :avg_confidence) || 0),
+               avg_duration_ms: gauge_metric("decision_duration_ms_avg", stats.dig(:decisions, :avg_duration_ms) || 0)
+             },
+             performance: {
+               success_rate: gauge_metric("success_rate", stats.dig(:performance, :success_rate) || 0),
+               avg_duration_ms: gauge_metric("operation_duration_ms_avg",
+                                             stats.dig(:performance, :avg_duration_ms) || 0),
+               p95_duration_ms: gauge_metric("operation_duration_ms_p95",
+                                             stats.dig(:performance, :p95_duration_ms) || 0),
+               p99_duration_ms: gauge_metric("operation_duration_ms_p99", stats.dig(:performance, :p99_duration_ms) || 0)
+             },
+             errors: {
+               total: counter_metric("errors_total", stats.dig(:errors, :total) || 0)
+             },
+             system: {
+               version: info_metric("version", DecisionAgent::VERSION)
+             }
+           }
+         end
+       end
+
+       private
+
+       def freeze_config
+         @namespace.freeze
+       end
+
+       def export_decision_metrics
+         stats = @metrics_collector.statistics
+         lines = []
+
+         # Total decisions
+         lines << "# HELP #{metric_name('decisions_total')} Total number of decisions made"
+         lines << "# TYPE #{metric_name('decisions_total')} counter"
+         lines << "#{metric_name('decisions_total')} #{stats.dig(:decisions, :total) || 0}"
+         lines << ""
+
+         # Average confidence
+         lines << "# HELP #{metric_name('decision_confidence_avg')} Average decision confidence"
+         lines << "# TYPE #{metric_name('decision_confidence_avg')} gauge"
+         lines << "#{metric_name('decision_confidence_avg')} #{stats.dig(:decisions, :avg_confidence) || 0}"
+         lines << ""
+
+         # Decision distribution
+         if stats.dig(:decisions, :decision_distribution)
+           lines << "# HELP #{metric_name('decisions_by_type')} Decisions grouped by type"
+           lines << "# TYPE #{metric_name('decisions_by_type')} counter"
+           stats[:decisions][:decision_distribution].each do |decision, count|
+             lines << "#{metric_name('decisions_by_type')}{decision=\"#{decision}\"} #{count}"
+           end
+           lines << ""
+         end
+
+         # Average duration
+         if stats.dig(:decisions, :avg_duration_ms)
+           lines << "# HELP #{metric_name('decision_duration_ms_avg')} Average decision duration in milliseconds"
+           lines << "# TYPE #{metric_name('decision_duration_ms_avg')} gauge"
+           lines << "#{metric_name('decision_duration_ms_avg')} #{stats[:decisions][:avg_duration_ms]}"
+           lines << ""
+         end
+
+         lines
+       end
+
+       def export_performance_metrics
+         stats = @metrics_collector.statistics
+         lines = []
+
+         # Success rate
+         lines << "# HELP #{metric_name('success_rate')} Operation success rate (0-1)"
+         lines << "# TYPE #{metric_name('success_rate')} gauge"
+         lines << "#{metric_name('success_rate')} #{stats.dig(:performance, :success_rate) || 0}"
+         lines << ""
+
+         # Duration metrics
+         if stats.dig(:performance, :avg_duration_ms)
+           lines << "# HELP #{metric_name('operation_duration_ms')} Operation duration in milliseconds"
+           lines << "# TYPE #{metric_name('operation_duration_ms')} summary"
+           lines << "#{metric_name('operation_duration_ms')}{quantile=\"0.5\"} #{stats[:performance][:avg_duration_ms]}"
+           lines << "#{metric_name('operation_duration_ms')}{quantile=\"0.95\"} #{stats[:performance][:p95_duration_ms]}"
+           lines << "#{metric_name('operation_duration_ms')}{quantile=\"0.99\"} #{stats[:performance][:p99_duration_ms]}"
+           lines << "#{metric_name('operation_duration_ms_sum')} #{stats[:performance][:avg_duration_ms] * stats[:performance][:total_operations]}"
+           lines << "#{metric_name('operation_duration_ms_count')} #{stats[:performance][:total_operations]}"
+           lines << ""
+         end
+
+         lines
+       end
+
+       def export_error_metrics
+         stats = @metrics_collector.statistics
+         lines = []
+
+         # Total errors
+         lines << "# HELP #{metric_name('errors_total')} Total number of errors"
+         lines << "# TYPE #{metric_name('errors_total')} counter"
+         lines << "#{metric_name('errors_total')} #{stats.dig(:errors, :total) || 0}"
+         lines << ""
+
+         # Errors by type
+         if stats.dig(:errors, :by_type)
+           lines << "# HELP #{metric_name('errors_by_type')} Errors grouped by type"
+           lines << "# TYPE #{metric_name('errors_by_type')} counter"
+           stats[:errors][:by_type].each do |error_type, count|
+             lines << "#{metric_name('errors_by_type')}{error=\"#{sanitize_label(error_type)}\"} #{count}"
+           end
+           lines << ""
+         end
+
+         lines
+       end
+
+       def export_custom_metrics
+         lines = []
+
+         @custom_metrics.each do |name, metric|
+           full_name = metric_name(name)
+           lines << "# HELP #{full_name} #{metric[:help]}"
+           lines << "# TYPE #{full_name} gauge"
+
+           if metric[:labels].empty?
+             lines << "#{full_name} #{metric[:value]}"
+           else
+             label_str = metric[:labels].map { |k, v| "#{k}=\"#{sanitize_label(v)}\"" }.join(",")
+             lines << "#{full_name}{#{label_str}} #{metric[:value]}"
+           end
+           lines << ""
+         end
+
+         lines
+       end
+
+       def export_system_metrics
+         lines = []
+
+         # Version info
+         lines << "# HELP #{metric_name('info')} DecisionAgent version info"
+         lines << "# TYPE #{metric_name('info')} gauge"
+         lines << "#{metric_name('info')}{version=\"#{DecisionAgent::VERSION}\"} 1"
+         lines << ""
+
+         # Metrics count
+         counts = @metrics_collector.metrics_count
+         lines << "# HELP #{metric_name('metrics_stored')} Number of metrics stored in memory"
+         lines << "# TYPE #{metric_name('metrics_stored')} gauge"
+         counts.each do |type, count|
+           lines << "#{metric_name('metrics_stored')}{type=\"#{type}\"} #{count}"
+         end
+         lines << ""
+
+         lines
+       end
+
+       def metric_name(name)
+         "#{@namespace}_#{sanitize_name(name)}"
+       end
+
+       def sanitize_name(name)
+         name.to_s.gsub(/[^a-zA-Z0-9_]/, "_")
+       end
+
+       def sanitize_label(value)
+         value.to_s.gsub("\\", "\\\\").gsub('"', '\\"').gsub("\n", "\\n")
+       end
+
+       def counter_metric(name, value)
+         { name: name, type: "counter", value: value }
+       end
+
+       def gauge_metric(name, value)
+         { name: name, type: "gauge", value: value }
+       end
+
+       def info_metric(name, value)
+         { name: name, type: "info", value: value }
+       end
+     end
+   end
+ end
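
Note (not part of the diff): a minimal usage sketch of the exporter above. The StubCollector is hypothetical and only implements the two methods the exporter actually calls, #statistics and #metrics_count; the gem's real collector lives in data/lib/decision_agent/monitoring/metrics_collector.rb and its API is not shown in this hunk.

require "time"
require "decision_agent" # assumes the gem's top-level file loads the monitoring classes

# Hypothetical stand-in for the gem's MetricsCollector: the exporter only
# reads #statistics and #metrics_count from the object it is given.
class StubCollector
  def statistics
    {
      decisions: { total: 42, avg_confidence: 0.87, avg_duration_ms: 12.5 },
      performance: { success_rate: 0.99, avg_duration_ms: 12.5, p95_duration_ms: 30.0,
                     p99_duration_ms: 45.0, total_operations: 42 },
      errors: { total: 1, by_type: { "TimeoutError" => 1 } }
    }
  end

  def metrics_count
    { decisions: 42, errors: 1 }
  end
end

exporter = DecisionAgent::Monitoring::PrometheusExporter.new(
  metrics_collector: StubCollector.new,
  namespace: "decision_agent"
)
exporter.register_kpi(name: "orders approved", value: 17, labels: { region: "eu" })

# #export returns the Prometheus text exposition format; serve it with
# CONTENT_TYPE, e.g. from a Rack endpoint:
#   [200, { "Content-Type" => DecisionAgent::Monitoring::PrometheusExporter::CONTENT_TYPE }, [exporter.export]]
puts exporter.export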
data/lib/decision_agent/monitoring/storage/activerecord_adapter.rb
@@ -0,0 +1,253 @@
+ # frozen_string_literal: true
+
+ require_relative "base_adapter"
+
+ module DecisionAgent
+   module Monitoring
+     module Storage
+       # ActiveRecord adapter for persistent database storage
+       class ActiveRecordAdapter < BaseAdapter
+         def initialize
+           super
+           validate_models!
+         end
+
+         def record_decision(decision, context, confidence: nil, evaluations_count: 0, duration_ms: nil, status: nil)
+           ::DecisionLog.create!(
+             decision: decision,
+             context: context.to_json,
+             confidence: confidence,
+             evaluations_count: evaluations_count,
+             duration_ms: duration_ms,
+             status: status
+           )
+         rescue StandardError => e
+           warn "Failed to record decision to database: #{e.message}"
+         end
+
+         def record_evaluation(evaluator_name, score: nil, success: nil, duration_ms: nil, details: {})
+           ::EvaluationMetric.create!(
+             evaluator_name: evaluator_name,
+             score: score,
+             success: success,
+             duration_ms: duration_ms,
+             details: details.to_json
+           )
+         rescue StandardError => e
+           warn "Failed to record evaluation to database: #{e.message}"
+         end
+
+         def record_performance(operation, duration_ms: nil, status: nil, metadata: {})
+           ::PerformanceMetric.create!(
+             operation: operation,
+             duration_ms: duration_ms,
+             status: status,
+             metadata: metadata.to_json
+           )
+         rescue StandardError => e
+           warn "Failed to record performance to database: #{e.message}"
+         end
+
+         def record_error(error_type, message: nil, stack_trace: nil, severity: nil, context: {})
+           ::ErrorMetric.create!(
+             error_type: error_type,
+             message: message,
+             stack_trace: stack_trace&.to_json,
+             severity: severity,
+             context: context.to_json
+           )
+         rescue StandardError => e
+           warn "Failed to record error to database: #{e.message}"
+         end
+
+         def statistics(time_range: 3600)
+           decisions = ::DecisionLog.recent(time_range)
+           evaluations = ::EvaluationMetric.recent(time_range)
+           performance = ::PerformanceMetric.recent(time_range)
+           errors = ::ErrorMetric.recent(time_range)
+
+           {
+             decisions: decision_statistics(decisions, time_range),
+             evaluations: evaluation_statistics(evaluations),
+             performance: performance_statistics(performance, time_range),
+             errors: error_statistics(errors)
+           }
+         rescue StandardError => e
+           warn "Failed to retrieve statistics from database: #{e.message}"
+           default_statistics
+         end
+
+         def time_series(metric_type, bucket_size: 60, time_range: 3600)
+           case metric_type
+           when :decisions
+             decisions_time_series(bucket_size, time_range)
+           when :evaluations
+             evaluations_time_series(bucket_size, time_range)
+           when :performance
+             performance_time_series(bucket_size, time_range)
+           when :errors
+             errors_time_series(bucket_size, time_range)
+           else
+             { data: [], timestamps: [] }
+           end
+         rescue StandardError => e
+           warn "Failed to retrieve time series from database: #{e.message}"
+           { data: [], timestamps: [] }
+         end
+
+         def metrics_count
+           {
+             decisions: ::DecisionLog.count,
+             evaluations: ::EvaluationMetric.count,
+             performance: ::PerformanceMetric.count,
+             errors: ::ErrorMetric.count
+           }
+         rescue StandardError => e
+           warn "Failed to get metrics count from database: #{e.message}"
+           { decisions: 0, evaluations: 0, performance: 0, errors: 0 }
+         end
+
+         def cleanup(older_than:)
+           cutoff_time = Time.now - older_than
+           count = 0
+
+           count += ::DecisionLog.where("created_at < ?", cutoff_time).delete_all
+           count += ::EvaluationMetric.where("created_at < ?", cutoff_time).delete_all
+           count += ::PerformanceMetric.where("created_at < ?", cutoff_time).delete_all
+           count += ::ErrorMetric.where("created_at < ?", cutoff_time).delete_all
+
+           count
+         rescue StandardError => e
+           warn "Failed to cleanup old metrics from database: #{e.message}"
+           0
+         end
+
+         def self.available?
+           defined?(ActiveRecord) &&
+             defined?(::DecisionLog) &&
+             defined?(::EvaluationMetric) &&
+             defined?(::PerformanceMetric) &&
+             defined?(::ErrorMetric)
+         end
+
+         private
+
+         def decision_statistics(decisions, time_range)
+           {
+             total: decisions.count,
+             by_decision: decisions.group(:decision).count,
+             average_confidence: decisions.where.not(confidence: nil).average(:confidence).to_f,
+             success_rate: ::DecisionLog.success_rate(time_range: time_range)
+           }
+         end
+
+         def evaluation_statistics(evaluations)
+           {
+             total: evaluations.count,
+             by_evaluator: evaluations.group(:evaluator_name).count,
+             average_score: evaluations.where.not(score: nil).average(:score).to_f,
+             success_rate_by_evaluator: evaluations.successful.group(:evaluator_name).count
+           }
+         end
+
+         def performance_statistics(performance, time_range)
+           {
+             total: performance.count,
+             average_duration_ms: performance.average_duration(time_range: time_range),
+             p50: performance.p50(time_range: time_range),
+             p95: performance.p95(time_range: time_range),
+             p99: performance.p99(time_range: time_range),
+             success_rate: performance.success_rate(time_range: time_range)
+           }
+         end
+
+         def error_statistics(errors)
+           {
+             total: errors.count,
+             by_type: errors.group(:error_type).count,
+             by_severity: errors.group(:severity).count,
+             critical_count: errors.critical.count
+           }
+         end
+
+         def validate_models!
+           required_models = %w[DecisionLog EvaluationMetric PerformanceMetric ErrorMetric]
+           missing_models = required_models.reject { |model| Object.const_defined?(model) }
+
+           return if missing_models.empty?
+
+           raise "Missing required models: #{missing_models.join(', ')}. " \
+                 "Run 'rails generate decision_agent:install --monitoring' to create them."
+         end
+
+         def decisions_time_series(bucket_size, time_range)
+           counts = ::DecisionLog.recent(time_range)
+                                 .group(time_bucket_sql(:created_at, bucket_size))
+                                 .count
+
+           format_time_series(counts)
+         end
+
+         def evaluations_time_series(bucket_size, time_range)
+           counts = ::EvaluationMetric.recent(time_range)
+                                      .group(time_bucket_sql(:created_at, bucket_size))
+                                      .count
+
+           format_time_series(counts)
+         end
+
+         def performance_time_series(bucket_size, time_range)
+           durations = ::PerformanceMetric.recent(time_range)
+                                          .where.not(duration_ms: nil)
+                                          .group(time_bucket_sql(:created_at, bucket_size))
+                                          .average(:duration_ms)
+
+           format_time_series(durations)
+         end
+
+         def errors_time_series(bucket_size, time_range)
+           counts = ::ErrorMetric.recent(time_range)
+                                 .group(time_bucket_sql(:created_at, bucket_size))
+                                 .count
+
+           format_time_series(counts)
+         end
+
+         def time_bucket_sql(column, bucket_size)
+           adapter = ActiveRecord::Base.connection.adapter_name.downcase
+
+           case adapter
+           when /postgres/
+             "(EXTRACT(EPOCH FROM #{column})::bigint / #{bucket_size}) * #{bucket_size}"
+           when /mysql/
+             "(UNIX_TIMESTAMP(#{column}) DIV #{bucket_size}) * #{bucket_size}"
+           when /sqlite/
+             "(CAST(strftime('%s', #{column}) AS INTEGER) / #{bucket_size}) * #{bucket_size}"
+           else
+             # Fallback: use group by timestamp truncated to bucket
+             column.to_s
+           end
+         end
+
+         def format_time_series(data)
+           timestamps = data.keys.sort
+           values = timestamps.map { |ts| data[ts] }
+
+           {
+             timestamps: timestamps.map { |ts| Time.at(ts).iso8601 },
+             data: values.map(&:to_f)
+           }
+         end
+
+         def default_statistics
+           {
+             decisions: { total: 0, by_decision: {}, average_confidence: 0.0, success_rate: 0.0 },
+             evaluations: { total: 0, by_evaluator: {}, average_score: 0.0, success_rate_by_evaluator: {} },
+             performance: { total: 0, average_duration_ms: 0.0, p50: 0.0, p95: 0.0, p99: 0.0, success_rate: 0.0 },
+             errors: { total: 0, by_type: {}, by_severity: {}, critical_count: 0 }
+           }
+         end
+       end
+     end
+   end
+ end
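
Note (illustration, not part of the diff): each backend-specific SQL expression in time_bucket_sql reduces a row's created_at to the start of a fixed-width epoch bucket with the same integer arithmetic, (epoch / bucket_size) * bucket_size, and format_time_series then sorts the bucket keys and maps them back to ISO-8601 strings. The Ruby sketch below reproduces that arithmetic with hypothetical timestamps (.utc is added here for deterministic output; the adapter itself calls Time.at(ts).iso8601).

require "time"

bucket_size = 60
created_ats = [Time.utc(2024, 1, 1, 12, 0, 5),
               Time.utc(2024, 1, 1, 12, 0, 59),
               Time.utc(2024, 1, 1, 12, 1, 10)] # hypothetical rows

# Same bucketing the SQL performs: floor the epoch to the bucket boundary.
counts = created_ats.group_by { |t| (t.to_i / bucket_size) * bucket_size }
                    .transform_values(&:size)
# => { 1704110400 => 2, 1704110460 => 1 }

# format_time_series then sorts the bucket keys and converts them to ISO-8601.
keys = counts.keys.sort
series = { timestamps: keys.map { |ts| Time.at(ts).utc.iso8601 },
           data: keys.map { |ts| counts[ts].to_f } }
# => { timestamps: ["2024-01-01T12:00:00Z", "2024-01-01T12:01:00Z"], data: [2.0, 1.0] }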
data/lib/decision_agent/monitoring/storage/base_adapter.rb
@@ -0,0 +1,90 @@
+ # frozen_string_literal: true
+
+ module DecisionAgent
+   module Monitoring
+     module Storage
+       # Base adapter interface for metrics storage
+       # Subclasses must implement all abstract methods
+       class BaseAdapter
+         # Record a decision
+         # @param decision [String] The decision made
+         # @param context [Hash] Decision context
+         # @param confidence [Float, nil] Confidence score (0-1)
+         # @param evaluations_count [Integer] Number of evaluations
+         # @param duration_ms [Float, nil] Decision duration in milliseconds
+         # @param status [String, nil] Decision status (success, failure, error)
+         # @return [void]
+         def record_decision(decision, context, confidence: nil, evaluations_count: 0, duration_ms: nil, status: nil)
+           raise NotImplementedError, "#{self.class} must implement #record_decision"
+         end
+
+         # Record an evaluation
+         # @param evaluator_name [String] Name of the evaluator
+         # @param score [Float, nil] Evaluation score
+         # @param success [Boolean, nil] Whether evaluation succeeded
+         # @param duration_ms [Float, nil] Evaluation duration
+         # @param details [Hash] Additional details
+         # @return [void]
+         def record_evaluation(evaluator_name, score: nil, success: nil, duration_ms: nil, details: {})
+           raise NotImplementedError, "#{self.class} must implement #record_evaluation"
+         end
+
+         # Record a performance metric
+         # @param operation [String] Operation name
+         # @param duration_ms [Float, nil] Duration in milliseconds
+         # @param status [String, nil] Status (success, failure, error)
+         # @param metadata [Hash] Additional metadata
+         # @return [void]
+         def record_performance(operation, duration_ms: nil, status: nil, metadata: {})
+           raise NotImplementedError, "#{self.class} must implement #record_performance"
+         end
+
+         # Record an error
+         # @param error_type [String] Type of error
+         # @param message [String, nil] Error message
+         # @param stack_trace [Array, nil] Stack trace
+         # @param severity [String, nil] Error severity (low, medium, high, critical)
+         # @param context [Hash] Error context
+         # @return [void]
+         def record_error(error_type, message: nil, stack_trace: nil, severity: nil, context: {})
+           raise NotImplementedError, "#{self.class} must implement #record_error"
+         end
+
+         # Get statistics for a time range
+         # @param time_range [Integer] Time range in seconds
+         # @return [Hash] Statistics summary
+         def statistics(time_range: 3600)
+           raise NotImplementedError, "#{self.class} must implement #statistics"
+         end
+
+         # Get time series data
+         # @param metric_type [Symbol] Type of metric (:decisions, :evaluations, :performance, :errors)
+         # @param bucket_size [Integer] Bucket size in seconds
+         # @param time_range [Integer] Time range in seconds
+         # @return [Hash] Time series data
+         def time_series(metric_type, bucket_size: 60, time_range: 3600)
+           raise NotImplementedError, "#{self.class} must implement #time_series"
+         end
+
+         # Get count of metrics stored
+         # @return [Hash] Count by metric type
+         def metrics_count
+           raise NotImplementedError, "#{self.class} must implement #metrics_count"
+         end
+
+         # Clean up old metrics
+         # @param older_than [Integer] Remove metrics older than this many seconds
+         # @return [Integer] Number of metrics removed
+         def cleanup(older_than:)
+           raise NotImplementedError, "#{self.class} must implement #cleanup"
+         end
+
+         # Check if adapter is available (dependencies installed)
+         # @return [Boolean]
+         def self.available?
+           raise NotImplementedError, "#{self} must implement .available?"
+         end
+       end
+     end
+   end
+ end
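
Note (hypothetical, not shipped with the gem): a minimal custom backend satisfying the BaseAdapter contract above. It logs each record and keeps only per-type counters, returning empty time series; method names and signatures follow the abstract interface shown in this hunk.

require "logger"
require "decision_agent" # assumes the gem's top-level file loads the monitoring classes

class LoggingAdapter < DecisionAgent::Monitoring::Storage::BaseAdapter
  def initialize(logger: Logger.new($stdout))
    @logger = logger
    @counts = Hash.new(0)
  end

  def record_decision(decision, context, confidence: nil, evaluations_count: 0, duration_ms: nil, status: nil)
    @counts[:decisions] += 1
    @logger.info("decision=#{decision} status=#{status} confidence=#{confidence}")
  end

  def record_evaluation(evaluator_name, score: nil, success: nil, duration_ms: nil, details: {})
    @counts[:evaluations] += 1
    @logger.info("evaluation=#{evaluator_name} score=#{score}")
  end

  def record_performance(operation, duration_ms: nil, status: nil, metadata: {})
    @counts[:performance] += 1
    @logger.info("operation=#{operation} duration_ms=#{duration_ms}")
  end

  def record_error(error_type, message: nil, stack_trace: nil, severity: nil, context: {})
    @counts[:errors] += 1
    @logger.error("error=#{error_type} severity=#{severity} message=#{message}")
  end

  def statistics(time_range: 3600)
    { decisions: { total: @counts[:decisions] }, errors: { total: @counts[:errors] } }
  end

  def time_series(metric_type, bucket_size: 60, time_range: 3600)
    { data: [], timestamps: [] } # this backend keeps no history
  end

  def metrics_count
    @counts.dup
  end

  def cleanup(older_than:)
    0 # nothing stored, nothing to remove
  end

  def self.available?
    true
  end
end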