aidp 0.7.0 → 0.8.1

This diff shows the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
Files changed (119)
  1. checksums.yaml +4 -4
  2. data/README.md +60 -214
  3. data/bin/aidp +1 -1
  4. data/lib/aidp/analysis/kb_inspector.rb +38 -23
  5. data/lib/aidp/analysis/seams.rb +2 -31
  6. data/lib/aidp/analysis/tree_sitter_grammar_loader.rb +1 -13
  7. data/lib/aidp/analysis/tree_sitter_scan.rb +3 -20
  8. data/lib/aidp/analyze/error_handler.rb +2 -75
  9. data/lib/aidp/analyze/json_file_storage.rb +292 -0
  10. data/lib/aidp/analyze/progress.rb +12 -0
  11. data/lib/aidp/analyze/progress_visualizer.rb +12 -17
  12. data/lib/aidp/analyze/ruby_maat_integration.rb +13 -31
  13. data/lib/aidp/analyze/runner.rb +256 -87
  14. data/lib/aidp/cli/jobs_command.rb +100 -432
  15. data/lib/aidp/cli.rb +309 -239
  16. data/lib/aidp/config.rb +298 -10
  17. data/lib/aidp/debug_logger.rb +195 -0
  18. data/lib/aidp/debug_mixin.rb +187 -0
  19. data/lib/aidp/execute/progress.rb +9 -0
  20. data/lib/aidp/execute/runner.rb +221 -40
  21. data/lib/aidp/execute/steps.rb +17 -7
  22. data/lib/aidp/execute/workflow_selector.rb +211 -0
  23. data/lib/aidp/harness/completion_checker.rb +268 -0
  24. data/lib/aidp/harness/condition_detector.rb +1526 -0
  25. data/lib/aidp/harness/config_loader.rb +373 -0
  26. data/lib/aidp/harness/config_manager.rb +382 -0
  27. data/lib/aidp/harness/config_schema.rb +1006 -0
  28. data/lib/aidp/harness/config_validator.rb +355 -0
  29. data/lib/aidp/harness/configuration.rb +477 -0
  30. data/lib/aidp/harness/enhanced_runner.rb +494 -0
  31. data/lib/aidp/harness/error_handler.rb +616 -0
  32. data/lib/aidp/harness/provider_config.rb +423 -0
  33. data/lib/aidp/harness/provider_factory.rb +306 -0
  34. data/lib/aidp/harness/provider_manager.rb +1269 -0
  35. data/lib/aidp/harness/provider_type_checker.rb +88 -0
  36. data/lib/aidp/harness/runner.rb +411 -0
  37. data/lib/aidp/harness/state/errors.rb +28 -0
  38. data/lib/aidp/harness/state/metrics.rb +219 -0
  39. data/lib/aidp/harness/state/persistence.rb +128 -0
  40. data/lib/aidp/harness/state/provider_state.rb +132 -0
  41. data/lib/aidp/harness/state/ui_state.rb +68 -0
  42. data/lib/aidp/harness/state/workflow_state.rb +123 -0
  43. data/lib/aidp/harness/state_manager.rb +586 -0
  44. data/lib/aidp/harness/status_display.rb +888 -0
  45. data/lib/aidp/harness/ui/base.rb +16 -0
  46. data/lib/aidp/harness/ui/enhanced_tui.rb +545 -0
  47. data/lib/aidp/harness/ui/enhanced_workflow_selector.rb +252 -0
  48. data/lib/aidp/harness/ui/error_handler.rb +132 -0
  49. data/lib/aidp/harness/ui/frame_manager.rb +361 -0
  50. data/lib/aidp/harness/ui/job_monitor.rb +500 -0
  51. data/lib/aidp/harness/ui/navigation/main_menu.rb +311 -0
  52. data/lib/aidp/harness/ui/navigation/menu_formatter.rb +120 -0
  53. data/lib/aidp/harness/ui/navigation/menu_item.rb +142 -0
  54. data/lib/aidp/harness/ui/navigation/menu_state.rb +139 -0
  55. data/lib/aidp/harness/ui/navigation/submenu.rb +202 -0
  56. data/lib/aidp/harness/ui/navigation/workflow_selector.rb +176 -0
  57. data/lib/aidp/harness/ui/progress_display.rb +280 -0
  58. data/lib/aidp/harness/ui/question_collector.rb +141 -0
  59. data/lib/aidp/harness/ui/spinner_group.rb +184 -0
  60. data/lib/aidp/harness/ui/spinner_helper.rb +152 -0
  61. data/lib/aidp/harness/ui/status_manager.rb +312 -0
  62. data/lib/aidp/harness/ui/status_widget.rb +280 -0
  63. data/lib/aidp/harness/ui/workflow_controller.rb +312 -0
  64. data/lib/aidp/harness/user_interface.rb +2381 -0
  65. data/lib/aidp/provider_manager.rb +131 -7
  66. data/lib/aidp/providers/anthropic.rb +28 -103
  67. data/lib/aidp/providers/base.rb +170 -0
  68. data/lib/aidp/providers/cursor.rb +52 -181
  69. data/lib/aidp/providers/gemini.rb +24 -107
  70. data/lib/aidp/providers/macos_ui.rb +99 -5
  71. data/lib/aidp/providers/opencode.rb +194 -0
  72. data/lib/aidp/storage/csv_storage.rb +172 -0
  73. data/lib/aidp/storage/file_manager.rb +214 -0
  74. data/lib/aidp/storage/json_storage.rb +140 -0
  75. data/lib/aidp/version.rb +1 -1
  76. data/lib/aidp.rb +54 -39
  77. data/templates/COMMON/AGENT_BASE.md +11 -0
  78. data/templates/EXECUTE/00_PRD.md +4 -4
  79. data/templates/EXECUTE/02_ARCHITECTURE.md +5 -4
  80. data/templates/EXECUTE/07_TEST_PLAN.md +4 -1
  81. data/templates/EXECUTE/08_TASKS.md +4 -4
  82. data/templates/EXECUTE/10_IMPLEMENTATION_AGENT.md +4 -4
  83. data/templates/README.md +279 -0
  84. data/templates/aidp-development.yml.example +373 -0
  85. data/templates/aidp-minimal.yml.example +48 -0
  86. data/templates/aidp-production.yml.example +475 -0
  87. data/templates/aidp.yml.example +598 -0
  88. metadata +93 -69
  89. data/lib/aidp/analyze/agent_personas.rb +0 -71
  90. data/lib/aidp/analyze/agent_tool_executor.rb +0 -439
  91. data/lib/aidp/analyze/data_retention_manager.rb +0 -421
  92. data/lib/aidp/analyze/database.rb +0 -260
  93. data/lib/aidp/analyze/dependencies.rb +0 -335
  94. data/lib/aidp/analyze/export_manager.rb +0 -418
  95. data/lib/aidp/analyze/focus_guidance.rb +0 -517
  96. data/lib/aidp/analyze/incremental_analyzer.rb +0 -533
  97. data/lib/aidp/analyze/language_analysis_strategies.rb +0 -897
  98. data/lib/aidp/analyze/large_analysis_progress.rb +0 -499
  99. data/lib/aidp/analyze/memory_manager.rb +0 -339
  100. data/lib/aidp/analyze/metrics_storage.rb +0 -336
  101. data/lib/aidp/analyze/parallel_processor.rb +0 -454
  102. data/lib/aidp/analyze/performance_optimizer.rb +0 -691
  103. data/lib/aidp/analyze/repository_chunker.rb +0 -697
  104. data/lib/aidp/analyze/static_analysis_detector.rb +0 -577
  105. data/lib/aidp/analyze/storage.rb +0 -655
  106. data/lib/aidp/analyze/tool_configuration.rb +0 -441
  107. data/lib/aidp/analyze/tool_modernization.rb +0 -750
  108. data/lib/aidp/database/pg_adapter.rb +0 -148
  109. data/lib/aidp/database_config.rb +0 -69
  110. data/lib/aidp/database_connection.rb +0 -72
  111. data/lib/aidp/job_manager.rb +0 -41
  112. data/lib/aidp/jobs/base_job.rb +0 -45
  113. data/lib/aidp/jobs/provider_execution_job.rb +0 -83
  114. data/lib/aidp/project_detector.rb +0 -117
  115. data/lib/aidp/providers/agent_supervisor.rb +0 -348
  116. data/lib/aidp/providers/supervised_base.rb +0 -317
  117. data/lib/aidp/providers/supervised_cursor.rb +0 -22
  118. data/lib/aidp/sync.rb +0 -13
  119. data/lib/aidp/workspace.rb +0 -19
--- data/lib/aidp/analyze/memory_manager.rb
+++ /dev/null
@@ -1,339 +0,0 @@
- # frozen_string_literal: true
-
- require "json"
- require "yaml"
- require "digest"
-
- module Aidp
-   class MemoryManager
-     # Memory management strategies
-     MEMORY_STRATEGIES = %w[streaming chunking caching garbage_collection].freeze
-
-     # Default configuration
-     DEFAULT_CONFIG = {
-       max_memory_usage: 1024 * 1024 * 1024, # 1GB
-       chunk_size: 1000,
-       cache_size: 100,
-       gc_threshold: 0.8, # 80% memory usage triggers GC
-       streaming_enabled: true,
-       compression_enabled: false
-     }.freeze
-
-     def initialize(config = {})
-       @config = DEFAULT_CONFIG.merge(config)
-       @cache = {}
-       @memory_usage = 0
-       @peak_memory_usage = 0
-       @gc_count = 0
-       @streaming_data = []
-     end
-
-     # Process large dataset with memory management
-     def process_large_dataset(dataset, processor_method, options = {})
-       strategy = options[:strategy] || "streaming"
-
-       case strategy
-       when "streaming"
-         process_with_streaming(dataset, processor_method, options)
-       when "chunking"
-         process_with_chunking(dataset, processor_method, options)
-       when "caching"
-         process_with_caching(dataset, processor_method, options)
-       else
-         raise "Unknown memory management strategy: #{strategy}"
-       end
-     end
-
-     # Process data with streaming approach
-     def process_with_streaming(dataset, processor_method, options = {})
-       results = {
-         processed_items: 0,
-         memory_usage: [],
-         gc_count: 0,
-         results: [],
-         errors: []
-       }
-
-       dataset.each_with_index do |item, index|
-         # Check memory usage
-         current_memory = get_memory_usage
-         results[:memory_usage] << current_memory
-
-         # Trigger garbage collection if needed
-         if should_trigger_gc?(current_memory)
-           trigger_garbage_collection
-           results[:gc_count] += 1
-         end
-
-         # Process item
-         result = processor_method.call(item, options)
-         results[:results] << result
-         results[:processed_items] += 1
-
-         # Update memory tracking
-         update_memory_tracking(current_memory)
-       end
-
-       results
-     end
-
-     # Process data with chunking approach
-     def process_with_chunking(dataset, processor_method, options = {})
-       chunk_size = options[:chunk_size] || @config[:chunk_size]
-       results = {
-         processed_chunks: 0,
-         processed_items: 0,
-         memory_usage: [],
-         gc_count: 0,
-         results: [],
-         errors: []
-       }
-
-       dataset.each_slice(chunk_size) do |chunk|
-         # Check memory before processing chunk
-         pre_chunk_memory = get_memory_usage
-         results[:memory_usage] << pre_chunk_memory
-
-         # Process chunk
-         chunk_results = process_chunk(chunk, processor_method, options)
-         results[:results].concat(chunk_results[:results])
-         results[:errors].concat(chunk_results[:errors])
-         results[:processed_items] += chunk_results[:processed_items]
-
-         # Trigger garbage collection after chunk
-         if should_trigger_gc?(pre_chunk_memory)
-           trigger_garbage_collection
-           results[:gc_count] += 1
-         end
-
-         results[:processed_chunks] += 1
-         update_memory_tracking(pre_chunk_memory)
-       end
-
-       results
-     end
-
-     # Process data with caching approach
-     def process_with_caching(dataset, processor_method, options = {})
-       cache_size = options[:cache_size] || @config[:cache_size]
-       results = {
-         processed_items: 0,
-         cache_hits: 0,
-         cache_misses: 0,
-         memory_usage: [],
-         gc_count: 0,
-         results: [],
-         errors: []
-       }
-
-       begin
-         dataset.each_with_index do |item, index|
-           # Check memory usage
-           current_memory = get_memory_usage
-           results[:memory_usage] << current_memory
-
-           # Check cache
-           cache_key = generate_cache_key(item)
-           if @cache.key?(cache_key)
-             results[:cache_hits] += 1
-             result = @cache[cache_key]
-           else
-             results[:cache_misses] += 1
-             begin
-               result = processor_method.call(item, options)
-               cache_result(cache_key, result, cache_size)
-             rescue => e
-               results[:errors] << {
-                 item_index: index,
-                 error: e.message
-               }
-               next
-             end
-           end
-
-           results[:results] << result
-           results[:processed_items] += 1
-
-           # Trigger garbage collection if needed
-           if should_trigger_gc?(current_memory)
-             trigger_garbage_collection
-             results[:gc_count] += 1
-           end
-
-           update_memory_tracking(current_memory)
-         end
-       rescue => e
-         results[:errors] << {
-           type: "caching_error",
-           message: e.message
-         }
-       end
-
-       results
-     end
-
-     # Optimize memory usage
-     def optimize_memory_usage(options = {})
-       optimizations = {
-         memory_before: get_memory_usage,
-         optimizations_applied: [],
-         memory_after: 0,
-         memory_saved: 0
-       }
-
-       # Clear cache if memory usage is high
-       if get_memory_usage > @config[:max_memory_usage] * 0.8
-         clear_cache
-         optimizations[:optimizations_applied] << "cache_cleared"
-       end
-
-       # Trigger garbage collection
-       trigger_garbage_collection
-       optimizations[:optimizations_applied] << "garbage_collection"
-
-       # Compress data if enabled
-       if @config[:compression_enabled]
-         compress_data
-         optimizations[:optimizations_applied] << "data_compression"
-       end
-
-       optimizations[:memory_after] = get_memory_usage
-       optimizations[:memory_saved] = optimizations[:memory_before] - optimizations[:memory_after]
-
-       optimizations
-     end
-
-     # Get memory statistics
-     def get_memory_statistics
-       {
-         current_memory: get_memory_usage,
-         peak_memory: @peak_memory_usage,
-         cache_size: @cache.length,
-         gc_count: @gc_count,
-         streaming_data_size: @streaming_data.length,
-         memory_limit: @config[:max_memory_usage],
-         memory_usage_percentage: (get_memory_usage.to_f / @config[:max_memory_usage] * 100).round(2)
-       }
-     end
-
-     # Clear memory
-     def clear_memory
-       clear_cache
-       @streaming_data.clear
-       trigger_garbage_collection
-
-       {
-         memory_cleared: true,
-         memory_after_clear: get_memory_usage
-       }
-     end
-
-     # Monitor memory usage
-     def monitor_memory_usage(duration = 60, interval = 1)
-       monitoring_data = {
-         start_time: Time.now,
-         duration: duration,
-         interval: interval,
-         measurements: [],
-         alerts: []
-       }
-
-       start_time = Time.now
-       end_time = start_time + duration
-
-       while Time.now < end_time
-         current_memory = get_memory_usage
-         current_time = Time.now
-
-         measurement = {
-           timestamp: current_time,
-           memory_usage: current_memory,
-           memory_percentage: (current_memory.to_f / @config[:max_memory_usage] * 100).round(2)
-         }
-
-         monitoring_data[:measurements] << measurement
-
-         # Check for memory alerts
-         if current_memory > @config[:max_memory_usage] * 0.9
-           monitoring_data[:alerts] << {
-             timestamp: current_time,
-             type: "high_memory_usage",
-             message: "Memory usage is at #{measurement[:memory_percentage]}%"
-           }
-         end
-
-         sleep(interval)
-       end
-
-       monitoring_data[:end_time] = Time.now
-       monitoring_data
-     end
-
-     private
-
-     def process_chunk(chunk, processor_method, options)
-       results = {
-         processed_items: 0,
-         results: [],
-         errors: []
-       }
-
-       chunk.each_with_index do |item, index|
-         result = processor_method.call(item, options)
-         results[:results] << result
-         results[:processed_items] += 1
-       end
-
-       results
-     end
-
-     def should_trigger_gc?(current_memory)
-       current_memory > @config[:max_memory_usage] * @config[:gc_threshold]
-     end
-
-     def trigger_garbage_collection
-       GC.start
-       @gc_count += 1
-     end
-
-     def get_memory_usage
-       # Get current memory usage in bytes
-       Process.getrusage(:SELF).maxrss * 1024
-     end
-
-     def update_memory_tracking(current_memory)
-       @memory_usage = current_memory
-       @peak_memory_usage = [@peak_memory_usage, current_memory].max
-     end
-
-     def generate_cache_key(item)
-       # Generate a cache key for the item
-       Digest::MD5.hexdigest(item.to_json)
-     rescue JSON::GeneratorError
-       # Fallback to object_id if JSON serialization fails
-       "item_#{item.object_id}"
-     end
-
-     def cache_result(key, result, max_cache_size)
-       # Add result to cache
-       @cache[key] = result
-
-       # Remove oldest entries if cache is full
-       return unless @cache.length > max_cache_size
-
-       oldest_key = @cache.keys.first
-       @cache.delete(oldest_key)
-     end
-
-     def clear_cache
-       @cache.clear
-     end
-
-     def compress_data
-       # Compress streaming data if it's large
-       return unless @streaming_data.length > 1000
-
-       @streaming_data = @streaming_data.last(500) # Keep only recent data
-     end
-   end
- end
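
For context, a minimal sketch of how the class removed above was driven. The dataset and the worker lambda here are hypothetical; process_large_dataset, the strategy: option, and get_memory_statistics come straight from the deleted code:

require "aidp/analyze/memory_manager" # load path as it existed in 0.7.0

# Hypothetical worker: any callable taking (item, options) satisfies the contract,
# since every strategy funnels through processor_method.call(item, options).
count_lines = ->(path, _options) { File.foreach(path).count }

manager = Aidp::MemoryManager.new(chunk_size: 200, gc_threshold: 0.75)

# "chunking" slices the dataset and GCs between slices when usage is high;
# "streaming" and "caching" were the other supported strategies.
report = manager.process_large_dataset(
  Dir.glob("lib/**/*.rb"),
  count_lines,
  strategy: "chunking"
)

puts report[:processed_items]
puts manager.get_memory_statistics[:memory_usage_percentage]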
--- data/lib/aidp/analyze/metrics_storage.rb
+++ /dev/null
@@ -1,336 +0,0 @@
- # frozen_string_literal: true
-
- require "pg"
- require "json"
-
- module Aidp
-   module Analyze
-     class MetricsStorage
-       # Database schema version
-       SCHEMA_VERSION = 1
-
-       def initialize(project_dir = Dir.pwd, db_config = nil)
-         @project_dir = project_dir
-         @db_config = db_config || default_db_config
-         @db = nil
-
-         ensure_database_exists
-       end
-
-       # Store step execution metrics
-       def store_step_metrics(step_name, provider_name, duration, success, metadata = {})
-         ensure_connection
-
-         timestamp = Time.now
-
-         result = @db.exec_params(
-           "INSERT INTO step_executions (step_name, provider_name, duration, success, metadata, created_at) VALUES ($1, $2, $3, $4, $5, $6) RETURNING id",
-           [step_name, provider_name, duration, success, metadata.to_json, timestamp]
-         )
-
-         {
-           id: result[0]["id"],
-           step_name: step_name,
-           provider_name: provider_name,
-           duration: duration,
-           success: success,
-           stored_at: timestamp
-         }
-       end
-
-       # Store provider activity metrics
-       def store_provider_activity(provider_name, step_name, activity_summary)
-         ensure_connection
-
-         timestamp = Time.now
-
-         result = @db.exec_params(
-           "INSERT INTO provider_activities (provider_name, step_name, start_time, end_time, duration, final_state, stuck_detected, created_at) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) RETURNING id",
-           [
-             provider_name,
-             step_name,
-             activity_summary[:start_time],
-             activity_summary[:end_time],
-             activity_summary[:duration],
-             activity_summary[:final_state].to_s,
-             activity_summary[:stuck_detected],
-             timestamp
-           ]
-         )
-
-         {
-           id: result[0]["id"],
-           provider_name: provider_name,
-           step_name: step_name,
-           stored_at: timestamp
-         }
-       end
-
-       # Get step execution statistics
-       def get_step_statistics(step_name = nil, provider_name = nil, limit = 100)
-         ensure_connection
-
-         query = "SELECT * FROM step_executions WHERE 1=1"
-         params = []
-         param_index = 1
-
-         if step_name
-           query += " AND step_name = $#{param_index}"
-           params << step_name
-           param_index += 1
-         end
-
-         if provider_name
-           query += " AND provider_name = $#{param_index}"
-           params << provider_name
-           param_index += 1
-         end
-
-         query += " ORDER BY created_at DESC LIMIT $#{param_index}"
-         params << limit
-
-         results = @db.exec_params(query, params)
-         results.map { |row| parse_step_execution(row) }
-       end
-
-       # Get provider activity statistics
-       def get_provider_activity_statistics(provider_name = nil, step_name = nil, limit = 100)
-         ensure_connection
-
-         query = "SELECT * FROM provider_activities WHERE 1=1"
-         params = []
-         param_index = 1
-
-         if provider_name
-           query += " AND provider_name = $#{param_index}"
-           params << provider_name
-           param_index += 1
-         end
-
-         if step_name
-           query += " AND step_name = $#{param_index}"
-           params << step_name
-           param_index += 1
-         end
-
-         query += " ORDER BY created_at DESC LIMIT $#{param_index}"
-         params << limit
-
-         results = @db.exec_params(query, params)
-         results.map { |row| parse_provider_activity(row) }
-       end
-
-       # Calculate timeout recommendations based on p95 of execution times
-       def calculate_timeout_recommendations
-         ensure_connection
-
-         recommendations = {}
-
-         # Get all step names
-         step_names = @db.exec("SELECT DISTINCT step_name FROM step_executions WHERE success = true")
-
-         step_names.each do |row|
-           step_name = row["step_name"]
-
-           # Get successful executions for this step
-           durations = @db.exec_params(
-             "SELECT duration FROM step_executions WHERE step_name = $1 AND success = true ORDER BY duration",
-             [step_name]
-           ).map { |r| r["duration"].to_f }
-
-           next if durations.empty?
-
-           # Calculate p95
-           p95_index = (durations.length * 0.95).ceil - 1
-           p95_duration = durations[p95_index]
-
-           # Round up to nearest second and add 10% buffer
-           recommended_timeout = (p95_duration * 1.1).ceil
-
-           recommendations[step_name] = {
-             p95_duration: p95_duration,
-             recommended_timeout: recommended_timeout,
-             sample_count: durations.length,
-             min_duration: durations.first,
-             max_duration: durations.last,
-             avg_duration: durations.sum.to_f / durations.length
-           }
-         end
-
-         recommendations
-       end
-
-       # Get overall metrics summary
-       def get_metrics_summary
-         ensure_connection
-
-         summary = {}
-
-         # Total executions
-         total_executions = @db.exec("SELECT COUNT(*) FROM step_executions").first["count"].to_i
-         summary[:total_executions] = total_executions
-
-         # Successful executions
-         successful_executions = @db.exec("SELECT COUNT(*) FROM step_executions WHERE success = true").first["count"].to_i
-         summary[:successful_executions] = successful_executions
-
-         # Success rate
-         summary[:success_rate] = (total_executions > 0) ? (successful_executions.to_f / total_executions * 100).round(2) : 0
-
-         # Average duration
-         avg_duration = @db.exec("SELECT AVG(duration) FROM step_executions WHERE success = true").first["avg"]
-         summary[:average_duration] = avg_duration ? avg_duration.to_f.round(2) : 0
-
-         # Stuck detections
-         stuck_count = @db.exec("SELECT COUNT(*) FROM provider_activities WHERE stuck_detected = true").first["count"].to_i
-         summary[:stuck_detections] = stuck_count
-
-         # Date range
-         date_range = @db.exec("SELECT MIN(created_at), MAX(created_at) FROM step_executions").first
-         if date_range && date_range["min"]
-           summary[:date_range] = {
-             start: Time.parse(date_range["min"]),
-             end: Time.parse(date_range["max"])
-           }
-         end
-
-         summary
-       end
-
-       # Clean up old metrics data
-       def cleanup_old_metrics(retention_days = 30)
-         ensure_connection
-
-         cutoff_time = Time.now - (retention_days * 24 * 60 * 60)
-
-         # Delete old step executions
-         deleted_executions = @db.exec_params(
-           "DELETE FROM step_executions WHERE created_at < $1 RETURNING id",
-           [cutoff_time]
-         ).ntuples
-
-         # Delete old provider activities
-         deleted_activities = @db.exec_params(
-           "DELETE FROM provider_activities WHERE created_at < $1 RETURNING id",
-           [cutoff_time]
-         ).ntuples
-
-         {
-           deleted_executions: deleted_executions,
-           deleted_activities: deleted_activities,
-           cutoff_time: cutoff_time
-         }
-       end
-
-       # Export metrics data
-       def export_metrics(format = :json)
-         ensure_connection
-
-         case format
-         when :json
-           {
-             step_executions: get_step_statistics(nil, nil, 1000),
-             provider_activities: get_provider_activity_statistics(nil, nil, 1000),
-             summary: get_metrics_summary,
-             recommendations: calculate_timeout_recommendations,
-             exported_at: Time.now.iso8601
-           }
-         when :csv
-           # TODO: Implement CSV export
-           raise NotImplementedError, "CSV export not yet implemented"
-         else
-           raise ArgumentError, "Unsupported export format: #{format}"
-         end
-       end
-
-       private
-
-       def default_db_config
-         {
-           host: ENV["AIDP_DB_HOST"] || "localhost",
-           port: ENV["AIDP_DB_PORT"] || 5432,
-           dbname: ENV["AIDP_DB_NAME"] || "aidp",
-           user: ENV["AIDP_DB_USER"] || ENV["USER"],
-           password: ENV["AIDP_DB_PASSWORD"]
-         }
-       end
-
-       def ensure_connection
-         return if @db
-
-         @db = PG.connect(@db_config)
-         @db.type_map_for_results = PG::BasicTypeMapForResults.new(@db)
-       end
-
-       def ensure_database_exists
-         ensure_connection
-
-         # Create step_executions table if it doesn't exist
-         @db.exec(<<~SQL)
-           CREATE TABLE IF NOT EXISTS step_executions (
-             id SERIAL PRIMARY KEY,
-             step_name TEXT NOT NULL,
-             provider_name TEXT NOT NULL,
-             duration REAL NOT NULL,
-             success BOOLEAN NOT NULL,
-             metadata JSONB,
-             created_at TIMESTAMP WITH TIME ZONE NOT NULL
-           )
-         SQL
-
-         # Create provider_activities table if it doesn't exist
-         @db.exec(<<~SQL)
-           CREATE TABLE IF NOT EXISTS provider_activities (
-             id SERIAL PRIMARY KEY,
-             provider_name TEXT NOT NULL,
-             step_name TEXT NOT NULL,
-             start_time TIMESTAMP WITH TIME ZONE,
-             end_time TIMESTAMP WITH TIME ZONE,
-             duration REAL,
-             final_state TEXT,
-             stuck_detected BOOLEAN DEFAULT FALSE,
-             created_at TIMESTAMP WITH TIME ZONE NOT NULL
-           )
-         SQL
-
-         # Create indexes separately
-         @db.exec("CREATE INDEX IF NOT EXISTS idx_step_executions_step_name ON step_executions(step_name)")
-         @db.exec("CREATE INDEX IF NOT EXISTS idx_step_executions_provider_name ON step_executions(provider_name)")
-         @db.exec("CREATE INDEX IF NOT EXISTS idx_step_executions_created_at ON step_executions(created_at)")
-         @db.exec("CREATE INDEX IF NOT EXISTS idx_provider_activities_provider_name ON provider_activities(provider_name)")
-         @db.exec("CREATE INDEX IF NOT EXISTS idx_provider_activities_step_name ON provider_activities(step_name)")
-         @db.exec("CREATE INDEX IF NOT EXISTS idx_provider_activities_created_at ON provider_activities(created_at)")
-
-         # Create metrics_schema_version table if it doesn't exist
-         @db.exec("CREATE TABLE IF NOT EXISTS metrics_schema_version (version INTEGER NOT NULL)")
-         @db.exec_params("INSERT INTO metrics_schema_version (version) VALUES ($1) ON CONFLICT DO NOTHING", [SCHEMA_VERSION])
-       end
-
-       def parse_step_execution(row)
-         {
-           id: row["id"].to_i,
-           step_name: row["step_name"],
-           provider_name: row["provider_name"],
-           duration: row["duration"].to_f,
-           success: row["success"],
-           metadata: row["metadata"] ? JSON.parse(row["metadata"]) : {},
-           created_at: Time.parse(row["created_at"])
-         }
-       end
-
-       def parse_provider_activity(row)
-         {
-           id: row["id"].to_i,
-           provider_name: row["provider_name"],
-           step_name: row["step_name"],
-           start_time: row["start_time"] ? Time.parse(row["start_time"]) : nil,
-           end_time: row["end_time"] ? Time.parse(row["end_time"]) : nil,
-           duration: row["duration"].to_f,
-           final_state: row["final_state"]&.to_sym,
-           stuck_detected: row["stuck_detected"],
-           created_at: Time.parse(row["created_at"])
-         }
-       end
-     end
-   end
- end
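
Likewise, a minimal sketch of how the removed MetricsStorage was used. It required a reachable PostgreSQL server via the pg gem (configured through the AIDP_DB_* environment variables read by default_db_config above); the step and provider names below are illustrative, not from the source:

require "aidp/analyze/metrics_storage" # load path as it existed in 0.7.0

# Construction connects and creates the tables and indexes on first use.
storage = Aidp::Analyze::MetricsStorage.new(Dir.pwd)

# Record one execution: step, provider, duration in seconds, success flag,
# plus free-form metadata stored as JSONB. (Names here are hypothetical.)
storage.store_step_metrics("02_ARCHITECTURE", "anthropic", 12.4, true, {retries: 0})

# Timeout suggestions are the p95 of successful durations plus a 10% buffer.
storage.calculate_timeout_recommendations.each do |step, rec|
  puts "#{step}: #{rec[:recommended_timeout]}s (n=#{rec[:sample_count]})"
end

Per the file list above, 0.8.x drops this PostgreSQL dependency: the pg-backed files (database.rb, metrics_storage.rb, pg_adapter.rb) are removed, while plain-file storage appears under data/lib/aidp/storage/ (json_storage.rb, csv_storage.rb) and analyze/json_file_storage.rb.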