aidp 0.5.0 → 0.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +128 -151
- data/bin/aidp +1 -1
- data/lib/aidp/analysis/kb_inspector.rb +471 -0
- data/lib/aidp/analysis/seams.rb +159 -0
- data/lib/aidp/analysis/tree_sitter_grammar_loader.rb +480 -0
- data/lib/aidp/analysis/tree_sitter_scan.rb +686 -0
- data/lib/aidp/analyze/error_handler.rb +2 -78
- data/lib/aidp/analyze/json_file_storage.rb +292 -0
- data/lib/aidp/analyze/progress.rb +12 -0
- data/lib/aidp/analyze/progress_visualizer.rb +12 -17
- data/lib/aidp/analyze/ruby_maat_integration.rb +13 -31
- data/lib/aidp/analyze/runner.rb +256 -87
- data/lib/aidp/analyze/steps.rb +6 -0
- data/lib/aidp/cli/jobs_command.rb +103 -435
- data/lib/aidp/cli.rb +317 -191
- data/lib/aidp/config.rb +298 -10
- data/lib/aidp/debug_logger.rb +195 -0
- data/lib/aidp/debug_mixin.rb +187 -0
- data/lib/aidp/execute/progress.rb +9 -0
- data/lib/aidp/execute/runner.rb +221 -40
- data/lib/aidp/execute/steps.rb +17 -7
- data/lib/aidp/execute/workflow_selector.rb +211 -0
- data/lib/aidp/harness/completion_checker.rb +268 -0
- data/lib/aidp/harness/condition_detector.rb +1526 -0
- data/lib/aidp/harness/config_loader.rb +373 -0
- data/lib/aidp/harness/config_manager.rb +382 -0
- data/lib/aidp/harness/config_schema.rb +1006 -0
- data/lib/aidp/harness/config_validator.rb +355 -0
- data/lib/aidp/harness/configuration.rb +477 -0
- data/lib/aidp/harness/enhanced_runner.rb +494 -0
- data/lib/aidp/harness/error_handler.rb +616 -0
- data/lib/aidp/harness/provider_config.rb +423 -0
- data/lib/aidp/harness/provider_factory.rb +306 -0
- data/lib/aidp/harness/provider_manager.rb +1269 -0
- data/lib/aidp/harness/provider_type_checker.rb +88 -0
- data/lib/aidp/harness/runner.rb +411 -0
- data/lib/aidp/harness/state/errors.rb +28 -0
- data/lib/aidp/harness/state/metrics.rb +219 -0
- data/lib/aidp/harness/state/persistence.rb +128 -0
- data/lib/aidp/harness/state/provider_state.rb +132 -0
- data/lib/aidp/harness/state/ui_state.rb +68 -0
- data/lib/aidp/harness/state/workflow_state.rb +123 -0
- data/lib/aidp/harness/state_manager.rb +586 -0
- data/lib/aidp/harness/status_display.rb +888 -0
- data/lib/aidp/harness/ui/base.rb +16 -0
- data/lib/aidp/harness/ui/enhanced_tui.rb +545 -0
- data/lib/aidp/harness/ui/enhanced_workflow_selector.rb +252 -0
- data/lib/aidp/harness/ui/error_handler.rb +132 -0
- data/lib/aidp/harness/ui/frame_manager.rb +361 -0
- data/lib/aidp/harness/ui/job_monitor.rb +500 -0
- data/lib/aidp/harness/ui/navigation/main_menu.rb +311 -0
- data/lib/aidp/harness/ui/navigation/menu_formatter.rb +120 -0
- data/lib/aidp/harness/ui/navigation/menu_item.rb +142 -0
- data/lib/aidp/harness/ui/navigation/menu_state.rb +139 -0
- data/lib/aidp/harness/ui/navigation/submenu.rb +202 -0
- data/lib/aidp/harness/ui/navigation/workflow_selector.rb +176 -0
- data/lib/aidp/harness/ui/progress_display.rb +280 -0
- data/lib/aidp/harness/ui/question_collector.rb +141 -0
- data/lib/aidp/harness/ui/spinner_group.rb +184 -0
- data/lib/aidp/harness/ui/spinner_helper.rb +152 -0
- data/lib/aidp/harness/ui/status_manager.rb +312 -0
- data/lib/aidp/harness/ui/status_widget.rb +280 -0
- data/lib/aidp/harness/ui/workflow_controller.rb +312 -0
- data/lib/aidp/harness/user_interface.rb +2381 -0
- data/lib/aidp/provider_manager.rb +131 -7
- data/lib/aidp/providers/anthropic.rb +28 -109
- data/lib/aidp/providers/base.rb +170 -0
- data/lib/aidp/providers/cursor.rb +52 -183
- data/lib/aidp/providers/gemini.rb +24 -109
- data/lib/aidp/providers/macos_ui.rb +99 -5
- data/lib/aidp/providers/opencode.rb +194 -0
- data/lib/aidp/storage/csv_storage.rb +172 -0
- data/lib/aidp/storage/file_manager.rb +214 -0
- data/lib/aidp/storage/json_storage.rb +140 -0
- data/lib/aidp/version.rb +1 -1
- data/lib/aidp.rb +56 -35
- data/templates/ANALYZE/06a_tree_sitter_scan.md +217 -0
- data/templates/COMMON/AGENT_BASE.md +11 -0
- data/templates/EXECUTE/00_PRD.md +4 -4
- data/templates/EXECUTE/02_ARCHITECTURE.md +5 -4
- data/templates/EXECUTE/07_TEST_PLAN.md +4 -1
- data/templates/EXECUTE/08_TASKS.md +4 -4
- data/templates/EXECUTE/10_IMPLEMENTATION_AGENT.md +4 -4
- data/templates/README.md +279 -0
- data/templates/aidp-development.yml.example +373 -0
- data/templates/aidp-minimal.yml.example +48 -0
- data/templates/aidp-production.yml.example +475 -0
- data/templates/aidp.yml.example +598 -0
- metadata +106 -64
- data/lib/aidp/analyze/agent_personas.rb +0 -71
- data/lib/aidp/analyze/agent_tool_executor.rb +0 -445
- data/lib/aidp/analyze/data_retention_manager.rb +0 -426
- data/lib/aidp/analyze/database.rb +0 -260
- data/lib/aidp/analyze/dependencies.rb +0 -335
- data/lib/aidp/analyze/export_manager.rb +0 -425
- data/lib/aidp/analyze/focus_guidance.rb +0 -517
- data/lib/aidp/analyze/incremental_analyzer.rb +0 -543
- data/lib/aidp/analyze/language_analysis_strategies.rb +0 -897
- data/lib/aidp/analyze/large_analysis_progress.rb +0 -504
- data/lib/aidp/analyze/memory_manager.rb +0 -365
- data/lib/aidp/analyze/metrics_storage.rb +0 -336
- data/lib/aidp/analyze/parallel_processor.rb +0 -460
- data/lib/aidp/analyze/performance_optimizer.rb +0 -694
- data/lib/aidp/analyze/repository_chunker.rb +0 -704
- data/lib/aidp/analyze/static_analysis_detector.rb +0 -577
- data/lib/aidp/analyze/storage.rb +0 -662
- data/lib/aidp/analyze/tool_configuration.rb +0 -456
- data/lib/aidp/analyze/tool_modernization.rb +0 -750
- data/lib/aidp/database/pg_adapter.rb +0 -148
- data/lib/aidp/database_config.rb +0 -69
- data/lib/aidp/database_connection.rb +0 -72
- data/lib/aidp/database_migration.rb +0 -158
- data/lib/aidp/job_manager.rb +0 -41
- data/lib/aidp/jobs/base_job.rb +0 -47
- data/lib/aidp/jobs/provider_execution_job.rb +0 -96
- data/lib/aidp/project_detector.rb +0 -117
- data/lib/aidp/providers/agent_supervisor.rb +0 -348
- data/lib/aidp/providers/supervised_base.rb +0 -317
- data/lib/aidp/providers/supervised_cursor.rb +0 -22
- data/lib/aidp/sync.rb +0 -13
- data/lib/aidp/workspace.rb +0 -19
@@ -1,460 +0,0 @@
|
|
1
|
-
# frozen_string_literal: true

require "concurrent"
require "json"
require "set"

module Aidp
  module Analyze
    # Executes chunk-processing callables concurrently on a bounded thread
    # pool, with optional dependency ordering and best-effort resource
    # monitoring. Each public entry point returns a Hash summarising
    # results, errors, and timing statistics.
    class ParallelProcessor
      # Default tuning knobs; overridable per-instance via the constructor.
      DEFAULT_CONFIG = {
        max_workers: 4,
        chunk_size: 10,
        timeout: 300, # 5 minutes
        retry_attempts: 2,
        memory_limit: 1024 * 1024 * 1024, # 1GB
        cpu_limit: 0.8 # 80% CPU usage
      }.freeze

      # @param config [Hash] overrides merged over DEFAULT_CONFIG
      def initialize(config = {})
        @config = DEFAULT_CONFIG.merge(config)
        @executor = nil
        @results = Concurrent::Array.new
        @errors = Concurrent::Array.new
        @progress = Concurrent::AtomicFixnum.new(0)
        @resource_monitor = nil
      end

      # Process chunks in parallel.
      #
      # @param chunks [Array<Hash>] each chunk is expected to expose [:id]
      # @param processor_method [#call] invoked as processor_method.call(chunk, options);
      #   must return a Hash (a :success key is merged in here)
      # @param options [Hash] per-run overrides (:timeout, :retry_attempts)
      # @return [Hash] run summary. NOTE: returns [] (an Array) when +chunks+
      #   is empty — preserved for backward compatibility with callers.
      def process_chunks_parallel(chunks, processor_method, options = {})
        return [] if chunks.empty?

        setup_executor
        start_time = Time.now

        results = {
          total_chunks: chunks.length,
          processed_chunks: 0,
          failed_chunks: 0,
          start_time: start_time,
          end_time: nil,
          duration: nil,
          results: [],
          errors: [],
          statistics: {}
        }

        begin
          futures = create_futures(chunks, processor_method, options)
          completed_futures = wait_for_completion(futures, options)
          collect_results(completed_futures, results)
        rescue => e
          results[:errors] << {
            type: "processing_error",
            message: e.message,
            backtrace: e.backtrace
          }
        ensure
          cleanup_executor
          results[:end_time] = Time.now
          results[:duration] = results[:end_time] - results[:start_time]
          results[:statistics] = calculate_statistics(results)
        end

        results
      end

      # Process chunks respecting a dependency graph: chunks are grouped into
      # phases via topological sort and each phase runs in parallel.
      #
      # @param dependencies [Hash{Object=>Array}] chunk id => ids it depends on
      # @return [Hash] run summary (see #process_chunks_parallel); also
      #   records :execution_order
      # @raise [RuntimeError] indirectly records an error entry when a
      #   circular dependency is detected
      def process_chunks_with_dependencies(chunks, dependencies, processor_method, options = {})
        return [] if chunks.empty?

        setup_executor
        start_time = Time.now

        results = {
          total_chunks: chunks.length,
          processed_chunks: 0,
          failed_chunks: 0,
          start_time: start_time,
          end_time: nil,
          duration: nil,
          results: [],
          errors: [],
          execution_order: [],
          statistics: {}
        }

        begin
          execution_plan = create_execution_plan(chunks, dependencies)

          # Execute phase by phase; chunks within a phase have no mutual deps.
          execution_plan.each do |phase|
            phase_results = process_phase_parallel(phase, processor_method, options)
            results[:results].concat(phase_results[:results])
            results[:errors].concat(phase_results[:errors])
            results[:processed_chunks] += phase_results[:processed_chunks]
            results[:failed_chunks] += phase_results[:failed_chunks]
            results[:execution_order].concat(phase.map { |chunk| chunk[:id] })
          end
        rescue => e
          results[:errors] << {
            type: "dependency_error",
            message: e.message,
            backtrace: e.backtrace
          }
        ensure
          cleanup_executor
          results[:end_time] = Time.now
          results[:duration] = results[:end_time] - results[:start_time]
          results[:statistics] = calculate_statistics(results)
        end

        results
      end

      # Process chunks sequentially while sampling system resource usage and
      # pausing when configured memory/CPU limits are exceeded.
      #
      # @return [Hash] run summary including :resource_usage samples
      def process_chunks_with_resource_management(chunks, processor_method, options = {})
        return [] if chunks.empty?

        setup_executor
        start_time = Time.now

        results = {
          total_chunks: chunks.length,
          processed_chunks: 0,
          failed_chunks: 0,
          start_time: start_time,
          end_time: nil,
          duration: nil,
          results: [],
          errors: [],
          resource_usage: {},
          statistics: {}
        }

        begin
          resource_monitor = start_resource_monitoring
          chunk_results = process_with_resource_constraints(chunks, processor_method, options, resource_monitor)

          results[:results] = chunk_results[:results]
          results[:errors] = chunk_results[:errors]
          results[:processed_chunks] = chunk_results[:processed_chunks]
          results[:failed_chunks] = chunk_results[:failed_chunks]
          results[:resource_usage] = resource_monitor[:usage]
        rescue => e
          results[:errors] << {
            type: "resource_error",
            message: e.message,
            backtrace: e.backtrace
          }
        ensure
          stop_resource_monitoring
          cleanup_executor
          results[:end_time] = Time.now
          results[:duration] = results[:end_time] - results[:start_time]
          results[:statistics] = calculate_statistics(results)
        end

        results
      end

      # Snapshot of cumulative processing state across runs.
      def get_processing_statistics
        {
          total_processed: @progress.value,
          total_errors: @errors.length,
          executor_status: executor_status,
          memory_usage: get_memory_usage,
          cpu_usage: get_cpu_usage
        }
      end

      # Cancel ongoing processing by shutting the pool down.
      def cancel_processing
        cleanup_executor
        {
          cancelled: true,
          processed_count: @progress.value,
          error_count: @errors.length
        }
      end

      private

      def setup_executor
        @executor = Concurrent::ThreadPoolExecutor.new(
          min_threads: 1,
          max_threads: @config[:max_workers],
          max_queue: @config[:max_workers] * 2,
          fallback_policy: :caller_runs
        )
      end

      def cleanup_executor
        return unless @executor

        @executor.shutdown
        @executor.wait_for_termination(@config[:timeout])
        @executor = nil
      end

      # Schedule every chunk on the pool and return future handles.
      def create_futures(chunks, processor_method, options)
        chunks.each_with_index.map do |chunk, index|
          # BUG FIX: ThreadPoolExecutor#post returns a Boolean, not a future,
          # so the previous `.value(timeout)` call could never work. Wrap the
          # task in a Concurrent::Future bound to our executor instead.
          future = Concurrent::Future.execute(executor: @executor) do
            process_chunk_with_retry(chunk, processor_method, options, index)
          end

          {future: future, chunk: chunk, index: index}
        end
      end

      def wait_for_completion(futures, options)
        timeout = options[:timeout] || @config[:timeout]
        completed_futures = []

        futures.each do |future_info|
          # #value! re-raises a rejected future's reason; plain #value would
          # silently return nil. A nil result after the timeout means the
          # task never completed.
          result = future_info[:future].value!(timeout)
          if result.nil?
            raise Concurrent::TimeoutError, "chunk #{future_info[:chunk][:id]} timed out after #{timeout}s"
          end

          completed_futures << {
            chunk: future_info[:chunk],
            result: result,
            index: future_info[:index]
          }
          @progress.increment
        rescue => e
          @errors << {
            chunk_id: future_info[:chunk][:id],
            error: e.message,
            index: future_info[:index]
          }
        end

        completed_futures
      end

      # Fold completed futures into the aggregate results hash.
      def collect_results(completed_futures, results)
        completed_futures.each do |future_result|
          if future_result[:result][:success]
            results[:results] << future_result[:result]
            results[:processed_chunks] += 1
          else
            results[:errors] << {
              chunk_id: future_result[:chunk][:id],
              error: future_result[:result][:error],
              index: future_result[:index]
            }
            results[:failed_chunks] += 1
          end
        end
      end

      # Run one chunk, retrying with exponential backoff. Never raises:
      # terminal failures are returned as {success: false, ...} hashes.
      def process_chunk_with_retry(chunk, processor_method, options, index)
        retry_attempts = options[:retry_attempts] || @config[:retry_attempts]
        attempt = 0

        begin
          attempt += 1
          result = processor_method.call(chunk, options)
          result[:success] = true
          result[:attempt] = attempt
          result
        rescue => e
          if attempt < retry_attempts
            sleep(2**attempt) # Exponential backoff
            retry
          else
            {
              success: false,
              error: e.message,
              attempt: attempt,
              chunk_id: chunk[:id]
            }
          end
        end
      end

      # Topologically sort chunks into phases; all chunks in a phase have
      # every dependency satisfied by earlier phases.
      def create_execution_plan(chunks, dependencies)
        execution_plan = []
        remaining_chunks = chunks.dup
        completed_chunks = Set.new

        until remaining_chunks.empty?
          phase = []

          remaining_chunks.each do |chunk|
            chunk_deps = dependencies[chunk[:id]] || []
            phase << chunk if chunk_deps.all? { |dep| completed_chunks.include?(dep) }
          end

          # An empty phase means no chunk is runnable => dependency cycle.
          raise "Circular dependency detected in chunks" if phase.empty?

          execution_plan << phase
          phase.each { |chunk| completed_chunks.add(chunk[:id]) }
          remaining_chunks.reject! { |chunk| phase.include?(chunk) }
        end

        execution_plan
      end

      def process_phase_parallel(phase_chunks, processor_method, options)
        return {results: [], errors: [], processed_chunks: 0, failed_chunks: 0} if phase_chunks.empty?

        phase_results = process_chunks_parallel(phase_chunks, processor_method, options)

        {
          results: phase_results[:results],
          errors: phase_results[:errors],
          processed_chunks: phase_results[:processed_chunks],
          failed_chunks: phase_results[:failed_chunks]
        }
      end

      # Start a background sampler thread collecting memory/cpu/disk usage
      # once per second. Stopped by #stop_resource_monitoring.
      def start_resource_monitoring
        monitor = {
          start_time: Time.now,
          usage: {
            memory: [],
            cpu: [],
            disk: []
          },
          running: true
        }

        monitor[:thread] = Thread.new do
          while monitor[:running]
            monitor[:usage][:memory] << get_memory_usage
            monitor[:usage][:cpu] << get_cpu_usage
            monitor[:usage][:disk] << get_disk_usage
            sleep(1)
          end
        end

        @resource_monitor = monitor
      end

      def stop_resource_monitoring
        # BUG FIX: the previous implementation was a no-op stub, so the
        # sampler thread spun forever — one leaked thread per run.
        monitor = @resource_monitor
        return unless monitor

        monitor[:running] = false
        monitor[:thread]&.join(2)
        @resource_monitor = nil
      end

      def process_with_resource_constraints(chunks, processor_method, options, resource_monitor)
        results = {
          results: [],
          errors: [],
          processed_chunks: 0,
          failed_chunks: 0
        }

        chunks.each do |chunk|
          # Throttle when memory/CPU limits are exceeded.
          wait_for_resources(resource_monitor) if resource_constraints_exceeded(resource_monitor)

          begin
            result = processor_method.call(chunk, options)
            if result[:success]
              results[:results] << result
              results[:processed_chunks] += 1
            else
              results[:errors] << {
                chunk_id: chunk[:id],
                error: result[:error]
              }
              results[:failed_chunks] += 1
            end
          rescue => e
            results[:errors] << {
              chunk_id: chunk[:id],
              error: e.message
            }
            results[:failed_chunks] += 1
          end
        end

        results
      end

      def resource_constraints_exceeded(resource_monitor)
        memory_usage = get_memory_usage
        cpu_usage = get_cpu_usage

        memory_usage > @config[:memory_limit] || cpu_usage > @config[:cpu_limit]
      end

      def wait_for_resources(resource_monitor)
        # Simple fixed backoff before re-checking constraints.
        sleep(1)
      end

      def get_memory_usage
        # BUG FIX: Ruby's Process module has no #getrusage — the old
        # `Process.getrusage(:SELF).maxrss` raised NoMethodError on first
        # use. Sample RSS (KiB) via ps(1) instead; returns 0 bytes when
        # unavailable (non-Unix platforms).
        `ps -o rss= -p #{Process.pid}`.to_i * 1024
      rescue StandardError
        0
      end

      def get_cpu_usage
        # Simplified implementation: fixed 50% placeholder.
        0.5
      end

      def get_disk_usage
        # Simplified implementation: fixed 30% placeholder.
        0.3
      end

      def executor_status
        return "not_initialized" unless @executor

        if @executor.shutdown?
          "shutdown"
        elsif @executor.shuttingdown?
          "shutting_down"
        else
          "running"
        end
      end

      # Derive aggregate timing/memory statistics from a run's results hash.
      def calculate_statistics(results)
        return {} if results[:results].empty?

        durations = results[:results].map { |r| r[:duration] || 0 }
        memory_usage = results[:results].map { |r| r[:memory_usage] || 0 }
        wall_time = results[:duration].to_f

        {
          average_duration: durations.sum.to_f / durations.length,
          min_duration: durations.min,
          max_duration: durations.max,
          total_duration: durations.sum,
          average_memory: memory_usage.sum.to_f / memory_usage.length,
          success_rate: results[:processed_chunks].to_f / results[:total_chunks] * 100,
          # Guard against division by zero on sub-millisecond runs.
          throughput: wall_time.positive? ? results[:processed_chunks] / wall_time : 0.0
        }
      end
    end
  end
end
|