aidp 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE +21 -0
- data/README.md +210 -0
- data/bin/aidp +5 -0
- data/lib/aidp/analyze/agent_personas.rb +71 -0
- data/lib/aidp/analyze/agent_tool_executor.rb +445 -0
- data/lib/aidp/analyze/data_retention_manager.rb +426 -0
- data/lib/aidp/analyze/database.rb +243 -0
- data/lib/aidp/analyze/dependencies.rb +335 -0
- data/lib/aidp/analyze/error_handler.rb +486 -0
- data/lib/aidp/analyze/export_manager.rb +425 -0
- data/lib/aidp/analyze/feature_analyzer.rb +397 -0
- data/lib/aidp/analyze/focus_guidance.rb +517 -0
- data/lib/aidp/analyze/incremental_analyzer.rb +543 -0
- data/lib/aidp/analyze/language_analysis_strategies.rb +897 -0
- data/lib/aidp/analyze/large_analysis_progress.rb +504 -0
- data/lib/aidp/analyze/memory_manager.rb +365 -0
- data/lib/aidp/analyze/parallel_processor.rb +460 -0
- data/lib/aidp/analyze/performance_optimizer.rb +694 -0
- data/lib/aidp/analyze/prioritizer.rb +402 -0
- data/lib/aidp/analyze/progress.rb +75 -0
- data/lib/aidp/analyze/progress_visualizer.rb +320 -0
- data/lib/aidp/analyze/report_generator.rb +582 -0
- data/lib/aidp/analyze/repository_chunker.rb +702 -0
- data/lib/aidp/analyze/ruby_maat_integration.rb +572 -0
- data/lib/aidp/analyze/runner.rb +245 -0
- data/lib/aidp/analyze/static_analysis_detector.rb +577 -0
- data/lib/aidp/analyze/steps.rb +53 -0
- data/lib/aidp/analyze/storage.rb +600 -0
- data/lib/aidp/analyze/tool_configuration.rb +456 -0
- data/lib/aidp/analyze/tool_modernization.rb +750 -0
- data/lib/aidp/execute/progress.rb +76 -0
- data/lib/aidp/execute/runner.rb +135 -0
- data/lib/aidp/execute/steps.rb +113 -0
- data/lib/aidp/shared/cli.rb +117 -0
- data/lib/aidp/shared/config.rb +35 -0
- data/lib/aidp/shared/project_detector.rb +119 -0
- data/lib/aidp/shared/providers/anthropic.rb +26 -0
- data/lib/aidp/shared/providers/base.rb +17 -0
- data/lib/aidp/shared/providers/cursor.rb +102 -0
- data/lib/aidp/shared/providers/gemini.rb +26 -0
- data/lib/aidp/shared/providers/macos_ui.rb +26 -0
- data/lib/aidp/shared/sync.rb +15 -0
- data/lib/aidp/shared/util.rb +41 -0
- data/lib/aidp/shared/version.rb +7 -0
- data/lib/aidp/shared/workspace.rb +21 -0
- data/lib/aidp.rb +53 -0
- data/templates/ANALYZE/01_REPOSITORY_ANALYSIS.md +100 -0
- data/templates/ANALYZE/02_ARCHITECTURE_ANALYSIS.md +151 -0
- data/templates/ANALYZE/03_TEST_ANALYSIS.md +182 -0
- data/templates/ANALYZE/04_FUNCTIONALITY_ANALYSIS.md +200 -0
- data/templates/ANALYZE/05_DOCUMENTATION_ANALYSIS.md +202 -0
- data/templates/ANALYZE/06_STATIC_ANALYSIS.md +233 -0
- data/templates/ANALYZE/07_REFACTORING_RECOMMENDATIONS.md +316 -0
- data/templates/COMMON/AGENT_BASE.md +129 -0
- data/templates/COMMON/CONVENTIONS.md +19 -0
- data/templates/COMMON/TEMPLATES/ADR_TEMPLATE.md +21 -0
- data/templates/COMMON/TEMPLATES/DOMAIN_CHARTER.md +27 -0
- data/templates/COMMON/TEMPLATES/EVENT_EXAMPLE.yaml +16 -0
- data/templates/COMMON/TEMPLATES/MERMAID_C4.md +46 -0
- data/templates/COMMON/TEMPLATES/OPENAPI_STUB.yaml +11 -0
- data/templates/EXECUTE/00_PRD.md +36 -0
- data/templates/EXECUTE/01_NFRS.md +27 -0
- data/templates/EXECUTE/02A_ARCH_GATE_QUESTIONS.md +13 -0
- data/templates/EXECUTE/02_ARCHITECTURE.md +42 -0
- data/templates/EXECUTE/03_ADR_FACTORY.md +22 -0
- data/templates/EXECUTE/04_DOMAIN_DECOMPOSITION.md +24 -0
- data/templates/EXECUTE/05_CONTRACTS.md +27 -0
- data/templates/EXECUTE/06_THREAT_MODEL.md +23 -0
- data/templates/EXECUTE/07_TEST_PLAN.md +24 -0
- data/templates/EXECUTE/08_TASKS.md +29 -0
- data/templates/EXECUTE/09_SCAFFOLDING_DEVEX.md +25 -0
- data/templates/EXECUTE/10_IMPLEMENTATION_AGENT.md +30 -0
- data/templates/EXECUTE/11_STATIC_ANALYSIS.md +22 -0
- data/templates/EXECUTE/12_OBSERVABILITY_SLOS.md +21 -0
- data/templates/EXECUTE/13_DELIVERY_ROLLOUT.md +21 -0
- data/templates/EXECUTE/14_DOCS_PORTAL.md +23 -0
- data/templates/EXECUTE/15_POST_RELEASE.md +25 -0
- metadata +301 -0
data/lib/aidp/analyze/error_handler.rb
@@ -0,0 +1,486 @@
# frozen_string_literal: true

require "logger"

module Aidp
  module Analyze
    # Comprehensive error handling system for analyze mode
    class ErrorHandler
      attr_reader :logger, :error_counts, :recovery_strategies

      def initialize(log_file: nil, verbose: false)
        @logger = setup_logger(log_file, verbose)
        @error_counts = Hash.new(0)
        @recovery_strategies = setup_recovery_strategies
        @error_history = []
      end

      # Handle errors with appropriate recovery strategies
      def handle_error(error, context: {}, step: nil, retry_count: 0)
        error_info = {
          error: error,
          context: context,
          step: step,
          retry_count: retry_count,
          timestamp: Time.current
        }

        log_error(error_info)
        increment_error_count(error.class)
        add_to_history(error_info)

        recovery_strategy = determine_recovery_strategy(error, context)
        apply_recovery_strategy(recovery_strategy, error_info)
      end

      # Handle specific error types with custom logic
      def handle_network_error(error, context: {})
        case error
        when Net::TimeoutError
          handle_timeout_error(error, context)
        when Net::HTTPError
          handle_http_error(error, context)
        when SocketError
          handle_socket_error(error, context)
        else
          handle_generic_network_error(error, context)
        end
      end

      def handle_file_system_error(error, context: {})
        case error
        when Errno::ENOENT
          handle_file_not_found(error, context)
        when Errno::EACCES
          handle_permission_denied(error, context)
        when Errno::ENOSPC
          handle_disk_full(error, context)
        else
          handle_generic_file_error(error, context)
        end
      end

      def handle_database_error(error, context: {})
        case error
        when SQLite3::BusyException
          handle_database_busy(error, context)
        when SQLite3::CorruptException
          handle_database_corrupt(error, context)
        when SQLite3::ReadOnlyException
          handle_database_readonly(error, context)
        else
          handle_generic_database_error(error, context)
        end
      end

      def handle_analysis_error(error, context: {})
        case error
        when AnalysisTimeoutError
          handle_analysis_timeout(error, context)
        when AnalysisDataError
          handle_analysis_data_error(error, context)
        when AnalysisToolError
          handle_analysis_tool_error(error, context)
        else
          handle_generic_analysis_error(error, context)
        end
      end

      # Recovery strategies
      def retry_with_backoff(operation, max_retries: 3, base_delay: 1)
        retry_count = 0
        begin
          operation.call
        rescue => e
          retry_count += 1
          if retry_count <= max_retries
            delay = base_delay * (2**(retry_count - 1))
            logger.warn("Retrying operation in #{delay} seconds (attempt #{retry_count}/#{max_retries})")
            sleep(delay)
            retry
          else
            logger.error("Operation failed after #{max_retries} retries: #{e.message}")
            raise e
          end
        end
      end

      def fallback_to_mock_data(operation, fallback_data)
        operation.call
      rescue => e
        logger.warn("Operation failed, using fallback data: #{e.message}")
        fallback_data
      end

      def skip_step_with_warning(step_name, error)
        logger.warn("Skipping step '#{step_name}' due to error: #{error.message}")
        {
          status: "skipped",
          reason: error.message,
          timestamp: Time.current
        }
      end

      def continue_with_partial_data(operation, partial_data_handler)
        operation.call
      rescue => e
        logger.warn("Operation failed, continuing with partial data: #{e.message}")
        partial_data_handler.call(e)
      end

      # Error reporting and statistics
      def get_error_summary
        {
          total_errors: @error_counts.values.sum,
          error_breakdown: @error_counts,
          recent_errors: @error_history.last(10),
          recovery_success_rate: calculate_recovery_success_rate
        }
      end

      def get_error_recommendations
        recommendations = []

        if @error_counts[Net::TimeoutError] > 5
          recommendations << "Consider increasing timeout values for network operations"
        end

        if @error_counts[Errno::ENOSPC] > 0
          recommendations << "Check available disk space and implement cleanup procedures"
        end

        if @error_counts[SQLite3::BusyException] > 3
          recommendations << "Consider implementing database connection pooling"
        end

        if @error_counts[AnalysisTimeoutError] > 2
          recommendations << "Consider chunking large analysis tasks or increasing timeouts"
        end

        recommendations
      end

      # Cleanup and resource management
      def cleanup
        logger.info("Cleaning up error handler resources")
        @error_history.clear
        @error_counts.clear
      end

      private

      def setup_logger(log_file, verbose)
        logger = Logger.new(log_file || $stdout)
        logger.level = verbose ? Logger::DEBUG : Logger::INFO
        logger.formatter = proc do |severity, datetime, progname, msg|
          "#{datetime.strftime("%Y-%m-%d %H:%M:%S")} [#{severity}] #{msg}\n"
        end
        logger
      end

      def setup_recovery_strategies
        {
          Net::TimeoutError => :retry_with_backoff,
          Net::HTTPError => :retry_with_backoff,
          SocketError => :retry_with_backoff,
          Errno::ENOENT => :skip_step_with_warning,
          Errno::EACCES => :skip_step_with_warning,
          Errno::ENOSPC => :critical_error,
          SQLite3::BusyException => :retry_with_backoff,
          SQLite3::CorruptException => :critical_error,
          AnalysisTimeoutError => :chunk_and_retry,
          AnalysisDataError => :continue_with_partial_data,
          AnalysisToolError => :fallback_to_mock_data
        }
      end

      def log_error(error_info)
        error = error_info[:error]
        context = error_info[:context]
        step = error_info[:step]

        logger.error("Error in step '#{step}': #{error.class} - #{error.message}")
        logger.error("Context: #{context}") unless context.empty?
        logger.error("Backtrace: #{error.backtrace.first(5).join("\n")}") if error.backtrace
      end

      def increment_error_count(error_class)
        @error_counts[error_class] += 1
      end

      def add_to_history(error_info)
        @error_history << error_info
        @error_history.shift if @error_history.length > 100
      end

      def determine_recovery_strategy(error, context)
        strategy = @recovery_strategies[error.class] || :log_and_continue

        # Override strategy based on context
        strategy = :critical_error if context[:critical] && strategy == :skip_step_with_warning

        strategy = :retry_with_backoff if context[:retryable] && strategy == :log_and_continue

        strategy
      end

      def apply_recovery_strategy(strategy, error_info)
        case strategy
        when :retry_with_backoff
          retry_operation(error_info)
        when :skip_step_with_warning
          skip_step(error_info)
        when :critical_error
          raise_critical_error(error_info)
        when :chunk_and_retry
          chunk_and_retry(error_info)
        when :continue_with_partial_data
          continue_with_partial(error_info)
        when :fallback_to_mock_data
          fallback_to_mock(error_info)
        when :log_and_continue
          log_and_continue(error_info)
        else
          log_and_continue(error_info)
        end
      end

      # Specific error handlers
      def handle_timeout_error(error, context)
        logger.warn("Network timeout: #{error.message}")
        if context[:retryable]
          retry_with_backoff(-> { context[:operation].call }, max_retries: 2)
        else
          skip_step_with_warning(context[:step], error)
        end
      end

      def handle_http_error(error, context)
        logger.warn("HTTP error: #{error.message}")
        case error.response&.code
        when "429" # Rate limited
          sleep(60) # Wait 1 minute
          retry_with_backoff(-> { context[:operation].call }, max_retries: 2)
        when "500".."599" # Server errors
          retry_with_backoff(-> { context[:operation].call }, max_retries: 3)
        else
          skip_step_with_warning(context[:step], error)
        end
      end

      def handle_socket_error(error, context)
        logger.warn("Socket error: #{error.message}")
        if context[:network_required]
          raise_critical_error({error: error, context: context})
        else
          fallback_to_mock_data(-> { context[:operation].call }, context[:fallback_data])
        end
      end

      def handle_file_not_found(error, context)
        logger.warn("File not found: #{error.message}")
        if context[:required]
          raise_critical_error({error: error, context: context})
        else
          skip_step_with_warning(context[:step], error)
        end
      end

      def handle_permission_denied(error, context)
        logger.error("Permission denied: #{error.message}")
        raise_critical_error({error: error, context: context})
      end

      def handle_disk_full(error, context)
        logger.error("Disk full: #{error.message}")
        raise_critical_error({error: error, context: context})
      end

      def handle_database_busy(error, context)
        logger.warn("Database busy: #{error.message}")
        retry_with_backoff(-> { context[:operation].call }, max_retries: 5, base_delay: 0.5)
      end

      def handle_database_corrupt(error, context)
        logger.error("Database corrupt: #{error.message}")
        raise_critical_error({error: error, context: context})
      end

      def handle_database_readonly(error, context)
        logger.error("Database read-only: #{error.message}")
        raise_critical_error({error: error, context: context})
      end

      def handle_analysis_timeout(error, context)
        logger.warn("Analysis timeout: #{error.message}")
        if context[:chunkable]
          chunk_and_retry({error: error, context: context})
        else
          skip_step_with_warning(context[:step], error)
        end
      end

      def handle_analysis_data_error(error, context)
        logger.warn("Analysis data error: #{error.message}")
        continue_with_partial_data(
          -> { context[:operation].call },
          ->(e) { context[:partial_data_handler]&.call(e) || {} }
        )
      end

      def handle_analysis_tool_error(error, context)
        logger.warn("Analysis tool error: #{error.message}")
        fallback_to_mock_data(
          -> { context[:operation].call },
          context[:mock_data] || generate_mock_data(context)
        )
      end

      # Recovery strategy implementations
      def retry_operation(error_info)
        operation = error_info[:context][:operation]
        max_retries = error_info[:context][:max_retries] || 3
        base_delay = error_info[:context][:base_delay] || 1

        retry_with_backoff(operation, max_retries: max_retries, base_delay: base_delay)
      end

      def skip_step(error_info)
        step = error_info[:step]
        error = error_info[:error]
        skip_step_with_warning(step, error)
      end

      def raise_critical_error(error_info)
        error = error_info[:error]
        context = error_info[:context]

        logger.error("Critical error: #{error.message}")
        logger.error("Context: #{context}")

        raise CriticalAnalysisError.new(error.message, error_info)
      end

      def chunk_and_retry(error_info)
        context = error_info[:context]
        chunker = context[:chunker]
        operation = context[:operation]

        logger.info("Chunking analysis and retrying")

        chunks = chunker.chunk_repository("size_based")
        results = []

        chunks[:chunks].each do |chunk|
          result = operation.call(chunk)
          results << result
        rescue => e
          logger.warn("Chunk failed: #{e.message}")
          results << {status: "failed", error: e.message}
        end

        results
      end

      def continue_with_partial(error_info)
        context = error_info[:context]
        operation = context[:operation]
        partial_handler = context[:partial_data_handler]

        continue_with_partial_data(operation, partial_handler)
      end

      def fallback_to_mock(error_info)
        context = error_info[:context]
        operation = context[:operation]
        mock_data = context[:mock_data] || generate_mock_data(context)

        fallback_to_mock_data(operation, mock_data)
      end

      def log_and_continue(error_info)
        error = error_info[:error]
        logger.warn("Continuing after error: #{error.message}")
        {status: "continued_with_error", error: error.message}
      end

      def generate_mock_data(context)
        case context[:analysis_type]
        when "repository"
          generate_mock_repository_data
        when "architecture"
          generate_mock_architecture_data
        when "test_coverage"
          generate_mock_test_data
        else
          {status: "mock_data", message: "Mock data generated due to error"}
        end
      end

      def generate_mock_repository_data
        {
          analysis_type: "repository",
          status: "completed",
          data: [
            {entity: "mock_file.rb", nrev: 5, nloc: 100, churn: 20}
          ],
          statistics: {
            total_files: 1,
            total_commits: 5,
            total_lines: 100
          }
        }
      end

      def generate_mock_architecture_data
        {
          analysis_type: "architecture",
          status: "completed",
          data: {
            pattern: "monolithic",
            components: ["mock_component"],
            dependencies: []
          }
        }
      end

      def generate_mock_test_data
        {
          analysis_type: "test_coverage",
          status: "completed",
          data: {
            coverage: 75.0,
            tests: 10,
            files: 5
          }
        }
      end

      def calculate_recovery_success_rate
        return 0.0 if @error_history.empty?

        successful_recoveries = @error_history.count do |error_info|
          error_info[:recovery_successful]
        end

        (successful_recoveries.to_f / @error_history.length * 100).round(2)
      end
    end

    # Custom error classes
    class CriticalAnalysisError < StandardError
      attr_reader :error_info

      def initialize(message, error_info = {})
        super(message)
        @error_info = error_info
      end
    end

    class AnalysisTimeoutError < StandardError; end

    class AnalysisDataError < StandardError; end

    class AnalysisToolError < StandardError; end
  end
end
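A minimal usage sketch follows; it is not part of the released files above. It exercises the ErrorHandler API from error_handler.rb under stated assumptions: the gem's lib directory is on the load path, Time.current is shimmed (the handler timestamps entries with it, which normally comes from ActiveSupport), and the Net::TimeoutError and SQLite3 constants referenced by the strategy table are stubbed because plain Ruby and these requires do not define them. The step name and failing operations are illustrative only.

# usage_sketch.rb (illustrative, not shipped with the gem)
require "logger"
require "timeout"
require "socket"
require "net/http"

# error_handler.rb resolves these constants when the handler is constructed; in
# the gem they are expected to come from its other requires. Stub the ones this
# standalone sketch would otherwise be missing.
unless defined?(Net::TimeoutError)
  module Net
    class TimeoutError < Timeout::Error; end
  end
end

unless defined?(SQLite3)
  module SQLite3
    class BusyException < StandardError; end
    class CorruptException < StandardError; end
    class ReadOnlyException < StandardError; end
  end
end

# Time.current ordinarily comes from ActiveSupport; shim it for plain Ruby.
unless Time.respond_to?(:current)
  def Time.current
    now
  end
end

require "aidp/analyze/error_handler" # assumes the gem's lib directory is on $LOAD_PATH

handler = Aidp::Analyze::ErrorHandler.new(verbose: true)

# Exponential backoff: the lambda fails twice, then succeeds on the third
# attempt (delays of 1s and 2s with the defaults).
attempts = 0
result = handler.retry_with_backoff(lambda {
  attempts += 1
  raise IOError, "transient failure" if attempts < 3
  {status: "ok", attempts: attempts}
})
puts result.inspect

# Errno::ENOENT maps to :skip_step_with_warning in the strategy table, so this
# logs a warning and returns a "skipped" result hash instead of re-raising.
begin
  File.read("does/not/exist/PROMPT.md")
rescue Errno::ENOENT => e
  puts handler.handle_error(e, step: "01_REPOSITORY_ANALYSIS",
                               context: {step: "01_REPOSITORY_ANALYSIS"}).inspect
end

puts handler.get_error_summary[:total_errors] # running count of handled errors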