code_healer 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +70 -0
- data/GEM_SUMMARY.md +307 -0
- data/README.md +281 -0
- data/code_healer.gemspec +77 -0
- data/config/code_healer.yml.example +104 -0
- data/docs/INSTALLATION.md +439 -0
- data/examples/basic_usage.rb +160 -0
- data/exe/code_healer-setup +7 -0
- data/lib/code_healer/application_job.rb +7 -0
- data/lib/code_healer/business_context_analyzer.rb +464 -0
- data/lib/code_healer/business_context_loader.rb +273 -0
- data/lib/code_healer/business_context_manager.rb +297 -0
- data/lib/code_healer/business_logic_generator.rb +94 -0
- data/lib/code_healer/business_rule_applier.rb +54 -0
- data/lib/code_healer/claude_code_evolution_handler.rb +224 -0
- data/lib/code_healer/claude_error_monitor.rb +48 -0
- data/lib/code_healer/config_manager.rb +275 -0
- data/lib/code_healer/context_aware_prompt_builder.rb +153 -0
- data/lib/code_healer/core.rb +513 -0
- data/lib/code_healer/error_handler.rb +141 -0
- data/lib/code_healer/evolution_job.rb +99 -0
- data/lib/code_healer/global_handler.rb +130 -0
- data/lib/code_healer/healing_job.rb +167 -0
- data/lib/code_healer/mcp.rb +108 -0
- data/lib/code_healer/mcp_prompts.rb +111 -0
- data/lib/code_healer/mcp_server.rb +389 -0
- data/lib/code_healer/mcp_tools.rb +2364 -0
- data/lib/code_healer/pull_request_creator.rb +143 -0
- data/lib/code_healer/setup.rb +390 -0
- data/lib/code_healer/simple_evolution.rb +737 -0
- data/lib/code_healer/simple_global_handler.rb +122 -0
- data/lib/code_healer/simple_healer.rb +515 -0
- data/lib/code_healer/terminal_integration.rb +87 -0
- data/lib/code_healer/usage_analyzer.rb +92 -0
- data/lib/code_healer/version.rb +5 -0
- data/lib/code_healer.rb +67 -0
- metadata +411 -0
@@ -0,0 +1,2364 @@
|
|
1
|
+
module CodeHealer
|
2
|
+
# Tool for analyzing errors with context
class ErrorAnalysisTool < MCP::Tool
  description "Analyzes errors with business context and provides intelligent analysis"
  input_schema(
    properties: {
      error_type: { type: "string" },
      error_message: { type: "string" },
      class_name: { type: "string" },
      method_name: { type: "string" },
      server_context: { type: "object" }
    },
    required: ["error_type", "error_message", "class_name", "method_name"]
  )
  annotations(
    title: "Error Analysis Tool",
    read_only_hint: true,
    destructive_hint: false,
    idempotent_hint: true,
    open_world_hint: false
  )

  # Entry point invoked by the MCP framework.
  # Builds an analysis report for the reported error (severity, impact,
  # root cause, suggested fixes, risks, performance and security notes)
  # and returns it as a JSON text payload in an MCP response.
  def self.call(error_type:, error_message:, class_name:, method_name:, server_context:)
    context = server_context&.dig(:codebase_context) || {}

    analysis = {
      severity: assess_error_severity(error_type, context),
      impact: assess_business_impact(error_type, context),
      root_cause: identify_root_cause(error_type, error_message, context),
      suggested_fixes: generate_suggested_fixes(error_type, context),
      risks: assess_evolution_risks(error_type, context),
      performance_implications: assess_performance_implications(error_type, context),
      security_considerations: assess_security_considerations(error_type, context)
    }

    MCP::Tool::Response.new([{ type: "text", text: analysis.to_json }])
  end

  # --- internal helpers -------------------------------------------------
  # NOTE: most of these are placeholder heuristics returning fixed values;
  # they exist so the analysis shape is stable while smarter assessments
  # are developed.

  # Placeholder: severity is currently always 'medium'.
  def self.assess_error_severity(error_type, context)
    'medium'
  end

  # Placeholder business-impact report (all dimensions minimal/none).
  def self.assess_business_impact(error_type, context)
    {
      user_experience: 'minimal',
      data_integrity: 'none',
      financial_impact: 'none',
      compliance_impact: 'none'
    }
  end

  # Echoes the error message as the immediate cause with generic
  # underlying-cause/prevention boilerplate.
  def self.identify_root_cause(error_type, error_message, context)
    {
      immediate_cause: error_message,
      underlying_cause: 'insufficient_validation',
      contributing_factors: ['missing_input_validation', 'lack_of_error_handling'],
      prevention_strategies: ['add_input_validation', 'implement_defensive_programming']
    }
  end

  # Generic fix list, with error-type-specific fixes prepended so they
  # appear first in priority order.
  def self.generate_suggested_fixes(error_type, context)
    fixes = ['add_error_handling', 'improve_validation', 'add_logging']
    case error_type
    when 'ZeroDivisionError'
      fixes.unshift('add_zero_division_check')
    when 'TypeError'
      fixes.unshift('add_type_validation')
    when 'NoMethodError'
      fixes.unshift('implement_missing_method', 'provide_fallback_implementation')
    end
    fixes
  end

  # Placeholder: all evolution risks reported as low.
  def self.assess_evolution_risks(error_type, context)
    {
      regression_risk: 'low',
      performance_risk: 'low',
      security_risk: 'low',
      compatibility_risk: 'low'
    }
  end

  # Placeholder: all performance implications reported as minimal.
  def self.assess_performance_implications(error_type, context)
    {
      execution_time: 'minimal',
      memory_usage: 'minimal',
      cpu_usage: 'minimal',
      overall_impact: 'minimal'
    }
  end

  # Placeholder: all security considerations reported as low/none.
  def self.assess_security_considerations(error_type, context)
    {
      vulnerability_risk: 'low',
      data_exposure: 'none',
      authentication_impact: 'none',
      authorization_impact: 'none'
    }
  end

  # A bare `private` does NOT affect methods defined with `def self.`, so the
  # helpers above were unintentionally public. Mark them private explicitly;
  # they remain callable from `self.call` via the implicit receiver.
  private_class_method :assess_error_severity, :assess_business_impact,
                       :identify_root_cause, :generate_suggested_fixes,
                       :assess_evolution_risks, :assess_performance_implications,
                       :assess_security_considerations
end
|
103
|
+
|
104
|
+
# Tool for generating intelligent code fixes
class CodeFixTool < MCP::Tool
  description "Generates intelligent code fixes using AI with business context"
  input_schema(
    properties: {
      error_type: { type: "string" },
      error_message: { type: "string" },
      class_name: { type: "string" },
      method_name: { type: "string" },
      analysis: { type: "object" },
      context: { type: "object" },
      server_context: { type: "object" }
    },
    required: ["error_type", "error_message", "class_name", "method_name"]
  )
  annotations(
    title: "Code Fix Tool",
    read_only_hint: false,
    destructive_hint: false,
    idempotent_hint: false,
    open_world_hint: false
  )

  # Entry point invoked by the MCP framework.
  # Merges the per-call context with the server context, builds an AI prompt
  # from it, asks OpenAI for a method-level fix and returns the fix hash as a
  # JSON text MCP response. Falls back to a template fix when the API key is
  # missing, the API call fails, or the AI response cannot be parsed.
  def self.call(error_type:, error_message:, class_name:, method_name:, analysis: nil, context: nil, server_context:)
    # Merge context and server_context for comprehensive context
    comprehensive_context = merge_contexts(context, server_context)
    puts "🔍 Debug: Comprehensive context merged: #{comprehensive_context.keys.inspect}"

    # Build intelligent prompt with comprehensive context.
    # (A leftover developer debug line that dumped the full prompt to stdout
    # was removed here.)
    prompt = build_intelligent_prompt(error_type, error_message, class_name, method_name, analysis, comprehensive_context)
    puts "🔍 Debug: Prompt built successfully"
    puts "🔍 Debug: Prompt length = #{prompt.length} characters"

    # Generate fix using AI
    fix = generate_ai_fix(prompt, method_name)
    puts "🔍 Debug: AI fix generated: #{fix.inspect}"

    # Return the fix as MCP response
    MCP::Tool::Response.new([{ type: "text", text: fix.to_json }])
  end

  # Merges the per-call +context+ into a copy of +server_context+.
  # Hash values are merged one level deep (existing nested hashes are merged
  # rather than replaced); an Array context is stored under :context_array.
  def self.merge_contexts(context, server_context)
    # Start with server_context as base
    merged = server_context&.dup || {}

    if context
      if context.is_a?(Hash)
        context.each do |key, value|
          merged[key] = if merged[key].is_a?(Hash) && value.is_a?(Hash)
                          merged[key].merge(value)
                        else
                          value
                        end
        end
      elsif context.is_a?(Array)
        merged[:context_array] = context
      end
    end

    puts "🔍 Debug: Merged context keys: #{merged.keys.inspect}"
    merged
  end

  # Builds the LLM prompt: error details, business context, coding standards,
  # hard requirements, and an example skeleton using the method's real
  # signature extracted from source.
  def self.build_intelligent_prompt(error_type, error_message, class_name, method_name, analysis, comprehensive_context)
    # Safely access nested hash values from the comprehensive context
    # (keys may be symbols or strings depending on the caller).
    codebase_context = comprehensive_context&.dig(:codebase_context) || comprehensive_context&.dig('codebase_context') || {}
    puts "🔍 Debug: codebase_context = #{codebase_context.inspect}"

    business_rules = comprehensive_context&.dig('business_context') || comprehensive_context&.dig(:business_context) || {}
    puts "🔍 Debug: business_rules = #{business_rules.inspect}"

    coding_standards = comprehensive_context&.dig('coding_standards') || codebase_context&.dig('coding_standards') || {}

    # Extract actual method signature from source code
    actual_signature = extract_method_signature(class_name, method_name)

    <<~PROMPT
      You are an expert Ruby developer and code evolution specialist. Generate a production-ready fix for the following error:

      ERROR DETAILS:
      Type: #{error_type}
      Message: #{error_message}
      Class: #{class_name}
      Method: #{method_name}
      ACTUAL METHOD SIGNATURE: #{actual_signature}

      BUSINESS CONTEXT:
      The following business context has been loaded from business requirements documents:

      #{format_business_context_for_prompt(business_rules, codebase_context, error_type)}

      Please review this business context and apply the business rules naturally in your code generation.

      CODING STANDARDS:
      Error Handling: #{coding_standards['error_handling']}
      Logging: #{coding_standards['logging']}
      Validation: #{coding_standards['validation']}
      Performance: #{coding_standards['performance']}

      BUSINESS RULE COMPLIANCE:
      - The system has loaded business context and rules that specify how to handle errors
      - CRITICAL: Business rules specify specific return values for different error types
      - Review the business context provided and apply the business rules naturally
      - Business rules may specify specific return values, logging requirements, or handling strategies
      - Ensure your error handling aligns with the business requirements provided
      - The business context above contains specific requirements for this error type
      - PAY SPECIAL ATTENTION to the "CRITICAL BUSINESS RULE" and "CRITICAL CALCULATION ERROR RULE" sections above

      CODE REQUIREMENTS:
      - Generate ONLY the complete method implementation (def method_name...end)
      - IMPORTANT: Use the EXACT method signature: #{actual_signature}
      - Include comprehensive error handling specific to #{error_type}
      - Add business-appropriate logging using Rails.logger
      - Include input validation and parameter checking
      - Follow Ruby best practices and conventions
      - Ensure the fix is production-ready and secure
      - Add performance considerations where relevant
      - Include proper return values and error responses
      - Use the exact method name: #{method_name}
      - Apply business rules from the provided context naturally

      EXAMPLE FORMAT (adapt to your actual method signature):
      #{actual_signature}
        # Input validation
        return business_rule_default_value if param1.nil? || param2.nil?

        # Business logic with error handling
        begin
          result = param1 / param2
          Rails.logger.info("Operation successful: \#{param1} / \#{param2} = \#{result}")
          result
        rescue #{error_type} => e
          Rails.logger.warn("#{error_type} occurred: \#{e.message}")
          # Apply business rules from context for appropriate return value
          return business_rule_default_value
        rescue => e
          Rails.logger.error("Unexpected error in operation: \#{e.message}")
          # Apply business rules from context for appropriate return value
          return business_rule_default_value
        end
      end

      NOTE: Replace 'business_rule_default_value' with the actual value specified in your business context above.
      IMPORTANT: Look for the "CRITICAL BUSINESS RULE - Return Value" section to find the exact return value to use.

      Generate a complete, intelligent fix for the #{method_name} method that specifically addresses the #{error_type}:
    PROMPT
  end

  # Flattens the loaded business context into a plain-text section for the
  # prompt: analyzer rules, domain-specific rules, any additional rules found
  # in the codebase context, and verbatim markdown requirement documents.
  # Returns a fallback sentence when nothing is loaded.
  def self.format_business_context_for_prompt(business_rules, codebase_context, error_type)
    # Harden against nil inputs (callers normally pass {} fallbacks, but the
    # unguarded hash access below would otherwise raise on nil).
    business_rules ||= {}
    codebase_context ||= {}

    puts "🔍 Debug: format_business_context_for_prompt called"
    puts "🔍 Debug: business_rules = #{business_rules.inspect}"
    puts "🔍 Debug: codebase_context keys = #{codebase_context.keys.inspect}"

    context_parts = []

    # First, try to extract from business_rules (which should contain the analyzer output)
    if business_rules.any?
      if business_rules[:business_rules]&.any?
        context_parts << "=== BUSINESS RULES FROM ANALYZER ==="
        business_rules[:business_rules].each do |rule|
          if rule.is_a?(Hash) && rule[:content]
            context_parts << rule[:content]
          elsif rule.is_a?(String)
            context_parts << rule
          end
        end
      end

      if business_rules[:domain_specific]&.any?
        context_parts << "\n=== DOMAIN-SPECIFIC RULES ==="
        business_rules[:domain_specific].each do |key, value|
          if value.is_a?(Array)
            value.each { |line| context_parts << line }
          elsif value.is_a?(String)
            context_parts << value
          end
        end
      end
    end

    # Also check codebase_context for additional business context
    business_context = codebase_context[:business_context] || codebase_context['business_context']
    if business_context && business_context.any?
      if business_context[:business_rules]&.any?
        context_parts << "\n=== ADDITIONAL BUSINESS RULES ==="
        business_context[:business_rules].each do |rule|
          if rule.is_a?(Hash) && rule[:content]
            context_parts << rule[:content]
          elsif rule.is_a?(String)
            context_parts << rule
          end
        end
      end
    end

    # Include markdown requirement documents verbatim from business_rules
    markdown_requirements = business_rules[:markdown_requirements] || business_rules['markdown_requirements']
    if markdown_requirements
      context_parts << "\n=== BUSINESS REQUIREMENTS FROM MARKDOWN DOCUMENTS ==="
      context_parts << markdown_requirements.to_s
    end

    result = context_parts.empty? ? "No specific business context loaded." : context_parts.join("\n")
    puts "🔍 Debug: Final result length = #{result.length} characters"
    result
  end

  # Reads the class's source file and returns the actual `def ...(...)` line
  # for +method_name+, or a permissive fallback signature when the file or
  # the definition cannot be found.
  def self.extract_method_signature(class_name, method_name)
    fallback = "def #{method_name}(*args, **kwargs, &block)"
    file_path = find_class_file(class_name)
    return fallback unless file_path && File.exist?(file_path)

    content = File.read(file_path)
    # Escape the method name before interpolating into the pattern: names
    # such as `valid?` contain regex metacharacters and would otherwise
    # silently corrupt the match (parse_ai_response already escapes).
    method_pattern = /def\s+#{Regexp.escape(method_name)}\s*\([^)]*\)/
    match = content.match(method_pattern)

    match ? match[0] : fallback
  rescue => e
    puts "🔍 Debug: Error extracting method signature: #{e.message}"
    fallback
  end

  # Locates the source file for +class_name+ in common Rails locations.
  # Returns the first existing path, or nil. (Relies on ActiveSupport's
  # String#underscore being available.)
  def self.find_class_file(class_name)
    possible_paths = [
      "app/models/#{class_name.underscore}.rb",
      "app/controllers/#{class_name.underscore}.rb",
      "app/services/#{class_name.underscore}.rb",
      "lib/#{class_name.underscore}.rb"
    ]

    possible_paths.find { |path| File.exist?(path) }
  end

  # Sends +prompt+ to OpenAI (gpt-4) and parses the reply into a fix hash.
  # Falls back to the template fix when no API key is configured, the call
  # raises, or parsing fails.
  def self.generate_ai_fix(prompt, method_name)
    puts "🔍 Debug: generate_ai_fix called with method_name=#{method_name}"

    unless ENV['OPENAI_API_KEY']
      puts "❌ OpenAI API key not found. Please set OPENAI_API_KEY environment variable."
      return generate_fallback_fix(method_name)
    end

    puts "🤖 Calling OpenAI API for intelligent fix generation..."
    client = OpenAI::Client.new(api_key: ENV['OPENAI_API_KEY'])

    response = client.chat.completions.create(
      messages: [
        {
          role: "system",
          content: "You are an expert Ruby developer and code evolution specialist. Generate intelligent, production-ready code fixes that are context-aware, secure, and follow Ruby best practices. Always return complete, syntactically correct Ruby method implementations."
        },
        {
          role: "user",
          content: prompt
        }
      ],
      model: :"gpt-4",
      temperature: 0.3,
      max_tokens: 1000
    )

    puts "🔍 Debug: OpenAI response received"

    ai_response = response.choices.first.message.content
    puts "🤖 AI generated code: #{ai_response}"

    # Parse the AI response
    parsed_fix = parse_ai_response(ai_response, method_name)

    if parsed_fix
      puts "✅ Successfully parsed AI-generated method: #{method_name}"
      puts "🔍 Debug: parsed_fix = #{parsed_fix.inspect}"
      parsed_fix
    else
      puts "❌ Failed to parse AI response, using fallback"
      generate_fallback_fix(method_name)
    end
  rescue => e
    puts "❌ OpenAI API error: #{e.message}"
    puts "🔧 Falling back to template-based fix"
    generate_fallback_fix(method_name)
  end

  # Extracts the complete `def <method_name>(...) ... end` block from the AI
  # reply, re-indents it by two spaces and wraps it in the fix hash; falls
  # back to the template fix when no method can be found.
  def self.parse_ai_response(ai_response, method_name)
    # Escape special regex characters in method name
    escaped_method_name = Regexp.escape(method_name)

    # Look for the complete method from def to the final (column-0) end
    method_pattern = /def\s+#{escaped_method_name}\s*\([^)]*\)(.*?)\nend/m
    match = ai_response.match(method_pattern)

    if match
      method_code = match[0] # Use the complete matched method
      puts "🔍 Debug: parse_ai_response - extracted complete method: #{method_code.inspect}"

      # Ensure proper indentation
      method_code = method_code.gsub(/^/, '  ') # Add 2 spaces indentation
      puts "🔍 Debug: parse_ai_response - final method code: #{method_code.inspect}"

      {
        method_name: method_name,
        new_code: method_code,
        source: 'ai_generated'
      }
    else
      puts "❌ Could not parse AI response, using fallback"
      puts "🔍 Debug: method_pattern = #{method_pattern.inspect}"
      puts "🔍 Debug: ai_response = #{ai_response[0..200]}..."
      generate_fallback_fix(method_name)
    end
  end

  # Template fix used whenever AI generation is unavailable or unparseable.
  # NOTE: the template deliberately leaves `business_rule_default_value` as a
  # placeholder to be replaced downstream from the business context.
  def self.generate_fallback_fix(method_name)
    puts "🔧 Using fallback fix"
    {
      method_name: method_name,
      new_code: <<~CODE,
        def #{method_name}(a, b)
          # Input validation
          return business_rule_default_value if a.nil? || b.nil?

          # Business logic with error handling
          begin
            if b.zero?
              Rails.logger.warn("Division by zero attempted: \#{a} / \#{b}")
              return business_rule_default_value
            end

            result = a / b
            Rails.logger.info("Operation successful: \#{a} / \#{b} = \#{result}")
            result
          rescue => e
            Rails.logger.error("Unexpected error in operation: \#{e.message}")
            # Apply business rules from context for appropriate return value
            return business_rule_default_value
          end
        end
      CODE
      source: 'fallback_template'
    }
  end

  # A bare `private` does NOT affect methods defined with `def self.`, so the
  # helpers above were unintentionally public. Mark them private explicitly;
  # they remain callable from `self.call` via the implicit receiver.
  private_class_method :merge_contexts, :build_intelligent_prompt,
                       :format_business_context_for_prompt,
                       :extract_method_signature, :find_class_file,
                       :generate_ai_fix, :parse_ai_response,
                       :generate_fallback_fix
end
|
473
|
+
|
474
|
+
# Tool for analyzing context and validating fixes
class ContextAnalysisTool < MCP::Tool
  description "Validates fixes with business context and provides recommendations"
  input_schema(
    properties: {
      fix: { type: "object" },
      context: { type: "object" },
      server_context: { type: "object" }
    },
    required: ["fix", "context"]
  )
  annotations(
    title: "Context Analysis Tool",
    read_only_hint: true,
    destructive_hint: false,
    idempotent_hint: true,
    open_world_hint: false
  )

  # Entry point invoked by the MCP framework.
  # Runs the validation checks over the proposed fix, derives the approval
  # flag from the syntax and business-logic results, and returns the whole
  # validation hash as a JSON text MCP response.
  def self.call(fix:, context:, server_context:)
    validation = {
      syntax_valid: validate_syntax(fix),
      business_logic_valid: validate_business_logic(fix, context),
      performance_acceptable: validate_performance(fix, context),
      security_safe: validate_security(fix, context),
      test_coverage: suggest_test_coverage(fix, context),
      documentation_needed: suggest_documentation(fix, context),
      approved: true, # Will be set based on validation results
      confidence_score: calculate_confidence_score(fix, context),
      recommendations: generate_recommendations(fix, context)
    }

    # Set approval based on validation results - be more lenient for AI-generated fixes
    validation[:approved] = validation[:syntax_valid] &&
                            validation[:business_logic_valid]

    MCP::Tool::Response.new([{ type: "text", text: validation.to_json }])
  end

  # Syntax-checks the fix's new_code by compiling it with
  # RubyVM::InstructionSequence. Accepts either a fix hash or a JSON string;
  # appends a trailing `end` when the code appears to have more `def`s than
  # `end`s. Returns true/false, never raises.
  def self.validate_syntax(fix)
    # Parse fix if it's a string (JSON response)
    fix_data = fix.is_a?(String) ? JSON.parse(fix) : fix

    code = fix_data['new_code'] || fix_data[:new_code]
    puts "🔍 Debug: validate_syntax - original code: #{code.inspect}"

    return false if code.nil?

    # BUGFIX: String#count counts characters from a set, not substrings —
    # `code.count('def')` counted occurrences of the letters d/e/f. Count
    # whole-word keyword occurrences instead. (Still a heuristic: block
    # keywords like `do`, `if` and `begin` also consume an `end`.)
    if code.scan(/\bdef\b/).size > code.scan(/\bend\b/).size
      code += "\nend"
      puts "🔍 Debug: validate_syntax - added missing end"
    end

    puts "🔍 Debug: validate_syntax - final code: #{code.inspect}"
    RubyVM::InstructionSequence.compile(code)
    puts "🔍 Debug: validate_syntax - syntax validation passed"
    true
  rescue SyntaxError => e
    puts "🔍 Debug: validate_syntax - syntax validation failed: #{e.message}"
    false
  rescue => e
    puts "🔍 Debug: validate_syntax - validation error: #{e.message}"
    false
  end

  # Placeholder: business-logic validation always passes.
  def self.validate_business_logic(fix, context)
    true
  end

  # Placeholder: performance validation always passes.
  def self.validate_performance(fix, context)
    true
  end

  # Placeholder: security validation always passes.
  def self.validate_security(fix, context)
    true
  end

  # Static list of suggested test categories for the fix.
  def self.suggest_test_coverage(fix, context)
    [
      "test_normal_operation",
      "test_error_conditions",
      "test_edge_cases",
      "test_input_validation"
    ]
  end

  # Static list of suggested documentation tasks for the fix.
  def self.suggest_documentation(fix, context)
    [
      "add_method_documentation",
      "document_error_handling",
      "add_usage_examples"
    ]
  end

  # Placeholder confidence score: a random value in 0.7..1.0 (NOT derived
  # from the fix — NOTE(review): nondeterministic by design here).
  def self.calculate_confidence_score(fix, context)
    rand(0.7..1.0)
  end

  # Static list of post-deployment recommendations.
  def self.generate_recommendations(fix, context)
    [
      "monitor_performance_after_deployment",
      "add_comprehensive_tests",
      "review_logging_levels",
      "consider_error_metrics"
    ]
  end

  # A bare `private` does NOT affect methods defined with `def self.`, so the
  # helpers above were unintentionally public. Mark them private explicitly;
  # they remain callable from `self.call` via the implicit receiver.
  private_class_method :validate_syntax, :validate_business_logic,
                       :validate_performance, :validate_security,
                       :suggest_test_coverage, :suggest_documentation,
                       :calculate_confidence_score, :generate_recommendations
end
|
590
|
+
|
591
|
+
# Tool for analyzing Git history and commit changes
|
592
|
+
class GitHistoryAnalysisTool < MCP::Tool
|
593
|
+
description "Analyzes Git history to understand changes, commits, and their impact"
|
594
|
+
input_schema(
|
595
|
+
properties: {
|
596
|
+
file_path: { type: "string" },
|
597
|
+
class_name: { type: "string" },
|
598
|
+
method_name: { type: "string" },
|
599
|
+
search_query: { type: "string" },
|
600
|
+
server_context: { type: "object" }
|
601
|
+
},
|
602
|
+
required: ["file_path"]
|
603
|
+
)
|
604
|
+
annotations(
|
605
|
+
title: "Git History Analysis Tool",
|
606
|
+
read_only_hint: true,
|
607
|
+
destructive_hint: false,
|
608
|
+
idempotent_hint: true,
|
609
|
+
open_world_hint: false
|
610
|
+
)
|
611
|
+
|
612
|
+
def self.call(file_path:, class_name: nil, method_name: nil, search_query: nil, server_context:)
|
613
|
+
analysis = {
|
614
|
+
file_history: analyze_file_history(file_path),
|
615
|
+
recent_changes: get_recent_changes(file_path),
|
616
|
+
commit_impact: analyze_commit_impact(file_path, class_name, method_name),
|
617
|
+
related_commits: find_related_commits(file_path, search_query),
|
618
|
+
change_patterns: identify_change_patterns(file_path),
|
619
|
+
author_analysis: analyze_author_patterns(file_path),
|
620
|
+
risk_assessment: assess_change_risks(file_path),
|
621
|
+
recommendations: generate_git_recommendations(file_path, class_name, method_name)
|
622
|
+
}
|
623
|
+
|
624
|
+
MCP::Tool::Response.new([{ type: "text", text: analysis.to_json }])
|
625
|
+
end
|
626
|
+
|
627
|
+
private
|
628
|
+
|
629
|
+
def self.analyze_file_history(file_path)
|
630
|
+
return { error: "File not found" } unless File.exist?(file_path)
|
631
|
+
|
632
|
+
begin
|
633
|
+
# Get git log for the file
|
634
|
+
git_log = `git log --oneline --follow "#{file_path}" 2>/dev/null`
|
635
|
+
commits = git_log.strip.split("\n").map { |line| line.split(' ', 2) }
|
636
|
+
|
637
|
+
{
|
638
|
+
total_commits: commits.length,
|
639
|
+
first_commit: commits.last&.first,
|
640
|
+
last_commit: commits.first&.first,
|
641
|
+
commit_summary: commits.first(10).map { |hash, msg| { hash: hash, message: msg } }
|
642
|
+
}
|
643
|
+
rescue => e
|
644
|
+
{ error: "Git analysis failed: #{e.message}" }
|
645
|
+
end
|
646
|
+
end
|
647
|
+
|
648
|
+
def self.get_recent_changes(file_path)
|
649
|
+
return { error: "File not found" } unless File.exist?(file_path)
|
650
|
+
|
651
|
+
begin
|
652
|
+
# Get recent changes (last 5 commits)
|
653
|
+
git_log = `git log -p -5 "#{file_path}" 2>/dev/null`
|
654
|
+
|
655
|
+
# Parse the git log to extract meaningful changes
|
656
|
+
changes = parse_git_changes(git_log)
|
657
|
+
|
658
|
+
{
|
659
|
+
recent_commits: changes[:commits],
|
660
|
+
change_summary: changes[:summary],
|
661
|
+
lines_added: changes[:lines_added],
|
662
|
+
lines_removed: changes[:lines_removed]
|
663
|
+
}
|
664
|
+
rescue => e
|
665
|
+
{ error: "Recent changes analysis failed: #{e.message}" }
|
666
|
+
end
|
667
|
+
end
|
668
|
+
|
669
|
+
def self.analyze_commit_impact(file_path, class_name, method_name)
|
670
|
+
return { error: "File not found" } unless File.exist?(file_path)
|
671
|
+
|
672
|
+
begin
|
673
|
+
# Analyze impact of recent commits on specific class/method
|
674
|
+
if class_name && method_name
|
675
|
+
git_blame = `git blame -L "/#{method_name}/" "#{file_path}" 2>/dev/null`
|
676
|
+
blame_analysis = parse_git_blame(git_blame)
|
677
|
+
|
678
|
+
{
|
679
|
+
method_changes: blame_analysis[:method_changes],
|
680
|
+
last_modified: blame_analysis[:last_modified],
|
681
|
+
change_frequency: blame_analysis[:change_frequency],
|
682
|
+
impact_level: assess_method_impact(blame_analysis)
|
683
|
+
}
|
684
|
+
else
|
685
|
+
{ message: "Class and method names required for detailed impact analysis" }
|
686
|
+
end
|
687
|
+
rescue => e
|
688
|
+
{ error: "Commit impact analysis failed: #{e.message}" }
|
689
|
+
end
|
690
|
+
end
|
691
|
+
|
692
|
+
def self.find_related_commits(file_path, search_query)
|
693
|
+
return { error: "File not found" } unless File.exist?(file_path)
|
694
|
+
|
695
|
+
begin
|
696
|
+
if search_query
|
697
|
+
# Search for commits containing the query
|
698
|
+
git_log = `git log --grep="#{search_query}" --oneline "#{file_path}" 2>/dev/null`
|
699
|
+
commits = git_log.strip.split("\n").map { |line| line.split(' ', 2) }
|
700
|
+
|
701
|
+
{
|
702
|
+
search_query: search_query,
|
703
|
+
matching_commits: commits.map { |hash, msg| { hash: hash, message: msg } },
|
704
|
+
total_matches: commits.length
|
705
|
+
}
|
706
|
+
else
|
707
|
+
{ message: "Search query required for related commits analysis" }
|
708
|
+
end
|
709
|
+
rescue => e
|
710
|
+
{ error: "Related commits search failed: #{e.message}" }
|
711
|
+
end
|
712
|
+
end
|
713
|
+
|
714
|
+
def self.identify_change_patterns(file_path)
|
715
|
+
return { error: "File not found" } unless File.exist?(file_path)
|
716
|
+
|
717
|
+
begin
|
718
|
+
# Analyze patterns in file changes
|
719
|
+
git_log = `git log --stat "#{file_path}" 2>/dev/null`
|
720
|
+
patterns = analyze_change_statistics(git_log)
|
721
|
+
|
722
|
+
{
|
723
|
+
change_frequency: patterns[:frequency],
|
724
|
+
typical_change_size: patterns[:typical_size],
|
725
|
+
change_types: patterns[:types],
|
726
|
+
seasonal_patterns: patterns[:seasonal]
|
727
|
+
}
|
728
|
+
rescue => e
|
729
|
+
{ error: "Change pattern analysis failed: #{e.message}" }
|
730
|
+
end
|
731
|
+
end
|
732
|
+
|
733
|
+
def self.analyze_author_patterns(file_path)
|
734
|
+
return { error: "File not found" } unless File.exist?(file_path)
|
735
|
+
|
736
|
+
begin
|
737
|
+
# Analyze who has been making changes to the file
|
738
|
+
git_log = `git shortlog -sn "#{file_path}" 2>/dev/null`
|
739
|
+
authors = git_log.strip.split("\n").map { |line| line.split("\t") }
|
740
|
+
|
741
|
+
{
|
742
|
+
contributors: authors.map { |count, author| { author: author, commits: count.to_i } },
|
743
|
+
primary_owner: authors.first&.last,
|
744
|
+
ownership_distribution: calculate_ownership_distribution(authors)
|
745
|
+
}
|
746
|
+
rescue => e
|
747
|
+
{ error: "Author pattern analysis failed: #{e.message}" }
|
748
|
+
end
|
749
|
+
end
|
750
|
+
|
751
|
+
# Derives a coarse change-risk profile for +file_path+ from its git history.
#
# Flags are accumulated from recent commit volume and author count, then
# translated into a level and recommendations by the sibling helpers.
#
# @param file_path [String]
# @return [Hash] with :risk_level, :risk_factors, :recommendations, or an
#   +:error+ hash on failure
def self.assess_change_risks(file_path)
  return { error: "File not found" } unless File.exist?(file_path)

  history = `git log --oneline "#{file_path}" 2>/dev/null`
  latest = history.strip.split("\n").first(5)
  author_rows = `git shortlog -sn "#{file_path}" 2>/dev/null`.strip.split("\n")

  factors = []
  factors << "frequent_changes" if latest.length > 3
  factors << "multiple_authors" if author_rows.length > 2
  factors << "recent_modifications" unless latest.empty?

  {
    risk_level: calculate_risk_level(factors),
    risk_factors: factors,
    recommendations: generate_risk_recommendations(factors)
  }
rescue => e
  { error: "Risk assessment failed: #{e.message}" }
end
|
773
|
+
|
774
|
+
# Builds a list of process recommendations for a change to +file_path+.
#
# @param file_path [String] file being modified
# @param class_name [String, nil] class under change, if known
# @param method_name [String, nil] method under change, if known
# @return [Array<String>] recommendation tags (possibly empty)
def self.generate_git_recommendations(file_path, class_name, method_name)
  file_present = File.exist?(file_path)
  # Count contributors; an empty shortlog (no repo / unknown path) yields 0.
  author_count = `git shortlog -sn "#{file_path}" 2>/dev/null`.strip.split("\n").length

  advice = []
  advice << "review_recent_changes" if file_present
  advice << "assess_test_coverage" if class_name && method_name
  advice << "document_changes" if file_present
  advice << "peer_review_required" if author_count > 1
  advice
end
|
785
|
+
|
786
|
+
# Helper methods for parsing git output
|
787
|
+
# Parses raw `git log` output into structured commit records.
#
# Each "commit <sha>\nAuthor: ...\nDate: ...\n\n<message>" stanza becomes a
# hash with :hash, :author, :date, and the stripped :message.
#
# NOTE(review): :lines_added / :lines_removed are fixed at 0 — the plain
# `git log` format this parses carries no numstat data to count from.
#
# @param git_log [String] raw `git log` output
# @return [Hash] with :commits, :summary, :lines_added, :lines_removed
def self.parse_git_changes(git_log)
  stanza_pattern = /commit (\w+)\nAuthor: (.+)\nDate: (.+)\n\n(.+?)(?=commit|\z)/m
  parsed = git_log.scan(stanza_pattern).map do |sha, author, date, message|
    { hash: sha, author: author, date: date, message: message.strip }
  end

  {
    commits: parsed,
    summary: "Recent changes analyzed",
    lines_added: 0,
    lines_removed: 0
  }
end
|
809
|
+
|
810
|
+
# Parses `git blame` output into per-line change records.
#
# Matches lines of the form "<sha> (<author> YYYY-MM-DD ..." and reports
# every (hash, author) pair, the first hash seen, and the match count.
#
# @param git_blame [String] raw `git blame` output
# @return [Hash] with :method_changes, :last_modified, :change_frequency
def self.parse_git_blame(git_blame)
  entries = git_blame.scan(/^(\w+)\s+\((.+?)\s+\d{4}-\d{2}-\d{2}/).map do |sha, who|
    { hash: sha, author: who }
  end

  {
    method_changes: entries,
    # first matched line's hash, or nil when nothing matched
    last_modified: entries.dig(0, :hash),
    change_frequency: entries.length
  }
end
|
828
|
+
|
829
|
+
# Summarizes change statistics for `git log --stat` output.
#
# NOTE(review): the +git_log+ argument is currently ignored — this returns a
# fixed placeholder profile. Preserved as-is to keep behavior identical.
#
# @param git_log [String] raw `git log --stat` output (unused)
# @return [Hash] with :frequency, :typical_size, :types, :seasonal
def self.analyze_change_statistics(git_log)
  {
    frequency: "moderate",
    typical_size: "small",
    types: %w[bug_fixes refactoring],
    seasonal: "no_pattern"
  }
end
|
838
|
+
|
839
|
+
# Maps a contributor list to a coarse ownership label.
#
# NOTE(review): an empty list falls into the <= 3 branch and reports
# "shared_ownership"; preserved to keep behavior identical.
#
# @param authors [Array] one entry per contributor
# @return [String] "single_owner", "shared_ownership", or "distributed_ownership"
def self.calculate_ownership_distribution(authors)
  owner_count = authors.length
  if owner_count == 1
    "single_owner"
  elsif owner_count <= 3
    "shared_ownership"
  else
    "distributed_ownership"
  end
end
|
844
|
+
|
845
|
+
# Converts a count of risk factors into a risk tier.
#
# @param risk_factors [Array<String>] accumulated risk flags
# @return [String] "low" (0-1 factors), "medium" (2), or "high" (3+)
def self.calculate_risk_level(risk_factors)
  case risk_factors.length
  when 0, 1 then "low"
  when 2 then "medium"
  else "high"
  end
end
|
850
|
+
|
851
|
+
# Maps known risk factors to their mitigation recommendations.
#
# Output order follows the table below regardless of input order; unknown
# factors are ignored.
#
# @param risk_factors [Array<String>]
# @return [Array<String>] recommendation tags
def self.generate_risk_recommendations(risk_factors)
  advice_for = {
    "frequent_changes" => "increase_test_coverage",
    "multiple_authors" => "implement_code_review",
    "recent_modifications" => "monitor_performance"
  }

  advice_for.each_with_object([]) do |(factor, advice), list|
    list << advice if risk_factors.include?(factor)
  end
end
|
858
|
+
|
859
|
+
# Rates a method's change impact from its blame-derived change frequency.
#
# Fix: the original compared +blame_analysis[:change_frequency]+ directly
# with +>+, raising NoMethodError when the key was absent or nil. A missing
# or nil frequency now safely counts as 0 ("low").
#
# @param blame_analysis [Hash] as returned by parse_git_blame; reads
#   :change_frequency
# @return [String] "high" (>5 changes), "medium" (>2), or "low"
def self.assess_method_impact(blame_analysis)
  frequency = blame_analysis[:change_frequency].to_i  # nil-safe: nil.to_i == 0
  return "high" if frequency > 5
  return "medium" if frequency > 2
  "low"
end
|
864
|
+
end
|
865
|
+
|
866
|
+
# Tool for referencing coding standards and best practices
|
867
|
+
# Tool for referencing coding standards and best practices
#
# MCP tool that assembles a JSON bundle of coding standards, best practices,
# architectural guidelines, and compliance/performance/security/testing/
# documentation standards for a given standard_type and optional domain.
# All answers come from the hard-coded lookup tables below; nothing is read
# from disk or the network.
class StandardsReferenceTool < MCP::Tool
  description "Provides access to coding standards, best practices, and architectural guidelines"
  input_schema(
    properties: {
      standard_type: { type: "string" },
      domain: { type: "string" },
      class_name: { type: "string" },
      method_name: { type: "string" },
      search_query: { type: "string" },
      server_context: { type: "object" }
    },
    required: ["standard_type"]
  )
  annotations(
    title: "Standards Reference Tool",
    read_only_hint: true,
    destructive_hint: false,
    idempotent_hint: true,
    open_world_hint: false
  )

  # MCP entry point: gathers every standards category for the request and
  # returns them JSON-encoded in a single text response.
  # NOTE(review): search_query and server_context are accepted but not used
  # by any helper below.
  def self.call(standard_type:, domain: nil, class_name: nil, method_name: nil, search_query: nil, server_context:)
    standards = {
      coding_standards: get_coding_standards(standard_type, domain),
      best_practices: get_best_practices(standard_type, domain, class_name),
      architectural_guidelines: get_architectural_guidelines(domain),
      domain_specific_rules: get_domain_specific_rules(domain, class_name),
      compliance_requirements: get_compliance_requirements(domain),
      performance_standards: get_performance_standards(domain),
      security_standards: get_security_standards(domain),
      testing_standards: get_testing_standards(standard_type),
      documentation_standards: get_documentation_standards(standard_type),
      recommendations: generate_standards_recommendations(standard_type, domain, class_name, method_name)
    }

    MCP::Tool::Response.new([{ type: "text", text: standards.to_json }])
  end

  # NOTE(review): `private` does not affect methods defined with `def self.`;
  # every class method below remains publicly callable.
  private

  # Static standards table keyed by standard_type; domain is accepted but
  # unused. Unknown types get a not-found message hash.
  def self.get_coding_standards(standard_type, domain)
    case standard_type
    when 'error_handling'
      {
        general: 'comprehensive_error_handling',
        strategy: 'defensive_programming',
        logging: 'structured_logging',
        user_experience: 'graceful_degradation',
        recovery: 'automatic_recovery_when_possible'
      }
    when 'validation'
      {
        input_validation: 'strict_validation',
        data_sanitization: 'required',
        type_checking: 'enforced',
        boundary_checks: 'mandatory'
      }
    when 'performance'
      {
        response_time: 'under_500ms',
        memory_usage: 'optimized',
        database_queries: 'minimal',
        caching: 'strategic'
      }
    when 'security'
      {
        authentication: 'required',
        authorization: 'role_based',
        data_encryption: 'sensitive_data',
        input_sanitization: 'mandatory'
      }
    else
      { message: "Standard type '#{standard_type}' not found" }
    end
  end

  # Best-practice tags per standard_type; domain and class_name are accepted
  # but unused. Unknown types yield an empty list.
  def self.get_best_practices(standard_type, domain, class_name)
    practices = []

    case standard_type
    when 'error_handling'
      practices << 'use_specific_exception_types'
      practices << 'provide_meaningful_error_messages'
      practices << 'implement_graceful_fallbacks'
      practices << 'log_errors_with_context'
    when 'validation'
      practices << 'validate_at_boundaries'
      practices << 'use_strong_typing'
      practices << 'implement_business_rule_validation'
      practices << 'provide_clear_validation_errors'
    when 'performance'
      practices << 'profile_before_optimizing'
      practices << 'use_appropriate_data_structures'
      practices << 'implement_caching_strategies'
      practices << 'minimize_database_round_trips'
    when 'security'
      practices << 'follow_owasp_guidelines'
      practices << 'implement_least_privilege'
      practices << 'validate_all_inputs'
      practices << 'encrypt_sensitive_data'
    end

    practices
  end

  # Architecture guidance per business domain; unknown domains (including
  # nil) get a not-found message hash.
  def self.get_architectural_guidelines(domain)
    case domain
    when 'user_management'
      {
        pattern: 'repository_pattern',
        separation: 'business_logic_from_presentation',
        data_access: 'through_models',
        security: 'layered_security'
      }
    when 'inventory_management'
      {
        pattern: 'domain_driven_design',
        consistency: 'eventual_consistency',
        caching: 'distributed_caching',
        monitoring: 'real_time_monitoring'
      }
    when 'order_management'
      {
        pattern: 'saga_pattern',
        transactions: 'distributed_transactions',
        reliability: 'fault_tolerance',
        monitoring: 'business_metrics'
      }
    when 'payment_processing'
      {
        pattern: 'facade_pattern',
        security: 'end_to_end_encryption',
        compliance: 'pci_dss_compliance',
        monitoring: 'fraud_detection'
      }
    else
      { message: "Domain '#{domain}' not found" }
    end
  end

  # Domain rule table; class_name is accepted but unused. Returns {} for a
  # nil or unknown domain.
  def self.get_domain_specific_rules(domain, class_name)
    return {} unless domain

    case domain
    when 'user_management'
      {
        data_privacy: 'gdpr_compliant',
        authentication: 'multi_factor_required',
        session_management: 'secure_session_handling',
        audit_trail: 'comprehensive_logging'
      }
    when 'inventory_management'
      {
        data_consistency: 'eventual_consistency',
        stock_validation: 'real_time_validation',
        availability: 'high_availability',
        backup_strategy: 'continuous_backup'
      }
    when 'order_management'
      {
        data_integrity: 'acid_compliance',
        order_status: 'immutable_status_transitions',
        payment_validation: 'pre_authorization_required',
        fulfillment: 'automated_fulfillment'
      }
    when 'payment_processing'
      {
        security: 'pci_dss_compliance',
        encryption: 'end_to_end_encryption',
        fraud_detection: 'real_time_monitoring',
        compliance: 'regulatory_compliance'
      }
    else
      {}
    end
  end

  # Compliance regimes per domain; every domain (and the fallback) includes
  # SOX.
  def self.get_compliance_requirements(domain)
    case domain
    when 'user_management'
      ['GDPR', 'CCPA', 'SOX']
    when 'payment_processing'
      ['PCI-DSS', 'SOX', 'GLBA']
    when 'inventory_management'
      ['SOX', 'ISO_27001']
    when 'order_management'
      ['SOX', 'PCI-DSS']
    else
      ['SOX']
    end
  end

  # Response-time / SLA targets per domain, with a generic fallback.
  def self.get_performance_standards(domain)
    case domain
    when 'user_management'
      { response_time: 'under_200ms', sla: '99.9%' }
    when 'inventory_management'
      { response_time: 'under_100ms', sla: '99.95%' }
    when 'order_management'
      { response_time: 'under_500ms', sla: '99.99%' }
    when 'payment_processing'
      { response_time: 'under_1000ms', sla: '99.99%' }
    else
      { response_time: 'under_500ms', sla: '99.5%' }
    end
  end

  # Authentication/encryption expectations per domain, with a generic
  # fallback identical to the inventory_management entry.
  def self.get_security_standards(domain)
    case domain
    when 'user_management'
      { authentication: 'required', encryption: 'sensitive_data_only' }
    when 'payment_processing'
      { authentication: 'required', encryption: 'all_data' }
    when 'inventory_management'
      { authentication: 'required', encryption: 'standard' }
    else
      { authentication: 'required', encryption: 'standard' }
    end
  end

  # Testing focus areas per standard_type, with a generic fallback.
  def self.get_testing_standards(standard_type)
    case standard_type
    when 'error_handling'
      ['test_error_conditions', 'test_edge_cases', 'test_recovery_scenarios']
    when 'validation'
      ['test_invalid_inputs', 'test_boundary_conditions', 'test_business_rules']
    when 'performance'
      ['test_under_load', 'test_memory_usage', 'test_response_times']
    when 'security'
      ['test_authentication', 'test_authorization', 'test_input_validation']
    else
      ['test_normal_operation', 'test_error_conditions', 'test_edge_cases']
    end
  end

  # Documentation expectations per standard_type, with a generic fallback.
  def self.get_documentation_standards(standard_type)
    case standard_type
    when 'error_handling'
      ['document_error_scenarios', 'document_recovery_procedures', 'document_logging_format']
    when 'validation'
      ['document_validation_rules', 'document_error_messages', 'document_business_constraints']
    when 'performance'
      ['document_performance_requirements', 'document_monitoring_metrics', 'document_optimization_strategies']
    when 'security'
      ['document_security_requirements', 'document_compliance_measures', 'document_incident_response']
    else
      ['document_usage', 'document_parameters', 'document_return_values']
    end
  end

  # Recommendation tags derived from which request parameters were supplied;
  # the first tag interpolates standard_type directly.
  def self.generate_standards_recommendations(standard_type, domain, class_name, method_name)
    recommendations = []

    recommendations << "follow_#{standard_type}_standards"
    recommendations << "implement_domain_specific_rules" if domain
    recommendations << "add_comprehensive_tests" if class_name && method_name
    recommendations << "document_implementation" if class_name && method_name
    recommendations << "review_with_team" if standard_type == 'security'

    recommendations
  end
end
|
1129
|
+
|
1130
|
+
# Tool for searching and accessing supporting documentation
|
1131
|
+
# Tool for searching and accessing supporting documentation
#
# MCP tool that greps the working directory for documentation matching a
# search query: doc folders, READMEs, feature files, API docs, code
# comments, and config files. Matching is a case-insensitive substring
# check; results carry a relevance score and a short excerpt.
# NOTE(review): every search reads matching files fully into memory with
# File.read and there is no size guard — confirm acceptable for the repos
# this runs against.
class DocumentationSearchTool < MCP::Tool
  description "Searches and provides access to supporting documentation, README files, and feature documentation"
  input_schema(
    properties: {
      search_query: { type: "string" },
      document_type: { type: "string" },
      feature_name: { type: "string" },
      class_name: { type: "string" },
      method_name: { type: "string" },
      server_context: { type: "object" }
    },
    required: ["search_query"]
  )
  annotations(
    title: "Documentation Search Tool",
    read_only_hint: true,
    destructive_hint: false,
    idempotent_hint: true,
    open_world_hint: false
  )

  # MCP entry point: runs every search category for the query and returns
  # the combined results JSON-encoded in a single text response.
  def self.call(search_query:, document_type: nil, feature_name: nil, class_name: nil, method_name: nil, server_context:)
    search_results = {
      documentation_files: search_documentation_files(search_query),
      readme_files: search_readme_files(search_query),
      feature_docs: search_feature_documentation(search_query, feature_name),
      api_documentation: search_api_documentation(search_query),
      code_comments: search_code_comments(search_query, class_name, method_name),
      configuration_files: search_configuration_files(search_query),
      related_documents: find_related_documents(search_query),
      recommendations: generate_documentation_recommendations(search_query, document_type, feature_name)
    }

    MCP::Tool::Response.new([{ type: "text", text: search_results.to_json }])
  end

  # NOTE(review): `private` does not affect methods defined with `def self.`;
  # every class method below remains publicly callable.
  private

  # Scans the conventional doc locations (directories or single files) for
  # the query; returns match records sorted by descending relevance.
  def self.search_documentation_files(search_query)
    documentation_files = []

    # Search in common documentation directories
    # (the last two entries are files, handled by the elsif branch)
    doc_dirs = ['doc', 'docs', 'documentation', 'README.md', 'API_README.md']

    doc_dirs.each do |dir|
      if Dir.exist?(dir)
        Dir.glob("#{dir}/**/*").each do |file|
          next unless File.file?(file) && File.readable?(file)

          content = File.read(file)
          if content.downcase.include?(search_query.downcase)
            documentation_files << {
              file_path: file,
              file_type: File.extname(file),
              relevance_score: calculate_relevance(content, search_query),
              excerpt: extract_relevant_excerpt(content, search_query)
            }
          end
        end
      elsif File.exist?(dir)
        content = File.read(dir)
        if content.downcase.include?(search_query.downcase)
          documentation_files << {
            file_path: dir,
            file_type: File.extname(dir),
            relevance_score: calculate_relevance(content, search_query),
            excerpt: extract_relevant_excerpt(content, search_query)
          }
        end
      end
    end

    documentation_files.sort_by { |file| -file[:relevance_score] }
  end

  # Greps every README* file in the tree; also reports which markdown
  # sections contained the query.
  def self.search_readme_files(search_query)
    readme_files = []

    # Search for README files in the project
    Dir.glob("**/README*").each do |file|
      next unless File.file?(file) && File.readable?(file)

      content = File.read(file)
      if content.downcase.include?(search_query.downcase)
        readme_files << {
          file_path: file,
          relevance_score: calculate_relevance(content, search_query),
          excerpt: extract_relevant_excerpt(content, search_query),
          section_matches: find_matching_sections(content, search_query)
        }
      end
    end

    readme_files.sort_by { |file| -file[:relevance_score] }
  end

  # Greps files whose path contains feature_name; returns [] when no
  # feature_name is given.
  def self.search_feature_documentation(search_query, feature_name)
    feature_docs = []

    # Search for feature-specific documentation
    if feature_name
      # Look for feature-specific files
      feature_files = Dir.glob("**/*#{feature_name}*").select { |f| File.file?(f) && File.readable?(f) }

      feature_files.each do |file|
        content = File.read(file)
        if content.downcase.include?(search_query.downcase)
          feature_docs << {
            file_path: file,
            feature_name: feature_name,
            relevance_score: calculate_relevance(content, search_query),
            excerpt: extract_relevant_excerpt(content, search_query)
          }
        end
      end
    end

    feature_docs.sort_by { |doc| -doc[:relevance_score] }
  end

  # Greps API_README* files; unlike the other searches this list is NOT
  # re-sorted by relevance.
  def self.search_api_documentation(search_query)
    api_docs = []

    # Search for API documentation
    api_files = Dir.glob("**/API_README*").select { |f| File.file?(f) && File.readable?(f) }

    api_files.each do |file|
      content = File.read(file)
      if content.downcase.include?(search_query.downcase)
        api_docs << {
          file_path: file,
          relevance_score: calculate_relevance(content, search_query),
          excerpt: extract_relevant_excerpt(content, search_query),
          endpoint_matches: find_api_endpoints(content, search_query)
        }
      end
    end

    api_docs
  end

  # Extracts comments from the class's source file (located by
  # find_class_file) and keeps those containing the query. method_name is
  # only echoed into the result records, not used for filtering.
  def self.search_code_comments(search_query, class_name, method_name)
    code_comments = []

    # Search for code comments and documentation
    if class_name
      class_file = find_class_file(class_name)
      if class_file && File.exist?(class_file)
        content = File.read(class_file)

        # Extract comments and documentation
        comments = extract_code_comments(content)

        comments.each do |comment|
          if comment.downcase.include?(search_query.downcase)
            code_comments << {
              file_path: class_file,
              class_name: class_name,
              method_name: method_name,
              comment: comment,
              relevance_score: calculate_relevance(comment, search_query)
            }
          end
        end
      end
    end

    code_comments.sort_by { |comment| -comment[:relevance_score] }
  end

  # Greps Rails config directories for the query, sorted by relevance.
  def self.search_configuration_files(search_query)
    config_files = []

    # Search in configuration files
    # ('config/initializers' is already covered by the recursive glob over
    # 'config'; matches there will appear twice)
    config_dirs = ['config', 'config/initializers']

    config_dirs.each do |dir|
      if Dir.exist?(dir)
        Dir.glob("#{dir}/**/*").each do |file|
          next unless File.file?(file) && File.readable?(file)

          content = File.read(file)
          if content.downcase.include?(search_query.downcase)
            config_files << {
              file_path: file,
              file_type: File.extname(file),
              relevance_score: calculate_relevance(content, search_query),
              excerpt: extract_relevant_excerpt(content, search_query)
            }
          end
        end
      end
    end

    config_files.sort_by { |file| -file[:relevance_score] }
  end

  # Broad sweep over md/txt/yml/yaml/rb files anywhere in the tree (minus
  # vendor, node_modules and .git); returns the ten most relevant matches.
  def self.find_related_documents(search_query)
    related_docs = []

    # Find related documents based on search query
    all_files = Dir.glob("**/*").select { |f| File.file?(f) && File.readable?(f) && File.extname(f) =~ /\.(md|txt|yml|yaml|rb)$/ }

    all_files.each do |file|
      next if file.include?('vendor/') || file.include?('node_modules/') || file.include?('.git/')

      content = File.read(file)
      if content.downcase.include?(search_query.downcase)
        related_docs << {
          file_path: file,
          file_type: File.extname(file),
          relevance_score: calculate_relevance(content, search_query),
          excerpt: extract_relevant_excerpt(content, search_query)
        }
      end
    end

    related_docs.sort_by { |doc| -doc[:relevance_score] }.first(10)
  end

  # Fixed recommendation tags; two extra tags appear when document_type /
  # feature_name were supplied. search_query is accepted but unused.
  def self.generate_documentation_recommendations(search_query, document_type, feature_name)
    recommendations = []

    recommendations << "review_related_documentation"
    recommendations << "update_documentation_if_outdated" if document_type
    recommendations << "add_feature_documentation" if feature_name
    recommendations << "improve_search_indexing"
    recommendations << "create_documentation_links"

    recommendations
  end

  # Helper methods

  # Scores content against the query: +1 per query term found, plus +2
  # whenever the full query string is present.
  # NOTE(review): the +2 exact-match bonus sits inside the per-term loop, so
  # it is awarded once per matching term rather than once per document.
  def self.calculate_relevance(content, search_query)
    query_terms = search_query.downcase.split(/\s+/)
    content_lower = content.downcase

    relevance = 0
    query_terms.each do |term|
      if content_lower.include?(term)
        relevance += 1
        # Bonus for exact matches
        relevance += 2 if content_lower.include?(search_query.downcase)
      end
    end

    relevance
  end

  # Returns ~100 chars of context around the first (case-insensitive) hit,
  # with "..." ellipses where the excerpt was trimmed. Falls back to the
  # first max_length chars when the query is absent.
  def self.extract_relevant_excerpt(content, search_query, max_length = 200)
    content_lower = content.downcase
    query_lower = search_query.downcase

    start_pos = content_lower.index(query_lower)
    return content[0, max_length] unless start_pos

    excerpt_start = [start_pos - 50, 0].max
    excerpt_end = [start_pos + search_query.length + 50, content.length].min

    excerpt = content[excerpt_start, excerpt_end - excerpt_start]

    if excerpt_start > 0
      excerpt = "..." + excerpt
    end

    if excerpt_end < content.length
      excerpt = excerpt + "..."
    end

    excerpt
  end

  # Collects the markdown section headers ('#'-prefixed lines) that precede
  # each line containing the query; duplicates removed.
  def self.find_matching_sections(content, search_query)
    sections = []
    lines = content.split("\n")

    lines.each_with_index do |line, index|
      if line.downcase.include?(search_query.downcase)
        # Find section header (lines starting with #)
        section_header = find_section_header(lines, index)
        sections << section_header if section_header
      end
    end

    sections.uniq
  end

  # Walks backwards from current_index to the nearest '#'-prefixed line;
  # nil when there is none.
  def self.find_section_header(lines, current_index)
    # Look backwards for section header
    (current_index - 1).downto(0) do |i|
      line = lines[i]
      if line.strip.start_with?('#')
        return line.strip
      end
    end

    nil
  end

  # Collects distinct HTTP verbs mentioned anywhere in the content.
  # NOTE(review): the regex has no word boundaries, so substrings inside
  # ordinary words also match (e.g. "output" contains "put"); search_query
  # is accepted but unused.
  def self.find_api_endpoints(content, search_query)
    endpoints = []

    # Look for API endpoint definitions
    content.scan(/get|post|put|delete|patch/i).each do |method|
      endpoints << method.upcase
    end

    endpoints.uniq
  end

  # Pulls single-line '#' comments and =begin/=end blocks out of Ruby
  # source.
  # NOTE(review): the line pattern also captures '#' occurring inside string
  # literals and interpolations, not only real comments.
  def self.extract_code_comments(content)
    comments = []

    # Extract Ruby comments
    content.scan(/#(.+)$/).each do |match|
      comments << match[0].strip
    end

    # Extract multi-line comments
    content.scan(/=begin(.+?)=end/m).each do |match|
      comments << match[0].strip
    end

    comments
  end

  # Resolves a class name to its source file across conventional Rails
  # locations; nil when no candidate exists.
  # NOTE(review): String#underscore is ActiveSupport, not core Ruby — this
  # helper assumes a Rails environment.
  def self.find_class_file(class_name)
    # Look for the class file in common Rails locations
    possible_paths = [
      "app/models/#{class_name.underscore}.rb",
      "app/controllers/#{class_name.underscore}.rb",
      "app/services/#{class_name.underscore}.rb",
      "lib/#{class_name.underscore}.rb"
    ]

    possible_paths.find { |path| File.exist?(path) }
  end
end
|
1469
|
+
|
1470
|
+
# Tool for integrating with JIRA to access tickets and project information
|
1471
|
+
class JIRAIntegrationTool < MCP::Tool
|
1472
|
+
description "Integrates with JIRA to access tickets, project information, and issue tracking"
|
1473
|
+
input_schema(
|
1474
|
+
properties: {
|
1475
|
+
action: { type: "string" },
|
1476
|
+
ticket_id: { type: "string" },
|
1477
|
+
project_key: { type: "string" },
|
1478
|
+
search_query: { type: "string" },
|
1479
|
+
issue_type: { type: "string" },
|
1480
|
+
status: { type: "string" },
|
1481
|
+
assignee: { type: "string" },
|
1482
|
+
server_context: { type: "object" }
|
1483
|
+
},
|
1484
|
+
required: ["action"]
|
1485
|
+
)
|
1486
|
+
annotations(
|
1487
|
+
title: "JIRA Integration Tool",
|
1488
|
+
read_only_hint: true,
|
1489
|
+
destructive_hint: false,
|
1490
|
+
idempotent_hint: true,
|
1491
|
+
open_world_hint: false
|
1492
|
+
)
|
1493
|
+
|
1494
|
+
# MCP entry point: dispatches +action+ to the matching JIRA helper and
# returns its result JSON-encoded in a single text response.
#
# Unrecognized actions yield an error payload rather than raising.
# NOTE(review): server_context is accepted but unused.
def self.call(action:, ticket_id: nil, project_key: nil, search_query: nil, issue_type: nil, status: nil, assignee: nil, server_context:)
  payload =
    case action
    when 'get_ticket'              then get_ticket_details(ticket_id)
    when 'search_tickets'          then search_tickets(search_query, project_key, issue_type, status, assignee)
    when 'get_project_info'        then get_project_information(project_key)
    when 'get_issue_types'         then get_issue_types(project_key)
    when 'get_statuses'            then get_project_statuses(project_key)
    when 'get_assignees'           then get_project_assignees(project_key)
    when 'get_recent_activity'     then get_recent_project_activity(project_key)
    when 'analyze_ticket_patterns' then analyze_ticket_patterns(project_key, issue_type)
    else { error: "Unknown action: #{action}" }
    end

  MCP::Tool::Response.new([{ type: "text", text: payload.to_json }])
end
|
1518
|
+
|
1519
|
+
private
|
1520
|
+
|
1521
|
+
# Fetches a single ticket's details from the (simulated) JIRA backend.
#
# @param ticket_id [String, nil]
# @return [Hash] ticket fields keyed by symbol, or an +:error+ hash when the
#   id is missing, JIRA is unavailable, or the lookup raises
def self.get_ticket_details(ticket_id)
  return { error: "Ticket ID required" } unless ticket_id
  return { error: "JIRA integration not available" } unless simulate_jira_available?

  data = simulate_jira_ticket(ticket_id)
  # Copied field-by-field (in this order) so the response shape is explicit.
  fields = %i[summary description status priority assignee reporter created updated
              issue_type project components labels comments attachments related_issues time_tracking]

  fields.each_with_object({ ticket_id: ticket_id }) { |field, out| out[field] = data[field] }
rescue => e
  { error: "Failed to get ticket details: #{e.message}" }
end
|
1555
|
+
|
1556
|
+
# Searches the (simulated) JIRA backend and echoes the filter criteria
# alongside trimmed-down ticket summaries.
#
# @return [Hash] search metadata plus :total_results and :tickets, or an
#   +:error+ hash when JIRA is unavailable or the search raises
def self.search_tickets(search_query, project_key, issue_type, status, assignee)
  return { error: "JIRA integration not available" } unless simulate_jira_available?

  matches = simulate_jira_search(search_query, project_key, issue_type, status, assignee)
  summaries = matches.map do |ticket|
    {
      ticket_id: ticket[:id],
      summary: ticket[:summary],
      status: ticket[:status],
      priority: ticket[:priority],
      assignee: ticket[:assignee],
      issue_type: ticket[:issue_type],
      created: ticket[:created],
      updated: ticket[:updated]
    }
  end

  {
    search_query: search_query,
    project_key: project_key,
    issue_type: issue_type,
    status: status,
    assignee: assignee,
    total_results: matches.length,
    tickets: summaries
  }
rescue => e
  { error: "Failed to search tickets: #{e.message}" }
end
|
1588
|
+
|
1589
|
+
# Retrieves project metadata from the (simulated) JIRA backend.
#
# @param project_key [String, nil]
# @return [Hash] project fields keyed by symbol, or an +:error+ hash
def self.get_project_information(project_key)
  return { error: "Project key required" } unless project_key
  return { error: "JIRA integration not available" } unless simulate_jira_available?

  info = simulate_jira_project(project_key)
  {
    project_key: project_key,
    name: info[:name],
    description: info[:description],
    lead: info[:lead],
    url: info[:url],
    components: info[:components],
    issue_types: info[:issue_types],
    statuses: info[:statuses],
    versions: info[:versions],
    permissions: info[:permissions]
  }
rescue => e
  { error: "Failed to get project information: #{e.message}" }
end
|
1615
|
+
|
1616
|
+
# Lists the issue types configured for a JIRA project (simulated backend).
def self.get_issue_types(project_key)
  return { error: "Project key required" } unless project_key
  return { error: "JIRA integration not available" } unless simulate_jira_available?

  rows = simulate_jira_issue_types(project_key).map do |row|
    { id: row[:id], name: row[:name], description: row[:description], icon_url: row[:icon_url] }
  end
  { project_key: project_key, issue_types: rows }
rescue => e
  { error: "Failed to get issue types: #{e.message}" }
end
|
1641
|
+
|
1642
|
+
# Lists the workflow statuses available in a JIRA project (simulated backend).
def self.get_project_statuses(project_key)
  return { error: "Project key required" } unless project_key
  return { error: "JIRA integration not available" } unless simulate_jira_available?

  rows = simulate_jira_statuses(project_key).map do |row|
    { id: row[:id], name: row[:name], description: row[:description], category: row[:category] }
  end
  { project_key: project_key, statuses: rows }
rescue => e
  { error: "Failed to get project statuses: #{e.message}" }
end
|
1667
|
+
|
1668
|
+
# Lists users who can be assigned issues in a JIRA project (simulated backend).
def self.get_project_assignees(project_key)
  return { error: "Project key required" } unless project_key
  return { error: "JIRA integration not available" } unless simulate_jira_available?

  people = simulate_jira_assignees(project_key).map do |person|
    {
      username: person[:username],
      display_name: person[:display_name],
      email: person[:email],
      active: person[:active]
    }
  end
  { project_key: project_key, assignees: people }
rescue => e
  { error: "Failed to get project assignees: #{e.message}" }
end
|
1693
|
+
|
1694
|
+
# Returns a feed of recent events (comments, status changes, ...) for a
# JIRA project (simulated backend).
def self.get_recent_project_activity(project_key)
  return { error: "Project key required" } unless project_key
  return { error: "JIRA integration not available" } unless simulate_jira_available?

  events = simulate_jira_recent_activity(project_key).map do |event|
    {
      type: event[:type],
      user: event[:user],
      timestamp: event[:timestamp],
      description: event[:description],
      ticket_id: event[:ticket_id]
    }
  end
  { project_key: project_key, recent_activity: events }
rescue => e
  { error: "Failed to get recent activity: #{e.message}" }
end
|
1720
|
+
|
1721
|
+
# Aggregates historical ticket statistics (common issues, resolution times,
# assignee/priority distributions, status transitions) for a project and
# optional issue type (simulated backend).
def self.analyze_ticket_patterns(project_key, issue_type)
  return { error: "Project key required" } unless project_key
  return { error: "JIRA integration not available" } unless simulate_jira_available?

  stats = simulate_jira_pattern_analysis(project_key, issue_type)
  {
    project_key: project_key,
    issue_type: issue_type,
    patterns: {
      common_issues: stats[:common_issues],
      resolution_times: stats[:resolution_times],
      assignee_distribution: stats[:assignee_distribution],
      status_transitions: stats[:status_transitions],
      priority_distribution: stats[:priority_distribution]
    }
  }
rescue => e
  { error: "Failed to analyze ticket patterns: #{e.message}" }
end
|
1746
|
+
|
1747
|
+
# Simulation methods for JIRA integration
|
1748
|
+
# True when all JIRA connection settings are present in the environment.
#
# Fix: the previous implementation chained the ENV reads with `&&`, so a
# truthy result was the raw value of ENV['JIRA_API_TOKEN'] — returning a
# secret from a predicate risks leaking it into logs/serialized output.
# `all?` returns a plain boolean with identical truthiness.
def self.simulate_jira_available?
  %w[JIRA_URL JIRA_USERNAME JIRA_API_TOKEN].all? { |key| ENV[key] }
end
|
1752
|
+
|
1753
|
+
# Canned single-ticket payload used while no real JIRA backend is wired up.
# Only :id and :summary vary with the requested ticket id.
def self.simulate_jira_ticket(ticket_id)
  core = {
    id: ticket_id,
    summary: "Sample ticket summary for #{ticket_id}",
    description: "This is a sample ticket description for demonstration purposes.",
    status: "In Progress",
    priority: "Medium",
    assignee: "developer@example.com",
    reporter: "product@example.com",
    created: "2024-01-01T10:00:00Z",
    updated: "2024-01-15T14:30:00Z",
    issue_type: "Bug",
    project: "SELFRUBY"
  }
  details = {
    components: ["Backend", "API"],
    labels: ["bug", "backend"],
    comments: [
      { author: "developer@example.com", body: "Working on this issue", created: "2024-01-10T09:00:00Z" }
    ],
    attachments: [],
    related_issues: [],
    time_tracking: { original_estimate: "2h", time_spent: "1h", remaining_estimate: "1h" }
  }
  core.merge(details)
end
|
1776
|
+
|
1777
|
+
# Canned search response: one hit echoing the query, with any supplied
# status/assignee/issue_type filters reflected back (or defaults).
def self.simulate_jira_search(query, project_key, issue_type, status, assignee)
  hit = {
    id: "SELFRUBY-123",
    summary: "Search result for: #{query}",
    status: status || "Open",
    priority: "High",
    assignee: assignee || "developer@example.com",
    issue_type: issue_type || "Bug",
    created: "2024-01-01T10:00:00Z",
    updated: "2024-01-15T14:30:00Z"
  }
  [hit]
end
|
1791
|
+
|
1792
|
+
# Canned project metadata; only :key and :url vary with the project key.
def self.simulate_jira_project(project_key)
  {
    key: project_key,
    name: "SelfRuby Project",
    description: "Self-evolving Ruby application",
    lead: "project.lead@example.com",
    url: "https://jira.example.com/browse/#{project_key}",
    components: %w[Backend Frontend API Database],
    issue_types: %w[Bug Feature Task Story],
    statuses: ["Open", "In Progress", "Review", "Done"],
    versions: %w[1.0.0 1.1.0 2.0.0],
    permissions: %w[Browse Create Edit Delete]
  }
end
|
1806
|
+
|
1807
|
+
# Canned issue-type catalogue (project_key is currently ignored).
def self.simulate_jira_issue_types(project_key)
  [
    ["1", "Bug", "A problem which impairs or prevents the functions of the product", "https://jira.example.com/images/icons/issuetypes/bug.png"],
    ["2", "Feature", "A new feature of the product", "https://jira.example.com/images/icons/issuetypes/newfeature.png"],
    ["3", "Task", "A task that needs to be done", "https://jira.example.com/images/icons/issuetypes/task.png"]
  ].map do |id, name, description, icon_url|
    { id: id, name: name, description: description, icon_url: icon_url }
  end
end
|
1814
|
+
|
1815
|
+
# Canned workflow statuses (project_key is currently ignored).
def self.simulate_jira_statuses(project_key)
  rows = [
    ["1", "Open", "Issue is open and ready for work", "To Do"],
    ["2", "In Progress", "Issue is currently being worked on", "In Progress"],
    ["3", "Review", "Issue is ready for review", "In Progress"],
    ["4", "Done", "Issue is completed", "Done"]
  ]
  rows.map do |id, name, description, category|
    { id: id, name: name, description: description, category: category }
  end
end
|
1823
|
+
|
1824
|
+
# Canned assignable users (project_key is currently ignored); all active.
def self.simulate_jira_assignees(project_key)
  [
    ["developer1", "Developer One", "developer1@example.com"],
    ["developer2", "Developer Two", "developer2@example.com"],
    ["qa", "QA Engineer", "qa@example.com"]
  ].map do |username, display_name, email|
    { username: username, display_name: display_name, email: email, active: true }
  end
end
|
1831
|
+
|
1832
|
+
# Canned activity feed (project_key is currently ignored); both events
# come from the same user on the same ticket.
def self.simulate_jira_recent_activity(project_key)
  [
    ["comment", "2024-01-15T14:30:00Z", "Added comment to SELFRUBY-123"],
    ["status_change", "2024-01-15T13:00:00Z", "Changed status to In Progress"]
  ].map do |type, timestamp, description|
    {
      type: type,
      user: "developer1@example.com",
      timestamp: timestamp,
      description: description,
      ticket_id: "SELFRUBY-123"
    }
  end
end
|
1838
|
+
|
1839
|
+
# Canned ticket-pattern statistics (both parameters currently ignored).
# Note: string-keyed literals like { "Open": ... } produce Symbol keys.
def self.simulate_jira_pattern_analysis(project_key, issue_type)
  analysis = {}
  analysis[:common_issues] = ["API errors", "Performance issues", "UI bugs"]
  analysis[:resolution_times] = { average: "3.5 days", median: "2 days", max: "10 days" }
  analysis[:assignee_distribution] = { developer1: 40, developer2: 35, qa: 25 }
  analysis[:status_transitions] = { "Open": "In Progress", "In Progress": "Review", "Review": "Done" }
  analysis[:priority_distribution] = { "High": 20, "Medium": 60, "Low": 20 }
  analysis
end
|
1848
|
+
end
|
1849
|
+
|
1850
|
+
# Tool for intelligent context analysis and runtime decision making
|
1851
|
+
# MCP tool for intelligent context analysis and runtime decision making.
#
# Exposes one entry point (.call) that dispatches on `analysis_type` to one
# of seven analysis routines; the result hash is JSON-encoded into an MCP
# text response. Most helper methods below are simplified placeholders that
# return fixed data.
#
# Fix: the original code used a bare `private` keyword followed by
# `def self.x` definitions — `private` has no effect on singleton methods
# defined that way, so every "private" helper was actually public. The
# helpers now live in a `class << self` block with a real `private`
# section; `.call` still reaches them via implicit-receiver dispatch.
class IntelligentContextAnalysisTool < MCP::Tool
  description "Provides intelligent context analysis, runtime decision making, and Cursor IDE-like capabilities"
  input_schema(
    properties: {
      analysis_type: { type: "string" },
      context_data: { type: "object" },
      class_name: { type: "string" },
      method_name: { type: "string" },
      error_context: { type: "object" },
      business_context: { type: "object" },
      server_context: { type: "object" }
    },
    required: ["analysis_type"]
  )
  annotations(
    title: "Intelligent Context Analysis Tool",
    read_only_hint: true,
    destructive_hint: false,
    idempotent_hint: true,
    open_world_hint: false
  )

  # Dispatch the requested analysis and wrap it as an MCP text response.
  #
  # @param analysis_type [String] selects the analysis routine (see case below)
  # @param server_context [Hash] MCP server context; business knowledge is
  #   read from its :codebase_context entry by the helpers
  # @return [MCP::Tool::Response] JSON-encoded analysis hash (an error hash
  #   for unknown analysis types or helper failures)
  def self.call(analysis_type:, context_data: nil, class_name: nil, method_name: nil, error_context: nil, business_context: nil, server_context:)
    analysis = case analysis_type
               when 'comprehensive_context'
                 analyze_comprehensive_context(context_data, class_name, method_name, server_context)
               when 'error_pattern_analysis'
                 analyze_error_patterns(error_context, class_name, method_name, server_context)
               when 'business_impact_assessment'
                 assess_business_impact(context_data, business_context, server_context)
               when 'evolution_recommendations'
                 generate_evolution_recommendations(context_data, class_name, method_name, server_context)
               when 'runtime_optimization'
                 suggest_runtime_optimizations(context_data, class_name, method_name, server_context)
               when 'intelligent_fix_generation'
                 generate_intelligent_fix(context_data, error_context, class_name, method_name, server_context)
               when 'context_aware_validation'
                 perform_context_aware_validation(context_data, business_context, server_context)
               else
                 { error: "Unknown analysis type: #{analysis_type}" }
               end

    MCP::Tool::Response.new([{ type: "text", text: analysis.to_json }])
  end

  class << self
    private

    # ---- Top-level analysis routines (one per analysis_type) ----------

    # Builds a full picture of a class/method: domain, business rules,
    # coding standards, dependencies, risks and recommendations.
    def analyze_comprehensive_context(context_data, class_name, method_name, server_context)
      return { error: "Context data required" } unless context_data

      begin
        # Business knowledge travels on the server context.
        business_context = server_context&.dig(:codebase_context) || {}
        coding_standards = business_context['coding_standards'] || {}

        {
          class_analysis: analyze_class_context(class_name, business_context),
          method_analysis: analyze_method_context(class_name, method_name, business_context),
          business_rules: extract_relevant_business_rules(class_name, method_name, business_context),
          coding_standards: extract_relevant_coding_standards(coding_standards),
          dependencies: analyze_dependencies(class_name, method_name),
          risk_assessment: assess_context_risks(class_name, method_name, business_context),
          recommendations: generate_context_recommendations(class_name, method_name, business_context)
        }
      rescue => e
        { error: "Comprehensive context analysis failed: #{e.message}" }
      end
    end

    # Summarizes an error's pattern, business impact and follow-up actions.
    def analyze_error_patterns(error_context, class_name, method_name, server_context)
      return { error: "Error context required" } unless error_context

      begin
        business_context = server_context&.dig(:codebase_context) || {}

        {
          error_type: error_context[:error_type],
          error_message: error_context[:error_message],
          pattern_analysis: identify_error_patterns(error_context, class_name, method_name),
          business_impact: assess_error_business_impact(error_context, business_context),
          prevention_strategies: suggest_error_prevention_strategies(error_context, business_context),
          monitoring_recommendations: suggest_error_monitoring(error_context, business_context),
          evolution_opportunities: identify_evolution_opportunities(error_context, business_context)
        }
      rescue => e
        { error: "Error pattern analysis failed: #{e.message}" }
      end
    end

    # Scores the business impact of a change along several axes.
    def assess_business_impact(context_data, business_context, server_context)
      return { error: "Context data required" } unless context_data

      begin
        {
          user_experience_impact: assess_user_experience_impact(context_data, business_context),
          data_integrity_impact: assess_data_integrity_impact(context_data, business_context),
          financial_impact: assess_financial_impact(context_data, business_context),
          compliance_impact: assess_compliance_impact(context_data, business_context),
          operational_impact: assess_operational_impact(context_data, business_context),
          risk_level: calculate_overall_risk_level(context_data, business_context),
          mitigation_strategies: suggest_mitigation_strategies(context_data, business_context)
        }
      rescue => e
        { error: "Business impact assessment failed: #{e.message}" }
      end
    end

    # Produces evolution guidance bucketed by time horizon and discipline.
    def generate_evolution_recommendations(context_data, class_name, method_name, server_context)
      return { error: "Context data required" } unless context_data

      begin
        business_context = server_context&.dig(:codebase_context) || {}

        {
          immediate_actions: suggest_immediate_actions(context_data, class_name, method_name),
          short_term_improvements: suggest_short_term_improvements(context_data, business_context),
          long_term_evolution: suggest_long_term_evolution(context_data, business_context),
          architectural_changes: suggest_architectural_changes(context_data, business_context),
          testing_strategies: suggest_testing_strategies(context_data, business_context),
          monitoring_enhancements: suggest_monitoring_enhancements(context_data, business_context),
          documentation_updates: suggest_documentation_updates(context_data, business_context)
        }
      rescue => e
        { error: "Evolution recommendations generation failed: #{e.message}" }
      end
    end

    # Collects runtime optimization ideas (performance, memory, caching, ...).
    def suggest_runtime_optimizations(context_data, class_name, method_name, server_context)
      return { error: "Context data required" } unless context_data

      begin
        business_context = server_context&.dig(:codebase_context) || {}

        {
          performance_optimizations: suggest_performance_optimizations(context_data, class_name, method_name),
          memory_optimizations: suggest_memory_optimizations(context_data, class_name, method_name),
          caching_strategies: suggest_caching_strategies(context_data, business_context),
          database_optimizations: suggest_database_optimizations(context_data, business_context),
          algorithm_improvements: suggest_algorithm_improvements(context_data, class_name, method_name),
          resource_management: suggest_resource_management_improvements(context_data, business_context)
        }
      rescue => e
        { error: "Runtime optimization suggestions failed: #{e.message}" }
      end
    end

    # Describes how a fix should be built: strategy, approach, compliance,
    # performance/security concerns, testing and deployment.
    def generate_intelligent_fix(context_data, error_context, class_name, method_name, server_context)
      return { error: "Context data and error context required" } unless context_data && error_context

      begin
        business_context = server_context&.dig(:codebase_context) || {}

        {
          fix_strategy: determine_fix_strategy(error_context, business_context),
          implementation_approach: determine_implementation_approach(error_context, business_context),
          business_rule_compliance: ensure_business_rule_compliance(error_context, business_context),
          performance_considerations: include_performance_considerations(error_context, business_context),
          security_considerations: include_security_considerations(error_context, business_context),
          testing_requirements: determine_testing_requirements(error_context, business_context),
          deployment_considerations: determine_deployment_considerations(error_context, business_context)
        }
      rescue => e
        { error: "Intelligent fix generation failed: #{e.message}" }
      end
    end

    # Validates the context against business rules, standards, performance,
    # security and compliance, with an overall score and recommendations.
    def perform_context_aware_validation(context_data, business_context, server_context)
      return { error: "Context data required" } unless context_data

      begin
        {
          business_rule_validation: validate_business_rules(context_data, business_context),
          coding_standard_validation: validate_coding_standards(context_data, business_context),
          performance_validation: validate_performance_requirements(context_data, business_context),
          security_validation: validate_security_requirements(context_data, business_context),
          compliance_validation: validate_compliance_requirements(context_data, business_context),
          overall_validation_score: calculate_validation_score(context_data, business_context),
          validation_recommendations: generate_validation_recommendations(context_data, business_context)
        }
      rescue => e
        { error: "Context-aware validation failed: #{e.message}" }
      end
    end

    # ---- Context helpers ----------------------------------------------

    # Domain-level profile of a class; falls back to permissive defaults
    # when the business context has no entry for the class's domain.
    def analyze_class_context(class_name, business_context)
      return {} unless class_name

      domains = business_context['domains'] || {}
      domain = determine_class_domain(class_name, domains)

      {
        domain: domain,
        business_criticality: domains.dig(domain, 'criticality') || 'medium',
        compliance_requirements: domains.dig(domain, 'compliance') || [],
        sla_requirements: domains.dig(domain, 'sla_requirements') || '99.5%',
        security_requirements: domains.dig(domain, 'security_requirements') || 'standard'
      }
    end

    # Method-level profile read from domains -> classes -> methods metadata.
    def analyze_method_context(class_name, method_name, business_context)
      return {} unless class_name && method_name

      domains = business_context['domains'] || {}
      domain = determine_class_domain(class_name, domains)
      class_info = domains.dig(domain, 'classes', class_name) || {}
      method_info = class_info.dig('methods', method_name) || {}

      {
        method_description: method_info['description'] || 'No description available',
        error_handling: method_info['error_handling'] || 'standard',
        logging: method_info['logging'] || 'standard',
        return_values: method_info['return_values'] || {},
        business_criticality: class_info['business_criticality'] || 'medium'
      }
    end

    # Pulls the shared operation patterns relevant to calculators, models
    # and services out of the business context.
    def extract_relevant_business_rules(class_name, method_name, business_context)
      return {} unless business_context

      common_patterns = business_context['common_patterns'] || {}

      {
        calculator_operations: common_patterns['calculator_operations'] || {},
        model_operations: common_patterns['model_operations'] || {},
        service_operations: common_patterns['service_operations'] || {}
      }
    end

    # Normalizes the coding-standards hash, defaulting every axis to 'standard'.
    def extract_relevant_coding_standards(coding_standards)
      return {} unless coding_standards

      {
        error_handling: coding_standards['error_handling'] || 'standard',
        logging: coding_standards['logging'] || 'standard',
        validation: coding_standards['validation'] || 'standard',
        performance: coding_standards['performance'] || 'standard',
        security: coding_standards['security'] || 'standard',
        documentation: coding_standards['documentation'] || 'standard',
        testing: coding_standards['testing'] || 'standard'
      }
    end

    # Groups dependency lookups by kind (all finders are placeholders below).
    def analyze_dependencies(class_name, method_name)
      return {} unless class_name

      {
        model_dependencies: find_model_dependencies(class_name),
        service_dependencies: find_service_dependencies(class_name),
        external_dependencies: find_external_dependencies(class_name),
        database_dependencies: find_database_dependencies(class_name)
      }
    end

    # Risk profile for the class/method across business, technical,
    # compliance and security dimensions.
    def assess_context_risks(class_name, method_name, business_context)
      return {} unless business_context

      domains = business_context['domains'] || {}
      domain = determine_class_domain(class_name, domains)

      {
        business_risk: domains.dig(domain, 'criticality') == 'critical' ? 'high' : 'medium',
        technical_risk: assess_technical_risk(class_name, method_name),
        compliance_risk: assess_compliance_risk(domain, business_context),
        security_risk: assess_security_risk(domain, business_context)
      }
    end

    # Generic recommendations keyed off which inputs are present.
    def generate_context_recommendations(class_name, method_name, business_context)
      recommendations = []

      recommendations << "implement_comprehensive_error_handling" if class_name && method_name
      recommendations << "add_business_rule_validation" if business_context
      recommendations << "implement_structured_logging" if business_context
      recommendations << "add_performance_monitoring" if business_context
      recommendations << "implement_security_measures" if business_context

      recommendations
    end

    # First domain whose name appears (case-insensitively) in the class name.
    def determine_class_domain(class_name, domains)
      domains.keys.find { |domain| class_name.downcase.include?(domain.downcase) } || 'general'
    end

    # Controllers/services carry more coupling, hence 'medium' risk.
    def assess_technical_risk(class_name, method_name)
      if class_name&.include?('Controller') || class_name&.include?('Service')
        'medium'
      else
        'low'
      end
    end

    # 'medium' when the domain declares any compliance requirements.
    def assess_compliance_risk(domain, business_context)
      domains = business_context['domains'] || {}
      domain_info = domains[domain] || {}

      if domain_info['compliance']&.any?
        'medium'
      else
        'low'
      end
    end

    # Maps the domain's declared security level to a risk bucket.
    def assess_security_risk(domain, business_context)
      domains = business_context['domains'] || {}
      domain_info = domains[domain] || {}

      if domain_info['security'] == 'maximum'
        'high'
      elsif domain_info['security'] == 'high'
        'medium'
      else
        'low'
      end
    end

    # ---- Placeholder dependency finders (no real analysis yet) --------

    def find_model_dependencies(class_name)
      []
    end

    def find_service_dependencies(class_name)
      []
    end

    def find_external_dependencies(class_name)
      []
    end

    def find_database_dependencies(class_name)
      []
    end

    # ---- Placeholder analyses (fixed answers, simplified for brevity) --

    def identify_error_patterns(error_context, class_name, method_name)
      { pattern: 'common_error_pattern', frequency: 'occasional' }
    end

    def assess_error_business_impact(error_context, business_context)
      { impact_level: 'medium', user_experience: 'affected', data_integrity: 'maintained' }
    end

    def suggest_error_prevention_strategies(error_context, business_context)
      ['input_validation', 'defensive_programming', 'comprehensive_testing']
    end

    def suggest_error_monitoring(error_context, business_context)
      ['error_tracking', 'performance_monitoring', 'user_feedback_collection']
    end

    def identify_evolution_opportunities(error_context, business_context)
      ['improve_error_handling', 'enhance_user_experience', 'add_monitoring']
    end

    def assess_user_experience_impact(context_data, business_context)
      { impact_level: 'minimal', user_satisfaction: 'maintained' }
    end

    def assess_data_integrity_impact(context_data, business_context)
      { impact_level: 'none', data_consistency: 'maintained' }
    end

    def assess_financial_impact(context_data, business_context)
      { impact_level: 'none', cost_implications: 'minimal' }
    end

    def assess_compliance_impact(context_data, business_context)
      { impact_level: 'none', compliance_status: 'maintained' }
    end

    def assess_operational_impact(context_data, business_context)
      { impact_level: 'minimal', operational_efficiency: 'maintained' }
    end

    def calculate_overall_risk_level(context_data, business_context)
      'low'
    end

    def suggest_mitigation_strategies(context_data, business_context)
      ['monitor_performance', 'implement_alerting', 'add_logging']
    end

    def suggest_immediate_actions(context_data, class_name, method_name)
      ['fix_critical_issues', 'add_error_handling', 'improve_logging']
    end

    def suggest_short_term_improvements(context_data, business_context)
      ['enhance_validation', 'improve_error_messages', 'add_monitoring']
    end

    def suggest_long_term_evolution(context_data, business_context)
      ['architectural_refactoring', 'performance_optimization', 'security_enhancement']
    end

    def suggest_architectural_changes(context_data, business_context)
      ['improve_separation_of_concerns', 'enhance_modularity', 'optimize_data_flow']
    end

    def suggest_testing_strategies(context_data, business_context)
      ['unit_testing', 'integration_testing', 'performance_testing']
    end

    def suggest_monitoring_enhancements(context_data, business_context)
      ['real_time_monitoring', 'alerting_systems', 'performance_metrics']
    end

    def suggest_documentation_updates(context_data, business_context)
      ['update_api_documentation', 'improve_code_comments', 'create_user_guides']
    end

    def suggest_performance_optimizations(context_data, class_name, method_name)
      ['algorithm_optimization', 'caching_implementation', 'database_query_optimization']
    end

    def suggest_memory_optimizations(context_data, class_name, method_name)
      ['memory_pool_management', 'garbage_collection_optimization', 'data_structure_optimization']
    end

    def suggest_caching_strategies(context_data, business_context)
      ['result_caching', 'query_caching', 'session_caching']
    end

    def suggest_database_optimizations(context_data, business_context)
      ['index_optimization', 'query_optimization', 'connection_pooling']
    end

    def suggest_algorithm_improvements(context_data, class_name, method_name)
      ['time_complexity_reduction', 'space_complexity_optimization', 'algorithm_selection']
    end

    def suggest_resource_management_improvements(context_data, business_context)
      ['connection_pooling', 'memory_management', 'thread_management']
    end

    def determine_fix_strategy(error_context, business_context)
      'defensive_programming_with_graceful_degradation'
    end

    def determine_implementation_approach(error_context, business_context)
      'incremental_improvement_with_backward_compatibility'
    end

    def ensure_business_rule_compliance(error_context, business_context)
      'strict_compliance_with_business_rules'
    end

    def include_performance_considerations(error_context, business_context)
      'optimize_for_performance_with_monitoring'
    end

    def include_security_considerations(error_context, business_context)
      'security_first_approach_with_validation'
    end

    def determine_testing_requirements(error_context, business_context)
      'comprehensive_testing_with_edge_cases'
    end

    def determine_deployment_considerations(error_context, business_context)
      'gradual_rollout_with_rollback_capability'
    end

    def validate_business_rules(context_data, business_context)
      { valid: true, score: 0.9, recommendations: ['maintain_current_implementation'] }
    end

    def validate_coding_standards(context_data, business_context)
      { valid: true, score: 0.85, recommendations: ['improve_documentation'] }
    end

    def validate_performance_requirements(context_data, business_context)
      { valid: true, score: 0.8, recommendations: ['optimize_algorithm'] }
    end

    def validate_security_requirements(context_data, business_context)
      { valid: true, score: 0.9, recommendations: ['add_input_validation'] }
    end

    def validate_compliance_requirements(context_data, business_context)
      { valid: true, score: 0.95, recommendations: ['maintain_compliance'] }
    end

    def calculate_validation_score(context_data, business_context)
      0.88
    end

    def generate_validation_recommendations(context_data, business_context)
      ['improve_documentation', 'add_input_validation', 'optimize_algorithm']
    end
  end
end
|
2364
|
+
end
|