crackerjack 0.38.15__py3-none-any.whl → 0.39.0__py3-none-any.whl
This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release.
- crackerjack/__main__.py +134 -13
- crackerjack/agents/__init__.py +2 -0
- crackerjack/agents/base.py +1 -0
- crackerjack/agents/claude_code_bridge.py +319 -0
- crackerjack/agents/coordinator.py +6 -3
- crackerjack/agents/dry_agent.py +187 -3
- crackerjack/agents/enhanced_coordinator.py +279 -0
- crackerjack/agents/enhanced_proactive_agent.py +185 -0
- crackerjack/agents/performance_agent.py +324 -3
- crackerjack/agents/refactoring_agent.py +254 -5
- crackerjack/agents/semantic_agent.py +479 -0
- crackerjack/agents/semantic_helpers.py +356 -0
- crackerjack/cli/options.py +27 -0
- crackerjack/cli/semantic_handlers.py +290 -0
- crackerjack/core/async_workflow_orchestrator.py +9 -8
- crackerjack/core/enhanced_container.py +1 -1
- crackerjack/core/phase_coordinator.py +1 -1
- crackerjack/core/proactive_workflow.py +1 -1
- crackerjack/core/workflow_orchestrator.py +9 -6
- crackerjack/documentation/ai_templates.py +1 -1
- crackerjack/interactive.py +1 -1
- crackerjack/mcp/server_core.py +2 -0
- crackerjack/mcp/tools/__init__.py +2 -0
- crackerjack/mcp/tools/semantic_tools.py +584 -0
- crackerjack/models/semantic_models.py +271 -0
- crackerjack/plugins/loader.py +2 -2
- crackerjack/py313.py +4 -1
- crackerjack/services/embeddings.py +444 -0
- crackerjack/services/quality_intelligence.py +11 -1
- crackerjack/services/smart_scheduling.py +1 -1
- crackerjack/services/vector_store.py +681 -0
- crackerjack/slash_commands/run.md +84 -50
- {crackerjack-0.38.15.dist-info → crackerjack-0.39.0.dist-info}/METADATA +7 -2
- {crackerjack-0.38.15.dist-info → crackerjack-0.39.0.dist-info}/RECORD +37 -27
- {crackerjack-0.38.15.dist-info → crackerjack-0.39.0.dist-info}/WHEEL +0 -0
- {crackerjack-0.38.15.dist-info → crackerjack-0.39.0.dist-info}/entry_points.txt +0 -0
- {crackerjack-0.38.15.dist-info → crackerjack-0.39.0.dist-info}/licenses/LICENSE +0 -0
crackerjack/agents/semantic_agent.py (new file)

@@ -0,0 +1,479 @@
"""Semantic search and context analysis agent for code pattern discovery and semantic improvements."""

import typing as t
from pathlib import Path

from ..models.semantic_models import SearchQuery, SemanticConfig
from ..services.vector_store import VectorStore
from .base import (
    AgentContext,
    FixResult,
    Issue,
    IssueType,
    SubAgent,
    agent_registry,
)


class SemanticAgent(SubAgent):
    """AI agent specialized in semantic search and code context analysis.

    This agent enhances code understanding by providing semantic context,
    finding similar code patterns, and suggesting improvements based on
    codebase-wide analysis using vector embeddings.
    """

    def __init__(self, context: AgentContext) -> None:
        super().__init__(context)
        self.semantic_insights: dict[str, t.Any] = {}
        self.pattern_stats: dict[str, int] = {
            "patterns_discovered": 0,
            "context_enhancements": 0,
            "semantic_suggestions": 0,
            "similar_patterns_found": 0,
        }

    def get_supported_types(self) -> set[IssueType]:
        return {IssueType.SEMANTIC_CONTEXT}

    async def can_handle(self, issue: Issue) -> float:
        """Determine confidence level for handling semantic context issues."""
        if issue.type != IssueType.SEMANTIC_CONTEXT:
            return 0.0

        confidence = 0.8
        message_lower = issue.message.lower()

        # Higher confidence for semantic-specific terms
        if any(
            pattern in message_lower
            for pattern in (
                "semantic",
                "context",
                "pattern",
                "similarity",
                "related code",
                "code understanding",
                "similar implementation",
            )
        ):
            confidence = 0.85

        return confidence

    async def analyze_and_fix(self, issue: Issue) -> FixResult:
        """Analyze code using semantic search and provide contextual insights."""
        self.log(f"Analyzing semantic context issue: {issue.message}")

        validation_result = self._validate_semantic_issue(issue)
        if validation_result:
            return validation_result

        if issue.file_path is None:
            return FixResult(
                success=False,
                confidence=0.0,
                remaining_issues=["No file path provided for semantic analysis"],
            )

        file_path = Path(issue.file_path)

        try:
            # Initialize semantic services
            config = self._create_semantic_config()
            vector_store = self._get_vector_store(config)

            # Perform semantic analysis
            result = await self._perform_semantic_analysis(
                file_path, vector_store, issue
            )

            # Update stats
            self._update_pattern_stats(result)

            return result

        except Exception as e:
            return self._create_semantic_error_result(e)

    @staticmethod
    def _validate_semantic_issue(issue: Issue) -> FixResult | None:
        """Validate that the semantic issue can be processed."""
        if not issue.file_path:
            return FixResult(
                success=False,
                confidence=0.0,
                remaining_issues=["No file path specified for semantic analysis"],
            )

        file_path = Path(issue.file_path)
        if not file_path.exists():
            return FixResult(
                success=False,
                confidence=0.0,
                remaining_issues=[f"File not found: {file_path}"],
            )

        return None

    @staticmethod
    def _create_semantic_config() -> SemanticConfig:
        """Create semantic search configuration."""
        return SemanticConfig(
            embedding_model="sentence-transformers/all-MiniLM-L6-v2",
            chunk_size=512,
            chunk_overlap=50,
            max_search_results=10,
            similarity_threshold=0.7,
            embedding_dimension=384,
        )

    def _get_vector_store(self, config: SemanticConfig) -> VectorStore:
        """Get vector store instance with persistent database."""
        db_path = self._get_persistent_db_path()
        return VectorStore(config, db_path=db_path)

    def _get_persistent_db_path(self) -> Path:
        """Get the path to the persistent semantic search database."""
        db_path = self.context.project_path / ".crackerjack" / "semantic_index.db"
        db_path.parent.mkdir(exist_ok=True)
        return db_path

    async def _perform_semantic_analysis(
        self, file_path: Path, vector_store: VectorStore, issue: Issue
    ) -> FixResult:
        """Perform comprehensive semantic analysis of the code file."""
        # Read file content
        content = self.context.get_file_content(file_path)
        if not content:
            return FixResult(
                success=False,
                confidence=0.0,
                remaining_issues=[f"Could not read file: {file_path}"],
            )

        # Index the file if not already indexed
        try:
            embeddings = vector_store.index_file(file_path)
            self.log(f"Indexed {len(embeddings)} chunks from {file_path.name}")
        except Exception as e:
            self.log(f"Warning: Could not index file {file_path}: {e}")

        # Perform semantic search for related patterns
        semantic_insights = await self._discover_semantic_patterns(
            vector_store, file_path, content, issue
        )

        # Generate recommendations based on semantic analysis
        recommendations = self._generate_semantic_recommendations(semantic_insights)

        return FixResult(
            success=True,
            confidence=0.8,
            fixes_applied=[
                f"Semantic analysis completed for {file_path.name}",
                f"Discovered {len(semantic_insights.get('related_patterns', []))} related patterns",
                f"Generated {len(recommendations)} semantic recommendations",
            ],
            recommendations=recommendations,
            files_modified=[],  # Semantic agent provides insights, doesn't modify files
        )

    async def _discover_semantic_patterns(
        self,
        vector_store: VectorStore,
        file_path: Path,
        content: str,
        issue: Issue,
    ) -> dict[str, t.Any]:
        """Discover semantic patterns and related code through vector search."""
        insights: dict[str, t.Any] = {
            "related_patterns": [],
            "similar_functions": [],
            "context_suggestions": [],
            "pattern_clusters": [],
        }

        # Extract key functions and classes for semantic analysis
        code_elements = self._extract_code_elements(content)

        for element in code_elements:
            # Search for similar patterns
            search_query = SearchQuery(
                query=element["signature"],
                max_results=5,
                min_similarity=0.6,
                file_types=["py"],
            )

            try:
                results = vector_store.search(search_query)
                if results:
                    # Filter out results from the same file
                    related_results = [
                        result for result in results if result.file_path != file_path
                    ]

                    if related_results:
                        insights["related_patterns"].append(
                            {
                                "element": element,
                                "related_code": [
                                    {
                                        "file_path": str(result.file_path),
                                        "content": result.content[
                                            :200
                                        ],  # Truncate for readability
                                        "similarity_score": result.similarity_score,
                                        "lines": f"{result.start_line}-{result.end_line}",
                                    }
                                    for result in related_results[:3]  # Top 3 matches
                                ],
                            }
                        )

            except Exception as e:
                self.log(f"Warning: Semantic search failed for {element['name']}: {e}")

        # Analyze issue-specific context
        if issue.message:
            issue_insights = await self._analyze_issue_context(vector_store, issue)
            insights["context_suggestions"].extend(issue_insights)

        return insights

    def _extract_docstring_from_node(self, node: t.Any) -> str:
        """Extract docstring from AST node, handling both old and new formats."""
        import ast

        if not node.body or not isinstance(node.body[0], ast.Expr):
            return ""

        value = node.body[0].value
        if hasattr(value, "s"):  # Old ast.Str format
            return str(value.s)[:100]
        elif hasattr(value, "value") and isinstance(
            value.value, str
        ):  # New ast.Constant format
            return str(value.value)[:100]
        return ""

    def _build_function_signature(self, node: t.Any) -> str:
        """Build function signature from AST FunctionDef node."""
        signature = f"def {node.name}("
        if node.args.args:
            args = [arg.arg for arg in node.args.args[:3]]  # First 3 args
            signature += ", ".join(args)
        signature += ")"
        return signature

    def _build_class_signature(self, node: t.Any) -> str:
        """Build class signature from AST ClassDef node."""
        signature = f"class {node.name}"
        if node.bases:
            bases = [self._get_ast_name(base) for base in node.bases[:2]]
            signature += f"({', '.join(bases)})"
        return signature

    def _get_ast_name(self, node: t.Any) -> str:
        """Get name from AST node."""
        import ast

        if isinstance(node, ast.Name):
            return node.id
        elif isinstance(node, ast.Attribute):
            return f"{self._get_ast_name(node.value)}.{node.attr}"
        return "Unknown"

    def _extract_ast_elements(self, content: str) -> list[dict[str, t.Any]]:
        """Extract code elements using AST parsing."""
        import ast

        class CodeElementExtractor(ast.NodeVisitor):
            def __init__(self, parent: "SemanticAgent") -> None:
                self.elements: list[dict[str, t.Any]] = []
                self.parent = parent

            def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
                self.elements.append(
                    {
                        "type": "function",
                        "name": node.name,
                        "signature": self.parent._build_function_signature(node),
                        "docstring": self.parent._extract_docstring_from_node(node),
                        "line_number": node.lineno,
                    }
                )
                self.generic_visit(node)

            def visit_ClassDef(self, node: ast.ClassDef) -> None:
                self.elements.append(
                    {
                        "type": "class",
                        "name": node.name,
                        "signature": self.parent._build_class_signature(node),
                        "line_number": node.lineno,
                    }
                )
                self.generic_visit(node)

        tree = ast.parse(content)
        extractor = CodeElementExtractor(self)
        extractor.visit(tree)
        return extractor.elements[:10]  # Limit to top 10 elements

    def _extract_text_elements(self, content: str) -> list[dict[str, t.Any]]:
        """Extract code elements using simple text patterns."""
        elements = []
        lines = content.split("\n")
        for i, line in enumerate(lines[:50]):  # Check first 50 lines
            stripped = line.strip()
            if stripped.startswith("def ") and "(" in stripped:
                func_name = stripped.split("(")[0].replace("def ", "").strip()
                elements.append(
                    {
                        "type": "function",
                        "name": func_name,
                        "signature": stripped,
                        "line_number": i + 1,
                    }
                )
            elif stripped.startswith("class ") and ":" in stripped:
                class_name = stripped.split(":")[0].replace("class ", "").strip()
                elements.append(
                    {
                        "type": "class",
                        "name": class_name,
                        "signature": stripped,
                        "line_number": i + 1,
                    }
                )
        return elements

    def _extract_code_elements(self, content: str) -> list[dict[str, t.Any]]:
        """Extract key code elements for semantic analysis."""
        try:
            return self._extract_ast_elements(content)
        except SyntaxError:
            return self._extract_text_elements(content)

    async def _analyze_issue_context(
        self, vector_store: VectorStore, issue: Issue
    ) -> list[dict[str, t.Any]]:
        """Analyze the specific issue context using semantic search."""
        suggestions = []

        # Search for similar issues or patterns
        search_query = SearchQuery(
            query=issue.message,
            max_results=5,
            min_similarity=0.5,
        )

        try:
            results = vector_store.search(search_query)
            if results:
                suggestions.append(
                    {
                        "type": "similar_issues",
                        "description": f"Found {len(results)} similar patterns in codebase",
                        "examples": [
                            {
                                "file": str(result.file_path.name),
                                "content": result.content[:150],
                                "similarity": result.similarity_score,
                            }
                            for result in results[:3]
                        ],
                    }
                )
        except Exception as e:
            self.log(f"Warning: Issue context analysis failed: {e}")

        return suggestions

    def _generate_semantic_recommendations(
        self, insights: dict[str, t.Any]
    ) -> list[str]:
        """Generate actionable recommendations based on semantic analysis."""
        recommendations = []

        related_patterns = insights.get("related_patterns", [])
        context_suggestions = insights.get("context_suggestions", [])

        if related_patterns:
            recommendations.append(
                f"Found {len(related_patterns)} similar code patterns across the codebase"
            )

            # Analyze pattern consistency
            high_similarity_count = sum(
                1
                for pattern in related_patterns
                for code in pattern["related_code"]
                if code["similarity_score"] > 0.8
            )

            if high_similarity_count > 0:
                recommendations.append(
                    f"Detected {high_similarity_count} highly similar implementations - "
                    "consider refactoring for DRY principle compliance"
                )

        if context_suggestions:
            recommendations.append(
                "Semantic analysis revealed contextual insights for code understanding"
            )

        # General semantic recommendations
        recommendations.extend(
            [
                "Consider semantic indexing of related modules for better code discovery",
                "Review similar patterns for consistency in naming and implementation",
                "Use semantic search to discover reusable components before implementing new ones",
            ]
        )

        return recommendations

    def _update_pattern_stats(self, result: FixResult) -> None:
        """Update pattern discovery statistics."""
        if result.success:
            self.pattern_stats["patterns_discovered"] += len(result.fixes_applied)
            self.pattern_stats["semantic_suggestions"] += len(result.recommendations)

    @staticmethod
    def _create_semantic_error_result(error: Exception) -> FixResult:
        """Create error result for semantic analysis failures."""
        return FixResult(
            success=False,
            confidence=0.0,
            remaining_issues=[f"Semantic analysis failed: {error}"],
            recommendations=[
                "Ensure semantic search index is properly initialized",
                "Check if file contains valid code for analysis",
                "Verify semantic search configuration is correct",
            ],
        )

    async def plan_before_action(self, issue: Issue) -> dict[str, t.Any]:
        """Plan semantic analysis strategy before execution."""
        return {
            "strategy": "semantic_context_analysis",
            "confidence": 0.8,
            "approach": [
                "Index file content for semantic search",
                "Discover related code patterns using vector embeddings",
                "Analyze semantic similarity across codebase",
                "Generate contextual recommendations",
            ],
            "expected_insights": [
                "Similar code patterns and implementations",
                "Opportunities for code reuse and refactoring",
                "Contextual understanding of code relationships",
            ],
        }


# Register the agent with the agent registry
agent_registry.register(SemanticAgent)