codegraph_cli-2.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. codegraph_cli/__init__.py +4 -0
  2. codegraph_cli/agents.py +191 -0
  3. codegraph_cli/bug_detector.py +386 -0
  4. codegraph_cli/chat_agent.py +352 -0
  5. codegraph_cli/chat_session.py +220 -0
  6. codegraph_cli/cli.py +330 -0
  7. codegraph_cli/cli_chat.py +367 -0
  8. codegraph_cli/cli_diagnose.py +133 -0
  9. codegraph_cli/cli_refactor.py +230 -0
  10. codegraph_cli/cli_setup.py +470 -0
  11. codegraph_cli/cli_test.py +177 -0
  12. codegraph_cli/cli_v2.py +267 -0
  13. codegraph_cli/codegen_agent.py +265 -0
  14. codegraph_cli/config.py +31 -0
  15. codegraph_cli/config_manager.py +341 -0
  16. codegraph_cli/context_manager.py +500 -0
  17. codegraph_cli/crew_agents.py +123 -0
  18. codegraph_cli/crew_chat.py +159 -0
  19. codegraph_cli/crew_tools.py +497 -0
  20. codegraph_cli/diff_engine.py +265 -0
  21. codegraph_cli/embeddings.py +241 -0
  22. codegraph_cli/graph_export.py +144 -0
  23. codegraph_cli/llm.py +642 -0
  24. codegraph_cli/models.py +47 -0
  25. codegraph_cli/models_v2.py +185 -0
  26. codegraph_cli/orchestrator.py +49 -0
  27. codegraph_cli/parser.py +800 -0
  28. codegraph_cli/performance_analyzer.py +223 -0
  29. codegraph_cli/project_context.py +230 -0
  30. codegraph_cli/rag.py +200 -0
  31. codegraph_cli/refactor_agent.py +452 -0
  32. codegraph_cli/security_scanner.py +366 -0
  33. codegraph_cli/storage.py +390 -0
  34. codegraph_cli/templates/graph_interactive.html +257 -0
  35. codegraph_cli/testgen_agent.py +316 -0
  36. codegraph_cli/validation_engine.py +285 -0
  37. codegraph_cli/vector_store.py +293 -0
  38. codegraph_cli-2.0.0.dist-info/METADATA +318 -0
  39. codegraph_cli-2.0.0.dist-info/RECORD +43 -0
  40. codegraph_cli-2.0.0.dist-info/WHEEL +5 -0
  41. codegraph_cli-2.0.0.dist-info/entry_points.txt +2 -0
  42. codegraph_cli-2.0.0.dist-info/licenses/LICENSE +21 -0
  43. codegraph_cli-2.0.0.dist-info/top_level.txt +1 -0
codegraph_cli/chat_agent.py
@@ -0,0 +1,352 @@
+"""Chat agent for interactive conversational coding assistance."""
+
+from __future__ import annotations
+
+from datetime import datetime
+from typing import Optional
+
+from .chat_session import SessionManager
+from .codegen_agent import CodeGenAgent
+from .context_manager import assemble_context_for_llm, detect_intent
+from .llm import LocalLLM
+from .models_v2 import ChatSession, CodeProposal
+from .orchestrator import MCPOrchestrator
+from .project_context import ProjectContext
+from .rag import RAGRetriever
+from .refactor_agent import RefactorAgent
+from .storage import GraphStore
+
+
+SYSTEM_PROMPT = """You are an AI coding assistant integrated with CodeGraph CLI.
+
+You have access to a semantic code graph of the user's project and can:
+- Search for code semantically
+- Analyze impact of changes
+- Generate new code
+- Refactor existing code
+- Explain code functionality
+
+When the user asks you to make changes:
+1. Use the provided code context to understand existing patterns
+2. Generate code that follows the project's style
+3. Explain what you're doing
+4. Create a proposal that the user can review before applying
+
+Be concise, helpful, and always explain your reasoning.
+"""
+
+
+class ChatAgent:
+    """Orchestrates interactive chat with RAG-based context management and real file access."""
+
+    def __init__(
+        self,
+        context: ProjectContext,
+        llm: LocalLLM,
+        orchestrator: MCPOrchestrator,
+        rag_retriever: RAGRetriever
+    ):
+        """Initialize chat agent.
+
+        Args:
+            context: ProjectContext for real file access
+            llm: LLM for generating responses
+            orchestrator: Orchestrator for accessing other agents
+            rag_retriever: RAG retriever for semantic search
+        """
+        self.context = context
+        self.llm = llm
+        self.orchestrator = orchestrator
+        self.rag_retriever = rag_retriever
+        self.session_manager = SessionManager()
+
+        # Initialize specialized agents
+        from .codegen_agent import CodeGenAgent
+        from .refactor_agent import RefactorAgent
+        self.codegen_agent = CodeGenAgent(context.store, llm, project_context=context)
+        self.refactor_agent = RefactorAgent(context.store)
+
+    def process_message(
+        self,
+        user_message: str,
+        session: ChatSession
+    ) -> str:
+        """Process user message and generate response.
+
+        Args:
+            user_message: User's message
+            session: Current chat session
+
+        Returns:
+            Assistant's response
+        """
+        # Add user message to session
+        timestamp = datetime.now().isoformat()
+        session.add_message("user", user_message, timestamp)
+
+        # Detect intent
+        intent = detect_intent(user_message)
+
+        # Handle special intents with existing agents
+        if intent == "read":
+            response = self._handle_read(user_message)
+        elif intent == "list":
+            response = self._handle_list()
+        elif intent == "search":
+            response = self._handle_search(user_message)
+        elif intent == "impact":
+            response = self._handle_impact(user_message)
+        elif intent == "generate":
+            response = self._handle_generate(user_message, session)
+        elif intent == "refactor":
+            response = self._handle_refactor(user_message, session)
+        else:
+            # General chat - use LLM with RAG context
+            response = self._handle_chat(user_message, session)
+
+        # Add assistant response to session
+        session.add_message("assistant", response, datetime.now().isoformat())
+
+        # Save session
+        self.session_manager.save_session(session)
+
+        return response
+
+    def _handle_list(self) -> str:
+        """Handle list files intent - show actual project files."""
+        if not self.context.has_source_access:
+            return (
+                "❌ Source directory not available for this project.\n"
+                "Re-index the project to enable file access: cg index <path> --name <project>"
+            )
+
+        try:
+            items = self.context.list_directory(".")
+
+            if not items:
+                return "📁 Project directory is empty."
+
+            # Separate files and directories
+            dirs = [item for item in items if item["type"] == "dir"]
+            files = [item for item in items if item["type"] == "file"]
+
+            response_parts = [f"📁 Project: {self.context.project_name}"]
+            response_parts.append(f"📂 Location: {self.context.source_path}\n")
+
+            # Show directories
+            if dirs:
+                response_parts.append(f"Directories ({len(dirs)}):")
+                for d in dirs[:10]:  # Limit to 10
+                    response_parts.append(f"  📂 {d['name']}/")
+                if len(dirs) > 10:
+                    response_parts.append(f"  ... and {len(dirs) - 10} more")
+                response_parts.append("")
+
+            # Show files
+            if files:
+                response_parts.append(f"Files ({len(files)}):")
+                for f in files[:15]:  # Limit to 15
+                    size_kb = f['size'] / 1024 if f['size'] else 0
+                    response_parts.append(f"  📄 {f['name']} ({size_kb:.1f} KB)")
+                if len(files) > 15:
+                    response_parts.append(f"  ... and {len(files) - 15} more")
+
+            # Show indexed files info
+            indexed_files = self.context.get_indexed_files()
+            response_parts.append(f"\n✅ Indexed: {len(indexed_files)} Python file(s)")
+
+            return "\n".join(response_parts)
+
+        except Exception as e:
+            return f"❌ Error listing directory: {str(e)}"
+
+    def _handle_read(self, message: str) -> str:
+        """Handle read file intent - show file contents."""
+        if not self.context.has_source_access:
+            return (
+                "❌ Source directory not available for this project.\n"
+                "Re-index the project to enable file access: cg index <path> --name <project>"
+            )
+
+        # Extract filename from message
+        import re
+        # Look for common file extensions
+        file_pattern = r'([\w/.-]+\.(?:py|txt|md|json|yaml|yml|toml|cfg|ini|sh|js|ts|html|css))'
+        matches = re.findall(file_pattern, message, re.IGNORECASE)
+
+        if not matches:
+            return "❌ Please specify a file to read (e.g., 'show me basic.py')"
+
+        filename = matches[0]
+
+        try:
+            content = self.context.read_file(filename)
+
+            # Detect language for syntax highlighting
+            ext = filename.split('.')[-1]
+            lang_map = {
+                'py': 'python', 'js': 'javascript', 'ts': 'typescript',
+                'md': 'markdown', 'json': 'json', 'yaml': 'yaml', 'yml': 'yaml',
+                'sh': 'bash', 'html': 'html', 'css': 'css'
+            }
+            lang = lang_map.get(ext, '')
+
+            response_parts = [f"📄 {filename}\n"]
+            response_parts.append(f"```{lang}")
+            response_parts.append(content)
+            response_parts.append("```")
+
+            # Add file info
+            file_info = self.context.get_file_info(filename)
+            if file_info:
+                size_kb = file_info['size'] / 1024 if file_info['size'] else 0
+                response_parts.append(f"\n📊 Size: {size_kb:.1f} KB | Modified: {file_info['modified'][:10]}")
+
+            return "\n".join(response_parts)
+
+        except FileNotFoundError:
+            return f"❌ File not found: {filename}\nAvailable files: {', '.join([f['name'] for f in self.context.list_directory('.') if f['type'] == 'file'][:5])}"
+        except Exception as e:
+            return f"❌ Error reading file: {str(e)}"
+
+    def _handle_search(self, message: str) -> str:
+        """Handle search intent."""
+        # Extract query (remove search keywords)
+        query = message.lower()
+        for kw in ["find", "search", "where is", "show me", "locate"]:
+            query = query.replace(kw, "").strip()
+
+        # Use RAG agent
+        results = self.rag_retriever.search(query, top_k=5)
+
+        if not results:
+            return "I couldn't find any matching code. Try a different search query."
+
+        # Format results
+        response_parts = [f"Found {len(results)} results:\n"]
+        for i, result in enumerate(results, 1):
+            response_parts.append(
+                f"{i}. [{result.node_type}] {result.qualname}\n"
+                f"   Location: {result.file_path}:{result.start_line}\n"
+                f"   Relevance: {result.score:.2f}\n"
+            )
+
+        return "\n".join(response_parts)
+
+    def _handle_impact(self, message: str) -> str:
+        """Handle impact analysis intent."""
+        # Extract symbol name
+        import re
+        symbols = re.findall(r'\b[A-Z][a-zA-Z0-9_]*\b|\b[a-z_][a-z0-9_]*\b', message)
+
+        if not symbols:
+            return "Please specify a function or class name to analyze impact."
+
+        symbol = symbols[0]
+
+        # Use summarization agent for impact analysis
+        try:
+            report = self.orchestrator.summarization_agent.impact_analysis(symbol, hops=2)
+            return f"**Impact Analysis for {symbol}:**\n\n{report.explanation}"
+        except Exception as e:
+            return f"Couldn't analyze impact: {str(e)}"
+
+    def _handle_generate(self, message: str, session: ChatSession) -> str:
+        """Handle code generation intent."""
+        try:
+            # Use codegen agent
+            proposal = self.codegen_agent.generate(
+                prompt=message,
+                max_files=3
+            )
+
+            # Add to pending proposals
+            session.pending_proposals.append(proposal)
+
+            # Format response
+            response_parts = [
+                f"I've created a code proposal: {proposal.description}\n",
+                f"Files to change: {proposal.num_files_changed}",
+                f"  - New files: {proposal.num_files_created}",
+                f"  - Modified files: {proposal.num_files_modified}\n",
+                "To apply these changes, say 'apply' or '/apply'.",
+                "To see the diff, say 'show diff' or '/preview'."
+            ]
+
+            return "\n".join(response_parts)
+        except Exception as e:
+            return f"I encountered an error generating code: {str(e)}\n\nCould you provide more details?"
+
+    def _handle_refactor(self, message: str, session: ChatSession) -> str:
+        """Handle refactoring intent."""
+        # For now, provide guidance
+        return (
+            "I can help with refactoring! Here are some operations I support:\n\n"
+            "- Rename a symbol: 'Rename process_payment to handle_payment'\n"
+            "- Extract function: 'Extract lines 45-60 into a new function'\n"
+            "- Extract service: 'Move payment functions to a new service'\n\n"
+            "What would you like to refactor?"
+        )
+
+    def _handle_chat(self, message: str, session: ChatSession) -> str:
+        """Handle general chat with LLM and RAG context."""
+        # Assemble context using smart RAG strategy
+        context_messages = assemble_context_for_llm(
+            user_message=message,
+            session=session,
+            rag_retriever=self.rag_retriever,
+            system_prompt=SYSTEM_PROMPT,
+            max_tokens=8000
+        )
+
+        # Call LLM
+        response = self.llm.chat_completion(
+            messages=context_messages,
+            max_tokens=1000,
+            temperature=0.7
+        )
+
+        if response:
+            return response
+
+        # Fallback if LLM fails
+        return (
+            "I'm having trouble connecting to the LLM. "
+            "Try asking me to search for code, analyze impact, or generate code."
+        )
+
+    def apply_pending_proposal(self, session: ChatSession, proposal_index: int = 0) -> str:
+        """Apply a pending proposal.
+
+        Args:
+            session: Chat session
+            proposal_index: Index of proposal to apply (default: most recent)
+
+        Returns:
+            Result message
+        """
+        if not session.pending_proposals:
+            return "No pending proposals to apply."
+
+        if proposal_index >= len(session.pending_proposals):
+            return f"Invalid proposal index. You have {len(session.pending_proposals)} pending proposal(s)."
+
+        proposal = session.pending_proposals[proposal_index]
+
+        try:
+            # Apply using codegen agent
+            result = self.codegen_agent.apply_changes(proposal, backup=True)
+
+            if result.success:
+                # Remove from pending
+                session.pending_proposals.pop(proposal_index)
+                self.session_manager.save_session(session)
+
+                return (
+                    f"✅ Successfully applied changes to {len(result.files_changed)} file(s).\n"
+                    f"Backup ID: {result.backup_id}\n"
+                    f"You can rollback with: cg v2 rollback {result.backup_id}"
+                )
+            else:
+                return f"❌ Failed to apply changes: {result.error}"
+        except Exception as e:
+            return f"❌ Error applying changes: {str(e)}"
codegraph_cli/chat_session.py
@@ -0,0 +1,220 @@
+"""Chat session management with persistence."""
+
+from __future__ import annotations
+
+import json
+import uuid
+from datetime import datetime
+from pathlib import Path
+from typing import List, Optional
+
+from .models_v2 import ChatMessage, ChatSession, CodeProposal
+
+
+class SessionManager:
+    """Manages chat session persistence and loading."""
+
+    def __init__(self, sessions_dir: Optional[Path] = None):
+        """Initialize session manager.
+
+        Args:
+            sessions_dir: Directory to store sessions (default: ~/.codegraph/chat_sessions/)
+        """
+        if sessions_dir is None:
+            home = Path.home()
+            sessions_dir = home / ".codegraph" / "chat_sessions"
+
+        self.sessions_dir = sessions_dir
+        self.sessions_dir.mkdir(parents=True, exist_ok=True)
+
+    def create_session(self, project_name: str) -> ChatSession:
+        """Create a new chat session.
+
+        Args:
+            project_name: Name of the project this session is for
+
+        Returns:
+            New ChatSession instance
+        """
+        session_id = str(uuid.uuid4())
+        timestamp = datetime.now().isoformat()
+
+        session = ChatSession(
+            id=session_id,
+            project_name=project_name,
+            created_at=timestamp,
+            updated_at=timestamp
+        )
+
+        return session
+
+    def save_session(self, session: ChatSession) -> None:
+        """Save session to disk.
+
+        Args:
+            session: Session to save
+        """
+        session_file = self.sessions_dir / f"{session.id}.json"
+
+        # Convert to dict
+        data = {
+            "id": session.id,
+            "project_name": session.project_name,
+            "created_at": session.created_at,
+            "updated_at": session.updated_at,
+            "messages": [
+                {
+                    "role": msg.role,
+                    "content": msg.content,
+                    "timestamp": msg.timestamp,
+                    "metadata": msg.metadata
+                }
+                for msg in session.messages
+            ],
+            "pending_proposals": [
+                {
+                    "id": p.id,
+                    "description": p.description,
+                    "impact_summary": p.impact_summary,
+                    "metadata": p.metadata,
+                    "changes": [
+                        {
+                            "file_path": c.file_path,
+                            "change_type": c.change_type,
+                            "original_content": c.original_content,
+                            "new_content": c.new_content,
+                            "diff": c.diff
+                        }
+                        for c in p.changes
+                    ]
+                }
+                for p in session.pending_proposals
+            ]
+        }
+
+        session_file.write_text(json.dumps(data, indent=2))
+
+    def load_session(self, session_id: str) -> Optional[ChatSession]:
+        """Load session from disk.
+
+        Args:
+            session_id: ID of session to load
+
+        Returns:
+            ChatSession if found, None otherwise
+        """
+        session_file = self.sessions_dir / f"{session_id}.json"
+
+        if not session_file.exists():
+            return None
+
+        data = json.loads(session_file.read_text())
+
+        # Reconstruct session
+        session = ChatSession(
+            id=data["id"],
+            project_name=data["project_name"],
+            created_at=data["created_at"],
+            updated_at=data["updated_at"]
+        )
+
+        # Reconstruct messages
+        for msg_data in data["messages"]:
+            session.messages.append(ChatMessage(
+                role=msg_data["role"],
+                content=msg_data["content"],
+                timestamp=msg_data["timestamp"],
+                metadata=msg_data.get("metadata", {})
+            ))
+
+        # Reconstruct pending proposals
+        from .models_v2 import FileChange
+        for prop_data in data.get("pending_proposals", []):
+            changes = [
+                FileChange(
+                    file_path=c["file_path"],
+                    change_type=c["change_type"],
+                    original_content=c.get("original_content"),
+                    new_content=c.get("new_content"),
+                    diff=c.get("diff", "")
+                )
+                for c in prop_data["changes"]
+            ]
+
+            session.pending_proposals.append(CodeProposal(
+                id=prop_data["id"],
+                description=prop_data["description"],
+                changes=changes,
+                impact_summary=prop_data.get("impact_summary", ""),
+                metadata=prop_data.get("metadata", {})
+            ))
+
+        return session
+
+    def list_sessions(self, project_name: Optional[str] = None) -> List[dict]:
+        """List all saved sessions.
+
+        Args:
+            project_name: Optional filter by project name
+
+        Returns:
+            List of session metadata dicts
+        """
+        sessions = []
+
+        for session_file in self.sessions_dir.glob("*.json"):
+            try:
+                data = json.loads(session_file.read_text())
+
+                # Filter by project if specified
+                if project_name and data.get("project_name") != project_name:
+                    continue
+
+                sessions.append({
+                    "id": data["id"],
+                    "project_name": data["project_name"],
+                    "created_at": data["created_at"],
+                    "updated_at": data["updated_at"],
+                    "message_count": len(data.get("messages", []))
+                })
+            except Exception:
+                # Skip corrupted files
+                continue
+
+        # Sort by updated_at (most recent first)
+        sessions.sort(key=lambda s: s["updated_at"], reverse=True)
+
+        return sessions
+
+    def get_latest_session(self, project_name: str) -> Optional[str]:
+        """Get the most recent session ID for a project.
+
+        Args:
+            project_name: Project to find session for
+
+        Returns:
+            Session ID if found, None otherwise
+        """
+        sessions = self.list_sessions(project_name=project_name)
+
+        if sessions:
+            return sessions[0]["id"]
+
+        return None
+
+    def delete_session(self, session_id: str) -> bool:
+        """Delete a session.
+
+        Args:
+            session_id: ID of session to delete
+
+        Returns:
+            True if deleted, False if not found
+        """
+        session_file = self.sessions_dir / f"{session_id}.json"
+
+        if session_file.exists():
+            session_file.unlink()
+            return True
+
+        return False
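A short housekeeping sketch for the persistence layer above (illustration only, not part of the package; "my-project" is a placeholder project name). Sessions are stored as one JSON file per session under ~/.codegraph/chat_sessions/ and can be listed, reloaded, and pruned with the methods defined here.

from codegraph_cli.chat_session import SessionManager

manager = SessionManager()  # defaults to ~/.codegraph/chat_sessions/

# list_sessions() returns metadata sorted most recent first;
# keep the five newest sessions for the project and delete the rest.
for meta in manager.list_sessions(project_name="my-project")[5:]:
    manager.delete_session(meta["id"])

# Reload the newest remaining session, if any, to inspect its message history.
latest_id = manager.get_latest_session("my-project")
if latest_id:
    session = manager.load_session(latest_id)
    print(f"{len(session.messages)} message(s) in session {session.id}")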