codegraph-cli 2.0.0 (codegraph_cli-2.0.0-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. codegraph_cli/__init__.py +4 -0
  2. codegraph_cli/agents.py +191 -0
  3. codegraph_cli/bug_detector.py +386 -0
  4. codegraph_cli/chat_agent.py +352 -0
  5. codegraph_cli/chat_session.py +220 -0
  6. codegraph_cli/cli.py +330 -0
  7. codegraph_cli/cli_chat.py +367 -0
  8. codegraph_cli/cli_diagnose.py +133 -0
  9. codegraph_cli/cli_refactor.py +230 -0
  10. codegraph_cli/cli_setup.py +470 -0
  11. codegraph_cli/cli_test.py +177 -0
  12. codegraph_cli/cli_v2.py +267 -0
  13. codegraph_cli/codegen_agent.py +265 -0
  14. codegraph_cli/config.py +31 -0
  15. codegraph_cli/config_manager.py +341 -0
  16. codegraph_cli/context_manager.py +500 -0
  17. codegraph_cli/crew_agents.py +123 -0
  18. codegraph_cli/crew_chat.py +159 -0
  19. codegraph_cli/crew_tools.py +497 -0
  20. codegraph_cli/diff_engine.py +265 -0
  21. codegraph_cli/embeddings.py +241 -0
  22. codegraph_cli/graph_export.py +144 -0
  23. codegraph_cli/llm.py +642 -0
  24. codegraph_cli/models.py +47 -0
  25. codegraph_cli/models_v2.py +185 -0
  26. codegraph_cli/orchestrator.py +49 -0
  27. codegraph_cli/parser.py +800 -0
  28. codegraph_cli/performance_analyzer.py +223 -0
  29. codegraph_cli/project_context.py +230 -0
  30. codegraph_cli/rag.py +200 -0
  31. codegraph_cli/refactor_agent.py +452 -0
  32. codegraph_cli/security_scanner.py +366 -0
  33. codegraph_cli/storage.py +390 -0
  34. codegraph_cli/templates/graph_interactive.html +257 -0
  35. codegraph_cli/testgen_agent.py +316 -0
  36. codegraph_cli/validation_engine.py +285 -0
  37. codegraph_cli/vector_store.py +293 -0
  38. codegraph_cli-2.0.0.dist-info/METADATA +318 -0
  39. codegraph_cli-2.0.0.dist-info/RECORD +43 -0
  40. codegraph_cli-2.0.0.dist-info/WHEEL +5 -0
  41. codegraph_cli-2.0.0.dist-info/entry_points.txt +2 -0
  42. codegraph_cli-2.0.0.dist-info/licenses/LICENSE +21 -0
  43. codegraph_cli-2.0.0.dist-info/top_level.txt +1 -0
codegraph_cli/cli_v2.py
@@ -0,0 +1,267 @@
+ """CLI commands for v2.0 code generation features."""
+
+ from __future__ import annotations
+
+ from pathlib import Path
+ from typing import Optional
+
+ import typer
+
+ from . import config
+ from .cli_diagnose import diagnose_app
+ from .cli_refactor import refactor_app
+ from .cli_test import test_app
+ from .codegen_agent import CodeGenAgent
+ from .diff_engine import DiffEngine
+ from .llm import LocalLLM
+ from .storage import GraphStore, ProjectManager
+
+ # Create sub-app for v2 commands
+ v2_app = typer.Typer(help="v2.0 code generation features (experimental)")
+
+ # Register sub-commands under v2
+ v2_app.add_typer(refactor_app, name="refactor")
+ v2_app.add_typer(diagnose_app, name="diagnose")
+ v2_app.add_typer(test_app, name="test")
+
+
+ def _get_codegen_agent(pm: ProjectManager) -> CodeGenAgent:
+     """Get CodeGenAgent with current project context."""
+     project = pm.get_current_project()
+     if not project:
+         raise typer.BadParameter("No project loaded. Use 'cg load-project <name>' or run 'cg index <path>'.")
+
+     project_dir = pm.project_dir(project)
+     if not project_dir.exists():
+         raise typer.BadParameter(f"Loaded project '{project}' does not exist in memory.")
+
+     store = GraphStore(project_dir)
+     llm = LocalLLM(
+         model=config.LLM_MODEL,
+         provider=config.LLM_PROVIDER,
+         api_key=config.LLM_API_KEY
+     )
+
+     return CodeGenAgent(store, llm)
+
+
+ @v2_app.command("generate")
+ def generate_code(
+     prompt: str = typer.Argument(..., help="Natural language description of code to generate"),
+     context_file: Optional[str] = typer.Option(None, "--file", "-f", help="File to use as context"),
+     output: Optional[str] = typer.Option(None, "--output", "-o", help="Output file path or directory"),
+     preview_only: bool = typer.Option(False, "--preview", "-p", help="Preview changes without applying"),
+     auto_apply: bool = typer.Option(False, "--auto-apply", "-y", help="Apply changes without confirmation"),
+     llm_provider: str = typer.Option(config.LLM_PROVIDER, help="LLM provider"),
+     llm_api_key: Optional[str] = typer.Option(config.LLM_API_KEY, help="API key for LLM"),
+     llm_model: str = typer.Option(config.LLM_MODEL, help="LLM model"),
+ ):
+     """Generate code from natural language description (v2.0 experimental)."""
+     pm = ProjectManager()
+     agent = _get_codegen_agent(pm)
+
+     typer.echo("🤖 Generating code...")
+
+     # Generate code
+     proposal = agent.generate(prompt, context_file=context_file)
+
+     # Update file paths if output directory specified
+     if output:
+         output_path = Path(output)
+         for change in proposal.changes:
+             if output_path.is_dir() or not output_path.suffix:
+                 # Output is a directory
+                 output_path.mkdir(parents=True, exist_ok=True)
+                 filename = Path(change.file_path).name
+                 change.file_path = str(output_path / filename)
+             else:
+                 # Output is a specific file
+                 change.file_path = str(output_path)
+
+     # Preview changes
+     diff_engine = DiffEngine()
+     preview = diff_engine.preview_changes(proposal)
+     typer.echo(preview)
+
+     # Analyze impact
+     typer.echo("")
+     impact = agent.preview_impact(proposal)
+     typer.echo(impact)
+
+     # Apply if requested
+     if preview_only:
+         typer.echo("\n📋 Preview only mode - no changes applied")
+         return
+
+     if not auto_apply:
+         apply = typer.confirm("\n❓ Apply these changes?", default=False)
+         if not apply:
+             typer.echo("❌ Changes not applied")
+             return
+
+     # Apply changes
+     typer.echo("\n✨ Applying changes...")
+     result = agent.apply_changes(proposal)
+
+     if result.success:
+         typer.echo(f"✅ Successfully applied changes to {len(result.files_changed)} file(s)")
+         if result.backup_id:
+             typer.echo(f"💾 Backup created: {result.backup_id}")
+             typer.echo(f"   Rollback with: cg v2 rollback {result.backup_id}")
+     else:
+         typer.echo(f"❌ Failed to apply changes: {result.error}")
+
+
+ @v2_app.command("rollback")
+ def rollback_changes(
+     backup_id: str = typer.Argument(..., help="Backup ID to rollback to"),
+ ):
+     """Rollback to a previous backup."""
+     diff_engine = DiffEngine()
+
+     typer.echo(f"🔄 Rolling back to backup: {backup_id}")
+     success = diff_engine.rollback(backup_id)
+
+     if success:
+         typer.echo("✅ Successfully rolled back changes")
+     else:
+         typer.echo(f"❌ Failed to rollback - backup not found: {backup_id}")
+
+
+ @v2_app.command("list-backups")
+ def list_backups():
+     """List all available backups."""
+     diff_engine = DiffEngine()
+     backups = diff_engine.list_backups()
+
+     if not backups:
+         typer.echo("No backups found")
+         return
+
+     typer.echo(f"📦 Found {len(backups)} backup(s):\n")
+     for backup in backups:
+         typer.echo(f"ID: {backup['backup_id']}")
+         typer.echo(f"   Description: {backup['description']}")
+         typer.echo(f"   Timestamp: {backup['timestamp']}")
+         typer.echo(f"   Files: {len(backup['files'])}")
+         typer.echo("")
+
+
+ @v2_app.command("review")
+ def review_code(
+     file_path: str = typer.Argument(..., help="File to review"),
+     check: str = typer.Option("all", help="Check type: bugs, security, performance, all"),
+     severity: str = typer.Option("all", help="Minimum severity: low, medium, high, critical"),
+     verbose: bool = typer.Option(False, "--verbose", "-v", help="Show detailed output"),
+     use_llm: bool = typer.Option(False, "--llm", help="Use LLM for deeper analysis"),
+     show_fixes: bool = typer.Option(False, "--fix", help="Show auto-fix suggestions"),
+ ):
+     """Run AI-powered code review on a file."""
+     from .bug_detector import BugDetector
+     from .security_scanner import SecurityScanner
+     from .performance_analyzer import PerformanceAnalyzer
+
+     pm = ProjectManager()
+     project = pm.get_current_project()
+     if not project:
+         typer.echo("❌ No project loaded. Use 'cg load-project <name>' first.", err=True)
+         raise typer.Exit(1)
+
+     project_dir = pm.project_dir(project)
+     store = GraphStore(project_dir)
+     llm = LocalLLM(
+         model=config.LLM_MODEL,
+         provider=config.LLM_PROVIDER,
+         api_key=config.LLM_API_KEY
+     ) if use_llm else None
+
+     all_issues = []
+
+     # Run selected checks
+     typer.echo(f"🔍 Analyzing {file_path}...")
+     if use_llm:
+         typer.echo("   🤖 LLM analysis enabled")
+     if show_fixes:
+         typer.echo("   🔧 Auto-fix suggestions enabled")
+     typer.echo("")
+
+     if check in ["bugs", "all"]:
+         detector = BugDetector(store, llm)
+         bug_issues = detector.analyze_file(file_path, use_llm=use_llm)
+         all_issues.extend(bug_issues)
+         if verbose:
+             typer.echo(f"   Bug detection: {len(bug_issues)} issue(s)")
+
+     if check in ["security", "all"]:
+         scanner = SecurityScanner(store)
+         security_issues = scanner.scan_file(file_path, generate_fixes=show_fixes)
+         all_issues.extend(security_issues)
+         if verbose:
+             typer.echo(f"   Security scan: {len(security_issues)} issue(s)")
+
+     if check in ["performance", "all"]:
+         analyzer = PerformanceAnalyzer(store)
+         perf_issues = analyzer.analyze_file(file_path)
+         all_issues.extend(perf_issues)
+         if verbose:
+             typer.echo(f"   Performance analysis: {len(perf_issues)} issue(s)")
+
+     # Filter by severity
+     if severity != "all":
+         severity_order = ["low", "medium", "high", "critical"]
+         min_level = severity_order.index(severity)
+         all_issues = [
+             i for i in all_issues
+             if severity_order.index(i["severity"]) >= min_level
+         ]
+
+     # Display results
+     if not all_issues:
+         typer.echo("✅ No issues found!")
+         store.close()
+         return
+
+     typer.echo(f"\n🔍 Found {len(all_issues)} issue(s):\n")
+
+     # Group by severity
+     by_severity = {"critical": [], "high": [], "medium": [], "low": []}
+     for issue in all_issues:
+         by_severity[issue["severity"]].append(issue)
+
+     severity_icons = {
+         "critical": "🚨",
+         "high": "🔴",
+         "medium": "⚠️",
+         "low": "ℹ️"
+     }
+
+     # Display grouped by severity
+     for sev in ["critical", "high", "medium", "low"]:
+         issues = by_severity[sev]
+         if not issues:
+             continue
+
+         typer.echo(f"{severity_icons[sev]} {sev.upper()} ({len(issues)} issue(s)):")
+         for issue in sorted(issues, key=lambda x: x["line"]):
+             typer.echo(f"   Line {issue['line']}: {issue['message']}")
+             typer.echo(f"      Type: {issue['type']}")
+
+             if verbose and "code_snippet" in issue:
+                 typer.echo(f"      Code: {issue['code_snippet']}")
+
+             # Show LLM explanation if available
+             if "llm_explanation" in issue:
+                 typer.echo(f"      🤖 Analysis: {issue['llm_explanation']}")
+
+             typer.echo(f"      💡 {issue['suggestion']}")
+
+             # Show auto-fix if available
+             if show_fixes and "auto_fix" in issue:
+                 typer.echo("      🔧 Auto-fix:")
+                 for line in issue["auto_fix"].split("\n"):
+                     typer.echo(f"         {line}")
+
+         typer.echo("")
+
+     store.close()
+
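Since v2_app is a plain Typer application, the commands above can be exercised in-process through Typer's test runner rather than a shell. A minimal sketch (assuming the wheel is installed; the prompt text is a placeholder, and generate additionally needs an indexed project):

    from typer.testing import CliRunner

    from codegraph_cli.cli_v2 import v2_app

    runner = CliRunner()

    # list-backups has no project prerequisite, so it makes a safe smoke test.
    result = runner.invoke(v2_app, ["list-backups"])
    print(result.output)  # e.g. "No backups found" on a fresh install

    # --preview exits before any filesystem write, per generate_code() above.
    result = runner.invoke(v2_app, ["generate", "add a CSV row parser", "--preview"])
    print(result.output)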
codegraph_cli/codegen_agent.py
@@ -0,0 +1,265 @@
+ """CodeGenAgent for AI-powered code generation with impact analysis."""
+
+ from __future__ import annotations
+
+ import uuid
+ from pathlib import Path
+ from typing import Optional
+
+ from .diff_engine import DiffEngine
+ from .llm import LocalLLM
+ from .models_v2 import ApplyResult, CodeProposal, FileChange
+ from .orchestrator import MCPOrchestrator
+ from .storage import GraphStore
+
+
+ class CodeGenAgent:
+     """Generates code based on natural language requests with impact preview."""
+
+     def __init__(
+         self,
+         store: GraphStore,
+         llm: LocalLLM,
+         diff_engine: Optional[DiffEngine] = None,
+         project_context=None  # ProjectContext for file operations
+     ):
+         """Initialize CodeGenAgent.
+
+         Args:
+             store: Graph store for context
+             llm: LLM for code generation
+             diff_engine: Engine for managing diffs (optional)
+             project_context: ProjectContext for file operations (optional)
+         """
+         self.store = store
+         self.llm = llm
+         self.diff_engine = diff_engine or DiffEngine()
+         self.project_context = project_context
+         # Pass provider name as string, not the provider object
+         self.orchestrator = MCPOrchestrator(
+             store,
+             llm_model=llm.model,
+             llm_provider=llm.provider_name,  # Fixed: use provider_name string
+             llm_api_key=llm.api_key
+         )
+
+     def generate(
+         self,
+         prompt: str,
+         context_file: Optional[str] = None,
+         max_files: int = 3
+     ) -> CodeProposal:
+         """Generate code from natural language prompt.
+
+         Args:
+             prompt: Natural language description of what to generate
+             context_file: Optional file to use as context
+             max_files: Maximum number of context files to include
+
+         Returns:
+             CodeProposal with generated changes
+         """
+         # Gather context from codebase
+         context = self._gather_context(prompt, context_file, max_files)
+
+         # Generate code using LLM
+         generation_prompt = self._build_generation_prompt(prompt, context)
+         generated_code = self.llm.explain(generation_prompt)
+
+         # Parse generated code into changes
+         changes = self._parse_generated_code(generated_code, prompt)
+
+         # Create proposal
+         proposal = CodeProposal(
+             id=str(uuid.uuid4()),
+             description=prompt,
+             changes=changes,
+             metadata={"context_files": context.get("files", [])}
+         )
+
+         # Add diffs for modified files
+         for change in proposal.changes:
+             if change.change_type == "modify" and change.original_content and change.new_content:
+                 change.diff = self.diff_engine.create_diff(
+                     change.original_content,
+                     change.new_content,
+                     change.file_path
+                 )
+
+         return proposal
+
+     def preview_impact(self, proposal: CodeProposal) -> str:
+         """Analyze impact of proposed changes.
+
+         Args:
+             proposal: Code proposal to analyze
+
+         Returns:
+             Impact analysis summary
+         """
+         impact_lines = []
+
+         # Analyze each change
+         for change in proposal.changes:
+             if change.change_type == "create":
+                 impact_lines.append(f"✨ New file: {change.file_path}")
+             elif change.change_type == "modify":
+                 # Try to find affected symbols
+                 file_path = Path(change.file_path)
+                 if file_path.exists():
+                     impact_lines.append(f"📝 Modified: {change.file_path}")
+                     # Could add more detailed analysis here
+             elif change.change_type == "delete":
+                 impact_lines.append(f"🗑️ Deleted: {change.file_path}")
+
+         # Add recommendations
+         impact_lines.append("")
+         impact_lines.append("💡 Recommendations:")
+         impact_lines.append(" - Review generated code for correctness")
+         impact_lines.append(" - Run tests after applying changes")
+         impact_lines.append(" - Backup is created automatically")
+
+         return "\n".join(impact_lines)
+
+     def apply_changes(
+         self,
+         proposal: CodeProposal,
+         backup: bool = True
+     ) -> ApplyResult:
+         """Apply proposed changes to filesystem.
+
+         Args:
+             proposal: Proposal to apply
+             backup: Whether to create backup
+
+         Returns:
+             Result of applying changes
+         """
+         return self.diff_engine.apply_changes(proposal, backup=backup)
+
+     def _gather_context(
+         self,
+         prompt: str,
+         context_file: Optional[str],
+         max_files: int
+     ) -> dict:
+         """Gather relevant context from codebase.
+
+         Args:
+             prompt: User's request
+             context_file: Specific file to include
+             max_files: Max number of files to include
+
+         Returns:
+             Context dictionary
+         """
+         context = {"files": [], "snippets": []}
+
+         # If specific file provided, use it
+         if context_file:
+             file_path = Path(context_file)
+             if file_path.exists():
+                 context["files"].append({
+                     "path": str(file_path),
+                     "content": file_path.read_text()
+                 })
+
+         # Search for relevant code
+         try:
+             search_results = self.orchestrator.search(prompt, top_k=max_files)
+             for result in search_results[:max_files]:
+                 context["snippets"].append({
+                     "qualname": result.qualname,
+                     "file": result.file_path,
+                     "code": result.code
+                 })
+         except Exception:
+             pass  # Continue even if search fails
+
+         return context
+
+     def _build_generation_prompt(self, user_prompt: str, context: dict) -> str:
+         """Build prompt for LLM code generation.
+
+         Args:
+             user_prompt: User's request
+             context: Gathered context
+
+         Returns:
+             Formatted prompt for LLM
+         """
+         prompt_parts = []
+
+         prompt_parts.append("You are a code generation assistant. Generate clean, working code based on the user's request.")
+         prompt_parts.append("")
+         prompt_parts.append(f"User Request: {user_prompt}")
+         prompt_parts.append("")
+
+         # Add context if available
+         if context.get("snippets"):
+             prompt_parts.append("Relevant existing code:")
+             for snippet in context["snippets"][:3]:
+                 prompt_parts.append(f"\n# {snippet['qualname']} ({snippet['file']})")
+                 prompt_parts.append(snippet["code"][:500])  # Limit length
+             prompt_parts.append("")
+
+         prompt_parts.append("Generate the requested code. Include:")
+         prompt_parts.append("1. Complete, working implementation")
+         prompt_parts.append("2. Proper error handling")
+         prompt_parts.append("3. Clear docstrings")
+         prompt_parts.append("4. Type hints where appropriate")
+         prompt_parts.append("")
+         prompt_parts.append("Output only the code, no explanations.")
+
+         return "\n".join(prompt_parts)
+
+     def _parse_generated_code(self, generated: str, prompt: str) -> list[FileChange]:
+         """Parse LLM output into file changes.
+
+         Args:
+             generated: Generated code from LLM
+             prompt: Original user prompt
+
+         Returns:
+             List of FileChange objects
+         """
+         # Simple implementation: treat as new file
+         # In production, would parse file markers, detect modifications, etc.
+
+         # Try to infer filename from prompt
+         filename = self._infer_filename(prompt)
+
+         return [
+             FileChange(
+                 file_path=filename,
+                 change_type="create",
+                 new_content=generated.strip()
+             )
+         ]
+
+     def _infer_filename(self, prompt: str) -> str:
+         """Infer filename from user prompt.
+
+         Args:
+             prompt: User's request
+
+         Returns:
+             Suggested filename (relative to project source if context available)
+         """
+         # Simple heuristic: look for common patterns
+         prompt_lower = prompt.lower()
+
+         if "test" in prompt_lower:
+             basename = "test_generated.py"
+         elif "api" in prompt_lower or "endpoint" in prompt_lower:
+             basename = "api_generated.py"
+         elif "model" in prompt_lower:
+             basename = "models_generated.py"
+         else:
+             basename = "generated.py"
+
+         # If we have project context, prepend source path
+         if self.project_context and self.project_context.has_source_access:
+             return str(self.project_context.source_path / basename)
+         else:
+             return basename
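Pieced together from the CLI wiring in cli_v2.py, a direct programmatic use of CodeGenAgent might look like the sketch below. The project directory is a placeholder, error handling is elided, and apply_changes() writes to disk, so the read-only impact preview is printed first:

    from pathlib import Path

    from codegraph_cli import config
    from codegraph_cli.codegen_agent import CodeGenAgent
    from codegraph_cli.llm import LocalLLM
    from codegraph_cli.storage import GraphStore

    store = GraphStore(Path("/path/to/project/memory"))  # placeholder project dir
    llm = LocalLLM(
        model=config.LLM_MODEL,
        provider=config.LLM_PROVIDER,
        api_key=config.LLM_API_KEY,
    )
    agent = CodeGenAgent(store, llm)

    proposal = agent.generate("add a health-check endpoint", max_files=3)
    print(agent.preview_impact(proposal))   # summary only, nothing written yet

    result = agent.apply_changes(proposal)  # backup=True by default
    if result.success and result.backup_id:
        print(f"rollback with: cg v2 rollback {result.backup_id}")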
codegraph_cli/config.py
@@ -0,0 +1,31 @@
+ """Configuration paths for local CodeGraph memory."""
+
+ from __future__ import annotations
+
+ import os
+ from pathlib import Path
+
+ BASE_DIR = Path(os.environ.get("CODEGRAPH_HOME", str(Path.home() / ".codegraph"))).expanduser()
+ MEMORY_DIR = BASE_DIR / "memory"
+ STATE_FILE = BASE_DIR / "state.json"
+ DEFAULT_EMBEDDING_DIM = 256
+ SUPPORTED_EXTENSIONS = {".py"}
+
+ # Load configuration from TOML file (if available)
+ try:
+     from .config_manager import load_config
+     _toml_config = load_config()
+ except ImportError:
+     _toml_config = {}
+
+ # LLM Provider Configuration — loaded from ~/.codegraph/config.toml (set via `cg setup` or `cg set-llm`)
+ LLM_PROVIDER = _toml_config.get("provider", "ollama")
+ LLM_API_KEY = _toml_config.get("api_key", "")
+ LLM_MODEL = _toml_config.get("model", "qwen2.5-coder:7b")
+ LLM_ENDPOINT = _toml_config.get("endpoint", "http://127.0.0.1:11434/api/generate")
+
+
+ def ensure_base_dirs() -> None:
+     """Create base directories for local storage if needed."""
+     MEMORY_DIR.mkdir(parents=True, exist_ok=True)
+     BASE_DIR.mkdir(parents=True, exist_ok=True)
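Because BASE_DIR reads the CODEGRAPH_HOME environment variable at import time, the storage location can be redirected for tests or sandboxing. A minimal sketch (the path is a placeholder; the variable must be set before codegraph_cli.config is first imported):

    import os

    os.environ["CODEGRAPH_HOME"] = "/tmp/codegraph-sandbox"  # placeholder path

    from codegraph_cli import config

    config.ensure_base_dirs()
    print(config.MEMORY_DIR)    # /tmp/codegraph-sandbox/memory
    print(config.LLM_PROVIDER)  # "ollama" unless overridden in config.toml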