codegraph-cli 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- codegraph_cli/__init__.py +4 -0
- codegraph_cli/agents.py +191 -0
- codegraph_cli/bug_detector.py +386 -0
- codegraph_cli/chat_agent.py +352 -0
- codegraph_cli/chat_session.py +220 -0
- codegraph_cli/cli.py +330 -0
- codegraph_cli/cli_chat.py +367 -0
- codegraph_cli/cli_diagnose.py +133 -0
- codegraph_cli/cli_refactor.py +230 -0
- codegraph_cli/cli_setup.py +470 -0
- codegraph_cli/cli_test.py +177 -0
- codegraph_cli/cli_v2.py +267 -0
- codegraph_cli/codegen_agent.py +265 -0
- codegraph_cli/config.py +31 -0
- codegraph_cli/config_manager.py +341 -0
- codegraph_cli/context_manager.py +500 -0
- codegraph_cli/crew_agents.py +123 -0
- codegraph_cli/crew_chat.py +159 -0
- codegraph_cli/crew_tools.py +497 -0
- codegraph_cli/diff_engine.py +265 -0
- codegraph_cli/embeddings.py +241 -0
- codegraph_cli/graph_export.py +144 -0
- codegraph_cli/llm.py +642 -0
- codegraph_cli/models.py +47 -0
- codegraph_cli/models_v2.py +185 -0
- codegraph_cli/orchestrator.py +49 -0
- codegraph_cli/parser.py +800 -0
- codegraph_cli/performance_analyzer.py +223 -0
- codegraph_cli/project_context.py +230 -0
- codegraph_cli/rag.py +200 -0
- codegraph_cli/refactor_agent.py +452 -0
- codegraph_cli/security_scanner.py +366 -0
- codegraph_cli/storage.py +390 -0
- codegraph_cli/templates/graph_interactive.html +257 -0
- codegraph_cli/testgen_agent.py +316 -0
- codegraph_cli/validation_engine.py +285 -0
- codegraph_cli/vector_store.py +293 -0
- codegraph_cli-2.0.0.dist-info/METADATA +318 -0
- codegraph_cli-2.0.0.dist-info/RECORD +43 -0
- codegraph_cli-2.0.0.dist-info/WHEEL +5 -0
- codegraph_cli-2.0.0.dist-info/entry_points.txt +2 -0
- codegraph_cli-2.0.0.dist-info/licenses/LICENSE +21 -0
- codegraph_cli-2.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,470 @@
|
|
|
1
|
+
"""Interactive setup wizard for CodeGraph CLI."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import sys
|
|
6
|
+
from typing import Optional
|
|
7
|
+
|
|
8
|
+
import typer
|
|
9
|
+
|
|
10
|
+
from . import config_manager
|
|
11
|
+
|
|
12
|
+
app = typer.Typer(help="Setup wizard for LLM provider configuration")
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# Provider model options
# Curated model lists offered by the setup wizard for each cloud provider.
# Ollama is intentionally absent: its models are discovered live via
# config_manager.get_ollama_models() in setup_ollama().
PROVIDER_MODELS = {
    "groq": [
        "llama-3.3-70b-versatile",
        "llama-3.1-70b-versatile",
        "mixtral-8x7b-32768",
        "gemma2-9b-it",
    ],
    "openai": [
        "gpt-4",
        "gpt-4-turbo",
        "gpt-3.5-turbo",
    ],
    "anthropic": [
        "claude-3-5-sonnet-20241022",
        "claude-3-opus-20240229",
        "claude-3-sonnet-20240229",
    ],
    "gemini": [
        "gemini-2.0-flash",
        "gemini-1.5-pro",
        "gemini-1.5-flash",
        "gemini-1.0-pro",
    ],
    "openrouter": [
        # Entries tagged ":free" cost nothing on OpenRouter.
        "google/gemini-2.0-flash-exp:free",
        "meta-llama/llama-3.3-70b-instruct:free",
        "deepseek/deepseek-chat-v3-0324:free",
        "qwen/qwen3-235b-a22b:free",
        "stepfun/step-3.5-flash:free",
        "anthropic/claude-sonnet-4",
        "openai/gpt-4o",
    ],
}
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def print_header():
    """Render the boxed banner shown at the top of the setup wizard."""
    cyan = typer.colors.CYAN
    top = typer.style("╭──────────────────────────────────────────────╮", fg=cyan)
    side = typer.style("│", fg=cyan)
    title = typer.style(" 🔧 CodeGraph LLM Setup Wizard              ", bold=True)
    bottom = typer.style("╰──────────────────────────────────────────────╯", fg=cyan)

    typer.echo("")
    typer.echo(top)
    typer.echo(side + title + side)
    typer.echo(bottom)
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def print_success(message: str):
    """Echo *message* to stdout prefixed with a green check mark."""
    styled = typer.style(f"✅ {message}", fg=typer.colors.GREEN)
    typer.echo(styled)
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def print_error(message: str):
    """Echo *message* to stderr prefixed with a red cross mark."""
    styled = typer.style(f"❌ {message}", fg=typer.colors.RED)
    typer.echo(styled, err=True)
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def print_info(message: str):
    """Echo *message* to stdout prefixed with a blue info icon."""
    styled = typer.style(f"ℹ️  {message}", fg=typer.colors.BLUE)
    typer.echo(styled)
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
# All supported providers for quick lookup
# Validated against by `set_llm`; keep this list in sync with the numbered
# menu in `select_provider` and the keys of PROVIDER_MODELS.
ALL_PROVIDERS = ["ollama", "groq", "openai", "anthropic", "gemini", "openrouter"]
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def select_provider() -> str:
    """Prompt the user to pick an LLM provider from a numbered menu.

    Re-prompts until a valid digit is entered.

    Returns:
        Selected provider name
    """
    # Menu entries keyed by the digit the user types.
    options = {
        "1": ("ollama", "Ollama (local, free)"),
        "2": ("groq", "Groq (cloud, fast, free tier)"),
        "3": ("openai", "OpenAI (cloud, paid)"),
        "4": ("anthropic", "Anthropic (cloud, paid)"),
        "5": ("gemini", "Gemini (cloud, free tier available)"),
        "6": ("openrouter", "OpenRouter (cloud, multi-model, free tier available)"),
    }

    typer.echo("\nChoose your LLM provider:")
    for digit, (_, label) in options.items():
        typer.echo(f"  {digit}) {label}")

    while True:
        answer = typer.prompt("\nEnter choice [1-6]", type=str)
        if answer in options:
            return options[answer][0]
        print_error("Invalid choice. Please enter 1-6.")
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
def setup_ollama() -> tuple[str, str, str]:
    """Walk the user through configuring a local Ollama provider.

    Verifies connectivity, lists the locally pulled models, and lets the
    user pick one. Exits with code 1 when Ollama is unreachable or has
    no models.

    Returns:
        Tuple of (provider, model, endpoint)
    """
    typer.echo("\n" + typer.style("Setting up Ollama", bold=True))
    typer.echo("━" * 50)

    # Ask where the Ollama daemon lives before probing it.
    endpoint = typer.prompt("Ollama endpoint", default="http://127.0.0.1:11434")

    typer.echo("\n⏳ Checking Ollama connection...")
    if not config_manager.validate_ollama_connection(endpoint):
        print_error("Cannot connect to Ollama!")
        print_info("Make sure Ollama is running: https://ollama.ai")
        print_info("Start Ollama and run this setup again.")
        raise typer.Exit(code=1)

    print_success("Connected to Ollama")

    typer.echo("\n⏳ Fetching available models...")
    models = config_manager.get_ollama_models(endpoint)
    if not models:
        print_error("No models found!")
        print_info("Pull a model first: ollama pull qwen2.5-coder:7b")
        raise typer.Exit(code=1)

    typer.echo("\nAvailable models:")
    for index, name in enumerate(models, 1):
        typer.echo(f"  {index}) {name}")

    # Re-prompt until a valid 1-based index is given.
    selected_model = None
    while selected_model is None:
        pick = typer.prompt(f"\nSelect model [1-{len(models)}]", type=int)
        if 1 <= pick <= len(models):
            selected_model = models[pick - 1]
        else:
            print_error(f"Invalid choice. Please enter a number between 1 and {len(models)}.")

    return "ollama", selected_model, endpoint
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
def setup_cloud_provider(provider: str) -> tuple[str, str, str]:
    """Setup cloud provider (Groq, OpenAI, Anthropic, Gemini, OpenRouter).

    Prompts for an API key, lets the user pick a model (from the curated
    list or a custom name), and validates the key before returning.
    Exits with code 1 on an empty key or failed validation.

    Args:
        provider: Provider name

    Returns:
        Tuple of (provider, model, api_key)
    """
    # Providers whose display name is not just title-cased.
    provider_display = {
        "openrouter": "OpenRouter",
        "openai": "OpenAI",
    }.get(provider, provider.title())

    typer.echo("\n" + typer.style(f"Setting up {provider_display}", bold=True))
    typer.echo("━" * 50)

    # Provider-specific hints
    if provider == "gemini":
        print_info("Get your Gemini API key at: https://aistudio.google.com/apikey")
    elif provider == "openrouter":
        print_info("Get your OpenRouter API key at: https://openrouter.ai/keys")
        print_info("Many free models available! Look for models ending with ':free'")

    # Get API key. Strip immediately: pasted keys frequently carry a
    # trailing newline/space; previously the emptiness check stripped but
    # the raw key was stored, which could break later API validation.
    api_key = typer.prompt(f"\nEnter your {provider_display} API key", hide_input=True).strip()

    if not api_key:
        print_error("API key cannot be empty!")
        raise typer.Exit(code=1)

    # Display available models
    models = PROVIDER_MODELS.get(provider, [])
    typer.echo("\nAvailable models:")
    for i, model in enumerate(models, 1):
        typer.echo(f"  {i}) {model}")

    # Select model: a number picks from the list, anything else is taken
    # as a custom model name.
    while True:
        choice = typer.prompt(f"\nSelect model [1-{len(models)}] or enter custom model name", type=str)

        try:
            idx = int(choice)
            if 1 <= idx <= len(models):
                selected_model = models[idx - 1]
                break
            print_error(f"Invalid choice. Please enter a number between 1 and {len(models)}.")
        except ValueError:
            # Not a number: custom model name
            selected_model = choice.strip()
            if selected_model:
                break
            print_error("Model name cannot be empty!")

    # Validate the key against the chosen provider/model before returning.
    typer.echo("\n⏳ Validating API key...")
    is_valid, error_msg = config_manager.validate_api_key(provider, api_key, selected_model)

    if not is_valid:
        print_error(f"API key validation failed: {error_msg}")
        print_info("Please check your API key and try again.")
        raise typer.Exit(code=1)

    print_success("API key validated successfully")

    return provider, selected_model, api_key
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
def display_summary(provider: str, model: str, api_key: str = "", endpoint: str = ""):
    """Display configuration summary.

    Args:
        provider: Provider name
        model: Model name
        api_key: API key (masked for display)
        endpoint: Endpoint URL
    """
    typer.echo("\n" + typer.style("✅ Configuration Summary", bold=True, fg=typer.colors.GREEN))
    typer.echo("━" * 50)
    typer.echo(f"Provider: {typer.style(provider, fg=typer.colors.CYAN)}")
    typer.echo(f"Model:    {typer.style(model, fg=typer.colors.CYAN)}")

    if api_key:
        # Mask the key. Only reveal the first 8 characters when the key is
        # longer than that; previously `"*" * (len - 8)` went negative for
        # short keys and the whole key was echoed unmasked.
        if len(api_key) > 8:
            masked_key = api_key[:8] + "*" * (len(api_key) - 8)
        else:
            masked_key = "*" * len(api_key)
        typer.echo(f"API Key:  {masked_key}")

    if endpoint:
        typer.echo(f"Endpoint: {endpoint}")

    typer.echo("")
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
def setup():
    """Interactive setup wizard for LLM provider configuration."""
    print_header()

    provider = select_provider()

    # Provider-specific setup: Ollama yields an endpoint, cloud providers
    # yield an API key (OpenRouter also gets a fixed endpoint).
    api_key = ""
    endpoint = ""

    if provider == "ollama":
        provider, model, endpoint = setup_ollama()
    else:
        provider, model, api_key = setup_cloud_provider(provider)
        if provider == "openrouter":
            endpoint = "https://openrouter.ai/api/v1/chat/completions"

    display_summary(provider, model, api_key, endpoint)

    # Confirm before touching the config file.
    if not typer.confirm(f"Save to {config_manager.CONFIG_FILE}?", default=True):
        print_info("Configuration not saved.")
        raise typer.Exit(code=0)

    if config_manager.save_config(provider, model, api_key, endpoint):
        print_success(f"Configuration saved to {config_manager.CONFIG_FILE}")
        print_info(f"You can now use 'cg' commands without specifying provider options!")
        typer.echo("\nExample commands:")
        typer.echo("  cg index ./my-project")
        typer.echo("  cg search 'authentication logic'")
        typer.echo("  cg impact main")
    else:
        print_error("Failed to save configuration!")
        raise typer.Exit(code=1)
|
|
289
|
+
|
|
290
|
+
|
|
291
|
+
def set_llm(
    provider: str = typer.Argument(..., help="LLM provider: ollama, groq, openai, anthropic, gemini, openrouter"),
    model: str = typer.Option(None, "--model", "-m", help="Model name (uses provider default if not set)."),
    api_key: str = typer.Option(None, "--api-key", "-k", help="API key for cloud providers."),
    endpoint: str = typer.Option(None, "--endpoint", "-e", help="Custom endpoint URL."),
    no_validate: bool = typer.Option(False, "--no-validate", help="Skip API key validation."),
):
    """Quickly switch LLM provider without full setup wizard.

    Examples:
        cg set-llm groq -k YOUR_API_KEY
        cg set-llm gemini -k YOUR_API_KEY -m gemini-2.0-flash
        cg set-llm openrouter -k YOUR_API_KEY -m google/gemini-2.0-flash-exp:free
        cg set-llm ollama -m qwen2.5-coder:7b
    """
    provider = provider.lower().strip()

    if provider not in ALL_PROVIDERS:
        print_error(f"Unknown provider '{provider}'. Choose from: {', '.join(ALL_PROVIDERS)}")
        raise typer.Exit(code=1)

    # Current config (for key reuse) and provider defaults.
    current = config_manager.load_config()
    defaults = config_manager.get_provider_config(provider)

    resolved_model = model or defaults.get("model", "")

    # Endpoint: explicit flag wins, then provider-specific defaults.
    if endpoint:
        resolved_endpoint = endpoint
    elif provider == "openrouter":
        resolved_endpoint = "https://openrouter.ai/api/v1/chat/completions"
    elif provider == "ollama":
        resolved_endpoint = defaults.get("endpoint", "http://127.0.0.1:11434/api/generate")
    else:
        resolved_endpoint = ""

    resolved_api_key = api_key or ""

    # Cloud providers need an API key: reuse the stored one when staying
    # on the same provider, otherwise prompt.
    is_cloud = provider not in ("ollama",)
    if is_cloud and not resolved_api_key:
        if current.get("provider") == provider and current.get("api_key"):
            resolved_api_key = current["api_key"]
            print_info(f"Reusing existing API key for {provider}")
        else:
            resolved_api_key = typer.prompt(f"Enter your {provider} API key", hide_input=True)

    # Validation: API key for cloud providers (unless skipped), connection
    # probe for Ollama. A failed check can still be saved on confirmation.
    if not no_validate and is_cloud and resolved_api_key:
        typer.echo("⏳ Validating API key...")
        is_valid, error_msg = config_manager.validate_api_key(provider, resolved_api_key, resolved_model)
        if not is_valid:
            print_error(f"Validation failed: {error_msg}")
            if not typer.confirm("Save anyway?", default=False):
                raise typer.Exit(code=1)
    elif provider == "ollama":
        typer.echo("⏳ Checking Ollama connection...")
        # The connection probe wants the server root, not the generate path.
        base_ep = resolved_endpoint.replace("/api/generate", "")
        if not config_manager.validate_ollama_connection(base_ep):
            print_error("Cannot connect to Ollama!")
            if not typer.confirm("Save anyway?", default=False):
                raise typer.Exit(code=1)

    # Save
    if config_manager.save_config(provider, resolved_model, resolved_api_key, resolved_endpoint):
        print_success(f"LLM provider set to: {provider}")
        typer.echo(f"   Provider: {typer.style(provider, fg=typer.colors.CYAN)}")
        typer.echo(f"   Model: {typer.style(resolved_model, fg=typer.colors.CYAN)}")
        if resolved_endpoint:
            typer.echo(f"   Endpoint: {resolved_endpoint}")
    else:
        print_error("Failed to save configuration!")
        raise typer.Exit(code=1)
|
|
371
|
+
|
|
372
|
+
|
|
373
|
+
def unset_llm():
    """Reset LLM configuration to defaults (removes API keys and provider settings)."""
    typer.echo("\n🔧 " + typer.style("Unset LLM Configuration", bold=True, fg=typer.colors.CYAN))
    typer.echo("━" * 50)

    # Nothing to do when no config file exists.
    if not config_manager.CONFIG_FILE.exists():
        print_info("No LLM configuration found. Nothing to unset.")
        raise typer.Exit(code=0)

    # Show current config so the user knows what they are discarding.
    current = config_manager.load_config()
    typer.echo(f"\nCurrent provider: {typer.style(current.get('provider', 'none'), fg=typer.colors.YELLOW)}")
    typer.echo(f"Current model: {typer.style(current.get('model', 'none'), fg=typer.colors.YELLOW)}")
    if current.get('api_key'):
        masked = current['api_key'][:8] + '****'
        typer.echo(f"API key: {masked}")

    typer.echo("")
    typer.echo("Choose what to do:")
    typer.echo("  1) Reset to Ollama defaults (remove cloud keys)")
    typer.echo("  2) Delete entire config file")
    typer.echo("  3) Cancel")

    choice = typer.prompt("\nEnter choice [1-3]", type=str)

    if choice == "1":
        # Overwrite with the stock Ollama configuration (drops any key).
        if config_manager.save_config("ollama", "qwen2.5-coder:7b", "", "http://127.0.0.1:11434/api/generate"):
            print_success("Configuration reset to Ollama defaults.")
            print_info("API keys have been removed.")
        else:
            print_error("Failed to reset configuration!")
            raise typer.Exit(code=1)
    elif choice == "2":
        if typer.confirm("Are you sure you want to delete the config file?", default=False):
            try:
                config_manager.CONFIG_FILE.unlink()
                print_success(f"Deleted {config_manager.CONFIG_FILE}")
                print_info("Will use Ollama defaults on next run.")
            except OSError as e:
                print_error(f"Failed to delete config: {e}")
                raise typer.Exit(code=1)
        else:
            print_info("Cancelled.")
    else:
        print_info("Cancelled.")
|
|
422
|
+
|
|
423
|
+
|
|
424
|
+
def show_llm():
    """Show current LLM provider configuration.

    Reads the stored config (falling back to Ollama defaults) and prints a
    formatted summary with the API key masked.
    """
    typer.echo("")
    typer.echo(typer.style("╭──────────────────────────────────────────────╮", fg=typer.colors.CYAN))
    typer.echo(typer.style("│", fg=typer.colors.CYAN) + typer.style("  🔍 LLM Configuration                        ", bold=True) + typer.style("│", fg=typer.colors.CYAN))
    typer.echo(typer.style("╰──────────────────────────────────────────────╯", fg=typer.colors.CYAN))

    # NOTE: the previous version computed CONFIG_FILE.exists() into an
    # unused local; load_config() handles the missing-file case itself.
    cfg = config_manager.load_config()

    provider = cfg.get("provider", "ollama")
    model = cfg.get("model", "qwen2.5-coder:7b")
    endpoint = cfg.get("endpoint", "")
    api_key = cfg.get("api_key", "")

    # Provider badge colors
    provider_color = {
        "ollama": typer.colors.GREEN,
        "groq": typer.colors.YELLOW,
        "openai": typer.colors.CYAN,
        "anthropic": typer.colors.MAGENTA,
        "gemini": typer.colors.BLUE,
        "openrouter": typer.colors.BRIGHT_CYAN,
    }.get(provider, typer.colors.WHITE)

    typer.echo(f"  Provider   {typer.style(f' {provider.upper()} ', bg=provider_color, fg=typer.colors.WHITE, bold=True)}")
    typer.echo(f"  Model      {typer.style(model, fg=typer.colors.WHITE, bold=True)}")
    if endpoint:
        typer.echo(f"  Endpoint   {typer.style(endpoint, dim=True)}")
    if api_key:
        # Mask the key. Only reveal the first 8 characters when the key is
        # longer than that; previously `min(len - 8, 16)` went negative for
        # short keys and the whole key was echoed unmasked.
        if len(api_key) > 8:
            masked = api_key[:8] + '•' * min(len(api_key) - 8, 16)
        else:
            masked = '•' * len(api_key)
        typer.echo(f"  API Key    {masked}")
    else:
        typer.echo(f"  API Key    {typer.style('(not set)', dim=True)}")
    typer.echo(f"  Config     {typer.style(str(config_manager.CONFIG_FILE), dim=True)}")

    typer.echo("")
    typer.echo(typer.style("  Quick Commands", bold=True))
    typer.echo(typer.style("  ─────────────────────────────────────────", dim=True))
    typer.echo(f"  {typer.style('cg setup', fg=typer.colors.YELLOW)}           Full interactive wizard")
    typer.echo(f"  {typer.style('cg set-llm <name>', fg=typer.colors.YELLOW)}  Quick switch provider")
    typer.echo(f"  {typer.style('cg unset-llm', fg=typer.colors.YELLOW)}       Reset / clear config")
    typer.echo("")
|
|
467
|
+
|
|
468
|
+
|
|
469
|
+
# Allow running this module directly (python -m / script) for manual testing.
if __name__ == "__main__":
    app()
|
|
@@ -0,0 +1,177 @@
|
|
|
1
|
+
"""CLI commands for test generation."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Optional
|
|
7
|
+
|
|
8
|
+
import typer
|
|
9
|
+
|
|
10
|
+
from . import config
|
|
11
|
+
from .llm import LocalLLM
|
|
12
|
+
from .storage import GraphStore, ProjectManager
|
|
13
|
+
from .testgen_agent import TestGenAgent
|
|
14
|
+
|
|
15
|
+
# Create sub-app for test generation
# Subcommands ("unit", "integration", "coverage") are registered below via
# @test_app.command decorators; the main CLI mounts this sub-app.
test_app = typer.Typer(help="Generate tests from code graph")
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def _get_testgen_agent(pm: ProjectManager) -> TestGenAgent:
    """Build a TestGenAgent bound to the currently loaded project.

    Raises typer.BadParameter when no project is loaded or its on-disk
    directory is missing.
    """
    project = pm.get_current_project()
    if not project:
        raise typer.BadParameter("No project loaded. Use 'cg load-project <name>' or run 'cg index <path>'.")

    project_dir = pm.project_dir(project)
    if not project_dir.exists():
        raise typer.BadParameter(f"Loaded project '{project}' does not exist in memory.")

    store = GraphStore(project_dir)

    # Attach an LLM only when one is fully configured; the agent works
    # without one otherwise.
    llm = None
    if config.LLM_PROVIDER and config.LLM_MODEL:
        llm = LocalLLM(
            model=config.LLM_MODEL,
            provider=config.LLM_PROVIDER,
            api_key=config.LLM_API_KEY,
        )

    return TestGenAgent(store, llm)
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
@test_app.command("unit")
def generate_unit_tests(
    symbol: str = typer.Argument(..., help="Function name to generate tests for"),
    output: Optional[str] = typer.Option(None, "--output", "-o", help="Output test file path"),
):
    """Generate unit tests for a function.

    Looks up *symbol* in the current project's code graph, asks the agent
    to produce pytest-style tests, prints them along with the predicted
    coverage impact, and optionally writes them to *output*.
    """
    pm = ProjectManager()
    agent = _get_testgen_agent(pm)

    typer.echo(f"🧪 Generating unit tests for '{symbol}'...")

    try:
        tests = agent.generate_unit_tests(symbol)
    except ValueError as e:
        # Agent raises ValueError when the symbol cannot be resolved.
        typer.echo(f"❌ Error: {e}")
        raise typer.Exit(1)

    if not tests:
        typer.echo("❌ No tests generated")
        return

    typer.echo(f"\n✅ Generated {len(tests)} test(s):\n")

    # Per-test summary
    all_test_code = []
    for test in tests:
        typer.echo(f"📝 {test.name}")
        typer.echo(f"   {test.description}")
        typer.echo("")
        all_test_code.append(test.test_code)

    # Show coverage impact
    coverage = agent.analyze_coverage_impact(tests)
    typer.echo(f"📊 Coverage Impact:")
    typer.echo(f"   Estimated increase: +{coverage['coverage_increase']:.1f}%")
    typer.echo(f"   Functions covered: {coverage['functions_covered']}")
    typer.echo("")

    # Show full test code
    typer.echo("="*60)
    typer.echo("GENERATED TEST CODE")
    typer.echo("="*60)
    full_code = "\n\n".join(all_test_code)
    typer.echo(full_code)
    typer.echo("")

    # Write to file if requested
    if output:
        output_path = Path(output)
        output_path.parent.mkdir(parents=True, exist_ok=True)

        # Add imports
        test_file_content = "import pytest\n\n" + full_code
        # Force UTF-8: generated code may contain non-ASCII characters and
        # the platform-default encoding (e.g. cp1252 on Windows) can fail.
        output_path.write_text(test_file_content, encoding="utf-8")
        typer.echo(f"✅ Tests written to {output}")
    else:
        typer.echo("💡 Use --output to save tests to a file")
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
@test_app.command("integration")
def generate_integration_tests(
    flow: str = typer.Argument(..., help="User flow description"),
    output: Optional[str] = typer.Option(None, "--output", "-o", help="Output test file path"),
):
    """Generate integration tests for a user flow.

    Asks the agent to produce pytest-style tests covering *flow*, prints
    them, and optionally writes them to *output*.
    """
    pm = ProjectManager()
    agent = _get_testgen_agent(pm)

    typer.echo(f"🧪 Generating integration test for '{flow}'...")

    # Catch ValueError for parity with the "unit" command; previously an
    # agent error here surfaced as an unhandled traceback.
    try:
        tests = agent.generate_integration_tests(flow)
    except ValueError as e:
        typer.echo(f"❌ Error: {e}")
        raise typer.Exit(1)

    if not tests:
        typer.echo("❌ No tests generated")
        return

    typer.echo(f"\n✅ Generated {len(tests)} test(s):\n")

    # Per-test summary
    all_test_code = []
    for test in tests:
        typer.echo(f"📝 {test.name}")
        typer.echo(f"   {test.description}")
        typer.echo("")
        all_test_code.append(test.test_code)

    # Show full test code
    typer.echo("="*60)
    typer.echo("GENERATED TEST CODE")
    typer.echo("="*60)
    full_code = "\n\n".join(all_test_code)
    typer.echo(full_code)
    typer.echo("")

    # Write to file if requested
    if output:
        output_path = Path(output)
        output_path.parent.mkdir(parents=True, exist_ok=True)

        # Add imports
        test_file_content = "import pytest\n\n" + full_code
        # Force UTF-8: generated code may contain non-ASCII characters and
        # the platform-default encoding (e.g. cp1252 on Windows) can fail.
        output_path.write_text(test_file_content, encoding="utf-8")
        typer.echo(f"✅ Tests written to {output}")
    else:
        typer.echo("💡 Use --output to save tests to a file")
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
@test_app.command("coverage")
def show_coverage_prediction(
    symbol: str = typer.Argument(..., help="Function to analyze"),
):
    """Show predicted coverage impact of generating tests."""
    pm = ProjectManager()
    agent = _get_testgen_agent(pm)

    typer.echo(f"📊 Analyzing coverage impact for '{symbol}'...")

    try:
        # Generate (but do not write) tests, then predict their effect.
        generated = agent.generate_unit_tests(symbol)
        stats = agent.analyze_coverage_impact(generated)

        typer.echo(f"\n📈 Coverage Analysis:")
        typer.echo(f"   Current coverage: {stats['current_coverage']:.1f}%")
        typer.echo(f"   Estimated after tests: {stats['estimated_coverage']:.1f}%")
        typer.echo(f"   Increase: +{stats['coverage_increase']:.1f}%")
        typer.echo(f"   Tests to generate: {stats['tests_generated']}")
        typer.echo(f"   Functions covered: {stats['functions_covered']}")
    except ValueError as e:
        typer.echo(f"❌ Error: {e}")
        raise typer.Exit(1)
|