codegraph_cli-2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- codegraph_cli/__init__.py +4 -0
- codegraph_cli/agents.py +191 -0
- codegraph_cli/bug_detector.py +386 -0
- codegraph_cli/chat_agent.py +352 -0
- codegraph_cli/chat_session.py +220 -0
- codegraph_cli/cli.py +330 -0
- codegraph_cli/cli_chat.py +367 -0
- codegraph_cli/cli_diagnose.py +133 -0
- codegraph_cli/cli_refactor.py +230 -0
- codegraph_cli/cli_setup.py +470 -0
- codegraph_cli/cli_test.py +177 -0
- codegraph_cli/cli_v2.py +267 -0
- codegraph_cli/codegen_agent.py +265 -0
- codegraph_cli/config.py +31 -0
- codegraph_cli/config_manager.py +341 -0
- codegraph_cli/context_manager.py +500 -0
- codegraph_cli/crew_agents.py +123 -0
- codegraph_cli/crew_chat.py +159 -0
- codegraph_cli/crew_tools.py +497 -0
- codegraph_cli/diff_engine.py +265 -0
- codegraph_cli/embeddings.py +241 -0
- codegraph_cli/graph_export.py +144 -0
- codegraph_cli/llm.py +642 -0
- codegraph_cli/models.py +47 -0
- codegraph_cli/models_v2.py +185 -0
- codegraph_cli/orchestrator.py +49 -0
- codegraph_cli/parser.py +800 -0
- codegraph_cli/performance_analyzer.py +223 -0
- codegraph_cli/project_context.py +230 -0
- codegraph_cli/rag.py +200 -0
- codegraph_cli/refactor_agent.py +452 -0
- codegraph_cli/security_scanner.py +366 -0
- codegraph_cli/storage.py +390 -0
- codegraph_cli/templates/graph_interactive.html +257 -0
- codegraph_cli/testgen_agent.py +316 -0
- codegraph_cli/validation_engine.py +285 -0
- codegraph_cli/vector_store.py +293 -0
- codegraph_cli-2.0.0.dist-info/METADATA +318 -0
- codegraph_cli-2.0.0.dist-info/RECORD +43 -0
- codegraph_cli-2.0.0.dist-info/WHEEL +5 -0
- codegraph_cli-2.0.0.dist-info/entry_points.txt +2 -0
- codegraph_cli-2.0.0.dist-info/licenses/LICENSE +21 -0
- codegraph_cli-2.0.0.dist-info/top_level.txt +1 -0
codegraph_cli/config_manager.py
@@ -0,0 +1,341 @@
"""Configuration manager for CodeGraph CLI using TOML files."""

from __future__ import annotations

import json
import os
import urllib.error
import urllib.request
from pathlib import Path
from typing import Any, Dict, Optional

try:
    import toml
except ImportError:
    toml = None  # type: ignore

from .config import BASE_DIR


CONFIG_FILE = BASE_DIR / "config.toml"

# Default configurations for each provider
DEFAULT_CONFIGS = {
    "ollama": {
        "provider": "ollama",
        "model": "qwen2.5-coder:7b",
        "endpoint": "http://127.0.0.1:11434/api/generate",
    },
    "groq": {
        "provider": "groq",
        "model": "llama-3.3-70b-versatile",
        "api_key": "",
    },
    "openai": {
        "provider": "openai",
        "model": "gpt-4",
        "api_key": "",
    },
    "anthropic": {
        "provider": "anthropic",
        "model": "claude-3-5-sonnet-20241022",
        "api_key": "",
    },
    "gemini": {
        "provider": "gemini",
        "model": "gemini-2.0-flash",
        "api_key": "",
    },
    "openrouter": {
        "provider": "openrouter",
        "model": "google/gemini-2.0-flash-exp:free",
        "api_key": "",
        "endpoint": "https://openrouter.ai/api/v1/chat/completions",
    },
}

def load_config() -> Dict[str, Any]:
    """Load configuration from the TOML file.

    Returns:
        Configuration dictionary with provider settings.
        Falls back to the Ollama defaults if the file doesn't exist.
    """
    if not CONFIG_FILE.exists():
        return DEFAULT_CONFIGS["ollama"].copy()

    if toml is None:
        # Fallback if toml is not installed
        return DEFAULT_CONFIGS["ollama"].copy()

    try:
        with open(CONFIG_FILE, "r") as f:
            config = toml.load(f)
        return config.get("llm", DEFAULT_CONFIGS["ollama"].copy())
    except Exception:
        return DEFAULT_CONFIGS["ollama"].copy()

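As a quick illustration of the fallback contract above (an editor's sketch, not part of the package): load_config() always hands back a plain dict, so callers can read keys without first checking whether config.toml exists.

from codegraph_cli.config_manager import load_config

cfg = load_config()
# Even with no config.toml on disk, the Ollama defaults come back:
print(cfg["provider"], cfg["model"])  # e.g. "ollama qwen2.5-coder:7b"
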
def save_config(provider: str, model: str, api_key: str = "", endpoint: str = "") -> bool:
    """Save LLM configuration to the TOML file.

    Args:
        provider: Provider name (ollama, groq, openai, anthropic, gemini, openrouter)
        model: Model name
        api_key: API key for cloud providers
        endpoint: Custom endpoint (for Ollama or OpenRouter)

    Returns:
        True if saved successfully, False otherwise
    """
    if toml is None:
        return False

    # Ensure the config directory exists
    BASE_DIR.mkdir(parents=True, exist_ok=True)

    # Build the config document
    config = {
        "llm": {
            "provider": provider,
            "model": model,
        }
    }

    if api_key:
        config["llm"]["api_key"] = api_key

    if endpoint:
        config["llm"]["endpoint"] = endpoint

    try:
        with open(CONFIG_FILE, "w") as f:
            toml.dump(config, f)
        return True
    except Exception:
        return False

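A minimal round-trip sketch, assuming the package is importable; the gsk_placeholder key is illustrative only. The file that save_config() writes is a single [llm] table, shown in the trailing comment.

from codegraph_cli.config_manager import load_config, save_config

if save_config("groq", "llama-3.3-70b-versatile", api_key="gsk_placeholder"):
    assert load_config()["provider"] == "groq"

# config.toml now reads roughly:
#   [llm]
#   provider = "groq"
#   model = "llama-3.3-70b-versatile"
#   api_key = "gsk_placeholder"
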
def get_provider_config(provider: str) -> Dict[str, Any]:
    """Get the default configuration for a specific provider.

    Args:
        provider: Provider name

    Returns:
        Default configuration dictionary (Ollama defaults for unknown providers)
    """
    return DEFAULT_CONFIGS.get(provider, DEFAULT_CONFIGS["ollama"]).copy()

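Two behaviors worth seeing in a sketch: unknown provider names silently fall back to the Ollama entry rather than raising, and the .copy() keeps callers from mutating DEFAULT_CONFIGS in place (the "mistral" name is just an example of a key that isn't registered).

from codegraph_cli.config_manager import DEFAULT_CONFIGS, get_provider_config

cfg = get_provider_config("mistral")   # not in DEFAULT_CONFIGS
assert cfg["provider"] == "ollama"     # fell back to the Ollama entry
cfg["model"] = "something-else"        # mutates only the returned copy
assert DEFAULT_CONFIGS["ollama"]["model"] == "qwen2.5-coder:7b"
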
def validate_ollama_connection(endpoint: str = "http://127.0.0.1:11434") -> bool:
    """Check if Ollama is running and accessible.

    Args:
        endpoint: Ollama base URL (without the /api/... path)

    Returns:
        True if Ollama is accessible, False otherwise
    """
    try:
        req = urllib.request.Request(f"{endpoint}/api/tags", method="GET")
        with urllib.request.urlopen(req, timeout=3) as resp:
            return resp.status == 200
    except (urllib.error.URLError, TimeoutError):
        return False

def get_ollama_models(endpoint: str = "http://127.0.0.1:11434") -> list[str]:
    """Fetch available models from Ollama.

    Args:
        endpoint: Ollama base URL

    Returns:
        List of available model names (empty on any error)
    """
    try:
        req = urllib.request.Request(f"{endpoint}/api/tags", method="GET")
        with urllib.request.urlopen(req, timeout=5) as resp:
            data = json.loads(resp.read().decode("utf-8"))
        models = data.get("models", [])
        return [model["name"] for model in models]
    except (urllib.error.URLError, TimeoutError, json.JSONDecodeError, KeyError):
        return []

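The two Ollama helpers compose naturally at setup time; both hit the same GET /api/tags endpoint, so a reachable server normally implies the model listing will succeed too. A hedged sketch:

from codegraph_cli.config_manager import (
    get_ollama_models,
    validate_ollama_connection,
)

if validate_ollama_connection():
    for name in get_ollama_models():
        print(name)  # e.g. qwen2.5-coder:7b
else:
    print("Ollama is not reachable on 127.0.0.1:11434")
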
def validate_api_key(provider: str, api_key: str, model: str) -> tuple[bool, str]:
    """Validate an API key by making a test request.

    Args:
        provider: Provider name (groq, openai, anthropic, gemini, openrouter)
        api_key: API key to validate
        model: Model name to test

    Returns:
        Tuple of (is_valid, error_message)
    """
    if not api_key:
        return False, "API key is required"

    try:
        if provider == "groq":
            return _validate_groq(api_key, model)
        elif provider == "openai":
            return _validate_openai(api_key, model)
        elif provider == "anthropic":
            return _validate_anthropic(api_key, model)
        elif provider == "gemini":
            return _validate_gemini(api_key, model)
        elif provider == "openrouter":
            return _validate_openrouter(api_key, model)
        else:
            return False, f"Unknown provider: {provider}"
    except Exception as e:
        return False, f"Validation error: {str(e)}"

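Because validate_api_key() returns a (bool, message) pair instead of raising, caller code stays linear. A sketch (the key value is a placeholder):

from codegraph_cli.config_manager import validate_api_key

ok, msg = validate_api_key("openai", "sk-placeholder", "gpt-4")
if not ok:
    print(f"Key rejected: {msg}")
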
def _validate_groq(api_key: str, model: str) -> tuple[bool, str]:
    """Validate a Groq API key (shells out to curl)."""
    import subprocess

    payload = json.dumps({
        "model": model,
        "messages": [{"role": "user", "content": "test"}],
        "max_tokens": 5,
    })

    try:
        result = subprocess.run(
            [
                "curl", "-s", "-X", "POST",
                "https://api.groq.com/openai/v1/chat/completions",
                "-H", "Content-Type: application/json",
                "-H", f"Authorization: Bearer {api_key}",
                "-d", payload,
                "--max-time", "10",
            ],
            capture_output=True,
            text=True,
            timeout=15,
        )

        if result.returncode == 0 and result.stdout:
            response = json.loads(result.stdout)
            if "error" in response:
                return False, response["error"].get("message", "Invalid API key")
            return True, "Valid"
        return False, "Connection failed"
    except Exception as e:
        return False, str(e)

def _validate_openai(api_key: str, model: str) -> tuple[bool, str]:
    """Validate an OpenAI API key."""
    payload = json.dumps({
        "model": model,
        "messages": [{"role": "user", "content": "test"}],
        "max_tokens": 5,
    }).encode("utf-8")

    req = urllib.request.Request(
        "https://api.openai.com/v1/chat/completions",
        data=payload,
        headers={
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
        },
        method="POST",
    )

    try:
        with urllib.request.urlopen(req, timeout=10):
            return True, "Valid"
    except urllib.error.HTTPError as e:
        if e.code == 401:
            return False, "Invalid API key"
        return False, f"HTTP error: {e.code}"
    except Exception as e:
        return False, str(e)

def _validate_anthropic(api_key: str, model: str) -> tuple[bool, str]:
    """Validate an Anthropic API key."""
    payload = json.dumps({
        "model": model,
        "messages": [{"role": "user", "content": "test"}],
        "max_tokens": 5,
    }).encode("utf-8")

    req = urllib.request.Request(
        "https://api.anthropic.com/v1/messages",
        data=payload,
        headers={
            "Content-Type": "application/json",
            "x-api-key": api_key,
            "anthropic-version": "2023-06-01",
        },
        method="POST",
    )

    try:
        with urllib.request.urlopen(req, timeout=10):
            return True, "Valid"
    except urllib.error.HTTPError as e:
        if e.code == 401:
            return False, "Invalid API key"
        return False, f"HTTP error: {e.code}"
    except Exception as e:
        return False, str(e)

def _validate_gemini(api_key: str, model: str) -> tuple[bool, str]:
    """Validate a Gemini API key."""
    # Use the list-models endpoint for validation; the model argument is unused here
    url = f"https://generativelanguage.googleapis.com/v1beta/models?key={api_key}"

    req = urllib.request.Request(url, method="GET")

    try:
        with urllib.request.urlopen(req, timeout=10):
            return True, "Valid"
    except urllib.error.HTTPError as e:
        if e.code in (401, 403):
            return False, "Invalid API key"
        return False, f"HTTP error: {e.code}"
    except Exception as e:
        return False, str(e)

def _validate_openrouter(api_key: str, model: str) -> tuple[bool, str]:
    """Validate an OpenRouter API key."""
    payload = json.dumps({
        "model": model,
        "messages": [{"role": "user", "content": "test"}],
        "max_tokens": 5,
    }).encode("utf-8")

    req = urllib.request.Request(
        "https://openrouter.ai/api/v1/chat/completions",
        data=payload,
        headers={
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
        },
        method="POST",
    )

    try:
        with urllib.request.urlopen(req, timeout=10):
            return True, "Valid"
    except urllib.error.HTTPError as e:
        if e.code == 401:
            return False, "Invalid API key"
        # 402 means a valid key but no credits - still a valid key
        if e.code == 402:
            return True, "Valid (no credits)"
        return False, f"HTTP error: {e.code}"
    except Exception as e:
        return False, str(e)
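
Groq, OpenAI, and OpenRouter all speak the same chat-completions wire format, and _validate_groq() additionally depends on a curl binary being present on the machine. A refactoring sketch, not the package's code: a single urllib-based helper parameterized by endpoint URL could replace the trio (the name _validate_chat_completions is hypothetical).

import json
import urllib.error
import urllib.request


def _validate_chat_completions(url: str, api_key: str, model: str) -> tuple[bool, str]:
    """Hypothetical shared probe for OpenAI-compatible chat endpoints."""
    payload = json.dumps({
        "model": model,
        "messages": [{"role": "user", "content": "test"}],
        "max_tokens": 5,
    }).encode("utf-8")
    req = urllib.request.Request(
        url,
        data=payload,
        headers={
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
        },
        method="POST",
    )
    try:
        with urllib.request.urlopen(req, timeout=10):
            return True, "Valid"
    except urllib.error.HTTPError as e:
        if e.code == 401:
            return False, "Invalid API key"
        if e.code == 402:  # OpenRouter case: valid key, no credits
            return True, "Valid (no credits)"
        return False, f"HTTP error: {e.code}"
    except Exception as e:
        return False, str(e)


# Usage, with each provider's endpoint from the functions above:
#   _validate_chat_completions("https://api.groq.com/openai/v1/chat/completions", key, model)
#   _validate_chat_completions("https://api.openai.com/v1/chat/completions", key, model)
#   _validate_chat_completions("https://openrouter.ai/api/v1/chat/completions", key, model)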