codegraph_cli-2.0.0-py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries, and is provided for informational purposes only.
Files changed (43)
  1. codegraph_cli/__init__.py +4 -0
  2. codegraph_cli/agents.py +191 -0
  3. codegraph_cli/bug_detector.py +386 -0
  4. codegraph_cli/chat_agent.py +352 -0
  5. codegraph_cli/chat_session.py +220 -0
  6. codegraph_cli/cli.py +330 -0
  7. codegraph_cli/cli_chat.py +367 -0
  8. codegraph_cli/cli_diagnose.py +133 -0
  9. codegraph_cli/cli_refactor.py +230 -0
  10. codegraph_cli/cli_setup.py +470 -0
  11. codegraph_cli/cli_test.py +177 -0
  12. codegraph_cli/cli_v2.py +267 -0
  13. codegraph_cli/codegen_agent.py +265 -0
  14. codegraph_cli/config.py +31 -0
  15. codegraph_cli/config_manager.py +341 -0
  16. codegraph_cli/context_manager.py +500 -0
  17. codegraph_cli/crew_agents.py +123 -0
  18. codegraph_cli/crew_chat.py +159 -0
  19. codegraph_cli/crew_tools.py +497 -0
  20. codegraph_cli/diff_engine.py +265 -0
  21. codegraph_cli/embeddings.py +241 -0
  22. codegraph_cli/graph_export.py +144 -0
  23. codegraph_cli/llm.py +642 -0
  24. codegraph_cli/models.py +47 -0
  25. codegraph_cli/models_v2.py +185 -0
  26. codegraph_cli/orchestrator.py +49 -0
  27. codegraph_cli/parser.py +800 -0
  28. codegraph_cli/performance_analyzer.py +223 -0
  29. codegraph_cli/project_context.py +230 -0
  30. codegraph_cli/rag.py +200 -0
  31. codegraph_cli/refactor_agent.py +452 -0
  32. codegraph_cli/security_scanner.py +366 -0
  33. codegraph_cli/storage.py +390 -0
  34. codegraph_cli/templates/graph_interactive.html +257 -0
  35. codegraph_cli/testgen_agent.py +316 -0
  36. codegraph_cli/validation_engine.py +285 -0
  37. codegraph_cli/vector_store.py +293 -0
  38. codegraph_cli-2.0.0.dist-info/METADATA +318 -0
  39. codegraph_cli-2.0.0.dist-info/RECORD +43 -0
  40. codegraph_cli-2.0.0.dist-info/WHEEL +5 -0
  41. codegraph_cli-2.0.0.dist-info/entry_points.txt +2 -0
  42. codegraph_cli-2.0.0.dist-info/licenses/LICENSE +21 -0
  43. codegraph_cli-2.0.0.dist-info/top_level.txt +1 -0
codegraph_cli/llm.py ADDED
@@ -0,0 +1,642 @@
+ """Multi-provider LLM adapter supporting Ollama, Groq, OpenAI, Anthropic, Gemini, and OpenRouter."""
+
+ from __future__ import annotations
+
+ import json
+ import urllib.error
+ import urllib.request
+ from typing import Dict, List, Optional
+
+ from .config import LLM_API_KEY, LLM_ENDPOINT, LLM_MODEL, LLM_PROVIDER
+
+
+ class LLMProvider:
+     """Base class for LLM providers."""
+
+     def generate(self, prompt: str) -> Optional[str]:
+         """Generate a response from the LLM."""
+         raise NotImplementedError
+
+
+ class OllamaProvider(LLMProvider):
+     """Ollama local LLM provider."""
+
+     def __init__(self, model: str, endpoint: str):
+         self.model = model
+         self.endpoint = endpoint
+
+     def generate(self, prompt: str) -> Optional[str]:
+         payload = json.dumps({
+             "model": self.model,
+             "prompt": prompt,
+             "stream": False,
+             "options": {"temperature": 0.1},
+         }).encode("utf-8")
+
+         req = urllib.request.Request(
+             self.endpoint,
+             data=payload,
+             headers={"Content-Type": "application/json"},
+             method="POST",
+         )
+
+         try:
+             with urllib.request.urlopen(req, timeout=30) as resp:
+                 body = resp.read().decode("utf-8")
+                 parsed = json.loads(body)
+                 return parsed.get("response")
+         except (urllib.error.URLError, TimeoutError, json.JSONDecodeError):
+             return None
+
+
+ class GroqProvider(LLMProvider):
+     """Groq cloud API provider using curl (workaround for Python urllib timeout)."""
+
+     def __init__(self, model: str, api_key: str):
+         self.model = model
+         self.api_key = api_key
+         self.endpoint = "https://api.groq.com/openai/v1/chat/completions"
+
+     def generate(self, prompt: str) -> Optional[str]:
+         if not self.api_key:
+             return None
+
+         try:
+             import subprocess
+
+             # Create JSON payload
+             payload = json.dumps({
+                 "model": self.model,
+                 "messages": [{"role": "user", "content": prompt}],
+                 "temperature": 0.1,
+                 "max_tokens": 1024,
+             })
+
+             # Use curl instead of Python's HTTP stack (workaround for urllib timeouts)
+             result = subprocess.run(
+                 [
+                     "curl", "-s", "-X", "POST",
+                     self.endpoint,
+                     "-H", "Content-Type: application/json",
+                     "-H", f"Authorization: Bearer {self.api_key}",
+                     "-d", payload,
+                     "--max-time", "15"
+                 ],
+                 capture_output=True,
+                 text=True,
+                 timeout=20
+             )
+
+             if result.returncode == 0 and result.stdout:
+                 response = json.loads(result.stdout)
+                 return response["choices"][0]["message"]["content"]
+             return None
+         except Exception:
+             return None
+
+
+ class OpenAIProvider(LLMProvider):
+     """OpenAI API provider (also works with OpenRouter and other OpenAI-compatible APIs)."""
+
+     def __init__(self, model: str, api_key: str, endpoint: str = "https://api.openai.com/v1/chat/completions"):
+         self.model = model
+         self.api_key = api_key
+         self.endpoint = endpoint
+
+     def generate(self, prompt: str) -> Optional[str]:
+         if not self.api_key:
+             return None
+
+         payload = json.dumps({
+             "model": self.model,
+             "messages": [{"role": "user", "content": prompt}],
+             "temperature": 0.1,
+             "max_tokens": 1024,
+         }).encode("utf-8")
+
+         req = urllib.request.Request(
+             self.endpoint,
+             data=payload,
+             headers={
+                 "Content-Type": "application/json",
+                 "Authorization": f"Bearer {self.api_key}",
+             },
+             method="POST",
+         )
+
+         try:
+             with urllib.request.urlopen(req, timeout=30) as resp:
+                 body = resp.read().decode("utf-8")
+                 parsed = json.loads(body)
+                 return parsed["choices"][0]["message"]["content"]
+         except (urllib.error.URLError, TimeoutError, json.JSONDecodeError, KeyError):
+             return None
+
+
+ class AnthropicProvider(LLMProvider):
+     """Anthropic Claude API provider."""
+
+     def __init__(self, model: str, api_key: str):
+         self.model = model
+         self.api_key = api_key
+         self.endpoint = "https://api.anthropic.com/v1/messages"
+
+     def generate(self, prompt: str) -> Optional[str]:
+         if not self.api_key:
+             return None
+
+         payload = json.dumps({
+             "model": self.model,
+             "messages": [{"role": "user", "content": prompt}],
+             "max_tokens": 1024,
+             "temperature": 0.1,
+         }).encode("utf-8")
+
+         req = urllib.request.Request(
+             self.endpoint,
+             data=payload,
+             headers={
+                 "Content-Type": "application/json",
+                 "x-api-key": self.api_key,
+                 "anthropic-version": "2023-06-01",
+             },
+             method="POST",
+         )
+
+         try:
+             with urllib.request.urlopen(req, timeout=30) as resp:
+                 body = resp.read().decode("utf-8")
+                 parsed = json.loads(body)
+                 return parsed["content"][0]["text"]
+         except (urllib.error.URLError, TimeoutError, json.JSONDecodeError, KeyError):
+             return None
+
+
+ class GeminiProvider(LLMProvider):
+     """Google Gemini API provider."""
+
+     def __init__(self, model: str, api_key: str):
+         self.model = model
+         self.api_key = api_key
+         self.endpoint = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent"
+
+     def generate(self, prompt: str) -> Optional[str]:
+         if not self.api_key:
+             return None
+
+         payload = json.dumps({
+             "contents": [{"parts": [{"text": prompt}]}],
+             "generationConfig": {
+                 "temperature": 0.1,
+                 "maxOutputTokens": 1024,
+             },
+         }).encode("utf-8")
+
+         url = f"{self.endpoint}?key={self.api_key}"
+
+         req = urllib.request.Request(
+             url,
+             data=payload,
+             headers={"Content-Type": "application/json"},
+             method="POST",
+         )
+
+         try:
+             with urllib.request.urlopen(req, timeout=30) as resp:
+                 body = resp.read().decode("utf-8")
+                 parsed = json.loads(body)
+                 return parsed["candidates"][0]["content"]["parts"][0]["text"]
+         except (urllib.error.URLError, TimeoutError, json.JSONDecodeError, KeyError, IndexError):
+             return None
+
+
+ class OpenRouterProvider(LLMProvider):
+     """OpenRouter API provider (OpenAI-compatible, multi-model gateway)."""
+
+     def __init__(self, model: str, api_key: str, endpoint: str = "https://openrouter.ai/api/v1/chat/completions"):
+         self.model = model
+         self.api_key = api_key
+         self.endpoint = endpoint
+
+     def generate(self, prompt: str) -> Optional[str]:
+         if not self.api_key:
+             return None
+
+         payload = json.dumps({
+             "model": self.model,
+             "messages": [{"role": "user", "content": prompt}],
+             "temperature": 0.1,
+             "max_tokens": 4096,
+         }).encode("utf-8")
+
+         req = urllib.request.Request(
+             self.endpoint,
+             data=payload,
+             headers={
+                 "Content-Type": "application/json",
+                 "Authorization": f"Bearer {self.api_key}",
+             },
+             method="POST",
+         )
+
+         try:
+             with urllib.request.urlopen(req, timeout=60) as resp:
+                 body = resp.read().decode("utf-8")
+                 parsed = json.loads(body)
+                 return self._extract_response(parsed)
+         except (urllib.error.URLError, TimeoutError, json.JSONDecodeError, KeyError):
+             return None
+
+     @staticmethod
+     def _extract_response(parsed: dict) -> Optional[str]:
+         """Extract response text, handling reasoning models that return empty content."""
+         msg = parsed["choices"][0]["message"]
+         content = msg.get("content") or ""
+         if content.strip():
+             return content
+         # Reasoning models put output in 'reasoning' field
+         reasoning = msg.get("reasoning") or ""
+         if reasoning.strip():
+             return reasoning
+         # Check reasoning_details array
+         for detail in msg.get("reasoning_details") or []:
+             if isinstance(detail, dict) and detail.get("text", "").strip():
+                 return detail["text"]
+         return content or None
+
+
+ class LocalLLM:
+     """Multi-provider LLM manager with automatic fallback."""
+
+     def __init__(
+         self,
+         model: Optional[str] = None,
+         provider: Optional[str] = None,
+         api_key: Optional[str] = None,
+         endpoint: Optional[str] = None,
+     ):
+         """Initialize LLM with provider selection.
+
+         Args:
+             model: Model name (defaults to config or "qwen2.5-coder:7b")
+             provider: Provider name: "ollama", "groq", "openai", "anthropic", "gemini", or "openrouter" (defaults to config)
+             api_key: API key for cloud providers (defaults to config)
+             endpoint: Custom endpoint for Ollama or other OpenAI-compatible APIs (defaults to config)
+         """
+
+         self.provider_name = provider or LLM_PROVIDER
+         self.model = model or LLM_MODEL
+         self.api_key = api_key or LLM_API_KEY
+         self.endpoint = endpoint or LLM_ENDPOINT
+
+         self.provider = self._create_provider()
+
+     def _create_provider(self) -> LLMProvider:
+         """Create the appropriate provider based on configuration."""
+         provider_name = self.provider_name.lower()
+
+         if provider_name == "groq":
+             # Default Groq models: llama-3.3-70b-versatile, mixtral-8x7b-32768
+             model = self.model if self.model != "qwen2.5-coder:7b" else "llama-3.3-70b-versatile"
+             return GroqProvider(model, self.api_key)
+
+         elif provider_name == "openai":
+             # Default OpenAI models: gpt-4, gpt-3.5-turbo
+             # Also supports OpenRouter and other OpenAI-compatible APIs via custom endpoint
+             model = self.model if self.model != "qwen2.5-coder:7b" else "gpt-4"
+             endpoint = self.endpoint if self.endpoint else "https://api.openai.com/v1/chat/completions"
+             return OpenAIProvider(model, self.api_key, endpoint)
+
+         elif provider_name == "anthropic":
+             # Default Anthropic models: claude-3-5-sonnet-20241022, claude-3-opus-20240229
+             model = self.model if self.model != "qwen2.5-coder:7b" else "claude-3-5-sonnet-20241022"
+             return AnthropicProvider(model, self.api_key)
+
+         elif provider_name == "gemini":
+             # Google Gemini models: gemini-2.0-flash, gemini-1.5-pro, etc.
+             model = self.model if self.model != "qwen2.5-coder:7b" else "gemini-2.0-flash"
+             return GeminiProvider(model, self.api_key)
+
+         elif provider_name == "openrouter":
+             # OpenRouter: multi-model gateway with OpenAI-compatible API
+             model = self.model if self.model != "qwen2.5-coder:7b" else "google/gemini-2.0-flash-exp:free"
+             endpoint = self.endpoint if self.endpoint else "https://openrouter.ai/api/v1/chat/completions"
+             return OpenRouterProvider(model, self.api_key, endpoint)
+
+         else:  # Default to Ollama
+             return OllamaProvider(self.model, self.endpoint)
+
+     def explain(self, prompt: str) -> str:
+         """Generate explanation using configured provider with fallback."""
+         response = self.provider.generate(prompt)
+         if response:
+             return response
+         return self._fallback(prompt)
+
+     def chat_completion(
+         self,
+         messages: List[Dict[str, str]],
+         max_tokens: int = 2000,
+         temperature: float = 0.7
+     ) -> Optional[str]:
+         """Generate a response for a multi-turn chat conversation.
+
+         Args:
+             messages: List of message dicts with 'role' and 'content'
+             max_tokens: Maximum tokens in the response
+             temperature: Sampling temperature
+
+         Returns:
+             Assistant response, or None on failure
+         """
+         provider_name = self.provider_name.lower()
+
+         try:
+             if provider_name == "groq":
+                 return self._chat_groq(messages, max_tokens, temperature)
+             elif provider_name == "openai":
+                 return self._chat_openai(messages, max_tokens, temperature)
+             elif provider_name == "anthropic":
+                 return self._chat_anthropic(messages, max_tokens, temperature)
+             elif provider_name == "gemini":
+                 return self._chat_gemini(messages, max_tokens, temperature)
+             elif provider_name == "openrouter":
+                 return self._chat_openrouter(messages, max_tokens, temperature)
+             else:  # Ollama
+                 # This adapter drives Ollama through /api/generate, so flatten
+                 # the chat history into a single prompt
+                 prompt = self._messages_to_prompt(messages)
+                 return self.provider.generate(prompt)
+         except Exception:
+             return None
+
+     def _chat_groq(self, messages: List[Dict], max_tokens: int, temperature: float) -> Optional[str]:
+         """Chat completion for Groq."""
+         import subprocess
+
+         payload = json.dumps({
+             "model": self.model,
+             "messages": messages,
+             "temperature": temperature,
+             "max_tokens": max_tokens,
+         })
+
+         result = subprocess.run(
+             [
+                 "curl", "-s", "-X", "POST",
+                 self.provider.endpoint,
+                 "-H", "Content-Type: application/json",
+                 "-H", f"Authorization: Bearer {self.api_key}",
+                 "-d", payload,
+                 "--max-time", "30"
+             ],
+             capture_output=True,
+             text=True,
+             timeout=35
+         )
+
+         if result.returncode == 0 and result.stdout:
+             response = json.loads(result.stdout)
+             return response["choices"][0]["message"]["content"]
+         return None
+
+     def _chat_openai(self, messages: List[Dict], max_tokens: int, temperature: float) -> Optional[str]:
+         """Chat completion for OpenAI."""
+         payload = json.dumps({
+             "model": self.model,
+             "messages": messages,
+             "temperature": temperature,
+             "max_tokens": max_tokens,
+         }).encode("utf-8")
+
+         req = urllib.request.Request(
+             self.provider.endpoint,
+             data=payload,
+             headers={
+                 "Content-Type": "application/json",
+                 "Authorization": f"Bearer {self.api_key}",
+             },
+             method="POST",
+         )
+
+         with urllib.request.urlopen(req, timeout=30) as resp:
+             body = resp.read().decode("utf-8")
+             parsed = json.loads(body)
+             return parsed["choices"][0]["message"]["content"]
+
+     def _chat_anthropic(self, messages: List[Dict], max_tokens: int, temperature: float) -> Optional[str]:
+         """Chat completion for Anthropic."""
+         payload = json.dumps({
+             "model": self.model,
+             "messages": messages,
+             "max_tokens": max_tokens,
+             "temperature": temperature,
+         }).encode("utf-8")
+
+         req = urllib.request.Request(
+             self.provider.endpoint,
+             data=payload,
+             headers={
+                 "Content-Type": "application/json",
+                 "x-api-key": self.api_key,
+                 "anthropic-version": "2023-06-01",
+             },
+             method="POST",
+         )
+
+         with urllib.request.urlopen(req, timeout=30) as resp:
+             body = resp.read().decode("utf-8")
+             parsed = json.loads(body)
+             return parsed["content"][0]["text"]
+
+     def _chat_gemini(self, messages: List[Dict], max_tokens: int, temperature: float) -> Optional[str]:
+         """Chat completion for Gemini."""
+         # Convert messages to Gemini format
+         contents = []
+         system_instruction = None
+         for msg in messages:
+             role = msg["role"]
+             if role == "system":
+                 system_instruction = msg["content"]
+             elif role == "user":
+                 contents.append({"role": "user", "parts": [{"text": msg["content"]}]})
+             elif role == "assistant":
+                 contents.append({"role": "model", "parts": [{"text": msg["content"]}]})
+
+         body: dict = {
+             "contents": contents,
+             "generationConfig": {
+                 "temperature": temperature,
+                 "maxOutputTokens": max_tokens,
+             },
+         }
+         if system_instruction:
+             body["systemInstruction"] = {"parts": [{"text": system_instruction}]}
+
+         payload = json.dumps(body).encode("utf-8")
+         url = f"https://generativelanguage.googleapis.com/v1beta/models/{self.model}:generateContent?key={self.api_key}"
+
+         req = urllib.request.Request(
+             url,
+             data=payload,
+             headers={"Content-Type": "application/json"},
+             method="POST",
+         )
+
+         with urllib.request.urlopen(req, timeout=30) as resp:
+             body_resp = resp.read().decode("utf-8")
+             parsed = json.loads(body_resp)
+             return parsed["candidates"][0]["content"]["parts"][0]["text"]
+
+     def _chat_openrouter(self, messages: List[Dict], max_tokens: int, temperature: float) -> Optional[str]:
+         """Chat completion for OpenRouter (OpenAI-compatible)."""
+         payload = json.dumps({
+             "model": self.model,
+             "messages": messages,
+             "temperature": temperature,
+             "max_tokens": max_tokens,
+         }).encode("utf-8")
+
+         req = urllib.request.Request(
+             self.provider.endpoint,
+             data=payload,
+             headers={
+                 "Content-Type": "application/json",
+                 "Authorization": f"Bearer {self.api_key}",
+             },
+             method="POST",
+         )
+
+         with urllib.request.urlopen(req, timeout=60) as resp:
+             body_resp = resp.read().decode("utf-8")
+             parsed = json.loads(body_resp)
+             return OpenRouterProvider._extract_response(parsed)
+
+     def _messages_to_prompt(self, messages: List[Dict]) -> str:
+         """Convert chat messages to single prompt for Ollama."""
+         parts = []
+         for msg in messages:
+             role = msg["role"]
+             content = msg["content"]
+             if role == "system":
+                 parts.append(f"System: {content}")
+             elif role == "user":
+                 parts.append(f"User: {content}")
+             elif role == "assistant":
+                 parts.append(f"Assistant: {content}")
+         return "\n\n".join(parts)
+
+     def _fallback(self, prompt: str) -> str:
+         """Deterministic fallback when LLM is unavailable."""
+         head = prompt[:600].strip().replace("\n", " ")
+         provider_msg = f"LLM provider '{self.provider_name}' was unavailable"
+
+         return (
+             f"{provider_msg}; returning a deterministic fallback summary.\n"
+             "Context excerpt:\n"
+             f"{head}\n\n"
+             "Recommendation:\n"
+             "- Inspect the listed call/dependency chain\n"
+             "- Run unit tests around impacted functions\n"
+             "- Validate side effects at integration boundaries"
+         )
+
+
+ def create_crewai_llm(local_llm: LocalLLM):
+     """Create CrewAI-compatible LLM from LocalLLM configuration.
+
+     This factory function creates native CrewAI LLM instances based on the
+     configured provider. CrewAI has its own LLM handling and doesn't work
+     with custom adapter objects.
+
+     Args:
+         local_llm: LocalLLM instance with provider configuration
+
+     Returns:
+         CrewAI-compatible LLM instance
+
+     Raises:
+         ImportError: If crewai package is not installed
+     """
+     try:
+         from crewai import LLM
+     except ImportError:
+         raise ImportError(
+             "CrewAI is required for multi-agent chat. "
+             "Install it with: pip install crewai"
+         )
+
+     provider = local_llm.provider_name.lower()
+
+     if provider == "ollama":
+         # CrewAI supports Ollama with custom base_url
+         # Remove /api/generate suffix if present
+         base_url = local_llm.endpoint
+         if base_url.endswith("/api/generate"):
+             base_url = base_url.replace("/api/generate", "")
+
+         return LLM(
+             model=f"ollama/{local_llm.model}",
+             base_url=base_url,
+         )
+
+     elif provider == "groq":
+         # Groq uses OpenAI-compatible API
+         return LLM(
+             model=f"groq/{local_llm.model}",
+             api_key=local_llm.api_key,
+         )
+
+     elif provider == "openai":
+         # OpenAI or OpenAI-compatible APIs (OpenRouter, etc.)
+         # Check if using OpenRouter
+         if local_llm.endpoint and "openrouter.ai" in local_llm.endpoint:
+             # OpenRouter - use special prefix for LiteLLM routing
+             return LLM(
+                 model=f"openrouter/{local_llm.model}",
+                 api_key=local_llm.api_key,
+             )
+         elif local_llm.endpoint and local_llm.endpoint != "https://api.openai.com/v1/chat/completions":
+             # Other custom endpoint - use base_url
+             return LLM(
+                 model=local_llm.model,
+                 api_key=local_llm.api_key,
+                 base_url=local_llm.endpoint.replace("/chat/completions", "")
+             )
+         else:
+             # Standard OpenAI
+             return LLM(
+                 model=f"openai/{local_llm.model}",
+                 api_key=local_llm.api_key,
+             )
+
+     elif provider == "anthropic":
+         return LLM(
+             model=f"anthropic/{local_llm.model}",
+             api_key=local_llm.api_key,
+         )
+
+     elif provider == "gemini":
+         # CrewAI supports Gemini via LiteLLM
+         return LLM(
+             model=f"gemini/{local_llm.model}",
+             api_key=local_llm.api_key,
+         )
+
+     elif provider == "openrouter":
+         # OpenRouter uses OpenAI-compatible API
+         return LLM(
+             model=f"openrouter/{local_llm.model}",
+             api_key=local_llm.api_key,
+         )
+
+     else:
+         # Fallback to Ollama for unknown providers
+         base_url = local_llm.endpoint
+         if base_url.endswith("/api/generate"):
+             base_url = base_url.replace("/api/generate", "")
+
+         return LLM(
+             model=f"ollama/{local_llm.model}",
+             base_url=base_url,
+         )
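
Note on the adapter's call paths: LocalLLM.explain() handles a single prompt and degrades to a deterministic fallback summary when the provider is unreachable, while LocalLLM.chat_completion() handles multi-turn conversations and simply returns None on failure. A minimal usage sketch against the signatures above; the provider, model, key, and prompt values are illustrative, not package defaults:

    from codegraph_cli.llm import LocalLLM

    # Illustrative configuration; real defaults come from codegraph_cli.config
    llm = LocalLLM(provider="openrouter", model="google/gemini-2.0-flash-exp:free", api_key="sk-or-...")

    # Single-prompt path: degrades to LocalLLM._fallback() if the provider call fails
    print(llm.explain("Summarize the dependency chain around the parser."))

    # Multi-turn path: returns None rather than a fallback summary
    reply = llm.chat_completion(
        [
            {"role": "system", "content": "You are a code analysis assistant."},
            {"role": "user", "content": "Which modules write to disk?"},
        ],
        max_tokens=512,
        temperature=0.2,
    )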
codegraph_cli/models.py ADDED
@@ -0,0 +1,47 @@
+ """Core data models used by indexing, retrieval, and orchestration layers."""
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ from typing import Dict, List
+
+
+ @dataclass
+ class Node:
+     node_id: str
+     node_type: str
+     name: str
+     qualname: str
+     file_path: str
+     start_line: int
+     end_line: int
+     code: str
+     docstring: str = ""
+     metadata: Dict[str, str] = field(default_factory=dict)
+
+
+ @dataclass
+ class Edge:
+     src: str
+     dst: str
+     edge_type: str
+
+
+ @dataclass
+ class SearchResult:
+     node_id: str
+     score: float
+     node_type: str
+     qualname: str
+     file_path: str
+     start_line: int
+     end_line: int
+     snippet: str
+
+
+ @dataclass
+ class ImpactReport:
+     root: str
+     impacted: List[str]
+     explanation: str
+     ascii_graph: str
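
These dataclasses carry no behavior: the code graph is a set of Node records keyed by node_id plus Edge records that join nodes by those ids, with SearchResult and ImpactReport serving as query outputs. A small sketch of how the pieces compose; all values below are illustrative:

    from codegraph_cli.models import Edge, Node

    # node_id is the join key between Node and Edge
    caller = Node(
        node_id="pkg.mod.main", node_type="function", name="main",
        qualname="pkg.mod.main", file_path="pkg/mod.py",
        start_line=1, end_line=12, code="def main(): ...",
    )
    callee = Node(
        node_id="pkg.mod.helper", node_type="function", name="helper",
        qualname="pkg.mod.helper", file_path="pkg/mod.py",
        start_line=14, end_line=20, code="def helper(): ...",
    )
    edge = Edge(src=caller.node_id, dst=callee.node_id, edge_type="calls")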