cognify_code-0.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ai_code_assistant/__init__.py +14 -0
- ai_code_assistant/agent/__init__.py +63 -0
- ai_code_assistant/agent/code_agent.py +461 -0
- ai_code_assistant/agent/code_generator.py +388 -0
- ai_code_assistant/agent/code_reviewer.py +365 -0
- ai_code_assistant/agent/diff_engine.py +308 -0
- ai_code_assistant/agent/file_manager.py +300 -0
- ai_code_assistant/agent/intent_classifier.py +284 -0
- ai_code_assistant/chat/__init__.py +11 -0
- ai_code_assistant/chat/agent_session.py +156 -0
- ai_code_assistant/chat/session.py +165 -0
- ai_code_assistant/cli.py +1571 -0
- ai_code_assistant/config.py +149 -0
- ai_code_assistant/editor/__init__.py +8 -0
- ai_code_assistant/editor/diff_handler.py +270 -0
- ai_code_assistant/editor/file_editor.py +350 -0
- ai_code_assistant/editor/prompts.py +146 -0
- ai_code_assistant/generator/__init__.py +7 -0
- ai_code_assistant/generator/code_gen.py +265 -0
- ai_code_assistant/generator/prompts.py +114 -0
- ai_code_assistant/git/__init__.py +6 -0
- ai_code_assistant/git/commit_generator.py +130 -0
- ai_code_assistant/git/manager.py +203 -0
- ai_code_assistant/llm.py +111 -0
- ai_code_assistant/providers/__init__.py +23 -0
- ai_code_assistant/providers/base.py +124 -0
- ai_code_assistant/providers/cerebras.py +97 -0
- ai_code_assistant/providers/factory.py +148 -0
- ai_code_assistant/providers/google.py +103 -0
- ai_code_assistant/providers/groq.py +111 -0
- ai_code_assistant/providers/ollama.py +86 -0
- ai_code_assistant/providers/openai.py +114 -0
- ai_code_assistant/providers/openrouter.py +130 -0
- ai_code_assistant/py.typed +0 -0
- ai_code_assistant/refactor/__init__.py +20 -0
- ai_code_assistant/refactor/analyzer.py +189 -0
- ai_code_assistant/refactor/change_plan.py +172 -0
- ai_code_assistant/refactor/multi_file_editor.py +346 -0
- ai_code_assistant/refactor/prompts.py +175 -0
- ai_code_assistant/retrieval/__init__.py +19 -0
- ai_code_assistant/retrieval/chunker.py +215 -0
- ai_code_assistant/retrieval/indexer.py +236 -0
- ai_code_assistant/retrieval/search.py +239 -0
- ai_code_assistant/reviewer/__init__.py +7 -0
- ai_code_assistant/reviewer/analyzer.py +278 -0
- ai_code_assistant/reviewer/prompts.py +113 -0
- ai_code_assistant/utils/__init__.py +18 -0
- ai_code_assistant/utils/file_handler.py +155 -0
- ai_code_assistant/utils/formatters.py +259 -0
- cognify_code-0.2.0.dist-info/METADATA +383 -0
- cognify_code-0.2.0.dist-info/RECORD +55 -0
- cognify_code-0.2.0.dist-info/WHEEL +5 -0
- cognify_code-0.2.0.dist-info/entry_points.txt +3 -0
- cognify_code-0.2.0.dist-info/licenses/LICENSE +22 -0
- cognify_code-0.2.0.dist-info/top_level.txt +1 -0
ai_code_assistant/providers/factory.py
@@ -0,0 +1,148 @@
+"""Factory for creating LLM providers."""
+
+from typing import Dict, List, Optional, Type
+
+from ai_code_assistant.providers.base import (
+    BaseProvider,
+    ModelInfo,
+    ProviderConfig,
+    ProviderType,
+)
+from ai_code_assistant.providers.ollama import OllamaProvider
+from ai_code_assistant.providers.google import GoogleProvider
+from ai_code_assistant.providers.groq import GroqProvider
+from ai_code_assistant.providers.cerebras import CerebrasProvider
+from ai_code_assistant.providers.openrouter import OpenRouterProvider
+from ai_code_assistant.providers.openai import OpenAIProvider
+
+
+# Registry of all available providers
+PROVIDER_REGISTRY: Dict[ProviderType, Type[BaseProvider]] = {
+    ProviderType.OLLAMA: OllamaProvider,
+    ProviderType.GOOGLE: GoogleProvider,
+    ProviderType.GROQ: GroqProvider,
+    ProviderType.CEREBRAS: CerebrasProvider,
+    ProviderType.OPENROUTER: OpenRouterProvider,
+    ProviderType.OPENAI: OpenAIProvider,
+}
+
+
+def get_provider(config: ProviderConfig) -> BaseProvider:
+    """
+    Create a provider instance based on configuration.
+
+    Args:
+        config: Provider configuration
+
+    Returns:
+        Configured provider instance
+
+    Raises:
+        ValueError: If provider type is not supported
+    """
+    provider_class = PROVIDER_REGISTRY.get(config.provider)
+    if not provider_class:
+        supported = ", ".join(p.value for p in ProviderType)
+        raise ValueError(
+            f"Unsupported provider: {config.provider}. "
+            f"Supported providers: {supported}"
+        )
+
+    return provider_class(config)
+
+
+def get_available_providers() -> Dict[str, Dict]:
+    """
+    Get information about all available providers.
+
+    Returns:
+        Dictionary with provider info including name, models, and setup instructions
+    """
+    providers = {}
+    for provider_type, provider_class in PROVIDER_REGISTRY.items():
+        providers[provider_type.value] = {
+            "display_name": provider_class.display_name,
+            "requires_api_key": provider_class.requires_api_key,
+            "free_tier": provider_class.free_tier,
+            "default_model": provider_class.default_model,
+            "models": [m.model_dump() for m in provider_class.available_models],
+            "setup_instructions": provider_class.get_setup_instructions(),
+        }
+    return providers
+
+
+def get_all_models() -> List[ModelInfo]:
+    """
+    Get all available models across all providers.
+
+    Returns:
+        List of all available models
+    """
+    models = []
+    for provider_class in PROVIDER_REGISTRY.values():
+        models.extend(provider_class.available_models)
+    return models
+
+
+def get_free_models() -> List[ModelInfo]:
+    """
+    Get all free models across all providers.
+
+    Returns:
+        List of free models
+    """
+    return [m for m in get_all_models() if m.is_free]
+
+
+def get_provider_for_model(model_name: str) -> Optional[ProviderType]:
+    """
+    Find which provider a model belongs to.
+
+    Args:
+        model_name: Name of the model
+
+    Returns:
+        Provider type or None if not found
+    """
+    for provider_class in PROVIDER_REGISTRY.values():
+        for model in provider_class.available_models:
+            if model.name == model_name:
+                return provider_class.provider_type
+    return None
+
+
+def create_provider_from_env() -> BaseProvider:
+    """
+    Create a provider based on available environment variables.
+
+    Checks for API keys in order of preference and creates the appropriate provider.
+
+    Returns:
+        Configured provider instance
+    """
+    import os
+
+    # Check providers in order of preference (free tiers first)
+    env_checks = [
+        ("GROQ_API_KEY", ProviderType.GROQ, GroqProvider.default_model),
+        ("GOOGLE_API_KEY", ProviderType.GOOGLE, GoogleProvider.default_model),
+        ("CEREBRAS_API_KEY", ProviderType.CEREBRAS, CerebrasProvider.default_model),
+        ("OPENROUTER_API_KEY", ProviderType.OPENROUTER, OpenRouterProvider.default_model),
+        ("OPENAI_API_KEY", ProviderType.OPENAI, OpenAIProvider.default_model),
+    ]
+
+    for env_var, provider_type, default_model in env_checks:
+        if os.getenv(env_var):
+            config = ProviderConfig(
+                provider=provider_type,
+                model=default_model,
+                api_key=os.getenv(env_var),
+            )
+            return get_provider(config)
+
+    # Default to Ollama (no API key required)
+    config = ProviderConfig(
+        provider=ProviderType.OLLAMA,
+        model=OllamaProvider.default_model,
+    )
+    return get_provider(config)
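The factory above is the package's single entry point for provider construction. A minimal usage sketch, assuming ProviderConfig (defined in providers/base.py, which is not expanded in this diff) accepts the provider, model, and api_key fields the factory itself passes:

    from ai_code_assistant.providers.base import ProviderConfig, ProviderType
    from ai_code_assistant.providers.factory import (
        create_provider_from_env,
        get_free_models,
        get_provider,
    )

    # Explicit configuration: pick a provider and model by hand.
    config = ProviderConfig(
        provider=ProviderType.GROQ,
        model="llama-3.3-70b-versatile",
        api_key="gsk-...",  # hypothetical placeholder key
    )
    provider = get_provider(config)
    ok, message = provider.validate_config()
    if not ok:
        raise SystemExit(message)

    # Or let the factory inspect the environment: free-tier keys are
    # checked first, with local Ollama as the no-key fallback.
    provider = create_provider_from_env()

    # Enumerate every zero-cost model across the registry.
    for model in get_free_models():
        print(model.provider.value, model.name)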
ai_code_assistant/providers/google.py
@@ -0,0 +1,103 @@
+"""Google AI Studio (Gemini) provider."""
+
+import os
+from langchain_core.language_models import BaseChatModel
+
+from ai_code_assistant.providers.base import (
+    BaseProvider,
+    ModelInfo,
+    ProviderConfig,
+    ProviderType,
+)
+
+
+class GoogleProvider(BaseProvider):
+    """Provider for Google AI Studio / Gemini."""
+
+    provider_type = ProviderType.GOOGLE
+    display_name = "Google AI Studio (Gemini)"
+    requires_api_key = True
+    default_model = "gemini-2.0-flash"
+    free_tier = True
+
+    available_models = [
+        ModelInfo(
+            name="gemini-2.0-flash",
+            provider=ProviderType.GOOGLE,
+            description="Gemini 2.0 Flash - Fast and capable, great free tier",
+            context_window=1000000,
+            is_free=True,
+        ),
+        ModelInfo(
+            name="gemini-2.0-flash-lite",
+            provider=ProviderType.GOOGLE,
+            description="Gemini 2.0 Flash Lite - Faster, lighter version",
+            context_window=1000000,
+            is_free=True,
+        ),
+        ModelInfo(
+            name="gemini-1.5-pro",
+            provider=ProviderType.GOOGLE,
+            description="Gemini 1.5 Pro - Most capable Gemini model",
+            context_window=2000000,
+            is_free=True,
+        ),
+        ModelInfo(
+            name="gemini-1.5-flash",
+            provider=ProviderType.GOOGLE,
+            description="Gemini 1.5 Flash - Fast and efficient",
+            context_window=1000000,
+            is_free=True,
+        ),
+    ]
+
+    def _create_llm(self) -> BaseChatModel:
+        """Create Google Gemini LLM instance."""
+        try:
+            from langchain_google_genai import ChatGoogleGenerativeAI
+        except ImportError:
+            raise ImportError(
+                "langchain-google-genai is required for Google provider. "
+                "Install with: pip install langchain-google-genai"
+            )
+
+        api_key = self.config.api_key or os.getenv("GOOGLE_API_KEY")
+        if not api_key:
+            raise ValueError(
+                "Google API key is required. Set GOOGLE_API_KEY environment variable "
+                "or provide api_key in config. Get your key at: https://aistudio.google.com/apikey"
+            )
+
+        return ChatGoogleGenerativeAI(
+            model=self.config.model,
+            google_api_key=api_key,
+            temperature=self.config.temperature,
+            max_output_tokens=self.config.max_tokens,
+            timeout=self.config.timeout,
+        )
+
+    def validate_config(self) -> tuple[bool, str]:
+        """Validate Google configuration."""
+        api_key = self.config.api_key or os.getenv("GOOGLE_API_KEY")
+        if not api_key:
+            return False, "GOOGLE_API_KEY environment variable or api_key config is required"
+        if not self.config.model:
+            return False, "Model name is required"
+        return True, ""
+
+    @classmethod
+    def get_setup_instructions(cls) -> str:
+        """Get Google AI Studio setup instructions."""
+        return """
+Google AI Studio Setup Instructions:
+1. Go to https://aistudio.google.com/apikey
+2. Create a new API key
+3. Set environment variable: export GOOGLE_API_KEY="your-key"
+4. Or add to config.yaml:
+   llm:
+     provider: google
+     api_key: "your-key"
+     model: gemini-2.0-flash
+
+Free tier: 15 requests/minute, 1M tokens/day
+"""
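Because every ModelInfo records its owning provider, a model name alone is enough to build a working configuration. A short sketch combining this file with the factory; the key value is a hypothetical placeholder, and when api_key is omitted the provider falls back to GOOGLE_API_KEY as shown above:

    import os

    from ai_code_assistant.providers.base import ProviderConfig
    from ai_code_assistant.providers.factory import get_provider, get_provider_for_model

    os.environ.setdefault("GOOGLE_API_KEY", "AIza...")  # hypothetical placeholder

    # Resolve which provider serves this model, then build it.
    provider_type = get_provider_for_model("gemini-2.0-flash")
    assert provider_type is not None
    provider = get_provider(ProviderConfig(provider=provider_type, model="gemini-2.0-flash"))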
ai_code_assistant/providers/groq.py
@@ -0,0 +1,111 @@
+"""Groq provider for fast LLM inference."""
+
+import os
+from langchain_core.language_models import BaseChatModel
+
+from ai_code_assistant.providers.base import (
+    BaseProvider,
+    ModelInfo,
+    ProviderConfig,
+    ProviderType,
+)
+
+
+class GroqProvider(BaseProvider):
+    """Provider for Groq - extremely fast inference."""
+
+    provider_type = ProviderType.GROQ
+    display_name = "Groq (Fast Inference)"
+    requires_api_key = True
+    default_model = "llama-3.3-70b-versatile"
+    free_tier = True
+
+    available_models = [
+        ModelInfo(
+            name="llama-3.3-70b-versatile",
+            provider=ProviderType.GROQ,
+            description="Llama 3.3 70B - Most capable, 1000 req/day free",
+            context_window=128000,
+            is_free=True,
+        ),
+        ModelInfo(
+            name="llama-3.1-8b-instant",
+            provider=ProviderType.GROQ,
+            description="Llama 3.1 8B - Fast, 14400 req/day free",
+            context_window=128000,
+            is_free=True,
+        ),
+        ModelInfo(
+            name="llama3-70b-8192",
+            provider=ProviderType.GROQ,
+            description="Llama 3 70B - Powerful general model",
+            context_window=8192,
+            is_free=True,
+        ),
+        ModelInfo(
+            name="mixtral-8x7b-32768",
+            provider=ProviderType.GROQ,
+            description="Mixtral 8x7B - Great for code",
+            context_window=32768,
+            is_free=True,
+        ),
+        ModelInfo(
+            name="gemma2-9b-it",
+            provider=ProviderType.GROQ,
+            description="Gemma 2 9B - Google's efficient model",
+            context_window=8192,
+            is_free=True,
+        ),
+    ]
+
+    def _create_llm(self) -> BaseChatModel:
+        """Create Groq LLM instance."""
+        try:
+            from langchain_groq import ChatGroq
+        except ImportError:
+            raise ImportError(
+                "langchain-groq is required for Groq provider. "
+                "Install with: pip install langchain-groq"
+            )
+
+        api_key = self.config.api_key or os.getenv("GROQ_API_KEY")
+        if not api_key:
+            raise ValueError(
+                "Groq API key is required. Set GROQ_API_KEY environment variable "
+                "or provide api_key in config. Get your key at: https://console.groq.com/keys"
+            )
+
+        return ChatGroq(
+            model=self.config.model,
+            api_key=api_key,
+            temperature=self.config.temperature,
+            max_tokens=self.config.max_tokens,
+            timeout=self.config.timeout,
+        )
+
+    def validate_config(self) -> tuple[bool, str]:
+        """Validate Groq configuration."""
+        api_key = self.config.api_key or os.getenv("GROQ_API_KEY")
+        if not api_key:
+            return False, "GROQ_API_KEY environment variable or api_key config is required"
+        if not self.config.model:
+            return False, "Model name is required"
+        return True, ""
+
+    @classmethod
+    def get_setup_instructions(cls) -> str:
+        """Get Groq setup instructions."""
+        return """
+Groq Setup Instructions:
+1. Go to https://console.groq.com/keys
+2. Create a free account and generate an API key
+3. Set environment variable: export GROQ_API_KEY="your-key"
+4. Or add to config.yaml:
+   llm:
+     provider: groq
+     api_key: "your-key"
+     model: llama-3.3-70b-versatile
+
+Free tier: Up to 14,400 requests/day (model dependent)
+Note: Groq is EXTREMELY fast - responses in milliseconds!
+"""
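Each provider class carries its model catalogue and setup text as class-level data, so a CLI or model picker can surface them without instantiating anything or holding an API key. A sketch against GroqProvider's class attributes:

    from ai_code_assistant.providers.groq import GroqProvider

    # Class attributes are readable without an instance or API key.
    print(GroqProvider.display_name)   # Groq (Fast Inference)
    print(GroqProvider.default_model)  # llama-3.3-70b-versatile
    for model in GroqProvider.available_models:
        print(f"{model.name}: {model.context_window} tokens, free={model.is_free}")
    print(GroqProvider.get_setup_instructions())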
ai_code_assistant/providers/ollama.py
@@ -0,0 +1,86 @@
+"""Ollama provider for local LLM inference."""
+
+from langchain_core.language_models import BaseChatModel
+from langchain_ollama import ChatOllama
+
+from ai_code_assistant.providers.base import (
+    BaseProvider,
+    ModelInfo,
+    ProviderConfig,
+    ProviderType,
+)
+
+
+class OllamaProvider(BaseProvider):
+    """Provider for Ollama local LLM inference."""
+
+    provider_type = ProviderType.OLLAMA
+    display_name = "Ollama (Local)"
+    requires_api_key = False
+    default_model = "deepseek-coder:6.7b"
+    free_tier = True
+
+    available_models = [
+        ModelInfo(
+            name="deepseek-coder:6.7b",
+            provider=ProviderType.OLLAMA,
+            description="DeepSeek Coder 6.7B - Great for code tasks",
+            context_window=16384,
+            is_free=True,
+        ),
+        ModelInfo(
+            name="codellama:7b",
+            provider=ProviderType.OLLAMA,
+            description="Code Llama 7B - Meta's code model",
+            context_window=16384,
+            is_free=True,
+        ),
+        ModelInfo(
+            name="llama3.1:8b",
+            provider=ProviderType.OLLAMA,
+            description="Llama 3.1 8B - General purpose",
+            context_window=128000,
+            is_free=True,
+        ),
+        ModelInfo(
+            name="qwen2.5-coder:7b",
+            provider=ProviderType.OLLAMA,
+            description="Qwen 2.5 Coder 7B - Excellent for code",
+            context_window=32768,
+            is_free=True,
+        ),
+        ModelInfo(
+            name="mistral:7b",
+            provider=ProviderType.OLLAMA,
+            description="Mistral 7B - Fast and capable",
+            context_window=32768,
+            is_free=True,
+        ),
+    ]
+
+    def _create_llm(self) -> BaseChatModel:
+        """Create Ollama LLM instance."""
+        return ChatOllama(
+            model=self.config.model,
+            base_url=self.config.base_url or "http://localhost:11434",
+            temperature=self.config.temperature,
+            num_predict=self.config.max_tokens,
+            timeout=self.config.timeout,
+        )
+
+    def validate_config(self) -> tuple[bool, str]:
+        """Validate Ollama configuration."""
+        if not self.config.model:
+            return False, "Model name is required"
+        return True, ""
+
+    @classmethod
+    def get_setup_instructions(cls) -> str:
+        """Get Ollama setup instructions."""
+        return """
+Ollama Setup Instructions:
+1. Install Ollama: https://ollama.ai
+2. Pull a model: ollama pull deepseek-coder:6.7b
+3. Start Ollama: ollama serve
+4. No API key required - runs completely locally!
+"""
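Ollama is the only provider here with requires_api_key = False, which is why create_provider_from_env falls back to it when no key is found. A sketch of the fully local path, assuming an Ollama daemon is already serving a pulled model on the default port:

    from ai_code_assistant.providers.base import ProviderConfig, ProviderType
    from ai_code_assistant.providers.ollama import OllamaProvider

    config = ProviderConfig(
        provider=ProviderType.OLLAMA,
        model="qwen2.5-coder:7b",
        base_url="http://localhost:11434",  # the provider's default when omitted
    )
    # Validation only checks that a model name is set; no key needed.
    ok, message = OllamaProvider(config).validate_config()
    print(ok, message)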
ai_code_assistant/providers/openai.py
@@ -0,0 +1,114 @@
+"""OpenAI provider for GPT models."""
+
+import os
+from langchain_core.language_models import BaseChatModel
+
+from ai_code_assistant.providers.base import (
+    BaseProvider,
+    ModelInfo,
+    ProviderConfig,
+    ProviderType,
+)
+
+
+class OpenAIProvider(BaseProvider):
+    """Provider for OpenAI GPT models."""
+
+    provider_type = ProviderType.OPENAI
+    display_name = "OpenAI"
+    requires_api_key = True
+    default_model = "gpt-4o-mini"
+    free_tier = False
+
+    available_models = [
+        ModelInfo(
+            name="gpt-4o-mini",
+            provider=ProviderType.OPENAI,
+            description="GPT-4o Mini - Fast and affordable",
+            context_window=128000,
+            is_free=False,
+        ),
+        ModelInfo(
+            name="gpt-4o",
+            provider=ProviderType.OPENAI,
+            description="GPT-4o - Most capable",
+            context_window=128000,
+            is_free=False,
+        ),
+        ModelInfo(
+            name="gpt-4-turbo",
+            provider=ProviderType.OPENAI,
+            description="GPT-4 Turbo - Fast GPT-4",
+            context_window=128000,
+            is_free=False,
+        ),
+        ModelInfo(
+            name="gpt-3.5-turbo",
+            provider=ProviderType.OPENAI,
+            description="GPT-3.5 Turbo - Fast and cheap",
+            context_window=16385,
+            is_free=False,
+        ),
+        ModelInfo(
+            name="o1-mini",
+            provider=ProviderType.OPENAI,
+            description="o1-mini - Reasoning model",
+            context_window=128000,
+            is_free=False,
+        ),
+    ]
+
+    def _create_llm(self) -> BaseChatModel:
+        """Create OpenAI LLM instance."""
+        try:
+            from langchain_openai import ChatOpenAI
+        except ImportError:
+            raise ImportError(
+                "langchain-openai is required for OpenAI provider. "
+                "Install with: pip install langchain-openai"
+            )
+
+        api_key = self.config.api_key or os.getenv("OPENAI_API_KEY")
+        if not api_key:
+            raise ValueError(
+                "OpenAI API key is required. Set OPENAI_API_KEY environment variable "
+                "or provide api_key in config. Get your key at: https://platform.openai.com/api-keys"
+            )
+
+        return ChatOpenAI(
+            model=self.config.model,
+            api_key=api_key,
+            base_url=self.config.base_url,  # Allow custom base URL for Azure, etc.
+            temperature=self.config.temperature,
+            max_tokens=self.config.max_tokens,
+            timeout=self.config.timeout,
+        )
+
+    def validate_config(self) -> tuple[bool, str]:
+        """Validate OpenAI configuration."""
+        api_key = self.config.api_key or os.getenv("OPENAI_API_KEY")
+        if not api_key:
+            return False, "OPENAI_API_KEY environment variable or api_key config is required"
+        if not self.config.model:
+            return False, "Model name is required"
+        return True, ""
+
+    @classmethod
+    def get_setup_instructions(cls) -> str:
+        """Get OpenAI setup instructions."""
+        return """
+OpenAI Setup Instructions:
+1. Go to https://platform.openai.com/api-keys
+2. Create an API key (requires payment method)
+3. Set environment variable: export OPENAI_API_KEY="your-key"
+4. Or add to config.yaml:
+   llm:
+     provider: openai
+     api_key: "your-key"
+     model: gpt-4o-mini
+
+Note: OpenAI requires a paid account. Consider using free alternatives:
+- Groq (free tier with Llama models)
+- Google AI Studio (free tier with Gemini)
+- OpenRouter (free tier with various models)
+"""
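Unlike the other hosted providers, OpenAIProvider forwards config.base_url to ChatOpenAI, so any OpenAI-compatible endpoint can be targeted. A sketch assuming a self-hosted gateway; both the URL and the key are hypothetical placeholders:

    from ai_code_assistant.providers.base import ProviderConfig, ProviderType
    from ai_code_assistant.providers.factory import get_provider

    config = ProviderConfig(
        provider=ProviderType.OPENAI,
        model="gpt-4o-mini",
        api_key="sk-...",  # hypothetical placeholder
        base_url="https://llm.example.internal/v1",  # hypothetical OpenAI-compatible gateway
    )
    provider = get_provider(config)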
ai_code_assistant/providers/openrouter.py
@@ -0,0 +1,130 @@
+"""OpenRouter provider for access to multiple LLM providers."""
+
+import os
+from langchain_core.language_models import BaseChatModel
+
+from ai_code_assistant.providers.base import (
+    BaseProvider,
+    ModelInfo,
+    ProviderConfig,
+    ProviderType,
+)
+
+
+class OpenRouterProvider(BaseProvider):
+    """Provider for OpenRouter - access to multiple LLM providers."""
+
+    provider_type = ProviderType.OPENROUTER
+    display_name = "OpenRouter (Multi-Provider)"
+    requires_api_key = True
+    default_model = "meta-llama/llama-3.3-70b-instruct:free"
+    free_tier = True
+
+    available_models = [
+        ModelInfo(
+            name="meta-llama/llama-3.3-70b-instruct:free",
+            provider=ProviderType.OPENROUTER,
+            description="Llama 3.3 70B - Free tier",
+            context_window=128000,
+            is_free=True,
+        ),
+        ModelInfo(
+            name="google/gemma-3-27b-it:free",
+            provider=ProviderType.OPENROUTER,
+            description="Gemma 3 27B - Free tier",
+            context_window=8192,
+            is_free=True,
+        ),
+        ModelInfo(
+            name="mistralai/mistral-small-3.1-24b-instruct:free",
+            provider=ProviderType.OPENROUTER,
+            description="Mistral Small 3.1 - Free tier",
+            context_window=32768,
+            is_free=True,
+        ),
+        ModelInfo(
+            name="qwen/qwen3-4b:free",
+            provider=ProviderType.OPENROUTER,
+            description="Qwen 3 4B - Free tier, fast",
+            context_window=32768,
+            is_free=True,
+        ),
+        ModelInfo(
+            name="deepseek/deepseek-r1:free",
+            provider=ProviderType.OPENROUTER,
+            description="DeepSeek R1 - Reasoning model, free",
+            context_window=64000,
+            is_free=True,
+        ),
+        ModelInfo(
+            name="anthropic/claude-3.5-sonnet",
+            provider=ProviderType.OPENROUTER,
+            description="Claude 3.5 Sonnet - Paid, excellent for code",
+            context_window=200000,
+            is_free=False,
+        ),
+        ModelInfo(
+            name="openai/gpt-4o",
+            provider=ProviderType.OPENROUTER,
+            description="GPT-4o - Paid, OpenAI's best",
+            context_window=128000,
+            is_free=False,
+        ),
+    ]
+
+    def _create_llm(self) -> BaseChatModel:
+        """Create OpenRouter LLM instance using OpenAI-compatible API."""
+        try:
+            from langchain_openai import ChatOpenAI
+        except ImportError:
+            raise ImportError(
+                "langchain-openai is required for OpenRouter provider. "
+                "Install with: pip install langchain-openai"
+            )
+
+        api_key = self.config.api_key or os.getenv("OPENROUTER_API_KEY")
+        if not api_key:
+            raise ValueError(
+                "OpenRouter API key is required. Set OPENROUTER_API_KEY environment variable "
+                "or provide api_key in config. Get your key at: https://openrouter.ai/keys"
+            )
+
+        return ChatOpenAI(
+            model=self.config.model,
+            api_key=api_key,
+            base_url="https://openrouter.ai/api/v1",
+            temperature=self.config.temperature,
+            max_tokens=self.config.max_tokens,
+            timeout=self.config.timeout,
+            default_headers={
+                "HTTP-Referer": "https://github.com/akkssy/cognify-ai",
+                "X-Title": "Cognify AI",
+            },
+        )
+
+    def validate_config(self) -> tuple[bool, str]:
+        """Validate OpenRouter configuration."""
+        api_key = self.config.api_key or os.getenv("OPENROUTER_API_KEY")
+        if not api_key:
+            return False, "OPENROUTER_API_KEY environment variable or api_key config is required"
+        if not self.config.model:
+            return False, "Model name is required"
+        return True, ""
+
+    @classmethod
+    def get_setup_instructions(cls) -> str:
+        """Get OpenRouter setup instructions."""
+        return """
+OpenRouter Setup Instructions:
+1. Go to https://openrouter.ai/keys
+2. Create a free account and generate an API key
+3. Set environment variable: export OPENROUTER_API_KEY="your-key"
+4. Or add to config.yaml:
+   llm:
+     provider: openrouter
+     api_key: "your-key"
+     model: meta-llama/llama-3.3-70b-instruct:free
+
+Free tier: 50 requests/day on free models (models ending with :free)
+Paid: Access to GPT-4, Claude, and many more models
+"""
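OpenRouter reuses the OpenAI client against its own base URL and attaches attribution headers to every request. A sketch pinning one of the :free models listed above; the key is a hypothetical placeholder:

    from ai_code_assistant.providers.base import ProviderConfig, ProviderType
    from ai_code_assistant.providers.factory import get_provider

    config = ProviderConfig(
        provider=ProviderType.OPENROUTER,
        model="deepseek/deepseek-r1:free",
        api_key="sk-or-...",  # hypothetical placeholder
    )
    provider = get_provider(config)
    ok, message = provider.validate_config()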
ai_code_assistant/py.typed: file without changes