gac 3.10.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of gac might be problematic. Click here for more details.
- gac/__init__.py +15 -0
- gac/__version__.py +3 -0
- gac/ai.py +109 -0
- gac/ai_utils.py +246 -0
- gac/auth_cli.py +214 -0
- gac/cli.py +218 -0
- gac/commit_executor.py +62 -0
- gac/config.py +125 -0
- gac/config_cli.py +95 -0
- gac/constants.py +328 -0
- gac/diff_cli.py +159 -0
- gac/errors.py +231 -0
- gac/git.py +372 -0
- gac/git_state_validator.py +184 -0
- gac/grouped_commit_workflow.py +423 -0
- gac/init_cli.py +70 -0
- gac/interactive_mode.py +182 -0
- gac/language_cli.py +377 -0
- gac/main.py +476 -0
- gac/model_cli.py +430 -0
- gac/oauth/__init__.py +27 -0
- gac/oauth/claude_code.py +464 -0
- gac/oauth/qwen_oauth.py +327 -0
- gac/oauth/token_store.py +81 -0
- gac/preprocess.py +511 -0
- gac/prompt.py +878 -0
- gac/prompt_builder.py +88 -0
- gac/providers/README.md +437 -0
- gac/providers/__init__.py +80 -0
- gac/providers/anthropic.py +17 -0
- gac/providers/azure_openai.py +57 -0
- gac/providers/base.py +329 -0
- gac/providers/cerebras.py +15 -0
- gac/providers/chutes.py +25 -0
- gac/providers/claude_code.py +79 -0
- gac/providers/custom_anthropic.py +103 -0
- gac/providers/custom_openai.py +44 -0
- gac/providers/deepseek.py +15 -0
- gac/providers/error_handler.py +139 -0
- gac/providers/fireworks.py +15 -0
- gac/providers/gemini.py +90 -0
- gac/providers/groq.py +15 -0
- gac/providers/kimi_coding.py +27 -0
- gac/providers/lmstudio.py +80 -0
- gac/providers/minimax.py +15 -0
- gac/providers/mistral.py +15 -0
- gac/providers/moonshot.py +15 -0
- gac/providers/ollama.py +73 -0
- gac/providers/openai.py +32 -0
- gac/providers/openrouter.py +21 -0
- gac/providers/protocol.py +71 -0
- gac/providers/qwen.py +64 -0
- gac/providers/registry.py +58 -0
- gac/providers/replicate.py +156 -0
- gac/providers/streamlake.py +31 -0
- gac/providers/synthetic.py +40 -0
- gac/providers/together.py +15 -0
- gac/providers/zai.py +31 -0
- gac/py.typed +0 -0
- gac/security.py +293 -0
- gac/utils.py +401 -0
- gac/workflow_utils.py +217 -0
- gac-3.10.3.dist-info/METADATA +283 -0
- gac-3.10.3.dist-info/RECORD +67 -0
- gac-3.10.3.dist-info/WHEEL +4 -0
- gac-3.10.3.dist-info/entry_points.txt +2 -0
- gac-3.10.3.dist-info/licenses/LICENSE +16 -0
gac/prompt_builder.py
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Prompt building logic for gac."""
|
|
3
|
+
|
|
4
|
+
from typing import NamedTuple
|
|
5
|
+
|
|
6
|
+
from rich.console import Console
|
|
7
|
+
from rich.panel import Panel
|
|
8
|
+
|
|
9
|
+
from gac.config import GACConfig
|
|
10
|
+
from gac.git_state_validator import GitState
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class PromptBundle(NamedTuple):
    """Bundle of system and user prompts."""

    # Role/format instructions handed to the model as the system message.
    system_prompt: str
    # The concrete request built from the git state (status, diff, hints).
    user_prompt: str
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class PromptBuilder:
    """Builds prompts for AI commit message generation."""

    def __init__(self, config: GACConfig):
        self.config = config

    def build_prompts(
        self,
        git_state: GitState,
        group: bool = False,
        one_liner: bool = False,
        hint: str = "",
        infer_scope: bool = False,
        verbose: bool = False,
        language: str | None = None,
    ) -> PromptBundle:
        """Build prompts from git state.

        Args:
            git_state: Snapshot providing ``status``, ``processed_diff`` and
                ``diff_stat``.
            group: Build the grouped-commit variant of the prompt.
            one_liner: Request a single-line commit message.
            hint: Optional free-form hint forwarded to the prompt template.
            infer_scope: Ask the model to infer a conventional-commit scope.
            verbose: Request a more detailed commit body.
            language: Target language; falls back to the configured value.

        Returns:
            A PromptBundle with the system and user prompts.
        """
        # Imported lazily to avoid a module-level import cycle with gac.prompt.
        from gac.prompt import build_group_prompt, build_prompt

        # Resolve the language from config only when the caller did not supply one.
        if language is None:
            configured_language = self.config.get("language")
            language = configured_language if isinstance(configured_language, str) else None

        # Config values are untyped; coerce defensively to the expected types.
        raw_translate = self.config.get("translate_prefixes")
        translate_prefixes: bool = raw_translate if isinstance(raw_translate, bool) else False

        raw_template_path = self.config.get("system_prompt_path")
        system_template_path: str | None = raw_template_path if isinstance(raw_template_path, str) else None

        # Both builders share the same keyword interface, so select one and
        # make a single call instead of duplicating the argument list.
        prompt_fn = build_group_prompt if group else build_prompt
        system_prompt, user_prompt = prompt_fn(
            status=git_state.status,
            processed_diff=git_state.processed_diff,
            diff_stat=git_state.diff_stat,
            one_liner=one_liner,
            hint=hint,
            infer_scope=infer_scope,
            verbose=verbose,
            system_template_path=system_template_path,
            language=language,
            translate_prefixes=translate_prefixes,
        )

        return PromptBundle(system_prompt=system_prompt, user_prompt=user_prompt)

    def display_prompts(self, system_prompt: str, user_prompt: str) -> None:
        """Display prompts for debugging purposes."""
        combined = f"SYSTEM PROMPT:\n{system_prompt}\n\nUSER PROMPT:\n{user_prompt}"
        Console().print(Panel(combined, title="Prompt for LLM", border_style="bright_blue"))
|
gac/providers/README.md
ADDED
|
@@ -0,0 +1,437 @@
|
|
|
1
|
+
# AI Provider Architecture
|
|
2
|
+
|
|
3
|
+
This directory contains AI provider implementations for GAC (Git Auto Commit). All providers follow a class-based architecture with shared base classes to reduce code duplication and improve maintainability.
|
|
4
|
+
|
|
5
|
+
## Architecture Overview
|
|
6
|
+
|
|
7
|
+
```text
|
|
8
|
+
ProviderProtocol (Protocol - type contract)
|
|
9
|
+
↓
|
|
10
|
+
BaseConfiguredProvider (ABC - core logic)
|
|
11
|
+
↓
|
|
12
|
+
├── OpenAICompatibleProvider (OpenAI-style APIs)
|
|
13
|
+
├── AnthropicCompatibleProvider (Anthropic-style APIs)
|
|
14
|
+
└── GenericHTTPProvider (Fully custom implementations)
|
|
15
|
+
↓
|
|
16
|
+
Concrete Providers (e.g., OpenAIProvider, GeminiProvider)
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
## Core Components
|
|
20
|
+
|
|
21
|
+
### BaseConfiguredProvider
|
|
22
|
+
|
|
23
|
+
Abstract base class implementing the template method pattern. All providers inherit from this class.
|
|
24
|
+
|
|
25
|
+
**Key Features:**
|
|
26
|
+
|
|
27
|
+
- Standardized HTTP handling with httpx
|
|
28
|
+
- Common error handling patterns
|
|
29
|
+
- Flexible configuration via ProviderConfig
|
|
30
|
+
- Template methods for customization:
|
|
31
|
+
- `_get_api_key()` - Load API key from environment
|
|
32
|
+
- `_build_headers()` - Build request headers
|
|
33
|
+
- `_build_request_body()` - Build request body
|
|
34
|
+
- `_get_api_url()` - Get API endpoint URL
|
|
35
|
+
- `_parse_response()` - Parse API response
|
|
36
|
+
- `_make_http_request()` - Execute HTTP request
|
|
37
|
+
- `generate()` - Main entry point
|
|
38
|
+
|
|
39
|
+
### OpenAICompatibleProvider
|
|
40
|
+
|
|
41
|
+
Specialized base class for OpenAI-compatible APIs (standard format).
|
|
42
|
+
|
|
43
|
+
**Request Format:**
|
|
44
|
+
|
|
45
|
+
```json
|
|
46
|
+
{
|
|
47
|
+
"model": "gpt-5",
|
|
48
|
+
"messages": [...],
|
|
49
|
+
"temperature": 0.7,
|
|
50
|
+
"max_tokens": 1024
|
|
51
|
+
}
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
**Response Format:**
|
|
55
|
+
|
|
56
|
+
```json
|
|
57
|
+
{
|
|
58
|
+
"choices": [
|
|
59
|
+
{
|
|
60
|
+
"message": {
|
|
61
|
+
"content": "..."
|
|
62
|
+
}
|
|
63
|
+
}
|
|
64
|
+
]
|
|
65
|
+
}
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
**Providers Using This Base:**
|
|
69
|
+
|
|
70
|
+
- OpenAI, DeepSeek, Together, Fireworks, Cerebras, Mistral, Minimax, Moonshot, Groq, OpenRouter
|
|
71
|
+
- Custom OpenAI, Azure OpenAI, LM Studio
|
|
72
|
+
- Kimi Coding, Streamlake, Synthetic, Z.AI
|
|
73
|
+
|
|
74
|
+
### AnthropicCompatibleProvider
|
|
75
|
+
|
|
76
|
+
Specialized base class for Anthropic-style APIs.
|
|
77
|
+
|
|
78
|
+
**Request Format:**
|
|
79
|
+
|
|
80
|
+
```json
|
|
81
|
+
{
|
|
82
|
+
"model": "claude-sonnet-4-5",
|
|
83
|
+
"messages": [...],
|
|
84
|
+
"system": "You are a helpful assistant",
|
|
85
|
+
"temperature": 0.7,
|
|
86
|
+
"max_tokens": 1024
|
|
87
|
+
}
|
|
88
|
+
```
|
|
89
|
+
|
|
90
|
+
**Response Format:**
|
|
91
|
+
|
|
92
|
+
```json
|
|
93
|
+
{
|
|
94
|
+
"content": [
|
|
95
|
+
{
|
|
96
|
+
"type": "text",
|
|
97
|
+
"text": "..."
|
|
98
|
+
}
|
|
99
|
+
]
|
|
100
|
+
}
|
|
101
|
+
```
|
|
102
|
+
|
|
103
|
+
**Providers Using This Base:**
|
|
104
|
+
|
|
105
|
+
- Anthropic, Custom Anthropic
|
|
106
|
+
- Claude Code
|
|
107
|
+
|
|
108
|
+
### GenericHTTPProvider
|
|
109
|
+
|
|
110
|
+
Base class for providers with completely custom API formats.
|
|
111
|
+
|
|
112
|
+
**Providers Using This Base:**
|
|
113
|
+
|
|
114
|
+
- Gemini (Google's unique format)
|
|
115
|
+
- Replicate (async prediction polling)
|
|
116
|
+
|
|
117
|
+
## Creating a New Provider
|
|
118
|
+
|
|
119
|
+
### Step 1: Choose the Right Base Class
|
|
120
|
+
|
|
121
|
+
```python
|
|
122
|
+
from gac.providers.base import OpenAICompatibleProvider, ProviderConfig
|
|
123
|
+
from gac.providers.error_handler import handle_provider_errors
|
|
124
|
+
|
|
125
|
+
# Most providers fit one of these patterns:
|
|
126
|
+
# 1. OpenAI-compatible format → inherit from OpenAICompatibleProvider
|
|
127
|
+
# 2. Anthropic-compatible format → inherit from AnthropicCompatibleProvider
|
|
128
|
+
# 3. Custom format → inherit from GenericHTTPProvider
|
|
129
|
+
```
|
|
130
|
+
|
|
131
|
+
### Step 2: Define Provider Configuration
|
|
132
|
+
|
|
133
|
+
```python
|
|
134
|
+
class MyProvider(OpenAICompatibleProvider):
|
|
135
|
+
config = ProviderConfig(
|
|
136
|
+
name="My Provider",
|
|
137
|
+
api_key_env="MY_PROVIDER_API_KEY",
|
|
138
|
+
base_url="https://api.myprovider.com/v1/chat/completions",
|
|
139
|
+
)
|
|
140
|
+
```
|
|
141
|
+
|
|
142
|
+
### Step 3: Override Template Methods (If Needed)
|
|
143
|
+
|
|
144
|
+
```python
|
|
145
|
+
class MyProvider(OpenAICompatibleProvider):
|
|
146
|
+
# Override only what's needed
|
|
147
|
+
|
|
148
|
+
def _build_headers(self) -> dict[str, str]:
|
|
149
|
+
"""Custom header handling."""
|
|
150
|
+
headers = super()._build_headers()
|
|
151
|
+
# Add provider-specific headers
|
|
152
|
+
return headers
|
|
153
|
+
|
|
154
|
+
def _get_api_url(self, model: str | None = None) -> str:
|
|
155
|
+
"""Custom URL construction."""
|
|
156
|
+
if model is None:
|
|
157
|
+
return super()._get_api_url(model)
|
|
158
|
+
return f"https://custom.endpoint/{model}/chat"
|
|
159
|
+
```
|
|
160
|
+
|
|
161
|
+
### Step 4: Create Lazy Getter and Decorated Function
|
|
162
|
+
|
|
163
|
+
```python
|
|
164
|
+
def _get_my_provider() -> MyProvider:
|
|
165
|
+
"""Lazy getter to initialize provider at call time."""
|
|
166
|
+
return MyProvider(MyProvider.config)
|
|
167
|
+
|
|
168
|
+
@handle_provider_errors("My Provider")
|
|
169
|
+
def call_my_provider_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
|
|
170
|
+
"""Call My Provider API."""
|
|
171
|
+
provider = _get_my_provider()
|
|
172
|
+
return provider.generate(model=model, messages=messages, temperature=temperature, max_tokens=max_tokens)
|
|
173
|
+
```
|
|
174
|
+
|
|
175
|
+
### Step 5: Export in `__init__.py`
|
|
176
|
+
|
|
177
|
+
```python
|
|
178
|
+
# In src/gac/providers/__init__.py
|
|
179
|
+
from .my_provider import call_my_provider_api
|
|
180
|
+
|
|
181
|
+
# Add to PROVIDER_REGISTRY
|
|
182
|
+
PROVIDER_REGISTRY = {
|
|
183
|
+
...
|
|
184
|
+
"my-provider": call_my_provider_api,
|
|
185
|
+
...
|
|
186
|
+
}
|
|
187
|
+
|
|
188
|
+
# Add to __all__
|
|
189
|
+
__all__ = [
|
|
190
|
+
...
|
|
191
|
+
"call_my_provider_api",
|
|
192
|
+
...
|
|
193
|
+
]
|
|
194
|
+
```
|
|
195
|
+
|
|
196
|
+
## Common Customization Patterns
|
|
197
|
+
|
|
198
|
+
### Pattern 1: Optional API Key (e.g., Ollama, LM Studio)
|
|
199
|
+
|
|
200
|
+
```python
|
|
201
|
+
def _get_api_key(self) -> str:
|
|
202
|
+
"""Get optional API key."""
|
|
203
|
+
api_key = os.getenv(self.config.api_key_env)
|
|
204
|
+
if not api_key:
|
|
205
|
+
return "" # Optional
|
|
206
|
+
return api_key
|
|
207
|
+
```
|
|
208
|
+
|
|
209
|
+
### Pattern 2: Custom URL Construction (e.g., Azure OpenAI)
|
|
210
|
+
|
|
211
|
+
```python
|
|
212
|
+
def _get_api_url(self, model: str | None = None) -> str:
|
|
213
|
+
"""Build custom URL with model in path."""
|
|
214
|
+
if model is None:
|
|
215
|
+
return super()._get_api_url(model)
|
|
216
|
+
return f"{self.endpoint}/openai/deployments/{model}/chat/completions?api-version={self.api_version}"
|
|
217
|
+
```
|
|
218
|
+
|
|
219
|
+
### Pattern 3: Alternative Environment Variables (e.g., Streamlake)
|
|
220
|
+
|
|
221
|
+
```python
|
|
222
|
+
def _get_api_key(self) -> str:
|
|
223
|
+
"""Try primary key, then fallback."""
|
|
224
|
+
api_key = os.getenv(self.config.api_key_env)
|
|
225
|
+
if api_key:
|
|
226
|
+
return api_key
|
|
227
|
+
# Fallback to alternative
|
|
228
|
+
api_key = os.getenv("ALTERNATIVE_KEY_ENV")
|
|
229
|
+
if api_key:
|
|
230
|
+
return api_key
|
|
231
|
+
raise AIError.authentication_error("No API key found")
|
|
232
|
+
```
|
|
233
|
+
|
|
234
|
+
### Pattern 4: Model Preprocessing (e.g., Synthetic - adding prefixes)
|
|
235
|
+
|
|
236
|
+
```python
|
|
237
|
+
def _build_request_body(self, messages: list[dict], temperature: float, max_tokens: int, model: str, **kwargs) -> dict[str, Any]:
|
|
238
|
+
"""Preprocess model name."""
|
|
239
|
+
data = super()._build_request_body(messages, temperature, max_tokens, model, **kwargs)
|
|
240
|
+
# Add "hf:" prefix for HuggingFace models
|
|
241
|
+
data["model"] = f"hf:{model}"
|
|
242
|
+
return data
|
|
243
|
+
```
|
|
244
|
+
|
|
245
|
+
### Pattern 5: Custom Response Parsing (e.g., LM Studio with text field fallback)
|
|
246
|
+
|
|
247
|
+
```python
|
|
248
|
+
def _parse_response(self, response: dict[str, Any]) -> str:
|
|
249
|
+
"""Parse response with fallback."""
|
|
250
|
+
# Try standard OpenAI format first
|
|
251
|
+
choices = response.get("choices")
|
|
252
|
+
if choices:
|
|
253
|
+
content = choices[0].get("message", {}).get("content")
|
|
254
|
+
if content:
|
|
255
|
+
return content
|
|
256
|
+
|
|
257
|
+
    # Fallback to the completion-style "text" field; guard again because
    # `choices` may be absent or empty (otherwise choices[0] raises TypeError
    # instead of AIError).
    if choices:
        content = choices[0].get("text")
        if content:
            return content

    raise AIError.model_error("No content found")
|
|
263
|
+
```
|
|
264
|
+
|
|
265
|
+
### Pattern 6: System Message Handling (e.g., Claude Code)
|
|
266
|
+
|
|
267
|
+
```python
|
|
268
|
+
def _build_request_body(self, messages: list[dict], temperature: float, max_tokens: int, model: str, **kwargs) -> dict[str, Any]:
|
|
269
|
+
"""Extract and handle system messages."""
|
|
270
|
+
anthropic_messages = []
|
|
271
|
+
system_instructions = ""
|
|
272
|
+
|
|
273
|
+
for msg in messages:
|
|
274
|
+
if msg["role"] == "system":
|
|
275
|
+
system_instructions = msg["content"]
|
|
276
|
+
else:
|
|
277
|
+
anthropic_messages.append(msg)
|
|
278
|
+
|
|
279
|
+
# Move system instructions to first user message
|
|
280
|
+
if system_instructions and anthropic_messages:
|
|
281
|
+
anthropic_messages[0]["content"] = f"{system_instructions}\n\n{anthropic_messages[0]['content']}"
|
|
282
|
+
|
|
283
|
+
return {
|
|
284
|
+
"messages": anthropic_messages,
|
|
285
|
+
"system": "REQUIRED_FIXED_MESSAGE",
|
|
286
|
+
"temperature": temperature,
|
|
287
|
+
"max_tokens": max_tokens,
|
|
288
|
+
**kwargs,
|
|
289
|
+
}
|
|
290
|
+
```
|
|
291
|
+
|
|
292
|
+
### Pattern 7: Async Operations (e.g., Replicate with polling)
|
|
293
|
+
|
|
294
|
+
```python
|
|
295
|
+
def generate(self, model: str, messages: list[dict], temperature: float = 0.7, max_tokens: int = 1024, **kwargs) -> str:
|
|
296
|
+
"""Override for async/polling operations."""
|
|
297
|
+
# Create prediction
|
|
298
|
+
response = httpx.post(url, json=body, headers=headers, ...)
|
|
299
|
+
prediction_id = response.json()["id"]
|
|
300
|
+
|
|
301
|
+
# Poll for completion
|
|
302
|
+
while elapsed_time < max_wait_time:
|
|
303
|
+
status_response = httpx.get(f"{url}/{prediction_id}", headers=headers, ...)
|
|
304
|
+
status_data = status_response.json()
|
|
305
|
+
|
|
306
|
+
if status_data["status"] == "succeeded":
|
|
307
|
+
return status_data["output"]
|
|
308
|
+
elif status_data["status"] == "failed":
|
|
309
|
+
raise AIError.model_error("Prediction failed")
|
|
310
|
+
|
|
311
|
+
time.sleep(wait_interval)
|
|
312
|
+
elapsed_time += wait_interval
|
|
313
|
+
|
|
314
|
+
raise AIError.timeout_error("Prediction timed out")
|
|
315
|
+
```
|
|
316
|
+
|
|
317
|
+
## Error Handling
|
|
318
|
+
|
|
319
|
+
All providers use the `@handle_provider_errors` decorator to normalize error handling:
|
|
320
|
+
|
|
321
|
+
```python
|
|
322
|
+
from gac.providers.error_handler import handle_provider_errors
|
|
323
|
+
|
|
324
|
+
@handle_provider_errors("My Provider")
|
|
325
|
+
def call_my_provider_api(...) -> str:
|
|
326
|
+
# Errors are automatically caught and converted to AIError types
|
|
327
|
+
pass
|
|
328
|
+
```
|
|
329
|
+
|
|
330
|
+
**Error Mapping:**
|
|
331
|
+
|
|
332
|
+
- HTTP 401 → `AIError.authentication_error()`
|
|
333
|
+
- HTTP 429 → `AIError.rate_limit_error()`
|
|
334
|
+
- Timeout → `AIError.timeout_error()`
|
|
335
|
+
- Connection error → `AIError.connection_error()`
|
|
336
|
+
- Other → `AIError.model_error()`
|
|
337
|
+
|
|
338
|
+
## Testing Providers
|
|
339
|
+
|
|
340
|
+
Each provider has comprehensive tests in `tests/providers/test_<provider>.py`.
|
|
341
|
+
|
|
342
|
+
### Test Structure
|
|
343
|
+
|
|
344
|
+
```python
|
|
345
|
+
class TestProviderImports:
|
|
346
|
+
"""Test imports."""
|
|
347
|
+
def test_import_provider(self): ...
|
|
348
|
+
|
|
349
|
+
class TestProviderMocked(BaseProviderTest):
|
|
350
|
+
"""Standard mocked tests (inherited from BaseProviderTest)."""
|
|
351
|
+
@property
|
|
352
|
+
def provider_name(self) -> str: return "my-provider"
|
|
353
|
+
|
|
354
|
+
@property
|
|
355
|
+
def provider_module(self) -> str: return "gac.providers.my_provider"
|
|
356
|
+
|
|
357
|
+
@property
|
|
358
|
+
def api_function(self): return call_my_provider_api
|
|
359
|
+
|
|
360
|
+
@property
|
|
361
|
+
def api_key_env_var(self) -> str: return "MY_PROVIDER_API_KEY"
|
|
362
|
+
|
|
363
|
+
class TestProviderEdgeCases:
|
|
364
|
+
"""Provider-specific edge cases."""
|
|
365
|
+
def test_custom_behavior(self): ...
|
|
366
|
+
|
|
367
|
+
@pytest.mark.integration
|
|
368
|
+
class TestProviderIntegration:
|
|
369
|
+
"""Real API tests (skipped by default)."""
|
|
370
|
+
def test_real_api_call(self): ...
|
|
371
|
+
```
|
|
372
|
+
|
|
373
|
+
## SSL Verification
|
|
374
|
+
|
|
375
|
+
All HTTP requests use GAC's SSL verification settings via `get_ssl_verify()`:
|
|
376
|
+
|
|
377
|
+
```python
|
|
378
|
+
from gac.utils import get_ssl_verify
|
|
379
|
+
|
|
380
|
+
response = httpx.post(url, ..., verify=get_ssl_verify())
|
|
381
|
+
```
|
|
382
|
+
|
|
383
|
+
This respects environment configurations for custom certificates.
|
|
384
|
+
|
|
385
|
+
## Timeout Configuration
|
|
386
|
+
|
|
387
|
+
All providers use `ProviderDefaults.HTTP_TIMEOUT` for consistency:
|
|
388
|
+
|
|
389
|
+
```python
|
|
390
|
+
from gac.constants import ProviderDefaults
|
|
391
|
+
|
|
392
|
+
config = ProviderConfig(
|
|
393
|
+
name="My Provider",
|
|
394
|
+
api_key_env="MY_KEY",
|
|
395
|
+
base_url="https://api.example.com",
|
|
396
|
+
timeout=ProviderDefaults.HTTP_TIMEOUT, # Default: 120 seconds
|
|
397
|
+
)
|
|
398
|
+
```
|
|
399
|
+
|
|
400
|
+
## Provider-Specific Documentation
|
|
401
|
+
|
|
402
|
+
See individual provider files for detailed documentation:
|
|
403
|
+
|
|
404
|
+
- `openai.py` - OpenAI API reference
|
|
405
|
+
- `anthropic.py` - Anthropic API reference
|
|
406
|
+
- `azure_openai.py` - Azure OpenAI configuration
|
|
407
|
+
- `gemini.py` - Google Gemini custom format
|
|
408
|
+
- `replicate.py` - Async prediction handling
|
|
409
|
+
- And others...
|
|
410
|
+
|
|
411
|
+
## Contributing
|
|
412
|
+
|
|
413
|
+
When adding a new provider:
|
|
414
|
+
|
|
415
|
+
1. Follow the architecture and patterns above
|
|
416
|
+
2. Write comprehensive tests (unit, mocked, integration)
|
|
417
|
+
3. Update `__init__.py` exports
|
|
418
|
+
4. Document the provider in its docstring
|
|
419
|
+
5. Run `mypy` for type checking: `uv run -- mypy src/gac`
|
|
420
|
+
6. Run tests: `uv run -- pytest tests/providers/test_<provider>.py -v`
|
|
421
|
+
7. Update this README if adding new patterns
|
|
422
|
+
|
|
423
|
+
## Best Practices
|
|
424
|
+
|
|
425
|
+
1. **Lazy Initialization**: Use getter functions to initialize providers at call time, not import time. This allows tests to mock environment variables.
|
|
426
|
+
|
|
427
|
+
2. **Error Preservation**: Always re-raise `AIError` exceptions without wrapping them. The error decorator handles generic exceptions.
|
|
428
|
+
|
|
429
|
+
3. **Optional Parameters**: Match superclass signatures exactly, especially for `_get_api_url(model: str | None = None)`.
|
|
430
|
+
|
|
431
|
+
4. **Response Validation**: Always validate responses for null/empty content before returning.
|
|
432
|
+
|
|
433
|
+
5. **Configuration Over Code**: Use environment variables and `ProviderConfig` rather than hardcoding values.
|
|
434
|
+
|
|
435
|
+
6. **Documentation**: Include docstrings with API endpoint references and required environment variables.
|
|
436
|
+
|
|
437
|
+
7. **Test Coverage**: Aim for 100% test coverage of provider logic.
|
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
"""AI provider implementations for commit message generation.
|
|
2
|
+
|
|
3
|
+
This module provides a unified interface to all AI providers. Provider classes
|
|
4
|
+
are registered and wrapper functions are auto-generated with error handling.
|
|
5
|
+
|
|
6
|
+
Usage:
|
|
7
|
+
from gac.providers import PROVIDER_REGISTRY
|
|
8
|
+
|
|
9
|
+
# Get the function for a provider
|
|
10
|
+
func = PROVIDER_REGISTRY["openai"]
|
|
11
|
+
result = func(model="gpt-4", messages=[...], temperature=0.7, max_tokens=1000)
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
# Import provider classes for registration
|
|
15
|
+
from .anthropic import AnthropicProvider
|
|
16
|
+
from .azure_openai import AzureOpenAIProvider
|
|
17
|
+
from .cerebras import CerebrasProvider
|
|
18
|
+
from .chutes import ChutesProvider
|
|
19
|
+
from .claude_code import ClaudeCodeProvider
|
|
20
|
+
from .custom_anthropic import CustomAnthropicProvider
|
|
21
|
+
from .custom_openai import CustomOpenAIProvider
|
|
22
|
+
from .deepseek import DeepSeekProvider
|
|
23
|
+
from .fireworks import FireworksProvider
|
|
24
|
+
from .gemini import GeminiProvider
|
|
25
|
+
from .groq import GroqProvider
|
|
26
|
+
from .kimi_coding import KimiCodingProvider
|
|
27
|
+
from .lmstudio import LMStudioProvider
|
|
28
|
+
from .minimax import MinimaxProvider
|
|
29
|
+
from .mistral import MistralProvider
|
|
30
|
+
from .moonshot import MoonshotProvider
|
|
31
|
+
from .ollama import OllamaProvider
|
|
32
|
+
from .openai import OpenAIProvider
|
|
33
|
+
from .openrouter import OpenRouterProvider
|
|
34
|
+
from .qwen import QwenProvider
|
|
35
|
+
from .registry import (
|
|
36
|
+
PROVIDER_REGISTRY,
|
|
37
|
+
register_provider,
|
|
38
|
+
)
|
|
39
|
+
from .replicate import ReplicateProvider
|
|
40
|
+
from .streamlake import StreamlakeProvider
|
|
41
|
+
from .synthetic import SyntheticProvider
|
|
42
|
+
from .together import TogetherProvider
|
|
43
|
+
from .zai import ZAICodingProvider, ZAIProvider
|
|
44
|
+
|
|
45
|
+
# Register all providers - this populates PROVIDER_REGISTRY automatically.
# Keys are the user-facing provider identifiers; values are the provider
# classes imported above. Registration order is alphabetical for readability
# (the registry itself is keyed, so order has no runtime effect here).
register_provider("anthropic", AnthropicProvider)
register_provider("azure-openai", AzureOpenAIProvider)
register_provider("cerebras", CerebrasProvider)
register_provider("chutes", ChutesProvider)
register_provider("claude-code", ClaudeCodeProvider)
register_provider("custom-anthropic", CustomAnthropicProvider)
register_provider("custom-openai", CustomOpenAIProvider)
register_provider("deepseek", DeepSeekProvider)
register_provider("fireworks", FireworksProvider)
register_provider("gemini", GeminiProvider)
register_provider("groq", GroqProvider)
register_provider("kimi-coding", KimiCodingProvider)
register_provider("lm-studio", LMStudioProvider)
register_provider("minimax", MinimaxProvider)
register_provider("mistral", MistralProvider)
register_provider("moonshot", MoonshotProvider)
register_provider("ollama", OllamaProvider)
register_provider("openai", OpenAIProvider)
register_provider("openrouter", OpenRouterProvider)
register_provider("qwen", QwenProvider)
register_provider("replicate", ReplicateProvider)
register_provider("streamlake", StreamlakeProvider)
register_provider("synthetic", SyntheticProvider)
register_provider("together", TogetherProvider)
register_provider("zai", ZAIProvider)
register_provider("zai-coding", ZAICodingProvider)

# List of supported provider names - derived from registry keys so it can
# never drift out of sync with the registrations above.
SUPPORTED_PROVIDERS = sorted(PROVIDER_REGISTRY.keys())

__all__ = [
    "PROVIDER_REGISTRY",
    "SUPPORTED_PROVIDERS",
    "register_provider",
]
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
"""Anthropic AI provider for gac."""
|
|
2
|
+
|
|
3
|
+
from gac.providers.base import AnthropicCompatibleProvider, ProviderConfig
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class AnthropicProvider(AnthropicCompatibleProvider):
    """Anthropic Claude API provider."""

    config = ProviderConfig(
        name="Anthropic",
        api_key_env="ANTHROPIC_API_KEY",
        base_url="https://api.anthropic.com/v1",
    )

    def _get_api_url(self, model: str | None = None) -> str:
        """Get Anthropic API URL with /messages endpoint.

        The model name never affects the endpoint here; it is carried in the
        request body instead.
        """
        base = self.config.base_url
        return base + "/messages"
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
"""Azure OpenAI provider for gac.
|
|
2
|
+
|
|
3
|
+
This provider provides native support for Azure OpenAI Service with proper
|
|
4
|
+
endpoint construction and API version handling.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import os
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from gac.errors import AIError
|
|
11
|
+
from gac.providers.base import OpenAICompatibleProvider, ProviderConfig
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class AzureOpenAIProvider(OpenAICompatibleProvider):
    """Azure OpenAI-compatible provider with custom URL construction and headers."""

    # Class-level template config; base_url is intentionally empty because the
    # real endpoint is assembled per-request in _get_api_url.
    config = ProviderConfig(
        name="Azure OpenAI",
        api_key_env="AZURE_OPENAI_API_KEY",
        base_url="",  # Will be set in __init__
    )

    def __init__(self, config: ProviderConfig):
        """Initialize with Azure-specific endpoint and API version.

        Reads AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_API_VERSION from the
        environment; both are mandatory for Azure deployments.

        Raises:
            AIError: If either environment variable is unset or empty.
        """
        endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
        if not endpoint:
            raise AIError.model_error("AZURE_OPENAI_ENDPOINT environment variable not set")

        api_version = os.getenv("AZURE_OPENAI_API_VERSION")
        if not api_version:
            raise AIError.model_error("AZURE_OPENAI_API_VERSION environment variable not set")

        self.api_version = api_version
        # Strip a trailing slash so the URL join in _get_api_url stays well-formed.
        self.endpoint = endpoint.rstrip("/")
        # NOTE(review): this mutates the passed-in (typically the shared,
        # class-level) ProviderConfig instance; harmless today since base_url
        # starts empty, but confirm nothing else reads config.base_url.
        config.base_url = ""  # Will be set dynamically in _get_api_url
        super().__init__(config)

    def _get_api_url(self, model: str | None = None) -> str:
        """Build Azure-specific URL with deployment name and API version.

        On Azure the "model" is the deployment name and lives in the URL path,
        not in the request body. With no model, defer to the base class (which
        will use the empty base_url).
        """
        if model is None:
            return super()._get_api_url(model)
        return f"{self.endpoint}/openai/deployments/{model}/chat/completions?api-version={self.api_version}"

    def _build_headers(self) -> dict[str, str]:
        """Build headers with api-key instead of Bearer token.

        Azure authenticates via an `api-key` header rather than the standard
        `Authorization: Bearer` scheme used by the OpenAI-compatible base.
        """
        headers = super()._build_headers()
        # Replace Bearer token with api-key
        if "Authorization" in headers:
            del headers["Authorization"]
        headers["api-key"] = self.api_key
        return headers

    def _build_request_body(
        self, messages: list[dict[str, Any]], temperature: float, max_tokens: int, model: str, **kwargs: Any
    ) -> dict[str, Any]:
        """Build request body for Azure OpenAI.

        Deliberately omits the "model" field: the deployment name is already
        encoded in the request URL (see _get_api_url).
        """
        return {"messages": messages, "temperature": temperature, "max_tokens": max_tokens, **kwargs}
|