gac 3.10.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of gac might be problematic. Click here for more details.
- gac/__init__.py +15 -0
- gac/__version__.py +3 -0
- gac/ai.py +109 -0
- gac/ai_utils.py +246 -0
- gac/auth_cli.py +214 -0
- gac/cli.py +218 -0
- gac/commit_executor.py +62 -0
- gac/config.py +125 -0
- gac/config_cli.py +95 -0
- gac/constants.py +328 -0
- gac/diff_cli.py +159 -0
- gac/errors.py +231 -0
- gac/git.py +372 -0
- gac/git_state_validator.py +184 -0
- gac/grouped_commit_workflow.py +423 -0
- gac/init_cli.py +70 -0
- gac/interactive_mode.py +182 -0
- gac/language_cli.py +377 -0
- gac/main.py +476 -0
- gac/model_cli.py +430 -0
- gac/oauth/__init__.py +27 -0
- gac/oauth/claude_code.py +464 -0
- gac/oauth/qwen_oauth.py +327 -0
- gac/oauth/token_store.py +81 -0
- gac/preprocess.py +511 -0
- gac/prompt.py +878 -0
- gac/prompt_builder.py +88 -0
- gac/providers/README.md +437 -0
- gac/providers/__init__.py +80 -0
- gac/providers/anthropic.py +17 -0
- gac/providers/azure_openai.py +57 -0
- gac/providers/base.py +329 -0
- gac/providers/cerebras.py +15 -0
- gac/providers/chutes.py +25 -0
- gac/providers/claude_code.py +79 -0
- gac/providers/custom_anthropic.py +103 -0
- gac/providers/custom_openai.py +44 -0
- gac/providers/deepseek.py +15 -0
- gac/providers/error_handler.py +139 -0
- gac/providers/fireworks.py +15 -0
- gac/providers/gemini.py +90 -0
- gac/providers/groq.py +15 -0
- gac/providers/kimi_coding.py +27 -0
- gac/providers/lmstudio.py +80 -0
- gac/providers/minimax.py +15 -0
- gac/providers/mistral.py +15 -0
- gac/providers/moonshot.py +15 -0
- gac/providers/ollama.py +73 -0
- gac/providers/openai.py +32 -0
- gac/providers/openrouter.py +21 -0
- gac/providers/protocol.py +71 -0
- gac/providers/qwen.py +64 -0
- gac/providers/registry.py +58 -0
- gac/providers/replicate.py +156 -0
- gac/providers/streamlake.py +31 -0
- gac/providers/synthetic.py +40 -0
- gac/providers/together.py +15 -0
- gac/providers/zai.py +31 -0
- gac/py.typed +0 -0
- gac/security.py +293 -0
- gac/utils.py +401 -0
- gac/workflow_utils.py +217 -0
- gac-3.10.3.dist-info/METADATA +283 -0
- gac-3.10.3.dist-info/RECORD +67 -0
- gac-3.10.3.dist-info/WHEEL +4 -0
- gac-3.10.3.dist-info/entry_points.txt +2 -0
- gac-3.10.3.dist-info/licenses/LICENSE +16 -0
gac/providers/base.py
ADDED
|
@@ -0,0 +1,329 @@
|
|
|
1
|
+
"""Base configured provider class to eliminate code duplication."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import os
|
|
5
|
+
from abc import ABC, abstractmethod
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
import httpx
|
|
10
|
+
|
|
11
|
+
from gac.constants import ProviderDefaults
|
|
12
|
+
from gac.errors import AIError
|
|
13
|
+
from gac.providers.protocol import ProviderProtocol
|
|
14
|
+
from gac.utils import get_ssl_verify
|
|
15
|
+
|
|
16
|
+
logger = logging.getLogger(__name__)
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@dataclass
class ProviderConfig:
    """Static configuration describing how to reach an AI provider's API."""

    # Human-readable provider name (used in logs and error messages).
    name: str
    # Environment variable that holds the provider's API key.
    api_key_env: str
    # Root URL of the provider's HTTP API.
    base_url: str
    # Request timeout in seconds.
    timeout: int = ProviderDefaults.HTTP_TIMEOUT
    # Extra HTTP headers; a JSON content-type header is supplied when omitted.
    headers: dict[str, str] | None = None

    def __post_init__(self) -> None:
        """Fall back to a JSON content-type header when none were given."""
        if self.headers is not None:
            return
        self.headers = {"Content-Type": "application/json"}
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class BaseConfiguredProvider(ABC, ProviderProtocol):
    """Base class for configured AI providers.

    Centralizes the behavior every HTTP-backed provider shares, eliminating
    per-provider duplication:

    - Standardized HTTP handling with httpx
    - Common error handling patterns
    - Flexible configuration via ProviderConfig
    - Template methods (_build_request_body / _parse_response) for customization

    Implements ProviderProtocol for type safety.
    """

    def __init__(self, config: ProviderConfig):
        self.config = config
        self._api_key: str | None = None  # Lazy load

    @property
    def api_key(self) -> str:
        """Resolve the API key on demand (empty string when no env var is configured)."""
        if not self.config.api_key_env:
            return ""
        # Always consult the environment for a fresh value to support test isolation.
        return self._get_api_key()

    @property
    def name(self) -> str:
        """Provider display name."""
        return self.config.name

    @property
    def api_key_env(self) -> str:
        """Name of the environment variable holding the API key."""
        return self.config.api_key_env

    @property
    def base_url(self) -> str:
        """Root URL of the provider's API."""
        return self.config.base_url

    @property
    def timeout(self) -> int:
        """Request timeout in seconds."""
        return self.config.timeout

    def _get_api_key(self) -> str:
        """Read the API key from the environment, raising when it is absent."""
        key = os.getenv(self.config.api_key_env)
        if key:
            return key
        raise AIError.authentication_error(f"{self.config.api_key_env} not found in environment variables")

    @abstractmethod
    def _build_request_body(
        self, messages: list[dict[str, Any]], temperature: float, max_tokens: int, model: str, **kwargs: Any
    ) -> dict[str, Any]:
        """Build the request body for the API call.

        Args:
            messages: List of message dictionaries
            temperature: Temperature parameter
            max_tokens: Maximum tokens in response
            model: Model name the request targets
            **kwargs: Additional provider-specific parameters

        Returns:
            Request body dictionary
        """

    @abstractmethod
    def _parse_response(self, response: dict[str, Any]) -> str:
        """Parse the API response and extract content.

        Args:
            response: Response dictionary from API

        Returns:
            Generated text content
        """

    def _build_headers(self) -> dict[str, str]:
        """Return a copy of the configured headers.

        Subclasses override this to add provider-specific headers.
        """
        return dict(self.config.headers) if self.config.headers else {}

    def _get_api_url(self, model: str | None = None) -> str:
        """Return the URL the request is sent to.

        Subclasses override this when the URL depends on the model or an
        endpoint path suffix.

        Args:
            model: Model name (for providers that need model-specific URLs)

        Returns:
            API URL string
        """
        return self.config.base_url

    def _make_http_request(self, url: str, body: dict[str, Any], headers: dict[str, str]) -> dict[str, Any]:
        """POST the request and return the decoded JSON response.

        Error handling is deliberately NOT done here: the @handle_provider_errors
        decorator that wraps each provider's API function classifies the httpx
        exceptions consistently for all providers.

        Args:
            url: API URL
            body: Request body
            headers: Request headers

        Returns:
            Response JSON dictionary

        Raises:
            httpx.HTTPStatusError: For HTTP errors (handled by decorator)
            httpx.TimeoutException: For timeout errors (handled by decorator)
            httpx.RequestError: For network errors (handled by decorator)
        """
        reply = httpx.post(url, json=body, headers=headers, timeout=self.config.timeout, verify=get_ssl_verify())
        reply.raise_for_status()
        return reply.json()

    def generate(
        self,
        model: str,
        messages: list[dict[str, Any]],
        temperature: float = 0.7,
        max_tokens: int = 1024,
        **kwargs: Any,
    ) -> str:
        """Generate text using the AI provider.

        Error handling is delegated to the @handle_provider_errors decorator
        which wraps the provider's API function, ensuring consistent error
        classification across all providers.

        Args:
            model: Model name to use
            messages: List of message dictionaries
            temperature: Temperature parameter (0.0-2.0)
            max_tokens: Maximum tokens in response
            **kwargs: Additional provider-specific parameters

        Returns:
            Generated text content

        Raises:
            AIError: For any API-related errors (via decorator)
        """
        logger.debug(f"Generating with {self.config.name} provider (model={model})")

        endpoint = self._get_api_url(model)
        request_headers = self._build_headers()
        payload = self._build_request_body(messages, temperature, max_tokens, model, **kwargs)
        # Ensure the model name rides along even if the subclass omitted it.
        payload.setdefault("model", model)

        raw_response = self._make_http_request(endpoint, payload, request_headers)
        return self._parse_response(raw_response)
|
|
203
|
+
|
|
204
|
+
|
|
205
|
+
class OpenAICompatibleProvider(BaseConfiguredProvider):
    """Base class for providers that speak the standard OpenAI chat API.

    Only minimal customization (usually the endpoint URL) is needed on top.
    """

    def _build_request_body(
        self, messages: list[dict[str, Any]], temperature: float, max_tokens: int, model: str, **kwargs: Any
    ) -> dict[str, Any]:
        """Assemble an OpenAI-style chat-completions payload.

        Note: subclasses that need max_completion_tokens instead of max_tokens
        (as the OpenAI provider does) should override this.
        """
        payload: dict[str, Any] = {"messages": messages, "temperature": temperature, "max_tokens": max_tokens}
        payload.update(kwargs)
        return payload

    def _build_headers(self) -> dict[str, str]:
        """Attach an OpenAI-style Bearer authorization header when a key exists."""
        headers = super()._build_headers()
        token = self.api_key
        if token:
            headers["Authorization"] = f"Bearer {token}"
        return headers

    def _parse_response(self, response: dict[str, Any]) -> str:
        """Extract the generated text from an OpenAI-style response."""
        choices = response.get("choices")
        if not isinstance(choices, list) or not choices:
            raise AIError.model_error("Invalid response: missing choices")
        content = choices[0].get("message", {}).get("content")
        if content is None:
            raise AIError.model_error("Invalid response: null content")
        if content == "":
            raise AIError.model_error("Invalid response: empty content")
        return content
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
class AnthropicCompatibleProvider(BaseConfiguredProvider):
    """Base class for providers that speak Anthropic's messages API."""

    def _build_headers(self) -> dict[str, str]:
        """Attach the x-api-key and pinned anthropic-version headers."""
        headers = super()._build_headers()
        headers["x-api-key"] = self._get_api_key()
        headers["anthropic-version"] = "2023-06-01"
        return headers

    def _build_request_body(
        self, messages: list[dict[str, Any]], temperature: float, max_tokens: int, model: str, **kwargs: Any
    ) -> dict[str, Any]:
        """Translate chat messages into Anthropic's request schema.

        Anthropic takes the system prompt as a top-level "system" field rather
        than as a message, so system messages are lifted out of the list.
        """
        system_prompt = ""
        chat_turns: list[dict[str, Any]] = []
        for message in messages:
            if message["role"] == "system":
                system_prompt = message["content"]
                continue
            chat_turns.append({"role": message["role"], "content": message["content"]})

        payload: dict[str, Any] = {
            "messages": chat_turns,
            "temperature": temperature,
            "max_tokens": max_tokens,
            **kwargs,
        }
        if system_prompt:
            payload["system"] = system_prompt
        return payload

    def _parse_response(self, response: dict[str, Any]) -> str:
        """Extract the generated text from an Anthropic-style response."""
        blocks = response.get("content")
        if not blocks or not isinstance(blocks, list):
            raise AIError.model_error("Invalid response: missing content")

        text = blocks[0].get("text")
        if text is None:
            raise AIError.model_error("Invalid response: null content")
        if text == "":
            raise AIError.model_error("Invalid response: empty content")
        return text
|
|
285
|
+
|
|
286
|
+
|
|
287
|
+
class GenericHTTPProvider(BaseConfiguredProvider):
    """Base class for completely custom providers.

    Supplies permissive defaults that guess the response layout at parse
    time; real providers are expected to override both template methods.
    """

    def _build_request_body(
        self, messages: list[dict[str, Any]], temperature: float, max_tokens: int, model: str, **kwargs: Any
    ) -> dict[str, Any]:
        """Assemble a plain chat-style payload — override in subclasses."""
        payload: dict[str, Any] = {"messages": messages, "temperature": temperature, "max_tokens": max_tokens}
        payload.update(kwargs)
        return payload

    def _parse_response(self, response: dict[str, Any]) -> str:
        """Best-effort extraction — override in subclasses.

        Tries the OpenAI, Anthropic, and Ollama layouts in turn, then falls
        back to the first sufficiently long string value in the response.
        """
        # OpenAI layout: choices[0].message.content
        choices = response.get("choices")
        if choices and isinstance(choices, list):
            candidate = choices[0].get("message", {}).get("content")
            if candidate:
                return candidate

        # Anthropic layout: content[0].text
        blocks = response.get("content")
        if blocks and isinstance(blocks, list):
            return blocks[0].get("text", "")

        # Ollama layout: message.content
        ollama_message = response.get("message", {})
        if "content" in ollama_message:
            return ollama_message["content"]

        # Last resort: assume any long string value is the generated content.
        for value in response.values():
            if isinstance(value, str) and len(value) > 10:
                return value

        raise AIError.model_error("Could not extract content from response")
|
|
321
|
+
|
|
322
|
+
|
|
323
|
+
# Public API of this module (kept alphabetized).
__all__ = [
    "AnthropicCompatibleProvider",
    "BaseConfiguredProvider",
    "GenericHTTPProvider",
    "OpenAICompatibleProvider",
    "ProviderConfig",
]
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
"""Cerebras AI provider implementation."""
|
|
2
|
+
|
|
3
|
+
from gac.providers.base import OpenAICompatibleProvider, ProviderConfig
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class CerebrasProvider(OpenAICompatibleProvider):
    """Cerebras chat provider using the standard OpenAI-compatible API."""

    config = ProviderConfig(
        name="Cerebras",
        api_key_env="CEREBRAS_API_KEY",
        base_url="https://api.cerebras.ai/v1",
    )

    def _get_api_url(self, model: str | None = None) -> str:
        """Get Cerebras API URL with /chat/completions endpoint."""
        return self.config.base_url + "/chat/completions"
|
gac/providers/chutes.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
"""Chutes.ai API provider for gac."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
|
|
5
|
+
from gac.providers.base import OpenAICompatibleProvider, ProviderConfig
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class ChutesProvider(OpenAICompatibleProvider):
    """Chutes.ai OpenAI-compatible provider with custom base URL.

    The endpoint root comes from the CHUTES_BASE_URL environment variable
    (defaulting to https://llm.chutes.ai) and is normalized to end in /v1.
    """

    config = ProviderConfig(
        name="Chutes",
        api_key_env="CHUTES_API_KEY",
        base_url="",  # Resolved per-instance in __init__
    )

    def __init__(self, config: ProviderConfig):
        """Initialize with base URL from environment or default.

        Builds a fresh ProviderConfig instead of mutating the one passed in:
        the caller typically passes the shared class-level config, and mutating
        it would leak this instance's resolved URL into class state.
        """
        base_url = os.getenv("CHUTES_BASE_URL", "https://llm.chutes.ai")
        resolved = ProviderConfig(
            name=config.name,
            api_key_env=config.api_key_env,
            base_url=f"{base_url.rstrip('/')}/v1",
            timeout=config.timeout,
            headers=dict(config.headers) if config.headers else None,
        )
        super().__init__(resolved)

    def _get_api_url(self, model: str | None = None) -> str:
        """Get Chutes API URL with /chat/completions endpoint."""
        return f"{self.config.base_url}/chat/completions"
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
"""Claude Code API provider for gac.
|
|
2
|
+
|
|
3
|
+
This provider allows users with Claude Code subscriptions to use their OAuth tokens
|
|
4
|
+
instead of paying for the expensive Anthropic API.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
from gac.errors import AIError
|
|
10
|
+
from gac.oauth.claude_code import load_stored_token
|
|
11
|
+
from gac.providers.base import AnthropicCompatibleProvider, ProviderConfig
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class ClaudeCodeProvider(AnthropicCompatibleProvider):
    """Claude Code OAuth provider with special system message requirements."""

    config = ProviderConfig(
        name="Claude Code",
        api_key_env="CLAUDE_CODE_ACCESS_TOKEN",
        base_url="https://api.anthropic.com/v1/messages",
    )

    def _get_api_key(self) -> str:
        """Return the stored OAuth token, or fail with login instructions."""
        token = load_stored_token()
        if not token:
            raise AIError.authentication_error(
                "Claude Code authentication not found. Run 'gac auth claude-code login' to authenticate."
            )
        return token

    def _build_headers(self) -> dict[str, str]:
        """Swap the x-api-key header for an OAuth Bearer token plus the beta flag."""
        headers = super()._build_headers()
        # OAuth tokens authenticate via Authorization, not x-api-key.
        headers.pop("x-api-key", None)
        headers["Authorization"] = f"Bearer {self.api_key}"
        # Special beta header required for OAuth access.
        headers["anthropic-beta"] = "oauth-2025-04-20"
        return headers

    def _build_request_body(
        self, messages: list[dict[str, Any]], temperature: float, max_tokens: int, model: str, **kwargs: Any
    ) -> dict[str, Any]:
        """Build an Anthropic-style request with the mandatory fixed system message.

        IMPORTANT: Claude Code OAuth tokens only work when the system message is
        EXACTLY "You are Claude Code, Anthropic's official CLI for Claude." with
        NO additional content. Caller-supplied system instructions are therefore
        folded into the first user message instead.
        """
        extracted_instructions = ""
        turns: list[dict[str, Any]] = []
        for message in messages:
            if message["role"] == "system":
                extracted_instructions = message["content"]
            else:
                turns.append({"role": message["role"], "content": message["content"]})

        # Prepend any caller system instructions to the first non-system message.
        if extracted_instructions and turns:
            turns[0]["content"] = f"{extracted_instructions}\n\n{turns[0]['content']}"

        return {
            "messages": turns,
            "temperature": temperature,
            "max_tokens": max_tokens,
            # Claude Code requires this exact system message.
            "system": "You are Claude Code, Anthropic's official CLI for Claude.",
            **kwargs,
        }
|
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
"""Custom Anthropic-compatible API provider for gac.
|
|
2
|
+
|
|
3
|
+
This provider allows users to specify a custom Anthropic-compatible endpoint
|
|
4
|
+
while using the same model capabilities as the standard Anthropic provider.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
import logging
|
|
9
|
+
import os
|
|
10
|
+
from typing import Any
|
|
11
|
+
|
|
12
|
+
from gac.errors import AIError
|
|
13
|
+
from gac.providers.base import AnthropicCompatibleProvider, ProviderConfig
|
|
14
|
+
|
|
15
|
+
logger = logging.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class CustomAnthropicProvider(AnthropicCompatibleProvider):
    """Custom Anthropic-compatible provider with configurable endpoint and version."""

    config = ProviderConfig(
        name="Custom Anthropic",
        api_key_env="CUSTOM_ANTHROPIC_API_KEY",
        base_url="",  # Resolved per-instance in __init__ from environment
    )

    def __init__(self, config: ProviderConfig):
        """Initialize the provider with custom configuration from environment variables.

        Environment variables:
            CUSTOM_ANTHROPIC_API_KEY: API key for authentication (required)
            CUSTOM_ANTHROPIC_BASE_URL: Base URL for the API endpoint (required)
            CUSTOM_ANTHROPIC_VERSION: API version header (optional, defaults to '2023-06-01')

        Raises:
            AIError: If CUSTOM_ANTHROPIC_BASE_URL is not set.
        """
        # Get base_url from environment and normalize it to a full
        # .../v1/messages endpoint regardless of how much path was supplied.
        base_url = os.getenv("CUSTOM_ANTHROPIC_BASE_URL")
        if not base_url:
            raise AIError.model_error("CUSTOM_ANTHROPIC_BASE_URL environment variable not set")

        base_url = base_url.rstrip("/")
        if base_url.endswith("/messages"):
            pass  # Already a complete endpoint URL
        elif base_url.endswith("/v1"):
            base_url = f"{base_url}/messages"
        else:
            base_url = f"{base_url}/v1/messages"

        # Build a fresh config rather than mutating the one passed in: callers
        # typically pass the shared class-level config, and mutating it would
        # leak this instance's resolved URL into class state.
        resolved = ProviderConfig(
            name=config.name,
            api_key_env=config.api_key_env,
            base_url=base_url,
            timeout=config.timeout,
            headers=dict(config.headers) if config.headers else None,
        )

        # Value sent as the 'anthropic-version' header.
        self.custom_version = os.getenv("CUSTOM_ANTHROPIC_VERSION", "2023-06-01")

        super().__init__(resolved)

    def _build_headers(self) -> dict[str, str]:
        """Build headers, overriding the pinned anthropic-version with the custom one."""
        headers = super()._build_headers()
        headers["anthropic-version"] = self.custom_version
        return headers

    def _parse_response(self, response: dict[str, Any]) -> str:
        """Parse response with support for extended format (e.g., MiniMax with thinking).

        Handles both:
        - Standard Anthropic format: content[0].text
        - Extended format: first item with type="text"

        Raises:
            AIError: If no usable text content can be extracted.
        """
        try:
            content_list = response.get("content", [])
            if not content_list:
                raise AIError.model_error("Custom Anthropic API returned empty content array")

            # Try standard Anthropic format first: content[0].text
            if "text" in content_list[0]:
                content = content_list[0]["text"]
            else:
                # Extended format (e.g., MiniMax with thinking): find first item with type="text"
                text_item = next((item for item in content_list if item.get("type") == "text"), None)
                if text_item and "text" in text_item:
                    content = text_item["text"]
                else:
                    logger.error(
                        f"Unexpected response format from Custom Anthropic API. Response: {json.dumps(response)}"
                    )
                    raise AIError.model_error(
                        "Custom Anthropic API returned unexpected format. Expected 'text' field in content array."
                    )

            if content is None:
                raise AIError.model_error("Custom Anthropic API returned null content")
            if content == "":
                raise AIError.model_error("Custom Anthropic API returned empty content")
            return content
        except AIError:
            raise
        except (KeyError, IndexError, TypeError, StopIteration) as e:
            logger.error(f"Unexpected response format from Custom Anthropic API. Response: {json.dumps(response)}")
            raise AIError.model_error(
                f"Custom Anthropic API returned unexpected format. Expected Anthropic-compatible response with "
                f"'content[0].text' or items with type='text', but got: {type(e).__name__}. "
                f"Check logs for full response structure."
            ) from e
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
"""Custom OpenAI-compatible API provider for gac.
|
|
2
|
+
|
|
3
|
+
This provider allows users to specify a custom OpenAI-compatible endpoint
|
|
4
|
+
while using the same model capabilities as the standard OpenAI provider.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import os
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from gac.errors import AIError
|
|
11
|
+
from gac.providers.base import OpenAICompatibleProvider, ProviderConfig
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class CustomOpenAIProvider(OpenAICompatibleProvider):
    """Custom OpenAI-compatible provider with configurable base URL.

    The endpoint is taken from CUSTOM_OPENAI_BASE_URL and normalized to end
    in /chat/completions; CUSTOM_OPENAI_API_KEY supplies the bearer token.
    """

    config = ProviderConfig(
        name="Custom OpenAI",
        api_key_env="CUSTOM_OPENAI_API_KEY",
        base_url="",  # Resolved per-instance in __init__
    )

    def __init__(self, config: ProviderConfig):
        """Initialize with base URL from environment.

        Raises:
            AIError: If CUSTOM_OPENAI_BASE_URL is not set.
        """
        base_url = os.getenv("CUSTOM_OPENAI_BASE_URL")
        if not base_url:
            raise AIError.model_error("CUSTOM_OPENAI_BASE_URL environment variable not set")

        # Append the chat-completions path unless the user already included it.
        if "/chat/completions" not in base_url:
            url = f"{base_url.rstrip('/')}/chat/completions"
        else:
            url = base_url

        # Build a fresh config rather than mutating the one passed in: callers
        # typically pass the shared class-level config, and mutating it would
        # leak this instance's resolved URL into class state.
        resolved = ProviderConfig(
            name=config.name,
            api_key_env=config.api_key_env,
            base_url=url,
            timeout=config.timeout,
            headers=dict(config.headers) if config.headers else None,
        )
        super().__init__(resolved)

    def _build_request_body(
        self, messages: list[dict[str, Any]], temperature: float, max_tokens: int, model: str, **kwargs: Any
    ) -> dict[str, Any]:
        """Build request body with max_completion_tokens instead of max_tokens.

        Newer OpenAI-compatible backends reject the legacy max_tokens field.
        """
        data = super()._build_request_body(messages, temperature, max_tokens, model, **kwargs)
        data["max_completion_tokens"] = data.pop("max_tokens")
        return data
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
"""DeepSeek API provider for gac."""
|
|
2
|
+
|
|
3
|
+
from gac.providers.base import OpenAICompatibleProvider, ProviderConfig
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class DeepSeekProvider(OpenAICompatibleProvider):
    """DeepSeek chat provider using the standard OpenAI-compatible API."""

    config = ProviderConfig(
        name="DeepSeek",
        api_key_env="DEEPSEEK_API_KEY",
        base_url="https://api.deepseek.com/v1",
    )

    def _get_api_url(self, model: str | None = None) -> str:
        """Get DeepSeek API URL with /chat/completions endpoint."""
        return self.config.base_url + "/chat/completions"
|