gac 3.6.0__py3-none-any.whl → 3.10.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. gac/__init__.py +4 -6
  2. gac/__version__.py +1 -1
  3. gac/ai_utils.py +59 -43
  4. gac/auth_cli.py +181 -36
  5. gac/cli.py +26 -9
  6. gac/commit_executor.py +59 -0
  7. gac/config.py +81 -2
  8. gac/config_cli.py +19 -7
  9. gac/constants/__init__.py +34 -0
  10. gac/constants/commit.py +63 -0
  11. gac/constants/defaults.py +40 -0
  12. gac/constants/file_patterns.py +110 -0
  13. gac/constants/languages.py +119 -0
  14. gac/diff_cli.py +0 -22
  15. gac/errors.py +8 -2
  16. gac/git.py +6 -6
  17. gac/git_state_validator.py +193 -0
  18. gac/grouped_commit_workflow.py +458 -0
  19. gac/init_cli.py +2 -1
  20. gac/interactive_mode.py +179 -0
  21. gac/language_cli.py +0 -1
  22. gac/main.py +231 -926
  23. gac/model_cli.py +67 -11
  24. gac/model_identifier.py +70 -0
  25. gac/oauth/__init__.py +26 -0
  26. gac/oauth/claude_code.py +89 -22
  27. gac/oauth/qwen_oauth.py +327 -0
  28. gac/oauth/token_store.py +81 -0
  29. gac/oauth_retry.py +161 -0
  30. gac/postprocess.py +155 -0
  31. gac/prompt.py +21 -479
  32. gac/prompt_builder.py +88 -0
  33. gac/providers/README.md +437 -0
  34. gac/providers/__init__.py +70 -78
  35. gac/providers/anthropic.py +12 -46
  36. gac/providers/azure_openai.py +48 -88
  37. gac/providers/base.py +329 -0
  38. gac/providers/cerebras.py +10 -33
  39. gac/providers/chutes.py +16 -62
  40. gac/providers/claude_code.py +64 -87
  41. gac/providers/custom_anthropic.py +51 -81
  42. gac/providers/custom_openai.py +29 -83
  43. gac/providers/deepseek.py +10 -33
  44. gac/providers/error_handler.py +139 -0
  45. gac/providers/fireworks.py +10 -33
  46. gac/providers/gemini.py +66 -63
  47. gac/providers/groq.py +10 -58
  48. gac/providers/kimi_coding.py +19 -55
  49. gac/providers/lmstudio.py +64 -43
  50. gac/providers/minimax.py +10 -33
  51. gac/providers/mistral.py +10 -33
  52. gac/providers/moonshot.py +10 -33
  53. gac/providers/ollama.py +56 -33
  54. gac/providers/openai.py +30 -36
  55. gac/providers/openrouter.py +15 -52
  56. gac/providers/protocol.py +71 -0
  57. gac/providers/qwen.py +64 -0
  58. gac/providers/registry.py +58 -0
  59. gac/providers/replicate.py +140 -82
  60. gac/providers/streamlake.py +26 -46
  61. gac/providers/synthetic.py +35 -37
  62. gac/providers/together.py +10 -33
  63. gac/providers/zai.py +29 -57
  64. gac/py.typed +0 -0
  65. gac/security.py +1 -1
  66. gac/templates/__init__.py +1 -0
  67. gac/templates/question_generation.txt +60 -0
  68. gac/templates/system_prompt.txt +224 -0
  69. gac/templates/user_prompt.txt +28 -0
  70. gac/utils.py +36 -6
  71. gac/workflow_context.py +162 -0
  72. gac/workflow_utils.py +3 -8
  73. {gac-3.6.0.dist-info → gac-3.10.10.dist-info}/METADATA +6 -4
  74. gac-3.10.10.dist-info/RECORD +79 -0
  75. gac/constants.py +0 -321
  76. gac-3.6.0.dist-info/RECORD +0 -53
  77. {gac-3.6.0.dist-info → gac-3.10.10.dist-info}/WHEEL +0 -0
  78. {gac-3.6.0.dist-info → gac-3.10.10.dist-info}/entry_points.txt +0 -0
  79. {gac-3.6.0.dist-info → gac-3.10.10.dist-info}/licenses/LICENSE +0 -0
gac/providers/README.md ADDED
@@ -0,0 +1,437 @@
+ # AI Provider Architecture
+
+ This directory contains AI provider implementations for GAC (Git Auto Commit). All providers follow a class-based architecture with shared base classes to reduce code duplication and improve maintainability.
+
+ ## Architecture Overview
+
+ ```text
+ ProviderProtocol (Protocol - type contract)
+
+ BaseConfiguredProvider (ABC - core logic)
+
+ ├── OpenAICompatibleProvider (OpenAI-style APIs)
+ ├── AnthropicCompatibleProvider (Anthropic-style APIs)
+ └── GenericHTTPProvider (Fully custom implementations)
+
+ Concrete Providers (e.g., OpenAIProvider, GeminiProvider)
+ ```
+
+ ## Core Components
+
+ ### BaseConfiguredProvider
+
+ Abstract base class implementing the template method pattern. All providers inherit from this class.
+
+ **Key Features:**
+
+ - Standardized HTTP handling with httpx
+ - Common error handling patterns
+ - Flexible configuration via ProviderConfig
+ - Template methods for customization:
+   - `_get_api_key()` - Load API key from environment
+   - `_build_headers()` - Build request headers
+   - `_build_request_body()` - Build request body
+   - `_get_api_url()` - Get API endpoint URL
+   - `_parse_response()` - Parse API response
+   - `_make_http_request()` - Execute HTTP request
+   - `generate()` - Main entry point
+
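+ These hooks compose in a straightforward pipeline. The snippet below is only a simplified illustration of that flow, not the actual `base.py` implementation; the method names and `ProviderConfig.timeout` come from the descriptions above:
+
+ ```python
+ import httpx
+
+ from gac.utils import get_ssl_verify
+
+
+ def generate_flow_sketch(provider, model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+     """Illustrates how the template methods compose inside generate()."""
+     headers = provider._build_headers()                                   # auth + content-type headers
+     body = provider._build_request_body(messages, temperature, max_tokens, model)
+     url = provider._get_api_url(model)
+     # Shown here as a plain httpx call; the real base class routes this through _make_http_request()
+     response = httpx.post(url, headers=headers, json=body, timeout=provider.config.timeout, verify=get_ssl_verify())
+     response.raise_for_status()
+     return provider._parse_response(response.json())                      # extract the text content
+ ```
+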
+ ### OpenAICompatibleProvider
+
+ Specialized base class for OpenAI-compatible APIs (standard format).
+
+ **Request Format:**
+
+ ```json
+ {
+   "model": "gpt-5",
+   "messages": [...],
+   "temperature": 0.7,
+   "max_tokens": 1024
+ }
+ ```
+
+ **Response Format:**
+
+ ```json
+ {
+   "choices": [
+     {
+       "message": {
+         "content": "..."
+       }
+     }
+   ]
+ }
+ ```
+
+ **Providers Using This Base:**
+
+ - OpenAI, DeepSeek, Together, Fireworks, Cerebras, Mistral, Minimax, Moonshot, Groq, OpenRouter
+ - Custom OpenAI, Azure OpenAI, LM Studio
+ - Kimi Coding, Streamlake, Synthetic, Z.AI
+
+ ### AnthropicCompatibleProvider
+
+ Specialized base class for Anthropic-style APIs.
+
+ **Request Format:**
+
+ ```json
+ {
+   "model": "claude-sonnet-4-5",
+   "messages": [...],
+   "system": "You are a helpful assistant",
+   "temperature": 0.7,
+   "max_tokens": 1024
+ }
+ ```
+
+ **Response Format:**
+
+ ```json
+ {
+   "content": [
+     {
+       "type": "text",
+       "text": "..."
+     }
+   ]
+ }
+ ```
+
+ **Providers Using This Base:**
+
+ - Anthropic, Custom Anthropic
+ - Claude Code
+
+ ### GenericHTTPProvider
+
+ Base class for providers with completely custom API formats.
+
+ **Providers Using This Base:**
+
+ - Gemini (Google's unique format)
+ - Replicate (async prediction polling)
+
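+ As a rough illustration, a custom-format provider can override the request and response hooks directly. The skeleton below is hypothetical (provider name, URL, and payload shape are invented) and assumes `GenericHTTPProvider` is importable from `gac.providers.base` alongside the other base classes:
+
+ ```python
+ from typing import Any
+
+ from gac.providers.base import GenericHTTPProvider, ProviderConfig
+
+
+ class ExampleCustomProvider(GenericHTTPProvider):
+     """Hypothetical provider whose API expects a single prompt string."""
+
+     config = ProviderConfig(
+         name="Example Custom",
+         api_key_env="EXAMPLE_CUSTOM_API_KEY",
+         base_url="https://api.example.com/v1/generate",
+     )
+
+     def _build_request_body(self, messages: list[dict], temperature: float, max_tokens: int, model: str, **kwargs) -> dict[str, Any]:
+         # Collapse the chat messages into the single prompt field this API expects
+         prompt = "\n".join(msg["content"] for msg in messages)
+         return {"model": model, "prompt": prompt, "temperature": temperature, "max_output_tokens": max_tokens}
+
+     def _parse_response(self, response: dict[str, Any]) -> str:
+         # Custom response shape: {"output": {"text": "..."}}
+         return response["output"]["text"]
+ ```
+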
+ ## Creating a New Provider
+
+ ### Step 1: Choose the Right Base Class
+
+ ```python
+ from gac.providers.base import OpenAICompatibleProvider, ProviderConfig
+ from gac.providers.error_handler import handle_provider_errors
+
+ # Most providers fit one of these patterns:
+ # 1. OpenAI-compatible format → inherit from OpenAICompatibleProvider
+ # 2. Anthropic-compatible format → inherit from AnthropicCompatibleProvider
+ # 3. Custom format → inherit from GenericHTTPProvider
+ ```
+
+ ### Step 2: Define Provider Configuration
+
+ ```python
+ class MyProvider(OpenAICompatibleProvider):
+     config = ProviderConfig(
+         name="My Provider",
+         api_key_env="MY_PROVIDER_API_KEY",
+         base_url="https://api.myprovider.com/v1/chat/completions",
+     )
+ ```
+
+ ### Step 3: Override Template Methods (If Needed)
+
+ ```python
+ class MyProvider(OpenAICompatibleProvider):
+     # Override only what's needed
+
+     def _build_headers(self) -> dict[str, str]:
+         """Custom header handling."""
+         headers = super()._build_headers()
+         # Add provider-specific headers
+         return headers
+
+     def _get_api_url(self, model: str | None = None) -> str:
+         """Custom URL construction."""
+         if model is None:
+             return super()._get_api_url(model)
+         return f"https://custom.endpoint/{model}/chat"
+ ```
+
+ ### Step 4: Create Lazy Getter and Decorated Function
+
+ ```python
+ def _get_my_provider() -> MyProvider:
+     """Lazy getter to initialize provider at call time."""
+     return MyProvider(MyProvider.config)
+
+ @handle_provider_errors("My Provider")
+ def call_my_provider_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+     """Call My Provider API."""
+     provider = _get_my_provider()
+     return provider.generate(model=model, messages=messages, temperature=temperature, max_tokens=max_tokens)
+ ```
+
+ ### Step 5: Export in `__init__.py`
+
+ ```python
+ # In src/gac/providers/__init__.py
+ from .my_provider import call_my_provider_api
+
+ # Add to PROVIDER_REGISTRY
+ PROVIDER_REGISTRY = {
+     ...
+     "my-provider": call_my_provider_api,
+     ...
+ }
+
+ # Add to __all__
+ __all__ = [
+     ...
+     "call_my_provider_api",
+     ...
+ ]
+ ```
+
+ ## Common Customization Patterns
+
+ ### Pattern 1: Optional API Key (e.g., Ollama, LM Studio)
+
+ ```python
+ def _get_api_key(self) -> str:
+     """Get optional API key."""
+     api_key = os.getenv(self.config.api_key_env)
+     if not api_key:
+         return ""  # Optional
+     return api_key
+ ```
+
+ ### Pattern 2: Custom URL Construction (e.g., Azure OpenAI)
+
+ ```python
+ def _get_api_url(self, model: str | None = None) -> str:
+     """Build custom URL with model in path."""
+     if model is None:
+         return super()._get_api_url(model)
+     return f"{self.endpoint}/openai/deployments/{model}/chat/completions?api-version={self.api_version}"
+ ```
+
+ ### Pattern 3: Alternative Environment Variables (e.g., Streamlake)
+
+ ```python
+ def _get_api_key(self) -> str:
+     """Try primary key, then fallback."""
+     api_key = os.getenv(self.config.api_key_env)
+     if api_key:
+         return api_key
+     # Fallback to alternative
+     api_key = os.getenv("ALTERNATIVE_KEY_ENV")
+     if api_key:
+         return api_key
+     raise AIError.authentication_error("No API key found")
+ ```
+
+ ### Pattern 4: Model Preprocessing (e.g., Synthetic - adding prefixes)
+
+ ```python
+ def _build_request_body(self, messages: list[dict], temperature: float, max_tokens: int, model: str, **kwargs) -> dict[str, Any]:
+     """Preprocess model name."""
+     data = super()._build_request_body(messages, temperature, max_tokens, model, **kwargs)
+     # Add "hf:" prefix for HuggingFace models
+     data["model"] = f"hf:{model}"
+     return data
+ ```
+
+ ### Pattern 5: Custom Response Parsing (e.g., LM Studio with text field fallback)
+
+ ```python
+ def _parse_response(self, response: dict[str, Any]) -> str:
+     """Parse response with fallback."""
+     # Try standard OpenAI format first
+     choices = response.get("choices")
+     if choices:
+         content = choices[0].get("message", {}).get("content")
+         if content:
+             return content
+
+         # Fallback to text field
+         content = choices[0].get("text")
+         if content:
+             return content
+
+     raise AIError.model_error("No content found")
+ ```
+
+ ### Pattern 6: System Message Handling (e.g., Claude Code)
+
+ ```python
+ def _build_request_body(self, messages: list[dict], temperature: float, max_tokens: int, model: str, **kwargs) -> dict[str, Any]:
+     """Extract and handle system messages."""
+     anthropic_messages = []
+     system_instructions = ""
+
+     for msg in messages:
+         if msg["role"] == "system":
+             system_instructions = msg["content"]
+         else:
+             anthropic_messages.append(msg)
+
+     # Move system instructions to first user message
+     if system_instructions and anthropic_messages:
+         anthropic_messages[0]["content"] = f"{system_instructions}\n\n{anthropic_messages[0]['content']}"
+
+     return {
+         "messages": anthropic_messages,
+         "system": "REQUIRED_FIXED_MESSAGE",
+         "temperature": temperature,
+         "max_tokens": max_tokens,
+         **kwargs,
+     }
+ ```
+
+ ### Pattern 7: Async Operations (e.g., Replicate with polling)
+
+ ```python
+ def generate(self, model: str, messages: list[dict], temperature: float = 0.7, max_tokens: int = 1024, **kwargs) -> str:
+     """Override for async/polling operations."""
+     # Create prediction
+     response = httpx.post(url, json=body, headers=headers, ...)
+     prediction_id = response.json()["id"]
+
+     # Poll for completion
+     while elapsed_time < max_wait_time:
+         status_response = httpx.get(f"{url}/{prediction_id}", headers=headers, ...)
+         status_data = status_response.json()
+
+         if status_data["status"] == "succeeded":
+             return status_data["output"]
+         elif status_data["status"] == "failed":
+             raise AIError.model_error("Prediction failed")
+
+         time.sleep(wait_interval)
+         elapsed_time += wait_interval
+
+     raise AIError.timeout_error("Prediction timed out")
+ ```
+
+ ## Error Handling
+
+ All providers use the `@handle_provider_errors` decorator to normalize error handling:
+
+ ```python
+ from gac.providers.error_handler import handle_provider_errors
+
+ @handle_provider_errors("My Provider")
+ def call_my_provider_api(...) -> str:
+     # Errors are automatically caught and converted to AIError types
+     pass
+ ```
+
+ **Error Mapping:**
+
+ - HTTP 401 → `AIError.authentication_error()`
+ - HTTP 429 → `AIError.rate_limit_error()`
+ - Timeout → `AIError.timeout_error()`
+ - Connection error → `AIError.connection_error()`
+ - Other → `AIError.model_error()`
+
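+ This mapping can be pictured as a decorator along the following lines. The sketch is illustrative only; `gac/providers/error_handler.py` is the actual implementation and may differ in detail:
+
+ ```python
+ import functools
+
+ import httpx
+
+ from gac.errors import AIError
+
+
+ def handle_provider_errors_sketch(provider_name: str):
+     """Illustrative decorator applying the error mapping listed above."""
+     def decorator(func):
+         @functools.wraps(func)
+         def wrapper(*args, **kwargs):
+             try:
+                 return func(*args, **kwargs)
+             except AIError:
+                 raise  # already a specific AIError - pass it through untouched
+             except httpx.HTTPStatusError as e:
+                 if e.response.status_code == 401:
+                     raise AIError.authentication_error(f"{provider_name}: authentication failed") from e
+                 if e.response.status_code == 429:
+                     raise AIError.rate_limit_error(f"{provider_name}: rate limit exceeded") from e
+                 raise AIError.model_error(f"{provider_name}: HTTP {e.response.status_code}") from e
+             except httpx.TimeoutException as e:
+                 raise AIError.timeout_error(f"{provider_name}: request timed out") from e
+             except httpx.ConnectError as e:
+                 raise AIError.connection_error(f"{provider_name}: connection failed") from e
+             except Exception as e:
+                 raise AIError.model_error(f"{provider_name}: {e}") from e
+         return wrapper
+     return decorator
+ ```
+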
+ ## Testing Providers
+
+ Each provider has comprehensive tests in `tests/providers/test_<provider>.py`.
+
+ ### Test Structure
+
+ ```python
+ class TestProviderImports:
+     """Test imports."""
+     def test_import_provider(self): ...
+
+ class TestProviderMocked(BaseProviderTest):
+     """Standard mocked tests (inherited from BaseProviderTest)."""
+     @property
+     def provider_name(self) -> str: return "my-provider"
+
+     @property
+     def provider_module(self) -> str: return "gac.providers.my_provider"
+
+     @property
+     def api_function(self): return call_my_provider_api
+
+     @property
+     def api_key_env_var(self) -> str: return "MY_PROVIDER_API_KEY"
+
+ class TestProviderEdgeCases:
+     """Provider-specific edge cases."""
+     def test_custom_behavior(self): ...
+
+ @pytest.mark.integration
+ class TestProviderIntegration:
+     """Real API tests (skipped by default)."""
+     def test_real_api_call(self): ...
+ ```
+
+ ## SSL Verification
+
+ All HTTP requests use GAC's SSL verification settings via `get_ssl_verify()`:
+
+ ```python
+ from gac.utils import get_ssl_verify
+
+ response = httpx.post(url, ..., verify=get_ssl_verify())
+ ```
+
+ This respects environment configurations for custom certificates.
+
+ ## Timeout Configuration
+
+ All providers use `ProviderDefaults.HTTP_TIMEOUT` for consistency:
+
+ ```python
+ from gac.constants import ProviderDefaults
+
+ config = ProviderConfig(
+     name="My Provider",
+     api_key_env="MY_KEY",
+     base_url="https://api.example.com",
+     timeout=ProviderDefaults.HTTP_TIMEOUT,  # Default: 120 seconds
+ )
+ ```
+
+ ## Provider-Specific Documentation
+
+ See individual provider files for detailed documentation:
+
+ - `openai.py` - OpenAI API reference
+ - `anthropic.py` - Anthropic API reference
+ - `azure_openai.py` - Azure OpenAI configuration
+ - `gemini.py` - Google Gemini custom format
+ - `replicate.py` - Async prediction handling
+ - And others...
+
+ ## Contributing
+
+ When adding a new provider:
+
+ 1. Follow the architecture and patterns above
+ 2. Write comprehensive tests (unit, mocked, integration)
+ 3. Update `__init__.py` exports
+ 4. Document the provider in its docstring
+ 5. Run `mypy` for type checking: `uv run -- mypy src/gac`
+ 6. Run tests: `uv run -- pytest tests/providers/test_<provider>.py -v`
+ 7. Update this README if adding new patterns
+
+ ## Best Practices
+
+ 1. **Lazy Initialization**: Use getter functions to initialize providers at call time, not import time. This allows tests to mock environment variables.
+
+ 2. **Error Preservation**: Always re-raise `AIError` exceptions without wrapping them. The error decorator handles generic exceptions.
+
+ 3. **Optional Parameters**: Match superclass signatures exactly, especially for `_get_api_url(model: str | None = None)`.
+
+ 4. **Response Validation**: Always validate responses for null/empty content before returning.
+
+ 5. **Configuration Over Code**: Use environment variables and `ProviderConfig` rather than hardcoding values.
+
+ 6. **Documentation**: Include docstrings with API endpoint references and required environment variables.
+
+ 7. **Test Coverage**: Aim for 100% test coverage of provider logic.
gac/providers/__init__.py CHANGED
@@ -1,88 +1,80 @@
- """AI provider implementations for commit message generation."""
+ """AI provider implementations for commit message generation.

- from .anthropic import call_anthropic_api
- from .azure_openai import call_azure_openai_api
- from .cerebras import call_cerebras_api
- from .chutes import call_chutes_api
- from .claude_code import call_claude_code_api
- from .custom_anthropic import call_custom_anthropic_api
- from .custom_openai import call_custom_openai_api
- from .deepseek import call_deepseek_api
- from .fireworks import call_fireworks_api
- from .gemini import call_gemini_api
- from .groq import call_groq_api
- from .kimi_coding import call_kimi_coding_api
- from .lmstudio import call_lmstudio_api
- from .minimax import call_minimax_api
- from .mistral import call_mistral_api
- from .moonshot import call_moonshot_api
- from .ollama import call_ollama_api
- from .openai import call_openai_api
- from .openrouter import call_openrouter_api
- from .replicate import call_replicate_api
- from .streamlake import call_streamlake_api
- from .synthetic import call_synthetic_api
- from .together import call_together_api
- from .zai import call_zai_api, call_zai_coding_api
+ This module provides a unified interface to all AI providers. Provider classes
+ are registered and wrapper functions are auto-generated with error handling.

- # Provider registry - single source of truth for all providers
- PROVIDER_REGISTRY = {
-     "anthropic": call_anthropic_api,
-     "azure-openai": call_azure_openai_api,
-     "cerebras": call_cerebras_api,
-     "claude-code": call_claude_code_api,
-     "chutes": call_chutes_api,
-     "custom-anthropic": call_custom_anthropic_api,
-     "custom-openai": call_custom_openai_api,
-     "deepseek": call_deepseek_api,
-     "fireworks": call_fireworks_api,
-     "gemini": call_gemini_api,
-     "groq": call_groq_api,
-     "kimi-coding": call_kimi_coding_api,
-     "lm-studio": call_lmstudio_api,
-     "minimax": call_minimax_api,
-     "mistral": call_mistral_api,
-     "moonshot": call_moonshot_api,
-     "ollama": call_ollama_api,
-     "openai": call_openai_api,
-     "openrouter": call_openrouter_api,
-     "replicate": call_replicate_api,
-     "streamlake": call_streamlake_api,
-     "synthetic": call_synthetic_api,
-     "together": call_together_api,
-     "zai": call_zai_api,
-     "zai-coding": call_zai_coding_api,
- }
+ Usage:
+     from gac.providers import PROVIDER_REGISTRY
+
+     # Get the function for a provider
+     func = PROVIDER_REGISTRY["openai"]
+     result = func(model="gpt-4", messages=[...], temperature=0.7, max_tokens=1000)
+ """
+
+ # Import provider classes for registration
+ from .anthropic import AnthropicProvider
+ from .azure_openai import AzureOpenAIProvider
+ from .cerebras import CerebrasProvider
+ from .chutes import ChutesProvider
+ from .claude_code import ClaudeCodeProvider
+ from .custom_anthropic import CustomAnthropicProvider
+ from .custom_openai import CustomOpenAIProvider
+ from .deepseek import DeepSeekProvider
+ from .fireworks import FireworksProvider
+ from .gemini import GeminiProvider
+ from .groq import GroqProvider
+ from .kimi_coding import KimiCodingProvider
+ from .lmstudio import LMStudioProvider
+ from .minimax import MinimaxProvider
+ from .mistral import MistralProvider
+ from .moonshot import MoonshotProvider
+ from .ollama import OllamaProvider
+ from .openai import OpenAIProvider
+ from .openrouter import OpenRouterProvider
+ from .qwen import QwenProvider
+ from .registry import (
+     PROVIDER_REGISTRY,
+     register_provider,
+ )
+ from .replicate import ReplicateProvider
+ from .streamlake import StreamlakeProvider
+ from .synthetic import SyntheticProvider
+ from .together import TogetherProvider
+ from .zai import ZAICodingProvider, ZAIProvider
+
+ # Register all providers - this populates PROVIDER_REGISTRY automatically
+ register_provider("anthropic", AnthropicProvider)
+ register_provider("azure-openai", AzureOpenAIProvider)
+ register_provider("cerebras", CerebrasProvider)
+ register_provider("chutes", ChutesProvider)
+ register_provider("claude-code", ClaudeCodeProvider)
+ register_provider("custom-anthropic", CustomAnthropicProvider)
+ register_provider("custom-openai", CustomOpenAIProvider)
+ register_provider("deepseek", DeepSeekProvider)
+ register_provider("fireworks", FireworksProvider)
+ register_provider("gemini", GeminiProvider)
+ register_provider("groq", GroqProvider)
+ register_provider("kimi-coding", KimiCodingProvider)
+ register_provider("lm-studio", LMStudioProvider)
+ register_provider("minimax", MinimaxProvider)
+ register_provider("mistral", MistralProvider)
+ register_provider("moonshot", MoonshotProvider)
+ register_provider("ollama", OllamaProvider)
+ register_provider("openai", OpenAIProvider)
+ register_provider("openrouter", OpenRouterProvider)
+ register_provider("qwen", QwenProvider)
+ register_provider("replicate", ReplicateProvider)
+ register_provider("streamlake", StreamlakeProvider)
+ register_provider("synthetic", SyntheticProvider)
+ register_provider("together", TogetherProvider)
+ register_provider("zai", ZAIProvider)
+ register_provider("zai-coding", ZAICodingProvider)

  # List of supported provider names - derived from registry keys
  SUPPORTED_PROVIDERS = sorted(PROVIDER_REGISTRY.keys())

  __all__ = [
-     "call_anthropic_api",
-     "call_azure_openai_api",
-     "call_cerebras_api",
-     "call_chutes_api",
-     "call_claude_code_api",
-     "call_custom_anthropic_api",
-     "call_custom_openai_api",
-     "call_deepseek_api",
-     "call_fireworks_api",
-     "call_gemini_api",
-     "call_groq_api",
-     "call_kimi_coding_api",
-     "call_lmstudio_api",
-     "call_minimax_api",
-     "call_mistral_api",
-     "call_moonshot_api",
-     "call_ollama_api",
-     "call_openai_api",
-     "call_openrouter_api",
-     "call_replicate_api",
-     "call_streamlake_api",
-     "call_synthetic_api",
-     "call_together_api",
-     "call_zai_api",
-     "call_zai_coding_api",
      "PROVIDER_REGISTRY",
      "SUPPORTED_PROVIDERS",
+     "register_provider",
  ]
gac/providers/anthropic.py CHANGED
@@ -1,51 +1,17 @@
- """Anthropic AI provider implementation."""
+ """Anthropic AI provider for gac."""

- import os
+ from gac.providers.base import AnthropicCompatibleProvider, ProviderConfig

- import httpx

- from gac.errors import AIError
+ class AnthropicProvider(AnthropicCompatibleProvider):
+     """Anthropic Claude API provider."""

+     config = ProviderConfig(
+         name="Anthropic",
+         api_key_env="ANTHROPIC_API_KEY",
+         base_url="https://api.anthropic.com/v1",
+     )

- def call_anthropic_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
-     """Call Anthropic API directly."""
-     api_key = os.getenv("ANTHROPIC_API_KEY")
-     if not api_key:
-         raise AIError.authentication_error("ANTHROPIC_API_KEY not found in environment variables")
-
-     url = "https://api.anthropic.com/v1/messages"
-     headers = {"x-api-key": api_key, "anthropic-version": "2023-06-01", "content-type": "application/json"}
-
-     # Convert messages to Anthropic format
-     anthropic_messages = []
-     system_message = ""
-
-     for msg in messages:
-         if msg["role"] == "system":
-             system_message = msg["content"]
-         else:
-             anthropic_messages.append({"role": msg["role"], "content": msg["content"]})
-
-     data = {"model": model, "messages": anthropic_messages, "temperature": temperature, "max_tokens": max_tokens}
-
-     if system_message:
-         data["system"] = system_message
-
-     try:
-         response = httpx.post(url, headers=headers, json=data, timeout=120)
-         response.raise_for_status()
-         response_data = response.json()
-         content = response_data["content"][0]["text"]
-         if content is None:
-             raise AIError.model_error("Anthropic API returned null content")
-         if content == "":
-             raise AIError.model_error("Anthropic API returned empty content")
-         return content
-     except httpx.HTTPStatusError as e:
-         if e.response.status_code == 429:
-             raise AIError.rate_limit_error(f"Anthropic API rate limit exceeded: {e.response.text}") from e
-         raise AIError.model_error(f"Anthropic API error: {e.response.status_code} - {e.response.text}") from e
-     except httpx.TimeoutException as e:
-         raise AIError.timeout_error(f"Anthropic API request timed out: {str(e)}") from e
-     except Exception as e:
-         raise AIError.model_error(f"Error calling Anthropic API: {str(e)}") from e
+     def _get_api_url(self, model: str | None = None) -> str:
+         """Get Anthropic API URL with /messages endpoint."""
+         return f"{self.config.base_url}/messages"
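
With the refactor, every provider is reached the same way through the registry. A usage sketch based on the registry interface documented above (the model name is illustrative and `ANTHROPIC_API_KEY` must be set):

```python
from gac.providers import PROVIDER_REGISTRY

# "anthropic" now resolves to an auto-generated wrapper around AnthropicProvider
call_anthropic = PROVIDER_REGISTRY["anthropic"]
commit_message = call_anthropic(
    model="claude-sonnet-4-5",  # illustrative model name from the README examples
    messages=[{"role": "user", "content": "Write a commit message for the staged diff."}],
    temperature=0.7,
    max_tokens=256,
)
print(commit_message)
```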