gac 3.6.0__py3-none-any.whl → 3.10.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gac/__init__.py +4 -6
- gac/__version__.py +1 -1
- gac/ai_utils.py +59 -43
- gac/auth_cli.py +181 -36
- gac/cli.py +26 -9
- gac/commit_executor.py +59 -0
- gac/config.py +81 -2
- gac/config_cli.py +19 -7
- gac/constants/__init__.py +34 -0
- gac/constants/commit.py +63 -0
- gac/constants/defaults.py +40 -0
- gac/constants/file_patterns.py +110 -0
- gac/constants/languages.py +119 -0
- gac/diff_cli.py +0 -22
- gac/errors.py +8 -2
- gac/git.py +6 -6
- gac/git_state_validator.py +193 -0
- gac/grouped_commit_workflow.py +458 -0
- gac/init_cli.py +2 -1
- gac/interactive_mode.py +179 -0
- gac/language_cli.py +0 -1
- gac/main.py +231 -926
- gac/model_cli.py +67 -11
- gac/model_identifier.py +70 -0
- gac/oauth/__init__.py +26 -0
- gac/oauth/claude_code.py +89 -22
- gac/oauth/qwen_oauth.py +327 -0
- gac/oauth/token_store.py +81 -0
- gac/oauth_retry.py +161 -0
- gac/postprocess.py +155 -0
- gac/prompt.py +21 -479
- gac/prompt_builder.py +88 -0
- gac/providers/README.md +437 -0
- gac/providers/__init__.py +70 -78
- gac/providers/anthropic.py +12 -46
- gac/providers/azure_openai.py +48 -88
- gac/providers/base.py +329 -0
- gac/providers/cerebras.py +10 -33
- gac/providers/chutes.py +16 -62
- gac/providers/claude_code.py +64 -87
- gac/providers/custom_anthropic.py +51 -81
- gac/providers/custom_openai.py +29 -83
- gac/providers/deepseek.py +10 -33
- gac/providers/error_handler.py +139 -0
- gac/providers/fireworks.py +10 -33
- gac/providers/gemini.py +66 -63
- gac/providers/groq.py +10 -58
- gac/providers/kimi_coding.py +19 -55
- gac/providers/lmstudio.py +64 -43
- gac/providers/minimax.py +10 -33
- gac/providers/mistral.py +10 -33
- gac/providers/moonshot.py +10 -33
- gac/providers/ollama.py +56 -33
- gac/providers/openai.py +30 -36
- gac/providers/openrouter.py +15 -52
- gac/providers/protocol.py +71 -0
- gac/providers/qwen.py +64 -0
- gac/providers/registry.py +58 -0
- gac/providers/replicate.py +140 -82
- gac/providers/streamlake.py +26 -46
- gac/providers/synthetic.py +35 -37
- gac/providers/together.py +10 -33
- gac/providers/zai.py +29 -57
- gac/py.typed +0 -0
- gac/security.py +1 -1
- gac/templates/__init__.py +1 -0
- gac/templates/question_generation.txt +60 -0
- gac/templates/system_prompt.txt +224 -0
- gac/templates/user_prompt.txt +28 -0
- gac/utils.py +36 -6
- gac/workflow_context.py +162 -0
- gac/workflow_utils.py +3 -8
- {gac-3.6.0.dist-info → gac-3.10.10.dist-info}/METADATA +6 -4
- gac-3.10.10.dist-info/RECORD +79 -0
- gac/constants.py +0 -321
- gac-3.6.0.dist-info/RECORD +0 -53
- {gac-3.6.0.dist-info → gac-3.10.10.dist-info}/WHEEL +0 -0
- {gac-3.6.0.dist-info → gac-3.10.10.dist-info}/entry_points.txt +0 -0
- {gac-3.6.0.dist-info → gac-3.10.10.dist-info}/licenses/LICENSE +0 -0
gac/providers/claude_code.py
CHANGED
@@ -1,102 +1,79 @@
-"""Claude Code provider
+"""Claude Code API provider for gac.
 
 This provider allows users with Claude Code subscriptions to use their OAuth tokens
 instead of paying for the expensive Anthropic API.
 """
 
-import
-
-import httpx
+from typing import Any
 
 from gac.errors import AIError
+from gac.oauth.claude_code import load_stored_token
+from gac.providers.base import AnthropicCompatibleProvider, ProviderConfig
 
 
-    """
-
-    This provider uses the Claude Code subscription OAuth token instead of the Anthropic API key.
-    It authenticates using Bearer token authentication with the special anthropic-beta header.
-
-    Environment variables:
-        CLAUDE_CODE_ACCESS_TOKEN: OAuth access token from Claude Code authentication
+class ClaudeCodeProvider(AnthropicCompatibleProvider):
+    """Claude Code OAuth provider with special system message requirements."""
 
+    config = ProviderConfig(
+        name="Claude Code",
+        api_key_env="CLAUDE_CODE_ACCESS_TOKEN",
+        base_url="https://api.anthropic.com/v1/messages",
+    )
 
+    def _get_api_key(self) -> str:
+        """Get OAuth token from token store."""
+        token = load_stored_token()
+        if token:
+            return token
 
-    Raises:
-        AIError: If authentication fails or API call fails
-    """
-    access_token = os.getenv("CLAUDE_CODE_ACCESS_TOKEN")
-    if not access_token:
         raise AIError.authentication_error(
-            "
-            "Please authenticate with Claude Code and set this token."
+            "Claude Code authentication not found. Run 'gac auth claude-code login' to authenticate."
         )
 
-        return content
-    except httpx.HTTPStatusError as e:
-        if e.response.status_code == 401:
-            raise AIError.authentication_error(
-                f"Claude Code authentication failed: {e.response.text}. "
-                "Your token may have expired. Please re-authenticate."
-            ) from e
-        if e.response.status_code == 429:
-            raise AIError.rate_limit_error(f"Claude Code API rate limit exceeded: {e.response.text}") from e
-        raise AIError.model_error(f"Claude Code API error: {e.response.status_code} - {e.response.text}") from e
-    except httpx.TimeoutException as e:
-        raise AIError.timeout_error(f"Claude Code API request timed out: {str(e)}") from e
-    except Exception as e:
-        raise AIError.model_error(f"Error calling Claude Code API: {str(e)}") from e
+    def _build_headers(self) -> dict[str, str]:
+        """Build headers with OAuth token and special anthropic-beta."""
+        headers = super()._build_headers()
+        # Replace x-api-key with Bearer token
+        if "x-api-key" in headers:
+            del headers["x-api-key"]
+        headers["Authorization"] = f"Bearer {self.api_key}"
+        # Add special OAuth beta header
+        headers["anthropic-beta"] = "oauth-2025-04-20"
+        return headers
+
+    def _build_request_body(
+        self, messages: list[dict[str, Any]], temperature: float, max_tokens: int, model: str, **kwargs: Any
+    ) -> dict[str, Any]:
+        """Build Anthropic-style request with fixed system message.
+
+        IMPORTANT: Claude Code OAuth tokens require the system message to be EXACTLY
+        "You are Claude Code, Anthropic's official CLI for Claude." with NO additional content.
+        Any other instructions must be moved to the first user message.
+        """
+        # Extract and process messages
+        anthropic_messages = []
+        system_instructions = ""
+
+        for msg in messages:
+            if msg["role"] == "system":
+                system_instructions = msg["content"]
+            else:
+                anthropic_messages.append({"role": msg["role"], "content": msg["content"]})
+
+        # Move any system instructions into the first user message
+        if system_instructions and anthropic_messages:
+            first_user_msg = anthropic_messages[0]
+            first_user_msg["content"] = f"{system_instructions}\n\n{first_user_msg['content']}"
+
+        # Claude Code requires this exact system message
+        system_message = "You are Claude Code, Anthropic's official CLI for Claude."
+
+        body = {
+            "messages": anthropic_messages,
+            "temperature": temperature,
+            "max_tokens": max_tokens,
+            "system": system_message,
+            **kwargs,
+        }
+
+        return body
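
The behavioral core of this rewrite is _build_request_body: any caller-supplied system message is folded into the first user turn, and the request's system field is pinned to the exact string Claude Code OAuth expects. Below is a minimal standalone sketch of that transformation; the helper name build_claude_code_body is illustrative and not part of gac's API.

from typing import Any

# The exact system string Claude Code OAuth requires, per the provider above.
REQUIRED_SYSTEM = "You are Claude Code, Anthropic's official CLI for Claude."


def build_claude_code_body(messages: list[dict[str, Any]], temperature: float, max_tokens: int) -> dict[str, Any]:
    """Illustrative sketch: move system instructions into the first user message."""
    anthropic_messages: list[dict[str, Any]] = []
    system_instructions = ""
    for msg in messages:
        if msg["role"] == "system":
            system_instructions = msg["content"]
        else:
            anthropic_messages.append({"role": msg["role"], "content": msg["content"]})

    if system_instructions and anthropic_messages:
        first = anthropic_messages[0]
        first["content"] = f"{system_instructions}\n\n{first['content']}"

    return {
        "messages": anthropic_messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "system": REQUIRED_SYSTEM,
    }


body = build_claude_code_body(
    [
        {"role": "system", "content": "Write a conventional commit message."},
        {"role": "user", "content": "diff --git a/foo.py b/foo.py"},
    ],
    temperature=0.7,
    max_tokens=256,
)
assert body["system"] == REQUIRED_SYSTEM
assert body["messages"][0]["content"].startswith("Write a conventional commit message.")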

gac/providers/custom_anthropic.py
CHANGED
@@ -7,79 +7,67 @@ while using the same model capabilities as the standard Anthropic provider.
 import json
 import logging
 import os
-
-import httpx
+from typing import Any
 
 from gac.errors import AIError
+from gac.providers.base import AnthropicCompatibleProvider, ProviderConfig
 
 logger = logging.getLogger(__name__)
 
 
-    """
-
-    This provider is useful for:
-    - Anthropic-compatible proxies or gateways
-    - Self-hosted Anthropic-compatible services
-    - Other services implementing the Anthropic Messages API
-
-    Environment variables:
-        CUSTOM_ANTHROPIC_API_KEY: API key for authentication (required)
-        CUSTOM_ANTHROPIC_BASE_URL: Base URL for the API endpoint (required)
-            Example: https://your-proxy.example.com
-        CUSTOM_ANTHROPIC_VERSION: API version header (optional, defaults to '2023-06-01')
+class CustomAnthropicProvider(AnthropicCompatibleProvider):
+    """Custom Anthropic-compatible provider with configurable endpoint and version."""
 
+    config = ProviderConfig(
+        name="Custom Anthropic",
+        api_key_env="CUSTOM_ANTHROPIC_API_KEY",
+        base_url="",  # Will be set in __init__ from environment
+    )
 
+    def __init__(self, config: ProviderConfig):
+        """Initialize the provider with custom configuration from environment variables.
 
+        Environment variables:
+            CUSTOM_ANTHROPIC_API_KEY: API key for authentication (required)
+            CUSTOM_ANTHROPIC_BASE_URL: Base URL for the API endpoint (required)
+            CUSTOM_ANTHROPIC_VERSION: API version header (optional, defaults to '2023-06-01')
+        """
+        # Get base_url from environment and normalize it
+        base_url = os.getenv("CUSTOM_ANTHROPIC_BASE_URL")
+        if not base_url:
+            raise AIError.model_error("CUSTOM_ANTHROPIC_BASE_URL environment variable not set")
 
-    base_url = os.getenv("CUSTOM_ANTHROPIC_BASE_URL")
-    if not base_url:
-        raise AIError.model_error("CUSTOM_ANTHROPIC_BASE_URL environment variable not set")
-
-    api_version = os.getenv("CUSTOM_ANTHROPIC_VERSION", "2023-06-01")
-
-    if "/v1/messages" not in base_url:
         base_url = base_url.rstrip("/")
+        if base_url.endswith("/messages"):
+            pass  # Already a complete endpoint URL
+        elif base_url.endswith("/v1"):
+            base_url = f"{base_url}/messages"
+        else:
+            base_url = f"{base_url}/v1/messages"
 
+        # Update config with the custom base URL
+        config.base_url = base_url
 
-            system_message = msg["content"]
-        else:
-            anthropic_messages.append({"role": msg["role"], "content": msg["content"]})
+        # Store the custom version for use in headers
+        self.custom_version = os.getenv("CUSTOM_ANTHROPIC_VERSION", "2023-06-01")
 
+        super().__init__(config)
 
+    def _build_headers(self) -> dict[str, str]:
+        """Build headers with custom Anthropic version."""
+        headers = super()._build_headers()
+        headers["anthropic-version"] = self.custom_version
+        return headers
 
-        response
-        response.raise_for_status()
-        response_data = response.json()
+    def _parse_response(self, response: dict[str, Any]) -> str:
+        """Parse response with support for extended format (e.g., MiniMax with thinking).
 
+        Handles both:
+        - Standard Anthropic format: content[0].text
+        - Extended format: first item with type="text"
+        """
         try:
-            content_list =
+            content_list = response.get("content", [])
             if not content_list:
                 raise AIError.model_error("Custom Anthropic API returned empty content array")
 
@@ -93,41 +81,23 @@ def call_custom_anthropic_api(model: str, messages: list[dict], temperature: flo
                 content = text_item["text"]
             else:
                 logger.error(
-                    f"Unexpected response format from Custom Anthropic API. Response: {json.dumps(
+                    f"Unexpected response format from Custom Anthropic API. Response: {json.dumps(response)}"
                 )
                 raise AIError.model_error(
                     "Custom Anthropic API returned unexpected format. Expected 'text' field in content array."
                 )
+
+            if content is None:
+                raise AIError.model_error("Custom Anthropic API returned null content")
+            if content == "":
+                raise AIError.model_error("Custom Anthropic API returned empty content")
+            return content
         except AIError:
             raise
         except (KeyError, IndexError, TypeError, StopIteration) as e:
-            logger.error(f"Unexpected response format from Custom Anthropic API. Response: {json.dumps(
+            logger.error(f"Unexpected response format from Custom Anthropic API. Response: {json.dumps(response)}")
             raise AIError.model_error(
                 f"Custom Anthropic API returned unexpected format. Expected Anthropic-compatible response with "
                 f"'content[0].text' or items with type='text', but got: {type(e).__name__}. "
                 f"Check logs for full response structure."
             ) from e
-
-        if content is None:
-            raise AIError.model_error("Custom Anthropic API returned null content")
-        if content == "":
-            raise AIError.model_error("Custom Anthropic API returned empty content")
-        return content
-    except httpx.ConnectError as e:
-        raise AIError.connection_error(f"Custom Anthropic API connection failed: {str(e)}") from e
-    except httpx.HTTPStatusError as e:
-        status_code = e.response.status_code
-        error_text = e.response.text
-
-        if status_code == 401:
-            raise AIError.authentication_error(f"Custom Anthropic API authentication failed: {error_text}") from e
-        elif status_code == 429:
-            raise AIError.rate_limit_error(f"Custom Anthropic API rate limit exceeded: {error_text}") from e
-        else:
-            raise AIError.model_error(f"Custom Anthropic API error: {status_code} - {error_text}") from e
-    except httpx.TimeoutException as e:
-        raise AIError.timeout_error(f"Custom Anthropic API request timed out: {str(e)}") from e
-    except AIError:
-        raise
-    except Exception as e:
-        raise AIError.model_error(f"Error calling Custom Anthropic API: {str(e)}") from e
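
For reference, the base-URL normalization that the new __init__ above performs can be exercised in isolation. A minimal sketch of its three branches follows; the helper name is hypothetical and the example.com URLs are placeholders.

def normalize_anthropic_base_url(base_url: str) -> str:
    """Append /v1/messages unless the URL already ends in /messages or /v1."""
    base_url = base_url.rstrip("/")
    if base_url.endswith("/messages"):
        return base_url  # already a complete endpoint URL
    if base_url.endswith("/v1"):
        return f"{base_url}/messages"
    return f"{base_url}/v1/messages"


assert normalize_anthropic_base_url("https://proxy.example.com") == "https://proxy.example.com/v1/messages"
assert normalize_anthropic_base_url("https://proxy.example.com/v1") == "https://proxy.example.com/v1/messages"
assert normalize_anthropic_base_url("https://proxy.example.com/v1/messages/") == "https://proxy.example.com/v1/messages"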
gac/providers/custom_openai.py
CHANGED
@@ -4,95 +4,41 @@ This provider allows users to specify a custom OpenAI-compatible endpoint
 while using the same model capabilities as the standard OpenAI provider.
 """
 
-import json
-import logging
 import os
-
-import httpx
+from typing import Any
 
 from gac.errors import AIError
+from gac.providers.base import OpenAICompatibleProvider, ProviderConfig
 
-logger = logging.getLogger(__name__)
-
-
-def call_custom_openai_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
-    """Call a custom OpenAI-compatible API endpoint.
-
-    This provider is useful for:
-    - OpenAI-compatible proxies or gateways
-    - Self-hosted OpenAI-compatible services
-    - Other services implementing the OpenAI Chat Completions API
-
-    Environment variables:
-        CUSTOM_OPENAI_API_KEY: API key for authentication (required)
-        CUSTOM_OPENAI_BASE_URL: Base URL for the API endpoint (required)
-            Example: https://your-proxy.example.com/v1
-            Example: https://your-custom-endpoint.com
-
-    Args:
-        model: The model to use (e.g., 'gpt-4', 'gpt-3.5-turbo')
-        messages: List of message dictionaries with 'role' and 'content' keys
-        temperature: Controls randomness (0.0-1.0)
-        max_tokens: Maximum tokens in the response
-
-    Returns:
-        The generated commit message
-
-    Raises:
-        AIError: If authentication fails, API errors occur, or response is invalid
-    """
-    api_key = os.getenv("CUSTOM_OPENAI_API_KEY")
-    if not api_key:
-        raise AIError.authentication_error("CUSTOM_OPENAI_API_KEY environment variable not set")
-
-    base_url = os.getenv("CUSTOM_OPENAI_BASE_URL")
-    if not base_url:
-        raise AIError.model_error("CUSTOM_OPENAI_BASE_URL environment variable not set")
-
-    if "/chat/completions" not in base_url:
-        base_url = base_url.rstrip("/")
-        url = f"{base_url}/chat/completions"
-    else:
-        url = base_url
-
-    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
-
-    data = {"model": model, "messages": messages, "temperature": temperature, "max_completion_tokens": max_tokens}
 
-        response.raise_for_status()
-        response_data = response.json()
+class CustomOpenAIProvider(OpenAICompatibleProvider):
+    """Custom OpenAI-compatible provider with configurable base URL."""
 
-                f"Custom OpenAI API returned unexpected format. Expected OpenAI-compatible response with "
-                f"'choices[0].message.content', but got: {type(e).__name__}. Check logs for full response structure."
-            ) from e
+    config = ProviderConfig(
+        name="Custom OpenAI",
+        api_key_env="CUSTOM_OPENAI_API_KEY",
+        base_url="",  # Will be set in __init__
+    )
 
-    except httpx.ConnectError as e:
-        raise AIError.connection_error(f"Custom OpenAI API connection failed: {str(e)}") from e
-    except httpx.HTTPStatusError as e:
-        status_code = e.response.status_code
-        error_text = e.response.text
+    def __init__(self, config: ProviderConfig):
+        """Initialize with base URL from environment."""
+        base_url = os.getenv("CUSTOM_OPENAI_BASE_URL")
+        if not base_url:
+            raise AIError.model_error("CUSTOM_OPENAI_BASE_URL environment variable not set")
 
-        if
-            raise AIError.rate_limit_error(f"Custom OpenAI API rate limit exceeded: {error_text}") from e
+        if "/chat/completions" not in base_url:
+            base_url = base_url.rstrip("/")
+            url = f"{base_url}/chat/completions"
         else:
+            url = base_url
+
+        config.base_url = url
+        super().__init__(config)
+
+    def _build_request_body(
+        self, messages: list[dict[str, Any]], temperature: float, max_tokens: int, model: str, **kwargs: Any
+    ) -> dict[str, Any]:
+        """Build request body with max_completion_tokens instead of max_tokens."""
+        data = super()._build_request_body(messages, temperature, max_tokens, model, **kwargs)
+        data["max_completion_tokens"] = data.pop("max_tokens")
+        return data
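
The only request-body change for the custom OpenAI provider is the rename of max_tokens to max_completion_tokens. As a quick standalone sketch of that rename (hypothetical helper name, sample payload):

def to_custom_openai_body(body: dict) -> dict:
    """Copy an OpenAI-style body and rename max_tokens to max_completion_tokens."""
    body = dict(body)  # leave the caller's dict untouched
    body["max_completion_tokens"] = body.pop("max_tokens")
    return body


result = to_custom_openai_body({"model": "gpt-4", "messages": [], "temperature": 0.7, "max_tokens": 256})
assert result == {"model": "gpt-4", "messages": [], "temperature": 0.7, "max_completion_tokens": 256}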
gac/providers/deepseek.py
CHANGED
@@ -1,38 +1,15 @@
 """DeepSeek API provider for gac."""
 
-import
+from gac.providers.base import OpenAICompatibleProvider, ProviderConfig
 
-import httpx
 
+class DeepSeekProvider(OpenAICompatibleProvider):
+    config = ProviderConfig(
+        name="DeepSeek",
+        api_key_env="DEEPSEEK_API_KEY",
+        base_url="https://api.deepseek.com/v1",
+    )
 
-    api_key = os.getenv("DEEPSEEK_API_KEY")
-    if not api_key:
-        raise AIError.authentication_error("DEEPSEEK_API_KEY not found in environment variables")
-
-    url = "https://api.deepseek.com/v1/chat/completions"
-    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
-
-    data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
-
-    try:
-        response = httpx.post(url, headers=headers, json=data, timeout=120)
-        response.raise_for_status()
-        response_data = response.json()
-        content = response_data["choices"][0]["message"]["content"]
-        if content is None:
-            raise AIError.model_error("DeepSeek API returned null content")
-        if content == "":
-            raise AIError.model_error("DeepSeek API returned empty content")
-        return content
-    except httpx.HTTPStatusError as e:
-        if e.response.status_code == 429:
-            raise AIError.rate_limit_error(f"DeepSeek API rate limit exceeded: {e.response.text}") from e
-        raise AIError.model_error(f"DeepSeek API error: {e.response.status_code} - {e.response.text}") from e
-    except httpx.TimeoutException as e:
-        raise AIError.timeout_error(f"DeepSeek API request timed out: {str(e)}") from e
-    except Exception as e:
-        raise AIError.model_error(f"Error calling DeepSeek API: {str(e)}") from e
+    def _get_api_url(self, model: str | None = None) -> str:
+        """Get DeepSeek API URL with /chat/completions endpoint."""
+        return f"{self.config.base_url}/chat/completions"
gac/providers/error_handler.py
ADDED
@@ -0,0 +1,139 @@
+"""Centralized error handling decorator for AI providers.
+
+This module provides the single authoritative location for converting exceptions
+to AIError types. All provider API functions should be decorated with
+@handle_provider_errors to ensure consistent error handling.
+
+Error Classification:
+- httpx.ConnectError -> AIError.connection_error
+- httpx.TimeoutException -> AIError.timeout_error
+- httpx.HTTPStatusError:
+  - 401 -> AIError.authentication_error
+  - 429 -> AIError.rate_limit_error
+  - 404 -> AIError.model_error
+  - 5xx -> AIError.connection_error (server issues)
+  - other -> AIError.model_error
+- Other exceptions: String-based classification as fallback
+"""
+
+import re
+from collections.abc import Callable
+from functools import wraps
+from typing import Any
+
+import httpx
+
+from gac.errors import AIError
+
+MAX_ERROR_RESPONSE_LENGTH = 200
+
+SENSITIVE_PATTERNS = [
+    re.compile(r"sk-[A-Za-z0-9_-]{20,}"),  # OpenAI keys
+    re.compile(r"sk-ant-[A-Za-z0-9_-]{20,}"),  # Anthropic keys
+    re.compile(r"(?:ghp|gho|ghu|ghs|ghr)_[A-Za-z0-9]{20,}"),  # GitHub tokens
+    re.compile(r"AIza[0-9A-Za-z_-]{20,}"),  # Google API keys
+    re.compile(r"(?:sk|pk|rk)_(?:live|test)_[A-Za-z0-9]{20,}"),  # Stripe keys
+    re.compile(r"xox[baprs]-[A-Za-z0-9-]{20,}"),  # Slack tokens
+    re.compile(r"eyJ[A-Za-z0-9_-]+\.eyJ[A-Za-z0-9_-]+\.[A-Za-z0-9_-]+"),  # JWT tokens
+    re.compile(r"Bearer\s+[A-Za-z0-9_-]{20,}"),  # Bearer tokens
+    re.compile(r"[A-Za-z0-9]{32,}"),  # Generic long alphanumeric tokens
+]
+
+
+def sanitize_error_response(text: str) -> str:
+    """Sanitize API error response text for safe logging/display.
+
+    This function:
+    1. Redacts potential API keys and tokens
+    2. Truncates to MAX_ERROR_RESPONSE_LENGTH characters
+
+    Args:
+        text: Raw error response text from an API
+
+    Returns:
+        Sanitized text safe for logging/display
+    """
+    if not text:
+        return ""
+
+    sanitized = text
+    for pattern in SENSITIVE_PATTERNS:
+        sanitized = pattern.sub("[REDACTED]", sanitized)
+
+    if len(sanitized) > MAX_ERROR_RESPONSE_LENGTH:
+        sanitized = sanitized[:MAX_ERROR_RESPONSE_LENGTH] + "..."
+
+    return sanitized
+
+
+def handle_provider_errors(provider_name: str) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
+    """Decorator to standardize error handling across all AI providers.
+
+    This is the single authoritative location for error handling. Provider
+    implementations should not catch httpx exceptions - they will be caught
+    and converted to appropriate AIError types by this decorator.
+
+    Args:
+        provider_name: Name of the AI provider for error messages
+
+    Returns:
+        Decorator function that wraps provider functions with standardized error handling
+    """
+
+    def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
+        @wraps(func)
+        def wrapper(*args: Any, **kwargs: Any) -> Any:
+            try:
+                return func(*args, **kwargs)
+            except AIError:
+                # Re-raise AIError exceptions as-is without wrapping
+                raise
+            except httpx.ConnectError as e:
+                raise AIError.connection_error(f"{provider_name}: {e}") from e
+            except httpx.TimeoutException as e:
+                raise AIError.timeout_error(f"{provider_name}: {e}") from e
+            except httpx.HTTPStatusError as e:
+                sanitized_response = sanitize_error_response(e.response.text)
+                if e.response.status_code == 401:
+                    raise AIError.authentication_error(
+                        f"{provider_name}: Invalid API key or authentication failed"
+                    ) from e
+                elif e.response.status_code == 429:
+                    raise AIError.rate_limit_error(
+                        f"{provider_name}: Rate limit exceeded. Please try again later."
+                    ) from e
+                elif e.response.status_code == 404:
+                    raise AIError.model_error(f"{provider_name}: Model not found or endpoint not available") from e
+                elif e.response.status_code >= 500:
+                    raise AIError.connection_error(
+                        f"{provider_name}: Server error (HTTP {e.response.status_code})"
+                    ) from e
+                else:
+                    raise AIError.model_error(
+                        f"{provider_name}: HTTP {e.response.status_code}: {sanitized_response}"
+                    ) from e
+            except Exception as e:
+                # Handle any other unexpected exceptions with string-based classification
+                error_str = str(e).lower()
+                if "authentication" in error_str or "unauthorized" in error_str:
+                    raise AIError.authentication_error(f"Error calling {provider_name} API: {e}") from e
+                elif "rate limit" in error_str or "quota" in error_str:
+                    raise AIError.rate_limit_error(f"Error calling {provider_name} API: {e}") from e
+                elif "timeout" in error_str:
+                    raise AIError.timeout_error(f"Error calling {provider_name} API: {e}") from e
+                elif "connection" in error_str:
+                    raise AIError.connection_error(f"Error calling {provider_name} API: {e}") from e
+                else:
+                    raise AIError.model_error(f"Error calling {provider_name} API: {e}") from e
+
+        return wrapper
+
+    return decorator
+
+
+__all__ = [
+    "MAX_ERROR_RESPONSE_LENGTH",
+    "SENSITIVE_PATTERNS",
+    "handle_provider_errors",
+    "sanitize_error_response",
+]
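
A short usage sketch of the new module: the decorator wraps a provider call and maps httpx failures onto AIError types, while sanitize_error_response redacts token-like strings before they reach logs. The provider function below is hypothetical; only the decorator and the sanitizer come from the module shown above.

import httpx

from gac.providers.error_handler import handle_provider_errors, sanitize_error_response


@handle_provider_errors("Example Provider")
def call_example_api(url: str) -> str:
    # Any ConnectError, TimeoutException, or HTTPStatusError raised here is
    # converted into the corresponding AIError by the decorator.
    response = httpx.get(url, timeout=30)
    response.raise_for_status()
    return response.text


# Token-like strings are replaced with [REDACTED] and long bodies are truncated.
print(sanitize_error_response("invalid key sk-" + "a" * 40))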