gac-3.10.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of gac might be problematic.
- gac/__init__.py +15 -0
- gac/__version__.py +3 -0
- gac/ai.py +109 -0
- gac/ai_utils.py +246 -0
- gac/auth_cli.py +214 -0
- gac/cli.py +218 -0
- gac/commit_executor.py +62 -0
- gac/config.py +125 -0
- gac/config_cli.py +95 -0
- gac/constants.py +328 -0
- gac/diff_cli.py +159 -0
- gac/errors.py +231 -0
- gac/git.py +372 -0
- gac/git_state_validator.py +184 -0
- gac/grouped_commit_workflow.py +423 -0
- gac/init_cli.py +70 -0
- gac/interactive_mode.py +182 -0
- gac/language_cli.py +377 -0
- gac/main.py +476 -0
- gac/model_cli.py +430 -0
- gac/oauth/__init__.py +27 -0
- gac/oauth/claude_code.py +464 -0
- gac/oauth/qwen_oauth.py +327 -0
- gac/oauth/token_store.py +81 -0
- gac/preprocess.py +511 -0
- gac/prompt.py +878 -0
- gac/prompt_builder.py +88 -0
- gac/providers/README.md +437 -0
- gac/providers/__init__.py +80 -0
- gac/providers/anthropic.py +17 -0
- gac/providers/azure_openai.py +57 -0
- gac/providers/base.py +329 -0
- gac/providers/cerebras.py +15 -0
- gac/providers/chutes.py +25 -0
- gac/providers/claude_code.py +79 -0
- gac/providers/custom_anthropic.py +103 -0
- gac/providers/custom_openai.py +44 -0
- gac/providers/deepseek.py +15 -0
- gac/providers/error_handler.py +139 -0
- gac/providers/fireworks.py +15 -0
- gac/providers/gemini.py +90 -0
- gac/providers/groq.py +15 -0
- gac/providers/kimi_coding.py +27 -0
- gac/providers/lmstudio.py +80 -0
- gac/providers/minimax.py +15 -0
- gac/providers/mistral.py +15 -0
- gac/providers/moonshot.py +15 -0
- gac/providers/ollama.py +73 -0
- gac/providers/openai.py +32 -0
- gac/providers/openrouter.py +21 -0
- gac/providers/protocol.py +71 -0
- gac/providers/qwen.py +64 -0
- gac/providers/registry.py +58 -0
- gac/providers/replicate.py +156 -0
- gac/providers/streamlake.py +31 -0
- gac/providers/synthetic.py +40 -0
- gac/providers/together.py +15 -0
- gac/providers/zai.py +31 -0
- gac/py.typed +0 -0
- gac/security.py +293 -0
- gac/utils.py +401 -0
- gac/workflow_utils.py +217 -0
- gac-3.10.3.dist-info/METADATA +283 -0
- gac-3.10.3.dist-info/RECORD +67 -0
- gac-3.10.3.dist-info/WHEEL +4 -0
- gac-3.10.3.dist-info/entry_points.txt +2 -0
- gac-3.10.3.dist-info/licenses/LICENSE +16 -0
gac/providers/error_handler.py
ADDED

@@ -0,0 +1,139 @@

"""Centralized error handling decorator for AI providers.

This module provides the single authoritative location for converting exceptions
to AIError types. All provider API functions should be decorated with
@handle_provider_errors to ensure consistent error handling.

Error Classification:
- httpx.ConnectError -> AIError.connection_error
- httpx.TimeoutException -> AIError.timeout_error
- httpx.HTTPStatusError:
  - 401 -> AIError.authentication_error
  - 429 -> AIError.rate_limit_error
  - 404 -> AIError.model_error
  - 5xx -> AIError.connection_error (server issues)
  - other -> AIError.model_error
- Other exceptions: String-based classification as fallback
"""

import re
from collections.abc import Callable
from functools import wraps
from typing import Any

import httpx

from gac.errors import AIError

MAX_ERROR_RESPONSE_LENGTH = 200

SENSITIVE_PATTERNS = [
    re.compile(r"sk-[A-Za-z0-9_-]{20,}"),  # OpenAI keys
    re.compile(r"sk-ant-[A-Za-z0-9_-]{20,}"),  # Anthropic keys
    re.compile(r"(?:ghp|gho|ghu|ghs|ghr)_[A-Za-z0-9]{20,}"),  # GitHub tokens
    re.compile(r"AIza[0-9A-Za-z_-]{20,}"),  # Google API keys
    re.compile(r"(?:sk|pk|rk)_(?:live|test)_[A-Za-z0-9]{20,}"),  # Stripe keys
    re.compile(r"xox[baprs]-[A-Za-z0-9-]{20,}"),  # Slack tokens
    re.compile(r"eyJ[A-Za-z0-9_-]+\.eyJ[A-Za-z0-9_-]+\.[A-Za-z0-9_-]+"),  # JWT tokens
    re.compile(r"Bearer\s+[A-Za-z0-9_-]{20,}"),  # Bearer tokens
    re.compile(r"[A-Za-z0-9]{32,}"),  # Generic long alphanumeric tokens
]


def sanitize_error_response(text: str) -> str:
    """Sanitize API error response text for safe logging/display.

    This function:
    1. Redacts potential API keys and tokens
    2. Truncates to MAX_ERROR_RESPONSE_LENGTH characters

    Args:
        text: Raw error response text from an API

    Returns:
        Sanitized text safe for logging/display
    """
    if not text:
        return ""

    sanitized = text
    for pattern in SENSITIVE_PATTERNS:
        sanitized = pattern.sub("[REDACTED]", sanitized)

    if len(sanitized) > MAX_ERROR_RESPONSE_LENGTH:
        sanitized = sanitized[:MAX_ERROR_RESPONSE_LENGTH] + "..."

    return sanitized


def handle_provider_errors(provider_name: str) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
    """Decorator to standardize error handling across all AI providers.

    This is the single authoritative location for error handling. Provider
    implementations should not catch httpx exceptions - they will be caught
    and converted to appropriate AIError types by this decorator.

    Args:
        provider_name: Name of the AI provider for error messages

    Returns:
        Decorator function that wraps provider functions with standardized error handling
    """

    def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
        @wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            try:
                return func(*args, **kwargs)
            except AIError:
                # Re-raise AIError exceptions as-is without wrapping
                raise
            except httpx.ConnectError as e:
                raise AIError.connection_error(f"{provider_name}: {e}") from e
            except httpx.TimeoutException as e:
                raise AIError.timeout_error(f"{provider_name}: {e}") from e
            except httpx.HTTPStatusError as e:
                sanitized_response = sanitize_error_response(e.response.text)
                if e.response.status_code == 401:
                    raise AIError.authentication_error(
                        f"{provider_name}: Invalid API key or authentication failed"
                    ) from e
                elif e.response.status_code == 429:
                    raise AIError.rate_limit_error(
                        f"{provider_name}: Rate limit exceeded. Please try again later."
                    ) from e
                elif e.response.status_code == 404:
                    raise AIError.model_error(f"{provider_name}: Model not found or endpoint not available") from e
                elif e.response.status_code >= 500:
                    raise AIError.connection_error(
                        f"{provider_name}: Server error (HTTP {e.response.status_code})"
                    ) from e
                else:
                    raise AIError.model_error(
                        f"{provider_name}: HTTP {e.response.status_code}: {sanitized_response}"
                    ) from e
            except Exception as e:
                # Handle any other unexpected exceptions with string-based classification
                error_str = str(e).lower()
                if "authentication" in error_str or "unauthorized" in error_str:
                    raise AIError.authentication_error(f"Error calling {provider_name} API: {e}") from e
                elif "rate limit" in error_str or "quota" in error_str:
                    raise AIError.rate_limit_error(f"Error calling {provider_name} API: {e}") from e
                elif "timeout" in error_str:
                    raise AIError.timeout_error(f"Error calling {provider_name} API: {e}") from e
                elif "connection" in error_str:
                    raise AIError.connection_error(f"Error calling {provider_name} API: {e}") from e
                else:
                    raise AIError.model_error(f"Error calling {provider_name} API: {e}") from e

        return wrapper

    return decorator


__all__ = [
    "MAX_ERROR_RESPONSE_LENGTH",
    "SENSITIVE_PATTERNS",
    "handle_provider_errors",
    "sanitize_error_response",
]

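The decorator is the intended integration point for every provider call. A minimal usage sketch follows; `fetch_completion`, its URL, and the payload shape are hypothetical, while the decorator and the httpx exception mapping come from the module above.

import httpx

from gac.providers.error_handler import handle_provider_errors


@handle_provider_errors("ExampleProvider")
def fetch_completion(prompt: str) -> str:
    # Hypothetical endpoint. raise_for_status() turns 401/429/404/5xx into
    # httpx.HTTPStatusError, which the decorator maps to the matching AIError.
    response = httpx.post("https://api.example.invalid/v1/complete", json={"prompt": prompt})
    response.raise_for_status()
    return response.json()["text"]
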
gac/providers/fireworks.py
ADDED

@@ -0,0 +1,15 @@

"""Fireworks AI API provider for gac."""

from gac.providers.base import OpenAICompatibleProvider, ProviderConfig


class FireworksProvider(OpenAICompatibleProvider):
    config = ProviderConfig(
        name="Fireworks",
        api_key_env="FIREWORKS_API_KEY",
        base_url="https://api.fireworks.ai/inference/v1",
    )

    def _get_api_url(self, model: str | None = None) -> str:
        """Get Fireworks API URL with /chat/completions endpoint."""
        return f"{self.config.base_url}/chat/completions"

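Fireworks is one of several thin OpenAI-compatible providers in this release; Groq, MiniMax, Mistral, and Moonshot below follow the same shape. A sketch of what adding another such provider would look like, assuming the same ProviderConfig fields; the "Acme" name, env var, and URL are invented for illustration:

from gac.providers.base import OpenAICompatibleProvider, ProviderConfig


class AcmeProvider(OpenAICompatibleProvider):
    # Hypothetical provider: only the config values and URL suffix differ
    # from the built-in OpenAI-compatible providers.
    config = ProviderConfig(
        name="Acme",
        api_key_env="ACME_API_KEY",
        base_url="https://api.acme.example/v1",
    )

    def _get_api_url(self, model: str | None = None) -> str:
        return f"{self.config.base_url}/chat/completions"
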
gac/providers/gemini.py
ADDED

@@ -0,0 +1,90 @@

"""Gemini AI provider implementation."""

from typing import Any

from gac.errors import AIError
from gac.providers.base import GenericHTTPProvider, ProviderConfig


class GeminiProvider(GenericHTTPProvider):
    """Google Gemini provider with custom format and role conversion."""

    config = ProviderConfig(
        name="Gemini",
        api_key_env="GEMINI_API_KEY",
        base_url="https://generativelanguage.googleapis.com/v1beta",
    )

    def _get_api_url(self, model: str | None = None) -> str:
        """Build Gemini URL with model in path."""
        if model is None:
            return super()._get_api_url(model)
        return f"{self.config.base_url}/models/{model}:generateContent"

    def _build_headers(self) -> dict[str, str]:
        """Build headers with Google API key."""
        headers = super()._build_headers()
        # Remove any Authorization header
        if "Authorization" in headers:
            del headers["Authorization"]
        headers["x-goog-api-key"] = self.api_key
        return headers

    def _build_request_body(
        self, messages: list[dict[str, Any]], temperature: float, max_tokens: int, model: str, **kwargs: Any
    ) -> dict[str, Any]:
        """Build Gemini-format request with role conversion and system instruction extraction."""
        contents: list[dict[str, Any]] = []
        system_instruction_parts: list[dict[str, str]] = []

        for msg in messages:
            role = msg.get("role")
            content_value = msg.get("content")
            content = "" if content_value is None else str(content_value)

            if role == "system":
                if content.strip():
                    system_instruction_parts.append({"text": content})
                continue

            if role == "assistant":
                gemini_role = "model"
            elif role == "user":
                gemini_role = "user"
            else:
                raise AIError.model_error(f"Unsupported message role for Gemini API: {role}")

            contents.append({"role": gemini_role, "parts": [{"text": content}]})

        body: dict[str, Any] = {
            "contents": contents,
            "generationConfig": {"temperature": temperature, "maxOutputTokens": max_tokens},
        }

        if system_instruction_parts:
            body["systemInstruction"] = {"role": "system", "parts": system_instruction_parts}

        return body

    def _parse_response(self, response: dict[str, Any]) -> str:
        """Parse Gemini response format: candidates[0].content.parts[0].text."""
        candidates = response.get("candidates")
        if not candidates:
            raise AIError.model_error("Gemini API response missing candidates")

        candidate = candidates[0]
        if "content" not in candidate or "parts" not in candidate["content"] or not candidate["content"]["parts"]:
            raise AIError.model_error("Gemini API response has invalid content structure")

        parts = candidate["content"]["parts"]
        content_text: str | None = None
        for part in parts:
            if isinstance(part, dict):
                part_text = part.get("text")
                if isinstance(part_text, str) and part_text:
                    content_text = part_text
                    break
        if content_text is None:
            raise AIError.model_error("Gemini API response missing text content")

        return content_text

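For reference, this is the translation _build_request_body performs on a typical chat-format input; the message text and generation values are examples:

messages = [
    {"role": "system", "content": "You write commit messages."},
    {"role": "user", "content": "Summarize this diff."},
]
# With temperature=0.7 and max_tokens=256, the body produced above is:
# {
#     "contents": [{"role": "user", "parts": [{"text": "Summarize this diff."}]}],
#     "generationConfig": {"temperature": 0.7, "maxOutputTokens": 256},
#     "systemInstruction": {"role": "system", "parts": [{"text": "You write commit messages."}]},
# }
# An "assistant" message would be sent with role "model"; any other role raises AIError.
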
gac/providers/groq.py
ADDED

@@ -0,0 +1,15 @@

"""Groq API provider for gac."""

from gac.providers.base import OpenAICompatibleProvider, ProviderConfig


class GroqProvider(OpenAICompatibleProvider):
    config = ProviderConfig(
        name="Groq",
        api_key_env="GROQ_API_KEY",
        base_url="https://api.groq.com/openai/v1",
    )

    def _get_api_url(self, model: str | None = None) -> str:
        """Get Groq API URL with /chat/completions endpoint."""
        return f"{self.config.base_url}/chat/completions"

gac/providers/kimi_coding.py
ADDED

@@ -0,0 +1,27 @@

"""Kimi Coding AI provider implementation."""

from typing import Any

from gac.providers.base import OpenAICompatibleProvider, ProviderConfig


class KimiCodingProvider(OpenAICompatibleProvider):
    """Kimi Coding API provider using OpenAI-compatible format."""

    config = ProviderConfig(
        name="Kimi Coding",
        api_key_env="KIMI_CODING_API_KEY",
        base_url="https://api.kimi.com/coding/v1",
    )

    def _get_api_url(self, model: str | None = None) -> str:
        """Get Kimi Coding API URL with /chat/completions endpoint."""
        return f"{self.config.base_url}/chat/completions"

    def _build_request_body(
        self, messages: list[dict[str, Any]], temperature: float, max_tokens: int, model: str, **kwargs: Any
    ) -> dict[str, Any]:
        """Build request body with max_completion_tokens instead of max_tokens."""
        data = super()._build_request_body(messages, temperature, max_tokens, model, **kwargs)
        data["max_completion_tokens"] = data.pop("max_tokens")
        return data

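The only behavioral difference from the base class is the token-limit key rename; illustratively (assuming the base body carries a max_tokens key, as the unconditional pop implies):

# before the override: body == {..., "max_tokens": 256}
# after the override:  body == {..., "max_completion_tokens": 256}  # same value, renamed key
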
gac/providers/lmstudio.py
ADDED

@@ -0,0 +1,80 @@

"""LM Studio API provider for gac."""

import os
from typing import Any

from gac.providers.base import OpenAICompatibleProvider, ProviderConfig


class LMStudioProvider(OpenAICompatibleProvider):
    """LM Studio provider for local OpenAI-compatible models."""

    config = ProviderConfig(
        name="LM Studio",
        api_key_env="LMSTUDIO_API_KEY",
        base_url="http://localhost:1234/v1",
    )

    def __init__(self, config: ProviderConfig):
        """Initialize with configurable URL from environment."""
        super().__init__(config)
        # Allow URL override via environment variable
        api_url = os.getenv("LMSTUDIO_API_URL", "http://localhost:1234")
        api_url = api_url.rstrip("/")
        self.config.base_url = f"{api_url}/v1"

    def _get_api_key(self) -> str:
        """Get optional API key for LM Studio."""
        api_key = os.getenv(self.config.api_key_env)
        if not api_key:
            return ""  # Optional API key
        return api_key

    def _build_headers(self) -> dict[str, str]:
        """Build headers with optional API key."""
        headers = super()._build_headers()
        # Remove Bearer token from parent if it was added
        if "Authorization" in headers:
            del headers["Authorization"]
        # Add optional Authorization
        api_key = os.getenv("LMSTUDIO_API_KEY")
        if api_key:
            headers["Authorization"] = f"Bearer {api_key}"
        return headers

    def _get_api_url(self, model: str | None = None) -> str:
        """Get LM Studio API URL with /chat/completions endpoint."""
        return f"{self.config.base_url}/chat/completions"

    def _build_request_body(
        self, messages: list[dict[str, Any]], temperature: float, max_tokens: int, model: str, **kwargs: Any
    ) -> dict[str, Any]:
        """Build OpenAI-compatible request body with stream disabled."""
        body = super()._build_request_body(messages, temperature, max_tokens, model, **kwargs)
        body["stream"] = False
        return body

    def _parse_response(self, response: dict[str, Any]) -> str:
        """Parse OpenAI-compatible response with text field fallback."""
        from gac.errors import AIError

        choices = response.get("choices")
        if not choices or not isinstance(choices, list):
            raise AIError.model_error("Invalid response: missing choices")

        # First try message.content (standard OpenAI format)
        choice = choices[0]
        content = choice.get("message", {}).get("content")
        if content is not None:
            if content == "":
                raise AIError.model_error("Invalid response: empty content")
            return content

        # Fallback to text field (some OpenAI-compatible servers use this)
        content = choice.get("text")
        if content is not None:
            if content == "":
                raise AIError.model_error("Invalid response: empty content")
            return content

        raise AIError.model_error("Invalid response: missing content")

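Because both the URL and the key are read from the environment in __init__, pointing gac at a remote LM Studio instance is a matter of exporting two variables before the provider is constructed; the host below is an example:

import os

os.environ["LMSTUDIO_API_URL"] = "http://192.168.1.50:1234"  # example host; the provider appends /v1
os.environ["LMSTUDIO_API_KEY"] = "local-key"  # optional; if unset, no Authorization header is sent
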
gac/providers/minimax.py
ADDED

@@ -0,0 +1,15 @@

"""MiniMax API provider for gac."""

from gac.providers.base import OpenAICompatibleProvider, ProviderConfig


class MinimaxProvider(OpenAICompatibleProvider):
    config = ProviderConfig(
        name="MiniMax",
        api_key_env="MINIMAX_API_KEY",
        base_url="https://api.minimaxi.com/v1",
    )

    def _get_api_url(self, model: str | None = None) -> str:
        """Get MiniMax API URL with /chat/completions endpoint."""
        return f"{self.config.base_url}/chat/completions"

gac/providers/mistral.py
ADDED

@@ -0,0 +1,15 @@

"""Mistral API provider for gac."""

from gac.providers.base import OpenAICompatibleProvider, ProviderConfig


class MistralProvider(OpenAICompatibleProvider):
    config = ProviderConfig(
        name="Mistral",
        api_key_env="MISTRAL_API_KEY",
        base_url="https://api.mistral.ai/v1",
    )

    def _get_api_url(self, model: str | None = None) -> str:
        """Get Mistral API URL with /chat/completions endpoint."""
        return f"{self.config.base_url}/chat/completions"

gac/providers/moonshot.py
ADDED

@@ -0,0 +1,15 @@

"""Moonshot AI provider for gac."""

from gac.providers.base import OpenAICompatibleProvider, ProviderConfig


class MoonshotProvider(OpenAICompatibleProvider):
    config = ProviderConfig(
        name="Moonshot",
        api_key_env="MOONSHOT_API_KEY",
        base_url="https://api.moonshot.cn/v1",
    )

    def _get_api_url(self, model: str | None = None) -> str:
        """Get Moonshot API URL with /chat/completions endpoint."""
        return f"{self.config.base_url}/chat/completions"

gac/providers/ollama.py
ADDED

@@ -0,0 +1,73 @@

"""Ollama API provider for gac."""

import os
from typing import Any

from gac.providers.base import OpenAICompatibleProvider, ProviderConfig


class OllamaProvider(OpenAICompatibleProvider):
    """Ollama provider for local LLM models with optional authentication."""

    config = ProviderConfig(
        name="Ollama",
        api_key_env="OLLAMA_API_KEY",
        base_url="http://localhost:11434",
    )

    def __init__(self, config: ProviderConfig):
        """Initialize with configurable URL from environment."""
        super().__init__(config)
        # Allow URL override via environment variable
        api_url = os.getenv("OLLAMA_API_URL", "http://localhost:11434")
        self.config.base_url = api_url.rstrip("/")

    def _build_headers(self) -> dict[str, str]:
        """Build headers with optional API key."""
        headers = super()._build_headers()
        api_key = os.getenv("OLLAMA_API_KEY")
        if api_key:
            headers["Authorization"] = f"Bearer {api_key}"
        return headers

    def _build_request_body(
        self, messages: list[dict[str, Any]], temperature: float, max_tokens: int, model: str, **kwargs: Any
    ) -> dict[str, Any]:
        """Build Ollama request body with stream disabled."""
        return {
            "messages": messages,
            "temperature": temperature,
            "stream": False,
            **kwargs,
        }

    def _get_api_url(self, model: str | None = None) -> str:
        """Get API URL with /api/chat endpoint."""
        return f"{self.config.base_url}/api/chat"

    def _get_api_key(self) -> str:
        """Get optional API key for Ollama."""
        api_key = os.getenv(self.config.api_key_env)
        if not api_key:
            return ""  # Optional API key
        return api_key

    def _parse_response(self, response: dict[str, Any]) -> str:
        """Parse Ollama response with flexible format support."""
        from gac.errors import AIError

        # Handle different response formats from Ollama
        if "message" in response and "content" in response["message"]:
            content = response["message"]["content"]
        elif "response" in response:
            content = response["response"]
        else:
            # Fallback: try to serialize response
            content = str(response) if response else ""

        if content is None:
            raise AIError.model_error("Ollama API returned null content")
        if content == "":
            raise AIError.model_error("Ollama API returned empty content")

        return content

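_parse_response accepts both of Ollama's payload shapes; a sketch of the two inputs that parse to the same string (values are examples):

chat_style = {"message": {"role": "assistant", "content": "fix: handle empty diff"}}
generate_style = {"response": "fix: handle empty diff"}
# Both yield "fix: handle empty diff"; null or empty content raises AIError.model_error.
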
gac/providers/openai.py
ADDED

@@ -0,0 +1,32 @@

"""OpenAI API provider for gac."""

from typing import Any

from gac.providers.base import OpenAICompatibleProvider, ProviderConfig


class OpenAIProvider(OpenAICompatibleProvider):
    """OpenAI API provider with model-specific adjustments."""

    config = ProviderConfig(name="OpenAI", api_key_env="OPENAI_API_KEY", base_url="https://api.openai.com/v1")

    def _get_api_url(self, model: str | None = None) -> str:
        """Get OpenAI API URL with /chat/completions endpoint."""
        return f"{self.config.base_url}/chat/completions"

    def _build_request_body(
        self, messages: list[dict[str, Any]], temperature: float, max_tokens: int, model: str, **kwargs: Any
    ) -> dict[str, Any]:
        """Build OpenAI-specific request body."""
        data = super()._build_request_body(messages, temperature, max_tokens, model, **kwargs)

        # OpenAI uses max_completion_tokens instead of max_tokens
        data["max_completion_tokens"] = data.pop("max_tokens")

        # Handle optional parameters
        if "response_format" in kwargs:
            data["response_format"] = kwargs["response_format"]
        if "stop" in kwargs:
            data["stop"] = kwargs["stop"]

        return data

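A sketch of the optional-parameter passthrough; construction via OpenAIProvider(OpenAIProvider.config) mirrors the sibling providers and is assumed here, as is OPENAI_API_KEY being set in the environment:

provider = OpenAIProvider(OpenAIProvider.config)  # assumed construction pattern
body = provider._build_request_body(
    [{"role": "user", "content": "hi"}],
    temperature=0.2,
    max_tokens=128,
    model="gpt-4o-mini",  # example model name
    response_format={"type": "json_object"},
)
assert "max_tokens" not in body
assert body["max_completion_tokens"] == 128
assert body["response_format"] == {"type": "json_object"}
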
gac/providers/openrouter.py
ADDED

@@ -0,0 +1,21 @@

"""OpenRouter API provider for gac."""

from gac.providers.base import OpenAICompatibleProvider, ProviderConfig


class OpenRouterProvider(OpenAICompatibleProvider):
    config = ProviderConfig(
        name="OpenRouter",
        api_key_env="OPENROUTER_API_KEY",
        base_url="https://openrouter.ai/api/v1",
    )

    def _get_api_url(self, model: str | None = None) -> str:
        """Get OpenRouter API URL with /chat/completions endpoint."""
        return f"{self.config.base_url}/chat/completions"

    def _build_headers(self) -> dict[str, str]:
        """Build headers with OpenRouter-style authorization and HTTP-Referer."""
        headers = super()._build_headers()
        headers["HTTP-Referer"] = "https://github.com/codeindolence/gac"
        return headers

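The resulting headers would look roughly like this; the Content-Type and Authorization entries are assumed to come from the base class (the LM Studio override above deletes an inherited Authorization header, which implies the base adds one):

# {
#     "Content-Type": "application/json",              # assumed base-class header
#     "Authorization": "Bearer <OPENROUTER_API_KEY>",  # assumed base-class header
#     "HTTP-Referer": "https://github.com/codeindolence/gac",
# }
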
gac/providers/protocol.py
ADDED

@@ -0,0 +1,71 @@

"""Provider protocol for type-safe AI provider implementations."""

from typing import Any, Protocol, runtime_checkable


@runtime_checkable
class ProviderProtocol(Protocol):
    """Protocol defining the contract for AI providers.

    All providers must implement this protocol to ensure consistent
    interface and type safety across the codebase.

    This protocol supports both class-based providers (with methods)
    and function-based providers (used in the registry).
    """

    def generate(
        self, model: str, messages: list[dict[str, Any]], temperature: float, max_tokens: int, **kwargs: Any
    ) -> str:
        """Generate text using the AI model.

        Args:
            model: The model name to use
            messages: List of message dictionaries in chat format
            temperature: Temperature parameter (0.0-2.0)
            max_tokens: Maximum tokens in response
            **kwargs: Additional provider-specific parameters

        Returns:
            Generated text content

        Raises:
            AIError: For any generation-related errors
        """
        ...

    @property
    def name(self) -> str:
        """Get the provider name.

        Returns:
            Provider name identifier
        """
        ...

    @property
    def api_key_env(self) -> str:
        """Get the environment variable name for the API key.

        Returns:
            Environment variable name
        """
        ...

    @property
    def base_url(self) -> str:
        """Get the base URL for the API.

        Returns:
            Base API URL
        """
        ...

    @property
    def timeout(self) -> int:
        """Get the timeout in seconds.

        Returns:
            Timeout in seconds
        """
        ...
