gac 0.17.2__py3-none-any.whl → 3.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gac/__version__.py +1 -1
- gac/ai.py +69 -123
- gac/ai_utils.py +227 -0
- gac/auth_cli.py +69 -0
- gac/cli.py +87 -19
- gac/config.py +13 -7
- gac/config_cli.py +26 -5
- gac/constants.py +176 -5
- gac/errors.py +14 -0
- gac/git.py +207 -11
- gac/init_cli.py +52 -29
- gac/language_cli.py +378 -0
- gac/main.py +922 -189
- gac/model_cli.py +374 -0
- gac/oauth/__init__.py +1 -0
- gac/oauth/claude_code.py +397 -0
- gac/preprocess.py +5 -5
- gac/prompt.py +656 -219
- gac/providers/__init__.py +88 -0
- gac/providers/anthropic.py +51 -0
- gac/providers/azure_openai.py +97 -0
- gac/providers/cerebras.py +38 -0
- gac/providers/chutes.py +71 -0
- gac/providers/claude_code.py +102 -0
- gac/providers/custom_anthropic.py +133 -0
- gac/providers/custom_openai.py +98 -0
- gac/providers/deepseek.py +38 -0
- gac/providers/fireworks.py +38 -0
- gac/providers/gemini.py +87 -0
- gac/providers/groq.py +63 -0
- gac/providers/kimi_coding.py +63 -0
- gac/providers/lmstudio.py +59 -0
- gac/providers/minimax.py +38 -0
- gac/providers/mistral.py +38 -0
- gac/providers/moonshot.py +38 -0
- gac/providers/ollama.py +50 -0
- gac/providers/openai.py +38 -0
- gac/providers/openrouter.py +58 -0
- gac/providers/replicate.py +98 -0
- gac/providers/streamlake.py +51 -0
- gac/providers/synthetic.py +42 -0
- gac/providers/together.py +38 -0
- gac/providers/zai.py +59 -0
- gac/security.py +293 -0
- gac/utils.py +243 -4
- gac/workflow_utils.py +222 -0
- gac-3.6.0.dist-info/METADATA +281 -0
- gac-3.6.0.dist-info/RECORD +53 -0
- {gac-0.17.2.dist-info → gac-3.6.0.dist-info}/WHEEL +1 -1
- {gac-0.17.2.dist-info → gac-3.6.0.dist-info}/licenses/LICENSE +1 -1
- gac-0.17.2.dist-info/METADATA +0 -221
- gac-0.17.2.dist-info/RECORD +0 -20
- {gac-0.17.2.dist-info → gac-3.6.0.dist-info}/entry_points.txt +0 -0
gac/providers/deepseek.py
ADDED
@@ -0,0 +1,38 @@
+"""DeepSeek API provider for gac."""
+
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_deepseek_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call DeepSeek API directly."""
+    api_key = os.getenv("DEEPSEEK_API_KEY")
+    if not api_key:
+        raise AIError.authentication_error("DEEPSEEK_API_KEY not found in environment variables")
+
+    url = "https://api.deepseek.com/v1/chat/completions"
+    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+    data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+        content = response_data["choices"][0]["message"]["content"]
+        if content is None:
+            raise AIError.model_error("DeepSeek API returned null content")
+        if content == "":
+            raise AIError.model_error("DeepSeek API returned empty content")
+        return content
+    except httpx.HTTPStatusError as e:
+        if e.response.status_code == 429:
+            raise AIError.rate_limit_error(f"DeepSeek API rate limit exceeded: {e.response.text}") from e
+        raise AIError.model_error(f"DeepSeek API error: {e.response.status_code} - {e.response.text}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"DeepSeek API request timed out: {str(e)}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling DeepSeek API: {str(e)}") from e
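For orientation, a minimal sketch of how one of these provider functions would be invoked; the model name, message contents, and sampling values below are illustrative placeholders, not taken from the package:

    from gac.providers.deepseek import call_deepseek_api

    # Hypothetical call; argument values are placeholders.
    messages = [
        {"role": "system", "content": "Write a conventional commit message."},
        {"role": "user", "content": "<staged diff goes here>"},
    ]
    commit_message = call_deepseek_api(
        model="deepseek-chat",
        messages=messages,
        temperature=0.7,
        max_tokens=256,
    )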
gac/providers/fireworks.py
ADDED
@@ -0,0 +1,38 @@
+"""Fireworks AI API provider for gac."""
+
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_fireworks_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call Fireworks AI API directly."""
+    api_key = os.getenv("FIREWORKS_API_KEY")
+    if not api_key:
+        raise AIError.authentication_error("FIREWORKS_API_KEY not found in environment variables")
+
+    url = "https://api.fireworks.ai/inference/v1/chat/completions"
+    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+    data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+        content = response_data["choices"][0]["message"]["content"]
+        if content is None:
+            raise AIError.model_error("Fireworks AI API returned null content")
+        if content == "":
+            raise AIError.model_error("Fireworks AI API returned empty content")
+        return content
+    except httpx.HTTPStatusError as e:
+        if e.response.status_code == 429:
+            raise AIError.rate_limit_error(f"Fireworks AI API rate limit exceeded: {e.response.text}") from e
+        raise AIError.model_error(f"Fireworks AI API error: {e.response.status_code} - {e.response.text}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"Fireworks AI API request timed out: {str(e)}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Fireworks AI API: {str(e)}") from e
gac/providers/gemini.py
ADDED
@@ -0,0 +1,87 @@
+"""Gemini AI provider implementation."""
+
+import os
+from typing import Any
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_gemini_api(model: str, messages: list[dict[str, Any]], temperature: float, max_tokens: int) -> str:
+    """Call Gemini API directly."""
+    api_key = os.getenv("GEMINI_API_KEY")
+    if not api_key:
+        raise AIError.authentication_error("GEMINI_API_KEY not found in environment variables")
+
+    url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent"
+
+    # Build Gemini request payload, converting roles to supported values.
+    contents: list[dict[str, Any]] = []
+    system_instruction_parts: list[dict[str, str]] = []
+
+    for msg in messages:
+        role = msg.get("role")
+        content_value = msg.get("content")
+        content = "" if content_value is None else str(content_value)
+
+        if role == "system":
+            if content.strip():
+                system_instruction_parts.append({"text": content})
+            continue
+
+        if role == "assistant":
+            gemini_role = "model"
+        elif role == "user":
+            gemini_role = "user"
+        else:
+            raise AIError.model_error(f"Unsupported message role for Gemini API: {role}")
+
+        contents.append({"role": gemini_role, "parts": [{"text": content}]})
+
+    payload: dict[str, Any] = {
+        "contents": contents,
+        "generationConfig": {"temperature": temperature, "maxOutputTokens": max_tokens},
+    }
+
+    if system_instruction_parts:
+        payload["systemInstruction"] = {"role": "system", "parts": system_instruction_parts}
+
+    headers = {"x-goog-api-key": api_key, "Content-Type": "application/json"}
+
+    try:
+        response = httpx.post(url, headers=headers, json=payload, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+
+        # Check for candidates and proper response structure
+        candidates = response_data.get("candidates")
+        if not candidates:
+            raise AIError.model_error("Gemini API response missing candidates")
+
+        candidate = candidates[0]
+        if "content" not in candidate or "parts" not in candidate["content"] or not candidate["content"]["parts"]:
+            raise AIError.model_error("Gemini API response has invalid content structure")
+
+        parts = candidate["content"]["parts"]
+        content_text: str | None = None
+        for part in parts:
+            if isinstance(part, dict):
+                part_text = part.get("text")
+                if isinstance(part_text, str) and part_text:
+                    content_text = part_text
+                    break
+        if content_text is None:
+            raise AIError.model_error("Gemini API response missing text content")
+
+        return content_text
+    except AIError:
+        raise
+    except httpx.HTTPStatusError as e:
+        if e.response.status_code == 429:
+            raise AIError.rate_limit_error(f"Gemini API rate limit exceeded: {e.response.text}") from e
+        raise AIError.model_error(f"Gemini API error: {e.response.status_code} - {e.response.text}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"Gemini API request timed out: {str(e)}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Gemini API: {str(e)}") from e
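Unlike the OpenAI-compatible providers in this diff, the Gemini provider reshapes the message list before sending it: system messages are lifted into systemInstruction, and the assistant role becomes model. A sketch of that mapping, with illustrative placeholder message text:

    # Input, OpenAI-style:
    messages = [
        {"role": "system", "content": "Be terse."},
        {"role": "user", "content": "Summarize this diff."},
    ]

    # Payload produced by the conversion loop above:
    payload = {
        "contents": [
            {"role": "user", "parts": [{"text": "Summarize this diff."}]},
        ],
        "generationConfig": {"temperature": 0.7, "maxOutputTokens": 256},
        "systemInstruction": {"role": "system", "parts": [{"text": "Be terse."}]},
    }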
gac/providers/groq.py
ADDED
@@ -0,0 +1,63 @@
+"""Groq API provider for gac."""
+
+import logging
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+logger = logging.getLogger(__name__)
+
+
+def call_groq_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call Groq API directly."""
+    api_key = os.getenv("GROQ_API_KEY")
+    if not api_key:
+        raise AIError.authentication_error("GROQ_API_KEY not found in environment variables")
+
+    url = "https://api.groq.com/openai/v1/chat/completions"
+    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+    data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+
+        # Debug logging to understand response structure
+        logger.debug(f"Groq API response: {response_data}")
+
+        # Handle different response formats
+        if "choices" in response_data and len(response_data["choices"]) > 0:
+            choice = response_data["choices"][0]
+            if "message" in choice and "content" in choice["message"]:
+                content = choice["message"]["content"]
+                logger.debug(f"Found content in message.content: {repr(content)}")
+                if content is None:
+                    raise AIError.model_error("Groq API returned null content")
+                if content == "":
+                    raise AIError.model_error("Groq API returned empty content")
+                return content
+            elif "text" in choice:
+                content = choice["text"]
+                logger.debug(f"Found content in choice.text: {repr(content)}")
+                if content is None:
+                    logger.warning("Groq API returned None content in choice.text")
+                    return ""
+                return content
+            else:
+                logger.warning(f"Unexpected choice structure: {choice}")
+
+        # If we can't find content in the expected places, raise an error
+        logger.error(f"Unexpected response format from Groq API: {response_data}")
+        raise AIError.model_error(f"Unexpected response format from Groq API: {response_data}")
+    except httpx.HTTPStatusError as e:
+        if e.response.status_code == 429:
+            raise AIError.rate_limit_error(f"Groq API rate limit exceeded: {e.response.text}") from e
+        raise AIError.model_error(f"Groq API error: {e.response.status_code} - {e.response.text}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"Groq API request timed out: {str(e)}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Groq API: {str(e)}") from e
gac/providers/kimi_coding.py
ADDED
@@ -0,0 +1,63 @@
+"""Kimi Coding AI provider implementation."""
+
+import json
+import logging
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+logger = logging.getLogger(__name__)
+
+
+def call_kimi_coding_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call Kimi Coding API using OpenAI-compatible endpoint."""
+    api_key = os.getenv("KIMI_CODING_API_KEY")
+    if not api_key:
+        raise AIError.authentication_error("KIMI_CODING_API_KEY not found in environment variables")
+
+    base_url = "https://api.kimi.com/coding/v1"
+    url = f"{base_url}/chat/completions"
+    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+    # Use standard OpenAI format - no message conversion needed
+    data = {"model": model, "messages": messages, "temperature": temperature, "max_completion_tokens": max_tokens}
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+
+        try:
+            content = response_data["choices"][0]["message"]["content"]
+        except (KeyError, IndexError, TypeError) as e:
+            logger.error(f"Unexpected response format from Kimi Coding API. Response: {json.dumps(response_data)}")
+            raise AIError.model_error(
+                f"Kimi Coding API returned unexpected format. Expected OpenAI-compatible response with "
+                f"'choices[0].message.content', but got: {type(e).__name__}. Check logs for full response structure."
+            ) from e
+
+        if content is None:
+            raise AIError.model_error("Kimi Coding API returned null content")
+        if content == "":
+            raise AIError.model_error("Kimi Coding API returned empty content")
+        return content
+    except httpx.ConnectError as e:
+        raise AIError.connection_error(f"Kimi Coding API connection failed: {str(e)}") from e
+    except httpx.HTTPStatusError as e:
+        status_code = e.response.status_code
+        error_text = e.response.text
+
+        if status_code == 401:
+            raise AIError.authentication_error(f"Kimi Coding API authentication failed: {error_text}") from e
+        elif status_code == 429:
+            raise AIError.rate_limit_error(f"Kimi Coding API rate limit exceeded: {error_text}") from e
+        else:
+            raise AIError.model_error(f"Kimi Coding API error: {status_code} - {error_text}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"Kimi Coding API request timed out: {str(e)}") from e
+    except AIError:
+        raise
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Kimi Coding API: {str(e)}") from e
gac/providers/lmstudio.py
ADDED
@@ -0,0 +1,59 @@
+"""LM Studio AI provider implementation."""
+
+import os
+from typing import Any
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_lmstudio_api(model: str, messages: list[dict[str, Any]], temperature: float, max_tokens: int) -> str:
+    """Call LM Studio's OpenAI-compatible API."""
+    api_url = os.getenv("LMSTUDIO_API_URL", "http://localhost:1234")
+    api_url = api_url.rstrip("/")
+
+    url = f"{api_url}/v1/chat/completions"
+
+    headers = {"Content-Type": "application/json"}
+    api_key = os.getenv("LMSTUDIO_API_KEY")
+    if api_key:
+        headers["Authorization"] = f"Bearer {api_key}"
+
+    payload: dict[str, Any] = {
+        "model": model,
+        "messages": messages,
+        "temperature": temperature,
+        "max_tokens": max_tokens,
+        "stream": False,
+    }
+
+    try:
+        response = httpx.post(url, headers=headers, json=payload, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+        choices = response_data.get("choices") or []
+        if not choices:
+            raise AIError.model_error("LM Studio API response missing choices")
+
+        message = choices[0].get("message") or {}
+        content = message.get("content")
+        if content:
+            return content
+
+        # Some OpenAI-compatible servers return text field directly
+        content = choices[0].get("text")
+        if content:
+            return content
+
+        raise AIError.model_error("LM Studio API response missing content")
+    except httpx.ConnectError as e:
+        raise AIError.connection_error(f"LM Studio connection failed: {str(e)}") from e
+    except httpx.HTTPStatusError as e:
+        if e.response.status_code == 429:
+            raise AIError.rate_limit_error(f"LM Studio API rate limit exceeded: {e.response.text}") from e
+        raise AIError.model_error(f"LM Studio API error: {e.response.status_code} - {e.response.text}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"LM Studio API request timed out: {str(e)}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling LM Studio API: {str(e)}") from e
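Both local-server providers (this one and the Ollama provider below) read their base URL and optional key from the environment, defaulting to localhost. A hedged usage sketch against a non-default LM Studio host; the host address and model name are placeholders:

    import os
    from gac.providers.lmstudio import call_lmstudio_api

    # Hypothetical setup; point at a LAN machine running LM Studio.
    os.environ["LMSTUDIO_API_URL"] = "http://192.168.1.50:1234"
    reply = call_lmstudio_api(
        model="qwen2.5-coder-7b-instruct",
        messages=[{"role": "user", "content": "Say hello."}],
        temperature=0.7,
        max_tokens=64,
    )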
gac/providers/minimax.py
ADDED
@@ -0,0 +1,38 @@
+"""MiniMax API provider for gac."""
+
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_minimax_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call MiniMax API directly."""
+    api_key = os.getenv("MINIMAX_API_KEY")
+    if not api_key:
+        raise AIError.authentication_error("MINIMAX_API_KEY not found in environment variables")
+
+    url = "https://api.minimax.io/v1/chat/completions"
+    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+    data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+        content = response_data["choices"][0]["message"]["content"]
+        if content is None:
+            raise AIError.model_error("MiniMax API returned null content")
+        if content == "":
+            raise AIError.model_error("MiniMax API returned empty content")
+        return content
+    except httpx.HTTPStatusError as e:
+        if e.response.status_code == 429:
+            raise AIError.rate_limit_error(f"MiniMax API rate limit exceeded: {e.response.text}") from e
+        raise AIError.model_error(f"MiniMax API error: {e.response.status_code} - {e.response.text}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"MiniMax API request timed out: {str(e)}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling MiniMax API: {str(e)}") from e
gac/providers/mistral.py
ADDED
@@ -0,0 +1,38 @@
+"""Mistral API provider for gac."""
+
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_mistral_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call Mistral API directly."""
+    api_key = os.getenv("MISTRAL_API_KEY")
+    if not api_key:
+        raise AIError.authentication_error("MISTRAL_API_KEY not found in environment variables")
+
+    url = "https://api.mistral.ai/v1/chat/completions"
+    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+    data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+        content = response_data["choices"][0]["message"]["content"]
+        if content is None:
+            raise AIError.model_error("Mistral API returned null content")
+        if content == "":
+            raise AIError.model_error("Mistral API returned empty content")
+        return content
+    except httpx.HTTPStatusError as e:
+        if e.response.status_code == 429:
+            raise AIError.rate_limit_error(f"Mistral API rate limit exceeded: {e.response.text}") from e
+        raise AIError.model_error(f"Mistral API error: {e.response.status_code} - {e.response.text}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"Mistral API request timed out: {str(e)}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Mistral API: {str(e)}") from e
gac/providers/moonshot.py
ADDED
@@ -0,0 +1,38 @@
+"""Moonshot AI provider for gac."""
+
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_moonshot_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call Moonshot AI API directly."""
+    api_key = os.getenv("MOONSHOT_API_KEY")
+    if not api_key:
+        raise AIError.authentication_error("MOONSHOT_API_KEY not found in environment variables")
+
+    url = "https://api.moonshot.ai/v1/chat/completions"
+    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+    data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+        content = response_data["choices"][0]["message"]["content"]
+        if content is None:
+            raise AIError.model_error("Moonshot AI API returned null content")
+        if content == "":
+            raise AIError.model_error("Moonshot AI API returned empty content")
+        return content
+    except httpx.HTTPStatusError as e:
+        if e.response.status_code == 429:
+            raise AIError.rate_limit_error(f"Moonshot AI API rate limit exceeded: {e.response.text}") from e
+        raise AIError.model_error(f"Moonshot AI API error: {e.response.status_code} - {e.response.text}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"Moonshot AI API request timed out: {str(e)}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Moonshot AI API: {str(e)}") from e
gac/providers/ollama.py
ADDED
@@ -0,0 +1,50 @@
+"""Ollama AI provider implementation."""
+
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_ollama_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call Ollama API directly."""
+    api_url = os.getenv("OLLAMA_API_URL", "http://localhost:11434")
+    api_key = os.getenv("OLLAMA_API_KEY")
+
+    url = f"{api_url.rstrip('/')}/api/chat"
+    data = {"model": model, "messages": messages, "temperature": temperature, "stream": False}
+    headers = {"Content-Type": "application/json"}
+    if api_key:
+        headers["Authorization"] = f"Bearer {api_key}"
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+
+        content = None
+        # Handle different response formats from Ollama
+        if "message" in response_data and "content" in response_data["message"]:
+            content = response_data["message"]["content"]
+        elif "response" in response_data:
+            content = response_data["response"]
+        else:
+            # Fallback: return the full response as string
+            content = str(response_data)
+
+        if content is None:
+            raise AIError.model_error("Ollama API returned null content")
+        if content == "":
+            raise AIError.model_error("Ollama API returned empty content")
+        return content
+    except httpx.ConnectError as e:
+        raise AIError.connection_error(f"Ollama connection failed. Make sure Ollama is running: {str(e)}") from e
+    except httpx.HTTPStatusError as e:
+        if e.response.status_code == 429:
+            raise AIError.rate_limit_error(f"Ollama API rate limit exceeded: {e.response.text}") from e
+        raise AIError.model_error(f"Ollama API error: {e.response.status_code} - {e.response.text}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"Ollama API request timed out: {str(e)}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Ollama API: {str(e)}") from e
gac/providers/openai.py
ADDED
@@ -0,0 +1,38 @@
+"""OpenAI API provider for gac."""
+
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_openai_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call OpenAI API directly."""
+    api_key = os.getenv("OPENAI_API_KEY")
+    if not api_key:
+        raise AIError.authentication_error("OPENAI_API_KEY not found in environment variables")
+
+    url = "https://api.openai.com/v1/chat/completions"
+    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+    data = {"model": model, "messages": messages, "temperature": temperature, "max_completion_tokens": max_tokens}
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+        content = response_data["choices"][0]["message"]["content"]
+        if content is None:
+            raise AIError.model_error("OpenAI API returned null content")
+        if content == "":
+            raise AIError.model_error("OpenAI API returned empty content")
+        return content
+    except httpx.HTTPStatusError as e:
+        if e.response.status_code == 429:
+            raise AIError.rate_limit_error(f"OpenAI API rate limit exceeded: {e.response.text}") from e
+        raise AIError.model_error(f"OpenAI API error: {e.response.status_code} - {e.response.text}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"OpenAI API request timed out: {str(e)}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling OpenAI API: {str(e)}") from e
gac/providers/openrouter.py
ADDED
@@ -0,0 +1,58 @@
+"""OpenRouter API provider for gac."""
+
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_openrouter_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call OpenRouter API directly."""
+    api_key = os.getenv("OPENROUTER_API_KEY")
+    if not api_key:
+        raise AIError.authentication_error("OPENROUTER_API_KEY environment variable not set")
+
+    url = "https://openrouter.ai/api/v1/chat/completions"
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {api_key}",
+    }
+
+    data = {
+        "model": model,
+        "messages": messages,
+        "temperature": temperature,
+        "max_tokens": max_tokens,
+    }
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+        content = response_data["choices"][0]["message"]["content"]
+        if content is None:
+            raise AIError.model_error("OpenRouter API returned null content")
+        if content == "":
+            raise AIError.model_error("OpenRouter API returned empty content")
+        return content
+    except httpx.HTTPStatusError as e:
+        # Handle specific HTTP status codes
+        status_code = e.response.status_code
+        error_text = e.response.text
+
+        # Rate limiting
+        if status_code == 429:
+            raise AIError.rate_limit_error(f"OpenRouter API rate limit exceeded: {error_text}") from e
+        # Service unavailable
+        elif status_code in (502, 503):
+            raise AIError.connection_error(f"OpenRouter API service unavailable: {status_code} - {error_text}") from e
+        # Other HTTP errors
+        else:
+            raise AIError.model_error(f"OpenRouter API error: {status_code} - {error_text}") from e
+    except httpx.ConnectError as e:
+        raise AIError.connection_error(f"OpenRouter API connection error: {str(e)}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"OpenRouter API request timed out: {str(e)}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling OpenRouter API: {str(e)}") from e