gac-3.6.0-py3-none-any.whl → gac-3.8.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gac/__version__.py +1 -1
- gac/ai_utils.py +47 -0
- gac/auth_cli.py +181 -36
- gac/cli.py +13 -0
- gac/config.py +54 -0
- gac/constants.py +7 -0
- gac/main.py +53 -11
- gac/model_cli.py +65 -10
- gac/oauth/__init__.py +26 -0
- gac/oauth/claude_code.py +87 -20
- gac/oauth/qwen_oauth.py +323 -0
- gac/oauth/token_store.py +81 -0
- gac/prompt.py +16 -4
- gac/providers/__init__.py +3 -0
- gac/providers/anthropic.py +11 -1
- gac/providers/azure_openai.py +5 -1
- gac/providers/cerebras.py +11 -1
- gac/providers/chutes.py +11 -1
- gac/providers/claude_code.py +11 -1
- gac/providers/custom_anthropic.py +5 -1
- gac/providers/custom_openai.py +5 -1
- gac/providers/deepseek.py +11 -1
- gac/providers/fireworks.py +11 -1
- gac/providers/gemini.py +11 -1
- gac/providers/groq.py +5 -1
- gac/providers/kimi_coding.py +5 -1
- gac/providers/lmstudio.py +12 -1
- gac/providers/minimax.py +11 -1
- gac/providers/mistral.py +11 -1
- gac/providers/moonshot.py +11 -1
- gac/providers/ollama.py +11 -1
- gac/providers/openai.py +11 -1
- gac/providers/openrouter.py +11 -1
- gac/providers/qwen.py +76 -0
- gac/providers/replicate.py +14 -2
- gac/providers/streamlake.py +11 -1
- gac/providers/synthetic.py +11 -1
- gac/providers/together.py +11 -1
- gac/providers/zai.py +11 -1
- gac/utils.py +30 -1
- gac/workflow_utils.py +3 -8
- {gac-3.6.0.dist-info → gac-3.8.1.dist-info}/METADATA +6 -4
- gac-3.8.1.dist-info/RECORD +56 -0
- gac-3.6.0.dist-info/RECORD +0 -53
- {gac-3.6.0.dist-info → gac-3.8.1.dist-info}/WHEEL +0 -0
- {gac-3.6.0.dist-info → gac-3.8.1.dist-info}/entry_points.txt +0 -0
- {gac-3.6.0.dist-info → gac-3.8.1.dist-info}/licenses/LICENSE +0 -0
gac/oauth/token_store.py
ADDED
```diff
@@ -0,0 +1,81 @@
+"""Token storage for OAuth authentication."""
+
+import json
+import os
+import stat
+from dataclasses import dataclass
+from pathlib import Path
+from typing import TypedDict
+
+
+class OAuthToken(TypedDict, total=False):
+    """OAuth token structure."""
+
+    access_token: str
+    refresh_token: str | None
+    expiry: int
+    token_type: str
+    scope: str | None
+    resource_url: str | None
+
+
+@dataclass
+class TokenStore:
+    """Secure file-based token storage for OAuth tokens."""
+
+    base_dir: Path
+
+    def __init__(self, base_dir: Path | None = None):
+        if base_dir is None:
+            base_dir = Path.home() / ".gac" / "oauth"
+        self.base_dir = base_dir
+        self._ensure_directory()
+
+    def _ensure_directory(self) -> None:
+        """Create the OAuth directory with secure permissions."""
+        if not self.base_dir.exists():
+            self.base_dir.mkdir(parents=True, mode=0o700)
+        else:
+            os.chmod(self.base_dir, stat.S_IRWXU)
+
+    def _get_token_path(self, provider: str) -> Path:
+        """Get the path for a provider's token file."""
+        return self.base_dir / f"{provider}.json"
+
+    def save_token(self, provider: str, token: OAuthToken) -> None:
+        """Save a token to file with secure permissions.
+
+        Uses atomic write (temp file + rename) to prevent partial reads.
+        """
+        token_path = self._get_token_path(provider)
+        temp_path = token_path.with_suffix(".tmp")
+
+        with open(temp_path, "w") as f:
+            json.dump(token, f, indent=2)
+
+        os.chmod(temp_path, stat.S_IRUSR | stat.S_IWUSR)
+        temp_path.rename(token_path)
+
+    def get_token(self, provider: str) -> OAuthToken | None:
+        """Retrieve a token from file."""
+        token_path = self._get_token_path(provider)
+        if not token_path.exists():
+            return None
+
+        with open(token_path) as f:
+            token_data = json.load(f)
+        if isinstance(token_data, dict) and isinstance(token_data.get("access_token"), str):
+            return token_data  # type: ignore[return-value]
+        return None
+
+    def remove_token(self, provider: str) -> None:
+        """Remove a token file."""
+        token_path = self._get_token_path(provider)
+        if token_path.exists():
+            token_path.unlink()
+
+    def list_providers(self) -> list[str]:
+        """List all providers with stored tokens."""
+        if not self.base_dir.exists():
+            return []
+        return [f.stem for f in self.base_dir.glob("*.json")]
```
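The new module gives each OAuth provider its own JSON token file under `~/.gac/oauth/`, created with owner-only permissions and written atomically. A minimal usage sketch based solely on the class above (the token values and throwaway directory are illustrative):

```python
import tempfile
from pathlib import Path

from gac.oauth.token_store import OAuthToken, TokenStore

# Point the store at a throwaway directory so the example leaves ~/.gac/oauth/ alone.
store = TokenStore(base_dir=Path(tempfile.mkdtemp()) / "oauth")

token: OAuthToken = {"access_token": "example-token", "expiry": 1735689600, "token_type": "Bearer"}
store.save_token("qwen", token)  # temp-file write, chmod to 0600, then atomic rename

loaded = store.get_token("qwen")
assert loaded is not None and loaded["access_token"] == "example-token"
assert store.list_providers() == ["qwen"]

store.remove_token("qwen")
assert store.get_token("qwen") is None
```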
gac/prompt.py
CHANGED
```diff
@@ -276,16 +276,24 @@ You are an expert code reviewer specializing in identifying missing context and
 </role>
 
 <focus>
-Analyze the git diff and
+Analyze the git diff and determine the appropriate number of questions based on change complexity. Generate 1-5 focused questions to clarify intent, motivation, and impact. Your questions should help the developer provide the essential context needed for a meaningful commit message.
 </focus>
 
+<adaptive_guidelines>
+- For very small changes (single file, <10 lines): Ask 1-2 essential questions about core purpose
+- For small changes (few files, <50 lines): Ask 1-3 questions covering intent and impact
+- For medium changes (multiple files, <200 lines): Ask 2-4 questions covering scope, intent, and impact
+- For large changes (many files or substantial modifications): Ask 3-5 questions covering all aspects
+- Always prioritize questions that would most help generate an informative commit message
+- Lean toward fewer questions for straightforward changes
+</adaptive_guidelines>
+
 <guidelines>
 - Focus on WHY the changes were made, not just WHAT was changed
 - Ask about the intent, motivation, or business purpose behind the changes
 - Consider what future developers need to understand about this change
 - Ask about the broader impact or consequences of the changes
 - Target areas where technical implementation doesn't reveal the underlying purpose
-- Prioritize questions that would most help generate an informative commit message
 - Keep questions concise and specific
 - Format as a clean list for easy parsing
 </guidelines>
@@ -306,7 +314,11 @@ Respond with ONLY a numbered list of questions, one per line:
 </output_format>
 
 <examples>
-Good example questions:
+Good example questions for small changes:
+1. What problem does this fix?
+2. Why was this approach chosen?
+
+Good example questions for larger changes:
 1. What problem or user need does this change address?
 2. Why was this particular approach chosen over alternatives?
 3. What impact will this have on existing functionality?
@@ -704,7 +716,7 @@ Additional context provided by the user: {hint}
     user_prompt += """
 
 <format_instructions>
-Analyze the changes above and
+Analyze the changes above and determine the appropriate number of questions based on the change complexity. Generate 1-5 focused questions that clarify the intent, motivation, and impact of these changes. For very small changes, ask only 1-2 essential questions. Respond with ONLY a numbered list of questions as specified in the system prompt.
 </format_instructions>"""
 
     return system_prompt.strip(), user_prompt.strip()
```
gac/providers/__init__.py
CHANGED
```diff
@@ -19,6 +19,7 @@ from .moonshot import call_moonshot_api
 from .ollama import call_ollama_api
 from .openai import call_openai_api
 from .openrouter import call_openrouter_api
+from .qwen import call_qwen_api
 from .replicate import call_replicate_api
 from .streamlake import call_streamlake_api
 from .synthetic import call_synthetic_api
@@ -46,6 +47,7 @@ PROVIDER_REGISTRY = {
     "ollama": call_ollama_api,
     "openai": call_openai_api,
     "openrouter": call_openrouter_api,
+    "qwen": call_qwen_api,
     "replicate": call_replicate_api,
     "streamlake": call_streamlake_api,
     "synthetic": call_synthetic_api,
@@ -77,6 +79,7 @@ __all__ = [
     "call_ollama_api",
     "call_openai_api",
     "call_openrouter_api",
+    "call_qwen_api",
     "call_replicate_api",
     "call_streamlake_api",
     "call_synthetic_api",
```
gac/providers/anthropic.py
CHANGED
```diff
@@ -1,10 +1,15 @@
 """Anthropic AI provider implementation."""
 
+import logging
 import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
 
 
 def call_anthropic_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -31,8 +36,12 @@ def call_anthropic_api(model: str, messages: list[dict], temperature: float, max
     if system_message:
         data["system"] = system_message
 
+    logger.debug(f"Calling Anthropic API with model={model}")
+
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
         content = response_data["content"][0]["text"]
@@ -40,6 +49,7 @@ def call_anthropic_api(model: str, messages: list[dict], temperature: float, max
             raise AIError.model_error("Anthropic API returned null content")
         if content == "":
             raise AIError.model_error("Anthropic API returned empty content")
+        logger.debug("Anthropic API response received successfully")
         return content
     except httpx.HTTPStatusError as e:
         if e.response.status_code == 429:
```
gac/providers/azure_openai.py
CHANGED
```diff
@@ -10,7 +10,9 @@ import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
 
 logger = logging.getLogger(__name__)
 
@@ -59,7 +61,9 @@ def call_azure_openai_api(model: str, messages: list[dict], temperature: float,
     data = {"messages": messages, "temperature": temperature, "max_tokens": max_tokens}
 
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
 
```
gac/providers/cerebras.py
CHANGED
```diff
@@ -1,10 +1,15 @@
 """Cerebras AI provider implementation."""
 
+import logging
 import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
 
 
 def call_cerebras_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -18,8 +23,12 @@ def call_cerebras_api(model: str, messages: list[dict], temperature: float, max_
 
     data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
 
+    logger.debug(f"Calling Cerebras API with model={model}")
+
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
         content = response_data["choices"][0]["message"]["content"]
@@ -27,6 +36,7 @@ def call_cerebras_api(model: str, messages: list[dict], temperature: float, max_
             raise AIError.model_error("Cerebras API returned null content")
         if content == "":
             raise AIError.model_error("Cerebras API returned empty content")
+        logger.debug("Cerebras API response received successfully")
         return content
     except httpx.HTTPStatusError as e:
         if e.response.status_code == 429:
```
gac/providers/chutes.py
CHANGED
```diff
@@ -1,10 +1,15 @@
 """Chutes.ai API provider for gac."""
 
+import logging
 import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
 
 
 def call_chutes_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -43,8 +48,12 @@ def call_chutes_api(model: str, messages: list[dict], temperature: float, max_to
         "max_tokens": max_tokens,
     }
 
+    logger.debug(f"Calling Chutes.ai API with model={model}")
+
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
         content = response_data["choices"][0]["message"]["content"]
@@ -52,6 +61,7 @@ def call_chutes_api(model: str, messages: list[dict], temperature: float, max_to
             raise AIError.model_error("Chutes.ai API returned null content")
         if content == "":
             raise AIError.model_error("Chutes.ai API returned empty content")
+        logger.debug("Chutes.ai API response received successfully")
         return content
     except httpx.HTTPStatusError as e:
         status_code = e.response.status_code
```
gac/providers/claude_code.py
CHANGED
```diff
@@ -4,11 +4,16 @@ This provider allows users with Claude Code subscriptions to use their OAuth tok
 instead of paying for the expensive Anthropic API.
 """
 
+import logging
 import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
 
 
 def call_claude_code_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -77,8 +82,12 @@ def call_claude_code_api(model: str, messages: list[dict], temperature: float, m
         "system": system_message,
     }
 
+    logger.debug(f"Calling Claude Code API with model={model}")
+
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
         content = response_data["content"][0]["text"]
@@ -86,6 +95,7 @@ def call_claude_code_api(model: str, messages: list[dict], temperature: float, m
             raise AIError.model_error("Claude Code API returned null content")
         if content == "":
             raise AIError.model_error("Claude Code API returned empty content")
+        logger.debug("Claude Code API response received successfully")
         return content
     except httpx.HTTPStatusError as e:
         if e.response.status_code == 401:
```
gac/providers/custom_anthropic.py
CHANGED
```diff
@@ -10,7 +10,9 @@ import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
 
 logger = logging.getLogger(__name__)
 
@@ -74,7 +76,9 @@ def call_custom_anthropic_api(model: str, messages: list[dict], temperature: flo
         data["system"] = system_message
 
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
 
```
gac/providers/custom_openai.py
CHANGED
```diff
@@ -10,7 +10,9 @@ import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
 
 logger = logging.getLogger(__name__)
 
@@ -60,7 +62,9 @@ def call_custom_openai_api(model: str, messages: list[dict], temperature: float,
     data = {"model": model, "messages": messages, "temperature": temperature, "max_completion_tokens": max_tokens}
 
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
 
```
gac/providers/deepseek.py
CHANGED
```diff
@@ -1,10 +1,15 @@
 """DeepSeek API provider for gac."""
 
+import logging
 import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
 
 
 def call_deepseek_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -18,8 +23,12 @@ def call_deepseek_api(model: str, messages: list[dict], temperature: float, max_
 
     data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
 
+    logger.debug(f"Calling DeepSeek API with model={model}")
+
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
         content = response_data["choices"][0]["message"]["content"]
@@ -27,6 +36,7 @@ def call_deepseek_api(model: str, messages: list[dict], temperature: float, max_
             raise AIError.model_error("DeepSeek API returned null content")
         if content == "":
             raise AIError.model_error("DeepSeek API returned empty content")
+        logger.debug("DeepSeek API response received successfully")
         return content
     except httpx.HTTPStatusError as e:
         if e.response.status_code == 429:
```
gac/providers/fireworks.py
CHANGED
```diff
@@ -1,10 +1,15 @@
 """Fireworks AI API provider for gac."""
 
+import logging
 import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
 
 
 def call_fireworks_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -18,8 +23,12 @@ def call_fireworks_api(model: str, messages: list[dict], temperature: float, max
 
     data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
 
+    logger.debug(f"Calling Fireworks AI API with model={model}")
+
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
         content = response_data["choices"][0]["message"]["content"]
@@ -27,6 +36,7 @@ def call_fireworks_api(model: str, messages: list[dict], temperature: float, max
             raise AIError.model_error("Fireworks AI API returned null content")
         if content == "":
             raise AIError.model_error("Fireworks AI API returned empty content")
+        logger.debug("Fireworks AI API response received successfully")
         return content
     except httpx.HTTPStatusError as e:
         if e.response.status_code == 429:
```
gac/providers/gemini.py
CHANGED
```diff
@@ -1,11 +1,16 @@
 """Gemini AI provider implementation."""
 
+import logging
 import os
 from typing import Any
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
 
 
 def call_gemini_api(model: str, messages: list[dict[str, Any]], temperature: float, max_tokens: int) -> str:
@@ -49,8 +54,12 @@ def call_gemini_api(model: str, messages: list[dict[str, Any]], temperature: flo
 
     headers = {"x-goog-api-key": api_key, "Content-Type": "application/json"}
 
+    logger.debug(f"Calling Gemini API with model={model}")
+
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=payload, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
 
@@ -74,6 +83,7 @@ def call_gemini_api(model: str, messages: list[dict[str, Any]], temperature: flo
         if content_text is None:
             raise AIError.model_error("Gemini API response missing text content")
 
+        logger.debug("Gemini API response received successfully")
        return content_text
     except AIError:
         raise
```
gac/providers/groq.py
CHANGED
```diff
@@ -5,7 +5,9 @@ import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
 
 logger = logging.getLogger(__name__)
 
@@ -22,7 +24,9 @@ def call_groq_api(model: str, messages: list[dict], temperature: float, max_toke
     data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
 
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
 
```
gac/providers/kimi_coding.py
CHANGED
```diff
@@ -6,7 +6,9 @@ import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
 
 logger = logging.getLogger(__name__)
 
@@ -25,7 +27,9 @@ def call_kimi_coding_api(model: str, messages: list[dict], temperature: float, m
     data = {"model": model, "messages": messages, "temperature": temperature, "max_completion_tokens": max_tokens}
 
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
 
```
gac/providers/lmstudio.py
CHANGED
```diff
@@ -1,11 +1,16 @@
 """LM Studio AI provider implementation."""
 
+import logging
 import os
 from typing import Any
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
 
 
 def call_lmstudio_api(model: str, messages: list[dict[str, Any]], temperature: float, max_tokens: int) -> str:
@@ -28,8 +33,12 @@ def call_lmstudio_api(model: str, messages: list[dict[str, Any]], temperature: f
         "stream": False,
     }
 
+    logger.debug(f"Calling LM Studio API with model={model}")
+
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=payload, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
         choices = response_data.get("choices") or []
@@ -39,11 +48,13 @@ def call_lmstudio_api(model: str, messages: list[dict[str, Any]], temperature: f
         message = choices[0].get("message") or {}
         content = message.get("content")
         if content:
+            logger.debug("LM Studio API response received successfully")
            return content
 
         # Some OpenAI-compatible servers return text field directly
         content = choices[0].get("text")
         if content:
+            logger.debug("LM Studio API response received successfully")
            return content
 
         raise AIError.model_error("LM Studio API response missing content")
```
gac/providers/minimax.py
CHANGED
```diff
@@ -1,10 +1,15 @@
 """MiniMax API provider for gac."""
 
+import logging
 import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
 
 
 def call_minimax_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -18,8 +23,12 @@ def call_minimax_api(model: str, messages: list[dict], temperature: float, max_t
 
     data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
 
+    logger.debug(f"Calling MiniMax API with model={model}")
+
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
         content = response_data["choices"][0]["message"]["content"]
@@ -27,6 +36,7 @@ def call_minimax_api(model: str, messages: list[dict], temperature: float, max_t
             raise AIError.model_error("MiniMax API returned null content")
         if content == "":
             raise AIError.model_error("MiniMax API returned empty content")
+        logger.debug("MiniMax API response received successfully")
         return content
     except httpx.HTTPStatusError as e:
         if e.response.status_code == 429:
```
gac/providers/mistral.py
CHANGED
```diff
@@ -1,10 +1,15 @@
 """Mistral API provider for gac."""
 
+import logging
 import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
 
 
 def call_mistral_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -18,8 +23,12 @@ def call_mistral_api(model: str, messages: list[dict], temperature: float, max_t
 
     data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
 
+    logger.debug(f"Calling Mistral API with model={model}")
+
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
         content = response_data["choices"][0]["message"]["content"]
@@ -27,6 +36,7 @@ def call_mistral_api(model: str, messages: list[dict], temperature: float, max_t
             raise AIError.model_error("Mistral API returned null content")
         if content == "":
             raise AIError.model_error("Mistral API returned empty content")
+        logger.debug("Mistral API response received successfully")
         return content
     except httpx.HTTPStatusError as e:
         if e.response.status_code == 429:
```