gac 1.13.0__py3-none-any.whl → 3.8.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gac/__version__.py +1 -1
- gac/ai.py +33 -47
- gac/ai_utils.py +113 -41
- gac/auth_cli.py +214 -0
- gac/cli.py +72 -2
- gac/config.py +63 -6
- gac/config_cli.py +26 -5
- gac/constants.py +178 -2
- gac/git.py +158 -12
- gac/init_cli.py +40 -125
- gac/language_cli.py +378 -0
- gac/main.py +868 -158
- gac/model_cli.py +429 -0
- gac/oauth/__init__.py +27 -0
- gac/oauth/claude_code.py +464 -0
- gac/oauth/qwen_oauth.py +323 -0
- gac/oauth/token_store.py +81 -0
- gac/preprocess.py +3 -3
- gac/prompt.py +573 -226
- gac/providers/__init__.py +49 -0
- gac/providers/anthropic.py +11 -1
- gac/providers/azure_openai.py +101 -0
- gac/providers/cerebras.py +11 -1
- gac/providers/chutes.py +11 -1
- gac/providers/claude_code.py +112 -0
- gac/providers/custom_anthropic.py +6 -2
- gac/providers/custom_openai.py +6 -3
- gac/providers/deepseek.py +11 -1
- gac/providers/fireworks.py +11 -1
- gac/providers/gemini.py +11 -1
- gac/providers/groq.py +5 -1
- gac/providers/kimi_coding.py +67 -0
- gac/providers/lmstudio.py +12 -1
- gac/providers/minimax.py +11 -1
- gac/providers/mistral.py +48 -0
- gac/providers/moonshot.py +48 -0
- gac/providers/ollama.py +11 -1
- gac/providers/openai.py +11 -1
- gac/providers/openrouter.py +11 -1
- gac/providers/qwen.py +76 -0
- gac/providers/replicate.py +110 -0
- gac/providers/streamlake.py +11 -1
- gac/providers/synthetic.py +11 -1
- gac/providers/together.py +11 -1
- gac/providers/zai.py +11 -1
- gac/security.py +1 -1
- gac/utils.py +272 -4
- gac/workflow_utils.py +217 -0
- {gac-1.13.0.dist-info → gac-3.8.1.dist-info}/METADATA +90 -27
- gac-3.8.1.dist-info/RECORD +56 -0
- {gac-1.13.0.dist-info → gac-3.8.1.dist-info}/WHEEL +1 -1
- gac-1.13.0.dist-info/RECORD +0 -41
- {gac-1.13.0.dist-info → gac-3.8.1.dist-info}/entry_points.txt +0 -0
- {gac-1.13.0.dist-info → gac-3.8.1.dist-info}/licenses/LICENSE +0 -0

gac/providers/kimi_coding.py
ADDED

@@ -0,0 +1,67 @@
+"""Kimi Coding AI provider implementation."""
+
+import json
+import logging
+import os
+
+import httpx
+
+from gac.constants import ProviderDefaults
+from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
+
+
+def call_kimi_coding_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call Kimi Coding API using OpenAI-compatible endpoint."""
+    api_key = os.getenv("KIMI_CODING_API_KEY")
+    if not api_key:
+        raise AIError.authentication_error("KIMI_CODING_API_KEY not found in environment variables")
+
+    base_url = "https://api.kimi.com/coding/v1"
+    url = f"{base_url}/chat/completions"
+    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+    # Use standard OpenAI format - no message conversion needed
+    data = {"model": model, "messages": messages, "temperature": temperature, "max_completion_tokens": max_tokens}
+
+    try:
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
+        response.raise_for_status()
+        response_data = response.json()
+
+        try:
+            content = response_data["choices"][0]["message"]["content"]
+        except (KeyError, IndexError, TypeError) as e:
+            logger.error(f"Unexpected response format from Kimi Coding API. Response: {json.dumps(response_data)}")
+            raise AIError.model_error(
+                f"Kimi Coding API returned unexpected format. Expected OpenAI-compatible response with "
+                f"'choices[0].message.content', but got: {type(e).__name__}. Check logs for full response structure."
+            ) from e
+
+        if content is None:
+            raise AIError.model_error("Kimi Coding API returned null content")
+        if content == "":
+            raise AIError.model_error("Kimi Coding API returned empty content")
+        return content
+    except httpx.ConnectError as e:
+        raise AIError.connection_error(f"Kimi Coding API connection failed: {str(e)}") from e
+    except httpx.HTTPStatusError as e:
+        status_code = e.response.status_code
+        error_text = e.response.text
+
+        if status_code == 401:
+            raise AIError.authentication_error(f"Kimi Coding API authentication failed: {error_text}") from e
+        elif status_code == 429:
+            raise AIError.rate_limit_error(f"Kimi Coding API rate limit exceeded: {error_text}") from e
+        else:
+            raise AIError.model_error(f"Kimi Coding API error: {status_code} - {error_text}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"Kimi Coding API request timed out: {str(e)}") from e
+    except AIError:
+        raise
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Kimi Coding API: {str(e)}") from e

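All of the new provider modules expose a single call_*_api function with the same signature, so a caller only needs to pick the function and set the matching environment variable. A minimal usage sketch for the Kimi Coding provider; the model identifier and key value here are placeholders, not confirmed by this diff:

    import os

    from gac.providers.kimi_coding import call_kimi_coding_api

    # Without this variable the function raises AIError.authentication_error.
    os.environ.setdefault("KIMI_CODING_API_KEY", "sk-placeholder")

    commit_message = call_kimi_coding_api(
        model="kimi-for-coding",  # hypothetical model name
        messages=[
            {"role": "system", "content": "You write concise git commit messages."},
            {"role": "user", "content": "Summarize this staged diff: ..."},
        ],
        temperature=0.7,
        max_tokens=256,
    )
    print(commit_message)
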
gac/providers/lmstudio.py
CHANGED

@@ -1,11 +1,16 @@
 """LM Studio AI provider implementation."""
 
+import logging
 import os
 from typing import Any
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
 
 
 def call_lmstudio_api(model: str, messages: list[dict[str, Any]], temperature: float, max_tokens: int) -> str:
@@ -28,8 +33,12 @@ def call_lmstudio_api(model: str, messages: list[dict[str, Any]], temperature: float, max_tokens: int) -> str:
         "stream": False,
     }
 
+    logger.debug(f"Calling LM Studio API with model={model}")
+
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=payload, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
         choices = response_data.get("choices") or []
@@ -39,11 +48,13 @@ def call_lmstudio_api(model: str, messages: list[dict[str, Any]], temperature: float, max_tokens: int) -> str:
         message = choices[0].get("message") or {}
         content = message.get("content")
         if content:
+            logger.debug("LM Studio API response received successfully")
             return content
 
         # Some OpenAI-compatible servers return text field directly
         content = choices[0].get("text")
         if content:
+            logger.debug("LM Studio API response received successfully")
             return content
 
         raise AIError.model_error("LM Studio API response missing content")

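The third hunk above is worth a note: LM Studio speaks the OpenAI chat format, but the parser also falls back to a top-level text field because some OpenAI-compatible servers return the older completions shape. A small self-contained sketch of the two payload shapes the new code accepts; the payloads are illustrative, not captured server output:

    # Illustrative payloads only - not captured LM Studio output.
    chat_style = {"choices": [{"message": {"content": "feat: add SSL verification toggle"}}]}
    completion_style = {"choices": [{"text": "feat: add SSL verification toggle"}]}

    def extract_content(response_data: dict) -> str | None:
        """Mirror the fallback order in call_lmstudio_api: message.content first, then text."""
        choices = response_data.get("choices") or []
        if not choices:
            return None
        message = choices[0].get("message") or {}
        return message.get("content") or choices[0].get("text")

    assert extract_content(chat_style) == extract_content(completion_style)
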
gac/providers/minimax.py
CHANGED

@@ -1,10 +1,15 @@
 """MiniMax API provider for gac."""
 
+import logging
 import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
 
 
 def call_minimax_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -18,8 +23,12 @@ def call_minimax_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
 
     data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
 
+    logger.debug(f"Calling MiniMax API with model={model}")
+
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
         content = response_data["choices"][0]["message"]["content"]
@@ -27,6 +36,7 @@ def call_minimax_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
             raise AIError.model_error("MiniMax API returned null content")
         if content == "":
             raise AIError.model_error("MiniMax API returned empty content")
+        logger.debug("MiniMax API response received successfully")
         return content
     except httpx.HTTPStatusError as e:
         if e.response.status_code == 429:

gac/providers/mistral.py
ADDED

@@ -0,0 +1,48 @@
+"""Mistral API provider for gac."""
+
+import logging
+import os
+
+import httpx
+
+from gac.constants import ProviderDefaults
+from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
+
+
+def call_mistral_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call Mistral API directly."""
+    api_key = os.getenv("MISTRAL_API_KEY")
+    if not api_key:
+        raise AIError.authentication_error("MISTRAL_API_KEY not found in environment variables")
+
+    url = "https://api.mistral.ai/v1/chat/completions"
+    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+    data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+
+    logger.debug(f"Calling Mistral API with model={model}")
+
+    try:
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
+        response.raise_for_status()
+        response_data = response.json()
+        content = response_data["choices"][0]["message"]["content"]
+        if content is None:
+            raise AIError.model_error("Mistral API returned null content")
+        if content == "":
+            raise AIError.model_error("Mistral API returned empty content")
+        logger.debug("Mistral API response received successfully")
+        return content
+    except httpx.HTTPStatusError as e:
+        if e.response.status_code == 429:
+            raise AIError.rate_limit_error(f"Mistral API rate limit exceeded: {e.response.text}") from e
+        raise AIError.model_error(f"Mistral API error: {e.response.status_code} - {e.response.text}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"Mistral API request timed out: {str(e)}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Mistral API: {str(e)}") from e

@@ -0,0 +1,48 @@
|
|
|
1
|
+
"""Moonshot AI provider for gac."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import os
|
|
5
|
+
|
|
6
|
+
import httpx
|
|
7
|
+
|
|
8
|
+
from gac.constants import ProviderDefaults
|
|
9
|
+
from gac.errors import AIError
|
|
10
|
+
from gac.utils import get_ssl_verify
|
|
11
|
+
|
|
12
|
+
logger = logging.getLogger(__name__)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def call_moonshot_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
|
|
16
|
+
"""Call Moonshot AI API directly."""
|
|
17
|
+
api_key = os.getenv("MOONSHOT_API_KEY")
|
|
18
|
+
if not api_key:
|
|
19
|
+
raise AIError.authentication_error("MOONSHOT_API_KEY not found in environment variables")
|
|
20
|
+
|
|
21
|
+
url = "https://api.moonshot.ai/v1/chat/completions"
|
|
22
|
+
headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
|
|
23
|
+
|
|
24
|
+
data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
|
|
25
|
+
|
|
26
|
+
logger.debug(f"Calling Moonshot AI API with model={model}")
|
|
27
|
+
|
|
28
|
+
try:
|
|
29
|
+
response = httpx.post(
|
|
30
|
+
url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
|
|
31
|
+
)
|
|
32
|
+
response.raise_for_status()
|
|
33
|
+
response_data = response.json()
|
|
34
|
+
content = response_data["choices"][0]["message"]["content"]
|
|
35
|
+
if content is None:
|
|
36
|
+
raise AIError.model_error("Moonshot AI API returned null content")
|
|
37
|
+
if content == "":
|
|
38
|
+
raise AIError.model_error("Moonshot AI API returned empty content")
|
|
39
|
+
logger.debug("Moonshot AI API response received successfully")
|
|
40
|
+
return content
|
|
41
|
+
except httpx.HTTPStatusError as e:
|
|
42
|
+
if e.response.status_code == 429:
|
|
43
|
+
raise AIError.rate_limit_error(f"Moonshot AI API rate limit exceeded: {e.response.text}") from e
|
|
44
|
+
raise AIError.model_error(f"Moonshot AI API error: {e.response.status_code} - {e.response.text}") from e
|
|
45
|
+
except httpx.TimeoutException as e:
|
|
46
|
+
raise AIError.timeout_error(f"Moonshot AI API request timed out: {str(e)}") from e
|
|
47
|
+
except Exception as e:
|
|
48
|
+
raise AIError.model_error(f"Error calling Moonshot AI API: {str(e)}") from e
|
gac/providers/ollama.py
CHANGED

@@ -1,10 +1,15 @@
 """Ollama AI provider implementation."""
 
+import logging
 import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
 
 
 def call_ollama_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -18,8 +23,12 @@ def call_ollama_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
     if api_key:
         headers["Authorization"] = f"Bearer {api_key}"
 
+    logger.debug(f"Calling Ollama API with model={model}")
+
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
 
@@ -37,6 +46,7 @@ def call_ollama_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
             raise AIError.model_error("Ollama API returned null content")
         if content == "":
             raise AIError.model_error("Ollama API returned empty content")
+        logger.debug("Ollama API response received successfully")
         return content
     except httpx.ConnectError as e:
         raise AIError.connection_error(f"Ollama connection failed. Make sure Ollama is running: {str(e)}") from e

gac/providers/openai.py
CHANGED

@@ -1,10 +1,15 @@
 """OpenAI API provider for gac."""
 
+import logging
 import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
 
 
 def call_openai_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -18,8 +23,12 @@ def call_openai_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
 
     data = {"model": model, "messages": messages, "temperature": temperature, "max_completion_tokens": max_tokens}
 
+    logger.debug(f"Calling OpenAI API with model={model}")
+
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
         content = response_data["choices"][0]["message"]["content"]
@@ -27,6 +36,7 @@ def call_openai_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
             raise AIError.model_error("OpenAI API returned null content")
         if content == "":
             raise AIError.model_error("OpenAI API returned empty content")
+        logger.debug("OpenAI API response received successfully")
         return content
     except httpx.HTTPStatusError as e:
         if e.response.status_code == 429:

gac/providers/openrouter.py
CHANGED

@@ -1,10 +1,15 @@
 """OpenRouter API provider for gac."""
 
+import logging
 import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
 
 
 def call_openrouter_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -26,8 +31,12 @@ def call_openrouter_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
         "max_tokens": max_tokens,
     }
 
+    logger.debug(f"Calling OpenRouter API with model={model}")
+
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
         content = response_data["choices"][0]["message"]["content"]
@@ -35,6 +44,7 @@ def call_openrouter_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
             raise AIError.model_error("OpenRouter API returned null content")
         if content == "":
             raise AIError.model_error("OpenRouter API returned empty content")
+        logger.debug("OpenRouter API response received successfully")
         return content
     except httpx.HTTPStatusError as e:
         # Handle specific HTTP status codes

gac/providers/qwen.py
ADDED

@@ -0,0 +1,76 @@
+"""Qwen API provider for gac with OAuth support."""
+
+import logging
+import os
+
+import httpx
+
+from gac.constants import ProviderDefaults
+from gac.errors import AIError
+from gac.oauth import QwenOAuthProvider, TokenStore
+
+logger = logging.getLogger(__name__)
+
+QWEN_API_URL = "https://chat.qwen.ai/api/v1/chat/completions"
+
+
+def get_qwen_auth() -> tuple[str, str]:
+    """Get Qwen authentication (API key or OAuth token).
+
+    Returns:
+        Tuple of (token, api_url) for authentication.
+    """
+    api_key = os.getenv("QWEN_API_KEY")
+    if api_key:
+        return api_key, QWEN_API_URL
+
+    oauth_provider = QwenOAuthProvider(TokenStore())
+    token = oauth_provider.get_token()
+    if token:
+        resource_url = token.get("resource_url")
+        if resource_url:
+            if not resource_url.startswith(("http://", "https://")):
+                resource_url = f"https://{resource_url}"
+            if not resource_url.endswith("/chat/completions"):
+                resource_url = resource_url.rstrip("/") + "/v1/chat/completions"
+            api_url = resource_url
+        else:
+            api_url = QWEN_API_URL
+        return token["access_token"], api_url
+
+    raise AIError.authentication_error(
+        "Qwen authentication not found. Set QWEN_API_KEY or run 'gac auth qwen login' for OAuth."
+    )
+
+
+def call_qwen_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call Qwen API with OAuth or API key authentication."""
+    auth_token, api_url = get_qwen_auth()
+
+    headers = {"Authorization": f"Bearer {auth_token}", "Content-Type": "application/json"}
+
+    data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+
+    logger.debug(f"Calling Qwen API with model={model}")
+
+    try:
+        response = httpx.post(api_url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT)
+        response.raise_for_status()
+        response_data = response.json()
+        content = response_data["choices"][0]["message"]["content"]
+        if content is None:
+            raise AIError.model_error("Qwen API returned null content")
+        if content == "":
+            raise AIError.model_error("Qwen API returned empty content")
+        logger.debug("Qwen API response received successfully")
+        return content
+    except httpx.HTTPStatusError as e:
+        if e.response.status_code == 401:
+            raise AIError.authentication_error(f"Qwen authentication failed: {e.response.text}") from e
+        if e.response.status_code == 429:
+            raise AIError.rate_limit_error(f"Qwen API rate limit exceeded: {e.response.text}") from e
+        raise AIError.model_error(f"Qwen API error: {e.response.status_code} - {e.response.text}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"Qwen API request timed out: {str(e)}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Qwen API: {str(e)}") from e

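The OAuth branch of get_qwen_auth normalizes whatever resource_url the token store returns before using it as the endpoint: it prepends a scheme if one is missing and appends the OpenAI-style path unless it is already there. A standalone sketch of that normalization, with assumed example values:

    # Standalone restatement of the resource_url normalization in get_qwen_auth.
    def normalize_resource_url(resource_url: str) -> str:
        if not resource_url.startswith(("http://", "https://")):
            resource_url = f"https://{resource_url}"
        if not resource_url.endswith("/chat/completions"):
            resource_url = resource_url.rstrip("/") + "/v1/chat/completions"
        return resource_url

    # A bare host from the token store gains a scheme and the chat-completions path:
    assert normalize_resource_url("portal.qwen.ai") == "https://portal.qwen.ai/v1/chat/completions"
    # An already-complete endpoint is left alone:
    assert normalize_resource_url("https://x.example/v1/chat/completions") == "https://x.example/v1/chat/completions"
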
gac/providers/replicate.py
ADDED

@@ -0,0 +1,110 @@
+"""Replicate API provider for gac."""
+
+import logging
+import os
+
+import httpx
+
+from gac.constants import ProviderDefaults
+from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
+
+
+def call_replicate_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call Replicate API directly."""
+    api_key = os.getenv("REPLICATE_API_TOKEN")
+    if not api_key:
+        raise AIError.authentication_error("REPLICATE_API_TOKEN not found in environment variables")
+
+    # Replicate uses a different endpoint for language models
+    url = "https://api.replicate.com/v1/predictions"
+    headers = {"Authorization": f"Token {api_key}", "Content-Type": "application/json"}
+
+    # Convert messages to a single prompt for Replicate
+    prompt_parts = []
+    system_message = None
+
+    for message in messages:
+        role = message.get("role")
+        content = message.get("content", "")
+
+        if role == "system":
+            system_message = content
+        elif role == "user":
+            prompt_parts.append(f"Human: {content}")
+        elif role == "assistant":
+            prompt_parts.append(f"Assistant: {content}")
+
+    # Add system message at the beginning if present
+    if system_message:
+        prompt_parts.insert(0, f"System: {system_message}")
+
+    # Add final assistant prompt
+    prompt_parts.append("Assistant:")
+    full_prompt = "\n\n".join(prompt_parts)
+
+    # Replicate prediction payload
+    data = {
+        "version": model,  # Replicate uses version string as model identifier
+        "input": {
+            "prompt": full_prompt,
+            "temperature": temperature,
+            "max_tokens": max_tokens,
+        },
+    }
+
+    logger.debug(f"Calling Replicate API with model={model}")
+
+    try:
+        # Create prediction
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
+        response.raise_for_status()
+        prediction_data = response.json()
+
+        # Get the prediction URL to check status
+        get_url = f"https://api.replicate.com/v1/predictions/{prediction_data['id']}"
+
+        # Poll for completion (Replicate predictions are async)
+        max_wait_time = 120
+        wait_interval = 2
+        elapsed_time = 0
+
+        while elapsed_time < max_wait_time:
+            get_response = httpx.get(
+                get_url, headers=headers, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+            )
+            get_response.raise_for_status()
+            status_data = get_response.json()
+
+            if status_data["status"] == "succeeded":
+                content = status_data["output"]
+                if not content:
+                    raise AIError.model_error("Replicate API returned empty content")
+                logger.debug("Replicate API response received successfully")
+                return content
+            elif status_data["status"] == "failed":
+                raise AIError.model_error(f"Replicate prediction failed: {status_data.get('error', 'Unknown error')}")
+            elif status_data["status"] in ["starting", "processing"]:
+                import time
+
+                time.sleep(wait_interval)
+                elapsed_time += wait_interval
+            else:
+                raise AIError.model_error(f"Replicate API returned unknown status: {status_data['status']}")
+
+        raise AIError.timeout_error("Replicate API prediction timed out")
+
+    except httpx.HTTPStatusError as e:
+        if e.response.status_code == 429:
+            raise AIError.rate_limit_error(f"Replicate API rate limit exceeded: {e.response.text}") from e
+        elif e.response.status_code == 401:
+            raise AIError.authentication_error(f"Replicate API authentication failed: {e.response.text}") from e
+        raise AIError.model_error(f"Replicate API error: {e.response.status_code} - {e.response.text}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"Replicate API request timed out: {str(e)}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Replicate API: {str(e)}") from e

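Replicate's predictions endpoint is asynchronous and takes a single prompt rather than a message list, so this provider flattens the conversation and then polls at 2-second intervals for up to 120 seconds (at most 60 status checks) before raising a timeout. A self-contained sketch of the flattening step, run on a made-up conversation:

    # Restates the message flattening in call_replicate_api (sample content assumed).
    messages = [
        {"role": "system", "content": "Write a commit message."},
        {"role": "user", "content": "diff --git a/x b/x ..."},
    ]

    prompt_parts = []
    system_message = None
    for message in messages:
        role = message.get("role")
        content = message.get("content", "")
        if role == "system":
            system_message = content
        elif role == "user":
            prompt_parts.append(f"Human: {content}")
        elif role == "assistant":
            prompt_parts.append(f"Assistant: {content}")
    if system_message:
        prompt_parts.insert(0, f"System: {system_message}")
    prompt_parts.append("Assistant:")

    full_prompt = "\n\n".join(prompt_parts)
    # full_prompt == "System: Write a commit message.\n\nHuman: diff --git a/x b/x ...\n\nAssistant:"
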
gac/providers/streamlake.py
CHANGED

@@ -1,10 +1,15 @@
 """StreamLake (Vanchin) API provider for gac."""
 
+import logging
 import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
 
 
 def call_streamlake_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -25,8 +30,12 @@ def call_streamlake_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
         "max_tokens": max_tokens,
     }
 
+    logger.debug(f"Calling StreamLake API with model={model}")
+
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
         choices = response_data.get("choices")
@@ -40,6 +49,7 @@ def call_streamlake_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
         if content == "":
             raise AIError.model_error("StreamLake API returned empty content")
 
+        logger.debug("StreamLake API response received successfully")
         return content
     except httpx.HTTPStatusError as e:
         if e.response.status_code == 429:

gac/providers/synthetic.py
CHANGED

@@ -1,10 +1,15 @@
 """Synthetic.new API provider for gac."""
 
+import logging
 import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
 
 
 def call_synthetic_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -22,8 +27,12 @@ def call_synthetic_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
 
     data = {"model": model, "messages": messages, "temperature": temperature, "max_completion_tokens": max_tokens}
 
+    logger.debug(f"Calling Synthetic.new API with model={model}")
+
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
         content = response_data["choices"][0]["message"]["content"]
@@ -31,6 +40,7 @@ def call_synthetic_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
             raise AIError.model_error("Synthetic.new API returned null content")
         if content == "":
             raise AIError.model_error("Synthetic.new API returned empty content")
+        logger.debug("Synthetic.new API response received successfully")
         return content
    except httpx.HTTPStatusError as e:
        if e.response.status_code == 429:

gac/providers/together.py
CHANGED

@@ -1,10 +1,15 @@
 """Together AI API provider for gac."""
 
+import logging
 import os
 
 import httpx
 
+from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.utils import get_ssl_verify
+
+logger = logging.getLogger(__name__)
 
 
 def call_together_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -18,8 +23,12 @@ def call_together_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
 
     data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
 
+    logger.debug(f"Calling Together AI API with model={model}")
+
     try:
-        response = httpx.post(
+        response = httpx.post(
+            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
+        )
         response.raise_for_status()
         response_data = response.json()
         content = response_data["choices"][0]["message"]["content"]
@@ -27,6 +36,7 @@ def call_together_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
             raise AIError.model_error("Together AI API returned null content")
         if content == "":
             raise AIError.model_error("Together AI API returned empty content")
+        logger.debug("Together AI API response received successfully")
         return content
     except httpx.HTTPStatusError as e:
         if e.response.status_code == 429: