gac 3.8.1__py3-none-any.whl → 3.10.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. gac/__init__.py +4 -6
  2. gac/__version__.py +1 -1
  3. gac/ai_utils.py +18 -49
  4. gac/cli.py +14 -10
  5. gac/commit_executor.py +59 -0
  6. gac/config.py +28 -3
  7. gac/config_cli.py +19 -7
  8. gac/constants/__init__.py +34 -0
  9. gac/constants/commit.py +63 -0
  10. gac/constants/defaults.py +40 -0
  11. gac/constants/file_patterns.py +110 -0
  12. gac/constants/languages.py +119 -0
  13. gac/diff_cli.py +0 -22
  14. gac/errors.py +8 -2
  15. gac/git.py +6 -6
  16. gac/git_state_validator.py +193 -0
  17. gac/grouped_commit_workflow.py +458 -0
  18. gac/init_cli.py +2 -1
  19. gac/interactive_mode.py +179 -0
  20. gac/language_cli.py +0 -1
  21. gac/main.py +222 -959
  22. gac/model_cli.py +2 -1
  23. gac/model_identifier.py +70 -0
  24. gac/oauth/claude_code.py +2 -2
  25. gac/oauth/qwen_oauth.py +4 -0
  26. gac/oauth/token_store.py +2 -2
  27. gac/oauth_retry.py +161 -0
  28. gac/postprocess.py +155 -0
  29. gac/prompt.py +20 -490
  30. gac/prompt_builder.py +88 -0
  31. gac/providers/README.md +437 -0
  32. gac/providers/__init__.py +70 -81
  33. gac/providers/anthropic.py +12 -56
  34. gac/providers/azure_openai.py +48 -92
  35. gac/providers/base.py +329 -0
  36. gac/providers/cerebras.py +10 -43
  37. gac/providers/chutes.py +16 -72
  38. gac/providers/claude_code.py +64 -97
  39. gac/providers/custom_anthropic.py +51 -85
  40. gac/providers/custom_openai.py +29 -87
  41. gac/providers/deepseek.py +10 -43
  42. gac/providers/error_handler.py +139 -0
  43. gac/providers/fireworks.py +10 -43
  44. gac/providers/gemini.py +66 -73
  45. gac/providers/groq.py +10 -62
  46. gac/providers/kimi_coding.py +19 -59
  47. gac/providers/lmstudio.py +62 -52
  48. gac/providers/minimax.py +10 -43
  49. gac/providers/mistral.py +10 -43
  50. gac/providers/moonshot.py +10 -43
  51. gac/providers/ollama.py +54 -41
  52. gac/providers/openai.py +30 -46
  53. gac/providers/openrouter.py +15 -62
  54. gac/providers/protocol.py +71 -0
  55. gac/providers/qwen.py +55 -67
  56. gac/providers/registry.py +58 -0
  57. gac/providers/replicate.py +137 -91
  58. gac/providers/streamlake.py +26 -56
  59. gac/providers/synthetic.py +35 -47
  60. gac/providers/together.py +10 -43
  61. gac/providers/zai.py +21 -59
  62. gac/py.typed +0 -0
  63. gac/security.py +1 -1
  64. gac/templates/__init__.py +1 -0
  65. gac/templates/question_generation.txt +60 -0
  66. gac/templates/system_prompt.txt +224 -0
  67. gac/templates/user_prompt.txt +28 -0
  68. gac/utils.py +6 -5
  69. gac/workflow_context.py +162 -0
  70. {gac-3.8.1.dist-info → gac-3.10.10.dist-info}/METADATA +1 -1
  71. gac-3.10.10.dist-info/RECORD +79 -0
  72. gac/constants.py +0 -328
  73. gac-3.8.1.dist-info/RECORD +0 -56
  74. {gac-3.8.1.dist-info → gac-3.10.10.dist-info}/WHEEL +0 -0
  75. {gac-3.8.1.dist-info → gac-3.10.10.dist-info}/entry_points.txt +0 -0
  76. {gac-3.8.1.dist-info → gac-3.10.10.dist-info}/licenses/LICENSE +0 -0
gac/providers/qwen.py CHANGED
@@ -1,76 +1,64 @@
-"""Qwen API provider for gac with OAuth support."""
+"""Qwen API provider for gac with OAuth-only support."""
 
-import logging
-import os
-
-import httpx
-
-from gac.constants import ProviderDefaults
 from gac.errors import AIError
 from gac.oauth import QwenOAuthProvider, TokenStore
+from gac.providers.base import OpenAICompatibleProvider, ProviderConfig
 
-logger = logging.getLogger(__name__)
-
-QWEN_API_URL = "https://chat.qwen.ai/api/v1/chat/completions"
-
-
-def get_qwen_auth() -> tuple[str, str]:
-    """Get Qwen authentication (API key or OAuth token).
+QWEN_DEFAULT_API_URL = "https://chat.qwen.ai/api/v1"
 
-    Returns:
-        Tuple of (token, api_url) for authentication.
-    """
-    api_key = os.getenv("QWEN_API_KEY")
-    if api_key:
-        return api_key, QWEN_API_URL
 
-    oauth_provider = QwenOAuthProvider(TokenStore())
-    token = oauth_provider.get_token()
-    if token:
-        resource_url = token.get("resource_url")
-        if resource_url:
-            if not resource_url.startswith(("http://", "https://")):
-                resource_url = f"https://{resource_url}"
-            if not resource_url.endswith("/chat/completions"):
-                resource_url = resource_url.rstrip("/") + "/v1/chat/completions"
-            api_url = resource_url
-        else:
-            api_url = QWEN_API_URL
-        return token["access_token"], api_url
+class QwenProvider(OpenAICompatibleProvider):
+    """Qwen provider with OAuth-only authentication."""
 
-    raise AIError.authentication_error(
-        "Qwen authentication not found. Set QWEN_API_KEY or run 'gac auth qwen login' for OAuth."
+    config = ProviderConfig(
+        name="Qwen",
+        api_key_env="",
+        base_url=QWEN_DEFAULT_API_URL,
     )
 
-
-def call_qwen_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
-    """Call Qwen API with OAuth or API key authentication."""
-    auth_token, api_url = get_qwen_auth()
-
-    headers = {"Authorization": f"Bearer {auth_token}", "Content-Type": "application/json"}
-
-    data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
-
-    logger.debug(f"Calling Qwen API with model={model}")
-
-    try:
-        response = httpx.post(api_url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT)
-        response.raise_for_status()
-        response_data = response.json()
-        content = response_data["choices"][0]["message"]["content"]
-        if content is None:
-            raise AIError.model_error("Qwen API returned null content")
-        if content == "":
-            raise AIError.model_error("Qwen API returned empty content")
-        logger.debug("Qwen API response received successfully")
-        return content
-    except httpx.HTTPStatusError as e:
-        if e.response.status_code == 401:
-            raise AIError.authentication_error(f"Qwen authentication failed: {e.response.text}") from e
-        if e.response.status_code == 429:
-            raise AIError.rate_limit_error(f"Qwen API rate limit exceeded: {e.response.text}") from e
-        raise AIError.model_error(f"Qwen API error: {e.response.status_code} - {e.response.text}") from e
-    except httpx.TimeoutException as e:
-        raise AIError.timeout_error(f"Qwen API request timed out: {str(e)}") from e
-    except Exception as e:
-        raise AIError.model_error(f"Error calling Qwen API: {str(e)}") from e
+    def __init__(self, config: ProviderConfig):
+        """Initialize with OAuth authentication."""
+        super().__init__(config)
+        self._auth_token, self._resolved_base_url = self._get_oauth_token()
+
+    def _get_api_key(self) -> str:
+        """Return placeholder for parent class compatibility (OAuth is used instead)."""
+        return "oauth-token"
+
+    def _get_oauth_token(self) -> tuple[str, str]:
+        """Get Qwen OAuth token from token store.
+
+        Returns:
+            Tuple of (access_token, api_url) for authentication.
+
+        Raises:
+            AIError: If no OAuth token is found.
+        """
+        oauth_provider = QwenOAuthProvider(TokenStore())
+        token = oauth_provider.get_token()
+        if token:
+            resource_url = token.get("resource_url")
+            if resource_url:
+                if not resource_url.startswith(("http://", "https://")):
+                    resource_url = f"https://{resource_url}"
+                if not resource_url.endswith("/v1"):
+                    resource_url = resource_url.rstrip("/") + "/v1"
+                base_url = resource_url
+            else:
+                base_url = QWEN_DEFAULT_API_URL
+            return token["access_token"], base_url
+
+        raise AIError.authentication_error("Qwen OAuth token not found. Run 'gac auth qwen login' to authenticate.")
+
+    def _build_headers(self) -> dict[str, str]:
+        """Build headers with OAuth token."""
+        headers = super()._build_headers()
+        # Replace Bearer token with the stored auth token
+        if "Authorization" in headers:
+            del headers["Authorization"]
+        headers["Authorization"] = f"Bearer {self._auth_token}"
+        return headers
+
+    def _get_api_url(self, model: str | None = None) -> str:
+        """Get Qwen API URL with /chat/completions endpoint."""
+        return f"{self._resolved_base_url}/chat/completions"
gac/providers/registry.py ADDED
@@ -0,0 +1,58 @@
+"""Provider registry for AI providers."""
+
+from collections.abc import Callable
+from functools import wraps
+from typing import TYPE_CHECKING, Any
+
+if TYPE_CHECKING:
+    from gac.providers.base import BaseConfiguredProvider
+
+# Global registry for provider functions
+PROVIDER_REGISTRY: dict[str, Callable[..., str]] = {}
+
+
+def create_provider_func(provider_class: type["BaseConfiguredProvider"]) -> Callable[..., str]:
+    """Create a provider function from a provider class.
+
+    This function creates a callable that:
+    1. Instantiates the provider class
+    2. Calls generate() with the provided arguments
+    3. Is wrapped with @handle_provider_errors for consistent error handling
+
+    Args:
+        provider_class: A provider class with a `config` class attribute
+
+    Returns:
+        A callable function that can be used to generate text
+    """
+    from gac.providers.error_handler import handle_provider_errors
+
+    provider_name = provider_class.config.name
+
+    @handle_provider_errors(provider_name)
+    @wraps(provider_class.generate)
+    def provider_func(model: str, messages: list[dict[str, Any]], temperature: float, max_tokens: int) -> str:
+        provider = provider_class(provider_class.config)
+        return provider.generate(model=model, messages=messages, temperature=temperature, max_tokens=max_tokens)
+
+    # Add metadata for introspection
+    provider_func.__name__ = f"call_{provider_name.lower().replace(' ', '_').replace('.', '_')}_api"
+    provider_func.__doc__ = f"Call {provider_name} API to generate text."
+
+    return provider_func
+
+
+def register_provider(name: str, provider_class: type["BaseConfiguredProvider"]) -> None:
+    """Register a provider class and auto-generate its function.
+
+    Args:
+        name: Provider name (e.g., "openai", "anthropic")
+        provider_class: The provider class to register
+    """
+    PROVIDER_REGISTRY[name] = create_provider_func(provider_class)
+
+
+__all__ = [
+    "PROVIDER_REGISTRY",
+    "register_provider",
+]
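For context, a hypothetical sketch of how this registry is consumed. DummyProvider is a stand-in that only supplies what the registry touches (a config with a name, a constructor taking that config, and generate()); real providers subclass the base classes in gac/providers/base.py, and the exact behavior of handle_provider_errors is defined in gac/providers/error_handler.py, which is not shown here:

# Hypothetical usage sketch; not taken from the package source.
from gac.providers.base import ProviderConfig
from gac.providers.registry import PROVIDER_REGISTRY, register_provider


class DummyProvider:
    # Stand-in provider; the ProviderConfig fields mirror those used by the real providers above.
    config = ProviderConfig(name="Dummy", api_key_env="DUMMY_API_KEY", base_url="https://example.invalid/v1")

    def __init__(self, config: ProviderConfig) -> None:
        self.config = config

    def generate(self, model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
        return "chore: example commit message"


register_provider("dummy", DummyProvider)

# register_provider stores an auto-generated function (its __name__ becomes call_dummy_api).
call_dummy = PROVIDER_REGISTRY["dummy"]
print(call_dummy(model="dummy-1", messages=[{"role": "user", "content": "hi"}], temperature=0.7, max_tokens=64))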
gac/providers/replicate.py CHANGED
@@ -1,110 +1,156 @@
 """Replicate API provider for gac."""
 
-import logging
-import os
+import time
+from typing import Any
 
 import httpx
 
-from gac.constants import ProviderDefaults
 from gac.errors import AIError
+from gac.providers.base import GenericHTTPProvider, ProviderConfig
 from gac.utils import get_ssl_verify
 
-logger = logging.getLogger(__name__)
 
+class ReplicateProvider(GenericHTTPProvider):
+    """Replicate API provider with async prediction polling."""
+
+    config = ProviderConfig(
+        name="Replicate",
+        api_key_env="REPLICATE_API_TOKEN",
+        base_url="https://api.replicate.com/v1",
+    )
+
+    def _get_api_url(self, model: str | None = None) -> str:
+        """Get Replicate API URL with /predictions endpoint."""
+        return f"{self.config.base_url}/predictions"
+
+    def _build_headers(self) -> dict[str, str]:
+        """Build headers with Token-based authorization."""
+        headers = super()._build_headers()
+        # Replace Bearer token with Token format
+        if "Authorization" in headers:
+            del headers["Authorization"]
+        headers["Authorization"] = f"Token {self.api_key}"
+        return headers
+
+    def _build_request_body(
+        self, messages: list[dict[str, Any]], temperature: float, max_tokens: int, model: str, **kwargs: Any
+    ) -> dict[str, Any]:
+        """Build Replicate prediction payload with message-to-prompt conversion."""
+        # Convert messages to a single prompt for Replicate
+        prompt_parts = []
+        system_message = None
+
+        for message in messages:
+            role = message.get("role")
+            content = message.get("content", "")
+
+            if role == "system":
+                system_message = content
+            elif role == "user":
+                prompt_parts.append(f"Human: {content}")
+            elif role == "assistant":
+                prompt_parts.append(f"Assistant: {content}")
+
+        # Add system message at the beginning if present
+        if system_message:
+            prompt_parts.insert(0, f"System: {system_message}")
+
+        # Add final assistant prompt
+        prompt_parts.append("Assistant:")
+        full_prompt = "\n\n".join(prompt_parts)
+
+        # Replicate prediction payload
+        return {
+            "version": model,  # Replicate uses version string as model identifier
+            "input": {
+                "prompt": full_prompt,
+                "temperature": temperature,
+                "max_tokens": max_tokens,
+            },
+        }
+
+    def generate(
+        self,
+        model: str,
+        messages: list[dict[str, Any]],
+        temperature: float = 0.7,
+        max_tokens: int = 1024,
+        **kwargs: Any,
+    ) -> str:
+        """Override generate to handle Replicate's async polling mechanism."""
+        # Build request components
+        try:
+            url = self._get_api_url(model)
+        except AIError:
+            raise
+        except Exception as e:
+            raise AIError.model_error(f"Error calling {self.config.name} AI API: {e!s}") from e
+
+        try:
+            headers = self._build_headers()
+        except AIError:
+            raise
+        except Exception as e:
+            raise AIError.model_error(f"Error calling {self.config.name} AI API: {e!s}") from e
+
+        try:
+            body = self._build_request_body(messages, temperature, max_tokens, model, **kwargs)
+        except AIError:
+            raise
+        except Exception as e:
+            raise AIError.model_error(f"Error calling {self.config.name} AI API: {e!s}") from e
 
-def call_replicate_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
-    """Call Replicate API directly."""
-    api_key = os.getenv("REPLICATE_API_TOKEN")
-    if not api_key:
-        raise AIError.authentication_error("REPLICATE_API_TOKEN not found in environment variables")
-
-    # Replicate uses a different endpoint for language models
-    url = "https://api.replicate.com/v1/predictions"
-    headers = {"Authorization": f"Token {api_key}", "Content-Type": "application/json"}
-
-    # Convert messages to a single prompt for Replicate
-    prompt_parts = []
-    system_message = None
-
-    for message in messages:
-        role = message.get("role")
-        content = message.get("content", "")
-
-        if role == "system":
-            system_message = content
-        elif role == "user":
-            prompt_parts.append(f"Human: {content}")
-        elif role == "assistant":
-            prompt_parts.append(f"Assistant: {content}")
-
-    # Add system message at the beginning if present
-    if system_message:
-        prompt_parts.insert(0, f"System: {system_message}")
-
-    # Add final assistant prompt
-    prompt_parts.append("Assistant:")
-    full_prompt = "\n\n".join(prompt_parts)
-
-    # Replicate prediction payload
-    data = {
-        "version": model,  # Replicate uses version string as model identifier
-        "input": {
-            "prompt": full_prompt,
-            "temperature": temperature,
-            "max_tokens": max_tokens,
-        },
-    }
-
-    logger.debug(f"Calling Replicate API with model={model}")
-
-    try:
         # Create prediction
-        response = httpx.post(
-            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
-        )
-        response.raise_for_status()
-        prediction_data = response.json()
-
-        # Get the prediction URL to check status
+        try:
+            response = httpx.post(url, json=body, headers=headers, timeout=self.config.timeout, verify=get_ssl_verify())
+            response.raise_for_status()
+            prediction_data = response.json()
+        except httpx.HTTPStatusError as e:
+            if e.response.status_code == 429:
+                raise AIError.rate_limit_error(f"Replicate API rate limit exceeded: {e.response.text}") from e
+            elif e.response.status_code == 401:
+                raise AIError.authentication_error(f"Replicate API authentication failed: {e.response.text}") from e
+            raise AIError.model_error(f"Replicate API error: {e.response.status_code} - {e.response.text}") from e
+        except httpx.TimeoutException as e:
+            raise AIError.timeout_error(f"Replicate API request timed out: {str(e)}") from e
+        except Exception as e:
+            raise AIError.model_error(f"Error calling Replicate API: {str(e)}") from e
+
+        # Poll for completion
         get_url = f"https://api.replicate.com/v1/predictions/{prediction_data['id']}"
-
-        # Poll for completion (Replicate predictions are async)
         max_wait_time = 120
         wait_interval = 2
         elapsed_time = 0
 
         while elapsed_time < max_wait_time:
-            get_response = httpx.get(
-                get_url, headers=headers, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
-            )
-            get_response.raise_for_status()
-            status_data = get_response.json()
-
-            if status_data["status"] == "succeeded":
-                content = status_data["output"]
-                if not content:
-                    raise AIError.model_error("Replicate API returned empty content")
-                logger.debug("Replicate API response received successfully")
-                return content
-            elif status_data["status"] == "failed":
-                raise AIError.model_error(f"Replicate prediction failed: {status_data.get('error', 'Unknown error')}")
-            elif status_data["status"] in ["starting", "processing"]:
-                import time
-
-                time.sleep(wait_interval)
-                elapsed_time += wait_interval
-            else:
-                raise AIError.model_error(f"Replicate API returned unknown status: {status_data['status']}")
+            try:
+                get_response = httpx.get(get_url, headers=headers, timeout=self.config.timeout, verify=get_ssl_verify())
+                get_response.raise_for_status()
+                status_data = get_response.json()
+
+                if status_data["status"] == "succeeded":
+                    content = status_data["output"]
+                    if not content:
+                        raise AIError.model_error("Replicate API returned empty content")
+                    return content
+                elif status_data["status"] == "failed":
+                    raise AIError.model_error(
+                        f"Replicate prediction failed: {status_data.get('error', 'Unknown error')}"
+                    )
+                elif status_data["status"] in ["starting", "processing"]:
+                    time.sleep(wait_interval)
+                    elapsed_time += wait_interval
+                else:
+                    raise AIError.model_error(f"Replicate API returned unknown status: {status_data['status']}")
+            except httpx.HTTPStatusError as e:
+                if e.response.status_code == 429:
+                    raise AIError.rate_limit_error(f"Replicate API rate limit exceeded: {e.response.text}") from e
+                raise AIError.model_error(f"Replicate API error: {e.response.status_code} - {e.response.text}") from e
+            except httpx.TimeoutException as e:
+                raise AIError.timeout_error(f"Replicate API request timed out: {str(e)}") from e
+            except AIError:
+                raise
+            except Exception as e:
+                raise AIError.model_error(f"Error polling Replicate API: {str(e)}") from e
 
         raise AIError.timeout_error("Replicate API prediction timed out")
-
-    except httpx.HTTPStatusError as e:
-        if e.response.status_code == 429:
-            raise AIError.rate_limit_error(f"Replicate API rate limit exceeded: {e.response.text}") from e
-        elif e.response.status_code == 401:
-            raise AIError.authentication_error(f"Replicate API authentication failed: {e.response.text}") from e
-        raise AIError.model_error(f"Replicate API error: {e.response.status_code} - {e.response.text}") from e
-    except httpx.TimeoutException as e:
-        raise AIError.timeout_error(f"Replicate API request timed out: {str(e)}") from e
-    except Exception as e:
-        raise AIError.model_error(f"Error calling Replicate API: {str(e)}") from e
gac/providers/streamlake.py CHANGED
@@ -1,61 +1,31 @@
 """StreamLake (Vanchin) API provider for gac."""
 
-import logging
 import os
 
-import httpx
-
-from gac.constants import ProviderDefaults
 from gac.errors import AIError
-from gac.utils import get_ssl_verify
-
-logger = logging.getLogger(__name__)
-
-
-def call_streamlake_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
-    """Call StreamLake (Vanchin) chat completions API."""
-    api_key = os.getenv("STREAMLAKE_API_KEY") or os.getenv("VC_API_KEY")
-    if not api_key:
-        raise AIError.authentication_error(
-            "STREAMLAKE_API_KEY not found in environment variables (VC_API_KEY alias also not set)"
-        )
-
-    url = "https://vanchin.streamlake.ai/api/gateway/v1/endpoints/chat/completions"
-    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
-
-    data = {
-        "model": model,
-        "messages": messages,
-        "temperature": temperature,
-        "max_tokens": max_tokens,
-    }
-
-    logger.debug(f"Calling StreamLake API with model={model}")
-
-    try:
-        response = httpx.post(
-            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
-        )
-        response.raise_for_status()
-        response_data = response.json()
-        choices = response_data.get("choices")
-        if not choices:
-            raise AIError.model_error("StreamLake API returned no choices")
-
-        message = choices[0].get("message", {})
-        content = message.get("content")
-        if content is None:
-            raise AIError.model_error("StreamLake API returned null content")
-        if content == "":
-            raise AIError.model_error("StreamLake API returned empty content")
-
-        logger.debug("StreamLake API response received successfully")
-        return content
-    except httpx.HTTPStatusError as e:
-        if e.response.status_code == 429:
-            raise AIError.rate_limit_error(f"StreamLake API rate limit exceeded: {e.response.text}") from e
-        raise AIError.model_error(f"StreamLake API error: {e.response.status_code} - {e.response.text}") from e
-    except httpx.TimeoutException as e:
-        raise AIError.timeout_error(f"StreamLake API request timed out: {str(e)}") from e
-    except Exception as e:  # noqa: BLE001 - convert to AIError
-        raise AIError.model_error(f"Error calling StreamLake API: {str(e)}") from e
+from gac.providers.base import OpenAICompatibleProvider, ProviderConfig
+
+
+class StreamlakeProvider(OpenAICompatibleProvider):
+    """StreamLake (Vanchin) OpenAI-compatible provider with alternative env vars."""
+
+    config = ProviderConfig(
+        name="StreamLake",
+        api_key_env="STREAMLAKE_API_KEY",
+        base_url="https://vanchin.streamlake.ai/api/gateway/v1/endpoints",
+    )
+
+    def _get_api_url(self, model: str | None = None) -> str:
+        """Get StreamLake API URL with /chat/completions endpoint."""
+        return f"{self.config.base_url}/chat/completions"
+
+    def _get_api_key(self) -> str:
+        """Get API key from environment with fallback to VC_API_KEY."""
+        api_key = os.getenv(self.config.api_key_env)
+        if not api_key:
+            api_key = os.getenv("VC_API_KEY")
+        if not api_key:
+            raise AIError.authentication_error(
+                "STREAMLAKE_API_KEY not found in environment variables (VC_API_KEY alias also not set)"
+            )
+        return api_key
gac/providers/synthetic.py CHANGED
@@ -1,52 +1,40 @@
 """Synthetic.new API provider for gac."""
 
-import logging
 import os
+from typing import Any
 
-import httpx
-
-from gac.constants import ProviderDefaults
 from gac.errors import AIError
-from gac.utils import get_ssl_verify
-
-logger = logging.getLogger(__name__)
-
-
-def call_synthetic_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
-    """Call Synthetic API directly."""
-    # Handle model names without hf: prefix
-    if not model.startswith("hf:"):
-        model = f"hf:{model}"
-
-    api_key = os.getenv("SYNTHETIC_API_KEY") or os.getenv("SYN_API_KEY")
-    if not api_key:
-        raise AIError.authentication_error("SYNTHETIC_API_KEY or SYN_API_KEY not found in environment variables")
-
-    url = "https://api.synthetic.new/openai/v1/chat/completions"
-    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
-
-    data = {"model": model, "messages": messages, "temperature": temperature, "max_completion_tokens": max_tokens}
-
-    logger.debug(f"Calling Synthetic.new API with model={model}")
-
-    try:
-        response = httpx.post(
-            url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
-        )
-        response.raise_for_status()
-        response_data = response.json()
-        content = response_data["choices"][0]["message"]["content"]
-        if content is None:
-            raise AIError.model_error("Synthetic.new API returned null content")
-        if content == "":
-            raise AIError.model_error("Synthetic.new API returned empty content")
-        logger.debug("Synthetic.new API response received successfully")
-        return content
-    except httpx.HTTPStatusError as e:
-        if e.response.status_code == 429:
-            raise AIError.rate_limit_error(f"Synthetic.new API rate limit exceeded: {e.response.text}") from e
-        raise AIError.model_error(f"Synthetic.new API error: {e.response.status_code} - {e.response.text}") from e
-    except httpx.TimeoutException as e:
-        raise AIError.timeout_error(f"Synthetic.new API request timed out: {str(e)}") from e
-    except Exception as e:
-        raise AIError.model_error(f"Error calling Synthetic.new API: {str(e)}") from e
+from gac.providers.base import OpenAICompatibleProvider, ProviderConfig
+
+
+class SyntheticProvider(OpenAICompatibleProvider):
+    """Synthetic.new OpenAI-compatible provider with alternative env vars and model preprocessing."""
+
+    config = ProviderConfig(
+        name="Synthetic",
+        api_key_env="SYNTHETIC_API_KEY",
+        base_url="https://api.synthetic.new/openai/v1/chat/completions",
+    )
+
+    def _get_api_key(self) -> str:
+        """Get API key from environment with fallback to SYN_API_KEY."""
+        api_key = os.getenv(self.config.api_key_env)
+        if not api_key:
+            api_key = os.getenv("SYN_API_KEY")
+        if not api_key:
+            raise AIError.authentication_error("SYNTHETIC_API_KEY or SYN_API_KEY not found in environment variables")
+        return api_key
+
+    def _build_request_body(
+        self, messages: list[dict[str, Any]], temperature: float, max_tokens: int, model: str, **kwargs: Any
+    ) -> dict[str, Any]:
+        """Build request body with model name preprocessing and max_completion_tokens."""
+        # Auto-add hf: prefix if not present
+        if not model.startswith("hf:"):
+            model = f"hf:{model}"
+
+        data = super()._build_request_body(messages, temperature, max_tokens, model, **kwargs)
+        data["max_completion_tokens"] = data.pop("max_tokens")
+        # Ensure the prefixed model is used
+        data["model"] = model
+        return data
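The net effect of the Synthetic override, shown on an example payload. This assumes the parent OpenAICompatibleProvider builds a standard OpenAI-style body (base.py is not shown here), and the model name below is hypothetical:

# Illustrates the transformation performed by SyntheticProvider._build_request_body.
model = "meta-llama/Llama-3.3-70B-Instruct"  # hypothetical model name
body = {
    "model": model,
    "messages": [{"role": "user", "content": "Write a commit message."}],
    "temperature": 0.7,
    "max_tokens": 256,
}

if not model.startswith("hf:"):
    model = f"hf:{model}"
body["max_completion_tokens"] = body.pop("max_tokens")
body["model"] = model
# body now sends model "hf:meta-llama/Llama-3.3-70B-Instruct" and max_completion_tokens
# instead of max_tokens, matching the old call_synthetic_api behavior.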