gac 1.13.0__py3-none-any.whl → 3.8.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. gac/__version__.py +1 -1
  2. gac/ai.py +33 -47
  3. gac/ai_utils.py +113 -41
  4. gac/auth_cli.py +214 -0
  5. gac/cli.py +72 -2
  6. gac/config.py +63 -6
  7. gac/config_cli.py +26 -5
  8. gac/constants.py +178 -2
  9. gac/git.py +158 -12
  10. gac/init_cli.py +40 -125
  11. gac/language_cli.py +378 -0
  12. gac/main.py +868 -158
  13. gac/model_cli.py +429 -0
  14. gac/oauth/__init__.py +27 -0
  15. gac/oauth/claude_code.py +464 -0
  16. gac/oauth/qwen_oauth.py +323 -0
  17. gac/oauth/token_store.py +81 -0
  18. gac/preprocess.py +3 -3
  19. gac/prompt.py +573 -226
  20. gac/providers/__init__.py +49 -0
  21. gac/providers/anthropic.py +11 -1
  22. gac/providers/azure_openai.py +101 -0
  23. gac/providers/cerebras.py +11 -1
  24. gac/providers/chutes.py +11 -1
  25. gac/providers/claude_code.py +112 -0
  26. gac/providers/custom_anthropic.py +6 -2
  27. gac/providers/custom_openai.py +6 -3
  28. gac/providers/deepseek.py +11 -1
  29. gac/providers/fireworks.py +11 -1
  30. gac/providers/gemini.py +11 -1
  31. gac/providers/groq.py +5 -1
  32. gac/providers/kimi_coding.py +67 -0
  33. gac/providers/lmstudio.py +12 -1
  34. gac/providers/minimax.py +11 -1
  35. gac/providers/mistral.py +48 -0
  36. gac/providers/moonshot.py +48 -0
  37. gac/providers/ollama.py +11 -1
  38. gac/providers/openai.py +11 -1
  39. gac/providers/openrouter.py +11 -1
  40. gac/providers/qwen.py +76 -0
  41. gac/providers/replicate.py +110 -0
  42. gac/providers/streamlake.py +11 -1
  43. gac/providers/synthetic.py +11 -1
  44. gac/providers/together.py +11 -1
  45. gac/providers/zai.py +11 -1
  46. gac/security.py +1 -1
  47. gac/utils.py +272 -4
  48. gac/workflow_utils.py +217 -0
  49. {gac-1.13.0.dist-info → gac-3.8.1.dist-info}/METADATA +90 -27
  50. gac-3.8.1.dist-info/RECORD +56 -0
  51. {gac-1.13.0.dist-info → gac-3.8.1.dist-info}/WHEEL +1 -1
  52. gac-1.13.0.dist-info/RECORD +0 -41
  53. {gac-1.13.0.dist-info → gac-3.8.1.dist-info}/entry_points.txt +0 -0
  54. {gac-1.13.0.dist-info → gac-3.8.1.dist-info}/licenses/LICENSE +0 -0
gac/providers/__init__.py CHANGED
@@ -1,42 +1,91 @@
1
1
  """AI provider implementations for commit message generation."""
2
2
 
3
3
  from .anthropic import call_anthropic_api
4
+ from .azure_openai import call_azure_openai_api
4
5
  from .cerebras import call_cerebras_api
5
6
  from .chutes import call_chutes_api
7
+ from .claude_code import call_claude_code_api
6
8
  from .custom_anthropic import call_custom_anthropic_api
7
9
  from .custom_openai import call_custom_openai_api
8
10
  from .deepseek import call_deepseek_api
9
11
  from .fireworks import call_fireworks_api
10
12
  from .gemini import call_gemini_api
11
13
  from .groq import call_groq_api
14
+ from .kimi_coding import call_kimi_coding_api
12
15
  from .lmstudio import call_lmstudio_api
13
16
  from .minimax import call_minimax_api
17
+ from .mistral import call_mistral_api
18
+ from .moonshot import call_moonshot_api
14
19
  from .ollama import call_ollama_api
15
20
  from .openai import call_openai_api
16
21
  from .openrouter import call_openrouter_api
22
+ from .qwen import call_qwen_api
23
+ from .replicate import call_replicate_api
17
24
  from .streamlake import call_streamlake_api
18
25
  from .synthetic import call_synthetic_api
19
26
  from .together import call_together_api
20
27
  from .zai import call_zai_api, call_zai_coding_api
21
28
 
29
+ # Provider registry - single source of truth for all providers
30
+ PROVIDER_REGISTRY = {
31
+ "anthropic": call_anthropic_api,
32
+ "azure-openai": call_azure_openai_api,
33
+ "cerebras": call_cerebras_api,
34
+ "claude-code": call_claude_code_api,
35
+ "chutes": call_chutes_api,
36
+ "custom-anthropic": call_custom_anthropic_api,
37
+ "custom-openai": call_custom_openai_api,
38
+ "deepseek": call_deepseek_api,
39
+ "fireworks": call_fireworks_api,
40
+ "gemini": call_gemini_api,
41
+ "groq": call_groq_api,
42
+ "kimi-coding": call_kimi_coding_api,
43
+ "lm-studio": call_lmstudio_api,
44
+ "minimax": call_minimax_api,
45
+ "mistral": call_mistral_api,
46
+ "moonshot": call_moonshot_api,
47
+ "ollama": call_ollama_api,
48
+ "openai": call_openai_api,
49
+ "openrouter": call_openrouter_api,
50
+ "qwen": call_qwen_api,
51
+ "replicate": call_replicate_api,
52
+ "streamlake": call_streamlake_api,
53
+ "synthetic": call_synthetic_api,
54
+ "together": call_together_api,
55
+ "zai": call_zai_api,
56
+ "zai-coding": call_zai_coding_api,
57
+ }
58
+
59
+ # List of supported provider names - derived from registry keys
60
+ SUPPORTED_PROVIDERS = sorted(PROVIDER_REGISTRY.keys())
61
+
22
62
  __all__ = [
23
63
  "call_anthropic_api",
64
+ "call_azure_openai_api",
24
65
  "call_cerebras_api",
25
66
  "call_chutes_api",
67
+ "call_claude_code_api",
26
68
  "call_custom_anthropic_api",
27
69
  "call_custom_openai_api",
28
70
  "call_deepseek_api",
29
71
  "call_fireworks_api",
30
72
  "call_gemini_api",
31
73
  "call_groq_api",
74
+ "call_kimi_coding_api",
32
75
  "call_lmstudio_api",
33
76
  "call_minimax_api",
77
+ "call_mistral_api",
78
+ "call_moonshot_api",
34
79
  "call_ollama_api",
35
80
  "call_openai_api",
36
81
  "call_openrouter_api",
82
+ "call_qwen_api",
83
+ "call_replicate_api",
37
84
  "call_streamlake_api",
38
85
  "call_synthetic_api",
39
86
  "call_together_api",
40
87
  "call_zai_api",
41
88
  "call_zai_coding_api",
89
+ "PROVIDER_REGISTRY",
90
+ "SUPPORTED_PROVIDERS",
42
91
  ]
@@ -1,10 +1,15 @@
1
1
  """Anthropic AI provider implementation."""
2
2
 
3
+ import logging
3
4
  import os
4
5
 
5
6
  import httpx
6
7
 
8
+ from gac.constants import ProviderDefaults
7
9
  from gac.errors import AIError
10
+ from gac.utils import get_ssl_verify
11
+
12
+ logger = logging.getLogger(__name__)
8
13
 
9
14
 
10
15
  def call_anthropic_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -31,8 +36,12 @@ def call_anthropic_api(model: str, messages: list[dict], temperature: float, max
31
36
  if system_message:
32
37
  data["system"] = system_message
33
38
 
39
+ logger.debug(f"Calling Anthropic API with model={model}")
40
+
34
41
  try:
35
- response = httpx.post(url, headers=headers, json=data, timeout=120)
42
+ response = httpx.post(
43
+ url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
44
+ )
36
45
  response.raise_for_status()
37
46
  response_data = response.json()
38
47
  content = response_data["content"][0]["text"]
@@ -40,6 +49,7 @@ def call_anthropic_api(model: str, messages: list[dict], temperature: float, max
40
49
  raise AIError.model_error("Anthropic API returned null content")
41
50
  if content == "":
42
51
  raise AIError.model_error("Anthropic API returned empty content")
52
+ logger.debug("Anthropic API response received successfully")
43
53
  return content
44
54
  except httpx.HTTPStatusError as e:
45
55
  if e.response.status_code == 429:
@@ -0,0 +1,101 @@
1
+ """Azure OpenAI provider for gac.
2
+
3
+ This provider provides native support for Azure OpenAI Service with proper
4
+ endpoint construction and API version handling.
5
+ """
6
+
7
+ import json
8
+ import logging
9
+ import os
10
+
11
+ import httpx
12
+
13
+ from gac.constants import ProviderDefaults
14
+ from gac.errors import AIError
15
+ from gac.utils import get_ssl_verify
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
+ def call_azure_openai_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
21
+ """Call Azure OpenAI Service API.
22
+
23
+ Environment variables:
24
+ AZURE_OPENAI_API_KEY: Azure OpenAI API key (required)
25
+ AZURE_OPENAI_ENDPOINT: Azure OpenAI endpoint URL (required)
26
+ Example: https://your-resource.openai.azure.com
27
+ AZURE_OPENAI_API_VERSION: Azure OpenAI API version (required)
28
+ Example: 2025-01-01-preview
29
+ Example: 2024-02-15-preview
30
+
31
+ Args:
32
+ model: The deployment name in Azure OpenAI (e.g., 'gpt-4o', 'gpt-35-turbo')
33
+ messages: List of message dictionaries with 'role' and 'content' keys
34
+ temperature: Controls randomness (0.0-1.0)
35
+ max_tokens: Maximum tokens in the response
36
+
37
+ Returns:
38
+ The generated commit message
39
+
40
+ Raises:
41
+ AIError: If authentication fails, API errors occur, or response is invalid
42
+ """
43
+ api_key = os.getenv("AZURE_OPENAI_API_KEY")
44
+ if not api_key:
45
+ raise AIError.authentication_error("AZURE_OPENAI_API_KEY environment variable not set")
46
+
47
+ endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
48
+ if not endpoint:
49
+ raise AIError.model_error("AZURE_OPENAI_ENDPOINT environment variable not set")
50
+
51
+ api_version = os.getenv("AZURE_OPENAI_API_VERSION")
52
+ if not api_version:
53
+ raise AIError.model_error("AZURE_OPENAI_API_VERSION environment variable not set")
54
+
55
+ # Build Azure OpenAI URL with proper structure
56
+ endpoint = endpoint.rstrip("/")
57
+ url = f"{endpoint}/openai/deployments/{model}/chat/completions?api-version={api_version}"
58
+
59
+ headers = {"api-key": api_key, "Content-Type": "application/json"}
60
+
61
+ data = {"messages": messages, "temperature": temperature, "max_tokens": max_tokens}
62
+
63
+ try:
64
+ response = httpx.post(
65
+ url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
66
+ )
67
+ response.raise_for_status()
68
+ response_data = response.json()
69
+
70
+ try:
71
+ content = response_data["choices"][0]["message"]["content"]
72
+ except (KeyError, IndexError, TypeError) as e:
73
+ logger.error(f"Unexpected response format from Azure OpenAI API. Response: {json.dumps(response_data)}")
74
+ raise AIError.model_error(
75
+ f"Azure OpenAI API returned unexpected format. Expected response with "
76
+ f"'choices[0].message.content', but got: {type(e).__name__}. Check logs for full response structure."
77
+ ) from e
78
+
79
+ if content is None:
80
+ raise AIError.model_error("Azure OpenAI API returned null content")
81
+ if content == "":
82
+ raise AIError.model_error("Azure OpenAI API returned empty content")
83
+ return content
84
+ except httpx.ConnectError as e:
85
+ raise AIError.connection_error(f"Azure OpenAI API connection failed: {str(e)}") from e
86
+ except httpx.HTTPStatusError as e:
87
+ status_code = e.response.status_code
88
+ error_text = e.response.text
89
+
90
+ if status_code == 401:
91
+ raise AIError.authentication_error(f"Azure OpenAI API authentication failed: {error_text}") from e
92
+ elif status_code == 429:
93
+ raise AIError.rate_limit_error(f"Azure OpenAI API rate limit exceeded: {error_text}") from e
94
+ else:
95
+ raise AIError.model_error(f"Azure OpenAI API error: {status_code} - {error_text}") from e
96
+ except httpx.TimeoutException as e:
97
+ raise AIError.timeout_error(f"Azure OpenAI API request timed out: {str(e)}") from e
98
+ except AIError:
99
+ raise
100
+ except Exception as e:
101
+ raise AIError.model_error(f"Error calling Azure OpenAI API: {str(e)}") from e
gac/providers/cerebras.py CHANGED
@@ -1,10 +1,15 @@
1
1
  """Cerebras AI provider implementation."""
2
2
 
3
+ import logging
3
4
  import os
4
5
 
5
6
  import httpx
6
7
 
8
+ from gac.constants import ProviderDefaults
7
9
  from gac.errors import AIError
10
+ from gac.utils import get_ssl_verify
11
+
12
+ logger = logging.getLogger(__name__)
8
13
 
9
14
 
10
15
  def call_cerebras_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -18,8 +23,12 @@ def call_cerebras_api(model: str, messages: list[dict], temperature: float, max_
18
23
 
19
24
  data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
20
25
 
26
+ logger.debug(f"Calling Cerebras API with model={model}")
27
+
21
28
  try:
22
- response = httpx.post(url, headers=headers, json=data, timeout=120)
29
+ response = httpx.post(
30
+ url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
31
+ )
23
32
  response.raise_for_status()
24
33
  response_data = response.json()
25
34
  content = response_data["choices"][0]["message"]["content"]
@@ -27,6 +36,7 @@ def call_cerebras_api(model: str, messages: list[dict], temperature: float, max_
27
36
  raise AIError.model_error("Cerebras API returned null content")
28
37
  if content == "":
29
38
  raise AIError.model_error("Cerebras API returned empty content")
39
+ logger.debug("Cerebras API response received successfully")
30
40
  return content
31
41
  except httpx.HTTPStatusError as e:
32
42
  if e.response.status_code == 429:
gac/providers/chutes.py CHANGED
@@ -1,10 +1,15 @@
1
1
  """Chutes.ai API provider for gac."""
2
2
 
3
+ import logging
3
4
  import os
4
5
 
5
6
  import httpx
6
7
 
8
+ from gac.constants import ProviderDefaults
7
9
  from gac.errors import AIError
10
+ from gac.utils import get_ssl_verify
11
+
12
+ logger = logging.getLogger(__name__)
8
13
 
9
14
 
10
15
  def call_chutes_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -43,8 +48,12 @@ def call_chutes_api(model: str, messages: list[dict], temperature: float, max_to
43
48
  "max_tokens": max_tokens,
44
49
  }
45
50
 
51
+ logger.debug(f"Calling Chutes.ai API with model={model}")
52
+
46
53
  try:
47
- response = httpx.post(url, headers=headers, json=data, timeout=120)
54
+ response = httpx.post(
55
+ url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
56
+ )
48
57
  response.raise_for_status()
49
58
  response_data = response.json()
50
59
  content = response_data["choices"][0]["message"]["content"]
@@ -52,6 +61,7 @@ def call_chutes_api(model: str, messages: list[dict], temperature: float, max_to
52
61
  raise AIError.model_error("Chutes.ai API returned null content")
53
62
  if content == "":
54
63
  raise AIError.model_error("Chutes.ai API returned empty content")
64
+ logger.debug("Chutes.ai API response received successfully")
55
65
  return content
56
66
  except httpx.HTTPStatusError as e:
57
67
  status_code = e.response.status_code
@@ -0,0 +1,112 @@
1
+ """Claude Code provider implementation.
2
+
3
+ This provider allows users with Claude Code subscriptions to use their OAuth tokens
4
+ instead of paying for the expensive Anthropic API.
5
+ """
6
+
7
+ import logging
8
+ import os
9
+
10
+ import httpx
11
+
12
+ from gac.constants import ProviderDefaults
13
+ from gac.errors import AIError
14
+ from gac.utils import get_ssl_verify
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+
19
+ def call_claude_code_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
20
+ """Call Claude Code API using OAuth token.
21
+
22
+ This provider uses the Claude Code subscription OAuth token instead of the Anthropic API key.
23
+ It authenticates using Bearer token authentication with the special anthropic-beta header.
24
+
25
+ Environment variables:
26
+ CLAUDE_CODE_ACCESS_TOKEN: OAuth access token from Claude Code authentication
27
+
28
+ Args:
29
+ model: Model name (e.g., 'claude-sonnet-4-5')
30
+ messages: List of message dictionaries with 'role' and 'content' keys
31
+ temperature: Sampling temperature (0.0-1.0)
32
+ max_tokens: Maximum tokens in response
33
+
34
+ Returns:
35
+ Generated text response
36
+
37
+ Raises:
38
+ AIError: If authentication fails or API call fails
39
+ """
40
+ access_token = os.getenv("CLAUDE_CODE_ACCESS_TOKEN")
41
+ if not access_token:
42
+ raise AIError.authentication_error(
43
+ "CLAUDE_CODE_ACCESS_TOKEN not found in environment variables. "
44
+ "Please authenticate with Claude Code and set this token."
45
+ )
46
+
47
+ url = "https://api.anthropic.com/v1/messages"
48
+ headers = {
49
+ "Authorization": f"Bearer {access_token}",
50
+ "anthropic-version": "2023-06-01",
51
+ "anthropic-beta": "oauth-2025-04-20",
52
+ "content-type": "application/json",
53
+ }
54
+
55
+ # Convert messages to Anthropic format
56
+ # IMPORTANT: Claude Code OAuth tokens require the system message to be EXACTLY
57
+ # "You are Claude Code, Anthropic's official CLI for Claude." with NO additional content.
58
+ # Any other instructions must be moved to the user message.
59
+ anthropic_messages = []
60
+ system_instructions = ""
61
+
62
+ for msg in messages:
63
+ if msg["role"] == "system":
64
+ system_instructions = msg["content"]
65
+ else:
66
+ anthropic_messages.append({"role": msg["role"], "content": msg["content"]})
67
+
68
+ # Claude Code requires this exact system message, nothing more
69
+ system_message = "You are Claude Code, Anthropic's official CLI for Claude."
70
+
71
+ # Move any system instructions into the first user message
72
+ if system_instructions and anthropic_messages:
73
+ # Prepend system instructions to the first user message
74
+ first_user_msg = anthropic_messages[0]
75
+ first_user_msg["content"] = f"{system_instructions}\n\n{first_user_msg['content']}"
76
+
77
+ data = {
78
+ "model": model,
79
+ "messages": anthropic_messages,
80
+ "temperature": temperature,
81
+ "max_tokens": max_tokens,
82
+ "system": system_message,
83
+ }
84
+
85
+ logger.debug(f"Calling Claude Code API with model={model}")
86
+
87
+ try:
88
+ response = httpx.post(
89
+ url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
90
+ )
91
+ response.raise_for_status()
92
+ response_data = response.json()
93
+ content = response_data["content"][0]["text"]
94
+ if content is None:
95
+ raise AIError.model_error("Claude Code API returned null content")
96
+ if content == "":
97
+ raise AIError.model_error("Claude Code API returned empty content")
98
+ logger.debug("Claude Code API response received successfully")
99
+ return content
100
+ except httpx.HTTPStatusError as e:
101
+ if e.response.status_code == 401:
102
+ raise AIError.authentication_error(
103
+ f"Claude Code authentication failed: {e.response.text}. "
104
+ "Your token may have expired. Please re-authenticate."
105
+ ) from e
106
+ if e.response.status_code == 429:
107
+ raise AIError.rate_limit_error(f"Claude Code API rate limit exceeded: {e.response.text}") from e
108
+ raise AIError.model_error(f"Claude Code API error: {e.response.status_code} - {e.response.text}") from e
109
+ except httpx.TimeoutException as e:
110
+ raise AIError.timeout_error(f"Claude Code API request timed out: {str(e)}") from e
111
+ except Exception as e:
112
+ raise AIError.model_error(f"Error calling Claude Code API: {str(e)}") from e
@@ -10,7 +10,9 @@ import os
10
10
 
11
11
  import httpx
12
12
 
13
+ from gac.constants import ProviderDefaults
13
14
  from gac.errors import AIError
15
+ from gac.utils import get_ssl_verify
14
16
 
15
17
  logger = logging.getLogger(__name__)
16
18
 
@@ -30,7 +32,7 @@ def call_custom_anthropic_api(model: str, messages: list[dict], temperature: flo
30
32
  CUSTOM_ANTHROPIC_VERSION: API version header (optional, defaults to '2023-06-01')
31
33
 
32
34
  Args:
33
- model: The model to use (e.g., 'claude-3-5-sonnet-20241022', 'claude-3-5-haiku-latest')
35
+ model: The model to use (e.g., 'claude-sonnet-4-5', 'claude-haiku-4-5')
34
36
  messages: List of message dictionaries with 'role' and 'content' keys
35
37
  temperature: Controls randomness (0.0-1.0)
36
38
  max_tokens: Maximum tokens in the response
@@ -74,7 +76,9 @@ def call_custom_anthropic_api(model: str, messages: list[dict], temperature: flo
74
76
  data["system"] = system_message
75
77
 
76
78
  try:
77
- response = httpx.post(url, headers=headers, json=data, timeout=120)
79
+ response = httpx.post(
80
+ url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
81
+ )
78
82
  response.raise_for_status()
79
83
  response_data = response.json()
80
84
 
@@ -10,7 +10,9 @@ import os
10
10
 
11
11
  import httpx
12
12
 
13
+ from gac.constants import ProviderDefaults
13
14
  from gac.errors import AIError
15
+ from gac.utils import get_ssl_verify
14
16
 
15
17
  logger = logging.getLogger(__name__)
16
18
 
@@ -19,7 +21,6 @@ def call_custom_openai_api(model: str, messages: list[dict], temperature: float,
19
21
  """Call a custom OpenAI-compatible API endpoint.
20
22
 
21
23
  This provider is useful for:
22
- - Azure OpenAI Service
23
24
  - OpenAI-compatible proxies or gateways
24
25
  - Self-hosted OpenAI-compatible services
25
26
  - Other services implementing the OpenAI Chat Completions API
@@ -27,8 +28,8 @@ def call_custom_openai_api(model: str, messages: list[dict], temperature: float,
27
28
  Environment variables:
28
29
  CUSTOM_OPENAI_API_KEY: API key for authentication (required)
29
30
  CUSTOM_OPENAI_BASE_URL: Base URL for the API endpoint (required)
30
- Example: https://your-endpoint.openai.azure.com
31
31
  Example: https://your-proxy.example.com/v1
32
+ Example: https://your-custom-endpoint.com
32
33
 
33
34
  Args:
34
35
  model: The model to use (e.g., 'gpt-4', 'gpt-3.5-turbo')
@@ -61,7 +62,9 @@ def call_custom_openai_api(model: str, messages: list[dict], temperature: float,
61
62
  data = {"model": model, "messages": messages, "temperature": temperature, "max_completion_tokens": max_tokens}
62
63
 
63
64
  try:
64
- response = httpx.post(url, headers=headers, json=data, timeout=120)
65
+ response = httpx.post(
66
+ url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
67
+ )
65
68
  response.raise_for_status()
66
69
  response_data = response.json()
67
70
 
gac/providers/deepseek.py CHANGED
@@ -1,10 +1,15 @@
1
1
  """DeepSeek API provider for gac."""
2
2
 
3
+ import logging
3
4
  import os
4
5
 
5
6
  import httpx
6
7
 
8
+ from gac.constants import ProviderDefaults
7
9
  from gac.errors import AIError
10
+ from gac.utils import get_ssl_verify
11
+
12
+ logger = logging.getLogger(__name__)
8
13
 
9
14
 
10
15
  def call_deepseek_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -18,8 +23,12 @@ def call_deepseek_api(model: str, messages: list[dict], temperature: float, max_
18
23
 
19
24
  data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
20
25
 
26
+ logger.debug(f"Calling DeepSeek API with model={model}")
27
+
21
28
  try:
22
- response = httpx.post(url, headers=headers, json=data, timeout=120)
29
+ response = httpx.post(
30
+ url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
31
+ )
23
32
  response.raise_for_status()
24
33
  response_data = response.json()
25
34
  content = response_data["choices"][0]["message"]["content"]
@@ -27,6 +36,7 @@ def call_deepseek_api(model: str, messages: list[dict], temperature: float, max_
27
36
  raise AIError.model_error("DeepSeek API returned null content")
28
37
  if content == "":
29
38
  raise AIError.model_error("DeepSeek API returned empty content")
39
+ logger.debug("DeepSeek API response received successfully")
30
40
  return content
31
41
  except httpx.HTTPStatusError as e:
32
42
  if e.response.status_code == 429:
@@ -1,10 +1,15 @@
1
1
  """Fireworks AI API provider for gac."""
2
2
 
3
+ import logging
3
4
  import os
4
5
 
5
6
  import httpx
6
7
 
8
+ from gac.constants import ProviderDefaults
7
9
  from gac.errors import AIError
10
+ from gac.utils import get_ssl_verify
11
+
12
+ logger = logging.getLogger(__name__)
8
13
 
9
14
 
10
15
  def call_fireworks_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
@@ -18,8 +23,12 @@ def call_fireworks_api(model: str, messages: list[dict], temperature: float, max
18
23
 
19
24
  data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
20
25
 
26
+ logger.debug(f"Calling Fireworks AI API with model={model}")
27
+
21
28
  try:
22
- response = httpx.post(url, headers=headers, json=data, timeout=120)
29
+ response = httpx.post(
30
+ url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
31
+ )
23
32
  response.raise_for_status()
24
33
  response_data = response.json()
25
34
  content = response_data["choices"][0]["message"]["content"]
@@ -27,6 +36,7 @@ def call_fireworks_api(model: str, messages: list[dict], temperature: float, max
27
36
  raise AIError.model_error("Fireworks AI API returned null content")
28
37
  if content == "":
29
38
  raise AIError.model_error("Fireworks AI API returned empty content")
39
+ logger.debug("Fireworks AI API response received successfully")
30
40
  return content
31
41
  except httpx.HTTPStatusError as e:
32
42
  if e.response.status_code == 429:
gac/providers/gemini.py CHANGED
@@ -1,11 +1,16 @@
1
1
  """Gemini AI provider implementation."""
2
2
 
3
+ import logging
3
4
  import os
4
5
  from typing import Any
5
6
 
6
7
  import httpx
7
8
 
9
+ from gac.constants import ProviderDefaults
8
10
  from gac.errors import AIError
11
+ from gac.utils import get_ssl_verify
12
+
13
+ logger = logging.getLogger(__name__)
9
14
 
10
15
 
11
16
  def call_gemini_api(model: str, messages: list[dict[str, Any]], temperature: float, max_tokens: int) -> str:
@@ -49,8 +54,12 @@ def call_gemini_api(model: str, messages: list[dict[str, Any]], temperature: flo
49
54
 
50
55
  headers = {"x-goog-api-key": api_key, "Content-Type": "application/json"}
51
56
 
57
+ logger.debug(f"Calling Gemini API with model={model}")
58
+
52
59
  try:
53
- response = httpx.post(url, headers=headers, json=payload, timeout=120)
60
+ response = httpx.post(
61
+ url, headers=headers, json=payload, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
62
+ )
54
63
  response.raise_for_status()
55
64
  response_data = response.json()
56
65
 
@@ -74,6 +83,7 @@ def call_gemini_api(model: str, messages: list[dict[str, Any]], temperature: flo
74
83
  if content_text is None:
75
84
  raise AIError.model_error("Gemini API response missing text content")
76
85
 
86
+ logger.debug("Gemini API response received successfully")
77
87
  return content_text
78
88
  except AIError:
79
89
  raise
gac/providers/groq.py CHANGED
@@ -5,7 +5,9 @@ import os
5
5
 
6
6
  import httpx
7
7
 
8
+ from gac.constants import ProviderDefaults
8
9
  from gac.errors import AIError
10
+ from gac.utils import get_ssl_verify
9
11
 
10
12
  logger = logging.getLogger(__name__)
11
13
 
@@ -22,7 +24,9 @@ def call_groq_api(model: str, messages: list[dict], temperature: float, max_toke
22
24
  data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
23
25
 
24
26
  try:
25
- response = httpx.post(url, headers=headers, json=data, timeout=120)
27
+ response = httpx.post(
28
+ url, headers=headers, json=data, timeout=ProviderDefaults.HTTP_TIMEOUT, verify=get_ssl_verify()
29
+ )
26
30
  response.raise_for_status()
27
31
  response_data = response.json()
28
32