gac 0.17.2__py3-none-any.whl → 3.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53) hide show
  1. gac/__version__.py +1 -1
  2. gac/ai.py +69 -123
  3. gac/ai_utils.py +227 -0
  4. gac/auth_cli.py +69 -0
  5. gac/cli.py +87 -19
  6. gac/config.py +13 -7
  7. gac/config_cli.py +26 -5
  8. gac/constants.py +176 -5
  9. gac/errors.py +14 -0
  10. gac/git.py +207 -11
  11. gac/init_cli.py +52 -29
  12. gac/language_cli.py +378 -0
  13. gac/main.py +922 -189
  14. gac/model_cli.py +374 -0
  15. gac/oauth/__init__.py +1 -0
  16. gac/oauth/claude_code.py +397 -0
  17. gac/preprocess.py +5 -5
  18. gac/prompt.py +656 -219
  19. gac/providers/__init__.py +88 -0
  20. gac/providers/anthropic.py +51 -0
  21. gac/providers/azure_openai.py +97 -0
  22. gac/providers/cerebras.py +38 -0
  23. gac/providers/chutes.py +71 -0
  24. gac/providers/claude_code.py +102 -0
  25. gac/providers/custom_anthropic.py +133 -0
  26. gac/providers/custom_openai.py +98 -0
  27. gac/providers/deepseek.py +38 -0
  28. gac/providers/fireworks.py +38 -0
  29. gac/providers/gemini.py +87 -0
  30. gac/providers/groq.py +63 -0
  31. gac/providers/kimi_coding.py +63 -0
  32. gac/providers/lmstudio.py +59 -0
  33. gac/providers/minimax.py +38 -0
  34. gac/providers/mistral.py +38 -0
  35. gac/providers/moonshot.py +38 -0
  36. gac/providers/ollama.py +50 -0
  37. gac/providers/openai.py +38 -0
  38. gac/providers/openrouter.py +58 -0
  39. gac/providers/replicate.py +98 -0
  40. gac/providers/streamlake.py +51 -0
  41. gac/providers/synthetic.py +42 -0
  42. gac/providers/together.py +38 -0
  43. gac/providers/zai.py +59 -0
  44. gac/security.py +293 -0
  45. gac/utils.py +243 -4
  46. gac/workflow_utils.py +222 -0
  47. gac-3.6.0.dist-info/METADATA +281 -0
  48. gac-3.6.0.dist-info/RECORD +53 -0
  49. {gac-0.17.2.dist-info → gac-3.6.0.dist-info}/WHEEL +1 -1
  50. {gac-0.17.2.dist-info → gac-3.6.0.dist-info}/licenses/LICENSE +1 -1
  51. gac-0.17.2.dist-info/METADATA +0 -221
  52. gac-0.17.2.dist-info/RECORD +0 -20
  53. {gac-0.17.2.dist-info → gac-3.6.0.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,88 @@
1
"""AI provider implementations for commit message generation."""

from .anthropic import call_anthropic_api
from .azure_openai import call_azure_openai_api
from .cerebras import call_cerebras_api
from .chutes import call_chutes_api
from .claude_code import call_claude_code_api
from .custom_anthropic import call_custom_anthropic_api
from .custom_openai import call_custom_openai_api
from .deepseek import call_deepseek_api
from .fireworks import call_fireworks_api
from .gemini import call_gemini_api
from .groq import call_groq_api
from .kimi_coding import call_kimi_coding_api
from .lmstudio import call_lmstudio_api
from .minimax import call_minimax_api
from .mistral import call_mistral_api
from .moonshot import call_moonshot_api
from .ollama import call_ollama_api
from .openai import call_openai_api
from .openrouter import call_openrouter_api
from .replicate import call_replicate_api
from .streamlake import call_streamlake_api
from .synthetic import call_synthetic_api
from .together import call_together_api
from .zai import call_zai_api, call_zai_coding_api

# Provider registry - single source of truth for all providers.
# Keys are the provider identifiers users select (note: some keys differ
# from their module names, e.g. "lm-studio" -> lmstudio, "azure-openai"
# -> azure_openai); values are call functions sharing the signature
# (model, messages, temperature, max_tokens) -> str.
PROVIDER_REGISTRY = {
    "anthropic": call_anthropic_api,
    "azure-openai": call_azure_openai_api,
    "cerebras": call_cerebras_api,
    "claude-code": call_claude_code_api,
    "chutes": call_chutes_api,
    "custom-anthropic": call_custom_anthropic_api,
    "custom-openai": call_custom_openai_api,
    "deepseek": call_deepseek_api,
    "fireworks": call_fireworks_api,
    "gemini": call_gemini_api,
    "groq": call_groq_api,
    "kimi-coding": call_kimi_coding_api,
    "lm-studio": call_lmstudio_api,
    "minimax": call_minimax_api,
    "mistral": call_mistral_api,
    "moonshot": call_moonshot_api,
    "ollama": call_ollama_api,
    "openai": call_openai_api,
    "openrouter": call_openrouter_api,
    "replicate": call_replicate_api,
    "streamlake": call_streamlake_api,
    "synthetic": call_synthetic_api,
    "together": call_together_api,
    "zai": call_zai_api,
    "zai-coding": call_zai_coding_api,
}

# List of supported provider names - derived from registry keys
# (sorted so help text and error messages are deterministic).
SUPPORTED_PROVIDERS = sorted(PROVIDER_REGISTRY.keys())

__all__ = [
    "call_anthropic_api",
    "call_azure_openai_api",
    "call_cerebras_api",
    "call_chutes_api",
    "call_claude_code_api",
    "call_custom_anthropic_api",
    "call_custom_openai_api",
    "call_deepseek_api",
    "call_fireworks_api",
    "call_gemini_api",
    "call_groq_api",
    "call_kimi_coding_api",
    "call_lmstudio_api",
    "call_minimax_api",
    "call_mistral_api",
    "call_moonshot_api",
    "call_ollama_api",
    "call_openai_api",
    "call_openrouter_api",
    "call_replicate_api",
    "call_streamlake_api",
    "call_synthetic_api",
    "call_together_api",
    "call_zai_api",
    "call_zai_coding_api",
    "PROVIDER_REGISTRY",
    "SUPPORTED_PROVIDERS",
]
@@ -0,0 +1,51 @@
1
+ """Anthropic AI provider implementation."""
2
+
3
+ import os
4
+
5
+ import httpx
6
+
7
+ from gac.errors import AIError
8
+
9
+
10
def call_anthropic_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
    """Call the Anthropic Messages API directly.

    Environment variables:
        ANTHROPIC_API_KEY: Anthropic API key (required)

    Args:
        model: The Anthropic model name
        messages: List of message dictionaries with 'role' and 'content' keys
        temperature: Controls randomness (0.0-1.0)
        max_tokens: Maximum tokens in the response

    Returns:
        The generated commit message

    Raises:
        AIError: If authentication fails, API errors occur, or response is invalid
    """
    api_key = os.getenv("ANTHROPIC_API_KEY")
    if not api_key:
        raise AIError.authentication_error("ANTHROPIC_API_KEY not found in environment variables")

    url = "https://api.anthropic.com/v1/messages"
    headers = {"x-api-key": api_key, "anthropic-version": "2023-06-01", "content-type": "application/json"}

    # Convert messages to Anthropic format: the Messages API takes the system
    # prompt as a top-level "system" field, not as a role="system" message.
    anthropic_messages = []
    system_message = ""

    for msg in messages:
        if msg["role"] == "system":
            system_message = msg["content"]
        else:
            anthropic_messages.append({"role": msg["role"], "content": msg["content"]})

    data = {"model": model, "messages": anthropic_messages, "temperature": temperature, "max_tokens": max_tokens}

    if system_message:
        data["system"] = system_message

    try:
        response = httpx.post(url, headers=headers, json=data, timeout=120)
        response.raise_for_status()
        response_data = response.json()
        content = response_data["content"][0]["text"]
        if content is None:
            raise AIError.model_error("Anthropic API returned null content")
        if content == "":
            raise AIError.model_error("Anthropic API returned empty content")
        return content
    except httpx.HTTPStatusError as e:
        if e.response.status_code == 429:
            raise AIError.rate_limit_error(f"Anthropic API rate limit exceeded: {e.response.text}") from e
        raise AIError.model_error(f"Anthropic API error: {e.response.status_code} - {e.response.text}") from e
    except httpx.ConnectError as e:
        # Report connection failures with the dedicated category, matching the
        # azure_openai / chutes / custom_* providers.
        raise AIError.connection_error(f"Anthropic API connection failed: {str(e)}") from e
    except httpx.TimeoutException as e:
        raise AIError.timeout_error(f"Anthropic API request timed out: {str(e)}") from e
    except AIError:
        # Fix: re-raise AIErrors raised above (null/empty content) unchanged;
        # previously they fell through to the generic handler and were
        # re-wrapped as "Error calling Anthropic API: ...".
        raise
    except Exception as e:
        raise AIError.model_error(f"Error calling Anthropic API: {str(e)}") from e
@@ -0,0 +1,97 @@
1
+ """Azure OpenAI provider for gac.
2
+
3
+ This provider provides native support for Azure OpenAI Service with proper
4
+ endpoint construction and API version handling.
5
+ """
6
+
7
+ import json
8
+ import logging
9
+ import os
10
+
11
+ import httpx
12
+
13
+ from gac.errors import AIError
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
def call_azure_openai_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
    """Call Azure OpenAI Service API.

    Environment variables:
        AZURE_OPENAI_API_KEY: Azure OpenAI API key (required)
        AZURE_OPENAI_ENDPOINT: Azure OpenAI endpoint URL (required)
            Example: https://your-resource.openai.azure.com
        AZURE_OPENAI_API_VERSION: Azure OpenAI API version (required)
            Example: 2025-01-01-preview

    Args:
        model: The deployment name in Azure OpenAI (e.g., 'gpt-4o', 'gpt-35-turbo')
        messages: List of message dictionaries with 'role' and 'content' keys
        temperature: Controls randomness (0.0-1.0)
        max_tokens: Maximum tokens in the response

    Returns:
        The generated commit message

    Raises:
        AIError: If authentication fails, API errors occur, or response is invalid
    """
    key = os.getenv("AZURE_OPENAI_API_KEY")
    if not key:
        raise AIError.authentication_error("AZURE_OPENAI_API_KEY environment variable not set")

    base = os.getenv("AZURE_OPENAI_ENDPOINT")
    if not base:
        raise AIError.model_error("AZURE_OPENAI_ENDPOINT environment variable not set")

    version = os.getenv("AZURE_OPENAI_API_VERSION")
    if not version:
        raise AIError.model_error("AZURE_OPENAI_API_VERSION environment variable not set")

    # Azure routes requests by deployment name and requires an explicit
    # api-version query parameter.
    request_url = f"{base.rstrip('/')}/openai/deployments/{model}/chat/completions?api-version={version}"
    payload = {"messages": messages, "temperature": temperature, "max_tokens": max_tokens}

    try:
        resp = httpx.post(
            request_url,
            headers={"api-key": key, "Content-Type": "application/json"},
            json=payload,
            timeout=120,
        )
        resp.raise_for_status()
        body = resp.json()

        try:
            text = body["choices"][0]["message"]["content"]
        except (KeyError, IndexError, TypeError) as exc:
            # Log the full payload so the unexpected shape can be diagnosed.
            logger.error(f"Unexpected response format from Azure OpenAI API. Response: {json.dumps(body)}")
            raise AIError.model_error(
                f"Azure OpenAI API returned unexpected format. Expected response with "
                f"'choices[0].message.content', but got: {type(exc).__name__}. Check logs for full response structure."
            ) from exc

        if text is None:
            raise AIError.model_error("Azure OpenAI API returned null content")
        if text == "":
            raise AIError.model_error("Azure OpenAI API returned empty content")
        return text
    except httpx.ConnectError as exc:
        raise AIError.connection_error(f"Azure OpenAI API connection failed: {str(exc)}") from exc
    except httpx.HTTPStatusError as exc:
        code = exc.response.status_code
        detail = exc.response.text
        if code == 401:
            raise AIError.authentication_error(f"Azure OpenAI API authentication failed: {detail}") from exc
        if code == 429:
            raise AIError.rate_limit_error(f"Azure OpenAI API rate limit exceeded: {detail}") from exc
        raise AIError.model_error(f"Azure OpenAI API error: {code} - {detail}") from exc
    except httpx.TimeoutException as exc:
        raise AIError.timeout_error(f"Azure OpenAI API request timed out: {str(exc)}") from exc
    except AIError:
        # Pass through our own errors (content validation) untouched.
        raise
    except Exception as exc:
        raise AIError.model_error(f"Error calling Azure OpenAI API: {str(exc)}") from exc
@@ -0,0 +1,38 @@
1
+ """Cerebras AI provider implementation."""
2
+
3
+ import os
4
+
5
+ import httpx
6
+
7
+ from gac.errors import AIError
8
+
9
+
10
def call_cerebras_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
    """Call the Cerebras OpenAI-compatible chat completions API directly.

    Environment variables:
        CEREBRAS_API_KEY: Cerebras API key (required)

    Args:
        model: The Cerebras model name
        messages: List of message dictionaries with 'role' and 'content' keys
        temperature: Controls randomness (0.0-1.0)
        max_tokens: Maximum tokens in the response

    Returns:
        The generated commit message

    Raises:
        AIError: If authentication fails, API errors occur, or response is invalid
    """
    api_key = os.getenv("CEREBRAS_API_KEY")
    if not api_key:
        raise AIError.authentication_error("CEREBRAS_API_KEY not found in environment variables")

    url = "https://api.cerebras.ai/v1/chat/completions"
    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}

    data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}

    try:
        response = httpx.post(url, headers=headers, json=data, timeout=120)
        response.raise_for_status()
        response_data = response.json()
        content = response_data["choices"][0]["message"]["content"]
        if content is None:
            raise AIError.model_error("Cerebras API returned null content")
        if content == "":
            raise AIError.model_error("Cerebras API returned empty content")
        return content
    except httpx.HTTPStatusError as e:
        if e.response.status_code == 429:
            raise AIError.rate_limit_error(f"Cerebras API rate limit exceeded: {e.response.text}") from e
        raise AIError.model_error(f"Cerebras API error: {e.response.status_code} - {e.response.text}") from e
    except httpx.ConnectError as e:
        # Report connection failures with the dedicated category, matching the
        # azure_openai / chutes / custom_* providers.
        raise AIError.connection_error(f"Cerebras API connection failed: {str(e)}") from e
    except httpx.TimeoutException as e:
        raise AIError.timeout_error(f"Cerebras API request timed out: {str(e)}") from e
    except AIError:
        # Fix: re-raise AIErrors raised above (null/empty content) unchanged;
        # previously they fell through to the generic handler and were
        # re-wrapped as "Error calling Cerebras API: ...".
        raise
    except Exception as e:
        raise AIError.model_error(f"Error calling Cerebras API: {str(e)}") from e
@@ -0,0 +1,71 @@
1
+ """Chutes.ai API provider for gac."""
2
+
3
+ import os
4
+
5
+ import httpx
6
+
7
+ from gac.errors import AIError
8
+
9
+
10
def call_chutes_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
    """Call Chutes.ai API directly.

    Chutes.ai provides an OpenAI-compatible API for serverless, decentralized AI compute.

    Environment variables:
        CHUTES_API_KEY: API key for authentication (required)
        CHUTES_BASE_URL: Override for the API base URL (optional,
            defaults to https://llm.chutes.ai)

    Args:
        model: The model to use (e.g., 'deepseek-ai/DeepSeek-V3-0324')
        messages: List of message dictionaries with 'role' and 'content' keys
        temperature: Controls randomness (0.0-1.0)
        max_tokens: Maximum tokens in the response

    Returns:
        The generated commit message

    Raises:
        AIError: If authentication fails, API errors occur, or response is invalid
    """
    api_key = os.getenv("CHUTES_API_KEY")
    if not api_key:
        raise AIError.authentication_error("CHUTES_API_KEY environment variable not set")

    base_url = os.getenv("CHUTES_BASE_URL", "https://llm.chutes.ai")
    url = f"{base_url}/v1/chat/completions"

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
    }

    data = {
        "model": model,
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
    }

    try:
        response = httpx.post(url, headers=headers, json=data, timeout=120)
        response.raise_for_status()
        response_data = response.json()
        content = response_data["choices"][0]["message"]["content"]
        if content is None:
            raise AIError.model_error("Chutes.ai API returned null content")
        if content == "":
            raise AIError.model_error("Chutes.ai API returned empty content")
        return content
    except httpx.HTTPStatusError as e:
        status_code = e.response.status_code
        error_text = e.response.text

        if status_code == 429:
            raise AIError.rate_limit_error(f"Chutes.ai API rate limit exceeded: {error_text}") from e
        elif status_code in (502, 503):
            raise AIError.connection_error(f"Chutes.ai API service unavailable: {status_code} - {error_text}") from e
        else:
            raise AIError.model_error(f"Chutes.ai API error: {status_code} - {error_text}") from e
    except httpx.ConnectError as e:
        raise AIError.connection_error(f"Chutes.ai API connection error: {str(e)}") from e
    except httpx.TimeoutException as e:
        raise AIError.timeout_error(f"Chutes.ai API request timed out: {str(e)}") from e
    except AIError:
        # Fix: re-raise AIErrors raised above (null/empty content) unchanged;
        # previously they fell through to the generic handler and were
        # re-wrapped as "Error calling Chutes.ai API: ...".
        raise
    except Exception as e:
        raise AIError.model_error(f"Error calling Chutes.ai API: {str(e)}") from e
@@ -0,0 +1,102 @@
1
+ """Claude Code provider implementation.
2
+
3
+ This provider allows users with Claude Code subscriptions to use their OAuth tokens
4
+ instead of paying for the expensive Anthropic API.
5
+ """
6
+
7
+ import os
8
+
9
+ import httpx
10
+
11
+ from gac.errors import AIError
12
+
13
+
14
def call_claude_code_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
    """Call Claude Code API using OAuth token.

    This provider uses the Claude Code subscription OAuth token instead of the Anthropic API key.
    It authenticates using Bearer token authentication with the special anthropic-beta header.

    Environment variables:
        CLAUDE_CODE_ACCESS_TOKEN: OAuth access token from Claude Code authentication

    Args:
        model: Model name (e.g., 'claude-sonnet-4-5')
        messages: List of message dictionaries with 'role' and 'content' keys
        temperature: Sampling temperature (0.0-1.0)
        max_tokens: Maximum tokens in response

    Returns:
        Generated text response

    Raises:
        AIError: If authentication fails or API call fails
    """
    access_token = os.getenv("CLAUDE_CODE_ACCESS_TOKEN")
    if not access_token:
        raise AIError.authentication_error(
            "CLAUDE_CODE_ACCESS_TOKEN not found in environment variables. "
            "Please authenticate with Claude Code and set this token."
        )

    url = "https://api.anthropic.com/v1/messages"
    headers = {
        "Authorization": f"Bearer {access_token}",
        "anthropic-version": "2023-06-01",
        "anthropic-beta": "oauth-2025-04-20",
        "content-type": "application/json",
    }

    # Convert messages to Anthropic format
    # IMPORTANT: Claude Code OAuth tokens require the system message to be EXACTLY
    # "You are Claude Code, Anthropic's official CLI for Claude." with NO additional content.
    # Any other instructions must be moved to the user message.
    anthropic_messages = []
    system_instructions = ""

    for msg in messages:
        if msg["role"] == "system":
            system_instructions = msg["content"]
        else:
            anthropic_messages.append({"role": msg["role"], "content": msg["content"]})

    # Claude Code requires this exact system message, nothing more
    system_message = "You are Claude Code, Anthropic's official CLI for Claude."

    # Move any system instructions into the first user message
    if system_instructions and anthropic_messages:
        # Prepend system instructions to the first non-system message.
        # NOTE(review): assumes messages[0] after filtering has role "user" —
        # if a conversation ever starts with an assistant turn, the
        # instructions would land there instead; confirm against callers.
        first_user_msg = anthropic_messages[0]
        first_user_msg["content"] = f"{system_instructions}\n\n{first_user_msg['content']}"

    data = {
        "model": model,
        "messages": anthropic_messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "system": system_message,
    }

    try:
        response = httpx.post(url, headers=headers, json=data, timeout=120)
        response.raise_for_status()
        response_data = response.json()
        content = response_data["content"][0]["text"]
        if content is None:
            raise AIError.model_error("Claude Code API returned null content")
        if content == "":
            raise AIError.model_error("Claude Code API returned empty content")
        return content
    except httpx.HTTPStatusError as e:
        if e.response.status_code == 401:
            raise AIError.authentication_error(
                f"Claude Code authentication failed: {e.response.text}. "
                "Your token may have expired. Please re-authenticate."
            ) from e
        if e.response.status_code == 429:
            raise AIError.rate_limit_error(f"Claude Code API rate limit exceeded: {e.response.text}") from e
        raise AIError.model_error(f"Claude Code API error: {e.response.status_code} - {e.response.text}") from e
    except httpx.TimeoutException as e:
        raise AIError.timeout_error(f"Claude Code API request timed out: {str(e)}") from e
    except AIError:
        # Fix: re-raise AIErrors raised above (null/empty content) unchanged;
        # previously they fell through to the generic handler and were
        # re-wrapped as "Error calling Claude Code API: ...".
        raise
    except Exception as e:
        raise AIError.model_error(f"Error calling Claude Code API: {str(e)}") from e
@@ -0,0 +1,133 @@
1
+ """Custom Anthropic-compatible API provider for gac.
2
+
3
+ This provider allows users to specify a custom Anthropic-compatible endpoint
4
+ while using the same model capabilities as the standard Anthropic provider.
5
+ """
6
+
7
+ import json
8
+ import logging
9
+ import os
10
+
11
+ import httpx
12
+
13
+ from gac.errors import AIError
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
def call_custom_anthropic_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
    """Call a custom Anthropic-compatible API endpoint.

    This provider is useful for:
    - Anthropic-compatible proxies or gateways
    - Self-hosted Anthropic-compatible services
    - Other services implementing the Anthropic Messages API

    Environment variables:
        CUSTOM_ANTHROPIC_API_KEY: API key for authentication (required)
        CUSTOM_ANTHROPIC_BASE_URL: Base URL for the API endpoint (required)
            Example: https://your-proxy.example.com
        CUSTOM_ANTHROPIC_VERSION: API version header (optional, defaults to '2023-06-01')

    Args:
        model: The model to use (e.g., 'claude-sonnet-4-5', 'claude-haiku-4-5')
        messages: List of message dictionaries with 'role' and 'content' keys
        temperature: Controls randomness (0.0-1.0)
        max_tokens: Maximum tokens in the response

    Returns:
        The generated commit message

    Raises:
        AIError: If authentication fails, API errors occur, or response is invalid
    """
    api_key = os.getenv("CUSTOM_ANTHROPIC_API_KEY")
    if not api_key:
        raise AIError.authentication_error("CUSTOM_ANTHROPIC_API_KEY environment variable not set")

    base_url = os.getenv("CUSTOM_ANTHROPIC_BASE_URL")
    if not base_url:
        raise AIError.model_error("CUSTOM_ANTHROPIC_BASE_URL environment variable not set")

    api_version = os.getenv("CUSTOM_ANTHROPIC_VERSION", "2023-06-01")

    # Accept either a bare base URL or one that already includes /v1/messages.
    if "/v1/messages" not in base_url:
        base_url = base_url.rstrip("/")
        url = f"{base_url}/v1/messages"
    else:
        url = base_url

    headers = {"x-api-key": api_key, "anthropic-version": api_version, "content-type": "application/json"}

    # The Anthropic Messages API takes the system prompt as a top-level
    # "system" field, not as a role="system" message.
    anthropic_messages = []
    system_message = ""

    for msg in messages:
        if msg["role"] == "system":
            system_message = msg["content"]
        else:
            anthropic_messages.append({"role": msg["role"], "content": msg["content"]})

    data = {"model": model, "messages": anthropic_messages, "temperature": temperature, "max_tokens": max_tokens}

    if system_message:
        data["system"] = system_message

    try:
        response = httpx.post(url, headers=headers, json=data, timeout=120)
        response.raise_for_status()
        response_data = response.json()

        # Inner try isolates response-shape parsing; its AIErrors are re-raised
        # as-is while structural exceptions are converted to a descriptive
        # model_error below.
        try:
            content_list = response_data.get("content", [])
            if not content_list:
                raise AIError.model_error("Custom Anthropic API returned empty content array")

            # Try standard Anthropic format first: content[0].text
            if "text" in content_list[0]:
                content = content_list[0]["text"]
            else:
                # Extended format (e.g., MiniMax with thinking): find first item with type="text"
                text_item = next((item for item in content_list if item.get("type") == "text"), None)
                if text_item and "text" in text_item:
                    content = text_item["text"]
                else:
                    logger.error(
                        f"Unexpected response format from Custom Anthropic API. Response: {json.dumps(response_data)}"
                    )
                    raise AIError.model_error(
                        "Custom Anthropic API returned unexpected format. Expected 'text' field in content array."
                    )
        except AIError:
            raise
        except (KeyError, IndexError, TypeError, StopIteration) as e:
            logger.error(f"Unexpected response format from Custom Anthropic API. Response: {json.dumps(response_data)}")
            raise AIError.model_error(
                f"Custom Anthropic API returned unexpected format. Expected Anthropic-compatible response with "
                f"'content[0].text' or items with type='text', but got: {type(e).__name__}. "
                f"Check logs for full response structure."
            ) from e

        if content is None:
            raise AIError.model_error("Custom Anthropic API returned null content")
        if content == "":
            raise AIError.model_error("Custom Anthropic API returned empty content")
        return content
    except httpx.ConnectError as e:
        raise AIError.connection_error(f"Custom Anthropic API connection failed: {str(e)}") from e
    except httpx.HTTPStatusError as e:
        status_code = e.response.status_code
        error_text = e.response.text

        if status_code == 401:
            raise AIError.authentication_error(f"Custom Anthropic API authentication failed: {error_text}") from e
        elif status_code == 429:
            raise AIError.rate_limit_error(f"Custom Anthropic API rate limit exceeded: {error_text}") from e
        else:
            raise AIError.model_error(f"Custom Anthropic API error: {status_code} - {error_text}") from e
    except httpx.TimeoutException as e:
        raise AIError.timeout_error(f"Custom Anthropic API request timed out: {str(e)}") from e
    except AIError:
        # Pass through our own errors (content validation) untouched.
        raise
    except Exception as e:
        raise AIError.model_error(f"Error calling Custom Anthropic API: {str(e)}") from e
@@ -0,0 +1,98 @@
1
+ """Custom OpenAI-compatible API provider for gac.
2
+
3
+ This provider allows users to specify a custom OpenAI-compatible endpoint
4
+ while using the same model capabilities as the standard OpenAI provider.
5
+ """
6
+
7
+ import json
8
+ import logging
9
+ import os
10
+
11
+ import httpx
12
+
13
+ from gac.errors import AIError
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
def call_custom_openai_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
    """Call a custom OpenAI-compatible API endpoint.

    This provider is useful for:
    - OpenAI-compatible proxies or gateways
    - Self-hosted OpenAI-compatible services
    - Other services implementing the OpenAI Chat Completions API

    Environment variables:
        CUSTOM_OPENAI_API_KEY: API key for authentication (required)
        CUSTOM_OPENAI_BASE_URL: Base URL for the API endpoint (required)
            Example: https://your-proxy.example.com/v1

    Args:
        model: The model to use (e.g., 'gpt-4', 'gpt-3.5-turbo')
        messages: List of message dictionaries with 'role' and 'content' keys
        temperature: Controls randomness (0.0-1.0)
        max_tokens: Maximum tokens in the response

    Returns:
        The generated commit message

    Raises:
        AIError: If authentication fails, API errors occur, or response is invalid
    """
    key = os.getenv("CUSTOM_OPENAI_API_KEY")
    if not key:
        raise AIError.authentication_error("CUSTOM_OPENAI_API_KEY environment variable not set")

    base = os.getenv("CUSTOM_OPENAI_BASE_URL")
    if not base:
        raise AIError.model_error("CUSTOM_OPENAI_BASE_URL environment variable not set")

    # Accept either a bare base URL or one already ending in /chat/completions.
    url = base if "/chat/completions" in base else f"{base.rstrip('/')}/chat/completions"

    # NOTE(review): sends 'max_completion_tokens' (the newer OpenAI parameter)
    # rather than 'max_tokens'; some OpenAI-compatible servers may not accept
    # it — confirm this is intentional for the targeted endpoints.
    payload = {"model": model, "messages": messages, "temperature": temperature, "max_completion_tokens": max_tokens}

    try:
        resp = httpx.post(
            url,
            headers={"Authorization": f"Bearer {key}", "Content-Type": "application/json"},
            json=payload,
            timeout=120,
        )
        resp.raise_for_status()
        body = resp.json()

        try:
            text = body["choices"][0]["message"]["content"]
        except (KeyError, IndexError, TypeError) as exc:
            # Log the full payload so the unexpected shape can be diagnosed.
            logger.error(f"Unexpected response format from Custom OpenAI API. Response: {json.dumps(body)}")
            raise AIError.model_error(
                f"Custom OpenAI API returned unexpected format. Expected OpenAI-compatible response with "
                f"'choices[0].message.content', but got: {type(exc).__name__}. Check logs for full response structure."
            ) from exc

        if text is None:
            raise AIError.model_error("Custom OpenAI API returned null content")
        if text == "":
            raise AIError.model_error("Custom OpenAI API returned empty content")
        return text
    except httpx.ConnectError as exc:
        raise AIError.connection_error(f"Custom OpenAI API connection failed: {str(exc)}") from exc
    except httpx.HTTPStatusError as exc:
        code = exc.response.status_code
        detail = exc.response.text
        if code == 401:
            raise AIError.authentication_error(f"Custom OpenAI API authentication failed: {detail}") from exc
        if code == 429:
            raise AIError.rate_limit_error(f"Custom OpenAI API rate limit exceeded: {detail}") from exc
        raise AIError.model_error(f"Custom OpenAI API error: {code} - {detail}") from exc
    except httpx.TimeoutException as exc:
        raise AIError.timeout_error(f"Custom OpenAI API request timed out: {str(exc)}") from exc
    except AIError:
        # Pass through our own errors (content validation) untouched.
        raise
    except Exception as exc:
        raise AIError.model_error(f"Error calling Custom OpenAI API: {str(exc)}") from exc