gac 1.12.1__tar.gz → 1.13.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of gac might be problematic.
- {gac-1.12.1 → gac-1.13.1}/PKG-INFO +5 -4
- {gac-1.12.1 → gac-1.13.1}/README.md +4 -3
- {gac-1.12.1 → gac-1.13.1}/src/gac/__version__.py +1 -1
- {gac-1.12.1 → gac-1.13.1}/src/gac/ai.py +4 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/ai_utils.py +2 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/init_cli.py +35 -3
- {gac-1.12.1 → gac-1.13.1}/src/gac/prompt.py +33 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/providers/__init__.py +4 -0
- gac-1.13.1/src/gac/providers/custom_anthropic.py +133 -0
- gac-1.13.1/src/gac/providers/custom_openai.py +99 -0
- {gac-1.12.1 → gac-1.13.1}/.gitignore +0 -0
- {gac-1.12.1 → gac-1.13.1}/LICENSE +0 -0
- {gac-1.12.1 → gac-1.13.1}/pyproject.toml +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/__init__.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/cli.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/config.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/config_cli.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/constants.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/diff_cli.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/errors.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/git.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/main.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/preprocess.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/providers/anthropic.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/providers/cerebras.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/providers/chutes.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/providers/deepseek.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/providers/fireworks.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/providers/gemini.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/providers/groq.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/providers/lmstudio.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/providers/minimax.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/providers/ollama.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/providers/openai.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/providers/openrouter.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/providers/streamlake.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/providers/synthetic.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/providers/together.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/providers/zai.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/security.py +0 -0
- {gac-1.12.1 → gac-1.13.1}/src/gac/utils.py +0 -0
{gac-1.12.1 → gac-1.13.1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: gac
-Version: 1.12.1
+Version: 1.13.1
 Summary: LLM-powered Git commit message generator with multi-provider support
 Project-URL: Homepage, https://github.com/cellwebb/gac
 Project-URL: Documentation, https://github.com/cellwebb/gac#readme
@@ -91,9 +91,10 @@ gac
 
 ### 🌐 **Supported Providers**
 
-- **Anthropic** • **Cerebras** • **Chutes.ai** • **DeepSeek** • **Fireworks**
-- **Groq** • **LM Studio** • **MiniMax** • **Ollama** • **OpenAI**
-- **Streamlake** • **Synthetic.new** • **Together AI**
+- **Anthropic** • **Cerebras** • **Chutes.ai** • **DeepSeek** • **Fireworks**
+- **Gemini** • **Groq** • **LM Studio** • **MiniMax** • **Ollama** • **OpenAI**
+- **OpenRouter** • **Streamlake** • **Synthetic.new** • **Together AI**
+- **Z.AI** • **Z.AI Coding** • **Custom Endpoints (Anthropic/OpenAI)**
 
 ### 🧠 **Smart LLM Analysis**
 
{gac-1.12.1 → gac-1.13.1}/README.md

@@ -50,9 +50,10 @@ gac
 
 ### 🌐 **Supported Providers**
 
-- **Anthropic** • **Cerebras** • **Chutes.ai** • **DeepSeek** • **Fireworks**
-- **Groq** • **LM Studio** • **MiniMax** • **Ollama** • **OpenAI**
-- **Streamlake** • **Synthetic.new** • **Together AI**
+- **Anthropic** • **Cerebras** • **Chutes.ai** • **DeepSeek** • **Fireworks**
+- **Gemini** • **Groq** • **LM Studio** • **MiniMax** • **Ollama** • **OpenAI**
+- **OpenRouter** • **Streamlake** • **Synthetic.new** • **Together AI**
+- **Z.AI** • **Z.AI Coding** • **Custom Endpoints (Anthropic/OpenAI)**
 
 ### 🧠 **Smart LLM Analysis**
 
{gac-1.12.1 → gac-1.13.1}/src/gac/ai.py

@@ -13,6 +13,8 @@ from gac.providers import (
     call_anthropic_api,
     call_cerebras_api,
     call_chutes_api,
+    call_custom_anthropic_api,
+    call_custom_openai_api,
     call_deepseek_api,
     call_fireworks_api,
     call_gemini_api,
@@ -84,6 +86,8 @@ def generate_commit_message(
         "anthropic": call_anthropic_api,
         "cerebras": call_cerebras_api,
         "chutes": call_chutes_api,
+        "custom-anthropic": call_custom_anthropic_api,
+        "custom-openai": call_custom_openai_api,
         "deepseek": call_deepseek_api,
         "fireworks": call_fireworks_api,
         "gemini": call_gemini_api,
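For context, gac routes each request by the provider prefix of the configured model string (init writes GAC_MODEL=provider-key:model, per the init_cli.py hunks below). A minimal sketch of that dispatch pattern, assuming the provider functions exported above; the helper name and the temperature/max_tokens defaults are illustrative, not gac internals:

from gac.providers import call_custom_anthropic_api, call_custom_openai_api

def dispatch(model_spec: str, messages: list[dict]) -> str:
    # "custom-openai:my-model" -> provider "custom-openai", model "my-model"
    provider, _, model = model_spec.partition(":")
    provider_funcs = {
        "custom-anthropic": call_custom_anthropic_api,
        "custom-openai": call_custom_openai_api,
        # ... remaining providers as registered in the hunk above
    }
    if provider not in provider_funcs:
        raise ValueError(f"Unsupported provider: {provider}")
    return provider_funcs[provider](model, messages, 0.7, 256)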
{gac-1.12.1 → gac-1.13.1}/src/gac/ai_utils.py

@@ -110,6 +110,8 @@ def generate_with_retries(
         "together",
         "zai",
         "zai-coding",
+        "custom-anthropic",
+        "custom-openai",
     ]
     if provider not in supported_providers:
         raise AIError.model_error(f"Unsupported provider: {provider}. Supported providers: {supported_providers}")
{gac-1.12.1 → gac-1.13.1}/src/gac/init_cli.py

@@ -35,6 +35,8 @@ def init() -> None:
         ("Anthropic", "claude-haiku-4-5"),
         ("Cerebras", "qwen-3-coder-480b"),
         ("Chutes", "zai-org/GLM-4.6-FP8"),
+        ("Custom (Anthropic)", ""),
+        ("Custom (OpenAI)", ""),
         ("DeepSeek", "deepseek-chat"),
         ("Fireworks", "accounts/fireworks/models/gpt-oss-20b"),
         ("Gemini", "gemini-2.5-flash"),
@@ -55,12 +57,14 @@ def init() -> None:
     if not provider:
         click.echo("Provider selection cancelled. Exiting.")
         return
-    provider_key = provider.lower().replace(".", "").replace(" ", "-")
+    provider_key = provider.lower().replace(".", "").replace(" ", "-").replace("(", "").replace(")", "")
 
     is_ollama = provider_key == "ollama"
     is_lmstudio = provider_key == "lm-studio"
     is_streamlake = provider_key == "streamlake"
     is_zai = provider_key in ("zai", "zai-coding")
+    is_custom_anthropic = provider_key == "custom-anthropic"
+    is_custom_openai = provider_key == "custom-openai"
 
     if is_streamlake:
         endpoint_id = _prompt_required_text("Enter the Streamlake inference endpoint ID (required):")
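The longer normalization chain exists so the new parenthesized menu labels land on the same key format the rest of the code checks. A standalone illustration (not gac code) of how menu labels map to provider keys:

def normalize(label: str) -> str:
    # Same chain as above: lowercase, drop dots, hyphenate spaces, drop parentheses
    return label.lower().replace(".", "").replace(" ", "-").replace("(", "").replace(")", "")

assert normalize("Custom (Anthropic)") == "custom-anthropic"
assert normalize("Custom (OpenAI)") == "custom-openai"
assert normalize("LM Studio") == "lm-studio"
assert normalize("Z.AI Coding") == "zai-coding"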
@@ -70,7 +74,10 @@ def init() -> None:
         model_to_save = endpoint_id
     else:
         model_suggestion = dict(providers)[provider]
-        model_prompt = f"Enter the model (default: {model_suggestion}):"
+        if model_suggestion == "":
+            model_prompt = "Enter the model (required):"
+        else:
+            model_prompt = f"Enter the model (default: {model_suggestion}):"
         model = questionary.text(model_prompt, default=model_suggestion).ask()
         if model is None:
             click.echo("Model entry cancelled. Exiting.")
@@ -80,7 +87,28 @@ def init() -> None:
     set_key(str(GAC_ENV_PATH), "GAC_MODEL", f"{provider_key}:{model_to_save}")
     click.echo(f"Set GAC_MODEL={provider_key}:{model_to_save}")
 
-    if is_ollama:
+    if is_custom_anthropic:
+        base_url = _prompt_required_text("Enter the custom Anthropic-compatible base URL (required):")
+        if base_url is None:
+            click.echo("Custom Anthropic base URL entry cancelled. Exiting.")
+            return
+        set_key(str(GAC_ENV_PATH), "CUSTOM_ANTHROPIC_BASE_URL", base_url)
+        click.echo(f"Set CUSTOM_ANTHROPIC_BASE_URL={base_url}")
+
+        api_version = questionary.text(
+            "Enter the API version (optional, press Enter for default: 2023-06-01):", default="2023-06-01"
+        ).ask()
+        if api_version and api_version != "2023-06-01":
+            set_key(str(GAC_ENV_PATH), "CUSTOM_ANTHROPIC_VERSION", api_version)
+            click.echo(f"Set CUSTOM_ANTHROPIC_VERSION={api_version}")
+    elif is_custom_openai:
+        base_url = _prompt_required_text("Enter the custom OpenAI-compatible base URL (required):")
+        if base_url is None:
+            click.echo("Custom OpenAI base URL entry cancelled. Exiting.")
+            return
+        set_key(str(GAC_ENV_PATH), "CUSTOM_OPENAI_BASE_URL", base_url)
+        click.echo(f"Set CUSTOM_OPENAI_BASE_URL={base_url}")
+    elif is_ollama:
         url_default = "http://localhost:11434"
         url = questionary.text(f"Enter the Ollama API URL (default: {url_default}):", default=url_default).ask()
         if url is None:
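After this branch completes for a custom OpenAI-compatible endpoint, the gac env file at GAC_ENV_PATH would hold entries along these lines (the URL, model, and key values below are placeholders):

GAC_MODEL=custom-openai:my-gateway-model
CUSTOM_OPENAI_BASE_URL=https://llm-gateway.example.com/v1
CUSTOM_OPENAI_API_KEY=sk-placeholder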
@@ -112,6 +140,10 @@ def init() -> None:
         api_key_name = "LMSTUDIO_API_KEY"
     elif is_zai:
         api_key_name = "ZAI_API_KEY"
+    elif is_custom_anthropic:
+        api_key_name = "CUSTOM_ANTHROPIC_API_KEY"
+    elif is_custom_openai:
+        api_key_name = "CUSTOM_OPENAI_API_KEY"
     else:
         api_key_name = f"{provider_key.upper()}_API_KEY"
     set_key(str(GAC_ENV_PATH), api_key_name, api_key)
{gac-1.12.1 → gac-1.13.1}/src/gac/prompt.py

@@ -454,6 +454,39 @@ def clean_commit_message(message: str) -> str:
     """
     message = message.strip()
 
+    # Remove <think> tags and their content (some providers like MiniMax include reasoning)
+    # Only remove multi-line reasoning blocks, never single-line content that might be descriptions
+    # Strategy: Remove blocks that clearly contain internal newlines (multi-line reasoning)
+
+    # Step 1: Remove multi-line <think>...</think> blocks (those with newlines inside)
+    # Pattern: <think> followed by content that includes newlines, ending with </think>
+    # This safely distinguishes reasoning from inline mentions like "handle <think> tags"
+    # Use negative lookahead to prevent matching across multiple blocks
+    while re.search(r"<think>(?:(?!</think>)[^\n])*\n.*?</think>", message, flags=re.DOTALL | re.IGNORECASE):
+        message = re.sub(
+            r"<think>(?:(?!</think>)[^\n])*\n.*?</think>\s*", "", message, flags=re.DOTALL | re.IGNORECASE, count=1
+        )
+
+    # Step 2: Remove blocks separated by blank lines (before or after the message)
+    message = re.sub(r"\n\n+\s*<think>.*?</think>\s*", "", message, flags=re.DOTALL | re.IGNORECASE)
+    message = re.sub(r"<think>.*?</think>\s*\n\n+", "", message, flags=re.DOTALL | re.IGNORECASE)
+
+    # Step 3: Handle orphaned opening <think> tags followed by newline
+    message = re.sub(r"<think>\s*\n.*$", "", message, flags=re.DOTALL | re.IGNORECASE)
+
+    # Step 4: Handle orphaned closing </think> tags at the start (before any conventional prefix)
+    conventional_prefixes_pattern = r"(feat|fix|docs|style|refactor|perf|test|build|ci|chore)[\(:)]"
+    if re.search(r"^.*?</think>", message, flags=re.DOTALL | re.IGNORECASE):
+        prefix_match = re.search(conventional_prefixes_pattern, message, flags=re.IGNORECASE)
+        think_match = re.search(r"</think>", message, flags=re.IGNORECASE)
+
+        if not prefix_match or (think_match and think_match.start() < prefix_match.start()):
+            # No prefix or </think> comes before prefix - remove everything up to and including it
+            message = re.sub(r"^.*?</think>\s*", "", message, flags=re.DOTALL | re.IGNORECASE)
+
+    # Step 5: Remove orphaned closing </think> tags at the end (not part of inline mentions)
+    message = re.sub(r"</think>\s*$", "", message, flags=re.IGNORECASE)
+
     # Remove any markdown code blocks
     message = re.sub(r"```[\w]*\n|```", "", message)
 
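A quick standalone check of the Step 1 behavior the comments describe, using invented sample strings: a multi-line reasoning block is stripped, while a single-line inline mention survives because the pattern requires a newline inside the block.

import re

pattern = r"<think>(?:(?!</think>)[^\n])*\n.*?</think>"

msg = "<think>\nNeed a concise subject line...\n</think>\nfeat: add custom provider endpoints"
while re.search(pattern, msg, flags=re.DOTALL | re.IGNORECASE):
    msg = re.sub(pattern + r"\s*", "", msg, flags=re.DOTALL | re.IGNORECASE, count=1)
print(msg)  # feat: add custom provider endpoints

inline = "docs: explain how gac strips <think> tags"
print(re.search(pattern, inline, flags=re.DOTALL | re.IGNORECASE))  # None - left untouched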
{gac-1.12.1 → gac-1.13.1}/src/gac/providers/__init__.py

@@ -3,6 +3,8 @@
 from .anthropic import call_anthropic_api
 from .cerebras import call_cerebras_api
 from .chutes import call_chutes_api
+from .custom_anthropic import call_custom_anthropic_api
+from .custom_openai import call_custom_openai_api
 from .deepseek import call_deepseek_api
 from .fireworks import call_fireworks_api
 from .gemini import call_gemini_api
@@ -21,6 +23,8 @@ __all__ = [
     "call_anthropic_api",
     "call_cerebras_api",
     "call_chutes_api",
+    "call_custom_anthropic_api",
+    "call_custom_openai_api",
     "call_deepseek_api",
     "call_fireworks_api",
     "call_gemini_api",
gac-1.13.1/src/gac/providers/custom_anthropic.py (new file)

@@ -0,0 +1,133 @@
+"""Custom Anthropic-compatible API provider for gac.
+
+This provider allows users to specify a custom Anthropic-compatible endpoint
+while using the same model capabilities as the standard Anthropic provider.
+"""
+
+import json
+import logging
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+logger = logging.getLogger(__name__)
+
+
+def call_custom_anthropic_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call a custom Anthropic-compatible API endpoint.
+
+    This provider is useful for:
+    - Anthropic-compatible proxies or gateways
+    - Self-hosted Anthropic-compatible services
+    - Other services implementing the Anthropic Messages API
+
+    Environment variables:
+        CUSTOM_ANTHROPIC_API_KEY: API key for authentication (required)
+        CUSTOM_ANTHROPIC_BASE_URL: Base URL for the API endpoint (required)
+            Example: https://your-proxy.example.com
+        CUSTOM_ANTHROPIC_VERSION: API version header (optional, defaults to '2023-06-01')
+
+    Args:
+        model: The model to use (e.g., 'claude-3-5-sonnet-20241022', 'claude-3-5-haiku-latest')
+        messages: List of message dictionaries with 'role' and 'content' keys
+        temperature: Controls randomness (0.0-1.0)
+        max_tokens: Maximum tokens in the response
+
+    Returns:
+        The generated commit message
+
+    Raises:
+        AIError: If authentication fails, API errors occur, or response is invalid
+    """
+    api_key = os.getenv("CUSTOM_ANTHROPIC_API_KEY")
+    if not api_key:
+        raise AIError.authentication_error("CUSTOM_ANTHROPIC_API_KEY environment variable not set")
+
+    base_url = os.getenv("CUSTOM_ANTHROPIC_BASE_URL")
+    if not base_url:
+        raise AIError.model_error("CUSTOM_ANTHROPIC_BASE_URL environment variable not set")
+
+    api_version = os.getenv("CUSTOM_ANTHROPIC_VERSION", "2023-06-01")
+
+    if "/v1/messages" not in base_url:
+        base_url = base_url.rstrip("/")
+        url = f"{base_url}/v1/messages"
+    else:
+        url = base_url
+
+    headers = {"x-api-key": api_key, "anthropic-version": api_version, "content-type": "application/json"}
+
+    anthropic_messages = []
+    system_message = ""
+
+    for msg in messages:
+        if msg["role"] == "system":
+            system_message = msg["content"]
+        else:
+            anthropic_messages.append({"role": msg["role"], "content": msg["content"]})
+
+    data = {"model": model, "messages": anthropic_messages, "temperature": temperature, "max_tokens": max_tokens}
+
+    if system_message:
+        data["system"] = system_message
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+
+        try:
+            content_list = response_data.get("content", [])
+            if not content_list:
+                raise AIError.model_error("Custom Anthropic API returned empty content array")
+
+            # Try standard Anthropic format first: content[0].text
+            if "text" in content_list[0]:
+                content = content_list[0]["text"]
+            else:
+                # Extended format (e.g., MiniMax with thinking): find first item with type="text"
+                text_item = next((item for item in content_list if item.get("type") == "text"), None)
+                if text_item and "text" in text_item:
+                    content = text_item["text"]
+                else:
+                    logger.error(
+                        f"Unexpected response format from Custom Anthropic API. Response: {json.dumps(response_data)}"
+                    )
+                    raise AIError.model_error(
+                        "Custom Anthropic API returned unexpected format. Expected 'text' field in content array."
+                    )
+        except AIError:
+            raise
+        except (KeyError, IndexError, TypeError, StopIteration) as e:
+            logger.error(f"Unexpected response format from Custom Anthropic API. Response: {json.dumps(response_data)}")
+            raise AIError.model_error(
+                f"Custom Anthropic API returned unexpected format. Expected Anthropic-compatible response with "
+                f"'content[0].text' or items with type='text', but got: {type(e).__name__}. "
+                f"Check logs for full response structure."
+            ) from e
+
+        if content is None:
+            raise AIError.model_error("Custom Anthropic API returned null content")
+        if content == "":
+            raise AIError.model_error("Custom Anthropic API returned empty content")
+        return content
+    except httpx.ConnectError as e:
+        raise AIError.connection_error(f"Custom Anthropic API connection failed: {str(e)}") from e
+    except httpx.HTTPStatusError as e:
+        status_code = e.response.status_code
+        error_text = e.response.text
+
+        if status_code == 401:
+            raise AIError.authentication_error(f"Custom Anthropic API authentication failed: {error_text}") from e
+        elif status_code == 429:
+            raise AIError.rate_limit_error(f"Custom Anthropic API rate limit exceeded: {error_text}") from e
+        else:
+            raise AIError.model_error(f"Custom Anthropic API error: {status_code} - {error_text}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"Custom Anthropic API request timed out: {str(e)}") from e
+    except AIError:
+        raise
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Custom Anthropic API: {str(e)}") from e
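A hedged usage sketch of the new provider; the endpoint, key, and diff text below are placeholders, and the call makes a real HTTP request against whatever CUSTOM_ANTHROPIC_BASE_URL points at:

import os
from gac.providers import call_custom_anthropic_api

os.environ["CUSTOM_ANTHROPIC_BASE_URL"] = "https://claude-proxy.example.com"  # placeholder proxy
os.environ["CUSTOM_ANTHROPIC_API_KEY"] = "sk-placeholder"

message = call_custom_anthropic_api(
    model="claude-3-5-haiku-latest",
    messages=[
        {"role": "system", "content": "Write a conventional commit message for this diff."},
        {"role": "user", "content": "diff --git a/src/gac/ai.py b/src/gac/ai.py ..."},
    ],
    temperature=0.7,
    max_tokens=256,
)
print(message)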
gac-1.13.1/src/gac/providers/custom_openai.py (new file)

@@ -0,0 +1,99 @@
+"""Custom OpenAI-compatible API provider for gac.
+
+This provider allows users to specify a custom OpenAI-compatible endpoint
+while using the same model capabilities as the standard OpenAI provider.
+"""
+
+import json
+import logging
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+logger = logging.getLogger(__name__)
+
+
+def call_custom_openai_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call a custom OpenAI-compatible API endpoint.
+
+    This provider is useful for:
+    - Azure OpenAI Service
+    - OpenAI-compatible proxies or gateways
+    - Self-hosted OpenAI-compatible services
+    - Other services implementing the OpenAI Chat Completions API
+
+    Environment variables:
+        CUSTOM_OPENAI_API_KEY: API key for authentication (required)
+        CUSTOM_OPENAI_BASE_URL: Base URL for the API endpoint (required)
+            Example: https://your-endpoint.openai.azure.com
+            Example: https://your-proxy.example.com/v1
+
+    Args:
+        model: The model to use (e.g., 'gpt-4', 'gpt-3.5-turbo')
+        messages: List of message dictionaries with 'role' and 'content' keys
+        temperature: Controls randomness (0.0-1.0)
+        max_tokens: Maximum tokens in the response
+
+    Returns:
+        The generated commit message
+
+    Raises:
+        AIError: If authentication fails, API errors occur, or response is invalid
+    """
+    api_key = os.getenv("CUSTOM_OPENAI_API_KEY")
+    if not api_key:
+        raise AIError.authentication_error("CUSTOM_OPENAI_API_KEY environment variable not set")
+
+    base_url = os.getenv("CUSTOM_OPENAI_BASE_URL")
+    if not base_url:
+        raise AIError.model_error("CUSTOM_OPENAI_BASE_URL environment variable not set")
+
+    if "/chat/completions" not in base_url:
+        base_url = base_url.rstrip("/")
+        url = f"{base_url}/chat/completions"
+    else:
+        url = base_url
+
+    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+    data = {"model": model, "messages": messages, "temperature": temperature, "max_completion_tokens": max_tokens}
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+
+        try:
+            content = response_data["choices"][0]["message"]["content"]
+        except (KeyError, IndexError, TypeError) as e:
+            logger.error(f"Unexpected response format from Custom OpenAI API. Response: {json.dumps(response_data)}")
+            raise AIError.model_error(
+                f"Custom OpenAI API returned unexpected format. Expected OpenAI-compatible response with "
+                f"'choices[0].message.content', but got: {type(e).__name__}. Check logs for full response structure."
+            ) from e
+
+        if content is None:
+            raise AIError.model_error("Custom OpenAI API returned null content")
+        if content == "":
+            raise AIError.model_error("Custom OpenAI API returned empty content")
+        return content
+    except httpx.ConnectError as e:
+        raise AIError.connection_error(f"Custom OpenAI API connection failed: {str(e)}") from e
+    except httpx.HTTPStatusError as e:
+        status_code = e.response.status_code
+        error_text = e.response.text
+
+        if status_code == 401:
+            raise AIError.authentication_error(f"Custom OpenAI API authentication failed: {error_text}") from e
+        elif status_code == 429:
+            raise AIError.rate_limit_error(f"Custom OpenAI API rate limit exceeded: {error_text}") from e
+        else:
+            raise AIError.model_error(f"Custom OpenAI API error: {status_code} - {error_text}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"Custom OpenAI API request timed out: {str(e)}") from e
+    except AIError:
+        raise
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Custom OpenAI API: {str(e)}") from e
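The URL handling mirrors the Anthropic variant: /chat/completions is appended unless the configured base URL already contains it. A standalone restatement of that joining logic, with placeholder URLs:

def resolve_url(base_url: str) -> str:
    # Append the Chat Completions path only when it is not already present
    if "/chat/completions" not in base_url:
        return base_url.rstrip("/") + "/chat/completions"
    return base_url

assert resolve_url("https://llm-gateway.example.com/v1") == "https://llm-gateway.example.com/v1/chat/completions"
assert resolve_url("https://llm-gateway.example.com/v1/chat/completions") == "https://llm-gateway.example.com/v1/chat/completions"

Note that the request body uses max_completion_tokens rather than the older max_tokens field, so an OpenAI-compatible server that only understands max_tokens may reject or ignore the limit.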