gac 1.1.0-py3-none-any.whl → 1.2.1-py3-none-any.whl
- gac/__init__.py +6 -6
- gac/__version__.py +1 -1
- gac/ai.py +43 -34
- gac/ai_utils.py +113 -62
- gac/errors.py +5 -0
- gac/providers/__init__.py +17 -1
- gac/providers/anthropic.py +32 -131
- gac/providers/cerebras.py +19 -124
- gac/providers/groq.py +43 -119
- gac/providers/ollama.py +27 -127
- gac/providers/openai.py +18 -123
- gac/providers/openrouter.py +19 -98
- {gac-1.1.0.dist-info → gac-1.2.1.dist-info}/METADATA +1 -1
- gac-1.2.1.dist-info/RECORD +28 -0
- gac-1.1.0.dist-info/RECORD +0 -28
- {gac-1.1.0.dist-info → gac-1.2.1.dist-info}/WHEEL +0 -0
- {gac-1.1.0.dist-info → gac-1.2.1.dist-info}/entry_points.txt +0 -0
- {gac-1.1.0.dist-info → gac-1.2.1.dist-info}/licenses/LICENSE +0 -0
gac/__init__.py
CHANGED
@@ -4,12 +4,12 @@ from gac.__version__ import __version__
 from gac.ai import generate_commit_message
 from gac.git import get_staged_files, push_changes
 from gac.prompt import build_prompt, clean_commit_message
-from gac.providers.anthropic import
-from gac.providers.cerebras import
-from gac.providers.groq import
-from gac.providers.ollama import
-from gac.providers.openai import
-from gac.providers.openrouter import
+from gac.providers.anthropic import call_anthropic_api as anthropic_generate
+from gac.providers.cerebras import call_cerebras_api as cerebras_generate
+from gac.providers.groq import call_groq_api as groq_generate
+from gac.providers.ollama import call_ollama_api as ollama_generate
+from gac.providers.openai import call_openai_api as openai_generate
+from gac.providers.openrouter import call_openrouter_api as openrouter_generate
 
 __all__ = [
     "__version__",
gac/__version__.py
CHANGED
gac/ai.py
CHANGED
@@ -6,14 +6,17 @@ It consolidates all AI-related functionality including token counting and commit
 
 import logging
 
+from gac.ai_utils import generate_with_retries
 from gac.constants import EnvDefaults
 from gac.errors import AIError
-from gac.providers
-
-
-
-
-
+from gac.providers import (
+    call_anthropic_api,
+    call_cerebras_api,
+    call_groq_api,
+    call_ollama_api,
+    call_openai_api,
+    call_openrouter_api,
+)
 
 logger = logging.getLogger(__name__)
 
@@ -48,33 +51,39 @@ def generate_commit_message(
     >>> generate_commit_message(model, (system_prompt, user_prompt))
     'docs: Update README with installation instructions'
     """
-
-
-
-
-
-
+    # Handle both old (string) and new (tuple) prompt formats
+    if isinstance(prompt, tuple):
+        system_prompt, user_prompt = prompt
+    else:
+        # Backward compatibility: treat string as user prompt with no system prompt
+        system_prompt = ""
+        user_prompt = prompt
 
-    #
-
-
-
-
-
+    # Provider functions mapping
+    provider_funcs = {
+        "anthropic": call_anthropic_api,
+        "openai": call_openai_api,
+        "groq": call_groq_api,
+        "cerebras": call_cerebras_api,
+        "ollama": call_ollama_api,
+        "openrouter": call_openrouter_api,
+    }
 
-    #
-
-    return
-
-
-
-
-
-
-
-
-
-    raise
+    # Generate the commit message using centralized retry logic
+    try:
+        return generate_with_retries(
+            provider_funcs=provider_funcs,
+            model=model,
+            system_prompt=system_prompt,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            max_tokens=max_tokens,
+            max_retries=max_retries,
+            quiet=quiet,
+        )
+    except AIError:
+        # Re-raise AIError exceptions as-is to preserve error classification
+        raise
+    except Exception as e:
+        logger.error(f"Failed to generate commit message: {e}")
+        raise AIError.model_error(f"Failed to generate commit message: {e}") from e
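Net effect: generate_commit_message now only normalizes the prompt, builds the provider table, and delegates to the centralized retry helper. A usage sketch based on the doctest shown above; the model string and prompt text are illustrative:

from gac.ai import generate_commit_message

# The model string must use the "provider:model" format; anything else
# raises AIError.model_error inside generate_with_retries.
message = generate_commit_message(
    "anthropic:claude-3-5-haiku-latest",
    ("You write conventional commit messages.", "<staged diff here>"),
)
print(message)  # e.g. 'docs: Update README with installation instructions'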
gac/ai_utils.py
CHANGED
@@ -4,14 +4,15 @@ This module provides utility functions that support the AI provider implementati
 """
 
 import logging
-import os
+import time
 from functools import lru_cache
 from typing import Any
 
-import httpx
 import tiktoken
+from halo import Halo
 
 from gac.constants import Utility
+from gac.errors import AIError
 
 logger = logging.getLogger(__name__)
 
@@ -22,12 +23,6 @@ def count_tokens(content: str | list[dict[str, str]] | dict[str, Any], model: str
     if not text:
         return 0
 
-    if model.startswith("anthropic"):
-        anthropic_tokens = anthropic_count_tokens(text, model)
-        if anthropic_tokens is not None:
-            return anthropic_tokens
-        return len(text) // 4
-
     try:
         encoding = get_encoding(model)
         return len(encoding.encode(text))
@@ -36,60 +31,6 @@ def count_tokens(content: str | list[dict[str, str]] | dict[str, Any], model: str
     return len(text) // 4
 
 
-def anthropic_count_tokens(text: str, model: str) -> int | None:
-    """Call Anthropic's token count endpoint and return the token usage.
-
-    Returns the token count when successful, otherwise ``None`` so callers can
-    fall back to a heuristic estimate.
-    """
-    api_key = os.getenv("ANTHROPIC_API_KEY")
-    if not api_key:
-        logger.debug("ANTHROPIC_API_KEY not set; using heuristic token estimation for Anthropic model")
-        return None
-
-    model_name = model.split(":", 1)[1] if ":" in model else "claude-3-5-haiku-latest"
-    headers = {
-        "Content-Type": "application/json",
-        "x-api-key": api_key,
-        "anthropic-version": "2023-06-01",
-    }
-    payload = {
-        "model": model_name,
-        "messages": [
-            {
-                "role": "user",
-                "content": [
-                    {
-                        "type": "text",
-                        "text": text,
-                    }
-                ],
-            }
-        ],
-    }
-
-    try:
-        response = httpx.post(
-            "https://api.anthropic.com/v1/messages/count_tokens",
-            headers=headers,
-            json=payload,
-            timeout=30.0,
-        )
-        response.raise_for_status()
-        data = response.json()
-
-        if "input_tokens" in data:
-            return data["input_tokens"]
-        if "usage" in data and "input_tokens" in data["usage"]:
-            return data["usage"]["input_tokens"]
-
-        logger.warning("Unexpected response format from Anthropic token count API: %s", data)
-    except Exception as exc:
-        logger.warning("Failed to retrieve Anthropic token count via HTTP: %s", exc)
-
-    return None
-
-
 def extract_text_content(content: str | list[dict[str, str]] | dict[str, Any]) -> str:
     """Extract text content from various input formats."""
     if isinstance(content, str):
@@ -132,3 +73,113 @@ def _classify_error(error_str: str) -> str:
         return "model"
     else:
         return "unknown"
+
+
+def generate_with_retries(
+    provider_funcs: dict,
+    model: str,
+    system_prompt: str,
+    user_prompt: str,
+    temperature: float,
+    max_tokens: int,
+    max_retries: int,
+    quiet: bool = False,
+) -> str:
+    """Generate content with retry logic using direct API calls."""
+    # Parse model string to determine provider and actual model
+    if ":" not in model:
+        raise AIError.model_error(f"Invalid model format. Expected 'provider:model', got '{model}'")
+
+    provider, model_name = model.split(":", 1)
+
+    # Validate provider
+    supported_providers = ["anthropic", "openai", "groq", "cerebras", "ollama", "openrouter"]
+    if provider not in supported_providers:
+        raise AIError.model_error(f"Unsupported provider: {provider}. Supported providers: {supported_providers}")
+
+    messages = [
+        {"role": "system", "content": system_prompt},
+        {"role": "user", "content": user_prompt},
+    ]
+
+    # Set up spinner
+    if quiet:
+        spinner = None
+    else:
+        spinner = Halo(text=f"Generating commit message with {provider} {model_name}...", spinner="dots")
+        spinner.start()
+
+    last_exception = None
+    last_error_type = "unknown"
+
+    for attempt in range(max_retries):
+        try:
+            if not quiet and attempt > 0:
+                if spinner:
+                    spinner.text = f"Retry {attempt + 1}/{max_retries} with {provider} {model_name}..."
+                logger.info(f"Retry attempt {attempt + 1}/{max_retries}")
+
+            # Call the appropriate provider function
+            provider_func = provider_funcs.get(provider)
+            if not provider_func:
+                raise AIError.model_error(f"Provider function not found for: {provider}")
+
+            content = provider_func(model=model_name, messages=messages, temperature=temperature, max_tokens=max_tokens)
+
+            if spinner:
+                spinner.succeed(f"Generated commit message with {provider} {model_name}")
+
+            if content is not None and content.strip():
+                return content.strip()
+            else:
+                logger.warning(f"Empty or None content received from {provider} {model_name}: {repr(content)}")
+                raise AIError.model_error("Empty response from AI model")
+
+        except Exception as e:
+            last_exception = e
+            error_type = _classify_error(str(e))
+            last_error_type = error_type
+
+            # For authentication and model errors, don't retry
+            if error_type in ["authentication", "model"]:
+                if spinner:
+                    spinner.fail(f"Failed to generate commit message with {provider} {model_name}")
+
+                # Create the appropriate error type based on classification
+                if error_type == "authentication":
+                    raise AIError.authentication_error(f"AI generation failed: {str(e)}") from e
+                elif error_type == "model":
+                    raise AIError.model_error(f"AI generation failed: {str(e)}") from e
+
+            if attempt < max_retries - 1:
+                # Exponential backoff
+                wait_time = 2**attempt
+                if not quiet:
+                    logger.warning(f"AI generation failed (attempt {attempt + 1}), retrying in {wait_time}s: {str(e)}")
+
+                if spinner:
+                    for i in range(wait_time, 0, -1):
+                        spinner.text = f"Retry {attempt + 1}/{max_retries} in {i}s..."
+                        time.sleep(1)
+                else:
+                    time.sleep(wait_time)
+            else:
+                logger.error(f"AI generation failed after {max_retries} attempts: {str(e)}")
+
+    if spinner:
+        spinner.fail(f"Failed to generate commit message with {provider} {model_name}")
+
+    # If we get here, all retries failed - use the last classified error type
+    error_message = f"Failed to generate commit message after {max_retries} attempts"
+    if last_error_type == "authentication":
+        raise AIError.authentication_error(error_message) from last_exception
+    elif last_error_type == "rate_limit":
+        raise AIError.rate_limit_error(error_message) from last_exception
+    elif last_error_type == "timeout":
+        raise AIError.timeout_error(error_message) from last_exception
+    elif last_error_type == "connection":
+        raise AIError.connection_error(error_message) from last_exception
+    elif last_error_type == "model":
+        raise AIError.model_error(error_message) from last_exception
+    else:
+        raise AIError.unknown_error(error_message) from last_exception
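Because the retry loop is now provider-agnostic, any callable matching the (model, messages, temperature, max_tokens) -> str contract can be slotted into provider_funcs. A self-contained sketch with a stub provider; no network is needed, and every name besides generate_with_retries is illustrative:

from gac.ai_utils import generate_with_retries

def fake_provider(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
    # Stand-in for a real call_*_api function.
    return "chore: update dependencies"

message = generate_with_retries(
    provider_funcs={"openai": fake_provider},  # keyed by the prefix of "provider:model"
    model="openai:stub-model",  # prefix must be one of the supported providers
    system_prompt="You write conventional commit messages.",
    user_prompt="<staged diff>",
    temperature=0.2,
    max_tokens=128,
    max_retries=3,
    quiet=True,  # quiet=True skips the Halo spinner entirely
)
assert message == "chore: update dependencies"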
gac/errors.py
CHANGED
@@ -95,6 +95,11 @@ class AIError(GacError):
         """Create a model error."""
         return cls(message, error_type="model")
 
+    @classmethod
+    def unknown_error(cls, message: str) -> "AIError":
+        """Create an unknown error."""
+        return cls(message, error_type="unknown")
+
 
 class FormattingError(GacError):
     """Error related to code formatting."""
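unknown_error completes the set of AIError factory classmethods that the final re-raise block in generate_with_retries maps _classify_error results onto. A sketch of the pattern, assuming the AIError constructor stores its error_type keyword as an attribute (not shown in this diff):

from gac.errors import AIError

try:
    raise AIError.unknown_error("Failed to generate commit message after 3 attempts")
except AIError as err:
    # Assumed attribute, set via the error_type keyword in the factory.
    print(getattr(err, "error_type", None))  # -> "unknown"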
gac/providers/__init__.py
CHANGED
@@ -1 +1,17 @@
-"""AI
+"""AI provider implementations for commit message generation."""
+
+from .anthropic import call_anthropic_api
+from .cerebras import call_cerebras_api
+from .groq import call_groq_api
+from .ollama import call_ollama_api
+from .openai import call_openai_api
+from .openrouter import call_openrouter_api
+
+__all__ = [
+    "call_anthropic_api",
+    "call_cerebras_api",
+    "call_groq_api",
+    "call_ollama_api",
+    "call_openai_api",
+    "call_openrouter_api",
+]
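With every provider re-exported here under the uniform call_<provider>_api name, the provider table built in gac/ai.py could equally be derived from __all__. A sketch of that correspondence; the comprehension is illustrative, not part of the package:

from gac import providers

# "call_anthropic_api".split("_")[1] -> "anthropic", and so on.
provider_funcs = {name.split("_")[1]: getattr(providers, name) for name in providers.__all__}
print(sorted(provider_funcs))  # ['anthropic', 'cerebras', 'groq', 'ollama', 'openai', 'openrouter']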
gac/providers/anthropic.py
CHANGED
@@ -1,141 +1,42 @@
-"""Anthropic
+"""Anthropic AI provider implementation."""
 
-import logging
 import os
-import time
 
 import httpx
-from halo import Halo
 
-from gac.ai_utils import _classify_error
-from gac.constants import EnvDefaults
 from gac.errors import AIError
 
-logger = logging.getLogger(__name__)
 
-
-
-    model: str,
-    prompt: str | tuple[str, str],
-    temperature: float = EnvDefaults.TEMPERATURE,
-    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
-    max_retries: int = EnvDefaults.MAX_RETRIES,
-    quiet: bool = False,
-) -> str:
-    """Generate commit message using Anthropic API with retry logic.
-
-    Args:
-        model: The model name (e.g., 'claude-3-5-haiku-latest', 'claude-3-opus-latest')
-        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
-        temperature: Controls randomness (0.0-1.0)
-        max_tokens: Maximum tokens in the response
-        max_retries: Number of retry attempts if generation fails
-        quiet: If True, suppress progress indicators
-
-    Returns:
-        A formatted commit message string
-
-    Raises:
-        AIError: If generation fails after max_retries attempts
-    """
+def call_anthropic_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call Anthropic API directly."""
     api_key = os.getenv("ANTHROPIC_API_KEY")
     if not api_key:
-        raise AIError.model_error("ANTHROPIC_API_KEY
-
-
-
-
-
-
-
-
-
-        "
-
-
-
-
-
-
-
-
-
-
-
-        "
-
-        "
-
-
-    return _make_request_with_retry(
-        url="https://api.anthropic.com/v1/messages",
-        headers=headers,
-        payload=payload,
-        provider_name=f"Anthropic {model}",
-        max_retries=max_retries,
-        quiet=quiet,
-        response_parser=lambda r: r["content"][0]["text"],
-    )
-
-
-def _make_request_with_retry(
-    url: str,
-    headers: dict,
-    payload: dict,
-    provider_name: str,
-    max_retries: int,
-    quiet: bool,
-    response_parser: callable,
-) -> str:
-    """Make HTTP request with retry logic and common error handling."""
-    if quiet:
-        spinner = None
-    else:
-        spinner = Halo(text=f"Generating commit message with {provider_name}...", spinner="dots")
-        spinner.start()
-
-    last_error = None
-    retry_count = 0
-
-    while retry_count < max_retries:
-        try:
-            logger.debug(f"Trying with {provider_name} (attempt {retry_count + 1}/{max_retries})")
-
-            with httpx.Client(timeout=30.0) as client:
-                response = client.post(url, headers=headers, json=payload)
-                response.raise_for_status()
-
-                response_data = response.json()
-                message = response_parser(response_data)
-
-                if spinner:
-                    spinner.succeed(f"Generated commit message with {provider_name}")
-
-                return message
-
-        except Exception as e:
-            last_error = e
-            retry_count += 1
-
-            if retry_count == max_retries:
-                logger.warning(f"Error generating commit message: {e}. Giving up.")
-                break
-
-            wait_time = 2**retry_count
-            logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
-            if spinner:
-                for i in range(wait_time, 0, -1):
-                    spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
-                    time.sleep(1)
-            else:
-                time.sleep(wait_time)
-
-    if spinner:
-        spinner.fail(f"Failed to generate commit message with {provider_name}")
-
-    error_type = _classify_error(str(last_error))
-    raise AIError(
-        f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
-    )
+        raise AIError.model_error("ANTHROPIC_API_KEY not found in environment variables")
+
+    url = "https://api.anthropic.com/v1/messages"
+    headers = {"x-api-key": api_key, "anthropic-version": "2023-06-01", "content-type": "application/json"}
+
+    # Convert messages to Anthropic format
+    anthropic_messages = []
+    system_message = ""
+
+    for msg in messages:
+        if msg["role"] == "system":
+            system_message = msg["content"]
+        else:
+            anthropic_messages.append({"role": msg["role"], "content": msg["content"]})
+
+    data = {"model": model, "messages": anthropic_messages, "temperature": temperature, "max_tokens": max_tokens}
+
+    if system_message:
+        data["system"] = system_message
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+        return response_data["content"][0]["text"]
+    except httpx.HTTPStatusError as e:
+        raise AIError.model_error(f"Anthropic API error: {e.response.status_code} - {e.response.text}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Anthropic API: {str(e)}") from e