gac-1.0.1-py3-none-any.whl → gac-1.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gac/__init__.py +7 -7
- gac/__version__.py +1 -1
- gac/ai.py +41 -135
- gac/ai_utils.py +229 -0
- gac/init_cli.py +1 -0
- gac/main.py +2 -1
- gac/preprocess.py +1 -1
- gac/providers/__init__.py +17 -0
- gac/providers/anthropic.py +42 -0
- gac/providers/cerebras.py +29 -0
- gac/providers/groq.py +51 -0
- gac/providers/ollama.py +35 -0
- gac/providers/openai.py +29 -0
- gac/providers/openrouter.py +46 -0
- {gac-1.0.1.dist-info → gac-1.2.0.dist-info}/METADATA +7 -2
- gac-1.2.0.dist-info/RECORD +28 -0
- gac/ai_providers.py +0 -404
- gac-1.0.1.dist-info/RECORD +0 -21
- {gac-1.0.1.dist-info → gac-1.2.0.dist-info}/WHEEL +0 -0
- {gac-1.0.1.dist-info → gac-1.2.0.dist-info}/entry_points.txt +0 -0
- {gac-1.0.1.dist-info → gac-1.2.0.dist-info}/licenses/LICENSE +0 -0
gac/__init__.py
CHANGED
@@ -2,15 +2,14 @@
 
 from gac.__version__ import __version__
 from gac.ai import generate_commit_message
-from gac.ai_providers import (
-    anthropic_generate,
-    cerebras_generate,
-    groq_generate,
-    ollama_generate,
-    openai_generate,
-)
 from gac.git import get_staged_files, push_changes
 from gac.prompt import build_prompt, clean_commit_message
+from gac.providers.anthropic import call_anthropic_api as anthropic_generate
+from gac.providers.cerebras import call_cerebras_api as cerebras_generate
+from gac.providers.groq import call_groq_api as groq_generate
+from gac.providers.ollama import call_ollama_api as ollama_generate
+from gac.providers.openai import call_openai_api as openai_generate
+from gac.providers.openrouter import call_openrouter_api as openrouter_generate
 
 __all__ = [
     "__version__",
@@ -24,4 +23,5 @@ __all__ = [
     "groq_generate",
     "ollama_generate",
    "openai_generate",
+    "openrouter_generate",
 ]
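Note that the aliases in this hunk keep the old top-level names importable, but they now resolve to the provider modules' call_*_api functions, which take a messages list rather than a prompt string or tuple. A minimal import sketch (illustrative, not from the package):

    # Hypothetical caller using the aliased names; both resolve to
    # functions with the signature (model, messages, temperature, max_tokens).
    from gac import anthropic_generate, openrouter_generate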
gac/__version__.py
CHANGED
gac/ai.py
CHANGED
@@ -5,121 +5,22 @@ It consolidates all AI-related functionality including token counting and commit
 """
 
 import logging
-import os
-from functools import lru_cache
-from typing import Any
 
-import httpx
-import tiktoken
-
-from gac.ai_providers import (
-    anthropic_generate,
-    cerebras_generate,
-    groq_generate,
-    ollama_generate,
-    openai_generate,
-)
-from gac.constants import EnvDefaults, Utility
+from gac.ai_utils import generate_with_retries
+from gac.constants import EnvDefaults
 from gac.errors import AIError
+from gac.providers import (
+    call_anthropic_api,
+    call_cerebras_api,
+    call_groq_api,
+    call_ollama_api,
+    call_openai_api,
+    call_openrouter_api,
+)
 
 logger = logging.getLogger(__name__)
 
 
-def count_tokens(content: str | list[dict[str, str]] | dict[str, Any], model: str) -> int:
-    """Count tokens in content using the model's tokenizer."""
-    text = extract_text_content(content)
-    if not text:
-        return 0
-
-    if model.startswith("anthropic"):
-        anthropic_tokens = anthropic_count_tokens(text, model)
-        if anthropic_tokens is not None:
-            return anthropic_tokens
-        return len(text) // 4
-
-    try:
-        encoding = get_encoding(model)
-        return len(encoding.encode(text))
-    except Exception as e:
-        logger.error(f"Error counting tokens: {e}")
-        return len(text) // 4
-
-
-def anthropic_count_tokens(text: str, model: str) -> int | None:
-    """Call Anthropic's token count endpoint and return the token usage.
-
-    Returns the token count when successful, otherwise ``None`` so callers can
-    fall back to a heuristic estimate.
-    """
-    api_key = os.getenv("ANTHROPIC_API_KEY")
-    if not api_key:
-        logger.debug("ANTHROPIC_API_KEY not set; using heuristic token estimation for Anthropic model")
-        return None
-
-    model_name = model.split(":", 1)[1] if ":" in model else "claude-3-5-haiku-latest"
-    headers = {
-        "Content-Type": "application/json",
-        "x-api-key": api_key,
-        "anthropic-version": "2023-06-01",
-    }
-    payload = {
-        "model": model_name,
-        "messages": [
-            {
-                "role": "user",
-                "content": [
-                    {
-                        "type": "text",
-                        "text": text,
-                    }
-                ],
-            }
-        ],
-    }
-
-    try:
-        response = httpx.post(
-            "https://api.anthropic.com/v1/messages/count_tokens",
-            headers=headers,
-            json=payload,
-            timeout=30.0,
-        )
-        response.raise_for_status()
-        data = response.json()
-
-        if "input_tokens" in data:
-            return data["input_tokens"]
-        if "usage" in data and "input_tokens" in data["usage"]:
-            return data["usage"]["input_tokens"]
-
-        logger.warning("Unexpected response format from Anthropic token count API: %s", data)
-    except Exception as exc:
-        logger.warning("Failed to retrieve Anthropic token count via HTTP: %s", exc)
-
-    return None
-
-
-def extract_text_content(content: str | list[dict[str, str]] | dict[str, Any]) -> str:
-    """Extract text content from various input formats."""
-    if isinstance(content, str):
-        return content
-    elif isinstance(content, list):
-        return "\n".join(msg["content"] for msg in content if isinstance(msg, dict) and "content" in msg)
-    elif isinstance(content, dict) and "content" in content:
-        return content["content"]
-    return ""
-
-
-@lru_cache(maxsize=1)
-def get_encoding(model: str) -> tiktoken.Encoding:
-    """Get the appropriate encoding for a given model."""
-    model_name = model.split(":")[-1] if ":" in model else model
-    try:
-        return tiktoken.encoding_for_model(model_name)
-    except KeyError:
-        return tiktoken.get_encoding(Utility.DEFAULT_ENCODING)
-
-
 def generate_commit_message(
     model: str,
     prompt: str | tuple[str, str],
@@ -150,31 +51,36 @@ def generate_commit_message(
     >>> generate_commit_message(model, (system_prompt, user_prompt))
     'docs: Update README with installation instructions'
     """
-    ...
+    # Handle both old (string) and new (tuple) prompt formats
+    if isinstance(prompt, tuple):
+        system_prompt, user_prompt = prompt
+    else:
+        # Backward compatibility: treat string as user prompt with no system prompt
+        system_prompt = ""
+        user_prompt = prompt
+
+    # Provider functions mapping
+    provider_funcs = {
+        "anthropic": call_anthropic_api,
+        "openai": call_openai_api,
+        "groq": call_groq_api,
+        "cerebras": call_cerebras_api,
+        "ollama": call_ollama_api,
+        "openrouter": call_openrouter_api,
+    }
 
-    #
     try:
-        ...
-        elif provider == "cerebras":
-            return cerebras_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
-        elif provider == "ollama":
-            return ollama_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
-        else:
-            raise AIError.model_error(f"Unsupported provider: {provider}")
+    # Generate the commit message using centralized retry logic
+        return generate_with_retries(
+            provider_funcs=provider_funcs,
+            model=model,
+            system_prompt=system_prompt,
+            user_prompt=user_prompt,
+            temperature=temperature,
+            max_tokens=max_tokens,
+            max_retries=max_retries,
+            quiet=quiet,
+        )
+    except Exception as e:
+        logger.error(f"Failed to generate commit message: {e}")
+        raise AIError.model_error(f"Failed to generate commit message: {e}") from e
gac/ai_utils.py
ADDED
@@ -0,0 +1,229 @@
+"""Utilities for AI provider integration for gac.
+
+This module provides utility functions that support the AI provider implementations.
+"""
+
+import logging
+import os
+import time
+from functools import lru_cache
+from typing import Any
+
+import httpx
+import tiktoken
+from halo import Halo
+
+from gac.constants import Utility
+from gac.errors import AIError
+
+logger = logging.getLogger(__name__)
+
+
+def count_tokens(content: str | list[dict[str, str]] | dict[str, Any], model: str) -> int:
+    """Count tokens in content using the model's tokenizer."""
+    text = extract_text_content(content)
+    if not text:
+        return 0
+
+    if model.startswith("anthropic"):
+        anthropic_tokens = anthropic_count_tokens(text, model)
+        if anthropic_tokens is not None:
+            return anthropic_tokens
+        return len(text) // 4
+
+    try:
+        encoding = get_encoding(model)
+        return len(encoding.encode(text))
+    except Exception as e:
+        logger.error(f"Error counting tokens: {e}")
+        return len(text) // 4
+
+
+def anthropic_count_tokens(text: str, model: str) -> int | None:
+    """Call Anthropic's token count endpoint and return the token usage.
+
+    Returns the token count when successful, otherwise ``None`` so callers can
+    fall back to a heuristic estimate.
+    """
+    api_key = os.getenv("ANTHROPIC_API_KEY")
+    if not api_key:
+        logger.debug("ANTHROPIC_API_KEY not set; using heuristic token estimation for Anthropic model")
+        return None
+
+    model_name = model.split(":", 1)[1] if ":" in model else "claude-3-5-haiku-latest"
+    headers = {
+        "Content-Type": "application/json",
+        "x-api-key": api_key,
+        "anthropic-version": "2023-06-01",
+    }
+    payload = {
+        "model": model_name,
+        "messages": [
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": text,
+                    }
+                ],
+            }
+        ],
+    }
+
+    try:
+        response = httpx.post(
+            "https://api.anthropic.com/v1/messages/count_tokens",
+            headers=headers,
+            json=payload,
+            timeout=30.0,
+        )
+        response.raise_for_status()
+        data = response.json()
+
+        if "input_tokens" in data:
+            return data["input_tokens"]
+        if "usage" in data and "input_tokens" in data["usage"]:
+            return data["usage"]["input_tokens"]
+
+        logger.warning("Unexpected response format from Anthropic token count API: %s", data)
+    except Exception as exc:
+        logger.warning("Failed to retrieve Anthropic token count via HTTP: %s", exc)
+
+    return None
+
+
+def extract_text_content(content: str | list[dict[str, str]] | dict[str, Any]) -> str:
+    """Extract text content from various input formats."""
+    if isinstance(content, str):
+        return content
+    elif isinstance(content, list):
+        return "\n".join(msg["content"] for msg in content if isinstance(msg, dict) and "content" in msg)
+    elif isinstance(content, dict) and "content" in content:
+        return content["content"]
+    return ""
+
+
+@lru_cache(maxsize=1)
+def get_encoding(model: str) -> tiktoken.Encoding:
+    """Get the appropriate encoding for a given model."""
+    model_name = model.split(":")[-1] if ":" in model else model
+    try:
+        return tiktoken.encoding_for_model(model_name)
+    except KeyError:
+        return tiktoken.get_encoding(Utility.DEFAULT_ENCODING)
+
+
+def _classify_error(error_str: str) -> str:
+    """Classify error types based on error message content."""
+    error_str = error_str.lower()
+
+    if (
+        "api key" in error_str
+        or "unauthorized" in error_str
+        or "authentication" in error_str
+        or "invalid api key" in error_str
+    ):
+        return "authentication"
+    elif "timeout" in error_str or "timed out" in error_str or "request timeout" in error_str:
+        return "timeout"
+    elif "rate limit" in error_str or "too many requests" in error_str or "rate limit exceeded" in error_str:
+        return "rate_limit"
+    elif "connect" in error_str or "network" in error_str or "network connection failed" in error_str:
+        return "connection"
+    elif "model" in error_str or "not found" in error_str or "model not found" in error_str:
+        return "model"
+    else:
+        return "unknown"
+
+
+def generate_with_retries(
+    provider_funcs: dict,
+    model: str,
+    system_prompt: str,
+    user_prompt: str,
+    temperature: float,
+    max_tokens: int,
+    max_retries: int,
+    quiet: bool = False,
+) -> str:
+    """Generate content with retry logic using direct API calls."""
+    # Parse model string to determine provider and actual model
+    if ":" not in model:
+        raise AIError.model_error(f"Invalid model format. Expected 'provider:model', got '{model}'")
+
+    provider, model_name = model.split(":", 1)
+
+    # Validate provider
+    supported_providers = ["anthropic", "openai", "groq", "cerebras", "ollama", "openrouter"]
+    if provider not in supported_providers:
+        raise AIError.model_error(f"Unsupported provider: {provider}. Supported providers: {supported_providers}")
+
+    messages = [
+        {"role": "system", "content": system_prompt},
+        {"role": "user", "content": user_prompt},
+    ]
+
+    # Set up spinner
+    if quiet:
+        spinner = None
+    else:
+        spinner = Halo(text=f"Generating commit message with {provider} {model_name}...", spinner="dots")
+        spinner.start()
+
+    last_exception = None
+
+    for attempt in range(max_retries):
+        try:
+            if not quiet and attempt > 0:
+                if spinner:
+                    spinner.text = f"Retry {attempt + 1}/{max_retries} with {provider} {model_name}..."
+                logger.info(f"Retry attempt {attempt + 1}/{max_retries}")
+
+            # Call the appropriate provider function
+            provider_func = provider_funcs.get(provider)
+            if not provider_func:
+                raise AIError.model_error(f"Provider function not found for: {provider}")
+
+            content = provider_func(model=model_name, messages=messages, temperature=temperature, max_tokens=max_tokens)
+
+            if spinner:
+                spinner.succeed(f"Generated commit message with {provider} {model_name}")
+
+            if content:
+                return content.strip()
+            else:
+                raise AIError.model_error("Empty response from AI model")
+
+        except Exception as e:
+            last_exception = e
+            error_type = _classify_error(str(e))
+
+            if error_type in ["authentication", "model"]:
+                # Don't retry these errors
+                if spinner:
+                    spinner.fail(f"Failed to generate commit message with {provider} {model_name}")
+                raise AIError.authentication_error(f"AI generation failed: {str(e)}") from e
+
+            if attempt < max_retries - 1:
+                # Exponential backoff
+                wait_time = 2**attempt
+                if not quiet:
+                    logger.warning(f"AI generation failed (attempt {attempt + 1}), retrying in {wait_time}s: {str(e)}")
+
+                if spinner:
+                    for i in range(wait_time, 0, -1):
+                        spinner.text = f"Retry {attempt + 1}/{max_retries} in {i}s..."
+                        time.sleep(1)
+                else:
+                    time.sleep(wait_time)
+            else:
+                logger.error(f"AI generation failed after {max_retries} attempts: {str(e)}")
+
+    if spinner:
+        spinner.fail(f"Failed to generate commit message with {provider} {model_name}")
+
+    # If we get here, all retries failed
+    raise AIError.model_error(
+        f"AI generation failed after {max_retries} attempts: {str(last_exception)}"
+    ) from last_exception
gac/init_cli.py
CHANGED
@@ -24,6 +24,7 @@ def init() -> None:
         ("Cerebras", "qwen-3-coder-480b"),
         ("Groq", "meta-llama/llama-4-maverick-17b-128e-instruct"),
         ("Ollama", "gemma3"),
+        ("OpenRouter", "openrouter/auto"),
         ("OpenAI", "gpt-4.1-mini"),
     ]
     provider_names = [p[0] for p in providers]
gac/main.py
CHANGED
@@ -10,7 +10,8 @@ import click
 from rich.console import Console
 from rich.panel import Panel
 
-from gac.ai import ...
+from gac.ai import generate_commit_message
+from gac.ai_utils import count_tokens
 from gac.config import load_config
 from gac.constants import EnvDefaults, Utility
 from gac.errors import AIError, GitError, handle_error
gac/preprocess.py
CHANGED
gac/providers/__init__.py
ADDED
@@ -0,0 +1,17 @@
+"""AI provider implementations for commit message generation."""
+
+from .anthropic import call_anthropic_api
+from .cerebras import call_cerebras_api
+from .groq import call_groq_api
+from .ollama import call_ollama_api
+from .openai import call_openai_api
+from .openrouter import call_openrouter_api
+
+__all__ = [
+    "call_anthropic_api",
+    "call_cerebras_api",
+    "call_groq_api",
+    "call_ollama_api",
+    "call_openai_api",
+    "call_openrouter_api",
+]
gac/providers/anthropic.py
ADDED
@@ -0,0 +1,42 @@
+"""Anthropic AI provider implementation."""
+
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_anthropic_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call Anthropic API directly."""
+    api_key = os.getenv("ANTHROPIC_API_KEY")
+    if not api_key:
+        raise AIError.model_error("ANTHROPIC_API_KEY not found in environment variables")
+
+    url = "https://api.anthropic.com/v1/messages"
+    headers = {"x-api-key": api_key, "anthropic-version": "2023-06-01", "content-type": "application/json"}
+
+    # Convert messages to Anthropic format
+    anthropic_messages = []
+    system_message = ""
+
+    for msg in messages:
+        if msg["role"] == "system":
+            system_message = msg["content"]
+        else:
+            anthropic_messages.append({"role": msg["role"], "content": msg["content"]})
+
+    data = {"model": model, "messages": anthropic_messages, "temperature": temperature, "max_tokens": max_tokens}
+
+    if system_message:
+        data["system"] = system_message
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+        return response_data["content"][0]["text"]
+    except httpx.HTTPStatusError as e:
+        raise AIError.model_error(f"Anthropic API error: {e.response.status_code} - {e.response.text}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Anthropic API: {str(e)}") from e
gac/providers/cerebras.py
ADDED
@@ -0,0 +1,29 @@
+"""Cerebras AI provider implementation."""
+
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_cerebras_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call Cerebras API directly."""
+    api_key = os.getenv("CEREBRAS_API_KEY")
+    if not api_key:
+        raise AIError.model_error("CEREBRAS_API_KEY not found in environment variables")
+
+    url = "https://api.cerebras.ai/v1/chat/completions"
+    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+    data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+        return response_data["choices"][0]["message"]["content"]
+    except httpx.HTTPStatusError as e:
+        raise AIError.model_error(f"Cerebras API error: {e.response.status_code} - {e.response.text}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Cerebras API: {str(e)}") from e
gac/providers/groq.py
ADDED
@@ -0,0 +1,51 @@
+"""Groq API provider for gac."""
+
+import logging
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+logger = logging.getLogger(__name__)
+
+
+def call_groq_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call Groq API directly."""
+    api_key = os.getenv("GROQ_API_KEY")
+    if not api_key:
+        raise AIError.model_error("GROQ_API_KEY not found in environment variables")
+
+    url = "https://api.groq.com/openai/v1/chat/completions"
+    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+    data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+
+        # Debug logging to understand response structure
+        logger.debug(f"Groq API response: {response_data}")
+
+        # Handle different response formats
+        if "choices" in response_data and len(response_data["choices"]) > 0:
+            choice = response_data["choices"][0]
+            if "message" in choice and "content" in choice["message"]:
+                content = choice["message"]["content"]
+                logger.debug(f"Found content in message.content: {content}")
+                return content
+            elif "text" in choice:
+                content = choice["text"]
+                logger.debug(f"Found content in choice.text: {content}")
+                return content
+            else:
+                logger.debug(f"Choice structure: {choice}")
+
+        # If we can't find content in the expected places, raise an error
+        raise AIError.model_error(f"Unexpected response format from Groq API: {response_data}")
+    except httpx.HTTPStatusError as e:
+        raise AIError.model_error(f"Groq API error: {e.response.status_code} - {e.response.text}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Groq API: {str(e)}") from e
gac/providers/ollama.py
ADDED
@@ -0,0 +1,35 @@
+"""Ollama AI provider implementation."""
+
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_ollama_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call Ollama API directly."""
+    api_url = os.getenv("OLLAMA_API_URL", "http://localhost:11434")
+
+    url = f"{api_url.rstrip('/')}/api/chat"
+    data = {"model": model, "messages": messages, "temperature": temperature, "stream": False}
+
+    try:
+        response = httpx.post(url, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+
+        # Handle different response formats from Ollama
+        if "message" in response_data and "content" in response_data["message"]:
+            return response_data["message"]["content"]
+        elif "response" in response_data:
+            return response_data["response"]
+        else:
+            # Fallback: return the full response as string
+            return str(response_data)
+    except httpx.ConnectError as e:
+        raise AIError.connection_error(f"Ollama connection failed. Make sure Ollama is running: {str(e)}") from e
+    except httpx.HTTPStatusError as e:
+        raise AIError.model_error(f"Ollama API error: {e.response.status_code} - {e.response.text}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Ollama API: {str(e)}") from e
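A call sketch for the Ollama provider above, assuming a locally running Ollama server (the model and message are placeholders):

    import os

    from gac.providers.ollama import call_ollama_api

    os.environ["OLLAMA_API_URL"] = "http://localhost:11434"  # the default read above
    reply = call_ollama_api(
        model="gemma3",
        messages=[{"role": "user", "content": "Say hello"}],  # placeholder
        temperature=0.7,  # sent at the top level of the request body in this hunk
        max_tokens=256,   # accepted by the signature but not forwarded in the payload above
    )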
gac/providers/openai.py
ADDED
@@ -0,0 +1,29 @@
+"""OpenAI API provider for gac."""
+
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_openai_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call OpenAI API directly."""
+    api_key = os.getenv("OPENAI_API_KEY")
+    if not api_key:
+        raise AIError.model_error("OPENAI_API_KEY not found in environment variables")
+
+    url = "https://api.openai.com/v1/chat/completions"
+    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+    data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+        return response_data["choices"][0]["message"]["content"]
+    except httpx.HTTPStatusError as e:
+        raise AIError.model_error(f"OpenAI API error: {e.response.status_code} - {e.response.text}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling OpenAI API: {str(e)}") from e
gac/providers/openrouter.py
ADDED
@@ -0,0 +1,46 @@
+"""OpenRouter API provider for gac."""
+
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_openrouter_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call OpenRouter API directly."""
+    api_key = os.getenv("OPENROUTER_API_KEY")
+    if not api_key:
+        raise AIError.model_error("OPENROUTER_API_KEY environment variable not set")
+
+    url = "https://openrouter.ai/api/v1/chat/completions"
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {api_key}",
+    }
+
+    # Add optional headers if environment variables are set
+    site_url = os.getenv("OPENROUTER_SITE_URL")
+    if site_url:
+        headers["HTTP-Referer"] = site_url
+
+    site_name = os.getenv("OPENROUTER_SITE_NAME")
+    if site_name:
+        headers["X-Title"] = site_name
+
+    data = {
+        "model": model,
+        "messages": messages,
+        "temperature": temperature,
+        "max_tokens": max_tokens,
+    }
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+        return response_data["choices"][0]["message"]["content"]
+    except httpx.HTTPStatusError as e:
+        raise AIError.model_error(f"OpenRouter API error: {e.response.status_code} - {e.response.text}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling OpenRouter API: {str(e)}") from e
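A call sketch for the OpenRouter provider above, showing how the optional attribution headers are driven by environment variables (the key, site values, and prompt are placeholders):

    import os

    from gac.providers.openrouter import call_openrouter_api

    os.environ["OPENROUTER_API_KEY"] = "sk-or-..."             # placeholder
    os.environ["OPENROUTER_SITE_URL"] = "https://example.com"  # sent as HTTP-Referer
    os.environ["OPENROUTER_SITE_NAME"] = "Example App"         # sent as X-Title
    msg = call_openrouter_api(
        model="openrouter/auto",
        messages=[{"role": "user", "content": "Write a one-line commit message."}],
        temperature=0.7,  # placeholder values
        max_tokens=256,
    )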
{gac-1.0.1.dist-info → gac-1.2.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: gac
-Version: 1.0.1
+Version: 1.2.0
 Summary: AI-powered Git commit message generator with multi-provider support
 Project-URL: Homepage, https://github.com/cellwebb/gac
 Project-URL: Documentation, https://github.com/cellwebb/gac#readme
@@ -56,7 +56,7 @@ Description-Content-Type: text/markdown
 
 - **LLM-Powered Commit Messages:** Automatically generates clear, concise, and context-aware commit messages using large language models.
 - **Deep Contextual Analysis:** Understands your code by analyzing staged changes, repository structure, and recent commit history to provide highly relevant suggestions.
-- **Multi-Provider & Model Support:** Flexibly works with various leading AI providers (like Anthropic, Cerebras, Groq, OpenAI) and models, easily configured through an interactive setup or environment variables.
+- **Multi-Provider & Model Support:** Flexibly works with various leading AI providers (like Anthropic, Cerebras, Groq, OpenRouter, OpenAI) and models, easily configured through an interactive setup or environment variables.
 - **Seamless Git Workflow:** Integrates smoothly into your existing Git routine as a simple drop-in replacement for `git commit`.
 - **Extensive Customization:** Tailor commit messages to your needs with a rich set of flags, including one-liners (`-o`), AI hints (`-h`), scope inference (`-s`), and specific model selection (`-m`).
 - **Streamlined Workflow Commands:** Boost your productivity with convenient options to stage all changes (`-a`), auto-confirm commits (`-y`), and push to your remote repository (`-p`) in a single step.
@@ -136,6 +136,11 @@ Example `$HOME/.gac.env` output:
 ```env
 GAC_MODEL=anthropic:claude-3-5-haiku-latest
 ANTHROPIC_API_KEY=your_anthropic_key_here
+# Optional: configure OpenRouter
+# GAC_MODEL=openrouter:openrouter/auto
+# OPENROUTER_API_KEY=your_openrouter_key_here
+# OPENROUTER_SITE_URL=https://example.com
+# OPENROUTER_SITE_NAME=Example App
 ```
 
 Alternatively, you can configure `gac` using environment variables or by manually creating/editing the configuration file.
gac-1.2.0.dist-info/RECORD
ADDED
@@ -0,0 +1,28 @@
+gac/__init__.py,sha256=HFWgSVNbTAFhgetCRWI1WrtyE7zC7IHvoBOrfDGUurM,989
+gac/__version__.py,sha256=_cfu33DyI3jZln3bUKxhnDn1AeCj66_yLEqy9IJASqs,66
+gac/ai.py,sha256=RcZKXiyx9Wll2e-dRx0jNzQPzojVYE7OaSEDclE2MKc,2979
+gac/ai_utils.py,sha256=p63lxYp4AqzBgbYkYyNUKLecIszTVvBp4Wkm2hKlMlc,7854
+gac/cli.py,sha256=eQS8S7v6p0CfN9wtr239ujYGTi9rKl-KV7STX2U-C3w,4581
+gac/config.py,sha256=wSgEDjtis7Vk1pv5VPvYmJyD9-tymDS6GiUHjnCMbIM,1486
+gac/config_cli.py,sha256=v9nFHZO1RvK9fzHyuUS6SG-BCLHMsdOMDwWamBhVVh4,1608
+gac/constants.py,sha256=MAxdASGncfZY1TdKGdhJZ0wvTBEU3gTN6KEdw8n3Bd8,4844
+gac/diff_cli.py,sha256=wnVQ9OFGnM0d2Pj9WVjWbo0jxqIuRHVAwmb8wU9Pa3E,5676
+gac/errors.py,sha256=3vIRMQ2QF3sP9_rPfXAFuu5ZSjIVX4FxM-FAuiR8N-8,7416
+gac/git.py,sha256=MS2m4fv8h4mau1djFG1aje9NXTmkGsjPO9w18LqNGX0,6031
+gac/init_cli.py,sha256=e4z9-4NhoUn2DUyApIru8JR-W7HuNq2VeeXkR9aXHLo,1868
+gac/main.py,sha256=POay7l6ihm3oF9ajGWx9cA40Pu-NVz5x_OzQOYPDoX8,12011
+gac/preprocess.py,sha256=krrLPHsccYMdn_YAtUrppBJIoRgevxGWusDwhE40LEo,15366
+gac/prompt.py,sha256=_fv24XU3DZE_S72vcdUYnNkmy-_KXnr1Vlc-9okop7E,17263
+gac/utils.py,sha256=W3ladtmsH01MNLdckQYTzYrYbTGEdzCKI36he9C-y_E,3945
+gac/providers/__init__.py,sha256=iGwZmV-cFqL3AeFE0vc6KpHwm-RLWcVSU17c7IvJg2s,456
+gac/providers/anthropic.py,sha256=esf6pq6nRdqD0mpKz_IQNXmXe5WnkoSA2b1isrrRB4o,1514
+gac/providers/cerebras.py,sha256=eE9lAjEzrATIo941vv97I2DSmpnXYBCJ9HkVIb-6Whg,1130
+gac/providers/groq.py,sha256=EPivjTg3TUqynBofnatlIxKzFTpLPP4psVb562Dsx5o,2040
+gac/providers/ollama.py,sha256=Bp94DvortQssDhekuNdJ7fKLeWpWASYXSssJNCuGszg,1383
+gac/providers/openai.py,sha256=1l-Wu7ETXXaJ7cNB3OD5ivf4_72iIEP9bPFMQst8JWI,1109
+gac/providers/openrouter.py,sha256=Vs0MXfv9KCldfEUD2roTwcXqs89tgE3ndNqRKoqdJQs,1473
+gac-1.2.0.dist-info/METADATA,sha256=fbK0j24cpsMWVJXnybrXyQ6MzMS791bpdIKu2vvG7-c,8558
+gac-1.2.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+gac-1.2.0.dist-info/entry_points.txt,sha256=tdjN-XMmcWfL92swuRAjT62bFLOAwk9bTMRLGP5Z4aI,36
+gac-1.2.0.dist-info/licenses/LICENSE,sha256=vOab37NouL1PNs5BswnPayrMCqaN2sqLfMQfqPDrpZg,1103
+gac-1.2.0.dist-info/RECORD,,
gac/ai_providers.py
DELETED
@@ -1,404 +0,0 @@
-"""Direct HTTP API calls to AI providers using httpx.
-
-This module provides functions for making direct HTTP API calls to various AI providers.
-Each provider has its own function to generate commit messages using only httpx.
-"""
-
-import logging
-import os
-import time
-
-import httpx
-from halo import Halo
-
-from gac.constants import EnvDefaults
-from gac.errors import AIError
-
-logger = logging.getLogger(__name__)
-
-
-def _classify_error(error_str: str) -> str:
-    """Classify error types based on error message content."""
-    error_str = error_str.lower()
-
-    if (
-        "api key" in error_str
-        or "unauthorized" in error_str
-        or "authentication" in error_str
-        or "invalid api key" in error_str
-    ):
-        return "authentication"
-    elif "timeout" in error_str or "timed out" in error_str or "request timeout" in error_str:
-        return "timeout"
-    elif "rate limit" in error_str or "too many requests" in error_str or "rate limit exceeded" in error_str:
-        return "rate_limit"
-    elif "connect" in error_str or "network" in error_str or "network connection failed" in error_str:
-        return "connection"
-    elif "model" in error_str or "not found" in error_str or "model not found" in error_str:
-        return "model"
-    else:
-        return "unknown"
-
-
-def anthropic_generate(
-    model: str,
-    prompt: str | tuple[str, str],
-    temperature: float = EnvDefaults.TEMPERATURE,
-    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
-    max_retries: int = EnvDefaults.MAX_RETRIES,
-    quiet: bool = False,
-) -> str:
-    """Generate commit message using Anthropic API with retry logic.
-
-    Args:
-        model: The model name (e.g., 'claude-3-5-haiku-latest', 'claude-3-opus-latest')
-        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
-        temperature: Controls randomness (0.0-1.0)
-        max_tokens: Maximum tokens in the response
-        max_retries: Number of retry attempts if generation fails
-        quiet: If True, suppress progress indicators
-
-    Returns:
-        A formatted commit message string
-
-    Raises:
-        AIError: If generation fails after max_retries attempts
-    """
-    api_key = os.getenv("ANTHROPIC_API_KEY")
-    if not api_key:
-        raise AIError.model_error("ANTHROPIC_API_KEY environment variable not set")
-
-    # Handle both old (string) and new (tuple) prompt formats
-    if isinstance(prompt, tuple):
-        system_prompt, user_prompt = prompt
-        messages = [{"role": "user", "content": user_prompt}]
-        payload = {
-            "model": model,
-            "messages": messages,
-            "system": system_prompt,
-            "temperature": temperature,
-            "max_tokens": max_tokens,
-        }
-    else:
-        # Backward compatibility: treat string as user prompt
-        messages = [{"role": "user", "content": prompt}]
-        payload = {
-            "model": model,
-            "messages": messages,
-            "temperature": temperature,
-            "max_tokens": max_tokens,
-        }
-
-    headers = {
-        "Content-Type": "application/json",
-        "x-api-key": api_key,
-        "anthropic-version": "2023-06-01",
-    }
-
-    return _make_request_with_retry(
-        url="https://api.anthropic.com/v1/messages",
-        headers=headers,
-        payload=payload,
-        provider_name=f"Anthropic {model}",
-        max_retries=max_retries,
-        quiet=quiet,
-        response_parser=lambda r: r["content"][0]["text"],
-    )
-
-
-def cerebras_generate(
-    model: str,
-    prompt: str | tuple[str, str],
-    temperature: float = EnvDefaults.TEMPERATURE,
-    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
-    max_retries: int = EnvDefaults.MAX_RETRIES,
-    quiet: bool = False,
-) -> str:
-    """Generate commit message using Cerebras API with retry logic.
-
-    Args:
-        model: The model name (e.g., 'llama3.1-8b', 'llama3.1-70b')
-        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
-        temperature: Controls randomness (0.0-1.0)
-        max_tokens: Maximum tokens in the response
-        max_retries: Number of retry attempts if generation fails
-        quiet: If True, suppress progress indicators
-
-    Returns:
-        A formatted commit message string
-
-    Raises:
-        AIError: If generation fails after max_retries attempts
-    """
-    api_key = os.getenv("CEREBRAS_API_KEY")
-    if not api_key:
-        raise AIError.model_error("CEREBRAS_API_KEY environment variable not set")
-
-    # Handle both old (string) and new (tuple) prompt formats
-    if isinstance(prompt, tuple):
-        system_prompt, user_prompt = prompt
-        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
-    else:
-        # Backward compatibility: treat string as user prompt
-        messages = [{"role": "user", "content": prompt}]
-
-    payload = {
-        "model": model,
-        "messages": messages,
-        "temperature": temperature,
-        "max_tokens": max_tokens,
-    }
-
-    headers = {
-        "Content-Type": "application/json",
-        "Authorization": f"Bearer {api_key}",
-    }
-
-    return _make_request_with_retry(
-        url="https://api.cerebras.ai/v1/chat/completions",
-        headers=headers,
-        payload=payload,
-        provider_name=f"Cerebras {model}",
-        max_retries=max_retries,
-        quiet=quiet,
-        response_parser=lambda r: r["choices"][0]["message"]["content"],
-    )
-
-
-def groq_generate(
-    model: str,
-    prompt: str | tuple[str, str],
-    temperature: float = EnvDefaults.TEMPERATURE,
-    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
-    max_retries: int = EnvDefaults.MAX_RETRIES,
-    quiet: bool = False,
-) -> str:
-    """Generate commit message using Groq API with retry logic.
-
-    Args:
-        model: The model name (e.g., 'llama3-8b-8192', 'llama3-70b-8192')
-        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
-        temperature: Controls randomness (0.0-1.0)
-        max_tokens: Maximum tokens in the response
-        max_retries: Number of retry attempts if generation fails
-        quiet: If True, suppress progress indicators
-
-    Returns:
-        A formatted commit message string
-
-    Raises:
-        AIError: If generation fails after max_retries attempts
-    """
-    api_key = os.getenv("GROQ_API_KEY")
-    if not api_key:
-        raise AIError.model_error("GROQ_API_KEY environment variable not set")
-
-    # Handle both old (string) and new (tuple) prompt formats
-    if isinstance(prompt, tuple):
-        system_prompt, user_prompt = prompt
-        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
-    else:
-        # Backward compatibility: treat string as user prompt
-        messages = [{"role": "user", "content": prompt}]
-
-    payload = {
-        "model": model,
-        "messages": messages,
-        "temperature": temperature,
-        "max_tokens": max_tokens,
-    }
-
-    headers = {
-        "Content-Type": "application/json",
-        "Authorization": f"Bearer {api_key}",
-    }
-
-    return _make_request_with_retry(
-        url="https://api.groq.com/openai/v1/chat/completions",
-        headers=headers,
-        payload=payload,
-        provider_name=f"Groq {model}",
-        max_retries=max_retries,
-        quiet=quiet,
-        response_parser=lambda r: r["choices"][0]["message"]["content"],
-    )
-
-
-def ollama_generate(
-    model: str,
-    prompt: str | tuple[str, str],
-    temperature: float = EnvDefaults.TEMPERATURE,
-    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
-    max_retries: int = EnvDefaults.MAX_RETRIES,
-    quiet: bool = False,
-) -> str:
-    """Generate commit message using Ollama API with retry logic.
-
-    Args:
-        model: The model name (e.g., 'llama3', 'mistral')
-        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
-        temperature: Controls randomness (0.0-1.0)
-        max_tokens: Maximum tokens in the response (note: Ollama uses 'num_predict')
-        max_retries: Number of retry attempts if generation fails
-        quiet: If True, suppress progress indicators
-
-    Returns:
-        A formatted commit message string
-
-    Raises:
-        AIError: If generation fails after max_retries attempts
-    """
-    # Handle both old (string) and new (tuple) prompt formats
-    if isinstance(prompt, tuple):
-        system_prompt, user_prompt = prompt
-        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
-    else:
-        # Backward compatibility: treat string as user prompt
-        messages = [{"role": "user", "content": prompt}]
-
-    payload = {
-        "model": model,
-        "messages": messages,
-        "stream": False,
-        "options": {
-            "temperature": temperature,
-            "num_predict": max_tokens,
-        },
-    }
-
-    headers = {
-        "Content-Type": "application/json",
-    }
-
-    # Ollama typically runs locally on port 11434
-    ollama_url = os.getenv("OLLAMA_URL", "http://localhost:11434")
-
-    return _make_request_with_retry(
-        url=f"{ollama_url}/api/chat",
-        headers=headers,
-        payload=payload,
-        provider_name=f"Ollama {model}",
-        max_retries=max_retries,
-        quiet=quiet,
-        response_parser=lambda r: r["message"]["content"],
-    )
-
-
-def openai_generate(
-    model: str,
-    prompt: str | tuple[str, str],
-    temperature: float = EnvDefaults.TEMPERATURE,
-    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
-    max_retries: int = EnvDefaults.MAX_RETRIES,
-    quiet: bool = False,
-) -> str:
-    """Generate commit message using OpenAI API with retry logic.
-
-    Args:
-        model: The model name (e.g., 'gpt-4', 'gpt-3.5-turbo')
-        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
-        temperature: Controls randomness (0.0-1.0)
-        max_tokens: Maximum tokens in the response
-        max_retries: Number of retry attempts if generation fails
-        quiet: If True, suppress progress indicators
-
-    Returns:
-        A formatted commit message string
-
-    Raises:
-        AIError: If generation fails after max_retries attempts
-    """
-    api_key = os.getenv("OPENAI_API_KEY")
-    if not api_key:
-        raise AIError.model_error("OPENAI_API_KEY environment variable not set")
-
-    # Handle both old (string) and new (tuple) prompt formats
-    if isinstance(prompt, tuple):
-        system_prompt, user_prompt = prompt
-        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
-    else:
-        # Backward compatibility: treat string as user prompt
-        messages = [{"role": "user", "content": prompt}]
-
-    payload = {
-        "model": model,
-        "messages": messages,
-        "temperature": temperature,
-        "max_tokens": max_tokens,
-    }
-
-    headers = {
-        "Content-Type": "application/json",
-        "Authorization": f"Bearer {api_key}",
-    }
-
-    return _make_request_with_retry(
-        url="https://api.openai.com/v1/chat/completions",
-        headers=headers,
-        payload=payload,
-        provider_name=f"OpenAI {model}",
-        max_retries=max_retries,
-        quiet=quiet,
-        response_parser=lambda r: r["choices"][0]["message"]["content"],
-    )
-
-
-def _make_request_with_retry(
-    url: str,
-    headers: dict,
-    payload: dict,
-    provider_name: str,
-    max_retries: int,
-    quiet: bool,
-    response_parser: callable,
-) -> str:
-    """Make HTTP request with retry logic and common error handling."""
-    if quiet:
-        spinner = None
-    else:
-        spinner = Halo(text=f"Generating commit message with {provider_name}...", spinner="dots")
-        spinner.start()
-
-    last_error = None
-    retry_count = 0
-
-    while retry_count < max_retries:
-        try:
-            logger.debug(f"Trying with {provider_name} (attempt {retry_count + 1}/{max_retries})")
-
-            with httpx.Client(timeout=30.0) as client:
-                response = client.post(url, headers=headers, json=payload)
-                response.raise_for_status()
-
-                response_data = response.json()
-                message = response_parser(response_data)
-
-                if spinner:
-                    spinner.succeed(f"Generated commit message with {provider_name}")
-
-                return message
-
-        except Exception as e:
-            last_error = e
-            retry_count += 1
-
-            if retry_count == max_retries:
-                logger.warning(f"Error generating commit message: {e}. Giving up.")
-                break
-
-            wait_time = 2**retry_count
-            logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
-            if spinner:
-                for i in range(wait_time, 0, -1):
-                    spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
-                    time.sleep(1)
-            else:
-                time.sleep(wait_time)
-
-    if spinner:
-        spinner.fail(f"Failed to generate commit message with {provider_name}")
-
-    error_type = _classify_error(str(last_error))
-    raise AIError(
-        f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
-    )
gac-1.0.1.dist-info/RECORD
DELETED
@@ -1,21 +0,0 @@
-gac/__init__.py,sha256=T3KAW47ZmvB5AozG_uL92ryBYgp-2LNEztBaxaY3dJE,674
-gac/__version__.py,sha256=8Yc4NMnplKKb2pLEFCgKeKRVX9Q-31V40n444l7Ao5M,66
-gac/ai.py,sha256=E0vfkWyqYvNJFAbOYvHG-tnwcQakAYm34oHMbKM5GIk,6267
-gac/ai_providers.py,sha256=QiVSspn0cauxl7m1Chn6nw1kAO1ByAuPiQqZWyZZCys,13210
-gac/cli.py,sha256=eQS8S7v6p0CfN9wtr239ujYGTi9rKl-KV7STX2U-C3w,4581
-gac/config.py,sha256=wSgEDjtis7Vk1pv5VPvYmJyD9-tymDS6GiUHjnCMbIM,1486
-gac/config_cli.py,sha256=v9nFHZO1RvK9fzHyuUS6SG-BCLHMsdOMDwWamBhVVh4,1608
-gac/constants.py,sha256=MAxdASGncfZY1TdKGdhJZ0wvTBEU3gTN6KEdw8n3Bd8,4844
-gac/diff_cli.py,sha256=wnVQ9OFGnM0d2Pj9WVjWbo0jxqIuRHVAwmb8wU9Pa3E,5676
-gac/errors.py,sha256=3vIRMQ2QF3sP9_rPfXAFuu5ZSjIVX4FxM-FAuiR8N-8,7416
-gac/git.py,sha256=MS2m4fv8h4mau1djFG1aje9NXTmkGsjPO9w18LqNGX0,6031
-gac/init_cli.py,sha256=aNllguofrcLn0ML9tzLVWFkPbwlAvCM9m7undHhMLEo,1825
-gac/main.py,sha256=WI7mxIbL05neQr1VfoopOeZKIonwpwFeZCt_4VFewPY,11987
-gac/preprocess.py,sha256=4igtZ9OTHgTpqwlJmbcGaqzmdD0HHCZJwsZ9eG118Gk,15360
-gac/prompt.py,sha256=_fv24XU3DZE_S72vcdUYnNkmy-_KXnr1Vlc-9okop7E,17263
-gac/utils.py,sha256=W3ladtmsH01MNLdckQYTzYrYbTGEdzCKI36he9C-y_E,3945
-gac-1.0.1.dist-info/METADATA,sha256=iEfp3b1Mx12iwon4aD6QDE8foNYrn-dN9yCmwhuKQTU,8351
-gac-1.0.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-gac-1.0.1.dist-info/entry_points.txt,sha256=tdjN-XMmcWfL92swuRAjT62bFLOAwk9bTMRLGP5Z4aI,36
-gac-1.0.1.dist-info/licenses/LICENSE,sha256=vOab37NouL1PNs5BswnPayrMCqaN2sqLfMQfqPDrpZg,1103
-gac-1.0.1.dist-info/RECORD,,
{gac-1.0.1.dist-info → gac-1.2.0.dist-info}/WHEEL
File without changes
{gac-1.0.1.dist-info → gac-1.2.0.dist-info}/entry_points.txt
File without changes
{gac-1.0.1.dist-info → gac-1.2.0.dist-info}/licenses/LICENSE
File without changes