gac 1.0.1__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gac/__init__.py +7 -7
- gac/__version__.py +1 -1
- gac/ai.py +9 -109
- gac/ai_utils.py +134 -0
- gac/init_cli.py +1 -0
- gac/main.py +2 -1
- gac/preprocess.py +1 -1
- gac/providers/__init__.py +1 -0
- gac/providers/anthropic.py +141 -0
- gac/providers/cerebras.py +134 -0
- gac/providers/groq.py +134 -0
- gac/providers/ollama.py +135 -0
- gac/providers/openai.py +134 -0
- gac/providers/openrouter.py +125 -0
- {gac-1.0.1.dist-info → gac-1.1.0.dist-info}/METADATA +7 -2
- gac-1.1.0.dist-info/RECORD +28 -0
- gac/ai_providers.py +0 -404
- gac-1.0.1.dist-info/RECORD +0 -21
- {gac-1.0.1.dist-info → gac-1.1.0.dist-info}/WHEEL +0 -0
- {gac-1.0.1.dist-info → gac-1.1.0.dist-info}/entry_points.txt +0 -0
- {gac-1.0.1.dist-info → gac-1.1.0.dist-info}/licenses/LICENSE +0 -0
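All six provider modules listed above expose a `generate` function with the same signature, which makes routing by the provider prefix of the configured model (e.g. `anthropic:claude-3-5-haiku-latest`) straightforward. The actual wiring lives in `gac/ai.py` and is not shown in this diff; the sketch below is a hypothetical illustration of that dispatch pattern, with `generate_commit_message` and `SUPPORTED_PROVIDERS` invented for the example:

```python
# Hypothetical dispatch sketch -- not gac's actual code, which lives in gac/ai.py.
import importlib

SUPPORTED_PROVIDERS = {"anthropic", "cerebras", "groq", "ollama", "openai", "openrouter"}


def generate_commit_message(model_spec: str, prompt: tuple[str, str]) -> str:
    """Route a 'provider:model' spec (e.g. 'groq:llama3-8b-8192') to its provider module."""
    provider, _, model = model_spec.partition(":")
    if provider not in SUPPORTED_PROVIDERS:
        raise ValueError(f"Unknown provider: {provider!r}")
    module = importlib.import_module(f"gac.providers.{provider}")
    # Every provider module exposes generate(model, prompt, ...) returning a str.
    return module.generate(model=model, prompt=prompt)
```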
gac/providers/groq.py
ADDED
@@ -0,0 +1,134 @@
+"""Groq API provider for gac."""
+
+import logging
+import os
+import time
+
+import httpx
+from halo import Halo
+
+from gac.ai_utils import _classify_error
+from gac.constants import EnvDefaults
+from gac.errors import AIError
+
+logger = logging.getLogger(__name__)
+
+
+def generate(
+    model: str,
+    prompt: str | tuple[str, str],
+    temperature: float = EnvDefaults.TEMPERATURE,
+    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+    max_retries: int = EnvDefaults.MAX_RETRIES,
+    quiet: bool = False,
+) -> str:
+    """Generate commit message using Groq API with retry logic.
+
+    Args:
+        model: The model name (e.g., 'llama3-8b-8192', 'llama3-70b-8192')
+        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+        temperature: Controls randomness (0.0-1.0)
+        max_tokens: Maximum tokens in the response
+        max_retries: Number of retry attempts if generation fails
+        quiet: If True, suppress progress indicators
+
+    Returns:
+        A formatted commit message string
+
+    Raises:
+        AIError: If generation fails after max_retries attempts
+    """
+    api_key = os.getenv("GROQ_API_KEY")
+    if not api_key:
+        raise AIError.model_error("GROQ_API_KEY environment variable not set")
+
+    # Handle both old (string) and new (tuple) prompt formats
+    if isinstance(prompt, tuple):
+        system_prompt, user_prompt = prompt
+        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+    else:
+        # Backward compatibility: treat string as user prompt
+        messages = [{"role": "user", "content": prompt}]
+
+    payload = {
+        "model": model,
+        "messages": messages,
+        "temperature": temperature,
+        "max_tokens": max_tokens,
+    }
+
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {api_key}",
+    }
+
+    return _make_request_with_retry(
+        url="https://api.groq.com/openai/v1/chat/completions",
+        headers=headers,
+        payload=payload,
+        provider_name=f"Groq {model}",
+        max_retries=max_retries,
+        quiet=quiet,
+        response_parser=lambda r: r["choices"][0]["message"]["content"],
+    )
+
+
+def _make_request_with_retry(
+    url: str,
+    headers: dict,
+    payload: dict,
+    provider_name: str,
+    max_retries: int,
+    quiet: bool,
+    response_parser: callable,
+) -> str:
+    """Make HTTP request with retry logic and common error handling."""
+    if quiet:
+        spinner = None
+    else:
+        spinner = Halo(text=f"Generating commit message with {provider_name}...", spinner="dots")
+        spinner.start()
+
+    last_error = None
+    retry_count = 0
+
+    while retry_count < max_retries:
+        try:
+            logger.debug(f"Trying with {provider_name} (attempt {retry_count + 1}/{max_retries})")
+
+            with httpx.Client(timeout=30.0) as client:
+                response = client.post(url, headers=headers, json=payload)
+                response.raise_for_status()
+
+            response_data = response.json()
+            message = response_parser(response_data)
+
+            if spinner:
+                spinner.succeed(f"Generated commit message with {provider_name}")
+
+            return message
+
+        except Exception as e:
+            last_error = e
+            retry_count += 1
+
+            if retry_count == max_retries:
+                logger.warning(f"Error generating commit message: {e}. Giving up.")
+                break
+
+            wait_time = 2**retry_count
+            logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
+            if spinner:
+                for i in range(wait_time, 0, -1):
+                    spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
+                    time.sleep(1)
+            else:
+                time.sleep(wait_time)
+
+    if spinner:
+        spinner.fail(f"Failed to generate commit message with {provider_name}")
+
+    error_type = _classify_error(str(last_error))
+    raise AIError(
+        f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
+    )
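The module's public surface is just `generate`; `_make_request_with_retry` is an internal helper. A minimal usage sketch under the signature above (the model name comes from the docstring example; the key value is a placeholder):

```python
import os

from gac.providers import groq

os.environ.setdefault("GROQ_API_KEY", "your_groq_key_here")  # placeholder; normally set in $HOME/.gac.env

# New-style prompt: a (system_prompt, user_prompt) tuple.
message = groq.generate(
    model="llama3-8b-8192",
    prompt=("You write conventional commit messages.", "Summarize this staged diff: ..."),
    quiet=True,  # skip the Halo spinner, e.g. when scripting
)
print(message)
```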
gac/providers/ollama.py
ADDED
@@ -0,0 +1,135 @@
+"""Ollama API provider for gac."""
+
+import logging
+import os
+import time
+
+import httpx
+from halo import Halo
+
+from gac.ai_utils import _classify_error
+from gac.constants import EnvDefaults
+from gac.errors import AIError
+
+logger = logging.getLogger(__name__)
+
+
+def generate(
+    model: str,
+    prompt: str | tuple[str, str],
+    temperature: float = EnvDefaults.TEMPERATURE,
+    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+    max_retries: int = EnvDefaults.MAX_RETRIES,
+    quiet: bool = False,
+) -> str:
+    """Generate commit message using Ollama API with retry logic.
+
+    Args:
+        model: The model name (e.g., 'llama3', 'mistral')
+        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+        temperature: Controls randomness (0.0-1.0)
+        max_tokens: Maximum tokens in the response (note: Ollama uses 'num_predict')
+        max_retries: Number of retry attempts if generation fails
+        quiet: If True, suppress progress indicators
+
+    Returns:
+        A formatted commit message string
+
+    Raises:
+        AIError: If generation fails after max_retries attempts
+    """
+    # Handle both old (string) and new (tuple) prompt formats
+    if isinstance(prompt, tuple):
+        system_prompt, user_prompt = prompt
+        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+    else:
+        # Backward compatibility: treat string as user prompt
+        messages = [{"role": "user", "content": prompt}]
+
+    payload = {
+        "model": model,
+        "messages": messages,
+        "stream": False,
+        "options": {
+            "temperature": temperature,
+            "num_predict": max_tokens,
+        },
+    }
+
+    headers = {
+        "Content-Type": "application/json",
+    }
+
+    # Ollama typically runs locally on port 11434
+    ollama_url = os.getenv("OLLAMA_URL", "http://localhost:11434")
+
+    return _make_request_with_retry(
+        url=f"{ollama_url}/api/chat",
+        headers=headers,
+        payload=payload,
+        provider_name=f"Ollama {model}",
+        max_retries=max_retries,
+        quiet=quiet,
+        response_parser=lambda r: r["message"]["content"],
+    )
+
+
+def _make_request_with_retry(
+    url: str,
+    headers: dict,
+    payload: dict,
+    provider_name: str,
+    max_retries: int,
+    quiet: bool,
+    response_parser: callable,
+) -> str:
+    """Make HTTP request with retry logic and common error handling."""
+    if quiet:
+        spinner = None
+    else:
+        spinner = Halo(text=f"Generating commit message with {provider_name}...", spinner="dots")
+        spinner.start()
+
+    last_error = None
+    retry_count = 0
+
+    while retry_count < max_retries:
+        try:
+            logger.debug(f"Trying with {provider_name} (attempt {retry_count + 1}/{max_retries})")
+
+            with httpx.Client(timeout=30.0) as client:
+                response = client.post(url, headers=headers, json=payload)
+                response.raise_for_status()
+
+            response_data = response.json()
+            message = response_parser(response_data)
+
+            if spinner:
+                spinner.succeed(f"Generated commit message with {provider_name}")
+
+            return message
+
+        except Exception as e:
+            last_error = e
+            retry_count += 1
+
+            if retry_count == max_retries:
+                logger.warning(f"Error generating commit message: {e}. Giving up.")
+                break
+
+            wait_time = 2**retry_count
+            logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
+            if spinner:
+                for i in range(wait_time, 0, -1):
+                    spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
+                    time.sleep(1)
+            else:
+                time.sleep(wait_time)
+
+    if spinner:
+        spinner.fail(f"Failed to generate commit message with {provider_name}")
+
+    error_type = _classify_error(str(last_error))
+    raise AIError(
+        f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
+    )
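Unlike the hosted providers, this module needs no API key; it defaults to a local Ollama server and honors `OLLAMA_URL` for remote hosts. A brief usage sketch (the host address is an example), also showing the backward-compatible string prompt:

```python
import os

from gac.providers import ollama

# Optional: point at a remote Ollama server instead of http://localhost:11434.
os.environ["OLLAMA_URL"] = "http://192.168.1.50:11434"  # example address

# Old-style string prompt is still accepted and treated as the user message.
message = ollama.generate(model="llama3", prompt="Write a commit message for: ...")
print(message)
```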
gac/providers/openai.py
ADDED
@@ -0,0 +1,134 @@
+"""OpenAI API provider for gac."""
+
+import logging
+import os
+import time
+
+import httpx
+from halo import Halo
+
+from gac.ai_utils import _classify_error
+from gac.constants import EnvDefaults
+from gac.errors import AIError
+
+logger = logging.getLogger(__name__)
+
+
+def generate(
+    model: str,
+    prompt: str | tuple[str, str],
+    temperature: float = EnvDefaults.TEMPERATURE,
+    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+    max_retries: int = EnvDefaults.MAX_RETRIES,
+    quiet: bool = False,
+) -> str:
+    """Generate commit message using OpenAI API with retry logic.
+
+    Args:
+        model: The model name (e.g., 'gpt-4', 'gpt-3.5-turbo')
+        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+        temperature: Controls randomness (0.0-1.0)
+        max_tokens: Maximum tokens in the response
+        max_retries: Number of retry attempts if generation fails
+        quiet: If True, suppress progress indicators
+
+    Returns:
+        A formatted commit message string
+
+    Raises:
+        AIError: If generation fails after max_retries attempts
+    """
+    api_key = os.getenv("OPENAI_API_KEY")
+    if not api_key:
+        raise AIError.model_error("OPENAI_API_KEY environment variable not set")
+
+    # Handle both old (string) and new (tuple) prompt formats
+    if isinstance(prompt, tuple):
+        system_prompt, user_prompt = prompt
+        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+    else:
+        # Backward compatibility: treat string as user prompt
+        messages = [{"role": "user", "content": prompt}]
+
+    payload = {
+        "model": model,
+        "messages": messages,
+        "temperature": temperature,
+        "max_tokens": max_tokens,
+    }
+
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {api_key}",
+    }
+
+    return _make_request_with_retry(
+        url="https://api.openai.com/v1/chat/completions",
+        headers=headers,
+        payload=payload,
+        provider_name=f"OpenAI {model}",
+        max_retries=max_retries,
+        quiet=quiet,
+        response_parser=lambda r: r["choices"][0]["message"]["content"],
+    )
+
+
+def _make_request_with_retry(
+    url: str,
+    headers: dict,
+    payload: dict,
+    provider_name: str,
+    max_retries: int,
+    quiet: bool,
+    response_parser: callable,
+) -> str:
+    """Make HTTP request with retry logic and common error handling."""
+    if quiet:
+        spinner = None
+    else:
+        spinner = Halo(text=f"Generating commit message with {provider_name}...", spinner="dots")
+        spinner.start()
+
+    last_error = None
+    retry_count = 0
+
+    while retry_count < max_retries:
+        try:
+            logger.debug(f"Trying with {provider_name} (attempt {retry_count + 1}/{max_retries})")
+
+            with httpx.Client(timeout=30.0) as client:
+                response = client.post(url, headers=headers, json=payload)
+                response.raise_for_status()
+
+            response_data = response.json()
+            message = response_parser(response_data)
+
+            if spinner:
+                spinner.succeed(f"Generated commit message with {provider_name}")
+
+            return message
+
+        except Exception as e:
+            last_error = e
+            retry_count += 1
+
+            if retry_count == max_retries:
+                logger.warning(f"Error generating commit message: {e}. Giving up.")
+                break
+
+            wait_time = 2**retry_count
+            logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
+            if spinner:
+                for i in range(wait_time, 0, -1):
+                    spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
+                    time.sleep(1)
+            else:
+                time.sleep(wait_time)
+
+    if spinner:
+        spinner.fail(f"Failed to generate commit message with {provider_name}")
+
+    error_type = _classify_error(str(last_error))
+    raise AIError(
+        f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
+    )
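Note that `_make_request_with_retry` is copied verbatim into every provider module rather than shared; of the common pieces, only `_classify_error` lives in the new `gac/ai_utils.py`. Its backoff doubles after each failed attempt, which the following arithmetic sketch makes concrete (`max_retries = 3` is an assumed example; the real default is `EnvDefaults.MAX_RETRIES`):

```python
# Waits implied by wait_time = 2**retry_count; no sleep follows the final attempt.
max_retries = 3  # assumed example value
waits = [2**attempt for attempt in range(1, max_retries)]
print(waits)  # [2, 4]: sleep 2s after the first failure, 4s after the second, then give up
```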
gac/providers/openrouter.py
ADDED
@@ -0,0 +1,125 @@
+"""OpenRouter API provider for gac."""
+
+import logging
+import os
+import time
+
+import httpx
+from halo import Halo
+
+from gac.ai_utils import _classify_error
+from gac.constants import EnvDefaults
+from gac.errors import AIError
+
+logger = logging.getLogger(__name__)
+
+
+def generate(
+    model: str,
+    prompt: str | tuple[str, str],
+    temperature: float = EnvDefaults.TEMPERATURE,
+    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+    max_retries: int = EnvDefaults.MAX_RETRIES,
+    quiet: bool = False,
+) -> str:
+    """Generate commit message using OpenRouter API with retry logic."""
+    api_key = os.getenv("OPENROUTER_API_KEY")
+    if not api_key:
+        raise AIError.model_error("OPENROUTER_API_KEY environment variable not set")
+
+    if isinstance(prompt, tuple):
+        system_prompt, user_prompt = prompt
+        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+    else:
+        messages = [{"role": "user", "content": prompt}]
+
+    payload = {
+        "model": model,
+        "messages": messages,
+        "temperature": temperature,
+        "max_tokens": max_tokens,
+    }
+
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {api_key}",
+    }
+
+    site_url = os.getenv("OPENROUTER_SITE_URL")
+    if site_url:
+        headers["HTTP-Referer"] = site_url
+
+    site_name = os.getenv("OPENROUTER_SITE_NAME")
+    if site_name:
+        headers["X-Title"] = site_name
+
+    return _make_request_with_retry(
+        url="https://openrouter.ai/api/v1/chat/completions",
+        headers=headers,
+        payload=payload,
+        provider_name=f"OpenRouter {model}",
+        max_retries=max_retries,
+        quiet=quiet,
+        response_parser=lambda r: r["choices"][0]["message"]["content"],
+    )
+
+
+def _make_request_with_retry(
+    url: str,
+    headers: dict,
+    payload: dict,
+    provider_name: str,
+    max_retries: int,
+    quiet: bool,
+    response_parser: callable,
+) -> str:
+    """Make HTTP request with retry logic and common error handling."""
+    if quiet:
+        spinner = None
+    else:
+        spinner = Halo(text=f"Generating commit message with {provider_name}...", spinner="dots")
+        spinner.start()
+
+    last_error = None
+    retry_count = 0
+
+    while retry_count < max_retries:
+        try:
+            logger.debug(f"Trying with {provider_name} (attempt {retry_count + 1}/{max_retries})")
+
+            with httpx.Client(timeout=30.0) as client:
+                response = client.post(url, headers=headers, json=payload)
+                response.raise_for_status()
+
+            response_data = response.json()
+            message = response_parser(response_data)
+
+            if spinner:
+                spinner.succeed(f"Generated commit message with {provider_name}")
+
+            return message
+
+        except Exception as e:
+            last_error = e
+            retry_count += 1
+
+            if retry_count == max_retries:
+                logger.warning(f"Error generating commit message: {e}. Giving up.")
+                break
+
+            wait_time = 2**retry_count
+            logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
+            if spinner:
+                for i in range(wait_time, 0, -1):
+                    spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
+                    time.sleep(1)
+            else:
+                time.sleep(wait_time)
+
+    if spinner:
+        spinner.fail(f"Failed to generate commit message with {provider_name}")
+
+    error_type = _classify_error(str(last_error))
+    raise AIError(
+        f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
+    )
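OpenRouter's `generate` is the only one that adds optional attribution headers, driven by two extra environment variables. A short usage sketch (key and site values are placeholders taken from the METADATA example below):

```python
import os

from gac.providers import openrouter

os.environ["OPENROUTER_API_KEY"] = "your_openrouter_key_here"  # placeholder
os.environ["OPENROUTER_SITE_URL"] = "https://example.com"      # sent as HTTP-Referer
os.environ["OPENROUTER_SITE_NAME"] = "Example App"             # sent as X-Title

message = openrouter.generate(
    model="openrouter/auto",
    prompt=("You write conventional commit messages.", "Summarize this staged diff: ..."),
)
print(message)
```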
{gac-1.0.1.dist-info → gac-1.1.0.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: gac
-Version: 1.0.1
+Version: 1.1.0
 Summary: AI-powered Git commit message generator with multi-provider support
 Project-URL: Homepage, https://github.com/cellwebb/gac
 Project-URL: Documentation, https://github.com/cellwebb/gac#readme
@@ -56,7 +56,7 @@ Description-Content-Type: text/markdown
 
 - **LLM-Powered Commit Messages:** Automatically generates clear, concise, and context-aware commit messages using large language models.
 - **Deep Contextual Analysis:** Understands your code by analyzing staged changes, repository structure, and recent commit history to provide highly relevant suggestions.
-- **Multi-Provider & Model Support:** Flexibly works with various leading AI providers (like Anthropic, Cerebras, Groq, OpenAI) and models, easily configured through an interactive setup or environment variables.
+- **Multi-Provider & Model Support:** Flexibly works with various leading AI providers (like Anthropic, Cerebras, Groq, OpenRouter, OpenAI) and models, easily configured through an interactive setup or environment variables.
 - **Seamless Git Workflow:** Integrates smoothly into your existing Git routine as a simple drop-in replacement for `git commit`.
 - **Extensive Customization:** Tailor commit messages to your needs with a rich set of flags, including one-liners (`-o`), AI hints (`-h`), scope inference (`-s`), and specific model selection (`-m`).
 - **Streamlined Workflow Commands:** Boost your productivity with convenient options to stage all changes (`-a`), auto-confirm commits (`-y`), and push to your remote repository (`-p`) in a single step.
@@ -136,6 +136,11 @@ Example `$HOME/.gac.env` output:
 ```env
 GAC_MODEL=anthropic:claude-3-5-haiku-latest
 ANTHROPIC_API_KEY=your_anthropic_key_here
+# Optional: configure OpenRouter
+# GAC_MODEL=openrouter:openrouter/auto
+# OPENROUTER_API_KEY=your_openrouter_key_here
+# OPENROUTER_SITE_URL=https://example.com
+# OPENROUTER_SITE_NAME=Example App
 ```
 
 Alternatively, you can configure `gac` using environment variables or by manually creating/editing the configuration file.
gac-1.1.0.dist-info/RECORD
ADDED
@@ -0,0 +1,28 @@
+gac/__init__.py,sha256=-knZC1oRylbJUv5uGk3fP_dHaaLHG76duAEWxLUi1eg,940
+gac/__version__.py,sha256=JSz2jRyqqtKTmDkhJ8NI5pr2e0rhFqSSkokXslLdc2M,66
+gac/ai.py,sha256=-Wd9cr5yK-UvR14AHvBeHrpa28KXu0bYZjJS02U-q90,3419
+gac/ai_utils.py,sha256=DFiM0Wzp-kremo33_i5SEYpms7tE13ipKLCCr_-AZPU,4327
+gac/cli.py,sha256=eQS8S7v6p0CfN9wtr239ujYGTi9rKl-KV7STX2U-C3w,4581
+gac/config.py,sha256=wSgEDjtis7Vk1pv5VPvYmJyD9-tymDS6GiUHjnCMbIM,1486
+gac/config_cli.py,sha256=v9nFHZO1RvK9fzHyuUS6SG-BCLHMsdOMDwWamBhVVh4,1608
+gac/constants.py,sha256=MAxdASGncfZY1TdKGdhJZ0wvTBEU3gTN6KEdw8n3Bd8,4844
+gac/diff_cli.py,sha256=wnVQ9OFGnM0d2Pj9WVjWbo0jxqIuRHVAwmb8wU9Pa3E,5676
+gac/errors.py,sha256=3vIRMQ2QF3sP9_rPfXAFuu5ZSjIVX4FxM-FAuiR8N-8,7416
+gac/git.py,sha256=MS2m4fv8h4mau1djFG1aje9NXTmkGsjPO9w18LqNGX0,6031
+gac/init_cli.py,sha256=e4z9-4NhoUn2DUyApIru8JR-W7HuNq2VeeXkR9aXHLo,1868
+gac/main.py,sha256=POay7l6ihm3oF9ajGWx9cA40Pu-NVz5x_OzQOYPDoX8,12011
+gac/preprocess.py,sha256=krrLPHsccYMdn_YAtUrppBJIoRgevxGWusDwhE40LEo,15366
+gac/prompt.py,sha256=_fv24XU3DZE_S72vcdUYnNkmy-_KXnr1Vlc-9okop7E,17263
+gac/utils.py,sha256=W3ladtmsH01MNLdckQYTzYrYbTGEdzCKI36he9C-y_E,3945
+gac/providers/__init__.py,sha256=_JawPCV_ZCtI18bBqVCbQkkbLetSQqOBcj2b19KRuX0,38
+gac/providers/anthropic.py,sha256=v0JApD6dNfIbxBoFbpf4GBWc4PsVPgcEcw1GDEWhoeM,4382
+gac/providers/cerebras.py,sha256=rWtndVj9q1w-EIfLyP4NJ10GFPnlfiu43ZminTdWP1s,4166
+gac/providers/groq.py,sha256=K7hrHaYfGn25hnDedR6FBEOqb0r0Y65zQRYso5w3xGE,4156
+gac/providers/ollama.py,sha256=qzX4qMsQeZUh8oPOWFIszhsvgioSnoLi82wO8G13pOI,4139
+gac/providers/openai.py,sha256=VKH7SQE6O4UB1c15KXIBL5UP1Fj3Ed4XQDrqENEPmBo,4150
+gac/providers/openrouter.py,sha256=vU4zIwtnyz9hjWokJwORwX3bZjYtF2MWqz8xxE-srsA,3753
+gac-1.1.0.dist-info/METADATA,sha256=VImMQAbgQT23S1czOiINsmy4tnloF8MRcb9_TSvasrI,8558
+gac-1.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+gac-1.1.0.dist-info/entry_points.txt,sha256=tdjN-XMmcWfL92swuRAjT62bFLOAwk9bTMRLGP5Z4aI,36
+gac-1.1.0.dist-info/licenses/LICENSE,sha256=vOab37NouL1PNs5BswnPayrMCqaN2sqLfMQfqPDrpZg,1103
+gac-1.1.0.dist-info/RECORD,,
|