gac 1.9.5.tar.gz → 1.10.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of gac has been flagged by the registry as possibly problematic; see the registry listing for details.
- {gac-1.9.5 → gac-1.10.0}/PKG-INFO +15 -21
- {gac-1.9.5 → gac-1.10.0}/README.md +14 -20
- {gac-1.9.5 → gac-1.10.0}/src/gac/__version__.py +1 -1
- {gac-1.9.5 → gac-1.10.0}/src/gac/ai.py +17 -7
- {gac-1.9.5 → gac-1.10.0}/src/gac/ai_utils.py +4 -6
- {gac-1.9.5 → gac-1.10.0}/src/gac/init_cli.py +1 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/main.py +58 -86
- {gac-1.9.5 → gac-1.10.0}/src/gac/providers/__init__.py +2 -0
- gac-1.10.0/src/gac/providers/fireworks.py +38 -0
- {gac-1.9.5 → gac-1.10.0}/.gitignore +0 -0
- {gac-1.9.5 → gac-1.10.0}/LICENSE +0 -0
- {gac-1.9.5 → gac-1.10.0}/pyproject.toml +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/__init__.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/cli.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/config.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/config_cli.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/constants.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/diff_cli.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/errors.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/git.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/preprocess.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/prompt.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/providers/anthropic.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/providers/cerebras.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/providers/chutes.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/providers/gemini.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/providers/groq.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/providers/lmstudio.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/providers/ollama.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/providers/openai.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/providers/openrouter.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/providers/streamlake.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/providers/synthetic.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/providers/zai.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/security.py +0 -0
- {gac-1.9.5 → gac-1.10.0}/src/gac/utils.py +0 -0
{gac-1.9.5 → gac-1.10.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: gac
-Version: 1.9.5
+Version: 1.10.0
 Summary: LLM-powered Git commit message generator with multi-provider support
 Project-URL: Homepage, https://github.com/cellwebb/gac
 Project-URL: Documentation, https://github.com/cellwebb/gac#readme
@@ -69,58 +69,52 @@ Instead of generic messages like `"update stuff"`, `"fix bug"`, or `"add feature
 
 ## Quick Start
 
-###
+### Use without installing
 
 ```bash
-#
-
-
-# 2. Configure your LLM provider
-gac init
-
-# 3. Use on your staged changes
-git add .
-gac
+uvx gac init # Configure your LLM provider
+uvx gac # Generate and commit with LLM
 ```
 
 That's it! Review the generated message and confirm with `y`.
 
-###
+### Install gac globally
 
 ```bash
-
-
+uv tool install gac
+gac init
+gac
 ```
 
 ---
 
 ## Key Features
 
-### **Supported Providers**
+### 🌐 **Supported Providers**
 
-- **Anthropic** • **Cerebras** • **Chutes.ai** • **Gemini** • **Groq**
-- **LM Studio** • **Ollama** • **OpenAI** • **OpenRouter**
+- **Anthropic** • **Cerebras** • **Chutes.ai** • **Fireworks** • **Gemini**
+- **Groq** • **LM Studio** • **Ollama** • **OpenAI** • **OpenRouter**
 - **Streamlake** • **Synthetic.new** • **Z.AI** • **Z.AI Coding**
 
-### **Smart LLM Analysis**
+### 🧠 **Smart LLM Analysis**
 
 - **Understands intent**: Analyzes code structure, logic, and patterns to understand the "why" behind your changes, not just what changed
 - **Semantic awareness**: Recognizes refactoring, bug fixes, features, and breaking changes to generate contextually appropriate messages
 - **Intelligent filtering**: Prioritizes meaningful changes while ignoring generated files, dependencies, and artifacts
 
-### **Multiple Message Formats**
+### 📝 **Multiple Message Formats**
 
 - **One-liner** (-o flag): Single-line commit message following conventional commit format
 - **Standard** (default): Summary with bullet points explaining implementation details
 - **Verbose** (-v flag): Comprehensive explanations including motivation, technical approach, and impact analysis
 
-### **Developer Experience**
+### 💻 **Developer Experience**
 
 - **Interactive feedback**: Regenerate messages with specific requests like `r "make it shorter"` or `r "focus on the bug fix"`
 - **One-command workflows**: Complete workflows with flags like `gac -ayp` (stage all, auto-confirm, push)
 - **Git integration**: Respects pre-commit and lefthook hooks, running them before expensive LLM operations
 
-### **Built-in Security**
+### 🛡️ **Built-in Security**
 
 - **Automatic secret detection**: Scans for API keys, passwords, and tokens before committing
 - **Interactive protection**: Prompts before committing potentially sensitive data with clear remediation options
{gac-1.9.5 → gac-1.10.0}/README.md

@@ -27,58 +27,52 @@ Instead of generic messages like `"update stuff"`, `"fix bug"`, or `"add feature
 
 ## Quick Start
 
-###
+### Use without installing
 
 ```bash
-#
-
-
-# 2. Configure your LLM provider
-gac init
-
-# 3. Use on your staged changes
-git add .
-gac
+uvx gac init # Configure your LLM provider
+uvx gac # Generate and commit with LLM
 ```
 
 That's it! Review the generated message and confirm with `y`.
 
-###
+### Install gac globally
 
 ```bash
-
-
+uv tool install gac
+gac init
+gac
 ```
 
 ---
 
 ## Key Features
 
-### **Supported Providers**
+### 🌐 **Supported Providers**
 
-- **Anthropic** • **Cerebras** • **Chutes.ai** • **Gemini** • **Groq**
-- **LM Studio** • **Ollama** • **OpenAI** • **OpenRouter**
+- **Anthropic** • **Cerebras** • **Chutes.ai** • **Fireworks** • **Gemini**
+- **Groq** • **LM Studio** • **Ollama** • **OpenAI** • **OpenRouter**
 - **Streamlake** • **Synthetic.new** • **Z.AI** • **Z.AI Coding**
 
-### **Smart LLM Analysis**
+### 🧠 **Smart LLM Analysis**
 
 - **Understands intent**: Analyzes code structure, logic, and patterns to understand the "why" behind your changes, not just what changed
 - **Semantic awareness**: Recognizes refactoring, bug fixes, features, and breaking changes to generate contextually appropriate messages
 - **Intelligent filtering**: Prioritizes meaningful changes while ignoring generated files, dependencies, and artifacts
 
-### **Multiple Message Formats**
+### 📝 **Multiple Message Formats**
 
 - **One-liner** (-o flag): Single-line commit message following conventional commit format
 - **Standard** (default): Summary with bullet points explaining implementation details
 - **Verbose** (-v flag): Comprehensive explanations including motivation, technical approach, and impact analysis
 
-### **Developer Experience**
+### 💻 **Developer Experience**
 
 - **Interactive feedback**: Regenerate messages with specific requests like `r "make it shorter"` or `r "focus on the bug fix"`
 - **One-command workflows**: Complete workflows with flags like `gac -ayp` (stage all, auto-confirm, push)
 - **Git integration**: Respects pre-commit and lefthook hooks, running them before expensive LLM operations
 
-### **Built-in Security**
+### 🛡️ **Built-in Security**
 
 - **Automatic secret detection**: Scans for API keys, passwords, and tokens before committing
 - **Interactive protection**: Prompts before committing potentially sensitive data with clear remediation options
{gac-1.9.5 → gac-1.10.0}/src/gac/ai.py

@@ -13,6 +13,7 @@ from gac.providers import (
     call_anthropic_api,
     call_cerebras_api,
     call_chutes_api,
+    call_fireworks_api,
     call_gemini_api,
     call_groq_api,
     call_lmstudio_api,

@@ -30,7 +31,7 @@ logger = logging.getLogger(__name__)
 
 def generate_commit_message(
     model: str,
-    prompt: str | tuple[str, str],
+    prompt: str | tuple[str, str] | list[dict[str, str]],
     temperature: float = EnvDefaults.TEMPERATURE,
     max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
     max_retries: int = EnvDefaults.MAX_RETRIES,

@@ -59,18 +60,28 @@ def generate_commit_message(
         'docs: Update README with installation instructions'
     """
     # Handle both old (string) and new (tuple) prompt formats
-    if isinstance(prompt, tuple):
+    if isinstance(prompt, list):
+        messages = [{**msg} for msg in prompt]
+    elif isinstance(prompt, tuple):
         system_prompt, user_prompt = prompt
+        messages = [
+            {"role": "system", "content": system_prompt or ""},
+            {"role": "user", "content": user_prompt},
+        ]
     else:
-        # Backward compatibility: treat string as user prompt with
-
-
+        # Backward compatibility: treat string as user prompt with empty system prompt
+        user_prompt = str(prompt)
+        messages = [
+            {"role": "system", "content": ""},
+            {"role": "user", "content": user_prompt},
+        ]
 
     # Provider functions mapping
     provider_funcs = {
         "anthropic": call_anthropic_api,
         "cerebras": call_cerebras_api,
         "chutes": call_chutes_api,
+        "fireworks": call_fireworks_api,
         "gemini": call_gemini_api,
         "groq": call_groq_api,
         "lm-studio": call_lmstudio_api,

@@ -88,8 +99,7 @@ def generate_commit_message(
     return generate_with_retries(
         provider_funcs=provider_funcs,
         model=model,
-        system_prompt=system_prompt,
-        user_prompt=user_prompt,
+        messages=messages,
         temperature=temperature,
         max_tokens=max_tokens,
         max_retries=max_retries,
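Note on the `prompt` change above: `generate_commit_message` now accepts three shapes and normalizes each to a chat-style message list. A minimal sketch of the three call forms follows; the model string and prompt text are illustrative placeholders, not values taken from this diff:

```python
from gac.ai import generate_commit_message

model = "anthropic:claude-haiku-4-5"  # hypothetical provider:model string

# 1. Plain string (backward compatible): wrapped as a user message with an empty system prompt
generate_commit_message(model, "Write a commit message for this diff: ...")

# 2. (system, user) tuple: the pre-1.10.0 format, expanded into a two-message list
generate_commit_message(model, ("You write git commit messages.", "Diff: ..."))

# 3. Message list (new in 1.10.0): passed through as-is, which enables multi-turn reroll feedback
generate_commit_message(
    model,
    [
        {"role": "system", "content": "You write git commit messages."},
        {"role": "user", "content": "Diff: ..."},
        {"role": "assistant", "content": "fix: correct typo in README"},
        {"role": "user", "content": "Please revise the commit message based on this feedback: shorter"},
    ],
)
```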
{gac-1.9.5 → gac-1.10.0}/src/gac/ai_utils.py

@@ -78,8 +78,7 @@ def _classify_error(error_str: str) -> str:
 def generate_with_retries(
     provider_funcs: dict,
     model: str,
-    system_prompt: str,
-    user_prompt: str,
+    messages: list[dict[str, str]],
     temperature: float,
     max_tokens: int,
     max_retries: int,

@@ -97,6 +96,7 @@ def generate_with_retries(
         "anthropic",
         "cerebras",
         "chutes",
+        "fireworks",
         "gemini",
         "groq",
         "lm-studio",

@@ -111,10 +111,8 @@ def generate_with_retries(
     if provider not in supported_providers:
         raise AIError.model_error(f"Unsupported provider: {provider}. Supported providers: {supported_providers}")
 
-    messages = [
-        {"role": "system", "content": system_prompt},
-        {"role": "user", "content": user_prompt},
-    ]
+    if not messages:
+        raise AIError.model_error("No messages provided for AI generation")
 
     # Set up spinner
     if quiet:
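With this change, `generate_with_retries` receives a prebuilt message list and only checks that it is non-empty; constructing the messages is entirely the caller's job. A hedged sketch of a direct call (normally reached via `generate_commit_message`; the `provider:model` string format and the trailing `quiet` keyword are assumptions drawn from the surrounding code, not confirmed by this diff):

```python
from gac.ai_utils import generate_with_retries
from gac.providers import call_fireworks_api

commit_message = generate_with_retries(
    provider_funcs={"fireworks": call_fireworks_api},
    model="fireworks:accounts/fireworks/models/gpt-oss-20b",  # assumed provider:model format
    messages=[{"role": "user", "content": "Write a commit message for: fix typo in README"}],
    temperature=0.7,
    max_tokens=512,
    max_retries=3,
    quiet=True,  # assumed from the `if quiet:` branch shown above
)
```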
{gac-1.9.5 → gac-1.10.0}/src/gac/init_cli.py

@@ -35,6 +35,7 @@ def init() -> None:
         ("Anthropic", "claude-haiku-4-5"),
         ("Cerebras", "qwen-3-coder-480b"),
         ("Chutes", "zai-org/GLM-4.6-FP8"),
+        ("Fireworks", "accounts/fireworks/models/gpt-oss-20b"),
         ("Gemini", "gemini-2.5-flash"),
         ("Groq", "meta-llama/llama-4-maverick-17b-128e-instruct"),
         ("LM Studio", "gemma3"),
{gac-1.9.5 → gac-1.10.0}/src/gac/main.py

@@ -202,39 +202,49 @@ def main(
         )
     )
 
+    conversation_messages: list[dict[str, str]] = []
+    if system_prompt:
+        conversation_messages.append({"role": "system", "content": system_prompt})
+    conversation_messages.append({"role": "user", "content": user_prompt})
+
     try:
-
-
-
-
-
-
-
-
-
-
+        first_iteration = True
+
+        while True:
+            prompt_tokens = count_tokens(conversation_messages, model)
+
+            if first_iteration:
+                warning_limit_val = config.get("warning_limit_tokens", EnvDefaults.WARNING_LIMIT_TOKENS)
+                assert warning_limit_val is not None
+                warning_limit = int(warning_limit_val)
+                if warning_limit and prompt_tokens > warning_limit:
+                    console.print(
+                        f"[yellow]⚠️ WARNING: Prompt contains {prompt_tokens} tokens, which exceeds the warning limit of "
+                        f"{warning_limit} tokens.[/yellow]"
+                    )
+                    if require_confirmation:
+                        proceed = click.confirm("Do you want to continue anyway?", default=True)
+                        if not proceed:
+                            console.print("[yellow]Aborted due to token limit.[/yellow]")
+                            sys.exit(0)
+
+            first_iteration = False
+
+            raw_commit_message = generate_commit_message(
+                model=model,
+                prompt=conversation_messages,
+                temperature=temperature,
+                max_tokens=max_output_tokens,
+                max_retries=max_retries,
+                quiet=quiet,
             )
-
-        proceed = click.confirm("Do you want to continue anyway?", default=True)
-        if not proceed:
-            console.print("[yellow]Aborted due to token limit.[/yellow]")
-            sys.exit(0)
+            commit_message = clean_commit_message(raw_commit_message)
 
-        commit_message = generate_commit_message(
-            model=model,
-            prompt=(system_prompt, user_prompt),
-            temperature=temperature,
-            max_tokens=max_output_tokens,
-            max_retries=max_retries,
-            quiet=quiet,
-        )
-        commit_message = clean_commit_message(commit_message)
+            logger.info("Generated commit message:")
+            logger.info(commit_message)
 
-        logger.info("Generated commit message:")
-        logger.info(commit_message)
+            conversation_messages.append({"role": "assistant", "content": commit_message})
 
-        # Reroll loop
-        while True:
             console.print("[bold green]Generated commit message:[/bold green]")
             console.print(Panel(commit_message, title="Commit Message", border_style="cyan"))
 
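The seeding of `conversation_messages` replaces the old `(system_prompt, user_prompt)` tuple as the unit passed to generation. As a standalone sketch of just that piece (both prompt strings come from gac's prompt-building step, which is unchanged in this diff):

```python
def seed_conversation(system_prompt: str | None, user_prompt: str) -> list[dict[str, str]]:
    """Build the initial message list, mirroring the hunk above."""
    conversation: list[dict[str, str]] = []
    if system_prompt:
        conversation.append({"role": "system", "content": system_prompt})
    conversation.append({"role": "user", "content": user_prompt})
    return conversation
```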
@@ -252,76 +262,38 @@ def main(
                 response = click.prompt(
                     "Proceed with commit above? [y/n/r <feedback>]", type=str, show_default=False
                 ).strip()
+                response_lower = response.lower()
 
-                if response.lower() in ["y", "yes"]:
-                    break
-                elif response.lower() in ["n", "no"]:
+                if response_lower in ["y", "yes"]:
+                    break
+                if response_lower in ["n", "no"]:
                     console.print("[yellow]Prompt not accepted. Exiting...[/yellow]")
                     sys.exit(0)
-
-
-
-
-
+                if response_lower == "r" or response_lower == "reroll" or response_lower.startswith("r "):
+                    if response_lower == "r" or response_lower == "reroll":
+                        feedback_message = (
+                            "Please provide an alternative commit message using the same repository context."
+                        )
                         console.print("[cyan]Regenerating commit message...[/cyan]")
                     else:
-
-
-
-
-                        # Combine hints if reroll feedback provided
-                        combined_hint = hint
-                        if reroll_feedback:
-                            # Create conversational prompt with previous attempt and feedback
-                            conversational_hint = f"Previous attempt: '{commit_message}'. User feedback: {reroll_feedback}. Please revise accordingly."
-
-                            if hint:
-                                combined_hint = f"{hint}. {conversational_hint}"
-                            else:
-                                combined_hint = conversational_hint
-
-                            # Regenerate prompt with conversational feedback
-                            reroll_system_prompt, reroll_user_prompt = build_prompt(
-                                status=status,
-                                processed_diff=processed_diff,
-                                diff_stat=diff_stat,
-                                one_liner=one_liner,
-                                hint=combined_hint,
-                                infer_scope=infer_scope,
-                                verbose=verbose,
+                        reroll_feedback = response[2:].strip()
+                        feedback_message = (
+                            f"Please revise the commit message based on this feedback: {reroll_feedback}"
                         )
-                        else:
-                            # No hint given, just reroll with same prompts
-                            reroll_system_prompt, reroll_user_prompt = system_prompt, user_prompt
+                        console.print(f"[cyan]Regenerating commit message with feedback: {reroll_feedback}[/cyan]")
 
-
-                        system_prompt = reroll_system_prompt
-                        user_prompt = reroll_user_prompt
-                        prompt_tokens = count_tokens(system_prompt, model) + count_tokens(user_prompt, model)
+                    conversation_messages.append({"role": "user", "content": feedback_message})
 
                     console.print()  # Add blank line for readability
+                    break
+
+                console.print(
+                    "[red]Invalid response. Please enter y (yes), n (no), r (reroll), or r <feedback>.[/red]"
+                )
 
-
-                        commit_message = generate_commit_message(
-                            model=model,
-                            prompt=(system_prompt, user_prompt),
-                            temperature=temperature,
-                            max_tokens=max_output_tokens,
-                            max_retries=max_retries,
-                            quiet=quiet,
-                        )
-                        commit_message = clean_commit_message(commit_message)
-                        break  # Exit inner loop, continue outer loop
-                else:
-                    console.print(
-                        "[red]Invalid response. Please enter y (yes), n (no), r (reroll), or r <feedback>.[/red]"
-                    )
-
-            # If we got here with 'y', break the outer loop
-            if response.lower() in ["y", "yes"]:
+            if response_lower in ["y", "yes"]:
                 break
         else:
-            # No confirmation required, exit loop
             break
 
     if dry_run:
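The reroll path no longer rebuilds prompts through `build_prompt`; it appends the user's feedback as a new user turn and loops back to regenerate with the full conversation. Extracted from the hunk above as a standalone helper (the function name is ours, not gac's):

```python
def append_reroll_request(conversation: list[dict[str, str]], response: str) -> None:
    """Turn a reroll reply ('r', 'reroll', or 'r <feedback>') into a new user message."""
    response_lower = response.lower()
    if response_lower in ("r", "reroll"):
        feedback_message = "Please provide an alternative commit message using the same repository context."
    else:
        reroll_feedback = response[2:].strip()  # drop the leading "r "
        feedback_message = f"Please revise the commit message based on this feedback: {reroll_feedback}"
    conversation.append({"role": "user", "content": feedback_message})
```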
{gac-1.9.5 → gac-1.10.0}/src/gac/providers/__init__.py

@@ -3,6 +3,7 @@
 from .anthropic import call_anthropic_api
 from .cerebras import call_cerebras_api
 from .chutes import call_chutes_api
+from .fireworks import call_fireworks_api
 from .gemini import call_gemini_api
 from .groq import call_groq_api
 from .lmstudio import call_lmstudio_api

@@ -17,6 +18,7 @@ __all__ = [
     "call_anthropic_api",
     "call_cerebras_api",
     "call_chutes_api",
+    "call_fireworks_api",
     "call_gemini_api",
     "call_groq_api",
     "call_lmstudio_api",
gac-1.10.0/src/gac/providers/fireworks.py

@@ -0,0 +1,38 @@
+"""Fireworks AI API provider for gac."""
+
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_fireworks_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call Fireworks AI API directly."""
+    api_key = os.getenv("FIREWORKS_API_KEY")
+    if not api_key:
+        raise AIError.authentication_error("FIREWORKS_API_KEY not found in environment variables")
+
+    url = "https://api.fireworks.ai/inference/v1/chat/completions"
+    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+    data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+        content = response_data["choices"][0]["message"]["content"]
+        if content is None:
+            raise AIError.model_error("Fireworks AI API returned null content")
+        if content == "":
+            raise AIError.model_error("Fireworks AI API returned empty content")
+        return content
+    except httpx.HTTPStatusError as e:
+        if e.response.status_code == 429:
+            raise AIError.rate_limit_error(f"Fireworks AI API rate limit exceeded: {e.response.text}") from e
+        raise AIError.model_error(f"Fireworks AI API error: {e.response.status_code} - {e.response.text}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"Fireworks AI API request timed out: {str(e)}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Fireworks AI API: {str(e)}") from e
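For reference, a sketch of exercising the new provider directly; the model id is the default added in init_cli.py, the prompt is a placeholder, and in normal use this function is reached through `generate_commit_message` rather than called by hand:

```python
import os

from gac.providers import call_fireworks_api

# A real key must be exported first, e.g. `export FIREWORKS_API_KEY=...` in your shell.
assert os.getenv("FIREWORKS_API_KEY"), "set FIREWORKS_API_KEY first"

print(
    call_fireworks_api(
        model="accounts/fireworks/models/gpt-oss-20b",
        messages=[{"role": "user", "content": "Write a one-line conventional commit for a README typo fix."}],
        temperature=0.7,
        max_tokens=256,
    )
)
```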