gac 1.9.5.tar.gz → 1.10.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of gac might be problematic.

Files changed (36)
  1. {gac-1.9.5 → gac-1.10.1}/PKG-INFO +15 -22
  2. {gac-1.9.5 → gac-1.10.1}/README.md +14 -20
  3. {gac-1.9.5 → gac-1.10.1}/pyproject.toml +0 -3
  4. {gac-1.9.5 → gac-1.10.1}/src/gac/__version__.py +1 -1
  5. {gac-1.9.5 → gac-1.10.1}/src/gac/ai.py +17 -7
  6. {gac-1.9.5 → gac-1.10.1}/src/gac/ai_utils.py +4 -6
  7. {gac-1.9.5 → gac-1.10.1}/src/gac/init_cli.py +1 -0
  8. {gac-1.9.5 → gac-1.10.1}/src/gac/main.py +58 -86
  9. {gac-1.9.5 → gac-1.10.1}/src/gac/providers/__init__.py +2 -0
  10. gac-1.10.1/src/gac/providers/fireworks.py +38 -0
  11. {gac-1.9.5 → gac-1.10.1}/.gitignore +0 -0
  12. {gac-1.9.5 → gac-1.10.1}/LICENSE +0 -0
  13. {gac-1.9.5 → gac-1.10.1}/src/gac/__init__.py +0 -0
  14. {gac-1.9.5 → gac-1.10.1}/src/gac/cli.py +0 -0
  15. {gac-1.9.5 → gac-1.10.1}/src/gac/config.py +0 -0
  16. {gac-1.9.5 → gac-1.10.1}/src/gac/config_cli.py +0 -0
  17. {gac-1.9.5 → gac-1.10.1}/src/gac/constants.py +0 -0
  18. {gac-1.9.5 → gac-1.10.1}/src/gac/diff_cli.py +0 -0
  19. {gac-1.9.5 → gac-1.10.1}/src/gac/errors.py +0 -0
  20. {gac-1.9.5 → gac-1.10.1}/src/gac/git.py +0 -0
  21. {gac-1.9.5 → gac-1.10.1}/src/gac/preprocess.py +0 -0
  22. {gac-1.9.5 → gac-1.10.1}/src/gac/prompt.py +0 -0
  23. {gac-1.9.5 → gac-1.10.1}/src/gac/providers/anthropic.py +0 -0
  24. {gac-1.9.5 → gac-1.10.1}/src/gac/providers/cerebras.py +0 -0
  25. {gac-1.9.5 → gac-1.10.1}/src/gac/providers/chutes.py +0 -0
  26. {gac-1.9.5 → gac-1.10.1}/src/gac/providers/gemini.py +0 -0
  27. {gac-1.9.5 → gac-1.10.1}/src/gac/providers/groq.py +0 -0
  28. {gac-1.9.5 → gac-1.10.1}/src/gac/providers/lmstudio.py +0 -0
  29. {gac-1.9.5 → gac-1.10.1}/src/gac/providers/ollama.py +0 -0
  30. {gac-1.9.5 → gac-1.10.1}/src/gac/providers/openai.py +0 -0
  31. {gac-1.9.5 → gac-1.10.1}/src/gac/providers/openrouter.py +0 -0
  32. {gac-1.9.5 → gac-1.10.1}/src/gac/providers/streamlake.py +0 -0
  33. {gac-1.9.5 → gac-1.10.1}/src/gac/providers/synthetic.py +0 -0
  34. {gac-1.9.5 → gac-1.10.1}/src/gac/providers/zai.py +0 -0
  35. {gac-1.9.5 → gac-1.10.1}/src/gac/security.py +0 -0
  36. {gac-1.9.5 → gac-1.10.1}/src/gac/utils.py +0 -0

{gac-1.9.5 → gac-1.10.1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: gac
-Version: 1.9.5
+Version: 1.10.1
 Summary: LLM-powered Git commit message generator with multi-provider support
 Project-URL: Homepage, https://github.com/cellwebb/gac
 Project-URL: Documentation, https://github.com/cellwebb/gac#readme

@@ -33,7 +33,6 @@ Requires-Dist: tiktoken>=0.12.0
 Provides-Extra: dev
 Requires-Dist: build; extra == 'dev'
 Requires-Dist: codecov; extra == 'dev'
-Requires-Dist: pre-commit; extra == 'dev'
 Requires-Dist: pytest; extra == 'dev'
 Requires-Dist: pytest-cov; extra == 'dev'
 Requires-Dist: ruff; extra == 'dev'

@@ -69,58 +68,52 @@ Instead of generic messages like `"update stuff"`, `"fix bug"`, or `"add feature

 ## Quick Start

-### Install & Use
+### Use without installing

 ```bash
-# 1. Install globally
-uv tool install gac
-
-# 2. Configure your LLM provider
-gac init
-
-# 3. Use on your staged changes
-git add .
-gac
+uvx gac init # Configure your LLM provider
+uvx gac # Generate and commit with LLM
 ```

 That's it! Review the generated message and confirm with `y`.

-### Use without installing
+### Install gac globally

 ```bash
-uvx gac init
-uvx gac
+uv tool install gac
+gac init
+gac
 ```

 ---

 ## Key Features

-### **Supported Providers**
+### 🌐 **Supported Providers**

-- **Anthropic** • **Cerebras** • **Chutes.ai** • **Gemini** • **Groq**
-- **LM Studio** • **Ollama** • **OpenAI** • **OpenRouter**
+- **Anthropic** • **Cerebras** • **Chutes.ai** • **Fireworks** • **Gemini**
+- **Groq** • **LM Studio** • **Ollama** • **OpenAI** • **OpenRouter**
 - **Streamlake** • **Synthetic.new** • **Z.AI** • **Z.AI Coding**

-### **Smart LLM Analysis**
+### 🧠 **Smart LLM Analysis**

 - **Understands intent**: Analyzes code structure, logic, and patterns to understand the "why" behind your changes, not just what changed
 - **Semantic awareness**: Recognizes refactoring, bug fixes, features, and breaking changes to generate contextually appropriate messages
 - **Intelligent filtering**: Prioritizes meaningful changes while ignoring generated files, dependencies, and artifacts

-### **Multiple Message Formats**
+### 📝 **Multiple Message Formats**

 - **One-liner** (-o flag): Single-line commit message following conventional commit format
 - **Standard** (default): Summary with bullet points explaining implementation details
 - **Verbose** (-v flag): Comprehensive explanations including motivation, technical approach, and impact analysis

-### **Developer Experience**
+### 💻 **Developer Experience**

 - **Interactive feedback**: Regenerate messages with specific requests like `r "make it shorter"` or `r "focus on the bug fix"`
 - **One-command workflows**: Complete workflows with flags like `gac -ayp` (stage all, auto-confirm, push)
 - **Git integration**: Respects pre-commit and lefthook hooks, running them before expensive LLM operations

-### **Built-in Security**
+### 🛡️ **Built-in Security**

 - **Automatic secret detection**: Scans for API keys, passwords, and tokens before committing
 - **Interactive protection**: Prompts before committing potentially sensitive data with clear remediation options

{gac-1.9.5 → gac-1.10.1}/README.md

@@ -27,58 +27,52 @@ Instead of generic messages like `"update stuff"`, `"fix bug"`, or `"add feature

 ## Quick Start

-### Install & Use
+### Use without installing

 ```bash
-# 1. Install globally
-uv tool install gac
-
-# 2. Configure your LLM provider
-gac init
-
-# 3. Use on your staged changes
-git add .
-gac
+uvx gac init # Configure your LLM provider
+uvx gac # Generate and commit with LLM
 ```

 That's it! Review the generated message and confirm with `y`.

-### Use without installing
+### Install gac globally

 ```bash
-uvx gac init
-uvx gac
+uv tool install gac
+gac init
+gac
 ```

 ---

 ## Key Features

-### **Supported Providers**
+### 🌐 **Supported Providers**

-- **Anthropic** • **Cerebras** • **Chutes.ai** • **Gemini** • **Groq**
-- **LM Studio** • **Ollama** • **OpenAI** • **OpenRouter**
+- **Anthropic** • **Cerebras** • **Chutes.ai** • **Fireworks** • **Gemini**
+- **Groq** • **LM Studio** • **Ollama** • **OpenAI** • **OpenRouter**
 - **Streamlake** • **Synthetic.new** • **Z.AI** • **Z.AI Coding**

-### **Smart LLM Analysis**
+### 🧠 **Smart LLM Analysis**

 - **Understands intent**: Analyzes code structure, logic, and patterns to understand the "why" behind your changes, not just what changed
 - **Semantic awareness**: Recognizes refactoring, bug fixes, features, and breaking changes to generate contextually appropriate messages
 - **Intelligent filtering**: Prioritizes meaningful changes while ignoring generated files, dependencies, and artifacts

-### **Multiple Message Formats**
+### 📝 **Multiple Message Formats**

 - **One-liner** (-o flag): Single-line commit message following conventional commit format
 - **Standard** (default): Summary with bullet points explaining implementation details
 - **Verbose** (-v flag): Comprehensive explanations including motivation, technical approach, and impact analysis

-### **Developer Experience**
+### 💻 **Developer Experience**

 - **Interactive feedback**: Regenerate messages with specific requests like `r "make it shorter"` or `r "focus on the bug fix"`
 - **One-command workflows**: Complete workflows with flags like `gac -ayp` (stage all, auto-confirm, push)
 - **Git integration**: Respects pre-commit and lefthook hooks, running them before expensive LLM operations

-### **Built-in Security**
+### 🛡️ **Built-in Security**

 - **Automatic secret detection**: Scans for API keys, passwords, and tokens before committing
 - **Interactive protection**: Prompts before committing potentially sensitive data with clear remediation options

{gac-1.9.5 → gac-1.10.1}/pyproject.toml

@@ -61,7 +61,6 @@ dev = [

     # Linting and formatting
     "ruff",
-    "pre-commit",

     # Release tools
     "build",

@@ -150,7 +149,6 @@ dependencies = [

     # Linting and formatting
     "ruff",
-    "pre-commit",
 ]

 [tool.hatch.envs.default.scripts]

@@ -232,6 +230,5 @@ dev = [
     "pytest>=8.4.2",
     "pytest-asyncio>=1.2.0",
     "pytest-cov>=7.0.0",
-    "pre-commit",
     "mypy>=1.8.0",
 ]

{gac-1.9.5 → gac-1.10.1}/src/gac/__version__.py

@@ -1,3 +1,3 @@
 """Version information for gac package."""

-__version__ = "1.9.5"
+__version__ = "1.10.1"

{gac-1.9.5 → gac-1.10.1}/src/gac/ai.py

@@ -13,6 +13,7 @@ from gac.providers (
     call_anthropic_api,
     call_cerebras_api,
     call_chutes_api,
+    call_fireworks_api,
     call_gemini_api,
     call_groq_api,
     call_lmstudio_api,

@@ -30,7 +31,7 @@ logger = logging.getLogger(__name__)

 def generate_commit_message(
     model: str,
-    prompt: str | tuple[str, str],
+    prompt: str | tuple[str, str] | list[dict[str, str]],
     temperature: float = EnvDefaults.TEMPERATURE,
     max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
     max_retries: int = EnvDefaults.MAX_RETRIES,

@@ -59,18 +60,28 @@ def generate_commit_message(
     'docs: Update README with installation instructions'
     """
     # Handle both old (string) and new (tuple) prompt formats
-    if isinstance(prompt, tuple):
+    if isinstance(prompt, list):
+        messages = [{**msg} for msg in prompt]
+    elif isinstance(prompt, tuple):
         system_prompt, user_prompt = prompt
+        messages = [
+            {"role": "system", "content": system_prompt or ""},
+            {"role": "user", "content": user_prompt},
+        ]
     else:
-        # Backward compatibility: treat string as user prompt with no system prompt
-        system_prompt = ""
-        user_prompt = prompt
+        # Backward compatibility: treat string as user prompt with empty system prompt
+        user_prompt = str(prompt)
+        messages = [
+            {"role": "system", "content": ""},
+            {"role": "user", "content": user_prompt},
+        ]

     # Provider functions mapping
     provider_funcs = {
         "anthropic": call_anthropic_api,
         "cerebras": call_cerebras_api,
         "chutes": call_chutes_api,
+        "fireworks": call_fireworks_api,
         "gemini": call_gemini_api,
         "groq": call_groq_api,
         "lm-studio": call_lmstudio_api,

@@ -88,8 +99,7 @@ def generate_commit_message(
     return generate_with_retries(
         provider_funcs=provider_funcs,
         model=model,
-        system_prompt=system_prompt,
-        user_prompt=user_prompt,
+        messages=messages,
         temperature=temperature,
         max_tokens=max_tokens,
         max_retries=max_retries,
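
Taken together, the `ai.py` changes mean `generate_commit_message` now accepts three prompt shapes and normalizes each into a chat-style `messages` list before dispatching to a provider. A minimal sketch of the three call forms (the `provider:model` string format and the prompt contents are illustrative, not taken from the diff):

```python
from gac.ai import generate_commit_message

# 1. Legacy string: wrapped as a user message with an empty system prompt
generate_commit_message("anthropic:claude-haiku-4-5", "Summarize the staged diff")

# 2. (system, user) tuple: expanded into a two-message list
generate_commit_message(
    "anthropic:claude-haiku-4-5",
    ("You write conventional commits.", "Summarize the staged diff"),
)

# 3. Full chat history: shallow-copied and passed through unchanged,
#    which is what enables the new conversational reroll flow in main.py
generate_commit_message(
    "anthropic:claude-haiku-4-5",
    [
        {"role": "system", "content": "You write conventional commits."},
        {"role": "user", "content": "Summarize the staged diff"},
        {"role": "assistant", "content": "feat: add fireworks provider"},
        {"role": "user", "content": "Make it shorter"},
    ],
)
```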

{gac-1.9.5 → gac-1.10.1}/src/gac/ai_utils.py

@@ -78,8 +78,7 @@ def _classify_error(error_str: str) -> str:
 def generate_with_retries(
     provider_funcs: dict,
     model: str,
-    system_prompt: str,
-    user_prompt: str,
+    messages: list[dict[str, str]],
     temperature: float,
     max_tokens: int,
     max_retries: int,

@@ -97,6 +96,7 @@ def generate_with_retries(
         "anthropic",
         "cerebras",
         "chutes",
+        "fireworks",
         "gemini",
         "groq",
         "lm-studio",

@@ -111,10 +111,8 @@
     if provider not in supported_providers:
         raise AIError.model_error(f"Unsupported provider: {provider}. Supported providers: {supported_providers}")

-    messages = [
-        {"role": "system", "content": system_prompt},
-        {"role": "user", "content": user_prompt},
-    ]
+    if not messages:
+        raise AIError.model_error("No messages provided for AI generation")

     # Set up spinner
     if quiet:

{gac-1.9.5 → gac-1.10.1}/src/gac/init_cli.py

@@ -35,6 +35,7 @@ def init() -> None:
         ("Anthropic", "claude-haiku-4-5"),
         ("Cerebras", "qwen-3-coder-480b"),
         ("Chutes", "zai-org/GLM-4.6-FP8"),
+        ("Fireworks", "accounts/fireworks/models/gpt-oss-20b"),
         ("Gemini", "gemini-2.5-flash"),
         ("Groq", "meta-llama/llama-4-maverick-17b-128e-instruct"),
         ("LM Studio", "gemma3"),

{gac-1.9.5 → gac-1.10.1}/src/gac/main.py

@@ -202,39 +202,49 @@ def main(
         )
     )

+    conversation_messages: list[dict[str, str]] = []
+    if system_prompt:
+        conversation_messages.append({"role": "system", "content": system_prompt})
+    conversation_messages.append({"role": "user", "content": user_prompt})
+
     try:
-        # Count tokens for both prompts
-        prompt_tokens = count_tokens(system_prompt, model) + count_tokens(user_prompt, model)
-
-        warning_limit_val = config.get("warning_limit_tokens", EnvDefaults.WARNING_LIMIT_TOKENS)
-        assert warning_limit_val is not None
-        warning_limit = int(warning_limit_val)
-        if warning_limit and prompt_tokens > warning_limit:
-            console.print(
-                f"[yellow]⚠️ WARNING: Prompt contains {prompt_tokens} tokens, which exceeds the warning limit of "
-                f"{warning_limit} tokens.[/yellow]"
+        first_iteration = True
+
+        while True:
+            prompt_tokens = count_tokens(conversation_messages, model)
+
+            if first_iteration:
+                warning_limit_val = config.get("warning_limit_tokens", EnvDefaults.WARNING_LIMIT_TOKENS)
+                assert warning_limit_val is not None
+                warning_limit = int(warning_limit_val)
+                if warning_limit and prompt_tokens > warning_limit:
+                    console.print(
+                        f"[yellow]⚠️ WARNING: Prompt contains {prompt_tokens} tokens, which exceeds the warning limit of "
+                        f"{warning_limit} tokens.[/yellow]"
+                    )
+                    if require_confirmation:
+                        proceed = click.confirm("Do you want to continue anyway?", default=True)
+                        if not proceed:
+                            console.print("[yellow]Aborted due to token limit.[/yellow]")
+                            sys.exit(0)
+
+            first_iteration = False
+
+            raw_commit_message = generate_commit_message(
+                model=model,
+                prompt=conversation_messages,
+                temperature=temperature,
+                max_tokens=max_output_tokens,
+                max_retries=max_retries,
+                quiet=quiet,
             )
-            if require_confirmation:
-                proceed = click.confirm("Do you want to continue anyway?", default=True)
-                if not proceed:
-                    console.print("[yellow]Aborted due to token limit.[/yellow]")
-                    sys.exit(0)
+            commit_message = clean_commit_message(raw_commit_message)

-        commit_message = generate_commit_message(
-            model=model,
-            prompt=(system_prompt, user_prompt),
-            temperature=temperature,
-            max_tokens=max_output_tokens,
-            max_retries=max_retries,
-            quiet=quiet,
-        )
-        commit_message = clean_commit_message(commit_message)
+            logger.info("Generated commit message:")
+            logger.info(commit_message)

-        logger.info("Generated commit message:")
-        logger.info(commit_message)
+            conversation_messages.append({"role": "assistant", "content": commit_message})

-        # Reroll loop
-        while True:
             console.print("[bold green]Generated commit message:[/bold green]")
             console.print(Panel(commit_message, title="Commit Message", border_style="cyan"))

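Note that `count_tokens` is now called with the whole `conversation_messages` list rather than with two separate strings, so it evidently accepts message lists as well as plain text. The diff does not show that function; a hedged sketch of what such a dual-mode counter could look like, using the `tiktoken` dependency the package already declares (the function name and encoding choice are assumptions, not gac's actual implementation):

```python
import tiktoken


def count_tokens_sketch(content: str | list[dict[str, str]], model: str) -> int:
    """Illustrative only: token count for a string or a chat-message list."""
    encoding = tiktoken.get_encoding("cl100k_base")  # assumed encoding
    if isinstance(content, str):
        return len(encoding.encode(content))
    # For message lists, sum the tokens of each message's content
    return sum(len(encoding.encode(msg.get("content", ""))) for msg in content)
```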

@@ -252,76 +262,38 @@ def main(
                 response = click.prompt(
                     "Proceed with commit above? [y/n/r <feedback>]", type=str, show_default=False
                 ).strip()
+                response_lower = response.lower()

-                if response.lower() in ["y", "yes"]:
-                    break  # Exit both loops and proceed with commit
-                elif response.lower() in ["n", "no"]:
+                if response_lower in ["y", "yes"]:
+                    break
+                if response_lower in ["n", "no"]:
                     console.print("[yellow]Prompt not accepted. Exiting...[/yellow]")
                     sys.exit(0)
-                elif response.lower() == "r" or response.lower().startswith("r ") or response.lower() == "reroll":
-                    # Parse the reroll command for optional feedback
-                    if response.lower() == "r" or response.lower() == "reroll":
-                        # Simple reroll without feedback
-                        reroll_feedback = ""
+                if response_lower == "r" or response_lower == "reroll" or response_lower.startswith("r "):
+                    if response_lower == "r" or response_lower == "reroll":
+                        feedback_message = (
+                            "Please provide an alternative commit message using the same repository context."
+                        )
                         console.print("[cyan]Regenerating commit message...[/cyan]")
                     else:
-                        # Extract feedback from "r <feedback>"
-                        reroll_feedback = response[2:].strip()  # Remove "r " prefix
-                        console.print(f"[cyan]Regenerating commit message with feedback: {reroll_feedback}[/cyan]")
-
-                    # Combine hints if reroll feedback provided
-                    combined_hint = hint
-                    if reroll_feedback:
-                        # Create conversational prompt with previous attempt and feedback
-                        conversational_hint = f"Previous attempt: '{commit_message}'. User feedback: {reroll_feedback}. Please revise accordingly."
-
-                        if hint:
-                            combined_hint = f"{hint}. {conversational_hint}"
-                        else:
-                            combined_hint = conversational_hint
-
-                        # Regenerate prompt with conversational feedback
-                        reroll_system_prompt, reroll_user_prompt = build_prompt(
-                            status=status,
-                            processed_diff=processed_diff,
-                            diff_stat=diff_stat,
-                            one_liner=one_liner,
-                            hint=combined_hint,
-                            infer_scope=infer_scope,
-                            verbose=verbose,
+                        reroll_feedback = response[2:].strip()
+                        feedback_message = (
+                            f"Please revise the commit message based on this feedback: {reroll_feedback}"
                        )
-                    else:
-                        # No hint given, just reroll with same prompts
-                        reroll_system_prompt, reroll_user_prompt = system_prompt, user_prompt
+                        console.print(f"[cyan]Regenerating commit message with feedback: {reroll_feedback}[/cyan]")

-                    # Update prompts and recalculate token count for the next generation
-                    system_prompt = reroll_system_prompt
-                    user_prompt = reroll_user_prompt
-                    prompt_tokens = count_tokens(system_prompt, model) + count_tokens(user_prompt, model)
+                    conversation_messages.append({"role": "user", "content": feedback_message})

                     console.print()  # Add blank line for readability
+                    break
+
+                console.print(
+                    "[red]Invalid response. Please enter y (yes), n (no), r (reroll), or r <feedback>.[/red]"
+                )

-                    # Generate new message
-                    commit_message = generate_commit_message(
-                        model=model,
-                        prompt=(system_prompt, user_prompt),
-                        temperature=temperature,
-                        max_tokens=max_output_tokens,
-                        max_retries=max_retries,
-                        quiet=quiet,
-                    )
-                    commit_message = clean_commit_message(commit_message)
-                    break  # Exit inner loop, continue outer loop
-                else:
-                    console.print(
-                        "[red]Invalid response. Please enter y (yes), n (no), r (reroll), or r <feedback>.[/red]"
-                    )
-
-            # If we got here with 'y', break the outer loop
-            if response.lower() in ["y", "yes"]:
+            if response_lower in ["y", "yes"]:
                 break
         else:
-            # No confirmation required, exit loop
             break

     if dry_run:
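
The practical effect of this rewrite is that a reroll no longer rebuilds the prompt via `build_prompt`; it appends the user's feedback to the running conversation, so on the next loop iteration the model sees its own previous attempt. After one `r make it shorter` round, `conversation_messages` would look roughly like this (placeholder contents):

```python
conversation_messages = [
    {"role": "system", "content": "<system prompt from build_prompt>"},
    {"role": "user", "content": "<git status, processed diff, and instructions>"},
    {"role": "assistant", "content": "feat: add Fireworks AI provider support"},
    {"role": "user", "content": "Please revise the commit message based on this feedback: make it shorter"},
]
```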

{gac-1.9.5 → gac-1.10.1}/src/gac/providers/__init__.py

@@ -3,6 +3,7 @@
 from .anthropic import call_anthropic_api
 from .cerebras import call_cerebras_api
 from .chutes import call_chutes_api
+from .fireworks import call_fireworks_api
 from .gemini import call_gemini_api
 from .groq import call_groq_api
 from .lmstudio import call_lmstudio_api

@@ -17,6 +18,7 @@ __all__ = [
     "call_anthropic_api",
     "call_cerebras_api",
     "call_chutes_api",
+    "call_fireworks_api",
     "call_gemini_api",
     "call_groq_api",
     "call_lmstudio_api",

gac-1.10.1/src/gac/providers/fireworks.py (new file)

@@ -0,0 +1,38 @@
+"""Fireworks AI API provider for gac."""
+
+import os
+
+import httpx
+
+from gac.errors import AIError
+
+
+def call_fireworks_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+    """Call Fireworks AI API directly."""
+    api_key = os.getenv("FIREWORKS_API_KEY")
+    if not api_key:
+        raise AIError.authentication_error("FIREWORKS_API_KEY not found in environment variables")
+
+    url = "https://api.fireworks.ai/inference/v1/chat/completions"
+    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+    data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+
+    try:
+        response = httpx.post(url, headers=headers, json=data, timeout=120)
+        response.raise_for_status()
+        response_data = response.json()
+        content = response_data["choices"][0]["message"]["content"]
+        if content is None:
+            raise AIError.model_error("Fireworks AI API returned null content")
+        if content == "":
+            raise AIError.model_error("Fireworks AI API returned empty content")
+        return content
+    except httpx.HTTPStatusError as e:
+        if e.response.status_code == 429:
+            raise AIError.rate_limit_error(f"Fireworks AI API rate limit exceeded: {e.response.text}") from e
+        raise AIError.model_error(f"Fireworks AI API error: {e.response.status_code} - {e.response.text}") from e
+    except httpx.TimeoutException as e:
+        raise AIError.timeout_error(f"Fireworks AI API request timed out: {str(e)}") from e
+    except Exception as e:
+        raise AIError.model_error(f"Error calling Fireworks AI API: {str(e)}") from e
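
The new provider follows the same OpenAI-compatible chat-completions pattern as the other modules in `gac.providers`, so it can be exercised directly. A minimal sketch, assuming a valid `FIREWORKS_API_KEY` is exported and that the default model ID from `gac init` is available on your account (the message contents are illustrative):

```python
from gac.providers import call_fireworks_api

# Requires FIREWORKS_API_KEY in the environment; raises AIError otherwise
message = call_fireworks_api(
    model="accounts/fireworks/models/gpt-oss-20b",  # default suggested by gac init
    messages=[
        {"role": "system", "content": "You write conventional commit messages."},
        {"role": "user", "content": "Summarize this diff: ..."},
    ],
    temperature=0.7,
    max_tokens=256,
)
print(message)
```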