gac 1.0.1__tar.gz → 1.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of gac might be problematic; see the package registry's advisory page for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: gac
3
- Version: 1.0.1
3
+ Version: 1.1.0
4
4
  Summary: AI-powered Git commit message generator with multi-provider support
5
5
  Project-URL: Homepage, https://github.com/cellwebb/gac
6
6
  Project-URL: Documentation, https://github.com/cellwebb/gac#readme
@@ -56,7 +56,7 @@ Description-Content-Type: text/markdown
56
56
 
57
57
  - **LLM-Powered Commit Messages:** Automatically generates clear, concise, and context-aware commit messages using large language models.
58
58
  - **Deep Contextual Analysis:** Understands your code by analyzing staged changes, repository structure, and recent commit history to provide highly relevant suggestions.
59
- - **Multi-Provider & Model Support:** Flexibly works with various leading AI providers (like Anthropic, Cerebras, Groq, OpenAI) and models, easily configured through an interactive setup or environment variables.
59
+ - **Multi-Provider & Model Support:** Flexibly works with various leading AI providers (like Anthropic, Cerebras, Groq, OpenRouter, OpenAI) and models, easily configured through an interactive setup or environment variables.
60
60
  - **Seamless Git Workflow:** Integrates smoothly into your existing Git routine as a simple drop-in replacement for `git commit`.
61
61
  - **Extensive Customization:** Tailor commit messages to your needs with a rich set of flags, including one-liners (`-o`), AI hints (`-h`), scope inference (`-s`), and specific model selection (`-m`).
62
62
  - **Streamlined Workflow Commands:** Boost your productivity with convenient options to stage all changes (`-a`), auto-confirm commits (`-y`), and push to your remote repository (`-p`) in a single step.
@@ -136,6 +136,11 @@ Example `$HOME/.gac.env` output:
136
136
  ```env
137
137
  GAC_MODEL=anthropic:claude-3-5-haiku-latest
138
138
  ANTHROPIC_API_KEY=your_anthropic_key_here
139
+ # Optional: configure OpenRouter
140
+ # GAC_MODEL=openrouter:openrouter/auto
141
+ # OPENROUTER_API_KEY=your_openrouter_key_here
142
+ # OPENROUTER_SITE_URL=https://example.com
143
+ # OPENROUTER_SITE_NAME=Example App
139
144
  ```
140
145
 
141
146
  Alternatively, you can configure `gac` using environment variables or by manually creating/editing the configuration file.
@@ -14,7 +14,7 @@
14
14
 
15
15
  - **LLM-Powered Commit Messages:** Automatically generates clear, concise, and context-aware commit messages using large language models.
16
16
  - **Deep Contextual Analysis:** Understands your code by analyzing staged changes, repository structure, and recent commit history to provide highly relevant suggestions.
17
- - **Multi-Provider & Model Support:** Flexibly works with various leading AI providers (like Anthropic, Cerebras, Groq, OpenAI) and models, easily configured through an interactive setup or environment variables.
17
+ - **Multi-Provider & Model Support:** Flexibly works with various leading AI providers (like Anthropic, Cerebras, Groq, OpenRouter, OpenAI) and models, easily configured through an interactive setup or environment variables.
18
18
  - **Seamless Git Workflow:** Integrates smoothly into your existing Git routine as a simple drop-in replacement for `git commit`.
19
19
  - **Extensive Customization:** Tailor commit messages to your needs with a rich set of flags, including one-liners (`-o`), AI hints (`-h`), scope inference (`-s`), and specific model selection (`-m`).
20
20
  - **Streamlined Workflow Commands:** Boost your productivity with convenient options to stage all changes (`-a`), auto-confirm commits (`-y`), and push to your remote repository (`-p`) in a single step.
@@ -94,6 +94,11 @@ Example `$HOME/.gac.env` output:
94
94
  ```env
95
95
  GAC_MODEL=anthropic:claude-3-5-haiku-latest
96
96
  ANTHROPIC_API_KEY=your_anthropic_key_here
97
+ # Optional: configure OpenRouter
98
+ # GAC_MODEL=openrouter:openrouter/auto
99
+ # OPENROUTER_API_KEY=your_openrouter_key_here
100
+ # OPENROUTER_SITE_URL=https://example.com
101
+ # OPENROUTER_SITE_NAME=Example App
97
102
  ```
98
103
 
99
104
  Alternatively, you can configure `gac` using environment variables or by manually creating/editing the configuration file.
@@ -2,15 +2,14 @@
2
2
 
3
3
  from gac.__version__ import __version__
4
4
  from gac.ai import generate_commit_message
5
- from gac.ai_providers import (
6
- anthropic_generate,
7
- cerebras_generate,
8
- groq_generate,
9
- ollama_generate,
10
- openai_generate,
11
- )
12
5
  from gac.git import get_staged_files, push_changes
13
6
  from gac.prompt import build_prompt, clean_commit_message
7
+ from gac.providers.anthropic import generate as anthropic_generate
8
+ from gac.providers.cerebras import generate as cerebras_generate
9
+ from gac.providers.groq import generate as groq_generate
10
+ from gac.providers.ollama import generate as ollama_generate
11
+ from gac.providers.openai import generate as openai_generate
12
+ from gac.providers.openrouter import generate as openrouter_generate
14
13
 
15
14
  __all__ = [
16
15
  "__version__",
@@ -24,4 +23,5 @@ __all__ = [
24
23
  "groq_generate",
25
24
  "ollama_generate",
26
25
  "openai_generate",
26
+ "openrouter_generate",
27
27
  ]
@@ -1,3 +1,3 @@
1
1
  """Version information for gac package."""
2
2
 
3
- __version__ = "1.0.1"
3
+ __version__ = "1.1.0"
@@ -0,0 +1,80 @@
1
+ """AI provider integration for gac.
2
+
3
+ This module provides core functionality for AI provider interaction.
4
+ It consolidates all AI-related functionality including token counting and commit message generation.
5
+ """
6
+
7
+ import logging
8
+
9
+ from gac.constants import EnvDefaults
10
+ from gac.errors import AIError
11
+ from gac.providers.anthropic import generate as anthropic_generate
12
+ from gac.providers.cerebras import generate as cerebras_generate
13
+ from gac.providers.groq import generate as groq_generate
14
+ from gac.providers.ollama import generate as ollama_generate
15
+ from gac.providers.openai import generate as openai_generate
16
+ from gac.providers.openrouter import generate as openrouter_generate
17
+
18
+ logger = logging.getLogger(__name__)
19
+
20
+
21
+ def generate_commit_message(
22
+ model: str,
23
+ prompt: str | tuple[str, str],
24
+ temperature: float = EnvDefaults.TEMPERATURE,
25
+ max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
26
+ max_retries: int = EnvDefaults.MAX_RETRIES,
27
+ quiet: bool = False,
28
+ ) -> str:
29
+ """Generate a commit message using direct API calls to AI providers.
30
+
31
+ Args:
32
+ model: The model to use in provider:model_name format (e.g., 'anthropic:claude-3-5-haiku-latest')
33
+ prompt: Either a string prompt (for backward compatibility) or tuple of (system_prompt, user_prompt)
34
+ temperature: Controls randomness (0.0-1.0), lower values are more deterministic
35
+ max_tokens: Maximum tokens in the response
36
+ max_retries: Number of retry attempts if generation fails
37
+ quiet: If True, suppress progress indicators
38
+
39
+ Returns:
40
+ A formatted commit message string
41
+
42
+ Raises:
43
+ AIError: If generation fails after max_retries attempts
44
+
45
+ Example:
46
+ >>> model = "anthropic:claude-3-5-haiku-latest"
47
+ >>> system_prompt, user_prompt = build_prompt("On branch main", "diff --git a/README.md b/README.md")
48
+ >>> generate_commit_message(model, (system_prompt, user_prompt))
49
+ 'docs: Update README with installation instructions'
50
+ """
51
+ try:
52
+ _, _ = model.split(":", 1)
53
+ except ValueError as err:
54
+ raise AIError.model_error(
55
+ f"Invalid model format: {model}. Please use the format 'provider:model_name'."
56
+ ) from err
57
+
58
+ # Parse the model string to extract provider and model name
59
+ try:
60
+ provider, model_name = model.split(":", 1)
61
+ except ValueError as err:
62
+ raise AIError.model_error(
63
+ f"Invalid model format: {model}. Please use the format 'provider:model_name'."
64
+ ) from err
65
+
66
+ # Route to the appropriate provider function
67
+ if provider == "openai":
68
+ return openai_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
69
+ elif provider == "anthropic":
70
+ return anthropic_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
71
+ elif provider == "groq":
72
+ return groq_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
73
+ elif provider == "cerebras":
74
+ return cerebras_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
75
+ elif provider == "ollama":
76
+ return ollama_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
77
+ elif provider == "openrouter":
78
+ return openrouter_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
79
+ else:
80
+ raise AIError.model_error(f"Unsupported provider: {provider}")
@@ -1,7 +1,6 @@
1
- """AI provider integration for gac.
1
+ """Utilities for AI provider integration for gac.
2
2
 
3
- This module provides core functionality for AI provider interaction.
4
- It consolidates all AI-related functionality including token counting and commit message generation.
3
+ This module provides utility functions that support the AI provider implementations.
5
4
  """
6
5
 
7
6
  import logging
@@ -12,15 +11,7 @@ from typing import Any
12
11
  import httpx
13
12
  import tiktoken
14
13
 
15
- from gac.ai_providers import (
16
- anthropic_generate,
17
- cerebras_generate,
18
- groq_generate,
19
- ollama_generate,
20
- openai_generate,
21
- )
22
- from gac.constants import EnvDefaults, Utility
23
- from gac.errors import AIError
14
+ from gac.constants import Utility
24
15
 
25
16
  logger = logging.getLogger(__name__)
26
17
 
@@ -120,61 +111,24 @@ def get_encoding(model: str) -> tiktoken.Encoding:
120
111
  return tiktoken.get_encoding(Utility.DEFAULT_ENCODING)
121
112
 
122
113
 
123
- def generate_commit_message(
124
- model: str,
125
- prompt: str | tuple[str, str],
126
- temperature: float = EnvDefaults.TEMPERATURE,
127
- max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
128
- max_retries: int = EnvDefaults.MAX_RETRIES,
129
- quiet: bool = False,
130
- ) -> str:
131
- """Generate a commit message using direct API calls to AI providers.
132
-
133
- Args:
134
- model: The model to use in provider:model_name format (e.g., 'anthropic:claude-3-5-haiku-latest')
135
- prompt: Either a string prompt (for backward compatibility) or tuple of (system_prompt, user_prompt)
136
- temperature: Controls randomness (0.0-1.0), lower values are more deterministic
137
- max_tokens: Maximum tokens in the response
138
- max_retries: Number of retry attempts if generation fails
139
- quiet: If True, suppress progress indicators
140
-
141
- Returns:
142
- A formatted commit message string
143
-
144
- Raises:
145
- AIError: If generation fails after max_retries attempts
146
-
147
- Example:
148
- >>> model = "anthropic:claude-3-5-haiku-latest"
149
- >>> system_prompt, user_prompt = build_prompt("On branch main", "diff --git a/README.md b/README.md")
150
- >>> generate_commit_message(model, (system_prompt, user_prompt))
151
- 'docs: Update README with installation instructions'
152
- """
153
- try:
154
- _, _ = model.split(":", 1)
155
- except ValueError as err:
156
- raise AIError.model_error(
157
- f"Invalid model format: {model}. Please use the format 'provider:model_name'."
158
- ) from err
159
-
160
- # Parse the model string to extract provider and model name
161
- try:
162
- provider, model_name = model.split(":", 1)
163
- except ValueError as err:
164
- raise AIError.model_error(
165
- f"Invalid model format: {model}. Please use the format 'provider:model_name'."
166
- ) from err
167
-
168
- # Route to the appropriate provider function
169
- if provider == "openai":
170
- return openai_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
171
- elif provider == "anthropic":
172
- return anthropic_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
173
- elif provider == "groq":
174
- return groq_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
175
- elif provider == "cerebras":
176
- return cerebras_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
177
- elif provider == "ollama":
178
- return ollama_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
114
+ def _classify_error(error_str: str) -> str:
115
+ """Classify error types based on error message content."""
116
+ error_str = error_str.lower()
117
+
118
+ if (
119
+ "api key" in error_str
120
+ or "unauthorized" in error_str
121
+ or "authentication" in error_str
122
+ or "invalid api key" in error_str
123
+ ):
124
+ return "authentication"
125
+ elif "timeout" in error_str or "timed out" in error_str or "request timeout" in error_str:
126
+ return "timeout"
127
+ elif "rate limit" in error_str or "too many requests" in error_str or "rate limit exceeded" in error_str:
128
+ return "rate_limit"
129
+ elif "connect" in error_str or "network" in error_str or "network connection failed" in error_str:
130
+ return "connection"
131
+ elif "model" in error_str or "not found" in error_str or "model not found" in error_str:
132
+ return "model"
179
133
  else:
180
- raise AIError.model_error(f"Unsupported provider: {provider}")
134
+ return "unknown"
@@ -24,6 +24,7 @@ def init() -> None:
24
24
  ("Cerebras", "qwen-3-coder-480b"),
25
25
  ("Groq", "meta-llama/llama-4-maverick-17b-128e-instruct"),
26
26
  ("Ollama", "gemma3"),
27
+ ("OpenRouter", "openrouter/auto"),
27
28
  ("OpenAI", "gpt-4.1-mini"),
28
29
  ]
29
30
  provider_names = [p[0] for p in providers]
@@ -10,7 +10,8 @@ import click
10
10
  from rich.console import Console
11
11
  from rich.panel import Panel
12
12
 
13
- from gac.ai import count_tokens, generate_commit_message
13
+ from gac.ai import generate_commit_message
14
+ from gac.ai_utils import count_tokens
14
15
  from gac.config import load_config
15
16
  from gac.constants import EnvDefaults, Utility
16
17
  from gac.errors import AIError, GitError, handle_error
@@ -10,7 +10,7 @@ import logging
10
10
  import os
11
11
  import re
12
12
 
13
- from gac.ai import count_tokens
13
+ from gac.ai_utils import count_tokens
14
14
  from gac.constants import (
15
15
  CodePatternImportance,
16
16
  FilePatterns,
@@ -0,0 +1 @@
1
+ """AI providers submodule for gac."""
@@ -0,0 +1,141 @@
1
+ """Anthropic API provider for gac."""
2
+
3
+ import logging
4
+ import os
5
+ import time
6
+
7
+ import httpx
8
+ from halo import Halo
9
+
10
+ from gac.ai_utils import _classify_error
11
+ from gac.constants import EnvDefaults
12
+ from gac.errors import AIError
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+
17
+ def generate(
18
+ model: str,
19
+ prompt: str | tuple[str, str],
20
+ temperature: float = EnvDefaults.TEMPERATURE,
21
+ max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
22
+ max_retries: int = EnvDefaults.MAX_RETRIES,
23
+ quiet: bool = False,
24
+ ) -> str:
25
+ """Generate commit message using Anthropic API with retry logic.
26
+
27
+ Args:
28
+ model: The model name (e.g., 'claude-3-5-haiku-latest', 'claude-3-opus-latest')
29
+ prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
30
+ temperature: Controls randomness (0.0-1.0)
31
+ max_tokens: Maximum tokens in the response
32
+ max_retries: Number of retry attempts if generation fails
33
+ quiet: If True, suppress progress indicators
34
+
35
+ Returns:
36
+ A formatted commit message string
37
+
38
+ Raises:
39
+ AIError: If generation fails after max_retries attempts
40
+ """
41
+ api_key = os.getenv("ANTHROPIC_API_KEY")
42
+ if not api_key:
43
+ raise AIError.model_error("ANTHROPIC_API_KEY environment variable not set")
44
+
45
+ # Handle both old (string) and new (tuple) prompt formats
46
+ if isinstance(prompt, tuple):
47
+ system_prompt, user_prompt = prompt
48
+ messages = [{"role": "user", "content": user_prompt}]
49
+ payload = {
50
+ "model": model,
51
+ "messages": messages,
52
+ "system": system_prompt,
53
+ "temperature": temperature,
54
+ "max_tokens": max_tokens,
55
+ }
56
+ else:
57
+ # Backward compatibility: treat string as user prompt
58
+ messages = [{"role": "user", "content": prompt}]
59
+ payload = {
60
+ "model": model,
61
+ "messages": messages,
62
+ "temperature": temperature,
63
+ "max_tokens": max_tokens,
64
+ }
65
+
66
+ headers = {
67
+ "Content-Type": "application/json",
68
+ "x-api-key": api_key,
69
+ "anthropic-version": "2023-06-01",
70
+ }
71
+
72
+ return _make_request_with_retry(
73
+ url="https://api.anthropic.com/v1/messages",
74
+ headers=headers,
75
+ payload=payload,
76
+ provider_name=f"Anthropic {model}",
77
+ max_retries=max_retries,
78
+ quiet=quiet,
79
+ response_parser=lambda r: r["content"][0]["text"],
80
+ )
81
+
82
+
83
+ def _make_request_with_retry(
84
+ url: str,
85
+ headers: dict,
86
+ payload: dict,
87
+ provider_name: str,
88
+ max_retries: int,
89
+ quiet: bool,
90
+ response_parser: callable,
91
+ ) -> str:
92
+ """Make HTTP request with retry logic and common error handling."""
93
+ if quiet:
94
+ spinner = None
95
+ else:
96
+ spinner = Halo(text=f"Generating commit message with {provider_name}...", spinner="dots")
97
+ spinner.start()
98
+
99
+ last_error = None
100
+ retry_count = 0
101
+
102
+ while retry_count < max_retries:
103
+ try:
104
+ logger.debug(f"Trying with {provider_name} (attempt {retry_count + 1}/{max_retries})")
105
+
106
+ with httpx.Client(timeout=30.0) as client:
107
+ response = client.post(url, headers=headers, json=payload)
108
+ response.raise_for_status()
109
+
110
+ response_data = response.json()
111
+ message = response_parser(response_data)
112
+
113
+ if spinner:
114
+ spinner.succeed(f"Generated commit message with {provider_name}")
115
+
116
+ return message
117
+
118
+ except Exception as e:
119
+ last_error = e
120
+ retry_count += 1
121
+
122
+ if retry_count == max_retries:
123
+ logger.warning(f"Error generating commit message: {e}. Giving up.")
124
+ break
125
+
126
+ wait_time = 2**retry_count
127
+ logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
128
+ if spinner:
129
+ for i in range(wait_time, 0, -1):
130
+ spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
131
+ time.sleep(1)
132
+ else:
133
+ time.sleep(wait_time)
134
+
135
+ if spinner:
136
+ spinner.fail(f"Failed to generate commit message with {provider_name}")
137
+
138
+ error_type = _classify_error(str(last_error))
139
+ raise AIError(
140
+ f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
141
+ )
@@ -0,0 +1,134 @@
1
+ """Cerebras API provider for gac."""
2
+
3
+ import logging
4
+ import os
5
+ import time
6
+
7
+ import httpx
8
+ from halo import Halo
9
+
10
+ from gac.ai_utils import _classify_error
11
+ from gac.constants import EnvDefaults
12
+ from gac.errors import AIError
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+
17
+ def generate(
18
+ model: str,
19
+ prompt: str | tuple[str, str],
20
+ temperature: float = EnvDefaults.TEMPERATURE,
21
+ max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
22
+ max_retries: int = EnvDefaults.MAX_RETRIES,
23
+ quiet: bool = False,
24
+ ) -> str:
25
+ """Generate commit message using Cerebras API with retry logic.
26
+
27
+ Args:
28
+ model: The model name (e.g., 'llama3.1-8b', 'llama3.1-70b')
29
+ prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
30
+ temperature: Controls randomness (0.0-1.0)
31
+ max_tokens: Maximum tokens in the response
32
+ max_retries: Number of retry attempts if generation fails
33
+ quiet: If True, suppress progress indicators
34
+
35
+ Returns:
36
+ A formatted commit message string
37
+
38
+ Raises:
39
+ AIError: If generation fails after max_retries attempts
40
+ """
41
+ api_key = os.getenv("CEREBRAS_API_KEY")
42
+ if not api_key:
43
+ raise AIError.model_error("CEREBRAS_API_KEY environment variable not set")
44
+
45
+ # Handle both old (string) and new (tuple) prompt formats
46
+ if isinstance(prompt, tuple):
47
+ system_prompt, user_prompt = prompt
48
+ messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
49
+ else:
50
+ # Backward compatibility: treat string as user prompt
51
+ messages = [{"role": "user", "content": prompt}]
52
+
53
+ payload = {
54
+ "model": model,
55
+ "messages": messages,
56
+ "temperature": temperature,
57
+ "max_tokens": max_tokens,
58
+ }
59
+
60
+ headers = {
61
+ "Content-Type": "application/json",
62
+ "Authorization": f"Bearer {api_key}",
63
+ }
64
+
65
+ return _make_request_with_retry(
66
+ url="https://api.cerebras.ai/v1/chat/completions",
67
+ headers=headers,
68
+ payload=payload,
69
+ provider_name=f"Cerebras {model}",
70
+ max_retries=max_retries,
71
+ quiet=quiet,
72
+ response_parser=lambda r: r["choices"][0]["message"]["content"],
73
+ )
74
+
75
+
76
+ def _make_request_with_retry(
77
+ url: str,
78
+ headers: dict,
79
+ payload: dict,
80
+ provider_name: str,
81
+ max_retries: int,
82
+ quiet: bool,
83
+ response_parser: callable,
84
+ ) -> str:
85
+ """Make HTTP request with retry logic and common error handling."""
86
+ if quiet:
87
+ spinner = None
88
+ else:
89
+ spinner = Halo(text=f"Generating commit message with {provider_name}...", spinner="dots")
90
+ spinner.start()
91
+
92
+ last_error = None
93
+ retry_count = 0
94
+
95
+ while retry_count < max_retries:
96
+ try:
97
+ logger.debug(f"Trying with {provider_name} (attempt {retry_count + 1}/{max_retries})")
98
+
99
+ with httpx.Client(timeout=30.0) as client:
100
+ response = client.post(url, headers=headers, json=payload)
101
+ response.raise_for_status()
102
+
103
+ response_data = response.json()
104
+ message = response_parser(response_data)
105
+
106
+ if spinner:
107
+ spinner.succeed(f"Generated commit message with {provider_name}")
108
+
109
+ return message
110
+
111
+ except Exception as e:
112
+ last_error = e
113
+ retry_count += 1
114
+
115
+ if retry_count == max_retries:
116
+ logger.warning(f"Error generating commit message: {e}. Giving up.")
117
+ break
118
+
119
+ wait_time = 2**retry_count
120
+ logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
121
+ if spinner:
122
+ for i in range(wait_time, 0, -1):
123
+ spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
124
+ time.sleep(1)
125
+ else:
126
+ time.sleep(wait_time)
127
+
128
+ if spinner:
129
+ spinner.fail(f"Failed to generate commit message with {provider_name}")
130
+
131
+ error_type = _classify_error(str(last_error))
132
+ raise AIError(
133
+ f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
134
+ )