gac 0.19.1__tar.gz → 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



@@ -209,3 +209,4 @@ scripts/changelog_prompt.md
 **/.claude/settings.local.json
 .plandex-v2/
 .vscode/
+.serena/
@@ -1,6 +1,6 @@
 # MIT License
 
-Copyright (c) 2025 cellwebb <cell@criteria.dev>
+Copyright (c) 2025 cellwebb <cellwebb@users.noreply.github.com>
 
 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
 documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: gac
-Version: 0.19.1
+Version: 1.0.0
 Summary: AI-powered Git commit message generator with multi-provider support
 Project-URL: Homepage, https://github.com/cellwebb/gac
 Project-URL: Documentation, https://github.com/cellwebb/gac#readme
@@ -20,19 +20,15 @@ Classifier: Programming Language :: Python :: 3.13
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
 Requires-Python: >=3.10
-Requires-Dist: aisuite>=0.1.11
 Requires-Dist: anthropic>=0.68.0
-Requires-Dist: cerebras-cloud-sdk==1.49.0
 Requires-Dist: click>=8.3.0
-Requires-Dist: docstring-parser
-Requires-Dist: groq>=0.31.1
 Requires-Dist: halo
-Requires-Dist: ollama>=0.5.4
-Requires-Dist: openai>=1.108.1
+Requires-Dist: httpx>=0.28.0
 Requires-Dist: pydantic>=2.11.9
 Requires-Dist: python-dotenv>=1.1.1
 Requires-Dist: questionary
 Requires-Dist: rich>=14.1.0
+Requires-Dist: sumy
 Requires-Dist: tiktoken>=0.11.0
 Provides-Extra: dev
 Requires-Dist: build; extra == 'dev'
@@ -24,25 +24,27 @@ classifiers = [
     "Programming Language :: Python :: Implementation :: PyPy",
 ]
 dependencies = [
-    # AI components - base providers
-    "aisuite>=0.1.11",
+    # HTTP client for AI provider APIs
+    "httpx>=0.28.0",
+
+    # Anthropic SDK (token counting)
     "anthropic>=0.68.0",
-    "cerebras_cloud_sdk==1.49.0",
-    "groq>=0.31.1",
-    "ollama>=0.5.4",
-    "openai>=1.108.1",
+
+    # Token counting (OpenAI models)
     "tiktoken>=0.11.0",
-
+
     # Core functionality
     "pydantic>=2.11.9",
     "python-dotenv>=1.1.1",
-    "docstring_parser", # needed by aisuite but not in their deps
-
+
     # CLI and formatting
     "click>=8.3.0",
     "halo",
     "questionary",
     "rich>=14.1.0",
+
+    # Summarization and document processing
+    "sumy",
 ]
 
 [project.scripts]
@@ -130,10 +132,10 @@ include = [
 [tool.hatch.envs.default]
 dependencies = [
     # AI components
-    "aisuite",
     "anthropic",
     "openai",
     "groq",
+    "httpx",
 
     # Core functionality
     "pydantic",
@@ -2,6 +2,13 @@
 
 from gac.__version__ import __version__
 from gac.ai import generate_commit_message
+from gac.ai_providers import (
+    anthropic_generate,
+    cerebras_generate,
+    groq_generate,
+    ollama_generate,
+    openai_generate,
+)
 from gac.git import get_staged_files, push_changes
 from gac.prompt import build_prompt, clean_commit_message
 
@@ -12,4 +19,9 @@ __all__ = [
     "clean_commit_message",
     "get_staged_files",
     "push_changes",
+    "anthropic_generate",
+    "cerebras_generate",
+    "groq_generate",
+    "ollama_generate",
+    "openai_generate",
 ]
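
With 1.0.0 the per-provider generators are re-exported from the package root alongside generate_commit_message. A small usage sketch, with names taken from the updated __all__; the prompt text is illustrative, the model name 'llama3' comes from the ollama_generate docstring, a local Ollama server is assumed, and the remaining generation parameters are left at their defaults.

# Sketch of the new top-level exports; prompt text and model choice are illustrative.
from gac import generate_commit_message, ollama_generate

# High-level entry point: the provider prefix selects the backend.
msg = generate_commit_message("ollama:llama3", ("You write git commit messages.", "diff --git a/README.md ..."))

# Or call a provider function directly with a bare model name (no provider prefix).
msg = ollama_generate("llama3", "Summarize the staged changes as a commit message.", quiet=True)
print(msg)
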
@@ -1,3 +1,3 @@
 """Version information for gac package."""
 
-__version__ = "0.19.1"
+__version__ = "1.0.0"
@@ -5,14 +5,18 @@ It consolidates all AI-related functionality including token counting and commit
 """
 
 import logging
-import time
 from functools import lru_cache
 from typing import Any
 
-import aisuite as ai
 import tiktoken
-from halo import Halo
 
+from gac.ai_providers import (
+    anthropic_generate,
+    cerebras_generate,
+    groq_generate,
+    ollama_generate,
+    openai_generate,
+)
 from gac.constants import EnvDefaults, Utility
 from gac.errors import AIError
 
@@ -77,7 +81,7 @@ def generate_commit_message(
     max_retries: int = EnvDefaults.MAX_RETRIES,
     quiet: bool = False,
 ) -> str:
-    """Generate a commit message using aisuite.
+    """Generate a commit message using direct API calls to AI providers.
 
     Args:
         model: The model to use in provider:model_name format (e.g., 'anthropic:claude-3-5-haiku-latest')
@@ -106,76 +110,24 @@
             f"Invalid model format: {model}. Please use the format 'provider:model_name'."
         ) from err
 
-    client = ai.Client()
-
-    # Handle both old (string) and new (tuple) prompt formats
-    if isinstance(prompt, tuple):
-        system_prompt, user_prompt = prompt
-        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
-    else:
-        # Backward compatibility: treat string as user prompt
-        messages = [{"role": "user", "content": prompt}]
-
-    if quiet:
-        spinner = None
-    else:
-        spinner = Halo(text=f"Generating commit message with {model}...", spinner="dots")
-        spinner.start()
-
-    last_error = None
+    # Parse the model string to extract provider and model name
+    try:
+        provider, model_name = model.split(":", 1)
+    except ValueError as err:
+        raise AIError.model_error(
+            f"Invalid model format: {model}. Please use the format 'provider:model_name'."
+        ) from err
 
-    retry_count = 0
-    while retry_count < max_retries:
-        try:
-            logger.debug(f"Trying with model {model} (attempt {retry_count + 1}/{max_retries})")
-            response = client.chat.completions.create(
-                model=model,
-                messages=messages,
-                temperature=temperature,
-                max_tokens=max_tokens,
-            )
-
-            message = response.choices[0].message.content if hasattr(response, "choices") else response.content
-
-            if spinner:
-                spinner.succeed(f"Generated commit message with {model}")
-
-            return message
-
-        except Exception as e:
-            last_error = e
-            retry_count += 1
-
-            if retry_count == max_retries:
-                logger.warning(f"Error generating commit message: {e}. Giving up.")
-                break
-
-            wait_time = 2**retry_count
-            logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
-            if spinner:
-                for i in range(wait_time, 0, -1):
-                    spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
-                    time.sleep(1)
-            else:
-                time.sleep(wait_time)
-    if spinner:
-        spinner.fail("Failed to generate commit message")
-
-    error_str = str(last_error).lower()
-
-    if "api key" in error_str or "unauthorized" in error_str or "authentication" in error_str:
-        error_type = "authentication"
-    elif "timeout" in error_str:
-        error_type = "timeout"
-    elif "rate limit" in error_str or "too many requests" in error_str:
-        error_type = "rate_limit"
-    elif "connect" in error_str or "network" in error_str:
-        error_type = "connection"
-    elif "model" in error_str or "not found" in error_str:
-        error_type = "model"
+    # Route to the appropriate provider function
+    if provider == "openai":
+        return openai_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
+    elif provider == "anthropic":
+        return anthropic_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
+    elif provider == "groq":
+        return groq_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
+    elif provider == "cerebras":
+        return cerebras_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
+    elif provider == "ollama":
+        return ollama_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
     else:
-        error_type = "unknown"
-
-    raise AIError(
-        f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
-    )
+        raise AIError.model_error(f"Unsupported provider: {provider}")
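
In 1.0.0, generate_commit_message now only parses the "provider:model_name" string and dispatches to the matching function in gac.ai_providers; the retry loop, spinner, and error classification that used to live here have moved into that module. A hedged dispatch sketch follows; the model names come from the new ai_providers docstrings, the prompt text is illustrative, and the real calls require the corresponding API key environment variables.

# Dispatch sketch: the provider prefix picks the backend; anything else raises AIError.
# GROQ_API_KEY / ANTHROPIC_API_KEY must be set for the real calls to succeed.
from gac.ai import generate_commit_message
from gac.errors import AIError

prompt = ("You write conventional commit messages.", "diff --git a/src/app.py b/src/app.py ...")

msg = generate_commit_message("groq:llama3-8b-8192", prompt)                 # routed to groq_generate
msg = generate_commit_message("anthropic:claude-3-5-haiku-latest", prompt)   # routed to anthropic_generate

try:
    generate_commit_message("nonexistent:model", prompt)
except AIError:
    pass  # raised as "Unsupported provider: nonexistent"
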
@@ -0,0 +1,404 @@
+"""Direct HTTP API calls to AI providers using httpx.
+
+This module provides functions for making direct HTTP API calls to various AI providers.
+Each provider has its own function to generate commit messages using only httpx.
+"""
+
+import logging
+import os
+import time
+
+import httpx
+from halo import Halo
+
+from gac.constants import EnvDefaults
+from gac.errors import AIError
+
+logger = logging.getLogger(__name__)
+
+
+def _classify_error(error_str: str) -> str:
+    """Classify error types based on error message content."""
+    error_str = error_str.lower()
+
+    if (
+        "api key" in error_str
+        or "unauthorized" in error_str
+        or "authentication" in error_str
+        or "invalid api key" in error_str
+    ):
+        return "authentication"
+    elif "timeout" in error_str or "timed out" in error_str or "request timeout" in error_str:
+        return "timeout"
+    elif "rate limit" in error_str or "too many requests" in error_str or "rate limit exceeded" in error_str:
+        return "rate_limit"
+    elif "connect" in error_str or "network" in error_str or "network connection failed" in error_str:
+        return "connection"
+    elif "model" in error_str or "not found" in error_str or "model not found" in error_str:
+        return "model"
+    else:
+        return "unknown"
+
+
+def anthropic_generate(
+    model: str,
+    prompt: str | tuple[str, str],
+    temperature: float = EnvDefaults.TEMPERATURE,
+    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+    max_retries: int = EnvDefaults.MAX_RETRIES,
+    quiet: bool = False,
+) -> str:
+    """Generate commit message using Anthropic API with retry logic.
+
+    Args:
+        model: The model name (e.g., 'claude-3-5-haiku-latest', 'claude-3-opus-latest')
+        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+        temperature: Controls randomness (0.0-1.0)
+        max_tokens: Maximum tokens in the response
+        max_retries: Number of retry attempts if generation fails
+        quiet: If True, suppress progress indicators
+
+    Returns:
+        A formatted commit message string
+
+    Raises:
+        AIError: If generation fails after max_retries attempts
+    """
+    api_key = os.getenv("ANTHROPIC_API_KEY")
+    if not api_key:
+        raise AIError.model_error("ANTHROPIC_API_KEY environment variable not set")
+
+    # Handle both old (string) and new (tuple) prompt formats
+    if isinstance(prompt, tuple):
+        system_prompt, user_prompt = prompt
+        messages = [{"role": "user", "content": user_prompt}]
+        payload = {
+            "model": model,
+            "messages": messages,
+            "system": system_prompt,
+            "temperature": temperature,
+            "max_tokens": max_tokens,
+        }
+    else:
+        # Backward compatibility: treat string as user prompt
+        messages = [{"role": "user", "content": prompt}]
+        payload = {
+            "model": model,
+            "messages": messages,
+            "temperature": temperature,
+            "max_tokens": max_tokens,
+        }
+
+    headers = {
+        "Content-Type": "application/json",
+        "x-api-key": api_key,
+        "anthropic-version": "2023-06-01",
+    }
+
+    return _make_request_with_retry(
+        url="https://api.anthropic.com/v1/messages",
+        headers=headers,
+        payload=payload,
+        provider_name=f"Anthropic {model}",
+        max_retries=max_retries,
+        quiet=quiet,
+        response_parser=lambda r: r["content"][0]["text"],
+    )
+
+
+def cerebras_generate(
+    model: str,
+    prompt: str | tuple[str, str],
+    temperature: float = EnvDefaults.TEMPERATURE,
+    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+    max_retries: int = EnvDefaults.MAX_RETRIES,
+    quiet: bool = False,
+) -> str:
+    """Generate commit message using Cerebras API with retry logic.
+
+    Args:
+        model: The model name (e.g., 'llama3.1-8b', 'llama3.1-70b')
+        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+        temperature: Controls randomness (0.0-1.0)
+        max_tokens: Maximum tokens in the response
+        max_retries: Number of retry attempts if generation fails
+        quiet: If True, suppress progress indicators
+
+    Returns:
+        A formatted commit message string
+
+    Raises:
+        AIError: If generation fails after max_retries attempts
+    """
+    api_key = os.getenv("CEREBRAS_API_KEY")
+    if not api_key:
+        raise AIError.model_error("CEREBRAS_API_KEY environment variable not set")
+
+    # Handle both old (string) and new (tuple) prompt formats
+    if isinstance(prompt, tuple):
+        system_prompt, user_prompt = prompt
+        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+    else:
+        # Backward compatibility: treat string as user prompt
+        messages = [{"role": "user", "content": prompt}]
+
+    payload = {
+        "model": model,
+        "messages": messages,
+        "temperature": temperature,
+        "max_tokens": max_tokens,
+    }
+
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {api_key}",
+    }
+
+    return _make_request_with_retry(
+        url="https://api.cerebras.ai/v1/chat/completions",
+        headers=headers,
+        payload=payload,
+        provider_name=f"Cerebras {model}",
+        max_retries=max_retries,
+        quiet=quiet,
+        response_parser=lambda r: r["choices"][0]["message"]["content"],
+    )
+
+
+def groq_generate(
+    model: str,
+    prompt: str | tuple[str, str],
+    temperature: float = EnvDefaults.TEMPERATURE,
+    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+    max_retries: int = EnvDefaults.MAX_RETRIES,
+    quiet: bool = False,
+) -> str:
+    """Generate commit message using Groq API with retry logic.
+
+    Args:
+        model: The model name (e.g., 'llama3-8b-8192', 'llama3-70b-8192')
+        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+        temperature: Controls randomness (0.0-1.0)
+        max_tokens: Maximum tokens in the response
+        max_retries: Number of retry attempts if generation fails
+        quiet: If True, suppress progress indicators
+
+    Returns:
+        A formatted commit message string
+
+    Raises:
+        AIError: If generation fails after max_retries attempts
+    """
+    api_key = os.getenv("GROQ_API_KEY")
+    if not api_key:
+        raise AIError.model_error("GROQ_API_KEY environment variable not set")
+
+    # Handle both old (string) and new (tuple) prompt formats
+    if isinstance(prompt, tuple):
+        system_prompt, user_prompt = prompt
+        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+    else:
+        # Backward compatibility: treat string as user prompt
+        messages = [{"role": "user", "content": prompt}]
+
+    payload = {
+        "model": model,
+        "messages": messages,
+        "temperature": temperature,
+        "max_tokens": max_tokens,
+    }
+
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {api_key}",
+    }
+
+    return _make_request_with_retry(
+        url="https://api.groq.com/openai/v1/chat/completions",
+        headers=headers,
+        payload=payload,
+        provider_name=f"Groq {model}",
+        max_retries=max_retries,
+        quiet=quiet,
+        response_parser=lambda r: r["choices"][0]["message"]["content"],
+    )
+
+
+def ollama_generate(
+    model: str,
+    prompt: str | tuple[str, str],
+    temperature: float = EnvDefaults.TEMPERATURE,
+    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+    max_retries: int = EnvDefaults.MAX_RETRIES,
+    quiet: bool = False,
+) -> str:
+    """Generate commit message using Ollama API with retry logic.
+
+    Args:
+        model: The model name (e.g., 'llama3', 'mistral')
+        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+        temperature: Controls randomness (0.0-1.0)
+        max_tokens: Maximum tokens in the response (note: Ollama uses 'num_predict')
+        max_retries: Number of retry attempts if generation fails
+        quiet: If True, suppress progress indicators
+
+    Returns:
+        A formatted commit message string
+
+    Raises:
+        AIError: If generation fails after max_retries attempts
+    """
+    # Handle both old (string) and new (tuple) prompt formats
+    if isinstance(prompt, tuple):
+        system_prompt, user_prompt = prompt
+        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+    else:
+        # Backward compatibility: treat string as user prompt
+        messages = [{"role": "user", "content": prompt}]
+
+    payload = {
+        "model": model,
+        "messages": messages,
+        "stream": False,
+        "options": {
+            "temperature": temperature,
+            "num_predict": max_tokens,
+        },
+    }
+
+    headers = {
+        "Content-Type": "application/json",
+    }
+
+    # Ollama typically runs locally on port 11434
+    ollama_url = os.getenv("OLLAMA_URL", "http://localhost:11434")
+
+    return _make_request_with_retry(
+        url=f"{ollama_url}/api/chat",
+        headers=headers,
+        payload=payload,
+        provider_name=f"Ollama {model}",
+        max_retries=max_retries,
+        quiet=quiet,
+        response_parser=lambda r: r["message"]["content"],
+    )
+
+
+def openai_generate(
+    model: str,
+    prompt: str | tuple[str, str],
+    temperature: float = EnvDefaults.TEMPERATURE,
+    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+    max_retries: int = EnvDefaults.MAX_RETRIES,
+    quiet: bool = False,
+) -> str:
+    """Generate commit message using OpenAI API with retry logic.
+
+    Args:
+        model: The model name (e.g., 'gpt-4', 'gpt-3.5-turbo')
+        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+        temperature: Controls randomness (0.0-1.0)
+        max_tokens: Maximum tokens in the response
+        max_retries: Number of retry attempts if generation fails
+        quiet: If True, suppress progress indicators
+
+    Returns:
+        A formatted commit message string
+
+    Raises:
+        AIError: If generation fails after max_retries attempts
+    """
+    api_key = os.getenv("OPENAI_API_KEY")
+    if not api_key:
+        raise AIError.model_error("OPENAI_API_KEY environment variable not set")
+
+    # Handle both old (string) and new (tuple) prompt formats
+    if isinstance(prompt, tuple):
+        system_prompt, user_prompt = prompt
+        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+    else:
+        # Backward compatibility: treat string as user prompt
+        messages = [{"role": "user", "content": prompt}]
+
+    payload = {
+        "model": model,
+        "messages": messages,
+        "temperature": temperature,
+        "max_tokens": max_tokens,
+    }
+
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {api_key}",
+    }
+
+    return _make_request_with_retry(
+        url="https://api.openai.com/v1/chat/completions",
+        headers=headers,
+        payload=payload,
+        provider_name=f"OpenAI {model}",
+        max_retries=max_retries,
+        quiet=quiet,
+        response_parser=lambda r: r["choices"][0]["message"]["content"],
+    )
+
+
+def _make_request_with_retry(
+    url: str,
+    headers: dict,
+    payload: dict,
+    provider_name: str,
+    max_retries: int,
+    quiet: bool,
+    response_parser: callable,
+) -> str:
+    """Make HTTP request with retry logic and common error handling."""
+    if quiet:
+        spinner = None
+    else:
+        spinner = Halo(text=f"Generating commit message with {provider_name}...", spinner="dots")
+        spinner.start()
+
+    last_error = None
+    retry_count = 0
+
+    while retry_count < max_retries:
+        try:
+            logger.debug(f"Trying with {provider_name} (attempt {retry_count + 1}/{max_retries})")
+
+            with httpx.Client(timeout=30.0) as client:
+                response = client.post(url, headers=headers, json=payload)
+                response.raise_for_status()
+
+            response_data = response.json()
+            message = response_parser(response_data)
+
+            if spinner:
+                spinner.succeed(f"Generated commit message with {provider_name}")
+
+            return message
+
+        except Exception as e:
+            last_error = e
+            retry_count += 1
+
+            if retry_count == max_retries:
+                logger.warning(f"Error generating commit message: {e}. Giving up.")
+                break
+
+            wait_time = 2**retry_count
+            logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
+            if spinner:
+                for i in range(wait_time, 0, -1):
+                    spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
+                    time.sleep(1)
+            else:
+                time.sleep(wait_time)
+
+    if spinner:
+        spinner.fail(f"Failed to generate commit message with {provider_name}")
+
+    error_type = _classify_error(str(last_error))
+    raise AIError(
+        f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
+    )
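
All five provider functions in the new module funnel through _make_request_with_retry, which takes the endpoint URL, headers, payload, and a response_parser callable, so each provider only defines its own request shape. As a hedged sketch of that design, the snippet below shows how a sixth, OpenAI-compatible backend could reuse the same helper; the "example" provider name, EXAMPLE_API_KEY variable, and endpoint URL are hypothetical and not part of gac.

# Hypothetical sketch only: "example", EXAMPLE_API_KEY, and the URL below are not
# part of gac; this just illustrates reusing the shared _make_request_with_retry helper.
import os

from gac.ai_providers import _make_request_with_retry
from gac.errors import AIError


def example_generate(model: str, prompt: str, quiet: bool = False) -> str:
    api_key = os.getenv("EXAMPLE_API_KEY")  # hypothetical env var
    if not api_key:
        raise AIError.model_error("EXAMPLE_API_KEY environment variable not set")

    payload = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
    }
    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}

    return _make_request_with_retry(
        url="https://api.example.com/v1/chat/completions",  # hypothetical OpenAI-compatible endpoint
        headers=headers,
        payload=payload,
        provider_name=f"Example {model}",
        max_retries=3,
        quiet=quiet,
        response_parser=lambda r: r["choices"][0]["message"]["content"],
    )
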
13 files without changes