gac 0.19.1__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of gac might be problematic.

gac/__init__.py CHANGED
@@ -2,6 +2,13 @@
 
 from gac.__version__ import __version__
 from gac.ai import generate_commit_message
+from gac.ai_providers import (
+    anthropic_generate,
+    cerebras_generate,
+    groq_generate,
+    ollama_generate,
+    openai_generate,
+)
 from gac.git import get_staged_files, push_changes
 from gac.prompt import build_prompt, clean_commit_message
 
@@ -12,4 +19,9 @@ __all__ = [
     "clean_commit_message",
     "get_staged_files",
     "push_changes",
+    "anthropic_generate",
+    "cerebras_generate",
+    "groq_generate",
+    "ollama_generate",
+    "openai_generate",
 ]
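
Note: the five provider functions join generate_commit_message in the package's public API. A minimal sketch of the new surface, assuming OPENAI_API_KEY is exported (the model name is just the example used in the provider docstrings below):

    from gac import generate_commit_message, openai_generate

    # Generic entry point: the "provider:" prefix picks the backend.
    msg = generate_commit_message(model="openai:gpt-4", prompt="Summarize the staged diff")

    # Equivalent direct call with a bare model name.
    msg = openai_generate("gpt-4", "Summarize the staged diff")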
gac/__version__.py CHANGED
@@ -1,3 +1,3 @@
 """Version information for gac package."""
 
-__version__ = "0.19.1"
+__version__ = "1.0.1"
gac/ai.py CHANGED
@@ -5,14 +5,20 @@ It consolidates all AI-related functionality including token counting and commit
 """
 
 import logging
-import time
+import os
 from functools import lru_cache
 from typing import Any
 
-import aisuite as ai
+import httpx
 import tiktoken
-from halo import Halo
 
+from gac.ai_providers import (
+    anthropic_generate,
+    cerebras_generate,
+    groq_generate,
+    ollama_generate,
+    openai_generate,
+)
 from gac.constants import EnvDefaults, Utility
 from gac.errors import AIError
 
@@ -26,19 +32,10 @@ def count_tokens(content: str | list[dict[str, str]] | dict[str, Any], model: st
         return 0
 
     if model.startswith("anthropic"):
-        import anthropic
-
-        try:
-            client = anthropic.Anthropic()
-
-            # Use the messages.count_tokens API for accurate counting
-            model_name = model.split(":", 1)[1] if ":" in model else "claude-3-5-haiku-latest"
-            response = client.messages.count_tokens(model=model_name, messages=[{"role": "user", "content": text}])
-
-            return response.input_tokens
-        except Exception:
-            # Fallback to simple estimation for Anthropic models
-            return len(text) // 4
+        anthropic_tokens = anthropic_count_tokens(text, model)
+        if anthropic_tokens is not None:
+            return anthropic_tokens
+        return len(text) // 4
 
     try:
         encoding = get_encoding(model)
@@ -48,6 +45,60 @@
         return len(text) // 4
 
 
+def anthropic_count_tokens(text: str, model: str) -> int | None:
+    """Call Anthropic's token count endpoint and return the token usage.
+
+    Returns the token count when successful, otherwise ``None`` so callers can
+    fall back to a heuristic estimate.
+    """
+    api_key = os.getenv("ANTHROPIC_API_KEY")
+    if not api_key:
+        logger.debug("ANTHROPIC_API_KEY not set; using heuristic token estimation for Anthropic model")
+        return None
+
+    model_name = model.split(":", 1)[1] if ":" in model else "claude-3-5-haiku-latest"
+    headers = {
+        "Content-Type": "application/json",
+        "x-api-key": api_key,
+        "anthropic-version": "2023-06-01",
+    }
+    payload = {
+        "model": model_name,
+        "messages": [
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": text,
+                    }
+                ],
+            }
+        ],
+    }
+
+    try:
+        response = httpx.post(
+            "https://api.anthropic.com/v1/messages/count_tokens",
+            headers=headers,
+            json=payload,
+            timeout=30.0,
+        )
+        response.raise_for_status()
+        data = response.json()
+
+        if "input_tokens" in data:
+            return data["input_tokens"]
+        if "usage" in data and "input_tokens" in data["usage"]:
+            return data["usage"]["input_tokens"]
+
+        logger.warning("Unexpected response format from Anthropic token count API: %s", data)
+    except Exception as exc:
+        logger.warning("Failed to retrieve Anthropic token count via HTTP: %s", exc)
+
+    return None
+
+
 def extract_text_content(content: str | list[dict[str, str]] | dict[str, Any]) -> str:
     """Extract text content from various input formats."""
     if isinstance(content, str):
@@ -77,7 +128,7 @@ def generate_commit_message(
     max_retries: int = EnvDefaults.MAX_RETRIES,
     quiet: bool = False,
 ) -> str:
-    """Generate a commit message using aisuite.
+    """Generate a commit message using direct API calls to AI providers.
 
     Args:
         model: The model to use in provider:model_name format (e.g., 'anthropic:claude-3-5-haiku-latest')
@@ -106,76 +157,24 @@
             f"Invalid model format: {model}. Please use the format 'provider:model_name'."
         ) from err
 
-    client = ai.Client()
-
-    # Handle both old (string) and new (tuple) prompt formats
-    if isinstance(prompt, tuple):
-        system_prompt, user_prompt = prompt
-        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
-    else:
-        # Backward compatibility: treat string as user prompt
-        messages = [{"role": "user", "content": prompt}]
+    # Parse the model string to extract provider and model name
+    try:
+        provider, model_name = model.split(":", 1)
+    except ValueError as err:
+        raise AIError.model_error(
+            f"Invalid model format: {model}. Please use the format 'provider:model_name'."
+        ) from err
 
-    if quiet:
-        spinner = None
+    # Route to the appropriate provider function
+    if provider == "openai":
+        return openai_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
+    elif provider == "anthropic":
+        return anthropic_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
+    elif provider == "groq":
+        return groq_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
+    elif provider == "cerebras":
+        return cerebras_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
+    elif provider == "ollama":
+        return ollama_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
     else:
-        spinner = Halo(text=f"Generating commit message with {model}...", spinner="dots")
-        spinner.start()
-
-    last_error = None
-
-    retry_count = 0
-    while retry_count < max_retries:
-        try:
-            logger.debug(f"Trying with model {model} (attempt {retry_count + 1}/{max_retries})")
-            response = client.chat.completions.create(
-                model=model,
-                messages=messages,
-                temperature=temperature,
-                max_tokens=max_tokens,
-            )
-
-            message = response.choices[0].message.content if hasattr(response, "choices") else response.content
-
-            if spinner:
-                spinner.succeed(f"Generated commit message with {model}")
-
-            return message
-
-        except Exception as e:
-            last_error = e
-            retry_count += 1
-
-            if retry_count == max_retries:
-                logger.warning(f"Error generating commit message: {e}. Giving up.")
-                break
-
-            wait_time = 2**retry_count
-            logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
-            if spinner:
-                for i in range(wait_time, 0, -1):
-                    spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
-                    time.sleep(1)
-            else:
-                time.sleep(wait_time)
-    if spinner:
-        spinner.fail("Failed to generate commit message")
-
-    error_str = str(last_error).lower()
-
-    if "api key" in error_str or "unauthorized" in error_str or "authentication" in error_str:
-        error_type = "authentication"
-    elif "timeout" in error_str:
-        error_type = "timeout"
-    elif "rate limit" in error_str or "too many requests" in error_str:
-        error_type = "rate_limit"
-    elif "connect" in error_str or "network" in error_str:
-        error_type = "connection"
-    elif "model" in error_str or "not found" in error_str:
-        error_type = "model"
-    else:
-        error_type = "unknown"
-
-    raise AIError(
-        f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
-    )
+        raise AIError.model_error(f"Unsupported provider: {provider}")
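
With aisuite removed, generate_commit_message is now a thin router: the "provider:" prefix selects one of the five httpx-backed functions and everything else is passed through unchanged. A hedged usage sketch (tuple prompts become system/user messages; assumes ANTHROPIC_API_KEY is exported):

    from gac.ai import generate_commit_message
    from gac.errors import AIError

    try:
        # "anthropic:" routes to anthropic_generate; an unrecognized
        # prefix raises AIError.model_error("Unsupported provider: ...").
        message = generate_commit_message(
            model="anthropic:claude-3-5-haiku-latest",
            prompt=("You write git commit messages.", "Staged diff goes here"),
            quiet=True,
        )
    except AIError as exc:
        print(f"generation failed: {exc}")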
gac/ai_providers.py ADDED
@@ -0,0 +1,404 @@
+"""Direct HTTP API calls to AI providers using httpx.
+
+This module provides functions for making direct HTTP API calls to various AI providers.
+Each provider has its own function to generate commit messages using only httpx.
+"""
+
+import logging
+import os
+import time
+
+import httpx
+from halo import Halo
+
+from gac.constants import EnvDefaults
+from gac.errors import AIError
+
+logger = logging.getLogger(__name__)
+
+
+def _classify_error(error_str: str) -> str:
+    """Classify error types based on error message content."""
+    error_str = error_str.lower()
+
+    if (
+        "api key" in error_str
+        or "unauthorized" in error_str
+        or "authentication" in error_str
+        or "invalid api key" in error_str
+    ):
+        return "authentication"
+    elif "timeout" in error_str or "timed out" in error_str or "request timeout" in error_str:
+        return "timeout"
+    elif "rate limit" in error_str or "too many requests" in error_str or "rate limit exceeded" in error_str:
+        return "rate_limit"
+    elif "connect" in error_str or "network" in error_str or "network connection failed" in error_str:
+        return "connection"
+    elif "model" in error_str or "not found" in error_str or "model not found" in error_str:
+        return "model"
+    else:
+        return "unknown"
+
+
+def anthropic_generate(
+    model: str,
+    prompt: str | tuple[str, str],
+    temperature: float = EnvDefaults.TEMPERATURE,
+    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+    max_retries: int = EnvDefaults.MAX_RETRIES,
+    quiet: bool = False,
+) -> str:
+    """Generate commit message using Anthropic API with retry logic.
+
+    Args:
+        model: The model name (e.g., 'claude-3-5-haiku-latest', 'claude-3-opus-latest')
+        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+        temperature: Controls randomness (0.0-1.0)
+        max_tokens: Maximum tokens in the response
+        max_retries: Number of retry attempts if generation fails
+        quiet: If True, suppress progress indicators
+
+    Returns:
+        A formatted commit message string
+
+    Raises:
+        AIError: If generation fails after max_retries attempts
+    """
+    api_key = os.getenv("ANTHROPIC_API_KEY")
+    if not api_key:
+        raise AIError.model_error("ANTHROPIC_API_KEY environment variable not set")
+
+    # Handle both old (string) and new (tuple) prompt formats
+    if isinstance(prompt, tuple):
+        system_prompt, user_prompt = prompt
+        messages = [{"role": "user", "content": user_prompt}]
+        payload = {
+            "model": model,
+            "messages": messages,
+            "system": system_prompt,
+            "temperature": temperature,
+            "max_tokens": max_tokens,
+        }
+    else:
+        # Backward compatibility: treat string as user prompt
+        messages = [{"role": "user", "content": prompt}]
+        payload = {
+            "model": model,
+            "messages": messages,
+            "temperature": temperature,
+            "max_tokens": max_tokens,
+        }
+
+    headers = {
+        "Content-Type": "application/json",
+        "x-api-key": api_key,
+        "anthropic-version": "2023-06-01",
+    }
+
+    return _make_request_with_retry(
+        url="https://api.anthropic.com/v1/messages",
+        headers=headers,
+        payload=payload,
+        provider_name=f"Anthropic {model}",
+        max_retries=max_retries,
+        quiet=quiet,
+        response_parser=lambda r: r["content"][0]["text"],
+    )
+
+
+def cerebras_generate(
+    model: str,
+    prompt: str | tuple[str, str],
+    temperature: float = EnvDefaults.TEMPERATURE,
+    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+    max_retries: int = EnvDefaults.MAX_RETRIES,
+    quiet: bool = False,
+) -> str:
+    """Generate commit message using Cerebras API with retry logic.
+
+    Args:
+        model: The model name (e.g., 'llama3.1-8b', 'llama3.1-70b')
+        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+        temperature: Controls randomness (0.0-1.0)
+        max_tokens: Maximum tokens in the response
+        max_retries: Number of retry attempts if generation fails
+        quiet: If True, suppress progress indicators
+
+    Returns:
+        A formatted commit message string
+
+    Raises:
+        AIError: If generation fails after max_retries attempts
+    """
+    api_key = os.getenv("CEREBRAS_API_KEY")
+    if not api_key:
+        raise AIError.model_error("CEREBRAS_API_KEY environment variable not set")
+
+    # Handle both old (string) and new (tuple) prompt formats
+    if isinstance(prompt, tuple):
+        system_prompt, user_prompt = prompt
+        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+    else:
+        # Backward compatibility: treat string as user prompt
+        messages = [{"role": "user", "content": prompt}]
+
+    payload = {
+        "model": model,
+        "messages": messages,
+        "temperature": temperature,
+        "max_tokens": max_tokens,
+    }
+
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {api_key}",
+    }
+
+    return _make_request_with_retry(
+        url="https://api.cerebras.ai/v1/chat/completions",
+        headers=headers,
+        payload=payload,
+        provider_name=f"Cerebras {model}",
+        max_retries=max_retries,
+        quiet=quiet,
+        response_parser=lambda r: r["choices"][0]["message"]["content"],
+    )
+
+
+def groq_generate(
+    model: str,
+    prompt: str | tuple[str, str],
+    temperature: float = EnvDefaults.TEMPERATURE,
+    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+    max_retries: int = EnvDefaults.MAX_RETRIES,
+    quiet: bool = False,
+) -> str:
+    """Generate commit message using Groq API with retry logic.
+
+    Args:
+        model: The model name (e.g., 'llama3-8b-8192', 'llama3-70b-8192')
+        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+        temperature: Controls randomness (0.0-1.0)
+        max_tokens: Maximum tokens in the response
+        max_retries: Number of retry attempts if generation fails
+        quiet: If True, suppress progress indicators
+
+    Returns:
+        A formatted commit message string
+
+    Raises:
+        AIError: If generation fails after max_retries attempts
+    """
+    api_key = os.getenv("GROQ_API_KEY")
+    if not api_key:
+        raise AIError.model_error("GROQ_API_KEY environment variable not set")
+
+    # Handle both old (string) and new (tuple) prompt formats
+    if isinstance(prompt, tuple):
+        system_prompt, user_prompt = prompt
+        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+    else:
+        # Backward compatibility: treat string as user prompt
+        messages = [{"role": "user", "content": prompt}]
+
+    payload = {
+        "model": model,
+        "messages": messages,
+        "temperature": temperature,
+        "max_tokens": max_tokens,
+    }
+
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {api_key}",
+    }
+
+    return _make_request_with_retry(
+        url="https://api.groq.com/openai/v1/chat/completions",
+        headers=headers,
+        payload=payload,
+        provider_name=f"Groq {model}",
+        max_retries=max_retries,
+        quiet=quiet,
+        response_parser=lambda r: r["choices"][0]["message"]["content"],
+    )
+
+
+def ollama_generate(
+    model: str,
+    prompt: str | tuple[str, str],
+    temperature: float = EnvDefaults.TEMPERATURE,
+    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+    max_retries: int = EnvDefaults.MAX_RETRIES,
+    quiet: bool = False,
+) -> str:
+    """Generate commit message using Ollama API with retry logic.
+
+    Args:
+        model: The model name (e.g., 'llama3', 'mistral')
+        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+        temperature: Controls randomness (0.0-1.0)
+        max_tokens: Maximum tokens in the response (note: Ollama uses 'num_predict')
+        max_retries: Number of retry attempts if generation fails
+        quiet: If True, suppress progress indicators
+
+    Returns:
+        A formatted commit message string
+
+    Raises:
+        AIError: If generation fails after max_retries attempts
+    """
+    # Handle both old (string) and new (tuple) prompt formats
+    if isinstance(prompt, tuple):
+        system_prompt, user_prompt = prompt
+        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+    else:
+        # Backward compatibility: treat string as user prompt
+        messages = [{"role": "user", "content": prompt}]
+
+    payload = {
+        "model": model,
+        "messages": messages,
+        "stream": False,
+        "options": {
+            "temperature": temperature,
+            "num_predict": max_tokens,
+        },
+    }
+
+    headers = {
+        "Content-Type": "application/json",
+    }
+
+    # Ollama typically runs locally on port 11434
+    ollama_url = os.getenv("OLLAMA_URL", "http://localhost:11434")
+
+    return _make_request_with_retry(
+        url=f"{ollama_url}/api/chat",
+        headers=headers,
+        payload=payload,
+        provider_name=f"Ollama {model}",
+        max_retries=max_retries,
+        quiet=quiet,
+        response_parser=lambda r: r["message"]["content"],
+    )
+
+
+def openai_generate(
+    model: str,
+    prompt: str | tuple[str, str],
+    temperature: float = EnvDefaults.TEMPERATURE,
+    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+    max_retries: int = EnvDefaults.MAX_RETRIES,
+    quiet: bool = False,
+) -> str:
+    """Generate commit message using OpenAI API with retry logic.
+
+    Args:
+        model: The model name (e.g., 'gpt-4', 'gpt-3.5-turbo')
+        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+        temperature: Controls randomness (0.0-1.0)
+        max_tokens: Maximum tokens in the response
+        max_retries: Number of retry attempts if generation fails
+        quiet: If True, suppress progress indicators
+
+    Returns:
+        A formatted commit message string
+
+    Raises:
+        AIError: If generation fails after max_retries attempts
+    """
+    api_key = os.getenv("OPENAI_API_KEY")
+    if not api_key:
+        raise AIError.model_error("OPENAI_API_KEY environment variable not set")
+
+    # Handle both old (string) and new (tuple) prompt formats
+    if isinstance(prompt, tuple):
+        system_prompt, user_prompt = prompt
+        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+    else:
+        # Backward compatibility: treat string as user prompt
+        messages = [{"role": "user", "content": prompt}]
+
+    payload = {
+        "model": model,
+        "messages": messages,
+        "temperature": temperature,
+        "max_tokens": max_tokens,
+    }
+
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {api_key}",
+    }
+
+    return _make_request_with_retry(
+        url="https://api.openai.com/v1/chat/completions",
+        headers=headers,
+        payload=payload,
+        provider_name=f"OpenAI {model}",
+        max_retries=max_retries,
+        quiet=quiet,
+        response_parser=lambda r: r["choices"][0]["message"]["content"],
+    )
+
+
+def _make_request_with_retry(
+    url: str,
+    headers: dict,
+    payload: dict,
+    provider_name: str,
+    max_retries: int,
+    quiet: bool,
+    response_parser: callable,
+) -> str:
+    """Make HTTP request with retry logic and common error handling."""
+    if quiet:
+        spinner = None
+    else:
+        spinner = Halo(text=f"Generating commit message with {provider_name}...", spinner="dots")
+        spinner.start()
+
+    last_error = None
+    retry_count = 0
+
+    while retry_count < max_retries:
+        try:
+            logger.debug(f"Trying with {provider_name} (attempt {retry_count + 1}/{max_retries})")
+
+            with httpx.Client(timeout=30.0) as client:
+                response = client.post(url, headers=headers, json=payload)
+                response.raise_for_status()
+
+            response_data = response.json()
+            message = response_parser(response_data)
+
+            if spinner:
+                spinner.succeed(f"Generated commit message with {provider_name}")
+
+            return message
+
+        except Exception as e:
+            last_error = e
+            retry_count += 1
+
+            if retry_count == max_retries:
+                logger.warning(f"Error generating commit message: {e}. Giving up.")
+                break
+
+            wait_time = 2**retry_count
+            logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
+            if spinner:
+                for i in range(wait_time, 0, -1):
+                    spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
+                    time.sleep(1)
+            else:
+                time.sleep(wait_time)
+
+    if spinner:
+        spinner.fail(f"Failed to generate commit message with {provider_name}")
+
+    error_type = _classify_error(str(last_error))
+    raise AIError(
+        f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
+    )
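
All five provider functions funnel into _make_request_with_retry, which retries on any exception with exponential backoff: after failed attempt k it waits 2**k seconds, so with max_retries=3 a flaky endpoint is retried after 2 s and again after 4 s before the error is classified and raised as AIError. A hedged sketch of calling the Ollama path directly, which needs no API key (the model name is the docstring's own example):

    import os
    from gac.ai_providers import ollama_generate

    # OLLAMA_URL overrides the default local endpoint shown here.
    os.environ.setdefault("OLLAMA_URL", "http://localhost:11434")

    message = ollama_generate(
        "llama3",
        ("You write git commit messages.", "Staged diff goes here"),
        temperature=0.7,
        max_tokens=256,
        max_retries=3,
        quiet=True,
    )
    print(message)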
{gac-0.19.1.dist-info → gac-1.0.1.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: gac
-Version: 0.19.1
+Version: 1.0.1
 Summary: AI-powered Git commit message generator with multi-provider support
 Project-URL: Homepage, https://github.com/cellwebb/gac
 Project-URL: Documentation, https://github.com/cellwebb/gac#readme
@@ -20,19 +20,15 @@ Classifier: Programming Language :: Python :: 3.13
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
 Requires-Python: >=3.10
-Requires-Dist: aisuite>=0.1.11
 Requires-Dist: anthropic>=0.68.0
-Requires-Dist: cerebras-cloud-sdk==1.49.0
 Requires-Dist: click>=8.3.0
-Requires-Dist: docstring-parser
-Requires-Dist: groq>=0.31.1
 Requires-Dist: halo
-Requires-Dist: ollama>=0.5.4
-Requires-Dist: openai>=1.108.1
+Requires-Dist: httpx>=0.28.0
 Requires-Dist: pydantic>=2.11.9
 Requires-Dist: python-dotenv>=1.1.1
 Requires-Dist: questionary
 Requires-Dist: rich>=14.1.0
+Requires-Dist: sumy
 Requires-Dist: tiktoken>=0.11.0
 Provides-Extra: dev
 Requires-Dist: build; extra == 'dev'
{gac-0.19.1.dist-info → gac-1.0.1.dist-info}/RECORD RENAMED
@@ -1,6 +1,7 @@
-gac/__init__.py,sha256=z9yGInqtycFIT3g1ca24r-A3699hKVaRqGUI79wsmMc,415
-gac/__version__.py,sha256=hyGMebb4FUEdQREZTwJbEWXUpawroToZSIoTm_-QyVs,67
-gac/ai.py,sha256=vncnizPce4QbbpddjVORdK2X95rIJwWbJxmIhjscQSU,6357
+gac/__init__.py,sha256=T3KAW47ZmvB5AozG_uL92ryBYgp-2LNEztBaxaY3dJE,674
+gac/__version__.py,sha256=8Yc4NMnplKKb2pLEFCgKeKRVX9Q-31V40n444l7Ao5M,66
+gac/ai.py,sha256=E0vfkWyqYvNJFAbOYvHG-tnwcQakAYm34oHMbKM5GIk,6267
+gac/ai_providers.py,sha256=QiVSspn0cauxl7m1Chn6nw1kAO1ByAuPiQqZWyZZCys,13210
 gac/cli.py,sha256=eQS8S7v6p0CfN9wtr239ujYGTi9rKl-KV7STX2U-C3w,4581
 gac/config.py,sha256=wSgEDjtis7Vk1pv5VPvYmJyD9-tymDS6GiUHjnCMbIM,1486
 gac/config_cli.py,sha256=v9nFHZO1RvK9fzHyuUS6SG-BCLHMsdOMDwWamBhVVh4,1608
@@ -13,8 +14,8 @@ gac/main.py,sha256=WI7mxIbL05neQr1VfoopOeZKIonwpwFeZCt_4VFewPY,11987
 gac/preprocess.py,sha256=4igtZ9OTHgTpqwlJmbcGaqzmdD0HHCZJwsZ9eG118Gk,15360
 gac/prompt.py,sha256=_fv24XU3DZE_S72vcdUYnNkmy-_KXnr1Vlc-9okop7E,17263
 gac/utils.py,sha256=W3ladtmsH01MNLdckQYTzYrYbTGEdzCKI36he9C-y_E,3945
-gac-0.19.1.dist-info/METADATA,sha256=jpY9N8ZkA0n30pqIf_5yOxXZVHBtbzUm2FkROBYt9YE,8496
-gac-0.19.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-gac-0.19.1.dist-info/entry_points.txt,sha256=tdjN-XMmcWfL92swuRAjT62bFLOAwk9bTMRLGP5Z4aI,36
-gac-0.19.1.dist-info/licenses/LICENSE,sha256=s11puNmYfzwoSwG96nhOJe268Y1QFckr8-Hmzo3_eJE,1087
-gac-0.19.1.dist-info/RECORD,,
+gac-1.0.1.dist-info/METADATA,sha256=iEfp3b1Mx12iwon4aD6QDE8foNYrn-dN9yCmwhuKQTU,8351
+gac-1.0.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+gac-1.0.1.dist-info/entry_points.txt,sha256=tdjN-XMmcWfL92swuRAjT62bFLOAwk9bTMRLGP5Z4aI,36
+gac-1.0.1.dist-info/licenses/LICENSE,sha256=vOab37NouL1PNs5BswnPayrMCqaN2sqLfMQfqPDrpZg,1103
+gac-1.0.1.dist-info/RECORD,,
{gac-0.19.1.dist-info → gac-1.0.1.dist-info}/licenses/LICENSE RENAMED
@@ -1,6 +1,6 @@
 # MIT License
 
-Copyright (c) 2025 cellwebb <cell@criteria.dev>
+Copyright (c) 2025 cellwebb <cellwebb@users.noreply.github.com>
 
 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
 documentation files (the "Software"), to deal in the Software without restriction, including without limitation the