gac 0.19.1__tar.gz → 1.0.1__tar.gz

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries; it is provided for informational purposes only.

Potentially problematic release.

gac-0.19.1/.gitignore → gac-1.0.1/.gitignore
@@ -209,3 +209,4 @@ scripts/changelog_prompt.md
  **/.claude/settings.local.json
  .plandex-v2/
  .vscode/
+ .serena/
gac-0.19.1/LICENSE → gac-1.0.1/LICENSE
@@ -1,6 +1,6 @@
  # MIT License

- Copyright (c) 2025 cellwebb <cell@criteria.dev>
+ Copyright (c) 2025 cellwebb <cellwebb@users.noreply.github.com>

  Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
  documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
gac-0.19.1/PKG-INFO → gac-1.0.1/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: gac
- Version: 0.19.1
+ Version: 1.0.1
  Summary: AI-powered Git commit message generator with multi-provider support
  Project-URL: Homepage, https://github.com/cellwebb/gac
  Project-URL: Documentation, https://github.com/cellwebb/gac#readme
@@ -20,19 +20,15 @@ Classifier: Programming Language :: Python :: 3.13
  Classifier: Programming Language :: Python :: Implementation :: CPython
  Classifier: Programming Language :: Python :: Implementation :: PyPy
  Requires-Python: >=3.10
- Requires-Dist: aisuite>=0.1.11
  Requires-Dist: anthropic>=0.68.0
- Requires-Dist: cerebras-cloud-sdk==1.49.0
  Requires-Dist: click>=8.3.0
- Requires-Dist: docstring-parser
- Requires-Dist: groq>=0.31.1
  Requires-Dist: halo
- Requires-Dist: ollama>=0.5.4
- Requires-Dist: openai>=1.108.1
+ Requires-Dist: httpx>=0.28.0
  Requires-Dist: pydantic>=2.11.9
  Requires-Dist: python-dotenv>=1.1.1
  Requires-Dist: questionary
  Requires-Dist: rich>=14.1.0
+ Requires-Dist: sumy
  Requires-Dist: tiktoken>=0.11.0
  Provides-Extra: dev
  Requires-Dist: build; extra == 'dev'
gac-0.19.1/pyproject.toml → gac-1.0.1/pyproject.toml
@@ -24,25 +24,27 @@ classifiers = [
  "Programming Language :: Python :: Implementation :: PyPy",
  ]
  dependencies = [
- # AI components - base providers
- "aisuite>=0.1.11",
+ # HTTP client for AI provider APIs
+ "httpx>=0.28.0",
+
+ # Anthropic SDK (token counting)
  "anthropic>=0.68.0",
- "cerebras_cloud_sdk==1.49.0",
- "groq>=0.31.1",
- "ollama>=0.5.4",
- "openai>=1.108.1",
+
+ # Token counting (OpenAI models)
  "tiktoken>=0.11.0",
-
+
  # Core functionality
  "pydantic>=2.11.9",
  "python-dotenv>=1.1.1",
- "docstring_parser", # needed by aisuite but not in their deps
-
+
  # CLI and formatting
  "click>=8.3.0",
  "halo",
  "questionary",
  "rich>=14.1.0",
+
+ # Summarization and document processing
+ "sumy",
  ]

  [project.scripts]
@@ -130,10 +132,10 @@ include = [
  [tool.hatch.envs.default]
  dependencies = [
  # AI components
- "aisuite",
  "anthropic",
  "openai",
  "groq",
+ "httpx",

  # Core functionality
  "pydantic",
gac-0.19.1/src/gac/__init__.py → gac-1.0.1/src/gac/__init__.py
@@ -2,6 +2,13 @@

  from gac.__version__ import __version__
  from gac.ai import generate_commit_message
+ from gac.ai_providers import (
+     anthropic_generate,
+     cerebras_generate,
+     groq_generate,
+     ollama_generate,
+     openai_generate,
+ )
  from gac.git import get_staged_files, push_changes
  from gac.prompt import build_prompt, clean_commit_message

@@ -12,4 +19,9 @@ __all__ = [
      "clean_commit_message",
      "get_staged_files",
      "push_changes",
+     "anthropic_generate",
+     "cerebras_generate",
+     "groq_generate",
+     "ollama_generate",
+     "openai_generate",
  ]
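
With these exports, 1.0 offers two entry points: the routed generate_commit_message, which parses a provider:model_name string, and the per-provider functions, which take a bare model name. A minimal sketch of both, with model names and prompts as illustrative values:

    import gac

    # Routed call: the "provider:" prefix selects the backend
    msg = gac.generate_commit_message(
        "anthropic:claude-3-5-haiku-latest",
        ("You write conventional commit messages.", "diff --git a/README.md b/README.md"),
    )

    # Direct call: provider functions take the bare model name
    msg = gac.groq_generate("llama3-8b-8192", "Summarize this diff as a commit message.")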
gac-0.19.1/src/gac/__version__.py → gac-1.0.1/src/gac/__version__.py
@@ -1,3 +1,3 @@
  """Version information for gac package."""

- __version__ = "0.19.1"
+ __version__ = "1.0.1"
gac-1.0.1/src/gac/ai.py ADDED
@@ -0,0 +1,180 @@
+ """AI provider integration for gac.
+
+ This module provides core functionality for AI provider interaction.
+ It consolidates all AI-related functionality including token counting and commit message generation.
+ """
+
+ import logging
+ import os
+ from functools import lru_cache
+ from typing import Any
+
+ import httpx
+ import tiktoken
+
+ from gac.ai_providers import (
+     anthropic_generate,
+     cerebras_generate,
+     groq_generate,
+     ollama_generate,
+     openai_generate,
+ )
+ from gac.constants import EnvDefaults, Utility
+ from gac.errors import AIError
+
+ logger = logging.getLogger(__name__)
+
+
+ def count_tokens(content: str | list[dict[str, str]] | dict[str, Any], model: str) -> int:
+     """Count tokens in content using the model's tokenizer."""
+     text = extract_text_content(content)
+     if not text:
+         return 0
+
+     if model.startswith("anthropic"):
+         anthropic_tokens = anthropic_count_tokens(text, model)
+         if anthropic_tokens is not None:
+             return anthropic_tokens
+         return len(text) // 4
+
+     try:
+         encoding = get_encoding(model)
+         return len(encoding.encode(text))
+     except Exception as e:
+         logger.error(f"Error counting tokens: {e}")
+         return len(text) // 4
+
+
+ def anthropic_count_tokens(text: str, model: str) -> int | None:
+     """Call Anthropic's token count endpoint and return the token usage.
+
+     Returns the token count when successful, otherwise ``None`` so callers can
+     fall back to a heuristic estimate.
+     """
+     api_key = os.getenv("ANTHROPIC_API_KEY")
+     if not api_key:
+         logger.debug("ANTHROPIC_API_KEY not set; using heuristic token estimation for Anthropic model")
+         return None
+
+     model_name = model.split(":", 1)[1] if ":" in model else "claude-3-5-haiku-latest"
+     headers = {
+         "Content-Type": "application/json",
+         "x-api-key": api_key,
+         "anthropic-version": "2023-06-01",
+     }
+     payload = {
+         "model": model_name,
+         "messages": [
+             {
+                 "role": "user",
+                 "content": [
+                     {
+                         "type": "text",
+                         "text": text,
+                     }
+                 ],
+             }
+         ],
+     }
+
+     try:
+         response = httpx.post(
+             "https://api.anthropic.com/v1/messages/count_tokens",
+             headers=headers,
+             json=payload,
+             timeout=30.0,
+         )
+         response.raise_for_status()
+         data = response.json()
+
+         if "input_tokens" in data:
+             return data["input_tokens"]
+         if "usage" in data and "input_tokens" in data["usage"]:
+             return data["usage"]["input_tokens"]
+
+         logger.warning("Unexpected response format from Anthropic token count API: %s", data)
+     except Exception as exc:
+         logger.warning("Failed to retrieve Anthropic token count via HTTP: %s", exc)
+
+     return None
+
+
+ def extract_text_content(content: str | list[dict[str, str]] | dict[str, Any]) -> str:
+     """Extract text content from various input formats."""
+     if isinstance(content, str):
+         return content
+     elif isinstance(content, list):
+         return "\n".join(msg["content"] for msg in content if isinstance(msg, dict) and "content" in msg)
+     elif isinstance(content, dict) and "content" in content:
+         return content["content"]
+     return ""
+
+
+ @lru_cache(maxsize=1)
+ def get_encoding(model: str) -> tiktoken.Encoding:
+     """Get the appropriate encoding for a given model."""
+     model_name = model.split(":")[-1] if ":" in model else model
+     try:
+         return tiktoken.encoding_for_model(model_name)
+     except KeyError:
+         return tiktoken.get_encoding(Utility.DEFAULT_ENCODING)
+
+
+ def generate_commit_message(
+     model: str,
+     prompt: str | tuple[str, str],
+     temperature: float = EnvDefaults.TEMPERATURE,
+     max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+     max_retries: int = EnvDefaults.MAX_RETRIES,
+     quiet: bool = False,
+ ) -> str:
+     """Generate a commit message using direct API calls to AI providers.
+
+     Args:
+         model: The model to use in provider:model_name format (e.g., 'anthropic:claude-3-5-haiku-latest')
+         prompt: Either a string prompt (for backward compatibility) or tuple of (system_prompt, user_prompt)
+         temperature: Controls randomness (0.0-1.0), lower values are more deterministic
+         max_tokens: Maximum tokens in the response
+         max_retries: Number of retry attempts if generation fails
+         quiet: If True, suppress progress indicators
+
+     Returns:
+         A formatted commit message string
+
+     Raises:
+         AIError: If generation fails after max_retries attempts
+
+     Example:
+         >>> model = "anthropic:claude-3-5-haiku-latest"
+         >>> system_prompt, user_prompt = build_prompt("On branch main", "diff --git a/README.md b/README.md")
+         >>> generate_commit_message(model, (system_prompt, user_prompt))
+         'docs: Update README with installation instructions'
+     """
+     try:
+         _, _ = model.split(":", 1)
+     except ValueError as err:
+         raise AIError.model_error(
+             f"Invalid model format: {model}. Please use the format 'provider:model_name'."
+         ) from err
+
+     # Parse the model string to extract provider and model name
+     try:
+         provider, model_name = model.split(":", 1)
+     except ValueError as err:
+         raise AIError.model_error(
+             f"Invalid model format: {model}. Please use the format 'provider:model_name'."
+         ) from err
+
+     # Route to the appropriate provider function
+     if provider == "openai":
+         return openai_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
+     elif provider == "anthropic":
+         return anthropic_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
+     elif provider == "groq":
+         return groq_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
+     elif provider == "cerebras":
+         return cerebras_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
+     elif provider == "ollama":
+         return ollama_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
+     else:
+         raise AIError.model_error(f"Unsupported provider: {provider}")
gac-1.0.1/src/gac/ai_providers.py ADDED
@@ -0,0 +1,404 @@
+ """Direct HTTP API calls to AI providers using httpx.
+
+ This module provides functions for making direct HTTP API calls to various AI providers.
+ Each provider has its own function to generate commit messages using only httpx.
+ """
+
+ import logging
+ import os
+ import time
+
+ import httpx
+ from halo import Halo
+
+ from gac.constants import EnvDefaults
+ from gac.errors import AIError
+
+ logger = logging.getLogger(__name__)
+
+
+ def _classify_error(error_str: str) -> str:
+     """Classify error types based on error message content."""
+     error_str = error_str.lower()
+
+     if (
+         "api key" in error_str
+         or "unauthorized" in error_str
+         or "authentication" in error_str
+         or "invalid api key" in error_str
+     ):
+         return "authentication"
+     elif "timeout" in error_str or "timed out" in error_str or "request timeout" in error_str:
+         return "timeout"
+     elif "rate limit" in error_str or "too many requests" in error_str or "rate limit exceeded" in error_str:
+         return "rate_limit"
+     elif "connect" in error_str or "network" in error_str or "network connection failed" in error_str:
+         return "connection"
+     elif "model" in error_str or "not found" in error_str or "model not found" in error_str:
+         return "model"
+     else:
+         return "unknown"
+
+
+ def anthropic_generate(
+     model: str,
+     prompt: str | tuple[str, str],
+     temperature: float = EnvDefaults.TEMPERATURE,
+     max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+     max_retries: int = EnvDefaults.MAX_RETRIES,
+     quiet: bool = False,
+ ) -> str:
+     """Generate commit message using Anthropic API with retry logic.
+
+     Args:
+         model: The model name (e.g., 'claude-3-5-haiku-latest', 'claude-3-opus-latest')
+         prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+         temperature: Controls randomness (0.0-1.0)
+         max_tokens: Maximum tokens in the response
+         max_retries: Number of retry attempts if generation fails
+         quiet: If True, suppress progress indicators
+
+     Returns:
+         A formatted commit message string
+
+     Raises:
+         AIError: If generation fails after max_retries attempts
+     """
+     api_key = os.getenv("ANTHROPIC_API_KEY")
+     if not api_key:
+         raise AIError.model_error("ANTHROPIC_API_KEY environment variable not set")
+
+     # Handle both old (string) and new (tuple) prompt formats
+     if isinstance(prompt, tuple):
+         system_prompt, user_prompt = prompt
+         messages = [{"role": "user", "content": user_prompt}]
+         payload = {
+             "model": model,
+             "messages": messages,
+             "system": system_prompt,
+             "temperature": temperature,
+             "max_tokens": max_tokens,
+         }
+     else:
+         # Backward compatibility: treat string as user prompt
+         messages = [{"role": "user", "content": prompt}]
+         payload = {
+             "model": model,
+             "messages": messages,
+             "temperature": temperature,
+             "max_tokens": max_tokens,
+         }
+
+     headers = {
+         "Content-Type": "application/json",
+         "x-api-key": api_key,
+         "anthropic-version": "2023-06-01",
+     }
+
+     return _make_request_with_retry(
+         url="https://api.anthropic.com/v1/messages",
+         headers=headers,
+         payload=payload,
+         provider_name=f"Anthropic {model}",
+         max_retries=max_retries,
+         quiet=quiet,
+         response_parser=lambda r: r["content"][0]["text"],
+     )
+
+
+ def cerebras_generate(
+     model: str,
+     prompt: str | tuple[str, str],
+     temperature: float = EnvDefaults.TEMPERATURE,
+     max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+     max_retries: int = EnvDefaults.MAX_RETRIES,
+     quiet: bool = False,
+ ) -> str:
+     """Generate commit message using Cerebras API with retry logic.
+
+     Args:
+         model: The model name (e.g., 'llama3.1-8b', 'llama3.1-70b')
+         prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+         temperature: Controls randomness (0.0-1.0)
+         max_tokens: Maximum tokens in the response
+         max_retries: Number of retry attempts if generation fails
+         quiet: If True, suppress progress indicators
+
+     Returns:
+         A formatted commit message string
+
+     Raises:
+         AIError: If generation fails after max_retries attempts
+     """
+     api_key = os.getenv("CEREBRAS_API_KEY")
+     if not api_key:
+         raise AIError.model_error("CEREBRAS_API_KEY environment variable not set")
+
+     # Handle both old (string) and new (tuple) prompt formats
+     if isinstance(prompt, tuple):
+         system_prompt, user_prompt = prompt
+         messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+     else:
+         # Backward compatibility: treat string as user prompt
+         messages = [{"role": "user", "content": prompt}]
+
+     payload = {
+         "model": model,
+         "messages": messages,
+         "temperature": temperature,
+         "max_tokens": max_tokens,
+     }
+
+     headers = {
+         "Content-Type": "application/json",
+         "Authorization": f"Bearer {api_key}",
+     }
+
+     return _make_request_with_retry(
+         url="https://api.cerebras.ai/v1/chat/completions",
+         headers=headers,
+         payload=payload,
+         provider_name=f"Cerebras {model}",
+         max_retries=max_retries,
+         quiet=quiet,
+         response_parser=lambda r: r["choices"][0]["message"]["content"],
+     )
+
+
+ def groq_generate(
+     model: str,
+     prompt: str | tuple[str, str],
+     temperature: float = EnvDefaults.TEMPERATURE,
+     max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+     max_retries: int = EnvDefaults.MAX_RETRIES,
+     quiet: bool = False,
+ ) -> str:
+     """Generate commit message using Groq API with retry logic.
+
+     Args:
+         model: The model name (e.g., 'llama3-8b-8192', 'llama3-70b-8192')
+         prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+         temperature: Controls randomness (0.0-1.0)
+         max_tokens: Maximum tokens in the response
+         max_retries: Number of retry attempts if generation fails
+         quiet: If True, suppress progress indicators
+
+     Returns:
+         A formatted commit message string
+
+     Raises:
+         AIError: If generation fails after max_retries attempts
+     """
+     api_key = os.getenv("GROQ_API_KEY")
+     if not api_key:
+         raise AIError.model_error("GROQ_API_KEY environment variable not set")
+
+     # Handle both old (string) and new (tuple) prompt formats
+     if isinstance(prompt, tuple):
+         system_prompt, user_prompt = prompt
+         messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+     else:
+         # Backward compatibility: treat string as user prompt
+         messages = [{"role": "user", "content": prompt}]
+
+     payload = {
+         "model": model,
+         "messages": messages,
+         "temperature": temperature,
+         "max_tokens": max_tokens,
+     }
+
+     headers = {
+         "Content-Type": "application/json",
+         "Authorization": f"Bearer {api_key}",
+     }
+
+     return _make_request_with_retry(
+         url="https://api.groq.com/openai/v1/chat/completions",
+         headers=headers,
+         payload=payload,
+         provider_name=f"Groq {model}",
+         max_retries=max_retries,
+         quiet=quiet,
+         response_parser=lambda r: r["choices"][0]["message"]["content"],
+     )
+
+
+ def ollama_generate(
+     model: str,
+     prompt: str | tuple[str, str],
+     temperature: float = EnvDefaults.TEMPERATURE,
+     max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+     max_retries: int = EnvDefaults.MAX_RETRIES,
+     quiet: bool = False,
+ ) -> str:
+     """Generate commit message using Ollama API with retry logic.
+
+     Args:
+         model: The model name (e.g., 'llama3', 'mistral')
+         prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+         temperature: Controls randomness (0.0-1.0)
+         max_tokens: Maximum tokens in the response (note: Ollama uses 'num_predict')
+         max_retries: Number of retry attempts if generation fails
+         quiet: If True, suppress progress indicators
+
+     Returns:
+         A formatted commit message string
+
+     Raises:
+         AIError: If generation fails after max_retries attempts
+     """
+     # Handle both old (string) and new (tuple) prompt formats
+     if isinstance(prompt, tuple):
+         system_prompt, user_prompt = prompt
+         messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+     else:
+         # Backward compatibility: treat string as user prompt
+         messages = [{"role": "user", "content": prompt}]
+
+     payload = {
+         "model": model,
+         "messages": messages,
+         "stream": False,
+         "options": {
+             "temperature": temperature,
+             "num_predict": max_tokens,
+         },
+     }
+
+     headers = {
+         "Content-Type": "application/json",
+     }
+
+     # Ollama typically runs locally on port 11434
+     ollama_url = os.getenv("OLLAMA_URL", "http://localhost:11434")
+
+     return _make_request_with_retry(
+         url=f"{ollama_url}/api/chat",
+         headers=headers,
+         payload=payload,
+         provider_name=f"Ollama {model}",
+         max_retries=max_retries,
+         quiet=quiet,
+         response_parser=lambda r: r["message"]["content"],
+     )
+
+
+ def openai_generate(
+     model: str,
+     prompt: str | tuple[str, str],
+     temperature: float = EnvDefaults.TEMPERATURE,
+     max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+     max_retries: int = EnvDefaults.MAX_RETRIES,
+     quiet: bool = False,
+ ) -> str:
+     """Generate commit message using OpenAI API with retry logic.
+
+     Args:
+         model: The model name (e.g., 'gpt-4', 'gpt-3.5-turbo')
+         prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+         temperature: Controls randomness (0.0-1.0)
+         max_tokens: Maximum tokens in the response
+         max_retries: Number of retry attempts if generation fails
+         quiet: If True, suppress progress indicators
+
+     Returns:
+         A formatted commit message string
+
+     Raises:
+         AIError: If generation fails after max_retries attempts
+     """
+     api_key = os.getenv("OPENAI_API_KEY")
+     if not api_key:
+         raise AIError.model_error("OPENAI_API_KEY environment variable not set")
+
+     # Handle both old (string) and new (tuple) prompt formats
+     if isinstance(prompt, tuple):
+         system_prompt, user_prompt = prompt
+         messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+     else:
+         # Backward compatibility: treat string as user prompt
+         messages = [{"role": "user", "content": prompt}]
+
+     payload = {
+         "model": model,
+         "messages": messages,
+         "temperature": temperature,
+         "max_tokens": max_tokens,
+     }
+
+     headers = {
+         "Content-Type": "application/json",
+         "Authorization": f"Bearer {api_key}",
+     }
+
+     return _make_request_with_retry(
+         url="https://api.openai.com/v1/chat/completions",
+         headers=headers,
+         payload=payload,
+         provider_name=f"OpenAI {model}",
+         max_retries=max_retries,
+         quiet=quiet,
+         response_parser=lambda r: r["choices"][0]["message"]["content"],
+     )
+
+
+ def _make_request_with_retry(
+     url: str,
+     headers: dict,
+     payload: dict,
+     provider_name: str,
+     max_retries: int,
+     quiet: bool,
+     response_parser: callable,
+ ) -> str:
+     """Make HTTP request with retry logic and common error handling."""
+     if quiet:
+         spinner = None
+     else:
+         spinner = Halo(text=f"Generating commit message with {provider_name}...", spinner="dots")
+         spinner.start()
+
+     last_error = None
+     retry_count = 0
+
+     while retry_count < max_retries:
+         try:
+             logger.debug(f"Trying with {provider_name} (attempt {retry_count + 1}/{max_retries})")
+
+             with httpx.Client(timeout=30.0) as client:
+                 response = client.post(url, headers=headers, json=payload)
+                 response.raise_for_status()
+
+                 response_data = response.json()
+                 message = response_parser(response_data)
+
+             if spinner:
+                 spinner.succeed(f"Generated commit message with {provider_name}")
+
+             return message
+
+         except Exception as e:
+             last_error = e
+             retry_count += 1
+
+             if retry_count == max_retries:
+                 logger.warning(f"Error generating commit message: {e}. Giving up.")
+                 break
+
+             wait_time = 2**retry_count
+             logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
+             if spinner:
+                 for i in range(wait_time, 0, -1):
+                     spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
+                     time.sleep(1)
+             else:
+                 time.sleep(wait_time)
+
+     if spinner:
+         spinner.fail(f"Failed to generate commit message with {provider_name}")
+
+     error_type = _classify_error(str(last_error))
+     raise AIError(
+         f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
+     )
gac-0.19.1/src/gac/ai.py DELETED
@@ -1,181 +0,0 @@
- """AI provider integration for gac.
-
- This module provides core functionality for AI provider interaction.
- It consolidates all AI-related functionality including token counting and commit message generation.
- """
-
- import logging
- import time
- from functools import lru_cache
- from typing import Any
-
- import aisuite as ai
- import tiktoken
- from halo import Halo
-
- from gac.constants import EnvDefaults, Utility
- from gac.errors import AIError
-
- logger = logging.getLogger(__name__)
-
-
- def count_tokens(content: str | list[dict[str, str]] | dict[str, Any], model: str) -> int:
-     """Count tokens in content using the model's tokenizer."""
-     text = extract_text_content(content)
-     if not text:
-         return 0
-
-     if model.startswith("anthropic"):
-         import anthropic
-
-         try:
-             client = anthropic.Anthropic()
-
-             # Use the messages.count_tokens API for accurate counting
-             model_name = model.split(":", 1)[1] if ":" in model else "claude-3-5-haiku-latest"
-             response = client.messages.count_tokens(model=model_name, messages=[{"role": "user", "content": text}])
-
-             return response.input_tokens
-         except Exception:
-             # Fallback to simple estimation for Anthropic models
-             return len(text) // 4
-
-     try:
-         encoding = get_encoding(model)
-         return len(encoding.encode(text))
-     except Exception as e:
-         logger.error(f"Error counting tokens: {e}")
-         return len(text) // 4
-
-
- def extract_text_content(content: str | list[dict[str, str]] | dict[str, Any]) -> str:
-     """Extract text content from various input formats."""
-     if isinstance(content, str):
-         return content
-     elif isinstance(content, list):
-         return "\n".join(msg["content"] for msg in content if isinstance(msg, dict) and "content" in msg)
-     elif isinstance(content, dict) and "content" in content:
-         return content["content"]
-     return ""
-
-
- @lru_cache(maxsize=1)
- def get_encoding(model: str) -> tiktoken.Encoding:
-     """Get the appropriate encoding for a given model."""
-     model_name = model.split(":")[-1] if ":" in model else model
-     try:
-         return tiktoken.encoding_for_model(model_name)
-     except KeyError:
-         return tiktoken.get_encoding(Utility.DEFAULT_ENCODING)
-
-
- def generate_commit_message(
-     model: str,
-     prompt: str | tuple[str, str],
-     temperature: float = EnvDefaults.TEMPERATURE,
-     max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
-     max_retries: int = EnvDefaults.MAX_RETRIES,
-     quiet: bool = False,
- ) -> str:
-     """Generate a commit message using aisuite.
-
-     Args:
-         model: The model to use in provider:model_name format (e.g., 'anthropic:claude-3-5-haiku-latest')
-         prompt: Either a string prompt (for backward compatibility) or tuple of (system_prompt, user_prompt)
-         temperature: Controls randomness (0.0-1.0), lower values are more deterministic
-         max_tokens: Maximum tokens in the response
-         max_retries: Number of retry attempts if generation fails
-         quiet: If True, suppress progress indicators
-
-     Returns:
-         A formatted commit message string
-
-     Raises:
-         AIError: If generation fails after max_retries attempts
-
-     Example:
-         >>> model = "anthropic:claude-3-5-haiku-latest"
-         >>> system_prompt, user_prompt = build_prompt("On branch main", "diff --git a/README.md b/README.md")
-         >>> generate_commit_message(model, (system_prompt, user_prompt))
-         'docs: Update README with installation instructions'
-     """
-     try:
-         _, _ = model.split(":", 1)
-     except ValueError as err:
-         raise AIError.model_error(
-             f"Invalid model format: {model}. Please use the format 'provider:model_name'."
-         ) from err
-
-     client = ai.Client()
-
-     # Handle both old (string) and new (tuple) prompt formats
-     if isinstance(prompt, tuple):
-         system_prompt, user_prompt = prompt
-         messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
-     else:
-         # Backward compatibility: treat string as user prompt
-         messages = [{"role": "user", "content": prompt}]
-
-     if quiet:
-         spinner = None
-     else:
-         spinner = Halo(text=f"Generating commit message with {model}...", spinner="dots")
-         spinner.start()
-
-     last_error = None
-
-     retry_count = 0
-     while retry_count < max_retries:
-         try:
-             logger.debug(f"Trying with model {model} (attempt {retry_count + 1}/{max_retries})")
-             response = client.chat.completions.create(
-                 model=model,
-                 messages=messages,
-                 temperature=temperature,
-                 max_tokens=max_tokens,
-             )
-
-             message = response.choices[0].message.content if hasattr(response, "choices") else response.content
-
-             if spinner:
-                 spinner.succeed(f"Generated commit message with {model}")
-
-             return message
-
-         except Exception as e:
-             last_error = e
-             retry_count += 1
-
-             if retry_count == max_retries:
-                 logger.warning(f"Error generating commit message: {e}. Giving up.")
-                 break
-
-             wait_time = 2**retry_count
-             logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
-             if spinner:
-                 for i in range(wait_time, 0, -1):
-                     spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
-                     time.sleep(1)
-             else:
-                 time.sleep(wait_time)
-     if spinner:
-         spinner.fail("Failed to generate commit message")
-
-     error_str = str(last_error).lower()
-
-     if "api key" in error_str or "unauthorized" in error_str or "authentication" in error_str:
-         error_type = "authentication"
-     elif "timeout" in error_str:
-         error_type = "timeout"
-     elif "rate limit" in error_str or "too many requests" in error_str:
-         error_type = "rate_limit"
-     elif "connect" in error_str or "network" in error_str:
-         error_type = "connection"
-     elif "model" in error_str or "not found" in error_str:
-         error_type = "model"
-     else:
-         error_type = "unknown"
-
-     raise AIError(
-         f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
-     )
13 files without changes