gac 0.17.2__py3-none-any.whl → 3.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. gac/__version__.py +1 -1
  2. gac/ai.py +69 -123
  3. gac/ai_utils.py +227 -0
  4. gac/auth_cli.py +69 -0
  5. gac/cli.py +87 -19
  6. gac/config.py +13 -7
  7. gac/config_cli.py +26 -5
  8. gac/constants.py +176 -5
  9. gac/errors.py +14 -0
  10. gac/git.py +207 -11
  11. gac/init_cli.py +52 -29
  12. gac/language_cli.py +378 -0
  13. gac/main.py +922 -189
  14. gac/model_cli.py +374 -0
  15. gac/oauth/__init__.py +1 -0
  16. gac/oauth/claude_code.py +397 -0
  17. gac/preprocess.py +5 -5
  18. gac/prompt.py +656 -219
  19. gac/providers/__init__.py +88 -0
  20. gac/providers/anthropic.py +51 -0
  21. gac/providers/azure_openai.py +97 -0
  22. gac/providers/cerebras.py +38 -0
  23. gac/providers/chutes.py +71 -0
  24. gac/providers/claude_code.py +102 -0
  25. gac/providers/custom_anthropic.py +133 -0
  26. gac/providers/custom_openai.py +98 -0
  27. gac/providers/deepseek.py +38 -0
  28. gac/providers/fireworks.py +38 -0
  29. gac/providers/gemini.py +87 -0
  30. gac/providers/groq.py +63 -0
  31. gac/providers/kimi_coding.py +63 -0
  32. gac/providers/lmstudio.py +59 -0
  33. gac/providers/minimax.py +38 -0
  34. gac/providers/mistral.py +38 -0
  35. gac/providers/moonshot.py +38 -0
  36. gac/providers/ollama.py +50 -0
  37. gac/providers/openai.py +38 -0
  38. gac/providers/openrouter.py +58 -0
  39. gac/providers/replicate.py +98 -0
  40. gac/providers/streamlake.py +51 -0
  41. gac/providers/synthetic.py +42 -0
  42. gac/providers/together.py +38 -0
  43. gac/providers/zai.py +59 -0
  44. gac/security.py +293 -0
  45. gac/utils.py +243 -4
  46. gac/workflow_utils.py +222 -0
  47. gac-3.6.0.dist-info/METADATA +281 -0
  48. gac-3.6.0.dist-info/RECORD +53 -0
  49. {gac-0.17.2.dist-info → gac-3.6.0.dist-info}/WHEEL +1 -1
  50. {gac-0.17.2.dist-info → gac-3.6.0.dist-info}/licenses/LICENSE +1 -1
  51. gac-0.17.2.dist-info/METADATA +0 -221
  52. gac-0.17.2.dist-info/RECORD +0 -20
  53. {gac-0.17.2.dist-info → gac-3.6.0.dist-info}/entry_points.txt +0 -0
gac/__version__.py CHANGED
@@ -1,3 +1,3 @@
  """Version information for gac package."""

- __version__ = "0.17.2"
+ __version__ = "3.6.0"
gac/ai.py CHANGED
@@ -5,73 +5,31 @@ It consolidates all AI-related functionality including token counting and commit
  """

  import logging
- import time
- from functools import lru_cache
- from typing import Any

- import aisuite as ai
- import tiktoken
- from halo import Halo
-
- from gac.constants import EnvDefaults, Utility
+ from gac.ai_utils import generate_with_retries
+ from gac.constants import EnvDefaults
  from gac.errors import AIError
+ from gac.providers import PROVIDER_REGISTRY

  logger = logging.getLogger(__name__)


- def count_tokens(content: str | list[dict[str, str]] | dict[str, Any], model: str) -> int:
-     """Count tokens in content using the model's tokenizer."""
-     text = extract_text_content(content)
-     if not text:
-         return 0
-
-     if model.startswith("anthropic"):
-         import anthropic
-
-         return anthropic.Client().count_tokens(text)
-
-     try:
-         encoding = get_encoding(model)
-         return len(encoding.encode(text))
-     except Exception as e:
-         logger.error(f"Error counting tokens: {e}")
-         return len(text) // 4
-
-
- def extract_text_content(content: str | list[dict[str, str]] | dict[str, Any]) -> str:
-     """Extract text content from various input formats."""
-     if isinstance(content, str):
-         return content
-     elif isinstance(content, list):
-         return "\n".join(msg["content"] for msg in content if isinstance(msg, dict) and "content" in msg)
-     elif isinstance(content, dict) and "content" in content:
-         return content["content"]
-     return ""
-
-
- @lru_cache(maxsize=1)
- def get_encoding(model: str) -> tiktoken.Encoding:
-     """Get the appropriate encoding for a given model."""
-     model_name = model.split(":")[-1] if ":" in model else model
-     try:
-         return tiktoken.encoding_for_model(model_name)
-     except KeyError:
-         return tiktoken.get_encoding(Utility.DEFAULT_ENCODING)
-
-
  def generate_commit_message(
      model: str,
-     prompt: str,
+     prompt: str | tuple[str, str] | list[dict[str, str]],
      temperature: float = EnvDefaults.TEMPERATURE,
      max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
      max_retries: int = EnvDefaults.MAX_RETRIES,
      quiet: bool = False,
+     is_group: bool = False,
+     skip_success_message: bool = False,
+     task_description: str = "commit message",
  ) -> str:
-     """Generate a commit message using aisuite.
+     """Generate a commit message using direct API calls to AI providers.

      Args:
-         model: The model to use in provider:model_name format (e.g., 'anthropic:claude-3-5-haiku-latest')
-         prompt: The formatted prompt containing diff and context
+         model: The model to use in provider:model_name format (e.g., 'anthropic:claude-haiku-4-5')
+         prompt: Either a string prompt (for backward compatibility) or tuple of (system_prompt, user_prompt)
          temperature: Controls randomness (0.0-1.0), lower values are more deterministic
          max_tokens: Maximum tokens in the response
          max_retries: Number of retry attempts if generation fails
@@ -84,80 +42,68 @@ def generate_commit_message(
          AIError: If generation fails after max_retries attempts

      Example:
-         >>> model = "anthropic:claude-3-5-haiku-latest"
-         >>> prompt = build_prompt("On branch main", "diff --git a/README.md b/README.md")
-         >>> generate_commit_message(model, prompt)
+         >>> model = "anthropic:claude-haiku-4-5"
+         >>> system_prompt, user_prompt = build_prompt("On branch main", "diff --git a/README.md b/README.md")
+         >>> generate_commit_message(model, (system_prompt, user_prompt))
          'docs: Update README with installation instructions'
      """
-     try:
-         _, _ = model.split(":", 1)
-     except ValueError as err:
-         raise AIError.model_error(
-             f"Invalid model format: {model}. Please use the format 'provider:model_name'."
-         ) from err
-
-     client = ai.Client()
-
-     if quiet:
-         spinner = None
+     # Handle both old (string) and new (tuple) prompt formats
+     if isinstance(prompt, list):
+         messages = [{**msg} for msg in prompt]
+     elif isinstance(prompt, tuple):
+         system_prompt, user_prompt = prompt
+         messages = [
+             {"role": "system", "content": system_prompt or ""},
+             {"role": "user", "content": user_prompt},
+         ]
      else:
-         spinner = Halo(text=f"Generating commit message with {model}...", spinner="dots")
-         spinner.start()
-
-     last_error = None
-
-     retry_count = 0
-     while retry_count < max_retries:
-         try:
-             logger.debug(f"Trying with model {model} (attempt {retry_count + 1}/{max_retries})")
-             response = client.chat.completions.create(
-                 model=model,
-                 messages=[{"role": "user", "content": prompt}],
-                 temperature=temperature,
-                 max_tokens=max_tokens,
-             )
-
-             message = response.choices[0].message.content if hasattr(response, "choices") else response.content
-
-             if spinner:
-                 spinner.succeed(f"Generated commit message with {model}")
-
-             return message
-
-         except Exception as e:
-             last_error = e
-             retry_count += 1
-
-             if retry_count == max_retries:
-                 logger.warning(f"Error generating commit message: {e}. Giving up.")
-                 break
-
-             wait_time = 2**retry_count
-             logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
-             if spinner:
-                 for i in range(wait_time, 0, -1):
-                     spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
-                     time.sleep(1)
-             else:
-                 time.sleep(wait_time)
-     if spinner:
-         spinner.fail("Failed to generate commit message")
-
-     error_str = str(last_error).lower()
+         # Backward compatibility: treat string as user prompt with empty system prompt
+         user_prompt = str(prompt)
+         messages = [
+             {"role": "system", "content": ""},
+             {"role": "user", "content": user_prompt},
+         ]
+
+     # Generate the commit message using centralized retry logic
+     try:
+         return generate_with_retries(
+             provider_funcs=PROVIDER_REGISTRY,
+             model=model,
+             messages=messages,
+             temperature=temperature,
+             max_tokens=max_tokens,
+             max_retries=max_retries,
+             quiet=quiet,
+             is_group=is_group,
+             skip_success_message=skip_success_message,
+             task_description=task_description,
+         )
+     except AIError:
+         # Re-raise AIError exceptions as-is to preserve error classification
+         raise
+     except Exception as e:
+         logger.error(f"Failed to generate commit message: {e}")
+         raise AIError.model_error(f"Failed to generate commit message: {e}") from e

-     if "api key" in error_str or "unauthorized" in error_str or "authentication" in error_str:
-         error_type = "authentication"
-     elif "timeout" in error_str:
-         error_type = "timeout"
-     elif "rate limit" in error_str or "too many requests" in error_str:
-         error_type = "rate_limit"
-     elif "connect" in error_str or "network" in error_str:
-         error_type = "connection"
-     elif "model" in error_str or "not found" in error_str:
-         error_type = "model"
-     else:
-         error_type = "unknown"

-     raise AIError(
-         f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
+ def generate_grouped_commits(
+     model: str,
+     prompt: list[dict[str, str]],
+     temperature: float,
+     max_tokens: int,
+     max_retries: int,
+     quiet: bool = False,
+     skip_success_message: bool = False,
+ ) -> str:
+     """Generate grouped commits JSON response."""
+     return generate_commit_message(
+         model=model,
+         prompt=prompt,
+         temperature=temperature,
+         max_tokens=max_tokens,
+         max_retries=max_retries,
+         quiet=quiet,
+         is_group=True,
+         skip_success_message=skip_success_message,
+         task_description="commit message",
      )
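
Note: with this change, callers pass a (system_prompt, user_prompt) tuple or a full messages list instead of a single string, though bare strings still work for backward compatibility. A minimal sketch of the new calling convention, inferred from the signature and docstring above (the prompt strings here are placeholder values, not output of the real build_prompt):

    from gac.ai import generate_commit_message

    system_prompt = "You write conventional commit messages."  # placeholder
    user_prompt = "diff --git a/README.md b/README.md\n..."    # placeholder

    message = generate_commit_message(
        model="anthropic:claude-haiku-4-5",
        prompt=(system_prompt, user_prompt),  # tuple form; a plain string is also accepted
        quiet=True,
    )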
gac/ai_utils.py ADDED
@@ -0,0 +1,227 @@
+ """Utilities for AI provider integration for gac.
+
+ This module provides utility functions that support the AI provider implementations.
+ """
+
+ import logging
+ import os
+ import time
+ from functools import lru_cache
+ from typing import Any
+
+ import tiktoken
+ from rich.console import Console
+ from rich.status import Status
+
+ from gac.constants import EnvDefaults, Utility
+ from gac.errors import AIError
+ from gac.providers import SUPPORTED_PROVIDERS
+
+ logger = logging.getLogger(__name__)
+ console = Console()
+
+
+ @lru_cache(maxsize=1)
+ def _should_skip_tiktoken_counting() -> bool:
+     """Return True when token counting should avoid tiktoken calls entirely."""
+     value = os.getenv("GAC_NO_TIKTOKEN", str(EnvDefaults.NO_TIKTOKEN))
+     return value.lower() in ("true", "1", "yes", "on")
+
+
+ def count_tokens(content: str | list[dict[str, str]] | dict[str, Any], model: str) -> int:
+     """Count tokens in content using the model's tokenizer."""
+     text = extract_text_content(content)
+     if not text:
+         return 0
+
+     if _should_skip_tiktoken_counting():
+         return len(text) // 4
+
+     try:
+         encoding = get_encoding(model)
+         return len(encoding.encode(text))
+     except Exception as e:
+         logger.error(f"Error counting tokens: {e}")
+         # Fallback to rough estimation (4 chars per token on average)
+         return len(text) // 4
+
+
+ def extract_text_content(content: str | list[dict[str, str]] | dict[str, Any]) -> str:
+     """Extract text content from various input formats."""
+     if isinstance(content, str):
+         return content
+     elif isinstance(content, list):
+         return "\n".join(msg["content"] for msg in content if isinstance(msg, dict) and "content" in msg)
+     elif isinstance(content, dict) and "content" in content:
+         return content["content"]  # type: ignore[no-any-return]
+     return ""
+
+
+ @lru_cache(maxsize=1)
+ def get_encoding(model: str) -> tiktoken.Encoding:
+     """Get the appropriate encoding for a given model."""
+     provider, model_name = model.split(":", 1) if ":" in model else (None, model)
+
+     if provider != "openai":
+         return tiktoken.get_encoding(Utility.DEFAULT_ENCODING)
+
+     try:
+         return tiktoken.encoding_for_model(model_name)
+     except KeyError:
+         # Fall back to default encoding if model not found
+         return tiktoken.get_encoding(Utility.DEFAULT_ENCODING)
+     except Exception:
+         # If there are any network/SSL issues, fall back to default encoding
+         return tiktoken.get_encoding(Utility.DEFAULT_ENCODING)
+
+
+ def _classify_error(error_str: str) -> str:
+     """Classify error types based on error message content."""
+     error_str = error_str.lower()
+
+     if (
+         "api key" in error_str
+         or "unauthorized" in error_str
+         or "authentication" in error_str
+         or "invalid api key" in error_str
+     ):
+         return "authentication"
+     elif "timeout" in error_str or "timed out" in error_str or "request timeout" in error_str:
+         return "timeout"
+     elif "rate limit" in error_str or "too many requests" in error_str or "rate limit exceeded" in error_str:
+         return "rate_limit"
+     elif "connect" in error_str or "network" in error_str or "network connection failed" in error_str:
+         return "connection"
+     elif "model" in error_str or "not found" in error_str or "model not found" in error_str:
+         return "model"
+     else:
+         return "unknown"
+
+
+ def generate_with_retries(
+     provider_funcs: dict,
+     model: str,
+     messages: list[dict[str, str]],
+     temperature: float,
+     max_tokens: int,
+     max_retries: int,
+     quiet: bool = False,
+     is_group: bool = False,
+     skip_success_message: bool = False,
+     task_description: str = "commit message",
+ ) -> str:
+     """Generate content with retry logic using direct API calls."""
+     # Parse model string to determine provider and actual model
+     if ":" not in model:
+         raise AIError.model_error(f"Invalid model format. Expected 'provider:model', got '{model}'")
+
+     provider, model_name = model.split(":", 1)
+
+     # Validate provider
+     if provider not in SUPPORTED_PROVIDERS:
+         raise AIError.model_error(f"Unsupported provider: {provider}. Supported providers: {SUPPORTED_PROVIDERS}")
+
+     if not messages:
+         raise AIError.model_error("No messages provided for AI generation")
+
+     # Set up spinner
+     if is_group:
+         message_type = f"grouped {task_description}s"
+     else:
+         message_type = task_description
+
+     if quiet:
+         spinner = None
+     else:
+         spinner = Status(f"Generating {message_type} with {provider} {model_name}...")
+         spinner.start()
+
+     last_exception = None
+     last_error_type = "unknown"
+
+     for attempt in range(max_retries):
+         try:
+             if not quiet and not skip_success_message and attempt > 0:
+                 if spinner:
+                     spinner.update(f"Retry {attempt + 1}/{max_retries} with {provider} {model_name}...")
+                 logger.info(f"Retry attempt {attempt + 1}/{max_retries}")
+
+             # Call the appropriate provider function
+             provider_func = provider_funcs.get(provider)
+             if not provider_func:
+                 raise AIError.model_error(f"Provider function not found for: {provider}")
+
+             content = provider_func(model=model_name, messages=messages, temperature=temperature, max_tokens=max_tokens)
+
+             if spinner:
+                 if skip_success_message:
+                     spinner.stop()  # Stop spinner without showing success/failure
+                 else:
+                     spinner.stop()
+                     console.print(f"✓ Generated {message_type} with {provider} {model_name}")
+
+             if content is not None and content.strip():
+                 return content.strip()  # type: ignore[no-any-return]
+             else:
+                 logger.warning(f"Empty or None content received from {provider} {model_name}: {repr(content)}")
+                 raise AIError.model_error("Empty response from AI model")
+
+         except Exception as e:
+             last_exception = e
+             error_type = _classify_error(str(e))
+             last_error_type = error_type
+
+             # For authentication and model errors, don't retry
+             if error_type in ["authentication", "model"]:
+                 if spinner and not skip_success_message:
+                     spinner.stop()
+                     console.print(f"✗ Failed to generate {message_type} with {provider} {model_name}")
+
+                 # Create the appropriate error type based on classification
+                 if error_type == "authentication":
+                     raise AIError.authentication_error(f"AI generation failed: {str(e)}") from e
+                 elif error_type == "model":
+                     raise AIError.model_error(f"AI generation failed: {str(e)}") from e
+
+             if attempt < max_retries - 1:
+                 # Exponential backoff
+                 wait_time = 2**attempt
+                 if not quiet and not skip_success_message:
+                     if attempt == 0:
+                         logger.warning(f"AI generation failed, retrying in {wait_time}s: {str(e)}")
+                     else:
+                         logger.warning(
+                             f"AI generation failed (attempt {attempt + 1}), retrying in {wait_time}s: {str(e)}"
+                         )
+
+                 if spinner and not skip_success_message:
+                     for i in range(wait_time, 0, -1):
+                         spinner.update(f"Retry {attempt + 1}/{max_retries} in {i}s...")
+                         time.sleep(1)
+                 else:
+                     time.sleep(wait_time)
+             else:
+                 num_retries = max_retries
+                 retry_word = "retry" if num_retries == 1 else "retries"
+                 logger.error(f"AI generation failed after {num_retries} {retry_word}: {str(e)}")
+
+     if spinner and not skip_success_message:
+         spinner.stop()
+         console.print(f"✗ Failed to generate {message_type} with {provider} {model_name}")
+
+     # If we get here, all retries failed - use the last classified error type
+     num_retries = max_retries
+     retry_word = "retry" if num_retries == 1 else "retries"
+     error_message = f"Failed to generate {message_type} after {num_retries} {retry_word}"
+     if last_error_type == "authentication":
+         raise AIError.authentication_error(error_message) from last_exception
+     elif last_error_type == "rate_limit":
+         raise AIError.rate_limit_error(error_message) from last_exception
+     elif last_error_type == "timeout":
+         raise AIError.timeout_error(error_message) from last_exception
+     elif last_error_type == "connection":
+         raise AIError.connection_error(error_message) from last_exception
+     elif last_error_type == "model":
+         raise AIError.model_error(error_message) from last_exception
+     else:
+         raise AIError.unknown_error(error_message) from last_exception
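
Note: _classify_error drives both the fail-fast decision (authentication and model errors are never retried) and the AIError subtype raised once retries are exhausted, while retryable errors back off with wait_time = 2**attempt (1s, 2s, 4s, ...). A small sketch of how the keyword matching above classifies typical error strings (illustrative inputs, not real provider responses; _classify_error is a private helper):

    from gac.ai_utils import _classify_error

    assert _classify_error("Invalid API key provided") == "authentication"
    assert _classify_error("Request timed out after 30s") == "timeout"
    assert _classify_error("429 Too Many Requests") == "rate_limit"
    assert _classify_error("Network connection failed") == "connection"
    assert _classify_error("Model not found: some-model") == "model"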
gac/auth_cli.py ADDED
@@ -0,0 +1,69 @@
+ """CLI for authenticating Claude Code OAuth tokens.
+
+ Provides a command to authenticate and re-authenticate Claude Code subscriptions.
+ """
+
+ import logging
+
+ import click
+
+ from gac.oauth.claude_code import authenticate_and_save, load_stored_token
+ from gac.utils import setup_logging
+
+ logger = logging.getLogger(__name__)
+
+
+ @click.command()
+ @click.option(
+     "--quiet",
+     "-q",
+     is_flag=True,
+     help="Suppress non-error output",
+ )
+ @click.option(
+     "--log-level",
+     default="INFO",
+     type=click.Choice(["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"], case_sensitive=False),
+     help="Set log level (default: INFO)",
+ )
+ def auth(quiet: bool = False, log_level: str = "INFO") -> None:
+     """Authenticate Claude Code OAuth token.
+
+     This command allows you to authenticate or re-authenticate your
+     Claude Code OAuth token when it expires or you want to refresh it.
+     It opens a browser window for the OAuth flow and saves the token
+     to ~/.gac.env.
+
+     The token is used by the Claude Code provider to access your
+     Claude Code subscription instead of requiring an Anthropic API key.
+     """
+     # Setup logging
+     if quiet:
+         effective_log_level = "ERROR"
+     else:
+         effective_log_level = log_level
+     setup_logging(effective_log_level)
+
+     # Check if there's an existing token
+     existing_token = load_stored_token()
+     if existing_token and not quiet:
+         click.echo("✓ Found existing Claude Code access token.")
+         click.echo()
+
+     if not quiet:
+         click.echo("🔐 Starting Claude Code OAuth authentication...")
+         click.echo(" Your browser will open automatically")
+         click.echo(" (Waiting up to 3 minutes for callback)")
+         click.echo()
+
+     # Perform OAuth authentication
+     success = authenticate_and_save(quiet=quiet)
+
+     if success:
+         if not quiet:
+             click.echo("✅ Claude Code authentication completed successfully!")
+             click.echo(" Your new token has been saved and is ready to use.")
+     else:
+         click.echo("❌ Claude Code authentication failed.")
+         click.echo(" Please try again or check your network connection.")
+         raise click.ClickException("Claude Code authentication failed")
+ raise click.ClickException("Claude Code authentication failed")