gac 1.0.1__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

gac/__init__.py CHANGED
@@ -2,15 +2,14 @@
 
 from gac.__version__ import __version__
 from gac.ai import generate_commit_message
-from gac.ai_providers import (
-    anthropic_generate,
-    cerebras_generate,
-    groq_generate,
-    ollama_generate,
-    openai_generate,
-)
 from gac.git import get_staged_files, push_changes
 from gac.prompt import build_prompt, clean_commit_message
+from gac.providers.anthropic import generate as anthropic_generate
+from gac.providers.cerebras import generate as cerebras_generate
+from gac.providers.groq import generate as groq_generate
+from gac.providers.ollama import generate as ollama_generate
+from gac.providers.openai import generate as openai_generate
+from gac.providers.openrouter import generate as openrouter_generate
 
 __all__ = [
     "__version__",
@@ -24,4 +23,5 @@ __all__ = [
     "groq_generate",
     "ollama_generate",
     "openai_generate",
+    "openrouter_generate",
 ]
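Because the package-level names are re-exported from the new per-provider modules, downstream imports keep working after the restructure. A minimal sketch of the two equivalent import styles (assuming gac 1.1.0 is installed):

# Package-level alias re-exported in gac/__init__.py:
from gac import openrouter_generate

# The per-provider module it now points at:
from gac.providers.openrouter import generate as openrouter_generate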
gac/__version__.py CHANGED
@@ -1,3 +1,3 @@
 """Version information for gac package."""
 
-__version__ = "1.0.1"
+__version__ = "1.1.0"
gac/ai.py CHANGED
@@ -5,121 +5,19 @@ It consolidates all AI-related functionality including token counting and commit
 """
 
 import logging
-import os
-from functools import lru_cache
-from typing import Any
 
-import httpx
-import tiktoken
-
-from gac.ai_providers import (
-    anthropic_generate,
-    cerebras_generate,
-    groq_generate,
-    ollama_generate,
-    openai_generate,
-)
-from gac.constants import EnvDefaults, Utility
+from gac.constants import EnvDefaults
 from gac.errors import AIError
+from gac.providers.anthropic import generate as anthropic_generate
+from gac.providers.cerebras import generate as cerebras_generate
+from gac.providers.groq import generate as groq_generate
+from gac.providers.ollama import generate as ollama_generate
+from gac.providers.openai import generate as openai_generate
+from gac.providers.openrouter import generate as openrouter_generate
 
 logger = logging.getLogger(__name__)
 
 
-def count_tokens(content: str | list[dict[str, str]] | dict[str, Any], model: str) -> int:
-    """Count tokens in content using the model's tokenizer."""
-    text = extract_text_content(content)
-    if not text:
-        return 0
-
-    if model.startswith("anthropic"):
-        anthropic_tokens = anthropic_count_tokens(text, model)
-        if anthropic_tokens is not None:
-            return anthropic_tokens
-        return len(text) // 4
-
-    try:
-        encoding = get_encoding(model)
-        return len(encoding.encode(text))
-    except Exception as e:
-        logger.error(f"Error counting tokens: {e}")
-        return len(text) // 4
-
-
-def anthropic_count_tokens(text: str, model: str) -> int | None:
-    """Call Anthropic's token count endpoint and return the token usage.
-
-    Returns the token count when successful, otherwise ``None`` so callers can
-    fall back to a heuristic estimate.
-    """
-    api_key = os.getenv("ANTHROPIC_API_KEY")
-    if not api_key:
-        logger.debug("ANTHROPIC_API_KEY not set; using heuristic token estimation for Anthropic model")
-        return None
-
-    model_name = model.split(":", 1)[1] if ":" in model else "claude-3-5-haiku-latest"
-    headers = {
-        "Content-Type": "application/json",
-        "x-api-key": api_key,
-        "anthropic-version": "2023-06-01",
-    }
-    payload = {
-        "model": model_name,
-        "messages": [
-            {
-                "role": "user",
-                "content": [
-                    {
-                        "type": "text",
-                        "text": text,
-                    }
-                ],
-            }
-        ],
-    }
-
-    try:
-        response = httpx.post(
-            "https://api.anthropic.com/v1/messages/count_tokens",
-            headers=headers,
-            json=payload,
-            timeout=30.0,
-        )
-        response.raise_for_status()
-        data = response.json()
-
-        if "input_tokens" in data:
-            return data["input_tokens"]
-        if "usage" in data and "input_tokens" in data["usage"]:
-            return data["usage"]["input_tokens"]
-
-        logger.warning("Unexpected response format from Anthropic token count API: %s", data)
-    except Exception as exc:
-        logger.warning("Failed to retrieve Anthropic token count via HTTP: %s", exc)
-
-    return None
-
-
-def extract_text_content(content: str | list[dict[str, str]] | dict[str, Any]) -> str:
-    """Extract text content from various input formats."""
-    if isinstance(content, str):
-        return content
-    elif isinstance(content, list):
-        return "\n".join(msg["content"] for msg in content if isinstance(msg, dict) and "content" in msg)
-    elif isinstance(content, dict) and "content" in content:
-        return content["content"]
-    return ""
-
-
-@lru_cache(maxsize=1)
-def get_encoding(model: str) -> tiktoken.Encoding:
-    """Get the appropriate encoding for a given model."""
-    model_name = model.split(":")[-1] if ":" in model else model
-    try:
-        return tiktoken.encoding_for_model(model_name)
-    except KeyError:
-        return tiktoken.get_encoding(Utility.DEFAULT_ENCODING)
-
-
 def generate_commit_message(
     model: str,
     prompt: str | tuple[str, str],
@@ -176,5 +74,7 @@ def generate_commit_message(
         return cerebras_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
     elif provider == "ollama":
        return ollama_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
+    elif provider == "openrouter":
+        return openrouter_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
     else:
         raise AIError.model_error(f"Unsupported provider: {provider}")
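The dispatcher keys off the provider prefix of the model string (the `provider:model` convention visible in the `model.split(":", 1)` handling elsewhere in this release). A hedged usage sketch of the new OpenRouter branch, with an illustrative prompt:

from gac.ai import generate_commit_message

# Assumes OPENROUTER_API_KEY is exported. "openrouter/auto" matches the
# default added to init_cli below; the tuple is (system_prompt, user_prompt).
message = generate_commit_message(
    "openrouter:openrouter/auto",
    ("You write conventional commit messages.", "Summarize this staged diff: ..."),
)
print(message)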
gac/ai_utils.py ADDED
@@ -0,0 +1,134 @@
+"""Utilities for AI provider integration for gac.
+
+This module provides utility functions that support the AI provider implementations.
+"""
+
+import logging
+import os
+from functools import lru_cache
+from typing import Any
+
+import httpx
+import tiktoken
+
+from gac.constants import Utility
+
+logger = logging.getLogger(__name__)
+
+
+def count_tokens(content: str | list[dict[str, str]] | dict[str, Any], model: str) -> int:
+    """Count tokens in content using the model's tokenizer."""
+    text = extract_text_content(content)
+    if not text:
+        return 0
+
+    if model.startswith("anthropic"):
+        anthropic_tokens = anthropic_count_tokens(text, model)
+        if anthropic_tokens is not None:
+            return anthropic_tokens
+        return len(text) // 4
+
+    try:
+        encoding = get_encoding(model)
+        return len(encoding.encode(text))
+    except Exception as e:
+        logger.error(f"Error counting tokens: {e}")
+        return len(text) // 4
+
+
+def anthropic_count_tokens(text: str, model: str) -> int | None:
+    """Call Anthropic's token count endpoint and return the token usage.
+
+    Returns the token count when successful, otherwise ``None`` so callers can
+    fall back to a heuristic estimate.
+    """
+    api_key = os.getenv("ANTHROPIC_API_KEY")
+    if not api_key:
+        logger.debug("ANTHROPIC_API_KEY not set; using heuristic token estimation for Anthropic model")
+        return None
+
+    model_name = model.split(":", 1)[1] if ":" in model else "claude-3-5-haiku-latest"
+    headers = {
+        "Content-Type": "application/json",
+        "x-api-key": api_key,
+        "anthropic-version": "2023-06-01",
+    }
+    payload = {
+        "model": model_name,
+        "messages": [
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": text,
+                    }
+                ],
+            }
+        ],
+    }
+
+    try:
+        response = httpx.post(
+            "https://api.anthropic.com/v1/messages/count_tokens",
+            headers=headers,
+            json=payload,
+            timeout=30.0,
+        )
+        response.raise_for_status()
+        data = response.json()
+
+        if "input_tokens" in data:
+            return data["input_tokens"]
+        if "usage" in data and "input_tokens" in data["usage"]:
+            return data["usage"]["input_tokens"]
+
+        logger.warning("Unexpected response format from Anthropic token count API: %s", data)
+    except Exception as exc:
+        logger.warning("Failed to retrieve Anthropic token count via HTTP: %s", exc)
+
+    return None
+
+
+def extract_text_content(content: str | list[dict[str, str]] | dict[str, Any]) -> str:
+    """Extract text content from various input formats."""
+    if isinstance(content, str):
+        return content
+    elif isinstance(content, list):
+        return "\n".join(msg["content"] for msg in content if isinstance(msg, dict) and "content" in msg)
+    elif isinstance(content, dict) and "content" in content:
+        return content["content"]
+    return ""
+
+
+@lru_cache(maxsize=1)
+def get_encoding(model: str) -> tiktoken.Encoding:
+    """Get the appropriate encoding for a given model."""
+    model_name = model.split(":")[-1] if ":" in model else model
+    try:
+        return tiktoken.encoding_for_model(model_name)
+    except KeyError:
+        return tiktoken.get_encoding(Utility.DEFAULT_ENCODING)
+
+
+def _classify_error(error_str: str) -> str:
+    """Classify error types based on error message content."""
+    error_str = error_str.lower()
+
+    if (
+        "api key" in error_str
+        or "unauthorized" in error_str
+        or "authentication" in error_str
+        or "invalid api key" in error_str
+    ):
+        return "authentication"
+    elif "timeout" in error_str or "timed out" in error_str or "request timeout" in error_str:
+        return "timeout"
+    elif "rate limit" in error_str or "too many requests" in error_str or "rate limit exceeded" in error_str:
+        return "rate_limit"
+    elif "connect" in error_str or "network" in error_str or "network connection failed" in error_str:
+        return "connection"
+    elif "model" in error_str or "not found" in error_str or "model not found" in error_str:
+        return "model"
+    else:
+        return "unknown"
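A short usage sketch of the relocated helpers (assumes gac's tiktoken and httpx dependencies are installed and ANTHROPIC_API_KEY is unset, so the Anthropic path falls back to the heuristic):

from gac.ai_utils import _classify_error, count_tokens

# tiktoken path: "gpt-4" maps to a real encoding, so this is an exact count.
print(count_tokens("feat: add OpenRouter provider", "openai:gpt-4"))

# Anthropic path without an API key uses the len(text) // 4 fallback.
print(count_tokens("x" * 100, "anthropic:claude-3-5-haiku-latest"))  # 25

# Error classification is substring matching on the lowercased message.
print(_classify_error("429 Too Many Requests"))     # rate_limit
print(_classify_error("Invalid API key provided"))  # authentication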
gac/init_cli.py CHANGED
@@ -24,6 +24,7 @@ def init() -> None:
         ("Cerebras", "qwen-3-coder-480b"),
         ("Groq", "meta-llama/llama-4-maverick-17b-128e-instruct"),
         ("Ollama", "gemma3"),
+        ("OpenRouter", "openrouter/auto"),
         ("OpenAI", "gpt-4.1-mini"),
     ]
     provider_names = [p[0] for p in providers]
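Each entry pairs a display name with a default model id; presumably the wizard joins the selection into the `provider:model` string that `generate_commit_message` dispatches on (an assumption, since the joining code is outside this hunk):

# Hypothetical reconstruction of how a selection could become a model string:
name, model = ("OpenRouter", "openrouter/auto")
print(f"{name.lower()}:{model}")  # openrouter:openrouter/auto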
gac/main.py CHANGED
@@ -10,7 +10,8 @@ import click
 from rich.console import Console
 from rich.panel import Panel
 
-from gac.ai import count_tokens, generate_commit_message
+from gac.ai import generate_commit_message
+from gac.ai_utils import count_tokens
 from gac.config import load_config
 from gac.constants import EnvDefaults, Utility
 from gac.errors import AIError, GitError, handle_error
gac/preprocess.py CHANGED
@@ -10,7 +10,7 @@ import logging
 import os
 import re
 
-from gac.ai import count_tokens
+from gac.ai_utils import count_tokens
 from gac.constants import (
     CodePatternImportance,
     FilePatterns,
gac/providers/__init__.py ADDED
@@ -0,0 +1 @@
+"""AI providers submodule for gac."""
gac/providers/anthropic.py ADDED
@@ -0,0 +1,141 @@
+"""Anthropic API provider for gac."""
+
+import logging
+import os
+import time
+
+import httpx
+from halo import Halo
+
+from gac.ai_utils import _classify_error
+from gac.constants import EnvDefaults
+from gac.errors import AIError
+
+logger = logging.getLogger(__name__)
+
+
+def generate(
+    model: str,
+    prompt: str | tuple[str, str],
+    temperature: float = EnvDefaults.TEMPERATURE,
+    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+    max_retries: int = EnvDefaults.MAX_RETRIES,
+    quiet: bool = False,
+) -> str:
+    """Generate commit message using Anthropic API with retry logic.
+
+    Args:
+        model: The model name (e.g., 'claude-3-5-haiku-latest', 'claude-3-opus-latest')
+        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+        temperature: Controls randomness (0.0-1.0)
+        max_tokens: Maximum tokens in the response
+        max_retries: Number of retry attempts if generation fails
+        quiet: If True, suppress progress indicators
+
+    Returns:
+        A formatted commit message string
+
+    Raises:
+        AIError: If generation fails after max_retries attempts
+    """
+    api_key = os.getenv("ANTHROPIC_API_KEY")
+    if not api_key:
+        raise AIError.model_error("ANTHROPIC_API_KEY environment variable not set")
+
+    # Handle both old (string) and new (tuple) prompt formats
+    if isinstance(prompt, tuple):
+        system_prompt, user_prompt = prompt
+        messages = [{"role": "user", "content": user_prompt}]
+        payload = {
+            "model": model,
+            "messages": messages,
+            "system": system_prompt,
+            "temperature": temperature,
+            "max_tokens": max_tokens,
+        }
+    else:
+        # Backward compatibility: treat string as user prompt
+        messages = [{"role": "user", "content": prompt}]
+        payload = {
+            "model": model,
+            "messages": messages,
+            "temperature": temperature,
+            "max_tokens": max_tokens,
+        }
+
+    headers = {
+        "Content-Type": "application/json",
+        "x-api-key": api_key,
+        "anthropic-version": "2023-06-01",
+    }
+
+    return _make_request_with_retry(
+        url="https://api.anthropic.com/v1/messages",
+        headers=headers,
+        payload=payload,
+        provider_name=f"Anthropic {model}",
+        max_retries=max_retries,
+        quiet=quiet,
+        response_parser=lambda r: r["content"][0]["text"],
+    )
+
+
+def _make_request_with_retry(
+    url: str,
+    headers: dict,
+    payload: dict,
+    provider_name: str,
+    max_retries: int,
+    quiet: bool,
+    response_parser: callable,
+) -> str:
+    """Make HTTP request with retry logic and common error handling."""
+    if quiet:
+        spinner = None
+    else:
+        spinner = Halo(text=f"Generating commit message with {provider_name}...", spinner="dots")
+        spinner.start()
+
+    last_error = None
+    retry_count = 0
+
+    while retry_count < max_retries:
+        try:
+            logger.debug(f"Trying with {provider_name} (attempt {retry_count + 1}/{max_retries})")
+
+            with httpx.Client(timeout=30.0) as client:
+                response = client.post(url, headers=headers, json=payload)
+                response.raise_for_status()
+
+            response_data = response.json()
+            message = response_parser(response_data)
+
+            if spinner:
+                spinner.succeed(f"Generated commit message with {provider_name}")
+
+            return message
+
+        except Exception as e:
+            last_error = e
+            retry_count += 1
+
+            if retry_count == max_retries:
+                logger.warning(f"Error generating commit message: {e}. Giving up.")
+                break
+
+            wait_time = 2**retry_count
+            logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
+            if spinner:
+                for i in range(wait_time, 0, -1):
+                    spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
+                    time.sleep(1)
+            else:
+                time.sleep(wait_time)
+
+    if spinner:
+        spinner.fail(f"Failed to generate commit message with {provider_name}")
+
+    error_type = _classify_error(str(last_error))
+    raise AIError(
+        f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
+    )
gac/providers/cerebras.py ADDED
@@ -0,0 +1,134 @@
+"""Cerebras API provider for gac."""
+
+import logging
+import os
+import time
+
+import httpx
+from halo import Halo
+
+from gac.ai_utils import _classify_error
+from gac.constants import EnvDefaults
+from gac.errors import AIError
+
+logger = logging.getLogger(__name__)
+
+
+def generate(
+    model: str,
+    prompt: str | tuple[str, str],
+    temperature: float = EnvDefaults.TEMPERATURE,
+    max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+    max_retries: int = EnvDefaults.MAX_RETRIES,
+    quiet: bool = False,
+) -> str:
+    """Generate commit message using Cerebras API with retry logic.
+
+    Args:
+        model: The model name (e.g., 'llama3.1-8b', 'llama3.1-70b')
+        prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
+        temperature: Controls randomness (0.0-1.0)
+        max_tokens: Maximum tokens in the response
+        max_retries: Number of retry attempts if generation fails
+        quiet: If True, suppress progress indicators
+
+    Returns:
+        A formatted commit message string
+
+    Raises:
+        AIError: If generation fails after max_retries attempts
+    """
+    api_key = os.getenv("CEREBRAS_API_KEY")
+    if not api_key:
+        raise AIError.model_error("CEREBRAS_API_KEY environment variable not set")
+
+    # Handle both old (string) and new (tuple) prompt formats
+    if isinstance(prompt, tuple):
+        system_prompt, user_prompt = prompt
+        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+    else:
+        # Backward compatibility: treat string as user prompt
+        messages = [{"role": "user", "content": prompt}]
+
+    payload = {
+        "model": model,
+        "messages": messages,
+        "temperature": temperature,
+        "max_tokens": max_tokens,
+    }
+
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f"Bearer {api_key}",
+    }
+
+    return _make_request_with_retry(
+        url="https://api.cerebras.ai/v1/chat/completions",
+        headers=headers,
+        payload=payload,
+        provider_name=f"Cerebras {model}",
+        max_retries=max_retries,
+        quiet=quiet,
+        response_parser=lambda r: r["choices"][0]["message"]["content"],
+    )
+
+
+def _make_request_with_retry(
+    url: str,
+    headers: dict,
+    payload: dict,
+    provider_name: str,
+    max_retries: int,
+    quiet: bool,
+    response_parser: callable,
+) -> str:
+    """Make HTTP request with retry logic and common error handling."""
+    if quiet:
+        spinner = None
+    else:
+        spinner = Halo(text=f"Generating commit message with {provider_name}...", spinner="dots")
+        spinner.start()
+
+    last_error = None
+    retry_count = 0
+
+    while retry_count < max_retries:
+        try:
+            logger.debug(f"Trying with {provider_name} (attempt {retry_count + 1}/{max_retries})")
+
+            with httpx.Client(timeout=30.0) as client:
+                response = client.post(url, headers=headers, json=payload)
+                response.raise_for_status()
+
+            response_data = response.json()
+            message = response_parser(response_data)
+
+            if spinner:
+                spinner.succeed(f"Generated commit message with {provider_name}")
+
+            return message
+
+        except Exception as e:
+            last_error = e
+            retry_count += 1
+
+            if retry_count == max_retries:
+                logger.warning(f"Error generating commit message: {e}. Giving up.")
+                break
+
+            wait_time = 2**retry_count
+            logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
+            if spinner:
+                for i in range(wait_time, 0, -1):
+                    spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
+                    time.sleep(1)
+            else:
+                time.sleep(wait_time)
+
+    if spinner:
+        spinner.fail(f"Failed to generate commit message with {provider_name}")
+
+    error_type = _classify_error(str(last_error))
+    raise AIError(
+        f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
+    )