gac 1.1.0.tar.gz → 1.2.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of gac might be problematic.
Files changed (37)
  1. {gac-1.1.0 → gac-1.2.1}/PKG-INFO +1 -1
  2. {gac-1.1.0 → gac-1.2.1}/src/gac/__init__.py +6 -6
  3. {gac-1.1.0 → gac-1.2.1}/src/gac/__version__.py +1 -1
  4. gac-1.2.1/src/gac/ai.py +89 -0
  5. gac-1.2.1/src/gac/ai_utils.py +185 -0
  6. {gac-1.1.0 → gac-1.2.1}/src/gac/errors.py +5 -0
  7. gac-1.2.1/src/gac/providers/__init__.py +17 -0
  8. gac-1.2.1/src/gac/providers/anthropic.py +42 -0
  9. gac-1.2.1/src/gac/providers/cerebras.py +29 -0
  10. gac-1.2.1/src/gac/providers/groq.py +58 -0
  11. gac-1.2.1/src/gac/providers/ollama.py +35 -0
  12. gac-1.2.1/src/gac/providers/openai.py +29 -0
  13. gac-1.2.1/src/gac/providers/openrouter.py +46 -0
  14. gac-1.1.0/src/gac/ai.py +0 -80
  15. gac-1.1.0/src/gac/ai_utils.py +0 -134
  16. gac-1.1.0/src/gac/providers/__init__.py +0 -1
  17. gac-1.1.0/src/gac/providers/anthropic.py +0 -141
  18. gac-1.1.0/src/gac/providers/cerebras.py +0 -134
  19. gac-1.1.0/src/gac/providers/groq.py +0 -134
  20. gac-1.1.0/src/gac/providers/ollama.py +0 -135
  21. gac-1.1.0/src/gac/providers/openai.py +0 -134
  22. gac-1.1.0/src/gac/providers/openrouter.py +0 -125
  23. {gac-1.1.0 → gac-1.2.1}/.gitignore +0 -0
  24. {gac-1.1.0 → gac-1.2.1}/LICENSE +0 -0
  25. {gac-1.1.0 → gac-1.2.1}/README.md +0 -0
  26. {gac-1.1.0 → gac-1.2.1}/pyproject.toml +0 -0
  27. {gac-1.1.0 → gac-1.2.1}/src/gac/cli.py +0 -0
  28. {gac-1.1.0 → gac-1.2.1}/src/gac/config.py +0 -0
  29. {gac-1.1.0 → gac-1.2.1}/src/gac/config_cli.py +0 -0
  30. {gac-1.1.0 → gac-1.2.1}/src/gac/constants.py +0 -0
  31. {gac-1.1.0 → gac-1.2.1}/src/gac/diff_cli.py +0 -0
  32. {gac-1.1.0 → gac-1.2.1}/src/gac/git.py +0 -0
  33. {gac-1.1.0 → gac-1.2.1}/src/gac/init_cli.py +0 -0
  34. {gac-1.1.0 → gac-1.2.1}/src/gac/main.py +0 -0
  35. {gac-1.1.0 → gac-1.2.1}/src/gac/preprocess.py +0 -0
  36. {gac-1.1.0 → gac-1.2.1}/src/gac/prompt.py +0 -0
  37. {gac-1.1.0 → gac-1.2.1}/src/gac/utils.py +0 -0
{gac-1.1.0 → gac-1.2.1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: gac
- Version: 1.1.0
+ Version: 1.2.1
  Summary: AI-powered Git commit message generator with multi-provider support
  Project-URL: Homepage, https://github.com/cellwebb/gac
  Project-URL: Documentation, https://github.com/cellwebb/gac#readme
{gac-1.1.0 → gac-1.2.1}/src/gac/__init__.py
@@ -4,12 +4,12 @@ from gac.__version__ import __version__
  from gac.ai import generate_commit_message
  from gac.git import get_staged_files, push_changes
  from gac.prompt import build_prompt, clean_commit_message
- from gac.providers.anthropic import generate as anthropic_generate
- from gac.providers.cerebras import generate as cerebras_generate
- from gac.providers.groq import generate as groq_generate
- from gac.providers.ollama import generate as ollama_generate
- from gac.providers.openai import generate as openai_generate
- from gac.providers.openrouter import generate as openrouter_generate
+ from gac.providers.anthropic import call_anthropic_api as anthropic_generate
+ from gac.providers.cerebras import call_cerebras_api as cerebras_generate
+ from gac.providers.groq import call_groq_api as groq_generate
+ from gac.providers.ollama import call_ollama_api as ollama_generate
+ from gac.providers.openai import call_openai_api as openai_generate
+ from gac.providers.openrouter import call_openrouter_api as openrouter_generate

  __all__ = [
      "__version__",
{gac-1.1.0 → gac-1.2.1}/src/gac/__version__.py
@@ -1,3 +1,3 @@
  """Version information for gac package."""

- __version__ = "1.1.0"
+ __version__ = "1.2.1"
gac-1.2.1/src/gac/ai.py ADDED
@@ -0,0 +1,89 @@
+ """AI provider integration for gac.
+
+ This module provides core functionality for AI provider interaction.
+ It consolidates all AI-related functionality including token counting and commit message generation.
+ """
+
+ import logging
+
+ from gac.ai_utils import generate_with_retries
+ from gac.constants import EnvDefaults
+ from gac.errors import AIError
+ from gac.providers import (
+     call_anthropic_api,
+     call_cerebras_api,
+     call_groq_api,
+     call_ollama_api,
+     call_openai_api,
+     call_openrouter_api,
+ )
+
+ logger = logging.getLogger(__name__)
+
+
+ def generate_commit_message(
+     model: str,
+     prompt: str | tuple[str, str],
+     temperature: float = EnvDefaults.TEMPERATURE,
+     max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
+     max_retries: int = EnvDefaults.MAX_RETRIES,
+     quiet: bool = False,
+ ) -> str:
+     """Generate a commit message using direct API calls to AI providers.
+
+     Args:
+         model: The model to use in provider:model_name format (e.g., 'anthropic:claude-3-5-haiku-latest')
+         prompt: Either a string prompt (for backward compatibility) or tuple of (system_prompt, user_prompt)
+         temperature: Controls randomness (0.0-1.0), lower values are more deterministic
+         max_tokens: Maximum tokens in the response
+         max_retries: Number of retry attempts if generation fails
+         quiet: If True, suppress progress indicators
+
+     Returns:
+         A formatted commit message string
+
+     Raises:
+         AIError: If generation fails after max_retries attempts
+
+     Example:
+         >>> model = "anthropic:claude-3-5-haiku-latest"
+         >>> system_prompt, user_prompt = build_prompt("On branch main", "diff --git a/README.md b/README.md")
+         >>> generate_commit_message(model, (system_prompt, user_prompt))
+         'docs: Update README with installation instructions'
+     """
+     # Handle both old (string) and new (tuple) prompt formats
+     if isinstance(prompt, tuple):
+         system_prompt, user_prompt = prompt
+     else:
+         # Backward compatibility: treat string as user prompt with no system prompt
+         system_prompt = ""
+         user_prompt = prompt
+
+     # Provider functions mapping
+     provider_funcs = {
+         "anthropic": call_anthropic_api,
+         "openai": call_openai_api,
+         "groq": call_groq_api,
+         "cerebras": call_cerebras_api,
+         "ollama": call_ollama_api,
+         "openrouter": call_openrouter_api,
+     }
+
+     # Generate the commit message using centralized retry logic
+     try:
+         return generate_with_retries(
+             provider_funcs=provider_funcs,
+             model=model,
+             system_prompt=system_prompt,
+             user_prompt=user_prompt,
+             temperature=temperature,
+             max_tokens=max_tokens,
+             max_retries=max_retries,
+             quiet=quiet,
+         )
+     except AIError:
+         # Re-raise AIError exceptions as-is to preserve error classification
+         raise
+     except Exception as e:
+         logger.error(f"Failed to generate commit message: {e}")
+         raise AIError.model_error(f"Failed to generate commit message: {e}") from e
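For orientation, a minimal sketch of driving the consolidated entry point (model choice and prompts are illustrative; assumes ANTHROPIC_API_KEY is set in the environment):

from gac.ai import generate_commit_message

system_prompt = "You write conventional commit messages."
user_prompt = "diff --git a/README.md b/README.md\n+## Install\n+pip install gac"

message = generate_commit_message(
    "anthropic:claude-3-5-haiku-latest",  # provider:model_name format
    (system_prompt, user_prompt),         # tuple form; a bare string also works
    temperature=0.2,
)
print(message)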
gac-1.2.1/src/gac/ai_utils.py ADDED
@@ -0,0 +1,185 @@
+ """Utilities for AI provider integration for gac.
+
+ This module provides utility functions that support the AI provider implementations.
+ """
+
+ import logging
+ import time
+ from functools import lru_cache
+ from typing import Any
+
+ import tiktoken
+ from halo import Halo
+
+ from gac.constants import Utility
+ from gac.errors import AIError
+
+ logger = logging.getLogger(__name__)
+
+
+ def count_tokens(content: str | list[dict[str, str]] | dict[str, Any], model: str) -> int:
+     """Count tokens in content using the model's tokenizer."""
+     text = extract_text_content(content)
+     if not text:
+         return 0
+
+     try:
+         encoding = get_encoding(model)
+         return len(encoding.encode(text))
+     except Exception as e:
+         logger.error(f"Error counting tokens: {e}")
+         return len(text) // 4
+
+
+ def extract_text_content(content: str | list[dict[str, str]] | dict[str, Any]) -> str:
+     """Extract text content from various input formats."""
+     if isinstance(content, str):
+         return content
+     elif isinstance(content, list):
+         return "\n".join(msg["content"] for msg in content if isinstance(msg, dict) and "content" in msg)
+     elif isinstance(content, dict) and "content" in content:
+         return content["content"]
+     return ""
+
+
+ @lru_cache(maxsize=1)
+ def get_encoding(model: str) -> tiktoken.Encoding:
+     """Get the appropriate encoding for a given model."""
+     model_name = model.split(":")[-1] if ":" in model else model
+     try:
+         return tiktoken.encoding_for_model(model_name)
+     except KeyError:
+         return tiktoken.get_encoding(Utility.DEFAULT_ENCODING)
+
+
+ def _classify_error(error_str: str) -> str:
+     """Classify error types based on error message content."""
+     error_str = error_str.lower()
+
+     if (
+         "api key" in error_str
+         or "unauthorized" in error_str
+         or "authentication" in error_str
+         or "invalid api key" in error_str
+     ):
+         return "authentication"
+     elif "timeout" in error_str or "timed out" in error_str or "request timeout" in error_str:
+         return "timeout"
+     elif "rate limit" in error_str or "too many requests" in error_str or "rate limit exceeded" in error_str:
+         return "rate_limit"
+     elif "connect" in error_str or "network" in error_str or "network connection failed" in error_str:
+         return "connection"
+     elif "model" in error_str or "not found" in error_str or "model not found" in error_str:
+         return "model"
+     else:
+         return "unknown"
+
+
+ def generate_with_retries(
+     provider_funcs: dict,
+     model: str,
+     system_prompt: str,
+     user_prompt: str,
+     temperature: float,
+     max_tokens: int,
+     max_retries: int,
+     quiet: bool = False,
+ ) -> str:
+     """Generate content with retry logic using direct API calls."""
+     # Parse model string to determine provider and actual model
+     if ":" not in model:
+         raise AIError.model_error(f"Invalid model format. Expected 'provider:model', got '{model}'")
+
+     provider, model_name = model.split(":", 1)
+
+     # Validate provider
+     supported_providers = ["anthropic", "openai", "groq", "cerebras", "ollama", "openrouter"]
+     if provider not in supported_providers:
+         raise AIError.model_error(f"Unsupported provider: {provider}. Supported providers: {supported_providers}")
+
+     messages = [
+         {"role": "system", "content": system_prompt},
+         {"role": "user", "content": user_prompt},
+     ]
+
+     # Set up spinner
+     if quiet:
+         spinner = None
+     else:
+         spinner = Halo(text=f"Generating commit message with {provider} {model_name}...", spinner="dots")
+         spinner.start()
+
+     last_exception = None
+     last_error_type = "unknown"
+
+     for attempt in range(max_retries):
+         try:
+             if not quiet and attempt > 0:
+                 if spinner:
+                     spinner.text = f"Retry {attempt + 1}/{max_retries} with {provider} {model_name}..."
+                 logger.info(f"Retry attempt {attempt + 1}/{max_retries}")
+
+             # Call the appropriate provider function
+             provider_func = provider_funcs.get(provider)
+             if not provider_func:
+                 raise AIError.model_error(f"Provider function not found for: {provider}")
+
+             content = provider_func(model=model_name, messages=messages, temperature=temperature, max_tokens=max_tokens)
+
+             if spinner:
+                 spinner.succeed(f"Generated commit message with {provider} {model_name}")
+
+             if content is not None and content.strip():
+                 return content.strip()
+             else:
+                 logger.warning(f"Empty or None content received from {provider} {model_name}: {repr(content)}")
+                 raise AIError.model_error("Empty response from AI model")
+
+         except Exception as e:
+             last_exception = e
+             error_type = _classify_error(str(e))
+             last_error_type = error_type
+
+             # For authentication and model errors, don't retry
+             if error_type in ["authentication", "model"]:
+                 if spinner:
+                     spinner.fail(f"Failed to generate commit message with {provider} {model_name}")
+
+                 # Create the appropriate error type based on classification
+                 if error_type == "authentication":
+                     raise AIError.authentication_error(f"AI generation failed: {str(e)}") from e
+                 elif error_type == "model":
+                     raise AIError.model_error(f"AI generation failed: {str(e)}") from e
+
+             if attempt < max_retries - 1:
+                 # Exponential backoff
+                 wait_time = 2**attempt
+                 if not quiet:
+                     logger.warning(f"AI generation failed (attempt {attempt + 1}), retrying in {wait_time}s: {str(e)}")
+
+                 if spinner:
+                     for i in range(wait_time, 0, -1):
+                         spinner.text = f"Retry {attempt + 1}/{max_retries} in {i}s..."
+                         time.sleep(1)
+                 else:
+                     time.sleep(wait_time)
+             else:
+                 logger.error(f"AI generation failed after {max_retries} attempts: {str(e)}")
+
+     if spinner:
+         spinner.fail(f"Failed to generate commit message with {provider} {model_name}")
+
+     # If we get here, all retries failed - use the last classified error type
+     error_message = f"Failed to generate commit message after {max_retries} attempts"
+     if last_error_type == "authentication":
+         raise AIError.authentication_error(error_message) from last_exception
+     elif last_error_type == "rate_limit":
+         raise AIError.rate_limit_error(error_message) from last_exception
+     elif last_error_type == "timeout":
+         raise AIError.timeout_error(error_message) from last_exception
+     elif last_error_type == "connection":
+         raise AIError.connection_error(error_message) from last_exception
+     elif last_error_type == "model":
+         raise AIError.model_error(error_message) from last_exception
+     else:
+         raise AIError.unknown_error(error_message) from last_exception
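The backoff schedule doubles each attempt (2**attempt seconds: 1s, 2s, 4s, ...), and _classify_error does substring matching, checked in the order authentication, timeout, rate_limit, connection, model. A small illustration of how sample error strings classify (inputs fabricated):

# Fabricated inputs exercising the substring checks in _classify_error:
from gac.ai_utils import _classify_error

assert _classify_error("401 Unauthorized") == "authentication"
assert _classify_error("request timed out after 120s") == "timeout"
assert _classify_error("429 Too Many Requests") == "rate_limit"
assert _classify_error("could not connect to host") == "connection"
assert _classify_error("model not found") == "model"
assert _classify_error("something else entirely") == "unknown"

Because the checks are ordered, a message containing both "timed out" and "rate limit" classifies as timeout.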
{gac-1.1.0 → gac-1.2.1}/src/gac/errors.py
@@ -95,6 +95,11 @@ class AIError(GacError):
          """Create a model error."""
          return cls(message, error_type="model")

+     @classmethod
+     def unknown_error(cls, message: str) -> "AIError":
+         """Create an unknown error."""
+         return cls(message, error_type="unknown")
+

  class FormattingError(GacError):
      """Error related to code formatting."""
gac-1.2.1/src/gac/providers/__init__.py ADDED
@@ -0,0 +1,17 @@
+ """AI provider implementations for commit message generation."""
+
+ from .anthropic import call_anthropic_api
+ from .cerebras import call_cerebras_api
+ from .groq import call_groq_api
+ from .ollama import call_ollama_api
+ from .openai import call_openai_api
+ from .openrouter import call_openrouter_api
+
+ __all__ = [
+     "call_anthropic_api",
+     "call_cerebras_api",
+     "call_groq_api",
+     "call_ollama_api",
+     "call_openai_api",
+     "call_openrouter_api",
+ ]
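All six exports share one call shape, which is what lets ai.py dispatch through a plain dict. A typing sketch of that implicit contract (the Protocol is an illustration, not part of the package):

from typing import Protocol

class ProviderFunc(Protocol):
    """The uniform signature every gac provider function satisfies."""

    def __call__(self, model: str, messages: list[dict], temperature: float, max_tokens: int) -> str: ...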
gac-1.2.1/src/gac/providers/anthropic.py ADDED
@@ -0,0 +1,42 @@
+ """Anthropic AI provider implementation."""
+
+ import os
+
+ import httpx
+
+ from gac.errors import AIError
+
+
+ def call_anthropic_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+     """Call Anthropic API directly."""
+     api_key = os.getenv("ANTHROPIC_API_KEY")
+     if not api_key:
+         raise AIError.model_error("ANTHROPIC_API_KEY not found in environment variables")
+
+     url = "https://api.anthropic.com/v1/messages"
+     headers = {"x-api-key": api_key, "anthropic-version": "2023-06-01", "content-type": "application/json"}
+
+     # Convert messages to Anthropic format
+     anthropic_messages = []
+     system_message = ""
+
+     for msg in messages:
+         if msg["role"] == "system":
+             system_message = msg["content"]
+         else:
+             anthropic_messages.append({"role": msg["role"], "content": msg["content"]})
+
+     data = {"model": model, "messages": anthropic_messages, "temperature": temperature, "max_tokens": max_tokens}
+
+     if system_message:
+         data["system"] = system_message
+
+     try:
+         response = httpx.post(url, headers=headers, json=data, timeout=120)
+         response.raise_for_status()
+         response_data = response.json()
+         return response_data["content"][0]["text"]
+     except httpx.HTTPStatusError as e:
+         raise AIError.model_error(f"Anthropic API error: {e.response.status_code} - {e.response.text}") from e
+     except Exception as e:
+         raise AIError.model_error(f"Error calling Anthropic API: {str(e)}") from e
gac-1.2.1/src/gac/providers/cerebras.py ADDED
@@ -0,0 +1,29 @@
+ """Cerebras AI provider implementation."""
+
+ import os
+
+ import httpx
+
+ from gac.errors import AIError
+
+
+ def call_cerebras_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+     """Call Cerebras API directly."""
+     api_key = os.getenv("CEREBRAS_API_KEY")
+     if not api_key:
+         raise AIError.model_error("CEREBRAS_API_KEY not found in environment variables")
+
+     url = "https://api.cerebras.ai/v1/chat/completions"
+     headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+     data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+
+     try:
+         response = httpx.post(url, headers=headers, json=data, timeout=120)
+         response.raise_for_status()
+         response_data = response.json()
+         return response_data["choices"][0]["message"]["content"]
+     except httpx.HTTPStatusError as e:
+         raise AIError.model_error(f"Cerebras API error: {e.response.status_code} - {e.response.text}") from e
+     except Exception as e:
+         raise AIError.model_error(f"Error calling Cerebras API: {str(e)}") from e
gac-1.2.1/src/gac/providers/groq.py ADDED
@@ -0,0 +1,58 @@
+ """Groq API provider for gac."""
+
+ import logging
+ import os
+
+ import httpx
+
+ from gac.errors import AIError
+
+ logger = logging.getLogger(__name__)
+
+
+ def call_groq_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+     """Call Groq API directly."""
+     api_key = os.getenv("GROQ_API_KEY")
+     if not api_key:
+         raise AIError.model_error("GROQ_API_KEY not found in environment variables")
+
+     url = "https://api.groq.com/openai/v1/chat/completions"
+     headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+     data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+
+     try:
+         response = httpx.post(url, headers=headers, json=data, timeout=120)
+         response.raise_for_status()
+         response_data = response.json()
+
+         # Debug logging to understand response structure
+         logger.debug(f"Groq API response: {response_data}")
+
+         # Handle different response formats
+         if "choices" in response_data and len(response_data["choices"]) > 0:
+             choice = response_data["choices"][0]
+             if "message" in choice and "content" in choice["message"]:
+                 content = choice["message"]["content"]
+                 logger.debug(f"Found content in message.content: {repr(content)}")
+                 if content is None:
+                     logger.warning("Groq API returned None content in message.content")
+                     return ""
+                 return content
+             elif "text" in choice:
+                 content = choice["text"]
+                 logger.debug(f"Found content in choice.text: {repr(content)}")
+                 if content is None:
+                     logger.warning("Groq API returned None content in choice.text")
+                     return ""
+                 return content
+             else:
+                 logger.warning(f"Unexpected choice structure: {choice}")
+
+         # If we can't find content in the expected places, raise an error
+         logger.error(f"Unexpected response format from Groq API: {response_data}")
+         raise AIError.model_error(f"Unexpected response format from Groq API: {response_data}")
+     except httpx.HTTPStatusError as e:
+         raise AIError.model_error(f"Groq API error: {e.response.status_code} - {e.response.text}") from e
+     except Exception as e:
+         raise AIError.model_error(f"Error calling Groq API: {str(e)}") from e
gac-1.2.1/src/gac/providers/ollama.py ADDED
@@ -0,0 +1,35 @@
+ """Ollama AI provider implementation."""
+
+ import os
+
+ import httpx
+
+ from gac.errors import AIError
+
+
+ def call_ollama_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+     """Call Ollama API directly."""
+     api_url = os.getenv("OLLAMA_API_URL", "http://localhost:11434")
+
+     url = f"{api_url.rstrip('/')}/api/chat"
+     data = {"model": model, "messages": messages, "temperature": temperature, "stream": False}
+
+     try:
+         response = httpx.post(url, json=data, timeout=120)
+         response.raise_for_status()
+         response_data = response.json()
+
+         # Handle different response formats from Ollama
+         if "message" in response_data and "content" in response_data["message"]:
+             return response_data["message"]["content"]
+         elif "response" in response_data:
+             return response_data["response"]
+         else:
+             # Fallback: return the full response as string
+             return str(response_data)
+     except httpx.ConnectError as e:
+         raise AIError.connection_error(f"Ollama connection failed. Make sure Ollama is running: {str(e)}") from e
+     except httpx.HTTPStatusError as e:
+         raise AIError.model_error(f"Ollama API error: {e.response.status_code} - {e.response.text}") from e
+     except Exception as e:
+         raise AIError.model_error(f"Error calling Ollama API: {str(e)}") from e
gac-1.2.1/src/gac/providers/openai.py ADDED
@@ -0,0 +1,29 @@
+ """OpenAI API provider for gac."""
+
+ import os
+
+ import httpx
+
+ from gac.errors import AIError
+
+
+ def call_openai_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+     """Call OpenAI API directly."""
+     api_key = os.getenv("OPENAI_API_KEY")
+     if not api_key:
+         raise AIError.model_error("OPENAI_API_KEY not found in environment variables")
+
+     url = "https://api.openai.com/v1/chat/completions"
+     headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+     data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+
+     try:
+         response = httpx.post(url, headers=headers, json=data, timeout=120)
+         response.raise_for_status()
+         response_data = response.json()
+         return response_data["choices"][0]["message"]["content"]
+     except httpx.HTTPStatusError as e:
+         raise AIError.model_error(f"OpenAI API error: {e.response.status_code} - {e.response.text}") from e
+     except Exception as e:
+         raise AIError.model_error(f"Error calling OpenAI API: {str(e)}") from e
gac-1.2.1/src/gac/providers/openrouter.py ADDED
@@ -0,0 +1,46 @@
+ """OpenRouter API provider for gac."""
+
+ import os
+
+ import httpx
+
+ from gac.errors import AIError
+
+
+ def call_openrouter_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+     """Call OpenRouter API directly."""
+     api_key = os.getenv("OPENROUTER_API_KEY")
+     if not api_key:
+         raise AIError.model_error("OPENROUTER_API_KEY environment variable not set")
+
+     url = "https://openrouter.ai/api/v1/chat/completions"
+     headers = {
+         "Content-Type": "application/json",
+         "Authorization": f"Bearer {api_key}",
+     }
+
+     # Add optional headers if environment variables are set
+     site_url = os.getenv("OPENROUTER_SITE_URL")
+     if site_url:
+         headers["HTTP-Referer"] = site_url
+
+     site_name = os.getenv("OPENROUTER_SITE_NAME")
+     if site_name:
+         headers["X-Title"] = site_name
+
+     data = {
+         "model": model,
+         "messages": messages,
+         "temperature": temperature,
+         "max_tokens": max_tokens,
+     }
+
+     try:
+         response = httpx.post(url, headers=headers, json=data, timeout=120)
+         response.raise_for_status()
+         response_data = response.json()
+         return response_data["choices"][0]["message"]["content"]
+     except httpx.HTTPStatusError as e:
+         raise AIError.model_error(f"OpenRouter API error: {e.response.status_code} - {e.response.text}") from e
+     except Exception as e:
+         raise AIError.model_error(f"Error calling OpenRouter API: {str(e)}") from e
gac-1.1.0/src/gac/ai.py DELETED
@@ -1,80 +0,0 @@
- """AI provider integration for gac.
-
- This module provides core functionality for AI provider interaction.
- It consolidates all AI-related functionality including token counting and commit message generation.
- """
-
- import logging
-
- from gac.constants import EnvDefaults
- from gac.errors import AIError
- from gac.providers.anthropic import generate as anthropic_generate
- from gac.providers.cerebras import generate as cerebras_generate
- from gac.providers.groq import generate as groq_generate
- from gac.providers.ollama import generate as ollama_generate
- from gac.providers.openai import generate as openai_generate
- from gac.providers.openrouter import generate as openrouter_generate
-
- logger = logging.getLogger(__name__)
-
-
- def generate_commit_message(
-     model: str,
-     prompt: str | tuple[str, str],
-     temperature: float = EnvDefaults.TEMPERATURE,
-     max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
-     max_retries: int = EnvDefaults.MAX_RETRIES,
-     quiet: bool = False,
- ) -> str:
-     """Generate a commit message using direct API calls to AI providers.
-
-     Args:
-         model: The model to use in provider:model_name format (e.g., 'anthropic:claude-3-5-haiku-latest')
-         prompt: Either a string prompt (for backward compatibility) or tuple of (system_prompt, user_prompt)
-         temperature: Controls randomness (0.0-1.0), lower values are more deterministic
-         max_tokens: Maximum tokens in the response
-         max_retries: Number of retry attempts if generation fails
-         quiet: If True, suppress progress indicators
-
-     Returns:
-         A formatted commit message string
-
-     Raises:
-         AIError: If generation fails after max_retries attempts
-
-     Example:
-         >>> model = "anthropic:claude-3-5-haiku-latest"
-         >>> system_prompt, user_prompt = build_prompt("On branch main", "diff --git a/README.md b/README.md")
-         >>> generate_commit_message(model, (system_prompt, user_prompt))
-         'docs: Update README with installation instructions'
-     """
-     try:
-         _, _ = model.split(":", 1)
-     except ValueError as err:
-         raise AIError.model_error(
-             f"Invalid model format: {model}. Please use the format 'provider:model_name'."
-         ) from err
-
-     # Parse the model string to extract provider and model name
-     try:
-         provider, model_name = model.split(":", 1)
-     except ValueError as err:
-         raise AIError.model_error(
-             f"Invalid model format: {model}. Please use the format 'provider:model_name'."
-         ) from err
-
-     # Route to the appropriate provider function
-     if provider == "openai":
-         return openai_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
-     elif provider == "anthropic":
-         return anthropic_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
-     elif provider == "groq":
-         return groq_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
-     elif provider == "cerebras":
-         return cerebras_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
-     elif provider == "ollama":
-         return ollama_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
-     elif provider == "openrouter":
-         return openrouter_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
-     else:
-         raise AIError.model_error(f"Unsupported provider: {provider}")