gac 1.1.0-py3-none-any.whl → 1.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of gac might be problematic.

gac/__init__.py CHANGED
@@ -4,12 +4,12 @@ from gac.__version__ import __version__
  from gac.ai import generate_commit_message
  from gac.git import get_staged_files, push_changes
  from gac.prompt import build_prompt, clean_commit_message
- from gac.providers.anthropic import generate as anthropic_generate
- from gac.providers.cerebras import generate as cerebras_generate
- from gac.providers.groq import generate as groq_generate
- from gac.providers.ollama import generate as ollama_generate
- from gac.providers.openai import generate as openai_generate
- from gac.providers.openrouter import generate as openrouter_generate
+ from gac.providers.anthropic import call_anthropic_api as anthropic_generate
+ from gac.providers.cerebras import call_cerebras_api as cerebras_generate
+ from gac.providers.groq import call_groq_api as groq_generate
+ from gac.providers.ollama import call_ollama_api as ollama_generate
+ from gac.providers.openai import call_openai_api as openai_generate
+ from gac.providers.openrouter import call_openrouter_api as openrouter_generate

  __all__ = [
      "__version__",
gac/__version__.py CHANGED
@@ -1,3 +1,3 @@
  """Version information for gac package."""

- __version__ = "1.1.0"
+ __version__ = "1.2.0"
gac/ai.py CHANGED
@@ -6,14 +6,17 @@ It consolidates all AI-related functionality including token counting and commit

  import logging

+ from gac.ai_utils import generate_with_retries
  from gac.constants import EnvDefaults
  from gac.errors import AIError
- from gac.providers.anthropic import generate as anthropic_generate
- from gac.providers.cerebras import generate as cerebras_generate
- from gac.providers.groq import generate as groq_generate
- from gac.providers.ollama import generate as ollama_generate
- from gac.providers.openai import generate as openai_generate
- from gac.providers.openrouter import generate as openrouter_generate
+ from gac.providers import (
+     call_anthropic_api,
+     call_cerebras_api,
+     call_groq_api,
+     call_ollama_api,
+     call_openai_api,
+     call_openrouter_api,
+ )

  logger = logging.getLogger(__name__)

@@ -48,33 +51,36 @@ def generate_commit_message(
      >>> generate_commit_message(model, (system_prompt, user_prompt))
      'docs: Update README with installation instructions'
      """
-     try:
-         _, _ = model.split(":", 1)
-     except ValueError as err:
-         raise AIError.model_error(
-             f"Invalid model format: {model}. Please use the format 'provider:model_name'."
-         ) from err
+     # Handle both old (string) and new (tuple) prompt formats
+     if isinstance(prompt, tuple):
+         system_prompt, user_prompt = prompt
+     else:
+         # Backward compatibility: treat string as user prompt with no system prompt
+         system_prompt = ""
+         user_prompt = prompt

-     # Parse the model string to extract provider and model name
-     try:
-         provider, model_name = model.split(":", 1)
-     except ValueError as err:
-         raise AIError.model_error(
-             f"Invalid model format: {model}. Please use the format 'provider:model_name'."
-         ) from err
+     # Provider functions mapping
+     provider_funcs = {
+         "anthropic": call_anthropic_api,
+         "openai": call_openai_api,
+         "groq": call_groq_api,
+         "cerebras": call_cerebras_api,
+         "ollama": call_ollama_api,
+         "openrouter": call_openrouter_api,
+     }

-     # Route to the appropriate provider function
-     if provider == "openai":
-         return openai_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
-     elif provider == "anthropic":
-         return anthropic_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
-     elif provider == "groq":
-         return groq_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
-     elif provider == "cerebras":
-         return cerebras_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
-     elif provider == "ollama":
-         return ollama_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
-     elif provider == "openrouter":
-         return openrouter_generate(model_name, prompt, temperature, max_tokens, max_retries, quiet)
-     else:
-         raise AIError.model_error(f"Unsupported provider: {provider}")
+     # Generate the commit message using centralized retry logic
+     try:
+         return generate_with_retries(
+             provider_funcs=provider_funcs,
+             model=model,
+             system_prompt=system_prompt,
+             user_prompt=user_prompt,
+             temperature=temperature,
+             max_tokens=max_tokens,
+             max_retries=max_retries,
+             quiet=quiet,
+         )
+     except Exception as e:
+         logger.error(f"Failed to generate commit message: {e}")
+         raise AIError.model_error(f"Failed to generate commit message: {e}") from e
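
With this change, generate_commit_message only unpacks the prompt and delegates provider routing and retries to generate_with_retries (added in gac/ai_utils.py below). A usage sketch consistent with the docstring example and the "provider:model_name" format; the prompt strings are placeholders:

    from gac.ai import generate_commit_message

    message = generate_commit_message(
        "openai:gpt-4",  # split on the first ":" into provider and model name
        ("You write conventional commit messages.", "Summarize the staged diff."),
    )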
gac/ai_utils.py CHANGED
@@ -5,13 +5,16 @@ This module provides utility functions that support the AI provider implementati

  import logging
  import os
+ import time
  from functools import lru_cache
  from typing import Any

  import httpx
  import tiktoken
+ from halo import Halo

  from gac.constants import Utility
+ from gac.errors import AIError

  logger = logging.getLogger(__name__)

@@ -132,3 +135,95 @@ def _classify_error(error_str: str) -> str:
          return "model"
      else:
          return "unknown"
+
+
+ def generate_with_retries(
+     provider_funcs: dict,
+     model: str,
+     system_prompt: str,
+     user_prompt: str,
+     temperature: float,
+     max_tokens: int,
+     max_retries: int,
+     quiet: bool = False,
+ ) -> str:
+     """Generate content with retry logic using direct API calls."""
+     # Parse model string to determine provider and actual model
+     if ":" not in model:
+         raise AIError.model_error(f"Invalid model format. Expected 'provider:model', got '{model}'")
+
+     provider, model_name = model.split(":", 1)
+
+     # Validate provider
+     supported_providers = ["anthropic", "openai", "groq", "cerebras", "ollama", "openrouter"]
+     if provider not in supported_providers:
+         raise AIError.model_error(f"Unsupported provider: {provider}. Supported providers: {supported_providers}")
+
+     messages = [
+         {"role": "system", "content": system_prompt},
+         {"role": "user", "content": user_prompt},
+     ]
+
+     # Set up spinner
+     if quiet:
+         spinner = None
+     else:
+         spinner = Halo(text=f"Generating commit message with {provider} {model_name}...", spinner="dots")
+         spinner.start()
+
+     last_exception = None
+
+     for attempt in range(max_retries):
+         try:
+             if not quiet and attempt > 0:
+                 if spinner:
+                     spinner.text = f"Retry {attempt + 1}/{max_retries} with {provider} {model_name}..."
+                 logger.info(f"Retry attempt {attempt + 1}/{max_retries}")
+
+             # Call the appropriate provider function
+             provider_func = provider_funcs.get(provider)
+             if not provider_func:
+                 raise AIError.model_error(f"Provider function not found for: {provider}")
+
+             content = provider_func(model=model_name, messages=messages, temperature=temperature, max_tokens=max_tokens)
+
+             if spinner:
+                 spinner.succeed(f"Generated commit message with {provider} {model_name}")
+
+             if content:
+                 return content.strip()
+             else:
+                 raise AIError.model_error("Empty response from AI model")
+
+         except Exception as e:
+             last_exception = e
+             error_type = _classify_error(str(e))
+
+             if error_type in ["authentication", "model"]:
+                 # Don't retry these errors
+                 if spinner:
+                     spinner.fail(f"Failed to generate commit message with {provider} {model_name}")
+                 raise AIError.authentication_error(f"AI generation failed: {str(e)}") from e
+
+             if attempt < max_retries - 1:
+                 # Exponential backoff
+                 wait_time = 2**attempt
+                 if not quiet:
+                     logger.warning(f"AI generation failed (attempt {attempt + 1}), retrying in {wait_time}s: {str(e)}")
+
+                 if spinner:
+                     for i in range(wait_time, 0, -1):
+                         spinner.text = f"Retry {attempt + 1}/{max_retries} in {i}s..."
+                         time.sleep(1)
+                 else:
+                     time.sleep(wait_time)
+             else:
+                 logger.error(f"AI generation failed after {max_retries} attempts: {str(e)}")
+
+                 if spinner:
+                     spinner.fail(f"Failed to generate commit message with {provider} {model_name}")
+
+     # If we get here, all retries failed
+     raise AIError.model_error(
+         f"AI generation failed after {max_retries} attempts: {str(last_exception)}"
+     ) from last_exception
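
Since wait_time = 2**attempt and attempt starts at 0, retryable failures back off 1s, 2s, 4s, and so on, while authentication and model errors abort immediately. A sketch of the provider contract the function dispatches against, using a hypothetical stub rather than a real provider:

    from gac.ai_utils import generate_with_retries

    def stub_provider(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
        # Matches the call_*_api signature used throughout this release
        return "feat: add retry logic"

    result = generate_with_retries(
        provider_funcs={"openai": stub_provider},  # key must be one of the six supported providers
        model="openai:any-model",
        system_prompt="You write conventional commit messages.",
        user_prompt="Summarize the staged diff.",
        temperature=0.7,
        max_tokens=256,
        max_retries=3,
        quiet=True,  # skip the Halo spinner
    )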
gac/providers/__init__.py CHANGED
@@ -1 +1,17 @@
- """AI providers submodule for gac."""
+ """AI provider implementations for commit message generation."""
+
+ from .anthropic import call_anthropic_api
+ from .cerebras import call_cerebras_api
+ from .groq import call_groq_api
+ from .ollama import call_ollama_api
+ from .openai import call_openai_api
+ from .openrouter import call_openrouter_api
+
+ __all__ = [
+     "call_anthropic_api",
+     "call_cerebras_api",
+     "call_groq_api",
+     "call_ollama_api",
+     "call_openai_api",
+     "call_openrouter_api",
+ ]
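
All six exports share one signature, which is what lets generate_with_retries dispatch through a plain dict. The common shape, as inferred from the provider definitions below:

    def call_some_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
        """POST to the provider and return the generated text, or raise AIError."""
        ...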
gac/providers/anthropic.py CHANGED
@@ -1,141 +1,42 @@
- """Anthropic API provider for gac."""
+ """Anthropic AI provider implementation."""

- import logging
  import os
- import time

  import httpx
- from halo import Halo

- from gac.ai_utils import _classify_error
- from gac.constants import EnvDefaults
  from gac.errors import AIError

- logger = logging.getLogger(__name__)

-
- def generate(
-     model: str,
-     prompt: str | tuple[str, str],
-     temperature: float = EnvDefaults.TEMPERATURE,
-     max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
-     max_retries: int = EnvDefaults.MAX_RETRIES,
-     quiet: bool = False,
- ) -> str:
-     """Generate commit message using Anthropic API with retry logic.
-
-     Args:
-         model: The model name (e.g., 'claude-3-5-haiku-latest', 'claude-3-opus-latest')
-         prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
-         temperature: Controls randomness (0.0-1.0)
-         max_tokens: Maximum tokens in the response
-         max_retries: Number of retry attempts if generation fails
-         quiet: If True, suppress progress indicators
-
-     Returns:
-         A formatted commit message string
-
-     Raises:
-         AIError: If generation fails after max_retries attempts
-     """
+ def call_anthropic_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+     """Call Anthropic API directly."""
      api_key = os.getenv("ANTHROPIC_API_KEY")
      if not api_key:
-         raise AIError.model_error("ANTHROPIC_API_KEY environment variable not set")
-
-     # Handle both old (string) and new (tuple) prompt formats
-     if isinstance(prompt, tuple):
-         system_prompt, user_prompt = prompt
-         messages = [{"role": "user", "content": user_prompt}]
-         payload = {
-             "model": model,
-             "messages": messages,
-             "system": system_prompt,
-             "temperature": temperature,
-             "max_tokens": max_tokens,
-         }
-     else:
-         # Backward compatibility: treat string as user prompt
-         messages = [{"role": "user", "content": prompt}]
-         payload = {
-             "model": model,
-             "messages": messages,
-             "temperature": temperature,
-             "max_tokens": max_tokens,
-         }
-
-     headers = {
-         "Content-Type": "application/json",
-         "x-api-key": api_key,
-         "anthropic-version": "2023-06-01",
-     }
-
-     return _make_request_with_retry(
-         url="https://api.anthropic.com/v1/messages",
-         headers=headers,
-         payload=payload,
-         provider_name=f"Anthropic {model}",
-         max_retries=max_retries,
-         quiet=quiet,
-         response_parser=lambda r: r["content"][0]["text"],
-     )
-
-
- def _make_request_with_retry(
-     url: str,
-     headers: dict,
-     payload: dict,
-     provider_name: str,
-     max_retries: int,
-     quiet: bool,
-     response_parser: callable,
- ) -> str:
-     """Make HTTP request with retry logic and common error handling."""
-     if quiet:
-         spinner = None
-     else:
-         spinner = Halo(text=f"Generating commit message with {provider_name}...", spinner="dots")
-         spinner.start()
-
-     last_error = None
-     retry_count = 0
-
-     while retry_count < max_retries:
-         try:
-             logger.debug(f"Trying with {provider_name} (attempt {retry_count + 1}/{max_retries})")
-
-             with httpx.Client(timeout=30.0) as client:
-                 response = client.post(url, headers=headers, json=payload)
-                 response.raise_for_status()
-
-             response_data = response.json()
-             message = response_parser(response_data)
-
-             if spinner:
-                 spinner.succeed(f"Generated commit message with {provider_name}")
-
-             return message
-
-         except Exception as e:
-             last_error = e
-             retry_count += 1
-
-             if retry_count == max_retries:
-                 logger.warning(f"Error generating commit message: {e}. Giving up.")
-                 break
-
-             wait_time = 2**retry_count
-             logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
-             if spinner:
-                 for i in range(wait_time, 0, -1):
-                     spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
-                     time.sleep(1)
-             else:
-                 time.sleep(wait_time)
-
-     if spinner:
-         spinner.fail(f"Failed to generate commit message with {provider_name}")
-
-     error_type = _classify_error(str(last_error))
-     raise AIError(
-         f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
-     )
+         raise AIError.model_error("ANTHROPIC_API_KEY not found in environment variables")
+
+     url = "https://api.anthropic.com/v1/messages"
+     headers = {"x-api-key": api_key, "anthropic-version": "2023-06-01", "content-type": "application/json"}
+
+     # Convert messages to Anthropic format
+     anthropic_messages = []
+     system_message = ""
+
+     for msg in messages:
+         if msg["role"] == "system":
+             system_message = msg["content"]
+         else:
+             anthropic_messages.append({"role": msg["role"], "content": msg["content"]})
+
+     data = {"model": model, "messages": anthropic_messages, "temperature": temperature, "max_tokens": max_tokens}
+
+     if system_message:
+         data["system"] = system_message
+
+     try:
+         response = httpx.post(url, headers=headers, json=data, timeout=120)
+         response.raise_for_status()
+         response_data = response.json()
+         return response_data["content"][0]["text"]
+     except httpx.HTTPStatusError as e:
+         raise AIError.model_error(f"Anthropic API error: {e.response.status_code} - {e.response.text}") from e
+     except Exception as e:
+         raise AIError.model_error(f"Error calling Anthropic API: {str(e)}") from e
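
Anthropic's Messages API takes the system prompt as a top-level field rather than as a messages entry, which is why call_anthropic_api splits the incoming list. A sketch of the transformation, with illustrative values:

    messages = [
        {"role": "system", "content": "You write commit messages."},
        {"role": "user", "content": "Summarize this diff."},
    ]
    # Per the loop above, this becomes:
    data = {
        "model": "claude-3-5-haiku-latest",  # example model name
        "messages": [{"role": "user", "content": "Summarize this diff."}],
        "temperature": 0.7,
        "max_tokens": 256,
        "system": "You write commit messages.",
    }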
gac/providers/cerebras.py CHANGED
@@ -1,134 +1,29 @@
- """Cerebras API provider for gac."""
+ """Cerebras AI provider implementation."""

- import logging
  import os
- import time

  import httpx
- from halo import Halo

- from gac.ai_utils import _classify_error
- from gac.constants import EnvDefaults
  from gac.errors import AIError

- logger = logging.getLogger(__name__)

-
- def generate(
-     model: str,
-     prompt: str | tuple[str, str],
-     temperature: float = EnvDefaults.TEMPERATURE,
-     max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
-     max_retries: int = EnvDefaults.MAX_RETRIES,
-     quiet: bool = False,
- ) -> str:
-     """Generate commit message using Cerebras API with retry logic.
-
-     Args:
-         model: The model name (e.g., 'llama3.1-8b', 'llama3.1-70b')
-         prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
-         temperature: Controls randomness (0.0-1.0)
-         max_tokens: Maximum tokens in the response
-         max_retries: Number of retry attempts if generation fails
-         quiet: If True, suppress progress indicators
-
-     Returns:
-         A formatted commit message string
-
-     Raises:
-         AIError: If generation fails after max_retries attempts
-     """
+ def call_cerebras_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+     """Call Cerebras API directly."""
      api_key = os.getenv("CEREBRAS_API_KEY")
      if not api_key:
-         raise AIError.model_error("CEREBRAS_API_KEY environment variable not set")
-
-     # Handle both old (string) and new (tuple) prompt formats
-     if isinstance(prompt, tuple):
-         system_prompt, user_prompt = prompt
-         messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
-     else:
-         # Backward compatibility: treat string as user prompt
-         messages = [{"role": "user", "content": prompt}]
-
-     payload = {
-         "model": model,
-         "messages": messages,
-         "temperature": temperature,
-         "max_tokens": max_tokens,
-     }
-
-     headers = {
-         "Content-Type": "application/json",
-         "Authorization": f"Bearer {api_key}",
-     }
-
-     return _make_request_with_retry(
-         url="https://api.cerebras.ai/v1/chat/completions",
-         headers=headers,
-         payload=payload,
-         provider_name=f"Cerebras {model}",
-         max_retries=max_retries,
-         quiet=quiet,
-         response_parser=lambda r: r["choices"][0]["message"]["content"],
-     )
-
-
- def _make_request_with_retry(
-     url: str,
-     headers: dict,
-     payload: dict,
-     provider_name: str,
-     max_retries: int,
-     quiet: bool,
-     response_parser: callable,
- ) -> str:
-     """Make HTTP request with retry logic and common error handling."""
-     if quiet:
-         spinner = None
-     else:
-         spinner = Halo(text=f"Generating commit message with {provider_name}...", spinner="dots")
-         spinner.start()
-
-     last_error = None
-     retry_count = 0
-
-     while retry_count < max_retries:
-         try:
-             logger.debug(f"Trying with {provider_name} (attempt {retry_count + 1}/{max_retries})")
-
-             with httpx.Client(timeout=30.0) as client:
-                 response = client.post(url, headers=headers, json=payload)
-                 response.raise_for_status()
-
-             response_data = response.json()
-             message = response_parser(response_data)
-
-             if spinner:
-                 spinner.succeed(f"Generated commit message with {provider_name}")
-
-             return message
-
-         except Exception as e:
-             last_error = e
-             retry_count += 1
-
-             if retry_count == max_retries:
-                 logger.warning(f"Error generating commit message: {e}. Giving up.")
-                 break
-
-             wait_time = 2**retry_count
-             logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
-             if spinner:
-                 for i in range(wait_time, 0, -1):
-                     spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
-                     time.sleep(1)
-             else:
-                 time.sleep(wait_time)
-
-     if spinner:
-         spinner.fail(f"Failed to generate commit message with {provider_name}")
-
-     error_type = _classify_error(str(last_error))
-     raise AIError(
-         f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
-     )
+         raise AIError.model_error("CEREBRAS_API_KEY not found in environment variables")
+
+     url = "https://api.cerebras.ai/v1/chat/completions"
+     headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+     data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+
+     try:
+         response = httpx.post(url, headers=headers, json=data, timeout=120)
+         response.raise_for_status()
+         response_data = response.json()
+         return response_data["choices"][0]["message"]["content"]
+     except httpx.HTTPStatusError as e:
+         raise AIError.model_error(f"Cerebras API error: {e.response.status_code} - {e.response.text}") from e
+     except Exception as e:
+         raise AIError.model_error(f"Error calling Cerebras API: {str(e)}") from e
gac/providers/groq.py CHANGED
@@ -2,133 +2,50 @@

  import logging
  import os
- import time

  import httpx
- from halo import Halo

- from gac.ai_utils import _classify_error
- from gac.constants import EnvDefaults
  from gac.errors import AIError

  logger = logging.getLogger(__name__)


- def generate(
-     model: str,
-     prompt: str | tuple[str, str],
-     temperature: float = EnvDefaults.TEMPERATURE,
-     max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
-     max_retries: int = EnvDefaults.MAX_RETRIES,
-     quiet: bool = False,
- ) -> str:
-     """Generate commit message using Groq API with retry logic.
-
-     Args:
-         model: The model name (e.g., 'llama3-8b-8192', 'llama3-70b-8192')
-         prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
-         temperature: Controls randomness (0.0-1.0)
-         max_tokens: Maximum tokens in the response
-         max_retries: Number of retry attempts if generation fails
-         quiet: If True, suppress progress indicators
-
-     Returns:
-         A formatted commit message string
-
-     Raises:
-         AIError: If generation fails after max_retries attempts
-     """
+ def call_groq_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+     """Call Groq API directly."""
      api_key = os.getenv("GROQ_API_KEY")
      if not api_key:
-         raise AIError.model_error("GROQ_API_KEY environment variable not set")
-
-     # Handle both old (string) and new (tuple) prompt formats
-     if isinstance(prompt, tuple):
-         system_prompt, user_prompt = prompt
-         messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
-     else:
-         # Backward compatibility: treat string as user prompt
-         messages = [{"role": "user", "content": prompt}]
-
-     payload = {
-         "model": model,
-         "messages": messages,
-         "temperature": temperature,
-         "max_tokens": max_tokens,
-     }
-
-     headers = {
-         "Content-Type": "application/json",
-         "Authorization": f"Bearer {api_key}",
-     }
-
-     return _make_request_with_retry(
-         url="https://api.groq.com/openai/v1/chat/completions",
-         headers=headers,
-         payload=payload,
-         provider_name=f"Groq {model}",
-         max_retries=max_retries,
-         quiet=quiet,
-         response_parser=lambda r: r["choices"][0]["message"]["content"],
-     )
-
-
- def _make_request_with_retry(
-     url: str,
-     headers: dict,
-     payload: dict,
-     provider_name: str,
-     max_retries: int,
-     quiet: bool,
-     response_parser: callable,
- ) -> str:
-     """Make HTTP request with retry logic and common error handling."""
-     if quiet:
-         spinner = None
-     else:
-         spinner = Halo(text=f"Generating commit message with {provider_name}...", spinner="dots")
-         spinner.start()
-
-     last_error = None
-     retry_count = 0
-
-     while retry_count < max_retries:
-         try:
-             logger.debug(f"Trying with {provider_name} (attempt {retry_count + 1}/{max_retries})")
-
-             with httpx.Client(timeout=30.0) as client:
-                 response = client.post(url, headers=headers, json=payload)
-                 response.raise_for_status()
-
-             response_data = response.json()
-             message = response_parser(response_data)
-
-             if spinner:
-                 spinner.succeed(f"Generated commit message with {provider_name}")
-
-             return message
-
-         except Exception as e:
-             last_error = e
-             retry_count += 1
-
-             if retry_count == max_retries:
-                 logger.warning(f"Error generating commit message: {e}. Giving up.")
-                 break
-
-             wait_time = 2**retry_count
-             logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
-             if spinner:
-                 for i in range(wait_time, 0, -1):
-                     spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
-                     time.sleep(1)
+         raise AIError.model_error("GROQ_API_KEY not found in environment variables")
+
+     url = "https://api.groq.com/openai/v1/chat/completions"
+     headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+     data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+
+     try:
+         response = httpx.post(url, headers=headers, json=data, timeout=120)
+         response.raise_for_status()
+         response_data = response.json()
+
+         # Debug logging to understand response structure
+         logger.debug(f"Groq API response: {response_data}")
+
+         # Handle different response formats
+         if "choices" in response_data and len(response_data["choices"]) > 0:
+             choice = response_data["choices"][0]
+             if "message" in choice and "content" in choice["message"]:
+                 content = choice["message"]["content"]
+                 logger.debug(f"Found content in message.content: {content}")
+                 return content
+             elif "text" in choice:
+                 content = choice["text"]
+                 logger.debug(f"Found content in choice.text: {content}")
+                 return content
              else:
-                 time.sleep(wait_time)
-
-     if spinner:
-         spinner.fail(f"Failed to generate commit message with {provider_name}")
-
-     error_type = _classify_error(str(last_error))
-     raise AIError(
-         f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
-     )
+                 logger.debug(f"Choice structure: {choice}")
+
+         # If we can't find content in the expected places, raise an error
+         raise AIError.model_error(f"Unexpected response format from Groq API: {response_data}")
+     except httpx.HTTPStatusError as e:
+         raise AIError.model_error(f"Groq API error: {e.response.status_code} - {e.response.text}") from e
+     except Exception as e:
+         raise AIError.model_error(f"Error calling Groq API: {str(e)}") from e
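
The new Groq parser accepts two response shapes before giving up; illustrative examples of what it recognizes:

    chat_shape = {"choices": [{"message": {"content": "feat: add retry logic"}}]}  # preferred path
    text_shape = {"choices": [{"text": "feat: add retry logic"}]}                  # fallback path
    # Anything else raises AIError.model_error with the raw response for debugging.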
gac/providers/ollama.py CHANGED
@@ -1,135 +1,35 @@
- """Ollama API provider for gac."""
+ """Ollama AI provider implementation."""

- import logging
  import os
- import time

  import httpx
- from halo import Halo

- from gac.ai_utils import _classify_error
- from gac.constants import EnvDefaults
  from gac.errors import AIError

- logger = logging.getLogger(__name__)

-
- def generate(
-     model: str,
-     prompt: str | tuple[str, str],
-     temperature: float = EnvDefaults.TEMPERATURE,
-     max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
-     max_retries: int = EnvDefaults.MAX_RETRIES,
-     quiet: bool = False,
- ) -> str:
-     """Generate commit message using Ollama API with retry logic.
-
-     Args:
-         model: The model name (e.g., 'llama3', 'mistral')
-         prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
-         temperature: Controls randomness (0.0-1.0)
-         max_tokens: Maximum tokens in the response (note: Ollama uses 'num_predict')
-         max_retries: Number of retry attempts if generation fails
-         quiet: If True, suppress progress indicators
-
-     Returns:
-         A formatted commit message string
-
-     Raises:
-         AIError: If generation fails after max_retries attempts
-     """
-     # Handle both old (string) and new (tuple) prompt formats
-     if isinstance(prompt, tuple):
-         system_prompt, user_prompt = prompt
-         messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
-     else:
-         # Backward compatibility: treat string as user prompt
-         messages = [{"role": "user", "content": prompt}]
-
-     payload = {
-         "model": model,
-         "messages": messages,
-         "stream": False,
-         "options": {
-             "temperature": temperature,
-             "num_predict": max_tokens,
-         },
-     }
-
-     headers = {
-         "Content-Type": "application/json",
-     }
-
-     # Ollama typically runs locally on port 11434
-     ollama_url = os.getenv("OLLAMA_URL", "http://localhost:11434")
-
-     return _make_request_with_retry(
-         url=f"{ollama_url}/api/chat",
-         headers=headers,
-         payload=payload,
-         provider_name=f"Ollama {model}",
-         max_retries=max_retries,
-         quiet=quiet,
-         response_parser=lambda r: r["message"]["content"],
-     )
-
-
- def _make_request_with_retry(
-     url: str,
-     headers: dict,
-     payload: dict,
-     provider_name: str,
-     max_retries: int,
-     quiet: bool,
-     response_parser: callable,
- ) -> str:
-     """Make HTTP request with retry logic and common error handling."""
-     if quiet:
-         spinner = None
-     else:
-         spinner = Halo(text=f"Generating commit message with {provider_name}...", spinner="dots")
-         spinner.start()
-
-     last_error = None
-     retry_count = 0
-
-     while retry_count < max_retries:
-         try:
-             logger.debug(f"Trying with {provider_name} (attempt {retry_count + 1}/{max_retries})")
-
-             with httpx.Client(timeout=30.0) as client:
-                 response = client.post(url, headers=headers, json=payload)
-                 response.raise_for_status()
-
-             response_data = response.json()
-             message = response_parser(response_data)
-
-             if spinner:
-                 spinner.succeed(f"Generated commit message with {provider_name}")
-
-             return message
-
-         except Exception as e:
-             last_error = e
-             retry_count += 1
-
-             if retry_count == max_retries:
-                 logger.warning(f"Error generating commit message: {e}. Giving up.")
-                 break
-
-             wait_time = 2**retry_count
-             logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
-             if spinner:
-                 for i in range(wait_time, 0, -1):
-                     spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
-                     time.sleep(1)
-             else:
-                 time.sleep(wait_time)
-
-     if spinner:
-         spinner.fail(f"Failed to generate commit message with {provider_name}")
-
-     error_type = _classify_error(str(last_error))
-     raise AIError(
-         f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
-     )
+ def call_ollama_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+     """Call Ollama API directly."""
+     api_url = os.getenv("OLLAMA_API_URL", "http://localhost:11434")
+
+     url = f"{api_url.rstrip('/')}/api/chat"
+     data = {"model": model, "messages": messages, "temperature": temperature, "stream": False}
+
+     try:
+         response = httpx.post(url, json=data, timeout=120)
+         response.raise_for_status()
+         response_data = response.json()
+
+         # Handle different response formats from Ollama
+         if "message" in response_data and "content" in response_data["message"]:
+             return response_data["message"]["content"]
+         elif "response" in response_data:
+             return response_data["response"]
+         else:
+             # Fallback: return the full response as string
+             return str(response_data)
+     except httpx.ConnectError as e:
+         raise AIError.connection_error(f"Ollama connection failed. Make sure Ollama is running: {str(e)}") from e
+     except httpx.HTTPStatusError as e:
+         raise AIError.model_error(f"Ollama API error: {e.response.status_code} - {e.response.text}") from e
+     except Exception as e:
+         raise AIError.model_error(f"Error calling Ollama API: {str(e)}") from e
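
Two behavior changes are visible in this file: the base-URL variable is now OLLAMA_API_URL (1.1.0 read OLLAMA_URL), and the payload sends temperature at the top level while max_tokens is accepted but no longer forwarded (1.1.0 sent it as options.num_predict). A usage sketch with illustrative values:

    import os

    from gac.providers import call_ollama_api

    os.environ.setdefault("OLLAMA_API_URL", "http://localhost:11434")  # default if unset
    message = call_ollama_api(
        model="llama3",  # example name from the removed 1.1.0 docstring
        messages=[{"role": "user", "content": "Summarize this diff."}],
        temperature=0.7,
        max_tokens=256,  # accepted but not sent in the 1.2.0 payload
    )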
gac/providers/openai.py CHANGED
@@ -1,134 +1,29 @@
  """OpenAI API provider for gac."""

- import logging
  import os
- import time

  import httpx
- from halo import Halo

- from gac.ai_utils import _classify_error
- from gac.constants import EnvDefaults
  from gac.errors import AIError

- logger = logging.getLogger(__name__)

-
- def generate(
-     model: str,
-     prompt: str | tuple[str, str],
-     temperature: float = EnvDefaults.TEMPERATURE,
-     max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
-     max_retries: int = EnvDefaults.MAX_RETRIES,
-     quiet: bool = False,
- ) -> str:
-     """Generate commit message using OpenAI API with retry logic.
-
-     Args:
-         model: The model name (e.g., 'gpt-4', 'gpt-3.5-turbo')
-         prompt: Either a string prompt or tuple of (system_prompt, user_prompt)
-         temperature: Controls randomness (0.0-1.0)
-         max_tokens: Maximum tokens in the response
-         max_retries: Number of retry attempts if generation fails
-         quiet: If True, suppress progress indicators
-
-     Returns:
-         A formatted commit message string
-
-     Raises:
-         AIError: If generation fails after max_retries attempts
-     """
+ def call_openai_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+     """Call OpenAI API directly."""
      api_key = os.getenv("OPENAI_API_KEY")
      if not api_key:
-         raise AIError.model_error("OPENAI_API_KEY environment variable not set")
-
-     # Handle both old (string) and new (tuple) prompt formats
-     if isinstance(prompt, tuple):
-         system_prompt, user_prompt = prompt
-         messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
-     else:
-         # Backward compatibility: treat string as user prompt
-         messages = [{"role": "user", "content": prompt}]
-
-     payload = {
-         "model": model,
-         "messages": messages,
-         "temperature": temperature,
-         "max_tokens": max_tokens,
-     }
-
-     headers = {
-         "Content-Type": "application/json",
-         "Authorization": f"Bearer {api_key}",
-     }
-
-     return _make_request_with_retry(
-         url="https://api.openai.com/v1/chat/completions",
-         headers=headers,
-         payload=payload,
-         provider_name=f"OpenAI {model}",
-         max_retries=max_retries,
-         quiet=quiet,
-         response_parser=lambda r: r["choices"][0]["message"]["content"],
-     )
-
-
- def _make_request_with_retry(
-     url: str,
-     headers: dict,
-     payload: dict,
-     provider_name: str,
-     max_retries: int,
-     quiet: bool,
-     response_parser: callable,
- ) -> str:
-     """Make HTTP request with retry logic and common error handling."""
-     if quiet:
-         spinner = None
-     else:
-         spinner = Halo(text=f"Generating commit message with {provider_name}...", spinner="dots")
-         spinner.start()
-
-     last_error = None
-     retry_count = 0
-
-     while retry_count < max_retries:
-         try:
-             logger.debug(f"Trying with {provider_name} (attempt {retry_count + 1}/{max_retries})")
-
-             with httpx.Client(timeout=30.0) as client:
-                 response = client.post(url, headers=headers, json=payload)
-                 response.raise_for_status()
-
-             response_data = response.json()
-             message = response_parser(response_data)
-
-             if spinner:
-                 spinner.succeed(f"Generated commit message with {provider_name}")
-
-             return message
-
-         except Exception as e:
-             last_error = e
-             retry_count += 1
-
-             if retry_count == max_retries:
-                 logger.warning(f"Error generating commit message: {e}. Giving up.")
-                 break
-
-             wait_time = 2**retry_count
-             logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
-             if spinner:
-                 for i in range(wait_time, 0, -1):
-                     spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
-                     time.sleep(1)
-             else:
-                 time.sleep(wait_time)
-
-     if spinner:
-         spinner.fail(f"Failed to generate commit message with {provider_name}")
-
-     error_type = _classify_error(str(last_error))
-     raise AIError(
-         f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
-     )
+         raise AIError.model_error("OPENAI_API_KEY not found in environment variables")
+
+     url = "https://api.openai.com/v1/chat/completions"
+     headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+
+     data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
+
+     try:
+         response = httpx.post(url, headers=headers, json=data, timeout=120)
+         response.raise_for_status()
+         response_data = response.json()
+         return response_data["choices"][0]["message"]["content"]
+     except httpx.HTTPStatusError as e:
+         raise AIError.model_error(f"OpenAI API error: {e.response.status_code} - {e.response.text}") from e
+     except Exception as e:
+         raise AIError.model_error(f"Error calling OpenAI API: {str(e)}") from e
gac/providers/openrouter.py CHANGED
@@ -1,50 +1,25 @@
  """OpenRouter API provider for gac."""

- import logging
  import os
- import time

  import httpx
- from halo import Halo

- from gac.ai_utils import _classify_error
- from gac.constants import EnvDefaults
  from gac.errors import AIError

- logger = logging.getLogger(__name__)

-
- def generate(
-     model: str,
-     prompt: str | tuple[str, str],
-     temperature: float = EnvDefaults.TEMPERATURE,
-     max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
-     max_retries: int = EnvDefaults.MAX_RETRIES,
-     quiet: bool = False,
- ) -> str:
-     """Generate commit message using OpenRouter API with retry logic."""
+ def call_openrouter_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
+     """Call OpenRouter API directly."""
      api_key = os.getenv("OPENROUTER_API_KEY")
      if not api_key:
          raise AIError.model_error("OPENROUTER_API_KEY environment variable not set")

-     if isinstance(prompt, tuple):
-         system_prompt, user_prompt = prompt
-         messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
-     else:
-         messages = [{"role": "user", "content": prompt}]
-
-     payload = {
-         "model": model,
-         "messages": messages,
-         "temperature": temperature,
-         "max_tokens": max_tokens,
-     }
-
+     url = "https://openrouter.ai/api/v1/chat/completions"
      headers = {
          "Content-Type": "application/json",
          "Authorization": f"Bearer {api_key}",
      }

+     # Add optional headers if environment variables are set
      site_url = os.getenv("OPENROUTER_SITE_URL")
      if site_url:
          headers["HTTP-Referer"] = site_url
@@ -53,73 +28,19 @@ def generate(
      if site_name:
          headers["X-Title"] = site_name

-     return _make_request_with_retry(
-         url="https://openrouter.ai/api/v1/chat/completions",
-         headers=headers,
-         payload=payload,
-         provider_name=f"OpenRouter {model}",
-         max_retries=max_retries,
-         quiet=quiet,
-         response_parser=lambda r: r["choices"][0]["message"]["content"],
-     )
-
-
- def _make_request_with_retry(
-     url: str,
-     headers: dict,
-     payload: dict,
-     provider_name: str,
-     max_retries: int,
-     quiet: bool,
-     response_parser: callable,
- ) -> str:
-     """Make HTTP request with retry logic and common error handling."""
-     if quiet:
-         spinner = None
-     else:
-         spinner = Halo(text=f"Generating commit message with {provider_name}...", spinner="dots")
-         spinner.start()
-
-     last_error = None
-     retry_count = 0
-
-     while retry_count < max_retries:
-         try:
-             logger.debug(f"Trying with {provider_name} (attempt {retry_count + 1}/{max_retries})")
-
-             with httpx.Client(timeout=30.0) as client:
-                 response = client.post(url, headers=headers, json=payload)
-                 response.raise_for_status()
-
-             response_data = response.json()
-             message = response_parser(response_data)
-
-             if spinner:
-                 spinner.succeed(f"Generated commit message with {provider_name}")
-
-             return message
-
-         except Exception as e:
-             last_error = e
-             retry_count += 1
-
-             if retry_count == max_retries:
-                 logger.warning(f"Error generating commit message: {e}. Giving up.")
-                 break
-
-             wait_time = 2**retry_count
-             logger.warning(f"Error generating commit message: {e}. Retrying in {wait_time}s...")
-             if spinner:
-                 for i in range(wait_time, 0, -1):
-                     spinner.text = f"Retry {retry_count}/{max_retries} in {i}s..."
-                     time.sleep(1)
-             else:
-                 time.sleep(wait_time)
-
-     if spinner:
-         spinner.fail(f"Failed to generate commit message with {provider_name}")
+     data = {
+         "model": model,
+         "messages": messages,
+         "temperature": temperature,
+         "max_tokens": max_tokens,
+     }

-     error_type = _classify_error(str(last_error))
-     raise AIError(
-         f"Failed to generate commit message after {max_retries} attempts: {last_error}", error_type=error_type
-     )
+     try:
+         response = httpx.post(url, headers=headers, json=data, timeout=120)
+         response.raise_for_status()
+         response_data = response.json()
+         return response_data["choices"][0]["message"]["content"]
+     except httpx.HTTPStatusError as e:
+         raise AIError.model_error(f"OpenRouter API error: {e.response.status_code} - {e.response.text}") from e
+     except Exception as e:
+         raise AIError.model_error(f"Error calling OpenRouter API: {str(e)}") from e
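
The optional attribution headers carry over from 1.1.0: two environment variables, when set, add HTTP-Referer and X-Title to every request. A sketch with placeholder values:

    import os

    os.environ["OPENROUTER_SITE_URL"] = "https://example.com"  # sent as HTTP-Referer
    os.environ["OPENROUTER_SITE_NAME"] = "My App"              # sent as X-Title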
{gac-1.1.0.dist-info → gac-1.2.0.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: gac
- Version: 1.1.0
+ Version: 1.2.0
  Summary: AI-powered Git commit message generator with multi-provider support
  Project-URL: Homepage, https://github.com/cellwebb/gac
  Project-URL: Documentation, https://github.com/cellwebb/gac#readme
gac-1.2.0.dist-info/RECORD ADDED
@@ -0,0 +1,28 @@
+ gac/__init__.py,sha256=HFWgSVNbTAFhgetCRWI1WrtyE7zC7IHvoBOrfDGUurM,989
+ gac/__version__.py,sha256=_cfu33DyI3jZln3bUKxhnDn1AeCj66_yLEqy9IJASqs,66
+ gac/ai.py,sha256=RcZKXiyx9Wll2e-dRx0jNzQPzojVYE7OaSEDclE2MKc,2979
+ gac/ai_utils.py,sha256=p63lxYp4AqzBgbYkYyNUKLecIszTVvBp4Wkm2hKlMlc,7854
+ gac/cli.py,sha256=eQS8S7v6p0CfN9wtr239ujYGTi9rKl-KV7STX2U-C3w,4581
+ gac/config.py,sha256=wSgEDjtis7Vk1pv5VPvYmJyD9-tymDS6GiUHjnCMbIM,1486
+ gac/config_cli.py,sha256=v9nFHZO1RvK9fzHyuUS6SG-BCLHMsdOMDwWamBhVVh4,1608
+ gac/constants.py,sha256=MAxdASGncfZY1TdKGdhJZ0wvTBEU3gTN6KEdw8n3Bd8,4844
+ gac/diff_cli.py,sha256=wnVQ9OFGnM0d2Pj9WVjWbo0jxqIuRHVAwmb8wU9Pa3E,5676
+ gac/errors.py,sha256=3vIRMQ2QF3sP9_rPfXAFuu5ZSjIVX4FxM-FAuiR8N-8,7416
+ gac/git.py,sha256=MS2m4fv8h4mau1djFG1aje9NXTmkGsjPO9w18LqNGX0,6031
+ gac/init_cli.py,sha256=e4z9-4NhoUn2DUyApIru8JR-W7HuNq2VeeXkR9aXHLo,1868
+ gac/main.py,sha256=POay7l6ihm3oF9ajGWx9cA40Pu-NVz5x_OzQOYPDoX8,12011
+ gac/preprocess.py,sha256=krrLPHsccYMdn_YAtUrppBJIoRgevxGWusDwhE40LEo,15366
+ gac/prompt.py,sha256=_fv24XU3DZE_S72vcdUYnNkmy-_KXnr1Vlc-9okop7E,17263
+ gac/utils.py,sha256=W3ladtmsH01MNLdckQYTzYrYbTGEdzCKI36he9C-y_E,3945
+ gac/providers/__init__.py,sha256=iGwZmV-cFqL3AeFE0vc6KpHwm-RLWcVSU17c7IvJg2s,456
+ gac/providers/anthropic.py,sha256=esf6pq6nRdqD0mpKz_IQNXmXe5WnkoSA2b1isrrRB4o,1514
+ gac/providers/cerebras.py,sha256=eE9lAjEzrATIo941vv97I2DSmpnXYBCJ9HkVIb-6Whg,1130
+ gac/providers/groq.py,sha256=EPivjTg3TUqynBofnatlIxKzFTpLPP4psVb562Dsx5o,2040
+ gac/providers/ollama.py,sha256=Bp94DvortQssDhekuNdJ7fKLeWpWASYXSssJNCuGszg,1383
+ gac/providers/openai.py,sha256=1l-Wu7ETXXaJ7cNB3OD5ivf4_72iIEP9bPFMQst8JWI,1109
+ gac/providers/openrouter.py,sha256=Vs0MXfv9KCldfEUD2roTwcXqs89tgE3ndNqRKoqdJQs,1473
+ gac-1.2.0.dist-info/METADATA,sha256=fbK0j24cpsMWVJXnybrXyQ6MzMS791bpdIKu2vvG7-c,8558
+ gac-1.2.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ gac-1.2.0.dist-info/entry_points.txt,sha256=tdjN-XMmcWfL92swuRAjT62bFLOAwk9bTMRLGP5Z4aI,36
+ gac-1.2.0.dist-info/licenses/LICENSE,sha256=vOab37NouL1PNs5BswnPayrMCqaN2sqLfMQfqPDrpZg,1103
+ gac-1.2.0.dist-info/RECORD,,
gac-1.1.0.dist-info/RECORD DELETED
@@ -1,28 +0,0 @@
- gac/__init__.py,sha256=-knZC1oRylbJUv5uGk3fP_dHaaLHG76duAEWxLUi1eg,940
- gac/__version__.py,sha256=JSz2jRyqqtKTmDkhJ8NI5pr2e0rhFqSSkokXslLdc2M,66
- gac/ai.py,sha256=-Wd9cr5yK-UvR14AHvBeHrpa28KXu0bYZjJS02U-q90,3419
- gac/ai_utils.py,sha256=DFiM0Wzp-kremo33_i5SEYpms7tE13ipKLCCr_-AZPU,4327
- gac/cli.py,sha256=eQS8S7v6p0CfN9wtr239ujYGTi9rKl-KV7STX2U-C3w,4581
- gac/config.py,sha256=wSgEDjtis7Vk1pv5VPvYmJyD9-tymDS6GiUHjnCMbIM,1486
- gac/config_cli.py,sha256=v9nFHZO1RvK9fzHyuUS6SG-BCLHMsdOMDwWamBhVVh4,1608
- gac/constants.py,sha256=MAxdASGncfZY1TdKGdhJZ0wvTBEU3gTN6KEdw8n3Bd8,4844
- gac/diff_cli.py,sha256=wnVQ9OFGnM0d2Pj9WVjWbo0jxqIuRHVAwmb8wU9Pa3E,5676
- gac/errors.py,sha256=3vIRMQ2QF3sP9_rPfXAFuu5ZSjIVX4FxM-FAuiR8N-8,7416
- gac/git.py,sha256=MS2m4fv8h4mau1djFG1aje9NXTmkGsjPO9w18LqNGX0,6031
- gac/init_cli.py,sha256=e4z9-4NhoUn2DUyApIru8JR-W7HuNq2VeeXkR9aXHLo,1868
- gac/main.py,sha256=POay7l6ihm3oF9ajGWx9cA40Pu-NVz5x_OzQOYPDoX8,12011
- gac/preprocess.py,sha256=krrLPHsccYMdn_YAtUrppBJIoRgevxGWusDwhE40LEo,15366
- gac/prompt.py,sha256=_fv24XU3DZE_S72vcdUYnNkmy-_KXnr1Vlc-9okop7E,17263
- gac/utils.py,sha256=W3ladtmsH01MNLdckQYTzYrYbTGEdzCKI36he9C-y_E,3945
- gac/providers/__init__.py,sha256=_JawPCV_ZCtI18bBqVCbQkkbLetSQqOBcj2b19KRuX0,38
- gac/providers/anthropic.py,sha256=v0JApD6dNfIbxBoFbpf4GBWc4PsVPgcEcw1GDEWhoeM,4382
- gac/providers/cerebras.py,sha256=rWtndVj9q1w-EIfLyP4NJ10GFPnlfiu43ZminTdWP1s,4166
- gac/providers/groq.py,sha256=K7hrHaYfGn25hnDedR6FBEOqb0r0Y65zQRYso5w3xGE,4156
- gac/providers/ollama.py,sha256=qzX4qMsQeZUh8oPOWFIszhsvgioSnoLi82wO8G13pOI,4139
- gac/providers/openai.py,sha256=VKH7SQE6O4UB1c15KXIBL5UP1Fj3Ed4XQDrqENEPmBo,4150
- gac/providers/openrouter.py,sha256=vU4zIwtnyz9hjWokJwORwX3bZjYtF2MWqz8xxE-srsA,3753
- gac-1.1.0.dist-info/METADATA,sha256=VImMQAbgQT23S1czOiINsmy4tnloF8MRcb9_TSvasrI,8558
- gac-1.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- gac-1.1.0.dist-info/entry_points.txt,sha256=tdjN-XMmcWfL92swuRAjT62bFLOAwk9bTMRLGP5Z4aI,36
- gac-1.1.0.dist-info/licenses/LICENSE,sha256=vOab37NouL1PNs5BswnPayrMCqaN2sqLfMQfqPDrpZg,1103
- gac-1.1.0.dist-info/RECORD,,