gac-1.9.5-py3-none-any.whl → gac-1.10.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of gac has been flagged as potentially problematic; see the package registry's advisory page for this release for more details.

gac/__version__.py CHANGED
@@ -1,3 +1,3 @@
1
1
  """Version information for gac package."""
2
2
 
3
- __version__ = "1.9.5"
3
+ __version__ = "1.10.1"
gac/ai.py CHANGED
@@ -13,6 +13,7 @@ from gac.providers import (
13
13
  call_anthropic_api,
14
14
  call_cerebras_api,
15
15
  call_chutes_api,
16
+ call_fireworks_api,
16
17
  call_gemini_api,
17
18
  call_groq_api,
18
19
  call_lmstudio_api,
@@ -30,7 +31,7 @@ logger = logging.getLogger(__name__)
30
31
 
31
32
  def generate_commit_message(
32
33
  model: str,
33
- prompt: str | tuple[str, str],
34
+ prompt: str | tuple[str, str] | list[dict[str, str]],
34
35
  temperature: float = EnvDefaults.TEMPERATURE,
35
36
  max_tokens: int = EnvDefaults.MAX_OUTPUT_TOKENS,
36
37
  max_retries: int = EnvDefaults.MAX_RETRIES,
@@ -59,18 +60,28 @@ def generate_commit_message(
59
60
  'docs: Update README with installation instructions'
60
61
  """
61
62
  # Handle both old (string) and new (tuple) prompt formats
62
- if isinstance(prompt, tuple):
63
+ if isinstance(prompt, list):
64
+ messages = [{**msg} for msg in prompt]
65
+ elif isinstance(prompt, tuple):
63
66
  system_prompt, user_prompt = prompt
67
+ messages = [
68
+ {"role": "system", "content": system_prompt or ""},
69
+ {"role": "user", "content": user_prompt},
70
+ ]
64
71
  else:
65
- # Backward compatibility: treat string as user prompt with no system prompt
66
- system_prompt = ""
67
- user_prompt = prompt
72
+ # Backward compatibility: treat string as user prompt with empty system prompt
73
+ user_prompt = str(prompt)
74
+ messages = [
75
+ {"role": "system", "content": ""},
76
+ {"role": "user", "content": user_prompt},
77
+ ]
68
78
 
69
79
  # Provider functions mapping
70
80
  provider_funcs = {
71
81
  "anthropic": call_anthropic_api,
72
82
  "cerebras": call_cerebras_api,
73
83
  "chutes": call_chutes_api,
84
+ "fireworks": call_fireworks_api,
74
85
  "gemini": call_gemini_api,
75
86
  "groq": call_groq_api,
76
87
  "lm-studio": call_lmstudio_api,
@@ -88,8 +99,7 @@ def generate_commit_message(
88
99
  return generate_with_retries(
89
100
  provider_funcs=provider_funcs,
90
101
  model=model,
91
- system_prompt=system_prompt,
92
- user_prompt=user_prompt,
102
+ messages=messages,
93
103
  temperature=temperature,
94
104
  max_tokens=max_tokens,
95
105
  max_retries=max_retries,
gac/ai_utils.py CHANGED
@@ -78,8 +78,7 @@ def _classify_error(error_str: str) -> str:
78
78
  def generate_with_retries(
79
79
  provider_funcs: dict,
80
80
  model: str,
81
- system_prompt: str,
82
- user_prompt: str,
81
+ messages: list[dict[str, str]],
83
82
  temperature: float,
84
83
  max_tokens: int,
85
84
  max_retries: int,
@@ -97,6 +96,7 @@ def generate_with_retries(
97
96
  "anthropic",
98
97
  "cerebras",
99
98
  "chutes",
99
+ "fireworks",
100
100
  "gemini",
101
101
  "groq",
102
102
  "lm-studio",
@@ -111,10 +111,8 @@ def generate_with_retries(
111
111
  if provider not in supported_providers:
112
112
  raise AIError.model_error(f"Unsupported provider: {provider}. Supported providers: {supported_providers}")
113
113
 
114
- messages = [
115
- {"role": "system", "content": system_prompt},
116
- {"role": "user", "content": user_prompt},
117
- ]
114
+ if not messages:
115
+ raise AIError.model_error("No messages provided for AI generation")
118
116
 
119
117
  # Set up spinner
120
118
  if quiet:
gac/init_cli.py CHANGED
@@ -35,6 +35,7 @@ def init() -> None:
35
35
  ("Anthropic", "claude-haiku-4-5"),
36
36
  ("Cerebras", "qwen-3-coder-480b"),
37
37
  ("Chutes", "zai-org/GLM-4.6-FP8"),
38
+ ("Fireworks", "accounts/fireworks/models/gpt-oss-20b"),
38
39
  ("Gemini", "gemini-2.5-flash"),
39
40
  ("Groq", "meta-llama/llama-4-maverick-17b-128e-instruct"),
40
41
  ("LM Studio", "gemma3"),
gac/main.py CHANGED
@@ -202,39 +202,49 @@ def main(
202
202
  )
203
203
  )
204
204
 
205
+ conversation_messages: list[dict[str, str]] = []
206
+ if system_prompt:
207
+ conversation_messages.append({"role": "system", "content": system_prompt})
208
+ conversation_messages.append({"role": "user", "content": user_prompt})
209
+
205
210
  try:
206
- # Count tokens for both prompts
207
- prompt_tokens = count_tokens(system_prompt, model) + count_tokens(user_prompt, model)
208
-
209
- warning_limit_val = config.get("warning_limit_tokens", EnvDefaults.WARNING_LIMIT_TOKENS)
210
- assert warning_limit_val is not None
211
- warning_limit = int(warning_limit_val)
212
- if warning_limit and prompt_tokens > warning_limit:
213
- console.print(
214
- f"[yellow]⚠️ WARNING: Prompt contains {prompt_tokens} tokens, which exceeds the warning limit of "
215
- f"{warning_limit} tokens.[/yellow]"
211
+ first_iteration = True
212
+
213
+ while True:
214
+ prompt_tokens = count_tokens(conversation_messages, model)
215
+
216
+ if first_iteration:
217
+ warning_limit_val = config.get("warning_limit_tokens", EnvDefaults.WARNING_LIMIT_TOKENS)
218
+ assert warning_limit_val is not None
219
+ warning_limit = int(warning_limit_val)
220
+ if warning_limit and prompt_tokens > warning_limit:
221
+ console.print(
222
+ f"[yellow]⚠️ WARNING: Prompt contains {prompt_tokens} tokens, which exceeds the warning limit of "
223
+ f"{warning_limit} tokens.[/yellow]"
224
+ )
225
+ if require_confirmation:
226
+ proceed = click.confirm("Do you want to continue anyway?", default=True)
227
+ if not proceed:
228
+ console.print("[yellow]Aborted due to token limit.[/yellow]")
229
+ sys.exit(0)
230
+
231
+ first_iteration = False
232
+
233
+ raw_commit_message = generate_commit_message(
234
+ model=model,
235
+ prompt=conversation_messages,
236
+ temperature=temperature,
237
+ max_tokens=max_output_tokens,
238
+ max_retries=max_retries,
239
+ quiet=quiet,
216
240
  )
217
- if require_confirmation:
218
- proceed = click.confirm("Do you want to continue anyway?", default=True)
219
- if not proceed:
220
- console.print("[yellow]Aborted due to token limit.[/yellow]")
221
- sys.exit(0)
241
+ commit_message = clean_commit_message(raw_commit_message)
222
242
 
223
- commit_message = generate_commit_message(
224
- model=model,
225
- prompt=(system_prompt, user_prompt),
226
- temperature=temperature,
227
- max_tokens=max_output_tokens,
228
- max_retries=max_retries,
229
- quiet=quiet,
230
- )
231
- commit_message = clean_commit_message(commit_message)
243
+ logger.info("Generated commit message:")
244
+ logger.info(commit_message)
232
245
 
233
- logger.info("Generated commit message:")
234
- logger.info(commit_message)
246
+ conversation_messages.append({"role": "assistant", "content": commit_message})
235
247
 
236
- # Reroll loop
237
- while True:
238
248
  console.print("[bold green]Generated commit message:[/bold green]")
239
249
  console.print(Panel(commit_message, title="Commit Message", border_style="cyan"))
240
250
 
@@ -252,76 +262,38 @@ def main(
252
262
  response = click.prompt(
253
263
  "Proceed with commit above? [y/n/r <feedback>]", type=str, show_default=False
254
264
  ).strip()
265
+ response_lower = response.lower()
255
266
 
256
- if response.lower() in ["y", "yes"]:
257
- break # Exit both loops and proceed with commit
258
- elif response.lower() in ["n", "no"]:
267
+ if response_lower in ["y", "yes"]:
268
+ break
269
+ if response_lower in ["n", "no"]:
259
270
  console.print("[yellow]Prompt not accepted. Exiting...[/yellow]")
260
271
  sys.exit(0)
261
- elif response.lower() == "r" or response.lower().startswith("r ") or response.lower() == "reroll":
262
- # Parse the reroll command for optional feedback
263
- if response.lower() == "r" or response.lower() == "reroll":
264
- # Simple reroll without feedback
265
- reroll_feedback = ""
272
+ if response_lower == "r" or response_lower == "reroll" or response_lower.startswith("r "):
273
+ if response_lower == "r" or response_lower == "reroll":
274
+ feedback_message = (
275
+ "Please provide an alternative commit message using the same repository context."
276
+ )
266
277
  console.print("[cyan]Regenerating commit message...[/cyan]")
267
278
  else:
268
- # Extract feedback from "r <feedback>"
269
- reroll_feedback = response[2:].strip() # Remove "r " prefix
270
- console.print(f"[cyan]Regenerating commit message with feedback: {reroll_feedback}[/cyan]")
271
-
272
- # Combine hints if reroll feedback provided
273
- combined_hint = hint
274
- if reroll_feedback:
275
- # Create conversational prompt with previous attempt and feedback
276
- conversational_hint = f"Previous attempt: '{commit_message}'. User feedback: {reroll_feedback}. Please revise accordingly."
277
-
278
- if hint:
279
- combined_hint = f"{hint}. {conversational_hint}"
280
- else:
281
- combined_hint = conversational_hint
282
-
283
- # Regenerate prompt with conversational feedback
284
- reroll_system_prompt, reroll_user_prompt = build_prompt(
285
- status=status,
286
- processed_diff=processed_diff,
287
- diff_stat=diff_stat,
288
- one_liner=one_liner,
289
- hint=combined_hint,
290
- infer_scope=infer_scope,
291
- verbose=verbose,
279
+ reroll_feedback = response[2:].strip()
280
+ feedback_message = (
281
+ f"Please revise the commit message based on this feedback: {reroll_feedback}"
292
282
  )
293
- else:
294
- # No hint given, just reroll with same prompts
295
- reroll_system_prompt, reroll_user_prompt = system_prompt, user_prompt
283
+ console.print(f"[cyan]Regenerating commit message with feedback: {reroll_feedback}[/cyan]")
296
284
 
297
- # Update prompts and recalculate token count for the next generation
298
- system_prompt = reroll_system_prompt
299
- user_prompt = reroll_user_prompt
300
- prompt_tokens = count_tokens(system_prompt, model) + count_tokens(user_prompt, model)
285
+ conversation_messages.append({"role": "user", "content": feedback_message})
301
286
 
302
287
  console.print() # Add blank line for readability
288
+ break
289
+
290
+ console.print(
291
+ "[red]Invalid response. Please enter y (yes), n (no), r (reroll), or r <feedback>.[/red]"
292
+ )
303
293
 
304
- # Generate new message
305
- commit_message = generate_commit_message(
306
- model=model,
307
- prompt=(system_prompt, user_prompt),
308
- temperature=temperature,
309
- max_tokens=max_output_tokens,
310
- max_retries=max_retries,
311
- quiet=quiet,
312
- )
313
- commit_message = clean_commit_message(commit_message)
314
- break # Exit inner loop, continue outer loop
315
- else:
316
- console.print(
317
- "[red]Invalid response. Please enter y (yes), n (no), r (reroll), or r <feedback>.[/red]"
318
- )
319
-
320
- # If we got here with 'y', break the outer loop
321
- if response.lower() in ["y", "yes"]:
294
+ if response_lower in ["y", "yes"]:
322
295
  break
323
296
  else:
324
- # No confirmation required, exit loop
325
297
  break
326
298
 
327
299
  if dry_run:
gac/providers/__init__.py CHANGED
@@ -3,6 +3,7 @@
3
3
  from .anthropic import call_anthropic_api
4
4
  from .cerebras import call_cerebras_api
5
5
  from .chutes import call_chutes_api
6
+ from .fireworks import call_fireworks_api
6
7
  from .gemini import call_gemini_api
7
8
  from .groq import call_groq_api
8
9
  from .lmstudio import call_lmstudio_api
@@ -17,6 +18,7 @@ __all__ = [
17
18
  "call_anthropic_api",
18
19
  "call_cerebras_api",
19
20
  "call_chutes_api",
21
+ "call_fireworks_api",
20
22
  "call_gemini_api",
21
23
  "call_groq_api",
22
24
  "call_lmstudio_api",
@@ -0,0 +1,38 @@
1
+ """Fireworks AI API provider for gac."""
2
+
3
+ import os
4
+
5
+ import httpx
6
+
7
+ from gac.errors import AIError
8
+
9
+
10
+ def call_fireworks_api(model: str, messages: list[dict], temperature: float, max_tokens: int) -> str:
11
+ """Call Fireworks AI API directly."""
12
+ api_key = os.getenv("FIREWORKS_API_KEY")
13
+ if not api_key:
14
+ raise AIError.authentication_error("FIREWORKS_API_KEY not found in environment variables")
15
+
16
+ url = "https://api.fireworks.ai/inference/v1/chat/completions"
17
+ headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
18
+
19
+ data = {"model": model, "messages": messages, "temperature": temperature, "max_tokens": max_tokens}
20
+
21
+ try:
22
+ response = httpx.post(url, headers=headers, json=data, timeout=120)
23
+ response.raise_for_status()
24
+ response_data = response.json()
25
+ content = response_data["choices"][0]["message"]["content"]
26
+ if content is None:
27
+ raise AIError.model_error("Fireworks AI API returned null content")
28
+ if content == "":
29
+ raise AIError.model_error("Fireworks AI API returned empty content")
30
+ return content
31
+ except httpx.HTTPStatusError as e:
32
+ if e.response.status_code == 429:
33
+ raise AIError.rate_limit_error(f"Fireworks AI API rate limit exceeded: {e.response.text}") from e
34
+ raise AIError.model_error(f"Fireworks AI API error: {e.response.status_code} - {e.response.text}") from e
35
+ except httpx.TimeoutException as e:
36
+ raise AIError.timeout_error(f"Fireworks AI API request timed out: {str(e)}") from e
37
+ except Exception as e:
38
+ raise AIError.model_error(f"Error calling Fireworks AI API: {str(e)}") from e
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: gac
3
- Version: 1.9.5
3
+ Version: 1.10.1
4
4
  Summary: LLM-powered Git commit message generator with multi-provider support
5
5
  Project-URL: Homepage, https://github.com/cellwebb/gac
6
6
  Project-URL: Documentation, https://github.com/cellwebb/gac#readme
@@ -33,7 +33,6 @@ Requires-Dist: tiktoken>=0.12.0
33
33
  Provides-Extra: dev
34
34
  Requires-Dist: build; extra == 'dev'
35
35
  Requires-Dist: codecov; extra == 'dev'
36
- Requires-Dist: pre-commit; extra == 'dev'
37
36
  Requires-Dist: pytest; extra == 'dev'
38
37
  Requires-Dist: pytest-cov; extra == 'dev'
39
38
  Requires-Dist: ruff; extra == 'dev'
@@ -69,58 +68,52 @@ Instead of generic messages like `"update stuff"`, `"fix bug"`, or `"add feature
69
68
 
70
69
  ## Quick Start
71
70
 
72
- ### Install & Use
71
+ ### Use without installing
73
72
 
74
73
  ```bash
75
- # 1. Install globally
76
- uv tool install gac
77
-
78
- # 2. Configure your LLM provider
79
- gac init
80
-
81
- # 3. Use on your staged changes
82
- git add .
83
- gac
74
+ uvx gac init # Configure your LLM provider
75
+ uvx gac # Generate and commit with LLM
84
76
  ```
85
77
 
86
78
  That's it! Review the generated message and confirm with `y`.
87
79
 
88
- ### Use without installing
80
+ ### Install gac globally
89
81
 
90
82
  ```bash
91
- uvx gac init
92
- uvx gac
83
+ uv tool install gac
84
+ gac init
85
+ gac
93
86
  ```
94
87
 
95
88
  ---
96
89
 
97
90
  ## Key Features
98
91
 
99
- ### **Supported Providers**
92
+ ### 🌐 **Supported Providers**
100
93
 
101
- - **Anthropic** • **Cerebras** • **Chutes.ai** • **Gemini** • **Groq**
102
- - **LM Studio** • **Ollama** • **OpenAI** • **OpenRouter**
94
+ - **Anthropic** • **Cerebras** • **Chutes.ai** • **Fireworks** • **Gemini**
95
+ - **Groq** • **LM Studio** • **Ollama** • **OpenAI** • **OpenRouter**
103
96
  - **Streamlake** • **Synthetic.new** • **Z.AI** • **Z.AI Coding**
104
97
 
105
- ### **Smart LLM Analysis**
98
+ ### 🧠 **Smart LLM Analysis**
106
99
 
107
100
  - **Understands intent**: Analyzes code structure, logic, and patterns to understand the "why" behind your changes, not just what changed
108
101
  - **Semantic awareness**: Recognizes refactoring, bug fixes, features, and breaking changes to generate contextually appropriate messages
109
102
  - **Intelligent filtering**: Prioritizes meaningful changes while ignoring generated files, dependencies, and artifacts
110
103
 
111
- ### **Multiple Message Formats**
104
+ ### 📝 **Multiple Message Formats**
112
105
 
113
106
  - **One-liner** (-o flag): Single-line commit message following conventional commit format
114
107
  - **Standard** (default): Summary with bullet points explaining implementation details
115
108
  - **Verbose** (-v flag): Comprehensive explanations including motivation, technical approach, and impact analysis
116
109
 
117
- ### **Developer Experience**
110
+ ### 💻 **Developer Experience**
118
111
 
119
112
  - **Interactive feedback**: Regenerate messages with specific requests like `r "make it shorter"` or `r "focus on the bug fix"`
120
113
  - **One-command workflows**: Complete workflows with flags like `gac -ayp` (stage all, auto-confirm, push)
121
114
  - **Git integration**: Respects pre-commit and lefthook hooks, running them before expensive LLM operations
122
115
 
123
- ### **Built-in Security**
116
+ ### 🛡️ **Built-in Security**
124
117
 
125
118
  - **Automatic secret detection**: Scans for API keys, passwords, and tokens before committing
126
119
  - **Interactive protection**: Prompts before committing potentially sensitive data with clear remediation options
@@ -1,7 +1,7 @@
1
1
  gac/__init__.py,sha256=z9yGInqtycFIT3g1ca24r-A3699hKVaRqGUI79wsmMc,415
2
- gac/__version__.py,sha256=yksa_gnQUQ8A-21rG93rnvVwWUiw5gocNqEFu06B9pg,66
3
- gac/ai.py,sha256=-0rIZQCHC7yOEmkLtCLDzeInnrm960hVpiEELi8NM_U,3513
4
- gac/ai_utils.py,sha256=eqrpiBTueCgSOxUC2b4Ei0G4DM1GMt866sJEIhWpytU,7333
2
+ gac/__version__.py,sha256=lC-fukW_E9H6KMI2tR7GB0PL0fk8ddRKnvUgOWsJ5e8,67
3
+ gac/ai.py,sha256=QSGPrtFuzZh4LIjIt2HKVtdZ07xtSQXJRT7fxJ-Fufs,3898
4
+ gac/ai_utils.py,sha256=NBxmYAPLj3YcFHuo3TCJ4kpCtpAxnGJ7aG6_1Af9VOw,7314
5
5
  gac/cli.py,sha256=crUUI6osYtE3QAZ7r6DRlVk9gR3X2PctzS1sssVQ9_g,5070
6
6
  gac/config.py,sha256=n3TkQYBqSKkH68QUM6M7kwSK83ghmItoh0p5ZDFnhHA,1746
7
7
  gac/config_cli.py,sha256=v9nFHZO1RvK9fzHyuUS6SG-BCLHMsdOMDwWamBhVVh4,1608
@@ -9,16 +9,17 @@ gac/constants.py,sha256=8GHB7yeK2CYT0t80-k9N6LvgZPe-StNH3dK3NsUO46c,4977
9
9
  gac/diff_cli.py,sha256=wnVQ9OFGnM0d2Pj9WVjWbo0jxqIuRHVAwmb8wU9Pa3E,5676
10
10
  gac/errors.py,sha256=ysDIVRCd0YQVTOW3Q6YzdolxCdtkoQCAFf3_jrqbjUY,7916
11
11
  gac/git.py,sha256=g6tvph50zV-wrTWrxARYXEpl0NeI8-ffFwHoqhp3fSE,8033
12
- gac/init_cli.py,sha256=wq2MMi1xQrbVTib-5BsVdbPXQkPStB3G3Q8VnSKiKFQ,4740
13
- gac/main.py,sha256=IBzBuZkdEBpaxBLzuRw-LS-6wpOMt8admdKDLzP1gDc,16471
12
+ gac/init_cli.py,sha256=YZAFRWcQ1TeXCGwEEyBeCwai2QYa1U0ErBCE8shkSkU,4804
13
+ gac/main.py,sha256=dJrBSN5rJlbWspLGDx3eUJU4uZFVhvuv7qtgIvF7RH4,14723
14
14
  gac/preprocess.py,sha256=aMxsjGxy9YP752NWjgf0KP5Sn6p8keIJAGlMYr8jDgQ,15373
15
15
  gac/prompt.py,sha256=d_kBXmhf3bDVLyDj8J7AS7GBAxF2jlc8lXoHX3Dzi5k,24255
16
16
  gac/security.py,sha256=15Yp6YR8QC4eECJi1BUCkMteh_veZXUbLL6W8qGcDm4,9920
17
17
  gac/utils.py,sha256=nV42-brIHW_fBg7x855GM8nRrqEBbRzTSweg-GTyGE8,3971
18
- gac/providers/__init__.py,sha256=ejIM5vvmfTp7vfJSNeQQPIEJusOkKTUZpUE7OeWBc9Y,876
18
+ gac/providers/__init__.py,sha256=26G8YJgZh2gK21bNjuDmPOTPeU8kbJz6HmUcvm0TOfw,944
19
19
  gac/providers/anthropic.py,sha256=VK5d1s1PmBNDwh_x7illQ2CIZIHNIYU28btVfizwQPs,2036
20
20
  gac/providers/cerebras.py,sha256=Ik8lhlsliGJVkgDgqlThfpra9tqbdYQZkaC4eNxRd9w,1648
21
21
  gac/providers/chutes.py,sha256=cclJOLuGVIiiaF-9Bs1kH6SSOhEmduGB2zZ86KIaXKw,2617
22
+ gac/providers/fireworks.py,sha256=zsWhf6LMVdtsD9keXRFwgn9lCQigz6VmrDl6vqIVkdI,1688
22
23
  gac/providers/gemini.py,sha256=GZQz6Y9fd5-xk-U4pXn9bXLeBowxDXOYDyWyrtjFurM,2909
23
24
  gac/providers/groq.py,sha256=9v2fAjDa_iRNHFptiUBN8Vt7ZDKkW_JOmIBeYvycD1M,2806
24
25
  gac/providers/lmstudio.py,sha256=R82-f0tWdFfGQxLT6o3Q2tfvYguF7ESUg9DEUHNyrDk,2146
@@ -28,8 +29,8 @@ gac/providers/openrouter.py,sha256=H3ce8JcRUYq1I30lOjGESdX7jfoPkW3mKAYnc2aYfBw,2
28
29
  gac/providers/streamlake.py,sha256=KAA2ZnpuEI5imzvdWVWUhEBHSP0BMnprKXte6CbwBWY,2047
29
30
  gac/providers/synthetic.py,sha256=sRMIJTS9LpcXd9A7qp_ZjZxdqtTKRn9fl1W4YwJZP4c,1855
30
31
  gac/providers/zai.py,sha256=kywhhrCfPBu0rElZyb-iENxQxxpVGykvePuL4xrXlaU,2739
31
- gac-1.9.5.dist-info/METADATA,sha256=NEi3DaMvArxIi6h9UZZ_yVNa6_HCQDaEW1ZOXLFWCGM,7680
32
- gac-1.9.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
33
- gac-1.9.5.dist-info/entry_points.txt,sha256=tdjN-XMmcWfL92swuRAjT62bFLOAwk9bTMRLGP5Z4aI,36
34
- gac-1.9.5.dist-info/licenses/LICENSE,sha256=vOab37NouL1PNs5BswnPayrMCqaN2sqLfMQfqPDrpZg,1103
35
- gac-1.9.5.dist-info/RECORD,,
32
+ gac-1.10.1.dist-info/METADATA,sha256=0dMoJ3LQ9WFGGsnBxfcYcBE4JwNo_qtgHr7oN3z2RJI,7656
33
+ gac-1.10.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
34
+ gac-1.10.1.dist-info/entry_points.txt,sha256=tdjN-XMmcWfL92swuRAjT62bFLOAwk9bTMRLGP5Z4aI,36
35
+ gac-1.10.1.dist-info/licenses/LICENSE,sha256=vOab37NouL1PNs5BswnPayrMCqaN2sqLfMQfqPDrpZg,1103
36
+ gac-1.10.1.dist-info/RECORD,,
File without changes