gac 2.7.3.tar.gz → 3.12.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (109)
  1. {gac-2.7.3 → gac-3.12.0}/PKG-INFO +15 -9
  2. {gac-2.7.3 → gac-3.12.0}/README.md +14 -6
  3. {gac-2.7.3 → gac-3.12.0}/pyproject.toml +3 -9
  4. {gac-2.7.3 → gac-3.12.0}/src/gac/__init__.py +4 -6
  5. {gac-2.7.3 → gac-3.12.0}/src/gac/__version__.py +1 -1
  6. {gac-2.7.3 → gac-3.12.0}/src/gac/ai.py +5 -49
  7. {gac-2.7.3 → gac-3.12.0}/src/gac/ai_utils.py +83 -113
  8. gac-3.12.0/src/gac/auth_cli.py +214 -0
  9. {gac-2.7.3 → gac-3.12.0}/src/gac/cli.py +48 -10
  10. gac-3.12.0/src/gac/commit_executor.py +59 -0
  11. gac-3.12.0/src/gac/config.py +123 -0
  12. gac-3.12.0/src/gac/config_cli.py +95 -0
  13. gac-3.12.0/src/gac/constants/__init__.py +34 -0
  14. gac-3.12.0/src/gac/constants/commit.py +63 -0
  15. gac-3.12.0/src/gac/constants/defaults.py +38 -0
  16. gac-3.12.0/src/gac/constants/file_patterns.py +110 -0
  17. gac-3.12.0/src/gac/constants/languages.py +119 -0
  18. {gac-2.7.3 → gac-3.12.0}/src/gac/diff_cli.py +0 -22
  19. {gac-2.7.3 → gac-3.12.0}/src/gac/errors.py +8 -2
  20. {gac-2.7.3 → gac-3.12.0}/src/gac/git.py +53 -6
  21. gac-3.12.0/src/gac/git_state_validator.py +193 -0
  22. gac-3.12.0/src/gac/grouped_commit_workflow.py +474 -0
  23. gac-3.12.0/src/gac/init_cli.py +70 -0
  24. gac-3.12.0/src/gac/interactive_mode.py +179 -0
  25. gac-3.12.0/src/gac/language_cli.py +377 -0
  26. gac-3.12.0/src/gac/main.py +328 -0
  27. gac-2.7.3/src/gac/init_cli.py → gac-3.12.0/src/gac/model_cli.py +169 -222
  28. gac-3.12.0/src/gac/model_identifier.py +70 -0
  29. gac-3.12.0/src/gac/oauth/__init__.py +27 -0
  30. {gac-2.7.3 → gac-3.12.0}/src/gac/oauth/claude_code.py +89 -22
  31. gac-3.12.0/src/gac/oauth/qwen_oauth.py +327 -0
  32. gac-3.12.0/src/gac/oauth/token_store.py +81 -0
  33. gac-3.12.0/src/gac/oauth_retry.py +161 -0
  34. gac-3.12.0/src/gac/postprocess.py +155 -0
  35. gac-3.12.0/src/gac/prompt.py +425 -0
  36. gac-3.12.0/src/gac/prompt_builder.py +88 -0
  37. gac-3.12.0/src/gac/prompt_cli.py +266 -0
  38. gac-3.12.0/src/gac/providers/README.md +437 -0
  39. gac-3.12.0/src/gac/providers/__init__.py +80 -0
  40. gac-3.12.0/src/gac/providers/anthropic.py +17 -0
  41. gac-3.12.0/src/gac/providers/azure_openai.py +57 -0
  42. gac-3.12.0/src/gac/providers/base.py +337 -0
  43. gac-3.12.0/src/gac/providers/cerebras.py +15 -0
  44. gac-3.12.0/src/gac/providers/chutes.py +25 -0
  45. gac-3.12.0/src/gac/providers/claude_code.py +79 -0
  46. gac-3.12.0/src/gac/providers/custom_anthropic.py +103 -0
  47. gac-3.12.0/src/gac/providers/custom_openai.py +44 -0
  48. gac-3.12.0/src/gac/providers/deepseek.py +15 -0
  49. gac-3.12.0/src/gac/providers/error_handler.py +139 -0
  50. gac-3.12.0/src/gac/providers/fireworks.py +15 -0
  51. gac-3.12.0/src/gac/providers/gemini.py +90 -0
  52. gac-3.12.0/src/gac/providers/groq.py +15 -0
  53. gac-3.12.0/src/gac/providers/kimi_coding.py +27 -0
  54. gac-3.12.0/src/gac/providers/lmstudio.py +80 -0
  55. gac-3.12.0/src/gac/providers/minimax.py +15 -0
  56. gac-3.12.0/src/gac/providers/mistral.py +15 -0
  57. gac-3.12.0/src/gac/providers/moonshot.py +15 -0
  58. gac-3.12.0/src/gac/providers/ollama.py +73 -0
  59. gac-3.12.0/src/gac/providers/openai.py +32 -0
  60. gac-3.12.0/src/gac/providers/openrouter.py +21 -0
  61. gac-3.12.0/src/gac/providers/protocol.py +71 -0
  62. gac-3.12.0/src/gac/providers/qwen.py +64 -0
  63. gac-3.12.0/src/gac/providers/registry.py +58 -0
  64. gac-3.12.0/src/gac/providers/replicate.py +156 -0
  65. gac-3.12.0/src/gac/providers/streamlake.py +31 -0
  66. gac-3.12.0/src/gac/providers/synthetic.py +40 -0
  67. gac-3.12.0/src/gac/providers/together.py +15 -0
  68. gac-3.12.0/src/gac/providers/zai.py +31 -0
  69. gac-3.12.0/src/gac/py.typed +0 -0
  70. {gac-2.7.3 → gac-3.12.0}/src/gac/security.py +1 -1
  71. gac-3.12.0/src/gac/templates/__init__.py +1 -0
  72. gac-3.12.0/src/gac/templates/question_generation.txt +60 -0
  73. gac-3.12.0/src/gac/templates/system_prompt.txt +224 -0
  74. gac-3.12.0/src/gac/templates/user_prompt.txt +28 -0
  75. {gac-2.7.3 → gac-3.12.0}/src/gac/utils.py +36 -6
  76. gac-3.12.0/src/gac/workflow_context.py +162 -0
  77. {gac-2.7.3 → gac-3.12.0}/src/gac/workflow_utils.py +89 -6
  78. gac-2.7.3/src/gac/auth_cli.py +0 -69
  79. gac-2.7.3/src/gac/config.py +0 -49
  80. gac-2.7.3/src/gac/config_cli.py +0 -62
  81. gac-2.7.3/src/gac/constants.py +0 -321
  82. gac-2.7.3/src/gac/language_cli.py +0 -253
  83. gac-2.7.3/src/gac/main.py +0 -767
  84. gac-2.7.3/src/gac/oauth/__init__.py +0 -1
  85. gac-2.7.3/src/gac/prompt.py +0 -785
  86. gac-2.7.3/src/gac/providers/__init__.py +0 -46
  87. gac-2.7.3/src/gac/providers/anthropic.py +0 -51
  88. gac-2.7.3/src/gac/providers/cerebras.py +0 -38
  89. gac-2.7.3/src/gac/providers/chutes.py +0 -71
  90. gac-2.7.3/src/gac/providers/claude_code.py +0 -102
  91. gac-2.7.3/src/gac/providers/custom_anthropic.py +0 -133
  92. gac-2.7.3/src/gac/providers/custom_openai.py +0 -99
  93. gac-2.7.3/src/gac/providers/deepseek.py +0 -38
  94. gac-2.7.3/src/gac/providers/fireworks.py +0 -38
  95. gac-2.7.3/src/gac/providers/gemini.py +0 -87
  96. gac-2.7.3/src/gac/providers/groq.py +0 -63
  97. gac-2.7.3/src/gac/providers/lmstudio.py +0 -59
  98. gac-2.7.3/src/gac/providers/minimax.py +0 -38
  99. gac-2.7.3/src/gac/providers/mistral.py +0 -38
  100. gac-2.7.3/src/gac/providers/ollama.py +0 -50
  101. gac-2.7.3/src/gac/providers/openai.py +0 -38
  102. gac-2.7.3/src/gac/providers/openrouter.py +0 -58
  103. gac-2.7.3/src/gac/providers/streamlake.py +0 -51
  104. gac-2.7.3/src/gac/providers/synthetic.py +0 -42
  105. gac-2.7.3/src/gac/providers/together.py +0 -38
  106. gac-2.7.3/src/gac/providers/zai.py +0 -59
  107. {gac-2.7.3 → gac-3.12.0}/.gitignore +0 -0
  108. {gac-2.7.3 → gac-3.12.0}/LICENSE +0 -0
  109. {gac-2.7.3 → gac-3.12.0}/src/gac/preprocess.py +0 -0
{gac-2.7.3 → gac-3.12.0}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: gac
- Version: 2.7.3
+ Version: 3.12.0
  Summary: LLM-powered Git commit message generator with multi-provider support
  Project-URL: Homepage, https://github.com/cellwebb/gac
  Project-URL: Documentation, https://github.com/cellwebb/gac#readme
@@ -22,7 +22,6 @@ Classifier: Programming Language :: Python :: Implementation :: CPython
  Classifier: Programming Language :: Python :: Implementation :: PyPy
  Requires-Python: >=3.10
  Requires-Dist: click>=8.3.0
- Requires-Dist: halo
  Requires-Dist: httpcore>=1.0.9
  Requires-Dist: httpx>=0.28.0
  Requires-Dist: prompt-toolkit>=3.0.36
@@ -30,7 +29,6 @@ Requires-Dist: pydantic>=2.12.0
  Requires-Dist: python-dotenv>=1.1.1
  Requires-Dist: questionary
  Requires-Dist: rich>=14.1.0
- Requires-Dist: tiktoken>=0.12.0
  Provides-Extra: dev
  Requires-Dist: build; extra == 'dev'
  Requires-Dist: codecov; extra == 'dev'
@@ -48,7 +46,7 @@ Description-Content-Type: text/markdown
  # 🚀 Git Auto Commit (gac)

  [![PyPI version](https://img.shields.io/pypi/v/gac.svg)](https://pypi.org/project/gac/)
- [![Python](https://img.shields.io/badge/python-3.10%20|%203.11%20|%203.12%20|%203.13%20|%203.14-blue.svg)](https://www.python.org/downloads/)
+ [![Python](https://img.shields.io/badge/python-3.10--3.14-blue.svg)](https://www.python.org/downloads/)
  [![Build Status](https://github.com/cellwebb/gac/actions/workflows/ci.yml/badge.svg)](https://github.com/cellwebb/gac/actions)
  [![codecov](https://codecov.io/gh/cellwebb/gac/branch/main/graph/badge.svg)](https://app.codecov.io/gh/cellwebb/gac)
  [![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)
@@ -105,11 +103,12 @@ uv tool upgrade gac

  ## Key Features

- ### 🌐 **Supported Providers**
+ ### 🌐 **25+ Supported Providers**

- - **Anthropic** • **Cerebras** • **Chutes.ai** • **Claude Code** **DeepSeek**
- - **Fireworks** • **Gemini** • **Groq** • **LM Studio** • **MiniMax** **Mistral** • **Ollama**
- - **OpenAI** • **OpenRouter** • **Streamlake** • **Synthetic.new** • **Together AI**
+ - **Anthropic** • **Azure OpenAI** • **Cerebras** • **Chutes.ai** • **Claude Code (OAuth)**
+ - **DeepSeek** • **Fireworks** • **Gemini** • **Groq** • **Kimi for Coding** • **LM Studio**
+ - **MiniMax.io** • **Mistral AI** • **Moonshot AI** • **Ollama** • **OpenAI** **OpenRouter**
+ - **Qwen.ai (OAuth)** • **Replicate** • **Streamlake** • **Synthetic.new** • **Together AI**
  - **Z.AI** • **Z.AI Coding** • **Custom Endpoints (Anthropic/OpenAI)**

  ### 🧠 **Smart LLM Analysis**
@@ -130,11 +129,12 @@ uv tool upgrade gac
  - **25+ languages**: Generate commit messages in English, Chinese, Japanese, Korean, Spanish, French, German, and 20+ more languages
  - **Flexible translation**: Choose to keep conventional commit prefixes in English for tool compatibility, or fully translate them
  - **Multiple workflows**: Set a default language with `gac language`, or use `-l <language>` flag for one-time overrides
- - **Native script support**: Full support for non-Latin scripts including CJK, Cyrillic, Arabic, and more
+ - **Native script support**: Full support for non-Latin scripts including CJK, Cyrillic, Thai, and more

  ### 💻 **Developer Experience**

  - **Interactive feedback**: Type `r` to reroll, `e` to edit in-place with vi/emacs keybindings, or directly type your feedback like `make it shorter` or `focus on the bug fix`
+ - **Interactive questioning**: Use `--interactive` (`-i`) to answer targeted questions about your changes for more contextual commit messages
  - **One-command workflows**: Complete workflows with flags like `gac -ayp` (stage all, auto-confirm, push)
  - **Git integration**: Respects pre-commit and lefthook hooks, running them before expensive LLM operations

@@ -171,6 +171,7 @@ gac
  | `gac -v` | Verbose format with Motivation, Technical Approach, and Impact Analysis |
  | `gac -h "hint"` | Add context for LLM (e.g., `gac -h "bug fix"`) |
  | `gac -s` | Include scope (e.g., feat(auth):) |
+ | `gac -i` | Ask questions about changes for better context |
  | `gac -p` | Commit and push |

  ### Power User Examples
@@ -188,6 +189,9 @@ gac -o
  # Group changes into logically related commits
  gac -ag

+ # Interactive mode with verbose output for detailed explanations
+ gac -iv
+
  # Debug what the LLM sees
  gac --show-prompt

@@ -257,6 +261,8 @@ Track real-time installation metrics and package download statistics.
  ## Getting Help

  - **Full documentation**: [docs/USAGE.md](docs/en/USAGE.md) - Complete CLI reference
+ - **Claude Code OAuth**: [docs/CLAUDE_CODE.md](docs/en/CLAUDE_CODE.md) - Claude Code setup and authentication
+ - **Qwen.ai OAuth**: [docs/QWEN.md](docs/en/QWEN.md) - Qwen.ai setup and authentication
  - **Custom prompts**: [docs/CUSTOM_SYSTEM_PROMPTS.md](docs/en/CUSTOM_SYSTEM_PROMPTS.md) - Customize commit message style
  - **Troubleshooting**: [docs/TROUBLESHOOTING.md](docs/en/TROUBLESHOOTING.md) - Common issues and solutions
  - **Contributing**: [docs/CONTRIBUTING.md](docs/en/CONTRIBUTING.md) - Development setup and guidelines
{gac-2.7.3 → gac-3.12.0}/README.md

@@ -6,7 +6,7 @@
  # 🚀 Git Auto Commit (gac)

  [![PyPI version](https://img.shields.io/pypi/v/gac.svg)](https://pypi.org/project/gac/)
- [![Python](https://img.shields.io/badge/python-3.10%20|%203.11%20|%203.12%20|%203.13%20|%203.14-blue.svg)](https://www.python.org/downloads/)
+ [![Python](https://img.shields.io/badge/python-3.10--3.14-blue.svg)](https://www.python.org/downloads/)
  [![Build Status](https://github.com/cellwebb/gac/actions/workflows/ci.yml/badge.svg)](https://github.com/cellwebb/gac/actions)
  [![codecov](https://codecov.io/gh/cellwebb/gac/branch/main/graph/badge.svg)](https://app.codecov.io/gh/cellwebb/gac)
  [![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)
@@ -63,11 +63,12 @@ uv tool upgrade gac

  ## Key Features

- ### 🌐 **Supported Providers**
+ ### 🌐 **25+ Supported Providers**

- - **Anthropic** • **Cerebras** • **Chutes.ai** • **Claude Code** **DeepSeek**
- - **Fireworks** • **Gemini** • **Groq** • **LM Studio** • **MiniMax** **Mistral** • **Ollama**
- - **OpenAI** • **OpenRouter** • **Streamlake** • **Synthetic.new** • **Together AI**
+ - **Anthropic** • **Azure OpenAI** • **Cerebras** • **Chutes.ai** • **Claude Code (OAuth)**
+ - **DeepSeek** • **Fireworks** • **Gemini** • **Groq** • **Kimi for Coding** • **LM Studio**
+ - **MiniMax.io** • **Mistral AI** • **Moonshot AI** • **Ollama** • **OpenAI** **OpenRouter**
+ - **Qwen.ai (OAuth)** • **Replicate** • **Streamlake** • **Synthetic.new** • **Together AI**
  - **Z.AI** • **Z.AI Coding** • **Custom Endpoints (Anthropic/OpenAI)**

  ### 🧠 **Smart LLM Analysis**
@@ -88,11 +89,12 @@ uv tool upgrade gac
  - **25+ languages**: Generate commit messages in English, Chinese, Japanese, Korean, Spanish, French, German, and 20+ more languages
  - **Flexible translation**: Choose to keep conventional commit prefixes in English for tool compatibility, or fully translate them
  - **Multiple workflows**: Set a default language with `gac language`, or use `-l <language>` flag for one-time overrides
- - **Native script support**: Full support for non-Latin scripts including CJK, Cyrillic, Arabic, and more
+ - **Native script support**: Full support for non-Latin scripts including CJK, Cyrillic, Thai, and more

  ### 💻 **Developer Experience**

  - **Interactive feedback**: Type `r` to reroll, `e` to edit in-place with vi/emacs keybindings, or directly type your feedback like `make it shorter` or `focus on the bug fix`
+ - **Interactive questioning**: Use `--interactive` (`-i`) to answer targeted questions about your changes for more contextual commit messages
  - **One-command workflows**: Complete workflows with flags like `gac -ayp` (stage all, auto-confirm, push)
  - **Git integration**: Respects pre-commit and lefthook hooks, running them before expensive LLM operations

@@ -129,6 +131,7 @@ gac
  | `gac -v` | Verbose format with Motivation, Technical Approach, and Impact Analysis |
  | `gac -h "hint"` | Add context for LLM (e.g., `gac -h "bug fix"`) |
  | `gac -s` | Include scope (e.g., feat(auth):) |
+ | `gac -i` | Ask questions about changes for better context |
  | `gac -p` | Commit and push |

  ### Power User Examples
@@ -146,6 +149,9 @@ gac -o
  # Group changes into logically related commits
  gac -ag

+ # Interactive mode with verbose output for detailed explanations
+ gac -iv
+
  # Debug what the LLM sees
  gac --show-prompt

@@ -215,6 +221,8 @@ Track real-time installation metrics and package download statistics.
  ## Getting Help

  - **Full documentation**: [docs/USAGE.md](docs/en/USAGE.md) - Complete CLI reference
+ - **Claude Code OAuth**: [docs/CLAUDE_CODE.md](docs/en/CLAUDE_CODE.md) - Claude Code setup and authentication
+ - **Qwen.ai OAuth**: [docs/QWEN.md](docs/en/QWEN.md) - Qwen.ai setup and authentication
  - **Custom prompts**: [docs/CUSTOM_SYSTEM_PROMPTS.md](docs/en/CUSTOM_SYSTEM_PROMPTS.md) - Customize commit message style
  - **Troubleshooting**: [docs/TROUBLESHOOTING.md](docs/en/TROUBLESHOOTING.md) - Common issues and solutions
  - **Contributing**: [docs/CONTRIBUTING.md](docs/en/CONTRIBUTING.md) - Development setup and guidelines
{gac-2.7.3 → gac-3.12.0}/pyproject.toml

@@ -29,8 +29,7 @@ dependencies = [
  "httpx>=0.28.0",
  "httpcore>=1.0.9", # Required for Python 3.14 compatibility

- # Token counting (OpenAI models)
- "tiktoken>=0.12.0",
+

  # Core functionality
  "pydantic>=2.12.0",
@@ -38,7 +37,6 @@ dependencies = [

  # CLI and formatting
  "click>=8.3.0",
- "halo",
  "questionary",
  "rich>=14.1.0",
  "prompt_toolkit>=3.0.36",
@@ -203,8 +201,8 @@ addopts = "-m 'not integration'"
  python_version = "3.10"
  warn_return_any = true
  warn_unused_configs = true
- disallow_untyped_defs = false
- disallow_incomplete_defs = false
+ disallow_untyped_defs = true
+ disallow_incomplete_defs = true
  check_untyped_defs = true
  no_implicit_optional = true
  warn_redundant_casts = true
@@ -218,10 +216,6 @@ show_error_codes = true
  module = "gac.providers.*"
  warn_return_any = false

- [[tool.mypy.overrides]]
- module = "halo"
- ignore_missing_imports = true
-
  [template.plugins.default]
  tests = true
  src-layout = true
{gac-2.7.3 → gac-3.12.0}/src/gac/__init__.py

@@ -1,15 +1,13 @@
  """Git Auto Commit (gac) - Generate commit messages using AI."""

+ from gac import init_cli
  from gac.__version__ import __version__
  from gac.ai import generate_commit_message
- from gac.git import get_staged_files, push_changes
- from gac.prompt import build_prompt, clean_commit_message
+ from gac.prompt import build_prompt

  __all__ = [
  "__version__",
- "generate_commit_message",
  "build_prompt",
- "clean_commit_message",
- "get_staged_files",
- "push_changes",
+ "generate_commit_message",
+ "init_cli",
  ]
{gac-2.7.3 → gac-3.12.0}/src/gac/__version__.py

@@ -1,3 +1,3 @@
  """Version information for gac package."""

- __version__ = "2.7.3"
+ __version__ = "3.12.0"
{gac-2.7.3 → gac-3.12.0}/src/gac/ai.py

@@ -9,29 +9,7 @@ import logging
  from gac.ai_utils import generate_with_retries
  from gac.constants import EnvDefaults
  from gac.errors import AIError
- from gac.providers import (
- call_anthropic_api,
- call_cerebras_api,
- call_chutes_api,
- call_claude_code_api,
- call_custom_anthropic_api,
- call_custom_openai_api,
- call_deepseek_api,
- call_fireworks_api,
- call_gemini_api,
- call_groq_api,
- call_lmstudio_api,
- call_minimax_api,
- call_mistral_api,
- call_ollama_api,
- call_openai_api,
- call_openrouter_api,
- call_streamlake_api,
- call_synthetic_api,
- call_together_api,
- call_zai_api,
- call_zai_coding_api,
- )
+ from gac.providers import PROVIDER_REGISTRY

  logger = logging.getLogger(__name__)

@@ -45,6 +23,7 @@ def generate_commit_message(
  quiet: bool = False,
  is_group: bool = False,
  skip_success_message: bool = False,
+ task_description: str = "commit message",
  ) -> str:
  """Generate a commit message using direct API calls to AI providers.

@@ -85,35 +64,10 @@
  {"role": "user", "content": user_prompt},
  ]

- # Provider functions mapping
- provider_funcs = {
- "anthropic": call_anthropic_api,
- "cerebras": call_cerebras_api,
- "claude-code": call_claude_code_api,
- "chutes": call_chutes_api,
- "custom-anthropic": call_custom_anthropic_api,
- "custom-openai": call_custom_openai_api,
- "deepseek": call_deepseek_api,
- "fireworks": call_fireworks_api,
- "gemini": call_gemini_api,
- "groq": call_groq_api,
- "lm-studio": call_lmstudio_api,
- "minimax": call_minimax_api,
- "mistral": call_mistral_api,
- "ollama": call_ollama_api,
- "openai": call_openai_api,
- "openrouter": call_openrouter_api,
- "streamlake": call_streamlake_api,
- "synthetic": call_synthetic_api,
- "together": call_together_api,
- "zai": call_zai_api,
- "zai-coding": call_zai_coding_api,
- }
-
  # Generate the commit message using centralized retry logic
  try:
  return generate_with_retries(
- provider_funcs=provider_funcs,
+ provider_funcs=PROVIDER_REGISTRY,
  model=model,
  messages=messages,
  temperature=temperature,
@@ -122,6 +76,7 @@
  quiet=quiet,
  is_group=is_group,
  skip_success_message=skip_success_message,
+ task_description=task_description,
  )
  except AIError:
  # Re-raise AIError exceptions as-is to preserve error classification
@@ -150,4 +105,5 @@ def generate_grouped_commits(
  quiet=quiet,
  is_group=True,
  skip_success_message=skip_success_message,
+ task_description="commit message",
  )
{gac-2.7.3 → gac-3.12.0}/src/gac/ai_utils.py

@@ -3,44 +3,35 @@
  This module provides utility functions that support the AI provider implementations.
  """

+ import json
  import logging
  import os
  import time
- from functools import lru_cache
- from typing import Any
+ from collections.abc import Callable
+ from typing import Any, cast

- import tiktoken
- from halo import Halo
+ from rich.console import Console
+ from rich.status import Status

- from gac.constants import EnvDefaults, Utility
  from gac.errors import AIError
+ from gac.oauth import QwenOAuthProvider, refresh_token_if_expired
+ from gac.oauth.token_store import TokenStore
+ from gac.providers import SUPPORTED_PROVIDERS

  logger = logging.getLogger(__name__)
-
-
- @lru_cache(maxsize=1)
- def _should_skip_tiktoken_counting() -> bool:
- """Return True when token counting should avoid tiktoken calls entirely."""
- value = os.getenv("GAC_NO_TIKTOKEN", str(EnvDefaults.NO_TIKTOKEN))
- return value.lower() in ("true", "1", "yes", "on")
+ console = Console()


  def count_tokens(content: str | list[dict[str, str]] | dict[str, Any], model: str) -> int:
- """Count tokens in content using the model's tokenizer."""
+ """Count tokens in content using character-based estimation (1 token per 3.4 characters)."""
  text = extract_text_content(content)
  if not text:
  return 0

- if _should_skip_tiktoken_counting():
- return len(text) // 4
-
- try:
- encoding = get_encoding(model)
- return len(encoding.encode(text))
- except Exception as e:
- logger.error(f"Error counting tokens: {e}")
- # Fallback to rough estimation (4 chars per token on average)
- return len(text) // 4
+ # Use simple character-based estimation: 1 token per 3.4 characters (rounded)
+ result = round(len(text) / 3.4)
+ # Ensure at least 1 token for non-empty text
+ return result if result > 0 else 1


  def extract_text_content(content: str | list[dict[str, str]] | dict[str, Any]) -> str:
@@ -50,53 +41,12 @@ def extract_text_content(content: str | list[dict[str, str]] | dict[str, Any]) -
  elif isinstance(content, list):
  return "\n".join(msg["content"] for msg in content if isinstance(msg, dict) and "content" in msg)
  elif isinstance(content, dict) and "content" in content:
- return content["content"] # type: ignore[no-any-return]
+ return cast(str, content["content"])
  return ""


- @lru_cache(maxsize=1)
- def get_encoding(model: str) -> tiktoken.Encoding:
- """Get the appropriate encoding for a given model."""
- provider, model_name = model.split(":", 1) if ":" in model else (None, model)
-
- if provider != "openai":
- return tiktoken.get_encoding(Utility.DEFAULT_ENCODING)
-
- try:
- return tiktoken.encoding_for_model(model_name)
- except KeyError:
- # Fall back to default encoding if model not found
- return tiktoken.get_encoding(Utility.DEFAULT_ENCODING)
- except Exception:
- # If there are any network/SSL issues, fall back to default encoding
- return tiktoken.get_encoding(Utility.DEFAULT_ENCODING)
-
-
- def _classify_error(error_str: str) -> str:
- """Classify error types based on error message content."""
- error_str = error_str.lower()
-
- if (
- "api key" in error_str
- or "unauthorized" in error_str
- or "authentication" in error_str
- or "invalid api key" in error_str
- ):
- return "authentication"
- elif "timeout" in error_str or "timed out" in error_str or "request timeout" in error_str:
- return "timeout"
- elif "rate limit" in error_str or "too many requests" in error_str or "rate limit exceeded" in error_str:
- return "rate_limit"
- elif "connect" in error_str or "network" in error_str or "network connection failed" in error_str:
- return "connection"
- elif "model" in error_str or "not found" in error_str or "model not found" in error_str:
- return "model"
- else:
- return "unknown"
-
-
  def generate_with_retries(
- provider_funcs: dict,
+ provider_funcs: dict[str, Callable[..., str]],
  model: str,
  messages: list[dict[str, str]],
  temperature: float,
@@ -105,6 +55,7 @@ def generate_with_retries(
  quiet: bool = False,
  is_group: bool = False,
  skip_success_message: bool = False,
+ task_description: str = "commit message",
  ) -> str:
  """Generate content with retry logic using direct API calls."""
  # Parse model string to determine provider and actual model
@@ -114,51 +65,74 @@ def generate_with_retries(
  provider, model_name = model.split(":", 1)

  # Validate provider
- supported_providers = [
- "anthropic",
- "cerebras",
- "chutes",
- "claude-code",
- "deepseek",
- "fireworks",
- "gemini",
- "groq",
- "lm-studio",
- "minimax",
- "mistral",
- "ollama",
- "openai",
- "openrouter",
- "streamlake",
- "synthetic",
- "together",
- "zai",
- "zai-coding",
- "custom-anthropic",
- "custom-openai",
- ]
- if provider not in supported_providers:
- raise AIError.model_error(f"Unsupported provider: {provider}. Supported providers: {supported_providers}")
+ if provider not in SUPPORTED_PROVIDERS:
+ raise AIError.model_error(f"Unsupported provider: {provider}. Supported providers: {SUPPORTED_PROVIDERS}")

  if not messages:
  raise AIError.model_error("No messages provided for AI generation")

+ # Load Claude Code token from TokenStore if needed
+ if provider == "claude-code":
+ # Check token expiry and refresh if needed
+ if not refresh_token_if_expired(quiet=True):
+ raise AIError.authentication_error(
+ "Claude Code token not found or expired. Please authenticate with 'gac auth claude-code login'."
+ )
+
+ # Load the (possibly refreshed) token
+ token_store = TokenStore()
+ token_data = token_store.get_token("claude-code")
+ if token_data and "access_token" in token_data:
+ os.environ["CLAUDE_CODE_ACCESS_TOKEN"] = token_data["access_token"]
+ else:
+ raise AIError.authentication_error(
+ "Claude Code token not found. Please authenticate with 'gac auth claude-code login'."
+ )
+
+ # Check Qwen OAuth token expiry and refresh if needed
+ if provider == "qwen":
+ oauth_provider = QwenOAuthProvider(TokenStore())
+ token = oauth_provider.get_token()
+ if not token:
+ if not quiet:
+ console.print("[yellow]⚠ Qwen authentication not found or expired[/yellow]")
+ console.print("[cyan]🔐 Starting automatic authentication...[/cyan]")
+ try:
+ oauth_provider.initiate_auth(open_browser=True)
+ token = oauth_provider.get_token()
+ if not token:
+ raise AIError.authentication_error(
+ "Qwen authentication failed. Run 'gac auth qwen login' to authenticate manually."
+ )
+ if not quiet:
+ console.print("[green]✓ Authentication successful![/green]\n")
+ except AIError:
+ raise
+ except (ValueError, KeyError, json.JSONDecodeError, ConnectionError, OSError) as e:
+ raise AIError.authentication_error(
+ f"Qwen authentication failed: {e}. Run 'gac auth qwen login' to authenticate manually."
+ ) from e
+
  # Set up spinner
- message_type = "commit messages" if is_group else "commit message"
+ if is_group:
+ message_type = f"grouped {task_description}s"
+ else:
+ message_type = task_description
+
  if quiet:
  spinner = None
  else:
- spinner = Halo(text=f"Generating {message_type} with {provider} {model_name}...", spinner="dots")
+ spinner = Status(f"Generating {message_type} with {provider} {model_name}...")
  spinner.start()

- last_exception = None
+ last_exception: Exception | None = None
  last_error_type = "unknown"

  for attempt in range(max_retries):
  try:
  if not quiet and not skip_success_message and attempt > 0:
  if spinner:
- spinner.text = f"Retry {attempt + 1}/{max_retries} with {provider} {model_name}..."
+ spinner.update(f"Retry {attempt + 1}/{max_retries} with {provider} {model_name}...")
  logger.info(f"Retry attempt {attempt + 1}/{max_retries}")

  # Call the appropriate provider function
@@ -172,54 +146,50 @@ def generate_with_retries(
  if skip_success_message:
  spinner.stop() # Stop spinner without showing success/failure
  else:
- spinner.succeed(f"Generated {message_type} with {provider} {model_name}")
+ spinner.stop()
+ console.print(f"✓ Generated {message_type} with {provider} {model_name}")

  if content is not None and content.strip():
- return content.strip() # type: ignore[no-any-return]
+ return content.strip()
  else:
  logger.warning(f"Empty or None content received from {provider} {model_name}: {repr(content)}")
  raise AIError.model_error("Empty response from AI model")

- except Exception as e:
+ except AIError as e:
  last_exception = e
- error_type = _classify_error(str(e))
+ error_type = e.error_type
  last_error_type = error_type

  # For authentication and model errors, don't retry
  if error_type in ["authentication", "model"]:
  if spinner and not skip_success_message:
- spinner.fail(f"Failed to generate {message_type} with {provider} {model_name}")
-
- # Create the appropriate error type based on classification
- if error_type == "authentication":
- raise AIError.authentication_error(f"AI generation failed: {str(e)}") from e
- elif error_type == "model":
- raise AIError.model_error(f"AI generation failed: {str(e)}") from e
+ spinner.stop()
+ console.print(f"✗ Failed to generate {message_type} with {provider} {model_name}")
+ raise

  if attempt < max_retries - 1:
  # Exponential backoff
  wait_time = 2**attempt
  if not quiet and not skip_success_message:
  if attempt == 0:
- logger.warning(f"AI generation failed, retrying in {wait_time}s: {str(e)}")
+ logger.warning(f"AI generation failed, retrying in {wait_time}s: {e}")
  else:
- logger.warning(
- f"AI generation failed (attempt {attempt + 1}), retrying in {wait_time}s: {str(e)}"
- )
+ logger.warning(f"AI generation failed (attempt {attempt + 1}), retrying in {wait_time}s: {e}")

  if spinner and not skip_success_message:
  for i in range(wait_time, 0, -1):
- spinner.text = f"Retry {attempt + 1}/{max_retries} in {i}s..."
+ spinner.update(f"Retry {attempt + 1}/{max_retries} in {i}s...")
  time.sleep(1)
  else:
  time.sleep(wait_time)
  else:
  num_retries = max_retries
  retry_word = "retry" if num_retries == 1 else "retries"
- logger.error(f"AI generation failed after {num_retries} {retry_word}: {str(e)}")
+ logger.error(f"AI generation failed after {num_retries} {retry_word}: {e}")

  if spinner and not skip_success_message:
- spinner.fail(f"Failed to generate {message_type} with {provider} {model_name}")
+ spinner.stop()
+ console.print(f"✗ Failed to generate {message_type} with {provider} {model_name}")

  # If we get here, all retries failed - use the last classified error type
  num_retries = max_retries