git_llm_tool-0.1.12-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- git_llm_tool/__init__.py +5 -0
- git_llm_tool/__main__.py +6 -0
- git_llm_tool/cli.py +167 -0
- git_llm_tool/commands/__init__.py +1 -0
- git_llm_tool/commands/changelog_cmd.py +189 -0
- git_llm_tool/commands/commit_cmd.py +134 -0
- git_llm_tool/core/__init__.py +1 -0
- git_llm_tool/core/config.py +352 -0
- git_llm_tool/core/diff_optimizer.py +206 -0
- git_llm_tool/core/exceptions.py +26 -0
- git_llm_tool/core/git_helper.py +250 -0
- git_llm_tool/core/jira_helper.py +238 -0
- git_llm_tool/core/rate_limiter.py +136 -0
- git_llm_tool/core/smart_chunker.py +262 -0
- git_llm_tool/core/token_counter.py +169 -0
- git_llm_tool/providers/__init__.py +21 -0
- git_llm_tool/providers/anthropic_langchain.py +42 -0
- git_llm_tool/providers/azure_openai_langchain.py +59 -0
- git_llm_tool/providers/base.py +203 -0
- git_llm_tool/providers/factory.py +85 -0
- git_llm_tool/providers/gemini_langchain.py +57 -0
- git_llm_tool/providers/langchain_base.py +608 -0
- git_llm_tool/providers/ollama_langchain.py +45 -0
- git_llm_tool/providers/openai_langchain.py +42 -0
- git_llm_tool-0.1.12.dist-info/LICENSE +21 -0
- git_llm_tool-0.1.12.dist-info/METADATA +645 -0
- git_llm_tool-0.1.12.dist-info/RECORD +29 -0
- git_llm_tool-0.1.12.dist-info/WHEEL +4 -0
- git_llm_tool-0.1.12.dist-info/entry_points.txt +3 -0
git_llm_tool/providers/openai_langchain.py
@@ -0,0 +1,42 @@
"""OpenAI LangChain provider implementation."""

from langchain_openai import ChatOpenAI
from langchain_core.language_models import BaseLanguageModel

from git_llm_tool.core.config import AppConfig
from git_llm_tool.core.exceptions import ApiError
from git_llm_tool.providers.langchain_base import LangChainProvider


class OpenAiLangChainProvider(LangChainProvider):
    """OpenAI provider using LangChain with intelligent chunking support."""

    def _create_llm(self) -> BaseLanguageModel:
        """Create OpenAI LangChain LLM instance."""
        # Get API key
        api_key = self.config.llm.api_keys.get("openai")
        if not api_key:
            raise ApiError("OpenAI API key not found in configuration")

        # Determine model
        model = self.config.llm.default_model
        if not model.startswith(("gpt-", "o1-")):
            # Fallback to GPT-4o if model doesn't look like OpenAI model
            model = "gpt-4o"

        try:
            # Create LangChain OpenAI instance
            return ChatOpenAI(
                api_key=api_key,
                model=model,
                temperature=0.7,
                max_tokens=500,  # Increased for better commit messages
                # LangChain will handle retries and error handling automatically
            )
        except Exception as e:
            raise ApiError(f"Failed to create OpenAI LangChain instance: {e}")

    def __str__(self) -> str:
        """String representation for debugging."""
        return f"OpenAiLangChainProvider(model={self.llm.model_name})"
git_llm_tool-0.1.12.dist-info/LICENSE
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024 skyler-gogolook

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
git_llm_tool-0.1.12.dist-info/METADATA
@@ -0,0 +1,645 @@
Metadata-Version: 2.3
Name: git-llm-tool
Version: 0.1.12
Summary: AI-powered git commit message and changelog generator
License: MIT
Keywords: git,commit,llm,ai,automation,jira,conventional-commits
Author: skyler-gogolook
Author-email: skyler.lo@gogolook.com
Requires-Python: >=3.12,<4.0
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Software Development :: Version Control :: Git
Classifier: Topic :: Utilities
Requires-Dist: anthropic (>=0.30.0,<0.31.0)
Requires-Dist: click (>=8.1.0,<9.0.0)
Requires-Dist: google-generativeai (>=0.5.0,<0.6.0)
Requires-Dist: halo (>=0.0.31,<0.0.32)
Requires-Dist: langchain (>=0.2.0,<0.3.0)
Requires-Dist: langchain-anthropic (>=0.1.23,<0.2.0)
Requires-Dist: langchain-google-genai (>=1.0.0,<2.0.0)
Requires-Dist: langchain-ollama (>=0.1.0,<0.2.0)
Requires-Dist: langchain-openai (>=0.1.0,<0.2.0)
Requires-Dist: openai (>=1.0.0,<2.0.0)
Requires-Dist: pyyaml (>=6.0,<7.0)
Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
Project-URL: Documentation, https://github.com/z0890142/git-llm-tool#readme
Project-URL: Homepage, https://github.com/z0890142/git-llm-tool
Project-URL: Repository, https://github.com/z0890142/git-llm-tool
Description-Content-Type: text/markdown
# Git-LLM-Tool

[![Python 3.12+](https://img.shields.io/badge/python-3.12%2B-blue.svg)](https://python.org)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)

AI-powered git commit message and changelog generator using LLM APIs.

## Table of Contents

- [Features](#features)
- [Installation](#installation)
- [Quick Start](#quick-start)
- [Configuration](#configuration)
- [Advanced Features](#advanced-features)
- [CLI Commands Reference](#cli-commands-reference)
- [Environment Variables](#environment-variables)
- [Usage Examples](#usage-examples)
- [Supported Models](#supported-models)
- [Development](#development)
- [Contributing](#contributing)
- [Git Custom Command Integration](#git-custom-command-integration)
- [Requirements](#requirements)
- [Troubleshooting](#troubleshooting)
- [License](#license)
## Features

- 🤖 **Smart Commit Messages**: Automatically generate commit messages from git diff using AI
- 📝 **Changelog Generation**: Generate structured changelogs from git history
- 🔧 **Multiple LLM Providers**: Support for OpenAI, Anthropic Claude, Google Gemini, Azure OpenAI, and Ollama
- 🚀 **Intelligent Chunking**: Automatic diff splitting with parallel processing for large changes
- 🔄 **Hybrid Processing**: Use local Ollama for chunk processing + cloud LLM for final quality
- 📊 **Progress Indicators**: Beautiful progress bars with Halo for long-running operations
- ⚙️ **Hierarchical Configuration**: Project-level and global configuration support
- 🎯 **Jira Integration**: Automatic ticket detection and work hours tracking
- 🌐 **Multi-language Support**: Generate messages in different languages
- ✏️ **Editor Integration**: Configurable editor support for reviewing commit messages
- 🛠️ **Easy Setup**: Simple installation and configuration
## Installation

### From PyPI (Coming Soon)
```bash
pip install git-llm-tool
```

### From Source
```bash
git clone https://github.com/z0890142/git-llm-tool.git
cd git-llm-tool
poetry install
```
## Quick Start

### 1. Initialize Configuration
```bash
git-llm config init
```

### 2. Configure Your API Key
Choose one of the supported providers:

```bash
# OpenAI
git-llm config set llm.api_keys.openai sk-your-openai-key-here

# Anthropic Claude
git-llm config set llm.api_keys.anthropic sk-ant-your-key-here

# Google Gemini
git-llm config set llm.api_keys.google your-gemini-key-here

# Azure OpenAI
git-llm config set llm.api_keys.azure_openai your-azure-key
git-llm config set llm.azure_openai.endpoint https://your-resource.openai.azure.com/
git-llm config set llm.azure_openai.deployment_name gpt-4o
```

### 3. Generate Commit Messages
```bash
# Stage your changes
git add .

# Generate and review commit message (opens editor)
git-llm commit

# Or apply directly without review
git-llm commit --apply
```

### 4. Generate Changelogs
```bash
# Generate changelog from last tag to HEAD
git-llm changelog

# Generate changelog for specific range
git-llm changelog --from v1.0.0 --to v2.0.0
```
## Configuration

### Configuration Hierarchy

The tool uses a hierarchical configuration system (highest to lowest priority):
1. **CLI flags** (highest priority)
2. **Project config** `.git-llm-tool.yaml`
3. **Global config** `~/.git-llm-tool/config.yaml`
4. **Environment variables**
5. **Default values**
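To make the order above concrete, here is a minimal sketch of layered resolution. It is illustrative only — `flatten`, `load`, `ENV_MAP`, and `DEFAULTS` are hypothetical names, not the tool's actual internals in `core/config.py`:

```python
# Illustrative sketch of the priority order above; not git-llm-tool's real code.
import os
from pathlib import Path

import yaml  # pyyaml is one of the package's declared dependencies

DEFAULTS = {"llm.default_model": "gpt-4o", "llm.language": "en"}   # 5. defaults
ENV_MAP = {"llm.default_model": "GIT_LLM_MODEL",
           "llm.language": "GIT_LLM_LANGUAGE"}                     # 4. env vars


def flatten(node: dict, prefix: str = "") -> dict:
    """Flatten nested YAML into {'llm.default_model': value} style keys."""
    flat = {}
    for key, value in (node or {}).items():
        dotted = f"{prefix}.{key}" if prefix else key
        if isinstance(value, dict):
            flat.update(flatten(value, dotted))
        else:
            flat[dotted] = value
    return flat


def load(path: Path) -> dict:
    return flatten(yaml.safe_load(path.read_text())) if path.exists() else {}


def resolve(key: str, cli_value=None):
    """CLI flag > project config > global config > environment > defaults."""
    if cli_value is not None:                                      # 1. CLI flag
        return cli_value
    for config in (load(Path(".git-llm-tool.yaml")),               # 2. project
                   load(Path.home() / ".git-llm-tool" / "config.yaml")):  # 3. global
        if key in config:
            return config[key]
    return os.environ.get(ENV_MAP.get(key, ""), DEFAULTS.get(key))
```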
### Configuration Options

#### LLM Settings
```bash
# Set default model
git-llm config set llm.default_model gpt-4o

# Set output language (en, zh, ja, etc.)
git-llm config set llm.language en

# API Keys
git-llm config set llm.api_keys.openai sk-your-key
git-llm config set llm.api_keys.anthropic sk-ant-your-key
git-llm config set llm.api_keys.google your-key

# Azure OpenAI specific settings
git-llm config set llm.azure_openai.endpoint https://your-resource.openai.azure.com/
git-llm config set llm.azure_openai.api_version 2024-12-01-preview
git-llm config set llm.azure_openai.deployment_name gpt-4o

# Hybrid Ollama processing (optional)
git-llm config set llm.use_ollama_for_chunks true
git-llm config set llm.ollama_model llama3:8b
git-llm config set llm.ollama_base_url http://localhost:11434
```
#### Editor Configuration
```bash
# Set preferred editor for commit message review
git-llm config set editor.preferred_editor vi
git-llm config set editor.preferred_editor nano
git-llm config set editor.preferred_editor "code --wait"   # VS Code
git-llm config set editor.preferred_editor "subl --wait"   # Sublime Text
```

**Editor Priority (highest to lowest):**
1. `editor.preferred_editor` config
2. `git config core.editor`
3. Environment variables (`GIT_EDITOR`, `VISUAL`, `EDITOR`)
4. System defaults (`nano`, `vim`, `vi`)
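A minimal sketch of this fallback chain (illustrative only; `resolve_editor` is not the tool's actual function):

```python
# Illustrative sketch of the editor fallback chain above; not shipped code.
import os
import shutil
import subprocess


def resolve_editor(preferred: str | None = None) -> str | None:
    if preferred:                                        # 1. config value
        return preferred
    git_editor = subprocess.run(                         # 2. git config core.editor
        ["git", "config", "core.editor"], capture_output=True, text=True
    ).stdout.strip()
    if git_editor:
        return git_editor
    for var in ("GIT_EDITOR", "VISUAL", "EDITOR"):       # 3. environment
        if os.environ.get(var):
            return os.environ[var]
    for candidate in ("nano", "vim", "vi"):              # 4. system defaults
        if shutil.which(candidate):
            return candidate
    return None
```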
#### Jira Integration
```bash
# Enable Jira integration
git-llm config set jira.enabled true

# Set branch regex pattern for ticket extraction
git-llm config set jira.branch_regex '^(feat|fix|chore)\/([A-Z]+-\d+)\/.+$'
```
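With this pattern, a branch named `feat/ABC-123/add-login` yields ticket `ABC-123` from the second capture group. A quick standalone check (not the tool's internals):

```python
# Standalone sketch: extract a Jira ticket from a branch name using the
# branch_regex shown above. Group 2 captures the ticket ID.
import re

BRANCH_REGEX = r"^(feat|fix|chore)\/([A-Z]+-\d+)\/.+$"

match = re.match(BRANCH_REGEX, "feat/ABC-123/add-login")
if match:
    print(match.group(2))  # -> ABC-123
```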
### Example Configuration File

Global config (`~/.git-llm-tool/config.yaml`):
```yaml
llm:
  default_model: 'gpt-4o'
  language: 'en'
  api_keys:
    openai: 'sk-your-openai-key'
    anthropic: 'sk-ant-your-key'
    google: 'your-gemini-key'
  azure_openai:
    endpoint: 'https://your-resource.openai.azure.com/'
    api_version: '2024-12-01-preview'
    deployment_name: 'gpt-4o'

  # LangChain and intelligent processing
  use_langchain: true
  chunking_threshold: 12000  # Enable chunking for diffs larger than 12k tokens

  # Hybrid Ollama processing (optional)
  use_ollama_for_chunks: false  # Set to true to enable
  ollama_model: "llama3:8b"     # Local model for chunk processing
  ollama_base_url: "http://localhost:11434"

editor:
  preferred_editor: 'vi'

jira:
  enabled: true
  branch_regex: '^(feat|fix|chore)\/([A-Z]+-\d+)\/.+$'
```
### View Configuration
```bash
# View all configuration
git-llm config get

# View specific setting
git-llm config get llm.default_model
git-llm config get editor.preferred_editor
```
## Advanced Features

### Intelligent Chunking & Parallel Processing

For large diffs, git-llm-tool automatically uses intelligent chunking to break down changes into manageable pieces:

- **Automatic Threshold Detection**: Diffs larger than 12,000 tokens are automatically chunked
- **Smart Splitting**: Prioritizes file-based splitting, then hunks, then size-based splitting
- **Parallel Processing**: Multiple chunks processed simultaneously for faster results
- **Progress Indicators**: Beautiful progress bars show real-time processing status

```bash
# Enable verbose mode to see chunking details
git-llm commit --verbose

# Example output:
# 🔄 Analyzing diff and creating intelligent chunks...
# ✅ Created 4 intelligent chunks
# 📄 Smart chunking stats:
#    Total chunks: 4
#    File chunks: 2
#    Hunk chunks: 2
#    Complete files: 2
# 🚀 Processing 4 chunks in parallel (4/4 completed)...
# ✅ Parallel processing completed: 4/4 chunks successful
# 🔄 Combining 4 summaries into final commit message...
# ✅ Final commit message generated successfully
```
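The threshold-then-split idea can be pictured with a small sketch. This is illustrative only, not the logic in `core/smart_chunker.py`; it uses `tiktoken`, which is one of the package's declared dependencies:

```python
# Illustrative sketch of threshold-triggered, file-first diff chunking;
# the package's actual smart_chunker is more sophisticated.
import tiktoken

CHUNKING_THRESHOLD = 12_000  # tokens, matching the default described above
enc = tiktoken.get_encoding("cl100k_base")


def split_diff(diff: str) -> list[str]:
    """Return the whole diff if small, else one chunk per file."""
    if len(enc.encode(diff)) <= CHUNKING_THRESHOLD:
        return [diff]
    # File-first splitting: each "diff --git" header starts a new chunk.
    chunks, current = [], []
    for line in diff.splitlines(keepends=True):
        if line.startswith("diff --git") and current:
            chunks.append("".join(current))
            current = []
        current.append(line)
    if current:
        chunks.append("".join(current))
    return chunks
```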
### Hybrid Ollama Processing

Use local Ollama for chunk processing combined with cloud LLM for final quality:

#### Setup Ollama
```bash
# Install Ollama (macOS/Linux)
curl -fsSL https://ollama.ai/install.sh | sh

# Pull a model
ollama pull llama3:8b
# or
ollama pull llama3.1:8b
ollama pull codellama:7b
```

#### Enable Hybrid Mode
```bash
# Enable hybrid processing
git-llm config set llm.use_ollama_for_chunks true
git-llm config set llm.ollama_model llama3:8b

# Verify Ollama is running
curl http://localhost:11434/api/version
```

#### How It Works
1. **Map Phase**: Each chunk processed locally with Ollama (fast, private)
2. **Reduce Phase**: Final combination using cloud LLM (high quality)
3. **Cost Efficient**: Reduces cloud API usage while maintaining quality
4. **Privacy**: Sensitive code chunks processed locally

```bash
# With verbose mode, you'll see:
git-llm commit --verbose

# 🔄 Hybrid processing mode:
#    Map phase (chunks): Ollama (llama3:8b)
#    Reduce phase (final): gpt-4o
# 🚀 Processing 4 chunks in parallel (4/4 completed)...
```
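In outline, the map/reduce flow looks like the sketch below. It is illustrative only — `summarize_with_ollama` and `combine_with_cloud_llm` are placeholders standing in for the tool's actual provider calls:

```python
# Illustrative map/reduce sketch of hybrid processing; the function names
# are placeholders, not git-llm-tool's real API.
from concurrent.futures import ThreadPoolExecutor


def summarize_with_ollama(chunk: str) -> str:
    """Map phase placeholder: one local Ollama call per chunk."""
    raise NotImplementedError


def combine_with_cloud_llm(summaries: list[str]) -> str:
    """Reduce phase placeholder: one cloud LLM call over all summaries."""
    raise NotImplementedError


def generate_commit_message(chunks: list[str]) -> str:
    # Map phase: chunks are summarized locally, in parallel.
    with ThreadPoolExecutor(max_workers=4) as pool:
        summaries = list(pool.map(summarize_with_ollama, chunks))
    # Reduce phase: a single high-quality cloud call produces the final message.
    return combine_with_cloud_llm(summaries)
```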
### LangChain Integration

Advanced LLM provider management with automatic model selection:

- **Automatic Provider Detection**: Based on model name
- **Retry Logic**: Exponential backoff for failed requests
- **Rate Limiting**: Prevents API quota exhaustion
- **Error Recovery**: Graceful fallbacks
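Exponential backoff, for example, follows the familiar pattern sketched here. This is illustrative of the idea only; LangChain ships its own retry machinery, which the tool relies on:

```python
# Illustrative exponential-backoff retry; shows the idea behind the bullet
# above rather than LangChain's actual retry configuration.
import time


def call_with_backoff(call, max_attempts: int = 4, base_delay: float = 1.0):
    for attempt in range(max_attempts):
        try:
            return call()
        except Exception:
            if attempt == max_attempts - 1:
                raise
            time.sleep(base_delay * (2 ** attempt))  # 1s, 2s, 4s, ...
```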
## CLI Commands Reference

### Commit Command
```bash
git-llm commit [OPTIONS]

Options:
  -a, --apply          Apply commit message directly without opening editor
  -m, --model TEXT     Override LLM model (e.g., gpt-4, claude-3-sonnet)
  -l, --language TEXT  Override output language (e.g., en, zh, ja)
  -v, --verbose        Enable verbose output
  --help               Show help message
```

### Changelog Command
```bash
git-llm changelog [OPTIONS]

Options:
  --from TEXT        Starting reference (default: last tag)
  --to TEXT          Ending reference (default: HEAD)
  -o, --output TEXT  Output file (default: stdout)
  -f, --force        Force overwrite existing output file
  --help             Show help message
```

### Config Commands
```bash
git-llm config init           # Initialize configuration
git-llm config get [KEY]      # Get configuration value(s)
git-llm config set KEY VALUE  # Set configuration value
```
## Environment Variables

You can also configure the tool using environment variables:

```bash
# LLM API Keys
export OPENAI_API_KEY="sk-your-openai-key"
export ANTHROPIC_API_KEY="sk-ant-your-key"
export GOOGLE_API_KEY="your-gemini-key"

# Azure OpenAI
export AZURE_OPENAI_API_KEY="your-azure-key"
export AZURE_OPENAI_ENDPOINT="https://your-resource.openai.azure.com/"
export AZURE_OPENAI_API_VERSION="2024-12-01-preview"
export AZURE_OPENAI_DEPLOYMENT_NAME="gpt-4o"

# Override default model
export GIT_LLM_MODEL="gpt-4o"
export GIT_LLM_LANGUAGE="en"
```
## Usage Examples

### Basic Workflow
```bash
# 1. Make changes to your code
echo "console.log('Hello World');" > app.js

# 2. Stage changes
git add app.js

# 3. Generate commit message with review
git-llm commit
# Opens your editor with AI-generated message for review

# 4. Or apply directly
git-llm commit --apply
```

### Using Different Models
```bash
# Use specific model for this commit
git-llm commit --model claude-3-sonnet

# Use different language
git-llm commit --language zh
```

### Project-specific Configuration
Create `.git-llm-tool.yaml` in your project root:
```yaml
llm:
  default_model: 'claude-3-sonnet'
  language: 'zh'
editor:
  preferred_editor: 'code --wait'
jira:
  enabled: true
  branch_regex: '^(feat|fix|docs)\/([A-Z]+-\d+)\/.+$'
```
## Supported Models

### OpenAI
- `gpt-4o` (recommended)
- `gpt-4o-mini`
- `gpt-4-turbo`
- `gpt-3.5-turbo`

### Anthropic Claude
- `claude-3-5-sonnet-20241022` (recommended)
- `claude-3-5-haiku-20241022`
- `claude-3-opus-20240229`

### Google Gemini
- `gemini-1.5-pro`
- `gemini-1.5-flash`

### Azure OpenAI
- Any deployment of the above OpenAI models

### Ollama (Local)
For hybrid processing (chunk processing only):
- `llama3:8b` (recommended)
- `llama3.1:8b`
- `llama3:70b`
- `codellama:7b`
- `codellama:13b`
- `mistral:7b`
- `qwen2:7b`

**Note**: Ollama models are used only for chunk processing in hybrid mode. Final commit message generation still uses cloud LLMs for optimal quality.
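Provider selection keys off the model name; the shipped `openai_langchain.py` above, for example, checks `model.startswith(("gpt-", "o1-"))`. A minimal sketch of that style of prefix routing (the mapping here is illustrative, not the actual table in `providers/factory.py`):

```python
# Illustrative prefix-based provider routing in the spirit of the shipped
# startswith() check; the real factory.py may differ.
PREFIXES = {
    ("gpt-", "o1-"): "openai",
    ("claude-",): "anthropic",
    ("gemini-",): "google",
    ("llama", "codellama", "mistral", "qwen"): "ollama",
}


def detect_provider(model: str) -> str:
    for prefixes, provider in PREFIXES.items():
        if model.startswith(prefixes):
            return provider
    return "openai"  # fallback, mirroring the gpt-4o default above
```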
## Development

### Setup Development Environment
```bash
# Clone repository
git clone https://github.com/z0890142/git-llm-tool.git
cd git-llm-tool

# Install dependencies
poetry install

# Install pre-commit hooks
poetry run pre-commit install
```

### Running Tests
```bash
# Run all tests
poetry run pytest

# Run with coverage
poetry run pytest --cov=git_llm_tool

# Run specific test file
poetry run pytest tests/test_config.py
```

### Code Formatting
```bash
# Format code
poetry run black .
poetry run isort .

# Check formatting
poetry run black --check .
poetry run flake8 .
```

### Building and Publishing
```bash
# Build package
poetry build

# Publish to PyPI (maintainers only)
poetry publish
```
## Contributing

1. Fork the repository
2. Create a feature branch (`git checkout -b feature/amazing-feature`)
3. Make your changes
4. Add tests for new functionality
5. Ensure all tests pass (`poetry run pytest`)
6. Format code (`poetry run black . && poetry run isort .`)
7. Commit your changes (`git-llm commit` 😉)
8. Push to the branch (`git push origin feature/amazing-feature`)
9. Open a Pull Request
## Git Custom Command Integration

You can integrate git-llm as a native git subcommand, allowing you to use `git llm` instead of `git-llm`.

### Method 1: Git Aliases (Recommended)

Add aliases to your git configuration:

```bash
# Add git aliases for all commands
git config --global alias.llm-commit '!git-llm commit'
git config --global alias.llm-changelog '!git-llm changelog'
git config --global alias.llm-config '!git-llm config'

# Or create a general alias
git config --global alias.llm '!git-llm'
```

Now you can use:
```bash
git llm commit       # Instead of git-llm commit
git llm changelog    # Instead of git-llm changelog
git llm config get   # Instead of git-llm config get

# Or with specific aliases
git llm-commit       # Direct alias to git-llm commit
git llm-changelog    # Direct alias to git-llm changelog
```

### Method 2: Shell Aliases

Add to your shell profile (`.bashrc`, `.zshrc`, etc.):

```bash
# Simple alias
alias gllm='git-llm'

# Or git-style aliases
alias gllmc='git-llm commit'
alias gllmcl='git-llm changelog'
alias gllmcfg='git-llm config'
```

Usage:
```bash
gllm commit   # git-llm commit
gllmc         # git-llm commit
gllmcl        # git-llm changelog
```
### Method 3: Custom Git Script

Create a custom git command script. Note that git already treats any executable named `git-llm` on your `PATH` as the subcommand `git llm`, so this is only needed when the installed entry point is not visible to git. The wrapper must not call `git-llm` itself (that would recurse back into the wrapper); invoking the package module directly avoids this:

```bash
# Create git-llm script in your PATH
sudo tee /usr/local/bin/git-llm > /dev/null << 'EOF'
#!/bin/bash
# Git-LLM integration script: run the installed package directly
exec python3 -m git_llm_tool "$@"
EOF

sudo chmod +x /usr/local/bin/git-llm
```

Now you can use:
```bash
git llm commit      # Calls git-llm commit
git llm changelog   # Calls git-llm changelog
```
### Recommended Git Workflow

With git aliases configured, your workflow becomes:

```bash
# Make changes
echo "console.log('Hello');" > app.js

# Stage changes
git add .

# Generate AI commit message (opens editor)
git llm commit

# Or commit directly
git llm commit --apply

# Generate changelog
git llm changelog

# Check configuration
git llm config get
```
## Requirements

- Python 3.12+
- Git
- At least one LLM provider API key (OpenAI, Anthropic, Google, or Azure OpenAI)
- **Optional**: Ollama for hybrid processing (local chunk processing)
## Troubleshooting

### Common Issues

**"No suitable editor found"**
- Set your preferred editor: `git-llm config set editor.preferred_editor vi`
- Or set git editor: `git config --global core.editor vi`

**"No staged changes found"**
- Stage your changes first: `git add .`

**"API Error: Invalid API key"**
- Check your API key configuration: `git-llm config get`
- Ensure the key is correctly set: `git-llm config set llm.api_keys.openai sk-your-key`

**"No commits found in range"**
- Make sure you have commits in the specified range
- Check git log: `git log --oneline`

**"Ollama not available, using main LLM for chunks"**
- Make sure Ollama is installed and running: `ollama serve`
- Check Ollama is accessible: `curl http://localhost:11434/api/version`
- Verify the model is pulled: `ollama list`
- Pull the model if needed: `ollama pull llama3:8b`

**"Processing is slower than expected"**
- For large diffs, enable hybrid mode with Ollama for faster chunk processing
- Check your `chunking_threshold` setting; lower values trigger chunking sooner
- Use `--verbose` to see processing details and bottlenecks

**"Chunk processing failed"**
- If using Ollama, ensure sufficient system resources (RAM)
- Try a smaller model like `llama3:8b` instead of larger models
- Check the Ollama server logs (e.g., `journalctl -u ollama` on Linux, or `~/.ollama/logs/server.log` on macOS)
## License

MIT License