enhanced-git 1.0.2-py3-none-any.whl → 1.0.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {enhanced_git-1.0.2.dist-info → enhanced_git-1.0.5.dist-info}/METADATA +35 -1
- {enhanced_git-1.0.2.dist-info → enhanced_git-1.0.5.dist-info}/RECORD +6 -6
- gitai/config.py +26 -8
- {enhanced_git-1.0.2.dist-info → enhanced_git-1.0.5.dist-info}/WHEEL +0 -0
- {enhanced_git-1.0.2.dist-info → enhanced_git-1.0.5.dist-info}/entry_points.txt +0 -0
- {enhanced_git-1.0.2.dist-info → enhanced_git-1.0.5.dist-info}/licenses/LICENSE +0 -0
{enhanced_git-1.0.2.dist-info → enhanced_git-1.0.5.dist-info}/METADATA CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: enhanced-git
-Version: 1.0.2
+Version: 1.0.5
 Summary: Generate Conventional Commit messages and changelog sections using AI
 Project-URL: Homepage, https://github.com/mxzahid/git-ai
 Project-URL: Repository, https://github.com/mxzahid/git-ai
```
```diff
@@ -168,6 +168,40 @@ git-ai changelog --since v1.0.0 --to main
 - `OLLAMA_BASE_URL`: Ollama server URL (default: http://localhost:11434)
 - `OLLAMA_MODEL`: Ollama model name (default: qwen2.5-coder:3b)
 
+### Configuration File
+
+GitAI supports optional configuration via a `.gitai.toml` file in your git repository root. This allows you to customize behavior beyond environment variables.
+
+**Auto-detection**: GitAI automatically detects your LLM provider based on environment variables (no config file needed!):
+- If `OPENAI_API_KEY` is set → uses OpenAI
+- If `OLLAMA_BASE_URL` or `OLLAMA_MODEL` is set → uses Ollama
+- Otherwise → falls back to OpenAI
+
+**Custom configuration**: Create `.gitai.toml` in your project root for advanced settings:
+
+```toml
+[llm]
+provider = "ollama"  # "openai" | "ollama"
+model = "qwen2.5-coder:3b"  # I suggest using one of: qwen2.5-coder:3b, qwen2.5-coder:1.5b, codellama:7b, deepseek-coder:6.7b
+max_tokens = 300
+temperature = 0.1
+timeout_seconds = 45
+
+[commit]
+style = "conventional"  # "conventional" | "plain"
+scope_detection = true
+include_body = true
+include_footers = true
+wrap_width = 72
+
+[changelog]
+grouping = "type"  # group by Conventional Commit type
+heading_style = "keep-a-changelog"
+
+[debug]
+debug_mode = false
+```
+
 ## How It Works
 
 ### Commit Message Generation
```
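The provider auto-detection described in the README addition reduces to a short precedence check. Below is a minimal standalone sketch of that precedence, using only the Python standard library; the function name `detect_provider` is illustrative, not part of the package's API.

```python
import os

def detect_provider(configured: str | None = None) -> str:
    """Resolve the LLM provider using the README's documented precedence.

    An explicit value (e.g. from .gitai.toml) wins; otherwise environment
    variables decide, with OpenAI as the documented final fallback.
    """
    if configured is not None:
        return configured
    if os.getenv("OPENAI_API_KEY"):
        return "openai"
    if os.getenv("OLLAMA_BASE_URL") or os.getenv("OLLAMA_MODEL"):
        return "ollama"
    return "openai"
```

Note that the README documents OpenAI as the final fallback, while the shipped `config.py` (diffed below) leaves the provider unset in that branch.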
{enhanced_git-1.0.2.dist-info → enhanced_git-1.0.5.dist-info}/RECORD CHANGED

```diff
@@ -2,7 +2,7 @@ gitai/__init__.py,sha256=X_3SlMT2EeGvZ9bdsXdjzwd1FFta8HHakgPv6yRq7kU,108
 gitai/changelog.py,sha256=F2atDczLs-HgoafHOngKC6m2BhlfVYmknHyCIPjjFL4,8724
 gitai/cli.py,sha256=l0i6UKqdeHB3TDAgaY8Gq1N-GFM4QrI2h72m9EWzY-I,4453
 gitai/commit.py,sha256=vVfMcXDCOSc90ILMm8wMg0PjLiuUarzteyI2I_IT76c,12257
-gitai/config.py,sha256=
+gitai/config.py,sha256=F8DA5Hd20LMTV1l5gjAurukXJMUI_H_qnKofob0moqc,4223
 gitai/constants.py,sha256=smipnjD7Y8h11Io1bpnimue-bz21opM74MHxczOq3rQ,3201
 gitai/diff.py,sha256=Ae3aslHoeVrYDYo1UVnZe5x-z5poFUCM8K9bXRfVyug,5107
 gitai/hook.py,sha256=U4KF1_uJZuw6AKtsyCcnntJcBOIsHNxZF149DYjgQDk,2527
@@ -11,8 +11,8 @@ gitai/providers/__init__.py,sha256=6IFc912-oepXeDGJyE4Ksm3KJLn6CGdYZb8HkUMfvlA,3
 gitai/providers/base.py,sha256=a5b1ZulBnQvVmTlxeUQhixMyFWhwiZKMX1sIeQHHkms,1851
 gitai/providers/ollama_provider.py,sha256=crRCfQZxJY1S4LaSFdiNT19u2T9WjbhpU8TCxbuo92w,2540
 gitai/providers/openai_provider.py,sha256=i1lwyCtWoN5APt3UsB4MBS-jOLifDZcUCGj1Ko1CKcs,2444
-enhanced_git-1.0.
-enhanced_git-1.0.
-enhanced_git-1.0.
-enhanced_git-1.0.
-enhanced_git-1.0.
+enhanced_git-1.0.5.dist-info/METADATA,sha256=NX4qWPg2kjuWwXrYF-y2qbBG-0cSyq6F05yPELhPUuM,10056
+enhanced_git-1.0.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+enhanced_git-1.0.5.dist-info/entry_points.txt,sha256=KzU5dZTYOoumsgMHpgn1XqwcALjUrcb1MCk7iyp9xTI,70
+enhanced_git-1.0.5.dist-info/licenses/LICENSE,sha256=d11_Oc9IT-MUTvztUzbHPs_CSr9drf-6d1vnIvPiMJc,1075
+enhanced_git-1.0.5.dist-info/RECORD,,
```
gitai/config.py CHANGED
```diff
@@ -66,20 +66,36 @@ class Config:
         config_data = load_toml_config(config_path)
 
         llm_data = config_data.get("llm", {})
+
+        # auto-detect provider based on available environment variables
+        configured_provider = llm_data.get("provider")
+        if configured_provider is None:
+            if os.getenv("OPENAI_API_KEY"):
+                configured_provider = "openai"
+            elif os.getenv("OLLAMA_BASE_URL") or os.getenv("OLLAMA_MODEL"):
+                configured_provider = "ollama"
+            else:
+                configured_provider = None  # fallback
+
         llm_config = LLMConfig(
-            provider=
-            model=llm_data.get(
+            provider=configured_provider,
+            model=llm_data.get(
+                "model",
+                (
+                    "gpt-4o-mini"
+                    if configured_provider == "openai"
+                    else "qwen2.5-coder:3b"
+                ),
+            ),
             max_tokens=llm_data.get("max_tokens", 300),
             temperature=llm_data.get("temperature", 0.0),
             timeout_seconds=llm_data.get("timeout_seconds", 45),
             api_key=(
-                os.getenv("OPENAI_API_KEY")
-                if llm_data.get("provider") == "openai"
-                else None
+                os.getenv("OPENAI_API_KEY") if configured_provider == "openai" else None
             ),
             base_url=(
                 os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
-                if
+                if configured_provider == "ollama"
                 else None
             ),
         )
```
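To make the resolved configuration concrete, here is a hedged, self-contained simulation of the Ollama-only path from the hunk above. The `LLMConfig` dataclass below is a stand-in approximating the fields used in the diff; the real class lives elsewhere in `config.py` and may differ.

```python
import os
from dataclasses import dataclass

@dataclass
class LLMConfig:  # illustrative stand-in, not the package's actual class
    provider: str | None
    model: str
    max_tokens: int = 300
    temperature: float = 0.0
    timeout_seconds: int = 45
    api_key: str | None = None
    base_url: str | None = None

# simulate an Ollama-only environment: no OpenAI key, Ollama URL set
os.environ.pop("OPENAI_API_KEY", None)
os.environ["OLLAMA_BASE_URL"] = "http://localhost:11434"

provider = (
    "openai"
    if os.getenv("OPENAI_API_KEY")
    else "ollama"
    if os.getenv("OLLAMA_BASE_URL") or os.getenv("OLLAMA_MODEL")
    else None
)
config = LLMConfig(
    provider=provider,
    model="gpt-4o-mini" if provider == "openai" else "qwen2.5-coder:3b",
    api_key=os.getenv("OPENAI_API_KEY") if provider == "openai" else None,
    base_url=(
        os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
        if provider == "ollama"
        else None
    ),
)
print(config.provider, config.model, config.base_url)
# -> ollama qwen2.5-coder:3b http://localhost:11434
```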
```diff
@@ -115,6 +131,8 @@ class Config:
         if self.llm.provider == "openai":
             return self.llm.api_key is not None
         elif self.llm.provider == "ollama":
-            # for
-            return
+            # for ollama we assume it's available if base_url is set or model is configured
+            return (
+                self.llm.base_url is not None or os.getenv("OLLAMA_MODEL") is not None
+            )
         return False
```
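The second hunk updates the availability check: Ollama now counts as available when either a base URL is configured or `OLLAMA_MODEL` is set, with no network probe. A minimal sketch of that heuristic, with the illustrative helper name `ollama_available`:

```python
import os

def ollama_available(base_url: str | None) -> bool:
    # mirrors the heuristic in the diff: the presence of configuration,
    # not a live server check, decides availability for ollama
    return base_url is not None or os.getenv("OLLAMA_MODEL") is not None
```

This keeps the check fast and offline-friendly, at the cost of reporting availability for a server that may not actually be running.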