enhanced-git 1.0.4-py3-none-any.whl → 1.0.6-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- {enhanced_git-1.0.4.dist-info → enhanced_git-1.0.6.dist-info}/METADATA +33 -3
- {enhanced_git-1.0.4.dist-info → enhanced_git-1.0.6.dist-info}/RECORD +7 -7
- gitai/cli.py +66 -0
- gitai/config.py +39 -9
- {enhanced_git-1.0.4.dist-info → enhanced_git-1.0.6.dist-info}/WHEEL +0 -0
- {enhanced_git-1.0.4.dist-info → enhanced_git-1.0.6.dist-info}/entry_points.txt +0 -0
- {enhanced_git-1.0.4.dist-info → enhanced_git-1.0.6.dist-info}/licenses/LICENSE +0 -0
{enhanced_git-1.0.4.dist-info → enhanced_git-1.0.6.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: enhanced-git
-Version: 1.0.4
+Version: 1.0.6
 Summary: Generate Conventional Commit messages and changelog sections using AI
 Project-URL: Homepage, https://github.com/mxzahid/git-ai
 Project-URL: Repository, https://github.com/mxzahid/git-ai
@@ -168,9 +168,39 @@ git-ai changelog --since v1.0.0 --to main
 - `OLLAMA_BASE_URL`: Ollama server URL (default: http://localhost:11434)
 - `OLLAMA_MODEL`: Ollama model name (default: qwen2.5-coder:3b)
 
-
+**Important for pipx users**: pipx creates isolated environments that don't inherit shell environment variables. Use one of these solutions:
 
-
+```bash
+# Solution 1: Use the setup command (easiest)
+git-ai setup --provider ollama --model qwen2.5-coder:3b
+# This creates ~/.config/gitai/config.toml for global configuration
+
+# Solution 2: Use .gitai.toml config file (per-project)
+# Create .gitai.toml in your project root with:
+[llm]
+provider = "ollama"
+model = "qwen2.5-coder:3b"
+
+# Solution 3: Set variables in your shell profile (.bashrc, .zshrc, etc.)
+export OLLAMA_BASE_URL="http://localhost:11434"
+export OLLAMA_MODEL="qwen2.5-coder:3b"
+# Then restart your terminal
+
+# Solution 4: Use environment variables inline
+OLLAMA_BASE_URL="http://localhost:11434" OLLAMA_MODEL="qwen2.5-coder:3b" git-ai commit
+```
+
+### Configuration Files
+
+GitAI supports configuration at multiple levels:
+
+1. **Global config**: `~/.config/gitai/config.toml` (applies to all repositories)
+2. **Project config**: `.gitai.toml` in git repository root (overrides global config)
+
+Create a global configuration easily with:
+```bash
+git-ai setup --provider ollama --model qwen2.5-coder:3b
+```
 
 **Auto-detection**: GitAI automatically detects your LLM provider based on environment variables (no config file needed!):
 - If `OPENAI_API_KEY` is set → uses OpenAI
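The new README section documents two configuration levels, with the project file overriding the global one. A minimal sketch of that precedence using only the standard library (the `load_effective_config` helper and its merge behavior are illustrative assumptions, not gitai's actual loader):

```python
# Sketch of the documented precedence: project .gitai.toml overrides
# ~/.config/gitai/config.toml. Requires Python 3.11+ for stdlib tomllib.
# This helper is hypothetical; it only illustrates the override order.
import tomllib
from pathlib import Path


def load_effective_config(repo_root: Path) -> dict:
    merged: dict = {}
    paths = [
        Path.home() / ".config" / "gitai" / "config.toml",  # global (lowest priority)
        repo_root / ".gitai.toml",  # project (overrides global)
    ]
    for path in paths:
        if path.exists():
            with path.open("rb") as f:
                data = tomllib.load(f)
            # Assumes top-level entries are TOML tables ([llm], [commit]),
            # as in the examples above; later files win key-by-key.
            for table, values in data.items():
                merged.setdefault(table, {}).update(values)
    return merged


print(load_effective_config(Path.cwd()).get("llm", {}))
```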
{enhanced_git-1.0.4.dist-info → enhanced_git-1.0.6.dist-info}/RECORD
CHANGED
@@ -1,8 +1,8 @@
 gitai/__init__.py,sha256=X_3SlMT2EeGvZ9bdsXdjzwd1FFta8HHakgPv6yRq7kU,108
 gitai/changelog.py,sha256=F2atDczLs-HgoafHOngKC6m2BhlfVYmknHyCIPjjFL4,8724
-gitai/cli.py,sha256=
+gitai/cli.py,sha256=fPuoPCkQAHP3Z6qyM16rrj3e3bu1RTJVhvIGilWshUY,6394
 gitai/commit.py,sha256=vVfMcXDCOSc90ILMm8wMg0PjLiuUarzteyI2I_IT76c,12257
-gitai/config.py,sha256=
+gitai/config.py,sha256=aUvqO0dZJqFSba_Y0-BYWlG39hyxqa3rNegxh54HBqM,5032
 gitai/constants.py,sha256=smipnjD7Y8h11Io1bpnimue-bz21opM74MHxczOq3rQ,3201
 gitai/diff.py,sha256=Ae3aslHoeVrYDYo1UVnZe5x-z5poFUCM8K9bXRfVyug,5107
 gitai/hook.py,sha256=U4KF1_uJZuw6AKtsyCcnntJcBOIsHNxZF149DYjgQDk,2527
@@ -11,8 +11,8 @@ gitai/providers/__init__.py,sha256=6IFc912-oepXeDGJyE4Ksm3KJLn6CGdYZb8HkUMfvlA,3
 gitai/providers/base.py,sha256=a5b1ZulBnQvVmTlxeUQhixMyFWhwiZKMX1sIeQHHkms,1851
 gitai/providers/ollama_provider.py,sha256=crRCfQZxJY1S4LaSFdiNT19u2T9WjbhpU8TCxbuo92w,2540
 gitai/providers/openai_provider.py,sha256=i1lwyCtWoN5APt3UsB4MBS-jOLifDZcUCGj1Ko1CKcs,2444
-enhanced_git-1.0.
-enhanced_git-1.0.
-enhanced_git-1.0.
-enhanced_git-1.0.
-enhanced_git-1.0.
+enhanced_git-1.0.6.dist-info/METADATA,sha256=aCRkf2GDfGzgXBeZu3S26NILKpl3Ak8CnLmSy0yiZGM,11036
+enhanced_git-1.0.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+enhanced_git-1.0.6.dist-info/entry_points.txt,sha256=KzU5dZTYOoumsgMHpgn1XqwcALjUrcb1MCk7iyp9xTI,70
+enhanced_git-1.0.6.dist-info/licenses/LICENSE,sha256=d11_Oc9IT-MUTvztUzbHPs_CSr9drf-6d1vnIvPiMJc,1075
+enhanced_git-1.0.6.dist-info/RECORD,,
gitai/cli.py
CHANGED
@@ -156,6 +156,72 @@ def changelog(
         exit_with_error(f"Error generating changelog: {e}")
 
 
+@app.command()
+def debug() -> None:
+    """Debug configuration and environment."""
+    import os
+    from .config import Config
+
+    print("=== GitAI Debug Information ===")
+    print(f"OPENAI_API_KEY: {'SET' if os.getenv('OPENAI_API_KEY') else 'NOT SET'}")
+    print(f"OLLAMA_BASE_URL: {os.getenv('OLLAMA_BASE_URL', 'NOT SET')}")
+    print(f"OLLAMA_MODEL: {os.getenv('OLLAMA_MODEL', 'NOT SET')}")
+
+    try:
+        config = Config.load()
+        print(f"\nDetected provider: {config.llm.provider}")
+        print(f"Model: {config.llm.model}")
+        print(f"Base URL: {config.llm.base_url}")
+        print(f"API Key: {'SET' if config.llm.api_key else 'NOT SET'}")
+        print(f"LLM Available: {config.is_llm_available()}")
+    except Exception as e:
+        print(f"Error loading config: {e}")
+
+
+@app.command()
+def setup(
+    provider: Annotated[
+        str,
+        typer.Option("--provider", help="LLM provider: openai or ollama"),
+    ] = "ollama",
+    model: Annotated[
+        str,
+        typer.Option("--model", help="Model name"),
+    ] = "qwen2.5-coder:3b",
+) -> None:
+    """Set up user-level GitAI configuration."""
+    from pathlib import Path
+
+    config_dir = Path.home() / ".config" / "gitai"
+    config_file = config_dir / "config.toml"
+
+    # Create config directory if it doesn't exist
+    config_dir.mkdir(parents=True, exist_ok=True)
+
+    # Create config content
+    config_content = f"""# GitAI User Configuration
+# This file is automatically created by 'git-ai setup'
+
+[llm]
+provider = "{provider}"
+model = "{model}"
+use_ollama = true
+
+[commit]
+style = "conventional"
+include_body = true
+"""
+
+    # Write config file
+    config_file.write_text(config_content)
+
+    print_success(f"Created user configuration at {config_file}")
+    print_info("GitAI will now automatically use Ollama for all repositories")
+    print_info(
+        "You can override this by creating a .gitai.toml file in specific projects"
+    )
+
+
 @app.callback()
 def main() -> None:
     """GitAI - Generate Conventional Commit messages and changelog sections using AI."""
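Both new commands hang off the existing Typer `app`, so they can be exercised in-process rather than through a shell. A quick smoke-test sketch, assuming `gitai.cli` exposes `app` as the diff suggests (note that `setup` writes a real file under `~/.config/gitai/`):

```python
# Smoke test for the new `debug` and `setup` commands via Typer's test
# runner. Assumes the Typer app object is importable as gitai.cli.app.
from typer.testing import CliRunner

from gitai.cli import app

runner = CliRunner()

# `debug` reports which environment variables are SET / NOT SET and the
# provider the Config loader resolved.
result = runner.invoke(app, ["debug"])
print(result.output)

# `setup` writes ~/.config/gitai/config.toml and prints where it wrote it.
result = runner.invoke(
    app, ["setup", "--provider", "ollama", "--model", "qwen2.5-coder:3b"]
)
assert result.exit_code == 0, result.output
print(result.output)
```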
gitai/config.py
CHANGED
@@ -1,6 +1,7 @@
 """Configuration management for GitAI."""
 
 import os
+from typing import Any
 from dataclasses import dataclass
 from pathlib import Path
 
@@ -70,23 +71,38 @@ class Config:
         # auto-detect provider based on available environment variables
         configured_provider = llm_data.get("provider")
         if configured_provider is None:
-
-
-
+            # Check environment variables for auto-detection
+            openai_key = os.getenv("OPENAI_API_KEY")
+            ollama_url = os.getenv("OLLAMA_BASE_URL")
+            ollama_model = os.getenv("OLLAMA_MODEL")
+
+            # Also check for user-level config directory
+            home_config = cls._load_user_config()
+
+            if ollama_url or ollama_model or home_config.get("use_ollama"):
                 configured_provider = "ollama"
+            elif openai_key:
+                configured_provider = "openai"
             else:
-                configured_provider =
+                configured_provider = (
+                    "openai"  # fallback to openai (will use non-AI mode if no key)
+                )
 
         llm_config = LLMConfig(
             provider=configured_provider,
-            model=llm_data.get(
+            model=llm_data.get(
+                "model",
+                (
+                    "gpt-4o-mini"
+                    if configured_provider == "openai"
+                    else "qwen2.5-coder:3b"
+                ),
+            ),
             max_tokens=llm_data.get("max_tokens", 300),
             temperature=llm_data.get("temperature", 0.0),
             timeout_seconds=llm_data.get("timeout_seconds", 45),
             api_key=(
-                os.getenv("OPENAI_API_KEY")
-                if configured_provider == "openai"
-                else None
+                os.getenv("OPENAI_API_KEY") if configured_provider == "openai" else None
             ),
             base_url=(
                 os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
@@ -121,11 +137,25 @@ class Config:
             git_root=git_root,
         )
 
+    @classmethod
+    def _load_user_config(cls) -> dict[str, Any]:
+        """Load user-level configuration from ~/.config/gitai/config.toml"""
+        from pathlib import Path
+
+        config_dir = Path.home() / ".config" / "gitai"
+        config_file = config_dir / "config.toml"
+
+        if config_file.exists():
+            return load_toml_config(config_file)
+        return {}
+
     def is_llm_available(self) -> bool:
         """Check if LLM provider is available."""
         if self.llm.provider == "openai":
             return self.llm.api_key is not None
         elif self.llm.provider == "ollama":
             # for ollama we assume it's available if base_url is set or model is configured
-            return
+            return (
+                self.llm.base_url is not None or os.getenv("OLLAMA_MODEL") is not None
+            )
         return False
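The detection order in `Config.load` is worth noting: any Ollama signal (an environment variable or `use_ollama` in the user-level config) takes precedence over an `OPENAI_API_KEY`, and the final fallback is still `"openai"`, which simply degrades to non-AI mode without a key. A standalone restatement of that order, for illustration only:

```python
# Restates the auto-detection branch from Config.load for illustration;
# the real code also honors an explicit llm_data["provider"] setting.
import os


def detect_provider(home_config: dict) -> str:
    # Any Ollama signal wins first: env vars or the user-level config flag.
    if (
        os.getenv("OLLAMA_BASE_URL")
        or os.getenv("OLLAMA_MODEL")
        or home_config.get("use_ollama")
    ):
        return "ollama"
    # Otherwise an OpenAI key selects OpenAI.
    if os.getenv("OPENAI_API_KEY"):
        return "openai"
    # Fallback: openai, which runs in non-AI mode when no key is present.
    return "openai"


print(detect_provider({"use_ollama": True}))  # -> "ollama"
```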
{enhanced_git-1.0.4.dist-info → enhanced_git-1.0.6.dist-info}/WHEEL
UNCHANGED

{enhanced_git-1.0.4.dist-info → enhanced_git-1.0.6.dist-info}/entry_points.txt
UNCHANGED

{enhanced_git-1.0.4.dist-info → enhanced_git-1.0.6.dist-info}/licenses/LICENSE
UNCHANGED