gitai-cli 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gitai_cli-0.1.0/.gitignore +2 -0
- gitai_cli-0.1.0/CLAUDE.md +34 -0
- gitai_cli-0.1.0/LICENSE +21 -0
- gitai_cli-0.1.0/PKG-INFO +139 -0
- gitai_cli-0.1.0/README.md +107 -0
- gitai_cli-0.1.0/assets/commit-genie.png +0 -0
- gitai_cli-0.1.0/gitai/__init__.py +1 -0
- gitai_cli-0.1.0/gitai/ai.py +52 -0
- gitai_cli-0.1.0/gitai/cli.py +132 -0
- gitai_cli-0.1.0/gitai/config.py +27 -0
- gitai_cli-0.1.0/gitai/git.py +24 -0
- gitai_cli-0.1.0/gitai/prompt.py +73 -0
- gitai_cli-0.1.0/pyproject.toml +47 -0
- gitai_cli-0.1.0/tests/__init__.py +0 -0
- gitai_cli-0.1.0/tests/test_ai.py +163 -0
- gitai_cli-0.1.0/tests/test_config.py +65 -0
- gitai_cli-0.1.0/tests/test_git.py +81 -0
- gitai_cli-0.1.0/tests/test_prompt.py +97 -0
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
# CLAUDE.md
|
|
2
|
+
|
|
3
|
+
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
|
4
|
+
|
|
5
|
+
## Commands
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
# Install for development
|
|
9
|
+
pip install -e .
|
|
10
|
+
|
|
11
|
+
# Run the CLI
|
|
12
|
+
gitai commit
|
|
13
|
+
gitai config
|
|
14
|
+
|
|
15
|
+
# Run the test suite (requires the dev extra: pip install -e ".[dev]")
pytest
|
|
16
|
+
```
|
|
17
|
+
|
|
18
|
+
## Architecture
|
|
19
|
+
|
|
20
|
+
Python CLI tool (Typer + Rich) that generates AI-powered git commit messages using local LLMs. Entry point: `gitai/cli.py`, registered as the `gitai` console script.
|
|
21
|
+
|
|
22
|
+
Flow for `gitai commit`:
|
|
23
|
+
|
|
24
|
+
1. **`git.py`** — Runs `git diff --cached` via subprocess; `is_diff_meaningful()` filters out whitespace-only diffs by checking for non-trivial `+`/`-` lines
|
|
25
|
+
2. **`prompt.py`** — Builds the prompt: instructs the LLM to return exactly 3 numbered Conventional Commits suggestions, nothing else
|
|
26
|
+
3. **`ai.py`** — Calls the configured provider through `litellm.completion` using a `{provider}/{model}` model string (passing `api_base` for Ollama); parses numbered lines from the response by requiring the characters before the first `.` to be digits
|
|
27
|
+
4. **`cli.py`** — Strips the `"1. "` prefix from suggestions, presents them via `questionary.select`, then runs `git commit -m` via subprocess
|
|
28
|
+
|
|
29
|
+
**Config** (`config.py`): `~/.gitai.toml`, merged over defaults at load time. Defaults: `model=llama3.2`, `provider=ollama`, `ollama_url=http://localhost:11434`, `commit_style=conventional`, `emoji=false`.
|
|
30
|
+
|
|
31
|
+
**Known gaps to be aware of:**
- `cli.py` validates `provider` against a fixed allow-list (`VALID_PROVIDERS`), so other litellm-supported providers cannot be selected via `gitai config` even though `ai.py` would accept them
- `gitai config` rewrites `~/.gitai.toml` from its prompts with a fixed key set, dropping any keys a user added by hand
|
gitai_cli-0.1.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2024 Jeranguz
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
gitai_cli-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: gitai-cli
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: AI-powered git commit message generator
|
|
5
|
+
Project-URL: Homepage, https://github.com/Jeranguz/gitai
|
|
6
|
+
Project-URL: Repository, https://github.com/Jeranguz/gitai
|
|
7
|
+
Project-URL: Issues, https://github.com/Jeranguz/gitai/issues
|
|
8
|
+
Author: Jeranguz
|
|
9
|
+
License: MIT
|
|
10
|
+
License-File: LICENSE
|
|
11
|
+
Keywords: ai,cli,commit,conventional-commits,git,llm
|
|
12
|
+
Classifier: Development Status :: 3 - Alpha
|
|
13
|
+
Classifier: Environment :: Console
|
|
14
|
+
Classifier: Intended Audience :: Developers
|
|
15
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
16
|
+
Classifier: Operating System :: OS Independent
|
|
17
|
+
Classifier: Programming Language :: Python :: 3
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
20
|
+
Classifier: Topic :: Software Development :: Version Control :: Git
|
|
21
|
+
Classifier: Topic :: Utilities
|
|
22
|
+
Requires-Python: >=3.11
|
|
23
|
+
Requires-Dist: gitpython
|
|
24
|
+
Requires-Dist: litellm
|
|
25
|
+
Requires-Dist: questionary
|
|
26
|
+
Requires-Dist: rich
|
|
27
|
+
Requires-Dist: tomli-w
|
|
28
|
+
Requires-Dist: typer
|
|
29
|
+
Provides-Extra: dev
|
|
30
|
+
Requires-Dist: pytest; extra == 'dev'
|
|
31
|
+
Description-Content-Type: text/markdown
|
|
32
|
+
|
|
33
|
+
# gitai
|
|
34
|
+
|
|
35
|
+
<p align="center">
|
|
36
|
+
<img src="assets/commit-genie.png" alt="The Commit Genie" width="200"/>
|
|
37
|
+
</p>
|
|
38
|
+
|
|
39
|
+
<p align="center">
|
|
40
|
+
<a href="https://pypi.org/project/gitai"><img src="https://img.shields.io/pypi/v/gitai" alt="PyPI version"/></a>
|
|
41
|
+
<a href="https://pypi.org/project/gitai"><img src="https://img.shields.io/pypi/pyversions/gitai" alt="Python versions"/></a>
|
|
42
|
+
<a href="LICENSE"><img src="https://img.shields.io/badge/license-MIT-blue" alt="MIT license"/></a>
|
|
43
|
+
</p>
|
|
44
|
+
|
|
45
|
+
AI-powered git commit message generator. Analyzes your staged changes and suggests meaningful commit messages — using any LLM you already have access to.
|
|
46
|
+
|
|
47
|
+
## Features
|
|
48
|
+
|
|
49
|
+
- Reads your staged `git diff` and generates 3 commit message suggestions
|
|
50
|
+
- Interactive selection: pick a suggestion or write your own
|
|
51
|
+
- Supports multiple providers: Ollama (local), OpenAI, Anthropic, Gemini, and [more](https://docs.litellm.ai/docs/providers)
|
|
52
|
+
- Two commit styles: [Conventional Commits](https://www.conventionalcommits.org/) or free-form
|
|
53
|
+
- Optional emoji (gitmoji) support
|
|
54
|
+
|
|
55
|
+
## Installation
|
|
56
|
+
|
|
57
|
+
```bash
|
|
58
|
+
pip install gitai-cli
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
Requires Python 3.11+.
|
|
62
|
+
|
|
63
|
+
## Quick start
|
|
64
|
+
|
|
65
|
+
```bash
|
|
66
|
+
# 1. Stage your changes
|
|
67
|
+
git add .
|
|
68
|
+
|
|
69
|
+
# 2. Run gitai
|
|
70
|
+
gitai commit
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
gitai reads the diff, calls your configured LLM, and presents 3 suggestions to choose from.
|
|
74
|
+
|
|
75
|
+
## Usage
|
|
76
|
+
|
|
77
|
+
```
|
|
78
|
+
gitai commit Generate commit message suggestions for staged changes
|
|
79
|
+
gitai config View and update settings
|
|
80
|
+
gitai --version Show version
|
|
81
|
+
gitai --help Show help
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
## Configuration
|
|
85
|
+
|
|
86
|
+
Run `gitai config` to update settings interactively. Settings are stored in `~/.gitai.toml`.
|
|
87
|
+
|
|
88
|
+
| Key | Default | Description |
|
|
89
|
+
|---|---|---|
|
|
90
|
+
| `provider` | `ollama` | LLM provider |
|
|
91
|
+
| `model` | `llama3.2` | Model name |
|
|
92
|
+
| `ollama_url` | `http://localhost:11434` | Ollama API base URL (Ollama only) |
|
|
93
|
+
| `commit_style` | `conventional` | `conventional` or `free-form` |
|
|
94
|
+
| `emoji` | `false` | Prefix suggestions with gitmoji |
|
|
95
|
+
|
|
96
|
+
### Supported providers
|
|
97
|
+
|
|
98
|
+
| Provider | `provider` value | Example `model` value | API key env var |
|
|
99
|
+
|---|---|---|---|
|
|
100
|
+
| Ollama (local) | `ollama` | `llama3.2`, `mistral` | — |
|
|
101
|
+
| Anthropic | `anthropic` | `claude-sonnet-4-6`, `claude-haiku-4-5-20251001` | `ANTHROPIC_API_KEY` |
|
|
102
|
+
| OpenAI | `openai` | `gpt-4o`, `gpt-4o-mini` | `OPENAI_API_KEY` |
|
|
103
|
+
| Gemini | `gemini` | `gemini-2.0-flash` | `GEMINI_API_KEY` |
|
|
104
|
+
|
|
105
|
+
For cloud providers, set the API key in your shell profile:
|
|
106
|
+
|
|
107
|
+
**bash/zsh** (`~/.bashrc` or `~/.zshrc`):
|
|
108
|
+
```bash
|
|
109
|
+
export ANTHROPIC_API_KEY=sk-ant-...
|
|
110
|
+
```
|
|
111
|
+
|
|
112
|
+
**PowerShell** (`$PROFILE`):
|
|
113
|
+
```powershell
|
|
114
|
+
$env:ANTHROPIC_API_KEY="sk-ant-..."
|
|
115
|
+
```
|
|
116
|
+
|
|
117
|
+
### Example `~/.gitai.toml`
|
|
118
|
+
|
|
119
|
+
```toml
|
|
120
|
+
provider = "anthropic"
|
|
121
|
+
model = "claude-haiku-4-5-20251001"
|
|
122
|
+
commit_style = "conventional"
|
|
123
|
+
emoji = false
|
|
124
|
+
ollama_url = "http://localhost:11434"
|
|
125
|
+
```
|
|
126
|
+
|
|
127
|
+
## Local setup (Ollama)
|
|
128
|
+
|
|
129
|
+
If you want to run fully offline with Ollama:
|
|
130
|
+
|
|
131
|
+
1. Install [Ollama](https://ollama.com/)
|
|
132
|
+
2. Pull a model: `ollama pull llama3.2`
|
|
133
|
+
3. Run `gitai commit` — no API key needed
|
|
134
|
+
|
|
135
|
+
## TODO
|
|
136
|
+
|
|
137
|
+
- [ ] Allow configuring the number of suggestions generated
|
|
138
|
+
- [ ] Add `--push` flag to commit and push in one step
|
|
139
|
+
- [ ] Support unstaged changes with an optional `--all` flag
|
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
# gitai
|
|
2
|
+
|
|
3
|
+
<p align="center">
|
|
4
|
+
<img src="assets/commit-genie.png" alt="The Commit Genie" width="200"/>
|
|
5
|
+
</p>
|
|
6
|
+
|
|
7
|
+
<p align="center">
|
|
8
|
+
<a href="https://pypi.org/project/gitai"><img src="https://img.shields.io/pypi/v/gitai" alt="PyPI version"/></a>
|
|
9
|
+
<a href="https://pypi.org/project/gitai"><img src="https://img.shields.io/pypi/pyversions/gitai" alt="Python versions"/></a>
|
|
10
|
+
<a href="LICENSE"><img src="https://img.shields.io/badge/license-MIT-blue" alt="MIT license"/></a>
|
|
11
|
+
</p>
|
|
12
|
+
|
|
13
|
+
AI-powered git commit message generator. Analyzes your staged changes and suggests meaningful commit messages — using any LLM you already have access to.
|
|
14
|
+
|
|
15
|
+
## Features
|
|
16
|
+
|
|
17
|
+
- Reads your staged `git diff` and generates 3 commit message suggestions
|
|
18
|
+
- Interactive selection: pick a suggestion or write your own
|
|
19
|
+
- Supports multiple providers: Ollama (local), OpenAI, Anthropic, Gemini, and [more](https://docs.litellm.ai/docs/providers)
|
|
20
|
+
- Two commit styles: [Conventional Commits](https://www.conventionalcommits.org/) or free-form
|
|
21
|
+
- Optional emoji (gitmoji) support
|
|
22
|
+
|
|
23
|
+
## Installation
|
|
24
|
+
|
|
25
|
+
```bash
|
|
26
|
+
pip install gitai-cli
|
|
27
|
+
```
|
|
28
|
+
|
|
29
|
+
Requires Python 3.11+.
|
|
30
|
+
|
|
31
|
+
## Quick start
|
|
32
|
+
|
|
33
|
+
```bash
|
|
34
|
+
# 1. Stage your changes
|
|
35
|
+
git add .
|
|
36
|
+
|
|
37
|
+
# 2. Run gitai
|
|
38
|
+
gitai commit
|
|
39
|
+
```
|
|
40
|
+
|
|
41
|
+
gitai reads the diff, calls your configured LLM, and presents 3 suggestions to choose from.
|
|
42
|
+
|
|
43
|
+
## Usage
|
|
44
|
+
|
|
45
|
+
```
|
|
46
|
+
gitai commit Generate commit message suggestions for staged changes
|
|
47
|
+
gitai config View and update settings
|
|
48
|
+
gitai --version Show version
|
|
49
|
+
gitai --help Show help
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
## Configuration
|
|
53
|
+
|
|
54
|
+
Run `gitai config` to update settings interactively. Settings are stored in `~/.gitai.toml`.
|
|
55
|
+
|
|
56
|
+
| Key | Default | Description |
|
|
57
|
+
|---|---|---|
|
|
58
|
+
| `provider` | `ollama` | LLM provider |
|
|
59
|
+
| `model` | `llama3.2` | Model name |
|
|
60
|
+
| `ollama_url` | `http://localhost:11434` | Ollama API base URL (Ollama only) |
|
|
61
|
+
| `commit_style` | `conventional` | `conventional` or `free-form` |
|
|
62
|
+
| `emoji` | `false` | Prefix suggestions with gitmoji |
|
|
63
|
+
|
|
64
|
+
### Supported providers
|
|
65
|
+
|
|
66
|
+
| Provider | `provider` value | Example `model` value | API key env var |
|
|
67
|
+
|---|---|---|---|
|
|
68
|
+
| Ollama (local) | `ollama` | `llama3.2`, `mistral` | — |
|
|
69
|
+
| Anthropic | `anthropic` | `claude-sonnet-4-6`, `claude-haiku-4-5-20251001` | `ANTHROPIC_API_KEY` |
|
|
70
|
+
| OpenAI | `openai` | `gpt-4o`, `gpt-4o-mini` | `OPENAI_API_KEY` |
|
|
71
|
+
| Gemini | `gemini` | `gemini-2.0-flash` | `GEMINI_API_KEY` |
|
|
72
|
+
|
|
73
|
+
For cloud providers, set the API key in your shell profile:
|
|
74
|
+
|
|
75
|
+
**bash/zsh** (`~/.bashrc` or `~/.zshrc`):
|
|
76
|
+
```bash
|
|
77
|
+
export ANTHROPIC_API_KEY=sk-ant-...
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
**PowerShell** (`$PROFILE`):
|
|
81
|
+
```powershell
|
|
82
|
+
$env:ANTHROPIC_API_KEY="sk-ant-..."
|
|
83
|
+
```
|
|
84
|
+
|
|
85
|
+
### Example `~/.gitai.toml`
|
|
86
|
+
|
|
87
|
+
```toml
|
|
88
|
+
provider = "anthropic"
|
|
89
|
+
model = "claude-haiku-4-5-20251001"
|
|
90
|
+
commit_style = "conventional"
|
|
91
|
+
emoji = false
|
|
92
|
+
ollama_url = "http://localhost:11434"
|
|
93
|
+
```
|
|
94
|
+
|
|
95
|
+
## Local setup (Ollama)
|
|
96
|
+
|
|
97
|
+
If you want to run fully offline with Ollama:
|
|
98
|
+
|
|
99
|
+
1. Install [Ollama](https://ollama.com/)
|
|
100
|
+
2. Pull a model: `ollama pull llama3.2`
|
|
101
|
+
3. Run `gitai commit` — no API key needed
|
|
102
|
+
|
|
103
|
+
## TODO
|
|
104
|
+
|
|
105
|
+
- [ ] Allow configuring the number of suggestions generated
|
|
106
|
+
- [ ] Add `--push` flag to commit and push in one step
|
|
107
|
+
- [ ] Support unstaged changes with an optional `--all` flag
|
|
Binary file
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# Single source of truth for the package version; surfaced by `gitai --version`.
__version__ = "0.1.0"
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
import litellm
|
|
2
|
+
from litellm.exceptions import APIConnectionError, AuthenticationError
|
|
3
|
+
from gitai.config import load_config
|
|
4
|
+
|
|
5
|
+
def get_commit_suggestions(prompt: str) -> list[str]:
    """Send *prompt* to the configured LLM and return the numbered suggestion lines.

    Routes the request through litellm as "{provider}/{model}"; for Ollama the
    configured base URL is passed as ``api_base``. Exits the process with a
    friendly message on connection or authentication failures.
    """
    config = load_config()

    provider = config["provider"].lower()
    model = config["model"]
    model_string = f"{provider}/{model}"

    kwargs = {
        "model": model_string,
        "messages": [{"role": "user", "content": prompt}],
    }

    # Only Ollama needs an explicit endpoint; cloud providers use litellm defaults.
    if provider == "ollama":
        kwargs["api_base"] = config["ollama_url"]

    try:
        response = litellm.completion(**kwargs)
    except APIConnectionError:
        if provider == "ollama":
            raise SystemExit(
                f"[gitai] Could not connect to Ollama at {config['ollama_url']}. "
                "Is Ollama running? Try: ollama serve"
            )
        raise SystemExit(
            f"[gitai] Could not connect to the {provider} API. "
            "Check your network connection and try again."
        )
    except AuthenticationError:
        key_map = {
            "openai": "OPENAI_API_KEY",
            "anthropic": "ANTHROPIC_API_KEY",
            "gemini": "GEMINI_API_KEY",
        }
        env_var = key_map.get(provider, f"{provider.upper()}_API_KEY")
        raise SystemExit(
            f"[gitai] Authentication failed for {provider}. "
            f"Set the {env_var} environment variable and try again."
        )

    text = response.choices[0].message.content.strip()
    return _parse_numbered_lines(text)


def _parse_numbered_lines(text: str) -> list[str]:
    """Extract lines that look like numbered list items ("1. ...", "12. ...").

    Fix: the previous check only required *some* "." anywhere in a line whose
    first character is a digit, so prose such as "2024 was busy." was misparsed
    as a suggestion. Now every character before the first "." must be a digit.
    """
    suggestions = []
    for line in text.splitlines():
        line = line.strip()
        number, dot, _rest = line.partition(".")
        if dot and number.isdigit():
            suggestions.append(line)
    return suggestions
|
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
from typing import Optional
|
|
2
|
+
import typer
|
|
3
|
+
import questionary
|
|
4
|
+
import subprocess
|
|
5
|
+
from gitai.config import load_config, save_config
|
|
6
|
+
from gitai.git import get_staged_diff, get_repo_name, is_diff_meaningful
|
|
7
|
+
from gitai.prompt import build_commit_prompt
|
|
8
|
+
from gitai.ai import get_commit_suggestions
|
|
9
|
+
from gitai import __version__
|
|
10
|
+
|
|
11
|
+
VALID_PROVIDERS = {"ollama", "openai", "anthropic", "gemini"}
|
|
12
|
+
VALID_COMMIT_STYLES = {"conventional", "free-form"}
|
|
13
|
+
|
|
14
|
+
app = typer.Typer()
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def _version_callback(value: bool):
    """Eager --version handler: print the version and stop CLI processing."""
    if not value:
        return
    typer.echo(f"gitai {__version__}")
    raise typer.Exit()
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
@app.callback()
def main(
    version: Optional[bool] = typer.Option(
        None, "--version", "-V",
        # Eager option: _version_callback runs (and exits) before any subcommand.
        callback=_version_callback,
        is_eager=True,
        help="Show version and exit.",
    ),
):
    """AI-powered git commit message generator."""
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
@app.command()
def commit():
    """Generate AI-powered commit message suggestions for staged changes."""
    typer.echo("🔍 Reading your git diff...")

    diff = get_staged_diff()
    if not diff:
        typer.echo("No staged changes found. Please stage your changes before committing.")
        raise typer.Exit(code=1)

    # Whitespace-only diffs are not worth an LLM round-trip; fall back to manual entry.
    if not is_diff_meaningful(diff):
        typer.echo("Staged file appears to be empty or has no content changes.")
        chosen = typer.prompt("Enter your commit message")
        _git_commit(chosen)
        raise typer.Exit()

    config = load_config()
    repo_name = get_repo_name()
    prompt = build_commit_prompt(diff, repo_name, emoji=config["emoji"], commit_style=config["commit_style"])

    typer.echo("Generating commit message suggestions...")
    suggestions = get_commit_suggestions(prompt)

    if not suggestions:
        typer.echo("No suggestions generated. Please try again.")
        raise typer.Exit()

    # Drop the "1. " list prefix the model was instructed to produce.
    clean = [s.split(". ", 1)[1] if ". " in s else s for s in suggestions]
    clean.append("Write my own")

    chosen = questionary.select(
        "Choose a commit message:",
        choices=clean
    ).ask()

    if chosen is None:  # user cancelled the selector (Ctrl-C / Esc)
        typer.echo("Aborted.")
        raise typer.Exit()

    if chosen == "Write my own":
        chosen = typer.prompt("Enter your custom commit message")

    _git_commit(chosen)


def _git_commit(message: str) -> None:
    """Run `git commit -m <message>` and report the outcome.

    Fix: the return code was previously ignored, so a failed commit (e.g. a
    rejecting pre-commit hook) still printed a success message.
    """
    result = subprocess.run(["git", "commit", "-m", message])
    if result.returncode != 0:
        typer.echo("\n[gitai] git commit failed.")
        raise typer.Exit(code=1)
    typer.echo(f"\n Committed: {message}")
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
@app.command()
def config():
    """View and update gitai settings."""
    current = load_config()

    typer.echo("Current configuration:\n")
    for key, value in current.items():
        typer.echo(f"  {key}: {value}")

    typer.echo("")
    if not typer.confirm("Do you want to change any settings?"):
        raise typer.Exit()

    # Normalize case/whitespace before validating, so "Ollama" or " openai "
    # are accepted — consistent with ai.py, which lowercases the provider.
    provider = typer.prompt(
        "Provider (ollama, openai, anthropic, gemini)",
        default=current["provider"],
    ).strip().lower()
    if provider not in VALID_PROVIDERS:
        typer.echo(f"[gitai] Unknown provider '{provider}'. Choose from: {', '.join(sorted(VALID_PROVIDERS))}")
        raise typer.Exit(code=1)

    model = typer.prompt("Model name", default=current["model"])

    # The Ollama URL is only relevant for the local provider.
    ollama_url = current["ollama_url"]
    if provider == "ollama":
        ollama_url = typer.prompt("Ollama URL", default=current["ollama_url"])

    commit_style = typer.prompt(
        "Commit style (conventional, free-form)",
        default=current["commit_style"],
    ).strip().lower()
    if commit_style not in VALID_COMMIT_STYLES:
        typer.echo(f"[gitai] Unknown commit style '{commit_style}'. Choose from: {', '.join(sorted(VALID_COMMIT_STYLES))}")
        raise typer.Exit(code=1)

    emoji = typer.confirm("Use emojis in commit messages?", default=current["emoji"])

    new_config = {
        "model": model,
        "provider": provider,
        "ollama_url": ollama_url,
        "commit_style": commit_style,
        "emoji": emoji,
    }

    save_config(new_config)
    typer.echo("\n✅ Config saved to ~/.gitai.toml")
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
if __name__ == "__main__":
    # Allow running the module directly in addition to the installed
    # `gitai` console script.
    app()
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
import tomllib
|
|
2
|
+
import tomli_w
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
# User config lives in the home directory so it applies across repositories.
CONFIG_PATH = Path.home() / ".gitai.toml"

# Baseline settings; values read from ~/.gitai.toml are merged over these.
DEFAULT_CONFIG = {
    "model": "llama3.2",
    "provider": "ollama",
    "ollama_url": "http://localhost:11434",
    "commit_style": "conventional",
    "emoji": False,
}
|
|
14
|
+
|
|
15
|
+
def load_config() -> dict:
    """Load settings from ~/.gitai.toml, merged over DEFAULT_CONFIG.

    Creates the file with defaults on first run. Exits with a friendly
    message instead of a raw traceback when the file is not valid TOML.
    """
    if not CONFIG_PATH.exists():
        save_config(DEFAULT_CONFIG)
        return DEFAULT_CONFIG.copy()

    try:
        with open(CONFIG_PATH, "rb") as f:
            user_config = tomllib.load(f)
    except tomllib.TOMLDecodeError as err:
        # Fix: a hand-edited, corrupt config previously crashed with a traceback.
        raise SystemExit(
            f"[gitai] Could not parse {CONFIG_PATH}: {err}. "
            "Fix or delete the file and try again."
        )

    # User keys win; keys missing from the file fall back to defaults.
    return {**DEFAULT_CONFIG, **user_config}
|
|
24
|
+
|
|
25
|
+
def save_config(config: dict) -> None:
    """Write *config* to ~/.gitai.toml, replacing any existing file."""
    with CONFIG_PATH.open("wb") as fh:
        tomli_w.dump(config, fh)
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
import subprocess
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
|
|
4
|
+
def get_staged_diff() -> str:
    """Return the staged diff (`git diff --cached`) as text.

    Fix: the exit status was previously ignored, so running outside a git
    repository looked identical to having no staged changes. A failing git
    invocation now aborts with git's own error message.
    """
    result = subprocess.run(
        ["git", "diff", "--cached"],
        capture_output=True,
        text=True,
        encoding="utf-8",
    )
    if result.returncode != 0:
        raise SystemExit(f"[gitai] git diff failed: {result.stderr.strip()}")
    return result.stdout
|
|
12
|
+
|
|
13
|
+
def get_repo_name() -> str:
    """Return the current working directory's name, used as the repo name."""
    return Path.cwd().name
|
|
16
|
+
|
|
17
|
+
def is_diff_meaningful(diff: str) -> bool:
    """Return True when the diff contains at least one real content change.

    Filters out diff headers ("+++", "---") and whitespace-only add/remove
    lines, so a purely cosmetic diff is reported as not meaningful.
    """
    def _is_content_change(line: str) -> bool:
        if not line.startswith(("+", "-")):
            return False
        if line.startswith(("+++", "---")):
            return False
        return line.strip() not in ("+", "-", "")

    return any(_is_content_change(line) for line in diff.splitlines())
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
def build_commit_prompt(diff: str, repo_name: str, emoji: bool = False, commit_style: str = "conventional") -> str:
    """Assemble the full LLM prompt requesting exactly 3 commit suggestions."""
    rules = _build_format_rules(commit_style, emoji)
    sample = _build_example(commit_style, emoji)

    return f"""You are an expert developer generating git commit messages.

Repository: {repo_name}

Analyze the git diff below and return exactly 3 commit message suggestions.

Rules:
{rules}
- Each suggestion must offer a meaningfully different angle — vary the type, scope, or emphasis
- Base suggestions strictly on what you see in the diff — never invent context
- Output: a numbered list of exactly 3 lines, no intro, no explanation, nothing else

Example output:
{sample}

Git diff:
{diff}
"""
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def _build_format_rules(commit_style: str, emoji: bool) -> str:
|
|
26
|
+
if commit_style == "free-form":
|
|
27
|
+
rules = (
|
|
28
|
+
"- Format: a single clear imperative sentence, no type prefix required\n"
|
|
29
|
+
"- Keep the message under 72 characters\n"
|
|
30
|
+
'- Imperative mood ("add" not "added"), lowercase, no trailing period'
|
|
31
|
+
)
|
|
32
|
+
if emoji:
|
|
33
|
+
rules += "\n- Prefix each message with a relevant emoji that reflects the nature of the change"
|
|
34
|
+
else: # conventional (default)
|
|
35
|
+
rules = (
|
|
36
|
+
"- Format: type(scope): description\n"
|
|
37
|
+
"- Types: feat, fix, refactor, chore, docs, style, test, perf, ci, build\n"
|
|
38
|
+
"- Scope: infer from the file paths or module names changed; omit if unclear\n"
|
|
39
|
+
'- Description: imperative mood ("add" not "added"), lowercase, no trailing period, under 72 characters'
|
|
40
|
+
)
|
|
41
|
+
if emoji:
|
|
42
|
+
rules += (
|
|
43
|
+
"\n- Prefix each message with the matching gitmoji before the type: "
|
|
44
|
+
"✨ feat, 🐛 fix, ♻️ refactor, 🔧 chore, 📝 docs, 🎨 style, ✅ test, ⚡ perf, 👷 ci, 📦 build"
|
|
45
|
+
)
|
|
46
|
+
return rules
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def _build_example(commit_style: str, emoji: bool) -> str:
|
|
50
|
+
if commit_style == "free-form":
|
|
51
|
+
if emoji:
|
|
52
|
+
return (
|
|
53
|
+
"1. ✨ add refresh token rotation on session expiry\n"
|
|
54
|
+
"2. 🐛 prevent reuse of invalidated tokens\n"
|
|
55
|
+
"3. ♻️ extract token validation into dedicated service"
|
|
56
|
+
)
|
|
57
|
+
return (
|
|
58
|
+
"1. add refresh token rotation on session expiry\n"
|
|
59
|
+
"2. prevent reuse of invalidated tokens\n"
|
|
60
|
+
"3. extract token validation into dedicated service"
|
|
61
|
+
)
|
|
62
|
+
else: # conventional
|
|
63
|
+
if emoji:
|
|
64
|
+
return (
|
|
65
|
+
"1. ✨ feat(auth): add refresh token rotation on expiry\n"
|
|
66
|
+
"2. 🐛 fix(auth): prevent reuse of invalidated tokens\n"
|
|
67
|
+
"3. ♻️ refactor(auth): extract token validation into dedicated service"
|
|
68
|
+
)
|
|
69
|
+
return (
|
|
70
|
+
"1. feat(auth): add refresh token rotation on expiry\n"
|
|
71
|
+
"2. fix(auth): prevent reuse of invalidated tokens\n"
|
|
72
|
+
"3. refactor(auth): extract token validation into dedicated service"
|
|
73
|
+
)
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "gitai-cli"
|
|
3
|
+
version = "0.1.0"
|
|
4
|
+
description = "AI-powered git commit message generator"
|
|
5
|
+
readme = "README.md"
|
|
6
|
+
license = { text = "MIT" }
|
|
7
|
+
authors = [{ name = "Jeranguz" }]
|
|
8
|
+
requires-python = ">=3.11"
|
|
9
|
+
keywords = ["git", "ai", "commit", "cli", "llm", "conventional-commits"]
|
|
10
|
+
classifiers = [
|
|
11
|
+
"Development Status :: 3 - Alpha",
|
|
12
|
+
"Environment :: Console",
|
|
13
|
+
"Intended Audience :: Developers",
|
|
14
|
+
"License :: OSI Approved :: MIT License",
|
|
15
|
+
"Operating System :: OS Independent",
|
|
16
|
+
"Programming Language :: Python :: 3",
|
|
17
|
+
"Programming Language :: Python :: 3.11",
|
|
18
|
+
"Programming Language :: Python :: 3.12",
|
|
19
|
+
"Topic :: Software Development :: Version Control :: Git",
|
|
20
|
+
"Topic :: Utilities",
|
|
21
|
+
]
|
|
22
|
+
dependencies = [
|
|
23
|
+
"typer",
|
|
24
|
+
"rich",
|
|
25
|
+
"gitpython",
|
|
26
|
+
"litellm",
|
|
27
|
+
"questionary",
|
|
28
|
+
"tomli-w",
|
|
29
|
+
]
|
|
30
|
+
|
|
31
|
+
[project.optional-dependencies]
|
|
32
|
+
dev = ["pytest"]
|
|
33
|
+
|
|
34
|
+
[project.urls]
|
|
35
|
+
Homepage = "https://github.com/Jeranguz/gitai"
|
|
36
|
+
Repository = "https://github.com/Jeranguz/gitai"
|
|
37
|
+
Issues = "https://github.com/Jeranguz/gitai/issues"
|
|
38
|
+
|
|
39
|
+
[project.scripts]
|
|
40
|
+
gitai = "gitai.cli:app"
|
|
41
|
+
|
|
42
|
+
[tool.hatch.build.targets.wheel]
|
|
43
|
+
packages = ["gitai"]
|
|
44
|
+
|
|
45
|
+
[build-system]
|
|
46
|
+
requires = ["hatchling"]
|
|
47
|
+
build-backend = "hatchling.build"
|
|
File without changes
|
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
from unittest.mock import patch, MagicMock
|
|
2
|
+
import pytest
|
|
3
|
+
from gitai.ai import get_commit_suggestions
|
|
4
|
+
from litellm.exceptions import APIConnectionError, AuthenticationError
|
|
5
|
+
|
|
6
|
+
# Minimal config dicts covering only the keys gitai.ai reads
# (provider, model, ollama_url).
FAKE_CONFIG_OLLAMA = {
    "provider": "ollama",
    "model": "llama3.2",
    "ollama_url": "http://localhost:11434",
}

FAKE_CONFIG_OPENAI = {
    "provider": "openai",
    "model": "gpt-4o",
    "ollama_url": "http://localhost:11434",
}

FAKE_CONFIG_ANTHROPIC = {
    "provider": "anthropic",
    "model": "claude-haiku-4-5-20251001",
    "ollama_url": "http://localhost:11434",
}
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def make_response(text: str):
    """Fake a litellm completion response whose first choice's content is *text*."""
    fake = MagicMock()
    fake.choices[0].message.content = text
    return fake
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
# --- suggestion parsing ---
|
|
32
|
+
|
|
33
|
+
@patch("gitai.ai.load_config", return_value=FAKE_CONFIG_OLLAMA)
@patch("gitai.ai.litellm.completion")
def test_parses_three_numbered_suggestions(mock_completion, _):
    # A well-formed 3-line numbered response yields exactly 3 suggestions.
    mock_completion.return_value = make_response(
        "1. feat(cli): add commit command\n"
        "2. fix(cli): handle empty diff\n"
        "3. chore(cli): clean up imports"
    )
    result = get_commit_suggestions("prompt")
    assert len(result) == 3
|
|
43
|
+
|
|
44
|
+
@patch("gitai.ai.load_config", return_value=FAKE_CONFIG_OLLAMA)
@patch("gitai.ai.litellm.completion")
def test_preserves_full_suggestion_text(mock_completion, _):
    # Suggestions keep the "N. " prefix — stripping happens in cli.py, not ai.py.
    mock_completion.return_value = make_response(
        "1. feat(cli): add commit command\n"
        "2. fix(cli): handle empty diff\n"
        "3. chore(cli): clean up imports"
    )
    result = get_commit_suggestions("prompt")
    assert result[0] == "1. feat(cli): add commit command"
    assert result[1] == "2. fix(cli): handle empty diff"
|
|
55
|
+
|
|
56
|
+
@patch("gitai.ai.load_config", return_value=FAKE_CONFIG_OLLAMA)
@patch("gitai.ai.litellm.completion")
def test_ignores_preamble_and_trailing_lines(mock_completion, _):
    # Chatty models add intro/outro prose; only the numbered lines survive.
    mock_completion.return_value = make_response(
        "Here are your suggestions:\n"
        "1. feat(cli): add commit command\n"
        "2. fix(cli): handle empty diff\n"
        "3. chore(cli): clean up imports\n"
        "Hope these help!"
    )
    result = get_commit_suggestions("prompt")
    assert len(result) == 3
    assert all(r[0].isdigit() for r in result)
|
|
69
|
+
|
|
70
|
+
@patch("gitai.ai.load_config", return_value=FAKE_CONFIG_OLLAMA)
@patch("gitai.ai.litellm.completion")
def test_returns_empty_list_when_no_numbered_lines(mock_completion, _):
    # A response with no numbered items parses to [] (cli.py handles that case).
    mock_completion.return_value = make_response("Nothing useful here.")
    result = get_commit_suggestions("prompt")
    assert result == []
|
|
76
|
+
|
|
77
|
+
@patch("gitai.ai.load_config", return_value=FAKE_CONFIG_OLLAMA)
@patch("gitai.ai.litellm.completion")
def test_strips_whitespace_from_lines(mock_completion, _):
    # Leading/trailing whitespace on each line is stripped before matching.
    mock_completion.return_value = make_response(
        "  1. feat(cli): add commit command  \n"
        "  2. fix(cli): handle empty diff  \n"
        "  3. chore(cli): clean up imports  "
    )
    result = get_commit_suggestions("prompt")
    assert result[0] == "1. feat(cli): add commit command"
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
# --- model string and api_base ---
|
|
90
|
+
|
|
91
|
+
@patch("gitai.ai.load_config", return_value=FAKE_CONFIG_OLLAMA)
@patch("gitai.ai.litellm.completion")
def test_ollama_model_string_format(mock_completion, _):
    # litellm expects "provider/model" model strings.
    mock_completion.return_value = make_response("1. feat: add thing")
    get_commit_suggestions("prompt")
    assert mock_completion.call_args.kwargs["model"] == "ollama/llama3.2"
|
|
97
|
+
|
|
98
|
+
@patch("gitai.ai.load_config", return_value=FAKE_CONFIG_OPENAI)
|
|
99
|
+
@patch("gitai.ai.litellm.completion")
|
|
100
|
+
def test_openai_model_string_format(mock_completion, _):
|
|
101
|
+
mock_completion.return_value = make_response("1. feat: add thing")
|
|
102
|
+
get_commit_suggestions("prompt")
|
|
103
|
+
assert mock_completion.call_args.kwargs["model"] == "openai/gpt-4o"
|
|
104
|
+
|
|
105
|
+
@patch("gitai.ai.load_config", return_value=FAKE_CONFIG_OLLAMA)
|
|
106
|
+
@patch("gitai.ai.litellm.completion")
|
|
107
|
+
def test_ollama_passes_api_base(mock_completion, _):
|
|
108
|
+
mock_completion.return_value = make_response("1. feat: add thing")
|
|
109
|
+
get_commit_suggestions("prompt")
|
|
110
|
+
assert mock_completion.call_args.kwargs.get("api_base") == "http://localhost:11434"
|
|
111
|
+
|
|
112
|
+
@patch("gitai.ai.load_config", return_value=FAKE_CONFIG_OPENAI)
|
|
113
|
+
@patch("gitai.ai.litellm.completion")
|
|
114
|
+
def test_non_ollama_omits_api_base(mock_completion, _):
|
|
115
|
+
mock_completion.return_value = make_response("1. feat: add thing")
|
|
116
|
+
get_commit_suggestions("prompt")
|
|
117
|
+
assert "api_base" not in mock_completion.call_args.kwargs
|
|
118
|
+
|
|
119
|
+
@patch("gitai.ai.load_config", return_value={**FAKE_CONFIG_OLLAMA, "provider": "OLLAMA"})
|
|
120
|
+
@patch("gitai.ai.litellm.completion")
|
|
121
|
+
def test_provider_casing_is_normalized(mock_completion, _):
|
|
122
|
+
mock_completion.return_value = make_response("1. feat: add thing")
|
|
123
|
+
get_commit_suggestions("prompt")
|
|
124
|
+
assert mock_completion.call_args.kwargs["model"].startswith("ollama/")
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
# --- error handling ---
|
|
128
|
+
|
|
129
|
+
@patch("gitai.ai.load_config", return_value=FAKE_CONFIG_OLLAMA)
@patch("gitai.ai.litellm.completion")
def test_ollama_connection_error_mentions_ollama_serve(mock_completion, _):
    """A refused connection should tell the user to run `ollama serve`."""
    mock_completion.side_effect = APIConnectionError(
        message="connection refused", llm_provider="ollama", model="llama3.2"
    )
    with pytest.raises(SystemExit, match="ollama serve"):
        get_commit_suggestions("prompt")


@patch("gitai.ai.load_config", return_value=FAKE_CONFIG_OLLAMA)
@patch("gitai.ai.litellm.completion")
def test_ollama_connection_error_mentions_url(mock_completion, _):
    """The connection-error message includes the local server address."""
    mock_completion.side_effect = APIConnectionError(
        message="connection refused", llm_provider="ollama", model="llama3.2"
    )
    with pytest.raises(SystemExit, match="localhost:11434"):
        get_commit_suggestions("prompt")


@patch("gitai.ai.load_config", return_value=FAKE_CONFIG_OPENAI)
@patch("gitai.ai.litellm.completion")
def test_openai_auth_error_suggests_api_key_var(mock_completion, _):
    """OpenAI auth failures point at the OPENAI_API_KEY env var."""
    mock_completion.side_effect = AuthenticationError(
        message="invalid key", llm_provider="openai", model="gpt-4o"
    )
    with pytest.raises(SystemExit, match="OPENAI_API_KEY"):
        get_commit_suggestions("prompt")


@patch("gitai.ai.load_config", return_value=FAKE_CONFIG_ANTHROPIC)
@patch("gitai.ai.litellm.completion")
def test_anthropic_auth_error_suggests_api_key_var(mock_completion, _):
    """Anthropic auth failures point at the ANTHROPIC_API_KEY env var."""
    mock_completion.side_effect = AuthenticationError(
        message="invalid key", llm_provider="anthropic", model="claude-haiku-4-5-20251001"
    )
    with pytest.raises(SystemExit, match="ANTHROPIC_API_KEY"):
        get_commit_suggestions("prompt")
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
from unittest.mock import patch
|
|
2
|
+
from gitai.config import load_config, save_config, DEFAULT_CONFIG
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
# --- load_config ---
|
|
6
|
+
|
|
7
|
+
def test_returns_defaults_when_no_file(tmp_path):
    """With no config file on disk, the defaults come back verbatim."""
    cfg_file = tmp_path / ".gitai.toml"
    with patch("gitai.config.CONFIG_PATH", cfg_file):
        loaded = load_config()
    assert loaded == DEFAULT_CONFIG


def test_creates_config_file_when_missing(tmp_path):
    """Loading bootstraps the config file if it does not exist yet."""
    cfg_file = tmp_path / ".gitai.toml"
    with patch("gitai.config.CONFIG_PATH", cfg_file):
        load_config()
    assert cfg_file.exists()


def test_merges_partial_user_config_with_defaults(tmp_path):
    """User-set keys win; everything else falls back to the defaults."""
    cfg_file = tmp_path / ".gitai.toml"
    cfg_file.write_bytes(b'model = "gpt-4o"\n')
    with patch("gitai.config.CONFIG_PATH", cfg_file):
        loaded = load_config()
    assert loaded["model"] == "gpt-4o"
    assert loaded["provider"] == DEFAULT_CONFIG["provider"]
    assert loaded["ollama_url"] == DEFAULT_CONFIG["ollama_url"]


def test_user_provider_overrides_default(tmp_path):
    """A provider set in the file replaces the default provider."""
    cfg_file = tmp_path / ".gitai.toml"
    cfg_file.write_bytes(b'provider = "anthropic"\n')
    with patch("gitai.config.CONFIG_PATH", cfg_file):
        loaded = load_config()
    assert loaded["provider"] == "anthropic"


def test_user_emoji_true_overrides_default(tmp_path):
    """emoji = true in the file flips the boolean on."""
    cfg_file = tmp_path / ".gitai.toml"
    cfg_file.write_bytes(b'emoji = true\n')
    with patch("gitai.config.CONFIG_PATH", cfg_file):
        loaded = load_config()
    assert loaded["emoji"] is True
|
41
|
+
|
|
42
|
+
|
|
43
|
+
# --- save_config / roundtrip ---
|
|
44
|
+
|
|
45
|
+
def test_save_and_load_roundtrip(tmp_path):
    """Values written by save_config survive a subsequent load_config."""
    cfg_file = tmp_path / ".gitai.toml"
    custom = {**DEFAULT_CONFIG, "model": "mistral", "emoji": True}
    with patch("gitai.config.CONFIG_PATH", cfg_file):
        save_config(custom)
        loaded = load_config()
    assert loaded["model"] == "mistral"
    assert loaded["emoji"] is True


def test_save_writes_all_default_keys(tmp_path):
    """Every default key is persisted — no key is silently dropped."""
    cfg_file = tmp_path / ".gitai.toml"
    with patch("gitai.config.CONFIG_PATH", cfg_file):
        save_config(DEFAULT_CONFIG)
        loaded = load_config()
    assert set(loaded.keys()) == set(DEFAULT_CONFIG.keys())


def test_save_creates_file(tmp_path):
    """save_config creates the config file when it is absent."""
    cfg_file = tmp_path / ".gitai.toml"
    with patch("gitai.config.CONFIG_PATH", cfg_file):
        save_config(DEFAULT_CONFIG)
    assert cfg_file.exists()
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
from unittest.mock import patch, MagicMock
|
|
2
|
+
from gitai.git import is_diff_meaningful, get_staged_diff, get_repo_name
|
|
3
|
+
|
|
4
|
+
MEANINGFUL_DIFF = """\
|
|
5
|
+
diff --git a/foo.py b/foo.py
|
|
6
|
+
index abc..def 100644
|
|
7
|
+
--- a/foo.py
|
|
8
|
+
+++ b/foo.py
|
|
9
|
+
@@ -1,3 +1,4 @@
|
|
10
|
+
def foo():
|
|
11
|
+
+ return 42
|
|
12
|
+
- pass
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
# --- is_diff_meaningful ---
|
|
17
|
+
|
|
18
|
+
def test_meaningful_diff_returns_true():
    """A diff with genuine code additions/removals counts as meaningful."""
    assert is_diff_meaningful(MEANINGFUL_DIFF) is True


def test_empty_string_returns_false():
    """No diff at all is never meaningful."""
    assert is_diff_meaningful("") is False


def test_only_header_lines_returns_false():
    """---/+++ file headers alone do not make a meaningful diff."""
    header_only = "--- a/foo.py\n+++ b/foo.py\n"
    assert is_diff_meaningful(header_only) is False


def test_blank_added_line_returns_false():
    """Adding an empty line is treated as noise."""
    blank_add = "--- a/foo.py\n+++ b/foo.py\n+\n"
    assert is_diff_meaningful(blank_add) is False


def test_blank_removed_line_returns_false():
    """Removing an empty line is treated as noise."""
    blank_remove = "--- a/foo.py\n+++ b/foo.py\n-\n"
    assert is_diff_meaningful(blank_remove) is False


def test_single_added_line_returns_true():
    """One real added line is enough to be meaningful."""
    assert is_diff_meaningful("+def new_function(): pass") is True


def test_single_removed_line_returns_true():
    """One real removed line is enough to be meaningful."""
    assert is_diff_meaningful("-old_code = True") is True


def test_only_whitespace_change_returns_false():
    """Pure whitespace additions/removals are filtered out."""
    whitespace_only = "+ \n- \n"
    assert is_diff_meaningful(whitespace_only) is False
|
47
|
+
|
|
48
|
+
|
|
49
|
+
# --- get_staged_diff ---
|
|
50
|
+
|
|
51
|
+
def test_get_staged_diff_returns_stdout():
    """get_staged_diff hands back git's stdout untouched."""
    fake_diff = "diff --git a/foo.py b/foo.py\n+change"
    with patch("gitai.git.subprocess.run") as mock_run:
        mock_run.return_value = MagicMock(stdout=fake_diff)
        assert get_staged_diff() == fake_diff


def test_get_staged_diff_calls_correct_command():
    """Exactly one `git diff --cached` subprocess call, text-mode UTF-8."""
    with patch("gitai.git.subprocess.run") as mock_run:
        mock_run.return_value = MagicMock(stdout="")
        get_staged_diff()
    mock_run.assert_called_once_with(
        ["git", "diff", "--cached"],
        capture_output=True,
        text=True,
        encoding="utf-8",
    )


def test_get_staged_diff_returns_empty_string_when_nothing_staged():
    """No staged changes yields an empty string, not None."""
    with patch("gitai.git.subprocess.run") as mock_run:
        mock_run.return_value = MagicMock(stdout="")
        assert get_staged_diff() == ""
|
73
|
+
|
|
74
|
+
|
|
75
|
+
# --- get_repo_name ---
|
|
76
|
+
|
|
77
|
+
def test_get_repo_name_returns_string():
    """Repo-name lookup always yields a str."""
    name = get_repo_name()
    assert isinstance(name, str)


def test_get_repo_name_returns_non_empty():
    """Repo-name lookup never yields an empty string."""
    name = get_repo_name()
    assert len(name) > 0
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
from gitai.prompt import build_commit_prompt, _build_format_rules, _build_example
|
|
2
|
+
|
|
3
|
+
SAMPLE_DIFF = "diff --git a/foo.py b/foo.py\n+def bar(): pass"
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
# --- _build_format_rules ---
|
|
7
|
+
|
|
8
|
+
def test_conventional_no_emoji_has_type_format():
    """Conventional rules spell out the type(scope) format and type list."""
    conventional = _build_format_rules("conventional", False)
    assert "type(scope): description" in conventional
    assert "feat, fix" in conventional


def test_conventional_no_emoji_has_no_gitmoji():
    """Without emoji enabled, no gitmoji characters leak into the rules."""
    conventional = _build_format_rules("conventional", False)
    assert "✨" not in conventional


def test_conventional_with_emoji_includes_gitmoji_map():
    """Emoji mode adds the type→gitmoji mapping entries."""
    with_emoji = _build_format_rules("conventional", True)
    assert "✨ feat" in with_emoji
    assert "🐛 fix" in with_emoji
    assert "♻️ refactor" in with_emoji


def test_freeform_no_emoji_omits_type_prefix():
    """Free-form rules drop type(scope) but still demand imperative mood."""
    freeform = _build_format_rules("free-form", False)
    assert "type(scope)" not in freeform
    assert "imperative mood" in freeform.lower()


def test_freeform_with_emoji_includes_emoji_instruction():
    """Free-form emoji mode mentions emoji somewhere in the rules."""
    freeform = _build_format_rules("free-form", True)
    assert "emoji" in freeform.lower()


def test_freeform_with_emoji_omits_gitmoji_type_map():
    """Free-form emoji guidance is generic — no per-type gitmoji map."""
    freeform = _build_format_rules("free-form", True)
    assert "✨ feat" not in freeform
|
|
37
|
+
|
|
38
|
+
# --- _build_example ---
|
|
39
|
+
|
|
40
|
+
def test_conventional_example_uses_type_scope_format():
    """The conventional example demonstrates type(scope) prefixes."""
    sample = _build_example("conventional", False)
    assert "feat(" in sample
    assert "fix(" in sample
    assert "refactor(" in sample


def test_conventional_example_is_numbered():
    """Examples lead with a numbered list starting at 1."""
    sample = _build_example("conventional", False)
    assert sample.startswith("1.")


def test_conventional_emoji_example_has_gitmoji():
    """Emoji mode puts gitmoji characters in the example."""
    sample = _build_example("conventional", True)
    assert "✨" in sample
    assert "🐛" in sample


def test_freeform_example_has_no_type_scope():
    """Free-form examples carry no type(scope) prefixes."""
    sample = _build_example("free-form", False)
    assert "feat(" not in sample
    assert "fix(" not in sample


def test_freeform_emoji_example_has_emoji():
    """Free-form emoji examples still include an emoji."""
    sample = _build_example("free-form", True)
    assert "✨" in sample
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
# --- build_commit_prompt ---
|
|
66
|
+
|
|
67
|
+
def test_prompt_contains_diff():
    """The staged diff is embedded verbatim in the prompt."""
    built = build_commit_prompt(SAMPLE_DIFF, "myrepo")
    assert SAMPLE_DIFF in built


def test_prompt_contains_repo_name():
    """The repository name appears in the prompt text."""
    built = build_commit_prompt(SAMPLE_DIFF, "myrepo")
    assert "myrepo" in built


def test_prompt_conventional_by_default():
    """With no style argument, conventional type(scope) rules are used."""
    built = build_commit_prompt(SAMPLE_DIFF, "myrepo")
    assert "type(scope)" in built


def test_prompt_freeform_style_omits_type_scope():
    """free-form style drops the type(scope) instruction."""
    built = build_commit_prompt(SAMPLE_DIFF, "myrepo", commit_style="free-form")
    assert "type(scope)" not in built


def test_prompt_emoji_enabled_includes_gitmoji():
    """emoji=True injects gitmoji guidance into the prompt."""
    built = build_commit_prompt(SAMPLE_DIFF, "myrepo", emoji=True)
    assert "✨" in built


def test_prompt_emoji_disabled_by_default():
    """Emoji guidance is absent unless explicitly enabled."""
    built = build_commit_prompt(SAMPLE_DIFF, "myrepo")
    assert "✨" not in built


def test_prompt_requests_exactly_3_suggestions():
    """The prompt asks for a fixed count of three suggestions."""
    built = build_commit_prompt(SAMPLE_DIFF, "myrepo")
    assert "3" in built


def test_prompt_instructs_no_extra_output():
    """The prompt forbids any output beyond the numbered list."""
    built = build_commit_prompt(SAMPLE_DIFF, "myrepo")
    assert "nothing else" in built