commit-msg-ai 0.2.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,26 @@
1
+ name: CI
2
+
3
+ on:
4
+ pull_request:
5
+ branches: [main]
6
+
7
+ jobs:
8
+ test:
9
+ runs-on: ubuntu-latest
10
+ steps:
11
+ - uses: actions/checkout@v4
12
+
13
+ - uses: actions/setup-python@v5
14
+ with:
15
+ python-version: '3.12'
16
+
17
+ - name: Build package
18
+ run: |
19
+ pip install build
20
+ python -m build
21
+
22
+ - name: Test package install
23
+ run: |
24
+ pip install dist/*.whl
25
+ commit-msg-ai --help
26
+ python -c "from commit_msg_ai.main import main; print('Import OK')"
@@ -0,0 +1,42 @@
1
+ name: Deploy docs to GitHub Pages
2
+
3
+ on:
4
+ push:
5
+ branches: [main]
6
+ paths: [docs/**]
7
+ workflow_dispatch:
8
+
9
+ permissions:
10
+ contents: read
11
+ pages: write
12
+ id-token: write
13
+
14
+ concurrency:
15
+ group: pages
16
+ cancel-in-progress: false
17
+
18
+ jobs:
19
+ build:
20
+ runs-on: ubuntu-latest
21
+ steps:
22
+ - uses: actions/checkout@v4
23
+
24
+ - uses: actions/configure-pages@v5
25
+
26
+ - uses: actions/jekyll-build-pages@v1
27
+ with:
28
+ source: ./docs
29
+ destination: ./_site
30
+
31
+ - uses: actions/upload-pages-artifact@v3
32
+
33
+ deploy:
34
+ environment:
35
+ name: github-pages
36
+ url: ${{ steps.deployment.outputs.page_url }}
37
+ runs-on: ubuntu-latest
38
+ needs: build
39
+ steps:
40
+ - name: Deploy to GitHub Pages
41
+ id: deployment
42
+ uses: actions/deploy-pages@v4
@@ -0,0 +1,141 @@
1
+ name: Release & Publish
2
+
3
+ on:
4
+ push:
5
+ branches: [main]
6
+ paths-ignore:
7
+ - 'docs/**'
8
+ - 'README.md'
9
+
10
+ permissions:
11
+ contents: write
12
+ id-token: write
13
+
14
+ jobs:
15
+ release:
16
+ runs-on: ubuntu-latest
17
+ steps:
18
+ - uses: actions/checkout@v4
19
+ with:
20
+ fetch-depth: 0
21
+ token: ${{ secrets.GITHUB_TOKEN }}
22
+
23
+ - name: Get commit message
24
+ id: commit
25
+ run: echo "message=$(git log -1 --pretty=%s)" >> "$GITHUB_OUTPUT"
26
+
27
+ - name: Determine version bump
28
+ id: bump
29
+ run: |
30
+ MSG="${{ steps.commit.outputs.message }}"
31
+ if [[ "$MSG" == bc:* ]]; then
32
+ echo "type=major" >> "$GITHUB_OUTPUT"
33
+ elif [[ "$MSG" == feat:* ]]; then
34
+ echo "type=minor" >> "$GITHUB_OUTPUT"
35
+ elif [[ "$MSG" == fix:* ]]; then
36
+ echo "type=patch" >> "$GITHUB_OUTPUT"
37
+ else
38
+ echo "type=none" >> "$GITHUB_OUTPUT"
39
+ fi
40
+
41
+ - name: Bump version
42
+ if: steps.bump.outputs.type != 'none'
43
+ id: version
44
+ run: |
45
+ CURRENT=$(grep '^version' pyproject.toml | head -1 | sed 's/.*"\(.*\)".*/\1/')
46
+ IFS='.' read -r MAJOR MINOR PATCH <<< "$CURRENT"
47
+
48
+ case "${{ steps.bump.outputs.type }}" in
49
+ major) MAJOR=$((MAJOR + 1)); MINOR=0; PATCH=0 ;;
50
+ minor) MINOR=$((MINOR + 1)); PATCH=0 ;;
51
+ patch) PATCH=$((PATCH + 1)) ;;
52
+ esac
53
+
54
+ NEW="${MAJOR}.${MINOR}.${PATCH}"
55
+ echo "current=$CURRENT" >> "$GITHUB_OUTPUT"
56
+ echo "new=$NEW" >> "$GITHUB_OUTPUT"
57
+
58
+ sed -i "s/version = \"$CURRENT\"/version = \"$NEW\"/" pyproject.toml
59
+ sed -i "s/__version__ = \"$CURRENT\"/__version__ = \"$NEW\"/" commit_msg_ai/__init__.py
60
+
61
+ - name: Generate changelog
62
+ if: steps.bump.outputs.type != 'none'
63
+ id: changelog
64
+ run: |
65
+ PREV_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "")
66
+ if [ -z "$PREV_TAG" ]; then
67
+ RANGE="HEAD"
68
+ else
69
+ RANGE="${PREV_TAG}..HEAD"
70
+ fi
71
+
72
+ {
73
+ echo 'body<<CHANGELOG_EOF'
74
+
75
+ FEATS=$(git log "$RANGE" --pretty=format:"%s" | grep -E "^feat:" | sed 's/^feat: //' || true)
76
+ if [ -n "$FEATS" ]; then
77
+ echo "### Features"
78
+ echo "$FEATS" | while read -r line; do echo "- $line"; done
79
+ echo ""
80
+ fi
81
+
82
+ FIXES=$(git log "$RANGE" --pretty=format:"%s" | grep -E "^fix:" | sed 's/^fix: //' || true)
83
+ if [ -n "$FIXES" ]; then
84
+ echo "### Fixes"
85
+ echo "$FIXES" | while read -r line; do echo "- $line"; done
86
+ echo ""
87
+ fi
88
+
89
+ BREAKING=$(git log "$RANGE" --pretty=format:"%s" | grep -E "^bc:" | sed 's/^bc: //' || true)
90
+ if [ -n "$BREAKING" ]; then
91
+ echo "### Breaking Changes"
92
+ echo "$BREAKING" | while read -r line; do echo "- $line"; done
93
+ echo ""
94
+ fi
95
+
96
+ echo 'CHANGELOG_EOF'
97
+ } >> "$GITHUB_OUTPUT"
98
+
99
+ - name: Commit version bump
100
+ if: steps.bump.outputs.type != 'none'
101
+ run: |
102
+ git config user.name "github-actions[bot]"
103
+ git config user.email "github-actions[bot]@users.noreply.github.com"
104
+ git add pyproject.toml commit_msg_ai/__init__.py
105
+ git commit -m "chore: bump version to ${{ steps.version.outputs.new }}"
106
+ git push
107
+
108
+ - name: Create tag and release
109
+ if: steps.bump.outputs.type != 'none'
110
+ run: |
111
+ TAG="v${{ steps.version.outputs.new }}"
112
+ git tag "$TAG"
113
+ git push origin "$TAG"
114
+
115
+ gh release create "$TAG" \
116
+ --title "$TAG" \
117
+ --notes "${{ steps.changelog.outputs.body }}"
118
+ env:
119
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
120
+
121
+ - uses: actions/setup-python@v5
122
+ if: steps.bump.outputs.type != 'none'
123
+ with:
124
+ python-version: '3.12'
125
+
126
+ - name: Build package
127
+ if: steps.bump.outputs.type != 'none'
128
+ run: |
129
+ pip install build
130
+ python -m build
131
+
132
+ - name: Test package install
133
+ if: steps.bump.outputs.type != 'none'
134
+ run: |
135
+ pip install dist/*.whl
136
+ commit-msg-ai --help
137
+ python -c "from commit_msg_ai.main import main; print('Import OK')"
138
+
139
+ - name: Publish to PyPI
140
+ if: steps.bump.outputs.type != 'none'
141
+ uses: pypa/gh-action-pypi-publish@release/v1
@@ -0,0 +1,7 @@
1
+ __pycache__/
2
+ *.pyc
3
+ *.pyo
4
+ *.egg-info/
5
+ dist/
6
+ build/
7
+ .venv/
@@ -0,0 +1,171 @@
1
+ Metadata-Version: 2.4
2
+ Name: commit-msg-ai
3
+ Version: 0.2.1
4
+ Summary: Generate commit messages from staged changes using a local LLM via Ollama
5
+ Project-URL: Homepage, https://xavimf87.github.io/commit-msg-ai
6
+ Project-URL: Repository, https://github.com/xavimf87/commit-msg-ai
7
+ Project-URL: Issues, https://github.com/xavimf87/commit-msg-ai/issues
8
+ Author: Xavi Martínez
9
+ License-Expression: MIT
10
+ Keywords: ai,cli,commit,git,llm,ollama
11
+ Classifier: Development Status :: 4 - Beta
12
+ Classifier: Environment :: Console
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Programming Language :: Python :: 3.9
17
+ Classifier: Programming Language :: Python :: 3.10
18
+ Classifier: Programming Language :: Python :: 3.11
19
+ Classifier: Programming Language :: Python :: 3.12
20
+ Classifier: Programming Language :: Python :: 3.13
21
+ Classifier: Topic :: Software Development :: Version Control :: Git
22
+ Requires-Python: >=3.9
23
+ Requires-Dist: httpx>=0.27
24
+ Description-Content-Type: text/markdown
25
+
26
+ # commit-msg-ai
27
+
28
+ Generate commit messages from your staged changes using a local LLM via [Ollama](https://ollama.com). No API keys, no cloud — everything runs on your machine.
29
+
30
+ ## Getting started
31
+
32
+ ### 1. Install and set up Ollama
33
+
34
+ commit-msg-ai requires [Ollama](https://ollama.com) to run language models locally. Install it first:
35
+
36
+ **macOS:**
37
+
38
+ ```bash
39
+ brew install ollama
40
+ ```
41
+
42
+ **Linux:**
43
+
44
+ ```bash
45
+ curl -fsSL https://ollama.com/install.sh | sh
46
+ ```
47
+
48
+ **Windows:** Download the installer from [ollama.com/download](https://ollama.com/download).
49
+
50
+ Once installed, start the Ollama server:
51
+
52
+ ```bash
53
+ ollama serve
54
+ ```
55
+
56
+ > On macOS, Ollama runs automatically in the background after installation. You can skip this step if you see the Ollama icon in your menu bar.
57
+
58
+ ### 2. Choose a model
59
+
60
+ You need at least one model downloaded. See what's available on your machine:
61
+
62
+ ```bash
63
+ ollama list
64
+ ```
65
+
66
+ If the list is empty, pull a model. Some good options for commit message generation:
67
+
68
+ ```bash
69
+ # Lightweight and fast (~2GB)
70
+ ollama pull llama3.2
71
+
72
+ # Good for code understanding (~4.7GB)
73
+ ollama pull qwen2.5-coder
74
+
75
+ # Small and capable (~2.3GB)
76
+ ollama pull mistral
77
+ ```
78
+
79
+ You can browse all available models at [ollama.com/library](https://ollama.com/library).
80
+
81
+ ### 3. Install commit-msg-ai
82
+
83
+ **With pipx (recommended):**
84
+
85
+ ```bash
86
+ pipx install git+https://github.com/xavimf87/commit-msg-ai.git
87
+ ```
88
+
89
+ **With pip:**
90
+
91
+ ```bash
92
+ pip install git+https://github.com/xavimf87/commit-msg-ai.git
93
+ ```
94
+
95
+ ### 4. Configure your model
96
+
97
+ By default commit-msg-ai uses `llama3.2`. If you pulled a different model, set it as default:
98
+
99
+ ```bash
100
+ commit-msg-ai config model qwen2.5-coder
101
+ ```
102
+
103
+ Verify your config:
104
+
105
+ ```bash
106
+ commit-msg-ai config
107
+ ```
108
+
109
+ ### 5. Use it
110
+
111
+ ```bash
112
+ git add .
113
+ commit-msg-ai
114
+ ```
115
+
116
+ ```
117
+ Staged files:
118
+ M src/auth.py
119
+ A src/middleware.py
120
+
121
+ Generating commit message with qwen2.5-coder...
122
+
123
+ ──────────────────────────────────────────────────
124
+ feat: add JWT authentication middleware
125
+ ──────────────────────────────────────────────────
126
+
127
+ Commit with this message? [Y/n] y
128
+ [main 3a1b2c3] feat: add JWT authentication middleware
129
+ 2 files changed, 45 insertions(+), 3 deletions(-)
130
+ ```
131
+
132
+ That's it.
133
+
134
+ ## Configuration
135
+
136
+ commit-msg-ai stores config in `~/.config/commit-msg-ai/config.json`.
137
+
138
+ ```bash
139
+ # Set default model
140
+ commit-msg-ai config model mistral
141
+
142
+ # Set Ollama server URL (useful for remote setups)
143
+ commit-msg-ai config url http://192.168.1.50:11434
144
+
145
+ # View all config
146
+ commit-msg-ai config
147
+
148
+ # View a single value
149
+ commit-msg-ai config model
150
+ ```
151
+
152
+ Override any config for a single run with flags:
153
+
154
+ ```bash
155
+ commit-msg-ai --model codellama
156
+ commit-msg-ai --url http://other-server:11434
157
+ ```
158
+
159
+ ## Commit message format
160
+
161
+ commit-msg-ai generates messages with only three prefixes:
162
+
163
+ - `feat:` new features
164
+ - `fix:` bug fixes
165
+ - `bc:` breaking changes
166
+
167
+ ## Requirements
168
+
169
+ - Python 3.9+
170
+ - [Ollama](https://ollama.com) running locally (or on a reachable server)
171
+ - At least one model pulled (`ollama pull llama3.2`)
@@ -0,0 +1,146 @@
1
+ # commit-msg-ai
2
+
3
+ Generate commit messages from your staged changes using a local LLM via [Ollama](https://ollama.com). No API keys, no cloud — everything runs on your machine.
4
+
5
+ ## Getting started
6
+
7
+ ### 1. Install and set up Ollama
8
+
9
+ commit-msg-ai requires [Ollama](https://ollama.com) to run language models locally. Install it first:
10
+
11
+ **macOS:**
12
+
13
+ ```bash
14
+ brew install ollama
15
+ ```
16
+
17
+ **Linux:**
18
+
19
+ ```bash
20
+ curl -fsSL https://ollama.com/install.sh | sh
21
+ ```
22
+
23
+ **Windows:** Download the installer from [ollama.com/download](https://ollama.com/download).
24
+
25
+ Once installed, start the Ollama server:
26
+
27
+ ```bash
28
+ ollama serve
29
+ ```
30
+
31
+ > On macOS, Ollama runs automatically in the background after installation. You can skip this step if you see the Ollama icon in your menu bar.
32
+
33
+ ### 2. Choose a model
34
+
35
+ You need at least one model downloaded. See what's available on your machine:
36
+
37
+ ```bash
38
+ ollama list
39
+ ```
40
+
41
+ If the list is empty, pull a model. Some good options for commit message generation:
42
+
43
+ ```bash
44
+ # Lightweight and fast (~2GB)
45
+ ollama pull llama3.2
46
+
47
+ # Good for code understanding (~4.7GB)
48
+ ollama pull qwen2.5-coder
49
+
50
+ # Small and capable (~2.3GB)
51
+ ollama pull mistral
52
+ ```
53
+
54
+ You can browse all available models at [ollama.com/library](https://ollama.com/library).
55
+
56
+ ### 3. Install commit-msg-ai
57
+
58
+ **With pipx (recommended):**
59
+
60
+ ```bash
61
+ pipx install git+https://github.com/xavimf87/commit-msg-ai.git
62
+ ```
63
+
64
+ **With pip:**
65
+
66
+ ```bash
67
+ pip install git+https://github.com/xavimf87/commit-msg-ai.git
68
+ ```
69
+
70
+ ### 4. Configure your model
71
+
72
+ By default commit-msg-ai uses `llama3.2`. If you pulled a different model, set it as default:
73
+
74
+ ```bash
75
+ commit-msg-ai config model qwen2.5-coder
76
+ ```
77
+
78
+ Verify your config:
79
+
80
+ ```bash
81
+ commit-msg-ai config
82
+ ```
83
+
84
+ ### 5. Use it
85
+
86
+ ```bash
87
+ git add .
88
+ commit-msg-ai
89
+ ```
90
+
91
+ ```
92
+ Staged files:
93
+ M src/auth.py
94
+ A src/middleware.py
95
+
96
+ Generating commit message with qwen2.5-coder...
97
+
98
+ ──────────────────────────────────────────────────
99
+ feat: add JWT authentication middleware
100
+ ──────────────────────────────────────────────────
101
+
102
+ Commit with this message? [Y/n] y
103
+ [main 3a1b2c3] feat: add JWT authentication middleware
104
+ 2 files changed, 45 insertions(+), 3 deletions(-)
105
+ ```
106
+
107
+ That's it.
108
+
109
+ ## Configuration
110
+
111
+ commit-msg-ai stores config in `~/.config/commit-msg-ai/config.json`.
112
+
113
+ ```bash
114
+ # Set default model
115
+ commit-msg-ai config model mistral
116
+
117
+ # Set Ollama server URL (useful for remote setups)
118
+ commit-msg-ai config url http://192.168.1.50:11434
119
+
120
+ # View all config
121
+ commit-msg-ai config
122
+
123
+ # View a single value
124
+ commit-msg-ai config model
125
+ ```
126
+
127
+ Override any config for a single run with flags:
128
+
129
+ ```bash
130
+ commit-msg-ai --model codellama
131
+ commit-msg-ai --url http://other-server:11434
132
+ ```
133
+
134
+ ## Commit message format
135
+
136
+ commit-msg-ai generates messages with only three prefixes:
137
+
138
+ - `feat:` new features
139
+ - `fix:` bug fixes
140
+ - `bc:` breaking changes
141
+
142
+ ## Requirements
143
+
144
+ - Python 3.9+
145
+ - [Ollama](https://ollama.com) running locally (or on a reachable server)
146
+ - At least one model pulled (`ollama pull llama3.2`)
@@ -0,0 +1,3 @@
1
+ """commit-msg-ai - Generate commit messages from staged changes using a local LLM."""
2
+
3
+ __version__ = "0.2.1"