git-commit-message 0.8.1__tar.gz → 0.9.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (21)
  1. {git_commit_message-0.8.1 → git_commit_message-0.9.0}/PKG-INFO +75 -10
  2. {git_commit_message-0.8.1 → git_commit_message-0.9.0}/README.md +74 -8
  3. {git_commit_message-0.8.1 → git_commit_message-0.9.0}/pyproject.toml +1 -2
  4. {git_commit_message-0.8.1 → git_commit_message-0.9.0}/src/git_commit_message/_cli.py +193 -11
  5. git_commit_message-0.9.0/src/git_commit_message/_config.py +71 -0
  6. {git_commit_message-0.8.1 → git_commit_message-0.9.0}/src/git_commit_message/_git.py +6 -0
  7. {git_commit_message-0.8.1 → git_commit_message-0.9.0}/src/git_commit_message/_gpt.py +29 -13
  8. git_commit_message-0.9.0/src/git_commit_message/_llamacpp.py +141 -0
  9. {git_commit_message-0.8.1 → git_commit_message-0.9.0}/src/git_commit_message/_llm.py +84 -56
  10. {git_commit_message-0.8.1 → git_commit_message-0.9.0}/src/git_commit_message/_ollama.py +4 -17
  11. {git_commit_message-0.8.1 → git_commit_message-0.9.0}/src/git_commit_message.egg-info/PKG-INFO +75 -10
  12. {git_commit_message-0.8.1 → git_commit_message-0.9.0}/src/git_commit_message.egg-info/SOURCES.txt +2 -0
  13. {git_commit_message-0.8.1 → git_commit_message-0.9.0}/src/git_commit_message.egg-info/requires.txt +0 -1
  14. {git_commit_message-0.8.1 → git_commit_message-0.9.0}/UNLICENSE +0 -0
  15. {git_commit_message-0.8.1 → git_commit_message-0.9.0}/setup.cfg +0 -0
  16. {git_commit_message-0.8.1 → git_commit_message-0.9.0}/src/git_commit_message/__init__.py +0 -0
  17. {git_commit_message-0.8.1 → git_commit_message-0.9.0}/src/git_commit_message/__main__.py +0 -0
  18. {git_commit_message-0.8.1 → git_commit_message-0.9.0}/src/git_commit_message/_gemini.py +0 -0
  19. {git_commit_message-0.8.1 → git_commit_message-0.9.0}/src/git_commit_message.egg-info/dependency_links.txt +0 -0
  20. {git_commit_message-0.8.1 → git_commit_message-0.9.0}/src/git_commit_message.egg-info/entry_points.txt +0 -0
  21. {git_commit_message-0.8.1 → git_commit_message-0.9.0}/src/git_commit_message.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: git-commit-message
3
- Version: 0.8.1
3
+ Version: 0.9.0
4
4
  Summary: Generate Git commit messages from staged changes using LLM
5
5
  Maintainer-email: Mina Her <minacle@live.com>
6
6
  License: This is free and unencumbered software released into the public domain.
@@ -47,11 +47,10 @@ Requires-Dist: babel>=2.17.0
47
47
  Requires-Dist: google-genai>=1.56.0
48
48
  Requires-Dist: ollama>=0.4.0
49
49
  Requires-Dist: openai>=2.6.1
50
- Requires-Dist: tiktoken>=0.12.0
51
50
 
52
51
  # git-commit-message
53
52
 
54
- Generate a commit message from your staged changes using OpenAI, Google Gemini, or Ollama.
53
+ Generate a commit message from your staged changes using OpenAI, Google Gemini, Ollama, or llama.cpp.
55
54
 
56
55
  [![asciicast](https://asciinema.org/a/jk0phFqNnc5vaCiIZEYBwZOyN.svg)](https://asciinema.org/a/jk0phFqNnc5vaCiIZEYBwZOyN)
57
56
 
@@ -120,6 +119,23 @@ export GIT_COMMIT_MESSAGE_PROVIDER=ollama
120
119
  export OLLAMA_MODEL=mistral
121
120
  ```
122
121
 
122
+ ### llama.cpp (local models)
123
+
124
+ 1. Build and run llama.cpp server with your model:
125
+
126
+ ```sh
127
+ llama-server -hf ggml-org/gpt-oss-20b-GGUF --host 0.0.0.0 --port 8080
128
+ ```
129
+
130
+ 2. The server runs on `http://localhost:8080` by default.
131
+
132
+ Optional: set defaults:
133
+
134
+ ```sh
135
+ export GIT_COMMIT_MESSAGE_PROVIDER=llamacpp
136
+ export LLAMACPP_HOST=http://localhost:8080
137
+ ```
138
+
123
139
  Note (fish):
124
140
 
125
141
  ```fish
@@ -141,10 +157,25 @@ git add -A
141
157
  git-commit-message "optional extra context about the change"
142
158
  ```
143
159
 
144
- Generate a single-line subject only:
160
+ Generate a single-line subject only (when no trailers are appended):
145
161
 
146
162
  ```sh
147
163
  git-commit-message --one-line "optional context"
164
+
165
+ # with trailers, output is subject plus trailer lines
166
+ git-commit-message --one-line --co-author 'John Doe <john.doe@example.com>'
167
+ ```
168
+
169
+ Use Conventional Commits constraints for the subject/footer only (body format is preserved):
170
+
171
+ ```sh
172
+ git-commit-message --conventional
173
+
174
+ # can be combined with one-line mode
175
+ git-commit-message --conventional --one-line
176
+
177
+ # co-author trailers are appended after any existing footers
178
+ git-commit-message --conventional --co-author copilot
148
179
  ```
149
180
 
150
181
  Select provider:
@@ -158,6 +189,9 @@ git-commit-message --provider google
158
189
 
159
190
  # Ollama
160
191
  git-commit-message --provider ollama
192
+
193
+ # llama.cpp
194
+ git-commit-message --provider llamacpp
161
195
  ```
162
196
 
163
197
  Commit immediately (optionally open editor):
@@ -165,6 +199,11 @@ Commit immediately (optionally open editor):
165
199
  ```sh
166
200
  git-commit-message --commit "refactor parser for speed"
167
201
  git-commit-message --commit --edit "refactor parser for speed"
202
+
203
+ # add co-author trailers
204
+ git-commit-message --commit --co-author 'John Doe <john.doe@example.com>'
205
+ git-commit-message --commit --co-author 'John Doe <john.doe@example.com>' --co-author 'Jane Doe <jane.doe@example.com>'
206
+ git-commit-message --commit --co-author copilot
168
207
  ```
169
208
 
170
209
  Amend the previous commit:
@@ -195,10 +234,24 @@ git-commit-message --chunk-tokens 0
195
234
  # chunk the diff into ~4000-token pieces before summarising
196
235
  git-commit-message --chunk-tokens 4000
197
236
 
237
+ # note: for provider 'ollama', values >= 1 are not supported
238
+ # use 0 (single summary pass) or -1 (legacy one-shot)
239
+ git-commit-message --provider ollama --chunk-tokens 0
240
+
198
241
  # disable summarisation and use the legacy one-shot prompt
199
242
  git-commit-message --chunk-tokens -1
200
243
  ```
201
244
 
245
+ Adjust unified diff context lines:
246
+
247
+ ```sh
248
+ # use 5 context lines around each change hunk
249
+ git-commit-message --diff-context 5
250
+
251
+ # include only changed lines (no surrounding context)
252
+ git-commit-message --diff-context 0
253
+ ```
254
+
202
255
  Select output language/locale (IETF language tag):
203
256
 
204
257
  ```sh
@@ -219,19 +272,28 @@ Configure Ollama host (if running on a different machine):
219
272
  git-commit-message --provider ollama --host http://192.168.1.100:11434
220
273
  ```
221
274
 
275
+ Configure llama.cpp host:
276
+
277
+ ```sh
278
+ git-commit-message --provider llamacpp --host http://192.168.1.100:8080
279
+ ```
280
+
222
281
  ## Options
223
282
 
224
- - `--provider {openai,google,ollama}`: provider to use (default: `openai`)
225
- - `--model MODEL`: model override (provider-specific)
283
+ - `--provider {openai,google,ollama,llamacpp}`: provider to use (default: `openai`)
284
+ - `--model MODEL`: model override (provider-specific; ignored for llama.cpp)
226
285
  - `--language TAG`: output language/locale (default: `en-GB`)
227
- - `--one-line`: output subject only
286
+ - `--conventional`: apply Conventional Commits constraints to the subject and footer behavior. The body format is unchanged and still includes the translated `Rationale:` line. Breaking changes are expressed with `!` in the subject line, and `BREAKING CHANGE` footer lines are not generated.
287
+ - `--one-line`: output subject only when no trailers are appended; with `--co-author`, output is a single-line subject plus `Co-authored-by:` trailer lines
228
288
  - `--max-length N`: max subject length (default: 72)
229
- - `--chunk-tokens N`: token budget per diff chunk (`0` = single summary pass, `-1` disables summarisation)
289
+ - `--chunk-tokens N`: token budget per diff chunk (`0` = single summary pass, `-1` disables summarisation). For `ollama`, values `>= 1` are not supported.
290
+ - `--diff-context N`: context lines in unified diff (`N >= 0`). If omitted, uses `GIT_COMMIT_MESSAGE_DIFF_CONTEXT` when set; otherwise uses Git default (usually `3`).
230
291
  - `--debug`: print request/response details
231
292
  - `--commit`: run `git commit -m <message>`
232
293
  - `--amend`: generate a message suitable for amending the previous commit (diff is from the amended commit's parent to the staged index; if nothing is staged, this effectively becomes the diff introduced by `HEAD`)
233
294
  - `--edit`: with `--commit`, open editor for final message
234
- - `--host URL`: host URL for providers like Ollama (default: `http://localhost:11434`)
295
+ - `--host URL`: host URL for providers like Ollama or llama.cpp (default: `http://localhost:11434` for Ollama, `http://localhost:8080` for llama.cpp)
296
+ - `--co-author VALUE`: append `Co-authored-by:` trailer(s). Repeat to add multiple values. Accepted forms: `Name <email@example.com>` or `copilot` (alias, case-insensitive).
235
297
 
236
298
  ## Environment variables
237
299
 
@@ -247,14 +309,17 @@ Optional:
247
309
  - `OPENAI_MODEL`: OpenAI-only model override (used if `--model`/`GIT_COMMIT_MESSAGE_MODEL` are not set)
248
310
  - `OLLAMA_MODEL`: Ollama-only model override (used if `--model`/`GIT_COMMIT_MESSAGE_MODEL` are not set)
249
311
  - `OLLAMA_HOST`: Ollama server URL (default: `http://localhost:11434`)
312
+ - `LLAMACPP_HOST`: llama.cpp server URL (default: `http://localhost:8080`)
250
313
  - `GIT_COMMIT_MESSAGE_LANGUAGE`: default language/locale (default: `en-GB`)
251
- - `GIT_COMMIT_MESSAGE_CHUNK_TOKENS`: default chunk token budget (default: `0`)
314
+ - `GIT_COMMIT_MESSAGE_CHUNK_TOKENS`: default chunk token budget (default: `0`; for `ollama`, values `>= 1` are not supported)
315
+ - `GIT_COMMIT_MESSAGE_DIFF_CONTEXT`: default unified diff context lines (`0` or greater). If unset, Git default is used (usually `3`).
252
316
 
253
317
  Default models (if not overridden):
254
318
 
255
319
  - OpenAI: `gpt-5-mini`
256
320
  - Google: `gemini-2.5-flash`
257
321
  - Ollama: `gpt-oss:20b`
322
+ - llama.cpp: uses pre-loaded model (model parameter is ignored)
258
323
 
259
324
  ## AI-generated code notice
260
325
 
@@ -1,6 +1,6 @@
1
1
  # git-commit-message
2
2
 
3
- Generate a commit message from your staged changes using OpenAI, Google Gemini, or Ollama.
3
+ Generate a commit message from your staged changes using OpenAI, Google Gemini, Ollama, or llama.cpp.
4
4
 
5
5
  [![asciicast](https://asciinema.org/a/jk0phFqNnc5vaCiIZEYBwZOyN.svg)](https://asciinema.org/a/jk0phFqNnc5vaCiIZEYBwZOyN)
6
6
 
@@ -69,6 +69,23 @@ export GIT_COMMIT_MESSAGE_PROVIDER=ollama
69
69
  export OLLAMA_MODEL=mistral
70
70
  ```
71
71
 
72
+ ### llama.cpp (local models)
73
+
74
+ 1. Build and run llama.cpp server with your model:
75
+
76
+ ```sh
77
+ llama-server -hf ggml-org/gpt-oss-20b-GGUF --host 0.0.0.0 --port 8080
78
+ ```
79
+
80
+ 2. The server runs on `http://localhost:8080` by default.
81
+
82
+ Optional: set defaults:
83
+
84
+ ```sh
85
+ export GIT_COMMIT_MESSAGE_PROVIDER=llamacpp
86
+ export LLAMACPP_HOST=http://localhost:8080
87
+ ```
88
+
72
89
  Note (fish):
73
90
 
74
91
  ```fish
@@ -90,10 +107,25 @@ git add -A
90
107
  git-commit-message "optional extra context about the change"
91
108
  ```
92
109
 
93
- Generate a single-line subject only:
110
+ Generate a single-line subject only (when no trailers are appended):
94
111
 
95
112
  ```sh
96
113
  git-commit-message --one-line "optional context"
114
+
115
+ # with trailers, output is subject plus trailer lines
116
+ git-commit-message --one-line --co-author 'John Doe <john.doe@example.com>'
117
+ ```
118
+
119
+ Use Conventional Commits constraints for the subject/footer only (body format is preserved):
120
+
121
+ ```sh
122
+ git-commit-message --conventional
123
+
124
+ # can be combined with one-line mode
125
+ git-commit-message --conventional --one-line
126
+
127
+ # co-author trailers are appended after any existing footers
128
+ git-commit-message --conventional --co-author copilot
97
129
  ```
98
130
 
99
131
  Select provider:
@@ -107,6 +139,9 @@ git-commit-message --provider google
107
139
 
108
140
  # Ollama
109
141
  git-commit-message --provider ollama
142
+
143
+ # llama.cpp
144
+ git-commit-message --provider llamacpp
110
145
  ```
111
146
 
112
147
  Commit immediately (optionally open editor):
@@ -114,6 +149,11 @@ Commit immediately (optionally open editor):
114
149
  ```sh
115
150
  git-commit-message --commit "refactor parser for speed"
116
151
  git-commit-message --commit --edit "refactor parser for speed"
152
+
153
+ # add co-author trailers
154
+ git-commit-message --commit --co-author 'John Doe <john.doe@example.com>'
155
+ git-commit-message --commit --co-author 'John Doe <john.doe@example.com>' --co-author 'Jane Doe <jane.doe@example.com>'
156
+ git-commit-message --commit --co-author copilot
117
157
  ```
118
158
 
119
159
  Amend the previous commit:
@@ -144,10 +184,24 @@ git-commit-message --chunk-tokens 0
144
184
  # chunk the diff into ~4000-token pieces before summarising
145
185
  git-commit-message --chunk-tokens 4000
146
186
 
187
+ # note: for provider 'ollama', values >= 1 are not supported
188
+ # use 0 (single summary pass) or -1 (legacy one-shot)
189
+ git-commit-message --provider ollama --chunk-tokens 0
190
+
147
191
  # disable summarisation and use the legacy one-shot prompt
148
192
  git-commit-message --chunk-tokens -1
149
193
  ```
150
194
 
195
+ Adjust unified diff context lines:
196
+
197
+ ```sh
198
+ # use 5 context lines around each change hunk
199
+ git-commit-message --diff-context 5
200
+
201
+ # include only changed lines (no surrounding context)
202
+ git-commit-message --diff-context 0
203
+ ```
204
+
151
205
  Select output language/locale (IETF language tag):
152
206
 
153
207
  ```sh
@@ -168,19 +222,28 @@ Configure Ollama host (if running on a different machine):
168
222
  git-commit-message --provider ollama --host http://192.168.1.100:11434
169
223
  ```
170
224
 
225
+ Configure llama.cpp host:
226
+
227
+ ```sh
228
+ git-commit-message --provider llamacpp --host http://192.168.1.100:8080
229
+ ```
230
+
171
231
  ## Options
172
232
 
173
- - `--provider {openai,google,ollama}`: provider to use (default: `openai`)
174
- - `--model MODEL`: model override (provider-specific)
233
+ - `--provider {openai,google,ollama,llamacpp}`: provider to use (default: `openai`)
234
+ - `--model MODEL`: model override (provider-specific; ignored for llama.cpp)
175
235
  - `--language TAG`: output language/locale (default: `en-GB`)
176
- - `--one-line`: output subject only
236
+ - `--conventional`: apply Conventional Commits constraints to the subject and footer behavior. The body format is unchanged and still includes the translated `Rationale:` line. Breaking changes are expressed with `!` in the subject line, and `BREAKING CHANGE` footer lines are not generated.
237
+ - `--one-line`: output subject only when no trailers are appended; with `--co-author`, output is a single-line subject plus `Co-authored-by:` trailer lines
177
238
  - `--max-length N`: max subject length (default: 72)
178
- - `--chunk-tokens N`: token budget per diff chunk (`0` = single summary pass, `-1` disables summarisation)
239
+ - `--chunk-tokens N`: token budget per diff chunk (`0` = single summary pass, `-1` disables summarisation). For `ollama`, values `>= 1` are not supported.
240
+ - `--diff-context N`: context lines in unified diff (`N >= 0`). If omitted, uses `GIT_COMMIT_MESSAGE_DIFF_CONTEXT` when set; otherwise uses Git default (usually `3`).
179
241
  - `--debug`: print request/response details
180
242
  - `--commit`: run `git commit -m <message>`
181
243
  - `--amend`: generate a message suitable for amending the previous commit (diff is from the amended commit's parent to the staged index; if nothing is staged, this effectively becomes the diff introduced by `HEAD`)
182
244
  - `--edit`: with `--commit`, open editor for final message
183
- - `--host URL`: host URL for providers like Ollama (default: `http://localhost:11434`)
245
+ - `--host URL`: host URL for providers like Ollama or llama.cpp (default: `http://localhost:11434` for Ollama, `http://localhost:8080` for llama.cpp)
246
+ - `--co-author VALUE`: append `Co-authored-by:` trailer(s). Repeat to add multiple values. Accepted forms: `Name <email@example.com>` or `copilot` (alias, case-insensitive).
184
247
 
185
248
  ## Environment variables
186
249
 
@@ -196,14 +259,17 @@ Optional:
196
259
  - `OPENAI_MODEL`: OpenAI-only model override (used if `--model`/`GIT_COMMIT_MESSAGE_MODEL` are not set)
197
260
  - `OLLAMA_MODEL`: Ollama-only model override (used if `--model`/`GIT_COMMIT_MESSAGE_MODEL` are not set)
198
261
  - `OLLAMA_HOST`: Ollama server URL (default: `http://localhost:11434`)
262
+ - `LLAMACPP_HOST`: llama.cpp server URL (default: `http://localhost:8080`)
199
263
  - `GIT_COMMIT_MESSAGE_LANGUAGE`: default language/locale (default: `en-GB`)
200
- - `GIT_COMMIT_MESSAGE_CHUNK_TOKENS`: default chunk token budget (default: `0`)
264
+ - `GIT_COMMIT_MESSAGE_CHUNK_TOKENS`: default chunk token budget (default: `0`; for `ollama`, values `>= 1` are not supported)
265
+ - `GIT_COMMIT_MESSAGE_DIFF_CONTEXT`: default unified diff context lines (`0` or greater). If unset, Git default is used (usually `3`).
201
266
 
202
267
  Default models (if not overridden):
203
268
 
204
269
  - OpenAI: `gpt-5-mini`
205
270
  - Google: `gemini-2.5-flash`
206
271
  - Ollama: `gpt-oss:20b`
272
+ - llama.cpp: uses pre-loaded model (model parameter is ignored)
207
273
 
208
274
  ## AI-generated code notice
209
275
 
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "git-commit-message"
3
- version = "0.8.1"
3
+ version = "0.9.0"
4
4
  description = "Generate Git commit messages from staged changes using LLM"
5
5
  readme = "README.md"
6
6
  requires-python = ">=3.13"
@@ -9,7 +9,6 @@ dependencies = [
9
9
  "google-genai>=1.56.0",
10
10
  "ollama>=0.4.0",
11
11
  "openai>=2.6.1",
12
- "tiktoken>=0.12.0",
13
12
  ]
14
13
  maintainers = [{ name = "Mina Her", email = "minacle@live.com" }]
15
14
  license = { file = "UNLICENSE" }
@@ -9,6 +9,8 @@ from __future__ import annotations
9
9
  from argparse import ArgumentParser, Namespace
10
10
  from os import environ
11
11
  from pathlib import Path
12
+ import re
13
+ from re import Pattern
12
14
  from sys import exit as sys_exit
13
15
  from sys import stderr
14
16
  from typing import Final
@@ -21,6 +23,7 @@ from ._git import (
21
23
  has_staged_changes,
22
24
  resolve_amend_base_ref,
23
25
  )
26
+ from ._config import resolve_provider_name, validate_provider_chunk_tokens
24
27
  from ._llm import (
25
28
  CommitMessageResult,
26
29
  UnsupportedProviderError,
@@ -35,6 +38,7 @@ class CliArgs(Namespace):
35
38
  "commit",
36
39
  "amend",
37
40
  "edit",
41
+ "conventional",
38
42
  "provider",
39
43
  "model",
40
44
  "language",
@@ -42,7 +46,9 @@ class CliArgs(Namespace):
42
46
  "one_line",
43
47
  "max_length",
44
48
  "chunk_tokens",
49
+ "diff_context",
45
50
  "host",
51
+ "co_authors",
46
52
  )
47
53
 
48
54
  def __init__(
@@ -53,6 +59,7 @@ class CliArgs(Namespace):
53
59
  self.commit: bool = False
54
60
  self.amend: bool = False
55
61
  self.edit: bool = False
62
+ self.conventional: bool = False
56
63
  self.provider: str | None = None
57
64
  self.model: str | None = None
58
65
  self.language: str | None = None
@@ -60,7 +67,85 @@ class CliArgs(Namespace):
60
67
  self.one_line: bool = False
61
68
  self.max_length: int | None = None
62
69
  self.chunk_tokens: int | None = None
70
+ self.diff_context: int | None = None
63
71
  self.host: str | None = None
72
+ self.co_authors: list[str] | None = None
73
+
74
+
75
+ _CO_AUTHOR_LINE_RE: Final[Pattern[str]] = re.compile(
76
+ r"^\s*([^<>\s\n][^<>\n]*?)\s*<([^<>\s\n]+@[^<>\s\n]+)>\s*$"
77
+ )
78
+ _CO_AUTHOR_ALIASES: Final[dict[str, str]] = {
79
+ "copilot": "Copilot <copilot@github.com>",
80
+ }
81
+
82
+
83
+ def _co_author_alias_keywords_text() -> str:
84
+ """Return a readable list of accepted co-author alias keywords."""
85
+
86
+ keywords: list[str] = sorted(_CO_AUTHOR_ALIASES.keys())
87
+ return ", ".join(f"'{keyword}'" for keyword in keywords)
88
+
89
+
90
+ def _normalize_co_author(
91
+ raw: str,
92
+ /,
93
+ ) -> str:
94
+ """Normalize one co-author input into ``Name <email>`` form."""
95
+
96
+ value: str = raw.strip()
97
+ if not value:
98
+ raise ValueError("Co-author cannot be empty.")
99
+
100
+ alias: str | None = _CO_AUTHOR_ALIASES.get(value.lower())
101
+ if alias is not None:
102
+ return alias
103
+
104
+ match = _CO_AUTHOR_LINE_RE.match(value)
105
+ if match is None:
106
+ raise ValueError(
107
+ "Invalid co-author format: use 'Name <email@example.com>' "
108
+ f"or an alias keyword ({_co_author_alias_keywords_text()})."
109
+ )
110
+
111
+ name: str = match.group(1).strip()
112
+ email: str = match.group(2).strip()
113
+ return f"{name} <{email}>"
114
+
115
+
116
+ def _append_co_author_footers(
117
+ message: str,
118
+ normalized_co_authors: list[str],
119
+ /,
120
+ ) -> str:
121
+ """Append Git co-author trailers to a commit message."""
122
+
123
+ if not normalized_co_authors:
124
+ return message
125
+
126
+ base: str = message.rstrip()
127
+ footer_lines: list[str] = [
128
+ f"Co-authored-by: {author}" for author in normalized_co_authors
129
+ ]
130
+ return f"{base}\n\n" + "\n".join(footer_lines)
131
+
132
+
133
+ def _normalize_co_authors(
134
+ co_authors: list[str],
135
+ /,
136
+ ) -> list[str]:
137
+ """Normalize and deduplicate co-author values in insertion order."""
138
+
139
+ seen: set[str] = set()
140
+ normalized: list[str] = []
141
+ for raw in co_authors:
142
+ author = _normalize_co_author(raw)
143
+ key = author.lower()
144
+ if key in seen:
145
+ continue
146
+ seen.add(key)
147
+ normalized.append(author)
148
+ return normalized
64
149
 
65
150
 
66
151
  def _env_chunk_tokens_default() -> int | None:
@@ -75,6 +160,21 @@ def _env_chunk_tokens_default() -> int | None:
75
160
  return None
76
161
 
77
162
 
163
+ def _env_diff_context_default() -> int | None:
164
+ """Return diff context default from env.
165
+
166
+ Raises
167
+ ------
168
+ ValueError
169
+ If the configured value is not an integer.
170
+ """
171
+
172
+ raw: str | None = environ.get("GIT_COMMIT_MESSAGE_DIFF_CONTEXT")
173
+ if raw is None:
174
+ return None
175
+ return int(raw)
176
+
177
+
78
178
  def _build_parser() -> ArgumentParser:
79
179
  """Create the CLI argument parser.
80
180
 
@@ -119,13 +219,23 @@ def _build_parser() -> ArgumentParser:
119
219
  help="Open an editor to amend the message before committing. Use with '--commit'.",
120
220
  )
121
221
 
222
+ parser.add_argument(
223
+ "--conventional",
224
+ action="store_true",
225
+ help=(
226
+ "Use Conventional Commits constraints for the subject line and footer. "
227
+ "The existing body format remains unchanged, including the translated Rationale line."
228
+ ),
229
+ )
230
+
122
231
  parser.add_argument(
123
232
  "--provider",
124
233
  default=None,
125
234
  help=(
126
235
  "LLM provider to use (default: openai). "
127
236
  "You may also set GIT_COMMIT_MESSAGE_PROVIDER. "
128
- "The CLI flag overrides the environment variable."
237
+ "The CLI flag overrides the environment variable. "
238
+ "Supported providers: openai, google, ollama, llamacpp."
129
239
  ),
130
240
  )
131
241
 
@@ -133,7 +243,8 @@ def _build_parser() -> ArgumentParser:
133
243
  "--model",
134
244
  default=None,
135
245
  help=(
136
- "Model name to use. If unspecified, uses GIT_COMMIT_MESSAGE_MODEL or a provider-specific default (openai: gpt-5-mini; google: gemini-2.5-flash; ollama: gpt-oss:20b)."
246
+ "Model name to use. If unspecified, uses GIT_COMMIT_MESSAGE_MODEL or a provider-specific default "
247
+ "(openai: gpt-5-mini; google: gemini-2.5-flash; ollama: gpt-oss:20b; llamacpp: default)."
137
248
  ),
138
249
  )
139
250
 
@@ -176,17 +287,46 @@ def _build_parser() -> ArgumentParser:
176
287
  help=(
177
288
  "Target token budget per diff chunk. "
178
289
  "0 forces a single chunk with summarisation; -1 disables summarisation (legacy one-shot). "
290
+ "For provider 'ollama', values >= 1 are not supported. "
179
291
  "If omitted, uses GIT_COMMIT_MESSAGE_CHUNK_TOKENS when set (default: 0)."
180
292
  ),
181
293
  )
182
294
 
295
+ parser.add_argument(
296
+ "--diff-context",
297
+ dest="diff_context",
298
+ type=int,
299
+ default=None,
300
+ help=(
301
+ "Number of context lines in unified diff output. "
302
+ "If omitted, uses GIT_COMMIT_MESSAGE_DIFF_CONTEXT when set "
303
+ "(default: Git default, usually 3)."
304
+ ),
305
+ )
306
+
183
307
  parser.add_argument(
184
308
  "--host",
185
309
  dest="host",
186
310
  default=None,
187
311
  help=(
188
- "Host URL for API providers like Ollama (default: http://localhost:11434). "
189
- "You may also set OLLAMA_HOST for Ollama."
312
+ "Host URL for API providers like Ollama or llama.cpp "
313
+ "(default: http://localhost:11434 for Ollama, http://localhost:8080 for llama.cpp). "
314
+ "You may also set OLLAMA_HOST for Ollama or LLAMACPP_HOST for llama.cpp."
315
+ ),
316
+ )
317
+
318
+ parser.add_argument(
319
+ "--co-author",
320
+ dest="co_authors",
321
+ action="append",
322
+ default=None,
323
+ help=(
324
+ "Add Co-authored-by trailer(s) to the generated message. "
325
+ "Repeat for multiple co-authors. "
326
+ "Use 'Name <email@example.com>' or an alias keyword "
327
+ f"({_co_author_alias_keywords_text()}). "
328
+ "When used with --one-line, the subject line remains single-line and these "
329
+ "trailers are appended on separate lines (i.e., the overall output is multi-line)."
190
330
  ),
191
331
  )
192
332
 
@@ -210,6 +350,32 @@ def _run(
210
350
  Process exit code. 0 indicates success; any other value indicates failure.
211
351
  """
212
352
 
353
+ chunk_tokens: int | None = args.chunk_tokens
354
+ if chunk_tokens is None:
355
+ chunk_tokens = _env_chunk_tokens_default()
356
+ if chunk_tokens is None:
357
+ chunk_tokens = 0
358
+
359
+ diff_context: int | None = args.diff_context
360
+ if diff_context is None:
361
+ try:
362
+ diff_context = _env_diff_context_default()
363
+ except ValueError:
364
+ print(
365
+ "GIT_COMMIT_MESSAGE_DIFF_CONTEXT must be an integer.",
366
+ file=stderr,
367
+ )
368
+ return 2
369
+ if diff_context is not None and diff_context < 0:
370
+ print("--diff-context must be greater than or equal to 0.", file=stderr)
371
+ return 2
372
+
373
+ provider_name: str = resolve_provider_name(args.provider)
374
+ provider_arg_error = validate_provider_chunk_tokens(provider_name, chunk_tokens)
375
+ if provider_arg_error is not None:
376
+ print(provider_arg_error, file=stderr)
377
+ return 2
378
+
213
379
  repo_root: Path = get_repo_root()
214
380
 
215
381
  if args.amend:
@@ -218,21 +384,27 @@ def _run(
218
384
  return 2
219
385
 
220
386
  base_ref = resolve_amend_base_ref(repo_root)
221
- diff_text: str = get_staged_diff(repo_root, base_ref=base_ref)
387
+ diff_text: str = get_staged_diff(
388
+ repo_root,
389
+ base_ref=base_ref,
390
+ context_lines=diff_context,
391
+ )
222
392
  else:
223
393
  if not has_staged_changes(repo_root):
224
394
  print("No staged changes. Run 'git add' and try again.", file=stderr)
225
395
  return 2
226
396
 
227
- diff_text = get_staged_diff(repo_root)
397
+ diff_text = get_staged_diff(repo_root, context_lines=diff_context)
228
398
 
229
399
  hint: str | None = args.description if isinstance(args.description, str) else None
230
400
 
231
- chunk_tokens: int | None = args.chunk_tokens
232
- if chunk_tokens is None:
233
- chunk_tokens = _env_chunk_tokens_default()
234
- if chunk_tokens is None:
235
- chunk_tokens = 0
401
+ normalized_co_authors: list[str] | None = None
402
+ if args.co_authors:
403
+ try:
404
+ normalized_co_authors = _normalize_co_authors(args.co_authors)
405
+ except ValueError as exc:
406
+ print(str(exc), file=stderr)
407
+ return 2
236
408
 
237
409
  result: CommitMessageResult | None = None
238
410
  try:
@@ -247,6 +419,7 @@ def _run(
247
419
  chunk_tokens,
248
420
  args.provider,
249
421
  args.host,
422
+ args.conventional,
250
423
  )
251
424
  message = result.message
252
425
  else:
@@ -260,6 +433,7 @@ def _run(
260
433
  chunk_tokens,
261
434
  args.provider,
262
435
  args.host,
436
+ args.conventional,
263
437
  )
264
438
  except UnsupportedProviderError as exc:
265
439
  print(str(exc), file=stderr)
@@ -278,6 +452,14 @@ def _run(
278
452
  else:
279
453
  message = ""
280
454
 
455
+ # Defensive check: one-line normalization can result in an empty message.
456
+ if not message.strip():
457
+ print("Failed to generate commit message: generated message is empty.", file=stderr)
458
+ return 3
459
+
460
+ if normalized_co_authors:
461
+ message = _append_co_author_footers(message, normalized_co_authors)
462
+
281
463
  if not args.commit:
282
464
  if args.debug and result is not None:
283
465
  # Print debug information