git-commit-message 0.8.0__tar.gz → 0.8.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (21) hide show
  1. {git_commit_message-0.8.0 → git_commit_message-0.8.2}/PKG-INFO +59 -8
  2. {git_commit_message-0.8.0 → git_commit_message-0.8.2}/README.md +58 -7
  3. {git_commit_message-0.8.0 → git_commit_message-0.8.2}/pyproject.toml +1 -1
  4. {git_commit_message-0.8.0 → git_commit_message-0.8.2}/src/git_commit_message/_cli.py +146 -10
  5. git_commit_message-0.8.2/src/git_commit_message/_git.py +269 -0
  6. git_commit_message-0.8.2/src/git_commit_message/_llamacpp.py +145 -0
  7. {git_commit_message-0.8.0 → git_commit_message-0.8.2}/src/git_commit_message/_llm.py +11 -1
  8. {git_commit_message-0.8.0 → git_commit_message-0.8.2}/src/git_commit_message.egg-info/PKG-INFO +59 -8
  9. {git_commit_message-0.8.0 → git_commit_message-0.8.2}/src/git_commit_message.egg-info/SOURCES.txt +1 -0
  10. git_commit_message-0.8.0/src/git_commit_message/_git.py +0 -114
  11. {git_commit_message-0.8.0 → git_commit_message-0.8.2}/UNLICENSE +0 -0
  12. {git_commit_message-0.8.0 → git_commit_message-0.8.2}/setup.cfg +0 -0
  13. {git_commit_message-0.8.0 → git_commit_message-0.8.2}/src/git_commit_message/__init__.py +0 -0
  14. {git_commit_message-0.8.0 → git_commit_message-0.8.2}/src/git_commit_message/__main__.py +0 -0
  15. {git_commit_message-0.8.0 → git_commit_message-0.8.2}/src/git_commit_message/_gemini.py +0 -0
  16. {git_commit_message-0.8.0 → git_commit_message-0.8.2}/src/git_commit_message/_gpt.py +0 -0
  17. {git_commit_message-0.8.0 → git_commit_message-0.8.2}/src/git_commit_message/_ollama.py +0 -0
  18. {git_commit_message-0.8.0 → git_commit_message-0.8.2}/src/git_commit_message.egg-info/dependency_links.txt +0 -0
  19. {git_commit_message-0.8.0 → git_commit_message-0.8.2}/src/git_commit_message.egg-info/entry_points.txt +0 -0
  20. {git_commit_message-0.8.0 → git_commit_message-0.8.2}/src/git_commit_message.egg-info/requires.txt +0 -0
  21. {git_commit_message-0.8.0 → git_commit_message-0.8.2}/src/git_commit_message.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: git-commit-message
3
- Version: 0.8.0
3
+ Version: 0.8.2
4
4
  Summary: Generate Git commit messages from staged changes using LLM
5
5
  Maintainer-email: Mina Her <minacle@live.com>
6
6
  License: This is free and unencumbered software released into the public domain.
@@ -51,14 +51,14 @@ Requires-Dist: tiktoken>=0.12.0
51
51
 
52
52
  # git-commit-message
53
53
 
54
- Generate a commit message from your staged changes using OpenAI, Google Gemini, or Ollama.
54
+ Generate a commit message from your staged changes using OpenAI, Google Gemini, Ollama, or llama.cpp.
55
55
 
56
56
  [![asciicast](https://asciinema.org/a/jk0phFqNnc5vaCiIZEYBwZOyN.svg)](https://asciinema.org/a/jk0phFqNnc5vaCiIZEYBwZOyN)
57
57
 
58
58
  ## Requirements
59
59
 
60
60
  - Python 3.13+
61
- - A Git repo with staged changes (`git add ...`)
61
+ - A Git repo with staged changes (`git add ...`) (or use `--amend` even if nothing is staged)
62
62
 
63
63
  ## Install
64
64
 
@@ -120,6 +120,23 @@ export GIT_COMMIT_MESSAGE_PROVIDER=ollama
120
120
  export OLLAMA_MODEL=mistral
121
121
  ```
122
122
 
123
+ ### llama.cpp (local models)
124
+
125
+ 1. Build and run llama.cpp server with your model:
126
+
127
+ ```sh
128
+ llama-server -hf ggml-org/gpt-oss-20b-GGUF --host 0.0.0.0 --port 8080
129
+ ```
130
+
131
+ 2. The server runs on `http://localhost:8080` by default.
132
+
133
+ Optional: set defaults:
134
+
135
+ ```sh
136
+ export GIT_COMMIT_MESSAGE_PROVIDER=llamacpp
137
+ export LLAMACPP_HOST=http://localhost:8080
138
+ ```
139
+
123
140
  Note (fish):
124
141
 
125
142
  ```fish
@@ -141,10 +158,13 @@ git add -A
141
158
  git-commit-message "optional extra context about the change"
142
159
  ```
143
160
 
144
- Generate a single-line subject only:
161
+ Generate a single-line subject only (when no trailers are appended):
145
162
 
146
163
  ```sh
147
164
  git-commit-message --one-line "optional context"
165
+
166
+ # with trailers, output is subject plus trailer lines
167
+ git-commit-message --one-line --co-author 'John Doe <john.doe@example.com>'
148
168
  ```
149
169
 
150
170
  Select provider:
@@ -158,6 +178,9 @@ git-commit-message --provider google
158
178
 
159
179
  # Ollama
160
180
  git-commit-message --provider ollama
181
+
182
+ # llama.cpp
183
+ git-commit-message --provider llamacpp
161
184
  ```
162
185
 
163
186
  Commit immediately (optionally open editor):
@@ -165,6 +188,24 @@ Commit immediately (optionally open editor):
165
188
  ```sh
166
189
  git-commit-message --commit "refactor parser for speed"
167
190
  git-commit-message --commit --edit "refactor parser for speed"
191
+
192
+ # add co-author trailers
193
+ git-commit-message --commit --co-author 'John Doe <john.doe@example.com>'
194
+ git-commit-message --commit --co-author 'John Doe <john.doe@example.com>' --co-author 'Jane Doe <jane.doe@example.com>'
195
+ git-commit-message --commit --co-author copilot
196
+ ```
197
+
198
+ Amend the previous commit:
199
+
200
+ ```sh
201
+ # print only (useful for pasting into a GUI editor)
202
+ git-commit-message --amend "optional context"
203
+
204
+ # amend immediately
205
+ git-commit-message --commit --amend "optional context"
206
+
207
+ # amend immediately, but open editor for final tweaks
208
+ git-commit-message --commit --amend --edit "optional context"
168
209
  ```
169
210
 
170
211
  Limit subject length:
@@ -206,18 +247,26 @@ Configure Ollama host (if running on a different machine):
206
247
  git-commit-message --provider ollama --host http://192.168.1.100:11434
207
248
  ```
208
249
 
250
+ Configure llama.cpp host:
251
+
252
+ ```sh
253
+ git-commit-message --provider llamacpp --host http://192.168.1.100:8080
254
+ ```
255
+
209
256
  ## Options
210
257
 
211
- - `--provider {openai,google,ollama}`: provider to use (default: `openai`)
212
- - `--model MODEL`: model override (provider-specific)
258
+ - `--provider {openai,google,ollama,llamacpp}`: provider to use (default: `openai`)
259
+ - `--model MODEL`: model override (provider-specific; ignored for llama.cpp)
213
260
  - `--language TAG`: output language/locale (default: `en-GB`)
214
- - `--one-line`: output subject only
261
+ - `--one-line`: output subject only when no trailers are appended; with `--co-author`, output is a single-line subject plus `Co-authored-by:` trailer lines
215
262
  - `--max-length N`: max subject length (default: 72)
216
263
  - `--chunk-tokens N`: token budget per diff chunk (`0` = single summary pass, `-1` disables summarisation)
217
264
  - `--debug`: print request/response details
218
265
  - `--commit`: run `git commit -m <message>`
266
+ - `--amend`: generate a message suitable for amending the previous commit (diff is from the amended commit's parent to the staged index; if nothing is staged, this effectively becomes the diff introduced by `HEAD`)
219
267
  - `--edit`: with `--commit`, open editor for final message
220
- - `--host URL`: host URL for providers like Ollama (default: `http://localhost:11434`)
268
+ - `--host URL`: host URL for providers like Ollama or llama.cpp (default: `http://localhost:11434` for Ollama, `http://localhost:8080` for llama.cpp)
269
+ - `--co-author VALUE`: append `Co-authored-by:` trailer(s). Repeat to add multiple values. Accepted forms: `Name <email@example.com>` or `copilot` (alias, case-insensitive).
221
270
 
222
271
  ## Environment variables
223
272
 
@@ -233,6 +282,7 @@ Optional:
233
282
  - `OPENAI_MODEL`: OpenAI-only model override (used if `--model`/`GIT_COMMIT_MESSAGE_MODEL` are not set)
234
283
  - `OLLAMA_MODEL`: Ollama-only model override (used if `--model`/`GIT_COMMIT_MESSAGE_MODEL` are not set)
235
284
  - `OLLAMA_HOST`: Ollama server URL (default: `http://localhost:11434`)
285
+ - `LLAMACPP_HOST`: llama.cpp server URL (default: `http://localhost:8080`)
236
286
  - `GIT_COMMIT_MESSAGE_LANGUAGE`: default language/locale (default: `en-GB`)
237
287
  - `GIT_COMMIT_MESSAGE_CHUNK_TOKENS`: default chunk token budget (default: `0`)
238
288
 
@@ -241,6 +291,7 @@ Default models (if not overridden):
241
291
  - OpenAI: `gpt-5-mini`
242
292
  - Google: `gemini-2.5-flash`
243
293
  - Ollama: `gpt-oss:20b`
294
+ - llama.cpp: uses pre-loaded model (model parameter is ignored)
244
295
 
245
296
  ## AI-generated code notice
246
297
 
@@ -1,13 +1,13 @@
1
1
  # git-commit-message
2
2
 
3
- Generate a commit message from your staged changes using OpenAI, Google Gemini, or Ollama.
3
+ Generate a commit message from your staged changes using OpenAI, Google Gemini, Ollama, or llama.cpp.
4
4
 
5
5
  [![asciicast](https://asciinema.org/a/jk0phFqNnc5vaCiIZEYBwZOyN.svg)](https://asciinema.org/a/jk0phFqNnc5vaCiIZEYBwZOyN)
6
6
 
7
7
  ## Requirements
8
8
 
9
9
  - Python 3.13+
10
- - A Git repo with staged changes (`git add ...`)
10
+ - A Git repo with staged changes (`git add ...`) (or use `--amend` even if nothing is staged)
11
11
 
12
12
  ## Install
13
13
 
@@ -69,6 +69,23 @@ export GIT_COMMIT_MESSAGE_PROVIDER=ollama
69
69
  export OLLAMA_MODEL=mistral
70
70
  ```
71
71
 
72
+ ### llama.cpp (local models)
73
+
74
+ 1. Build and run llama.cpp server with your model:
75
+
76
+ ```sh
77
+ llama-server -hf ggml-org/gpt-oss-20b-GGUF --host 0.0.0.0 --port 8080
78
+ ```
79
+
80
+ 2. The server runs on `http://localhost:8080` by default.
81
+
82
+ Optional: set defaults:
83
+
84
+ ```sh
85
+ export GIT_COMMIT_MESSAGE_PROVIDER=llamacpp
86
+ export LLAMACPP_HOST=http://localhost:8080
87
+ ```
88
+
72
89
  Note (fish):
73
90
 
74
91
  ```fish
@@ -90,10 +107,13 @@ git add -A
90
107
  git-commit-message "optional extra context about the change"
91
108
  ```
92
109
 
93
- Generate a single-line subject only:
110
+ Generate a single-line subject only (when no trailers are appended):
94
111
 
95
112
  ```sh
96
113
  git-commit-message --one-line "optional context"
114
+
115
+ # with trailers, output is subject plus trailer lines
116
+ git-commit-message --one-line --co-author 'John Doe <john.doe@example.com>'
97
117
  ```
98
118
 
99
119
  Select provider:
@@ -107,6 +127,9 @@ git-commit-message --provider google
107
127
 
108
128
  # Ollama
109
129
  git-commit-message --provider ollama
130
+
131
+ # llama.cpp
132
+ git-commit-message --provider llamacpp
110
133
  ```
111
134
 
112
135
  Commit immediately (optionally open editor):
@@ -114,6 +137,24 @@ Commit immediately (optionally open editor):
114
137
  ```sh
115
138
  git-commit-message --commit "refactor parser for speed"
116
139
  git-commit-message --commit --edit "refactor parser for speed"
140
+
141
+ # add co-author trailers
142
+ git-commit-message --commit --co-author 'John Doe <john.doe@example.com>'
143
+ git-commit-message --commit --co-author 'John Doe <john.doe@example.com>' --co-author 'Jane Doe <jane.doe@example.com>'
144
+ git-commit-message --commit --co-author copilot
145
+ ```
146
+
147
+ Amend the previous commit:
148
+
149
+ ```sh
150
+ # print only (useful for pasting into a GUI editor)
151
+ git-commit-message --amend "optional context"
152
+
153
+ # amend immediately
154
+ git-commit-message --commit --amend "optional context"
155
+
156
+ # amend immediately, but open editor for final tweaks
157
+ git-commit-message --commit --amend --edit "optional context"
117
158
  ```
118
159
 
119
160
  Limit subject length:
@@ -155,18 +196,26 @@ Configure Ollama host (if running on a different machine):
155
196
  git-commit-message --provider ollama --host http://192.168.1.100:11434
156
197
  ```
157
198
 
199
+ Configure llama.cpp host:
200
+
201
+ ```sh
202
+ git-commit-message --provider llamacpp --host http://192.168.1.100:8080
203
+ ```
204
+
158
205
  ## Options
159
206
 
160
- - `--provider {openai,google,ollama}`: provider to use (default: `openai`)
161
- - `--model MODEL`: model override (provider-specific)
207
+ - `--provider {openai,google,ollama,llamacpp}`: provider to use (default: `openai`)
208
+ - `--model MODEL`: model override (provider-specific; ignored for llama.cpp)
162
209
  - `--language TAG`: output language/locale (default: `en-GB`)
163
- - `--one-line`: output subject only
210
+ - `--one-line`: output subject only when no trailers are appended; with `--co-author`, output is a single-line subject plus `Co-authored-by:` trailer lines
164
211
  - `--max-length N`: max subject length (default: 72)
165
212
  - `--chunk-tokens N`: token budget per diff chunk (`0` = single summary pass, `-1` disables summarisation)
166
213
  - `--debug`: print request/response details
167
214
  - `--commit`: run `git commit -m <message>`
215
+ - `--amend`: generate a message suitable for amending the previous commit (diff is from the amended commit's parent to the staged index; if nothing is staged, this effectively becomes the diff introduced by `HEAD`)
168
216
  - `--edit`: with `--commit`, open editor for final message
169
- - `--host URL`: host URL for providers like Ollama (default: `http://localhost:11434`)
217
+ - `--host URL`: host URL for providers like Ollama or llama.cpp (default: `http://localhost:11434` for Ollama, `http://localhost:8080` for llama.cpp)
218
+ - `--co-author VALUE`: append `Co-authored-by:` trailer(s). Repeat to add multiple values. Accepted forms: `Name <email@example.com>` or `copilot` (alias, case-insensitive).
170
219
 
171
220
  ## Environment variables
172
221
 
@@ -182,6 +231,7 @@ Optional:
182
231
  - `OPENAI_MODEL`: OpenAI-only model override (used if `--model`/`GIT_COMMIT_MESSAGE_MODEL` are not set)
183
232
  - `OLLAMA_MODEL`: Ollama-only model override (used if `--model`/`GIT_COMMIT_MESSAGE_MODEL` are not set)
184
233
  - `OLLAMA_HOST`: Ollama server URL (default: `http://localhost:11434`)
234
+ - `LLAMACPP_HOST`: llama.cpp server URL (default: `http://localhost:8080`)
185
235
  - `GIT_COMMIT_MESSAGE_LANGUAGE`: default language/locale (default: `en-GB`)
186
236
  - `GIT_COMMIT_MESSAGE_CHUNK_TOKENS`: default chunk token budget (default: `0`)
187
237
 
@@ -190,6 +240,7 @@ Default models (if not overridden):
190
240
  - OpenAI: `gpt-5-mini`
191
241
  - Google: `gemini-2.5-flash`
192
242
  - Ollama: `gpt-oss:20b`
243
+ - llama.cpp: uses pre-loaded model (model parameter is ignored)
193
244
 
194
245
  ## AI-generated code notice
195
246
 
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "git-commit-message"
3
- version = "0.8.0"
3
+ version = "0.8.2"
4
4
  description = "Generate Git commit messages from staged changes using LLM"
5
5
  readme = "README.md"
6
6
  requires-python = ">=3.13"
@@ -9,6 +9,8 @@ from __future__ import annotations
9
9
  from argparse import ArgumentParser, Namespace
10
10
  from os import environ
11
11
  from pathlib import Path
12
+ import re
13
+ from re import Pattern
12
14
  from sys import exit as sys_exit
13
15
  from sys import stderr
14
16
  from typing import Final
@@ -17,7 +19,9 @@ from ._git import (
17
19
  commit_with_message,
18
20
  get_repo_root,
19
21
  get_staged_diff,
22
+ has_head_commit,
20
23
  has_staged_changes,
24
+ resolve_amend_base_ref,
21
25
  )
22
26
  from ._llm import (
23
27
  CommitMessageResult,
@@ -31,6 +35,7 @@ class CliArgs(Namespace):
31
35
  __slots__ = (
32
36
  "description",
33
37
  "commit",
38
+ "amend",
34
39
  "edit",
35
40
  "provider",
36
41
  "model",
@@ -40,6 +45,7 @@ class CliArgs(Namespace):
40
45
  "max_length",
41
46
  "chunk_tokens",
42
47
  "host",
48
+ "co_authors",
43
49
  )
44
50
 
45
51
  def __init__(
@@ -48,6 +54,7 @@ class CliArgs(Namespace):
48
54
  ) -> None:
49
55
  self.description: str | None = None
50
56
  self.commit: bool = False
57
+ self.amend: bool = False
51
58
  self.edit: bool = False
52
59
  self.provider: str | None = None
53
60
  self.model: str | None = None
@@ -57,6 +64,83 @@ class CliArgs(Namespace):
57
64
  self.max_length: int | None = None
58
65
  self.chunk_tokens: int | None = None
59
66
  self.host: str | None = None
67
+ self.co_authors: list[str] | None = None
68
+
69
+
70
+ _CO_AUTHOR_LINE_RE: Final[Pattern[str]] = re.compile(
71
+ r"^\s*([^<>\s\n][^<>\n]*?)\s*<([^<>\s\n]+@[^<>\s\n]+)>\s*$"
72
+ )
73
+ _CO_AUTHOR_ALIASES: Final[dict[str, str]] = {
74
+ "copilot": "Copilot <copilot@github.com>",
75
+ }
76
+
77
+
78
+ def _co_author_alias_keywords_text() -> str:
79
+ """Return a readable list of accepted co-author alias keywords."""
80
+
81
+ keywords: list[str] = sorted(_CO_AUTHOR_ALIASES.keys())
82
+ return ", ".join(f"'{keyword}'" for keyword in keywords)
83
+
84
+
85
+ def _normalize_co_author(
86
+ raw: str,
87
+ /,
88
+ ) -> str:
89
+ """Normalize one co-author input into ``Name <email>`` form."""
90
+
91
+ value: str = raw.strip()
92
+ if not value:
93
+ raise ValueError("Co-author cannot be empty.")
94
+
95
+ alias: str | None = _CO_AUTHOR_ALIASES.get(value.lower())
96
+ if alias is not None:
97
+ return alias
98
+
99
+ match = _CO_AUTHOR_LINE_RE.match(value)
100
+ if match is None:
101
+ raise ValueError(
102
+ "Invalid co-author format: use 'Name <email@example.com>' "
103
+ f"or an alias keyword ({_co_author_alias_keywords_text()})."
104
+ )
105
+
106
+ name: str = match.group(1).strip()
107
+ email: str = match.group(2).strip()
108
+ return f"{name} <{email}>"
109
+
110
+
111
+ def _append_co_author_footers(
112
+ message: str,
113
+ normalized_co_authors: list[str],
114
+ /,
115
+ ) -> str:
116
+ """Append Git co-author trailers to a commit message."""
117
+
118
+ if not normalized_co_authors:
119
+ return message
120
+
121
+ base: str = message.rstrip()
122
+ footer_lines: list[str] = [
123
+ f"Co-authored-by: {author}" for author in normalized_co_authors
124
+ ]
125
+ return f"{base}\n\n" + "\n".join(footer_lines)
126
+
127
+
128
+ def _normalize_co_authors(
129
+ co_authors: list[str],
130
+ /,
131
+ ) -> list[str]:
132
+ """Normalize and deduplicate co-author values in insertion order."""
133
+
134
+ seen: set[str] = set()
135
+ normalized: list[str] = []
136
+ for raw in co_authors:
137
+ author = _normalize_co_author(raw)
138
+ key = author.lower()
139
+ if key in seen:
140
+ continue
141
+ seen.add(key)
142
+ normalized.append(author)
143
+ return normalized
60
144
 
61
145
 
62
146
  def _env_chunk_tokens_default() -> int | None:
@@ -99,6 +183,16 @@ def _build_parser() -> ArgumentParser:
99
183
  help="Commit immediately with the generated message.",
100
184
  )
101
185
 
186
+ parser.add_argument(
187
+ "--amend",
188
+ action="store_true",
189
+ help=(
190
+ "Generate a message suitable for amending the previous commit. "
191
+ "When set, the diff is computed from the amended commit's parent to the staged index. "
192
+ "Use with '--commit' to run the amend, or omit '--commit' to print the message only."
193
+ ),
194
+ )
195
+
102
196
  parser.add_argument(
103
197
  "--edit",
104
198
  action="store_true",
@@ -111,7 +205,8 @@ def _build_parser() -> ArgumentParser:
111
205
  help=(
112
206
  "LLM provider to use (default: openai). "
113
207
  "You may also set GIT_COMMIT_MESSAGE_PROVIDER. "
114
- "The CLI flag overrides the environment variable."
208
+ "The CLI flag overrides the environment variable. "
209
+ "Supported providers: openai, google, ollama, llamacpp."
115
210
  ),
116
211
  )
117
212
 
@@ -119,7 +214,8 @@ def _build_parser() -> ArgumentParser:
119
214
  "--model",
120
215
  default=None,
121
216
  help=(
122
- "Model name to use. If unspecified, uses GIT_COMMIT_MESSAGE_MODEL or a provider-specific default (openai: gpt-5-mini; google: gemini-2.5-flash; ollama: gpt-oss:20b)."
217
+ "Model name to use. If unspecified, uses GIT_COMMIT_MESSAGE_MODEL or a provider-specific default "
218
+ "(openai: gpt-5-mini; google: gemini-2.5-flash; ollama: gpt-oss:20b; llamacpp: default)."
123
219
  ),
124
220
  )
125
221
 
@@ -171,8 +267,24 @@ def _build_parser() -> ArgumentParser:
171
267
  dest="host",
172
268
  default=None,
173
269
  help=(
174
- "Host URL for API providers like Ollama (default: http://localhost:11434). "
175
- "You may also set OLLAMA_HOST for Ollama."
270
+ "Host URL for API providers like Ollama or llama.cpp "
271
+ "(default: http://localhost:11434 for Ollama, http://localhost:8080 for llama.cpp). "
272
+ "You may also set OLLAMA_HOST for Ollama or LLAMACPP_HOST for llama.cpp."
273
+ ),
274
+ )
275
+
276
+ parser.add_argument(
277
+ "--co-author",
278
+ dest="co_authors",
279
+ action="append",
280
+ default=None,
281
+ help=(
282
+ "Add Co-authored-by trailer(s) to the generated message. "
283
+ "Repeat for multiple co-authors. "
284
+ "Use 'Name <email@example.com>' or an alias keyword "
285
+ f"({_co_author_alias_keywords_text()}). "
286
+ "When used with --one-line, the subject line remains single-line and these "
287
+ "trailers are appended on separate lines (i.e., the overall output is multi-line)."
176
288
  ),
177
289
  )
178
290
 
@@ -198,11 +310,19 @@ def _run(
198
310
 
199
311
  repo_root: Path = get_repo_root()
200
312
 
201
- if not has_staged_changes(repo_root):
202
- print("No staged changes. Run 'git add' and try again.", file=stderr)
203
- return 2
313
+ if args.amend:
314
+ if not has_head_commit(repo_root):
315
+ print("Cannot amend: the repository has no commits yet.", file=stderr)
316
+ return 2
317
+
318
+ base_ref = resolve_amend_base_ref(repo_root)
319
+ diff_text: str = get_staged_diff(repo_root, base_ref=base_ref)
320
+ else:
321
+ if not has_staged_changes(repo_root):
322
+ print("No staged changes. Run 'git add' and try again.", file=stderr)
323
+ return 2
204
324
 
205
- diff_text: str = get_staged_diff(repo_root)
325
+ diff_text = get_staged_diff(repo_root)
206
326
 
207
327
  hint: str | None = args.description if isinstance(args.description, str) else None
208
328
 
@@ -212,6 +332,14 @@ def _run(
212
332
  if chunk_tokens is None:
213
333
  chunk_tokens = 0
214
334
 
335
+ normalized_co_authors: list[str] | None = None
336
+ if args.co_authors:
337
+ try:
338
+ normalized_co_authors = _normalize_co_authors(args.co_authors)
339
+ except ValueError as exc:
340
+ print(str(exc), file=stderr)
341
+ return 2
342
+
215
343
  result: CommitMessageResult | None = None
216
344
  try:
217
345
  if args.debug:
@@ -256,6 +384,14 @@ def _run(
256
384
  else:
257
385
  message = ""
258
386
 
387
+ # Defensive check: one-line normalization can result in an empty message.
388
+ if not message.strip():
389
+ print("Failed to generate commit message: generated message is empty.", file=stderr)
390
+ return 3
391
+
392
+ if normalized_co_authors:
393
+ message = _append_co_author_footers(message, normalized_co_authors)
394
+
259
395
  if not args.commit:
260
396
  if args.debug and result is not None:
261
397
  # Print debug information
@@ -299,9 +435,9 @@ def _run(
299
435
  print(message)
300
436
 
301
437
  if args.edit:
302
- rc: int = commit_with_message(message, True, repo_root)
438
+ rc: int = commit_with_message(message, True, repo_root, amend=args.amend)
303
439
  else:
304
- rc = commit_with_message(message, False, repo_root)
440
+ rc = commit_with_message(message, False, repo_root, amend=args.amend)
305
441
 
306
442
  return rc
307
443
 
@@ -0,0 +1,269 @@
1
+ """Git-related helper functions.
2
+
3
+ Provides repository root discovery, extraction of staged changes, and
4
+ creating commits from a message.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from pathlib import Path
10
+ from subprocess import CalledProcessError, check_call, check_output, run
11
+
12
+
13
def _get_empty_tree_hash(
    cwd: Path,
    /,
) -> str:
    """Ask Git for the empty tree object ID of the repository at *cwd*.

    The SHA is deliberately not hard-coded: repositories may use different
    hash algorithms (SHA-1 vs SHA-256), so Git itself computes the empty
    tree object ID for the current repo.

    Parameters
    ----------
    cwd
        Repository directory in which to run Git.

    Returns
    -------
    str
        The empty tree object ID for the current repository.

    Raises
    ------
    RuntimeError
        If Git exits non-zero, cannot be launched, or prints no object ID.
    """

    command = ["git", "hash-object", "-t", "tree", "--stdin"]
    try:
        completed = run(
            command,
            cwd=str(cwd),
            check=True,
            input=b"",
            capture_output=True,
        )
    except CalledProcessError as exc:
        stderr_text = (exc.stderr or b"").decode(errors="replace").strip()
        suffix = f"\nGit stderr: {stderr_text}" if stderr_text else ""
        raise RuntimeError(
            f"Failed to compute empty tree hash (git exited with {exc.returncode}).{suffix}"
        ) from exc
    except OSError as exc:
        raise RuntimeError(
            f"Failed to run git to compute empty tree hash: {exc}"
        ) from exc

    object_id = completed.stdout.decode().strip()
    if object_id:
        return object_id
    raise RuntimeError(
        "Failed to compute empty tree hash: git returned an empty object ID."
    )
66
+
67
+
68
def get_repo_root(
    cwd: Path | None = None,
    /,
) -> Path:
    """Find the repository root via ``git rev-parse --show-toplevel``.

    Parameters
    ----------
    cwd
        Starting directory for the search. Defaults to the current working directory.

    Returns
    -------
    Path
        The repository root path.

    Raises
    ------
    RuntimeError
        If the starting directory is not inside a Git repository.
    """

    base: Path = cwd or Path.cwd()
    try:
        raw: bytes = check_output(
            ["git", "rev-parse", "--show-toplevel"],
            cwd=str(base),
        )
    except CalledProcessError as exc:  # noqa: TRY003
        raise RuntimeError("Not a Git repository.") from exc

    return Path(raw.decode().strip())
100
+
101
+
102
def has_staged_changes(
    cwd: Path,
    /,
) -> bool:
    """Return True when the index differs from HEAD (changes are staged)."""

    # `git diff --cached --quiet` exits non-zero exactly when staged
    # changes exist, which surfaces here as CalledProcessError.
    try:
        check_call(
            ["git", "diff", "--cached", "--quiet", "--exit-code"],
            cwd=str(cwd),
        )
    except CalledProcessError:
        return True
    return False
116
+
117
+
118
def has_head_commit(
    cwd: Path,
    /,
) -> bool:
    """Return True if the repository has at least one commit (HEAD exists).

    Parameters
    ----------
    cwd
        Repository directory in which to run Git.

    Returns
    -------
    bool
        True if ``HEAD`` resolves in the repository, False otherwise.
    """

    probe = run(
        ["git", "rev-parse", "--verify", "HEAD"],
        cwd=str(cwd),
        check=False,
        capture_output=True,
    )
    return probe.returncode == 0
142
+
143
+
144
def resolve_amend_base_ref(
    cwd: Path,
    /,
) -> str:
    """Pick the diff base for an amend: ``HEAD^`` or the empty tree.

    An amended commit keeps the same parent as the current ``HEAD`` commit,
    so the message should describe the diff from that parent:

    - If HEAD has a parent, the base is ``HEAD^``.
    - If HEAD is a root commit (no parent), the base is the empty tree.

    Parameters
    ----------
    cwd
        Repository directory in which to run Git.

    Returns
    -------
    str
        Either ``HEAD^`` or the empty tree object ID.
    """

    probe = run(
        ["git", "rev-parse", "--verify", "HEAD^"],
        cwd=str(cwd),
        check=False,
        capture_output=True,
    )
    return "HEAD^" if probe.returncode == 0 else _get_empty_tree_hash(cwd)
179
+
180
+
181
def get_staged_diff(
    cwd: Path,
    /,
    *,
    base_ref: str | None = None,
) -> str:
    """Return the staged changes as unified diff text.

    Parameters
    ----------
    cwd
        Git working directory.
    base_ref
        Optional Git reference or tree object ID (e.g., branch name, tag,
        commit hash, or the empty tree hash) to diff against. When given,
        the diff runs from ``base_ref`` to the staged index instead of from
        ``HEAD`` to the staged index.

    Returns
    -------
    str
        Unified diff text for the staged changes.

    Raises
    ------
    RuntimeError
        If Git fails to produce the diff.
    """

    command: list[str] = [
        "git",
        "diff",
        "--cached",
        "--patch",
        "--minimal",
        "--no-color",
    ]
    if base_ref:
        command.append(base_ref)

    try:
        raw: bytes = check_output(command, cwd=str(cwd))
    except CalledProcessError as exc:
        message = "Failed to retrieve staged diff from Git."
        if base_ref:
            message += (
                " Ensure that the provided base_ref exists and is a valid Git reference."
            )
        raise RuntimeError(message) from exc

    return raw.decode()
227
+
228
+
229
def commit_with_message(
    message: str,
    edit: bool,
    cwd: Path,
    /,
    *,
    amend: bool = False,
) -> int:
    """Run ``git commit`` with the given message and return its exit code.

    Parameters
    ----------
    message
        Commit message.
    edit
        If True, pass ``--edit`` so Git opens an editor for amendments.
    cwd
        Git working directory.
    amend
        If True, pass ``--amend`` to Git to amend the current ``HEAD``
        commit instead of creating a new commit.

    Returns
    -------
    int
        The subprocess exit code.

    Raises
    ------
    RuntimeError
        If the ``git commit`` process cannot be launched at all.
    """

    command: list[str] = ["git", "commit"]
    if amend:
        command.append("--amend")
    command.extend(["-m", message])
    if edit:
        command.append("--edit")

    try:
        return int(run(command, cwd=str(cwd), check=False).returncode)
    except OSError as exc:  # e.g., editor launch failure, etc.
        raise RuntimeError(f"Failed to run 'git commit': {exc}") from exc
@@ -0,0 +1,145 @@
1
+ """llama.cpp provider implementation.
2
+
3
+ This module contains llama.cpp server-specific API calls and token counting.
4
+ llama.cpp server provides an OpenAI-compatible API, so we use the openai library.
5
+ Provider-agnostic orchestration/prompt logic lives in `_llm.py`.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ from os import environ
11
+ from typing import ClassVar, Final
12
+
13
+ from openai import OpenAI
14
+ from openai.types.chat import ChatCompletionMessageParam
15
+ from tiktoken import Encoding, get_encoding
16
+
17
+ from ._llm import LLMTextResult, LLMUsage
18
+
19
+
20
+ _DEFAULT_LLAMACPP_HOST: Final[str] = "http://localhost:8080"
21
+
22
+
23
+ def _resolve_llamacpp_host(
24
+ host: str | None,
25
+ /,
26
+ ) -> str:
27
+ """Resolve the llama.cpp server host URL from arg, env, or default."""
28
+
29
+ return host or environ.get("LLAMACPP_HOST") or _DEFAULT_LLAMACPP_HOST
30
+
31
+
32
def _get_encoding() -> Encoding:
    """Return a tokenizer for approximate counting, preferring cl100k_base."""

    try:
        return get_encoding("cl100k_base")
    except Exception:
        # Fall back to the older GPT-2 vocabulary if cl100k_base
        # cannot be loaded in this environment.
        return get_encoding("gpt2")
39
+
40
+
41
class LlamaCppProvider:
    """llama.cpp provider implementation for the LLM protocol.

    Uses the OpenAI-compatible API provided by llama.cpp server for text
    generation, and the server's native ``/tokenize`` endpoint for token
    counting.
    """

    __slots__ = (
        "_host",
        "_client",
    )

    name: ClassVar[str] = "llamacpp"

    def __init__(
        self,
        /,
        *,
        host: str | None = None,
    ) -> None:
        self._host = _resolve_llamacpp_host(host)
        # llama.cpp server uses OpenAI-compatible API
        # api_key is not required but openai library needs a placeholder
        self._client = OpenAI(
            base_url=f"{self._host}/v1",
            api_key="llamacpp",  # Placeholder, llama.cpp doesn't require auth by default
        )

    def generate_text(
        self,
        /,
        *,
        model: str,
        instructions: str,
        user_text: str,
    ) -> LLMTextResult:
        """Generate text using llama.cpp server (OpenAI-compatible chat/completions API).

        Raises
        ------
        RuntimeError
            If the server cannot be reached or returns an empty response.
        """

        messages: list[ChatCompletionMessageParam] = [
            {"role": "system", "content": instructions},
            {"role": "user", "content": user_text},
        ]

        try:
            resp = self._client.chat.completions.create(
                model=model,
                messages=messages,
            )
        except Exception as exc:
            raise RuntimeError(
                f"Failed to connect to llama.cpp server at {self._host}. "
                f"Make sure llama.cpp server is running: {exc}"
            ) from exc

        text: str = ""
        if resp.choices and len(resp.choices) > 0:
            choice = resp.choices[0]
            if choice.message and choice.message.content:
                text = choice.message.content.strip()

        if not text:
            raise RuntimeError("An empty response text was generated by the provider.")

        usage: LLMUsage | None = None
        if resp.usage is not None:
            usage = LLMUsage(
                prompt_tokens=resp.usage.prompt_tokens,
                completion_tokens=resp.usage.completion_tokens,
                total_tokens=resp.usage.total_tokens,
            )

        return LLMTextResult(
            text=text,
            response_id=resp.id,
            usage=usage,
        )

    def count_tokens(
        self,
        /,
        *,
        model: str,
        text: str,
    ) -> int:
        """Count tokens for *text* using the llama.cpp server's tokenizer.

        Notes
        -----
        llama.cpp server exposes its native tokenizer at ``POST /tokenize``
        (at the server root, outside the ``/v1`` OpenAI-compatible prefix),
        taking ``{"content": ...}`` and returning the token IDs under the
        ``"tokens"`` key. The previous implementation posted to
        ``/messages/count_tokens`` — an Anthropic Messages API route that
        llama.cpp does not serve — so it always fell back to tiktoken.

        The ``model`` parameter is accepted for protocol compatibility but
        not sent: the server tokenizes with its pre-loaded model.
        """

        # Local imports keep this stdlib dependency scoped to the one
        # method that needs it.
        from json import dumps, loads
        from urllib.request import Request, urlopen

        try:
            request = Request(
                f"{self._host}/tokenize",
                data=dumps({"content": text}).encode("utf-8"),
                headers={"Content-Type": "application/json"},
                method="POST",
            )
            with urlopen(request, timeout=10) as response:
                payload = loads(response.read().decode("utf-8"))
            tokens = payload.get("tokens")
            if isinstance(tokens, list):
                return len(tokens)
            raise RuntimeError("Unexpected /tokenize response shape.")
        except Exception:
            # Fallback to tiktoken approximation
            try:
                encoding = _get_encoding()
                return len(encoding.encode(text))
            except Exception:
                # Last resort: crude whitespace-delimited word count.
                return len(text.split())
@@ -19,6 +19,7 @@ _DEFAULT_PROVIDER: Final[str] = "openai"
19
19
  _DEFAULT_MODEL_OPENAI: Final[str] = "gpt-5-mini"
20
20
  _DEFAULT_MODEL_GOOGLE: Final[str] = "gemini-2.5-flash"
21
21
  _DEFAULT_MODEL_OLLAMA: Final[str] = "gpt-oss:20b"
22
+ _DEFAULT_MODEL_LLAMACPP: Final[str] = "default"
22
23
  _DEFAULT_LANGUAGE: Final[str] = "en-GB"
23
24
 
24
25
 
@@ -155,6 +156,9 @@ def _resolve_model(
155
156
  elif provider_name == "ollama":
156
157
  default_model = _DEFAULT_MODEL_OLLAMA
157
158
  provider_model = environ.get("OLLAMA_MODEL")
159
+ elif provider_name == "llamacpp":
160
+ default_model = _DEFAULT_MODEL_LLAMACPP
161
+ provider_model = environ.get("LLAMACPP_MODEL")
158
162
  else:
159
163
  default_model = _DEFAULT_MODEL_OPENAI
160
164
  provider_model = environ.get("OPENAI_MODEL")
@@ -195,8 +199,14 @@ def get_provider(
195
199
 
196
200
  return OllamaProvider(host=host)
197
201
 
202
+ if name == "llamacpp":
203
+ # Local import to avoid import cycles: providers may import shared types from this module.
204
+ from ._llamacpp import LlamaCppProvider
205
+
206
+ return LlamaCppProvider(host=host)
207
+
198
208
  raise UnsupportedProviderError(
199
- f"Unsupported provider: {name}. Supported providers: openai, google, ollama"
209
+ f"Unsupported provider: {name}. Supported providers: openai, google, ollama, llamacpp"
200
210
  )
201
211
 
202
212
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: git-commit-message
3
- Version: 0.8.0
3
+ Version: 0.8.2
4
4
  Summary: Generate Git commit messages from staged changes using LLM
5
5
  Maintainer-email: Mina Her <minacle@live.com>
6
6
  License: This is free and unencumbered software released into the public domain.
@@ -51,14 +51,14 @@ Requires-Dist: tiktoken>=0.12.0
51
51
 
52
52
  # git-commit-message
53
53
 
54
- Generate a commit message from your staged changes using OpenAI, Google Gemini, or Ollama.
54
+ Generate a commit message from your staged changes using OpenAI, Google Gemini, Ollama, or llama.cpp.
55
55
 
56
56
  [![asciicast](https://asciinema.org/a/jk0phFqNnc5vaCiIZEYBwZOyN.svg)](https://asciinema.org/a/jk0phFqNnc5vaCiIZEYBwZOyN)
57
57
 
58
58
  ## Requirements
59
59
 
60
60
  - Python 3.13+
61
- - A Git repo with staged changes (`git add ...`)
61
+ - A Git repo with staged changes (`git add ...`), or use `--amend` even if nothing is staged
62
62
 
63
63
  ## Install
64
64
 
@@ -120,6 +120,23 @@ export GIT_COMMIT_MESSAGE_PROVIDER=ollama
120
120
  export OLLAMA_MODEL=mistral
121
121
  ```
122
122
 
123
+ ### llama.cpp (local models)
124
+
125
+ 1. Build and run llama.cpp server with your model:
126
+
127
+ ```sh
128
+ llama-server -hf ggml-org/gpt-oss-20b-GGUF --host 0.0.0.0 --port 8080
129
+ ```
130
+
131
+ 2. The server runs on `http://localhost:8080` by default.
132
+
133
+ Optionally, set defaults:
134
+
135
+ ```sh
136
+ export GIT_COMMIT_MESSAGE_PROVIDER=llamacpp
137
+ export LLAMACPP_HOST=http://localhost:8080
138
+ ```
139
+
123
140
  Note (fish):
124
141
 
125
142
  ```fish
@@ -141,10 +158,13 @@ git add -A
141
158
  git-commit-message "optional extra context about the change"
142
159
  ```
143
160
 
144
- Generate a single-line subject only:
161
+ Generate a single-line subject only (when no trailers are appended):
145
162
 
146
163
  ```sh
147
164
  git-commit-message --one-line "optional context"
165
+
166
+ # with trailers, output is subject plus trailer lines
167
+ git-commit-message --one-line --co-author 'John Doe <john.doe@example.com>'
148
168
  ```
149
169
 
150
170
  Select provider:
@@ -158,6 +178,9 @@ git-commit-message --provider google
158
178
 
159
179
  # Ollama
160
180
  git-commit-message --provider ollama
181
+
182
+ # llama.cpp
183
+ git-commit-message --provider llamacpp
161
184
  ```
162
185
 
163
186
  Commit immediately (optionally open editor):
@@ -165,6 +188,24 @@ Commit immediately (optionally open editor):
165
188
  ```sh
166
189
  git-commit-message --commit "refactor parser for speed"
167
190
  git-commit-message --commit --edit "refactor parser for speed"
191
+
192
+ # add co-author trailers
193
+ git-commit-message --commit --co-author 'John Doe <john.doe@example.com>'
194
+ git-commit-message --commit --co-author 'John Doe <john.doe@example.com>' --co-author 'Jane Doe <jane.doe@example.com>'
195
+ git-commit-message --commit --co-author copilot
196
+ ```
197
+
198
+ Amend the previous commit:
199
+
200
+ ```sh
201
+ # print only (useful for pasting into a GUI editor)
202
+ git-commit-message --amend "optional context"
203
+
204
+ # amend immediately
205
+ git-commit-message --commit --amend "optional context"
206
+
207
+ # amend immediately, but open editor for final tweaks
208
+ git-commit-message --commit --amend --edit "optional context"
168
209
  ```
169
210
 
170
211
  Limit subject length:
@@ -206,18 +247,26 @@ Configure Ollama host (if running on a different machine):
206
247
  git-commit-message --provider ollama --host http://192.168.1.100:11434
207
248
  ```
208
249
 
250
+ Configure llama.cpp host:
251
+
252
+ ```sh
253
+ git-commit-message --provider llamacpp --host http://192.168.1.100:8080
254
+ ```
255
+
209
256
  ## Options
210
257
 
211
- - `--provider {openai,google,ollama}`: provider to use (default: `openai`)
212
- - `--model MODEL`: model override (provider-specific)
258
+ - `--provider {openai,google,ollama,llamacpp}`: provider to use (default: `openai`)
259
+ - `--model MODEL`: model override (provider-specific; ignored for llama.cpp)
213
260
  - `--language TAG`: output language/locale (default: `en-GB`)
214
- - `--one-line`: output subject only
261
+ - `--one-line`: output subject only when no trailers are appended; with `--co-author`, output is a single-line subject plus `Co-authored-by:` trailer lines
215
262
  - `--max-length N`: max subject length (default: 72)
216
263
  - `--chunk-tokens N`: token budget per diff chunk (`0` = single summary pass, `-1` disables summarisation)
217
264
  - `--debug`: print request/response details
218
265
  - `--commit`: run `git commit -m <message>`
266
+ - `--amend`: generate a message suitable for amending the previous commit (diff is from the amended commit's parent to the staged index; if nothing is staged, this effectively becomes the diff introduced by `HEAD`)
219
267
  - `--edit`: with `--commit`, open editor for final message
220
- - `--host URL`: host URL for providers like Ollama (default: `http://localhost:11434`)
268
+ - `--host URL`: host URL for providers like Ollama or llama.cpp (default: `http://localhost:11434` for Ollama, `http://localhost:8080` for llama.cpp)
269
+ - `--co-author VALUE`: append `Co-authored-by:` trailer(s). Repeat to add multiple values. Accepted forms: `Name <email@example.com>` or `copilot` (alias, case-insensitive).
221
270
 
222
271
  ## Environment variables
223
272
 
@@ -233,6 +282,7 @@ Optional:
233
282
  - `OPENAI_MODEL`: OpenAI-only model override (used if `--model`/`GIT_COMMIT_MESSAGE_MODEL` are not set)
234
283
  - `OLLAMA_MODEL`: Ollama-only model override (used if `--model`/`GIT_COMMIT_MESSAGE_MODEL` are not set)
235
284
  - `OLLAMA_HOST`: Ollama server URL (default: `http://localhost:11434`)
285
+ - `LLAMACPP_HOST`: llama.cpp server URL (default: `http://localhost:8080`)
236
286
  - `GIT_COMMIT_MESSAGE_LANGUAGE`: default language/locale (default: `en-GB`)
237
287
  - `GIT_COMMIT_MESSAGE_CHUNK_TOKENS`: default chunk token budget (default: `0`)
238
288
 
@@ -241,6 +291,7 @@ Default models (if not overridden):
241
291
  - OpenAI: `gpt-5-mini`
242
292
  - Google: `gemini-2.5-flash`
243
293
  - Ollama: `gpt-oss:20b`
294
+ - llama.cpp: uses pre-loaded model (model parameter is ignored)
244
295
 
245
296
  ## AI-generated code notice
246
297
 
@@ -7,6 +7,7 @@ src/git_commit_message/_cli.py
7
7
  src/git_commit_message/_gemini.py
8
8
  src/git_commit_message/_git.py
9
9
  src/git_commit_message/_gpt.py
10
+ src/git_commit_message/_llamacpp.py
10
11
  src/git_commit_message/_llm.py
11
12
  src/git_commit_message/_ollama.py
12
13
  src/git_commit_message.egg-info/PKG-INFO
@@ -1,114 +0,0 @@
1
- """Git-related helper functions.
2
-
3
- Provides repository root discovery, extraction of staged changes, and
4
- creating commits from a message.
5
- """
6
-
7
- from __future__ import annotations
8
-
9
- from pathlib import Path
10
- from subprocess import CalledProcessError, check_call, check_output, run
11
-
12
-
13
- def get_repo_root(
14
- cwd: Path | None = None,
15
- /,
16
- ) -> Path:
17
- """Find the repository root from the current working directory.
18
-
19
- Parameters
20
- ----------
21
- cwd
22
- Starting directory for the search. Defaults to the current working directory.
23
-
24
- Returns
25
- -------
26
- Path
27
- The repository root path.
28
- """
29
-
30
- start: Path = cwd or Path.cwd()
31
- try:
32
- out: bytes = check_output(
33
- [
34
- "git",
35
- "rev-parse",
36
- "--show-toplevel",
37
- ],
38
- cwd=str(start),
39
- )
40
- except CalledProcessError as exc: # noqa: TRY003
41
- raise RuntimeError("Not a Git repository.") from exc
42
-
43
- root = Path(out.decode().strip())
44
- return root
45
-
46
-
47
- def has_staged_changes(
48
- cwd: Path,
49
- /,
50
- ) -> bool:
51
- """Check whether there are staged changes."""
52
-
53
- try:
54
- check_call(
55
- ["git", "diff", "--cached", "--quiet", "--exit-code"],
56
- cwd=str(cwd),
57
- )
58
- return False
59
- except CalledProcessError:
60
- return True
61
-
62
-
63
- def get_staged_diff(
64
- cwd: Path,
65
- /,
66
- ) -> str:
67
- """Return the staged changes as diff text."""
68
-
69
- out: bytes = check_output(
70
- [
71
- "git",
72
- "diff",
73
- "--cached",
74
- "--patch",
75
- "--minimal",
76
- "--no-color",
77
- ],
78
- cwd=str(cwd),
79
- )
80
- return out.decode()
81
-
82
-
83
- def commit_with_message(
84
- message: str,
85
- edit: bool,
86
- cwd: Path,
87
- /,
88
- ) -> int:
89
- """Create a commit with the given message.
90
-
91
- Parameters
92
- ----------
93
- message
94
- Commit message.
95
- edit
96
- If True, use the `--edit` flag to open an editor for amendments.
97
- cwd
98
- Git working directory.
99
-
100
- Returns
101
- -------
102
- int
103
- The subprocess exit code.
104
- """
105
-
106
- cmd: list[str] = ["git", "commit", "-m", message]
107
- if edit:
108
- cmd.append("--edit")
109
-
110
- try:
111
- completed = run(cmd, cwd=str(cwd), check=False)
112
- return int(completed.returncode)
113
- except OSError as exc: # e.g., editor launch failure, etc.
114
- raise RuntimeError(f"Failed to run 'git commit': {exc}") from exc