git-commit-message 0.6.0__tar.gz → 0.7.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {git_commit_message-0.6.0 → git_commit_message-0.7.0}/PKG-INFO +36 -4
- {git_commit_message-0.6.0 → git_commit_message-0.7.0}/README.md +33 -2
- {git_commit_message-0.6.0 → git_commit_message-0.7.0}/pyproject.toml +3 -2
- {git_commit_message-0.6.0 → git_commit_message-0.7.0}/src/git_commit_message/_cli.py +25 -7
- git_commit_message-0.7.0/src/git_commit_message/_gemini.py +122 -0
- git_commit_message-0.7.0/src/git_commit_message/_gpt.py +90 -0
- git_commit_message-0.6.0/src/git_commit_message/_gpt.py → git_commit_message-0.7.0/src/git_commit_message/_llm.py +286 -296
- {git_commit_message-0.6.0 → git_commit_message-0.7.0}/src/git_commit_message.egg-info/PKG-INFO +36 -4
- {git_commit_message-0.6.0 → git_commit_message-0.7.0}/src/git_commit_message.egg-info/SOURCES.txt +2 -0
- {git_commit_message-0.6.0 → git_commit_message-0.7.0}/src/git_commit_message.egg-info/requires.txt +1 -0
- {git_commit_message-0.6.0 → git_commit_message-0.7.0}/UNLICENSE +0 -0
- {git_commit_message-0.6.0 → git_commit_message-0.7.0}/setup.cfg +0 -0
- {git_commit_message-0.6.0 → git_commit_message-0.7.0}/src/git_commit_message/__init__.py +0 -0
- {git_commit_message-0.6.0 → git_commit_message-0.7.0}/src/git_commit_message/__main__.py +0 -0
- {git_commit_message-0.6.0 → git_commit_message-0.7.0}/src/git_commit_message/_git.py +0 -0
- {git_commit_message-0.6.0 → git_commit_message-0.7.0}/src/git_commit_message.egg-info/dependency_links.txt +0 -0
- {git_commit_message-0.6.0 → git_commit_message-0.7.0}/src/git_commit_message.egg-info/entry_points.txt +0 -0
- {git_commit_message-0.6.0 → git_commit_message-0.7.0}/src/git_commit_message.egg-info/top_level.txt +0 -0
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: git-commit-message
|
|
3
|
-
Version: 0.
|
|
4
|
-
Summary: Generate Git commit messages from staged changes using
|
|
3
|
+
Version: 0.7.0
|
|
4
|
+
Summary: Generate Git commit messages from staged changes using LLM
|
|
5
5
|
Maintainer-email: Mina Her <minacle@live.com>
|
|
6
6
|
License: This is free and unencumbered software released into the public domain.
|
|
7
7
|
|
|
@@ -44,6 +44,7 @@ Classifier: Topic :: Software Development :: Version Control :: Git
|
|
|
44
44
|
Requires-Python: >=3.13
|
|
45
45
|
Description-Content-Type: text/markdown
|
|
46
46
|
Requires-Dist: babel>=2.17.0
|
|
47
|
+
Requires-Dist: google-genai>=1.56.0
|
|
47
48
|
Requires-Dist: openai>=2.6.1
|
|
48
49
|
Requires-Dist: tiktoken>=0.12.0
|
|
49
50
|
|
|
@@ -83,6 +84,12 @@ Set your API key (POSIX sh):
|
|
|
83
84
|
export OPENAI_API_KEY="sk-..."
|
|
84
85
|
```
|
|
85
86
|
|
|
87
|
+
Or for the Google provider:
|
|
88
|
+
|
|
89
|
+
```sh
|
|
90
|
+
export GOOGLE_API_KEY="..."
|
|
91
|
+
```
|
|
92
|
+
|
|
86
93
|
Note (fish): In fish, set it as follows.
|
|
87
94
|
|
|
88
95
|
```fish
|
|
@@ -110,6 +117,18 @@ git-commit-message "optional extra context about the change"
|
|
|
110
117
|
git-commit-message --one-line "optional context"
|
|
111
118
|
```
|
|
112
119
|
|
|
120
|
+
- Select provider (default: openai):
|
|
121
|
+
|
|
122
|
+
```sh
|
|
123
|
+
git-commit-message --provider openai "optional context"
|
|
124
|
+
```
|
|
125
|
+
|
|
126
|
+
- Select provider (Google Gemini via google-genai):
|
|
127
|
+
|
|
128
|
+
```sh
|
|
129
|
+
git-commit-message --provider google "optional context"
|
|
130
|
+
```
|
|
131
|
+
|
|
113
132
|
- Limit subject length (default 72):
|
|
114
133
|
|
|
115
134
|
```sh
|
|
@@ -135,6 +154,12 @@ git-commit-message --chunk-tokens -1 "optional context"
|
|
|
135
154
|
git-commit-message --commit --edit "refactor parser for speed"
|
|
136
155
|
```
|
|
137
156
|
|
|
157
|
+
- Print debug info (prompt/response + token usage):
|
|
158
|
+
|
|
159
|
+
```sh
|
|
160
|
+
git-commit-message --debug "optional context"
|
|
161
|
+
```
|
|
162
|
+
|
|
138
163
|
- Select output language/locale (default: en-GB):
|
|
139
164
|
|
|
140
165
|
```sh
|
|
@@ -155,11 +180,18 @@ Notes:
|
|
|
155
180
|
|
|
156
181
|
Environment:
|
|
157
182
|
|
|
158
|
-
- `OPENAI_API_KEY`: required
|
|
159
|
-
- `
|
|
183
|
+
- `OPENAI_API_KEY`: required when provider is `openai`
|
|
184
|
+
- `GOOGLE_API_KEY`: required when provider is `google`
|
|
185
|
+
- `GIT_COMMIT_MESSAGE_PROVIDER`: optional (default: `openai`). `--provider` overrides this value.
|
|
186
|
+
- `GIT_COMMIT_MESSAGE_MODEL`: optional model override (defaults: `openai` -> `gpt-5-mini`, `google` -> `gemini-2.5-flash`)
|
|
187
|
+
- `OPENAI_MODEL`: optional OpenAI-only model override
|
|
160
188
|
- `GIT_COMMIT_MESSAGE_LANGUAGE`: optional (default: `en-GB`)
|
|
161
189
|
- `GIT_COMMIT_MESSAGE_CHUNK_TOKENS`: optional token budget per diff chunk (default: 0 = single chunk + summary; -1 disables summarisation)
|
|
162
190
|
|
|
191
|
+
Notes:
|
|
192
|
+
|
|
193
|
+
- If token counting fails for your provider while chunking, try `--chunk-tokens 0` (default) or `--chunk-tokens -1`.
|
|
194
|
+
|
|
163
195
|
## AI‑generated code notice
|
|
164
196
|
|
|
165
197
|
Parts of this project were created with assistance from AI tools (e.g. large language models).
|
|
@@ -34,6 +34,12 @@ Set your API key (POSIX sh):
|
|
|
34
34
|
export OPENAI_API_KEY="sk-..."
|
|
35
35
|
```
|
|
36
36
|
|
|
37
|
+
Or for the Google provider:
|
|
38
|
+
|
|
39
|
+
```sh
|
|
40
|
+
export GOOGLE_API_KEY="..."
|
|
41
|
+
```
|
|
42
|
+
|
|
37
43
|
Note (fish): In fish, set it as follows.
|
|
38
44
|
|
|
39
45
|
```fish
|
|
@@ -61,6 +67,18 @@ git-commit-message "optional extra context about the change"
|
|
|
61
67
|
git-commit-message --one-line "optional context"
|
|
62
68
|
```
|
|
63
69
|
|
|
70
|
+
- Select provider (default: openai):
|
|
71
|
+
|
|
72
|
+
```sh
|
|
73
|
+
git-commit-message --provider openai "optional context"
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
- Select provider (Google Gemini via google-genai):
|
|
77
|
+
|
|
78
|
+
```sh
|
|
79
|
+
git-commit-message --provider google "optional context"
|
|
80
|
+
```
|
|
81
|
+
|
|
64
82
|
- Limit subject length (default 72):
|
|
65
83
|
|
|
66
84
|
```sh
|
|
@@ -86,6 +104,12 @@ git-commit-message --chunk-tokens -1 "optional context"
|
|
|
86
104
|
git-commit-message --commit --edit "refactor parser for speed"
|
|
87
105
|
```
|
|
88
106
|
|
|
107
|
+
- Print debug info (prompt/response + token usage):
|
|
108
|
+
|
|
109
|
+
```sh
|
|
110
|
+
git-commit-message --debug "optional context"
|
|
111
|
+
```
|
|
112
|
+
|
|
89
113
|
- Select output language/locale (default: en-GB):
|
|
90
114
|
|
|
91
115
|
```sh
|
|
@@ -106,11 +130,18 @@ Notes:
|
|
|
106
130
|
|
|
107
131
|
Environment:
|
|
108
132
|
|
|
109
|
-
- `OPENAI_API_KEY`: required
|
|
110
|
-
- `
|
|
133
|
+
- `OPENAI_API_KEY`: required when provider is `openai`
|
|
134
|
+
- `GOOGLE_API_KEY`: required when provider is `google`
|
|
135
|
+
- `GIT_COMMIT_MESSAGE_PROVIDER`: optional (default: `openai`). `--provider` overrides this value.
|
|
136
|
+
- `GIT_COMMIT_MESSAGE_MODEL`: optional model override (defaults: `openai` -> `gpt-5-mini`, `google` -> `gemini-2.5-flash`)
|
|
137
|
+
- `OPENAI_MODEL`: optional OpenAI-only model override
|
|
111
138
|
- `GIT_COMMIT_MESSAGE_LANGUAGE`: optional (default: `en-GB`)
|
|
112
139
|
- `GIT_COMMIT_MESSAGE_CHUNK_TOKENS`: optional token budget per diff chunk (default: 0 = single chunk + summary; -1 disables summarisation)
|
|
113
140
|
|
|
141
|
+
Notes:
|
|
142
|
+
|
|
143
|
+
- If token counting fails for your provider while chunking, try `--chunk-tokens 0` (default) or `--chunk-tokens -1`.
|
|
144
|
+
|
|
114
145
|
## AI‑generated code notice
|
|
115
146
|
|
|
116
147
|
Parts of this project were created with assistance from AI tools (e.g. large language models).
|
|
@@ -1,11 +1,12 @@
|
|
|
1
1
|
[project]
|
|
2
2
|
name = "git-commit-message"
|
|
3
|
-
version = "0.
|
|
4
|
-
description = "Generate Git commit messages from staged changes using
|
|
3
|
+
version = "0.7.0"
|
|
4
|
+
description = "Generate Git commit messages from staged changes using LLM"
|
|
5
5
|
readme = "README.md"
|
|
6
6
|
requires-python = ">=3.13"
|
|
7
7
|
dependencies = [
|
|
8
8
|
"babel>=2.17.0",
|
|
9
|
+
"google-genai>=1.56.0",
|
|
9
10
|
"openai>=2.6.1",
|
|
10
11
|
"tiktoken>=0.12.0",
|
|
11
12
|
]
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
"""Command-line interface entry point.
|
|
2
2
|
|
|
3
|
-
Collect staged changes from the repository and call an
|
|
3
|
+
Collect staged changes from the repository and call an LLM provider
|
|
4
4
|
to generate a commit message, or create a commit straight away.
|
|
5
5
|
"""
|
|
6
6
|
|
|
@@ -19,10 +19,11 @@ from ._git import (
|
|
|
19
19
|
get_staged_diff,
|
|
20
20
|
has_staged_changes,
|
|
21
21
|
)
|
|
22
|
-
from .
|
|
22
|
+
from ._llm import (
|
|
23
|
+
CommitMessageResult,
|
|
24
|
+
UnsupportedProviderError,
|
|
23
25
|
generate_commit_message,
|
|
24
26
|
generate_commit_message_with_info,
|
|
25
|
-
CommitMessageResult,
|
|
26
27
|
)
|
|
27
28
|
|
|
28
29
|
|
|
@@ -50,7 +51,7 @@ def _build_parser() -> ArgumentParser:
|
|
|
50
51
|
parser: ArgumentParser = ArgumentParser(
|
|
51
52
|
prog="git-commit-message",
|
|
52
53
|
description=(
|
|
53
|
-
"Generate a commit message
|
|
54
|
+
"Generate a commit message based on the staged changes."
|
|
54
55
|
),
|
|
55
56
|
)
|
|
56
57
|
|
|
@@ -72,11 +73,21 @@ def _build_parser() -> ArgumentParser:
|
|
|
72
73
|
help="Open an editor to amend the message before committing. Use with '--commit'.",
|
|
73
74
|
)
|
|
74
75
|
|
|
76
|
+
parser.add_argument(
|
|
77
|
+
"--provider",
|
|
78
|
+
default=None,
|
|
79
|
+
help=(
|
|
80
|
+
"LLM provider to use (default: openai). "
|
|
81
|
+
"You may also set GIT_COMMIT_MESSAGE_PROVIDER. "
|
|
82
|
+
"The CLI flag overrides the environment variable."
|
|
83
|
+
),
|
|
84
|
+
)
|
|
85
|
+
|
|
75
86
|
parser.add_argument(
|
|
76
87
|
"--model",
|
|
77
88
|
default=None,
|
|
78
89
|
help=(
|
|
79
|
-
"
|
|
90
|
+
"Model name to use. If unspecified, uses GIT_COMMIT_MESSAGE_MODEL or a provider-specific default (openai: gpt-5-mini; google: gemini-2.5-flash)."
|
|
80
91
|
),
|
|
81
92
|
)
|
|
82
93
|
|
|
@@ -170,6 +181,7 @@ def _run(
|
|
|
170
181
|
getattr(args, "max_length", None),
|
|
171
182
|
getattr(args, "language", None),
|
|
172
183
|
chunk_tokens,
|
|
184
|
+
getattr(args, "provider", None),
|
|
173
185
|
)
|
|
174
186
|
message = result.message
|
|
175
187
|
else:
|
|
@@ -181,7 +193,11 @@ def _run(
|
|
|
181
193
|
getattr(args, "max_length", None),
|
|
182
194
|
getattr(args, "language", None),
|
|
183
195
|
chunk_tokens,
|
|
196
|
+
getattr(args, "provider", None),
|
|
184
197
|
)
|
|
198
|
+
except UnsupportedProviderError as exc:
|
|
199
|
+
print(str(exc), file=stderr)
|
|
200
|
+
return 3
|
|
185
201
|
except Exception as exc: # noqa: BLE001 - to preserve standard output messaging
|
|
186
202
|
print(f"Failed to generate commit message: {exc}", file=stderr)
|
|
187
203
|
return 3
|
|
@@ -199,7 +215,8 @@ def _run(
|
|
|
199
215
|
if not args.commit:
|
|
200
216
|
if args.debug and result is not None:
|
|
201
217
|
# Print debug information
|
|
202
|
-
print("====
|
|
218
|
+
print(f"==== {result.provider} Usage ====")
|
|
219
|
+
print(f"provider: {result.provider}")
|
|
203
220
|
print(f"model: {result.model}")
|
|
204
221
|
print(f"response_id: {getattr(result, 'response_id', '(n/a)')}")
|
|
205
222
|
if result.total_tokens is not None:
|
|
@@ -220,7 +237,8 @@ def _run(
|
|
|
220
237
|
|
|
221
238
|
if args.debug and result is not None:
|
|
222
239
|
# Also print debug info before commit
|
|
223
|
-
print("====
|
|
240
|
+
print(f"==== {result.provider} Usage ====")
|
|
241
|
+
print(f"provider: {result.provider}")
|
|
224
242
|
print(f"model: {result.model}")
|
|
225
243
|
print(f"response_id: {getattr(result, 'response_id', '(n/a)')}")
|
|
226
244
|
if result.total_tokens is not None:
|
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
"""Google (Gemini) provider implementation.
|
|
2
|
+
|
|
3
|
+
This module contains only Google GenAI-specific API calls and token counting.
|
|
4
|
+
Provider-agnostic orchestration/prompt logic lives in `_llm.py`.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from os import environ
|
|
10
|
+
|
|
11
|
+
from google import genai
|
|
12
|
+
from google.genai import types
|
|
13
|
+
|
|
14
|
+
from ._llm import LLMTextResult, LLMUsage
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class GoogleGenAIProvider:
    """Google Gemini backend built on the `google-genai` client.

    Exposes the provider contract used by `_llm.py`: a `name` tag,
    `count_tokens`, and `generate_text`. Only Google-specific API calls
    live here; orchestration/prompting stays provider-agnostic.
    """

    # Provider tag matched against `--provider` / GIT_COMMIT_MESSAGE_PROVIDER.
    name = "google"

    def __init__(
        self,
        /,
        *,
        api_key: str | None = None,
    ) -> None:
        """Create a GenAI client from *api_key* or `GOOGLE_API_KEY`.

        Raises:
            RuntimeError: if no API key can be resolved.
        """
        resolved_key = api_key or environ.get("GOOGLE_API_KEY")
        if not resolved_key:
            raise RuntimeError("The GOOGLE_API_KEY environment variable is required.")
        self._client = genai.Client(api_key=resolved_key)

    def count_tokens(
        self,
        /,
        *,
        model: str,
        text: str,
    ) -> int:
        """Return the token count of *text* for *model* via the remote API.

        Raises:
            RuntimeError: if the API call fails or returns no integer count;
                the message points the user at the `--chunk-tokens` escape hatch.
        """
        try:
            response = self._client.models.count_tokens(
                model=model,
                contents=text,
            )
        except Exception as exc:
            raise RuntimeError(
                "Token counting failed for the Google provider. "
                "Try `--chunk-tokens 0` (default) or `--chunk-tokens -1` to disable summarisation."
            ) from exc

        # The SDK may return a response without a usable count; treat that
        # the same as a failed call rather than propagating garbage.
        counted = getattr(response, "total_tokens", None)
        if not isinstance(counted, int):
            raise RuntimeError(
                "Token counting returned an unexpected response from the Google provider. "
                "Try `--chunk-tokens 0` (default) or `--chunk-tokens -1` to disable summarisation."
            )
        return counted

    def generate_text(
        self,
        /,
        *,
        model: str,
        instructions: str,
        user_text: str,
    ) -> LLMTextResult:
        """Generate text for *user_text* under *instructions* and return it with usage info.

        Raises:
            RuntimeError: if the model produced no usable text.
        """
        # System prompt travels in the config, not in `contents`.
        generation_config = types.GenerateContentConfig(
            system_instruction=instructions,
        )
        response = self._client.models.generate_content(
            model=model,
            contents=user_text,
            config=generation_config,
        )

        body = self._extract_text(response)
        if not body:
            raise RuntimeError("An empty response text was generated by the provider.")

        return LLMTextResult(
            text=body,
            response_id=getattr(response, "response_id", None),
            usage=self._extract_usage(response),
        )

    @staticmethod
    def _extract_text(
        resp: types.GenerateContentResponse,
        /,
    ) -> str:
        """Join the non-blank text parts of the first candidate; "" if none."""
        candidates = getattr(resp, "candidates", None)
        if not candidates:
            return ""

        first = candidates[0]
        parts = getattr(first.content, "parts", None) if first.content else None
        if not parts:
            return ""

        fragments = [
            fragment
            for part in parts
            if isinstance((fragment := getattr(part, "text", None)), str) and fragment.strip()
        ]
        return "\n".join(fragments).strip()

    @staticmethod
    def _extract_usage(
        resp: types.GenerateContentResponse,
        /,
    ) -> LLMUsage | None:
        """Map the response's usage metadata to an `LLMUsage`; None if absent."""
        meta = getattr(resp, "usage_metadata", None)
        if meta is None:
            return None

        return LLMUsage(
            prompt_tokens=getattr(meta, "prompt_token_count", None),
            completion_tokens=getattr(meta, "candidates_token_count", None),
            total_tokens=getattr(meta, "total_token_count", None),
        )
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
"""OpenAI provider implementation.
|
|
2
|
+
|
|
3
|
+
This module contains only OpenAI-specific API calls and token counting.
|
|
4
|
+
Provider-agnostic orchestration/prompt logic lives in `_llm.py`.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from openai import OpenAI
|
|
10
|
+
from openai.types.responses import Response
|
|
11
|
+
from os import environ
|
|
12
|
+
from tiktoken import Encoding, encoding_for_model, get_encoding
|
|
13
|
+
from ._llm import LLMTextResult, LLMUsage
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def _encoding_for_model(
    model: str,
    /,
) -> Encoding:
    """Resolve the tiktoken encoding for *model*.

    Unknown or unreleased model names fall back to `cl100k_base`, so token
    counting keeps working even when tiktoken has no mapping for the model.
    """
    try:
        encoding = encoding_for_model(model)
    except Exception:
        encoding = get_encoding("cl100k_base")
    return encoding
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class OpenAIResponsesProvider:
    """OpenAI backend built on the Responses API.

    Exposes the provider contract used by `_llm.py`: a `name` tag,
    `count_tokens`, and `generate_text`. Only OpenAI-specific API calls
    and token counting live here.
    """

    # Provider tag matched against `--provider` / GIT_COMMIT_MESSAGE_PROVIDER.
    name = "openai"

    def __init__(
        self,
        /,
        *,
        api_key: str | None = None,
    ) -> None:
        """Create an OpenAI client from *api_key* or `OPENAI_API_KEY`.

        Raises:
            RuntimeError: if no API key can be resolved.
        """
        resolved_key = api_key or environ.get("OPENAI_API_KEY")
        if not resolved_key:
            raise RuntimeError("The OPENAI_API_KEY environment variable is required.")
        self._client = OpenAI(api_key=resolved_key)

    def count_tokens(
        self,
        /,
        *,
        model: str,
        text: str,
    ) -> int:
        """Count tokens locally with tiktoken (no API round-trip)."""
        return len(_encoding_for_model(model).encode(text))

    def generate_text(
        self,
        /,
        *,
        model: str,
        instructions: str,
        user_text: str,
    ) -> LLMTextResult:
        """Generate text for *user_text* under *instructions* and return it with usage info.

        Raises:
            RuntimeError: if the model produced no usable text.
        """
        payload = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "input_text",
                        "text": user_text,
                    }
                ],
            }
        ]
        response: Response = self._client.responses.create(
            model=model,
            instructions=instructions,
            input=payload,
        )

        message: str = (response.output_text or "").strip()
        if not message:
            raise RuntimeError("An empty response text was generated by the provider.")

        usage: LLMUsage | None = None
        if response.usage is not None:
            usage = LLMUsage(
                prompt_tokens=response.usage.input_tokens,
                completion_tokens=response.usage.output_tokens,
                total_tokens=response.usage.total_tokens,
            )

        return LLMTextResult(
            text=message,
            response_id=response.id,
            usage=usage,
        )
|