@johnowennixon/diffdash 1.7.0 → 1.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +13 -16
- package/dist/package.json +21 -20
- package/dist/src/lib_datetime.js +4 -1
- package/dist/src/lib_diffdash_cli.js +1 -3
- package/dist/src/lib_diffdash_config.js +4 -13
- package/dist/src/lib_diffdash_llm.js +7 -13
- package/dist/src/lib_diffdash_sequence.js +12 -9
- package/dist/src/lib_git_message_generate.js +11 -11
- package/dist/src/lib_git_message_schema.js +3 -3
- package/dist/src/lib_llm_access.js +14 -53
- package/dist/src/lib_llm_api.js +2 -36
- package/dist/src/lib_llm_chat.js +26 -19
- package/dist/src/lib_llm_config.js +4 -14
- package/dist/src/lib_llm_list.js +10 -8
- package/dist/src/lib_llm_model.js +328 -228
- package/dist/src/lib_llm_results.js +47 -0
- package/dist/src/lib_tell.js +6 -5
- package/dist/src/lib_tui_none.js +15 -0
- package/dist/src/lib_tui_number.js +22 -0
- package/dist/src/lib_tui_table.js +13 -5
- package/package.json +21 -20
- package/dist/src/lib_assert_type.js +0 -30
- package/dist/src/lib_type_guard.js +0 -15
package/README.md
CHANGED
@@ -1,5 +1,7 @@
 # DiffDash
 
+[](https://deepwiki.com/johnowennixon/diffdash)
+
 A command-line tool to generate Git commit messages using AI.
 
 ## Demonstration
@@ -8,15 +10,15 @@ A command-line tool to generate Git commit messages using AI.
 
 ## Features
 
-* Generate Git commit messages in natural English
+* Generate Git commit messages in **natural English**
 * Add a footer to the generated commit messages
 * Add a prefix or suffix to the summary line
 * Select from a choice of LLM models
 * Compare messages generated from all configured models
 * Disable or auto-approve various stages
-*
+* Option to output just the commit message for use in scripts
 * Configuration using standard API provider environment variables
-* Uses the Vercel AI SDK
+* Uses the Vercel AI SDK (version 5)
 * Uses structured JSON with compatible models
 * Substantially written using AI coding (Claude Code, Roo Code, and Amp)
 
@@ -28,24 +30,21 @@ npm install -g @johnowennixon/diffdash
 
 ## LLM Models
 
-Currently, for this application, the best LLM model
+Currently, for this application, the best LLM model is **gpt-4.1-mini** from OpenAI.
 It is set as the default model.
 I can only presume they have done a ton of training on diffs.
 
+I am now testing the GPT-5 models and **gpt-5-mini-minimal** (GPT-5 Mini with reasoning disabled) is behaving much the same.
+It will probably become the default model soon.
+
 ## API Keys
 
 DiffDash requires at least one API key for an LLM provider. These must be provided as environment variables.
 
 ```bash
-# For OpenAI (recommended)
+# For OpenAI (strongly recommended)
 export OPENAI_API_KEY=your-api-key
 
-# For Requesty
-export REQUESTY_API_KEY=your-api-key
-
-# For OpenRouter
-export OPENROUTER_API_KEY=your-api-key
-
 # For Anthropic
 export ANTHROPIC_API_KEY=your-api-key
 
@@ -54,6 +53,9 @@ export DEEPSEEK_API_KEY=your-api-key
 
 # For Google Gemini
 export GEMINI_API_KEY=your-api-key
+
+# For OpenRouter (all other models)
+export OPENROUTER_API_KEY=your-api-key
 ```
 
 ## Usage
@@ -98,9 +100,6 @@ diffdash --add-suffix "(closes #123)"
 # Display commit messages generated by all models
 diffdash --llm-compare
 
-# Use the fallback LLM model
-diffdash --llm-fallback
-
 # Specify the LLM model by name
 diffdash --llm-model claude-3.5-haiku
 
@@ -133,8 +132,6 @@ All command-line arguments are optional.
 | `--add-suffix SUFFIX` | add a suffix to the commit message summary line |
 | `--llm-list` | display a list of available Large Language Models and exit |
 | `--llm-compare` | compare the generated messages from all models - but do not commit |
-| `--llm-router` | prefer to access the LLM via a router rather than direct |
-| `--llm-fallback` | use the fallback LLM model instead of the default |
 | `--llm-model MODEL` | choose the LLM model by name (the default is normally best) |
 | `--llm-excludes MODELS` | models to exclude from comparison (comma separated) |
 | `--just-output` | just output the commit message for use in scripts |
package/dist/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@johnowennixon/diffdash",
-  "version": "1.7.0",
+  "version": "1.8.0",
   "description": "A command-line tool to generate Git commit messages using AI",
   "license": "0BSD",
   "author": "John Owen Nixon",
@@ -24,49 +24,50 @@
     "build:clean": "echo 'Removing dist' && rimraf dist",
     "build:shebang": "echo 'Fixing the shebangs' && add-shebangs --node --exclude 'dist/**/lib_*.js' 'dist/**/*.js'",
     "build:tsc": "echo 'Transpiling TypeScript to dist (using tsc)' && tsc --erasableSyntaxOnly --libReplacement false",
+    "build:tsgo": "echo 'Transpiling TypeScript to dist (using tsgo)' && tsgo || (rimraf dist && false)",
     "fix": "run-s -ls fix:biome fix:markdownlint",
     "fix:biome": "echo 'Fixing with Biome' && biome check --write",
-    "fix:docbot": "echo 'Fixing with DocBot' && docbot --
+    "fix:docbot": "echo 'Fixing with DocBot' && docbot --prune --generate",
     "fix:markdownlint": "echo 'Fixing with markdownlint' && markdownlint-cli2 '**/*.md' --fix",
-    "fix:oxlint": "echo 'Fixing with
+    "fix:oxlint": "echo 'Fixing with Oxlint' && oxlint --fix",
     "lint": "run-s -ls lint:biome lint:oxlint lint:tsgolint lint:knip lint:markdownlint",
     "lint:biome": "echo 'Linting with Biome' && biome check",
     "lint:docbot": "echo 'Linting with DocBot' && docbot",
     "lint:knip": "echo 'Linting with Knip' && knip",
-    "lint:markdownlint": "echo 'Linting with
-    "lint:oxlint": "echo 'Linting with
+    "lint:markdownlint": "echo 'Linting with Markdownlint' && markdownlint-cli2 '**/*.md'",
+    "lint:oxlint": "echo 'Linting with Oxlint' && oxlint",
     "lint:tsc": "echo 'Linting with tsc' && tsc --noEmit --erasableSyntaxOnly --libReplacement false",
-    "lint:
+    "lint:tsgo": "echo 'Linting with tsgo' && tsgo --noEmit",
+    "lint:tsgolint": "echo 'Linting with tsgolint' && candide-tsgolint",
     "test": "run-s -ls lint build"
   },
   "dependencies": {
-    "@ai-sdk/anthropic": "
-    "@ai-sdk/deepseek": "0.
-    "@ai-sdk/google": "
-    "@ai-sdk/openai": "
-    "@openrouter/ai-sdk-provider": "
-    "
-    "ai": "4.3.19",
+    "@ai-sdk/anthropic": "2.0.1",
+    "@ai-sdk/deepseek": "1.0.5",
+    "@ai-sdk/google": "2.0.4",
+    "@ai-sdk/openai": "2.0.9",
+    "@openrouter/ai-sdk-provider": "1.1.2",
+    "ai": "5.0.9",
     "ansis": "4.1.0",
     "argparse": "2.0.1",
     "cli-table3": "0.6.5",
     "json5": "2.2.3",
     "simple-git": "3.28.0",
-    "zod": "
+    "zod": "4.0.17"
   },
   "devDependencies": {
-    "@biomejs/biome": "2.1.
+    "@biomejs/biome": "2.1.4",
+    "@candide/tsgolint": "1.3.0",
     "@johnowennixon/add-shebangs": "1.1.0",
     "@johnowennixon/chmodx": "2.0.0",
-    "@johnowennixon/pipe-exit": "1.0.1",
     "@types/argparse": "2.0.17",
-    "@types/node": "24.1
+    "@types/node": "24.2.1",
+    "@typescript/native-preview": "7.0.0-dev.20250811.1",
     "knip": "5.62.0",
     "markdownlint-cli2": "0.18.1",
     "npm-run-all2": "8.0.4",
-    "oxlint": "1.
-    "oxlint-tsgolint": "0.0.0-8",
+    "oxlint": "1.11.1",
     "rimraf": "6.0.1",
-    "typescript": "5.
+    "typescript": "5.9.2"
   }
 }
package/dist/src/lib_datetime.js
CHANGED
@@ -25,6 +25,9 @@ export function datetime_parse_timestamp(timestamp) {
 export function datetime_format_utc_iso_ymdthms(date) {
     return date.toISOString().slice(0, 19);
 }
+export function datetime_format_utc_iso_ymd_hms(date) {
+    return date.toISOString().slice(0, 19).replace("T", SPACE);
+}
 export function datetime_format_utc_iso_ymd(date) {
     return date.toISOString().slice(0, 10);
 }
@@ -44,7 +47,7 @@ export function datetime_format_local_iso_ymdthms(date) {
     return datetime_format_utc_iso_ymdthms(datetime_localize(date));
 }
 export function datetime_format_local_iso_ymd_hms(date) {
-    return
+    return datetime_format_utc_iso_ymd_hms(datetime_localize(date));
 }
 export function datetime_format_local_iso_ymd(date) {
     return datetime_format_utc_iso_ymd(datetime_localize(date));
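Note: the new `datetime_format_utc_iso_ymd_hms` differs from the existing `ymdthms` variant only by swapping the ISO `T` separator for a space (the imported `SPACE` constant is assumed to be `" "`). A self-contained sketch of the two outputs:

```js
// Sketch of the two UTC formatters side by side (SPACE assumed to be " ").
const date = new Date("2025-08-11T12:34:56.789Z");
console.log(date.toISOString().slice(0, 19)); // "2025-08-11T12:34:56"
console.log(date.toISOString().slice(0, 19).replace("T", " ")); // "2025-08-11 12:34:56"
```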
package/dist/src/lib_diffdash_cli.js
CHANGED
@@ -1,5 +1,5 @@
 import { cli_boolean, cli_choice_default, cli_make_parser, cli_string } from "./lib_cli.js";
-import { diffdash_llm_model_choices, diffdash_llm_model_default
+import { diffdash_llm_model_choices, diffdash_llm_model_default } from "./lib_diffdash_llm.js";
 const diffdash_cli_schema = {
     version: cli_boolean({ help: "show program version information and exit" }),
     auto_add: cli_boolean({ help: "automatically stage all changes without confirmation" }),
@@ -16,8 +16,6 @@ const diffdash_cli_schema = {
     add_suffix: cli_string({ help: "add a suffix to the commit message summary line", metavar: "SUFFIX" }),
     llm_list: cli_boolean({ help: "display a list of available Large Language Models and exit" }),
     llm_compare: cli_boolean({ help: "compare the generated messages from all models - but do not commit" }),
-    llm_router: cli_boolean({ help: "prefer to access the LLM via a router rather than direct" }),
-    llm_fallback: cli_boolean({ help: `use the fallback model (${diffdash_llm_model_fallback})` }),
     llm_model: cli_choice_default({
         help: `choose the Large Language Model by name (defaults to ${diffdash_llm_model_default})`,
         choices: diffdash_llm_model_choices,
package/dist/src/lib_diffdash_config.js
CHANGED
@@ -2,7 +2,7 @@ import { z } from "zod";
 import { abort_with_error } from "./lib_abort.js";
 import { debug_channels, debug_inspect_when } from "./lib_debug.js";
 import { diffdash_cli_parsed_args } from "./lib_diffdash_cli.js";
-import { diffdash_llm_model_details
+import { diffdash_llm_model_details } from "./lib_diffdash_llm.js";
 import { file_io_read_text } from "./lib_file_io.js";
 import { file_is_file } from "./lib_file_is.js";
 import { json5_parse } from "./lib_json5.js";
@@ -33,7 +33,7 @@ function diffdash_config_file_read(config) {
     }
 }
 export function diffdash_config_get() {
-    const { version, auto_add, auto_commit, auto_push, disable_add, disable_commit, disable_preview, disable_status, disable_push, push_no_verify, push_force, add_prefix, add_suffix, llm_list, llm_compare,
+    const { version, auto_add, auto_commit, auto_push, disable_add, disable_commit, disable_preview, disable_status, disable_push, push_no_verify, push_force, add_prefix, add_suffix, llm_list, llm_compare, llm_model, llm_excludes, just_output, silent, debug_llm_prompts, debug_llm_inputs, debug_llm_outputs, } = diffdash_cli_parsed_args;
     if (version) {
         tell_plain(`${PACKAGE_NAME} v${PACKAGE_VERSION}`);
         process.exit(0);
@@ -42,17 +42,8 @@ export function diffdash_config_get() {
         llm_list_models({ llm_model_details: diffdash_llm_model_details });
         process.exit(0);
     }
-    const
-    const
-        llm_model_details: diffdash_llm_model_details,
-        llm_model_name,
-        llm_router,
-    });
-    const all_llm_configs = llm_config_get_all({
-        llm_model_details: diffdash_llm_model_details,
-        llm_router,
-        llm_excludes,
-    });
+    const llm_config = llm_config_get({ llm_model_details: diffdash_llm_model_details, llm_model_name: llm_model });
+    const all_llm_configs = llm_config_get_all({ llm_model_details: diffdash_llm_model_details, llm_excludes });
     debug_channels.llm_prompts = debug_llm_prompts;
     debug_channels.llm_inputs = debug_llm_inputs;
     debug_channels.llm_outputs = debug_llm_outputs;
package/dist/src/lib_diffdash_llm.js
CHANGED
@@ -1,25 +1,19 @@
 import { env_get_substitute } from "./lib_env.js";
 import { llm_model_get_choices, llm_model_get_details } from "./lib_llm_model.js";
 const model_name_default = "gpt-4.1-mini";
-const model_name_fallback = "claude-3.5-haiku";
 const model_name_options = [
-    "claude-3.5-haiku",
-    "deepseek-
-    "devstral-medium",
-    "devstral-small",
+    "claude-3.5-haiku", // fallback
+    "deepseek-chat",
     "gemini-2.0-flash",
     "gemini-2.5-flash",
     "gpt-4.1-mini", // the best
     "gpt-4.1-nano",
-    "gpt-
-    "
-    "
-    "
-    "
-    "mistral-medium-3",
-    "qwen3-235b-a22b",
+    "gpt-5-mini",
+    "gpt-5-mini-minimal", // fallback
+    "gpt-5-nano",
+    "gpt-5-nano-minimal",
+    "llama-4-maverick@cerebras",
 ];
 export const diffdash_llm_model_details = llm_model_get_details({ llm_model_names: model_name_options });
 export const diffdash_llm_model_choices = llm_model_get_choices({ llm_model_details: diffdash_llm_model_details });
 export const diffdash_llm_model_default = env_get_substitute("DIFFDASH_LLM_MODEL", model_name_default);
-export const diffdash_llm_model_fallback = model_name_fallback;
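Note: `diffdash_llm_model_default` is resolved through `env_get_substitute`, so setting `DIFFDASH_LLM_MODEL` overrides the hard-coded `gpt-4.1-mini`. A sketch of the helper's assumed behaviour (this implementation is illustrative, not taken from the package):

```js
// Assumed behaviour of env_get_substitute: prefer a set, non-empty
// environment variable, otherwise fall back to the supplied default.
function env_get_substitute(name, fallback) {
    const value = process.env[name];
    return value !== undefined && value !== "" ? value : fallback;
}

const model = env_get_substitute("DIFFDASH_LLM_MODEL", "gpt-4.1-mini"); // e.g. "gpt-5-mini-minimal" when exported
```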
package/dist/src/lib_diffdash_sequence.js
CHANGED
@@ -7,7 +7,7 @@ import { git_message_generate_result } from "./lib_git_message_generate.js";
 import { git_message_validate_check, git_message_validate_get_result } from "./lib_git_message_validate.js";
 import { git_simple_open_check_not_bare, git_simple_open_git_repo } from "./lib_git_simple_open.js";
 import { git_simple_staging_create_commit, git_simple_staging_get_staged_diff, git_simple_staging_get_staged_diffstat, git_simple_staging_has_staged_changes, git_simple_staging_has_unstaged_changes, git_simple_staging_push_to_remote, git_simple_staging_stage_all_changes, } from "./lib_git_simple_staging.js";
-import {
+import { llm_results_summary } from "./lib_llm_results.js";
 import { stdio_write_stdout, stdio_write_stdout_linefeed } from "./lib_stdio_write.js";
 import { tell_action, tell_info, tell_plain, tell_success, tell_warning } from "./lib_tell.js";
 import { tui_justify_left } from "./lib_tui_justify.js";
@@ -98,25 +98,27 @@ async function phase_compare({ config, git }) {
     const all_results = await Promise.all(result_promises);
     for (const result of all_results) {
         const { llm_config, seconds, error_text } = result;
-        const
+        const { llm_model_name } = llm_config;
         if (error_text !== null) {
-            tell_warning(`Failed to generate a commit message in ${seconds} seconds using ${
+            tell_warning(`Failed to generate a commit message in ${seconds} seconds using ${llm_model_name}: ${error_text}`);
             continue;
         }
-        tell_info(`Git commit message in ${seconds} seconds using ${
-
+        tell_info(`Git commit message in ${seconds} seconds using ${llm_model_name}:`);
+        const { outputs } = result;
+        let { generated_text: git_message } = outputs;
         const validation_result = git_message_validate_get_result(git_message);
         const teller = validation_result.valid ? tell_plain : tell_warning;
         git_message = diffdash_add_prefix_or_suffix({ git_message, add_prefix, add_suffix });
         git_message = diffdash_add_footer({ git_message, llm_config });
         git_message_display({ git_message, teller });
     }
+    llm_results_summary(all_results);
 }
 async function phase_generate({ config, git }) {
     const { disable_preview, add_prefix, add_suffix, llm_config, just_output, silent, extra_prompts } = config;
-    const
+    const { llm_model_name } = llm_config;
     if (!silent && !just_output) {
-        tell_action(`Generating the Git commit message using ${
+        tell_action(`Generating the Git commit message using ${llm_model_name}`);
     }
     const diffstat = await git_simple_staging_get_staged_diffstat(git);
     const diff = await git_simple_staging_get_staged_diff(git);
@@ -124,9 +126,10 @@ async function phase_generate({ config, git }) {
     const result = await git_message_generate_result({ llm_config, inputs });
     const { error_text } = result;
     if (error_text !== null) {
-        abort_with_error(`Failed to generate a commit message using ${
+        abort_with_error(`Failed to generate a commit message using ${llm_model_name}: ${error_text}`);
     }
-
+    const { outputs } = result;
+    let { generated_text: git_message } = outputs;
     git_message_validate_check(git_message);
     git_message = diffdash_add_prefix_or_suffix({ git_message, add_prefix, add_suffix });
     git_message = diffdash_add_footer({ git_message, llm_config });
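Note: `phase_compare` fans out to every configured model with `Promise.all`, and each result carries its own `error_text`, so one failing provider cannot reject the whole comparison. A minimal sketch of that pattern (`generate_for_model` is a hypothetical stand-in for `git_message_generate_result`):

```js
// Fan-out with per-result error capture: no single rejection can take
// down Promise.all, because every branch resolves to a result object.
async function compare_all(llm_configs, inputs) {
    return Promise.all(llm_configs.map(async (llm_config) => {
        try {
            const outputs = await generate_for_model({ llm_config, inputs }); // hypothetical helper
            return { llm_config, error_text: null, outputs };
        } catch (error) {
            return { llm_config, error_text: String(error), outputs: null };
        }
    }));
}
```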
package/dist/src/lib_git_message_generate.js
CHANGED
@@ -5,19 +5,19 @@ import { git_message_schema, git_message_schema_format } from "./lib_git_message_schema.js";
 import { llm_chat_generate_object, llm_chat_generate_text } from "./lib_llm_chat.js";
 import { llm_tokens_count_estimated, llm_tokens_debug_usage } from "./lib_llm_tokens.js";
 async function git_message_generate_unstructured({ llm_config, system_prompt, user_prompt, }) {
-    const
-    return
+    const outputs = await llm_chat_generate_text({ llm_config, system_prompt, user_prompt });
+    return outputs;
 }
 async function git_message_generate_structured({ llm_config, system_prompt, user_prompt, }) {
     const schema = git_message_schema;
-    const
+    const { generated_object, total_usage, provider_metadata } = await llm_chat_generate_object({
         llm_config,
         system_prompt,
         user_prompt,
         schema,
     });
-    const
-    return
+    const generated_text = git_message_schema_format(generated_object);
+    return { generated_text, reasoning_text: undefined, total_usage, provider_metadata };
 }
 export async function git_message_generate_string({ llm_config, inputs, }) {
     const { context_window, has_structured_json } = llm_config.llm_model_detail;
@@ -31,25 +31,25 @@ export async function git_message_generate_string({ llm_config, inputs, }) {
         max_length: user_length,
     });
     llm_tokens_debug_usage({ name: "Inputs", llm_config, text: system_prompt + user_prompt });
-    const
+    const outputs = has_structured_json
         ? await git_message_generate_structured({ llm_config, system_prompt, user_prompt })
         : await git_message_generate_unstructured({ llm_config, system_prompt, user_prompt });
-    llm_tokens_debug_usage({ name: "Outputs", llm_config, text:
-    return
+    llm_tokens_debug_usage({ name: "Outputs", llm_config, text: outputs.generated_text });
+    return outputs;
 }
 export async function git_message_generate_result({ llm_config, inputs, }) {
     const duration = new Duration();
     duration.start();
     try {
-        const
+        const outputs = await git_message_generate_string({ llm_config, inputs });
         duration.stop();
         const seconds = duration.seconds_rounded();
-        return { llm_config, seconds,
+        return { llm_config, seconds, error_text: null, outputs };
     }
     catch (error) {
         duration.stop();
         const seconds = duration.seconds_rounded();
         const error_text = error_get_text(error);
-        return { llm_config, seconds,
+        return { llm_config, seconds, error_text, outputs: null };
     }
 }
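Note: the structured branch now returns `generated_object`, `total_usage`, and `provider_metadata`, matching the AI SDK v5 result shape. A sketch of the call `llm_chat_generate_object` is assumed to make underneath (the wrapper's internals are not shown in this diff):

```js
import { generateObject } from "ai";
import { z } from "zod";

// Sketch of an AI SDK v5 structured-output call; the schema mirrors
// git_message_schema's summary_line/extra_lines shape.
async function generate_structured(model, system_prompt, user_prompt) {
    const { object, usage, providerMetadata } = await generateObject({
        model, // an AI SDK language-model instance
        schema: z.object({ summary_line: z.string(), extra_lines: z.array(z.string()) }),
        system: system_prompt,
        prompt: user_prompt,
    });
    return { generated_object: object, total_usage: usage, provider_metadata: providerMetadata };
}
```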
package/dist/src/lib_git_message_schema.js
CHANGED
@@ -7,10 +7,10 @@ export const git_message_schema = z.object({
         .array(z.string().describe("Another sentence giving more information about the changes."))
         .describe("More information about the changes."),
 });
-export function git_message_schema_format(
+export function git_message_schema_format(git_message_object) {
     return [
-
+        git_message_object.summary_line,
         EMPTY, // Empty line
-        ...
+        ...git_message_object.extra_lines.map((line) => (line.startsWith("- ") ? line : `- ${line}`)),
     ].join(LF);
 }
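Note: `git_message_schema_format` also normalises each extra line into a bullet, adding the `- ` prefix only when it is missing. An illustration of the resulting message text (`EMPTY` assumed to be `""`, `LF` assumed to be `"\n"`):

```js
// Illustration of the formatting logic shown above.
const git_message_object = {
    summary_line: "Add space-separated UTC datetime formatter",
    extra_lines: ["Add datetime_format_utc_iso_ymd_hms", "- Reuse it from the local variant"],
};
const text = [
    git_message_object.summary_line,
    "",
    ...git_message_object.extra_lines.map((line) => (line.startsWith("- ") ? line : `- ${line}`)),
].join("\n");
// => "Add space-separated UTC datetime formatter\n\n- Add datetime_format_utc_iso_ymd_hms\n- Reuse it from the local variant"
```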
package/dist/src/lib_llm_access.js
CHANGED
@@ -3,8 +3,13 @@ import { COMMA } from "./lib_char_punctuation.js";
 import { llm_api_get_api_key, llm_api_get_api_key_env } from "./lib_llm_api.js";
 import { llm_model_find_detail } from "./lib_llm_model.js";
 export function llm_access_available({ llm_model_details, llm_model_name, llm_include, llm_excludes, }) {
+    const detail = llm_model_find_detail({ llm_model_details, llm_model_name });
+    const { llm_api_code } = detail;
+    if (llm_api_get_api_key(llm_api_code) === null) {
+        return false;
+    }
     if (llm_include) {
-        if (!llm_model_name.includes(llm_include)) {
+        if (!(llm_model_name.includes(llm_include) || llm_include === llm_api_code)) {
             return false;
         }
     }
@@ -16,59 +21,15 @@ export function llm_access_available({ llm_model_details, llm_model_name, llm_include, llm_excludes, }) {
             }
         }
     }
-
-    const { llm_api_code, llm_model_code_direct, llm_model_code_requesty, llm_model_code_openrouter } = detail;
-    if (llm_model_code_direct !== null && llm_api_code !== null) {
-        if (llm_api_get_api_key(llm_api_code)) {
-            return true;
-        }
-    }
-    if (llm_model_code_openrouter !== null) {
-        if (llm_api_get_api_key("openrouter")) {
-            return true;
-        }
-    }
-    if (llm_model_code_requesty !== null) {
-        if (llm_api_get_api_key("requesty")) {
-            return true;
-        }
-    }
-    return false;
+    return true;
 }
-export function llm_access_get({ llm_model_details, llm_model_name,
+export function llm_access_get({ llm_model_details, llm_model_name, }) {
     const detail = llm_model_find_detail({ llm_model_details, llm_model_name });
-    const { llm_api_code,
-
-
-
-
-        return { llm_model_code: llm_model_code_direct, llm_api_code, llm_api_key };
-        }
-    }
-    }
-    if (llm_model_code_openrouter !== null) {
-        const llm_api_key = llm_api_get_api_key("openrouter");
-        if (llm_api_key) {
-            return { llm_model_code: llm_model_code_openrouter, llm_api_code: "openrouter", llm_api_key };
-        }
-    }
-    if (llm_model_code_requesty !== null) {
-        const llm_api_key = llm_api_get_api_key("requesty");
-        if (llm_api_key) {
-            return { llm_model_code: llm_model_code_requesty, llm_api_code: "requesty", llm_api_key };
-        }
-    }
-    if (llm_model_code_direct !== null && llm_api_code !== null) {
-        const llm_api_key = llm_api_get_api_key(llm_api_code);
-        if (llm_api_key) {
-            return { llm_model_code: llm_model_code_direct, llm_api_code, llm_api_key };
-        }
-    }
-    const env_openrouter = llm_api_get_api_key_env("openrouter");
-    const env_requesty = llm_api_get_api_key_env("requesty");
-    if (llm_api_code !== null) {
-        const env_direct = llm_api_get_api_key_env(llm_api_code);
-        abort_with_error(`Please set environment variable ${env_direct}, ${env_openrouter} or ${env_requesty}`);
+    const { llm_api_code, llm_model_code } = detail;
+    const llm_api_key = llm_api_get_api_key(llm_api_code);
+    if (!llm_api_key) {
+        const env_name = llm_api_get_api_key_env(llm_api_code);
+        abort_with_error(`Please set environment variable ${env_name}`);
     }
-
+    return { llm_model_code, llm_api_code, llm_api_key };
 }
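Note: with the router paths removed, availability reduces to one API key check plus a widened include filter, which now matches either a model-name substring or the provider's API code exactly. An illustration of the new matching rule:

```js
// Illustration of the include rule from the diff above.
function matches_include(llm_model_name, llm_api_code, llm_include) {
    return llm_model_name.includes(llm_include) || llm_include === llm_api_code;
}

matches_include("gpt-5-mini", "openai", "gpt-5"); // true  (name substring)
matches_include("gpt-5-mini", "openai", "openai"); // true  (exact API code)
matches_include("gemini-2.5-flash", "google", "openai"); // false
```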
package/dist/src/lib_llm_api.js
CHANGED
@@ -3,26 +3,8 @@ import { createDeepSeek } from "@ai-sdk/deepseek";
 import { createGoogleGenerativeAI } from "@ai-sdk/google";
 import { createOpenAI } from "@ai-sdk/openai";
 import { createOpenRouter } from "@openrouter/ai-sdk-provider";
-import { createRequesty } from "@requesty/ai-sdk";
 import { abort_with_error } from "./lib_abort.js";
-import { assert_type_string } from "./lib_assert_type.js";
-import { AT_SIGN } from "./lib_char_punctuation.js";
 import { env_get } from "./lib_env.js";
-export function llm_api_get_via(llm_api_code) {
-    switch (llm_api_code) {
-        case "anthropic":
-        case "deepseek":
-        case "google":
-        case "openai":
-            return "direct";
-        case "requesty":
-            return "via Requesty";
-        case "openrouter":
-            return "via OpenRouter";
-        default:
-            abort_with_error("Unknown LLM API");
-    }
-}
 export function llm_api_get_api_key_env(llm_api_code) {
     switch (llm_api_code) {
         case "anthropic":
@@ -33,8 +15,6 @@ export function llm_api_get_api_key_env(llm_api_code) {
             return "GEMINI_API_KEY";
         case "openai":
             return "OPENAI_API_KEY";
-        case "requesty":
-            return "REQUESTY_API_KEY";
         case "openrouter":
             return "OPENROUTER_API_KEY";
         default:
@@ -45,6 +25,7 @@ export function llm_api_get_api_key(llm_api_code) {
     const env = llm_api_get_api_key_env(llm_api_code);
     return env_get(env);
 }
+// eslint-disable-next-line sonarjs/function-return-type
 export function llm_api_get_ai_sdk_language_model({ llm_model_code, llm_api_code, llm_api_key, }) {
     switch (llm_api_code) {
         case "anthropic":
@@ -55,23 +36,8 @@ export function llm_api_get_ai_sdk_language_model({ llm_model_code, llm_api_code, llm_api_key, }) {
             return createGoogleGenerativeAI({ apiKey: llm_api_key })(llm_model_code);
         case "openai":
             return createOpenAI({ apiKey: llm_api_key })(llm_model_code);
-        case "requesty":
-            return createRequesty({ apiKey: llm_api_key })(llm_model_code);
         case "openrouter": {
-
-            if (llm_model_code.includes(AT_SIGN)) {
-                const splits = llm_model_code.split(AT_SIGN);
-                const model_id = assert_type_string(splits[0]);
-                const provider = assert_type_string(splits[1]);
-                return openrouter(model_id, {
-                    extraBody: {
-                        provider: {
-                            only: [provider],
-                        },
-                    },
-                });
-            }
-            return openrouter(llm_model_code);
+            return createOpenRouter({ apiKey: llm_api_key })(llm_model_code);
         }
         default:
             abort_with_error("Unknown LLM API");