@johnowennixon/diffdash 1.6.1 → 1.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -107,8 +107,8 @@ diffdash --llm-model claude-3.5-haiku
107
107
  # Just output the commit message for use in scripts
108
108
  diffdash --just-output
109
109
 
110
- # Debug options
111
- diffdash --debug-llm-inputs --debug-llm-outputs
110
+ # Debug prompts
111
+ diffdash --debug-llm-prompts
112
112
  ```
113
113
 
114
114
  ## Command Line Arguments
@@ -139,8 +139,9 @@ All command-line arguments are optional.
139
139
  | `--llm-excludes MODELS` | models to exclude from comparison (comma separated) |
140
140
  | `--just-output` | just output the commit message for use in scripts |
141
141
  | `--silent` | suppress all normal output - errors and aborts still display |
142
- | `--debug-llm-inputs` | show inputs (including all prompts) sent to the LLM |
143
- | `--debug-llm-outputs` | show outputs received from the LLM |
142
+ | `--debug-llm-prompts` | show prompts sent to the LLM |
143
+ | `--debug-llm-inputs` | show inputs object sent to the LLM |
144
+ | `--debug-llm-outputs` | show outputs object received from the LLM |
144
145
 
145
146
  ## Files containing secrets
146
147
 
package/dist/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@johnowennixon/diffdash",
3
- "version": "1.6.1",
3
+ "version": "1.7.0",
4
4
  "description": "A command-line tool to generate Git commit messages using AI",
5
5
  "license": "0BSD",
6
6
  "author": "John Owen Nixon",
@@ -26,16 +26,17 @@
26
26
  "build:tsc": "echo 'Transpiling TypeScript to dist (using tsc)' && tsc --erasableSyntaxOnly --libReplacement false",
27
27
  "fix": "run-s -ls fix:biome fix:markdownlint",
28
28
  "fix:biome": "echo 'Fixing with Biome' && biome check --write",
29
- "fix:eslint": "echo 'Fixing with ESLint' && eslint --fix",
29
+ "fix:docbot": "echo 'Fixing with DocBot' && docbot --remove --generate",
30
30
  "fix:markdownlint": "echo 'Fixing with markdownlint' && markdownlint-cli2 '**/*.md' --fix",
31
31
  "fix:oxlint": "echo 'Fixing with oxlint' && oxlint --fix",
32
- "lint": "run-s -ls lint:biome lint:oxlint lint:knip lint:markdownlint",
32
+ "lint": "run-s -ls lint:biome lint:oxlint lint:tsgolint lint:knip lint:markdownlint",
33
33
  "lint:biome": "echo 'Linting with Biome' && biome check",
34
- "lint:eslint": "echo 'Linting with ESLint' && eslint",
34
+ "lint:docbot": "echo 'Linting with DocBot' && docbot",
35
35
  "lint:knip": "echo 'Linting with Knip' && knip",
36
36
  "lint:markdownlint": "echo 'Linting with markdownlint' && markdownlint-cli2 '**/*.md'",
37
37
  "lint:oxlint": "echo 'Linting with oxlint' && oxlint",
38
38
  "lint:tsc": "echo 'Linting with tsc' && tsc --noEmit --erasableSyntaxOnly --libReplacement false",
39
+ "lint:tsgolint": "echo 'Linting with tsgolint' && (tsgolint | grep -A5 no-floating-promises) 2>&1 | pipe-exit",
39
40
  "test": "run-s -ls lint build"
40
41
  },
41
42
  "dependencies": {
@@ -43,38 +44,29 @@
43
44
  "@ai-sdk/deepseek": "0.2.16",
44
45
  "@ai-sdk/google": "1.2.22",
45
46
  "@ai-sdk/openai": "1.3.23",
46
- "@openrouter/ai-sdk-provider": "0.7.2",
47
+ "@openrouter/ai-sdk-provider": "0.7.3",
47
48
  "@requesty/ai-sdk": "0.0.9",
48
49
  "ai": "4.3.19",
49
50
  "ansis": "4.1.0",
50
51
  "argparse": "2.0.1",
51
52
  "cli-table3": "0.6.5",
53
+ "json5": "2.2.3",
52
54
  "simple-git": "3.28.0",
53
55
  "zod": "3.25.76"
54
56
  },
55
57
  "devDependencies": {
56
- "@biomejs/biome": "2.1.1",
57
- "@eslint/eslintrc": "3.3.1",
58
- "@eslint/js": "9.31.0",
58
+ "@biomejs/biome": "2.1.2",
59
59
  "@johnowennixon/add-shebangs": "1.1.0",
60
60
  "@johnowennixon/chmodx": "2.0.0",
61
- "@stylistic/eslint-plugin": "5.2.0",
61
+ "@johnowennixon/pipe-exit": "1.0.1",
62
62
  "@types/argparse": "2.0.17",
63
- "@types/node": "24.0.14",
64
- "@typescript-eslint/eslint-plugin": "8.37.0",
65
- "@typescript-eslint/parser": "8.37.0",
66
- "eslint": "9.31.0",
67
- "eslint-import-resolver-typescript": "4.4.4",
68
- "eslint-plugin-import-x": "4.16.1",
69
- "eslint-plugin-sonarjs": "3.0.4",
70
- "eslint-plugin-unicorn": "59.0.1",
71
- "globals": "16.3.0",
72
- "knip": "5.61.3",
63
+ "@types/node": "24.1.0",
64
+ "knip": "5.62.0",
73
65
  "markdownlint-cli2": "0.18.1",
74
66
  "npm-run-all2": "8.0.4",
75
- "oxlint": "1.7.0",
67
+ "oxlint": "1.8.0",
68
+ "oxlint-tsgolint": "0.0.0-8",
76
69
  "rimraf": "6.0.1",
77
- "typescript": "5.8.3",
78
- "typescript-eslint": "8.37.0"
70
+ "typescript": "5.8.3"
79
71
  }
80
72
  }
@@ -1,7 +1,7 @@
1
1
  import { ansi_red, ansi_yellow } from "./lib_ansi.js";
2
2
  import { stdio_write_stderr_linefeed } from "./lib_stdio_write.js";
3
- export function abort_exit() {
4
- process.exit(1);
3
+ export function abort_exit(exitcode = 1) {
4
+ process.exit(exitcode);
5
5
  }
6
6
  export function abort_with_warning(message) {
7
7
  if (process.stdout.isTTY) {
@@ -0,0 +1,30 @@
1
+ import { abort_with_error } from "./lib_abort.js";
2
+ import { type_guard_is_boolean, type_guard_is_number, type_guard_is_object, type_guard_is_string, } from "./lib_type_guard.js";
3
+ export function assert_type_boolean(value) {
4
+ if (type_guard_is_boolean(value)) {
5
+ return value;
6
+ }
7
+ console.error(value);
8
+ return abort_with_error("Assertion failed: value is not a boolean");
9
+ }
10
+ export function assert_type_number(value) {
11
+ if (type_guard_is_number(value)) {
12
+ return value;
13
+ }
14
+ console.error(value);
15
+ return abort_with_error("Assertion failed: value is not a number");
16
+ }
17
+ export function assert_type_string(value) {
18
+ if (type_guard_is_string(value)) {
19
+ return value;
20
+ }
21
+ console.error(value);
22
+ return abort_with_error("Assertion failed: value is not a string");
23
+ }
24
+ export function assert_type_object(value) {
25
+ if (type_guard_is_object(value)) {
26
+ return value;
27
+ }
28
+ console.error(value);
29
+ return abort_with_error("Assertion failed: value is not an object");
30
+ }
@@ -0,0 +1,4 @@
1
+ export const LEFT_DOUBLE_QUOTATION_MARK = "“";
2
+ export const LEFT_SINGLE_QUOTATION_MARK = "‘";
3
+ export const RIGHT_DOUBLE_QUOTATION_MARK = "”";
4
+ export const RIGHT_SINGLE_QUOTATION_MARK = "’";
@@ -22,6 +22,7 @@ export const debug_channels = {
22
22
  lines: false,
23
23
  llm_inputs: false,
24
24
  llm_outputs: false,
25
+ llm_prompts: false,
25
26
  llm_tokens: false,
26
27
  llm_tools: false,
27
28
  node: false,
@@ -26,10 +26,12 @@ const diffdash_cli_schema = {
26
26
  llm_excludes: cli_string({ help: "models to exclude from comparison (comma separated)", metavar: "MODELS" }),
27
27
  just_output: cli_boolean({ help: "just output the commit message for use in scripts" }),
28
28
  silent: cli_boolean({ help: "suppress all normal output - errors and aborts still display" }),
29
- debug_llm_inputs: cli_boolean({ help: "debug inputs (including all prompts) sent to the LLM" }),
30
- debug_llm_outputs: cli_boolean({ help: "debug outputs received from the LLM" }),
29
+ debug_llm_prompts: cli_boolean({ help: "debug prompts sent to the LLM" }),
30
+ debug_llm_inputs: cli_boolean({ help: "debug inputs object sent to the LLM" }),
31
+ debug_llm_outputs: cli_boolean({ help: "debug outputs object received from the LLM" }),
31
32
  };
32
33
  export const diffdash_cli_parser = cli_make_parser({
33
34
  cli_schema: diffdash_cli_schema,
34
35
  description: "DiffDash - generate Git commit messages using AI",
35
36
  });
37
+ export const diffdash_cli_parsed_args = diffdash_cli_parser.parsed_args;
@@ -1,13 +1,39 @@
1
+ import { z } from "zod";
2
+ import { abort_with_error } from "./lib_abort.js";
1
3
  import { debug_channels, debug_inspect_when } from "./lib_debug.js";
2
- import { diffdash_cli_parser } from "./lib_diffdash_cli.js";
4
+ import { diffdash_cli_parsed_args } from "./lib_diffdash_cli.js";
3
5
  import { diffdash_llm_model_details, diffdash_llm_model_fallback } from "./lib_diffdash_llm.js";
6
+ import { file_io_read_text } from "./lib_file_io.js";
7
+ import { file_is_file } from "./lib_file_is.js";
8
+ import { json5_parse } from "./lib_json5.js";
4
9
  import { llm_config_get, llm_config_get_all } from "./lib_llm_config.js";
5
10
  import { llm_list_models } from "./lib_llm_list.js";
6
11
  import { PACKAGE_NAME, PACKAGE_VERSION } from "./lib_package.js";
7
12
  import { tell_plain } from "./lib_tell.js";
13
+ import { tui_quote_smart_single } from "./lib_tui_quote.js";
14
+ const diffdash_config_file_schema = z
15
+ .object({
16
+ extra_prompts: z.string().array().optional(),
17
+ })
18
+ .strict();
19
+ function diffdash_config_file_read(config) {
20
+ const config_file_name = ".diffdash.json5";
21
+ if (!file_is_file(config_file_name)) {
22
+ return;
23
+ }
24
+ const config_content = file_io_read_text(config_file_name);
25
+ const parsed_json = json5_parse(config_content);
26
+ const validation_result = diffdash_config_file_schema.safeParse(parsed_json);
27
+ if (!validation_result.success) {
28
+ abort_with_error(`Unable to parse DiffDash config file: ${tui_quote_smart_single(config_file_name)}`);
29
+ }
30
+ const data = validation_result.data;
31
+ if (data.extra_prompts) {
32
+ config.extra_prompts = data.extra_prompts;
33
+ }
34
+ }
8
35
  export function diffdash_config_get() {
9
- const pa = diffdash_cli_parser.parsed_args;
10
- const { version, auto_add, auto_commit, auto_push, disable_add, disable_commit, disable_preview, disable_status, disable_push, push_no_verify, push_force, add_prefix, add_suffix, llm_list, llm_compare, llm_router, llm_fallback, llm_model, llm_excludes, just_output, silent, debug_llm_inputs, debug_llm_outputs, } = pa;
36
+ const { version, auto_add, auto_commit, auto_push, disable_add, disable_commit, disable_preview, disable_status, disable_push, push_no_verify, push_force, add_prefix, add_suffix, llm_list, llm_compare, llm_router, llm_fallback, llm_model, llm_excludes, just_output, silent, debug_llm_prompts, debug_llm_inputs, debug_llm_outputs, } = diffdash_cli_parsed_args;
11
37
  if (version) {
12
38
  tell_plain(`${PACKAGE_NAME} v${PACKAGE_VERSION}`);
13
39
  process.exit(0);
@@ -27,6 +53,7 @@ export function diffdash_config_get() {
27
53
  llm_router,
28
54
  llm_excludes,
29
55
  });
56
+ debug_channels.llm_prompts = debug_llm_prompts;
30
57
  debug_channels.llm_inputs = debug_llm_inputs;
31
58
  debug_channels.llm_outputs = debug_llm_outputs;
32
59
  const config = {
@@ -47,7 +74,9 @@ export function diffdash_config_get() {
47
74
  all_llm_configs,
48
75
  just_output,
49
76
  silent,
77
+ extra_prompts: undefined,
50
78
  };
79
+ diffdash_config_file_read(config);
51
80
  debug_inspect_when(debug_channels.config, config, "config");
52
81
  return config;
53
82
  }
@@ -20,6 +20,6 @@ const model_name_options = [
20
20
  "qwen3-235b-a22b",
21
21
  ];
22
22
  export const diffdash_llm_model_details = llm_model_get_details({ llm_model_names: model_name_options });
23
- export const diffdash_llm_model_choices = llm_model_get_choices(diffdash_llm_model_details);
23
+ export const diffdash_llm_model_choices = llm_model_get_choices({ llm_model_details: diffdash_llm_model_details });
24
24
  export const diffdash_llm_model_default = env_get_substitute("DIFFDASH_LLM_MODEL", model_name_default);
25
25
  export const diffdash_llm_model_fallback = model_name_fallback;
@@ -90,24 +90,21 @@ async function phase_compare({ config, git }) {
90
90
  if (!silent) {
91
91
  tell_action("Generating Git commit messages using all models in parallel");
92
92
  }
93
- const { all_llm_configs, add_prefix, add_suffix } = config;
93
+ const { all_llm_configs, add_prefix, add_suffix, extra_prompts } = config;
94
94
  const diffstat = await git_simple_staging_get_staged_diffstat(git);
95
95
  const diff = await git_simple_staging_get_staged_diff(git);
96
- const inputs = { diffstat, diff };
96
+ const inputs = { diffstat, diff, extra_prompts };
97
97
  const result_promises = all_llm_configs.map((llm_config) => git_message_generate_result({ llm_config, inputs }));
98
98
  const all_results = await Promise.all(result_promises);
99
99
  for (const result of all_results) {
100
100
  const { llm_config, seconds, error_text } = result;
101
- let { git_message } = result;
102
- const model_via = llm_config_get_model_via(llm_config);
103
- if (error_text) {
101
+ const model_via = llm_config_get_model_via({ llm_config });
102
+ if (error_text !== null) {
104
103
  tell_warning(`Failed to generate a commit message in ${seconds} seconds using ${model_via}: ${error_text}`);
105
104
  continue;
106
105
  }
107
- if (!git_message) {
108
- continue;
109
- }
110
106
  tell_info(`Git commit message in ${seconds} seconds using ${model_via}:`);
107
+ let { git_message } = result;
111
108
  const validation_result = git_message_validate_get_result(git_message);
112
109
  const teller = validation_result.valid ? tell_plain : tell_warning;
113
110
  git_message = diffdash_add_prefix_or_suffix({ git_message, add_prefix, add_suffix });
@@ -116,20 +113,20 @@ async function phase_compare({ config, git }) {
116
113
  }
117
114
  }
118
115
  async function phase_generate({ config, git }) {
119
- const { disable_preview, add_prefix, add_suffix, llm_config, just_output, silent } = config;
120
- const model_via = llm_config_get_model_via(llm_config);
116
+ const { disable_preview, add_prefix, add_suffix, llm_config, just_output, silent, extra_prompts } = config;
117
+ const model_via = llm_config_get_model_via({ llm_config });
121
118
  if (!silent && !just_output) {
122
119
  tell_action(`Generating the Git commit message using ${model_via}`);
123
120
  }
124
121
  const diffstat = await git_simple_staging_get_staged_diffstat(git);
125
122
  const diff = await git_simple_staging_get_staged_diff(git);
126
- const inputs = { diffstat, diff };
123
+ const inputs = { diffstat, diff, extra_prompts };
127
124
  const result = await git_message_generate_result({ llm_config, inputs });
128
125
  const { error_text } = result;
129
- let { git_message } = result;
130
- if (error_text || git_message === null) {
126
+ if (error_text !== null) {
131
127
  abort_with_error(`Failed to generate a commit message using ${model_via}: ${error_text}`);
132
128
  }
129
+ let { git_message } = result;
133
130
  git_message_validate_check(git_message);
134
131
  git_message = diffdash_add_prefix_or_suffix({ git_message, add_prefix, add_suffix });
135
132
  git_message = diffdash_add_footer({ git_message, llm_config });
@@ -0,0 +1,13 @@
1
+ import fs from "node:fs";
2
+ export function file_io_read_binary(file_path) {
3
+ return fs.readFileSync(file_path);
4
+ }
5
+ export function file_io_read_text(file_path) {
6
+ return fs.readFileSync(file_path, { encoding: "utf8" });
7
+ }
8
+ export function file_io_write_binary({ file_path, data }) {
9
+ fs.writeFileSync(file_path, data);
10
+ }
11
+ export function file_io_write_text({ file_path, text }) {
12
+ fs.writeFileSync(file_path, text, { encoding: "utf8" });
13
+ }
@@ -0,0 +1,34 @@
1
+ import fs from "node:fs";
2
+ export function file_is_dir(dir_path) {
3
+ try {
4
+ return fs.statSync(dir_path).isDirectory();
5
+ }
6
+ catch {
7
+ return false;
8
+ }
9
+ }
10
+ export function file_is_file(file_path) {
11
+ try {
12
+ return fs.statSync(file_path).isFile();
13
+ }
14
+ catch {
15
+ return false;
16
+ }
17
+ }
18
+ export function file_is_socket(file_path) {
19
+ try {
20
+ return fs.statSync(file_path).isSocket();
21
+ }
22
+ catch {
23
+ return false;
24
+ }
25
+ }
26
+ export function file_is_executable(file_path) {
27
+ try {
28
+ fs.accessSync(file_path, fs.constants.X_OK);
29
+ return true;
30
+ }
31
+ catch {
32
+ return false;
33
+ }
34
+ }
@@ -21,7 +21,7 @@ async function git_message_generate_structured({ llm_config, system_prompt, user
21
21
  }
22
22
  export async function git_message_generate_string({ llm_config, inputs, }) {
23
23
  const { context_window, has_structured_json } = llm_config.llm_model_detail;
24
- const system_prompt = git_message_get_system_prompt({ has_structured_json });
24
+ const system_prompt = git_message_get_system_prompt({ has_structured_json, inputs });
25
25
  // Estimate remaining prompt length
26
26
  const user_tokens = context_window - llm_tokens_count_estimated({ llm_config, text: system_prompt }) - 1000;
27
27
  const user_length = user_tokens * 3;
@@ -1,34 +1,35 @@
1
1
  import { LF } from "./lib_char_control.js";
2
2
  import { EMPTY } from "./lib_char_empty.js";
3
+ const LF_LF = LF + LF;
3
4
  const portion_role = `
4
5
  Your role is to generate a Git commit message in conversational English.
5
6
  The user does not want Conventional Commits - the summary line must be a normal sentence.
6
- `.trim() + LF;
7
+ `.trim() + LF_LF;
7
8
  const portion_inputs = `
8
9
  The user will send you a <diffstat> block, the output of a 'git diff --staged --stat' command.
9
10
  The user will send you a <diff> block, the output of a 'git diff --staged' command.
10
- `.trim() + LF;
11
+ `.trim() + LF_LF;
11
12
  const portion_reminders = `
12
13
  Some reminders of how diffs work:
13
14
  - Lines that start with a single plus sign have been added to the file.
14
15
  - Lines that start with a single minus sign have been removed from the file.
15
16
  - Lines that start with @@ indicate a jump to a different section of the file - you can not see the code in these gaps.
16
- `.trim() + LF;
17
+ `.trim() + LF_LF;
17
18
  const portion_format_structured = `
18
19
  You must output in the following format (this will be forced):
19
20
  - summary_line: a single sentence giving a concise summary of the changes.
20
21
  - extra_lines: additional sentences giving more information about the changes.
21
- `.trim() + LF;
22
+ `.trim() + LF_LF;
22
23
  const portion_format_unstructured = `
23
24
  You must output in the following format - without any preamble or conclusion:
24
25
  - First line: a single sentence giving a concise summary of the changes.
25
26
  - Second line: completely blank - not even any spaces.
26
27
  - Then an unordered list (with a dash prefix) of additional sentences giving more information about the changes.
27
28
  - And nothing else.
28
- `.trim() + LF;
29
- function portion_format(has_structured_json) {
29
+ `.trim() + LF_LF;
30
+ const portion_format = (has_structured_json) => {
30
31
  return has_structured_json ? portion_format_structured : portion_format_unstructured;
31
- }
32
+ };
32
33
  const portion_instructions = `
33
34
  Use the imperative mood and present tense.
34
35
  Please write in full sentences that start with a capital letter.
@@ -42,19 +43,23 @@ Don't assume the change is always an improvement - it might be making things wor
42
43
  The number of additional sentences should depend upon the complexity of the change.
43
44
  A simple change needs only two additional sentences scaling up to a complex change with five additional sentences.
44
45
  If there are a lot of changes, you will need to summarize even more.
45
- `.trim() + LF;
46
+ `.trim() + LF_LF;
47
+ const portion_extra = (extra_prompts) => {
48
+ return extra_prompts && extra_prompts.length > 0 ? extra_prompts.map((s) => s.trim()).join(LF) + LF_LF : EMPTY;
49
+ };
46
50
  const portion_final = `
47
51
  Everything you write will be checked for validity and then saved directly to Git - it will not be reviewed by a human.
48
52
  Therefore, you must just output the Git message itself without any introductory or concluding sections.
49
- `.trim() + LF;
50
- export function git_message_get_system_prompt({ has_structured_json }) {
53
+ `.trim() + LF_LF;
54
+ export function git_message_get_system_prompt({ has_structured_json, inputs, }) {
51
55
  let system_prompt = EMPTY;
52
- system_prompt += portion_role + LF;
53
- system_prompt += portion_inputs + LF;
54
- system_prompt += portion_reminders + LF;
55
- system_prompt += portion_format(has_structured_json) + LF;
56
- system_prompt += portion_instructions + LF;
57
- system_prompt += portion_final + LF;
56
+ system_prompt += portion_role;
57
+ system_prompt += portion_inputs;
58
+ system_prompt += portion_reminders;
59
+ system_prompt += portion_format(has_structured_json);
60
+ system_prompt += portion_instructions;
61
+ system_prompt += portion_extra(inputs.extra_prompts);
62
+ system_prompt += portion_final;
58
63
  return system_prompt.trim();
59
64
  }
60
65
  export function git_message_get_user_prompt({ has_structured_json, inputs, max_length, }) {
@@ -62,11 +67,11 @@ export function git_message_get_user_prompt({ has_structured_json, inputs, max_l
62
67
  const truncate = diffstat.length + diff.length > max_length;
63
68
  const diff_truncated = truncate ? diff.slice(0, max_length - diffstat.length) + LF : diff;
64
69
  let user_prompt = EMPTY;
65
- user_prompt += "<diffstat>" + LF + diffstat + "</diffstat>" + LF + LF;
66
- user_prompt += "<diff>" + LF + diff_truncated + "</diff>" + LF + LF;
70
+ user_prompt += "<diffstat>" + LF + diffstat + "</diffstat>" + LF_LF;
71
+ user_prompt += "<diff>" + LF + diff_truncated + "</diff>" + LF_LF;
67
72
  if (truncate) {
68
- user_prompt += "Please note: the Diff above has been truncated" + LF + LF;
73
+ user_prompt += "Please note: the Diff above has been truncated" + LF_LF;
69
74
  }
70
- user_prompt += portion_format(has_structured_json) + LF;
75
+ user_prompt += portion_format(has_structured_json);
71
76
  return user_prompt.trim();
72
77
  }
@@ -0,0 +1,4 @@
1
+ import JSON5 from "json5";
2
+ export function json5_parse(text) {
3
+ return JSON5.parse(text);
4
+ }
@@ -1,8 +1,13 @@
1
1
  import { abort_with_error } from "./lib_abort.js";
2
2
  import { COMMA } from "./lib_char_punctuation.js";
3
+ import { llm_api_get_api_key, llm_api_get_api_key_env } from "./lib_llm_api.js";
3
4
  import { llm_model_find_detail } from "./lib_llm_model.js";
4
- import { llm_provider_get_api_key, llm_provider_get_api_key_env } from "./lib_llm_provider.js";
5
- export function llm_access_available({ llm_model_details, llm_model_name, llm_excludes, }) {
5
+ export function llm_access_available({ llm_model_details, llm_model_name, llm_include, llm_excludes, }) {
6
+ if (llm_include) {
7
+ if (!llm_model_name.includes(llm_include)) {
8
+ return false;
9
+ }
10
+ }
6
11
  if (llm_excludes) {
7
12
  const llm_excludes_array = llm_excludes.split(COMMA).map((exclude) => exclude.trim());
8
13
  for (const llm_exclude of llm_excludes_array) {
@@ -12,19 +17,19 @@ export function llm_access_available({ llm_model_details, llm_model_name, llm_ex
12
17
  }
13
18
  }
14
19
  const detail = llm_model_find_detail({ llm_model_details, llm_model_name });
15
- const { llm_provider, llm_model_code_direct, llm_model_code_requesty, llm_model_code_openrouter } = detail;
16
- if (llm_model_code_direct !== null && llm_provider !== null) {
17
- if (llm_provider_get_api_key(llm_provider)) {
20
+ const { llm_api_code, llm_model_code_direct, llm_model_code_requesty, llm_model_code_openrouter } = detail;
21
+ if (llm_model_code_direct !== null && llm_api_code !== null) {
22
+ if (llm_api_get_api_key(llm_api_code)) {
18
23
  return true;
19
24
  }
20
25
  }
21
26
  if (llm_model_code_openrouter !== null) {
22
- if (llm_provider_get_api_key("openrouter")) {
27
+ if (llm_api_get_api_key("openrouter")) {
23
28
  return true;
24
29
  }
25
30
  }
26
31
  if (llm_model_code_requesty !== null) {
27
- if (llm_provider_get_api_key("requesty")) {
32
+ if (llm_api_get_api_key("requesty")) {
28
33
  return true;
29
34
  }
30
35
  }
@@ -32,38 +37,38 @@ export function llm_access_available({ llm_model_details, llm_model_name, llm_ex
32
37
  }
33
38
  export function llm_access_get({ llm_model_details, llm_model_name, llm_router, }) {
34
39
  const detail = llm_model_find_detail({ llm_model_details, llm_model_name });
35
- const { llm_provider, llm_model_code_direct, llm_model_code_requesty, llm_model_code_openrouter } = detail;
40
+ const { llm_api_code, llm_model_code_direct, llm_model_code_requesty, llm_model_code_openrouter } = detail;
36
41
  if (!llm_router) {
37
- if (llm_model_code_direct !== null && llm_provider !== null) {
38
- const llm_api_key = llm_provider_get_api_key(llm_provider);
42
+ if (llm_model_code_direct !== null && llm_api_code !== null) {
43
+ const llm_api_key = llm_api_get_api_key(llm_api_code);
39
44
  if (llm_api_key) {
40
- return { llm_model_code: llm_model_code_direct, llm_provider, llm_api_key };
45
+ return { llm_model_code: llm_model_code_direct, llm_api_code, llm_api_key };
41
46
  }
42
47
  }
43
48
  }
44
49
  if (llm_model_code_openrouter !== null) {
45
- const llm_api_key = llm_provider_get_api_key("openrouter");
50
+ const llm_api_key = llm_api_get_api_key("openrouter");
46
51
  if (llm_api_key) {
47
- return { llm_model_code: llm_model_code_openrouter, llm_provider: "openrouter", llm_api_key };
52
+ return { llm_model_code: llm_model_code_openrouter, llm_api_code: "openrouter", llm_api_key };
48
53
  }
49
54
  }
50
55
  if (llm_model_code_requesty !== null) {
51
- const llm_api_key = llm_provider_get_api_key("requesty");
56
+ const llm_api_key = llm_api_get_api_key("requesty");
52
57
  if (llm_api_key) {
53
- return { llm_model_code: llm_model_code_requesty, llm_provider: "requesty", llm_api_key };
58
+ return { llm_model_code: llm_model_code_requesty, llm_api_code: "requesty", llm_api_key };
54
59
  }
55
60
  }
56
- if (llm_model_code_direct !== null && llm_provider !== null) {
57
- const llm_api_key = llm_provider_get_api_key(llm_provider);
61
+ if (llm_model_code_direct !== null && llm_api_code !== null) {
62
+ const llm_api_key = llm_api_get_api_key(llm_api_code);
58
63
  if (llm_api_key) {
59
- return { llm_model_code: llm_model_code_direct, llm_provider, llm_api_key };
64
+ return { llm_model_code: llm_model_code_direct, llm_api_code, llm_api_key };
60
65
  }
61
66
  }
62
- const env_openrouter = llm_provider_get_api_key_env("openrouter");
63
- const env_requesty = llm_provider_get_api_key_env("requesty");
64
- if (llm_provider !== null) {
65
- const env_provider = llm_provider_get_api_key_env(llm_provider);
66
- abort_with_error(`Please set environment variable ${env_provider}, ${env_openrouter} or ${env_requesty}`);
67
+ const env_openrouter = llm_api_get_api_key_env("openrouter");
68
+ const env_requesty = llm_api_get_api_key_env("requesty");
69
+ if (llm_api_code !== null) {
70
+ const env_direct = llm_api_get_api_key_env(llm_api_code);
71
+ abort_with_error(`Please set environment variable ${env_direct}, ${env_openrouter} or ${env_requesty}`);
67
72
  }
68
73
  abort_with_error(`Please set environment variable ${env_openrouter} or ${env_requesty}`);
69
74
  }
@@ -5,9 +5,11 @@ import { createOpenAI } from "@ai-sdk/openai";
5
5
  import { createOpenRouter } from "@openrouter/ai-sdk-provider";
6
6
  import { createRequesty } from "@requesty/ai-sdk";
7
7
  import { abort_with_error } from "./lib_abort.js";
8
+ import { assert_type_string } from "./lib_assert_type.js";
9
+ import { AT_SIGN } from "./lib_char_punctuation.js";
8
10
  import { env_get } from "./lib_env.js";
9
- export function llm_provider_get_via(llm_provider) {
10
- switch (llm_provider) {
11
+ export function llm_api_get_via(llm_api_code) {
12
+ switch (llm_api_code) {
11
13
  case "anthropic":
12
14
  case "deepseek":
13
15
  case "google":
@@ -18,11 +20,11 @@ export function llm_provider_get_via(llm_provider) {
18
20
  case "openrouter":
19
21
  return "via OpenRouter";
20
22
  default:
21
- abort_with_error("Unknown LLM provider");
23
+ abort_with_error("Unknown LLM API");
22
24
  }
23
25
  }
24
- export function llm_provider_get_api_key_env(llm_provider) {
25
- switch (llm_provider) {
26
+ export function llm_api_get_api_key_env(llm_api_code) {
27
+ switch (llm_api_code) {
26
28
  case "anthropic":
27
29
  return "ANTHROPIC_API_KEY";
28
30
  case "deepseek":
@@ -36,15 +38,15 @@ export function llm_provider_get_api_key_env(llm_provider) {
36
38
  case "openrouter":
37
39
  return "OPENROUTER_API_KEY";
38
40
  default:
39
- abort_with_error("Unknown LLM provider");
41
+ abort_with_error("Unknown LLM API");
40
42
  }
41
43
  }
42
- export function llm_provider_get_api_key(llm_provider) {
43
- const env = llm_provider_get_api_key_env(llm_provider);
44
+ export function llm_api_get_api_key(llm_api_code) {
45
+ const env = llm_api_get_api_key_env(llm_api_code);
44
46
  return env_get(env);
45
47
  }
46
- export function llm_provider_get_ai_sdk_language_model({ llm_model_code, llm_provider, llm_api_key, }) {
47
- switch (llm_provider) {
48
+ export function llm_api_get_ai_sdk_language_model({ llm_model_code, llm_api_code, llm_api_key, }) {
49
+ switch (llm_api_code) {
48
50
  case "anthropic":
49
51
  return createAnthropic({ apiKey: llm_api_key })(llm_model_code);
50
52
  case "deepseek":
@@ -55,9 +57,23 @@ export function llm_provider_get_ai_sdk_language_model({ llm_model_code, llm_pro
55
57
  return createOpenAI({ apiKey: llm_api_key })(llm_model_code);
56
58
  case "requesty":
57
59
  return createRequesty({ apiKey: llm_api_key })(llm_model_code);
58
- case "openrouter":
59
- return createOpenRouter({ apiKey: llm_api_key })(llm_model_code);
60
+ case "openrouter": {
61
+ const openrouter = createOpenRouter({ apiKey: llm_api_key });
62
+ if (llm_model_code.includes(AT_SIGN)) {
63
+ const splits = llm_model_code.split(AT_SIGN);
64
+ const model_id = assert_type_string(splits[0]);
65
+ const provider = assert_type_string(splits[1]);
66
+ return openrouter(model_id, {
67
+ extraBody: {
68
+ provider: {
69
+ only: [provider],
70
+ },
71
+ },
72
+ });
73
+ }
74
+ return openrouter(llm_model_code);
75
+ }
60
76
  default:
61
- abort_with_error("Unknown LLM provider");
77
+ abort_with_error("Unknown LLM API");
62
78
  }
63
79
  }