@johnowennixon/diffdash 1.9.0 → 1.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -131,12 +131,13 @@ All command-line arguments are optional.
131
131
  | `--disable-push` | disable pushing changes - exit after making the commit |
132
132
  | `--add-prefix PREFIX` | add a prefix to the commit message summary line |
133
133
  | `--add-suffix SUFFIX` | add a suffix to the commit message summary line |
134
- | `--no-verify` | bypass git hooks when committing or pushing to Git |
135
- | `--force` | apply force when pushing to Git |
136
134
  | `--llm-list` | display a list of available Large Language Models and exit |
137
135
  | `--llm-compare` | compare the generated messages from all models - but do not commit |
138
136
  | `--llm-model MODEL` | choose the LLM model by name (the default is normally best) |
139
137
  | `--llm-excludes MODELS` | models to exclude from comparison (comma separated) |
138
+ | `--no-secret-check` | bypass checking for secrets in diffs |
139
+ | `--no-verify` | bypass git hooks when committing or pushing to Git |
140
+ | `--force` | apply force when pushing to Git |
140
141
  | `--just-output` | just output the commit message for use in scripts |
141
142
  | `--silent` | suppress all normal output - errors and aborts still display |
142
143
  | `--debug-llm-prompts` | show prompts sent to the LLM |
@@ -153,6 +154,8 @@ Files containing secrets should not be in Git. But if they are, you can add an e
153
154
  .env -diff
154
155
  ```
155
156
 
157
+ There is a rudimentary check for secrets in diffs before they are submitted to the LLM. If any secrets are found, an interactive prompt lets you ignore them and proceed. If you want to bypass this check entirely, use the `--no-secret-check` flag.
158
+
156
159
  ## Development
157
160
 
158
161
  To install on your laptop:
package/dist/package.json CHANGED
@@ -1,12 +1,12 @@
1
1
  {
2
2
  "name": "@johnowennixon/diffdash",
3
- "version": "1.9.0",
3
+ "version": "1.10.0",
4
4
  "description": "A command-line tool to generate Git commit messages using AI",
5
5
  "license": "0BSD",
6
6
  "author": "John Owen Nixon",
7
7
  "repository": {
8
8
  "type": "git",
9
- "url": "https://github.com/johnowennixon/diffdash.git"
9
+ "url": "git+https://github.com/johnowennixon/diffdash.git"
10
10
  },
11
11
  "engines": {
12
12
  "node": ">=20"
@@ -42,33 +42,34 @@
42
42
  "test": "run-s -ls lint build"
43
43
  },
44
44
  "dependencies": {
45
- "@ai-sdk/anthropic": "2.0.9",
46
- "@ai-sdk/deepseek": "1.0.13",
47
- "@ai-sdk/google": "2.0.11",
48
- "@ai-sdk/openai": "2.0.23",
49
- "@inquirer/prompts": "7.8.4",
50
- "@openrouter/ai-sdk-provider": "1.1.2",
51
- "ai": "5.0.29",
52
- "ansis": "4.1.0",
45
+ "@ai-sdk/anthropic": "2.0.23",
46
+ "@ai-sdk/deepseek": "1.0.20",
47
+ "@ai-sdk/google": "2.0.17",
48
+ "@ai-sdk/openai": "2.0.42",
49
+ "@inquirer/prompts": "7.8.6",
50
+ "@openrouter/ai-sdk-provider": "1.2.0",
51
+ "ai": "5.0.60",
52
+ "ansis": "4.2.0",
53
53
  "argparse": "2.0.1",
54
54
  "cli-table3": "0.6.5",
55
55
  "json5": "2.2.3",
56
+ "magic-regexp": "0.10.0",
56
57
  "simple-git": "3.28.0",
57
- "zod": "4.1.5"
58
+ "zod": "4.1.11"
58
59
  },
59
60
  "devDependencies": {
60
- "@biomejs/biome": "2.2.2",
61
- "@candide/tsgolint": "1.3.0",
61
+ "@biomejs/biome": "2.2.5",
62
+ "@candide/tsgolint": "1.4.0",
62
63
  "@johnowennixon/add-shebangs": "1.1.0",
63
64
  "@johnowennixon/chmodx": "2.1.0",
64
65
  "@types/argparse": "2.0.17",
65
- "@types/node": "24.3.0",
66
- "@typescript/native-preview": "7.0.0-dev.20250902.1",
67
- "knip": "5.63.0",
66
+ "@types/node": "24.5.2",
67
+ "@typescript/native-preview": "7.0.0-dev.20250925.1",
68
+ "knip": "5.63.1",
68
69
  "markdownlint-cli2": "0.18.1",
69
70
  "npm-run-all2": "8.0.4",
70
- "oxlint": "1.14.0",
71
+ "oxlint": "1.19.0",
71
72
  "rimraf": "6.0.1",
72
- "typescript": "5.9.2"
73
+ "typescript": "5.9.3"
73
74
  }
74
75
  }
@@ -8,3 +8,4 @@ export const DIGIT_6 = "6";
8
8
  export const DIGIT_7 = "7";
9
9
  export const DIGIT_8 = "8";
10
10
  export const DIGIT_9 = "9";
11
+ export const DIGITS = "0123456789";
@@ -17,12 +17,6 @@ export function cli_integer_always(options = {}) {
17
17
  export function cli_boolean(options = {}) {
18
18
  return { kind: "boolean", options, value: false };
19
19
  }
20
- export function cli_command_sync(command_sync, options = {}) {
21
- return { kind: "boolean", options, value: false, command_sync };
22
- }
23
- export function cli_command_async(command_async, options = {}) {
24
- return { kind: "boolean", options, value: false, command_async };
25
- }
26
20
  export function cli_choice_optional(options = {}) {
27
21
  return { kind: "choice", options, value: undefined };
28
22
  }
@@ -126,50 +120,6 @@ function cli_recursive_parse({ schema, namespace, predicate, }) {
126
120
  }
127
121
  return result;
128
122
  }
129
- function cli_recursive_despatch_sync({ schema, namespace, parsed_args, }) {
130
- for (const key in schema) {
131
- if (!Object.hasOwn(schema, key)) {
132
- continue;
133
- }
134
- const cli = schema[key];
135
- if (!cli) {
136
- continue;
137
- }
138
- if (cli.kind === "meg") {
139
- const nested_schema = cli.value;
140
- cli_recursive_despatch_sync({ schema: nested_schema, namespace, parsed_args });
141
- }
142
- else if (cli.kind === "boolean") {
143
- if (namespace[key]) {
144
- if (cli.command_sync) {
145
- cli.command_sync(parsed_args);
146
- }
147
- }
148
- }
149
- }
150
- }
151
- async function cli_recursive_despatch_async({ schema, namespace, parsed_args, }) {
152
- for (const key in schema) {
153
- if (!Object.hasOwn(schema, key)) {
154
- continue;
155
- }
156
- const cli = schema[key];
157
- if (!cli) {
158
- continue;
159
- }
160
- if (cli.kind === "meg") {
161
- const nested_schema = cli.value;
162
- await cli_recursive_despatch_async({ schema: nested_schema, namespace, parsed_args });
163
- }
164
- else if (cli.kind === "boolean") {
165
- if (namespace[key]) {
166
- if (cli.command_async) {
167
- await cli.command_async(parsed_args);
168
- }
169
- }
170
- }
171
- }
172
- }
173
123
  export function cli_make_parser({ cli_schema, description, }) {
174
124
  const argument_parser_options = { description, allow_abbrev: false };
175
125
  const parser = new ArgumentParser(argument_parser_options);
@@ -177,11 +127,5 @@ export function cli_make_parser({ cli_schema, description, }) {
177
127
  const namespace = parser.parse_args();
178
128
  debug_inspect_when(debug_channels.cli, namespace, "namespace");
179
129
  const parsed_args = cli_recursive_parse({ schema: cli_schema, namespace });
180
- function despatch_sync() {
181
- cli_recursive_despatch_sync({ schema: cli_schema, namespace, parsed_args });
182
- }
183
- async function despatch_async() {
184
- await cli_recursive_despatch_async({ schema: cli_schema, namespace, parsed_args });
185
- }
186
- return { parsed_args, despatch_sync, despatch_async };
130
+ return { parsed_args };
187
131
  }
@@ -10,8 +10,6 @@ const diffdash_cli_schema = {
10
10
  disable_preview: cli_boolean({ help: "disable previewing the generated message" }),
11
11
  disable_commit: cli_boolean({ help: "disable committing changes - exit after generating the message" }),
12
12
  disable_push: cli_boolean({ help: "disable pushing changes - exit after making the commit" }),
13
- no_verify: cli_boolean({ help: "bypass git hooks when committing or pushing to Git" }),
14
- force: cli_boolean({ help: "apply force when pushing to Git" }),
15
13
  add_prefix: cli_string({ help: "add a prefix to the commit message summary line", metavar: "PREFIX" }),
16
14
  add_suffix: cli_string({ help: "add a suffix to the commit message summary line", metavar: "SUFFIX" }),
17
15
  llm_list: cli_boolean({ help: "display a list of available Large Language Models and exit" }),
@@ -22,6 +20,9 @@ const diffdash_cli_schema = {
22
20
  default: diffdash_llm_model_default,
23
21
  }),
24
22
  llm_excludes: cli_string({ help: "models to exclude from comparison (comma separated)", metavar: "MODELS" }),
23
+ no_secret_check: cli_boolean({ help: "bypass checking for secrets in diffs" }),
24
+ no_verify: cli_boolean({ help: "bypass git hooks when committing or pushing to Git" }),
25
+ force: cli_boolean({ help: "apply force when pushing to Git" }),
25
26
  just_output: cli_boolean({ help: "just output the commit message for use in scripts" }),
26
27
  silent: cli_boolean({ help: "suppress all normal output - errors and aborts still display" }),
27
28
  debug_llm_prompts: cli_boolean({ help: "debug prompts sent to the LLM" }),
@@ -33,7 +33,7 @@ function diffdash_config_file_read(config) {
33
33
  }
34
34
  }
35
35
  export function diffdash_config_get() {
36
- const { version, auto_add, auto_commit, auto_push, disable_add, disable_commit, disable_preview, disable_status, disable_push, no_verify, force, add_prefix, add_suffix, llm_list, llm_compare, llm_model, llm_excludes, just_output, silent, debug_llm_prompts, debug_llm_inputs, debug_llm_outputs, } = diffdash_cli_parsed_args;
36
+ const { version, auto_add, auto_commit, auto_push, disable_add, disable_commit, disable_preview, disable_status, disable_push, add_prefix, add_suffix, llm_list, llm_compare, llm_model, llm_excludes, no_secret_check, no_verify, force, just_output, silent, debug_llm_prompts, debug_llm_inputs, debug_llm_outputs, } = diffdash_cli_parsed_args;
37
37
  if (version) {
38
38
  tell_plain(`${PACKAGE_NAME} v${PACKAGE_VERSION}`);
39
39
  process.exit(0);
@@ -58,6 +58,7 @@ export function diffdash_config_get() {
58
58
  disable_push,
59
59
  add_prefix,
60
60
  add_suffix,
61
+ no_secret_check,
61
62
  no_verify,
62
63
  force,
63
64
  llm_compare,
@@ -2,13 +2,14 @@ import { abort_with_error, abort_with_warning } from "./lib_abort.js";
2
2
  import { ansi_blue } from "./lib_ansi.js";
3
3
  import { debug_channels, debug_inspect } from "./lib_debug.js";
4
4
  import { diffdash_add_footer, diffdash_add_prefix_or_suffix } from "./lib_diffdash_add.js";
5
- import { error_get_text } from "./lib_error.js";
5
+ import { error_get_message, error_get_text } from "./lib_error.js";
6
6
  import { git_message_display } from "./lib_git_message_display.js";
7
7
  import { git_message_generate_result } from "./lib_git_message_generate.js";
8
8
  import { git_message_validate_check, git_message_validate_get_result } from "./lib_git_message_validate.js";
9
9
  import { git_simple_open_check_not_bare, git_simple_open_git_repo } from "./lib_git_simple_open.js";
10
10
  import { git_simple_staging_create_commit, git_simple_staging_get_staged_diff, git_simple_staging_get_staged_diffstat, git_simple_staging_has_staged_changes, git_simple_staging_has_unstaged_changes, git_simple_staging_push_to_remote, git_simple_staging_stage_all_changes, } from "./lib_git_simple_staging.js";
11
11
  import { llm_results_summary } from "./lib_llm_results.js";
12
+ import { secret_check } from "./lib_secret_check.js";
12
13
  import { stdio_write_stdout, stdio_write_stdout_linefeed } from "./lib_stdio_write.js";
13
14
  import { tell_action, tell_info, tell_plain, tell_success, tell_warning } from "./lib_tell.js";
14
15
  import { tui_confirm } from "./lib_tui_confirm.js";
@@ -92,13 +93,21 @@ async function phase_status({ config, git }) {
92
93
  }
93
94
  async function phase_compare({ config, git }) {
94
95
  const { silent } = config;
95
- if (!silent) {
96
- tell_action("Generating Git commit messages using all models in parallel");
97
- }
98
- const { all_llm_configs, add_prefix, add_suffix, extra_prompts } = config;
96
+ const { all_llm_configs, add_prefix, add_suffix, no_secret_check, extra_prompts } = config;
99
97
  const diffstat = await git_simple_staging_get_staged_diffstat(git);
100
98
  const diff = await git_simple_staging_get_staged_diff(git);
99
+ if (!no_secret_check) {
100
+ try {
101
+ await secret_check({ text: diff, interactive: true });
102
+ }
103
+ catch (error) {
104
+ abort_with_error(`Aborting: ${error_get_message(error)}`);
105
+ }
106
+ }
101
107
  const inputs = { diffstat, diff, extra_prompts };
108
+ if (!silent) {
109
+ tell_action("Generating Git commit messages using all models in parallel");
110
+ }
102
111
  const result_promises = all_llm_configs.map((llm_config) => git_message_generate_result({ llm_config, inputs }));
103
112
  const all_results = await Promise.all(result_promises);
104
113
  for (const result of all_results) {
@@ -120,14 +129,22 @@ async function phase_compare({ config, git }) {
120
129
  llm_results_summary(all_results);
121
130
  }
122
131
  async function phase_generate({ config, git }) {
123
- const { disable_preview, add_prefix, add_suffix, llm_config, just_output, silent, extra_prompts } = config;
132
+ const { disable_preview, add_prefix, add_suffix, llm_config, no_secret_check, just_output, silent, extra_prompts } = config;
124
133
  const { llm_model_name } = llm_config;
125
- if (!silent && !just_output) {
126
- tell_action(`Generating the Git commit message using ${llm_model_name}`);
127
- }
128
134
  const diffstat = await git_simple_staging_get_staged_diffstat(git);
129
135
  const diff = await git_simple_staging_get_staged_diff(git);
136
+ if (!no_secret_check) {
137
+ try {
138
+ await secret_check({ text: diff, interactive: true });
139
+ }
140
+ catch (error) {
141
+ abort_with_error(`Aborting: ${error_get_message(error)}`);
142
+ }
143
+ }
130
144
  const inputs = { diffstat, diff, extra_prompts };
145
+ if (!silent && !just_output) {
146
+ tell_action(`Generating the Git commit message using ${llm_model_name}`);
147
+ }
131
148
  const result = await git_message_generate_result({ llm_config, inputs });
132
149
  const { error_text } = result;
133
150
  if (error_text !== null) {
@@ -4,22 +4,23 @@ import { git_message_prompt_get_system, git_message_prompt_get_user } from "./li
4
4
  import { git_message_schema, git_message_schema_format } from "./lib_git_message_schema.js";
5
5
  import { llm_chat_generate_object, llm_chat_generate_text } from "./lib_llm_chat.js";
6
6
  import { llm_tokens_debug_usage, llm_tokens_estimate_length_from_tokens, llm_tokens_estimate_tokens_from_length, } from "./lib_llm_tokens.js";
7
- async function git_message_generate_unstructured({ llm_config, system_prompt, user_prompt, }) {
8
- const outputs = await llm_chat_generate_text({ llm_config, system_prompt, user_prompt });
7
+ async function git_message_generate_unstructured({ llm_config, system_prompt, user_prompt, max_output_tokens, }) {
8
+ const outputs = await llm_chat_generate_text({ llm_config, system_prompt, user_prompt, max_output_tokens });
9
9
  return outputs;
10
10
  }
11
- async function git_message_generate_structured({ llm_config, system_prompt, user_prompt, }) {
11
+ async function git_message_generate_structured({ llm_config, system_prompt, user_prompt, max_output_tokens, }) {
12
12
  const schema = git_message_schema;
13
13
  const { generated_object, total_usage, provider_metadata } = await llm_chat_generate_object({
14
14
  llm_config,
15
15
  system_prompt,
16
16
  user_prompt,
17
+ max_output_tokens,
17
18
  schema,
18
19
  });
19
20
  const generated_text = git_message_schema_format(generated_object);
20
21
  return { generated_text, reasoning_text: undefined, total_usage, provider_metadata };
21
22
  }
22
- export async function git_message_generate_string({ llm_config, inputs, }) {
23
+ async function git_message_generate_outputs({ llm_config, inputs, }) {
23
24
  const { effective_context_window } = llm_config;
24
25
  const { has_structured_json } = llm_config.llm_model_detail;
25
26
  const system_prompt = git_message_prompt_get_system({ has_structured_json, inputs });
@@ -30,10 +31,11 @@ export async function git_message_generate_string({ llm_config, inputs, }) {
30
31
  inputs,
31
32
  max_length: user_length,
32
33
  });
34
+ const max_output_tokens = 8192; // This is the maximum for some models
33
35
  llm_tokens_debug_usage({ name: "Inputs", llm_config, text: system_prompt + user_prompt });
34
36
  const outputs = has_structured_json
35
- ? await git_message_generate_structured({ llm_config, system_prompt, user_prompt })
36
- : await git_message_generate_unstructured({ llm_config, system_prompt, user_prompt });
37
+ ? await git_message_generate_structured({ llm_config, system_prompt, user_prompt, max_output_tokens })
38
+ : await git_message_generate_unstructured({ llm_config, system_prompt, user_prompt, max_output_tokens });
37
39
  llm_tokens_debug_usage({ name: "Outputs", llm_config, text: outputs.generated_text });
38
40
  return outputs;
39
41
  }
@@ -41,7 +43,7 @@ export async function git_message_generate_result({ llm_config, inputs, }) {
41
43
  const duration = new Duration();
42
44
  duration.start();
43
45
  try {
44
- const outputs = await git_message_generate_string({ llm_config, inputs });
46
+ const outputs = await git_message_generate_outputs({ llm_config, inputs });
45
47
  duration.stop();
46
48
  const seconds = duration.seconds_rounded();
47
49
  return { llm_config, seconds, error_text: null, outputs };
@@ -1,5 +1,6 @@
1
1
  import { LF } from "./lib_char_control.js";
2
2
  import { EMPTY } from "./lib_char_empty.js";
3
+ import { tell_warning } from "./lib_tell.js";
3
4
  const LF_LF = LF + LF;
4
5
  const portion_role = `
5
6
  Your role is to generate a Git commit message in conversational English.
@@ -65,6 +66,9 @@ export function git_message_prompt_get_system({ has_structured_json, inputs, })
65
66
  export function git_message_prompt_get_user({ has_structured_json, inputs, max_length, }) {
66
67
  const { diffstat, diff } = inputs;
67
68
  const truncate = diffstat.length + diff.length > max_length;
69
+ if (truncate) {
70
+ tell_warning("The Diff is too long to fit in the user prompt - it is being truncated");
71
+ }
68
72
  const diff_truncated = truncate ? diff.slice(0, max_length - diffstat.length) + LF : diff;
69
73
  let user_prompt = EMPTY;
70
74
  user_prompt += "<diffstat>" + LF + diffstat + "</diffstat>" + LF_LF;
@@ -10,7 +10,7 @@ import { tui_block_string } from "./lib_tui_block.js";
10
10
  function llm_chat_get_parameters() {
11
11
  return {
12
12
  max_output_tokens: parse_int_or_undefined(env_get_empty("lib_llm_chat_max_output_tokens")),
13
- timeout: parse_int(env_get_substitute("lib_llm_chat_timeout", "60")),
13
+ timeout: parse_int(env_get_substitute("lib_llm_chat_timeout", "90")),
14
14
  };
15
15
  }
16
16
  function llm_chat_debug_prompts({ llm_model_name, system_prompt, user_prompt, }) {
@@ -22,7 +22,7 @@ function llm_chat_debug_prompts({ llm_model_name, system_prompt, user_prompt, })
22
22
  tui_block_string({ teller, title: `LLM user prompt (for ${llm_model_name}):`, content: user_prompt });
23
23
  }
24
24
  }
25
- export async function llm_chat_generate_text({ llm_config, system_prompt, user_prompt, tools, max_steps, min_steps, }) {
25
+ export async function llm_chat_generate_text({ llm_config, system_prompt, user_prompt, max_output_tokens, tools, max_steps, min_steps, }) {
26
26
  const { llm_model_name, llm_model_detail, llm_model_code, llm_api_code, llm_api_key } = llm_config;
27
27
  llm_chat_debug_prompts({ system_prompt, user_prompt, llm_model_name });
28
28
  const ai_sdk_language_model = llm_api_get_ai_sdk_language_model({
@@ -30,9 +30,14 @@ export async function llm_chat_generate_text({ llm_config, system_prompt, user_p
30
30
  llm_api_code,
31
31
  llm_api_key,
32
32
  });
33
- const { recommended_temperature, provider_options } = llm_model_detail;
33
+ const { recommended_temperature, provider_options, max_output_tokens: max_output_tokens_model } = llm_model_detail;
34
34
  const temperature = recommended_temperature;
35
- const { max_output_tokens, timeout } = llm_chat_get_parameters();
35
+ const { timeout, max_output_tokens: max_output_tokens_env } = llm_chat_get_parameters();
36
+ if (max_output_tokens_env !== undefined) {
37
+ max_output_tokens = max_output_tokens_env;
38
+ }
39
+ max_output_tokens =
40
+ max_output_tokens === undefined ? max_output_tokens_model : Math.min(max_output_tokens, max_output_tokens_model);
36
41
  const llm_inputs = {
37
42
  model: ai_sdk_language_model,
38
43
  system: system_prompt,
@@ -57,11 +62,11 @@ export async function llm_chat_generate_text({ llm_config, system_prompt, user_p
57
62
  const { text: generated_text, reasoningText: reasoning_text, totalUsage: total_usage, providerMetadata: provider_metadata, } = llm_outputs;
58
63
  return { generated_text, reasoning_text, total_usage, provider_metadata };
59
64
  }
60
- export async function llm_chat_generate_text_result({ llm_config, system_prompt, user_prompt, }) {
65
+ export async function llm_chat_generate_text_result({ llm_config, system_prompt, user_prompt, max_output_tokens, }) {
61
66
  const duration = new Duration();
62
67
  duration.start();
63
68
  try {
64
- const outputs = await llm_chat_generate_text({ llm_config, system_prompt, user_prompt });
69
+ const outputs = await llm_chat_generate_text({ llm_config, system_prompt, user_prompt, max_output_tokens });
65
70
  duration.stop();
66
71
  const seconds = duration.seconds_rounded();
67
72
  return { llm_config, seconds, error_text: null, outputs };
@@ -73,7 +78,7 @@ export async function llm_chat_generate_text_result({ llm_config, system_prompt,
73
78
  return { llm_config, seconds, error_text, outputs: null };
74
79
  }
75
80
  }
76
- export async function llm_chat_generate_object({ llm_config, user_prompt, system_prompt, schema, }) {
81
+ export async function llm_chat_generate_object({ llm_config, user_prompt, system_prompt, max_output_tokens, schema, }) {
77
82
  const { llm_model_name, llm_model_detail, llm_model_code, llm_api_code, llm_api_key } = llm_config;
78
83
  llm_chat_debug_prompts({ system_prompt, user_prompt, llm_model_name });
79
84
  const ai_sdk_language_model = llm_api_get_ai_sdk_language_model({
@@ -83,14 +88,14 @@ export async function llm_chat_generate_object({ llm_config, user_prompt, system
83
88
  });
84
89
  const { recommended_temperature, provider_options } = llm_model_detail;
85
90
  const temperature = recommended_temperature;
86
- const { max_output_tokens, timeout } = llm_chat_get_parameters();
91
+ const { timeout, max_output_tokens: max_output_tokens_env } = llm_chat_get_parameters();
87
92
  const llm_inputs = {
88
93
  model: ai_sdk_language_model,
89
94
  system: system_prompt,
90
95
  prompt: user_prompt,
91
96
  output: "object",
92
97
  schema,
93
- maxOutputTokens: max_output_tokens,
98
+ maxOutputTokens: max_output_tokens_env ?? max_output_tokens,
94
99
  temperature,
95
100
  providerOptions: provider_options,
96
101
  abortSignal: AbortSignal.timeout(timeout * 1000),
@@ -37,6 +37,7 @@ export const LLM_MODEL_DETAILS = [
37
37
  llm_model_code: "claude-3-5-haiku-latest",
38
38
  llm_api_code: "anthropic",
39
39
  context_window: 200_000,
40
+ max_output_tokens: 8192,
40
41
  cents_input: 80,
41
42
  cents_output: 400,
42
43
  default_reasoning: false,
@@ -49,6 +50,7 @@ export const LLM_MODEL_DETAILS = [
49
50
  llm_model_code: "claude-3-7-sonnet-latest",
50
51
  llm_api_code: "anthropic",
51
52
  context_window: 200_000,
53
+ max_output_tokens: 64_000,
52
54
  cents_input: 300,
53
55
  cents_output: 1500,
54
56
  default_reasoning: false,
@@ -61,6 +63,7 @@ export const LLM_MODEL_DETAILS = [
61
63
  llm_model_code: "claude-sonnet-4-0",
62
64
  llm_api_code: "anthropic",
63
65
  context_window: 200_000,
66
+ max_output_tokens: 64_000,
64
67
  cents_input: 300,
65
68
  cents_output: 1500,
66
69
  default_reasoning: false,
@@ -73,6 +76,7 @@ export const LLM_MODEL_DETAILS = [
73
76
  llm_model_code: "claude-sonnet-4-0",
74
77
  llm_api_code: "anthropic",
75
78
  context_window: 200_000,
79
+ max_output_tokens: 62_976, // = 64000 - 1024 used for reasoning
76
80
  cents_input: 300,
77
81
  cents_output: 1500,
78
82
  default_reasoning: true,
@@ -80,11 +84,38 @@ export const LLM_MODEL_DETAILS = [
80
84
  recommended_temperature: undefined,
81
85
  provider_options: provider_options_anthropic({ thinking: true }),
82
86
  },
87
+ {
88
+ llm_model_name: "claude-sonnet-4.5",
89
+ llm_model_code: "claude-sonnet-4-5",
90
+ llm_api_code: "anthropic",
91
+ context_window: 1_000_000,
92
+ max_output_tokens: 64_000,
93
+ cents_input: 300, // for input tokens <= 200K
94
+ cents_output: 1500, // for input tokens <= 200K
95
+ default_reasoning: false,
96
+ has_structured_json: true,
97
+ recommended_temperature: undefined,
98
+ provider_options: provider_options_anthropic({ thinking: false }),
99
+ },
100
+ {
101
+ llm_model_name: "claude-sonnet-4.5-thinking",
102
+ llm_model_code: "claude-sonnet-4-5",
103
+ llm_api_code: "anthropic",
104
+ context_window: 1_000_000,
105
+ max_output_tokens: 62_976, // = 64000 - 1024 used for reasoning
106
+ cents_input: 300, // for input tokens <= 200K
107
+ cents_output: 1500, // for input tokens <= 200K
108
+ default_reasoning: false,
109
+ has_structured_json: true,
110
+ recommended_temperature: undefined,
111
+ provider_options: provider_options_anthropic({ thinking: true }),
112
+ },
83
113
  {
84
114
  llm_model_name: "codestral-2508",
85
115
  llm_model_code: "mistralai/codestral-2508",
86
116
  llm_api_code: "openrouter",
87
117
  context_window: 256_000,
118
+ max_output_tokens: 256_000,
88
119
  cents_input: 30,
89
120
  cents_output: 90,
90
121
  default_reasoning: false,
@@ -97,6 +128,7 @@ export const LLM_MODEL_DETAILS = [
97
128
  llm_model_code: "deepseek-chat",
98
129
  llm_api_code: "deepseek",
99
130
  context_window: 128_000,
131
+ max_output_tokens: 8192,
100
132
  cents_input: 56,
101
133
  cents_output: 168,
102
134
  default_reasoning: false,
@@ -109,6 +141,7 @@ export const LLM_MODEL_DETAILS = [
109
141
  llm_model_code: "deepseek-reasoner",
110
142
  llm_api_code: "deepseek",
111
143
  context_window: 128_000,
144
+ max_output_tokens: 65_536,
112
145
  cents_input: 56,
113
146
  cents_output: 168,
114
147
  default_reasoning: true,
@@ -121,6 +154,7 @@ export const LLM_MODEL_DETAILS = [
121
154
  llm_model_code: "mistralai/devstral-medium",
122
155
  llm_api_code: "openrouter",
123
156
  context_window: 128_000,
157
+ max_output_tokens: 128_000,
124
158
  cents_input: 40,
125
159
  cents_output: 200,
126
160
  default_reasoning: false,
@@ -133,6 +167,7 @@ export const LLM_MODEL_DETAILS = [
133
167
  llm_model_code: "mistralai/devstral-small",
134
168
  llm_api_code: "openrouter",
135
169
  context_window: 128_000,
170
+ max_output_tokens: 128_000,
136
171
  cents_input: 10,
137
172
  cents_output: 30,
138
173
  default_reasoning: false,
@@ -145,6 +180,7 @@ export const LLM_MODEL_DETAILS = [
145
180
  llm_model_code: "gemini-2.0-flash",
146
181
  llm_api_code: "google",
147
182
  context_window: 1_048_576,
183
+ max_output_tokens: 8192,
148
184
  cents_input: 10,
149
185
  cents_output: 40,
150
186
  default_reasoning: false,
@@ -157,6 +193,7 @@ export const LLM_MODEL_DETAILS = [
157
193
  llm_model_code: "gemini-2.5-flash",
158
194
  llm_api_code: "google",
159
195
  context_window: 1_048_576,
196
+ max_output_tokens: 65_536,
160
197
  cents_input: 30,
161
198
  cents_output: 250,
162
199
  default_reasoning: false,
@@ -169,6 +206,7 @@ export const LLM_MODEL_DETAILS = [
169
206
  llm_model_code: "gemini-2.5-pro",
170
207
  llm_api_code: "google",
171
208
  context_window: 1_048_576,
209
+ max_output_tokens: 65_536,
172
210
  cents_input: 125,
173
211
  cents_output: 1000,
174
212
  default_reasoning: false,
@@ -181,6 +219,7 @@ export const LLM_MODEL_DETAILS = [
181
219
  llm_model_code: "z-ai/glm-4-32b",
182
220
  llm_api_code: "openrouter",
183
221
  context_window: 128_000,
222
+ max_output_tokens: 128_000,
184
223
  cents_input: 10,
185
224
  cents_output: 10,
186
225
  default_reasoning: false,
@@ -193,30 +232,46 @@ export const LLM_MODEL_DETAILS = [
193
232
  llm_model_code: "z-ai/glm-4.5",
194
233
  llm_api_code: "openrouter",
195
234
  context_window: 128_000,
235
+ max_output_tokens: 96_000,
196
236
  cents_input: 60,
197
237
  cents_output: 220,
198
238
  default_reasoning: true,
199
239
  has_structured_json: false,
200
240
  recommended_temperature: undefined,
201
- provider_options: provider_options_openrouter({ only: "z-ai/fp8" }),
241
+ provider_options: provider_options_openrouter({ only: "z-ai" }),
202
242
  },
203
243
  {
204
244
  llm_model_name: "glm-4.5-air@z-ai",
205
245
  llm_model_code: "z-ai/glm-4.5-air",
206
246
  llm_api_code: "openrouter",
207
247
  context_window: 128_000,
248
+ max_output_tokens: 96_000,
208
249
  cents_input: 20,
209
250
  cents_output: 110,
210
251
  default_reasoning: true,
211
252
  has_structured_json: false,
212
253
  recommended_temperature: undefined,
213
- provider_options: provider_options_openrouter({ only: "z-ai/fp8" }),
254
+ provider_options: provider_options_openrouter({ only: "z-ai" }),
255
+ },
256
+ {
257
+ llm_model_name: "glm-4.6@z-ai",
258
+ llm_model_code: "z-ai/glm-4.6",
259
+ llm_api_code: "openrouter",
260
+ context_window: 128_000,
261
+ max_output_tokens: 96_000,
262
+ cents_input: 60,
263
+ cents_output: 220,
264
+ default_reasoning: true,
265
+ has_structured_json: false,
266
+ recommended_temperature: undefined,
267
+ provider_options: provider_options_openrouter({ only: "z-ai" }),
214
268
  },
215
269
  {
216
270
  llm_model_name: "gpt-4.1",
217
271
  llm_model_code: "gpt-4.1",
218
272
  llm_api_code: "openai",
219
273
  context_window: context_window_openai({ tier1: 200_000, unrestricted: 1_000_000 }),
274
+ max_output_tokens: 32_768,
220
275
  cents_input: 200,
221
276
  cents_output: 800,
222
277
  default_reasoning: false,
@@ -229,6 +284,7 @@ export const LLM_MODEL_DETAILS = [
229
284
  llm_model_code: "gpt-4.1-mini",
230
285
  llm_api_code: "openai",
231
286
  context_window: context_window_openai({ tier1: 400_000, unrestricted: 1_000_000 }),
287
+ max_output_tokens: 32_768,
232
288
  cents_input: 40,
233
289
  cents_output: 160,
234
290
  default_reasoning: false,
@@ -241,6 +297,7 @@ export const LLM_MODEL_DETAILS = [
241
297
  llm_model_code: "gpt-4.1-nano",
242
298
  llm_api_code: "openai",
243
299
  context_window: context_window_openai({ tier1: 400_000, unrestricted: 1_000_000 }),
300
+ max_output_tokens: 32_768,
244
301
  cents_input: 10,
245
302
  cents_output: 40,
246
303
  default_reasoning: false,
@@ -253,6 +310,7 @@ export const LLM_MODEL_DETAILS = [
253
310
  llm_model_code: "gpt-5",
254
311
  llm_api_code: "openai",
255
312
  context_window: context_window_openai({ tier1: 30_000, unrestricted: 272_000 }),
313
+ max_output_tokens: 128_000,
256
314
  cents_input: 125,
257
315
  cents_output: 1000,
258
316
  default_reasoning: true,
@@ -265,6 +323,7 @@ export const LLM_MODEL_DETAILS = [
265
323
  llm_model_code: "gpt-5",
266
324
  llm_api_code: "openai",
267
325
  context_window: context_window_openai({ tier1: 30_000, unrestricted: 272_000 }),
326
+ max_output_tokens: 128_000,
268
327
  cents_input: 125,
269
328
  cents_output: 1000,
270
329
  default_reasoning: false,
@@ -277,6 +336,7 @@ export const LLM_MODEL_DETAILS = [
277
336
  llm_model_code: "gpt-5-mini",
278
337
  llm_api_code: "openai",
279
338
  context_window: context_window_openai({ tier1: 200_000, unrestricted: 272_000 }),
339
+ max_output_tokens: 128_000,
280
340
  cents_input: 25,
281
341
  cents_output: 200,
282
342
  default_reasoning: true,
@@ -289,6 +349,7 @@ export const LLM_MODEL_DETAILS = [
289
349
  llm_model_code: "gpt-5-mini",
290
350
  llm_api_code: "openai",
291
351
  context_window: context_window_openai({ tier1: 200_000, unrestricted: 272_000 }),
352
+ max_output_tokens: 128_000,
292
353
  cents_input: 25,
293
354
  cents_output: 200,
294
355
  default_reasoning: true,
@@ -301,6 +362,7 @@ export const LLM_MODEL_DETAILS = [
301
362
  llm_model_code: "gpt-5-mini",
302
363
  llm_api_code: "openai",
303
364
  context_window: context_window_openai({ tier1: 200_000, unrestricted: 272_000 }),
365
+ max_output_tokens: 128_000,
304
366
  cents_input: 25,
305
367
  cents_output: 200,
306
368
  default_reasoning: true,
@@ -313,6 +375,7 @@ export const LLM_MODEL_DETAILS = [
313
375
  llm_model_code: "gpt-5-mini",
314
376
  llm_api_code: "openai",
315
377
  context_window: context_window_openai({ tier1: 200_000, unrestricted: 272_000 }),
378
+ max_output_tokens: 128_000,
316
379
  cents_input: 25,
317
380
  cents_output: 200,
318
381
  default_reasoning: true,
@@ -325,6 +388,7 @@ export const LLM_MODEL_DETAILS = [
325
388
  llm_model_code: "gpt-5-mini",
326
389
  llm_api_code: "openai",
327
390
  context_window: context_window_openai({ tier1: 200_000, unrestricted: 272_000 }),
391
+ max_output_tokens: 128_000,
328
392
  cents_input: 25,
329
393
  cents_output: 200,
330
394
  default_reasoning: false,
@@ -337,6 +401,7 @@ export const LLM_MODEL_DETAILS = [
337
401
  llm_model_code: "gpt-5-nano",
338
402
  llm_api_code: "openai",
339
403
  context_window: context_window_openai({ tier1: 200_000, unrestricted: 272_000 }),
404
+ max_output_tokens: 128_000,
340
405
  cents_input: 5,
341
406
  cents_output: 40,
342
407
  default_reasoning: true,
@@ -349,6 +414,7 @@ export const LLM_MODEL_DETAILS = [
349
414
  llm_model_code: "gpt-5-nano",
350
415
  llm_api_code: "openai",
351
416
  context_window: context_window_openai({ tier1: 200_000, unrestricted: 272_000 }),
417
+ max_output_tokens: 128_000,
352
418
  cents_input: 5,
353
419
  cents_output: 40,
354
420
  default_reasoning: false,
@@ -361,6 +427,7 @@ export const LLM_MODEL_DETAILS = [
361
427
  llm_model_code: "openai/gpt-oss-120b",
362
428
  llm_api_code: "openrouter",
363
429
  context_window: 131_072,
430
+ max_output_tokens: 32_768,
364
431
  cents_input: 25,
365
432
  cents_output: 69,
366
433
  default_reasoning: false,
@@ -373,6 +440,7 @@ export const LLM_MODEL_DETAILS = [
373
440
  llm_model_code: "openai/gpt-oss-120b",
374
441
  llm_api_code: "openrouter",
375
442
  context_window: 131_072,
443
+ max_output_tokens: 65_536,
376
444
  cents_input: 15,
377
445
  cents_output: 75,
378
446
  default_reasoning: false,
@@ -385,6 +453,7 @@ export const LLM_MODEL_DETAILS = [
385
453
  llm_model_code: "x-ai/grok-3",
386
454
  llm_api_code: "openrouter",
387
455
  context_window: 131_072,
456
+ max_output_tokens: 131_072,
388
457
  cents_input: 300,
389
458
  cents_output: 1500,
390
459
  default_reasoning: true,
@@ -397,6 +466,7 @@ export const LLM_MODEL_DETAILS = [
397
466
  llm_model_code: "x-ai/grok-3-mini",
398
467
  llm_api_code: "openrouter",
399
468
  context_window: 131_072,
469
+ max_output_tokens: 131_072,
400
470
  cents_input: 30,
401
471
  cents_output: 50,
402
472
  default_reasoning: true,
@@ -404,23 +474,38 @@ export const LLM_MODEL_DETAILS = [
404
474
  recommended_temperature: undefined,
405
475
  provider_options: undefined,
406
476
  },
407
- // {
408
- // llm_model_name: "grok-4",
409
- // llm_model_code: "x-ai/grok-4", // BYOK required
410
- // llm_api_code: "openrouter",
411
- // context_window: 256_000,
412
- // cents_input: 300,
413
- // cents_output: 1500,
414
- // default_reasoning: true,
415
- // has_structured_json: true,
416
- // recommended_temperature: undefined,
417
- // provider_options: undefined,
418
- // },
477
+ {
478
+ llm_model_name: "grok-4",
479
+ llm_model_code: "x-ai/grok-4",
480
+ llm_api_code: "openrouter",
481
+ context_window: 256_000,
482
+ max_output_tokens: 256_000,
483
+ cents_input: 300,
484
+ cents_output: 1500,
485
+ default_reasoning: true,
486
+ has_structured_json: true,
487
+ recommended_temperature: undefined,
488
+ provider_options: undefined,
489
+ },
490
+ {
491
+ llm_model_name: "grok-4-fast",
492
+ llm_model_code: "x-ai/grok-4-fast",
493
+ llm_api_code: "openrouter",
494
+ context_window: 2_000_000,
495
+ max_output_tokens: 30_000,
496
+ cents_input: 20, // for input tokens <= 128K
497
+ cents_output: 50, // for input tokens <= 128K
498
+ default_reasoning: true,
499
+ has_structured_json: true,
500
+ recommended_temperature: undefined,
501
+ provider_options: undefined,
502
+ },
419
503
  {
420
504
  llm_model_name: "grok-code-fast-1",
421
505
  llm_model_code: "x-ai/grok-code-fast-1",
422
506
  llm_api_code: "openrouter",
423
507
  context_window: 256_000,
508
+ max_output_tokens: 10_000,
424
509
  cents_input: 20,
425
510
  cents_output: 150,
426
511
  default_reasoning: true,
@@ -433,6 +518,7 @@ export const LLM_MODEL_DETAILS = [
433
518
  llm_model_code: "moonshotai/kimi-k2",
434
519
  llm_api_code: "openrouter",
435
520
  context_window: 131_072,
521
+ max_output_tokens: 16_384,
436
522
  cents_input: 100,
437
523
  cents_output: 300,
438
524
  default_reasoning: false,
@@ -445,6 +531,7 @@ export const LLM_MODEL_DETAILS = [
445
531
  llm_model_code: "moonshotai/kimi-k2",
446
532
  llm_api_code: "openrouter",
447
533
  context_window: 131_072,
534
+ max_output_tokens: 131_072,
448
535
  cents_input: 60,
449
536
  cents_output: 250,
450
537
  default_reasoning: false,
@@ -457,6 +544,7 @@ export const LLM_MODEL_DETAILS = [
457
544
  llm_model_code: "moonshotai/kimi-k2-0905",
458
545
  llm_api_code: "openrouter",
459
546
  context_window: 262_144,
547
+ max_output_tokens: 16_384,
460
548
  cents_input: 100,
461
549
  cents_output: 300,
462
550
  default_reasoning: false,
@@ -468,7 +556,8 @@ export const LLM_MODEL_DETAILS = [
468
556
  llm_model_name: "llama-4-maverick@cerebras",
469
557
  llm_model_code: "meta-llama/llama-4-maverick",
470
558
  llm_api_code: "openrouter",
471
- context_window: 32_000,
559
+ context_window: 32_768,
560
+ max_output_tokens: 32_768,
472
561
  cents_input: 20,
473
562
  cents_output: 60,
474
563
  default_reasoning: false,
@@ -481,6 +570,7 @@ export const LLM_MODEL_DETAILS = [
481
570
  llm_model_code: "meta-llama/llama-4-scout",
482
571
  llm_api_code: "openrouter",
483
572
  context_window: 32_000,
573
+ max_output_tokens: 32_000,
484
574
  cents_input: 65,
485
575
  cents_output: 85,
486
576
  default_reasoning: false,
@@ -488,11 +578,25 @@ export const LLM_MODEL_DETAILS = [
488
578
  recommended_temperature: undefined,
489
579
  provider_options: provider_options_openrouter({ only: "cerebras" }),
490
580
  },
581
+ {
582
+ llm_model_name: "longcat-flash",
583
+ llm_model_code: "meituan/longcat-flash-chat",
584
+ llm_api_code: "openrouter",
585
+ context_window: 131_072,
586
+ max_output_tokens: 131_072,
587
+ cents_input: 15,
588
+ cents_output: 75,
589
+ default_reasoning: false,
590
+ has_structured_json: true,
591
+ recommended_temperature: undefined,
592
+ provider_options: undefined,
593
+ },
491
594
  {
492
595
  llm_model_name: "mercury",
493
596
  llm_model_code: "inception/mercury",
494
597
  llm_api_code: "openrouter",
495
- context_window: 32_000,
598
+ context_window: 128_000,
599
+ max_output_tokens: 16_384,
496
600
  cents_input: 25,
497
601
  cents_output: 100,
498
602
  default_reasoning: false,
@@ -504,7 +608,8 @@ export const LLM_MODEL_DETAILS = [
504
608
  llm_model_name: "mercury-coder",
505
609
  llm_model_code: "inception/mercury-coder-small-beta",
506
610
  llm_api_code: "openrouter",
507
- context_window: 32_000,
611
+ context_window: 128_000,
612
+ max_output_tokens: 16_384,
508
613
  cents_input: 25,
509
614
  cents_output: 100,
510
615
  default_reasoning: false,
@@ -516,7 +621,8 @@ export const LLM_MODEL_DETAILS = [
516
621
  llm_model_name: "mistral-medium-3.1",
517
622
  llm_model_code: "mistralai/mistral-medium-3.1",
518
623
  llm_api_code: "openrouter",
519
- context_window: 262_144,
624
+ context_window: 131_072,
625
+ max_output_tokens: 131_072,
520
626
  cents_input: 40,
521
627
  cents_output: 200,
522
628
  default_reasoning: false,
@@ -528,7 +634,8 @@ export const LLM_MODEL_DETAILS = [
528
634
  llm_model_name: "qwen3-235b-a22b-2507-instruct@cerebras",
529
635
  llm_model_code: "qwen/qwen3-235b-a22b-2507",
530
636
  llm_api_code: "openrouter",
531
- context_window: 262_144,
637
+ context_window: 131_072,
638
+ max_output_tokens: 131_072,
532
639
  cents_input: 60,
533
640
  cents_output: 120,
534
641
  default_reasoning: false,
@@ -540,7 +647,8 @@ export const LLM_MODEL_DETAILS = [
540
647
  llm_model_name: "qwen3-235b-a22b-2507-thinking@cerebras",
541
648
  llm_model_code: "qwen/qwen3-235b-a22b-thinking-2507",
542
649
  llm_api_code: "openrouter",
543
- context_window: 262_144,
650
+ context_window: 131_072,
651
+ max_output_tokens: 131_072,
544
652
  cents_input: 60,
545
653
  cents_output: 120,
546
654
  default_reasoning: true,
@@ -553,8 +661,9 @@ export const LLM_MODEL_DETAILS = [
553
661
  llm_model_code: "qwen/qwen3-coder",
554
662
  llm_api_code: "openrouter",
555
663
  context_window: 262_144,
556
- cents_input: 150,
557
- cents_output: 750,
664
+ max_output_tokens: 65_536,
665
+ cents_input: 150, // for input tokens <= 128K
666
+ cents_output: 750, // for input tokens <= 128K
558
667
  default_reasoning: false,
559
668
  has_structured_json: true,
560
669
  recommended_temperature: undefined,
@@ -565,6 +674,7 @@ export const LLM_MODEL_DETAILS = [
565
674
  llm_model_code: "qwen/qwen3-coder",
566
675
  llm_api_code: "openrouter",
567
676
  context_window: 131_072,
677
+ max_output_tokens: 131_072,
568
678
  cents_input: 200,
569
679
  cents_output: 200,
570
680
  default_reasoning: false,
@@ -572,6 +682,19 @@ export const LLM_MODEL_DETAILS = [
572
682
  recommended_temperature: undefined,
573
683
  provider_options: provider_options_openrouter({ only: "cerebras" }),
574
684
  },
685
+ {
686
+ llm_model_name: "qwen-plus@alibaba",
687
+ llm_model_code: "qwen/qwen-plus-2025-07-28",
688
+ llm_api_code: "openrouter",
689
+ context_window: 1_000_000,
690
+ max_output_tokens: 32_768,
691
+ cents_input: 40, // for input tokens <= 256K
692
+ cents_output: 120, // for input tokens <= 256K
693
+ default_reasoning: false,
694
+ has_structured_json: true,
695
+ recommended_temperature: undefined,
696
+ provider_options: provider_options_openrouter({ only: "alibaba" }),
697
+ },
575
698
  ];
576
699
  export function llm_model_get_details({ llm_model_names, }) {
577
700
  return LLM_MODEL_DETAILS.filter((detail) => llm_model_names.includes(detail.llm_model_name));
@@ -0,0 +1,109 @@
1
+ import { anyOf, createRegExp, digit, exactly, global, letter, oneOrMore, wordChar } from "magic-regexp";
2
+ import { ansi_yellow } from "./lib_ansi.js";
3
+ import { DASH } from "./lib_char_punctuation.js";
4
+ import { text_split_lines } from "./lib_text.js";
5
+ import { tui_confirm } from "./lib_tui_confirm.js";
6
+ import { tui_quote_smart_single } from "./lib_tui_quote.js";
7
+ const regexp_word_global = createRegExp(oneOrMore(anyOf(wordChar, exactly(DASH))), [global]);
8
+ const regexp_segment_global = createRegExp(oneOrMore(anyOf(letter, digit)), [global]);
9
+ const regexp_identifier_exactly = createRegExp(anyOf(
10
+ // Only letters (no digits)
11
+ oneOrMore(letter),
12
+ // Digits at the end
13
+ oneOrMore(letter).and(oneOrMore(digit)),
14
+ // Digits in the middle (letters before and after)
15
+ oneOrMore(letter)
16
+ .and(oneOrMore(digit))
17
+ .and(oneOrMore(letter)),
18
+ // Only digits (no letters)
19
+ oneOrMore(digit))
20
+ .at.lineStart()
21
+ .at.lineEnd());
22
+ function is_secret_line(line) {
23
+ if (line.endsWith(" # secret") || line.endsWith(" // secret")) {
24
+ return true;
25
+ }
26
+ return false;
27
+ }
28
+ const NOT_SECRET_LINE_INCLUDES = ["http://", "https://"];
29
+ function is_not_secret_line(line) {
30
+ if (line.endsWith(" not secret")) {
31
+ return true;
32
+ }
33
+ for (const not_secret_line_include of NOT_SECRET_LINE_INCLUDES) {
34
+ if (line.includes(not_secret_line_include)) {
35
+ return true;
36
+ }
37
+ }
38
+ return false;
39
+ }
40
+ const SECRET_WORD_REGEXPS = [
41
+ /^ghp_[A-Za-z0-9]{30}/, // GitHub Personal Access Token
42
+ /^glpat-[A-Za-z0-9]{20}/, // GitLab Personal Access Token
43
+ /^sk-[A-Za-z0-9-]{30}/, // Secret Key
44
+ /^sk_[A-Za-z0-9]{30}/, // Secret Key
45
+ /^sk_test_[A-Za-z0-9]{30}/, // Secret Key (test)
46
+ /^whsec_[A-Za-z0-9]{30}/, // WebHook Secret key
47
+ /^xox[a-z]-[A-Za-z0-9-]{27}/, // Slack Access Token
48
+ ];
49
+ function is_secret_word(word) {
50
+ for (const secret_word_regexp of SECRET_WORD_REGEXPS) {
51
+ if (secret_word_regexp.test(word)) {
52
+ return true;
53
+ }
54
+ }
55
+ return false;
56
+ }
57
+ async function is_secret_segment(segment, not_secret_segments, interactive) {
58
+ if (not_secret_segments.has(segment)) {
59
+ return false;
60
+ }
61
+ if (regexp_identifier_exactly.test(segment)) {
62
+ return false;
63
+ }
64
+ if (segment.length < 20) {
65
+ return false;
66
+ }
67
+ if (interactive) {
68
+ const confirmed_is_secret = await tui_confirm({
69
+ question: `Is ${tui_quote_smart_single(segment)} a secret?`,
70
+ default: false,
71
+ style_message: ansi_yellow,
72
+ });
73
+ if (!confirmed_is_secret) {
74
+ not_secret_segments.add(segment);
75
+ return false;
76
+ }
77
+ }
78
+ return true;
79
+ }
80
+ export async function secret_check({ text, interactive }) {
81
+ const not_secret_segments = new Set();
82
+ const lines = text_split_lines(text);
83
+ for (const line of lines.toReversed()) {
84
+ if (is_secret_line(line)) {
85
+ throw new Error(`Secret detected: ${tui_quote_smart_single(line.trim())}`);
86
+ }
87
+ const words = line.match(regexp_word_global);
88
+ const segments = line.match(regexp_segment_global);
89
+ if (!words || !segments) {
90
+ continue;
91
+ }
92
+ if (is_not_secret_line(line)) {
93
+ for (const segment of segments) {
94
+ not_secret_segments.add(segment);
95
+ }
96
+ continue;
97
+ }
98
+ for (const word of words) {
99
+ if (is_secret_word(word)) {
100
+ throw new Error(`Secret detected: ${tui_quote_smart_single(word)}`);
101
+ }
102
+ }
103
+ for (const segment of segments) {
104
+ if (await is_secret_segment(segment, not_secret_segments, interactive)) {
105
+ throw new Error(`Secret detected: ${tui_quote_smart_single(segment)}`);
106
+ }
107
+ }
108
+ }
109
+ }
@@ -0,0 +1,39 @@
1
+ import { LF } from "./lib_char_control.js";
2
+ import { EMPTY } from "./lib_char_empty.js";
3
+ export function text_split_lines(text) {
4
+ const lines = text.split(/\r?\n/);
5
+ if (lines.length > 0) {
6
+ const last_line = lines.at(-1);
7
+ if (last_line === EMPTY) {
8
+ return lines.slice(0, -1);
9
+ }
10
+ }
11
+ return lines;
12
+ }
13
+ export function text_join_lines(lines) {
14
+ return lines.length > 0 ? lines.join(LF) + LF : EMPTY;
15
+ }
16
+ function text_lines_matching_generic(text, pattern, remove) {
17
+ const regex = new RegExp(pattern);
18
+ const lines = text_split_lines(text);
19
+ const new_lines = [];
20
+ for (const line of lines) {
21
+ const found = regex.test(line);
22
+ if (found !== remove) {
23
+ new_lines.push(line);
24
+ }
25
+ }
26
+ return text_join_lines(new_lines);
27
+ }
28
+ export function text_lines_matching_only(text, pattern) {
29
+ return text_lines_matching_generic(text, pattern, false);
30
+ }
31
+ export function text_lines_matching_remove(text, pattern) {
32
+ return text_lines_matching_generic(text, pattern, true);
33
+ }
34
+ export function text_get_head(text, lines) {
35
+ return text_join_lines(text_split_lines(text).slice(0, lines));
36
+ }
37
+ export function text_get_tail(text, lines) {
38
+ return text_join_lines(text_split_lines(text).slice(-lines));
39
+ }
package/package.json CHANGED
@@ -1,12 +1,12 @@
1
1
  {
2
2
  "name": "@johnowennixon/diffdash",
3
- "version": "1.9.0",
3
+ "version": "1.10.0",
4
4
  "description": "A command-line tool to generate Git commit messages using AI",
5
5
  "license": "0BSD",
6
6
  "author": "John Owen Nixon",
7
7
  "repository": {
8
8
  "type": "git",
9
- "url": "https://github.com/johnowennixon/diffdash.git"
9
+ "url": "git+https://github.com/johnowennixon/diffdash.git"
10
10
  },
11
11
  "engines": {
12
12
  "node": ">=20"
@@ -19,34 +19,35 @@
19
19
  "diffdash": "dist/src/diffdash.js"
20
20
  },
21
21
  "dependencies": {
22
- "@ai-sdk/anthropic": "2.0.9",
23
- "@ai-sdk/deepseek": "1.0.13",
24
- "@ai-sdk/google": "2.0.11",
25
- "@ai-sdk/openai": "2.0.23",
26
- "@inquirer/prompts": "7.8.4",
27
- "@openrouter/ai-sdk-provider": "1.1.2",
28
- "ai": "5.0.29",
29
- "ansis": "4.1.0",
22
+ "@ai-sdk/anthropic": "2.0.23",
23
+ "@ai-sdk/deepseek": "1.0.20",
24
+ "@ai-sdk/google": "2.0.17",
25
+ "@ai-sdk/openai": "2.0.42",
26
+ "@inquirer/prompts": "7.8.6",
27
+ "@openrouter/ai-sdk-provider": "1.2.0",
28
+ "ai": "5.0.60",
29
+ "ansis": "4.2.0",
30
30
  "argparse": "2.0.1",
31
31
  "cli-table3": "0.6.5",
32
32
  "json5": "2.2.3",
33
+ "magic-regexp": "0.10.0",
33
34
  "simple-git": "3.28.0",
34
- "zod": "4.1.5"
35
+ "zod": "4.1.11"
35
36
  },
36
37
  "devDependencies": {
37
- "@biomejs/biome": "2.2.2",
38
- "@candide/tsgolint": "1.3.0",
38
+ "@biomejs/biome": "2.2.5",
39
+ "@candide/tsgolint": "1.4.0",
39
40
  "@johnowennixon/add-shebangs": "1.1.0",
40
41
  "@johnowennixon/chmodx": "2.1.0",
41
42
  "@types/argparse": "2.0.17",
42
- "@types/node": "24.3.0",
43
- "@typescript/native-preview": "7.0.0-dev.20250902.1",
44
- "knip": "5.63.0",
43
+ "@types/node": "24.5.2",
44
+ "@typescript/native-preview": "7.0.0-dev.20250925.1",
45
+ "knip": "5.63.1",
45
46
  "markdownlint-cli2": "0.18.1",
46
47
  "npm-run-all2": "8.0.4",
47
- "oxlint": "1.14.0",
48
+ "oxlint": "1.19.0",
48
49
  "rimraf": "6.0.1",
49
- "typescript": "5.9.2"
50
+ "typescript": "5.9.3"
50
51
  },
51
52
  "scripts": {
52
53
  "build": "run-s -ls build:clean build:tsc build:shebang build:chmod",