@johnowennixon/diffdash 1.8.0 → 1.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,5 +1,8 @@
1
1
  # DiffDash
2
2
 
3
+ ![npm version](https://img.shields.io/npm/v/@johnowennixon/diffdash.svg)
4
+ ![NPM License](https://img.shields.io/npm/l/@johnowennixon/diffdash)
5
+ ![Downloads](https://img.shields.io/npm/dm/@johnowennixon/diffdash.svg)
3
6
  [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/johnowennixon/diffdash)
4
7
 
5
8
  A command-line tool to generate Git commit messages using AI.
@@ -126,10 +129,10 @@ All command-line arguments are optional.
126
129
  | `--disable-preview` | disable previewing the generated message |
127
130
  | `--disable-commit` | disable committing changes - exit after generating the message |
128
131
  | `--disable-push` | disable pushing changes - exit after making the commit |
129
- | `--push-no-verify` | bypass git hooks when pushing to Git |
130
- | `--push-force` | apply force when pushing to Git |
131
132
  | `--add-prefix PREFIX` | add a prefix to the commit message summary line |
132
133
  | `--add-suffix SUFFIX` | add a suffix to the commit message summary line |
134
+ | `--no-verify` | bypass git hooks when committing or pushing to Git |
135
+ | `--force` | apply force when pushing to Git |
133
136
  | `--llm-list` | display a list of available Large Language Models and exit |
134
137
  | `--llm-compare` | compare the generated messages from all models - but do not commit |
135
138
  | `--llm-model MODEL` | choose the LLM model by name (the default is normally best) |
package/dist/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@johnowennixon/diffdash",
3
- "version": "1.8.0",
3
+ "version": "1.9.0",
4
4
  "description": "A command-line tool to generate Git commit messages using AI",
5
5
  "license": "0BSD",
6
6
  "author": "John Owen Nixon",
@@ -42,31 +42,32 @@
42
42
  "test": "run-s -ls lint build"
43
43
  },
44
44
  "dependencies": {
45
- "@ai-sdk/anthropic": "2.0.1",
46
- "@ai-sdk/deepseek": "1.0.5",
47
- "@ai-sdk/google": "2.0.4",
48
- "@ai-sdk/openai": "2.0.9",
45
+ "@ai-sdk/anthropic": "2.0.9",
46
+ "@ai-sdk/deepseek": "1.0.13",
47
+ "@ai-sdk/google": "2.0.11",
48
+ "@ai-sdk/openai": "2.0.23",
49
+ "@inquirer/prompts": "7.8.4",
49
50
  "@openrouter/ai-sdk-provider": "1.1.2",
50
- "ai": "5.0.9",
51
+ "ai": "5.0.29",
51
52
  "ansis": "4.1.0",
52
53
  "argparse": "2.0.1",
53
54
  "cli-table3": "0.6.5",
54
55
  "json5": "2.2.3",
55
56
  "simple-git": "3.28.0",
56
- "zod": "4.0.17"
57
+ "zod": "4.1.5"
57
58
  },
58
59
  "devDependencies": {
59
- "@biomejs/biome": "2.1.4",
60
+ "@biomejs/biome": "2.2.2",
60
61
  "@candide/tsgolint": "1.3.0",
61
62
  "@johnowennixon/add-shebangs": "1.1.0",
62
- "@johnowennixon/chmodx": "2.0.0",
63
+ "@johnowennixon/chmodx": "2.1.0",
63
64
  "@types/argparse": "2.0.17",
64
- "@types/node": "24.2.1",
65
- "@typescript/native-preview": "7.0.0-dev.20250811.1",
66
- "knip": "5.62.0",
65
+ "@types/node": "24.3.0",
66
+ "@typescript/native-preview": "7.0.0-dev.20250902.1",
67
+ "knip": "5.63.0",
67
68
  "markdownlint-cli2": "0.18.1",
68
69
  "npm-run-all2": "8.0.4",
69
- "oxlint": "1.11.1",
70
+ "oxlint": "1.14.0",
70
71
  "rimraf": "6.0.1",
71
72
  "typescript": "5.9.2"
72
73
  }
@@ -29,6 +29,7 @@ export const debug_channels = {
29
29
  path: false,
30
30
  kubectl: false,
31
31
  postgresql: false,
32
+ regex: false,
32
33
  rejects: false,
33
34
  retries: false,
34
35
  sql: false,
@@ -10,8 +10,8 @@ const diffdash_cli_schema = {
10
10
  disable_preview: cli_boolean({ help: "disable previewing the generated message" }),
11
11
  disable_commit: cli_boolean({ help: "disable committing changes - exit after generating the message" }),
12
12
  disable_push: cli_boolean({ help: "disable pushing changes - exit after making the commit" }),
13
- push_no_verify: cli_boolean({ help: "bypass git hooks when pushing to Git" }),
14
- push_force: cli_boolean({ help: "apply force when pushing to Git" }),
13
+ no_verify: cli_boolean({ help: "bypass git hooks when committing or pushing to Git" }),
14
+ force: cli_boolean({ help: "apply force when pushing to Git" }),
15
15
  add_prefix: cli_string({ help: "add a prefix to the commit message summary line", metavar: "PREFIX" }),
16
16
  add_suffix: cli_string({ help: "add a suffix to the commit message summary line", metavar: "SUFFIX" }),
17
17
  llm_list: cli_boolean({ help: "display a list of available Large Language Models and exit" }),
@@ -33,7 +33,7 @@ function diffdash_config_file_read(config) {
33
33
  }
34
34
  }
35
35
  export function diffdash_config_get() {
36
- const { version, auto_add, auto_commit, auto_push, disable_add, disable_commit, disable_preview, disable_status, disable_push, push_no_verify, push_force, add_prefix, add_suffix, llm_list, llm_compare, llm_model, llm_excludes, just_output, silent, debug_llm_prompts, debug_llm_inputs, debug_llm_outputs, } = diffdash_cli_parsed_args;
36
+ const { version, auto_add, auto_commit, auto_push, disable_add, disable_commit, disable_preview, disable_status, disable_push, no_verify, force, add_prefix, add_suffix, llm_list, llm_compare, llm_model, llm_excludes, just_output, silent, debug_llm_prompts, debug_llm_inputs, debug_llm_outputs, } = diffdash_cli_parsed_args;
37
37
  if (version) {
38
38
  tell_plain(`${PACKAGE_NAME} v${PACKAGE_VERSION}`);
39
39
  process.exit(0);
@@ -56,10 +56,10 @@ export function diffdash_config_get() {
56
56
  disable_preview,
57
57
  disable_status,
58
58
  disable_push,
59
- push_no_verify,
60
- push_force,
61
59
  add_prefix,
62
60
  add_suffix,
61
+ no_verify,
62
+ force,
63
63
  llm_compare,
64
64
  llm_config,
65
65
  all_llm_configs,
@@ -12,6 +12,7 @@ const model_name_options = [
12
12
  "gpt-5-mini-minimal", // fallback
13
13
  "gpt-5-nano",
14
14
  "gpt-5-nano-minimal",
15
+ "grok-code-fast-1",
15
16
  "llama-4-maverick@cerebras",
16
17
  ];
17
18
  export const diffdash_llm_model_details = llm_model_get_details({ llm_model_names: model_name_options });
@@ -1,4 +1,5 @@
1
1
  import { abort_with_error, abort_with_warning } from "./lib_abort.js";
2
+ import { ansi_blue } from "./lib_ansi.js";
2
3
  import { debug_channels, debug_inspect } from "./lib_debug.js";
3
4
  import { diffdash_add_footer, diffdash_add_prefix_or_suffix } from "./lib_diffdash_add.js";
4
5
  import { error_get_text } from "./lib_error.js";
@@ -10,8 +11,8 @@ import { git_simple_staging_create_commit, git_simple_staging_get_staged_diff, g
10
11
  import { llm_results_summary } from "./lib_llm_results.js";
11
12
  import { stdio_write_stdout, stdio_write_stdout_linefeed } from "./lib_stdio_write.js";
12
13
  import { tell_action, tell_info, tell_plain, tell_success, tell_warning } from "./lib_tell.js";
14
+ import { tui_confirm } from "./lib_tui_confirm.js";
13
15
  import { tui_justify_left } from "./lib_tui_justify.js";
14
- import { tui_readline_confirm } from "./lib_tui_readline.js";
15
16
  async function phase_open() {
16
17
  const git = await git_simple_open_git_repo();
17
18
  await git_simple_open_check_not_bare(git);
@@ -40,7 +41,11 @@ async function phase_add({ config, git }) {
40
41
  }
41
42
  }
42
43
  else {
43
- const add_confirmed = await tui_readline_confirm("No staged changes found - would you like to add all changes?");
44
+ const add_confirmed = await tui_confirm({
45
+ question: "No staged changes found - would you like to add all changes?",
46
+ default: true,
47
+ style_message: ansi_blue,
48
+ });
44
49
  if (!add_confirmed) {
45
50
  abort_with_warning("Please add changes before creating a commit");
46
51
  }
@@ -142,7 +147,7 @@ async function phase_generate({ config, git }) {
142
147
  return git_message;
143
148
  }
144
149
  async function phase_commit({ config, git, git_message, }) {
145
- const { auto_commit, disable_commit, silent } = config;
150
+ const { auto_commit, disable_commit, no_verify, silent } = config;
146
151
  if (disable_commit) {
147
152
  return;
148
153
  }
@@ -152,18 +157,22 @@ async function phase_commit({ config, git, git_message, }) {
152
157
  }
153
158
  }
154
159
  else {
155
- const commit_confirmed = await tui_readline_confirm("Do you want to commit these changes?");
160
+ const commit_confirmed = await tui_confirm({
161
+ question: "Do you want to commit these changes?",
162
+ default: true,
163
+ style_message: ansi_blue,
164
+ });
156
165
  if (!commit_confirmed) {
157
166
  abort_with_warning("Commit cancelled by user");
158
167
  }
159
168
  }
160
- await git_simple_staging_create_commit(git, git_message);
169
+ await git_simple_staging_create_commit({ git, git_message, no_verify });
161
170
  if (!silent) {
162
171
  tell_success("Changes committed successfully");
163
172
  }
164
173
  }
165
174
  async function phase_push({ config, git }) {
166
- const { auto_push, disable_commit, disable_push, push_no_verify, push_force, silent } = config;
175
+ const { auto_push, disable_commit, disable_push, no_verify, force, silent } = config;
167
176
  if (disable_push || disable_commit) {
168
177
  return;
169
178
  }
@@ -173,13 +182,17 @@ async function phase_push({ config, git }) {
173
182
  }
174
183
  }
175
184
  else {
176
- const push_confirmed = await tui_readline_confirm("Do you want to push these changes?");
185
+ const push_confirmed = await tui_confirm({
186
+ question: "Do you want to push these changes?",
187
+ default: true,
188
+ style_message: ansi_blue,
189
+ });
177
190
  if (!push_confirmed) {
178
191
  return;
179
192
  }
180
193
  }
181
194
  try {
182
- await git_simple_staging_push_to_remote({ git, no_verify: push_no_verify, force: push_force });
195
+ await git_simple_staging_push_to_remote({ git, no_verify, force });
183
196
  }
184
197
  catch (error) {
185
198
  abort_with_error(`Failed to push to remote: ${error_get_text(error)}`);
@@ -1,9 +1,9 @@
1
1
  import { Duration } from "./lib_duration.js";
2
2
  import { error_get_text } from "./lib_error.js";
3
- import { git_message_get_system_prompt, git_message_get_user_prompt } from "./lib_git_message_prompt.js";
3
+ import { git_message_prompt_get_system, git_message_prompt_get_user } from "./lib_git_message_prompt.js";
4
4
  import { git_message_schema, git_message_schema_format } from "./lib_git_message_schema.js";
5
5
  import { llm_chat_generate_object, llm_chat_generate_text } from "./lib_llm_chat.js";
6
- import { llm_tokens_count_estimated, llm_tokens_debug_usage } from "./lib_llm_tokens.js";
6
+ import { llm_tokens_debug_usage, llm_tokens_estimate_length_from_tokens, llm_tokens_estimate_tokens_from_length, } from "./lib_llm_tokens.js";
7
7
  async function git_message_generate_unstructured({ llm_config, system_prompt, user_prompt, }) {
8
8
  const outputs = await llm_chat_generate_text({ llm_config, system_prompt, user_prompt });
9
9
  return outputs;
@@ -20,12 +20,12 @@ async function git_message_generate_structured({ llm_config, system_prompt, user
20
20
  return { generated_text, reasoning_text: undefined, total_usage, provider_metadata };
21
21
  }
22
22
  export async function git_message_generate_string({ llm_config, inputs, }) {
23
- const { context_window, has_structured_json } = llm_config.llm_model_detail;
24
- const system_prompt = git_message_get_system_prompt({ has_structured_json, inputs });
25
- // Estimate remaining prompt length
26
- const user_tokens = context_window - llm_tokens_count_estimated({ llm_config, text: system_prompt }) - 1000;
27
- const user_length = user_tokens * 3;
28
- const user_prompt = git_message_get_user_prompt({
23
+ const { effective_context_window } = llm_config;
24
+ const { has_structured_json } = llm_config.llm_model_detail;
25
+ const system_prompt = git_message_prompt_get_system({ has_structured_json, inputs });
26
+ const user_tokens = effective_context_window - llm_tokens_estimate_tokens_from_length({ llm_config, length: system_prompt.length }) - 1000;
27
+ const user_length = llm_tokens_estimate_length_from_tokens({ llm_config, tokens: user_tokens });
28
+ const user_prompt = git_message_prompt_get_user({
29
29
  has_structured_json,
30
30
  inputs,
31
31
  max_length: user_length,
@@ -45,13 +45,13 @@ A simple change needs only two additional sentences scaling up to a complex chan
45
45
  If there are a lot of changes, you will need to summarize even more.
46
46
  `.trim() + LF_LF;
47
47
  const portion_extra = (extra_prompts) => {
48
- return extra_prompts && extra_prompts.length > 0 ? extra_prompts.map((s) => s.trim).join(LF) + LF_LF : EMPTY;
48
+ return extra_prompts && extra_prompts.length > 0 ? extra_prompts.map((s) => s.trim()).join(LF) + LF_LF : EMPTY;
49
49
  };
50
50
  const portion_final = `
51
51
  Everything you write will be checked for validity and then saved directly to Git - it will not be reviewed by a human.
52
52
  Therefore, you must just output the Git message itself without any introductory or concluding sections.
53
53
  `.trim() + LF_LF;
54
- export function git_message_get_system_prompt({ has_structured_json, inputs, }) {
54
+ export function git_message_prompt_get_system({ has_structured_json, inputs, }) {
55
55
  let system_prompt = EMPTY;
56
56
  system_prompt += portion_role;
57
57
  system_prompt += portion_inputs;
@@ -62,7 +62,7 @@ export function git_message_get_system_prompt({ has_structured_json, inputs, })
62
62
  system_prompt += portion_final;
63
63
  return system_prompt.trim();
64
64
  }
65
- export function git_message_get_user_prompt({ has_structured_json, inputs, max_length, }) {
65
+ export function git_message_prompt_get_user({ has_structured_json, inputs, max_length, }) {
66
66
  const { diffstat, diff } = inputs;
67
67
  const truncate = diffstat.length + diff.length > max_length;
68
68
  const diff_truncated = truncate ? diff.slice(0, max_length - diffstat.length) + LF : diff;
@@ -26,16 +26,21 @@ export async function git_simple_staging_get_staged_diffstat(git) {
26
26
  export async function git_simple_staging_get_staged_diff(git) {
27
27
  return await git.diff(["--cached"]);
28
28
  }
29
- export async function git_simple_staging_create_commit(git, git_message) {
30
- await git.commit(git_message);
29
+ export async function git_simple_staging_create_commit({ git, git_message, no_verify = false, }) {
30
+ const options = {};
31
+ if (no_verify) {
32
+ options["--no-verify"] = null;
33
+ }
34
+ await git.commit(git_message, options);
31
35
  }
32
36
  export async function git_simple_staging_push_to_remote({ git, no_verify = false, force = false, }) {
33
- const push_args = ["--follow-tags"];
37
+ const options = {};
38
+ options["--follow-tags"] = null;
34
39
  if (no_verify) {
35
- push_args.push("--no-verify");
40
+ options["--no-verify"] = null;
36
41
  }
37
42
  if (force) {
38
- push_args.push("--force");
43
+ options["--force"] = null;
39
44
  }
40
- await git.push(push_args);
45
+ await git.push(options);
41
46
  }
@@ -1,10 +1,17 @@
1
+ import { env_get } from "./lib_env.js";
1
2
  import { llm_access_available, llm_access_get } from "./lib_llm_access.js";
2
3
  import { llm_model_find_detail, llm_model_get_choices } from "./lib_llm_model.js";
4
+ import { parse_int_or_undefined } from "./lib_parse_number.js";
3
5
  export function llm_config_get({ llm_model_details, llm_model_name, }) {
4
6
  const llm_model_detail = llm_model_find_detail({ llm_model_details, llm_model_name });
5
7
  const access = llm_access_get({ llm_model_details, llm_model_name });
6
8
  const { llm_model_code, llm_api_code, llm_api_key } = access;
7
- return { llm_model_name, llm_model_detail, llm_model_code, llm_api_code, llm_api_key };
9
+ let effective_context_window = llm_model_detail.context_window;
10
+ const env_context_window = parse_int_or_undefined(env_get("LLM_CONFIG_CONTEXT_WINDOW"));
11
+ if (env_context_window !== undefined) {
12
+ effective_context_window = Math.min(effective_context_window, env_context_window);
13
+ }
14
+ return { llm_model_name, llm_model_detail, llm_model_code, llm_api_code, llm_api_key, effective_context_window };
8
15
  }
9
16
  export function llm_config_get_all({ llm_model_details, llm_include, llm_excludes, }) {
10
17
  const choices = llm_model_get_choices({ llm_model_details });
@@ -5,7 +5,7 @@ import { TuiTable } from "./lib_tui_table.js";
5
5
  export function llm_list_models({ llm_model_details }) {
6
6
  const headings = ["NAME", "API", "CONTEXT", "INPUT", "OUTPUT", "REASONING"];
7
7
  const alignments = ["left", "left", "right", "right", "right", "left"];
8
- const table = new TuiTable({ headings, alignments });
8
+ const table = new TuiTable({ headings, alignments, compact: true });
9
9
  for (const detail of llm_model_details) {
10
10
  const { llm_model_name, llm_api_code, context_window, cents_input, cents_output, default_reasoning } = detail;
11
11
  const tui_name = llm_model_name;
@@ -1,4 +1,8 @@
1
1
  import { abort_with_error } from "./lib_abort.js";
2
+ import { enabled_from_env } from "./lib_enabled.js";
3
+ function context_window_openai({ tier1, unrestricted }) {
4
+ return enabled_from_env("LLM_MODEL_OPENAI_UNRESTRICTED") ? unrestricted : tier1;
5
+ }
2
6
  function provider_options_anthropic({ thinking }) {
3
7
  return thinking
4
8
  ? {
@@ -71,7 +75,7 @@ export const LLM_MODEL_DETAILS = [
71
75
  context_window: 200_000,
72
76
  cents_input: 300,
73
77
  cents_output: 1500,
74
- default_reasoning: false,
78
+ default_reasoning: true,
75
79
  has_structured_json: true,
76
80
  recommended_temperature: undefined,
77
81
  provider_options: provider_options_anthropic({ thinking: true }),
@@ -92,9 +96,9 @@ export const LLM_MODEL_DETAILS = [
92
96
  llm_model_name: "deepseek-chat",
93
97
  llm_model_code: "deepseek-chat",
94
98
  llm_api_code: "deepseek",
95
- context_window: 64_000,
96
- cents_input: 27,
97
- cents_output: 110,
99
+ context_window: 128_000,
100
+ cents_input: 56,
101
+ cents_output: 168,
98
102
  default_reasoning: false,
99
103
  has_structured_json: false,
100
104
  recommended_temperature: undefined,
@@ -104,11 +108,11 @@ export const LLM_MODEL_DETAILS = [
104
108
  llm_model_name: "deepseek-reasoner",
105
109
  llm_model_code: "deepseek-reasoner",
106
110
  llm_api_code: "deepseek",
107
- context_window: 163_840,
108
- cents_input: 55,
109
- cents_output: 219,
111
+ context_window: 128_000,
112
+ cents_input: 56,
113
+ cents_output: 168,
110
114
  default_reasoning: true,
111
- has_structured_json: true,
115
+ has_structured_json: false,
112
116
  recommended_temperature: undefined,
113
117
  provider_options: undefined,
114
118
  },
@@ -212,7 +216,7 @@ export const LLM_MODEL_DETAILS = [
212
216
  llm_model_name: "gpt-4.1",
213
217
  llm_model_code: "gpt-4.1",
214
218
  llm_api_code: "openai",
215
- context_window: 1_047_576,
219
+ context_window: context_window_openai({ tier1: 200_000, unrestricted: 1_000_000 }),
216
220
  cents_input: 200,
217
221
  cents_output: 800,
218
222
  default_reasoning: false,
@@ -224,7 +228,7 @@ export const LLM_MODEL_DETAILS = [
224
228
  llm_model_name: "gpt-4.1-mini",
225
229
  llm_model_code: "gpt-4.1-mini",
226
230
  llm_api_code: "openai",
227
- context_window: 1_047_576,
231
+ context_window: context_window_openai({ tier1: 400_000, unrestricted: 1_000_000 }),
228
232
  cents_input: 40,
229
233
  cents_output: 160,
230
234
  default_reasoning: false,
@@ -236,7 +240,7 @@ export const LLM_MODEL_DETAILS = [
236
240
  llm_model_name: "gpt-4.1-nano",
237
241
  llm_model_code: "gpt-4.1-nano",
238
242
  llm_api_code: "openai",
239
- context_window: 1_047_576,
243
+ context_window: context_window_openai({ tier1: 400_000, unrestricted: 1_000_000 }),
240
244
  cents_input: 10,
241
245
  cents_output: 40,
242
246
  default_reasoning: false,
@@ -248,7 +252,7 @@ export const LLM_MODEL_DETAILS = [
248
252
  llm_model_name: "gpt-5",
249
253
  llm_model_code: "gpt-5",
250
254
  llm_api_code: "openai",
251
- context_window: 400_000,
255
+ context_window: context_window_openai({ tier1: 30_000, unrestricted: 272_000 }),
252
256
  cents_input: 125,
253
257
  cents_output: 1000,
254
258
  default_reasoning: true,
@@ -260,7 +264,7 @@ export const LLM_MODEL_DETAILS = [
260
264
  llm_model_name: "gpt-5-minimal",
261
265
  llm_model_code: "gpt-5",
262
266
  llm_api_code: "openai",
263
- context_window: 400_000,
267
+ context_window: context_window_openai({ tier1: 30_000, unrestricted: 272_000 }),
264
268
  cents_input: 125,
265
269
  cents_output: 1000,
266
270
  default_reasoning: false,
@@ -272,7 +276,7 @@ export const LLM_MODEL_DETAILS = [
272
276
  llm_model_name: "gpt-5-mini",
273
277
  llm_model_code: "gpt-5-mini",
274
278
  llm_api_code: "openai",
275
- context_window: 400_000,
279
+ context_window: context_window_openai({ tier1: 200_000, unrestricted: 272_000 }),
276
280
  cents_input: 25,
277
281
  cents_output: 200,
278
282
  default_reasoning: true,
@@ -284,7 +288,7 @@ export const LLM_MODEL_DETAILS = [
284
288
  llm_model_name: "gpt-5-mini-high",
285
289
  llm_model_code: "gpt-5-mini",
286
290
  llm_api_code: "openai",
287
- context_window: 400_000,
291
+ context_window: context_window_openai({ tier1: 200_000, unrestricted: 272_000 }),
288
292
  cents_input: 25,
289
293
  cents_output: 200,
290
294
  default_reasoning: true,
@@ -296,7 +300,7 @@ export const LLM_MODEL_DETAILS = [
296
300
  llm_model_name: "gpt-5-mini-low",
297
301
  llm_model_code: "gpt-5-mini",
298
302
  llm_api_code: "openai",
299
- context_window: 400_000,
303
+ context_window: context_window_openai({ tier1: 200_000, unrestricted: 272_000 }),
300
304
  cents_input: 25,
301
305
  cents_output: 200,
302
306
  default_reasoning: true,
@@ -308,7 +312,7 @@ export const LLM_MODEL_DETAILS = [
308
312
  llm_model_name: "gpt-5-mini-medium",
309
313
  llm_model_code: "gpt-5-mini",
310
314
  llm_api_code: "openai",
311
- context_window: 400_000,
315
+ context_window: context_window_openai({ tier1: 200_000, unrestricted: 272_000 }),
312
316
  cents_input: 25,
313
317
  cents_output: 200,
314
318
  default_reasoning: true,
@@ -320,7 +324,7 @@ export const LLM_MODEL_DETAILS = [
320
324
  llm_model_name: "gpt-5-mini-minimal",
321
325
  llm_model_code: "gpt-5-mini",
322
326
  llm_api_code: "openai",
323
- context_window: 400_000,
327
+ context_window: context_window_openai({ tier1: 200_000, unrestricted: 272_000 }),
324
328
  cents_input: 25,
325
329
  cents_output: 200,
326
330
  default_reasoning: false,
@@ -332,7 +336,7 @@ export const LLM_MODEL_DETAILS = [
332
336
  llm_model_name: "gpt-5-nano",
333
337
  llm_model_code: "gpt-5-nano",
334
338
  llm_api_code: "openai",
335
- context_window: 400_000,
339
+ context_window: context_window_openai({ tier1: 200_000, unrestricted: 272_000 }),
336
340
  cents_input: 5,
337
341
  cents_output: 40,
338
342
  default_reasoning: true,
@@ -344,7 +348,7 @@ export const LLM_MODEL_DETAILS = [
344
348
  llm_model_name: "gpt-5-nano-minimal",
345
349
  llm_model_code: "gpt-5-nano",
346
350
  llm_api_code: "openai",
347
- context_window: 400_000,
351
+ context_window: context_window_openai({ tier1: 200_000, unrestricted: 272_000 }),
348
352
  cents_input: 5,
349
353
  cents_output: 40,
350
354
  default_reasoning: false,
@@ -413,7 +417,19 @@ export const LLM_MODEL_DETAILS = [
413
417
  // provider_options: undefined,
414
418
  // },
415
419
  {
416
- llm_model_name: "kimi-k2@groq",
420
+ llm_model_name: "grok-code-fast-1",
421
+ llm_model_code: "x-ai/grok-code-fast-1",
422
+ llm_api_code: "openrouter",
423
+ context_window: 256_000,
424
+ cents_input: 20,
425
+ cents_output: 150,
426
+ default_reasoning: true,
427
+ has_structured_json: true,
428
+ recommended_temperature: undefined,
429
+ provider_options: undefined,
430
+ },
431
+ {
432
+ llm_model_name: "kimi-k2-0711@groq",
417
433
  llm_model_code: "moonshotai/kimi-k2",
418
434
  llm_api_code: "openrouter",
419
435
  context_window: 131_072,
@@ -425,7 +441,7 @@ export const LLM_MODEL_DETAILS = [
425
441
  provider_options: provider_options_openrouter({ only: "groq" }),
426
442
  },
427
443
  {
428
- llm_model_name: "kimi-k2@moonshotai",
444
+ llm_model_name: "kimi-k2-0711@moonshotai",
429
445
  llm_model_code: "moonshotai/kimi-k2",
430
446
  llm_api_code: "openrouter",
431
447
  context_window: 131_072,
@@ -436,6 +452,18 @@ export const LLM_MODEL_DETAILS = [
436
452
  recommended_temperature: undefined,
437
453
  provider_options: provider_options_openrouter({ only: "moonshotai" }),
438
454
  },
455
+ {
456
+ llm_model_name: "kimi-k2-0905@groq",
457
+ llm_model_code: "moonshotai/kimi-k2-0905",
458
+ llm_api_code: "openrouter",
459
+ context_window: 262_144,
460
+ cents_input: 100,
461
+ cents_output: 300,
462
+ default_reasoning: false,
463
+ has_structured_json: false,
464
+ recommended_temperature: undefined,
465
+ provider_options: provider_options_openrouter({ only: "groq" }),
466
+ },
439
467
  {
440
468
  llm_model_name: "llama-4-maverick@cerebras",
441
469
  llm_model_code: "meta-llama/llama-4-maverick",
@@ -485,10 +513,10 @@ export const LLM_MODEL_DETAILS = [
485
513
  provider_options: undefined,
486
514
  },
487
515
  {
488
- llm_model_name: "mistral-medium-3",
489
- llm_model_code: "mistralai/mistral-medium-3",
516
+ llm_model_name: "mistral-medium-3.1",
517
+ llm_model_code: "mistralai/mistral-medium-3.1",
490
518
  llm_api_code: "openrouter",
491
- context_window: 131_072,
519
+ context_window: 262_144,
492
520
  cents_input: 40,
493
521
  cents_output: 200,
494
522
  default_reasoning: false,
@@ -1,4 +1,4 @@
1
- import { SPACE } from "./lib_char_punctuation.js";
1
+ import { QUESTION, SPACE } from "./lib_char_punctuation.js";
2
2
  import { tell_action, tell_info, tell_warning } from "./lib_tell.js";
3
3
  import { tui_justify_left } from "./lib_tui_justify.js";
4
4
  import { tui_none_blank } from "./lib_tui_none.js";
@@ -22,7 +22,8 @@ export function llm_results_summary(all_results) {
22
22
  if (error_text !== null) {
23
23
  continue;
24
24
  }
25
- const { llm_model_name } = llm_config;
25
+ const { llm_model_name, llm_model_detail } = llm_config;
26
+ const { default_reasoning } = llm_model_detail;
26
27
  const { outputs } = result;
27
28
  const { total_usage, provider_metadata } = outputs;
28
29
  const openrouter_provider = provider_metadata?.["openrouter"]?.["provider"];
@@ -30,14 +31,16 @@ export function llm_results_summary(all_results) {
30
31
  const tui_seconds = tui_number_plain({ num: seconds, justify_left: 3 });
31
32
  const tui_input = tui_number_plain({ num: total_usage.inputTokens, justify_left: 5 });
32
33
  const tui_output = tui_number_plain({ num: total_usage.outputTokens, justify_left: 5 });
33
- const tui_reasoning = tui_number_plain({ num: total_usage.reasoningTokens, justify_left: 5 });
34
+ const tui_reasoning = tui_number_plain({ num: total_usage.reasoningTokens, justify_left: 5, none: QUESTION });
34
35
  const tui_provider = tui_none_blank(openrouter_provider);
35
36
  const segments = [];
36
37
  segments.push(tui_model);
37
38
  segments.push(`seconds=${tui_seconds}`);
38
39
  segments.push(`input=${tui_input}`);
39
40
  segments.push(`output=${tui_output}`);
40
- segments.push(`reasoning=${tui_reasoning}`);
41
+ if (default_reasoning || total_usage.reasoningTokens !== undefined) {
42
+ segments.push(`reasoning=${tui_reasoning}`);
43
+ }
41
44
  if (openrouter_provider) {
42
45
  segments.push(`provider=${tui_provider}`);
43
46
  }
@@ -1,13 +1,16 @@
1
1
  import { debug_channels } from "./lib_debug.js";
2
2
  import { tell_debug } from "./lib_tell.js";
3
- export function llm_tokens_count_estimated({ text }) {
4
- return Math.round(text.length / 3);
3
+ export function llm_tokens_estimate_tokens_from_length({ length }) {
4
+ return Math.round(length / 1.4);
5
+ }
6
+ export function llm_tokens_estimate_length_from_tokens({ tokens }) {
7
+ return Math.round(tokens * 1.4);
5
8
  }
6
9
  export function llm_tokens_debug_usage({ name, llm_config, text, }) {
7
10
  if (debug_channels.llm_tokens) {
8
11
  const { llm_model_name } = llm_config;
9
12
  const length = text.length;
10
- const tokens = llm_tokens_count_estimated({ llm_config, text });
13
+ const tokens = llm_tokens_estimate_tokens_from_length({ llm_config, length });
11
14
  const ratio = Math.round((length / tokens) * 100) / 100;
12
15
  tell_debug(`${name}: length=${length}, tokens=${tokens}, ratio=${ratio}, model=${llm_model_name}`);
13
16
  }
@@ -6,8 +6,8 @@ export function parse_float(input) {
6
6
  return Number.parseFloat(input);
7
7
  }
8
8
  export function parse_int_or_undefined(input) {
9
- return input === undefined || input === EMPTY ? undefined : parse_int(input);
9
+ return input === undefined || input === null || input === EMPTY ? undefined : parse_int(input);
10
10
  }
11
11
  export function parse_float_or_undefined(input) {
12
- return input === undefined || input === EMPTY ? undefined : parse_float(input);
12
+ return input === undefined || input === null || input === EMPTY ? undefined : parse_float(input);
13
13
  }
@@ -0,0 +1,25 @@
1
+ import { input as inquirer_input } from "@inquirer/prompts";
2
+ export async function tui_confirm({ question, default: default_value, style_message, }) {
3
+ const result = await inquirer_input({
4
+ message: question,
5
+ default: default_value === undefined ? undefined : default_value ? "Yes" : "No",
6
+ validate: (text) => {
7
+ const cleaned = text.trim().toLowerCase();
8
+ if (cleaned === "y" || cleaned === "yes" || cleaned === "n" || cleaned === "no") {
9
+ return true;
10
+ }
11
+ return "Please enter Yes or No";
12
+ },
13
+ transformer: (text, { isFinal: is_final }) => {
14
+ const cleaned = text.trim().toLowerCase();
15
+ return is_final ? (cleaned === "y" || cleaned === "yes" ? "Yes" : "No") : text;
16
+ },
17
+ theme: {
18
+ prefix: { idle: undefined, done: undefined },
19
+ style: {
20
+ message: style_message,
21
+ },
22
+ },
23
+ });
24
+ return result.trim().toLowerCase() === "y" || result.trim().toLowerCase() === "yes";
25
+ }
@@ -1,15 +1,12 @@
1
1
  import { EMPTY } from "./lib_char_empty.js";
2
2
  import { DASH } from "./lib_char_punctuation.js";
3
3
  // eslint-disable-next-line sonarjs/use-type-alias
4
- function tui_none_generic(str, replacement) {
5
- if (str === undefined || str === null || str === EMPTY) {
6
- return replacement;
7
- }
8
- return str.toString();
4
+ function tui_none_generic({ str, none }) {
5
+ return str === undefined || str === null || str === EMPTY ? none : str.toString();
9
6
  }
10
7
  export function tui_none_blank(str) {
11
- return tui_none_generic(str, EMPTY);
8
+ return tui_none_generic({ str, none: EMPTY });
12
9
  }
13
10
  export function tui_none_dash(str) {
14
- return tui_none_generic(str, DASH);
11
+ return tui_none_generic({ str, none: DASH });
15
12
  }
@@ -1,8 +1,8 @@
1
1
  import { EMPTY } from "./lib_char_empty.js";
2
2
  import { DOT } from "./lib_char_punctuation.js";
3
3
  import { tui_justify_left, tui_justify_zero } from "./lib_tui_justify.js";
4
- export function tui_number_plain({ num, justify_left, justify_right, }) {
5
- let str = num === null || num === undefined ? EMPTY : num.toString();
4
+ export function tui_number_plain({ num, none = EMPTY, justify_left, justify_right, }) {
5
+ let str = num === null || num === undefined ? none : num.toString();
6
6
  if (justify_left !== undefined) {
7
7
  str = tui_justify_left(justify_left, str);
8
8
  }
@@ -4,8 +4,8 @@ import { ansi_bold } from "./lib_ansi.js";
4
4
  export class TuiTable {
5
5
  table;
6
6
  columns_total;
7
- constructor({ headings, alignments }) {
8
- const constructor_options = { style: { head: [] } };
7
+ constructor({ headings, alignments, compact, }) {
8
+ const constructor_options = { style: { head: [], compact } };
9
9
  constructor_options.head = headings.map((heading) => ansi_bold(heading));
10
10
  this.columns_total = headings.length;
11
11
  if (alignments) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@johnowennixon/diffdash",
3
- "version": "1.8.0",
3
+ "version": "1.9.0",
4
4
  "description": "A command-line tool to generate Git commit messages using AI",
5
5
  "license": "0BSD",
6
6
  "author": "John Owen Nixon",
@@ -19,31 +19,32 @@
19
19
  "diffdash": "dist/src/diffdash.js"
20
20
  },
21
21
  "dependencies": {
22
- "@ai-sdk/anthropic": "2.0.1",
23
- "@ai-sdk/deepseek": "1.0.5",
24
- "@ai-sdk/google": "2.0.4",
25
- "@ai-sdk/openai": "2.0.9",
22
+ "@ai-sdk/anthropic": "2.0.9",
23
+ "@ai-sdk/deepseek": "1.0.13",
24
+ "@ai-sdk/google": "2.0.11",
25
+ "@ai-sdk/openai": "2.0.23",
26
+ "@inquirer/prompts": "7.8.4",
26
27
  "@openrouter/ai-sdk-provider": "1.1.2",
27
- "ai": "5.0.9",
28
+ "ai": "5.0.29",
28
29
  "ansis": "4.1.0",
29
30
  "argparse": "2.0.1",
30
31
  "cli-table3": "0.6.5",
31
32
  "json5": "2.2.3",
32
33
  "simple-git": "3.28.0",
33
- "zod": "4.0.17"
34
+ "zod": "4.1.5"
34
35
  },
35
36
  "devDependencies": {
36
- "@biomejs/biome": "2.1.4",
37
+ "@biomejs/biome": "2.2.2",
37
38
  "@candide/tsgolint": "1.3.0",
38
39
  "@johnowennixon/add-shebangs": "1.1.0",
39
- "@johnowennixon/chmodx": "2.0.0",
40
+ "@johnowennixon/chmodx": "2.1.0",
40
41
  "@types/argparse": "2.0.17",
41
- "@types/node": "24.2.1",
42
- "@typescript/native-preview": "7.0.0-dev.20250811.1",
43
- "knip": "5.62.0",
42
+ "@types/node": "24.3.0",
43
+ "@typescript/native-preview": "7.0.0-dev.20250902.1",
44
+ "knip": "5.63.0",
44
45
  "markdownlint-cli2": "0.18.1",
45
46
  "npm-run-all2": "8.0.4",
46
- "oxlint": "1.11.1",
47
+ "oxlint": "1.14.0",
47
48
  "rimraf": "6.0.1",
48
49
  "typescript": "5.9.2"
49
50
  },
@@ -1,16 +0,0 @@
1
- import { createInterface } from "node:readline";
2
- import { ansi_blue, ansi_bold } from "./lib_ansi.js";
3
- export async function tui_readline_confirm(message) {
4
- const query = ansi_bold(ansi_blue(`${message} [Y/n] `));
5
- const rl = createInterface({
6
- input: process.stdin,
7
- output: process.stdout,
8
- });
9
- return new Promise((resolve) => {
10
- rl.question(query, (answer) => {
11
- rl.close();
12
- const normalized_answer = answer.trim().toLowerCase();
13
- resolve(normalized_answer === "" || normalized_answer === "y" || normalized_answer === "yes");
14
- });
15
- });
16
- }