@johnowennixon/diffdash 1.13.0 → 1.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@johnowennixon/diffdash",
3
- "version": "1.13.0",
3
+ "version": "1.14.0",
4
4
  "description": "A command-line tool to generate Git commit messages using AI",
5
5
  "license": "0BSD",
6
6
  "author": "John Owen Nixon",
@@ -31,29 +31,36 @@ function provider_options_openai({ reasoning_effort, }) {
31
31
  },
32
32
  };
33
33
  }
34
- function provider_options_openrouter({ only }) {
35
- return {
36
- openrouter: {
37
- provider: {
38
- only: [only],
34
+ function provider_options_openrouter({ only, thinking, }) {
35
+ if (only !== undefined && thinking !== undefined) {
36
+ return {
37
+ openrouter: {
38
+ provider: {
39
+ only: [only],
40
+ },
41
+ thinking,
39
42
  },
40
- },
41
- };
43
+ };
44
+ }
45
+ if (thinking !== undefined) {
46
+ return {
47
+ openrouter: {
48
+ thinking,
49
+ },
50
+ };
51
+ }
52
+ if (only !== undefined) {
53
+ return {
54
+ openrouter: {
55
+ provider: {
56
+ only: [only],
57
+ },
58
+ },
59
+ };
60
+ }
61
+ return undefined;
42
62
  }
43
63
  export const LLM_MODEL_DETAILS = [
44
- {
45
- llm_model_name: "claude-3.5-haiku",
46
- llm_model_code: "claude-3-5-haiku-latest",
47
- llm_api_code: "anthropic",
48
- context_window: 200_000,
49
- max_output_tokens: 8192,
50
- cents_input: 80,
51
- cents_output: 400,
52
- default_reasoning: false,
53
- has_structured_json: true,
54
- recommended_temperature: undefined,
55
- provider_options: provider_options_anthropic({ thinking: false }),
56
- },
57
64
  {
58
65
  llm_model_name: "claude-haiku-4.5",
59
66
  llm_model_code: "claude-haiku-4-5",
@@ -67,40 +74,14 @@ export const LLM_MODEL_DETAILS = [
67
74
  recommended_temperature: undefined,
68
75
  provider_options: provider_options_anthropic({ thinking: false }),
69
76
  },
70
- {
71
- llm_model_name: "claude-opus-4.5",
72
- llm_model_code: "claude-opus-4-5",
73
- llm_api_code: "anthropic",
74
- context_window: 200_000,
75
- max_output_tokens: 64_000,
76
- cents_input: 300, // for input tokens <= 200K
77
- cents_output: 1500, // for input tokens <= 200K
78
- default_reasoning: false,
79
- has_structured_json: true,
80
- recommended_temperature: undefined,
81
- provider_options: provider_options_anthropic({ thinking: false }),
82
- },
83
- {
84
- llm_model_name: "claude-opus-4.5-thinking",
85
- llm_model_code: "claude-opus-4-5",
86
- llm_api_code: "anthropic",
87
- context_window: 200_000,
88
- max_output_tokens: 64_000 - 1024,
89
- cents_input: 300, // for input tokens <= 200K
90
- cents_output: 1500, // for input tokens <= 200K
91
- default_reasoning: false,
92
- has_structured_json: true,
93
- recommended_temperature: undefined,
94
- provider_options: provider_options_anthropic({ thinking: true }),
95
- },
96
77
  {
97
78
  llm_model_name: "claude-opus-4.6",
98
79
  llm_model_code: "claude-opus-4-6",
99
80
  llm_api_code: "anthropic",
100
- context_window: 200_000,
101
- max_output_tokens: 64_000,
102
- cents_input: 300, // for input tokens <= 200K
103
- cents_output: 1500, // for input tokens <= 200K
81
+ context_window: 200_000, // 1_000_000 available with context-1m beta header
82
+ max_output_tokens: 128_000,
83
+ cents_input: 500, // for input tokens <= 200K
84
+ cents_output: 2500, // for input tokens <= 200K
104
85
  default_reasoning: false,
105
86
  has_structured_json: true,
106
87
  recommended_temperature: undefined,
@@ -110,44 +91,18 @@ export const LLM_MODEL_DETAILS = [
110
91
  llm_model_name: "claude-opus-4.6-thinking",
111
92
  llm_model_code: "claude-opus-4-6",
112
93
  llm_api_code: "anthropic",
113
- context_window: 200_000,
114
- max_output_tokens: 64_000 - 1024,
115
- cents_input: 300, // for input tokens <= 200K
116
- cents_output: 1500, // for input tokens <= 200K
117
- default_reasoning: false,
118
- has_structured_json: true,
119
- recommended_temperature: undefined,
120
- provider_options: provider_options_anthropic({ thinking: true }),
121
- },
122
- {
123
- llm_model_name: "claude-sonnet-4",
124
- llm_model_code: "claude-sonnet-4-0",
125
- llm_api_code: "anthropic",
126
- context_window: 200_000,
127
- max_output_tokens: 64_000,
128
- cents_input: 300,
129
- cents_output: 1500,
130
- default_reasoning: false,
131
- has_structured_json: true,
132
- recommended_temperature: undefined,
133
- provider_options: provider_options_anthropic({ thinking: false }),
134
- },
135
- {
136
- llm_model_name: "claude-sonnet-4-thinking",
137
- llm_model_code: "claude-sonnet-4-0",
138
- llm_api_code: "anthropic",
139
- context_window: 200_000,
140
- max_output_tokens: 62_976, // = 64000 - 1024 used for reasoning
141
- cents_input: 300,
142
- cents_output: 1500,
94
+ context_window: 200_000, // 1_000_000 available with context-1m beta header
95
+ max_output_tokens: 128_000 - 1024,
96
+ cents_input: 500, // for input tokens <= 200K
97
+ cents_output: 2500, // for input tokens <= 200K
143
98
  default_reasoning: true,
144
99
  has_structured_json: true,
145
100
  recommended_temperature: undefined,
146
101
  provider_options: provider_options_anthropic({ thinking: true }),
147
102
  },
148
103
  {
149
- llm_model_name: "claude-sonnet-4.5",
150
- llm_model_code: "claude-sonnet-4-5",
104
+ llm_model_name: "claude-sonnet-4.6",
105
+ llm_model_code: "claude-sonnet-4-6",
151
106
  llm_api_code: "anthropic",
152
107
  context_window: 200_000, // 1_000_000 available with context-1m beta header
153
108
  max_output_tokens: 64_000,
@@ -159,14 +114,14 @@ export const LLM_MODEL_DETAILS = [
159
114
  provider_options: provider_options_anthropic({ thinking: false }),
160
115
  },
161
116
  {
162
- llm_model_name: "claude-sonnet-4.5-thinking",
163
- llm_model_code: "claude-sonnet-4-5",
117
+ llm_model_name: "claude-sonnet-4.6-thinking",
118
+ llm_model_code: "claude-sonnet-4-6",
164
119
  llm_api_code: "anthropic",
165
120
  context_window: 200_000, // 1_000_000 available with context-1m beta header
166
121
  max_output_tokens: 62_976, // = 64000 - 1024 used for reasoning
167
122
  cents_input: 300, // for input tokens <= 200K
168
123
  cents_output: 1500, // for input tokens <= 200K
169
- default_reasoning: false,
124
+ default_reasoning: true,
170
125
  has_structured_json: true,
171
126
  recommended_temperature: undefined,
172
127
  provider_options: provider_options_anthropic({ thinking: true }),
@@ -296,7 +251,7 @@ export const LLM_MODEL_DETAILS = [
296
251
  max_output_tokens: 65_536,
297
252
  cents_input: 200,
298
253
  cents_output: 1200,
299
- default_reasoning: false,
254
+ default_reasoning: true,
300
255
  has_structured_json: true,
301
256
  recommended_temperature: undefined,
302
257
  provider_options: provider_options_google({ thinking_level: "low" }),
@@ -575,33 +530,33 @@ export const LLM_MODEL_DETAILS = [
575
530
  provider_options: undefined,
576
531
  },
577
532
  {
578
- llm_model_name: "kimi-k2-0711@moonshotai",
579
- llm_model_code: "moonshotai/kimi-k2",
533
+ llm_model_name: "kimi-k2.5@moonshot",
534
+ llm_model_code: "moonshotai/kimi-k2.5",
580
535
  llm_api_code: "openrouter",
581
536
  context_window: 131_072,
582
537
  max_output_tokens: 131_072,
583
538
  cents_input: 60,
584
- cents_output: 250,
585
- default_reasoning: false,
539
+ cents_output: 300,
540
+ default_reasoning: true,
586
541
  has_structured_json: true,
587
542
  recommended_temperature: undefined,
588
543
  provider_options: provider_options_openrouter({ only: "moonshotai" }),
589
544
  },
590
545
  {
591
- llm_model_name: "kimi-k2-0905@groq",
592
- llm_model_code: "moonshotai/kimi-k2-0905",
546
+ llm_model_name: "kimi-k2.5@groq",
547
+ llm_model_code: "moonshotai/kimi-k2.5",
593
548
  llm_api_code: "openrouter",
594
- context_window: 262_144,
595
- max_output_tokens: 16_384,
596
- cents_input: 100,
549
+ context_window: 131_072,
550
+ max_output_tokens: 131_072,
551
+ cents_input: 60,
597
552
  cents_output: 300,
598
- default_reasoning: false,
599
- has_structured_json: false,
553
+ default_reasoning: true,
554
+ has_structured_json: true,
600
555
  recommended_temperature: undefined,
601
- provider_options: provider_options_openrouter({ only: "groq" }),
556
+ provider_options: provider_options_openrouter({ only: "groq" }),
602
557
  },
603
558
  {
604
- llm_model_name: "kimi-k2.5",
559
+ llm_model_name: "kimi-k2.5-nonthinking",
605
560
  llm_model_code: "moonshotai/kimi-k2.5",
606
561
  llm_api_code: "openrouter",
607
562
  context_window: 131_072,
@@ -611,7 +566,7 @@ export const LLM_MODEL_DETAILS = [
611
566
  default_reasoning: false,
612
567
  has_structured_json: true,
613
568
  recommended_temperature: undefined,
614
- provider_options: provider_options_openrouter({ only: "moonshotai" }),
569
+ provider_options: provider_options_openrouter({ only: "moonshotai", thinking: false }),
615
570
  },
616
571
  {
617
572
  llm_model_name: "llama-4-maverick@groq",
@@ -678,19 +633,6 @@ export const LLM_MODEL_DETAILS = [
678
633
  recommended_temperature: undefined,
679
634
  provider_options: undefined,
680
635
  },
681
- {
682
- llm_model_name: "minimax-m2.1",
683
- llm_model_code: "minimax/minimax-m2.1",
684
- llm_api_code: "openrouter",
685
- context_window: 204_800,
686
- max_output_tokens: 131_072,
687
- cents_input: 30,
688
- cents_output: 120,
689
- default_reasoning: false,
690
- has_structured_json: false,
691
- recommended_temperature: undefined,
692
- provider_options: provider_options_openrouter({ only: "minimax" }),
693
- },
694
636
  {
695
637
  llm_model_name: "minimax-m2.5",
696
638
  llm_model_code: "minimax/minimax-m2.5",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@johnowennixon/diffdash",
3
- "version": "1.13.0",
3
+ "version": "1.14.0",
4
4
  "description": "A command-line tool to generate Git commit messages using AI",
5
5
  "license": "0BSD",
6
6
  "author": "John Owen Nixon",