@johnowennixon/diffdash 1.13.0 → 1.15.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -1
- package/dist/package.json +1 -1
- package/dist/src/lib_diffdash_llm.js +3 -3
- package/dist/src/lib_llm_model.js +59 -143
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -32,9 +32,10 @@ npm install -g @johnowennixon/diffdash
|
|
|
32
32
|
|
|
33
33
|
## LLM Models
|
|
34
34
|
|
|
35
|
-
Currently, for this application, the best LLM model is **gpt-
|
|
35
|
+
Currently, for this application, the best LLM model is **gpt-4.1-mini** from OpenAI.
|
|
36
36
|
It is set as the default model.
|
|
37
37
|
I can only presume they have done a ton of training on diffs.
|
|
38
|
+
I have tested later GPT models but they don't seem to be as good.
|
|
38
39
|
|
|
39
40
|
## API Keys
|
|
40
41
|
|
package/dist/src/lib_diffdash_llm.js
CHANGED
|
@@ -1,15 +1,15 @@
|
|
|
1
1
|
import { env_get_substitute } from "./lib_env.js";
|
|
2
2
|
import { llm_model_get_choices, llm_model_get_details } from "./lib_llm_model.js";
|
|
3
|
-
const model_name_default = "gpt-
|
|
3
|
+
const model_name_default = "gpt-4.1-mini";
|
|
4
4
|
const model_name_options = [
|
|
5
5
|
"claude-haiku-4.5", // fallback
|
|
6
6
|
"deepseek-chat",
|
|
7
7
|
"gemini-2.5-flash",
|
|
8
8
|
"gemini-3-flash-preview-low",
|
|
9
|
-
"gpt-4.1-mini", //
|
|
9
|
+
"gpt-4.1-mini", // the best
|
|
10
10
|
"gpt-4.1-nano",
|
|
11
11
|
"gpt-5-mini",
|
|
12
|
-
"gpt-5-mini-minimal", //
|
|
12
|
+
"gpt-5-mini-minimal", // fallback
|
|
13
13
|
"gpt-5-nano",
|
|
14
14
|
"gpt-5-nano-minimal",
|
|
15
15
|
"grok-code-fast-1",
|
|
@@ -31,29 +31,36 @@ function provider_options_openai({ reasoning_effort, }) {
|
|
|
31
31
|
},
|
|
32
32
|
};
|
|
33
33
|
}
|
|
34
|
-
function provider_options_openrouter({ only }) {
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
34
|
+
function provider_options_openrouter({ only, thinking, }) {
|
|
35
|
+
if (only !== undefined && thinking !== undefined) {
|
|
36
|
+
return {
|
|
37
|
+
openrouter: {
|
|
38
|
+
provider: {
|
|
39
|
+
only: [only],
|
|
40
|
+
},
|
|
41
|
+
thinking,
|
|
39
42
|
},
|
|
40
|
-
}
|
|
41
|
-
}
|
|
43
|
+
};
|
|
44
|
+
}
|
|
45
|
+
if (thinking !== undefined) {
|
|
46
|
+
return {
|
|
47
|
+
openrouter: {
|
|
48
|
+
thinking,
|
|
49
|
+
},
|
|
50
|
+
};
|
|
51
|
+
}
|
|
52
|
+
if (only !== undefined) {
|
|
53
|
+
return {
|
|
54
|
+
openrouter: {
|
|
55
|
+
provider: {
|
|
56
|
+
only: [only],
|
|
57
|
+
},
|
|
58
|
+
},
|
|
59
|
+
};
|
|
60
|
+
}
|
|
61
|
+
return undefined;
|
|
42
62
|
}
|
|
43
63
|
export const LLM_MODEL_DETAILS = [
|
|
44
|
-
{
|
|
45
|
-
llm_model_name: "claude-3.5-haiku",
|
|
46
|
-
llm_model_code: "claude-3-5-haiku-latest",
|
|
47
|
-
llm_api_code: "anthropic",
|
|
48
|
-
context_window: 200_000,
|
|
49
|
-
max_output_tokens: 8192,
|
|
50
|
-
cents_input: 80,
|
|
51
|
-
cents_output: 400,
|
|
52
|
-
default_reasoning: false,
|
|
53
|
-
has_structured_json: true,
|
|
54
|
-
recommended_temperature: undefined,
|
|
55
|
-
provider_options: provider_options_anthropic({ thinking: false }),
|
|
56
|
-
},
|
|
57
64
|
{
|
|
58
65
|
llm_model_name: "claude-haiku-4.5",
|
|
59
66
|
llm_model_code: "claude-haiku-4-5",
|
|
@@ -67,40 +74,14 @@ export const LLM_MODEL_DETAILS = [
|
|
|
67
74
|
recommended_temperature: undefined,
|
|
68
75
|
provider_options: provider_options_anthropic({ thinking: false }),
|
|
69
76
|
},
|
|
70
|
-
{
|
|
71
|
-
llm_model_name: "claude-opus-4.5",
|
|
72
|
-
llm_model_code: "claude-opus-4-5",
|
|
73
|
-
llm_api_code: "anthropic",
|
|
74
|
-
context_window: 200_000,
|
|
75
|
-
max_output_tokens: 64_000,
|
|
76
|
-
cents_input: 300, // for input tokens <= 200K
|
|
77
|
-
cents_output: 1500, // for input tokens <= 200K
|
|
78
|
-
default_reasoning: false,
|
|
79
|
-
has_structured_json: true,
|
|
80
|
-
recommended_temperature: undefined,
|
|
81
|
-
provider_options: provider_options_anthropic({ thinking: false }),
|
|
82
|
-
},
|
|
83
|
-
{
|
|
84
|
-
llm_model_name: "claude-opus-4.5-thinking",
|
|
85
|
-
llm_model_code: "claude-opus-4-5",
|
|
86
|
-
llm_api_code: "anthropic",
|
|
87
|
-
context_window: 200_000,
|
|
88
|
-
max_output_tokens: 64_000 - 1024,
|
|
89
|
-
cents_input: 300, // for input tokens <= 200K
|
|
90
|
-
cents_output: 1500, // for input tokens <= 200K
|
|
91
|
-
default_reasoning: false,
|
|
92
|
-
has_structured_json: true,
|
|
93
|
-
recommended_temperature: undefined,
|
|
94
|
-
provider_options: provider_options_anthropic({ thinking: true }),
|
|
95
|
-
},
|
|
96
77
|
{
|
|
97
78
|
llm_model_name: "claude-opus-4.6",
|
|
98
79
|
llm_model_code: "claude-opus-4-6",
|
|
99
80
|
llm_api_code: "anthropic",
|
|
100
|
-
context_window: 200_000,
|
|
101
|
-
max_output_tokens:
|
|
102
|
-
cents_input:
|
|
103
|
-
cents_output:
|
|
81
|
+
context_window: 200_000, // 1_000_000 available with context-1m beta header
|
|
82
|
+
max_output_tokens: 128_000,
|
|
83
|
+
cents_input: 500, // for input tokens <= 200K
|
|
84
|
+
cents_output: 2500, // for input tokens <= 200K
|
|
104
85
|
default_reasoning: false,
|
|
105
86
|
has_structured_json: true,
|
|
106
87
|
recommended_temperature: undefined,
|
|
@@ -110,44 +91,18 @@ export const LLM_MODEL_DETAILS = [
|
|
|
110
91
|
llm_model_name: "claude-opus-4.6-thinking",
|
|
111
92
|
llm_model_code: "claude-opus-4-6",
|
|
112
93
|
llm_api_code: "anthropic",
|
|
113
|
-
context_window: 200_000,
|
|
114
|
-
max_output_tokens:
|
|
115
|
-
cents_input:
|
|
116
|
-
cents_output:
|
|
117
|
-
default_reasoning: false,
|
|
118
|
-
has_structured_json: true,
|
|
119
|
-
recommended_temperature: undefined,
|
|
120
|
-
provider_options: provider_options_anthropic({ thinking: true }),
|
|
121
|
-
},
|
|
122
|
-
{
|
|
123
|
-
llm_model_name: "claude-sonnet-4",
|
|
124
|
-
llm_model_code: "claude-sonnet-4-0",
|
|
125
|
-
llm_api_code: "anthropic",
|
|
126
|
-
context_window: 200_000,
|
|
127
|
-
max_output_tokens: 64_000,
|
|
128
|
-
cents_input: 300,
|
|
129
|
-
cents_output: 1500,
|
|
130
|
-
default_reasoning: false,
|
|
131
|
-
has_structured_json: true,
|
|
132
|
-
recommended_temperature: undefined,
|
|
133
|
-
provider_options: provider_options_anthropic({ thinking: false }),
|
|
134
|
-
},
|
|
135
|
-
{
|
|
136
|
-
llm_model_name: "claude-sonnet-4-thinking",
|
|
137
|
-
llm_model_code: "claude-sonnet-4-0",
|
|
138
|
-
llm_api_code: "anthropic",
|
|
139
|
-
context_window: 200_000,
|
|
140
|
-
max_output_tokens: 62_976, // = 64000 - 1024 used for reasoning
|
|
141
|
-
cents_input: 300,
|
|
142
|
-
cents_output: 1500,
|
|
94
|
+
context_window: 200_000, // 1_000_000 available with context-1m beta header
|
|
95
|
+
max_output_tokens: 128_000 - 1024,
|
|
96
|
+
cents_input: 500, // for input tokens <= 200K
|
|
97
|
+
cents_output: 2500, // for input tokens <= 200K
|
|
143
98
|
default_reasoning: true,
|
|
144
99
|
has_structured_json: true,
|
|
145
100
|
recommended_temperature: undefined,
|
|
146
101
|
provider_options: provider_options_anthropic({ thinking: true }),
|
|
147
102
|
},
|
|
148
103
|
{
|
|
149
|
-
llm_model_name: "claude-sonnet-4.
|
|
150
|
-
llm_model_code: "claude-sonnet-4-
|
|
104
|
+
llm_model_name: "claude-sonnet-4.6",
|
|
105
|
+
llm_model_code: "claude-sonnet-4-6",
|
|
151
106
|
llm_api_code: "anthropic",
|
|
152
107
|
context_window: 200_000, // 1_000_000 available with context-1m beta header
|
|
153
108
|
max_output_tokens: 64_000,
|
|
@@ -159,14 +114,14 @@ export const LLM_MODEL_DETAILS = [
|
|
|
159
114
|
provider_options: provider_options_anthropic({ thinking: false }),
|
|
160
115
|
},
|
|
161
116
|
{
|
|
162
|
-
llm_model_name: "claude-sonnet-4.
|
|
163
|
-
llm_model_code: "claude-sonnet-4-
|
|
117
|
+
llm_model_name: "claude-sonnet-4.6-thinking",
|
|
118
|
+
llm_model_code: "claude-sonnet-4-6",
|
|
164
119
|
llm_api_code: "anthropic",
|
|
165
120
|
context_window: 200_000, // 1_000_000 available with context-1m beta header
|
|
166
121
|
max_output_tokens: 62_976, // = 64000 - 1024 used for reasoning
|
|
167
122
|
cents_input: 300, // for input tokens <= 200K
|
|
168
123
|
cents_output: 1500, // for input tokens <= 200K
|
|
169
|
-
default_reasoning:
|
|
124
|
+
default_reasoning: true,
|
|
170
125
|
has_structured_json: true,
|
|
171
126
|
recommended_temperature: undefined,
|
|
172
127
|
provider_options: provider_options_anthropic({ thinking: true }),
|
|
@@ -296,7 +251,7 @@ export const LLM_MODEL_DETAILS = [
|
|
|
296
251
|
max_output_tokens: 65_536,
|
|
297
252
|
cents_input: 200,
|
|
298
253
|
cents_output: 1200,
|
|
299
|
-
default_reasoning:
|
|
254
|
+
default_reasoning: true,
|
|
300
255
|
has_structured_json: true,
|
|
301
256
|
recommended_temperature: undefined,
|
|
302
257
|
provider_options: provider_options_google({ thinking_level: "low" }),
|
|
@@ -575,33 +530,33 @@ export const LLM_MODEL_DETAILS = [
|
|
|
575
530
|
provider_options: undefined,
|
|
576
531
|
},
|
|
577
532
|
{
|
|
578
|
-
llm_model_name: "kimi-k2
|
|
579
|
-
llm_model_code: "moonshotai/kimi-k2",
|
|
533
|
+
llm_model_name: "kimi-k2.5@moonshot",
|
|
534
|
+
llm_model_code: "moonshotai/kimi-k2.5",
|
|
580
535
|
llm_api_code: "openrouter",
|
|
581
536
|
context_window: 131_072,
|
|
582
537
|
max_output_tokens: 131_072,
|
|
583
538
|
cents_input: 60,
|
|
584
|
-
cents_output:
|
|
585
|
-
default_reasoning:
|
|
539
|
+
cents_output: 300,
|
|
540
|
+
default_reasoning: true,
|
|
586
541
|
has_structured_json: true,
|
|
587
542
|
recommended_temperature: undefined,
|
|
588
543
|
provider_options: provider_options_openrouter({ only: "moonshotai" }),
|
|
589
544
|
},
|
|
590
545
|
{
|
|
591
|
-
llm_model_name: "kimi-k2
|
|
592
|
-
llm_model_code: "moonshotai/kimi-k2
|
|
546
|
+
llm_model_name: "kimi-k2.5@groq",
|
|
547
|
+
llm_model_code: "moonshotai/kimi-k2.5",
|
|
593
548
|
llm_api_code: "openrouter",
|
|
594
|
-
context_window:
|
|
595
|
-
max_output_tokens:
|
|
596
|
-
cents_input:
|
|
549
|
+
context_window: 131_072,
|
|
550
|
+
max_output_tokens: 131_072,
|
|
551
|
+
cents_input: 60,
|
|
597
552
|
cents_output: 300,
|
|
598
|
-
default_reasoning:
|
|
599
|
-
has_structured_json:
|
|
553
|
+
default_reasoning: true,
|
|
554
|
+
has_structured_json: true,
|
|
600
555
|
recommended_temperature: undefined,
|
|
601
|
-
provider_options: provider_options_openrouter({ only: "
|
|
556
|
+
provider_options: provider_options_openrouter({ only: "groq" }),
|
|
602
557
|
},
|
|
603
558
|
{
|
|
604
|
-
llm_model_name: "kimi-k2.5",
|
|
559
|
+
llm_model_name: "kimi-k2.5-nonthinking",
|
|
605
560
|
llm_model_code: "moonshotai/kimi-k2.5",
|
|
606
561
|
llm_api_code: "openrouter",
|
|
607
562
|
context_window: 131_072,
|
|
@@ -611,7 +566,7 @@ export const LLM_MODEL_DETAILS = [
|
|
|
611
566
|
default_reasoning: false,
|
|
612
567
|
has_structured_json: true,
|
|
613
568
|
recommended_temperature: undefined,
|
|
614
|
-
provider_options: provider_options_openrouter({ only: "moonshotai" }),
|
|
569
|
+
provider_options: provider_options_openrouter({ only: "moonshotai", thinking: false }),
|
|
615
570
|
},
|
|
616
571
|
{
|
|
617
572
|
llm_model_name: "llama-4-maverick@groq",
|
|
@@ -678,19 +633,6 @@ export const LLM_MODEL_DETAILS = [
|
|
|
678
633
|
recommended_temperature: undefined,
|
|
679
634
|
provider_options: undefined,
|
|
680
635
|
},
|
|
681
|
-
{
|
|
682
|
-
llm_model_name: "minimax-m2.1",
|
|
683
|
-
llm_model_code: "minimax/minimax-m2.1",
|
|
684
|
-
llm_api_code: "openrouter",
|
|
685
|
-
context_window: 204_800,
|
|
686
|
-
max_output_tokens: 131_072,
|
|
687
|
-
cents_input: 30,
|
|
688
|
-
cents_output: 120,
|
|
689
|
-
default_reasoning: false,
|
|
690
|
-
has_structured_json: false,
|
|
691
|
-
recommended_temperature: undefined,
|
|
692
|
-
provider_options: provider_options_openrouter({ only: "minimax" }),
|
|
693
|
-
},
|
|
694
636
|
{
|
|
695
637
|
llm_model_name: "minimax-m2.5",
|
|
696
638
|
llm_model_code: "minimax/minimax-m2.5",
|
|
@@ -718,39 +660,13 @@ export const LLM_MODEL_DETAILS = [
|
|
|
718
660
|
provider_options: provider_options_openrouter({ only: "mistral" }),
|
|
719
661
|
},
|
|
720
662
|
{
|
|
721
|
-
llm_model_name: "qwen3-
|
|
722
|
-
llm_model_code: "qwen/qwen3-
|
|
723
|
-
llm_api_code: "openrouter",
|
|
724
|
-
context_window: 131_072,
|
|
725
|
-
max_output_tokens: 131_072,
|
|
726
|
-
cents_input: 60,
|
|
727
|
-
cents_output: 120,
|
|
728
|
-
default_reasoning: false,
|
|
729
|
-
has_structured_json: true,
|
|
730
|
-
recommended_temperature: undefined,
|
|
731
|
-
provider_options: provider_options_openrouter({ only: "cerebras" }),
|
|
732
|
-
},
|
|
733
|
-
{
|
|
734
|
-
llm_model_name: "qwen3-coder@alibaba",
|
|
735
|
-
llm_model_code: "qwen/qwen3-coder",
|
|
663
|
+
llm_model_name: "qwen3.5-397b-a17b@alibaba",
|
|
664
|
+
llm_model_code: "qwen/qwen3.5-397b-a17b",
|
|
736
665
|
llm_api_code: "openrouter",
|
|
737
666
|
context_window: 262_144,
|
|
738
667
|
max_output_tokens: 65_536,
|
|
739
|
-
cents_input:
|
|
740
|
-
cents_output:
|
|
741
|
-
default_reasoning: false,
|
|
742
|
-
has_structured_json: true,
|
|
743
|
-
recommended_temperature: undefined,
|
|
744
|
-
provider_options: provider_options_openrouter({ only: "alibaba/opensource" }),
|
|
745
|
-
},
|
|
746
|
-
{
|
|
747
|
-
llm_model_name: "qwen-plus@alibaba",
|
|
748
|
-
llm_model_code: "qwen/qwen-plus-2025-07-28",
|
|
749
|
-
llm_api_code: "openrouter",
|
|
750
|
-
context_window: 1_000_000,
|
|
751
|
-
max_output_tokens: 32_768,
|
|
752
|
-
cents_input: 40, // for input tokens <= 256K
|
|
753
|
-
cents_output: 120, // for input tokens <= 256K
|
|
668
|
+
cents_input: 60,
|
|
669
|
+
cents_output: 360,
|
|
754
670
|
default_reasoning: false,
|
|
755
671
|
has_structured_json: true,
|
|
756
672
|
recommended_temperature: undefined,
|