@johnowennixon/diffdash 1.0.3 → 1.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/package.json +80 -0
- package/dist/src/diffdash.js +26 -0
- package/dist/src/lib_abort.js +21 -0
- package/dist/src/lib_ansi.js +21 -0
- package/dist/src/lib_char_box.js +3 -0
- package/dist/src/lib_char_control.js +8 -0
- package/dist/src/lib_char_digit.js +10 -0
- package/dist/src/lib_char_empty.js +1 -0
- package/dist/src/lib_char_punctuation.js +37 -0
- package/dist/src/lib_cli.js +187 -0
- package/dist/src/lib_datetime.js +62 -0
- package/dist/src/lib_debug.js +68 -0
- package/dist/src/lib_diffdash_add.js +23 -0
- package/dist/src/lib_diffdash_cli.js +34 -0
- package/dist/src/lib_diffdash_config.js +52 -0
- package/dist/src/lib_diffdash_llm.js +24 -0
- package/dist/src/lib_diffdash_sequence.js +199 -0
- package/dist/src/lib_duration.js +29 -0
- package/dist/src/lib_enabled.js +30 -0
- package/dist/src/lib_env.js +18 -0
- package/dist/src/lib_error.js +14 -0
- package/dist/src/lib_file_path.js +22 -0
- package/dist/src/lib_git_message_display.js +4 -0
- package/dist/src/lib_git_message_generate.js +55 -0
- package/dist/src/lib_git_message_prompt.js +72 -0
- package/dist/src/lib_git_message_schema.js +16 -0
- package/dist/src/lib_git_message_validate.js +61 -0
- package/dist/src/lib_git_simple_open.js +24 -0
- package/dist/src/lib_git_simple_staging.js +41 -0
- package/dist/src/lib_inspect.js +4 -0
- package/dist/src/lib_llm_access.js +69 -0
- package/dist/src/lib_llm_chat.js +66 -0
- package/dist/src/lib_llm_config.js +23 -0
- package/dist/src/lib_llm_list.js +21 -0
- package/dist/src/lib_llm_model.js +336 -0
- package/dist/src/lib_llm_provider.js +63 -0
- package/dist/src/lib_llm_tokens.js +14 -0
- package/dist/src/lib_package.js +7 -0
- package/dist/src/lib_parse_number.js +13 -0
- package/dist/src/lib_stdio_write.js +14 -0
- package/dist/src/lib_string_types.js +1 -0
- package/dist/src/lib_tell.js +58 -0
- package/dist/src/lib_tui_block.js +10 -0
- package/dist/src/lib_tui_justify.js +29 -0
- package/dist/src/lib_tui_readline.js +16 -0
- package/dist/src/lib_tui_table.js +20 -0
- package/dist/src/lib_tui_truncate.js +13 -0
- package/dist/src/lib_type_infer.js +1 -0
- package/package.json +30 -25
- package/out/diffdash.cjs +0 -33011
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
import { generateObject, generateText } from "ai";
|
|
2
|
+
import { debug_channels, debug_inspect_when } from "./lib_debug.js";
|
|
3
|
+
import { env_get_empty, env_get_substitute } from "./lib_env.js";
|
|
4
|
+
import { llm_provider_get_ai_sdk_language_model } from "./lib_llm_provider.js";
|
|
5
|
+
import { parse_float_or_undefined, parse_int, parse_int_or_undefined } from "./lib_parse_number.js";
|
|
6
|
+
// Read the tunable chat parameters from the environment.
// max_tokens is optional (undefined when unset); temperature and timeout
// fall back to the string defaults shown before being parsed.
function llm_chat_get_parameters() {
    const max_tokens = parse_int_or_undefined(env_get_empty("lib_llm_chat_max_tokens"));
    const temperature = parse_float_or_undefined(env_get_substitute("lib_llm_chat_temperature", "0.6"));
    const timeout = parse_int(env_get_substitute("lib_llm_chat_timeout", "60"));
    return { max_tokens, temperature, timeout };
}
|
|
13
|
+
/**
 * Run a (possibly multi-step, tool-using) text generation against the
 * configured model and return the generated text.
 *
 * @param llm_config - resolved model config (name, provider, code, API key)
 * @param headers - extra HTTP headers forwarded to the provider
 * @param system_prompt / user_prompt - prompts for the request
 * @param tools - AI-SDK tool definitions (may be undefined)
 * @param max_steps - step budget; hitting it exactly is treated as failure
 * @param min_steps - minimum steps expected; fewer is treated as failure
 * @throws when the SDK call fails, times out, or the step count is out of range
 */
export async function llm_chat_generate_text({ llm_config, headers, system_prompt, user_prompt, tools, max_steps, min_steps, }) {
    const { llm_model_name, llm_provider, llm_model_code, llm_api_key } = llm_config;
    const ai_sdk_language_model = llm_provider_get_ai_sdk_language_model({
        llm_model_code,
        llm_provider,
        llm_api_key,
    });
    const { max_tokens, temperature, timeout } = llm_chat_get_parameters();
    const llm_inputs = {
        model: ai_sdk_language_model,
        system: system_prompt,
        prompt: user_prompt,
        tools,
        headers,
        maxSteps: max_steps,
        maxTokens: max_tokens,
        temperature,
        // timeout is configured in seconds; AbortSignal.timeout takes ms
        abortSignal: AbortSignal.timeout(timeout * 1000),
    };
    debug_inspect_when(debug_channels.llm_inputs, llm_inputs, `llm_inputs (for ${llm_model_name})`);
    // This is liable to throw an error
    const llm_outputs = await generateText(llm_inputs);
    debug_inspect_when(debug_channels.llm_outputs, llm_outputs, `llm_outputs (for ${llm_model_name})`);
    // Too few steps suggests the model skipped expected tool calls.
    if (min_steps !== undefined && llm_outputs.steps.length < min_steps) {
        throw new Error("Too few steps taken");
    }
    // Reaching the step budget exactly suggests generation was cut short
    // by the limit rather than finishing naturally.
    if (max_steps !== undefined && llm_outputs.steps.length === max_steps) {
        throw new Error("Too many steps taken");
    }
    return llm_outputs.text;
}
|
|
44
|
+
/**
 * Run a structured (schema-constrained) generation against the configured
 * model and return the parsed object.
 *
 * @param llm_config - resolved model config (name, provider, code, API key)
 * @param user_prompt / system_prompt - prompts for the request
 * @param schema - schema the SDK uses to constrain and parse the output
 * @throws when the SDK call fails, times out, or output does not match schema
 */
export async function llm_chat_generate_object({ llm_config, user_prompt, system_prompt, schema, }) {
    const { llm_model_name, llm_provider, llm_model_code, llm_api_key } = llm_config;
    const ai_sdk_language_model = llm_provider_get_ai_sdk_language_model({
        llm_model_code,
        llm_provider,
        llm_api_key,
    });
    const { max_tokens, temperature, timeout } = llm_chat_get_parameters();
    const llm_inputs = {
        model: ai_sdk_language_model,
        system: system_prompt,
        prompt: user_prompt,
        schema,
        maxTokens: max_tokens,
        temperature,
        // timeout is configured in seconds; AbortSignal.timeout takes ms
        abortSignal: AbortSignal.timeout(timeout * 1000),
    };
    debug_inspect_when(debug_channels.llm_inputs, llm_inputs, `llm_inputs (for ${llm_model_name})`);
    // This is liable to throw an error
    const llm_outputs = await generateObject(llm_inputs);
    debug_inspect_when(debug_channels.llm_outputs, llm_outputs, `llm_outputs (for ${llm_model_name})`);
    return llm_outputs.object;
}
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
import { llm_access_available, llm_access_get } from "./lib_llm_access.js";
|
|
2
|
+
import { llm_model_find_detail, llm_model_get_choices } from "./lib_llm_model.js";
|
|
3
|
+
import { llm_provider_get_via } from "./lib_llm_provider.js";
|
|
4
|
+
import { tell_info } from "./lib_tell.js";
|
|
5
|
+
/**
 * Build the runtime configuration for one model: its static detail record
 * plus the access credentials (model code, provider, API key) resolved
 * through the router.
 */
export function llm_config_get({ llm_model_details, llm_model_name, llm_router, }) {
    const llm_model_detail = llm_model_find_detail({ llm_model_details, llm_model_name });
    const { llm_model_code, llm_provider, llm_api_key } = llm_access_get({ llm_model_details, llm_model_name, llm_router });
    return {
        llm_model_name,
        llm_model_detail,
        llm_model_code,
        llm_provider,
        llm_api_key,
    };
}
|
|
11
|
+
/**
 * Build configurations for every known model that is currently available
 * (has usable access and is not excluded).
 */
export function llm_config_get_all({ llm_model_details, llm_router, llm_excludes, }) {
    return llm_model_get_choices(llm_model_details)
        .filter((llm_model_name) => llm_access_available({ llm_model_details, llm_model_name, llm_excludes }))
        .map((llm_model_name) => llm_config_get({ llm_model_details, llm_model_name, llm_router }));
}
|
|
16
|
+
// Display string for a config, e.g. "gpt-4o (direct)" or "o3 (via OpenRouter)".
export function llm_config_get_model_via({ llm_model_name, llm_provider }) {
    const via = llm_provider_get_via(llm_provider);
    return `${llm_model_name} (${via})`;
}
|
|
20
|
+
// Announce the selected model (and how it is reached) on the info channel.
export function llm_config_show(llm_config) {
    tell_info(`Using LLM ${llm_config_get_model_via(llm_config)}`);
}
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
import { DOLLAR } from "./lib_char_punctuation.js";
|
|
2
|
+
import { stdio_write_stdout_linefeed } from "./lib_stdio_write.js";
|
|
3
|
+
import { tell_info, tell_warning } from "./lib_tell.js";
|
|
4
|
+
import { tui_justify_right } from "./lib_tui_justify.js";
|
|
5
|
+
import { TuiTable } from "./lib_tui_table.js";
|
|
6
|
+
/**
 * Print a table of the known models with their context window and prices.
 * Prices are stored in cents per million tokens and rendered as dollars.
 */
export function llm_list_models({ llm_model_details }) {
    const table = new TuiTable({ headings: ["NAME", "CONTEXT", "INPUT", "OUTPUT"] });
    // Render a cents-per-million price as a right-justified dollar amount.
    const as_dollars = (cents) => tui_justify_right(6, DOLLAR + (cents / 100).toFixed(2));
    for (const { llm_model_name, context_window, cents_input, cents_output } of llm_model_details) {
        table.push([
            llm_model_name,
            tui_justify_right(7, context_window.toString()),
            as_dollars(cents_input),
            as_dollars(cents_output),
        ]);
    }
    stdio_write_stdout_linefeed(table.toString());
    tell_info("Prices are per million tokens.");
    tell_warning("Prices are best effort and are liable to change - always double-check with your LLM provider.");
}
|
|
@@ -0,0 +1,336 @@
|
|
|
1
|
+
import { abort_with_error } from "./lib_abort.js";
|
|
2
|
+
// Static catalogue of the models this tool knows how to drive.
// Field semantics (as used by the accessors and llm_list_models below):
//   llm_model_name            - name used as the selection key throughout
//   llm_provider              - direct-API provider, or null when the model
//                               is presumably only reachable via a router
//   llm_model_code_direct     - code on the provider's own API (null = n/a)
//   llm_model_code_requesty   - code on the Requesty router (null = n/a)
//   llm_model_code_openrouter - code on the OpenRouter router
//   context_window            - context size (tokens, per llm_list_models)
//   cents_input/cents_output  - price in cents per million tokens
//   has_structured_json       - whether structured JSON output is supported
const LLM_MODEL_DETAILS = [
    {
        llm_model_name: "claude-3.5-haiku",
        llm_provider: "anthropic",
        llm_model_code_direct: "claude-3-5-haiku-latest",
        llm_model_code_requesty: "anthropic/claude-3-5-haiku-latest",
        llm_model_code_openrouter: "anthropic/claude-3.5-haiku",
        context_window: 200_000,
        cents_input: 80,
        cents_output: 400,
        has_structured_json: true,
    },
    {
        llm_model_name: "claude-3.7-sonnet",
        llm_provider: "anthropic",
        llm_model_code_direct: "claude-3-7-sonnet-20250219",
        llm_model_code_requesty: "anthropic/claude-3-7-sonnet-latest",
        llm_model_code_openrouter: "anthropic/claude-3.7-sonnet",
        context_window: 200_000,
        cents_input: 300,
        cents_output: 1500,
        has_structured_json: true,
    },
    {
        llm_model_name: "claude-sonnet-4",
        llm_provider: "anthropic",
        llm_model_code_direct: "claude-sonnet-4-0",
        llm_model_code_requesty: "anthropic/claude-sonnet-4-20250514",
        llm_model_code_openrouter: "anthropic/claude-sonnet-4",
        context_window: 200_000,
        cents_input: 300,
        cents_output: 1500,
        has_structured_json: true,
    },
    {
        llm_model_name: "codex-mini",
        llm_provider: "openai",
        llm_model_code_direct: "codex-mini-latest",
        llm_model_code_requesty: null,
        llm_model_code_openrouter: "openai/codex-mini",
        context_window: 200_000,
        cents_input: 150,
        cents_output: 600,
        has_structured_json: true,
    },
    {
        llm_model_name: "deepseek-v3",
        llm_provider: "deepseek",
        llm_model_code_direct: "deepseek-chat",
        llm_model_code_requesty: "novita/deepseek/deepseek-v3-0324",
        llm_model_code_openrouter: "deepseek/deepseek-chat-v3-0324",
        context_window: 64_000,
        cents_input: 27,
        cents_output: 110,
        has_structured_json: true,
    },
    {
        llm_model_name: "deepseek-r1",
        llm_provider: "deepseek",
        llm_model_code_direct: "deepseek-reasoner",
        llm_model_code_requesty: "netmind/deepseek-ai/DeepSeek-R1-0528",
        llm_model_code_openrouter: "deepseek/deepseek-r1-0528",
        context_window: 163_840,
        cents_input: 55,
        cents_output: 219,
        has_structured_json: true,
    },
    {
        llm_model_name: "devstral-small",
        llm_provider: null,
        llm_model_code_direct: null,
        llm_model_code_requesty: "mistral/devstral-small-latest",
        llm_model_code_openrouter: "mistralai/devstral-small",
        context_window: 128_000,
        cents_input: 7,
        cents_output: 10,
        has_structured_json: true,
    },
    {
        llm_model_name: "ernie-4.5-300b",
        llm_provider: null,
        llm_model_code_direct: null,
        llm_model_code_requesty: null,
        llm_model_code_openrouter: "baidu/ernie-4.5-300b-a47b",
        context_window: 123_000,
        cents_input: 30,
        cents_output: 100,
        has_structured_json: true,
    },
    {
        llm_model_name: "gemini-2.0-flash",
        llm_provider: "google",
        llm_model_code_direct: "gemini-2.0-flash",
        llm_model_code_requesty: "google/gemini-2.0-flash-001",
        llm_model_code_openrouter: "google/gemini-2.0-flash-001",
        context_window: 1_048_576,
        cents_input: 10,
        cents_output: 40,
        has_structured_json: true,
    },
    {
        llm_model_name: "gemini-2.5-flash",
        llm_provider: "google",
        llm_model_code_direct: "gemini-2.5-flash",
        llm_model_code_requesty: "google/gemini-2.5-flash",
        llm_model_code_openrouter: "google/gemini-2.5-flash",
        context_window: 1_048_576,
        cents_input: 30,
        cents_output: 250,
        has_structured_json: true,
    },
    {
        llm_model_name: "gemini-2.5-pro",
        llm_provider: "google",
        llm_model_code_direct: "gemini-2.5-pro",
        llm_model_code_requesty: "google/gemini-2.5-pro",
        llm_model_code_openrouter: "google/gemini-2.5-pro",
        context_window: 1_048_576,
        cents_input: 125,
        cents_output: 1000,
        has_structured_json: true,
    },
    {
        llm_model_name: "glm-4-32b",
        llm_provider: null,
        llm_model_code_direct: null,
        llm_model_code_requesty: null,
        llm_model_code_openrouter: "thudm/glm-4-32b",
        context_window: 32_000,
        cents_input: 24,
        cents_output: 24,
        has_structured_json: false,
    },
    {
        llm_model_name: "gpt-4.1",
        llm_provider: "openai",
        llm_model_code_direct: "gpt-4.1",
        llm_model_code_requesty: "openai/gpt-4.1",
        llm_model_code_openrouter: "openai/gpt-4.1",
        context_window: 1_047_576,
        cents_input: 200,
        cents_output: 800,
        has_structured_json: true,
    },
    {
        llm_model_name: "gpt-4.1-mini",
        llm_provider: "openai",
        llm_model_code_direct: "gpt-4.1-mini",
        llm_model_code_requesty: "openai/gpt-4.1-mini",
        llm_model_code_openrouter: "openai/gpt-4.1-mini",
        context_window: 1_047_576,
        cents_input: 40,
        cents_output: 160,
        has_structured_json: true,
    },
    {
        llm_model_name: "gpt-4.1-nano",
        llm_provider: "openai",
        llm_model_code_direct: "gpt-4.1-nano",
        llm_model_code_requesty: "openai/gpt-4.1-nano",
        llm_model_code_openrouter: "openai/gpt-4.1-nano",
        context_window: 1_047_576,
        cents_input: 10,
        cents_output: 40,
        has_structured_json: true,
    },
    {
        llm_model_name: "gpt-4o",
        llm_provider: "openai",
        llm_model_code_direct: "gpt-4o-2024-11-20",
        llm_model_code_requesty: "openai/gpt-4o-2024-11-20",
        llm_model_code_openrouter: "openai/gpt-4o-2024-11-20",
        context_window: 128_000,
        cents_input: 250,
        cents_output: 1000,
        has_structured_json: true,
    },
    {
        llm_model_name: "gpt-4o-mini",
        llm_provider: "openai",
        llm_model_code_direct: "gpt-4o-mini",
        llm_model_code_requesty: "openai/gpt-4o-mini-2024-07-18",
        llm_model_code_openrouter: "openai/gpt-4o-mini-2024-07-18",
        context_window: 128_000,
        cents_input: 15,
        cents_output: 60,
        has_structured_json: true,
    },
    {
        llm_model_name: "grok-3",
        llm_provider: null,
        llm_model_code_direct: "grok-3",
        llm_model_code_requesty: "xai/grok-3-beta",
        llm_model_code_openrouter: "x-ai/grok-3-beta",
        context_window: 131_072,
        cents_input: 300,
        cents_output: 1500,
        has_structured_json: true,
    },
    {
        llm_model_name: "grok-3-mini",
        llm_provider: null,
        llm_model_code_direct: "grok-3-mini",
        llm_model_code_requesty: "xai/grok-3-mini-beta",
        llm_model_code_openrouter: "x-ai/grok-3-mini-beta",
        context_window: 131_072,
        cents_input: 30,
        cents_output: 50,
        has_structured_json: true,
    },
    {
        llm_model_name: "llama-4-maverick",
        llm_provider: null,
        llm_model_code_direct: null,
        llm_model_code_requesty: "parasail/meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
        llm_model_code_openrouter: "meta-llama/llama-4-maverick",
        context_window: 1_048_576,
        cents_input: 21,
        cents_output: 85,
        has_structured_json: true,
    },
    {
        llm_model_name: "llama-4-scout",
        llm_provider: null,
        llm_model_code_direct: null,
        llm_model_code_requesty: "parasail/meta-llama/Llama-4-Scout-17B-16E-Instruct",
        llm_model_code_openrouter: "meta-llama/llama-4-scout",
        context_window: 1_048_576,
        cents_input: 14,
        cents_output: 58,
        has_structured_json: true,
    },
    {
        llm_model_name: "mercury-coder-small",
        llm_provider: null,
        llm_model_code_direct: null,
        llm_model_code_requesty: null,
        llm_model_code_openrouter: "inception/mercury-coder-small-beta",
        context_window: 32_000,
        cents_input: 25,
        cents_output: 100,
        has_structured_json: false,
    },
    {
        llm_model_name: "mistral-medium-3",
        llm_provider: null,
        llm_model_code_direct: null,
        llm_model_code_requesty: null,
        llm_model_code_openrouter: "mistralai/mistral-medium-3",
        context_window: 131_072,
        cents_input: 40,
        cents_output: 200,
        has_structured_json: true,
    },
    {
        llm_model_name: "o3",
        llm_provider: "openai",
        llm_model_code_direct: "o3-2025-04-16",
        llm_model_code_requesty: "openai/o3-2025-04-16",
        llm_model_code_openrouter: "openai/o3-2025-04-16",
        context_window: 200_000,
        cents_input: 200,
        cents_output: 800,
        has_structured_json: true,
    },
    {
        llm_model_name: "o3-pro",
        llm_provider: "openai",
        llm_model_code_direct: "o3-pro",
        llm_model_code_requesty: "openai/o3-pro",
        llm_model_code_openrouter: "openai/o3-pro",
        context_window: 200_000,
        cents_input: 2000,
        cents_output: 8000,
        has_structured_json: true,
    },
    {
        llm_model_name: "o4-mini",
        llm_provider: "openai",
        llm_model_code_direct: "o4-mini-2025-04-16",
        llm_model_code_requesty: "openai/o4-mini-2025-04-16",
        llm_model_code_openrouter: "openai/o4-mini-2025-04-16",
        context_window: 200_000,
        cents_input: 110,
        cents_output: 440,
        has_structured_json: true,
    },
    {
        llm_model_name: "qwen3-30b-a3b",
        llm_provider: null,
        llm_model_code_direct: null,
        llm_model_code_requesty: null,
        llm_model_code_openrouter: "qwen/qwen3-30b-a3b",
        context_window: 40_000,
        cents_input: 8,
        cents_output: 29,
        has_structured_json: true,
    },
    {
        llm_model_name: "qwen3-32b",
        llm_provider: null,
        llm_model_code_direct: null,
        llm_model_code_requesty: "deepinfra/Qwen/Qwen3-32B",
        llm_model_code_openrouter: "qwen/qwen3-32b",
        context_window: 40_000,
        cents_input: 10,
        cents_output: 30,
        has_structured_json: true,
    },
    {
        llm_model_name: "qwen3-235b-a22b",
        llm_provider: null,
        llm_model_code_direct: null,
        llm_model_code_requesty: "deepinfra/Qwen/Qwen3-235B-A22B",
        llm_model_code_openrouter: "qwen/qwen3-235b-a22b",
        context_window: 40_000,
        cents_input: 20,
        cents_output: 60,
        has_structured_json: true,
    },
];
|
|
323
|
+
// Select the detail records for the given names, preserving catalogue order.
export function llm_model_get_details({ llm_model_names, }) {
    const wanted = new Set(llm_model_names);
    return LLM_MODEL_DETAILS.filter(({ llm_model_name }) => wanted.has(llm_model_name));
}
|
|
326
|
+
// Extract the list of model names from a set of detail records.
export function llm_model_get_choices(llm_model_details) {
    const names = [];
    for (const { llm_model_name } of llm_model_details) {
        names.push(llm_model_name);
    }
    return names;
}
|
|
329
|
+
// Look up one model's detail record by name; abort when it is unknown.
export function llm_model_find_detail({ llm_model_details, llm_model_name, }) {
    const found = llm_model_details.find((detail) => detail.llm_model_name === llm_model_name);
    if (found !== undefined) {
        return found;
    }
    abort_with_error(`Unknown model: ${llm_model_name}`);
}
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
import { createAnthropic } from "@ai-sdk/anthropic";
|
|
2
|
+
import { createDeepSeek } from "@ai-sdk/deepseek";
|
|
3
|
+
import { createGoogleGenerativeAI } from "@ai-sdk/google";
|
|
4
|
+
import { createOpenAI } from "@ai-sdk/openai";
|
|
5
|
+
import { createOpenRouter } from "@openrouter/ai-sdk-provider";
|
|
6
|
+
import { createRequesty } from "@requesty/ai-sdk";
|
|
7
|
+
import { abort_with_error } from "./lib_abort.js";
|
|
8
|
+
import { env_get } from "./lib_env.js";
|
|
9
|
+
// Human-readable description of how a provider is reached: either the
// provider's own API ("direct") or one of the routers.
export function llm_provider_get_via(llm_provider) {
    if (llm_provider === "requesty") {
        return "via Requesty";
    }
    if (llm_provider === "openrouter") {
        return "via OpenRouter";
    }
    const direct_providers = ["anthropic", "deepseek", "google", "openai"];
    if (direct_providers.includes(llm_provider)) {
        return "direct";
    }
    abort_with_error("Unknown LLM provider");
}
|
|
24
|
+
// Name of the environment variable holding each provider's API key.
// Note the Google key deliberately uses the GEMINI_API_KEY convention.
export function llm_provider_get_api_key_env(llm_provider) {
    const env_names = {
        anthropic: "ANTHROPIC_API_KEY",
        deepseek: "DEEPSEEK_API_KEY",
        google: "GEMINI_API_KEY",
        openai: "OPENAI_API_KEY",
        requesty: "REQUESTY_API_KEY",
        openrouter: "OPENROUTER_API_KEY",
    };
    if (!Object.hasOwn(env_names, llm_provider)) {
        abort_with_error("Unknown LLM provider");
    }
    return env_names[llm_provider];
}
|
|
42
|
+
// Fetch the provider's API key from its conventional environment variable.
export function llm_provider_get_api_key(llm_provider) {
    return env_get(llm_provider_get_api_key_env(llm_provider));
}
|
|
46
|
+
/**
 * Instantiate the AI-SDK language-model handle for the given provider,
 * model code, and API key.  Aborts on an unrecognised provider.
 */
export function llm_provider_get_ai_sdk_language_model({ llm_model_code, llm_provider, llm_api_key, }) {
    const sdk_factories = {
        anthropic: createAnthropic,
        deepseek: createDeepSeek,
        google: createGoogleGenerativeAI,
        openai: createOpenAI,
        requesty: createRequesty,
        openrouter: createOpenRouter,
    };
    if (!Object.hasOwn(sdk_factories, llm_provider)) {
        abort_with_error("Unknown LLM provider");
    }
    const create_sdk_provider = sdk_factories[llm_provider];
    return create_sdk_provider({ apiKey: llm_api_key })(llm_model_code);
}
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
import { debug_channels } from "./lib_debug.js";
|
|
2
|
+
import { tell_debug } from "./lib_tell.js";
|
|
3
|
+
// Rough token estimate: assume about three characters per token.
export function llm_tokens_count_estimated({ text }) {
    const chars_per_token = 3;
    return Math.round(text.length / chars_per_token);
}
|
|
6
|
+
/**
 * When the llm_tokens debug channel is enabled, log the estimated token
 * usage for a piece of text alongside its character length and the
 * chars-per-token ratio.
 *
 * @param name - label for the log line (e.g. which prompt this is)
 * @param llm_config - used only for the model name shown in the log line
 * @param text - the text whose token usage is estimated
 */
export function llm_tokens_debug_usage({ name, llm_config, text, }) {
    if (debug_channels.llm_tokens) {
        const { llm_model_name } = llm_config;
        const length = text.length;
        // llm_tokens_count_estimated only consumes `text`; the previous call
        // also passed a stray llm_config property.
        const tokens = llm_tokens_count_estimated({ text });
        // Guard the empty-text case: 0/0 would log ratio=NaN.
        const ratio = tokens === 0 ? 0 : Math.round((length / tokens) * 100) / 100;
        tell_debug(`${name}: length=${length}, tokens=${tokens}, ratio=${ratio}, model=${llm_model_name}`);
    }
}
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
import package_json from "../package.json" with { type: "json" };
|
|
2
|
+
import { EMPTY } from "./lib_char_empty.js";
|
|
3
|
+
// Strip a leading npm scope ("@scope/pkg" -> "pkg"); unscoped names pass through.
function remove_npmjs_scope(name) {
    const scope_pattern = /^@[^/]+\//;
    return name.replace(scope_pattern, EMPTY);
}
|
|
6
|
+
// Package identity, derived from package.json at import time.
// The npm scope is dropped so the name matches the installed binary.
export const PACKAGE_NAME = remove_npmjs_scope(package_json.name);
export const PACKAGE_VERSION = package_json.version;
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import { EMPTY } from "./lib_char_empty.js";
|
|
2
|
+
// Parse a base-10 integer; wrapper pinning the radix explicitly.
export function parse_int(input) {
    const radix = 10;
    return Number.parseInt(input, radix);
}
|
|
5
|
+
// Parse a floating-point number; thin wrapper for project-wide naming consistency.
export function parse_float(text) {
    return Number.parseFloat(text);
}
|
|
8
|
+
// As parse_int, but map undefined or the empty string to undefined.
export function parse_int_or_undefined(input) {
    if (input === undefined || input === EMPTY) {
        return undefined;
    }
    return parse_int(input);
}
|
|
11
|
+
// As parse_float, but map undefined or the empty string to undefined.
export function parse_float_or_undefined(input) {
    if (input === undefined || input === EMPTY) {
        return undefined;
    }
    return parse_float(input);
}
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
import { writeSync } from "node:fs";
|
|
2
|
+
import { LF } from "./lib_char_control.js";
|
|
3
|
+
// Write to stdout synchronously (fd 1), bypassing stream buffering.
export function stdio_write_stdout(message) {
    writeSync(1, message);
}
// As stdio_write_stdout, with a trailing linefeed appended.
export function stdio_write_stdout_linefeed(message) {
    stdio_write_stdout(message + LF);
}
// Write to stderr synchronously (fd 2).
export function stdio_write_stderr(message) {
    writeSync(2, message);
}
// As stdio_write_stderr, with a trailing linefeed appended.
export function stdio_write_stderr_linefeed(message) {
    stdio_write_stderr(message + LF);
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
import { ansi_cyan, ansi_green, ansi_grey, ansi_magenta, ansi_normal, ansi_red, ansi_yellow } from "./lib_ansi.js";
|
|
2
|
+
import { LF } from "./lib_char_control.js";
|
|
3
|
+
import { EMPTY } from "./lib_char_empty.js";
|
|
4
|
+
import { SPACE } from "./lib_char_punctuation.js";
|
|
5
|
+
import { datetime_format_local_iso_ymdthms, datetime_now } from "./lib_datetime.js";
|
|
6
|
+
import { enabled_from_env } from "./lib_enabled.js";
|
|
7
|
+
import { stdio_write_stderr_linefeed } from "./lib_stdio_write.js";
|
|
8
|
+
// Global toggles for the tell_* logging helpers, read once from the environment.
export const tell_enables = {
    // TELL_TIMESTAMP: prefix each line with a local ISO timestamp (off by default).
    timestamp: enabled_from_env("TELL_TIMESTAMP"),
    // TELL_OKAY: allow tell_okay() to print its success line (on by default).
    okay: enabled_from_env("TELL_OKAY", { default: true }),
};
|
|
12
|
+
// Core writer behind the tell_* helpers: strip trailing linefeeds,
// optionally prefix a grey local timestamp, colour the message, and
// emit the line to stderr.
function tell_generic({ message, colourizer }) {
    let body = message;
    while (body.endsWith(LF)) {
        body = body.slice(0, -1);
    }
    let line = EMPTY;
    if (tell_enables.timestamp) {
        const stamp = datetime_format_local_iso_ymdthms(datetime_now());
        line += ansi_grey(stamp);
        line += SPACE;
    }
    if (colourizer) {
        line += colourizer(body);
    }
    stdio_write_stderr_linefeed(line);
}
|
|
27
|
+
// Discard the message entirely (used where a teller is required but
// output is unwanted).
export function tell_nowhere(_message) {
    // intentionally empty
}
// Uncoloured output.
export function tell_plain(message) {
    tell_generic({ message, colourizer: ansi_normal });
}
// Errors: red.
export function tell_error(message) {
    tell_generic({ message, colourizer: ansi_red });
}
// Warnings: yellow.
export function tell_warning(message) {
    tell_generic({ message, colourizer: ansi_yellow });
}
// Success: green.
export function tell_success(message) {
    tell_generic({ message, colourizer: ansi_green });
}
// Informational: cyan.
export function tell_info(message) {
    tell_generic({ message, colourizer: ansi_cyan });
}
// Actions being taken: magenta.
export function tell_action(message) {
    tell_generic({ message, colourizer: ansi_magenta });
}
// Debug detail: grey.
export function tell_debug(message) {
    tell_generic({ message, colourizer: ansi_grey });
}
// Emit an empty line.
export function tell_blank() {
    tell_plain(EMPTY);
}
// Emit the standard success line, unless suppressed via TELL_OKAY.
export function tell_okay() {
    if (tell_enables.okay) {
        tell_success("Okay");
    }
}
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import { BOX_DRAWINGS_LIGHT_HORIZONTAL } from "./lib_char_box.js";
|
|
2
|
+
import { SPACE } from "./lib_char_punctuation.js";
|
|
3
|
+
import { tui_justify_centre } from "./lib_tui_justify.js";
|
|
4
|
+
/**
 * Emit `content` framed between horizontal rule lines via `teller`.
 * When `title` is given it is centred (with one space either side)
 * inside the top rule.
 */
export function tui_block_string({ teller, content, title, pad_char = BOX_DRAWINGS_LIGHT_HORIZONTAL, width = 120, }) {
    const rule = pad_char.repeat(width);
    let top = rule;
    if (title) {
        top = tui_justify_centre({ line: SPACE + title + SPACE, width, pad_char });
    }
    for (const piece of [top, content, rule]) {
        teller(piece);
    }
}
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
import { DIGIT_0 } from "./lib_char_digit.js";
|
|
2
|
+
import { SPACE } from "./lib_char_punctuation.js";
|
|
3
|
+
import { tui_truncate_ellipsis, tui_truncate_plain } from "./lib_tui_truncate.js";
|
|
4
|
+
/**
 * Left-justify `s` in a field of `n` characters, optionally truncating
 * first (with an ellipsis marker, or plainly).  Strings longer than the
 * field are returned untouched when both flags are false.
 */
export function tui_justify_left(n, s, ellipsis = false, truncate = false) {
    let out = s;
    if (ellipsis) {
        out = tui_truncate_ellipsis(n, out);
    }
    if (truncate) {
        out = tui_truncate_plain(n, out);
    }
    return out.padEnd(n);
}
|
|
15
|
+
// Right-justify `s` in a field of `n` characters (space padded on the left).
export function tui_justify_right(n, s) {
    const field_width = n;
    return s.padStart(field_width);
}
|
|
18
|
+
/**
 * Centre `line` within `width` using `pad_char`; when the padding is odd
 * the extra character goes on the right.  Lines already at least `width`
 * long are returned unchanged.
 */
export function tui_justify_centre({ line, width, pad_char = SPACE, }) {
    const shortfall = width - line.length;
    if (shortfall <= 0) {
        return line;
    }
    const left = Math.floor(shortfall / 2);
    const right = shortfall - left;
    return pad_char.repeat(left) + line + pad_char.repeat(right);
}
|
|
27
|
+
// Zero-pad `s` on the left to `n` characters (e.g. "7" -> "007").
export function tui_justify_zero(n, s) {
    const padded = s.padStart(n, DIGIT_0);
    return padded;
}
|