@poolzin/pool-bot 2026.3.23 → 2026.3.24

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/CHANGELOG.md +57 -0
  2. package/dist/.buildstamp +1 -1
  3. package/dist/acp/policy.js +52 -0
  4. package/dist/agents/btw.js +280 -0
  5. package/dist/agents/fast-mode.js +24 -0
  6. package/dist/agents/live-model-errors.js +23 -0
  7. package/dist/agents/model-auth-env-vars.js +44 -0
  8. package/dist/agents/model-auth-markers.js +69 -0
  9. package/dist/agents/models-config.providers.discovery.js +180 -0
  10. package/dist/agents/models-config.providers.static.js +480 -0
  11. package/dist/auto-reply/reply/typing-policy.js +15 -0
  12. package/dist/build-info.json +3 -3
  13. package/dist/channels/account-snapshot-fields.js +176 -0
  14. package/dist/channels/draft-stream-controls.js +89 -0
  15. package/dist/channels/inbound-debounce-policy.js +28 -0
  16. package/dist/channels/typing-lifecycle.js +39 -0
  17. package/dist/cli/program/command-registry.js +52 -0
  18. package/dist/commands/agent-binding.js +123 -0
  19. package/dist/commands/agents.commands.bind.js +280 -0
  20. package/dist/commands/backup-shared.js +186 -0
  21. package/dist/commands/backup-verify.js +236 -0
  22. package/dist/commands/backup.js +166 -0
  23. package/dist/commands/channel-account-context.js +15 -0
  24. package/dist/commands/channel-account.js +190 -0
  25. package/dist/commands/gateway-install-token.js +117 -0
  26. package/dist/commands/oauth-tls-preflight.js +121 -0
  27. package/dist/commands/ollama-setup.js +402 -0
  28. package/dist/commands/self-hosted-provider-setup.js +207 -0
  29. package/dist/commands/session-store-targets.js +12 -0
  30. package/dist/commands/sessions-cleanup.js +97 -0
  31. package/dist/cron/heartbeat-policy.js +26 -0
  32. package/dist/gateway/hooks-mapping.js +46 -7
  33. package/dist/hooks/module-loader.js +28 -0
  34. package/dist/infra/agent-command-binding.js +144 -0
  35. package/dist/infra/backup.js +328 -0
  36. package/dist/infra/channel-account-context.js +173 -0
  37. package/dist/infra/session-cleanup.js +143 -0
  38. package/package.json +1 -1
@@ -0,0 +1,180 @@
1
+ import { createSubsystemLogger } from "../logging/subsystem.js";
2
+ import { KILOCODE_BASE_URL } from "../providers/kilocode-shared.js";
3
+ import { discoverHuggingfaceModels, HUGGINGFACE_BASE_URL, HUGGINGFACE_MODEL_CATALOG, buildHuggingfaceModelDefinition, } from "./huggingface-models.js";
4
+ import { discoverKilocodeModels } from "./kilocode-models.js";
5
+ import { enrichOllamaModelsWithContext, OLLAMA_DEFAULT_CONTEXT_WINDOW, OLLAMA_DEFAULT_COST, OLLAMA_DEFAULT_MAX_TOKENS, isReasoningModelHeuristic, resolveOllamaApiBase, } from "./ollama-models.js";
6
+ import { discoverVeniceModels, VENICE_BASE_URL } from "./venice-models.js";
7
+ import { discoverVercelAiGatewayModels, VERCEL_AI_GATEWAY_BASE_URL } from "./vercel-ai-gateway.js";
8
+ export { resolveOllamaApiBase } from "./ollama-models.js";
9
// Subsystem-scoped logger for provider-discovery diagnostics.
const log = createSubsystemLogger("agents/model-providers");
// Max parallel /api/show calls when enriching Ollama model metadata.
const OLLAMA_SHOW_CONCURRENCY = 8;
// Cap on how many locally installed Ollama models are inspected per discovery pass.
const OLLAMA_SHOW_MAX_MODELS = 200;
// Fallback limits for OpenAI-compatible local servers (vLLM / SGLang) whose
// /models listing does not report context or output-token limits.
const OPENAI_COMPAT_LOCAL_DEFAULT_CONTEXT_WINDOW = 128000;
const OPENAI_COMPAT_LOCAL_DEFAULT_MAX_TOKENS = 8192;
// Local inference is treated as free across every token category.
const OPENAI_COMPAT_LOCAL_DEFAULT_COST = {
    input: 0,
    output: 0,
    cacheRead: 0,
    cacheWrite: 0,
};
// Conventional default local endpoints for SGLang and vLLM OpenAI-compatible servers.
const SGLANG_BASE_URL = "http://127.0.0.1:30000/v1";
const VLLM_BASE_URL = "http://127.0.0.1:8000/v1";
22
/**
 * Probe a local Ollama instance for installed models via GET /api/tags.
 *
 * Never throws: returns [] under test runners, when the endpoint is
 * unreachable or non-OK, or when no models are installed. Warnings are
 * suppressed when `opts.quiet` is truthy.
 */
async function discoverOllamaModels(baseUrl, opts) {
    // Do not hit a live daemon from test runs.
    if (process.env.VITEST || process.env.NODE_ENV === "test") {
        return [];
    }
    const quiet = Boolean(opts?.quiet);
    try {
        const apiBase = resolveOllamaApiBase(baseUrl);
        const response = await fetch(`${apiBase}/api/tags`, {
            signal: AbortSignal.timeout(5000),
        });
        if (!response.ok) {
            if (!quiet) {
                log.warn(`Failed to discover Ollama models: ${response.status}`);
            }
            return [];
        }
        const data = await response.json();
        if (!data.models || data.models.length === 0) {
            log.debug("No Ollama models found on local instance");
            return [];
        }
        // Bound the number of follow-up /api/show calls on hosts with huge libraries.
        const modelsToInspect = data.models.slice(0, OLLAMA_SHOW_MAX_MODELS);
        if (modelsToInspect.length < data.models.length && !quiet) {
            log.warn(`Capping Ollama /api/show inspection to ${OLLAMA_SHOW_MAX_MODELS} models (received ${data.models.length})`);
        }
        const enriched = await enrichOllamaModelsWithContext(apiBase, modelsToInspect, {
            concurrency: OLLAMA_SHOW_CONCURRENCY,
        });
        return enriched.map((entry) => ({
            id: entry.name,
            name: entry.name,
            reasoning: isReasoningModelHeuristic(entry.name),
            input: ["text"],
            cost: OLLAMA_DEFAULT_COST,
            contextWindow: entry.contextWindow ?? OLLAMA_DEFAULT_CONTEXT_WINDOW,
            maxTokens: OLLAMA_DEFAULT_MAX_TOKENS,
        }));
    }
    catch (error) {
        if (!quiet) {
            log.warn(`Failed to discover Ollama models: ${String(error)}`);
        }
        return [];
    }
}
66
/**
 * List models from an OpenAI-compatible local server (vLLM / SGLang) via
 * GET {baseUrl}/models, with optional bearer auth.
 *
 * Never throws: failures and empty listings are logged and yield [].
 */
async function discoverOpenAICompatibleLocalModels(params) {
    if (process.env.VITEST || process.env.NODE_ENV === "test") {
        return [];
    }
    const trimmedBaseUrl = params.baseUrl.trim().replace(/\/+$/, "");
    const url = `${trimmedBaseUrl}/models`;
    try {
        const apiKey = params.apiKey?.trim();
        const response = await fetch(url, {
            headers: apiKey ? { Authorization: `Bearer ${apiKey}` } : undefined,
            signal: AbortSignal.timeout(5000),
        });
        if (!response.ok) {
            log.warn(`Failed to discover ${params.label} models: ${response.status}`);
            return [];
        }
        const payload = await response.json();
        const entries = payload.data ?? [];
        if (entries.length === 0) {
            log.warn(`No ${params.label} models found on local instance`);
            return [];
        }
        const definitions = [];
        for (const entry of entries) {
            // Skip entries without a usable string id.
            const modelId = typeof entry.id === "string" ? entry.id.trim() : "";
            if (!modelId) {
                continue;
            }
            definitions.push({
                id: modelId,
                name: modelId,
                reasoning: isReasoningModelHeuristic(modelId),
                input: ["text"],
                cost: OPENAI_COMPAT_LOCAL_DEFAULT_COST,
                contextWindow: params.contextWindow ?? OPENAI_COMPAT_LOCAL_DEFAULT_CONTEXT_WINDOW,
                maxTokens: params.maxTokens ?? OPENAI_COMPAT_LOCAL_DEFAULT_MAX_TOKENS,
            });
        }
        return definitions;
    }
    catch (error) {
        log.warn(`Failed to discover ${params.label} models: ${String(error)}`);
        return [];
    }
}
109
/** Venice provider: model list is discovered live from the Venice API. */
export async function buildVeniceProvider() {
    return {
        baseUrl: VENICE_BASE_URL,
        api: "openai-completions",
        models: await discoverVeniceModels(),
    };
}
117
/** Ollama provider: base URL normalized, models discovered from the local daemon. */
export async function buildOllamaProvider(configuredBaseUrl, opts) {
    return {
        baseUrl: resolveOllamaApiBase(configuredBaseUrl),
        api: "ollama",
        models: await discoverOllamaModels(configuredBaseUrl, opts),
    };
}
125
/**
 * Hugging Face provider. With a discovery API key, models are discovered
 * live; without one, the bundled static catalog is used.
 */
export async function buildHuggingfaceProvider(discoveryApiKey) {
    const secret = discoveryApiKey?.trim() ?? "";
    let models;
    if (secret === "") {
        models = HUGGINGFACE_MODEL_CATALOG.map(buildHuggingfaceModelDefinition);
    }
    else {
        models = await discoverHuggingfaceModels(secret);
    }
    return {
        baseUrl: HUGGINGFACE_BASE_URL,
        api: "openai-completions",
        models,
    };
}
136
/** Vercel AI Gateway provider (Anthropic Messages API shape), models discovered live. */
export async function buildVercelAiGatewayProvider() {
    const models = await discoverVercelAiGatewayModels();
    return {
        baseUrl: VERCEL_AI_GATEWAY_BASE_URL,
        api: "anthropic-messages",
        models,
    };
}
143
/** vLLM provider: falls back to the conventional local endpoint when unconfigured. */
export async function buildVllmProvider(params) {
    const configured = params?.baseUrl?.trim();
    const baseUrl = (configured || VLLM_BASE_URL).replace(/\/+$/, "");
    const models = await discoverOpenAICompatibleLocalModels({
        baseUrl,
        apiKey: params?.apiKey,
        label: "vLLM",
    });
    return {
        baseUrl,
        api: "openai-completions",
        models,
    };
}
156
/** SGLang provider: falls back to the conventional local endpoint when unconfigured. */
export async function buildSglangProvider(params) {
    const configured = params?.baseUrl?.trim();
    const baseUrl = (configured || SGLANG_BASE_URL).replace(/\/+$/, "");
    const models = await discoverOpenAICompatibleLocalModels({
        baseUrl,
        apiKey: params?.apiKey,
        label: "SGLang",
    });
    return {
        baseUrl,
        api: "openai-completions",
        models,
    };
}
169
/**
 * Build the Kilocode provider with dynamic model discovery from the gateway
 * API. Falls back to the static catalog on failure.
 */
export async function buildKilocodeProviderWithDiscovery() {
    return {
        baseUrl: KILOCODE_BASE_URL,
        api: "openai-completions",
        models: await discoverKilocodeModels(),
    };
}
@@ -0,0 +1,480 @@
1
+ import { KILOCODE_BASE_URL, KILOCODE_DEFAULT_CONTEXT_WINDOW, KILOCODE_DEFAULT_COST, KILOCODE_DEFAULT_MAX_TOKENS, KILOCODE_MODEL_CATALOG, } from "../providers/kilocode-shared.js";
2
+ import { buildBytePlusModelDefinition, BYTEPLUS_BASE_URL, BYTEPLUS_MODEL_CATALOG, BYTEPLUS_CODING_BASE_URL, BYTEPLUS_CODING_MODEL_CATALOG, } from "./byteplus-models.js";
3
+ import { buildDoubaoModelDefinition, DOUBAO_BASE_URL, DOUBAO_MODEL_CATALOG, DOUBAO_CODING_BASE_URL, DOUBAO_CODING_MODEL_CATALOG, } from "./doubao-models.js";
4
+ import { buildSyntheticModelDefinition, SYNTHETIC_BASE_URL, SYNTHETIC_MODEL_CATALOG, } from "./synthetic-models.js";
5
+ import { TOGETHER_BASE_URL, TOGETHER_MODEL_CATALOG, buildTogetherModelDefinition, } from "./together-models.js";
6
// MiniMax's Anthropic-compatible gateway endpoint.
const MINIMAX_PORTAL_BASE_URL = "https://api.minimax.io/anthropic";
// Flagship text model and the vision model exposed by default.
const MINIMAX_DEFAULT_MODEL_ID = "MiniMax-M2.5";
const MINIMAX_DEFAULT_VISION_MODEL_ID = "MiniMax-VL-01";
const MINIMAX_DEFAULT_CONTEXT_WINDOW = 200000;
const MINIMAX_DEFAULT_MAX_TOKENS = 8192;
// Per-token pricing shared by every MiniMax model entry
// (presumably USD per million tokens — TODO confirm unit).
const MINIMAX_API_COST = {
    input: 0.3,
    output: 1.2,
    cacheRead: 0.03,
    cacheWrite: 0.12,
};
17
/**
 * Assemble a MiniMax model definition, applying the shared pricing and
 * context/output limits to the caller-supplied identity fields.
 */
function buildMinimaxModel(params) {
    const { id, name, reasoning, input } = params;
    return {
        id,
        name,
        reasoning,
        input,
        cost: MINIMAX_API_COST,
        contextWindow: MINIMAX_DEFAULT_CONTEXT_WINDOW,
        maxTokens: MINIMAX_DEFAULT_MAX_TOKENS,
    };
}
28
/** Convenience wrapper: a MiniMax model definition restricted to text input. */
function buildMinimaxTextModel(params) {
    const textParams = { ...params, input: ["text"] };
    return buildMinimaxModel(textParams);
}
31
// --- Xiaomi MiMo (Anthropic-compatible endpoint) ---
const XIAOMI_BASE_URL = "https://api.xiaomimimo.com/anthropic";
export const XIAOMI_DEFAULT_MODEL_ID = "mimo-v2-flash";
const XIAOMI_DEFAULT_CONTEXT_WINDOW = 262144;
const XIAOMI_DEFAULT_MAX_TOKENS = 8192;
// Zeroed cost entries: pricing is not modeled for this provider.
const XIAOMI_DEFAULT_COST = {
    input: 0,
    output: 0,
    cacheRead: 0,
    cacheWrite: 0,
};
// --- Moonshot (Kimi) OpenAI-compatible endpoint ---
const MOONSHOT_BASE_URL = "https://api.moonshot.ai/v1";
const MOONSHOT_DEFAULT_MODEL_ID = "kimi-k2.5";
const MOONSHOT_DEFAULT_CONTEXT_WINDOW = 256000;
const MOONSHOT_DEFAULT_MAX_TOKENS = 8192;
const MOONSHOT_DEFAULT_COST = {
    input: 0,
    output: 0,
    cacheRead: 0,
    cacheWrite: 0,
};
// --- Kimi for Coding (Anthropic-compatible; requires a pinned User-Agent) ---
const KIMI_CODING_BASE_URL = "https://api.kimi.com/coding/";
const KIMI_CODING_USER_AGENT = "claude-code/0.1.0";
const KIMI_CODING_DEFAULT_MODEL_ID = "k2p5";
const KIMI_CODING_DEFAULT_CONTEXT_WINDOW = 262144;
const KIMI_CODING_DEFAULT_MAX_TOKENS = 32768;
const KIMI_CODING_DEFAULT_COST = {
    input: 0,
    output: 0,
    cacheRead: 0,
    cacheWrite: 0,
};
// --- Qwen portal (fixed model aliases, OpenAI-compatible) ---
const QWEN_PORTAL_BASE_URL = "https://portal.qwen.ai/v1";
const QWEN_PORTAL_DEFAULT_CONTEXT_WINDOW = 128000;
const QWEN_PORTAL_DEFAULT_MAX_TOKENS = 8192;
const QWEN_PORTAL_DEFAULT_COST = {
    input: 0,
    output: 0,
    cacheRead: 0,
    cacheWrite: 0,
};
// --- OpenRouter (OpenAI-compatible aggregator; "auto" routes per request) ---
const OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1";
const OPENROUTER_DEFAULT_MODEL_ID = "auto";
const OPENROUTER_DEFAULT_CONTEXT_WINDOW = 200000;
const OPENROUTER_DEFAULT_MAX_TOKENS = 8192;
const OPENROUTER_DEFAULT_COST = {
    input: 0,
    output: 0,
    cacheRead: 0,
    cacheWrite: 0,
};
// --- Baidu Qianfan (OpenAI-compatible) ---
export const QIANFAN_BASE_URL = "https://qianfan.baidubce.com/v2";
export const QIANFAN_DEFAULT_MODEL_ID = "deepseek-v3.2";
const QIANFAN_DEFAULT_CONTEXT_WINDOW = 98304;
const QIANFAN_DEFAULT_MAX_TOKENS = 32768;
const QIANFAN_DEFAULT_COST = {
    input: 0,
    output: 0,
    cacheRead: 0,
    cacheWrite: 0,
};
// --- Alibaba ModelStudio coding endpoint (OpenAI-compatible) ---
export const MODELSTUDIO_BASE_URL = "https://coding-intl.dashscope.aliyuncs.com/v1";
export const MODELSTUDIO_DEFAULT_MODEL_ID = "qwen3.5-plus";
const MODELSTUDIO_DEFAULT_COST = {
    input: 0,
    output: 0,
    cacheRead: 0,
    cacheWrite: 0,
};
99
// Static catalog for the ModelStudio coding endpoint. Context windows and
// output limits are hard-coded per model; every entry carries the zeroed
// MODELSTUDIO_DEFAULT_COST (billing presumably handled upstream — TODO confirm).
const MODELSTUDIO_MODEL_CATALOG = [
    {
        id: "qwen3.5-plus",
        name: "qwen3.5-plus",
        reasoning: false,
        input: ["text", "image"],
        cost: MODELSTUDIO_DEFAULT_COST,
        contextWindow: 1_000_000,
        maxTokens: 65_536,
    },
    {
        id: "qwen3-max-2026-01-23",
        name: "qwen3-max-2026-01-23",
        reasoning: false,
        input: ["text"],
        cost: MODELSTUDIO_DEFAULT_COST,
        contextWindow: 262_144,
        maxTokens: 65_536,
    },
    {
        id: "qwen3-coder-next",
        name: "qwen3-coder-next",
        reasoning: false,
        input: ["text"],
        cost: MODELSTUDIO_DEFAULT_COST,
        contextWindow: 262_144,
        maxTokens: 65_536,
    },
    {
        id: "qwen3-coder-plus",
        name: "qwen3-coder-plus",
        reasoning: false,
        input: ["text"],
        cost: MODELSTUDIO_DEFAULT_COST,
        contextWindow: 1_000_000,
        maxTokens: 65_536,
    },
    {
        id: "MiniMax-M2.5",
        name: "MiniMax-M2.5",
        reasoning: true,
        input: ["text"],
        cost: MODELSTUDIO_DEFAULT_COST,
        contextWindow: 1_000_000,
        maxTokens: 65_536,
    },
    {
        id: "glm-5",
        name: "glm-5",
        reasoning: false,
        input: ["text"],
        cost: MODELSTUDIO_DEFAULT_COST,
        contextWindow: 202_752,
        maxTokens: 16_384,
    },
    {
        id: "glm-4.7",
        name: "glm-4.7",
        reasoning: false,
        input: ["text"],
        cost: MODELSTUDIO_DEFAULT_COST,
        contextWindow: 202_752,
        maxTokens: 16_384,
    },
    {
        id: "kimi-k2.5",
        name: "kimi-k2.5",
        reasoning: false,
        input: ["text", "image"],
        cost: MODELSTUDIO_DEFAULT_COST,
        contextWindow: 262_144,
        maxTokens: 32_768,
    },
];
173
// --- NVIDIA NIM (OpenAI-compatible integrate endpoint) ---
const NVIDIA_BASE_URL = "https://integrate.api.nvidia.com/v1";
const NVIDIA_DEFAULT_MODEL_ID = "nvidia/llama-3.1-nemotron-70b-instruct";
const NVIDIA_DEFAULT_CONTEXT_WINDOW = 131072;
const NVIDIA_DEFAULT_MAX_TOKENS = 4096;
// Zeroed cost entries: pricing is not modeled for this provider.
const NVIDIA_DEFAULT_COST = {
    input: 0,
    output: 0,
    cacheRead: 0,
    cacheWrite: 0,
};
// ChatGPT backend used by the Codex responses API.
const OPENAI_CODEX_BASE_URL = "https://chatgpt.com/backend-api";
184
/**
 * MiniMax provider definition: Anthropic Messages API with bearer auth,
 * exposing the vision model plus the M2.5 text variants.
 *
 * Consistency fix: the flagship text model references
 * MINIMAX_DEFAULT_MODEL_ID (same value as the previous "MiniMax-M2.5"
 * literal), matching how buildMinimaxPortalProvider spells it.
 */
export function buildMinimaxProvider() {
    return {
        baseUrl: MINIMAX_PORTAL_BASE_URL,
        api: "anthropic-messages",
        authHeader: true,
        models: [
            buildMinimaxModel({
                id: MINIMAX_DEFAULT_VISION_MODEL_ID,
                name: "MiniMax VL 01",
                reasoning: false,
                input: ["text", "image"],
            }),
            buildMinimaxTextModel({
                id: MINIMAX_DEFAULT_MODEL_ID,
                name: "MiniMax M2.5",
                reasoning: true,
            }),
            buildMinimaxTextModel({
                id: "MiniMax-M2.5-highspeed",
                name: "MiniMax M2.5 Highspeed",
                reasoning: true,
            }),
        ],
    };
}
209
/**
 * Portal variant of the MiniMax provider definition: same endpoint, auth
 * style, and model lineup as buildMinimaxProvider.
 */
export function buildMinimaxPortalProvider() {
    const visionModel = buildMinimaxModel({
        id: MINIMAX_DEFAULT_VISION_MODEL_ID,
        name: "MiniMax VL 01",
        reasoning: false,
        input: ["text", "image"],
    });
    const textModels = [
        buildMinimaxTextModel({
            id: MINIMAX_DEFAULT_MODEL_ID,
            name: "MiniMax M2.5",
            reasoning: true,
        }),
        buildMinimaxTextModel({
            id: "MiniMax-M2.5-highspeed",
            name: "MiniMax M2.5 Highspeed",
            reasoning: true,
        }),
    ];
    return {
        baseUrl: MINIMAX_PORTAL_BASE_URL,
        api: "anthropic-messages",
        authHeader: true,
        models: [visionModel, ...textModels],
    };
}
234
/** Moonshot provider exposing the single default Kimi K2.5 model. */
export function buildMoonshotProvider() {
    const kimiModel = {
        id: MOONSHOT_DEFAULT_MODEL_ID,
        name: "Kimi K2.5",
        reasoning: false,
        input: ["text", "image"],
        cost: MOONSHOT_DEFAULT_COST,
        contextWindow: MOONSHOT_DEFAULT_CONTEXT_WINDOW,
        maxTokens: MOONSHOT_DEFAULT_MAX_TOKENS,
    };
    return {
        baseUrl: MOONSHOT_BASE_URL,
        api: "openai-completions",
        models: [kimiModel],
    };
}
251
/**
 * Kimi-for-Coding provider: Anthropic-compatible endpoint that requires the
 * pinned KIMI_CODING_USER_AGENT header.
 */
export function buildKimiCodingProvider() {
    const codingModel = {
        id: KIMI_CODING_DEFAULT_MODEL_ID,
        name: "Kimi for Coding",
        reasoning: true,
        input: ["text", "image"],
        cost: KIMI_CODING_DEFAULT_COST,
        contextWindow: KIMI_CODING_DEFAULT_CONTEXT_WINDOW,
        maxTokens: KIMI_CODING_DEFAULT_MAX_TOKENS,
    };
    return {
        baseUrl: KIMI_CODING_BASE_URL,
        api: "anthropic-messages",
        headers: {
            "User-Agent": KIMI_CODING_USER_AGENT,
        },
        models: [codingModel],
    };
}
271
/** Qwen portal provider with the two fixed portal model aliases. */
export function buildQwenPortalProvider() {
    // Local factory: both aliases share cost, limits, and reasoning flag.
    const portalModel = (id, name, input) => ({
        id,
        name,
        reasoning: false,
        input,
        cost: QWEN_PORTAL_DEFAULT_COST,
        contextWindow: QWEN_PORTAL_DEFAULT_CONTEXT_WINDOW,
        maxTokens: QWEN_PORTAL_DEFAULT_MAX_TOKENS,
    });
    return {
        baseUrl: QWEN_PORTAL_BASE_URL,
        api: "openai-completions",
        models: [
            portalModel("coder-model", "Qwen Coder", ["text"]),
            portalModel("vision-model", "Qwen Vision", ["text", "image"]),
        ],
    };
}
297
/** Synthetic provider: static catalog mapped through its model builder. */
export function buildSyntheticProvider() {
    const models = SYNTHETIC_MODEL_CATALOG.map(buildSyntheticModelDefinition);
    return {
        baseUrl: SYNTHETIC_BASE_URL,
        api: "anthropic-messages",
        models,
    };
}
304
/** Doubao provider: static catalog mapped through its model builder. */
export function buildDoubaoProvider() {
    const models = DOUBAO_MODEL_CATALOG.map(buildDoubaoModelDefinition);
    return {
        baseUrl: DOUBAO_BASE_URL,
        api: "openai-completions",
        models,
    };
}
311
/** Doubao coding provider: coding-endpoint catalog, same model builder. */
export function buildDoubaoCodingProvider() {
    const models = DOUBAO_CODING_MODEL_CATALOG.map(buildDoubaoModelDefinition);
    return {
        baseUrl: DOUBAO_CODING_BASE_URL,
        api: "openai-completions",
        models,
    };
}
318
/** BytePlus provider: static catalog mapped through its model builder. */
export function buildBytePlusProvider() {
    const models = BYTEPLUS_MODEL_CATALOG.map(buildBytePlusModelDefinition);
    return {
        baseUrl: BYTEPLUS_BASE_URL,
        api: "openai-completions",
        models,
    };
}
325
/** BytePlus coding provider: coding-endpoint catalog, same model builder. */
export function buildBytePlusCodingProvider() {
    const models = BYTEPLUS_CODING_MODEL_CATALOG.map(buildBytePlusModelDefinition);
    return {
        baseUrl: BYTEPLUS_CODING_BASE_URL,
        api: "openai-completions",
        models,
    };
}
332
/** Xiaomi MiMo provider with its single default flash model. */
export function buildXiaomiProvider() {
    const mimoFlash = {
        id: XIAOMI_DEFAULT_MODEL_ID,
        name: "Xiaomi MiMo V2 Flash",
        reasoning: false,
        input: ["text"],
        cost: XIAOMI_DEFAULT_COST,
        contextWindow: XIAOMI_DEFAULT_CONTEXT_WINDOW,
        maxTokens: XIAOMI_DEFAULT_MAX_TOKENS,
    };
    return {
        baseUrl: XIAOMI_BASE_URL,
        api: "anthropic-messages",
        models: [mimoFlash],
    };
}
349
/** Together AI provider: static catalog mapped through its model builder. */
export function buildTogetherProvider() {
    const models = TOGETHER_MODEL_CATALOG.map(buildTogetherModelDefinition);
    return {
        baseUrl: TOGETHER_BASE_URL,
        api: "openai-completions",
        models,
    };
}
356
/** OpenRouter provider: the auto-router plus the current alpha models. */
export function buildOpenrouterProvider() {
    const autoRouter = {
        id: OPENROUTER_DEFAULT_MODEL_ID,
        name: "OpenRouter Auto",
        reasoning: false,
        input: ["text", "image"],
        cost: OPENROUTER_DEFAULT_COST,
        contextWindow: OPENROUTER_DEFAULT_CONTEXT_WINDOW,
        maxTokens: OPENROUTER_DEFAULT_MAX_TOKENS,
    };
    const hunterAlpha = {
        id: "openrouter/hunter-alpha",
        name: "Hunter Alpha",
        reasoning: true,
        input: ["text"],
        cost: OPENROUTER_DEFAULT_COST,
        contextWindow: 1048576,
        maxTokens: 65536,
    };
    const healerAlpha = {
        id: "openrouter/healer-alpha",
        name: "Healer Alpha",
        reasoning: true,
        input: ["text", "image"],
        cost: OPENROUTER_DEFAULT_COST,
        contextWindow: 262144,
        maxTokens: 65536,
    };
    return {
        baseUrl: OPENROUTER_BASE_URL,
        api: "openai-completions",
        models: [autoRouter, hunterAlpha, healerAlpha],
    };
}
391
/**
 * OpenAI Codex provider definition. No static model catalog is bundled;
 * the models array is intentionally empty here.
 */
export function buildOpenAICodexProvider() {
    const definition = {
        baseUrl: OPENAI_CODEX_BASE_URL,
        api: "openai-codex-responses",
        models: [],
    };
    return definition;
}
398
/** Baidu Qianfan provider: DeepSeek default plus the ERNIE thinking preview. */
export function buildQianfanProvider() {
    const deepseek = {
        id: QIANFAN_DEFAULT_MODEL_ID,
        name: "DEEPSEEK V3.2",
        reasoning: true,
        input: ["text"],
        cost: QIANFAN_DEFAULT_COST,
        contextWindow: QIANFAN_DEFAULT_CONTEXT_WINDOW,
        maxTokens: QIANFAN_DEFAULT_MAX_TOKENS,
    };
    const erniePreview = {
        id: "ernie-5.0-thinking-preview",
        name: "ERNIE-5.0-Thinking-Preview",
        reasoning: true,
        input: ["text", "image"],
        cost: QIANFAN_DEFAULT_COST,
        contextWindow: 119000,
        maxTokens: 64000,
    };
    return {
        baseUrl: QIANFAN_BASE_URL,
        api: "openai-completions",
        models: [deepseek, erniePreview],
    };
}
424
/** ModelStudio provider: shallow-copies each static catalog entry. */
export function buildModelStudioProvider() {
    const models = MODELSTUDIO_MODEL_CATALOG.map((entry) => ({ ...entry }));
    return {
        baseUrl: MODELSTUDIO_BASE_URL,
        api: "openai-completions",
        models,
    };
}
431
/** NVIDIA NIM provider with its three hard-coded hosted models. */
export function buildNvidiaProvider() {
    const nemotron70b = {
        id: NVIDIA_DEFAULT_MODEL_ID,
        name: "NVIDIA Llama 3.1 Nemotron 70B Instruct",
        reasoning: false,
        input: ["text"],
        cost: NVIDIA_DEFAULT_COST,
        contextWindow: NVIDIA_DEFAULT_CONTEXT_WINDOW,
        maxTokens: NVIDIA_DEFAULT_MAX_TOKENS,
    };
    const llama33 = {
        id: "meta/llama-3.3-70b-instruct",
        name: "Meta Llama 3.3 70B Instruct",
        reasoning: false,
        input: ["text"],
        cost: NVIDIA_DEFAULT_COST,
        contextWindow: 131072,
        maxTokens: 4096,
    };
    const minitron8b = {
        id: "nvidia/mistral-nemo-minitron-8b-8k-instruct",
        name: "NVIDIA Mistral NeMo Minitron 8B Instruct",
        reasoning: false,
        input: ["text"],
        cost: NVIDIA_DEFAULT_COST,
        contextWindow: 8192,
        maxTokens: 2048,
    };
    return {
        baseUrl: NVIDIA_BASE_URL,
        api: "openai-completions",
        models: [nemotron70b, llama33, minitron8b],
    };
}
466
/**
 * Kilocode provider built from the static catalog, filling missing context
 * and output limits from the shared Kilocode defaults.
 */
export function buildKilocodeProvider() {
    const models = KILOCODE_MODEL_CATALOG.map((entry) => ({
        id: entry.id,
        name: entry.name,
        reasoning: entry.reasoning,
        input: entry.input,
        cost: KILOCODE_DEFAULT_COST,
        contextWindow: entry.contextWindow ?? KILOCODE_DEFAULT_CONTEXT_WINDOW,
        maxTokens: entry.maxTokens ?? KILOCODE_DEFAULT_MAX_TOKENS,
    }));
    return {
        baseUrl: KILOCODE_BASE_URL,
        api: "openai-completions",
        models,
    };
}
@@ -0,0 +1,15 @@
1
+ import { INTERNAL_MESSAGE_CHANNEL } from "../../utils/message-channel.js";
2
/**
 * Decide the typing-indicator policy for a run and whether typing should be
 * suppressed outright.
 *
 * Precedence: heartbeat > internal webchat channel > system event >
 * explicitly requested policy > "auto". Typing is suppressed for any of the
 * first three policies, or when the caller passes suppressTyping === true.
 */
export function resolveRunTypingPolicy(params) {
    let typingPolicy;
    if (params.isHeartbeat) {
        typingPolicy = "heartbeat";
    }
    else if (params.originatingChannel === INTERNAL_MESSAGE_CHANNEL) {
        typingPolicy = "internal_webchat";
    }
    else if (params.systemEvent) {
        typingPolicy = "system_event";
    }
    else {
        typingPolicy = params.requestedPolicy ?? "auto";
    }
    const silentPolicies = ["heartbeat", "system_event", "internal_webchat"];
    const suppressTyping = params.suppressTyping === true || silentPolicies.includes(typingPolicy);
    return { typingPolicy, suppressTyping };
}
@@ -1,5 +1,5 @@
1
1
  {
2
- "version": "2026.3.23",
3
- "commit": "127528099de2f9958448f0d468e1a91bdf3bb13e",
4
- "builtAt": "2026-03-16T05:19:01.312Z"
2
+ "version": "2026.3.24",
3
+ "commit": "30d8108895641ae634c05fb74351706c65b83308",
4
+ "builtAt": "2026-03-16T18:27:37.980Z"
5
5
  }