oricore 1.4.1 → 1.5.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/dist/{chunk-SXDGT4YB.js → chunk-D5X6YFSK.js} +1814 -457
  2. package/dist/chunk-D5X6YFSK.js.map +1 -0
  3. package/dist/{chunk-XKZSVWRX.js → chunk-MZNH54NB.js} +375 -171
  4. package/dist/chunk-MZNH54NB.js.map +1 -0
  5. package/dist/{chunk-AXJGNOSQ.js → chunk-XBRIUBK5.js} +2 -2
  6. package/dist/history-FS6CASR6.js +8 -0
  7. package/dist/index.cjs +2424 -585
  8. package/dist/index.cjs.map +1 -1
  9. package/dist/index.d.cts +240 -4
  10. package/dist/index.d.ts +240 -4
  11. package/dist/index.js +298 -14
  12. package/dist/index.js.map +1 -1
  13. package/dist/{session-34VFUDZB.js → session-W73HJB5Q.js} +4 -4
  14. package/dist/undici-NSB7IUB7.js +5 -0
  15. package/package.json +2 -1
  16. package/src/core/loop.ts +79 -25
  17. package/src/core/model/models.ts +69 -0
  18. package/src/core/model/providers.ts +76 -37
  19. package/src/core/model/resolution.ts +13 -0
  20. package/src/index.ts +12 -0
  21. package/src/mcp/mcp.ts +4 -1
  22. package/src/skill/bundled.ts +225 -0
  23. package/src/skill/skill.ts +278 -7
  24. package/src/tools/tool.ts +14 -4
  25. package/src/tools/tools/skill.ts +86 -8
  26. package/src/utils/messageNormalization.ts +18 -0
  27. package/dist/chunk-SXDGT4YB.js.map +0 -1
  28. package/dist/chunk-XKZSVWRX.js.map +0 -1
  29. package/dist/history-3JS745YJ.js +0 -8
  30. package/dist/undici-DJO5UB2C.js +0 -5
  31. /package/dist/{chunk-AXJGNOSQ.js.map → chunk-XBRIUBK5.js.map} +0 -0
  32. /package/dist/{history-3JS745YJ.js.map → history-FS6CASR6.js.map} +0 -0
  33. /package/dist/{session-34VFUDZB.js.map → session-W73HJB5Q.js.map} +0 -0
  34. /package/dist/{undici-DJO5UB2C.js.map → undici-NSB7IUB7.js.map} +0 -0
@@ -1,7 +1,7 @@
1
1
  import {
2
2
  __toESM,
3
3
  require_undici
4
- } from "./chunk-SXDGT4YB.js";
4
+ } from "./chunk-D5X6YFSK.js";
5
5
 
6
6
  // src/core/history.ts
7
7
  import createDebug7 from "debug";
@@ -929,6 +929,7 @@ async function runLoop(opts) {
929
929
  request: result.request,
930
930
  response: result.response
931
931
  });
932
+ let finishChunkReceived = false;
932
933
  for await (const chunk of result.stream) {
933
934
  if (opts.signal?.aborted) {
934
935
  return createCancelError();
@@ -960,6 +961,7 @@ async function runLoop(opts) {
960
961
  });
961
962
  break;
962
963
  case "finish":
964
+ finishChunkReceived = true;
963
965
  lastUsage = Usage.fromEventUsage(chunk.usage);
964
966
  totalUsage.add(lastUsage);
965
967
  if (toolCalls.length === 0 && text.trim() === "") {
@@ -998,6 +1000,13 @@ async function runLoop(opts) {
998
1000
  break;
999
1001
  }
1000
1002
  }
1003
+ if (!finishChunkReceived && toolCalls.length === 0 && text.trim() === "") {
1004
+ const error = new Error(
1005
+ "Empty response: stream ended without any chunks"
1006
+ );
1007
+ error.isRetryable = true;
1008
+ throw error;
1009
+ }
1001
1010
  break;
1002
1011
  } catch (error) {
1003
1012
  const nextRetryCount = retryCount + 1;
@@ -1165,6 +1174,8 @@ async function runLoop(opts) {
1165
1174
  }
1166
1175
  }
1167
1176
  };
1177
+ const approvedToolUses = [];
1178
+ let earlyReturn = null;
1168
1179
  for (const toolCall of toolCalls) {
1169
1180
  let toolUse = {
1170
1181
  name: toolCall.toolName,
@@ -1188,25 +1199,10 @@ async function runLoop(opts) {
1188
1199
  }
1189
1200
  }
1190
1201
  if (approved) {
1191
- toolCallsCount++;
1192
1202
  if (updatedParams) {
1193
1203
  toolUse.params = { ...toolUse.params, ...updatedParams };
1194
1204
  }
1195
- let toolResult = await opts.tools.invoke(
1196
- toolUse.name,
1197
- JSON.stringify(toolUse.params),
1198
- toolUse.callId
1199
- );
1200
- if (opts.onToolResult) {
1201
- toolResult = await opts.onToolResult(toolUse, toolResult, approved);
1202
- }
1203
- toolResults.push({
1204
- toolCallId: toolUse.callId,
1205
- toolName: toolUse.name,
1206
- input: toolUse.params,
1207
- result: toolResult
1208
- });
1209
- turnsCount--;
1205
+ approvedToolUses.push(toolUse);
1210
1206
  } else {
1211
1207
  let message = "Error: Tool execution was denied by user.";
1212
1208
  if (denyReason) {
@@ -1217,7 +1213,7 @@ async function runLoop(opts) {
1217
1213
  isError: true
1218
1214
  };
1219
1215
  if (opts.onToolResult) {
1220
- toolResult = await opts.onToolResult(toolUse, toolResult, approved);
1216
+ toolResult = await opts.onToolResult(toolUse, toolResult, false);
1221
1217
  }
1222
1218
  toolResults.push({
1223
1219
  toolCallId: toolUse.callId,
@@ -1225,8 +1221,8 @@ async function runLoop(opts) {
1225
1221
  input: toolUse.params,
1226
1222
  result: toolResult
1227
1223
  });
1228
- await addDeniedResultsForRemainingTools();
1229
1224
  if (!denyReason) {
1225
+ await addDeniedResultsForRemainingTools();
1230
1226
  await history.addMessage({
1231
1227
  role: "tool",
1232
1228
  content: toolResults.map(
@@ -1238,7 +1234,7 @@ async function runLoop(opts) {
1238
1234
  )
1239
1235
  )
1240
1236
  });
1241
- return {
1237
+ earlyReturn = {
1242
1238
  success: false,
1243
1239
  error: {
1244
1240
  type: "tool_denied",
@@ -1250,9 +1246,60 @@ async function runLoop(opts) {
1250
1246
  }
1251
1247
  }
1252
1248
  };
1253
- } else {
1254
1249
  break;
1255
1250
  }
1251
+ await addDeniedResultsForRemainingTools();
1252
+ break;
1253
+ }
1254
+ }
1255
+ if (earlyReturn) {
1256
+ return earlyReturn;
1257
+ }
1258
+ if (approvedToolUses.length > 0) {
1259
+ const executionResults = await Promise.allSettled(
1260
+ approvedToolUses.map(async (toolUse) => {
1261
+ let toolResult = await opts.tools.invoke(
1262
+ toolUse.name,
1263
+ JSON.stringify(toolUse.params),
1264
+ toolUse.callId
1265
+ );
1266
+ if (opts.onToolResult) {
1267
+ toolResult = await opts.onToolResult(toolUse, toolResult, true);
1268
+ }
1269
+ return {
1270
+ toolCallId: toolUse.callId,
1271
+ toolName: toolUse.name,
1272
+ input: toolUse.params,
1273
+ result: toolResult
1274
+ };
1275
+ })
1276
+ );
1277
+ toolCallsCount += approvedToolUses.length;
1278
+ turnsCount -= approvedToolUses.length;
1279
+ for (let i = 0; i < executionResults.length; i++) {
1280
+ const settledResult = executionResults[i];
1281
+ if (settledResult.status === "fulfilled") {
1282
+ toolResults.push(settledResult.value);
1283
+ } else {
1284
+ const failedToolUse = approvedToolUses[i];
1285
+ let errorResult = {
1286
+ llmContent: `Tool execution error: ${settledResult.reason instanceof Error ? settledResult.reason.message : String(settledResult.reason)}`,
1287
+ isError: true
1288
+ };
1289
+ if (opts.onToolResult) {
1290
+ errorResult = await opts.onToolResult(
1291
+ failedToolUse,
1292
+ errorResult,
1293
+ true
1294
+ );
1295
+ }
1296
+ toolResults.push({
1297
+ toolCallId: failedToolUse.callId,
1298
+ toolName: failedToolUse.name,
1299
+ input: failedToolUse.params,
1300
+ result: errorResult
1301
+ });
1302
+ }
1256
1303
  }
1257
1304
  }
1258
1305
  if (opts.signal?.aborted) {
@@ -1614,6 +1661,22 @@ var models = {
1614
1661
  open_weights: false,
1615
1662
  limit: { context: 2e5, output: 65536 }
1616
1663
  },
1664
+ "gemini-3.1-pro-preview": {
1665
+ name: "Gemini 3.1 Pro Preview",
1666
+ attachment: true,
1667
+ reasoning: true,
1668
+ temperature: true,
1669
+ tool_call: true,
1670
+ knowledge: "2025-01",
1671
+ release_date: "2026-02-19",
1672
+ last_updated: "2026-02-19",
1673
+ modalities: {
1674
+ input: ["text", "image", "audio", "video", "pdf"],
1675
+ output: ["text"]
1676
+ },
1677
+ open_weights: false,
1678
+ limit: { context: 1048576, output: 65536 }
1679
+ },
1617
1680
  "gemini-3-flash-preview": {
1618
1681
  name: "Gemini 3 Flash Preview",
1619
1682
  attachment: true,
@@ -2095,6 +2158,19 @@ var models = {
2095
2158
  open_weights: true,
2096
2159
  limit: { context: 204800, output: 131072 }
2097
2160
  },
2161
+ "glm-5": {
2162
+ name: "GLM-5",
2163
+ attachment: false,
2164
+ reasoning: true,
2165
+ temperature: true,
2166
+ tool_call: true,
2167
+ knowledge: "2025-06",
2168
+ release_date: "2026-02-10",
2169
+ last_updated: "2026-02-10",
2170
+ modalities: { input: ["text"], output: ["text"] },
2171
+ open_weights: true,
2172
+ limit: { context: 262144, output: 131072 }
2173
+ },
2098
2174
  "sonoma-dusk-alpha": {
2099
2175
  name: "Sonoma Dusk Alpha",
2100
2176
  attachment: true,
@@ -2186,6 +2262,20 @@ var models = {
2186
2262
  open_weights: false,
2187
2263
  limit: { context: 2e5, output: 128e3 }
2188
2264
  },
2265
+ "claude-sonnet-4-6": {
2266
+ name: "Claude Sonnet 4.6",
2267
+ shortName: "Sonnet 4.6",
2268
+ attachment: true,
2269
+ reasoning: true,
2270
+ temperature: true,
2271
+ tool_call: true,
2272
+ knowledge: "2025-07-31",
2273
+ release_date: "2026-02-17",
2274
+ last_updated: "2026-02-17",
2275
+ modalities: { input: ["text", "image", "pdf"], output: ["text"] },
2276
+ open_weights: false,
2277
+ limit: { context: 1e6, output: 64e3 }
2278
+ },
2189
2279
  "ling-1t": {
2190
2280
  name: "InclusionAI Ling-1T",
2191
2281
  attachment: true,
@@ -2328,6 +2418,32 @@ var models = {
2328
2418
  modalities: { input: ["text"], output: ["text"] },
2329
2419
  open_weights: true,
2330
2420
  limit: { context: 204800, output: 131072 }
2421
+ },
2422
+ "minimax-m2.5": {
2423
+ name: "MiniMax M2.5",
2424
+ attachment: false,
2425
+ reasoning: true,
2426
+ temperature: true,
2427
+ tool_call: true,
2428
+ knowledge: "",
2429
+ release_date: "2026-02-13",
2430
+ last_updated: "2026-02-13",
2431
+ modalities: { input: ["text"], output: ["text"] },
2432
+ open_weights: true,
2433
+ limit: { context: 204800, output: 131072 }
2434
+ },
2435
+ "minimax-m2.7": {
2436
+ name: "MiniMax M2.7",
2437
+ attachment: false,
2438
+ reasoning: true,
2439
+ temperature: true,
2440
+ tool_call: true,
2441
+ knowledge: "",
2442
+ release_date: "2026-03-18",
2443
+ last_updated: "2026-03-18",
2444
+ modalities: { input: ["text"], output: ["text"] },
2445
+ open_weights: true,
2446
+ limit: { context: 204800, output: 131072 }
2331
2447
  }
2332
2448
  };
2333
2449
 
@@ -2444,7 +2560,7 @@ function createProxyFetch(proxyUrl) {
2444
2560
  }
2445
2561
  return async (input, init) => {
2446
2562
  if (!undiciFetch) {
2447
- undiciFetch = (await import("./undici-DJO5UB2C.js")).fetch;
2563
+ undiciFetch = (await import("./undici-NSB7IUB7.js")).fetch;
2448
2564
  }
2449
2565
  let url;
2450
2566
  let requestInit = init;
@@ -2491,36 +2607,6 @@ function withProxyConfig(config, provider) {
2491
2607
  return result;
2492
2608
  }
2493
2609
 
2494
- // src/utils/mergeSystemMessagesMiddleware.ts
2495
- var mergeSystemMessagesMiddleware = {
2496
- specificationVersion: "v3",
2497
- transformParams: async ({ params }) => {
2498
- const mergedPrompt = [];
2499
- let pendingSystemContent = [];
2500
- for (const msg of params.prompt) {
2501
- if (msg.role === "system") {
2502
- pendingSystemContent.push(msg.content);
2503
- } else {
2504
- if (pendingSystemContent.length > 0) {
2505
- mergedPrompt.push({
2506
- role: "system",
2507
- content: pendingSystemContent.join("\n\n")
2508
- });
2509
- pendingSystemContent = [];
2510
- }
2511
- mergedPrompt.push(msg);
2512
- }
2513
- }
2514
- if (pendingSystemContent.length > 0) {
2515
- mergedPrompt.push({
2516
- role: "system",
2517
- content: pendingSystemContent.join("\n\n")
2518
- });
2519
- }
2520
- return { ...params, prompt: mergedPrompt };
2521
- }
2522
- };
2523
-
2524
2610
  // src/core/model/providers.ts
2525
2611
  var defaultModelCreator = (name, provider) => {
2526
2612
  if (provider.id !== "openai") {
@@ -2646,6 +2732,7 @@ var providers = {
2646
2732
  "gemini-2.5-flash-lite": models["gemini-2.5-flash-lite-preview-06-17"],
2647
2733
  "gemini-2.5-pro": models["gemini-2.5-pro"],
2648
2734
  "gemini-3-pro-preview": models["gemini-3-pro-preview"],
2735
+ "gemini-3.1-pro-preview": models["gemini-3.1-pro-preview"],
2649
2736
  "gemini-3-flash-preview": models["gemini-3-flash-preview"]
2650
2737
  },
2651
2738
  createModel(name, provider) {
@@ -2713,7 +2800,8 @@ var providers = {
2713
2800
  "claude-3-5-sonnet-20241022": models["claude-3-5-sonnet-20241022"],
2714
2801
  "claude-haiku-4-5": models["claude-haiku-4-5"],
2715
2802
  "claude-opus-4-5": models["claude-opus-4-5"],
2716
- "claude-opus-4-6": models["claude-opus-4-6"]
2803
+ "claude-opus-4-6": models["claude-opus-4-6"],
2804
+ "claude-sonnet-4-6": models["claude-sonnet-4-6"]
2717
2805
  },
2718
2806
  apiFormat: "anthropic",
2719
2807
  headers: {
@@ -2783,6 +2871,7 @@ var providers = {
2783
2871
  "anthropic/claude-opus-4": models["claude-4-opus"],
2784
2872
  "anthropic/claude-opus-4.1": models["claude-4.1-opus"],
2785
2873
  "anthropic/claude-opus-4.5": models["claude-opus-4-5"],
2874
+ "anthropic/claude-sonnet-4.6": models["claude-sonnet-4-6"],
2786
2875
  "anthropic/claude-opus-4.6": models["claude-opus-4-6"],
2787
2876
  "deepseek/deepseek-r1-0528": models["deepseek-r1-0528"],
2788
2877
  "deepseek/deepseek-chat-v3-0324": models["deepseek-v3-0324"],
@@ -2827,7 +2916,9 @@ var providers = {
2827
2916
  "z-ai/glm-4.6": models["glm-4.6"],
2828
2917
  "z-ai/glm-4.6v": models["glm-4.6v"],
2829
2918
  "z-ai/glm-4.7": models["glm-4.7"],
2919
+ "z-ai/glm-5": models["glm-5"],
2830
2920
  "minimax/minimax-m2": models["minimax-m2"],
2921
+ "minimax/minimax-m2.5": models["minimax-m2.5"],
2831
2922
  "openrouter/sherlock-dash-alpha": models["sherlock-dash-alpha"],
2832
2923
  "openrouter/sherlock-think-alpha": models["sherlock-think-alpha"],
2833
2924
  "xiaomi/mimo-v2-flash:free": models["mimo-v2-flash"]
@@ -2846,43 +2937,6 @@ var providers = {
2846
2937
  ).chat(name);
2847
2938
  }
2848
2939
  },
2849
- iflow: {
2850
- id: "iflow",
2851
- source: "built-in",
2852
- env: ["IFLOW_API_KEY"],
2853
- name: "iFlow",
2854
- api: "https://apis.iflow.cn/v1/",
2855
- doc: "https://iflow.cn/",
2856
- models: {
2857
- "qwen3-coder-plus": models["qwen3-coder-plus"],
2858
- "kimi-k2": models["kimi-k2"],
2859
- "kimi-k2-0905": models["kimi-k2-0905"],
2860
- "deepseek-v3": models["deepseek-v3-0324"],
2861
- "deepseek-v3.2": models["deepseek-v3-2-exp"],
2862
- "deepseek-r1": models["deepseek-r1-0528"],
2863
- "glm-4.6": models["glm-4.6"],
2864
- "glm-4.7": models["glm-4.7"],
2865
- "minimax-m2.1": models["minimax-m2.1"],
2866
- "qwen3-max": models["qwen3-max"]
2867
- },
2868
- createModel: createModelCreatorCompatible({
2869
- fetch: (url, options) => {
2870
- return fetch(url, {
2871
- ...options,
2872
- headers: {
2873
- ...options.headers,
2874
- "user-agent": "iFlow-Cli"
2875
- }
2876
- });
2877
- },
2878
- middlewares: [
2879
- mergeSystemMessagesMiddleware,
2880
- extractReasoningMiddleware({
2881
- tagName: "think"
2882
- })
2883
- ]
2884
- })
2885
- },
2886
2940
  moonshotai: {
2887
2941
  id: "moonshotai",
2888
2942
  source: "built-in",
@@ -2959,7 +3013,12 @@ var providers = {
2959
3013
  "deepseek-ai/DeepSeek-R1": models["deepseek-r1-0528"],
2960
3014
  "deepseek-ai/DeepSeek-V3.1": models["deepseek-v3-1"],
2961
3015
  "deepseek-ai/DeepSeek-V3": models["deepseek-v3-0324"],
2962
- "zai-org/GLM-4.5": models["glm-4.5"]
3016
+ "zai-org/GLM-4.5": models["glm-4.5"],
3017
+ "Pro/moonshotai/Kimi-K2.5": models["kimi-k2-5"],
3018
+ "Pro/zai-org/GLM-5": models["glm-5"],
3019
+ "Pro/zai-org/GLM-4.7": models["glm-4.7"],
3020
+ "Pro/MiniMaxAI/MiniMax-M2.5": models["minimax-m2.5"],
3021
+ "Pro/deepseek-ai/DeepSeek-V3.2": models["deepseek-v3.2"]
2963
3022
  },
2964
3023
  createModel: defaultModelCreator
2965
3024
  },
@@ -2976,6 +3035,7 @@ var providers = {
2976
3035
  "ZhipuAI/GLM-4.5": models["glm-4.5"],
2977
3036
  "ZhipuAI/GLM-4.5V": models["glm-4.5v"],
2978
3037
  "ZhipuAI/GLM-4.6": models["glm-4.6"],
3038
+ "ZhipuAI/GLM-5": models["glm-5"],
2979
3039
  "deepseek-ai/DeepSeek-V3.2": models["deepseek-v3.2"],
2980
3040
  "deepseek-ai/DeepSeek-V3.2-Speciale": models["deepseek-v3.2-speciale"]
2981
3041
  },
@@ -3010,7 +3070,8 @@ var providers = {
3010
3070
  "glm-4.5v": models["glm-4.5v"],
3011
3071
  "glm-4.6": models["glm-4.6"],
3012
3072
  "glm-4.6v": models["glm-4.6v"],
3013
- "glm-4.7": models["glm-4.7"]
3073
+ "glm-4.7": models["glm-4.7"],
3074
+ "glm-5": models["glm-5"]
3014
3075
  },
3015
3076
  createModel: defaultModelCreator
3016
3077
  },
@@ -3028,10 +3089,31 @@ var providers = {
3028
3089
  "glm-4.5": models["glm-4.5"],
3029
3090
  "glm-4.5-flash": models["glm-4.5-flash"],
3030
3091
  "glm-4.6v": models["glm-4.6v"],
3031
- "glm-4.7": models["glm-4.7"]
3092
+ "glm-4.7": models["glm-4.7"],
3093
+ "glm-5": models["glm-5"]
3032
3094
  },
3033
3095
  createModel: defaultModelCreator
3034
3096
  },
3097
+ "bailian-coding-plan": {
3098
+ id: "bailian-coding-plan",
3099
+ source: "built-in",
3100
+ env: ["BAILIAN_CODING_API_KEY"],
3101
+ name: "BaiLian Coding Plan",
3102
+ api: "https://coding.dashscope.aliyuncs.com/apps/anthropic/v1",
3103
+ doc: "https://www.aliyun.com/benefit/scene/codingplan",
3104
+ apiFormat: "anthropic",
3105
+ models: {
3106
+ "qwen3.5-plus": models["qwen3-5-plus"],
3107
+ "qwen3-max-2026-01-23": models["qwen3-max"],
3108
+ "qwen3-coder-next": models["qwen3-coder-plus"],
3109
+ "qwen3-coder-plus": models["qwen3-coder-plus"],
3110
+ "MiniMax-M2.5": models["minimax-m2.5"],
3111
+ "glm-5": models["glm-5"],
3112
+ "glm-4.7": models["glm-4.7"],
3113
+ "kimi-k2.5": models["kimi-k2-5"]
3114
+ },
3115
+ createModel: defaultAnthropicModelCreator
3116
+ },
3035
3117
  zhipuai: {
3036
3118
  id: "zhipuai",
3037
3119
  source: "built-in",
@@ -3046,7 +3128,8 @@ var providers = {
3046
3128
  "glm-4.5": models["glm-4.5"],
3047
3129
  "glm-4.5-flash": models["glm-4.5-flash"],
3048
3130
  "glm-4.6v": models["glm-4.6v"],
3049
- "glm-4.7": models["glm-4.7"]
3131
+ "glm-4.7": models["glm-4.7"],
3132
+ "glm-5": models["glm-5"]
3050
3133
  },
3051
3134
  createModel: defaultModelCreator
3052
3135
  },
@@ -3080,9 +3163,12 @@ var providers = {
3080
3163
  "z-ai/glm-4.6": models["glm-4.6"],
3081
3164
  "z-ai/glm-4.6v": models["glm-4.6v"],
3082
3165
  "z-ai/glm-4.6v-flash": models["glm-4.6v"],
3166
+ "z-ai/glm-4.7": models["glm-4.7"],
3167
+ "z-ai/glm-5": models["glm-5"],
3083
3168
  "deepseek/deepseek-v3.2-speciale": models["deepseek-v3.2-speciale"],
3084
3169
  "deepseek/deepseek-chat": models["deepseek-v3-2-exp"],
3085
- "deepseek/deepseek-reasoner": models["deepseek-r1-0528"]
3170
+ "deepseek/deepseek-reasoner": models["deepseek-r1-0528"],
3171
+ "minimax/minimax-m2.5": models["minimax-m2.5"]
3086
3172
  },
3087
3173
  headers: {
3088
3174
  "X-Title": "OriCore",
@@ -3098,7 +3184,9 @@ var providers = {
3098
3184
  doc: "https://platform.minimaxi.io/docs/guides/quickstart",
3099
3185
  models: {
3100
3186
  "minimax-m2": models["minimax-m2"],
3101
- "minimax-m2.1": models["minimax-m2.1"]
3187
+ "minimax-m2.1": models["minimax-m2.1"],
3188
+ "minimax-m2.5": models["minimax-m2.5"],
3189
+ "minimax-m2.7": models["minimax-m2.7"]
3102
3190
  },
3103
3191
  createModel(name, provider) {
3104
3192
  const baseURL = getProviderBaseURL(provider);
@@ -3117,7 +3205,9 @@ var providers = {
3117
3205
  doc: "https://platform.minimaxi.com/docs/guides/quickstart",
3118
3206
  models: {
3119
3207
  "minimax-m2": models["minimax-m2"],
3120
- "minimax-m2.1": models["minimax-m2.1"]
3208
+ "minimax-m2.1": models["minimax-m2.1"],
3209
+ "minimax-m2.5": models["minimax-m2.5"],
3210
+ "minimax-m2.7": models["minimax-m2.7"]
3121
3211
  },
3122
3212
  createModel(name, provider) {
3123
3213
  const baseURL = getProviderBaseURL(provider);
@@ -3246,7 +3336,9 @@ var providers = {
3246
3336
  "zai/glm-4.7": models["glm-4.7"],
3247
3337
  "moonshotai/kimi-k2-thinking": models["kimi-k2-thinking"],
3248
3338
  "moonshotai/kimi-k2.5": models["kimi-k2.5"],
3249
- "deepseek/deepseek-chat-v3.2": models["deepseek-v3-2-exp"]
3339
+ "deepseek/deepseek-chat-v3.2": models["deepseek-v3-2-exp"],
3340
+ "openai/gpt-oss-120b": models["gpt-oss-120b"],
3341
+ "xiaomimimo/mimo-v2-flash": models["mimo-v2-flash"]
3250
3342
  },
3251
3343
  createModel: defaultModelCreator
3252
3344
  },
@@ -3266,6 +3358,7 @@ var providers = {
3266
3358
  "claude-4-5-sonnet": models["claude-4-5-sonnet"],
3267
3359
  "claude-haiku-4-5": models["claude-haiku-4-5"],
3268
3360
  "claude-opus-4-5": models["claude-opus-4-5"],
3361
+ "claude-sonnet-4-6": models["claude-sonnet-4-6"],
3269
3362
  "claude-opus-4-6": models["claude-opus-4-6"],
3270
3363
  "gpt-5.1": models["gpt-5.1"],
3271
3364
  "gpt-5.1-codex-max": models["gpt-5.1-codex-max"],
@@ -3284,6 +3377,38 @@ var providers = {
3284
3377
  }
3285
3378
  return defaultModelCreator(name, provider);
3286
3379
  }
3380
+ },
3381
+ kilo: {
3382
+ id: "kilo",
3383
+ source: "built-in",
3384
+ env: ["KILO_API_KEY"],
3385
+ name: "Kilo",
3386
+ api: "https://api.kilo.ai/api/gateway",
3387
+ doc: "https://kilo.ai",
3388
+ apiFormat: "openai",
3389
+ models: {
3390
+ "z-ai/glm-5": models["glm-5"],
3391
+ "z-ai/glm-5:free": models["glm-5"],
3392
+ "z-ai/glm-4.7": models["glm-4.7"],
3393
+ "anthropic/claude-opus-4.6": models["claude-opus-4-6"],
3394
+ "anthropic/claude-sonnet-4.6": models["claude-sonnet-4-6"],
3395
+ "anthropic/claude-haiku-4.5": models["claude-haiku-4-5"],
3396
+ "anthropic/claude-sonnet-4.5": models["claude-4-5-sonnet"],
3397
+ "google/gemini-3-flash-preview": models["gemini-3-flash-preview"],
3398
+ "google/gemini-3-pro-preview": models["gemini-3-pro-preview"],
3399
+ "minimax/minimax-m2.5:free": models["minimax-m2.5"],
3400
+ "minimax/minimax-m2.5": models["minimax-m2.5"],
3401
+ "moonshotai/kimi-k2.5": models["kimi-k2-5"]
3402
+ },
3403
+ createModel: (name, provider) => {
3404
+ if (name.includes("claude-")) {
3405
+ return defaultAnthropicModelCreator(name, provider);
3406
+ }
3407
+ if (name.includes("gemini-")) {
3408
+ return defaultAnthropicModelCreator(name, provider);
3409
+ }
3410
+ return defaultModelCreator(name, provider);
3411
+ }
3287
3412
  }
3288
3413
  };
3289
3414
 
@@ -3917,6 +4042,17 @@ function transformVariants(model, provider) {
3917
4042
  return {};
3918
4043
  }
3919
4044
  const id = (model.id || "").toLowerCase();
4045
+ if (provider.id === "bailian-coding-plan") {
4046
+ if (id.includes("kimi") || id.includes("minimax") || id.includes("glm")) {
4047
+ return {
4048
+ on: {
4049
+ thinking: {
4050
+ type: "enabled"
4051
+ }
4052
+ }
4053
+ };
4054
+ }
4055
+ }
3920
4056
  if (id.includes("deepseek") || id.includes("minimax") || id.includes("glm") || id.includes("mistral") || // id.includes("kimi") ||
3921
4057
  id.includes("grok")) {
3922
4058
  return {};
@@ -4589,6 +4725,9 @@ function stripAnsi(string) {
4589
4725
  if (typeof string !== "string") {
4590
4726
  throw new TypeError(`Expected a \`string\`, got \`${typeof string}\``);
4591
4727
  }
4728
+ if (!string.includes("\x1B") && !string.includes("\x9B")) {
4729
+ return string;
4730
+ }
4592
4731
  return string.replace(regex, "");
4593
4732
  }
4594
4733
 
@@ -6852,72 +6991,10 @@ function readFileWithOffsetLimit(filePath, offset = 1, limit = MAX_LINES_TO_READ
6852
6991
 
6853
6992
  // src/tools/tools/skill.ts
6854
6993
  import path9 from "pathe";
6855
- import { z as z10 } from "zod";
6856
- function renderAvailableSkills(skills) {
6857
- return skills.map(
6858
- (skill) => `<skill>
6859
- <name>${skill.name}</name>
6860
- <description>${skill.description}</description>
6861
- </skill>`
6862
- ).join("\n");
6863
- }
6864
- function generateDescription(skillManager) {
6865
- const skills = skillManager.getSkills();
6866
- return `Execute a skill within the main conversation
6867
- <skills_instructions>
6868
- When users ask you to perform tasks, check if any of the available skills below match the task. If a skill matches, use this tool to invoke it. Skills provide specialized knowledge and procedures for specific tasks.
6869
- </skills_instructions>
6870
- <available_skills>
6871
- ${renderAvailableSkills(skills)}
6872
- </available_skills>`;
6873
- }
6874
- function createSkillTool(opts) {
6875
- return createTool({
6876
- name: "skill",
6877
- description: generateDescription(opts.skillManager),
6878
- parameters: z10.object({
6879
- skill: z10.string().describe("The skill name to execute")
6880
- }),
6881
- getDescription: ({ params }) => {
6882
- return params.skill;
6883
- },
6884
- async execute({ skill }) {
6885
- const trimmed = skill.trim();
6886
- const skillName = trimmed.startsWith("/") ? trimmed.substring(1) : trimmed;
6887
- const foundSkill = opts.skillManager.getSkill(skillName);
6888
- if (!foundSkill) {
6889
- return {
6890
- isError: true,
6891
- llmContent: `Skill "${skillName}" not found`
6892
- };
6893
- }
6894
- const body = await opts.skillManager.readSkillBody(foundSkill);
6895
- const baseDir = path9.dirname(foundSkill.path);
6896
- const messages = [
6897
- {
6898
- type: "text",
6899
- text: `<command-message>${skillName} is running\u2026</command-message>
6900
- <command-name>${skillName}</command-name>`
6901
- },
6902
- {
6903
- type: "text",
6904
- text: `Base directory for this skill: ${baseDir}
6905
-
6906
- ${body}`,
6907
- isMeta: true
6908
- }
6909
- ];
6910
- return {
6911
- llmContent: safeStringify(messages),
6912
- returnDisplay: `Loaded skill: ${foundSkill.name}`
6913
- };
6914
- },
6915
- approval: { category: "read" }
6916
- });
6917
- }
6994
+ import { z as z11 } from "zod";
6918
6995
 
6919
6996
  // src/tools/tools/task.ts
6920
- import { z as z11 } from "zod";
6997
+ import { z as z10 } from "zod";
6921
6998
  function createTaskTool(opts) {
6922
6999
  const { signal, sessionId } = opts;
6923
7000
  const { cwd, agentManager, messageBus } = opts.context;
@@ -6983,11 +7060,11 @@ Since the user is greeting, use the greeting-responder agent to respond with a f
6983
7060
  assistant: "I'm going to use the ${"task" /* TASK */} tool to launch the with the greeting-responder agent"
6984
7061
  </example>
6985
7062
  `,
6986
- parameters: z11.object({
6987
- description: z11.string().describe("A short (3-5 word) description of task"),
6988
- prompt: z11.string().describe("The task for the agent to perform"),
6989
- subagent_type: z11.string().describe("The type of specialized agent to use for this task"),
6990
- resume: z11.string().optional().describe(
7063
+ parameters: z10.object({
7064
+ description: z10.string().describe("A short (3-5 word) description of task"),
7065
+ prompt: z10.string().describe("The task for the agent to perform"),
7066
+ subagent_type: z10.string().describe("The type of specialized agent to use for this task"),
7067
+ resume: z10.string().optional().describe(
6991
7068
  "Optional agent ID to resume from. If provided, the agent will continue from the previous execution transcript."
6992
7069
  )
6993
7070
  }),
@@ -7159,6 +7236,112 @@ Agent ID: ${result.agentId}`,
7159
7236
  });
7160
7237
  }
7161
7238
 
7239
+ // src/tools/tools/skill.ts
7240
+ function renderAvailableSkills(skills) {
7241
+ return skills.filter((skill) => skill.modelInvocable !== false).map(
7242
+ (skill) => `<skill>
7243
+ <name>${skill.name}</name>
7244
+ <description>${skill.description}</description>
7245
+ </skill>`
7246
+ ).join("\n");
7247
+ }
7248
+ function generateDescription(skillManager) {
7249
+ const skills = skillManager.getSkills({ modelInvocable: true });
7250
+ return `Execute a skill within the main conversation
7251
+ <skills_instructions>
7252
+ When users ask you to perform tasks, check if any of the available skills below match the task. If a skill matches, use this tool to invoke it. Skills provide specialized knowledge and procedures for specific tasks.
7253
+ </skills_instructions>
7254
+ <available_skills>
7255
+ ${renderAvailableSkills(skills)}
7256
+ </available_skills>`;
7257
+ }
7258
+ function createSkillTool(opts) {
7259
+ const { skillManager, context, tools, sessionId, signal } = opts;
7260
+ return createTool({
7261
+ name: "skill",
7262
+ description: generateDescription(skillManager),
7263
+ parameters: z11.object({
7264
+ skill: z11.string().describe("The skill name to execute"),
7265
+ args: z11.string().optional().describe("Optional arguments to pass to the skill")
7266
+ }),
7267
+ getDescription: ({ params }) => {
7268
+ return params.args ? `${params.skill} ${params.args}` : params.skill;
7269
+ },
7270
+ async execute({ skill, args }) {
7271
+ const trimmed = skill.trim();
7272
+ const skillName = trimmed.startsWith("/") ? trimmed.substring(1) : trimmed;
7273
+ const foundSkill = skillManager.getSkill(skillName);
7274
+ if (!foundSkill) {
7275
+ return {
7276
+ isError: true,
7277
+ llmContent: `Skill "${skillName}" not found`
7278
+ };
7279
+ }
7280
+ if (foundSkill.modelInvocable === false) {
7281
+ return {
7282
+ isError: true,
7283
+ llmContent: `Skill "${skillName}" cannot be invoked by the model`
7284
+ };
7285
+ }
7286
+ const skillArgs = args || "";
7287
+ const body = await skillManager.readSkillBody(foundSkill, skillArgs);
7288
+ const baseDir = path9.dirname(foundSkill.path);
7289
+ if (foundSkill.context === "fork") {
7290
+ if (!context.agentManager) {
7291
+ return {
7292
+ isError: true,
7293
+ llmContent: `Skill "${skillName}" requires fork execution but agent manager is not available`
7294
+ };
7295
+ }
7296
+ const allowedTools = foundSkill.allowedTools;
7297
+ const filteredTools = allowedTools ? tools.filter(
7298
+ (t) => t.name !== "skill" && allowedTools.some(
7299
+ (allowed) => allowed.toLowerCase() === t.name.toLowerCase()
7300
+ )
7301
+ ) : tools.filter((t) => t.name !== "skill");
7302
+ const taskTool = createTaskTool({
7303
+ context,
7304
+ tools: filteredTools,
7305
+ sessionId,
7306
+ signal
7307
+ });
7308
+ const agentType = foundSkill.agent || "general-purpose";
7309
+ const prompt = `Base directory for this skill: ${baseDir}
7310
+
7311
+ ${body}`;
7312
+ const toolCallId = `skill-${skillName}-${randomUUID()}`;
7313
+ return taskTool.execute(
7314
+ {
7315
+ description: `Execute skill: ${skillName}`,
7316
+ prompt,
7317
+ subagent_type: agentType
7318
+ },
7319
+ toolCallId
7320
+ );
7321
+ }
7322
+ const messages = [
7323
+ {
7324
+ type: "text",
7325
+ text: `<command-message>${skillName} is running\u2026</command-message>
7326
+ <command-name>${skillName}</command-name>`
7327
+ },
7328
+ {
7329
+ type: "text",
7330
+ text: `Base directory for this skill: ${baseDir}
7331
+
7332
+ ${body}`,
7333
+ isMeta: true
7334
+ }
7335
+ ];
7336
+ return {
7337
+ llmContent: safeStringify(messages),
7338
+ returnDisplay: `Loaded skill: ${foundSkill.name}`
7339
+ };
7340
+ },
7341
+ approval: { category: "read" }
7342
+ });
7343
+ }
7344
+
7162
7345
  // src/tools/tools/todo.ts
7163
7346
  import fs10 from "fs";
7164
7347
  import { readFile, writeFile } from "fs/promises";
@@ -7485,8 +7668,7 @@ async function resolveTools(opts) {
7485
7668
  createLSTool({ cwd }),
7486
7669
  createGlobTool({ cwd }),
7487
7670
  createGrepTool({ cwd }),
7488
- createFetchTool({ model, fetch: opts.context.fetch }),
7489
- ...hasSkills ? [createSkillTool({ skillManager: opts.context.skillManager })] : []
7671
+ createFetchTool({ model, fetch: opts.context.fetch })
7490
7672
  ];
7491
7673
  const askUserQuestionTools = opts.askUserQuestion ? [createAskUserQuestionTool()] : [];
7492
7674
  const writeTools = opts.write ? [
@@ -7514,7 +7696,7 @@ async function resolveTools(opts) {
7514
7696
  })
7515
7697
  ] : [];
7516
7698
  const mcpTools = await getMcpTools(opts.context);
7517
- const allTools = [
7699
+ let allTools = [
7518
7700
  ...readonlyTools,
7519
7701
  ...askUserQuestionTools,
7520
7702
  ...writeTools,
@@ -7522,6 +7704,16 @@ async function resolveTools(opts) {
7522
7704
  ...backgroundTools,
7523
7705
  ...mcpTools
7524
7706
  ];
7707
+ if (hasSkills) {
7708
+ const skillTool = createSkillTool({
7709
+ skillManager: opts.context.skillManager,
7710
+ context: opts.context,
7711
+ tools: allTools,
7712
+ sessionId: opts.sessionId,
7713
+ signal: opts.signal
7714
+ });
7715
+ allTools = [...allTools, skillTool];
7716
+ }
7525
7717
  let availableTools = allTools;
7526
7718
  try {
7527
7719
  availableTools = await opts.context.apply({
@@ -7757,6 +7949,18 @@ function normalizeMessagesForCompact(messages) {
7757
7949
  content: "[Tool operations completed]"
7758
7950
  };
7759
7951
  }
7952
+ if (message.role === "user") {
7953
+ if (Array.isArray(message.content)) {
7954
+ const filteredContent = message.content.filter(
7955
+ (part) => part.type === "text"
7956
+ );
7957
+ return {
7958
+ ...message,
7959
+ content: filteredContent.length > 0 ? filteredContent : message.content
7960
+ };
7961
+ }
7962
+ return message;
7963
+ }
7760
7964
  return message;
7761
7965
  }).filter((message) => {
7762
7966
  if (typeof message.content === "string") {
@@ -8347,4 +8551,4 @@ export {
8347
8551
  Tools,
8348
8552
  createLSTool
8349
8553
  };
8350
- //# sourceMappingURL=chunk-XKZSVWRX.js.map
8554
+ //# sourceMappingURL=chunk-MZNH54NB.js.map