jinzd-ai-cli 0.4.101 → 0.4.103
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{batch-DL3IJ7JQ.js → batch-LNTG2IRQ.js} +2 -2
- package/dist/{chunk-5EQP4IAM.js → chunk-DGXUO7D4.js} +1 -1
- package/dist/{chunk-3MEH7D36.js → chunk-JHPSWYO3.js} +1 -1
- package/dist/{chunk-TFCBOPQV.js → chunk-OVYOYUP7.js} +1 -1
- package/dist/{chunk-MBPA2FMK.js → chunk-RZWWODW7.js} +22 -11
- package/dist/{chunk-GXR2VZDI.js → chunk-SN56X6RE.js} +1 -1
- package/dist/{chunk-VWZW5JWK.js → chunk-VOWVIR2U.js} +2 -2
- package/dist/electron-server.js +164 -23
- package/dist/{hub-O5OX6KWP.js → hub-4YGZ4XHN.js} +1 -1
- package/dist/index.js +10 -10
- package/dist/{run-tests-IG45VWEM.js → run-tests-3YOJEN2Q.js} +2 -2
- package/dist/{run-tests-7LVMNX4S.js → run-tests-SN74WT4Z.js} +1 -1
- package/dist/{server-X5DQ5V6S.js → server-BG4WR6RF.js} +3 -3
- package/dist/{server-UBHVIJA6.js → server-TNPDHGQT.js} +145 -15
- package/dist/{task-orchestrator-ZATGIRNE.js → task-orchestrator-MUIH3XBY.js} +3 -3
- package/package.json +1 -1
|
@@ -1,9 +1,9 @@
|
|
|
1
1
|
#!/usr/bin/env node
|
|
2
2
|
import {
|
|
3
3
|
ConfigManager
|
|
4
|
-
} from "./chunk-
|
|
4
|
+
} from "./chunk-SN56X6RE.js";
|
|
5
5
|
import "./chunk-2ZD3YTVM.js";
|
|
6
|
-
import "./chunk-
|
|
6
|
+
import "./chunk-JHPSWYO3.js";
|
|
7
7
|
|
|
8
8
|
// src/cli/batch.ts
|
|
9
9
|
import Anthropic from "@anthropic-ai/sdk";
|
|
@@ -23,7 +23,7 @@ import {
|
|
|
23
23
|
} from "./chunk-6VRJGH25.js";
|
|
24
24
|
import {
|
|
25
25
|
runTestsTool
|
|
26
|
-
} from "./chunk-
|
|
26
|
+
} from "./chunk-OVYOYUP7.js";
|
|
27
27
|
import {
|
|
28
28
|
CONFIG_DIR_NAME,
|
|
29
29
|
DEFAULT_MAX_TOOL_OUTPUT_CHARS_CAP,
|
|
@@ -31,7 +31,7 @@ import {
|
|
|
31
31
|
SUBAGENT_ALLOWED_TOOLS,
|
|
32
32
|
SUBAGENT_DEFAULT_MAX_ROUNDS,
|
|
33
33
|
SUBAGENT_MAX_ROUNDS_LIMIT
|
|
34
|
-
} from "./chunk-
|
|
34
|
+
} from "./chunk-JHPSWYO3.js";
|
|
35
35
|
|
|
36
36
|
// src/tools/types.ts
|
|
37
37
|
function isFileWriteTool(name) {
|
|
@@ -1827,8 +1827,13 @@ var ToolExecutor = class {
|
|
|
1827
1827
|
var writeFileTool = {
|
|
1828
1828
|
definition: {
|
|
1829
1829
|
name: "write_file",
|
|
1830
|
-
description: `Write content to a file. Creates the file if it doesn't exist
|
|
1831
|
-
|
|
1830
|
+
description: `Write content to a file. Creates the file if it doesn't exist; overwrites by default or appends with append=true. Automatically creates parent directories.
|
|
1831
|
+
|
|
1832
|
+
[CRITICAL: pick the right tool by content size]
|
|
1833
|
+
- Short content (under ~300 lines / ~3 KB): use write_file in ONE call. Do NOT chunk.
|
|
1834
|
+
- Long content (300+ lines, or 3 KB+, or any document you'd describe as "complete file" / "full directory" / "long article" / "exam paper"): you MUST use \`save_last_response\` instead. It generates and saves the file in a single streaming pass, avoiding the tool_call argument truncation that happens around 2 KB.
|
|
1835
|
+
|
|
1836
|
+
Do NOT split a long document into many write_file(append=true) calls. That pattern is fragile, slow, and triggers many approval prompts. If you find yourself about to call write_file twice for the same path in one turn \u2014 STOP and use save_last_response.`,
|
|
1832
1837
|
parameters: {
|
|
1833
1838
|
path: {
|
|
1834
1839
|
type: "string",
|
|
@@ -3207,16 +3212,22 @@ var lastResponseStore = { content: "" };
|
|
|
3207
3212
|
var saveLastResponseTool = {
|
|
3208
3213
|
definition: {
|
|
3209
3214
|
name: "save_last_response",
|
|
3210
|
-
description: `
|
|
3215
|
+
description: `Generate AND save a long document to a file in one streaming pass. Use this whenever the user asks you to PRODUCE and SAVE a complete document.
|
|
3211
3216
|
|
|
3212
|
-
[
|
|
3213
|
-
|
|
3214
|
-
-
|
|
3215
|
-
-
|
|
3217
|
+
[When to use \u2014 pick this over write_file]
|
|
3218
|
+
Any of these triggers means use save_last_response, NOT write_file:
|
|
3219
|
+
- The expected output is 300+ lines or 3 KB+ (exam papers, table-of-contents, full chapter, long report, structured outline, complete teaching plan, full code module)
|
|
3220
|
+
- The user said "save as ...", "\u4FDD\u5B58\u4E3A ...", "write the full ...", "complete the entire ...", "list the whole directory"
|
|
3221
|
+
- You'd otherwise need multiple write_file(append=true) calls to fit the content
|
|
3216
3222
|
|
|
3217
|
-
[
|
|
3223
|
+
[How it works]
|
|
3224
|
+
- You only pass the target file path as an argument
|
|
3225
|
+
- The system runs a fresh streaming generation; content streams to the user AND to disk simultaneously (tee mode)
|
|
3226
|
+
- No content is passed via tool arguments \u2192 not subject to the ~2 KB tool_call argument truncation that breaks write_file for large docs
|
|
3218
3227
|
|
|
3219
|
-
[
|
|
3228
|
+
[Anti-pattern \u2014 DO NOT DO]
|
|
3229
|
+
- Do NOT call write_file with chunked append=true to build up a long file. Use save_last_response for the whole file in one pass.
|
|
3230
|
+
- Do NOT first write a partial file with write_file, then "extend" it with edit_file inserts. Use save_last_response.`,
|
|
3220
3231
|
parameters: {
|
|
3221
3232
|
path: {
|
|
3222
3233
|
type: "string",
|
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
import {
|
|
3
3
|
schemaToJsonSchema,
|
|
4
4
|
truncateForPersist
|
|
5
|
-
} from "./chunk-
|
|
5
|
+
} from "./chunk-RZWWODW7.js";
|
|
6
6
|
import {
|
|
7
7
|
AuthError,
|
|
8
8
|
ProviderError,
|
|
@@ -21,7 +21,7 @@ import {
|
|
|
21
21
|
MCP_PROTOCOL_VERSION,
|
|
22
22
|
MCP_TOOL_PREFIX,
|
|
23
23
|
VERSION
|
|
24
|
-
} from "./chunk-
|
|
24
|
+
} from "./chunk-JHPSWYO3.js";
|
|
25
25
|
|
|
26
26
|
// src/providers/claude.ts
|
|
27
27
|
import Anthropic from "@anthropic-ai/sdk";
|
package/dist/electron-server.js
CHANGED
|
@@ -36,7 +36,7 @@ import {
|
|
|
36
36
|
VERSION,
|
|
37
37
|
buildUserIdentityPrompt,
|
|
38
38
|
runTestsTool
|
|
39
|
-
} from "./chunk-
|
|
39
|
+
} from "./chunk-DGXUO7D4.js";
|
|
40
40
|
import {
|
|
41
41
|
hasSemanticIndex,
|
|
42
42
|
semanticSearch
|
|
@@ -56,7 +56,7 @@ import "./chunk-JV5N65KN.js";
|
|
|
56
56
|
import express from "express";
|
|
57
57
|
import { createServer } from "http";
|
|
58
58
|
import { WebSocketServer } from "ws";
|
|
59
|
-
import { join as join15, dirname as
|
|
59
|
+
import { join as join15, dirname as dirname5, resolve as resolve5, relative as relative3, sep as sep2 } from "path";
|
|
60
60
|
import { existsSync as existsSync22, readFileSync as readFileSync15, readdirSync as readdirSync11, statSync as statSync9, realpathSync } from "fs";
|
|
61
61
|
import { networkInterfaces } from "os";
|
|
62
62
|
|
|
@@ -5179,8 +5179,13 @@ var ToolExecutor = class {
|
|
|
5179
5179
|
var writeFileTool = {
|
|
5180
5180
|
definition: {
|
|
5181
5181
|
name: "write_file",
|
|
5182
|
-
description: `Write content to a file. Creates the file if it doesn't exist
|
|
5183
|
-
|
|
5182
|
+
description: `Write content to a file. Creates the file if it doesn't exist; overwrites by default or appends with append=true. Automatically creates parent directories.
|
|
5183
|
+
|
|
5184
|
+
[CRITICAL: pick the right tool by content size]
|
|
5185
|
+
- Short content (under ~300 lines / ~3 KB): use write_file in ONE call. Do NOT chunk.
|
|
5186
|
+
- Long content (300+ lines, or 3 KB+, or any document you'd describe as "complete file" / "full directory" / "long article" / "exam paper"): you MUST use \`save_last_response\` instead. It generates and saves the file in a single streaming pass, avoiding the tool_call argument truncation that happens around 2 KB.
|
|
5187
|
+
|
|
5188
|
+
Do NOT split a long document into many write_file(append=true) calls. That pattern is fragile, slow, and triggers many approval prompts. If you find yourself about to call write_file twice for the same path in one turn \u2014 STOP and use save_last_response.`,
|
|
5184
5189
|
parameters: {
|
|
5185
5190
|
path: {
|
|
5186
5191
|
type: "string",
|
|
@@ -6559,16 +6564,22 @@ var lastResponseStore = { content: "" };
|
|
|
6559
6564
|
var saveLastResponseTool = {
|
|
6560
6565
|
definition: {
|
|
6561
6566
|
name: "save_last_response",
|
|
6562
|
-
description: `
|
|
6563
|
-
|
|
6564
|
-
[
|
|
6565
|
-
|
|
6566
|
-
-
|
|
6567
|
-
-
|
|
6568
|
-
|
|
6569
|
-
|
|
6570
|
-
|
|
6571
|
-
|
|
6567
|
+
description: `Generate AND save a long document to a file in one streaming pass. Use this whenever the user asks you to PRODUCE and SAVE a complete document.
|
|
6568
|
+
|
|
6569
|
+
[When to use \u2014 pick this over write_file]
|
|
6570
|
+
Any of these triggers means use save_last_response, NOT write_file:
|
|
6571
|
+
- The expected output is 300+ lines or 3 KB+ (exam papers, table-of-contents, full chapter, long report, structured outline, complete teaching plan, full code module)
|
|
6572
|
+
- The user said "save as ...", "\u4FDD\u5B58\u4E3A ...", "write the full ...", "complete the entire ...", "list the whole directory"
|
|
6573
|
+
- You'd otherwise need multiple write_file(append=true) calls to fit the content
|
|
6574
|
+
|
|
6575
|
+
[How it works]
|
|
6576
|
+
- You only pass the target file path as an argument
|
|
6577
|
+
- The system runs a fresh streaming generation; content streams to the user AND to disk simultaneously (tee mode)
|
|
6578
|
+
- No content is passed via tool arguments \u2192 not subject to the ~2 KB tool_call argument truncation that breaks write_file for large docs
|
|
6579
|
+
|
|
6580
|
+
[Anti-pattern \u2014 DO NOT DO]
|
|
6581
|
+
- Do NOT call write_file with chunked append=true to build up a long file. Use save_last_response for the whole file in one pass.
|
|
6582
|
+
- Do NOT first write a partial file with write_file, then "extend" it with edit_file inserts. Use save_last_response.`,
|
|
6572
6583
|
parameters: {
|
|
6573
6584
|
path: {
|
|
6574
6585
|
type: "string",
|
|
@@ -9615,8 +9626,8 @@ function autoTrimSessionIfNeeded(session, sizeLimit = SESSION_SIZE_LIMIT) {
|
|
|
9615
9626
|
}
|
|
9616
9627
|
|
|
9617
9628
|
// src/web/session-handler.ts
|
|
9618
|
-
import { existsSync as existsSync20, readFileSync as readFileSync13, appendFileSync as appendFileSync3, writeFileSync as writeFileSync8, mkdirSync as mkdirSync9, readdirSync as readdirSync9, statSync as statSync8 } from "fs";
|
|
9619
|
-
import { join as join13, resolve as resolve4 } from "path";
|
|
9629
|
+
import { existsSync as existsSync20, readFileSync as readFileSync13, appendFileSync as appendFileSync3, writeFileSync as writeFileSync8, mkdirSync as mkdirSync9, readdirSync as readdirSync9, statSync as statSync8, createWriteStream } from "fs";
|
|
9630
|
+
import { join as join13, resolve as resolve4, dirname as dirname4 } from "path";
|
|
9620
9631
|
import { execSync as execSync3 } from "child_process";
|
|
9621
9632
|
|
|
9622
9633
|
// src/tools/git-context.ts
|
|
@@ -10306,6 +10317,45 @@ ${systemPromptVolatile}` : systemPrompt;
|
|
|
10306
10317
|
spawnAgentContext.modelParams = modelParams;
|
|
10307
10318
|
spawnAgentContext.configManager = this.config;
|
|
10308
10319
|
ToolExecutor.currentMessageIndex = this.sessions.current?.messages.length ?? 0;
|
|
10320
|
+
const saveLastResponseCall = result.toolCalls.find((tc) => tc.name === "save_last_response");
|
|
10321
|
+
const saveLastResponsePath = saveLastResponseCall ? String(saveLastResponseCall.arguments["path"] ?? "") : "";
|
|
10322
|
+
if (saveLastResponseCall && saveLastResponsePath) {
|
|
10323
|
+
const teeResult = await this.runSaveLastResponseTee(
|
|
10324
|
+
provider,
|
|
10325
|
+
saveLastResponseCall,
|
|
10326
|
+
saveLastResponsePath,
|
|
10327
|
+
apiMessages,
|
|
10328
|
+
extraMessages,
|
|
10329
|
+
systemPrompt,
|
|
10330
|
+
systemPromptVolatile,
|
|
10331
|
+
modelParams,
|
|
10332
|
+
ac,
|
|
10333
|
+
roundUsage
|
|
10334
|
+
);
|
|
10335
|
+
const teeToolResults = result.toolCalls.map((tc) => {
|
|
10336
|
+
if (tc.id === saveLastResponseCall.id) {
|
|
10337
|
+
return {
|
|
10338
|
+
callId: tc.id,
|
|
10339
|
+
content: teeResult.summary,
|
|
10340
|
+
isError: teeResult.isError
|
|
10341
|
+
};
|
|
10342
|
+
}
|
|
10343
|
+
return {
|
|
10344
|
+
callId: tc.id,
|
|
10345
|
+
content: "[skipped: file already saved by tee streaming]",
|
|
10346
|
+
isError: false
|
|
10347
|
+
};
|
|
10348
|
+
});
|
|
10349
|
+
const reasoningContent2 = result.reasoningContent;
|
|
10350
|
+
const newMsgs2 = provider.buildToolResultMessages(result.toolCalls, teeToolResults, reasoningContent2);
|
|
10351
|
+
extraMessages.push(...newMsgs2);
|
|
10352
|
+
persistToolRound(session, result.toolCalls, teeToolResults, {
|
|
10353
|
+
assistantContent: teeResult.content,
|
|
10354
|
+
reasoningContent: reasoningContent2
|
|
10355
|
+
});
|
|
10356
|
+
consecutiveFreeRounds = 0;
|
|
10357
|
+
continue;
|
|
10358
|
+
}
|
|
10309
10359
|
const toolResults = await this.toolExecutor.executeAll(result.toolCalls);
|
|
10310
10360
|
const reasoningContent = result.reasoningContent;
|
|
10311
10361
|
const newMsgs = provider.buildToolResultMessages(result.toolCalls, toolResults, reasoningContent);
|
|
@@ -10417,6 +10467,98 @@ ${summaryResult.content}`,
|
|
|
10417
10467
|
this.abortController = null;
|
|
10418
10468
|
}
|
|
10419
10469
|
}
|
|
10470
|
+
/**
|
|
10471
|
+
* Tee-streaming for save_last_response in Web mode (v0.4.102+).
|
|
10472
|
+
*
|
|
10473
|
+
* 复刻 REPL 的 [repl.ts:2576] 路径——AI 调用 save_last_response(path) 时,
|
|
10474
|
+
* 不走默认 executor,而是发起一次新的 chatStream,把生成的内容同步推送到
|
|
10475
|
+
* WebSocket(`text_delta` 事件,Web UI 实时渲染)+ 写入磁盘文件。这样 AI
|
|
10476
|
+
* 就能一次性输出 >2KB 的大文档(如教材目录、模考试卷),不会被 tool_call
|
|
10477
|
+
* arguments 截断。
|
|
10478
|
+
*
|
|
10479
|
+
* 已知约束:
|
|
10480
|
+
* - 直接跳过 confirm 流程(与 REPL tee 路径一致)。tee 文本会实时显示,用户
|
|
10481
|
+
* 看到不对可中断。
|
|
10482
|
+
* - 同一轮其余 tool_calls 会被标记为 skipped——AI 通常只调一个工具,多并发
|
|
10483
|
+
* 场景极少。
|
|
10484
|
+
*/
|
|
10485
|
+
async runSaveLastResponseTee(provider, call, saveToFile, apiMessages, extraMessages, systemPrompt, systemPromptVolatile, modelParams, ac, roundUsage) {
|
|
10486
|
+
this.send({
|
|
10487
|
+
type: "tool_call_start",
|
|
10488
|
+
callId: call.id,
|
|
10489
|
+
toolName: call.name,
|
|
10490
|
+
args: call.arguments,
|
|
10491
|
+
dangerLevel: "write",
|
|
10492
|
+
round: 0,
|
|
10493
|
+
totalRounds: 0,
|
|
10494
|
+
startTime: Date.now()
|
|
10495
|
+
});
|
|
10496
|
+
let fileStream = null;
|
|
10497
|
+
let fullContent = "";
|
|
10498
|
+
let teeUsage;
|
|
10499
|
+
let isError = false;
|
|
10500
|
+
let summary;
|
|
10501
|
+
try {
|
|
10502
|
+
mkdirSync9(dirname4(saveToFile), { recursive: true });
|
|
10503
|
+
fileStream = createWriteStream(saveToFile, { encoding: "utf-8" });
|
|
10504
|
+
const chatRequest = {
|
|
10505
|
+
messages: apiMessages,
|
|
10506
|
+
model: this.currentModel,
|
|
10507
|
+
systemPrompt,
|
|
10508
|
+
systemPromptVolatile,
|
|
10509
|
+
stream: true,
|
|
10510
|
+
temperature: modelParams.temperature,
|
|
10511
|
+
maxTokens: modelParams.maxTokens,
|
|
10512
|
+
timeout: modelParams.timeout,
|
|
10513
|
+
thinking: modelParams.thinking,
|
|
10514
|
+
thinkingBudget: modelParams.thinkingBudget,
|
|
10515
|
+
signal: ac.signal,
|
|
10516
|
+
...extraMessages.length > 0 ? { _extraMessages: extraMessages } : {}
|
|
10517
|
+
};
|
|
10518
|
+
const stream = provider.chatStream(chatRequest);
|
|
10519
|
+
for await (const chunk of stream) {
|
|
10520
|
+
if (ac.signal.aborted) break;
|
|
10521
|
+
if (chunk.usage) teeUsage = chunk.usage;
|
|
10522
|
+
if (chunk.delta) {
|
|
10523
|
+
fullContent += chunk.delta;
|
|
10524
|
+
this.send({ type: "text_delta", delta: chunk.delta });
|
|
10525
|
+
fileStream.write(chunk.delta);
|
|
10526
|
+
}
|
|
10527
|
+
if (chunk.done) break;
|
|
10528
|
+
}
|
|
10529
|
+
await new Promise((resolve6, reject) => {
|
|
10530
|
+
fileStream.end((err) => err ? reject(err) : resolve6());
|
|
10531
|
+
});
|
|
10532
|
+
const lines = fullContent.split("\n").length;
|
|
10533
|
+
const bytes = Buffer.byteLength(fullContent, "utf-8");
|
|
10534
|
+
summary = `File saved: ${saveToFile} (${lines} lines, ${bytes} bytes)`;
|
|
10535
|
+
undoStack.push(saveToFile, `save_last_response: ${saveToFile}`);
|
|
10536
|
+
if (teeUsage) {
|
|
10537
|
+
roundUsage.inputTokens += teeUsage.inputTokens;
|
|
10538
|
+
roundUsage.outputTokens += teeUsage.outputTokens;
|
|
10539
|
+
roundUsage.cacheCreationTokens += teeUsage.cacheCreationTokens ?? 0;
|
|
10540
|
+
roundUsage.cacheReadTokens += teeUsage.cacheReadTokens ?? 0;
|
|
10541
|
+
}
|
|
10542
|
+
} catch (err) {
|
|
10543
|
+
if (fileStream) {
|
|
10544
|
+
try {
|
|
10545
|
+
await new Promise((resolve6) => fileStream.end(() => resolve6()));
|
|
10546
|
+
} catch {
|
|
10547
|
+
}
|
|
10548
|
+
}
|
|
10549
|
+
isError = true;
|
|
10550
|
+
const msg = err instanceof Error ? err.message : String(err);
|
|
10551
|
+
summary = `[save_last_response failed] ${msg}`;
|
|
10552
|
+
}
|
|
10553
|
+
this.send({
|
|
10554
|
+
type: "tool_call_result",
|
|
10555
|
+
callId: call.id,
|
|
10556
|
+
toolName: call.name,
|
|
10557
|
+
content: summary,
|
|
10558
|
+
isError
|
|
10559
|
+
});
|
|
10560
|
+
return { content: fullContent, summary, isError };
|
|
10561
|
+
}
|
|
10420
10562
|
/** Consume streaming tool call events and forward to client */
|
|
10421
10563
|
async consumeToolStream(streamGen, ac) {
|
|
10422
10564
|
let textContent = "";
|
|
@@ -11485,7 +11627,7 @@ ${undoResults.map((r) => ` \u2022 ${r}`).join("\n")}` });
|
|
|
11485
11627
|
case "test": {
|
|
11486
11628
|
this.send({ type: "info", message: "\u{1F9EA} Running tests..." });
|
|
11487
11629
|
try {
|
|
11488
|
-
const { executeTests } = await import("./run-tests-
|
|
11630
|
+
const { executeTests } = await import("./run-tests-SN74WT4Z.js");
|
|
11489
11631
|
const argStr = args.join(" ").trim();
|
|
11490
11632
|
let testArgs = {};
|
|
11491
11633
|
if (argStr) {
|
|
@@ -12174,17 +12316,16 @@ Add .md files to create commands.` });
|
|
|
12174
12316
|
};
|
|
12175
12317
|
}
|
|
12176
12318
|
getFilteredToolDefs() {
|
|
12177
|
-
const excludeWeb = (t) => t.name !== "save_last_response";
|
|
12178
12319
|
if (this.planMode) {
|
|
12179
12320
|
return {
|
|
12180
|
-
toolDefs: this.toolRegistry.getDefinitions().filter((t) => PLAN_MODE_READONLY_TOOLS.has(t.name)
|
|
12321
|
+
toolDefs: this.toolRegistry.getDefinitions().filter((t) => PLAN_MODE_READONLY_TOOLS.has(t.name)),
|
|
12181
12322
|
mcpBudgetNote: null
|
|
12182
12323
|
};
|
|
12183
12324
|
}
|
|
12184
12325
|
const skillFilter = this.skillManager?.getActiveToolFilter();
|
|
12185
12326
|
if (skillFilter) {
|
|
12186
12327
|
return {
|
|
12187
|
-
toolDefs: this.toolRegistry.getDefinitions().filter((t) => skillFilter.has(t.name)
|
|
12328
|
+
toolDefs: this.toolRegistry.getDefinitions().filter((t) => skillFilter.has(t.name)),
|
|
12188
12329
|
mcpBudgetNote: null
|
|
12189
12330
|
};
|
|
12190
12331
|
}
|
|
@@ -12192,9 +12333,9 @@ Add .md files to create commands.` });
|
|
|
12192
12333
|
if (contextWindow > 0) {
|
|
12193
12334
|
const toolBudget = Math.floor(contextWindow * 0.2);
|
|
12194
12335
|
const { definitions, systemNote } = this.toolRegistry.getDefinitionsWithBudget(toolBudget, this.usedMcpToolNames);
|
|
12195
|
-
return { toolDefs: definitions
|
|
12336
|
+
return { toolDefs: definitions, mcpBudgetNote: systemNote };
|
|
12196
12337
|
}
|
|
12197
|
-
return { toolDefs: this.toolRegistry.getDefinitions()
|
|
12338
|
+
return { toolDefs: this.toolRegistry.getDefinitions(), mcpBudgetNote: null };
|
|
12198
12339
|
}
|
|
12199
12340
|
/**
|
|
12200
12341
|
* Find first matching context file in a directory.
|
|
@@ -12686,7 +12827,7 @@ function getModuleDir() {
|
|
|
12686
12827
|
if (typeof import.meta?.url === "string") {
|
|
12687
12828
|
const url = new URL(import.meta.url);
|
|
12688
12829
|
const filePath = url.pathname.replace(/^\/([A-Z]:)/i, "$1");
|
|
12689
|
-
return
|
|
12830
|
+
return dirname5(filePath);
|
|
12690
12831
|
}
|
|
12691
12832
|
} catch {
|
|
12692
12833
|
}
|
|
@@ -385,7 +385,7 @@ ${content}`);
|
|
|
385
385
|
}
|
|
386
386
|
}
|
|
387
387
|
async function runTaskMode(config, providers, configManager, topic) {
|
|
388
|
-
const { TaskOrchestrator } = await import("./task-orchestrator-
|
|
388
|
+
const { TaskOrchestrator } = await import("./task-orchestrator-MUIH3XBY.js");
|
|
389
389
|
const orchestrator = new TaskOrchestrator(config, providers, configManager);
|
|
390
390
|
let interrupted = false;
|
|
391
391
|
const onSigint = () => {
|
package/dist/index.js
CHANGED
|
@@ -28,10 +28,10 @@ import {
|
|
|
28
28
|
saveDevState,
|
|
29
29
|
sessionHasMeaningfulContent,
|
|
30
30
|
setupProxy
|
|
31
|
-
} from "./chunk-
|
|
31
|
+
} from "./chunk-VOWVIR2U.js";
|
|
32
32
|
import {
|
|
33
33
|
ConfigManager
|
|
34
|
-
} from "./chunk-
|
|
34
|
+
} from "./chunk-SN56X6RE.js";
|
|
35
35
|
import {
|
|
36
36
|
ToolExecutor,
|
|
37
37
|
ToolRegistry,
|
|
@@ -50,7 +50,7 @@ import {
|
|
|
50
50
|
spawnAgentContext,
|
|
51
51
|
theme,
|
|
52
52
|
undoStack
|
|
53
|
-
} from "./chunk-
|
|
53
|
+
} from "./chunk-RZWWODW7.js";
|
|
54
54
|
import "./chunk-2ZD3YTVM.js";
|
|
55
55
|
import {
|
|
56
56
|
fileCheckpoints
|
|
@@ -68,7 +68,7 @@ import "./chunk-KJLJPUY2.js";
|
|
|
68
68
|
import "./chunk-6VRJGH25.js";
|
|
69
69
|
import "./chunk-2DXY7UGF.js";
|
|
70
70
|
import "./chunk-KHYD3WXE.js";
|
|
71
|
-
import "./chunk-
|
|
71
|
+
import "./chunk-OVYOYUP7.js";
|
|
72
72
|
import {
|
|
73
73
|
AGENTIC_BEHAVIOR_GUIDELINE,
|
|
74
74
|
AUTHOR,
|
|
@@ -90,7 +90,7 @@ import {
|
|
|
90
90
|
SKILLS_DIR_NAME,
|
|
91
91
|
VERSION,
|
|
92
92
|
buildUserIdentityPrompt
|
|
93
|
-
} from "./chunk-
|
|
93
|
+
} from "./chunk-JHPSWYO3.js";
|
|
94
94
|
|
|
95
95
|
// src/index.ts
|
|
96
96
|
import { program } from "commander";
|
|
@@ -2610,7 +2610,7 @@ ${hint}` : "")
|
|
|
2610
2610
|
usage: "/test [command|filter]",
|
|
2611
2611
|
async execute(args, ctx) {
|
|
2612
2612
|
try {
|
|
2613
|
-
const { executeTests } = await import("./run-tests-
|
|
2613
|
+
const { executeTests } = await import("./run-tests-3YOJEN2Q.js");
|
|
2614
2614
|
const argStr = args.join(" ").trim();
|
|
2615
2615
|
let testArgs = {};
|
|
2616
2616
|
if (argStr) {
|
|
@@ -6749,7 +6749,7 @@ program.command("web").description("Start Web UI server with browser-based chat
|
|
|
6749
6749
|
console.error("Error: Invalid port number. Must be between 1 and 65535.");
|
|
6750
6750
|
process.exit(1);
|
|
6751
6751
|
}
|
|
6752
|
-
const { startWebServer } = await import("./server-
|
|
6752
|
+
const { startWebServer } = await import("./server-TNPDHGQT.js");
|
|
6753
6753
|
await startWebServer({ port, host: options.host });
|
|
6754
6754
|
});
|
|
6755
6755
|
program.command("user [action] [username]").description("Manage Web UI users (list | create <name> | delete <name> | reset-password <name> | migrate <name>)").action(async (action, username) => {
|
|
@@ -6872,7 +6872,7 @@ program.command("sessions").description("List recent conversation sessions").act
|
|
|
6872
6872
|
});
|
|
6873
6873
|
program.command("batch <action> [arg] [arg2]").description("Anthropic Message Batches: submit | list | status <id> | results <id> [out] | cancel <id>").option("--dry-run", "Parse and validate input without submitting (submit only)").action(async (action, arg, arg2, options) => {
|
|
6874
6874
|
try {
|
|
6875
|
-
const batch = await import("./batch-
|
|
6875
|
+
const batch = await import("./batch-LNTG2IRQ.js");
|
|
6876
6876
|
switch (action) {
|
|
6877
6877
|
case "submit":
|
|
6878
6878
|
if (!arg) {
|
|
@@ -6915,7 +6915,7 @@ program.command("batch <action> [arg] [arg2]").description("Anthropic Message Ba
|
|
|
6915
6915
|
}
|
|
6916
6916
|
});
|
|
6917
6917
|
program.command("mcp-serve").description("Start an MCP server over STDIO, exposing aicli's built-in tools to Claude Desktop / Cursor / other MCP clients").option("--allow-destructive", "Allow bash / run_interactive / task_create (always destructive in MCP mode)").option("--allow-outside-cwd", "Allow tool path arguments to escape the sandbox root \u2014 disabled by default").option("--tools <list>", "Comma-separated whitelist of tools to expose (default: all eligible tools)").option("--cwd <path>", "Working directory AND sandbox root (default: current directory)").action(async (options) => {
|
|
6918
|
-
const { startMcpServer } = await import("./server-
|
|
6918
|
+
const { startMcpServer } = await import("./server-BG4WR6RF.js");
|
|
6919
6919
|
await startMcpServer({
|
|
6920
6920
|
allowDestructive: !!options.allowDestructive,
|
|
6921
6921
|
allowOutsideCwd: !!options.allowOutsideCwd,
|
|
@@ -7042,7 +7042,7 @@ program.command("hub [topic]").description("Start multi-agent hub (discuss / bra
|
|
|
7042
7042
|
}),
|
|
7043
7043
|
config.get("customProviders")
|
|
7044
7044
|
);
|
|
7045
|
-
const { startHub } = await import("./hub-
|
|
7045
|
+
const { startHub } = await import("./hub-4YGZ4XHN.js");
|
|
7046
7046
|
await startHub(
|
|
7047
7047
|
{
|
|
7048
7048
|
topic: topic ?? "",
|
|
@@ -3,7 +3,7 @@ import {
|
|
|
3
3
|
ToolRegistry,
|
|
4
4
|
getDangerLevel,
|
|
5
5
|
schemaToJsonSchema
|
|
6
|
-
} from "./chunk-
|
|
6
|
+
} from "./chunk-RZWWODW7.js";
|
|
7
7
|
import "./chunk-2ZD3YTVM.js";
|
|
8
8
|
import "./chunk-4BKXL7SM.js";
|
|
9
9
|
import "./chunk-ANYYM4CF.js";
|
|
@@ -12,10 +12,10 @@ import "./chunk-KJLJPUY2.js";
|
|
|
12
12
|
import "./chunk-6VRJGH25.js";
|
|
13
13
|
import "./chunk-2DXY7UGF.js";
|
|
14
14
|
import "./chunk-KHYD3WXE.js";
|
|
15
|
-
import "./chunk-
|
|
15
|
+
import "./chunk-OVYOYUP7.js";
|
|
16
16
|
import {
|
|
17
17
|
VERSION
|
|
18
|
-
} from "./chunk-
|
|
18
|
+
} from "./chunk-JHPSWYO3.js";
|
|
19
19
|
|
|
20
20
|
// src/mcp/server.ts
|
|
21
21
|
import { createInterface } from "readline";
|
|
@@ -21,10 +21,10 @@ import {
|
|
|
21
21
|
loadDevState,
|
|
22
22
|
persistToolRound,
|
|
23
23
|
setupProxy
|
|
24
|
-
} from "./chunk-
|
|
24
|
+
} from "./chunk-VOWVIR2U.js";
|
|
25
25
|
import {
|
|
26
26
|
ConfigManager
|
|
27
|
-
} from "./chunk-
|
|
27
|
+
} from "./chunk-SN56X6RE.js";
|
|
28
28
|
import {
|
|
29
29
|
ToolExecutor,
|
|
30
30
|
ToolRegistry,
|
|
@@ -42,7 +42,7 @@ import {
|
|
|
42
42
|
spawnAgentContext,
|
|
43
43
|
truncateOutput,
|
|
44
44
|
undoStack
|
|
45
|
-
} from "./chunk-
|
|
45
|
+
} from "./chunk-RZWWODW7.js";
|
|
46
46
|
import "./chunk-2ZD3YTVM.js";
|
|
47
47
|
import "./chunk-4BKXL7SM.js";
|
|
48
48
|
import "./chunk-ANYYM4CF.js";
|
|
@@ -51,7 +51,7 @@ import "./chunk-KJLJPUY2.js";
|
|
|
51
51
|
import "./chunk-6VRJGH25.js";
|
|
52
52
|
import "./chunk-2DXY7UGF.js";
|
|
53
53
|
import "./chunk-KHYD3WXE.js";
|
|
54
|
-
import "./chunk-
|
|
54
|
+
import "./chunk-OVYOYUP7.js";
|
|
55
55
|
import {
|
|
56
56
|
AGENTIC_BEHAVIOR_GUIDELINE,
|
|
57
57
|
AUTHOR,
|
|
@@ -70,13 +70,13 @@ import {
|
|
|
70
70
|
SKILLS_DIR_NAME,
|
|
71
71
|
VERSION,
|
|
72
72
|
buildUserIdentityPrompt
|
|
73
|
-
} from "./chunk-
|
|
73
|
+
} from "./chunk-JHPSWYO3.js";
|
|
74
74
|
|
|
75
75
|
// src/web/server.ts
|
|
76
76
|
import express from "express";
|
|
77
77
|
import { createServer } from "http";
|
|
78
78
|
import { WebSocketServer } from "ws";
|
|
79
|
-
import { join as join3, dirname, resolve as resolve2, relative, sep } from "path";
|
|
79
|
+
import { join as join3, dirname as dirname2, resolve as resolve2, relative, sep } from "path";
|
|
80
80
|
import { existsSync as existsSync4, readFileSync as readFileSync4, readdirSync as readdirSync2, statSync as statSync2, realpathSync } from "fs";
|
|
81
81
|
import { networkInterfaces } from "os";
|
|
82
82
|
|
|
@@ -470,8 +470,8 @@ function loadMemoryContent(configDir) {
|
|
|
470
470
|
}
|
|
471
471
|
|
|
472
472
|
// src/web/session-handler.ts
|
|
473
|
-
import { existsSync as existsSync3, readFileSync as readFileSync3, appendFileSync, writeFileSync, mkdirSync, readdirSync, statSync } from "fs";
|
|
474
|
-
import { join as join2, resolve } from "path";
|
|
473
|
+
import { existsSync as existsSync3, readFileSync as readFileSync3, appendFileSync, writeFileSync, mkdirSync, readdirSync, statSync, createWriteStream } from "fs";
|
|
474
|
+
import { join as join2, resolve, dirname } from "path";
|
|
475
475
|
import { execSync } from "child_process";
|
|
476
476
|
var FREE_ROUND_TOOLS = /* @__PURE__ */ new Set(["write_todos"]);
|
|
477
477
|
var MAX_CONSECUTIVE_FREE_ROUNDS = 5;
|
|
@@ -1069,6 +1069,45 @@ ${systemPromptVolatile}` : systemPrompt;
|
|
|
1069
1069
|
spawnAgentContext.modelParams = modelParams;
|
|
1070
1070
|
spawnAgentContext.configManager = this.config;
|
|
1071
1071
|
ToolExecutor.currentMessageIndex = this.sessions.current?.messages.length ?? 0;
|
|
1072
|
+
const saveLastResponseCall = result.toolCalls.find((tc) => tc.name === "save_last_response");
|
|
1073
|
+
const saveLastResponsePath = saveLastResponseCall ? String(saveLastResponseCall.arguments["path"] ?? "") : "";
|
|
1074
|
+
if (saveLastResponseCall && saveLastResponsePath) {
|
|
1075
|
+
const teeResult = await this.runSaveLastResponseTee(
|
|
1076
|
+
provider,
|
|
1077
|
+
saveLastResponseCall,
|
|
1078
|
+
saveLastResponsePath,
|
|
1079
|
+
apiMessages,
|
|
1080
|
+
extraMessages,
|
|
1081
|
+
systemPrompt,
|
|
1082
|
+
systemPromptVolatile,
|
|
1083
|
+
modelParams,
|
|
1084
|
+
ac,
|
|
1085
|
+
roundUsage
|
|
1086
|
+
);
|
|
1087
|
+
const teeToolResults = result.toolCalls.map((tc) => {
|
|
1088
|
+
if (tc.id === saveLastResponseCall.id) {
|
|
1089
|
+
return {
|
|
1090
|
+
callId: tc.id,
|
|
1091
|
+
content: teeResult.summary,
|
|
1092
|
+
isError: teeResult.isError
|
|
1093
|
+
};
|
|
1094
|
+
}
|
|
1095
|
+
return {
|
|
1096
|
+
callId: tc.id,
|
|
1097
|
+
content: "[skipped: file already saved by tee streaming]",
|
|
1098
|
+
isError: false
|
|
1099
|
+
};
|
|
1100
|
+
});
|
|
1101
|
+
const reasoningContent2 = result.reasoningContent;
|
|
1102
|
+
const newMsgs2 = provider.buildToolResultMessages(result.toolCalls, teeToolResults, reasoningContent2);
|
|
1103
|
+
extraMessages.push(...newMsgs2);
|
|
1104
|
+
persistToolRound(session, result.toolCalls, teeToolResults, {
|
|
1105
|
+
assistantContent: teeResult.content,
|
|
1106
|
+
reasoningContent: reasoningContent2
|
|
1107
|
+
});
|
|
1108
|
+
consecutiveFreeRounds = 0;
|
|
1109
|
+
continue;
|
|
1110
|
+
}
|
|
1072
1111
|
const toolResults = await this.toolExecutor.executeAll(result.toolCalls);
|
|
1073
1112
|
const reasoningContent = result.reasoningContent;
|
|
1074
1113
|
const newMsgs = provider.buildToolResultMessages(result.toolCalls, toolResults, reasoningContent);
|
|
@@ -1180,6 +1219,98 @@ ${summaryResult.content}`,
|
|
|
1180
1219
|
this.abortController = null;
|
|
1181
1220
|
}
|
|
1182
1221
|
}
|
|
1222
|
+
/**
 * Tee-streaming for save_last_response in Web mode (v0.4.102+).
 *
 * Mirrors the REPL's tee path ([repl.ts:2576]) — when the AI calls
 * save_last_response(path), this does NOT go through the default tool
 * executor. Instead it starts a fresh chatStream and simultaneously pushes
 * each generated delta to the WebSocket (`text_delta` events, rendered live
 * by the Web UI) and writes it to the file on disk. This lets the AI emit
 * large documents (>2KB, e.g. a textbook TOC or a mock exam paper) in one
 * pass without being truncated by the tool_call arguments size limit.
 *
 * Known constraints:
 * - The confirm flow is skipped entirely (consistent with the REPL tee
 *   path). The teed text is displayed live, so the user can interrupt if it
 *   looks wrong.
 * - Any other tool_calls in the same round are marked as skipped — the AI
 *   usually issues a single tool call, so concurrent calls are rare.
 *
 * @param {object} provider        LLM provider exposing chatStream()
 * @param {object} call            the save_last_response tool call (id/name/arguments)
 * @param {string} saveToFile      destination file path for the teed content
 * @param {Array}  apiMessages     conversation history to replay into the new stream
 * @param {Array}  extraMessages   extra messages appended via `_extraMessages` when non-empty
 * @param {string} systemPrompt    cached/stable system prompt
 * @param {string} systemPromptVolatile  per-turn volatile system prompt section
 * @param {object} modelParams     temperature/maxTokens/timeout/thinking settings
 * @param {AbortController} ac     abort controller; its signal cancels the stream
 * @param {object} roundUsage      mutable accumulator; token usage is added in place
 * @returns {Promise<{content: string, summary: string, isError: boolean}>}
 *          full streamed text, a human-readable summary, and the error flag
 */
async runSaveLastResponseTee(provider, call, saveToFile, apiMessages, extraMessages, systemPrompt, systemPromptVolatile, modelParams, ac, roundUsage) {
  // Tell the client a (synthetic) tool call has started so the UI shows the
  // usual tool chrome. round/totalRounds are 0 because this runs outside the
  // normal executor's round bookkeeping.
  this.send({
    type: "tool_call_start",
    callId: call.id,
    toolName: call.name,
    args: call.arguments,
    dangerLevel: "write",
    round: 0,
    totalRounds: 0,
    startTime: Date.now()
  });
  let fileStream = null;
  let fullContent = "";
  let teeUsage;
  let isError = false;
  let summary;
  try {
    // Ensure the parent directory exists, then open the destination file.
    // NOTE(review): no 'error' listener is attached to fileStream — a disk
    // write error surfaces only via end()'s callback; confirm acceptable.
    mkdirSync(dirname(saveToFile), { recursive: true });
    fileStream = createWriteStream(saveToFile, { encoding: "utf-8" });
    const chatRequest = {
      messages: apiMessages,
      model: this.currentModel,
      systemPrompt,
      systemPromptVolatile,
      stream: true,
      temperature: modelParams.temperature,
      maxTokens: modelParams.maxTokens,
      timeout: modelParams.timeout,
      thinking: modelParams.thinking,
      thinkingBudget: modelParams.thinkingBudget,
      signal: ac.signal,
      // Only attach _extraMessages when there is something to attach.
      ...extraMessages.length > 0 ? { _extraMessages: extraMessages } : {}
    };
    const stream = provider.chatStream(chatRequest);
    for await (const chunk of stream) {
      // User abort: stop consuming; partial content already written stays.
      if (ac.signal.aborted) break;
      // Usage chunks may arrive more than once; keep the latest.
      if (chunk.usage) teeUsage = chunk.usage;
      if (chunk.delta) {
        // Tee each delta three ways: accumulate, push to the UI, write to disk.
        fullContent += chunk.delta;
        this.send({ type: "text_delta", delta: chunk.delta });
        fileStream.write(chunk.delta);
      }
      if (chunk.done) break;
    }
    // Flush and close the file before reporting the result; reject on close error.
    await new Promise((resolve3, reject) => {
      fileStream.end((err) => err ? reject(err) : resolve3());
    });
    const lines = fullContent.split("\n").length;
    const bytes = Buffer.byteLength(fullContent, "utf-8");
    summary = `File saved: ${saveToFile} (${lines} lines, ${bytes} bytes)`;
    // Presumably registers the file so /undo can revert it — verify against
    // the undoStack contract elsewhere in this module.
    undoStack.push(saveToFile, `save_last_response: ${saveToFile}`);
    // Fold the tee stream's token usage into the caller's round accumulator.
    if (teeUsage) {
      roundUsage.inputTokens += teeUsage.inputTokens;
      roundUsage.outputTokens += teeUsage.outputTokens;
      roundUsage.cacheCreationTokens += teeUsage.cacheCreationTokens ?? 0;
      roundUsage.cacheReadTokens += teeUsage.cacheReadTokens ?? 0;
    }
  } catch (err) {
    // Best-effort close of the file stream; ignore secondary close failures.
    if (fileStream) {
      try {
        await new Promise((resolve3) => fileStream.end(() => resolve3()));
      } catch {
      }
    }
    isError = true;
    const msg = err instanceof Error ? err.message : String(err);
    summary = `[save_last_response failed] ${msg}`;
  }
  // Report the tool result regardless of success so the UI closes the call.
  this.send({
    type: "tool_call_result",
    callId: call.id,
    toolName: call.name,
    content: summary,
    isError
  });
  return { content: fullContent, summary, isError };
}
|
|
1183
1314
|
/** Consume streaming tool call events and forward to client */
|
|
1184
1315
|
async consumeToolStream(streamGen, ac) {
|
|
1185
1316
|
let textContent = "";
|
|
@@ -2248,7 +2379,7 @@ ${undoResults.map((r) => ` \u2022 ${r}`).join("\n")}` });
|
|
|
2248
2379
|
case "test": {
|
|
2249
2380
|
this.send({ type: "info", message: "\u{1F9EA} Running tests..." });
|
|
2250
2381
|
try {
|
|
2251
|
-
const { executeTests } = await import("./run-tests-
|
|
2382
|
+
const { executeTests } = await import("./run-tests-3YOJEN2Q.js");
|
|
2252
2383
|
const argStr = args.join(" ").trim();
|
|
2253
2384
|
let testArgs = {};
|
|
2254
2385
|
if (argStr) {
|
|
@@ -2937,17 +3068,16 @@ Add .md files to create commands.` });
|
|
|
2937
3068
|
};
|
|
2938
3069
|
}
|
|
2939
3070
|
getFilteredToolDefs() {
|
|
2940
|
-
const excludeWeb = (t) => t.name !== "save_last_response";
|
|
2941
3071
|
if (this.planMode) {
|
|
2942
3072
|
return {
|
|
2943
|
-
toolDefs: this.toolRegistry.getDefinitions().filter((t) => PLAN_MODE_READONLY_TOOLS.has(t.name)
|
|
3073
|
+
toolDefs: this.toolRegistry.getDefinitions().filter((t) => PLAN_MODE_READONLY_TOOLS.has(t.name)),
|
|
2944
3074
|
mcpBudgetNote: null
|
|
2945
3075
|
};
|
|
2946
3076
|
}
|
|
2947
3077
|
const skillFilter = this.skillManager?.getActiveToolFilter();
|
|
2948
3078
|
if (skillFilter) {
|
|
2949
3079
|
return {
|
|
2950
|
-
toolDefs: this.toolRegistry.getDefinitions().filter((t) => skillFilter.has(t.name)
|
|
3080
|
+
toolDefs: this.toolRegistry.getDefinitions().filter((t) => skillFilter.has(t.name)),
|
|
2951
3081
|
mcpBudgetNote: null
|
|
2952
3082
|
};
|
|
2953
3083
|
}
|
|
@@ -2955,9 +3085,9 @@ Add .md files to create commands.` });
|
|
|
2955
3085
|
if (contextWindow > 0) {
|
|
2956
3086
|
const toolBudget = Math.floor(contextWindow * 0.2);
|
|
2957
3087
|
const { definitions, systemNote } = this.toolRegistry.getDefinitionsWithBudget(toolBudget, this.usedMcpToolNames);
|
|
2958
|
-
return { toolDefs: definitions
|
|
3088
|
+
return { toolDefs: definitions, mcpBudgetNote: systemNote };
|
|
2959
3089
|
}
|
|
2960
|
-
return { toolDefs: this.toolRegistry.getDefinitions()
|
|
3090
|
+
return { toolDefs: this.toolRegistry.getDefinitions(), mcpBudgetNote: null };
|
|
2961
3091
|
}
|
|
2962
3092
|
/**
|
|
2963
3093
|
* Find first matching context file in a directory.
|
|
@@ -3227,7 +3357,7 @@ function getModuleDir() {
|
|
|
3227
3357
|
if (typeof import.meta?.url === "string") {
|
|
3228
3358
|
const url = new URL(import.meta.url);
|
|
3229
3359
|
const filePath = url.pathname.replace(/^\/([A-Z]:)/i, "$1");
|
|
3230
|
-
return
|
|
3360
|
+
return dirname2(filePath);
|
|
3231
3361
|
}
|
|
3232
3362
|
} catch {
|
|
3233
3363
|
}
|
|
@@ -4,7 +4,7 @@ import {
|
|
|
4
4
|
getDangerLevel,
|
|
5
5
|
googleSearchContext,
|
|
6
6
|
truncateOutput
|
|
7
|
-
} from "./chunk-
|
|
7
|
+
} from "./chunk-RZWWODW7.js";
|
|
8
8
|
import "./chunk-2ZD3YTVM.js";
|
|
9
9
|
import "./chunk-4BKXL7SM.js";
|
|
10
10
|
import "./chunk-ANYYM4CF.js";
|
|
@@ -13,10 +13,10 @@ import "./chunk-KJLJPUY2.js";
|
|
|
13
13
|
import "./chunk-6VRJGH25.js";
|
|
14
14
|
import "./chunk-2DXY7UGF.js";
|
|
15
15
|
import "./chunk-KHYD3WXE.js";
|
|
16
|
-
import "./chunk-
|
|
16
|
+
import "./chunk-OVYOYUP7.js";
|
|
17
17
|
import {
|
|
18
18
|
SUBAGENT_ALLOWED_TOOLS
|
|
19
|
-
} from "./chunk-
|
|
19
|
+
} from "./chunk-JHPSWYO3.js";
|
|
20
20
|
|
|
21
21
|
// src/hub/task-orchestrator.ts
|
|
22
22
|
import { createInterface } from "readline";
|