kc-beta 0.5.6 → 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/QUICKSTART.md +17 -4
- package/README.md +58 -11
- package/bin/kc-beta.js +35 -1
- package/package.json +1 -1
- package/src/agent/bundle-tree.js +553 -0
- package/src/agent/context.js +40 -1
- package/src/agent/engine.js +644 -28
- package/src/agent/llm-client.js +67 -18
- package/src/agent/pipelines/finalization.js +186 -0
- package/src/agent/pipelines/index.js +8 -0
- package/src/agent/pipelines/initializer.js +40 -0
- package/src/agent/pipelines/skill-authoring.js +100 -6
- package/src/agent/skill-loader.js +54 -4
- package/src/agent/task-manager.js +66 -3
- package/src/agent/tools/agent-tool.js +283 -35
- package/src/agent/tools/bundle-search.js +146 -0
- package/src/agent/tools/document-chunk.js +246 -0
- package/src/agent/tools/document-classify.js +311 -0
- package/src/agent/tools/document-parse.js +8 -1
- package/src/agent/tools/phase-advance.js +30 -7
- package/src/agent/tools/registry.js +10 -0
- package/src/agent/tools/rule-catalog.js +17 -3
- package/src/agent/tools/sandbox-exec.js +30 -0
- package/src/agent/workspace.js +168 -14
- package/src/cli/components.js +165 -17
- package/src/cli/index.js +166 -19
- package/src/cli/meme.js +58 -0
- package/src/config.js +39 -2
- package/src/providers.js +26 -0
- package/template/skills/en/meta-meta/evolution-loop/SKILL.md +13 -1
- package/template/skills/en/meta-meta/rule-extraction/SKILL.md +74 -0
- package/template/skills/zh/meta-meta/evolution-loop/SKILL.md +7 -1
- package/template/skills/zh/meta-meta/rule-extraction/SKILL.md +73 -0
|
@@ -0,0 +1,246 @@
|
|
|
1
|
+
import fs from "node:fs";
|
|
2
|
+
import path from "node:path";
|
|
3
|
+
import crypto from "node:crypto";
|
|
4
|
+
import { BaseTool, ToolResult } from "./base.js";
|
|
5
|
+
import { buildBundleTree, BundleTree } from "../bundle-tree.js";
|
|
6
|
+
|
|
7
|
+
const CACHE_SUBDIR = path.join("cache", "bundles");
|
|
8
|
+
|
|
9
|
+
/**
|
|
10
|
+
* Build a BundleTree (onion-peeler chunk tree with a keyword index) from
|
|
11
|
+
* a list of files. Caches the result under
|
|
12
|
+
* <workspace>/cache/bundles/<sha256-of-bundle>.json
|
|
13
|
+
* keyed by the combined content hash, so re-chunking the same bundle is
|
|
14
|
+
* free.
|
|
15
|
+
*
|
|
16
|
+
* The bundle tree is the foundation for:
|
|
17
|
+
* - `bundle_search` — cheap keyword RAG over the tree's leaves
|
|
18
|
+
* - `document_classify` — reads each file's head to classify the bundle
|
|
19
|
+
* - Group D skill_authoring context auto-attach (reads chunks by id)
|
|
20
|
+
*
|
|
21
|
+
* PDFs are extracted with per-page resolution via pdfjs (already a KC
|
|
22
|
+
* dependency). Other formats go in as single-page blocks, which still
|
|
23
|
+
* benefits from the chunker's header-based splitting.
|
|
24
|
+
*/
|
|
25
|
+
export class DocumentChunkTool extends BaseTool {
  /**
   * @param {object} workspace - Workspace handle. Provides `cwd` (cache
   *   root), `resolvePath` / `resolveProjectPath` for scoped path
   *   resolution, and optionally `projectDir`.
   */
  constructor(workspace) {
    super();
    this._workspace = workspace;
  }

  get name() { return "document_chunk"; }

  // Tool description surfaced to the agent; deliberately mentions
  // bundle_search so the model knows the expected follow-up call.
  get description() {
    return (
      "Build a searchable BundleTree from a list of regulation / reference documents. " +
      "Produces a hierarchical chunk tree (max ~2000 tokens per leaf) with a " +
      "keyword index for RAG. Result is cached by content hash — repeated calls " +
      "on the same bundle are free. Use bundle_search afterward to look up evidence by keyword."
    );
  }

  // JSON Schema for tool input. Only `paths` is required; scope defaults to
  // "workspace", chunk size to 2000 tokens, and caching is on by default.
  get inputSchema() {
    return {
      type: "object",
      properties: {
        paths: {
          type: "array",
          items: { type: "string" },
          description:
            "Paths to input files (PDFs, .md, .txt). Relative to the chosen scope.",
        },
        scope: {
          type: "string",
          enum: ["workspace", "project"],
          description: "Which directory to resolve paths against. Default 'workspace'.",
        },
        max_tokens_per_chunk: {
          type: "integer",
          description: "Max tokens per leaf chunk. Default 2000 (≈5000 chars CJK).",
        },
        force_refresh: {
          type: "boolean",
          description: "Ignore cache and re-chunk. Default false.",
        },
      },
      required: ["paths"],
    };
  }

  /**
   * Build (or reuse from cache) the BundleTree for the given paths.
   *
   * Flow: validate input → resolve + stat every path (fail fast on any
   * missing file) → compute the cache key → return the cached tree when
   * present → otherwise parse each file, build the tree, write the cache,
   * and return a summary.
   *
   * @param {object} input - Matches `inputSchema`.
   * @returns {Promise<ToolResult>} Summary text on success; error
   *   ToolResult (second arg `true`) on bad input / missing files.
   */
  async execute(input) {
    const paths = Array.isArray(input.paths) ? input.paths : [];
    const scope = input.scope || "workspace";
    // Anything non-finite (missing, NaN, string) falls back to the
    // 2000-token default.
    const maxTokens = Number.isFinite(input.max_tokens_per_chunk)
      ? input.max_tokens_per_chunk : 2000;
    const forceRefresh = input.force_refresh === true;

    if (paths.length === 0) return new ToolResult("No paths provided", true);
    if (scope === "project" && !this._workspace.projectDir) {
      return new ToolResult("No project directory available", true);
    }

    // Resolve + stat every path up front so cache key is based on actual files
    const resolved = [];
    for (const p of paths) {
      let abs;
      try {
        abs = scope === "project"
          ? this._workspace.resolveProjectPath(p)
          : this._workspace.resolvePath(p);
      } catch (e) { return new ToolResult(`Path error (${p}): ${e.message}`, true); }
      if (!fs.existsSync(abs) || !fs.statSync(abs).isFile()) {
        return new ToolResult(`File not found: ${p}`, true);
      }
      resolved.push({ requested: p, abs });
    }

    const cacheKey = this._hashBundle(resolved, maxTokens);
    const cacheDir = path.join(this._workspace.cwd, CACHE_SUBDIR);
    const cachePath = path.join(cacheDir, `${cacheKey}.json`);

    if (!forceRefresh && fs.existsSync(cachePath)) {
      try {
        const tree = BundleTree.fromJSON(JSON.parse(fs.readFileSync(cachePath, "utf-8")));
        return new ToolResult(this._summarize(tree, cachePath, /*cached*/ true));
      } catch {
        // Fall through to rebuild; corrupt cache is self-healing.
      }
    }

    // Parse each file into { source_file, total_pages, blocks: [{page, markdown}] }
    const parsedFiles = [];
    for (const { requested, abs } of resolved) {
      try {
        parsedFiles.push(await this._parseOne(requested, abs));
      } catch (e) {
        // A file that fails to parse becomes an empty entry carrying
        // parse_error rather than aborting the whole bundle build.
        parsedFiles.push({
          source_file: path.basename(abs),
          total_pages: 0,
          blocks: [],
          parse_error: `${e.name || "Error"}: ${e.message}`,
        });
      }
    }

    const tree = buildBundleTree(parsedFiles, { maxTokensPerChunk: maxTokens });

    // Write cache
    try {
      fs.mkdirSync(cacheDir, { recursive: true });
      fs.writeFileSync(cachePath, JSON.stringify(tree.toJSON()), "utf-8");
    } catch {
      // Cache write failure is non-fatal; the tree is still valid in memory
      // for this turn. Next turn will just re-chunk.
    }

    return new ToolResult(this._summarize(tree, cachePath, /*cached*/ false));
  }

  /**
   * Produce a concise summary output. Full tree is on disk; we show the
   * outline + leaf stats so the agent knows what's inside without dumping
   * every chunk into the LLM turn.
   *
   * @param {BundleTree} tree - The built (or reloaded) tree.
   * @param {string} cachePath - Absolute path to the tree's cache file.
   * @param {boolean} cached - Whether the tree came from the disk cache.
   * @returns {string} Multi-line summary text.
   */
  _summarize(tree, cachePath, cached) {
    const files = tree.files();
    const leaves = tree.allLeaves();
    // Approximate total: leaf `tokens` fields may be absent, hence `|| 0`.
    const totalTokens = leaves.reduce((n, ch) => n + (ch.tokens || 0), 0);
    const rel = path.relative(this._workspace.cwd, cachePath) || cachePath;
    const lines = [
      `${cached ? "Reused cached" : "Built new"} BundleTree → ${rel}`,
      `Files: ${files.length} · Leaves: ${leaves.length} · ~${totalTokens} tokens indexed`,
      `Keyword index: ${Object.keys(tree.keyword_index).length} tokens`,
      "",
      "Outline:",
      tree.outline(4),
      "",
      `Next step: use \`bundle_search\` with keywords to look up evidence by chunk_id.`,
      `Cache key: ${path.basename(cachePath)}`,
    ];
    return lines.join("\n");
  }

  /**
   * Cache key for a bundle: sha256 over the chunk-size parameter plus each
   * file's absolute path, size, and mtime (a cheap proxy for content).
   * Truncated to 16 hex chars for readable cache filenames.
   *
   * @param {Array<{abs: string}>} resolved - Resolved path entries.
   * @param {number} maxTokens - Chunk granularity; part of the key so a
   *   different chunk size yields a different cache entry.
   * @returns {string} 16-char hex digest.
   */
  _hashBundle(resolved, maxTokens) {
    const h = crypto.createHash("sha256");
    h.update(`max_tokens:${maxTokens}\n`);
    for (const { abs } of resolved) {
      try {
        const stat = fs.statSync(abs);
        h.update(`${abs}|${stat.size}|${stat.mtimeMs}\n`);
      } catch { h.update(`${abs}|?|?\n`); }
    }
    return h.digest("hex").slice(0, 16);
  }

  /**
   * Parse one input file into { source_file, total_pages, blocks }.
   * PDFs get per-page extraction; .md/.txt become a single-page block;
   * anything else is a best-effort UTF-8 read with a parse_error entry
   * when that fails.
   *
   * @param {string} requestedRelPath - Path as the caller requested it
   *   (currently unused in the body; kept for signature stability).
   * @param {string} absPath - Resolved absolute path.
   */
  async _parseOne(requestedRelPath, absPath) {
    const baseName = path.basename(absPath);
    const suffix = path.extname(absPath).toLowerCase();

    if (suffix === ".pdf") {
      const blocks = await this._parsePdfPages(absPath);
      // Guarantee at least one (possibly empty) page so downstream
      // chunking always has a block to work with.
      return {
        source_file: baseName,
        total_pages: blocks.length || 1,
        blocks: blocks.length > 0 ? blocks : [{ page: 1, markdown: "" }],
      };
    }

    if (suffix === ".md" || suffix === ".txt") {
      const txt = fs.readFileSync(absPath, "utf-8");
      return {
        source_file: baseName,
        total_pages: 1,
        blocks: [{ page: 1, markdown: txt }],
      };
    }

    // For other formats (.docx, .xlsx, etc): read as UTF-8 best-effort.
    // Upstream agent should call document_parse first and then document_chunk
    // on the parsed output directly — current MVP keeps the tool surface small.
    try {
      const txt = fs.readFileSync(absPath, "utf-8");
      return {
        source_file: baseName,
        total_pages: 1,
        blocks: [{ page: 1, markdown: txt }],
      };
    } catch {
      return {
        source_file: baseName, total_pages: 0, blocks: [],
        parse_error: `Unsupported format '${suffix}'. Run document_parse first and use its output, or stick to .pdf / .md / .txt.`,
      };
    }
  }

  /**
   * Extract per-page text from a PDF via pdfjs (dynamic import so the
   * dependency is only loaded when a PDF is actually in the bundle).
   * Per-page extraction errors yield an empty page rather than failing
   * the whole document; the pdfjs document is always destroyed.
   *
   * @param {string} absPath - Absolute path to the PDF.
   * @returns {Promise<Array<{page: number, markdown: string}>>} 1-indexed pages.
   */
  async _parsePdfPages(absPath) {
    const pdfjsLib = await import("pdfjs-dist/legacy/build/pdf.mjs");
    const data = new Uint8Array(fs.readFileSync(absPath));
    const doc = await pdfjsLib.getDocument({ data, useSystemFonts: true }).promise;
    const blocks = [];
    try {
      for (let i = 0; i < doc.numPages; i++) {
        let pageText = "";
        try {
          const page = await doc.getPage(i + 1);
          const content = await page.getTextContent();
          // Preserve line breaks reasonably well: group items by rough y-coord.
          // transform[5] is the item's vertical offset; a jump > 2 units is
          // treated as a new line, otherwise items are space-joined.
          let lastY = null;
          const out = [];
          for (const item of content.items) {
            const y = item.transform?.[5];
            if (lastY !== null && Math.abs(y - lastY) > 2) out.push("\n");
            else if (out.length > 0 && !out[out.length - 1].endsWith(" "))
              out.push(" ");
            out.push(item.str || "");
            lastY = y;
          }
          pageText = out.join("").replace(/\s+\n/g, "\n").trim();
        } catch { pageText = ""; }
        blocks.push({ page: i + 1, markdown: pageText });
      }
    } finally {
      try { await doc.destroy?.(); } catch { /* ignore */ }
    }
    return blocks;
  }
}
|
|
@@ -0,0 +1,311 @@
|
|
|
1
|
+
import fs from "node:fs";
|
|
2
|
+
import path from "node:path";
|
|
3
|
+
import { BaseTool, ToolResult } from "./base.js";
|
|
4
|
+
import { BundleTree } from "../bundle-tree.js";
|
|
5
|
+
import { LLMClient } from "../llm-client.js";
|
|
6
|
+
|
|
7
|
+
const CACHE_SUBDIR = path.join("cache", "bundles");
|
|
8
|
+
|
|
9
|
+
// Keep in sync with applicable_product_types / report_types arrays the
// extraction pipeline uses when writing rules/catalog.json.
const PRODUCT_TYPES = [
  "公募产品", "私募产品", "现金管理类",
  "理财产品", "信托计划", "保险资管产品",
];
const REPORT_TYPES = ["季报", "中报", "年报"];

// System prompt (Chinese) for the one-shot classifier call. It instructs the
// model to pick exactly one product_type and one report_type from the closed
// sets above (empty string when undeterminable), report a 高/中/低 confidence
// plus a short reasoning, and emit bare JSON with no code fences — the shape
// extractJsonObject() expects.
const CLASSIFIER_SYSTEM = [
  "你是资管产品文档分类助理。用户提供一份文档包(一至多份文件,来自同一只",
  "资管产品 / 合同 / 公司),你需要判断:",
  "1. 产品类型(product_type)— 只能从以下取值中选一个:",
  ` ${PRODUCT_TYPES.join(", ")}`,
  " 若无法确定,填空字符串 \"\"。现金管理类优先于公募/私募(它是独立披露类别)。",
  "2. 报告类型(report_type)— 只能从以下取值中选一个:",
  ` ${REPORT_TYPES.join(", ")}`,
  " 若文档是定期公告/定期报告但未明确周期,按季报处理。若无法确定,填 \"\"。",
  "3. confidence — \"高\"/\"中\"/\"低\"",
  "4. reasoning — 一句话说明判断依据,≤60 字",
  "",
  "严格按 JSON 输出,不要包裹代码块:",
  "{\"product_type\":\"...\",\"report_type\":\"...\",\"confidence\":\"...\",\"reasoning\":\"...\"}",
].join("\n");
|
|
32
|
+
|
|
33
|
+
// Balanced-brace scan with string-awareness, for parsing LLM JSON even when
// extra prose surrounds it. Mirrors classifier.py's `_parse_classifier_response`.

// Typographic / full-width punctuation mapped to the ASCII characters JSON
// requires. Anything not listed here passes through unchanged.
const SMART_QUOTE_REPAIR = new Map([
  ["\u201c", '"'], ["\u201d", '"'],
  ["\u2018", "'"], ["\u2019", "'"],
  ["\uff02", '"'], ["\uff1a", ":"], ["\uff0c", ","],
]);

/**
 * Replace smart quotes and full-width colon/comma with their ASCII
 * equivalents so near-JSON output from an LLM becomes parseable.
 */
function repairSmartQuotes(s) {
  let repaired = "";
  for (const ch of s) {
    repaired += SMART_QUOTE_REPAIR.get(ch) ?? ch;
  }
  return repaired;
}

/**
 * Extract and parse the first balanced top-level JSON object found in raw
 * LLM output. Tolerates surrounding prose, ```json fences, trailing commas,
 * and typographic punctuation. Returns the parsed object, or null when no
 * parseable object is present.
 */
function extractJsonObject(raw) {
  if (!raw) return null;

  let text = raw.trim();
  // Prefer the contents of a fenced code block when one is present.
  const fenced = /```(?:json)?\s*([\s\S]+?)```/.exec(text);
  if (fenced) text = fenced[1].trim();

  const open = text.indexOf("{");
  if (open < 0) return null;

  // Walk forward to the matching close brace. Braces inside string
  // literals (and escaped characters within them) must not affect nesting.
  let close = -1;
  let nesting = 0;
  let insideString = false;
  let escaped = false;
  for (let idx = open; idx < text.length; idx++) {
    const ch = text[idx];
    if (escaped) {
      escaped = false;
    } else if (insideString) {
      if (ch === "\\") escaped = true;
      else if (ch === '"') insideString = false;
    } else if (ch === '"') {
      insideString = true;
    } else if (ch === "{") {
      nesting++;
    } else if (ch === "}") {
      nesting--;
      if (nesting === 0) {
        close = idx + 1;
        break;
      }
    }
  }
  if (close < 0) return null;

  const body = text.slice(open, close);
  // Escalating repair levels: verbatim → trailing commas stripped →
  // trailing commas stripped + smart quotes normalized.
  const commasStripped = body.replace(/,\s*([}\]])/g, "$1");
  const attempts = [body, commasStripped, repairSmartQuotes(commasStripped)];
  for (const attempt of attempts) {
    try {
      const parsed = JSON.parse(attempt);
      if (parsed && typeof parsed === "object") return parsed;
    } catch {
      // Still malformed at this repair level — try the next one.
    }
  }
  return null;
}
|
|
86
|
+
|
|
87
|
+
/**
|
|
88
|
+
* Classify a BundleTree's product/report type in one LLM call, with a
|
|
89
|
+
* keyword-based fallback when the LLM is unreachable or returns unparseable
|
|
90
|
+
* output. The classification is cached alongside the BundleTree under
|
|
91
|
+
* <workspace>/cache/bundles/<hash>.classification.json
|
|
92
|
+
* so successive calls on the same bundle are free.
|
|
93
|
+
*
|
|
94
|
+
* Used by the Group D applicability pre-filter: rules whose
|
|
95
|
+
* `applicable_product_types` / `report_types` don't overlap with the
|
|
96
|
+
* bundle classification can be skipped without a skill_authoring turn.
|
|
97
|
+
*/
|
|
98
|
+
export class DocumentClassifyTool extends BaseTool {
  /**
   * @param {object} workspace - Workspace handle; `cwd` is the cache root.
   * @param {object} config - Engine config. Read for llmApiKey / llmBaseUrl /
   *   kcModel / authType / apiFormat when making the classification call.
   */
  constructor(workspace, config) {
    super();
    this._workspace = workspace;
    this._config = config;
  }

  get name() { return "document_classify"; }

  get description() {
    return (
      "Classify a bundle's product type (公募/私募/现金管理类/...) and report type " +
      "(季报/中报/年报) via a one-shot worker-LLM call over each file's first " +
      "~5000 chars. Falls back to keyword matching on LLM failure. Requires a " +
      "prior `document_chunk` call. Result is cached per bundle."
    );
  }

  // Both inputs are optional: omitting cache_key classifies the most
  // recently built bundle.
  get inputSchema() {
    return {
      type: "object",
      properties: {
        cache_key: {
          type: "string",
          description:
            "BundleTree cache file name. Omit to use the most recently built bundle.",
        },
        force_refresh: {
          type: "boolean",
          description: "Re-classify even if a cached classification exists.",
        },
      },
    };
  }

  /**
   * Resolve the target BundleTree cache, return its cached classification
   * when present, otherwise classify (LLM first, keyword fallback) and
   * persist the result as a `.classification.json` sidecar next to the tree.
   *
   * @param {object} input - Matches `inputSchema`.
   * @returns {Promise<ToolResult>} Formatted classification, or an error
   *   ToolResult when no bundle cache exists / the cache is corrupt.
   */
  async execute(input) {
    const cacheKey = input?.cache_key || "";
    const forceRefresh = input?.force_refresh === true;

    const cacheDir = path.join(this._workspace.cwd, CACHE_SUBDIR);
    if (!fs.existsSync(cacheDir)) {
      return new ToolResult(
        "No bundle cache found. Call `document_chunk` first.",
        true,
      );
    }

    // Locate the BundleTree JSON: explicit cache_key (suffix optional) or
    // the most recently modified cache entry.
    let treePath;
    if (cacheKey) {
      treePath = path.join(cacheDir, cacheKey.endsWith(".json") ? cacheKey : `${cacheKey}.json`);
      if (!fs.existsSync(treePath)) {
        return new ToolResult(`BundleTree cache not found: ${cacheKey}`, true);
      }
    } else {
      treePath = this._findMostRecentCache(cacheDir);
      if (!treePath) return new ToolResult("No bundle cache found.", true);
    }

    // Cached classification lives beside the tree: <hash>.classification.json
    const classificationPath = treePath.replace(/\.json$/, ".classification.json");
    if (!forceRefresh && fs.existsSync(classificationPath)) {
      try {
        const cached = JSON.parse(fs.readFileSync(classificationPath, "utf-8"));
        return new ToolResult(this._formatResult(cached, treePath, /*cached*/ true));
      } catch { /* fall through */ }
    }

    let tree;
    try {
      tree = BundleTree.fromJSON(JSON.parse(fs.readFileSync(treePath, "utf-8")));
    } catch (e) {
      return new ToolResult(`Corrupt bundle cache: ${e.message}`, true);
    }

    // Try LLM first; fall back to keyword matching
    const result = (await this._classifyLlm(tree)) || this._classifyKeyword(tree);

    // Persist
    try {
      fs.writeFileSync(classificationPath, JSON.stringify(result, null, 2), "utf-8");
    } catch { /* non-fatal */ }

    return new ToolResult(this._formatResult(result, treePath, /*cached*/ false));
  }

  /**
   * One-shot LLM classification over each file's head (~5000 chars).
   *
   * @param {BundleTree} tree - Bundle to classify.
   * @returns {Promise<object|null>} Classification object (source: "llm"),
   *   or null when config is incomplete, the call fails, or the response
   *   is unparseable — the caller then falls back to _classifyKeyword.
   */
  async _classifyLlm(tree) {
    // Use conductor config for classification. The main-LLM config is always
    // available; the worker LLM tier is phase-gated (distill-only) and
    // classification runs during extraction, so we intentionally use the
    // conductor here even though the AMC Python version uses a worker call.
    const apiKey = this._config?.llmApiKey || "";
    const baseUrl = this._config?.llmBaseUrl || "";
    const model = this._config?.kcModel || "";
    if (!apiKey || !baseUrl || !model) return null;

    // Build prompt: each file's head (up to 5000 chars), concatenated
    const fileBlocks = [];
    const files = tree.files();
    for (const f of files) {
      const src = f.source_file || f.title || "(未命名文件)";
      let text = "";
      // Collect this file's leaf chunks in document order until enough
      // head text is gathered.
      for (const cid of tree.leaves_order) {
        const ch = tree.chunks[cid];
        if (!ch || ch.source_file !== src) continue;
        text += (ch.content || "") + "\n\n";
        if (text.length >= 5000) break;
      }
      fileBlocks.push(`【文件名】${src}\n【前 5000 字预览】\n${text.slice(0, 5000).trim()}`);
    }
    const userMsg =
      `=== 文档包(共 ${fileBlocks.length} 份文件)===\n\n` +
      fileBlocks.join("\n\n---\n\n") +
      "\n\n按格式输出 JSON。";

    const client = new LLMClient({
      apiKey, baseUrl,
      authType: this._config?.authType || "bearer",
      apiFormat: this._config?.apiFormat || "openai",
    });
    let resp;
    try {
      resp = await client.chat({
        model,
        messages: [
          { role: "system", content: CLASSIFIER_SYSTEM },
          { role: "user", content: userMsg },
        ],
        maxTokens: 400,
      });
    } catch {
      // Network / provider failure — signal the caller to use the fallback.
      return null;
    }

    const content = resp?.choices?.[0]?.message?.content || "";
    const parsed = extractJsonObject(content);
    if (!parsed) return null;

    // Normalize: everything coerced to trimmed strings; reasoning capped
    // at 200 chars; missing confidence defaults to 中.
    const product = String(parsed.product_type || "").trim();
    const report = String(parsed.report_type || "").trim();
    const confidence = String(parsed.confidence || "").trim() || "中";
    const reasoning = String(parsed.reasoning || "").trim().slice(0, 200);

    return {
      product_type: product,
      report_type: report,
      confidence,
      reasoning,
      source: "llm",
      model,
    };
  }

  /**
   * Keyword-rule fallback over the head of the bundle (first 8 leaves,
   * ~6000 chars). Always returns a result object (confidence 低, possibly
   * with empty type fields) so execute() never fails outright.
   *
   * @param {BundleTree} tree - Bundle to classify.
   * @returns {object} Classification object (source: "keyword_fallback").
   */
  _classifyKeyword(tree) {
    const out = {
      product_type: "",
      report_type: "",
      confidence: "低",
      reasoning: "关键字规则匹配(LLM 分类不可用时的兜底)",
      source: "keyword_fallback",
    };
    let head = "";
    for (const cid of tree.leaves_order.slice(0, 8)) {
      const ch = tree.chunks[cid];
      head += "\n" + (ch?.content || "");
      if (head.length > 6000) break;
    }
    // Product type: cash-management markers win first (independent
    // disclosure category), then the remaining types in priority order.
    if (head.includes("现金管理") || head.includes("摊余成本法")) {
      out.product_type = "现金管理类";
    } else if (head.includes("公募")) out.product_type = "公募产品";
    else if (head.includes("私募") || head.includes("合格投资者")) out.product_type = "私募产品";
    else if (head.includes("理财")) out.product_type = "理财产品";
    else if (head.includes("信托")) out.product_type = "信托计划";
    else if (head.includes("保险")) out.product_type = "保险资管产品";

    // Report type: annual → semi-annual → quarterly markers; generic
    // periodic-report wording defaults to quarterly (mirrors the system
    // prompt's rule).
    if (head.includes("年度报告") || head.includes("年报")) out.report_type = "年报";
    else if (head.includes("半年度") || head.includes("中报")) out.report_type = "中报";
    else if (head.includes("季度") || head.includes("季报") ||
      head.includes("第4 季度") || head.includes("第3 季度"))
      out.report_type = "季报";
    else if (head.includes("定期公告") || head.includes("定期报告"))
      out.report_type = "季报";

    return out;
  }

  /**
   * Render a classification object as the agent-facing ToolResult text.
   *
   * @param {object} cls - Classification (from cache, LLM, or fallback).
   * @param {string} treePath - Path of the BundleTree cache file.
   * @param {boolean} cached - Whether cls came from the sidecar cache.
   * @returns {string} Multi-line formatted report.
   */
  _formatResult(cls, treePath, cached) {
    const rel = path.relative(this._workspace.cwd, treePath) || treePath;
    return [
      `${cached ? "Cached" : "Fresh"} classification · bundle ${path.basename(treePath)}`,
      ` product_type : ${cls.product_type || "(unknown)"}`,
      ` report_type : ${cls.report_type || "(unknown)"}`,
      ` confidence : ${cls.confidence || "?"}`,
      ` source : ${cls.source}${cls.model ? ` · ${cls.model}` : ""}`,
      ` reasoning : ${cls.reasoning || "(none)"}`,
      "",
      `Persisted to ${rel.replace(/\.json$/, ".classification.json")}.`,
    ].join("\n");
  }

  /**
   * Newest BundleTree cache file in cacheDir by mtime, excluding
   * *.classification.json sidecars.
   *
   * @param {string} cacheDir - Absolute cache directory path.
   * @returns {string|null} Full path of the newest tree cache, or null.
   */
  _findMostRecentCache(cacheDir) {
    let entries;
    try { entries = fs.readdirSync(cacheDir); }
    catch { return null; }
    const candidates = entries
      .filter((n) => n.endsWith(".json") && !n.endsWith(".classification.json"))
      .map((n) => {
        const full = path.join(cacheDir, n);
        // Files that vanish between readdir and stat are dropped.
        try { return { full, mtime: fs.statSync(full).mtimeMs }; }
        catch { return null; }
      })
      .filter(Boolean)
      .sort((a, b) => b.mtime - a.mtime);
    return candidates[0]?.full || null;
  }
}
|
|
@@ -113,8 +113,15 @@ export class DocumentParseTool extends BaseTool {
|
|
|
113
113
|
|
|
114
114
|
if (result) return new ToolResult(this._formatOutput(result, "pdfjs (low quality)", resolved));
|
|
115
115
|
|
|
116
|
+
// A7: Original message implied worker-LLM setup was missing which
|
|
117
|
+
// confused users in early phases (BOOTSTRAP/EXTRACTION) where the
|
|
118
|
+
// worker LLM is intentionally inactive anyway. Clearer phrasing: name
|
|
119
|
+
// exactly what's needed and where to set it, and why.
|
|
120
|
+
const ocrHint = this._ocrModel
|
|
121
|
+
? `Tried pdfjs / VLM (${this._ocrModel}) / MineRU; all failed — the file may be encrypted, corrupted, or an unsupported format.`
|
|
122
|
+
: `pdfjs extraction failed. Set VLM_TIER1 in the workspace .env to enable OCR fallback for image-based / scanned PDFs.`;
|
|
116
123
|
return new ToolResult(
|
|
117
|
-
`Could not extract text from ${pathStr}.
|
|
124
|
+
`Could not extract text from ${pathStr}. ${ocrHint}`,
|
|
118
125
|
true,
|
|
119
126
|
);
|
|
120
127
|
}
|
|
@@ -14,9 +14,18 @@ const VALID_PHASES = new Set(Object.values(Phase));
|
|
|
14
14
|
* asks). Description kept short to minimize system-prompt budget cost.
|
|
15
15
|
*/
|
|
16
16
|
export class PhaseAdvanceTool extends BaseTool {
|
|
17
|
-
|
|
17
|
+
/**
|
|
18
|
+
* @param {(to: string, reason: string, opts: {force?: boolean}) => boolean} advanceFn
|
|
19
|
+
* @param {() => string} getCurrentPhaseFn - H1: lets the tool read the
|
|
20
|
+
* engine's phase BEFORE the call, so it can distinguish "already there"
|
|
21
|
+
* (silent no-op, informational) from "non-adjacent refusal" (actionable).
|
|
22
|
+
* Before H1 both cases returned the same confusing "Either you're already
|
|
23
|
+
* there, or transition is non-adjacent" message.
|
|
24
|
+
*/
|
|
25
|
+
constructor(advanceFn, getCurrentPhaseFn) {
|
|
18
26
|
super();
|
|
19
27
|
this._advance = advanceFn;
|
|
28
|
+
this._getCurrentPhase = getCurrentPhaseFn || (() => null);
|
|
20
29
|
}
|
|
21
30
|
|
|
22
31
|
get name() { return "phase_advance"; }
|
|
@@ -47,14 +56,28 @@ export class PhaseAdvanceTool extends BaseTool {
|
|
|
47
56
|
async execute(input) {
|
|
48
57
|
const to = input.to;
|
|
49
58
|
if (!VALID_PHASES.has(to)) return new ToolResult(`Unknown phase: ${to}`, true);
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
59
|
+
|
|
60
|
+
const beforePhase = this._getCurrentPhase();
|
|
61
|
+
// H1: short-circuit the "already in target" case with an informational
|
|
62
|
+
// message — the agent was trying to advance correctly, engine just
|
|
63
|
+
// auto-advanced ahead of it (common when _maybeAutoAdvance fires on a
|
|
64
|
+
// criteria flip). Treat as success, not refusal.
|
|
65
|
+
if (beforePhase && beforePhase === to) {
|
|
53
66
|
return new ToolResult(
|
|
54
|
-
`
|
|
55
|
-
false,
|
|
67
|
+
`Already in phase ${to} (engine auto-advanced earlier via criteria flip or prior explicit call). Proceed with phase-appropriate work.`,
|
|
56
68
|
);
|
|
57
69
|
}
|
|
58
|
-
|
|
70
|
+
|
|
71
|
+
const advanced = this._advance(to, input.reason || "agent request", { force: !!input.force });
|
|
72
|
+
if (advanced) {
|
|
73
|
+
return new ToolResult(`Advanced${beforePhase ? ` from ${beforePhase}` : ""} to ${to}${input.force ? " (forced)" : ""}`);
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
// Truly refused — non-adjacent transition without force, or terminal-phase
|
|
77
|
+
// forward attempt. Give the actionable hint.
|
|
78
|
+
return new ToolResult(
|
|
79
|
+
`Did not advance to ${to}. Transition is non-adjacent${beforePhase ? ` (currently in ${beforePhase})` : ""} — set force:true to override, or advance to the immediate-next phase first.`,
|
|
80
|
+
false,
|
|
81
|
+
);
|
|
59
82
|
}
|
|
60
83
|
}
|
|
@@ -52,4 +52,14 @@ export class ToolRegistry {
|
|
|
52
52
|
get size() {
|
|
53
53
|
return this._tools.size;
|
|
54
54
|
}
|
|
55
|
+
|
|
56
|
+
/** F5: tool names currently registered. */
|
|
57
|
+
names() {
|
|
58
|
+
return Array.from(this._tools.keys()).sort();
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
  /**
   * F5: lookup a specific tool — used by diagnostics/UI.
   * Returns the registered tool instance, or undefined when no tool with
   * that name has been registered.
   */
  get(name) {
    return this._tools.get(name);
  }
|
|
55
65
|
}
|
|
@@ -85,11 +85,25 @@ export class RuleCatalogTool extends BaseTool {
|
|
|
85
85
|
const ruleId = input.rule_id || "";
|
|
86
86
|
const data = input.data || {};
|
|
87
87
|
|
|
88
|
+
// read operations don't need the lock — they're read-only
|
|
88
89
|
if (op === "list") return this._list();
|
|
89
90
|
if (op === "read") return this._read(ruleId || data.id || "");
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
91
|
+
|
|
92
|
+
// B9: write operations acquire the catalog lock so concurrent engines
|
|
93
|
+
// (main + subagents + sandbox_exec-via-workspace_file) serialize their
|
|
94
|
+
// read-modify-write on catalog.json. Without this, two writers can
|
|
95
|
+
// both read N rules, one writes N+1, the other writes N+1 of its own,
|
|
96
|
+
// and one write is silently lost — exactly what we saw in session
|
|
97
|
+
// 6304673afaa0 thrashing catalog rule counts.
|
|
98
|
+
if (op === "create") {
|
|
99
|
+
return this._workspace.withFileLock("rules/catalog.json", () => this._create(data));
|
|
100
|
+
}
|
|
101
|
+
if (op === "update") {
|
|
102
|
+
return this._workspace.withFileLock("rules/catalog.json", () => this._update(ruleId || data.id || "", data));
|
|
103
|
+
}
|
|
104
|
+
if (op === "delete") {
|
|
105
|
+
return this._workspace.withFileLock("rules/catalog.json", () => this._delete(ruleId || data.id || ""));
|
|
106
|
+
}
|
|
93
107
|
// More helpful than "Unknown operation: " — tells the agent exactly what's
|
|
94
108
|
// allowed and what shape to call with next time (observed in v0.5.3 E2E
|
|
95
109
|
// where GLM-5.1 sent input: {} 38+ times without learning).
|