agent-scene-toolkit 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,135 @@
1
+ # agent-scene-toolkit
2
+
3
+ Lightweight Agent orchestration library built on LangChain, with unified SSE streaming.
4
+
5
+ > **3 分钟上手**:定义 Profile → 定义 ToolKit → 创建 Agent → 对话。
6
+
7
+ ## ✨ Features
8
+
9
+ - **ToolKit**(静态能力包):按领域分组的工具集 + 使用策略 Prompt
10
+ - **Profile**(角色身份):只需定义 name + systemPrompt + model
11
+ - **Scene**(运行时上下文):注入动态业务状态 + 工具集过滤
12
+ - **单/多 Agent 自动切换**:配置 `supervisor` 即启用多 Agent 协作
13
+ - **标准化 SSE 事件流**:`text` / `tool_start` / `tool_end` / `handoff` / `agent` / `error` / `done`
14
+ - **Express 一行集成**:`app.post('/chat', agent.handleRequest())`
15
+
16
+ ## 📦 Install
17
+
18
+ ```bash
19
+ npm install agent-scene-toolkit @langchain/core @langchain/langgraph @langchain/openai langchain express
20
+ ```
21
+
22
+ ## 🚀 Quick Start
23
+
24
+ ```typescript
25
+ import { createAgent, defineProfile, defineToolKit, defineScene } from 'agent-scene-toolkit'
26
+
27
+ // 1. 定义能力包
28
+ const canvasToolKit = defineToolKit({
29
+ name: 'canvas',
30
+ tools: [bindElementTool],
31
+ prompt: '画面调整时优先使用 canvas 工具...',
32
+ })
33
+
34
+ // 2. 定义角色
35
+ const director = defineProfile({
36
+ name: '导演',
37
+ systemPrompt: '你是一位视频导演...',
38
+ model: 'gpt-4o',
39
+ })
40
+
41
+ // 3. 定义场景
42
+ const timelineScene = defineScene({
43
+ name: 'timeline-editing',
44
+ toolkits: ['canvas'],
45
+ prompt: (ctx) => `视频时长: ${ctx.duration}秒`,
46
+ })
47
+
48
+ // 4. 创建 Agent
49
+ const agent = createAgent({
50
+ toolkits: [canvasToolKit],
51
+ agents: [director],
52
+ scene: timelineScene,
53
+ llm: { baseURL: 'https://api.openai.com/v1', apiKey: 'sk-xxx' },
54
+ })
55
+
56
+ // 5. 发起对话
57
+ for await (const event of agent.chat({ message: '你好', threadId: 'thread-001' })) {
58
+ console.log(event)
59
+ }
60
+ ```
61
+
62
+ ## 🤝 Multi-Agent
63
+
64
+ 配置 `supervisor` 后自动启用 Supervisor 策略:
65
+
66
+ ```typescript
67
+ const agent = createAgent({
68
+ toolkits: [canvasToolKit, aiToolKit],
69
+ agents: [director, screenwriter],
70
+ supervisor: '导演', // ← 指定 Supervisor
71
+ llm: { baseURL: 'https://api.openai.com/v1', apiKey: 'sk-xxx' },
72
+ })
73
+ ```
74
+
75
+ ## 🌐 Express Integration
76
+
77
+ ```typescript
78
+ import express from 'express'
79
+
80
+ const app = express()
81
+ app.use(express.json())
82
+ app.post('/chat', agent.handleRequest())
83
+ ```
84
+
85
+ ## 📡 SSE Event Protocol
86
+
87
+ | Event | Trigger | Payload |
88
+ |-------|---------|---------|
89
+ | `text` | LLM outputs text token | `{ content: string }` |
90
+ | `tool_start` | Tool call begins | `{ toolName: string, input: Record<string, any> }` |
91
+ | `tool_end` | Tool call ends | `{ toolName: string, output: any }` |
92
+ | `handoff` | Agent switch (multi-agent) | `{ from: string, to: string }` |
93
+ | `agent` | Current answering agent | `{ name: string }` |
94
+ | `error` | Execution error | `{ message: string }` |
95
+ | `done` | Stream ends | `{}` |
96
+
97
+ ## ⚙️ Full Configuration
98
+
99
+ ```typescript
100
+ const agent = createAgent({
101
+ toolkits: [canvasToolKit, aiToolKit], // Global toolkit pool
102
+ agents: [director], // Agent profiles
103
+ supervisor: '导演', // Optional: enables multi-agent
104
+ scene: timelineScene, // Optional: runtime context + tool filtering
105
+ checkpointer: new MemorySaver(), // Optional: memory persistence
106
+ maxMessages: 50, // Optional: sliding window size
107
+ callbacks: [langfuseHandler], // Optional: LangChain callbacks (e.g. LangFuse)
108
+ llm: { // Optional: OpenAI-compatible gateway
109
+ baseURL: 'https://api.openai.com/v1',
110
+ apiKey: 'sk-xxx',
111
+ },
112
+ })
113
+ ```
114
+
115
+ ## 📖 API Documentation
116
+
117
+ ```bash
118
+ npm run docs
119
+ ```
120
+
121
+ Generates TypeDoc documentation from TSDoc comments.
122
+
123
+ ## 🛠️ Development
124
+
125
+ ```bash
126
+ npm run build # Build ESM + CJS + .d.ts
127
+ npm run dev # Watch mode
128
+ npm run typecheck # Type check
129
+ npm run playground # Launch debug playground
130
+ ```
131
+
132
+ ## 📄 License
133
+
134
+ MIT © [Lootoe](https://github.com/Lootoe)
135
+
package/dist/index.cjs ADDED
@@ -0,0 +1,570 @@
1
+ 'use strict';
2
+
3
+ var langgraph = require('@langchain/langgraph');
4
+ var openai = require('@langchain/openai');
5
+ var langchain = require('langchain');
6
+ var messages = require('@langchain/core/messages');
7
+ var prebuilt = require('@langchain/langgraph/prebuilt');
8
+ var langgraphSupervisor = require('@langchain/langgraph-supervisor');
9
+
10
// src/profile.ts

/**
 * Validates and freezes an agent Profile definition.
 *
 * A Profile is the agent's identity: `name`, `systemPrompt` and `model` are
 * all required (empty strings are rejected as missing).
 *
 * @param {object} input - Profile definition ({ name, systemPrompt, model, ... }).
 * @returns {object} A frozen shallow copy of `input`.
 * @throws {Error} When `input` itself or any required field is missing.
 */
function defineProfile(input) {
  // Guard null/undefined so callers get a clear Error instead of a TypeError
  // from the property accesses below.
  if (!input) throw new Error("Profile definition is required");
  if (!input.name) throw new Error("Profile name is required");
  if (!input.systemPrompt) throw new Error("Profile systemPrompt is required");
  if (!input.model) throw new Error("Profile model is required");
  // Freeze a copy so later mutation of `input` cannot leak into the profile.
  return Object.freeze({ ...input });
}
17
+
18
// src/toolkit.ts

/**
 * Validates and freezes a ToolKit definition (a domain-grouped set of tools
 * plus a usage-policy prompt).
 *
 * @param {object} input - ToolKit definition ({ name, tools, prompt, ... }).
 * @returns {object} A frozen shallow copy of `input`.
 * @throws {Error} When `input` itself is missing, `name`/`prompt` are missing,
 *   or `tools` is absent or empty.
 */
function defineToolKit(input) {
  // Guard null/undefined so callers get a clear Error instead of a TypeError.
  if (!input) throw new Error("ToolKit definition is required");
  if (!input.name) throw new Error("ToolKit name is required");
  if (!input.tools?.length) throw new Error("ToolKit tools must not be empty");
  if (!input.prompt) throw new Error("ToolKit prompt is required");
  // Freeze a copy so later mutation of `input` cannot leak into the toolkit.
  return Object.freeze({ ...input });
}
25
+
26
// src/scene.ts

/**
 * Validates and freezes a Scene definition (runtime context: a toolkit filter
 * plus a prompt factory that renders dynamic business state).
 *
 * @param {object} input - Scene definition ({ name, toolkits, prompt, ... }).
 * @returns {object} A frozen shallow copy of `input`.
 * @throws {Error} When `input` itself is missing, `name` is missing,
 *   `toolkits` is absent/empty, or `prompt` is not a function.
 */
function defineScene(input) {
  // Guard null/undefined so callers get a clear Error instead of a TypeError.
  if (!input) throw new Error("Scene definition is required");
  if (!input.name) throw new Error("Scene name is required");
  if (!input.toolkits?.length) throw new Error("Scene toolkits must not be empty");
  // Scene prompts are rendered per-request with a context object, so a plain
  // string is not acceptable here.
  if (typeof input.prompt !== "function") throw new Error("Scene prompt must be a function");
  // Freeze a copy so later mutation of `input` cannot leak into the scene.
  return Object.freeze({ ...input });
}
33
+
34
// src/prompt.ts

// Library-built-in base instructions shared by every agent (layer 1 of the chain).
var BASE_PROMPT = `You are an autonomous AI agent. You can reason, plan, and take actions using the tools available to you.

## Core Behavior
- When given a task, break it down into steps, then execute each step using the appropriate tools.
- After each tool call, observe the result and decide the next action. Continue until the task is fully completed.
- If no tools are needed, respond directly with your knowledge.
- Never fabricate uncertain information. If you cannot complete a task, explain why honestly.

## Rules
- Respond in the same language as the user.
- Follow tool parameter schemas strictly \u2014 do not invent or omit required fields.
- When multiple tools are available, choose the most relevant one for the current step.`;

/**
 * Assembles the layered system prompt:
 * 1. Base (library-built-in) → 2. Profile systemPrompt → 3. active ToolKit
 * prompts → 4. optional Scene prompt rendered from `sceneContext`.
 *
 * A throwing Scene.prompt() never aborts the chain: the error is logged and a
 * placeholder layer is substituted instead.
 *
 * @param {object} params - { profile, toolkitPrompts, scene?, sceneContext? }.
 * @returns {string} All non-empty layers joined with blank lines.
 */
function buildPromptChain(params) {
  const layers = [
    BASE_PROMPT,
    params.profile.systemPrompt,
    ...params.toolkitPrompts
  ];
  if (params.scene) {
    let sceneLayer;
    try {
      sceneLayer = params.scene.prompt(params.sceneContext ?? {});
    } catch (error) {
      console.error("[buildPromptChain] Scene.prompt() error:", error);
      sceneLayer = `[Scene context unavailable due to error: ${error instanceof Error ? error.message : String(error)}]`;
    }
    layers.push(sceneLayer);
  }
  // filter(Boolean) drops empty layers so we never emit doubled separators.
  return layers.filter(Boolean).join("\n\n");
}
67
/**
 * Builds a single-agent LangGraph graph and starts streaming a response.
 *
 * Pipeline: initialize ChatOpenAI → create the agent graph (with a
 * sliding-window middleware that trims the messages sent to the LLM while the
 * checkpointer still stores full history) → start the stream for
 * `params.message` on thread `params.threadId`.
 *
 * @param {object} params - { model, tools, systemPrompt, message, threadId,
 *   checkpointer, maxMessages, callbacks, llm? }.
 * @returns {Promise<AsyncIterable>} The LangGraph stream of [mode, data] chunks.
 * @throws {Error} Wrapping the underlying failure of each stage (LLM init,
 *   graph creation, stream start).
 */
async function buildSingleGraph(params) {
  // Only pass callbacks when some exist; an empty array would still register
  // handler plumbing for nothing.
  const callbacksOrUndefined = params.callbacks.length > 0 ? params.callbacks : void 0;
  let llm;
  try {
    llm = new openai.ChatOpenAI({
      model: params.model,
      apiKey: params.llm?.apiKey,
      configuration: params.llm?.baseURL ? { baseURL: params.llm.baseURL } : void 0,
      // LLM-level callbacks — trace the LLM calls themselves.
      callbacks: callbacksOrUndefined
    });
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    console.error("[buildSingleGraph] LLM initialization failed:", message);
    throw new Error(`Failed to initialize LLM: ${message}`);
  }
  console.log("[buildSingleGraph] systemPrompt:", JSON.stringify(params.systemPrompt));
  console.log("[buildSingleGraph] model:", params.model);
  console.log("[buildSingleGraph] tools:", params.tools.map((t) => t.name));
  console.log("[buildSingleGraph] threadId:", params.threadId);
  console.log("[buildSingleGraph] maxMessages:", params.maxMessages);
  let graph;
  try {
    graph = langchain.createAgent({
      model: llm,
      tools: params.tools,
      checkpointer: params.checkpointer,
      systemPrompt: params.systemPrompt,
      // Sliding-window middleware — trims at the beforeModel stage only; the
      // checkpointer still persists the full message history.
      middleware: [
        langchain.createMiddleware({
          name: "sliding-window",
          beforeModel: (state) => {
            try {
              const max = params.maxMessages;
              // Returning undefined means "no state change".
              if (!state.messages || state.messages.length <= max) return void 0;
              return { messages: state.messages.slice(-max) };
            } catch (error) {
              console.error("[buildSingleGraph] sliding-window middleware error:", error);
              return void 0;
            }
          }
        })
      ]
    });
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    console.error("[buildSingleGraph] Graph creation failed:", message);
    throw new Error(`Failed to create agent graph: ${message}`);
  }
  try {
    // BUG FIX: `graph.stream()` is async. Without `await`, a rejected promise
    // bypasses this try/catch and surfaces as an unhandled rejection instead
    // of the wrapped "Failed to start agent stream" error.
    return await graph.stream(
      { messages: [new messages.HumanMessage(params.message)] },
      {
        configurable: { thread_id: params.threadId },
        recursionLimit: 25,
        streamMode: ["messages", "updates"],
        // Graph-level callbacks — trace the full execution chain (tool calls,
        // node transitions, etc.).
        callbacks: callbacksOrUndefined
      }
    );
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    console.error("[buildSingleGraph] Stream initialization failed:", message);
    throw new Error(`Failed to start agent stream: ${message}`);
  }
}
135
/**
 * Builds a supervisor-led multi-agent LangGraph graph and starts streaming.
 *
 * Every non-supervisor profile becomes a ReAct worker agent; failures to
 * create individual workers are tolerated (logged + skipped) as long as at
 * least one worker initializes. The supervisor LLM routes between workers.
 *
 * @param {object} params - { agents, supervisorName, supervisorPrompt,
 *   workerPrompts, tools, message, threadId, checkpointer, maxMessages,
 *   callbacks, llm? }.
 * @returns {Promise<AsyncIterable>} The LangGraph stream of [mode, data] chunks.
 * @throws {Error} When all workers fail, the supervisor profile is unknown, or
 *   supervisor LLM init / workflow creation / compilation / stream start fails.
 */
async function buildSupervisorGraph(params) {
  const callbacksOrUndefined = params.callbacks.length > 0 ? params.callbacks : void 0;
  const workerProfiles = params.agents.filter((profile) => profile.name !== params.supervisorName);
  const workers = [];
  // BUG FIX: track successful worker names alongside `workers` — the old
  // `workers.map((_, i) => workerProfiles[i]?.name)` log misreported names
  // whenever an earlier worker failed to initialize.
  const workerNames = [];
  const failedWorkers = [];
  for (const profile of workerProfiles) {
    try {
      const workerLLM = new openai.ChatOpenAI({
        model: profile.model,
        apiKey: params.llm?.apiKey,
        configuration: params.llm?.baseURL ? { baseURL: params.llm.baseURL } : void 0,
        callbacks: callbacksOrUndefined
      });
      // Fall back to the raw systemPrompt if no layered prompt was prebuilt.
      const workerPrompt = params.workerPrompts.get(profile.name) ?? profile.systemPrompt;
      const worker = prebuilt.createReactAgent({
        llm: workerLLM,
        tools: params.tools,
        name: profile.name,
        prompt: workerPrompt
      });
      workers.push(worker);
      workerNames.push(profile.name);
    } catch (error) {
      const message = error instanceof Error ? error.message : String(error);
      console.error(`[buildSupervisorGraph] Failed to create worker "${profile.name}":`, message);
      failedWorkers.push(profile.name);
    }
  }
  if (workers.length === 0) {
    throw new Error(
      `Failed to create any workers. All ${workerProfiles.length} workers failed: ${failedWorkers.join(", ")}`
    );
  }
  if (failedWorkers.length > 0) {
    console.warn(
      `[buildSupervisorGraph] ${failedWorkers.length} worker(s) failed to initialize: ${failedWorkers.join(", ")}`
    );
  }
  const supervisorProfile = params.agents.find((p) => p.name === params.supervisorName);
  // This function is exported and may be called directly — fail with a clear
  // message instead of a TypeError on `.model` below.
  if (!supervisorProfile) {
    throw new Error(`Supervisor profile "${params.supervisorName}" not found in agents`);
  }
  let supervisorLLM;
  try {
    supervisorLLM = new openai.ChatOpenAI({
      model: supervisorProfile.model,
      apiKey: params.llm?.apiKey,
      configuration: params.llm?.baseURL ? { baseURL: params.llm.baseURL } : void 0,
      callbacks: callbacksOrUndefined
    });
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    console.error("[buildSupervisorGraph] Supervisor LLM initialization failed:", message);
    throw new Error(`Failed to initialize Supervisor LLM: ${message}`);
  }
  console.log("[buildSupervisorGraph] supervisor:", params.supervisorName);
  console.log("[buildSupervisorGraph] workers:", workerNames);
  console.log("[buildSupervisorGraph] tools:", params.tools.map((t) => t.name));
  console.log("[buildSupervisorGraph] threadId:", params.threadId);
  let workflow;
  try {
    workflow = langgraphSupervisor.createSupervisor({
      agents: workers,
      llm: supervisorLLM,
      prompt: params.supervisorPrompt,
      // Keep the full message history so the frontend can trace handoffs.
      outputMode: "full_history",
      // Sliding window — trims only what is sent to the LLM; the checkpointer
      // still stores everything. Mirrors the createMiddleware.beforeModel
      // strategy used by buildSingleGraph.
      preModelHook: (state) => {
        try {
          const max = params.maxMessages;
          if (!state.messages || state.messages.length <= max) {
            return { llmInputMessages: state.messages || [] };
          }
          return { llmInputMessages: state.messages.slice(-max) };
        } catch (error) {
          console.error("[buildSupervisorGraph] sliding-window preModelHook error:", error);
          return { llmInputMessages: state.messages || [] };
        }
      }
    });
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    console.error("[buildSupervisorGraph] Supervisor workflow creation failed:", message);
    throw new Error(`Failed to create supervisor workflow: ${message}`);
  }
  let graph;
  try {
    graph = workflow.compile({
      checkpointer: params.checkpointer
    });
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    console.error("[buildSupervisorGraph] Graph compilation failed:", message);
    throw new Error(`Failed to compile supervisor graph: ${message}`);
  }
  try {
    // BUG FIX: `graph.stream()` is async — without `await`, rejections would
    // bypass this catch and escape as unhandled rejections.
    return await graph.stream(
      { messages: [new messages.HumanMessage(params.message)] },
      {
        configurable: { thread_id: params.threadId },
        // Multi-agent runs need a higher recursion limit than single-agent (25).
        recursionLimit: 50,
        streamMode: ["messages", "updates"],
        callbacks: callbacksOrUndefined
      }
    );
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    console.error("[buildSupervisorGraph] Stream initialization failed:", message);
    throw new Error(`Failed to start supervisor stream: ${message}`);
  }
}
246
/**
 * Normalizes a raw LangGraph stream into standardized SSE events.
 *
 * Tracks which agent is currently answering: when the detected agent changes,
 * a `handoff` event (if there was a previous agent) and an `agent` event are
 * emitted before the chunk's own events. After yielding each `tool_end`
 * event, the optional `onToolEnd` hook is invoked (its errors are logged,
 * never propagated). Unparseable chunks are logged and skipped.
 *
 * @param {AsyncIterable} stream - Raw [mode, data] chunks from LangGraph.
 * @param {Function} [onToolEnd] - Optional Scene hook (toolName, output).
 * @yields {object} Standardized SSE events.
 */
async function* transformStream(stream, onToolEnd) {
  let activeAgent = null;
  for await (const chunk of stream) {
    let parsed;
    try {
      parsed = parseStreamChunk(chunk, activeAgent);
    } catch (error) {
      const message = error instanceof Error ? error.message : String(error);
      console.error("[transformStream] Failed to parse chunk:", message, chunk);
      continue;
    }
    const { events, detectedAgent } = parsed;
    if (detectedAgent && detectedAgent !== activeAgent) {
      // Only emit a handoff once a previous agent exists to hand off from.
      if (activeAgent) {
        yield { type: "handoff", from: activeAgent, to: detectedAgent };
      }
      yield { type: "agent", name: detectedAgent };
      activeAgent = detectedAgent;
    }
    for (const event of events) {
      yield event;
      if (event.type === "tool_end" && onToolEnd) {
        try {
          onToolEnd(event.toolName, event.output);
        } catch (error) {
          const message = error instanceof Error ? error.message : String(error);
          console.error(`[transformStream] Scene.onToolEnd("${event.toolName}") error:`, message);
        }
      }
    }
  }
}
280
/**
 * Parses one raw LangGraph [mode, data] chunk into standardized events.
 *
 * "messages" mode: extracts the emitting node name as the detected agent
 * (skipping the "tools" node and internal "__"-prefixed nodes), `tool_start`
 * events from AI tool-call chunks, and `text` events from string content.
 * "updates" mode: extracts `tool_end` events from ToolMessages, JSON-parsing
 * their content when possible.
 *
 * @param {unknown} chunk - Raw stream chunk; non-[mode, data] shapes yield no events.
 * @param {string|null} currentAgentName - Accepted for interface compatibility
 *   with the caller; not consulted here.
 * @returns {{ events: object[], detectedAgent: string|null }}
 */
function parseStreamChunk(chunk, currentAgentName) {
  const events = [];
  let detectedAgent = null;
  // Anything that is not a [mode, data] pair is ignored.
  if (!Array.isArray(chunk) || chunk.length !== 2) {
    return { events, detectedAgent };
  }
  const [mode, data] = chunk;
  if (mode === "messages") {
    const [message, metadata] = data;
    const node = metadata?.langgraph_node;
    if (node && typeof node === "string" && node !== "tools" && !node.startsWith("__")) {
      detectedAgent = node;
    }
    if (messages.isAIMessageChunk(message)) {
      // A named tool-call chunk marks the beginning of a tool invocation;
      // streamed argument fragments carry no name and are skipped.
      for (const toolChunk of message.tool_call_chunks ?? []) {
        if (toolChunk.name) {
          events.push({
            type: "tool_start",
            toolName: toolChunk.name,
            input: {}
          });
        }
      }
      if (typeof message.content === "string" && message.content) {
        events.push({ type: "text", content: message.content });
      }
    }
  } else if (mode === "updates" && data && typeof data === "object") {
    for (const nodeOutput of Object.values(data)) {
      const nodeMessages = nodeOutput?.messages;
      if (!nodeMessages) continue;
      for (const msg of nodeMessages) {
        if (!(msg instanceof messages.ToolMessage)) continue;
        const raw = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
        events.push({
          type: "tool_end",
          toolName: msg.name ?? "unknown",
          output: safeParseJSON(raw)
        });
      }
    }
  }
  return { events, detectedAgent };
}
335
/**
 * Parses a string as JSON, returning the original string unchanged when it is
 * not valid JSON (tool outputs are frequently plain text).
 *
 * @param {string} str - Candidate JSON text.
 * @returns {unknown} The parsed value, or `str` itself on parse failure.
 */
function safeParseJSON(str) {
  let value;
  try {
    value = JSON.parse(str);
  } catch {
    value = str;
  }
  return value;
}
342
/**
 * Serializes an event object as a single SSE frame: `data: <json>` followed
 * by the blank line that terminates an SSE message.
 *
 * @param {object} event - Standardized SSE event.
 * @returns {string} The wire-format frame.
 */
function formatSSE(event) {
  return `data: ${JSON.stringify(event)}\n\n`;
}
347
+
348
+ // src/middleware.ts
349
// src/middleware.ts

/**
 * Writes the response status and headers that establish an SSE stream.
 *
 * `X-Accel-Buffering: no` disables reverse-proxy buffering (e.g. nginx) so
 * events reach the client immediately; `flushHeaders` is optional-chained
 * because not every Response implementation exposes it.
 *
 * @param {object} res - Express-style response object.
 */
function writeSSEHeaders(res) {
  res.status(200);
  const sseHeaders = {
    "Content-Type": "text/event-stream; charset=utf-8",
    "Cache-Control": "no-cache, no-transform",
    "Connection": "keep-alive",
    "X-Accel-Buffering": "no"
  };
  for (const [name, value] of Object.entries(sseHeaders)) {
    res.setHeader(name, value);
  }
  res.flushHeaders?.();
}
357
/**
 * Extracts chat options from an Express request body.
 *
 * Non-string `message`/`threadId` values degrade to empty strings (rejected
 * later by Agent.chat's validation); `sceneContext` passes through untouched.
 *
 * @param {object} req - Express-style request (expects `req.body`).
 * @returns {{ message: string, threadId: string, sceneContext: unknown }}
 */
function parseChatOptions(req) {
  const { message, threadId, sceneContext } = req.body ?? {};
  return {
    message: typeof message === "string" ? message : "",
    threadId: typeof threadId === "string" ? threadId : "",
    sceneContext
  };
}
365
/**
 * Builds an Express request handler that streams `agent.chat()` to the client
 * as SSE. Every outcome — including mid-stream errors and client disconnects —
 * ends the response gracefully instead of crashing the server.
 *
 * @param {object} agent - Agent instance exposing `chat(options)`.
 * @returns {Function} An async Express RequestHandler.
 */
function createExpressHandler(agent) {
  return async (req, res) => {
    try {
      writeSSEHeaders(res);
    } catch (error) {
      console.error("[createExpressHandler] Failed to write SSE headers:", error);
      if (!res.headersSent) {
        res.status(500).json({ error: "Failed to initialize SSE stream" });
      }
      return;
    }
    // Write one SSE frame; a broken pipe reports `false` instead of throwing.
    const safeWrite = (event) => {
      try {
        return res.write(formatSSE(event));
      } catch (error) {
        console.error("[createExpressHandler] Failed to write SSE event:", error);
        return false;
      }
    };
    try {
      for await (const event of agent.chat(parseChatOptions(req))) {
        const delivered = safeWrite(event);
        // A failed write after `done` is harmless; otherwise the client is gone.
        if (!delivered && event.type !== "done") {
          console.warn("[createExpressHandler] Client disconnected, stopping stream");
          break;
        }
      }
    } catch (error) {
      const message = error instanceof Error ? error.message : String(error);
      console.error("[createExpressHandler] Stream error:", message);
      safeWrite({ type: "error", message });
      safeWrite({ type: "done" });
    } finally {
      try {
        res.end();
      } catch (error) {
        console.error("[createExpressHandler] Failed to end response:", error);
      }
    }
  };
}
407
+
408
// src/agent.ts

/**
 * Core orchestrator. Wires Profiles, ToolKits and an optional Scene into a
 * LangGraph graph — single-agent by default, supervisor-led multi-agent when
 * `supervisor` is configured and more than one agent exists — and exposes a
 * standardized SSE event stream.
 */
var Agent = class {
  /** @internal */
  options;
  /**
   * @param {object} options - createAgent options. Defaults applied here:
   *   maxMessages 50, callbacks [], in-memory checkpointer.
   * @throws {Error} From validate() on inconsistent configuration.
   */
  constructor(options) {
    // Defaults are listed first so caller-supplied values win on spread.
    this.options = {
      maxMessages: 50,
      callbacks: [],
      checkpointer: new langgraph.MemorySaver(),
      ...options
    };
    this.validate();
  }
  /**
   * Starts a conversation and yields standardized SSE events.
   *
   * Full pipeline:
   * 1. ToolKit filtering (driven by Scene.toolkits, when a Scene is set)
   * 2. 4-layer prompt chain (Base → Profile → ToolKit → Scene)
   * 3. LangGraph graph construction + stream start
   * 4. Raw stream → standardized SSEEvent transformation
   *
   * Any failure ends the stream normally with `error` + `done` events — the
   * generator never throws to the caller.
   *
   * @param {object} chatOptions - { message, threadId, sceneContext? }.
   * @yields {object} Standardized SSE events.
   */
  async *chat(chatOptions) {
    try {
      // Fail fast on required parameters — reported through the event stream.
      for (const field of ["message", "threadId"]) {
        if (!chatOptions[field]) {
          yield { type: "error", message: `${field} is required` };
          yield { type: "done" };
          return;
        }
      }
      const { scene } = this.options;
      // A Scene narrows the global toolkit pool; otherwise everything is active.
      const activeToolkits = scene
        ? this.options.toolkits.filter((tk) => scene.toolkits.includes(tk.name))
        : this.options.toolkits;
      const tools = activeToolkits.flatMap((tk) => tk.tools);
      const toolkitPrompts = activeToolkits.map((tk) => tk.prompt);
      // Build one layered prompt per profile with the shared request context.
      const promptFor = (profile) => buildPromptChain({
        profile,
        toolkitPrompts,
        scene,
        sceneContext: chatOptions.sceneContext
      });
      const isMultiAgent = this.options.agents.length > 1 && !!this.options.supervisor;
      let stream;
      if (isMultiAgent) {
        const supervisorName = this.options.supervisor;
        const supervisorProfile = this.options.agents.find((a) => a.name === supervisorName);
        const supervisorPrompt = promptFor(supervisorProfile);
        const workerPrompts = new Map(
          this.options.agents
            .filter((profile) => profile.name !== supervisorName)
            .map((profile) => [profile.name, promptFor(profile)])
        );
        stream = await buildSupervisorGraph({
          supervisorPrompt,
          agents: this.options.agents,
          supervisorName,
          tools,
          workerPrompts,
          message: chatOptions.message,
          threadId: chatOptions.threadId,
          checkpointer: this.options.checkpointer,
          maxMessages: this.options.maxMessages,
          callbacks: this.options.callbacks,
          llm: this.options.llm
        });
      } else {
        // Single-agent mode always runs the first configured profile.
        const profile = this.options.agents[0];
        stream = await buildSingleGraph({
          systemPrompt: promptFor(profile),
          tools,
          model: profile.model,
          message: chatOptions.message,
          threadId: chatOptions.threadId,
          checkpointer: this.options.checkpointer,
          maxMessages: this.options.maxMessages,
          callbacks: this.options.callbacks,
          llm: this.options.llm
        });
      }
      yield* transformStream(stream, scene?.onToolEnd);
      yield { type: "done" };
    } catch (err) {
      const message = err instanceof Error
        ? `${err.message}${err.cause ? ` | cause: ${err.cause}` : ""}${err.stack ? `\n${err.stack}` : ""}`
        : String(err);
      console.error("[agent.chat] error:", message);
      yield { type: "error", message };
      yield { type: "done" };
    }
  }
  /**
   * Returns an Express RequestHandler for direct route mounting.
   *
   * @example
   * ```ts
   * app.post('/chat', agent.handleRequest())
   * ```
   */
  handleRequest() {
    return createExpressHandler(this);
  }
  /**
   * Configuration validation — runs at construction time, fails fast.
   * Checks: at least one agent; the supervisor (if set) names a known agent;
   * every Scene toolkit reference resolves to a registered toolkit.
   */
  validate() {
    if (!this.options.agents.length) {
      throw new Error("At least one agent is required");
    }
    const { supervisor, scene } = this.options;
    if (supervisor && !this.options.agents.some((a) => a.name === supervisor)) {
      throw new Error(`Supervisor "${supervisor}" not found in agents`);
    }
    if (scene) {
      const registeredNames = new Set(this.options.toolkits.map((tk) => tk.name));
      for (const name of scene.toolkits) {
        if (!registeredNames.has(name)) {
          throw new Error(`Scene references toolkit "${name}" which is not registered`);
        }
      }
    }
  }
};
554
/**
 * Factory for {@link Agent}; exported under the public name `createAgent`.
 *
 * @param {object} options - See the Agent constructor.
 * @returns {Agent} A validated Agent instance.
 * @throws {Error} From Agent.validate() on inconsistent configuration.
 */
function createAgent2(options) {
  return new Agent(options);
}

exports.Agent = Agent;
exports.buildPromptChain = buildPromptChain;
exports.buildSingleGraph = buildSingleGraph;
exports.buildSupervisorGraph = buildSupervisorGraph;
exports.createAgent = createAgent2;
exports.createExpressHandler = createExpressHandler;
exports.defineProfile = defineProfile;
exports.defineScene = defineScene;
exports.defineToolKit = defineToolKit;
exports.formatSSE = formatSSE;
exports.transformStream = transformStream;
//# sourceMappingURL=index.cjs.map