@langgraph-js/sdk 1.0.0 → 1.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41) hide show
  1. package/.env +0 -0
  2. package/README.md +163 -0
  3. package/dist/LangGraphClient.d.ts +101 -0
  4. package/dist/LangGraphClient.js +401 -0
  5. package/dist/SpendTime.d.ts +9 -0
  6. package/dist/SpendTime.js +32 -0
  7. package/dist/ToolManager.d.ts +63 -0
  8. package/dist/ToolManager.js +93 -0
  9. package/dist/index.d.ts +5 -0
  10. package/dist/index.js +5 -0
  11. package/dist/tool/copilotkit-actions.d.ts +66 -0
  12. package/dist/tool/copilotkit-actions.js +1 -0
  13. package/dist/tool/createTool.d.ts +47 -0
  14. package/dist/tool/createTool.js +61 -0
  15. package/dist/tool/index.d.ts +2 -0
  16. package/dist/tool/index.js +2 -0
  17. package/dist/tool/utils.d.ts +36 -0
  18. package/dist/tool/utils.js +120 -0
  19. package/dist/ui-store/UnionStore.d.ts +11 -0
  20. package/dist/ui-store/UnionStore.js +9 -0
  21. package/dist/ui-store/createChatStore.d.ts +43 -0
  22. package/dist/ui-store/createChatStore.js +145 -0
  23. package/dist/ui-store/index.d.ts +2 -0
  24. package/dist/ui-store/index.js +2 -0
  25. package/index.html +12 -0
  26. package/package.json +35 -7
  27. package/src/LangGraphClient.ts +461 -0
  28. package/src/SpendTime.ts +29 -0
  29. package/src/ToolManager.ts +100 -0
  30. package/src/index.ts +5 -0
  31. package/src/tool/copilotkit-actions.ts +72 -0
  32. package/src/tool/createTool.ts +78 -0
  33. package/src/tool/index.ts +2 -0
  34. package/src/tool/utils.ts +158 -0
  35. package/src/ui-store/UnionStore.ts +20 -0
  36. package/src/ui-store/createChatStore.ts +153 -0
  37. package/src/ui-store/index.ts +2 -0
  38. package/test/testResponse.json +5418 -0
  39. package/tsconfig.json +112 -0
  40. package/ui/index.ts +182 -0
  41. package/ui/tool.ts +55 -0
package/.env ADDED
File without changes
package/README.md ADDED
@@ -0,0 +1,163 @@
1
+ # @langgraph-js/sdk
2
+
3
+ ![npm version](https://img.shields.io/npm/v/@langgraph-js/sdk)
4
+ ![license](https://img.shields.io/npm/l/@langgraph-js/sdk)
5
+
6
+ > The missing UI SDK for LangGraph - seamlessly integrate your AI agents with frontend interfaces
7
+
8
+ ## Why @langgraph-js/sdk?
9
+
10
+ Building AI agent applications is complex, especially when you need to bridge the gap between LangGraph agents and interactive user interfaces. This SDK solves the critical challenges of frontend integration:
11
+
12
+ - **Provides a complete UI integration layer** - no more complex custom code to handle tools, streaming, and state management
13
+ - **Simplifies human-in-the-loop interactions** - easily incorporate user feedback within agent workflows
14
+ - **Handles edge cases automatically** - interruptions, errors, token management and more
15
+ - **Offers a rich set of UI components** - ready-to-use elements to display agent interactions
16
+
17
+ [DOCS](https://langgraph-js.netlify.app)
18
+
19
+ ## Installation
20
+
21
+ ```bash
22
+ # Using npm
23
+ npm install @langgraph-js/sdk
24
+
25
+ # Using yarn
26
+ yarn add @langgraph-js/sdk
27
+
28
+ # Using pnpm
29
+ pnpm add @langgraph-js/sdk
30
+ ```
31
+
32
+ ## Key Features
33
+
34
+ ### Generative UI
35
+
36
+ - ✅ Custom Tool Messages
37
+ - ✅ Token Counter
38
+ - ✅ Stop Graph Progress
39
+ - ✅ Interrupt Handling
40
+ - ✅ Error Handling
41
+ - ✅ Spend Time Tracking
42
+ - ✅ Time Persistence
43
+
44
+ ### Frontend Actions
45
+
46
+ - ✅ Definition of Union Tools
47
+ - ✅ Frontend Functions As Tools
48
+ - ✅ Human-in-the-Loop Interaction
49
+ - ✅ Interrupt Mode
50
+
51
+ ### Authorization
52
+
53
+ - ✅ Cookie-Based Authentication
54
+ - ✅ Custom Token Authentication
55
+
56
+ ### Persistence
57
+
58
+ - ✅ Read History from LangGraph
59
+
60
+ ## Advanced Usage
61
+
62
+ ### Creating a Chat Store
63
+
64
+ You can easily create a reactive store for your LangGraph client:
65
+
66
+ ```typescript
67
+ import { createChatStore } from "@langgraph-js/sdk";
68
+
69
+ export const globalChatStore = createChatStore(
70
+ "agent",
71
+ {
72
+ // Custom LangGraph backend interaction
73
+ apiUrl: "http://localhost:8123",
74
+ // Custom headers for authentication
75
+ defaultHeaders: JSON.parse(localStorage.getItem("code") || "{}"),
76
+ callerOptions: {
77
+ // Example for including cookies
78
+ // fetch(url: string, options: RequestInit) {
79
+ // options.credentials = "include";
80
+ // return fetch(url, options);
81
+ // },
82
+ },
83
+ },
84
+ {
85
+ onInit(client) {
86
+ client.tools.bindTools([]);
87
+ },
88
+ }
89
+ );
90
+ ```
91
+
92
+ ### React Integration
93
+
94
+ First, install the nanostores React integration:
95
+
96
+ ```bash
97
+ pnpm i @nanostores/react
98
+ ```
99
+
100
+ Then create a context provider for your chat:
101
+
102
+ ```tsx
103
+ import React, { createContext, useContext, useEffect } from "react";
104
+ import { globalChatStore } from "../store"; // Import your store
105
+ import { UnionStore, useUnionStore } from "@langgraph-js/sdk";
106
+ import { useStore } from "@nanostores/react";
107
+
108
+ type ChatContextType = UnionStore<typeof globalChatStore>;
109
+
110
+ const ChatContext = createContext<ChatContextType | undefined>(undefined);
111
+
112
+ export const useChat = () => {
113
+ const context = useContext(ChatContext);
114
+ if (!context) {
115
+ throw new Error("useChat must be used within a ChatProvider");
116
+ }
117
+ return context;
118
+ };
119
+
120
+ export const ChatProvider = ({ children }) => {
121
+ // Use store to ensure React gets reactive state updates
122
+ const store = useUnionStore(globalChatStore, useStore);
123
+
124
+ useEffect(() => {
125
+ // Initialize client
126
+ store.initClient().then(() => {
127
+ // Initialize conversation history
128
+ store.refreshHistoryList();
129
+ });
130
+ }, [store.currentAgent]);
131
+
132
+ return <ChatContext.Provider value={store}>{children}</ChatContext.Provider>;
133
+ };
134
+ ```
135
+
136
+ Use it in your components:
137
+
138
+ ```tsx
139
+ export const MyChat = () => {
140
+ return (
141
+ <ChatProvider>
142
+ <ChatComp></ChatComp>
143
+ </ChatProvider>
144
+ );
145
+ };
146
+
147
+ function ChatComp() {
148
+ const chat = useChat();
149
+ // Use chat store methods and state here
150
+ }
151
+ ```
152
+
153
+ ## Documentation
154
+
155
+ For complete documentation, visit our [official docs](https://langgraph-js.netlify.app).
156
+
157
+ ## Contributing
158
+
159
+ Contributions are welcome! Please feel free to submit a Pull Request.
160
+
161
+ ## License
162
+
163
+ This project is licensed under the Apache-2.0 License.
@@ -0,0 +1,101 @@
1
+ import { Client, Thread, Message, Assistant, HumanMessage, ToolMessage, Command } from "@langchain/langgraph-sdk";
2
+ import { ToolManager } from "./ToolManager";
3
+ import { CallToolResult } from "./tool";
4
+ import { AsyncCallerParams } from "@langchain/langgraph-sdk/dist/utils/async_caller";
5
/**
 * A `Message` augmented with UI-oriented fields that the client
 * aggregates while streaming and rendering.
 */
export type RenderMessage = Message & {
    /** Tool input arguments, aggregated from the parent assistant message. */
    tool_input?: string;
    additional_kwargs?: {
        /** Whether the tool call has finished. */
        done?: boolean;
        tool_calls?: {
            function: {
                arguments: string;
            };
        }[];
    };
    /** Token usage reported by the model, when available. */
    usage_metadata?: {
        total_tokens: number;
        input_tokens: number;
        output_tokens: number;
    };
    response_metadata?: {
        create_time: string;
    };
    /** Time spent producing this message (difference of `create_time` stamps, in ms). */
    spend_time?: number;
    /** Unique id used during rendering, aggregated by the client. */
    unique_id?: string;
};
29
/** Construction options for the LangGraph client. */
export interface LangGraphClientConfig {
    /** Base URL of the LangGraph API server. */
    apiUrl?: string;
    /** API key used for authentication. */
    apiKey?: string;
    /** Options forwarded to the underlying async caller (custom fetch, retries, ...). */
    callerOptions?: AsyncCallerParams;
    /** Request timeout in milliseconds. */
    timeoutMs?: number;
    /** Extra headers sent with every request. */
    defaultHeaders?: Record<string, string | null | undefined>;
}
36
/** Static predicates classifying streaming messages by their `type` field. */
export declare class StreamingMessageType {
    /** True when the message was authored by the human user. */
    static isUser(m: Message): m is HumanMessage;
    /** True when the message is a tool-result message. */
    static isTool(m: Message): m is ToolMessage;
    /** True for assistant messages that carry no tool calls. */
    static isAssistant(m: Message): boolean;
    /** Truthy when an assistant message carries `tool_calls` or `tool_call_chunks`. */
    static isToolAssistant(m: Message): any;
}
42
/** Event delivered to streaming subscribers during a run. */
type StreamingUpdateEvent = {
    /** Event category. */
    type: "message" | "value" | "update" | "error" | "thread" | "done";
    /** Raw payload; its shape depends on `type`. */
    data: any;
};
/** Subscriber callback registered via `onStreamingUpdate`. */
type StreamingUpdateCallback = (event: StreamingUpdateEvent) => void;
47
/**
 * High-level LangGraph client for UI integration: it tracks the current
 * assistant and thread, streams run updates to subscribers, aggregates
 * messages for rendering, and dispatches frontend tool calls.
 */
export declare class LangGraphClient extends Client {
    private currentAssistant;
    private currentThread;
    private streamingCallbacks;
    /** Registry of frontend tools exposed to the graph. */
    tools: ToolManager;
    stopController: AbortController | null;
    constructor(config: LangGraphClientConfig);
    /** Assistants found by the most recent server search. */
    availableAssistants: Assistant[];
    private listAssistants;
    /** Select the assistant named `agentName`; rejects if it cannot be found. */
    initAssistant(agentName: string): Promise<void>;
    createThread({ threadId, }?: {
        threadId?: string;
    }): Promise<Thread<import("@langchain/langgraph-sdk").DefaultValues>>;
    listThreads<T>(): Promise<Thread<T>[]>;
    /** Restore state from a historical thread. */
    resetThread(agent: string, threadId: string): Promise<void>;
    /** Messages accumulated from the current streaming run. */
    streamingMessage: RenderMessage[];
    /** Update messages pushed from the graph. */
    graphMessages: RenderMessage[];
    cloneMessage(message: Message): Message;
    private replaceMessageWithValuesMessage;
    /** Messages used for streaming rendering in the UI. */
    get renderMessage(): RenderMessage[];
    /** Attaches `unique_id`, `spend_time` and usage metadata to each message. */
    attachInfoForMessage(result: RenderMessage[]): RenderMessage[];
    /** Pairs tool-result messages with their originating assistant tool calls. */
    composeToolMessages(messages: RenderMessage[]): RenderMessage[];
    /** Token usage aggregated across all graph messages. */
    get tokenCounter(): {
        total_tokens: number;
        input_tokens: number;
        output_tokens: number;
    };
    /** Subscribe to streaming updates; returns an unsubscribe function. */
    onStreamingUpdate(callback: StreamingUpdateCallback): () => void;
    private emitStreamingUpdate;
    graphState: any;
    currentRun?: {
        run_id: string;
    };
    /** Cancel the in-flight run, if any. */
    cancelRun(): void;
    sendMessage(input: string | Message[], { extraParams, _debug, command }?: {
        extraParams?: Record<string, any>;
        _debug?: {
            streamResponse?: any;
        };
        command?: Command;
    }): Promise<any[]>;
    private runFETool;
    private callFETool;
    /** Resume the run after an interrupt, feeding a tool result back to the graph. */
    resume(result: CallToolResult): Promise<any[]>;
    /** Finish a pending frontend-tool wait. */
    doneFEToolWaiting(id: string, result: CallToolResult): void;
    getCurrentThread(): Thread<import("@langchain/langgraph-sdk").DefaultValues> | null;
    getCurrentAssistant(): Assistant | null;
    reset(): Promise<void>;
}
export {};
@@ -0,0 +1,401 @@
1
+ import { Client } from "@langchain/langgraph-sdk";
2
+ import { ToolManager } from "./ToolManager";
3
/**
 * Static predicates for classifying streaming messages by their `type`
 * field and tool-call payload.
 */
export class StreamingMessageType {
    /** True when the message was authored by the human user. */
    static isUser(m) {
        return m.type === "human";
    }
    /** True when the message is a tool-result message. */
    static isTool(m) {
        return m.type === "tool";
    }
    /** True for plain assistant messages that carry no tool calls. */
    static isAssistant(m) {
        return m.type === "ai" && !this.isToolAssistant(m);
    }
    /**
     * Truthy when an assistant message carries tool calls — either fully
     * parsed `tool_calls` or streaming `tool_call_chunks`.
     */
    static isToolAssistant(m) {
        if (m.type !== "ai") {
            return false;
        }
        /** @ts-ignore */
        return m.tool_calls?.length || m.tool_call_chunks?.length;
    }
}
19
/**
 * High-level LangGraph client for UI integration. Tracks the current
 * assistant and thread, streams run updates to subscribers, aggregates
 * messages for rendering, and dispatches frontend tool calls.
 */
export class LangGraphClient extends Client {
    constructor(config) {
        super(config);
        this.currentAssistant = null;
        this.currentThread = null;
        this.streamingCallbacks = new Set();
        this.tools = new ToolManager();
        this.stopController = null;
        this.availableAssistants = [];
        this.streamingMessage = [];
        /** Update messages pushed from the graph */
        this.graphMessages = [];
        this.graphState = {};
    }
    // Search the server for assistants (first 100).
    listAssistants() {
        return this.assistants.search({
            metadata: null,
            offset: 0,
            limit: 100,
        });
    }
    /** Select the assistant named `agentName`; throws if it cannot be found. */
    async initAssistant(agentName) {
        try {
            const assistants = await this.listAssistants();
            this.availableAssistants = assistants;
            if (assistants.length > 0) {
                this.currentAssistant = assistants.find((assistant) => assistant.name === agentName) || null;
                if (!this.currentAssistant) {
                    throw new Error("Agent not found");
                }
            }
            else {
                throw new Error("No assistants found");
            }
        }
        catch (error) {
            console.error("Failed to initialize LangGraphClient:", error);
            throw error;
        }
    }
    /** Create (and make current) a new thread, optionally with a fixed id. */
    async createThread({ threadId, } = {}) {
        try {
            this.currentThread = await this.threads.create({
                threadId,
            });
            return this.currentThread;
        }
        catch (error) {
            console.error("Failed to create new thread:", error);
            throw error;
        }
    }
    /** List threads, newest first. */
    async listThreads() {
        return this.threads.search({
            sortOrder: "desc",
        });
    }
    /** Restore data from history */
    async resetThread(agent, threadId) {
        await this.initAssistant(agent);
        this.currentThread = await this.threads.get(threadId);
        this.graphState = this.currentThread.values;
        this.graphMessages = this.graphState.messages;
        // Notify subscribers so the UI re-renders the restored messages.
        this.emitStreamingUpdate({
            type: "value",
            data: {
                event: "messages/partial",
                data: {
                    messages: this.graphMessages,
                },
            },
        });
    }
    // Deep copy via JSON round-trip; drops functions/undefined fields.
    cloneMessage(message) {
        return JSON.parse(JSON.stringify(message));
    }
    // Prefer the authoritative copy of a message from graphMessages (matched by
    // id, or tool_call_id for tool messages), keeping the streamed tool_input.
    replaceMessageWithValuesMessage(message, isTool = false) {
        const key = (isTool ? "tool_call_id" : "id");
        const valuesMessage = this.graphMessages.find((i) => i[key] === message[key]);
        if (valuesMessage) {
            return {
                ...valuesMessage,
                /** @ts-ignore */
                tool_input: message.tool_input,
            };
        }
        return message;
    }
    /** Messages used for streaming rendering in the UI */
    get renderMessage() {
        var _a;
        const previousMessage = new Map();
        const result = [];
        const inputMessages = [...this.graphMessages, ...this.streamingMessage];
        // Iterate from the end so the most recent copy of each id wins
        for (let i = inputMessages.length - 1; i >= 0; i--) {
            const message = this.cloneMessage(inputMessages[i]);
            if (!message.id) {
                result.unshift(message);
                continue;
            }
            // Skip messages whose id has already been handled
            if (previousMessage.has(message.id)) {
                continue;
            }
            if (StreamingMessageType.isToolAssistant(message)) {
                const m = this.replaceMessageWithValuesMessage(message);
                // Record this id and add the message to the result
                previousMessage.set(message.id, m);
                /** @ts-ignore */
                const tool_calls = ((_a = m.tool_calls) === null || _a === void 0 ? void 0 : _a.length) ? m.tool_calls : m.tool_call_chunks;
                // Synthesize placeholder tool messages for each tool call so the
                // UI can render them before the real tool results arrive.
                const new_tool_calls = tool_calls.map((tool, index) => {
                    var _a;
                    return this.replaceMessageWithValuesMessage({
                        type: "tool",
                        additional_kwargs: {},
                        /** @ts-ignore */
                        tool_input: (_a = m.additional_kwargs) === null || _a === void 0 ? void 0 : _a.tool_calls[index].function.arguments,
                        id: tool.id,
                        name: tool.name,
                        response_metadata: {},
                        tool_call_id: tool.id,
                    }, true);
                });
                for (const tool of new_tool_calls) {
                    if (!previousMessage.has(tool.id)) {
                        result.unshift(tool);
                        previousMessage.set(tool.id, tool);
                    }
                }
                result.unshift(m);
            }
            else {
                // Record this id and add the message to the result
                const m = this.replaceMessageWithValuesMessage(message);
                previousMessage.set(message.id, m);
                result.unshift(m);
            }
        }
        return this.attachInfoForMessage(this.composeToolMessages(result));
    }
    /** Attach `unique_id`, `spend_time` and usage metadata to each message. */
    attachInfoForMessage(result) {
        var _a, _b, _c;
        let lastMessage = null;
        for (const message of result) {
            const createTime = ((_a = message.response_metadata) === null || _a === void 0 ? void 0 : _a.create_time) || "";
            // Use content length as part of the render id: when the length changes, re-render
            message.unique_id = message.id + JSON.stringify(message.content).length;
            // Elapsed ms since the previous message's create_time (0 for the first).
            message.spend_time = new Date(createTime).getTime() - new Date(((_b = lastMessage === null || lastMessage === void 0 ? void 0 : lastMessage.response_metadata) === null || _b === void 0 ? void 0 : _b.create_time) || createTime).getTime();
            if (!message.usage_metadata && ((_c = message.response_metadata) === null || _c === void 0 ? void 0 : _c.usage)) {
                const usage = message.response_metadata.usage;
                message.usage_metadata = {
                    input_tokens: usage.prompt_tokens,
                    output_tokens: usage.completion_tokens,
                    total_tokens: usage.total_tokens,
                };
            }
            lastMessage = message;
        }
        return result;
    }
    /** Pair tool-result messages with the assistant tool calls that produced them. */
    composeToolMessages(messages) {
        var _a;
        const result = [];
        const assistantToolMessages = new Map();
        const toolParentMessage = new Map();
        for (const message of messages) {
            if (StreamingMessageType.isToolAssistant(message)) {
                /** @ts-ignore only the args of tool_call_chunks are plain text */
                (_a = message.tool_call_chunks) === null || _a === void 0 ? void 0 : _a.forEach((element) => {
                    assistantToolMessages.set(element.id, element);
                    toolParentMessage.set(element.id, message);
                });
                // Drop content-less tool-call shells from the rendered list.
                if (!message.content)
                    continue;
            }
            if (StreamingMessageType.isTool(message) && !message.tool_input) {
                const assistantToolMessage = assistantToolMessages.get(message.tool_call_id);
                const parentMessage = toolParentMessage.get(message.tool_call_id);
                if (assistantToolMessage) {
                    message.tool_input = assistantToolMessage.args;
                    if (message.additional_kwargs) {
                        message.additional_kwargs.done = true;
                    }
                    else {
                        message.additional_kwargs = {
                            done: true,
                        };
                    }
                }
                if (parentMessage) {
                    message.usage_metadata = parentMessage.usage_metadata;
                }
            }
            result.push(message);
        }
        return result;
    }
    /** Token usage summed across all graph messages. */
    get tokenCounter() {
        return this.graphMessages.reduce((acc, message) => {
            var _a, _b, _c, _d, _e;
            if (message.usage_metadata) {
                acc.total_tokens += ((_a = message.usage_metadata) === null || _a === void 0 ? void 0 : _a.total_tokens) || 0;
                acc.input_tokens += ((_b = message.usage_metadata) === null || _b === void 0 ? void 0 : _b.input_tokens) || 0;
                acc.output_tokens += ((_c = message.usage_metadata) === null || _c === void 0 ? void 0 : _c.output_tokens) || 0;
            }
            else if ((_d = message.response_metadata) === null || _d === void 0 ? void 0 : _d.usage) {
                // Fall back to the raw provider usage block when usage_metadata is absent.
                const usage = (_e = message.response_metadata) === null || _e === void 0 ? void 0 : _e.usage;
                acc.total_tokens += usage.total_tokens || 0;
                acc.input_tokens += usage.prompt_tokens || 0;
                acc.output_tokens += usage.completion_tokens || 0;
            }
            return acc;
        }, {
            total_tokens: 0,
            input_tokens: 0,
            output_tokens: 0,
        });
    }
    /** Subscribe to streaming updates; returns an unsubscribe function. */
    onStreamingUpdate(callback) {
        this.streamingCallbacks.add(callback);
        return () => {
            this.streamingCallbacks.delete(callback);
        };
    }
    // Fan an event out to every registered subscriber.
    emitStreamingUpdate(event) {
        this.streamingCallbacks.forEach((callback) => callback(event));
    }
    /** Cancel the in-flight run, if both thread and run are known. */
    cancelRun() {
        var _a, _b;
        if (((_a = this.currentThread) === null || _a === void 0 ? void 0 : _a.thread_id) && ((_b = this.currentRun) === null || _b === void 0 ? void 0 : _b.run_id)) {
            this.runs.cancel(this.currentThread.thread_id, this.currentRun.run_id);
        }
    }
    /**
     * Send user input (or raw messages) to the graph and consume the
     * resulting stream, emitting updates along the way. Returns the raw
     * stream chunks (plus any frontend-tool follow-up chunks).
     */
    async sendMessage(input, { extraParams, _debug, command } = {}) {
        if (!this.currentAssistant) {
            throw new Error("Thread or Assistant not initialized");
        }
        // Lazily create a thread on first send.
        if (!this.currentThread) {
            await this.createThread();
            this.emitStreamingUpdate({
                type: "thread",
                data: {
                    event: "thread/create",
                    data: {
                        thread: this.currentThread,
                    },
                },
            });
        }
        const messagesToSend = Array.isArray(input)
            ? input
            : [
                {
                    type: "human",
                    content: input,
                },
            ];
        // _debug.streamResponse lets tests inject a canned stream.
        const streamResponse = (_debug === null || _debug === void 0 ? void 0 : _debug.streamResponse) ||
            this.runs.stream(this.currentThread.thread_id, this.currentAssistant.assistant_id, {
                input: { ...this.graphState, ...(extraParams || {}), messages: messagesToSend, fe_tools: this.tools.toJSON() },
                streamMode: ["messages", "values"],
                streamSubgraphs: true,
                command,
            });
        const streamRecord = [];
        for await (const chunk of streamResponse) {
            streamRecord.push(chunk);
            if (chunk.event === "metadata") {
                this.currentRun = chunk.data;
            }
            else if (chunk.event === "error") {
                this.emitStreamingUpdate({
                    type: "error",
                    data: chunk,
                });
            }
            else if (chunk.event === "messages/partial") {
                for (const message of chunk.data) {
                    this.streamingMessage.push(message);
                }
                this.emitStreamingUpdate({
                    type: "message",
                    data: chunk,
                });
                continue;
            }
            else if (chunk.event.startsWith("values")) {
                const data = chunk.data;
                if (data.messages) {
                    const isResume = !!(command === null || command === void 0 ? void 0 : command.resume);
                    const isLongerThanLocal = data.messages.length >= this.graphMessages.length;
                    // When resuming, reject any snapshot shorter than the local message list
                    if (!isResume || (isResume && isLongerThanLocal)) {
                        this.graphMessages = data.messages;
                        this.emitStreamingUpdate({
                            type: "value",
                            data: chunk,
                        });
                    }
                }
                this.graphState = chunk.data;
                this.streamingMessage = [];
                continue;
            }
        }
        this.streamingMessage = [];
        // Run any frontend tools requested by the last assistant message.
        const data = await this.runFETool();
        if (data)
            streamRecord.push(...data);
        this.emitStreamingUpdate({
            type: "done",
            data: {
                event: "done",
            },
        });
        return streamRecord;
    }
    runFETool() {
        var _a;
        const data = this.graphMessages;
        const lastMessage = data[data.length - 1];
        // If the last message is a frontend tool call, invoke the tool
        // NOTE(review): if graphMessages is empty, lastMessage is undefined and the
        // access below throws — confirm callers always have at least one message here.
        if (lastMessage.type === "ai" && ((_a = lastMessage.tool_calls) === null || _a === void 0 ? void 0 : _a.length)) {
            const result = lastMessage.tool_calls.map((tool) => {
                // Only tools registered in the frontend ToolManager are executed;
                // unregistered ones map to undefined.
                if (this.tools.getTool(tool.name)) {
                    const toolMessage = {
                        ...tool,
                        tool_call_id: tool.id,
                        /** @ts-ignore */
                        tool_input: JSON.stringify(tool.args),
                        additional_kwargs: {},
                    };
                    // JSON validation
                    return this.callFETool(toolMessage, tool.args);
                }
            });
            return Promise.all(result);
        }
    }
    // Execute a single frontend tool and resume the graph with its result.
    async callFETool(message, args) {
        const that = this; // guard against incorrect `this` resolution
        const result = await this.tools.callTool(message.name, args, { client: that, message });
        return this.resume(result);
    }
    /** Resume the run after an interrupt, feeding a tool result back to the graph */
    resume(result) {
        return this.sendMessage([], {
            command: {
                resume: result,
            },
        });
    }
    /** Finish a pending frontend-tool wait */
    doneFEToolWaiting(id, result) {
        var _a;
        const done = this.tools.doneWaiting(id, result);
        // If no local waiter matched but the thread is interrupted, resume the graph directly.
        if (!done && ((_a = this.currentThread) === null || _a === void 0 ? void 0 : _a.status) === "interrupted") {
            this.resume(result);
        }
    }
    getCurrentThread() {
        return this.currentThread;
    }
    getCurrentAssistant() {
        return this.currentAssistant;
    }
    /** Clear all thread/run state and re-select the current assistant. */
    async reset() {
        var _a;
        await this.initAssistant((_a = this.currentAssistant) === null || _a === void 0 ? void 0 : _a.name);
        this.currentThread = null;
        this.graphState = {};
        this.graphMessages = [];
        this.streamingMessage = [];
        this.currentRun = undefined;
        this.emitStreamingUpdate({
            type: "value",
            data: {
                event: "messages/partial",
            },
        });
    }
}
@@ -0,0 +1,9 @@
1
/** Tracks start/end timestamps per key and derives the elapsed time. */
export declare class SpendTime {
    private timeCounter;
    /** Record the start time for `key`. */
    start(key: string): void;
    /** Record the end time for `key`. */
    end(key: string): void;
    setSpendTime(key: string): void;
    getStartTime(key: string): Date;
    getEndTime(key: string): Date;
    /** Elapsed time for `key` — presumably milliseconds; implementation not visible here. */
    getSpendTime(key: string): number;
}