@yh-ui/ai-sdk 0.1.21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +309 -0
- package/dist/agent-enhanced.cjs +292 -0
- package/dist/agent-enhanced.d.ts +143 -0
- package/dist/agent-enhanced.mjs +267 -0
- package/dist/cache-adapter.cjs +99 -0
- package/dist/cache-adapter.d.ts +42 -0
- package/dist/cache-adapter.mjs +95 -0
- package/dist/future.cjs +882 -0
- package/dist/future.d.ts +519 -0
- package/dist/future.mjs +765 -0
- package/dist/index.cjs +913 -0
- package/dist/index.d.ts +55 -0
- package/dist/index.mjs +217 -0
- package/dist/langchain.cjs +363 -0
- package/dist/langchain.d.ts +232 -0
- package/dist/langchain.mjs +319 -0
- package/dist/loaders.cjs +110 -0
- package/dist/loaders.d.ts +58 -0
- package/dist/loaders.mjs +76 -0
- package/dist/mcp-server.cjs +265 -0
- package/dist/mcp-server.d.ts +186 -0
- package/dist/mcp-server.mjs +234 -0
- package/dist/mcp.cjs +370 -0
- package/dist/mcp.d.ts +206 -0
- package/dist/mcp.mjs +354 -0
- package/dist/observability.cjs +150 -0
- package/dist/observability.d.ts +112 -0
- package/dist/observability.mjs +117 -0
- package/dist/rag-production.cjs +95 -0
- package/dist/rag-production.d.ts +43 -0
- package/dist/rag-production.mjs +85 -0
- package/dist/rate-limit.cjs +73 -0
- package/dist/rate-limit.d.ts +55 -0
- package/dist/rate-limit.mjs +51 -0
- package/dist/vector-store.cjs +63 -0
- package/dist/vector-store.d.ts +74 -0
- package/dist/vector-store.mjs +55 -0
- package/dist/vue/index.cjs +1023 -0
- package/dist/vue/index.d.ts +627 -0
- package/dist/vue/index.mjs +913 -0
- package/package.json +87 -0
|
@@ -0,0 +1,232 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LangChain Integration for YH-UI
|
|
3
|
+
*
|
|
4
|
+
* 提供与 LangChain 的深度集成
|
|
5
|
+
* 支持流式输出、函数调用、对话历史管理
|
|
6
|
+
*/
|
|
7
|
+
import { type Ref } from 'vue';
|
|
8
|
+
import type { BaseMessage } from '@langchain/core/messages';
|
|
9
|
+
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
|
10
|
+
import { HumanMessage, AIMessage } from '@langchain/core/messages';
|
|
11
|
+
/** A single chat message tracked by the YH-UI LangChain integration. */
export interface LangChainMessage {
    /** Unique message id (generated by the composables). */
    id: string;
    /** Who produced the message. */
    role: 'user' | 'assistant' | 'system' | 'tool';
    /** Plain-text message body. */
    content: string;
    /** Optional tool/function name (used with role 'tool'). */
    name?: string;
    /** Id of the tool call this message answers (role 'tool'). */
    toolCallId?: string;
    /** Creation timestamp. */
    createdAt?: Date;
    /** Extra provider-specific fields — presumably mirrors LangChain's additional_kwargs; verify against the caller. */
    additionalKwargs?: Record<string, unknown>;
}
|
|
20
|
+
/** Configuration for a LangChain-backed chat session. */
export interface LangChainConfig {
    /** LangChain ChatModel instance. */
    model: BaseChatModel;
    /** System prompt prepended to every request. */
    systemMessage?: string;
    /** Sampling temperature. */
    temperature?: number;
    /** Maximum number of tokens to generate. */
    maxTokens?: number;
    /** Whether to stream the response. */
    streaming?: boolean;
    /** Lifecycle callbacks. */
    callbacks?: {
        /** Called for each streamed text fragment. */
        onChunk?: (chunk: string) => void;
        /** Called once the full assistant message is available. */
        onFinish?: (message: LangChainMessage) => void;
        /** Called when the request fails. */
        onError?: (error: Error) => void;
    };
}
|
|
38
|
+
/** Options for the useLangChainChat composable. */
export interface UseLangChainChatOptions {
    /** LangChain ChatModel instance. */
    model: BaseChatModel;
    /** Messages to seed the conversation with. */
    initialMessages?: LangChainMessage[];
    /** System prompt prepended to every request. */
    systemMessage?: string;
    /** Maximum number of history messages kept (default 20). */
    maxHistory?: number;
    /** Sampling temperature. */
    temperature?: number;
    /** Whether to stream the response. */
    streaming?: boolean;
    /** Callback: a streamed text fragment arrived. */
    onChunk?: (chunk: string) => void;
    /** Callback: the assistant reply is complete. */
    onFinish?: (message: LangChainMessage) => void;
    /** Callback: the request failed. */
    onError?: (error: Error) => void;
}
|
|
58
|
+
/** Reactive state and actions returned by useLangChainChat. */
export interface UseLangChainChatReturn {
    /** Conversation history. */
    messages: Ref<LangChainMessage[]>;
    /** Current input text. */
    input: Ref<string>;
    /** True while a request is in flight. */
    isLoading: Ref<boolean>;
    /** Last error, or null. */
    error: Ref<Error | null>;
    /** Send a user message and await the assistant reply. */
    sendMessage: (content: string) => Promise<void>;
    /** Clear the conversation history. */
    clearHistory: () => void;
    /** Re-generate the reply to the last user message. */
    reload: () => void;
}
|
|
74
|
+
/** Options for the useLangChainStream composable. */
export interface UseLangChainStreamOptions {
    /** LangChain ChatModel instance. */
    model: BaseChatModel;
    /** System prompt prepended to the request. */
    systemMessage?: string;
    /** Sampling temperature. NOTE(review): the current implementation does not forward this to the model — configure it on the model instance. */
    temperature?: number;
    /** Maximum number of tokens to generate. NOTE(review): likewise not forwarded by the current implementation. */
    maxTokens?: number;
}
|
|
84
|
+
/** Reactive state and actions returned by useLangChainStream. */
export interface UseLangChainStreamReturn {
    /** Accumulated generated text. */
    content: Ref<string>;
    /** True while streaming is in progress. */
    isStreaming: Ref<boolean>;
    /** Last error, or null. */
    error: Ref<Error | null>;
    /** Start a streaming request for the given prompt, with optional prior history. */
    start: (prompt: string, history?: LangChainMessage[]) => Promise<void>;
    /** Cancel the current stream. */
    stop: () => void;
}
|
|
96
|
+
/**
 * LangChain Runtime — browser-side runtime helpers for LangChain.js.
 *
 * @example
 * ```ts
 * import { ChatOpenAI } from '@langchain/openai'
 * import { langChainRuntime } from '@yh-ui/ai-sdk'
 *
 * const model = new ChatOpenAI({
 *   model: 'gpt-4',
 *   apiKey: yourApiKey
 * })
 *
 * const result = await langChainRuntime.invoke(model, '你好')
 * ```
 */
export declare const langChainRuntime: {
    /**
     * Invoke the model once and resolve with the complete reply.
     * NOTE(review): temperature/maxTokens are accepted but not forwarded by
     * the current runtime — configure them on the model instance.
     */
    invoke(model: BaseChatModel, input: string | HumanMessage | BaseMessage, options?: {
        systemMessage?: string;
        temperature?: number;
        maxTokens?: number;
    }): Promise<AIMessage>;
    /**
     * Stream the model reply, emitting each text fragment through onChunk,
     * and resolve with the concatenated final message.
     */
    stream(model: BaseChatModel, input: string | HumanMessage | BaseMessage, options?: {
        systemMessage?: string;
        temperature?: number;
        maxTokens?: number;
        onChunk?: (chunk: string) => void;
    }): Promise<AIMessage>;
    /**
     * Stream with tool (function-call) support; tool calls surfaced in the
     * response are reported through onToolCall.
     */
    streamWithTools(model: BaseChatModel, input: string | HumanMessage | BaseMessage, tools: unknown[], options?: {
        systemMessage?: string;
        temperature?: number;
        maxTokens?: number;
        onChunk?: (chunk: string) => void;
        onToolCall?: (toolCall: {
            name: string;
            args: Record<string, unknown>;
        }) => void;
    }): Promise<{
        message: AIMessage;
        toolCalls?: unknown[];
    }>;
};
|
|
147
|
+
/**
 * Chat with a LangChain model (Vue composable).
 *
 * @example
 * ```ts
 * import { ChatOpenAI } from '@langchain/openai'
 * import { useLangChainChat } from '@yh-ui/ai-sdk'
 *
 * const model = new ChatOpenAI({ model: 'gpt-4' })
 *
 * const { messages, input, isLoading, sendMessage } = useLangChainChat({
 *   model,
 *   systemMessage: '你是一个有帮助的助手',
 *   streaming: true,
 *   onChunk: (chunk) => console.log('收到:', chunk)
 * })
 *
 * await sendMessage('你好')
 * ```
 */
export declare function useLangChainChat(options: UseLangChainChatOptions): UseLangChainChatReturn;
|
|
168
|
+
/**
 * Streaming text generation with a LangChain model (Vue composable).
 *
 * @example
 * ```ts
 * import { ChatOpenAI } from '@langchain/openai'
 * import { useLangChainStream } from '@yh-ui/ai-sdk'
 *
 * const model = new ChatOpenAI({ model: 'gpt-4' })
 *
 * const { content, isStreaming, start, stop } = useLangChainStream({
 *   model,
 *   systemMessage: '你是一个有帮助的助手'
 * })
 *
 * await start('请介绍一下 Vue 3')
 * ```
 */
export declare function useLangChainStream(options: UseLangChainStreamOptions): UseLangChainStreamReturn;
|
|
187
|
+
/**
 * Create a minimal conversation chain around a LangChain model.
 *
 * @example
 * ```ts
 * import { ChatOpenAI } from '@langchain/openai'
 * import { createLangChainChain } from '@yh-ui/ai-sdk'
 *
 * const model = new ChatOpenAI({ model: 'gpt-4' })
 *
 * const chain = createLangChainChain(model, {
 *   systemMessage: '你是一个有帮助的助手',
 *   temperature: 0.7
 * })
 *
 * const result = await chain.invoke('你好')
 * ```
 */
export declare function createLangChainChain(model: BaseChatModel, config?: {
    systemMessage?: string;
    temperature?: number;
    maxTokens?: number;
    streaming?: boolean;
}): {
    /**
     * Invoke once and return the reply content.
     */
    invoke(input: string): Promise<string | (import("@langchain/core/messages").ContentBlock | import("@langchain/core/messages").ContentBlock.Text)[]>;
    /**
     * Stream the reply content chunk by chunk.
     */
    stream(input: string): AsyncGenerator<string | (import("@langchain/core/messages").ContentBlock | import("@langchain/core/messages").ContentBlock.Text)[], void, unknown>;
    /**
     * Invoke with tool (function-call) support; toolHandler executes each
     * requested tool and its result is fed back to the model.
     */
    invokeWithTools(input: string, tools: unknown[], toolHandler?: (toolName: string, args: Record<string, unknown>) => Promise<string>): Promise<{
        message: string | (import("@langchain/core/messages").ContentBlock | import("@langchain/core/messages").ContentBlock.Text)[];
        toolCalls: import("@langchain/core/messages").OpenAIToolCall[];
    } | {
        message: string | (import("@langchain/core/messages").ContentBlock | import("@langchain/core/messages").ContentBlock.Text)[];
        toolCalls: undefined;
    }>;
};
|
|
230
|
+
export type { BaseChatModel, BaseChatModelCallOptions } from '@langchain/core/language_models/chat_models';
|
|
231
|
+
export type { BaseMessage, AIMessage as LCAIMessage, HumanMessage as LCHumanMessage, SystemMessage as LCSystemMessage, ToolMessage as LCToolMessage } from '@langchain/core/messages';
|
|
232
|
+
export { AIMessage, HumanMessage, SystemMessage, ToolMessage } from '@langchain/core/messages';
|
|
@@ -0,0 +1,319 @@
|
|
|
1
|
+
import { ref, onUnmounted } from "vue";
|
|
2
|
+
import { HumanMessage, AIMessage, SystemMessage, ToolMessage } from "@langchain/core/messages";
|
|
3
|
+
/**
 * Build the LangChain message list for one model call: optional system
 * prompt first, then the user input (strings are wrapped in HumanMessage,
 * message objects are passed through unchanged).
 */
function buildRuntimeMessages(input, systemMessage) {
  const messages = [];
  if (systemMessage) {
    messages.push(new SystemMessage(systemMessage));
  }
  messages.push(typeof input === "string" ? new HumanMessage(input) : input);
  return messages;
}

/**
 * Parse a tool-call `arguments` JSON string defensively. While streaming,
 * providers deliver `arguments` incrementally, so a chunk may carry a
 * partial (unparseable) JSON fragment; returning {} instead of throwing
 * keeps the stream alive.
 */
function parseToolArguments(raw) {
  if (!raw) return {};
  try {
    return JSON.parse(raw);
  } catch {
    return {};
  }
}

/**
 * LangChain runtime helpers used by the Vue composables.
 * NOTE(review): options.temperature / options.maxTokens are accepted for
 * API symmetry but are not forwarded to the model here — configure them
 * on the model instance itself.
 */
export const langChainRuntime = {
  /**
   * Invoke the model once and return the complete AIMessage.
   */
  async invoke(model, input, options) {
    const messages = buildRuntimeMessages(input, options?.systemMessage);
    const response = await model.invoke(messages);
    return response;
  },
  /**
   * Stream the model reply; emits each text fragment through
   * options.onChunk and resolves with the concatenated AIMessage.
   */
  async stream(model, input, options) {
    const messages = buildRuntimeMessages(input, options?.systemMessage);
    const stream = await model.stream(messages);
    let fullContent = "";
    for await (const chunk of stream) {
      const content = String(chunk?.content || "");
      fullContent += content;
      options?.onChunk?.(content);
    }
    return new AIMessage(fullContent);
  },
  /**
   * Stream with tool (function-call) support. Tool-call deltas found in
   * additional_kwargs are collected and surfaced via options.onToolCall.
   *
   * BUGFIX: tool-call `arguments` are now parsed defensively; the previous
   * unguarded JSON.parse threw on the partial JSON fragments that arrive
   * mid-stream, aborting the whole request.
   */
  async streamWithTools(model, input, tools, options) {
    const messages = buildRuntimeMessages(input, options?.systemMessage);
    const modelWithTools = model.bind({ tools });
    const stream = await modelWithTools.stream(messages);
    let fullContent = "";
    const toolCalls = [];
    for await (const chunk of stream) {
      const content = String(chunk?.content || "");
      if (content) {
        fullContent += content;
        options?.onChunk?.(content);
      }
      const additionalKwargs = chunk?.additional_kwargs;
      if (additionalKwargs?.tool_calls) {
        for (const tc of additionalKwargs.tool_calls) {
          toolCalls.push(tc);
          options?.onToolCall?.({
            name: tc.function?.name || "",
            args: parseToolArguments(tc.function?.arguments)
          });
        }
      }
    }
    return {
      message: new AIMessage(fullContent),
      toolCalls: toolCalls.length > 0 ? toolCalls : void 0
    };
  }
};
|
|
82
|
+
/**
 * Vue composable: chat with a LangChain chat model.
 *
 * BUGFIX: the newly added user message is now included in the prompt sent
 * to the model. The previous version built the request from
 * `messages.value.slice(0, -1)` (dropping the user message just appended)
 * and never re-added it, so the model never saw the latest user turn.
 */
export function useLangChainChat(options) {
  const {
    model,
    initialMessages = [],
    systemMessage,
    maxHistory = 20,
    streaming = false,
    onChunk,
    onFinish,
    onError
  } = options;
  const messages = ref([...initialMessages]);
  const input = ref("");
  const isLoading = ref(false);
  const error = ref(null);
  // Convert UI-level messages to LangChain message objects.
  const toLangChainMessages = (msgs) => {
    return msgs.map((msg) => {
      switch (msg.role) {
        case "user":
          return new HumanMessage(msg.content);
        case "assistant":
          return new AIMessage(msg.content);
        case "system":
          return new SystemMessage(msg.content);
        case "tool":
          return new ToolMessage({
            content: msg.content,
            tool_call_id: msg.toolCallId || ""
          });
        default:
          return new HumanMessage(msg.content);
      }
    });
  };
  // Append a message with a generated id/timestamp, trimming to maxHistory.
  const addMessage = (msg) => {
    const newMessage = {
      ...msg,
      id: `msg-${Date.now()}-${Math.random().toString(36).slice(2)}`,
      createdAt: /* @__PURE__ */ new Date()
    };
    messages.value = [...messages.value, newMessage].slice(-maxHistory);
    return newMessage;
  };
  const sendMessage = async (content) => {
    if (!content.trim() || isLoading.value) return;
    error.value = null;
    isLoading.value = true;
    addMessage({ role: "user", content });
    input.value = "";
    try {
      const langChainMessages = [];
      if (systemMessage) {
        langChainMessages.push(new SystemMessage(systemMessage));
      }
      // Include the full history — ending with the user message just added —
      // so the model actually receives the latest question.
      langChainMessages.push(...toLangChainMessages(messages.value));
      if (streaming) {
        const stream = await model.stream(langChainMessages);
        const assistantMsg = addMessage({ role: "assistant", content: "" });
        let fullContent = "";
        for await (const chunk of stream) {
          const chunkContent = String(chunk?.content || "");
          fullContent += chunkContent;
          // Rebuild the array so Vue reactivity picks up the growing reply.
          messages.value = messages.value.map(
            (m) => m.id === assistantMsg.id ? { ...m, content: fullContent } : m
          );
          onChunk?.(chunkContent);
        }
        onFinish?.(messages.value[messages.value.length - 1]);
      } else {
        const response = await model.invoke(langChainMessages);
        const assistantMsg = addMessage({
          role: "assistant",
          content: String(response.content)
        });
        onFinish?.(assistantMsg);
      }
    } catch (err) {
      const errorObj = err instanceof Error ? err : new Error(String(err));
      error.value = errorObj;
      onError?.(errorObj);
    } finally {
      isLoading.value = false;
    }
  };
  // Drop all messages.
  const clearHistory = () => {
    messages.value = [];
  };
  // Re-send the last user message, discarding any replies after it.
  const reload = async () => {
    if (messages.value.length > 0) {
      const lastUserMsg = [...messages.value].reverse().find((m) => m.role === "user");
      if (lastUserMsg) {
        const userMsgIndex = messages.value.findIndex((m) => m.id === lastUserMsg.id);
        messages.value = messages.value.slice(0, userMsgIndex + 1);
        await sendMessage(lastUserMsg.content);
      }
    }
  };
  return {
    messages,
    input,
    isLoading,
    error,
    sendMessage,
    clearHistory,
    reload
  };
}
|
|
189
|
+
/**
 * Vue composable: one-shot streaming text generation with optional history.
 *
 * BUGFIX: `stop()` is now functional. The previous version declared an
 * AbortController variable but never assigned one, so stop() could not
 * cancel the request and the stream kept consuming chunks in the
 * background after "stopping".
 */
export function useLangChainStream(options) {
  const { model, systemMessage } = options;
  const content = ref("");
  const isStreaming = ref(false);
  const error = ref(null);
  let abortController = null;
  const start = async (prompt, history = []) => {
    if (isStreaming.value) {
      stop();
    }
    error.value = null;
    isStreaming.value = true;
    content.value = "";
    // Fresh controller per run so stop() cancels this stream only.
    abortController = new AbortController();
    const { signal } = abortController;
    try {
      const messages = [];
      if (systemMessage) {
        messages.push(new SystemMessage(systemMessage));
      }
      for (const msg of history) {
        switch (msg.role) {
          case "user":
            messages.push(new HumanMessage(msg.content));
            break;
          case "assistant":
            messages.push(new AIMessage(msg.content));
            break;
          case "system":
            messages.push(new SystemMessage(msg.content));
            break;
        }
      }
      messages.push(new HumanMessage(prompt));
      // Forward the abort signal so the underlying request is cancelled too
      // (LangChain runnables accept { signal } in the call options).
      const stream = await model.stream(messages, { signal });
      for await (const chunk of stream) {
        if (signal.aborted) break;
        const chunkContent = String(chunk?.content || "");
        content.value += chunkContent;
      }
    } catch (err) {
      // Cancellation is not an error from the caller's point of view.
      if (err.name !== "AbortError") {
        const errorObj = err instanceof Error ? err : new Error(String(err));
        error.value = errorObj;
      }
    } finally {
      isStreaming.value = false;
    }
  };
  const stop = () => {
    if (abortController) {
      abortController.abort();
      abortController = null;
    }
    isStreaming.value = false;
  };
  // Cancel any in-flight stream when the component unmounts.
  onUnmounted(() => {
    stop();
  });
  return {
    content,
    isStreaming,
    error,
    start,
    stop
  };
}
|
|
253
|
+
/**
 * Create a minimal conversation chain around a LangChain chat model.
 *
 * BUGFIX in invokeWithTools: the previous version returned from inside the
 * tool-call loop after handling only the FIRST tool call (and re-pushed the
 * assistant response on every iteration). All tool calls are now executed,
 * their results appended as ToolMessages, and a single follow-up model call
 * produces the final answer.
 */
export function createLangChainChain(model, config) {
  // Build the base prompt: optional system message followed by the input.
  const buildMessages = (input) => {
    const messages = [];
    if (config?.systemMessage) {
      messages.push(new SystemMessage(config.systemMessage));
    }
    messages.push(new HumanMessage(input));
    return messages;
  };
  return {
    /**
     * Invoke once and return the reply content.
     */
    async invoke(input) {
      const response = await model.invoke(buildMessages(input));
      return response.content;
    },
    /**
     * Stream the reply content chunk by chunk.
     */
    async *stream(input) {
      const stream = await model.stream(buildMessages(input));
      for await (const chunk of stream) {
        yield chunk?.content || "";
      }
    },
    /**
     * Invoke with tool support: run every requested tool through
     * toolHandler, feed the results back, and return the final reply.
     */
    async invokeWithTools(input, tools, toolHandler) {
      const messages = buildMessages(input);
      const modelWithTools = model.bind({ tools });
      const response = await modelWithTools.invoke(messages);
      const toolCalls = response?.additional_kwargs?.tool_calls;
      if (toolCalls && toolCalls.length > 0 && toolHandler) {
        // The assistant turn that requested the tools goes in once.
        messages.push(response);
        for (const tc of toolCalls) {
          const toolName = tc.function?.name || "";
          let args = {};
          if (tc.function?.arguments) {
            try {
              args = JSON.parse(tc.function.arguments);
            } catch {
              args = {}; // malformed arguments — run the tool with none
            }
          }
          const toolResult = await toolHandler(toolName, args);
          messages.push(
            new ToolMessage({
              content: toolResult,
              tool_call_id: tc.id || ""
            })
          );
        }
        const finalResponse = await model.invoke(messages);
        return {
          message: finalResponse.content,
          toolCalls
        };
      }
      return {
        message: response.content,
        toolCalls: void 0
      };
    }
  };
}
|
|
319
|
+
export { AIMessage, HumanMessage, SystemMessage, ToolMessage } from "@langchain/core/messages";
|
package/dist/loaders.cjs
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
|
|
3
|
+
Object.defineProperty(exports, "__esModule", {
|
|
4
|
+
value: true
|
|
5
|
+
});
|
|
6
|
+
exports.chunkText = chunkText;
|
|
7
|
+
exports.createFileLoader = createFileLoader;
|
|
8
|
+
exports.createTextLoader = createTextLoader;
|
|
9
|
+
exports.loadMarkdown = loadMarkdown;
|
|
10
|
+
/**
 * Wrap a raw string as a single loader document.
 *
 * @param content  Raw text; surrounding whitespace is stripped.
 * @param metadata Optional metadata attached verbatim to the document.
 * @returns A `{ content, metadata }` document object.
 */
function createTextLoader(content, metadata) {
  const trimmed = content.trim();
  return { content: trimmed, metadata };
}
|
|
16
|
+
/**
 * Split a markdown string into chunk documents.
 *
 * Headings (`#`..`######`) start a new chunk once the running buffer has
 * reached `chunkSize`; otherwise chunks are cut on size with a trailing
 * overlap carried into the next chunk. Overlap is capped at half the
 * chunk size.
 *
 * @param markdown Source markdown text.
 * @param options  { chunkSize?: number (default 1000),
 *                   chunkOverlap?: number (default 200),
 *                   split?: boolean (default true) }
 * @returns Array of `{ content, metadata }` documents; with `split: false`
 *          a single document without an index.
 */
function loadMarkdown(markdown, options = {}) {
  const chunkSize = options.chunkSize ?? 1000;
  const overlap = Math.min(options.chunkOverlap ?? 200, Math.floor(chunkSize * 0.5));
  const shouldSplit = options.split !== undefined ? options.split : true;
  if (!shouldSplit) {
    return [{
      content: markdown.trim(),
      metadata: { type: "markdown" }
    }];
  }
  const segments = [];
  // Push the trimmed buffer, skipping whitespace-only content.
  const flush = (text) => {
    const trimmed = text.trim();
    if (trimmed) segments.push(trimmed);
  };
  let buffer = "";
  for (const line of markdown.split(/\r?\n/)) {
    const headingBoundary = /^#{1,6}\s/.test(line) && buffer.length >= chunkSize;
    if (headingBoundary) {
      // A heading closes the full buffer; the new chunk starts clean
      // (no overlap carried across a heading boundary).
      flush(buffer);
      buffer = line + "\n";
    } else {
      buffer += line + "\n";
      if (buffer.length >= chunkSize) {
        flush(buffer);
        buffer = buffer.slice(-overlap);
      }
    }
  }
  flush(buffer);
  return segments.map((content, index) => ({
    content,
    metadata: {
      type: "markdown",
      index
    }
  }));
}
|
|
55
|
+
/**
 * Split plain text into overlapping fixed-size chunks.
 *
 * Chunks prefer to break at the last space in the window (when it falls
 * past the halfway point); the final `chunkOverlap` characters of each
 * chunk are repeated at the start of the next. Overlap is capped at half
 * the chunk size, and the cursor always advances so the loop terminates.
 *
 * @param text    Source text.
 * @param options { chunkSize?: number (default 800),
 *                  chunkOverlap?: number (default 100) }
 * @returns Array of `{ content, metadata: { index } }` documents.
 */
function chunkText(text, options = {}) {
  const size = options.chunkSize ?? 800;
  const overlap = Math.min(options.chunkOverlap ?? 100, Math.floor(size * 0.5));
  const result = [];
  let cursor = 0;
  while (cursor < text.length) {
    const windowEnd = Math.min(cursor + size, text.length);
    let piece = text.slice(cursor, windowEnd);
    // Prefer a word boundary unless this window already reaches the end.
    if (windowEnd < text.length) {
      const boundary = piece.lastIndexOf(" ");
      if (boundary > size / 2) {
        piece = piece.slice(0, boundary + 1);
      }
    }
    const trimmed = piece.trim();
    if (trimmed) {
      result.push({
        content: trimmed,
        metadata: { index: result.length }
      });
    }
    // Step forward with overlap, but never stand still.
    const advanced = cursor + piece.length - overlap;
    cursor = advanced > cursor ? advanced : cursor + piece.length;
    if (cursor >= text.length) break;
  }
  return result;
}
|
|
81
|
+
/**
 * Build a document loader facade over the text/markdown chunkers, with
 * optional pluggable PDF/DOCX backends.
 *
 * @param options { pdf?, docx?: user-supplied extractors,
 *                  chunkSize?: number (default 800),
 *                  chunkOverlap?: number (default 100) }
 * @returns Async loader methods; PDF/DOCX throw unless a backend was given.
 */
function createFileLoader(options) {
  const { pdf, docx, chunkSize = 800, chunkOverlap = 100 } = options;
  const splitOptions = { chunkSize, chunkOverlap };
  return {
    async loadFromText(content) {
      return chunkText(content, splitOptions);
    },
    async loadFromMarkdown(content) {
      return loadMarkdown(content, splitOptions);
    },
    async loadFromPDF(input) {
      if (!pdf) throw new Error("PDF loader not configured. Provide options.pdf or install a PDF library.");
      return pdf(input);
    },
    async loadFromDOCX(input) {
      if (!docx) throw new Error("DOCX loader not configured. Provide options.docx or install a DOCX library.");
      return docx(input);
    }
  };
}
|