@yh-ui/ai-sdk 0.1.21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +309 -0
- package/dist/agent-enhanced.cjs +292 -0
- package/dist/agent-enhanced.d.ts +143 -0
- package/dist/agent-enhanced.mjs +267 -0
- package/dist/cache-adapter.cjs +99 -0
- package/dist/cache-adapter.d.ts +42 -0
- package/dist/cache-adapter.mjs +95 -0
- package/dist/future.cjs +882 -0
- package/dist/future.d.ts +519 -0
- package/dist/future.mjs +765 -0
- package/dist/index.cjs +913 -0
- package/dist/index.d.ts +55 -0
- package/dist/index.mjs +217 -0
- package/dist/langchain.cjs +363 -0
- package/dist/langchain.d.ts +232 -0
- package/dist/langchain.mjs +319 -0
- package/dist/loaders.cjs +110 -0
- package/dist/loaders.d.ts +58 -0
- package/dist/loaders.mjs +76 -0
- package/dist/mcp-server.cjs +265 -0
- package/dist/mcp-server.d.ts +186 -0
- package/dist/mcp-server.mjs +234 -0
- package/dist/mcp.cjs +370 -0
- package/dist/mcp.d.ts +206 -0
- package/dist/mcp.mjs +354 -0
- package/dist/observability.cjs +150 -0
- package/dist/observability.d.ts +112 -0
- package/dist/observability.mjs +117 -0
- package/dist/rag-production.cjs +95 -0
- package/dist/rag-production.d.ts +43 -0
- package/dist/rag-production.mjs +85 -0
- package/dist/rate-limit.cjs +73 -0
- package/dist/rate-limit.d.ts +55 -0
- package/dist/rate-limit.mjs +51 -0
- package/dist/vector-store.cjs +63 -0
- package/dist/vector-store.d.ts +74 -0
- package/dist/vector-store.mjs +55 -0
- package/dist/vue/index.cjs +1023 -0
- package/dist/vue/index.d.ts +627 -0
- package/dist/vue/index.mjs +913 -0
- package/package.json +87 -0
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* YH-UI AI SDK Utilities
|
|
3
|
+
*
|
|
4
|
+
* 提供基础的 AI 工具函数和类型定义
|
|
5
|
+
* 同时完全兼容 Vercel AI SDK
|
|
6
|
+
*/
|
|
7
|
+
export { generateText, streamText, generateObject, streamObject, embed, embedMany, experimental_generateImage, experimental_generateSpeech, experimental_transcribe, tool, jsonSchema, zodSchema, customProvider, createProviderRegistry, wrapLanguageModel, extractReasoningMiddleware, simulateStreamingMiddleware, defaultSettingsMiddleware, smoothStream, simulateReadableStream, generateId, createIdGenerator, cosineSimilarity, createDataStream, createDataStreamResponse, formatAssistantStreamPart, formatDataStreamPart, parseAssistantStreamPart, parseDataStreamPart, pipeDataStreamToResponse, processDataStream, processTextStream, convertToCoreMessages, appendClientMessage, appendResponseMessages, coreMessageSchema, coreSystemMessageSchema, coreUserMessageSchema, coreAssistantMessageSchema, coreToolMessageSchema, LangChainAdapter, LlamaIndexAdapter } from 'ai';
|
|
8
|
+
export { AISDKError, APICallError, AssistantResponse, DownloadError, EmptyResponseBodyError, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidPromptError, InvalidResponseDataError, InvalidStreamPartError, InvalidToolArgumentsError, JSONParseError, LoadAPIKeyError, MCPClientError, MessageConversionError, NoContentGeneratedError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchModelError, NoSuchProviderError, NoSuchToolError, Output, RetryError, StreamData, ToolCallRepairError, ToolExecutionError, TypeValidationError, UnsupportedFunctionalityError, UnsupportedModelVersionError } from 'ai';
|
|
9
|
+
export type { BaseChatModel, BaseChatModelCallOptions, BaseMessage, AIMessage as LCAIMessage, HumanMessage as LCHumanMessage, SystemMessage as LCSystemMessage, ToolMessage as LCToolMessage, LangChainMessage, LangChainConfig, UseLangChainChatOptions, UseLangChainChatReturn, UseLangChainStreamOptions, UseLangChainStreamReturn } from './langchain';
|
|
10
|
+
export { AIMessage, HumanMessage, SystemMessage, ToolMessage } from './langchain';
|
|
11
|
+
export { useLangChainChat, useLangChainStream, createLangChainChain, langChainRuntime } from './langchain';
|
|
12
|
+
export type { StreamableValue, ConversationMessage, ConversationConfig, ToolCallHandler, ProviderAdapter, ModelConfig, UseAIChatOptions, UseAIChatReturn, UseAIStreamOptions, UseAIStreamReturn, AIContextValue, ToolCall, XRequestConfig, XRequestCallbacks, AIMiddleware, CacheConfig, RetryConfig, Conversation, UseConversationsOptions, ProviderPreset } from './vue';
|
|
13
|
+
export { createStreamableValue, useStreamableValue, useConversation, useConversations, useAIChat, useAIStream, createYHFunctionTool, createProviderAdapter, createAIContext, XRequest, createXRequest, registerMiddleware, clearCache, getProviderPreset, PROVIDER_PRESETS, createVercelAIProvider } from './vue';
|
|
14
|
+
export { useReActAgent, createPlanExecuteAgent, type AgentConfig, type AgentTool, type AgentStep, type AgentResult, type StopCondition } from './future';
|
|
15
|
+
export { createMultiModalMessage, createImageContent, createImageUrlContent, createAudioContent, createVideoContent, type MultiModalContent, type MultiModalMessage, type VisionAnalysisOptions } from './future';
|
|
16
|
+
export { createRAGSystem, type RAGConfig, type RAGResult, type DocumentChunk } from './future';
|
|
17
|
+
export { createChainOfThought, type CoTConfig, type ReasoningStep } from './future';
|
|
18
|
+
export { createContextCompressor, type CompressionConfig, type CompressionResult } from './future';
|
|
19
|
+
export { createCostTracker, type CostConfig, type CostTracking, type TokenUsage } from './future';
|
|
20
|
+
export { createTracer, type TraceEvent, type TraceSpan } from './future';
|
|
21
|
+
export { createOTelConsoleExporter, createLangSmithExporter, createObservabilityManager, toOTelSpan, type OTelSpan, type OTelExportPayload, type LangSmithRun, type LangSmithClientConfig, type TraceExporter } from './observability';
|
|
22
|
+
export { createSafetyFilter, type SafetyRule, type SafetyResult } from './future';
|
|
23
|
+
export { fromZodSchema, createJSONSchema, schema, parseStructuredOutput, type SchemaDefinition, type SchemaProperty } from './future';
|
|
24
|
+
export { createInMemoryVectorStore, type IVectorStore, type VectorDocument, type VectorSearchResult, type VectorStoreConfig, type PineconeVectorStoreConfig, type WeaviateVectorStoreConfig, type QdrantVectorStoreConfig } from './vector-store';
|
|
25
|
+
export { createTextLoader, loadMarkdown, chunkText, createFileLoader, type LoadedDocument, type DocumentLoaderOptions, type PDFLoader, type DOCXLoader } from './loaders';
|
|
26
|
+
export { createMemoryCache, createLocalStorageCache, createSessionStorageCache, type CacheAdapter, type RedisCacheConfig } from './cache-adapter';
|
|
27
|
+
export { createRateLimiter, createRateLimitMiddleware, type RateLimitConfig, type RateLimitMiddleware } from './rate-limit';
|
|
28
|
+
export { createProductionRAG, type ProductionRAGConfig } from './rag-production';
|
|
29
|
+
export { createEnhancedAgent, createReflexionAgent, createReWOOAgent, createChain, createParallelChain, type ReasoningMode, type EnhancedAgentConfig, type ReflexionConfig, type ReWOOConfig, type ChainStep, type Chain } from './agent-enhanced';
|
|
30
|
+
export { useMCPClient, useMCPTools, type MCPConnectionConfig, type MCPClientState, type MCPTool, type UseMCPClientOptions, type UseMCPClientReturn, type UseMCPToolsOptions, type UseMCPToolsReturn, type MCPServerState } from './mcp';
|
|
31
|
+
export { MCPServer, useMCPServer, createMCPServerHTTPHandler, type MCPServerTool, type MCPToolContent, type MCPServerConfig, type MCPServerOptions, type MCPServerTransport, type UseMCPServerOptions, type UseMCPServerReturn, type MCPJSONRPCRequest, type MCPJSONRPCResponse } from './mcp-server';
|
|
32
|
+
/**
 * Configuration for an OpenAI-compatible chat-completions endpoint.
 * Used by `createChatCompletion` / `createStreamingChatCompletion`.
 */
export interface AIProvider {
  /** Provider display name. */
  name: string;
  /** API base URL; requests go to `${baseUrl}/chat/completions`. */
  baseUrl?: string;
  /** Sent as `Authorization: Bearer <apiKey>` when present. */
  apiKey?: string;
  /** Model to fall back on when a request omits one. */
  defaultModel?: string;
}
/** Request options for a chat completion (OpenAI-compatible payload). */
export interface ChatOptions {
  /** Model identifier, e.g. "gpt-4o-mini". */
  model: string;
  /** Sampling temperature; higher is more random. */
  temperature?: number;
  /** Maximum number of tokens to generate. */
  maxTokens?: number;
  /** Nucleus-sampling probability mass. */
  topP?: number;
  /** Conversation messages in request order. */
  messages: Array<{
    role: string;
    content: string;
  }>;
}
/**
 * Create a simple AI chat completion
 * (resolves with the assistant message content, "" when absent).
 */
export declare function createChatCompletion(provider: AIProvider, options: ChatOptions): Promise<string>;
/**
 * Create a streaming AI chat completion
 * (async generator yielding each content delta as it arrives).
 */
export declare function createStreamingChatCompletion(provider: AIProvider, options: ChatOptions): AsyncGenerator<string>;
|
package/dist/index.mjs
ADDED
|
@@ -0,0 +1,217 @@
|
|
|
1
|
+
export {
|
|
2
|
+
generateText,
|
|
3
|
+
streamText,
|
|
4
|
+
generateObject,
|
|
5
|
+
streamObject,
|
|
6
|
+
embed,
|
|
7
|
+
embedMany,
|
|
8
|
+
experimental_generateImage,
|
|
9
|
+
experimental_generateSpeech,
|
|
10
|
+
experimental_transcribe,
|
|
11
|
+
tool,
|
|
12
|
+
jsonSchema,
|
|
13
|
+
zodSchema,
|
|
14
|
+
customProvider,
|
|
15
|
+
createProviderRegistry,
|
|
16
|
+
wrapLanguageModel,
|
|
17
|
+
extractReasoningMiddleware,
|
|
18
|
+
simulateStreamingMiddleware,
|
|
19
|
+
defaultSettingsMiddleware,
|
|
20
|
+
smoothStream,
|
|
21
|
+
simulateReadableStream,
|
|
22
|
+
generateId,
|
|
23
|
+
createIdGenerator,
|
|
24
|
+
cosineSimilarity,
|
|
25
|
+
createDataStream,
|
|
26
|
+
createDataStreamResponse,
|
|
27
|
+
formatAssistantStreamPart,
|
|
28
|
+
formatDataStreamPart,
|
|
29
|
+
parseAssistantStreamPart,
|
|
30
|
+
parseDataStreamPart,
|
|
31
|
+
pipeDataStreamToResponse,
|
|
32
|
+
processDataStream,
|
|
33
|
+
processTextStream,
|
|
34
|
+
convertToCoreMessages,
|
|
35
|
+
appendClientMessage,
|
|
36
|
+
appendResponseMessages,
|
|
37
|
+
coreMessageSchema,
|
|
38
|
+
coreSystemMessageSchema,
|
|
39
|
+
coreUserMessageSchema,
|
|
40
|
+
coreAssistantMessageSchema,
|
|
41
|
+
coreToolMessageSchema,
|
|
42
|
+
LangChainAdapter,
|
|
43
|
+
LlamaIndexAdapter
|
|
44
|
+
} from "ai";
|
|
45
|
+
export {
|
|
46
|
+
AISDKError,
|
|
47
|
+
APICallError,
|
|
48
|
+
AssistantResponse,
|
|
49
|
+
DownloadError,
|
|
50
|
+
EmptyResponseBodyError,
|
|
51
|
+
InvalidArgumentError,
|
|
52
|
+
InvalidDataContentError,
|
|
53
|
+
InvalidMessageRoleError,
|
|
54
|
+
InvalidPromptError,
|
|
55
|
+
InvalidResponseDataError,
|
|
56
|
+
InvalidStreamPartError,
|
|
57
|
+
InvalidToolArgumentsError,
|
|
58
|
+
JSONParseError,
|
|
59
|
+
LoadAPIKeyError,
|
|
60
|
+
MCPClientError,
|
|
61
|
+
MessageConversionError,
|
|
62
|
+
NoContentGeneratedError,
|
|
63
|
+
NoImageGeneratedError,
|
|
64
|
+
NoObjectGeneratedError,
|
|
65
|
+
NoOutputSpecifiedError,
|
|
66
|
+
NoSuchModelError,
|
|
67
|
+
NoSuchProviderError,
|
|
68
|
+
NoSuchToolError,
|
|
69
|
+
Output,
|
|
70
|
+
RetryError,
|
|
71
|
+
StreamData,
|
|
72
|
+
ToolCallRepairError,
|
|
73
|
+
ToolExecutionError,
|
|
74
|
+
TypeValidationError,
|
|
75
|
+
UnsupportedFunctionalityError,
|
|
76
|
+
UnsupportedModelVersionError
|
|
77
|
+
} from "ai";
|
|
78
|
+
export { AIMessage, HumanMessage, SystemMessage, ToolMessage } from "./langchain.mjs";
|
|
79
|
+
export {
|
|
80
|
+
useLangChainChat,
|
|
81
|
+
useLangChainStream,
|
|
82
|
+
createLangChainChain,
|
|
83
|
+
langChainRuntime
|
|
84
|
+
} from "./langchain.mjs";
|
|
85
|
+
export {
|
|
86
|
+
createStreamableValue,
|
|
87
|
+
useStreamableValue,
|
|
88
|
+
useConversation,
|
|
89
|
+
useConversations,
|
|
90
|
+
useAIChat,
|
|
91
|
+
useAIStream,
|
|
92
|
+
createYHFunctionTool,
|
|
93
|
+
createProviderAdapter,
|
|
94
|
+
createAIContext,
|
|
95
|
+
XRequest,
|
|
96
|
+
createXRequest,
|
|
97
|
+
registerMiddleware,
|
|
98
|
+
clearCache,
|
|
99
|
+
getProviderPreset,
|
|
100
|
+
PROVIDER_PRESETS,
|
|
101
|
+
createVercelAIProvider
|
|
102
|
+
} from "./vue/index.mjs";
|
|
103
|
+
export {
|
|
104
|
+
useReActAgent,
|
|
105
|
+
createPlanExecuteAgent
|
|
106
|
+
} from "./future.mjs";
|
|
107
|
+
export {
|
|
108
|
+
createMultiModalMessage,
|
|
109
|
+
createImageContent,
|
|
110
|
+
createImageUrlContent,
|
|
111
|
+
createAudioContent,
|
|
112
|
+
createVideoContent
|
|
113
|
+
} from "./future.mjs";
|
|
114
|
+
export { createRAGSystem } from "./future.mjs";
|
|
115
|
+
export { createChainOfThought } from "./future.mjs";
|
|
116
|
+
export { createContextCompressor } from "./future.mjs";
|
|
117
|
+
export { createCostTracker } from "./future.mjs";
|
|
118
|
+
export { createTracer } from "./future.mjs";
|
|
119
|
+
export {
|
|
120
|
+
createOTelConsoleExporter,
|
|
121
|
+
createLangSmithExporter,
|
|
122
|
+
createObservabilityManager,
|
|
123
|
+
toOTelSpan
|
|
124
|
+
} from "./observability.mjs";
|
|
125
|
+
export { createSafetyFilter } from "./future.mjs";
|
|
126
|
+
export {
|
|
127
|
+
fromZodSchema,
|
|
128
|
+
createJSONSchema,
|
|
129
|
+
schema,
|
|
130
|
+
parseStructuredOutput
|
|
131
|
+
} from "./future.mjs";
|
|
132
|
+
export {
|
|
133
|
+
createInMemoryVectorStore
|
|
134
|
+
} from "./vector-store.mjs";
|
|
135
|
+
export {
|
|
136
|
+
createTextLoader,
|
|
137
|
+
loadMarkdown,
|
|
138
|
+
chunkText,
|
|
139
|
+
createFileLoader
|
|
140
|
+
} from "./loaders.mjs";
|
|
141
|
+
export {
|
|
142
|
+
createMemoryCache,
|
|
143
|
+
createLocalStorageCache,
|
|
144
|
+
createSessionStorageCache
|
|
145
|
+
} from "./cache-adapter.mjs";
|
|
146
|
+
export {
|
|
147
|
+
createRateLimiter,
|
|
148
|
+
createRateLimitMiddleware
|
|
149
|
+
} from "./rate-limit.mjs";
|
|
150
|
+
export { createProductionRAG } from "./rag-production.mjs";
|
|
151
|
+
export {
|
|
152
|
+
createEnhancedAgent,
|
|
153
|
+
createReflexionAgent,
|
|
154
|
+
createReWOOAgent,
|
|
155
|
+
createChain,
|
|
156
|
+
createParallelChain
|
|
157
|
+
} from "./agent-enhanced.mjs";
|
|
158
|
+
export {
|
|
159
|
+
useMCPClient,
|
|
160
|
+
useMCPTools
|
|
161
|
+
} from "./mcp.mjs";
|
|
162
|
+
export {
|
|
163
|
+
MCPServer,
|
|
164
|
+
useMCPServer,
|
|
165
|
+
createMCPServerHTTPHandler
|
|
166
|
+
} from "./mcp-server.mjs";
|
|
167
|
+
/**
 * Create a simple (non-streaming) chat completion against an
 * OpenAI-compatible `/chat/completions` endpoint.
 *
 * @param {AIProvider} provider - { name, baseUrl?, apiKey?, defaultModel? }
 * @param {ChatOptions} options - { model?, messages, temperature?, ... }
 * @returns {Promise<string>} assistant message content ("" when absent)
 * @throws {Error} when the HTTP response is not ok
 */
export async function createChatCompletion(provider, options) {
  // Fall back to the provider's default model so callers may omit it
  // (AIProvider.defaultModel was previously declared but never used).
  const payload = { ...options, model: options.model ?? provider.defaultModel };
  const response = await fetch(`${provider.baseUrl}/chat/completions`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      // Only attach the Authorization header when an API key is configured.
      ...(provider.apiKey && { Authorization: `Bearer ${provider.apiKey}` })
    },
    body: JSON.stringify(payload)
  });
  if (!response.ok) {
    throw new Error(`AI API Error: ${response.status}`);
  }
  const data = await response.json();
  return data.choices?.[0]?.message?.content || "";
}
|
|
182
|
+
/**
 * Create a streaming chat completion (server-sent events) against an
 * OpenAI-compatible `/chat/completions` endpoint.
 *
 * Yields each content delta as it arrives; returns when the stream
 * ends or the `[DONE]` sentinel is received.
 *
 * Fix: SSE events can be split across `reader.read()` chunks. The old
 * code split each chunk on "\n" independently, so a JSON line that
 * straddled two reads failed to parse and was silently dropped. A
 * carry-over buffer now preserves partial lines between reads.
 *
 * @param {AIProvider} provider - { name, baseUrl?, apiKey?, defaultModel? }
 * @param {ChatOptions} options - { model?, messages, temperature?, ... }
 * @yields {string} content delta
 * @throws {Error} when the HTTP response is not ok or has no body
 */
export async function* createStreamingChatCompletion(provider, options) {
  const response = await fetch(`${provider.baseUrl}/chat/completions`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      ...(provider.apiKey && { Authorization: `Bearer ${provider.apiKey}` })
    },
    body: JSON.stringify({
      stream: true,
      ...options,
      // Fall back to the provider's default model so callers may omit it.
      model: options.model ?? provider.defaultModel
    })
  });
  if (!response.ok || !response.body) {
    throw new Error(`AI API Error: ${response.status}`);
  }
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  // Carry-over for a line split across read() boundaries.
  let buffer = "";
  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split("\n");
      // The last element is either "" (chunk ended on "\n") or a
      // partial line — keep it for the next iteration.
      buffer = lines.pop() ?? "";
      for (const line of lines) {
        const trimmed = line.trim();
        if (!trimmed.startsWith("data: ")) continue;
        const data = trimmed.slice(6);
        if (data === "[DONE]") return;
        try {
          const content = JSON.parse(data).choices?.[0]?.delta?.content;
          if (content) yield content;
        } catch {
          // Ignore non-JSON data lines (keep-alives, comments).
        }
      }
    }
  } finally {
    // Release the lock so the body stream can be cancelled/GC'd even
    // when the generator exits early (return/throw).
    reader.releaseLock();
  }
}
|
|
@@ -0,0 +1,363 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
|
|
3
|
+
Object.defineProperty(exports, "__esModule", {
|
|
4
|
+
value: true
|
|
5
|
+
});
|
|
6
|
+
Object.defineProperty(exports, "AIMessage", {
|
|
7
|
+
enumerable: true,
|
|
8
|
+
get: function () {
|
|
9
|
+
return _messages.AIMessage;
|
|
10
|
+
}
|
|
11
|
+
});
|
|
12
|
+
Object.defineProperty(exports, "HumanMessage", {
|
|
13
|
+
enumerable: true,
|
|
14
|
+
get: function () {
|
|
15
|
+
return _messages.HumanMessage;
|
|
16
|
+
}
|
|
17
|
+
});
|
|
18
|
+
Object.defineProperty(exports, "SystemMessage", {
|
|
19
|
+
enumerable: true,
|
|
20
|
+
get: function () {
|
|
21
|
+
return _messages.SystemMessage;
|
|
22
|
+
}
|
|
23
|
+
});
|
|
24
|
+
Object.defineProperty(exports, "ToolMessage", {
|
|
25
|
+
enumerable: true,
|
|
26
|
+
get: function () {
|
|
27
|
+
return _messages.ToolMessage;
|
|
28
|
+
}
|
|
29
|
+
});
|
|
30
|
+
exports.createLangChainChain = createLangChainChain;
|
|
31
|
+
exports.langChainRuntime = void 0;
|
|
32
|
+
exports.useLangChainChat = useLangChainChat;
|
|
33
|
+
exports.useLangChainStream = useLangChainStream;
|
|
34
|
+
var _vue = require("vue");
|
|
35
|
+
var _messages = require("@langchain/core/messages");
|
|
36
|
+
// Imperative helpers for calling a LangChain chat model directly
// (no Vue reactivity; usable outside component setup).
const langChainRuntime = exports.langChainRuntime = {
  /**
   * Invoke the model synchronously and return its response message.
   * `input` may be a plain string (wrapped in a HumanMessage) or a
   * pre-built LangChain message instance.
   */
  async invoke(model, input, options) {
    const messages = [];
    if (options?.systemMessage) {
      messages.push(new _messages.SystemMessage(options.systemMessage));
    }
    if (typeof input === "string") {
      messages.push(new _messages.HumanMessage(input));
    } else {
      // Caller supplied a ready-made message object.
      messages.push(input);
    }
    const response = await model.invoke(messages);
    return response;
  },
  /**
   * Stream the model's response, invoking `options.onChunk` for each
   * content delta, and return the accumulated text as an AIMessage.
   */
  async stream(model, input, options) {
    const messages = [];
    if (options?.systemMessage) {
      messages.push(new _messages.SystemMessage(options.systemMessage));
    }
    if (typeof input === "string") {
      messages.push(new _messages.HumanMessage(input));
    } else {
      messages.push(input);
    }
    const stream = await model.stream(messages);
    let fullContent = "";
    for await (const chunk of stream) {
      // chunk.content may be non-string (e.g. structured parts); coerce.
      const content = String(chunk?.content || "");
      fullContent += content;
      options?.onChunk?.(content);
    }
    return new _messages.AIMessage(fullContent);
  },
  /**
   * Streaming call with tool definitions bound to the model. Collects
   * any tool calls surfaced through `additional_kwargs.tool_calls` and
   * notifies `options.onToolCall` for each.
   *
   * NOTE(review): during streaming, providers typically emit tool-call
   * *argument deltas* per chunk; `JSON.parse(tc.function.arguments)`
   * on a partial chunk can throw. Verify the target models deliver
   * complete tool calls per chunk, or accumulate arguments first.
   */
  async streamWithTools(model, input, tools, options) {
    const messages = [];
    if (options?.systemMessage) {
      messages.push(new _messages.SystemMessage(options.systemMessage));
    }
    if (typeof input === "string") {
      messages.push(new _messages.HumanMessage(input));
    } else {
      messages.push(input);
    }
    const modelWithTools = model.bind({
      tools
    });
    const stream = await modelWithTools.stream(messages);
    let fullContent = "";
    const toolCalls = [];
    for await (const chunk of stream) {
      const content = String(chunk?.content || "");
      if (content) {
        fullContent += content;
        options?.onChunk?.(content);
      }
      const additionalKwargs = chunk?.additional_kwargs;
      if (additionalKwargs?.tool_calls) {
        for (const tc of additionalKwargs.tool_calls) {
          toolCalls.push(tc);
          options?.onToolCall?.({
            name: tc.function?.name || "",
            args: tc.function?.arguments ? JSON.parse(tc.function.arguments) : {}
          });
        }
      }
    }
    return {
      message: new _messages.AIMessage(fullContent),
      toolCalls: toolCalls.length > 0 ? toolCalls : void 0
    };
  }
};
|
|
117
|
+
/**
 * Vue composable: chat with a LangChain chat model, keeping a bounded
 * reactive message history.
 *
 * Fixes:
 * 1. The message list sent to the model previously used
 *    `messages.value.slice(0, -1)`, which dropped the just-added user
 *    message — the model never received the new prompt.
 * 2. `reload` kept the old user message in history before calling
 *    `sendMessage`, which re-adds it, duplicating the prompt.
 *
 * @param {object} options - { model, initialMessages?, systemMessage?,
 *   maxHistory?, streaming?, onChunk?, onFinish?, onError? }
 * @returns reactive state and actions:
 *   { messages, input, isLoading, error, sendMessage, clearHistory, reload }
 */
function useLangChainChat(options) {
  const {
    model,
    initialMessages = [],
    systemMessage,
    maxHistory = 20,
    streaming = false,
    onChunk,
    onFinish,
    onError
  } = options;
  const messages = (0, _vue.ref)([...initialMessages]);
  const input = (0, _vue.ref)("");
  const isLoading = (0, _vue.ref)(false);
  const error = (0, _vue.ref)(null);
  // Convert plain {role, content} records into LangChain message instances.
  const toLangChainMessages = msgs => {
    return msgs.map(msg => {
      switch (msg.role) {
        case "user":
          return new _messages.HumanMessage(msg.content);
        case "assistant":
          return new _messages.AIMessage(msg.content);
        case "system":
          return new _messages.SystemMessage(msg.content);
        case "tool":
          return new _messages.ToolMessage({
            content: msg.content,
            tool_call_id: msg.toolCallId || ""
          });
        default:
          // Unknown roles are treated as user input.
          return new _messages.HumanMessage(msg.content);
      }
    });
  };
  // Append a message (adding id + timestamp) and trim to maxHistory.
  const addMessage = msg => {
    const newMessage = {
      ...msg,
      id: `msg-${Date.now()}-${Math.random().toString(36).slice(2)}`,
      createdAt: /* @__PURE__ */ new Date()
    };
    messages.value = [...messages.value, newMessage].slice(-maxHistory);
    return newMessage;
  };
  const sendMessage = async content => {
    if (!content.trim() || isLoading.value) return;
    error.value = null;
    isLoading.value = true;
    addMessage({
      role: "user",
      content
    });
    input.value = "";
    try {
      const langChainMessages = [];
      if (systemMessage) {
        langChainMessages.push(new _messages.SystemMessage(systemMessage));
      }
      // Fix (1): include the whole history — the previous `slice(0, -1)`
      // excluded the user message that was just added above.
      langChainMessages.push(...toLangChainMessages(messages.value));
      if (streaming) {
        const stream = await model.stream(langChainMessages);
        // Placeholder assistant message updated in place as deltas arrive.
        const assistantMsg = addMessage({
          role: "assistant",
          content: ""
        });
        let fullContent = "";
        for await (const chunk of stream) {
          const chunkContent = String(chunk?.content || "");
          fullContent += chunkContent;
          messages.value = messages.value.map(m => m.id === assistantMsg.id ? {
            ...m,
            content: fullContent
          } : m);
          onChunk?.(chunkContent);
        }
        onFinish?.(messages.value[messages.value.length - 1]);
      } else {
        const response = await model.invoke(langChainMessages);
        const assistantMsg = addMessage({
          role: "assistant",
          content: String(response.content)
        });
        onFinish?.(assistantMsg);
      }
    } catch (err) {
      const errorObj = err instanceof Error ? err : new Error(String(err));
      error.value = errorObj;
      onError?.(errorObj);
    } finally {
      isLoading.value = false;
    }
  };
  const clearHistory = () => {
    messages.value = [];
  };
  // Re-send the most recent user message, discarding everything after it.
  const reload = async () => {
    if (messages.value.length > 0) {
      const lastUserMsg = [...messages.value].reverse().find(m => m.role === "user");
      if (lastUserMsg) {
        const userMsgIndex = messages.value.findIndex(m => m.id === lastUserMsg.id);
        // Fix (2): drop the old copy before resending — sendMessage
        // re-adds it, so keeping it duplicated the prompt.
        messages.value = messages.value.slice(0, userMsgIndex);
        await sendMessage(lastUserMsg.content);
      }
    }
  };
  return {
    messages,
    input,
    isLoading,
    error,
    sendMessage,
    clearHistory,
    reload
  };
}
|
|
231
|
+
/**
 * Vue composable: one-shot streaming completion from a LangChain chat
 * model into a reactive `content` string.
 *
 * Fix: `abortController` was declared but never assigned, so `stop()`
 * could never cancel an in-flight stream. A controller is now created
 * per `start()` and the consume loop exits once it is aborted.
 *
 * @param {object} options - { model, systemMessage? }
 * @returns { content, isStreaming, error, start, stop }
 */
function useLangChainStream(options) {
  const {
    model,
    systemMessage
  } = options;
  const content = (0, _vue.ref)("");
  const isStreaming = (0, _vue.ref)(false);
  const error = (0, _vue.ref)(null);
  let abortController = null;
  const start = async (prompt, history = []) => {
    // Only one active stream at a time; cancel any previous one.
    if (isStreaming.value) {
      stop();
    }
    abortController = new AbortController();
    const { signal } = abortController;
    error.value = null;
    isStreaming.value = true;
    content.value = "";
    try {
      const messages = [];
      if (systemMessage) {
        messages.push(new _messages.SystemMessage(systemMessage));
      }
      for (const msg of history) {
        switch (msg.role) {
          case "user":
            messages.push(new _messages.HumanMessage(msg.content));
            break;
          case "assistant":
            messages.push(new _messages.AIMessage(msg.content));
            break;
          case "system":
            messages.push(new _messages.SystemMessage(msg.content));
            break;
        }
      }
      messages.push(new _messages.HumanMessage(prompt));
      const stream = await model.stream(messages);
      for await (const chunk of stream) {
        // Cooperative cancellation: stop consuming once aborted.
        // NOTE(review): the underlying request is not cancelled here;
        // pass `signal` to model.stream if the model supports it.
        if (signal.aborted) break;
        const chunkContent = String(chunk?.content || "");
        content.value += chunkContent;
      }
    } catch (err) {
      // AbortError is an expected outcome of stop(), not a failure.
      if (err.name !== "AbortError") {
        const errorObj = err instanceof Error ? err : new Error(String(err));
        error.value = errorObj;
      }
    } finally {
      isStreaming.value = false;
    }
  };
  const stop = () => {
    if (abortController) {
      abortController.abort();
      abortController = null;
    }
    isStreaming.value = false;
  };
  // Ensure any in-flight stream is cancelled when the component unmounts.
  (0, _vue.onUnmounted)(() => {
    stop();
  });
  return {
    content,
    isStreaming,
    error,
    start,
    stop
  };
}
|
|
298
|
+
/**
 * Create a minimal chain wrapper around a LangChain chat model with an
 * optional fixed system message.
 *
 * Fix: `invokeWithTools` previously pushed the assistant message and
 * returned inside the FIRST loop iteration, so only the first tool
 * call was ever executed (and the assistant message would have been
 * pushed repeatedly had the loop continued). All tool calls are now
 * executed before the single follow-up model invocation.
 *
 * @param model - a LangChain chat model (BaseChatModel-like)
 * @param config - optional { systemMessage }
 */
function createLangChainChain(model, config) {
  // Build the message list for a single string input.
  const buildMessages = input => {
    const messages = [];
    if (config?.systemMessage) {
      messages.push(new _messages.SystemMessage(config.systemMessage));
    }
    messages.push(new _messages.HumanMessage(input));
    return messages;
  };
  return {
    /** Invoke synchronously; resolves with the response content. */
    async invoke(input) {
      const response = await model.invoke(buildMessages(input));
      return response.content;
    },
    /** Stream content deltas as an async generator. */
    async *stream(input) {
      const stream = await model.stream(buildMessages(input));
      for await (const chunk of stream) {
        yield chunk?.content || "";
      }
    },
    /**
     * Invoke with tools bound; when the model requests tool calls and a
     * handler is provided, execute every tool, feed the results back,
     * and return the follow-up response.
     */
    async invokeWithTools(input, tools, toolHandler) {
      const messages = buildMessages(input);
      const modelWithTools = model.bind({
        tools
      });
      const response = await modelWithTools.invoke(messages);
      const toolCalls = response?.additional_kwargs?.tool_calls;
      if (toolCalls && toolCalls.length > 0 && toolHandler) {
        // The assistant message carrying the tool calls goes in once,
        // followed by one ToolMessage per executed call.
        messages.push(response);
        for (const tc of toolCalls) {
          const toolName = tc.function?.name || "";
          const args = tc.function?.arguments ? JSON.parse(tc.function.arguments) : {};
          const toolResult = await toolHandler(toolName, args);
          messages.push(new _messages.ToolMessage({
            content: toolResult,
            tool_call_id: tc.id || ""
          }));
        }
        const finalResponse = await model.invoke(messages);
        return {
          message: finalResponse.content,
          toolCalls
        };
      }
      return {
        message: response.content,
        toolCalls: void 0
      };
    }
  };
}
|