@yh-ui/ai-sdk 0.1.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2024-present YH-UI Team
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,309 @@
1
+ # @yh-ui/ai-sdk
2
+
3
+ <p align="center">
4
+ <img src="https://raw.githubusercontent.com/1079161148/yh-ui/main/docs/public/logo.svg" width="100" height="100" alt="YH-UI Logo">
5
+ </p>
6
+
7
+ <h3 align="center">YH-UI AI SDK</h3>
8
+
9
+ <p align="center">
10
+ 官方 AI 集成方案 · Vercel AI SDK 深度集成 · LangChain 支持 · Vue 3 响应式 Hooks · 流式渲染
11
+ </p>
12
+
13
+ <p align="center">
14
+ <a href="https://www.npmjs.com/package/@yh-ui/ai-sdk">
15
+ <img src="https://img.shields.io/npm/v/@yh-ui/ai-sdk.svg?style=flat-square&colorB=409eff" alt="npm version">
16
+ </a>
17
+ <a href="https://www.npmjs.com/package/@yh-ui/ai-sdk">
18
+ <img src="https://img.shields.io/npm/dm/@yh-ui/ai-sdk.svg?style=flat-square&colorB=409eff" alt="npm downloads">
19
+ </a>
20
+ <a href="https://github.com/1079161148/yh-ui/blob/main/LICENSE">
21
+ <img src="https://img.shields.io/npm/l/@yh-ui/ai-sdk.svg?style=flat-square" alt="license">
22
+ </a>
23
+ </p>
24
+
25
+ ---
26
+
27
+ ## ✨ 特性
28
+
29
+ - 🤖 **多 Provider 支持** — OpenAI、Anthropic (Claude)、Google Gemini、本地模型,统一适配器接口
30
+ - ⚡ **Vercel AI SDK 完整集成** — `generateText`、`streamText`、`useChat`、工具调用全覆盖
31
+ - 🦜 **LangChain 支持** — 原生适配 LangChain 聊天模型,支持复杂 Chain/Agent 场景
32
+ - 💬 **Vue 3 响应式 Hooks** — `useAIChat`、`useAIStream` 等,状态自动响应视图更新
33
+ - 🌊 **流式渲染深度集成** — 与 `YhAiBubble` 组件无缝配合,逐词/逐字动画效果
34
+ - 📝 **对话历史管理** — 开箱即用的会话历史、多轮对话、持久化存储
35
+ - 🔧 **工具调用(Function Calling)** — 简洁的 API 定义 AI 工具函数
36
+ - 🔒 **完整 TypeScript** — 所有 API 均有精确类型,含 Zod schema 验证
37
+
38
+ ---
39
+
40
+ ## 📦 安装
41
+
42
+ ```bash
43
+ # 核心包
44
+ pnpm add @yh-ui/ai-sdk
45
+
46
+ # 根据使用的 AI Provider 选择安装(至少一个)
47
+ pnpm add @ai-sdk/openai # OpenAI(GPT-4o, GPT-3.5, o1...)
48
+ pnpm add @ai-sdk/anthropic # Anthropic(Claude 3.5 Sonnet...)
49
+ pnpm add @ai-sdk/google # Google(Gemini Pro, Gemini Flash...)
50
+ ```
51
+
52
+ **对等依赖(peerDependencies)**:需同时安装 `@yh-ui/components`(提供 AI UI 组件)
53
+
54
+ ---
55
+
56
+ ## 🔨 快速开始
57
+
58
+ ### 最简单的 AI 聊天
59
+
60
+ ```vue
61
+ <!-- 前端组件 -->
62
+ <script setup lang="ts">
63
+ import { YhAiBubble, YhAiSender } from '@yh-ui/components'
64
+ import { useAIChat } from '@yh-ui/ai-sdk/vue'
65
+
66
+ const { messages, input, isLoading, sendMessage, stop } = useAIChat({
67
+ api: '/api/chat',
68
+ onFinish: (msg) => console.log('AI 回复完成:', msg.content),
69
+ onError: (err) => console.error('请求失败:', err)
70
+ })
71
+ </script>
72
+
73
+ <template>
74
+ <div class="chat-container">
75
+ <!-- 消息列表 -->
76
+ <div class="messages">
77
+ <YhAiBubble
78
+ v-for="msg in messages"
79
+ :key="msg.id"
80
+ :role="msg.role"
81
+ :content="msg.content"
82
+ streaming
83
+ stream-mode="word"
84
+ :stream-interval="15"
85
+ />
86
+ </div>
87
+
88
+ <!-- 输入框 -->
89
+ <YhAiSender v-model="input" :loading="isLoading" @send="sendMessage" @cancel="stop" />
90
+ </div>
91
+ </template>
92
+ ```
93
+
94
+ ```ts
95
+ // 后端 API 路由(Nuxt server/api/chat.post.ts)
96
+ import { streamText } from 'ai'
97
+ import { openai } from '@ai-sdk/openai'
98
+
99
+ export default defineEventHandler(async (event) => {
100
+ const { messages } = await readBody(event)
101
+
102
+ const result = await streamText({
103
+ model: openai('gpt-4o'),
104
+ messages
105
+ })
106
+
107
+ return result.toDataStreamResponse()
108
+ })
109
+ ```
110
+
111
+ ---
112
+
113
+ ## 🌊 流式渲染
114
+
115
+ ```vue
116
+ <script setup lang="ts">
117
+ import { YhAiBubble } from '@yh-ui/components'
118
+ import { useAIStream } from '@yh-ui/ai-sdk/vue'
119
+
120
+ const { content, isStreaming, start, stop } = useAIStream({
121
+ api: '/api/chat/stream',
122
+ onChunk: (chunk) => console.log('收到片段:', chunk),
123
+ onFinish: () => console.log('流式结束')
124
+ })
125
+ </script>
126
+
127
+ <template>
128
+ <YhAiBubble
129
+ role="assistant"
130
+ :content="content"
131
+ :typing="isStreaming"
132
+ streaming
133
+ stream-mode="word"
134
+ :stream-interval="20"
135
+ />
136
+ <button @click="start">开始生成</button>
137
+ <button @click="stop" :disabled="!isStreaming">停止</button>
138
+ </template>
139
+ ```
140
+
141
+ ---
142
+
143
+ ## 💬 多轮对话历史管理
144
+
145
+ ```ts
146
+ import { useConversation } from '@yh-ui/ai-sdk'
147
+
148
+ const { messages, addMessage, clearHistory, exportHistory, importHistory } = useConversation({
149
+ maxHistory: 50, // 最大保留消息数
150
+ persist: true, // 持久化到 localStorage
151
+ storageKey: 'my-chat' // 存储 key
152
+ })
153
+
154
+ // 手动添加消息
155
+ addMessage({ role: 'user', content: '你好' })
156
+ addMessage({ role: 'assistant', content: '你好!有什么可以帮你的?' })
157
+
158
+ // 清空历史
159
+ clearHistory()
160
+
161
+ // 导出/导入对话(用于分享或备份)
162
+ const json = exportHistory()
163
+ importHistory(json)
164
+ ```
165
+
166
+ ---
167
+
168
+ ## 🔧 工具调用(Function Calling)
169
+
170
+ ```ts
171
+ import { createYHFunctionTool } from '@yh-ui/ai-sdk'
172
+ import { z } from 'zod'
173
+
174
+ // 定义 AI 工具
175
+ const weatherTool = createYHFunctionTool({
176
+ name: 'get_weather',
177
+ description: '获取指定城市的实时天气',
178
+ // 使用 Zod schema 定义参数(自动生成 JSON Schema)
179
+ schema: z.object({
180
+ city: z.string().describe('城市名称,如:北京、上海'),
181
+ unit: z.enum(['celsius', 'fahrenheit']).default('celsius').describe('温度单位')
182
+ }),
183
+ execute: async ({ city, unit }) => {
184
+ // 调用真实天气 API
185
+ const res = await fetch(`https://api.weather.com?city=${city}`)
186
+ return await res.json()
187
+ }
188
+ })
189
+
190
+ // 在流式对话中使用工具
191
+ const result = await streamText({
192
+ model: openai('gpt-4o'),
193
+ tools: { weatherTool },
194
+ messages: [{ role: 'user', content: '北京今天天气怎么样?' }]
195
+ })
196
+ ```
197
+
198
+ ---
199
+
200
+ ## 🔌 多 Provider 适配器
201
+
202
+ ```ts
203
+ import { createProviderAdapter } from '@yh-ui/ai-sdk'
204
+
205
+ // 统一适配器,方便切换 Provider
206
+ const openai = createProviderAdapter({
207
+ name: 'openai',
208
+ baseUrl: 'https://api.openai.com/v1',
209
+ apiKey: process.env.OPENAI_API_KEY,
210
+ defaultModel: 'gpt-4o'
211
+ })
212
+
213
+ const anthropic = createProviderAdapter({
214
+ name: 'anthropic',
215
+ baseUrl: 'https://api.anthropic.com/v1',
216
+ apiKey: process.env.ANTHROPIC_API_KEY,
217
+ defaultModel: 'claude-3-5-sonnet-20241022'
218
+ })
219
+
220
+ const gemini = createProviderAdapter({
221
+ name: 'google',
222
+ baseUrl: 'https://generativelanguage.googleapis.com/v1beta',
223
+ apiKey: process.env.GOOGLE_API_KEY,
224
+ defaultModel: 'gemini-1.5-flash'
225
+ })
226
+ ```
227
+
228
+ ---
229
+
230
+ ## 📚 API 参考
231
+
232
+ ### Vue Composables(从 `@yh-ui/ai-sdk/vue` 导入)
233
+
234
+ | 函数 | 说明 |
235
+ | ---------------------- | ------------------------------------------ |
236
+ | `useAIChat(options)` | AI 对话 Hook,管理消息列表、发送、流式接收 |
237
+ | `useAIStream(options)` | 流式文本生成 Hook |
238
+
239
+ ### 核心工具(从 `@yh-ui/ai-sdk` 导入)
240
+
241
+ | 函数 | 说明 |
242
+ | -------------------------------- | ----------------------------------- |
243
+ | `useConversation(options)` | 对话历史管理(持久化、导入导出) |
244
+ | `createYHFunctionTool(options)` | 创建 AI 工具函数(集成 Zod 验证) |
245
+ | `createProviderAdapter(options)` | 创建统一 AI Provider 适配器 |
246
+ | `createStreamableValue()` | 创建可流式更新的响应式值 |
247
+ | `createAIContext()` | 创建全局 AI 上下文(Provider 模式) |
248
+
249
+ ### LangChain 集成(从 `@yh-ui/ai-sdk/langchain` 导入)
250
+
251
+ ```ts
252
+ import { YHLangChainChatModel } from '@yh-ui/ai-sdk/langchain'
253
+
254
+ const model = new YHLangChainChatModel({
255
+ provider: 'openai',
256
+ model: 'gpt-4o',
257
+ streaming: true
258
+ })
259
+ ```
260
+
261
+ ---
262
+
263
+ ## 🔗 与 AI 组件配合
264
+
265
+ ### `YhAiBubble` — 流式 Markdown 渲染
266
+
267
+ ```vue
268
+ <YhAiBubble
269
+ role="assistant" <!-- 'user' | 'assistant' | 'system' -->
270
+ :content="streamContent" <!-- 支持 Markdown、代码块、表格 -->
271
+ :typing="isStreaming" <!-- 显示光标动效 -->
272
+ streaming <!-- 启用流式模式 -->
273
+ stream-mode="word" <!-- 'char'(逐字)| 'word'(逐词) -->
274
+ :stream-interval="15" <!-- 渲染间隔(ms) -->
275
+ />
276
+ ```
277
+
278
+ ### `YhAiThoughtChain` — 思维链展示
279
+
280
+ ```vue
281
+ <YhAiThoughtChain
282
+ :items="thoughtSteps"
283
+ status="thinking" <!-- 'thinking' | 'success' | 'error' -->
284
+ :show-progress="true"
285
+ :line-gradient="true"
286
+ @node-click="handleNodeClick"
287
+ />
288
+ ```
289
+
290
+ ---
291
+
292
+ ## ⚠️ 安全注意事项
293
+
294
+ 1. **API Key 不要暴露到前端** — 所有 AI 调用应通过后端 API 路由进行,不要在 `.env.VITE_` 等前端可见的环境变量中存放密钥
295
+ 2. **内容过滤** — 建议在后端对用户输入和 AI 输出做内容安全过滤
296
+ 3. **速率限制** — 生产环境建议在 API 路由层添加用户级别的请求限流
297
+
298
+ ---
299
+
300
+ ## 🔗 相关资源
301
+
302
+ - [📖 AI 组件文档](https://1079161148.github.io/yh-ui/components/ai-bubble)
303
+ - [📦 Vercel AI SDK](https://sdk.vercel.ai/docs)
304
+ - [🦜 LangChain.js](https://js.langchain.com/docs)
305
+ - [📦 GitHub 仓库](https://github.com/1079161148/yh-ui)
306
+
307
+ ## 📄 开源协议
308
+
309
+ MIT License © 2024-present YH-UI Team
@@ -0,0 +1,292 @@
1
+ "use strict";
2
+
3
+ Object.defineProperty(exports, "__esModule", {
4
+ value: true
5
+ });
6
+ exports.createChain = createChain;
7
+ exports.createEnhancedAgent = createEnhancedAgent;
8
+ exports.createParallelChain = createParallelChain;
9
+ exports.createReWOOAgent = createReWOOAgent;
10
+ exports.createReflexionAgent = createReflexionAgent;
11
+ var _vue = require("vue");
12
function createChain(initialHandler) {
  // Ordered list of { name, handler, onError? } steps to run.
  const pipeline = [];
  if (initialHandler) {
    pipeline.push({
      name: "initial",
      handler: initialHandler
    });
  }
  return {
    /** Append a step; returns the chain for fluent composition. */
    pipe(step) {
      pipeline.push(step);
      return this;
    },
    /**
     * Run every step in order, feeding each handler's output into the next.
     * A step with an `onError` callback swallows its own failure and the
     * chain continues with the previous value; otherwise the error bubbles.
     */
    async invoke(input) {
      let value = input;
      for (const step of pipeline) {
        try {
          value = await step.handler(value);
        } catch (err) {
          if (!step.onError) throw err;
          step.onError(err);
        }
      }
      return value;
    }
  };
}
42
function createParallelChain(steps) {
  // `steps` is a caller-owned map of name -> handler; `pipe` registers
  // (or overwrites) entries on that same object.
  return {
    /** Register a named handler; returns the chain for fluent composition. */
    pipe(step) {
      steps[step.name] = step.handler;
      return this;
    },
    /**
     * Fan the same input out to every handler concurrently and collect
     * the results keyed by step name.
     */
    async invoke(input) {
      const results = {};
      const pending = [];
      for (const [name, handler] of Object.entries(steps)) {
        pending.push(
          (async () => {
            results[name] = await handler(input);
          })()
        );
      }
      await Promise.all(pending);
      return results;
    }
  };
}
58
function createReflexionAgent(config) {
  // Reflexion-style agent loop: repeatedly attempt the task, score each
  // attempt with a heuristic, keep the best-scoring output, and feed short
  // self-reflections about prior attempts back into the next prompt.
  const {
    maxIterations = 5,
    returnReasoning = true,
    stopConditions = [],
    memoryWindow = 3, // max reflections carried forward into the prompt
    onError
  } = config;
  // Reactive trace state (Vue refs) so a view can render progress live.
  const steps = (0, _vue.ref)([]);
  const reflections = (0, _vue.ref)([]);
  const isRunning = (0, _vue.ref)(false);
  // Run the agent.
  //   input     - task description
  //   executeFn - async (prompt) => model response text
  //   toolFn    - optional async (toolName, toolInput) => tool output
  // Resolves to { output, reasoning?, toolCalls, finished, error? };
  // errors are captured and returned, never rethrown.
  async function run(input, executeFn, toolFn) {
    isRunning.value = true;
    steps.value = [];
    reflections.value = [];
    let iteration = 0;
    let currentInput = input;
    let bestOutput = "";
    let bestScore = -Infinity;
    try {
      while (iteration < maxIterations) {
        iteration++;
        const prompt = buildPrompt(currentInput, reflections.value);
        const response = await executeFn(prompt);
        let toolResult = "";
        // Parse a single ReAct-style "Action: name \n Action Input: {...}"
        // directive from the model response.
        const actionMatch = response.match(/Action:\s*(\w+)\s*[\n\r]Action Input:\s*(.+)/);
        if (actionMatch && toolFn) {
          const [, toolName, toolInputStr] = actionMatch;
          try {
            const toolInput = JSON.parse(toolInputStr);
            toolResult = await toolFn(toolName, toolInput);
          } catch {
            // Malformed JSON: surface the parse failure as the observation.
            toolResult = `Failed to parse: ${toolInputStr}`;
          }
        }
        steps.value.push({
          id: `step-${iteration}`,
          type: "thought",
          content: response,
          timestamp: /* @__PURE__ */new Date()
        });
        if (toolResult) {
          steps.value.push({
            id: `step-${iteration}-obs`,
            type: "observation",
            content: toolResult,
            timestamp: /* @__PURE__ */new Date()
          });
        }
        // Keep whichever attempt scores highest so far.
        const score = await evaluateOutput(response + toolResult);
        if (score > bestScore) {
          bestScore = score;
          bestOutput = response + (toolResult ? `

Observation: ${toolResult}` : "");
        }
        if (iteration < maxIterations) {
          // Reflect on this attempt; retain only the most recent
          // `memoryWindow` reflections for the next prompt.
          const reflection = await reflect(currentInput, response, toolResult, score);
          reflections.value.push(reflection);
          if (reflections.value.length > memoryWindow) {
            reflections.value.shift();
          }
          currentInput = `${input}

Previous attempts:
${reflections.value.join("\n---\n")}`;
        }
        if (checkStop("", bestOutput)) break;
      }
      return {
        output: bestOutput,
        reasoning: returnReasoning ? steps.value : void 0,
        toolCalls: iteration,
        finished: iteration >= maxIterations
      };
    } catch (err) {
      // Report the failure against the most recent step, then return the
      // best partial result instead of throwing.
      if (onError) await onError(err, steps.value[steps.value.length - 1]);
      return {
        output: bestOutput,
        reasoning: returnReasoning ? steps.value : void 0,
        toolCalls: iteration,
        finished: false,
        error: err
      };
    } finally {
      isRunning.value = false;
    }
  }
  // Assemble the task prompt, appending any accumulated reflections.
  function buildPrompt(input, reflections2) {
    let prompt = `Task: ${input}
`;
    if (reflections2.length > 0) {
      prompt += `
Previous reflections:
${reflections2.join("\n")}
`;
    }
    prompt += "\nProvide your reasoning and any tool actions (Action: toolName\nAction Input: {...}).";
    return prompt;
  }
  // Placeholder reflection: a canned note keyed off the heuristic score.
  // NOTE(review): ignores `task`/`response`/`toolResult` — presumably a
  // stub for a model-generated reflection.
  async function reflect(task, response, toolResult, score) {
    return `Attempt ${steps.value.length}: ${score < 5 ? "Need to try different approach" : "Making progress"}`;
  }
  // Heuristic scorer with random jitter; outputs longer than 10 chars
  // score in [5, 10), shorter ones in [0, 5). Non-deterministic.
  async function evaluateOutput(output) {
    return output.length > 10 ? 5 + Math.random() * 5 : Math.random() * 5;
  }
  // True when any configured stop condition matches the best output.
  // `iterationOutput` is accepted but currently unused by both branches.
  function checkStop(iterationOutput, bestOutput) {
    for (const cond of stopConditions) {
      if (cond.type === "contains" && cond.value) {
        if (bestOutput.includes(cond.value)) return true;
      }
      if (cond.type === "custom" && cond.value) {
        if (cond.value(bestOutput)) return true;
      }
    }
    return false;
  }
  return {
    run,
    steps,
    reflections,
    isRunning
  };
}
182
function createReWOOAgent(config) {
  // ReWOO-style agent: build a plan up front, execute every plan step in
  // dependency order, then synthesize a final answer from the results.
  const { returnReasoning = true, onError } = config;
  // Reactive trace state (Vue refs) so a view can render progress live.
  const steps = (0, _vue.ref)([]);
  const plan = (0, _vue.ref)([]);
  const isRunning = (0, _vue.ref)(false);

  // Fallback three-step plan (analyze -> gather -> answer) used when the
  // caller supplies no planner of its own.
  const defaultPlanner = async (task) => ({
    steps: [
      { id: "1", description: `Analyze: ${task.slice(0, 50)}`, dependentIds: [] },
      { id: "2", description: `Gather info for: ${task.slice(0, 50)}`, dependentIds: ["1"] },
      { id: "3", description: `Answer: ${task.slice(0, 50)}`, dependentIds: ["1", "2"] }
    ]
  });

  // Run the agent.
  //   input     - original task description
  //   executeFn - async (prompt) => model response text
  // Resolves to { output, reasoning?, toolCalls, finished, error? };
  // errors are captured and returned, never rethrown.
  async function run(input, executeFn) {
    isRunning.value = true;
    steps.value = [];
    plan.value = [];
    try {
      const makePlan = config.planner || defaultPlanner;
      const planned = await makePlan(input);
      plan.value = planned.steps;
      steps.value.push({
        id: "plan-thought",
        type: "thought",
        content: `Generated plan with ${planned.steps.length} steps`,
        timestamp: new Date()
      });

      const outputs = {};
      const done = new Set();
      // Execute steps whose dependencies are all satisfied; bail out if a
      // cycle or missing dependency leaves no runnable step.
      while (done.size < plan.value.length) {
        const next = plan.value.find(
          (s) => !done.has(s.id) && s.dependentIds.every((dep) => done.has(dep))
        );
        if (!next) break;
        const context = Object.entries(outputs)
          .filter(([id]) => next.dependentIds.includes(id))
          .map(([, out]) => out)
          .join("\n\n");
        const stepPrompt = `Step: ${next.description}
Context: ${context || "No prior context"}
Execute this step and provide the result.`;
        const stepOutput = await executeFn(stepPrompt);
        outputs[next.id] = stepOutput;
        done.add(next.id);
        steps.value.push({
          id: `step-${next.id}`,
          type: "action",
          content: next.description,
          toolOutput: stepOutput,
          timestamp: new Date()
        });
      }

      // Final synthesis pass over all collected step results.
      const finalPrompt = `Original Task: ${input}

Execution Results:
${Object.entries(outputs).map(([id, res]) => `Step ${id}: ${res}`).join("\n\n")}

Based on the above results, provide the final answer to the original task.`;
      const finalAnswer = await executeFn(finalPrompt);
      return {
        output: finalAnswer,
        reasoning: returnReasoning ? steps.value : void 0,
        toolCalls: done.size,
        finished: true
      };
    } catch (err) {
      // Report the failure against the most recent step, if any.
      if (onError) {
        const lastStep = steps.value[steps.value.length - 1];
        if (lastStep) await onError(err, lastStep);
      }
      return {
        output: "",
        reasoning: returnReasoning ? steps.value : void 0,
        toolCalls: 0,
        finished: false,
        error: err
      };
    } finally {
      isRunning.value = false;
    }
  }

  return { run, steps, plan, isRunning };
}
277
function createEnhancedAgent(config) {
  // Factory dispatching on `config.mode` to the matching agent builder.
  const { mode } = config;
  if (mode === "reflexion") {
    return createReflexionAgent(config);
  }
  if (mode === "rewoo") {
    return createReWOOAgent(config);
  }
  // "react" and "plan-execute" are currently served by the ReWOO
  // implementation, normalized to mode "rewoo".
  if (mode === "react" || mode === "plan-execute") {
    return createReWOOAgent({
      ...config,
      mode: "rewoo"
    });
  }
  throw new Error(`Unsupported mode: ${config.mode}`);
}