@opentiny/tiny-robot-kit 0.2.0-alpha.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +164 -0
- package/dist/index.d.mts +351 -0
- package/dist/index.d.ts +351 -0
- package/dist/index.js +561 -0
- package/dist/index.mjs +524 -0
- package/package.json +26 -0
- package/src/client.ts +101 -0
- package/src/error.ts +100 -0
- package/src/index.ts +10 -0
- package/src/providers/base.ts +62 -0
- package/src/providers/openai.ts +134 -0
- package/src/types.ts +163 -0
- package/src/utils.ts +125 -0
- package/src/vue/index.ts +1 -0
- package/src/vue/useMessage.ts +227 -0
- package/tsconfig.json +21 -0
package/dist/index.mjs
ADDED
@@ -0,0 +1,524 @@
// src/providers/base.ts
var BaseModelProvider = class {
  /**
   * @param config AI model configuration
   */
  constructor(config) {
    this.config = config;
  }
  /**
   * Update the configuration
   * @param config New AI model configuration
   */
  updateConfig(config) {
    this.config = { ...this.config, ...config };
  }
  /**
   * Get the current configuration
   * @returns AI model configuration
   */
  getConfig() {
    return { ...this.config };
  }
  /**
   * Validate request parameters
   * @param request Chat request parameters
   */
  validateRequest(request) {
    if (!request.messages || !Array.isArray(request.messages) || request.messages.length === 0) {
      throw new Error("\u8BF7\u6C42\u5FC5\u987B\u5305\u542B\u81F3\u5C11\u4E00\u6761\u6D88\u606F");
    }
    for (const message of request.messages) {
      if (!message.role || !message.content) {
        throw new Error("\u6BCF\u6761\u6D88\u606F\u5FC5\u987B\u5305\u542B\u89D2\u8272\u548C\u5185\u5BB9");
      }
    }
  }
};

// src/types.ts
var ErrorType = /* @__PURE__ */ ((ErrorType2) => {
  ErrorType2["NETWORK_ERROR"] = "network_error";
  ErrorType2["AUTHENTICATION_ERROR"] = "authentication_error";
  ErrorType2["RATE_LIMIT_ERROR"] = "rate_limit_error";
  ErrorType2["SERVER_ERROR"] = "server_error";
  ErrorType2["MODEL_ERROR"] = "model_error";
  ErrorType2["TIMEOUT_ERROR"] = "timeout_error";
  ErrorType2["UNKNOWN_ERROR"] = "unknown_error";
  return ErrorType2;
})(ErrorType || {});
var StreamEventType = /* @__PURE__ */ ((StreamEventType2) => {
  StreamEventType2["DATA"] = "data";
  StreamEventType2["ERROR"] = "error";
  StreamEventType2["DONE"] = "done";
  return StreamEventType2;
})(StreamEventType || {});

// src/error.ts
function createError(error) {
  return {
    type: error.type || "unknown_error" /* UNKNOWN_ERROR */,
    message: error.message || "\u672A\u77E5\u9519\u8BEF",
    statusCode: error.statusCode,
    originalError: error.originalError
  };
}
function handleRequestError(error) {
  if (!error.response) {
    return createError({
      type: "network_error" /* NETWORK_ERROR */,
      message: "\u7F51\u7EDC\u8FDE\u63A5\u9519\u8BEF\uFF0C\u8BF7\u68C0\u67E5\u60A8\u7684\u7F51\u7EDC\u8FDE\u63A5",
      originalError: error
    });
  }
  if (error.response) {
    const { status, data } = error.response;
    if (status === 401 || status === 403) {
      return createError({
        type: "authentication_error" /* AUTHENTICATION_ERROR */,
        message: "\u8EAB\u4EFD\u9A8C\u8BC1\u5931\u8D25\uFF0C\u8BF7\u68C0\u67E5\u60A8\u7684API\u5BC6\u94A5",
        statusCode: status,
        originalError: error
      });
    }
    if (status === 429) {
      return createError({
        type: "rate_limit_error" /* RATE_LIMIT_ERROR */,
        message: "\u8D85\u51FAAPI\u8C03\u7528\u9650\u5236\uFF0C\u8BF7\u7A0D\u540E\u518D\u8BD5",
        statusCode: status,
        originalError: error
      });
    }
    if (status >= 500) {
      return createError({
        type: "server_error" /* SERVER_ERROR */,
        message: "\u670D\u52A1\u5668\u9519\u8BEF\uFF0C\u8BF7\u7A0D\u540E\u518D\u8BD5",
        statusCode: status,
        originalError: error
      });
    }
    return createError({
      type: "unknown_error" /* UNKNOWN_ERROR */,
      message: data?.error?.message || `\u8BF7\u6C42\u5931\u8D25\uFF0C\u72B6\u6001\u7801: ${status}`,
      statusCode: status,
      originalError: error
    });
  }
  if (error.code === "ECONNABORTED") {
    return createError({
      type: "timeout_error" /* TIMEOUT_ERROR */,
      message: "\u8BF7\u6C42\u8D85\u65F6\uFF0C\u8BF7\u7A0D\u540E\u518D\u8BD5",
      originalError: error
    });
  }
  return createError({
    type: "unknown_error" /* UNKNOWN_ERROR */,
    message: error.message || "\u53D1\u751F\u672A\u77E5\u9519\u8BEF",
    originalError: error
  });
}

// src/utils.ts
async function handleSSEStream(response, handler, signal) {
  const reader = response.body?.getReader();
  if (!reader) {
    throw new Error("Response body is null");
  }
  const decoder = new TextDecoder();
  let buffer = "";
  if (signal) {
    signal.addEventListener(
      "abort",
      () => {
        reader.cancel().catch((err) => console.error("Error cancelling reader:", err));
      },
      { once: true }
    );
  }
  try {
    while (true) {
      if (signal?.aborted) {
        await reader.cancel();
        break;
      }
      const { done, value } = await reader.read();
      if (done) break;
      const chunk = decoder.decode(value, { stream: true });
      buffer += chunk;
      const lines = buffer.split("\n\n");
      buffer = lines.pop() || "";
      for (const line of lines) {
        if (line.trim() === "") continue;
        if (line.trim() === "data: [DONE]") {
          handler.onDone();
          continue;
        }
        try {
          const dataMatch = line.match(/^data: (.+)$/m);
          if (!dataMatch) continue;
          const data = JSON.parse(dataMatch[1]);
          handler.onData(data);
        } catch (error) {
          console.error("Error parsing SSE message:", error);
        }
      }
    }
    if (buffer.trim() === "data: [DONE]" || signal?.aborted) {
      handler.onDone();
    }
  } catch (error) {
    if (signal?.aborted) return;
    throw error;
  }
}
function formatMessages(messages) {
  return messages.map((msg) => {
    if (typeof msg === "object" && "role" in msg && "content" in msg) {
      return {
        role: msg.role,
        content: String(msg.content),
        ...msg.name ? { name: msg.name } : {}
      };
    }
    if (typeof msg === "string") {
      return {
        role: "user",
        content: msg
      };
    }
    return {
      role: "user",
      content: String(msg)
    };
  });
}
function extractTextFromResponse(response) {
  if (!response.choices || !response.choices.length) {
    return "";
  }
  return response.choices[0].message?.content || "";
}

// src/providers/openai.ts
var OpenAIProvider = class extends BaseModelProvider {
  /**
   * @param config AI model configuration
   */
  constructor(config) {
    super(config);
    this.defaultModel = "gpt-3.5-turbo";
    this.baseURL = config.apiUrl || "https://api.openai.com/v1";
    this.apiKey = config.apiKey || "";
    if (config.defaultModel) {
      this.defaultModel = config.defaultModel;
    }
    if (!this.apiKey) {
      console.warn("API key is not provided. Authentication will likely fail.");
    }
  }
  /**
   * Send a chat request and get the response
   * @param request Chat request parameters
   * @returns Chat response
   */
  async chat(request) {
    try {
      this.validateRequest(request);
      const requestData = {
        model: request.options?.model || this.config.defaultModel || this.defaultModel,
        messages: request.messages,
        ...request.options,
        stream: false
      };
      const response = await fetch(`${this.baseURL}/chat/completions`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          Authorization: `Bearer ${this.apiKey}`
        },
        body: JSON.stringify(requestData)
      });
      if (!response.ok) {
        const errorText = await response.text();
        throw new Error(`HTTP error! status: ${response.status}, details: ${errorText}`);
      }
      return await response.json();
    } catch (error) {
      throw handleRequestError(error);
    }
  }
  /**
   * Send a streaming chat request and handle the response through the handler
   * @param request Chat request parameters
   * @param handler Streaming response handler
   */
  async chatStream(request, handler) {
    const { signal, ...options } = request.options || {};
    try {
      this.validateRequest(request);
      const requestData = {
        model: request.options?.model || this.config.defaultModel || this.defaultModel,
        messages: request.messages,
        ...options,
        stream: true
      };
      const response = await fetch(`${this.baseURL}/chat/completions`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          Authorization: `Bearer ${this.apiKey}`,
          Accept: "text/event-stream"
        },
        body: JSON.stringify(requestData),
        signal
      });
      if (!response.ok) {
        const errorText = await response.text();
        throw new Error(`HTTP error! status: ${response.status}, details: ${errorText}`);
      }
      await handleSSEStream(response, handler, signal);
    } catch (error) {
      if (signal?.aborted) return;
      handler.onError(handleRequestError(error));
    }
  }
  /**
   * Update the configuration
   * @param config New AI model configuration
   */
  updateConfig(config) {
    super.updateConfig(config);
    if (config.apiUrl) {
      this.baseURL = config.apiUrl;
    }
    if (config.apiKey) {
      this.apiKey = config.apiKey;
    }
    if (config.defaultModel) {
      this.defaultModel = config.defaultModel;
    }
  }
};

// src/client.ts
var AIClient = class {
  /**
   * Constructor
   * @param config AI model configuration
   */
  constructor(config) {
    this.config = config;
    this.provider = this.createProvider(config);
  }
  /**
   * Create a provider instance
   * @param config AI model configuration
   * @returns Provider instance
   */
  createProvider(config) {
    if (config.provider === "custom" && "providerImplementation" in config) {
      return config.providerImplementation;
    }
    switch (config.provider) {
      case "deepseek":
        const defaultConfig = {
          defaultModel: "deepseek-chat",
          apiUrl: "https://api.deepseek.com/v1"
        };
        return new OpenAIProvider({ ...defaultConfig, ...config });
      case "openai":
      default:
        return new OpenAIProvider(config);
    }
  }
  /**
   * Send a chat request and get the response
   * @param request Chat request parameters
   * @returns Chat response
   */
  async chat(request) {
    return this.provider.chat(request);
  }
  /**
   * Send a streaming chat request and handle the response through the handler
   * @param request Chat request parameters
   * @param handler Streaming response handler
   */
  async chatStream(request, handler) {
    const streamRequest = {
      ...request,
      options: {
        ...request.options,
        stream: true
      }
    };
    return this.provider.chatStream(streamRequest, handler);
  }
  /**
   * Get the current configuration
   * @returns AI model configuration
   */
  getConfig() {
    return { ...this.config };
  }
  /**
   * Update the configuration
   * @param config New AI model configuration
   */
  updateConfig(config) {
    this.config = { ...this.config, ...config };
    if (config.provider && config.provider !== this.config.provider) {
      this.provider = this.createProvider(this.config);
    } else {
      this.provider.updateConfig(this.config);
    }
  }
};

// src/vue/useMessage.ts
import { reactive, ref, toRaw } from "vue";
var STATUS = /* @__PURE__ */ ((STATUS2) => {
  STATUS2["INIT"] = "init";
  STATUS2["PROCESSING"] = "processing";
  STATUS2["STREAMING"] = "streaming";
  STATUS2["FINISHED"] = "finished";
  STATUS2["ABORTED"] = "aborted";
  STATUS2["ERROR"] = "error";
  return STATUS2;
})(STATUS || {});
var GeneratingStatus = ["processing" /* PROCESSING */, "streaming" /* STREAMING */];
var FinalStatus = ["finished" /* FINISHED */, "aborted" /* ABORTED */, "error" /* ERROR */];
function useMessage(options) {
  const { client, useStreamByDefault = true, errorMessage = "\u8BF7\u6C42\u5931\u8D25\uFF0C\u8BF7\u7A0D\u540E\u91CD\u8BD5", initialMessages = [] } = options;
  const messages = ref([...initialMessages]);
  const inputMessage = ref("");
  const useStream = ref(useStreamByDefault);
  let abortController = null;
  const messageState = reactive({
    status: "init" /* INIT */,
    errorMsg: null
  });
  const chat = async (abortController2) => {
    const response = await client.chat({
      messages: toRaw(messages.value),
      options: {
        stream: false,
        signal: abortController2.signal
      }
    });
    const assistantMessage = {
      role: "assistant",
      content: response.choices[0].message.content
    };
    messages.value.push(assistantMessage);
  };
  const streamChat = async (abortController2) => {
    await client.chatStream(
      {
        messages: toRaw(messages.value),
        options: {
          stream: true,
          signal: abortController2.signal
        }
      },
      {
        onData: (data) => {
          messageState.status = "streaming" /* STREAMING */;
          if (messages.value[messages.value.length - 1].role === "user") {
            messages.value.push({ role: "assistant", content: "" });
          }
          const choice = data.choices[0];
          if (choice && choice.delta.content) {
            messages.value[messages.value.length - 1].content += choice.delta.content;
          }
        },
        onError: (error) => {
          messageState.status = "error" /* ERROR */;
          messageState.errorMsg = errorMessage;
          console.error("Stream request error:", error);
        },
        onDone: () => {
          messageState.status = "finished" /* FINISHED */;
        }
      }
    );
  };
  const chatRequest = async () => {
    messageState.status = "processing" /* PROCESSING */;
    messageState.errorMsg = null;
    abortController = new AbortController();
    try {
      if (useStream.value) {
        await streamChat(abortController);
      } else {
        await chat(abortController);
      }
      messageState.status = "finished" /* FINISHED */;
    } catch (error) {
      messageState.errorMsg = errorMessage;
      messageState.status = "error" /* ERROR */;
      console.error("Send message error:", error);
    } finally {
      abortController = null;
    }
  };
  const sendMessage = async (content = inputMessage.value, clearInput = true) => {
    if (!content?.trim() || GeneratingStatus.includes(messageState.status)) {
      return;
    }
    const userMessage = {
      role: "user",
      content
    };
    messages.value.push(userMessage);
    if (clearInput) {
      inputMessage.value = "";
    }
    await chatRequest();
  };
  const abortRequest = () => {
    if (abortController) {
      abortController.abort();
      abortController = null;
      messageState.status = "aborted" /* ABORTED */;
    }
  };
  const retryRequest = async (msgIndex) => {
    if (msgIndex === 0 || !messages.value[msgIndex] || messages.value[msgIndex].role === "user") {
      return;
    }
    messages.value.splice(msgIndex);
    await chatRequest();
  };
  const clearMessages = () => {
    messages.value = [];
    messageState.errorMsg = null;
  };
  const addMessage = (message) => {
    messages.value.push(message);
  };
  return {
    messages,
    messageState,
    inputMessage,
    useStream,
    sendMessage,
    clearMessages,
    addMessage,
    abortRequest,
    retryRequest
  };
}
export {
  AIClient,
  BaseModelProvider,
  ErrorType,
  FinalStatus,
  GeneratingStatus,
  OpenAIProvider,
  STATUS,
  StreamEventType,
  extractTextFromResponse,
  formatMessages,
  useMessage
};
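
Taken together, dist/index.mjs exposes a framework-agnostic AIClient plus the Vue composable useMessage built on top of it. A minimal usage sketch based only on the exports above; the API key is a placeholder, and the precise option and response types live in src/types.ts / dist/index.d.ts, whose contents are not reproduced in this diff:

import { AIClient, useMessage } from '@opentiny/tiny-robot-kit'

// Framework-agnostic client; provider may be 'openai', 'deepseek', or 'custom'
const client = new AIClient({
  provider: 'openai',
  apiKey: 'sk-placeholder',              // placeholder value, not a real key
  apiUrl: 'https://api.openai.com/v1',   // this is also the built-in default
  defaultModel: 'gpt-3.5-turbo'
})

// Streaming: the handler mirrors the onData/onError/onDone callbacks used by useMessage above
let text = ''
await client.chatStream(
  { messages: [{ role: 'user', content: 'Hello' }] },
  {
    onData: (chunk) => { text += chunk.choices[0]?.delta?.content ?? '' },
    onError: (err) => console.error(err),
    onDone: () => console.log(text)
  }
)

// Inside a Vue 3 <script setup>, the composable wires the same client to reactive message state
const { messages, messageState, inputMessage, sendMessage, abortRequest } = useMessage({ client })
await sendMessage('Hello')
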
package/package.json
ADDED
@@ -0,0 +1,26 @@
{
  "name": "@opentiny/tiny-robot-kit",
  "version": "0.2.0-alpha.0",
  "publishConfig": {
    "access": "public"
  },
  "description": "AI大模型请求与数据处理工具包",
  "main": "dist/index.js",
  "module": "dist/index.mjs",
  "types": "dist/index.d.ts",
  "scripts": {
    "build": "tsup src/index.ts --format cjs,esm --dts",
    "dev": "tsup src/index.ts --format cjs,esm --dts --watch"
  },
  "author": "",
  "license": "MIT",
  "devDependencies": {
    "@types/node": "^22.13.17",
    "tsup": "^8.0.1",
    "typescript": "^5.8.2"
  },
  "peerDependencies": {
    "vue": ">=3.0.0"
  },
  "gitHead": "3517724651afbbb05b1be20487a9f3341c06133c"
}
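
The manifest declares dual entry points built by tsup: "main" (dist/index.js) for CommonJS, "module" (dist/index.mjs) for ESM, and "types" (dist/index.d.ts) for TypeScript, with vue required only as a peer dependency. A small consumption sketch; the import specifier is simply the published package name, and nothing beyond the fields above is implied:

// esm-app.mts — bundlers and ESM resolve the "module" entry (dist/index.mjs)
import { AIClient } from '@opentiny/tiny-robot-kit'

// cjs-app.cts — CommonJS resolves the "main" entry (dist/index.js)
const { AIClient: AIClientCjs } = require('@opentiny/tiny-robot-kit')
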
package/src/client.ts
ADDED
@@ -0,0 +1,101 @@
/**
 * AI client class
 * Selects the appropriate provider based on the configuration and handles requests
 */

import type { AIModelConfig, ChatCompletionRequest, ChatCompletionResponse, StreamHandler } from './types'
import type { BaseModelProvider } from './providers/base'
import { OpenAIProvider } from './providers/openai'

/**
 * AI client class
 */
export class AIClient {
  private provider: BaseModelProvider
  private config: AIModelConfig

  /**
   * Constructor
   * @param config AI model configuration
   */
  constructor(config: AIModelConfig) {
    this.config = config
    this.provider = this.createProvider(config)
  }

  /**
   * Create a provider instance
   * @param config AI model configuration
   * @returns Provider instance
   */
  private createProvider(config: AIModelConfig): BaseModelProvider {
    // If a custom provider implementation is supplied, use it directly
    if (config.provider === 'custom' && 'providerImplementation' in config) {
      return (config as { providerImplementation: BaseModelProvider }).providerImplementation
    }

    // Create the matching provider instance based on the provider type
    switch (config.provider) {
      case 'deepseek':
        const defaultConfig = {
          defaultModel: 'deepseek-chat',
          apiUrl: 'https://api.deepseek.com/v1',
        }
        return new OpenAIProvider({ ...defaultConfig, ...config })
      case 'openai':
      default:
        return new OpenAIProvider(config)
    }
  }

  /**
   * Send a chat request and get the response
   * @param request Chat request parameters
   * @returns Chat response
   */
  async chat(request: ChatCompletionRequest): Promise<ChatCompletionResponse> {
    return this.provider.chat(request)
  }

  /**
   * Send a streaming chat request and handle the response through the handler
   * @param request Chat request parameters
   * @param handler Streaming response handler
   */
  async chatStream(request: ChatCompletionRequest, handler: StreamHandler): Promise<void> {
    // Ensure streaming is enabled on the request
    const streamRequest = {
      ...request,
      options: {
        ...request.options,
        stream: true,
      },
    }

    return this.provider.chatStream(streamRequest, handler)
  }

  /**
   * Get the current configuration
   * @returns AI model configuration
   */
  getConfig(): AIModelConfig {
    return { ...this.config }
  }

  /**
   * Update the configuration
   * @param config New AI model configuration
   */
  updateConfig(config: Partial<AIModelConfig>): void {
    this.config = { ...this.config, ...config }

    // If the provider type changed, recreate the provider instance
    if (config.provider && config.provider !== this.config.provider) {
      this.provider = this.createProvider(this.config)
    } else {
      // Otherwise just update the provider configuration
      this.provider.updateConfig(this.config)
    }
  }
}
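
Besides the built-in 'openai' and 'deepseek' branches, createProvider accepts a fully custom implementation when provider is 'custom' and a providerImplementation field is present. A hedged sketch of that escape hatch: EchoProvider is a hypothetical class, the minimal response/chunk shapes below cover only what AIClient and useMessage actually read, and the exact AIModelConfig fields for the 'custom' case are defined in src/types.ts (not reproduced here):

import { AIClient, BaseModelProvider } from '@opentiny/tiny-robot-kit'

// Hypothetical provider that simply echoes the last user message
class EchoProvider extends BaseModelProvider {
  async chat(request) {
    const last = request.messages[request.messages.length - 1]
    return { choices: [{ message: { role: 'assistant', content: `echo: ${last.content}` } }] }
  }

  async chatStream(request, handler) {
    const last = request.messages[request.messages.length - 1]
    handler.onData({ choices: [{ delta: { content: `echo: ${last.content}` } }] })
    handler.onDone()
  }
}

const client = new AIClient({
  provider: 'custom',
  providerImplementation: new EchoProvider({ provider: 'custom' })
})

const response = await client.chat({ messages: [{ role: 'user', content: 'ping' }] })
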