@opentiny/tiny-robot-kit 0.2.0-alpha.0

package/dist/index.js ADDED
@@ -0,0 +1,561 @@
+ "use strict";
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __export = (target, all) => {
+   for (var name in all)
+     __defProp(target, name, { get: all[name], enumerable: true });
+ };
+ var __copyProps = (to, from, except, desc) => {
+   if (from && typeof from === "object" || typeof from === "function") {
+     for (let key of __getOwnPropNames(from))
+       if (!__hasOwnProp.call(to, key) && key !== except)
+         __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+   }
+   return to;
+ };
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+ // src/index.ts
+ var index_exports = {};
+ __export(index_exports, {
+   AIClient: () => AIClient,
+   BaseModelProvider: () => BaseModelProvider,
+   ErrorType: () => ErrorType,
+   FinalStatus: () => FinalStatus,
+   GeneratingStatus: () => GeneratingStatus,
+   OpenAIProvider: () => OpenAIProvider,
+   STATUS: () => STATUS,
+   StreamEventType: () => StreamEventType,
+   extractTextFromResponse: () => extractTextFromResponse,
+   formatMessages: () => formatMessages,
+   useMessage: () => useMessage
+ });
+ module.exports = __toCommonJS(index_exports);
+
+ // src/providers/base.ts
+ var BaseModelProvider = class {
+   /**
+    * @param config AI model configuration
+    */
+   constructor(config) {
+     this.config = config;
+   }
+   /**
+    * Update the configuration
+    * @param config New AI model configuration
+    */
+   updateConfig(config) {
+     this.config = { ...this.config, ...config };
+   }
+   /**
+    * Get the current configuration
+    * @returns AI model configuration
+    */
+   getConfig() {
+     return { ...this.config };
+   }
+   /**
+    * Validate request parameters
+    * @param request Chat request parameters
+    */
+   validateRequest(request) {
+     if (!request.messages || !Array.isArray(request.messages) || request.messages.length === 0) {
+       throw new Error("The request must contain at least one message");
+     }
+     for (const message of request.messages) {
+       if (!message.role || !message.content) {
+         throw new Error("Each message must contain a role and content");
+       }
+     }
+   }
+ };
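For illustration, a minimal custom provider built on the class above. `EchoProvider` and its canned reply are hypothetical; only the constructor and `validateRequest` contract come from the source:

const { BaseModelProvider } = require("@opentiny/tiny-robot-kit");

// Hypothetical provider that echoes the last user message back.
class EchoProvider extends BaseModelProvider {
  async chat(request) {
    this.validateRequest(request); // throws if messages are missing role/content
    const last = request.messages[request.messages.length - 1];
    return { choices: [{ message: { role: "assistant", content: `echo: ${last.content}` } }] };
  }
}

new EchoProvider({ provider: "custom" })
  .chat({ messages: [{ role: "user", content: "hi" }] })
  .then((res) => console.log(res.choices[0].message.content)); // "echo: hi"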
+
+ // src/types.ts
+ var ErrorType = /* @__PURE__ */ ((ErrorType2) => {
+   ErrorType2["NETWORK_ERROR"] = "network_error";
+   ErrorType2["AUTHENTICATION_ERROR"] = "authentication_error";
+   ErrorType2["RATE_LIMIT_ERROR"] = "rate_limit_error";
+   ErrorType2["SERVER_ERROR"] = "server_error";
+   ErrorType2["MODEL_ERROR"] = "model_error";
+   ErrorType2["TIMEOUT_ERROR"] = "timeout_error";
+   ErrorType2["UNKNOWN_ERROR"] = "unknown_error";
+   return ErrorType2;
+ })(ErrorType || {});
+ var StreamEventType = /* @__PURE__ */ ((StreamEventType2) => {
+   StreamEventType2["DATA"] = "data";
+   StreamEventType2["ERROR"] = "error";
+   StreamEventType2["DONE"] = "done";
+   return StreamEventType2;
+ })(StreamEventType || {});
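A small sketch of branching on the exported `ErrorType`, the shape produced by the error helpers that follow; the `describeError` helper is illustrative, not part of the package:

const { ErrorType } = require("@opentiny/tiny-robot-kit");

// Map a structured error to a short operator-facing hint.
function describeError(err) {
  switch (err.type) {
    case ErrorType.RATE_LIMIT_ERROR:
      return "Rate limited: back off and retry";
    case ErrorType.AUTHENTICATION_ERROR:
      return "Check the configured API key";
    case ErrorType.TIMEOUT_ERROR:
      return "Request timed out";
    default:
      return err.message;
  }
}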
+
+ // src/error.ts
+ function createError(error) {
+   return {
+     type: error.type || "unknown_error" /* UNKNOWN_ERROR */,
+     message: error.message || "Unknown error",
+     statusCode: error.statusCode,
+     originalError: error.originalError
+   };
+ }
+ function handleRequestError(error) {
+   // Check timeouts first: a timed-out request also carries no `response`,
+   // so this must run before the generic network-error branch.
+   if (error.code === "ECONNABORTED") {
+     return createError({
+       type: "timeout_error" /* TIMEOUT_ERROR */,
+       message: "Request timed out, please try again later",
+       originalError: error
+     });
+   }
+   if (!error.response) {
+     return createError({
+       type: "network_error" /* NETWORK_ERROR */,
+       message: "Network connection error, please check your network connection",
+       originalError: error
+     });
+   }
+   const { status, data } = error.response;
+   if (status === 401 || status === 403) {
+     return createError({
+       type: "authentication_error" /* AUTHENTICATION_ERROR */,
+       message: "Authentication failed, please check your API key",
+       statusCode: status,
+       originalError: error
+     });
+   }
+   if (status === 429) {
+     return createError({
+       type: "rate_limit_error" /* RATE_LIMIT_ERROR */,
+       message: "API rate limit exceeded, please try again later",
+       statusCode: status,
+       originalError: error
+     });
+   }
+   if (status >= 500) {
+     return createError({
+       type: "server_error" /* SERVER_ERROR */,
+       message: "Server error, please try again later",
+       statusCode: status,
+       originalError: error
+     });
+   }
+   return createError({
+     type: "unknown_error" /* UNKNOWN_ERROR */,
+     message: data?.error?.message || `Request failed with status code: ${status}`,
+     statusCode: status,
+     originalError: error
+   });
+ }
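For illustration, how `handleRequestError` (internal to this bundle, not exported) maps an axios-style error object; the input shape with `response.status` is inferred from the code above:

// A 429 response becomes a structured rate-limit error.
const mapped = handleRequestError({
  response: { status: 429, data: { error: { message: "Too many requests" } } }
});
console.log(mapped.type);       // "rate_limit_error"
console.log(mapped.statusCode); // 429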
+
+ // src/utils.ts
+ async function handleSSEStream(response, handler, signal) {
+   const reader = response.body?.getReader();
+   if (!reader) {
+     throw new Error("Response body is null");
+   }
+   const decoder = new TextDecoder();
+   let buffer = "";
+   if (signal) {
+     signal.addEventListener(
+       "abort",
+       () => {
+         reader.cancel().catch((err) => console.error("Error cancelling reader:", err));
+       },
+       { once: true }
+     );
+   }
+   try {
+     while (true) {
+       if (signal?.aborted) {
+         await reader.cancel();
+         break;
+       }
+       const { done, value } = await reader.read();
+       if (done) break;
+       const chunk = decoder.decode(value, { stream: true });
+       buffer += chunk;
+       const lines = buffer.split("\n\n");
+       buffer = lines.pop() || "";
+       for (const line of lines) {
+         if (line.trim() === "") continue;
+         if (line.trim() === "data: [DONE]") {
+           handler.onDone();
+           continue;
+         }
+         try {
+           const dataMatch = line.match(/^data: (.+)$/m);
+           if (!dataMatch) continue;
+           const data = JSON.parse(dataMatch[1]);
+           handler.onData(data);
+         } catch (error) {
+           console.error("Error parsing SSE message:", error);
+         }
+       }
+     }
+     if (buffer.trim() === "data: [DONE]" || signal?.aborted) {
+       handler.onDone();
+     }
+   } catch (error) {
+     if (signal?.aborted) return;
+     throw error;
+   }
+ }
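The `handler` consumed above is a plain object with three callbacks. A minimal sketch, assuming the OpenAI-style `choices[0].delta` chunk shape used elsewhere in this bundle:

const handler = {
  // Called once per parsed SSE "data:" payload.
  onData: (chunk) => process.stdout.write(chunk.choices?.[0]?.delta?.content ?? ""),
  // Called with a structured error from handleRequestError.
  onError: (err) => console.error(err.type, err.message),
  // Called on the terminating "data: [DONE]" event.
  onDone: () => console.log("\n[stream complete]")
};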
+ function formatMessages(messages) {
+   return messages.map((msg) => {
+     if (typeof msg === "object" && "role" in msg && "content" in msg) {
+       return {
+         role: msg.role,
+         content: String(msg.content),
+         ...msg.name ? { name: msg.name } : {}
+       };
+     }
+     if (typeof msg === "string") {
+       return {
+         role: "user",
+         content: msg
+       };
+     }
+     return {
+       role: "user",
+       content: String(msg)
+     };
+   });
+ }
+ function extractTextFromResponse(response) {
+   if (!response.choices || !response.choices.length) {
+     return "";
+   }
+   return response.choices[0].message?.content || "";
+ }
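Both helpers are exported; a quick sketch of their behavior:

const { formatMessages, extractTextFromResponse } = require("@opentiny/tiny-robot-kit");

// Bare strings are promoted to user messages; structured messages pass through.
console.log(formatMessages(["hello", { role: "system", content: "be brief" }]));
// => [{ role: "user", content: "hello" }, { role: "system", content: "be brief" }]

// Pull the assistant text out of a completed (non-streaming) response.
console.log(extractTextFromResponse({ choices: [{ message: { content: "hi" } }] })); // "hi"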
+
+ // src/providers/openai.ts
+ var OpenAIProvider = class extends BaseModelProvider {
+   /**
+    * @param config AI model configuration
+    */
+   constructor(config) {
+     super(config);
+     this.defaultModel = "gpt-3.5-turbo";
+     this.baseURL = config.apiUrl || "https://api.openai.com/v1";
+     this.apiKey = config.apiKey || "";
+     if (config.defaultModel) {
+       this.defaultModel = config.defaultModel;
+     }
+     if (!this.apiKey) {
+       console.warn("API key is not provided. Authentication will likely fail.");
+     }
+   }
+   /**
+    * Send a chat request and return the response
+    * @param request Chat request parameters
+    * @returns Chat response
+    */
+   async chat(request) {
+     // Keep the abort signal out of the JSON body and hand it to fetch instead,
+     // mirroring the handling in chatStream below.
+     const { signal, ...options } = request.options || {};
+     try {
+       this.validateRequest(request);
+       const requestData = {
+         model: request.options?.model || this.config.defaultModel || this.defaultModel,
+         messages: request.messages,
+         ...options,
+         stream: false
+       };
+       const response = await fetch(`${this.baseURL}/chat/completions`, {
+         method: "POST",
+         headers: {
+           "Content-Type": "application/json",
+           Authorization: `Bearer ${this.apiKey}`
+         },
+         body: JSON.stringify(requestData),
+         signal
+       });
+       if (!response.ok) {
+         const errorText = await response.text();
+         throw new Error(`HTTP error! status: ${response.status}, details: ${errorText}`);
+       }
+       return await response.json();
+     } catch (error) {
+       throw handleRequestError(error);
+     }
+   }
+   /**
+    * Send a streaming chat request and process the response through a handler
+    * @param request Chat request parameters
+    * @param handler Stream response handler
+    */
+   async chatStream(request, handler) {
+     const { signal, ...options } = request.options || {};
+     try {
+       this.validateRequest(request);
+       const requestData = {
+         model: request.options?.model || this.config.defaultModel || this.defaultModel,
+         messages: request.messages,
+         ...options,
+         stream: true
+       };
+       const response = await fetch(`${this.baseURL}/chat/completions`, {
+         method: "POST",
+         headers: {
+           "Content-Type": "application/json",
+           Authorization: `Bearer ${this.apiKey}`,
+           Accept: "text/event-stream"
+         },
+         body: JSON.stringify(requestData),
+         signal
+       });
+       if (!response.ok) {
+         const errorText = await response.text();
+         throw new Error(`HTTP error! status: ${response.status}, details: ${errorText}`);
+       }
+       await handleSSEStream(response, handler, signal);
+     } catch (error) {
+       if (signal?.aborted) return;
+       handler.onError(handleRequestError(error));
+     }
+   }
+   /**
+    * Update the configuration
+    * @param config New AI model configuration
+    */
+   updateConfig(config) {
+     super.updateConfig(config);
+     if (config.apiUrl) {
+       this.baseURL = config.apiUrl;
+     }
+     if (config.apiKey) {
+       this.apiKey = config.apiKey;
+     }
+     if (config.defaultModel) {
+       this.defaultModel = config.defaultModel;
+     }
+   }
+ };
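A direct usage sketch of the provider. The key is a placeholder, and any OpenAI-compatible endpoint at `apiUrl` is assumed:

const { OpenAIProvider } = require("@opentiny/tiny-robot-kit");

const provider = new OpenAIProvider({
  provider: "openai",
  apiKey: process.env.OPENAI_API_KEY, // placeholder: supply your own key
  defaultModel: "gpt-3.5-turbo"
});

provider
  .chat({ messages: [{ role: "user", content: "Say hello" }] })
  .then((res) => console.log(res.choices[0].message.content))
  .catch((err) => console.error(err.type, err.message));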
+
+ // src/client.ts
+ var AIClient = class {
+   /**
+    * Constructor
+    * @param config AI model configuration
+    */
+   constructor(config) {
+     this.config = config;
+     this.provider = this.createProvider(config);
+   }
+   /**
+    * Create a provider instance
+    * @param config AI model configuration
+    * @returns Provider instance
+    */
+   createProvider(config) {
+     if (config.provider === "custom" && "providerImplementation" in config) {
+       return config.providerImplementation;
+     }
+     switch (config.provider) {
+       case "deepseek": {
+         const defaultConfig = {
+           defaultModel: "deepseek-chat",
+           apiUrl: "https://api.deepseek.com/v1"
+         };
+         return new OpenAIProvider({ ...defaultConfig, ...config });
+       }
+       case "openai":
+       default:
+         return new OpenAIProvider(config);
+     }
+   }
+   /**
+    * Send a chat request and return the response
+    * @param request Chat request parameters
+    * @returns Chat response
+    */
+   async chat(request) {
+     return this.provider.chat(request);
+   }
+   /**
+    * Send a streaming chat request and process the response through a handler
+    * @param request Chat request parameters
+    * @param handler Stream response handler
+    */
+   async chatStream(request, handler) {
+     const streamRequest = {
+       ...request,
+       options: {
+         ...request.options,
+         stream: true
+       }
+     };
+     return this.provider.chatStream(streamRequest, handler);
+   }
+   /**
+    * Get the current configuration
+    * @returns AI model configuration
+    */
+   getConfig() {
+     return { ...this.config };
+   }
+   /**
+    * Update the configuration
+    * @param config New AI model configuration
+    */
+   updateConfig(config) {
+     // Detect a provider switch before merging: after the merge the incoming
+     // and stored provider values always compare equal.
+     const providerChanged = config.provider && config.provider !== this.config.provider;
+     this.config = { ...this.config, ...config };
+     if (providerChanged) {
+       this.provider = this.createProvider(this.config);
+     } else {
+       this.provider.updateConfig(this.config);
+     }
+   }
+ };
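A streaming sketch through `AIClient`; per `createProvider` above, `provider: "deepseek"` fills in the `deepseek-chat` model and API URL defaults (the key is a placeholder):

const { AIClient } = require("@opentiny/tiny-robot-kit");

const client = new AIClient({ provider: "deepseek", apiKey: process.env.DEEPSEEK_API_KEY });

client.chatStream(
  { messages: [{ role: "user", content: "Stream a short greeting" }] },
  {
    onData: (chunk) => process.stdout.write(chunk.choices?.[0]?.delta?.content ?? ""),
    onError: (err) => console.error(err.message),
    onDone: () => console.log("\ndone")
  }
);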
+
+ // src/vue/useMessage.ts
+ var import_vue = require("vue");
+ var STATUS = /* @__PURE__ */ ((STATUS2) => {
+   STATUS2["INIT"] = "init";
+   STATUS2["PROCESSING"] = "processing";
+   STATUS2["STREAMING"] = "streaming";
+   STATUS2["FINISHED"] = "finished";
+   STATUS2["ABORTED"] = "aborted";
+   STATUS2["ERROR"] = "error";
+   return STATUS2;
+ })(STATUS || {});
+ var GeneratingStatus = ["processing" /* PROCESSING */, "streaming" /* STREAMING */];
+ var FinalStatus = ["finished" /* FINISHED */, "aborted" /* ABORTED */, "error" /* ERROR */];
+ function useMessage(options) {
+   const { client, useStreamByDefault = true, errorMessage = "Request failed, please try again later", initialMessages = [] } = options;
+   const messages = (0, import_vue.ref)([...initialMessages]);
+   const inputMessage = (0, import_vue.ref)("");
+   const useStream = (0, import_vue.ref)(useStreamByDefault);
+   let abortController = null;
+   const messageState = (0, import_vue.reactive)({
+     status: "init" /* INIT */,
+     errorMsg: null
+   });
+   const chat = async (abortController2) => {
+     const response = await client.chat({
+       messages: (0, import_vue.toRaw)(messages.value),
+       options: {
+         stream: false,
+         signal: abortController2.signal
+       }
+     });
+     const assistantMessage = {
+       role: "assistant",
+       content: response.choices[0].message.content
+     };
+     messages.value.push(assistantMessage);
+   };
+   const streamChat = async (abortController2) => {
+     await client.chatStream(
+       {
+         messages: (0, import_vue.toRaw)(messages.value),
+         options: {
+           stream: true,
+           signal: abortController2.signal
+         }
+       },
+       {
+         onData: (data) => {
+           messageState.status = "streaming" /* STREAMING */;
+           if (messages.value[messages.value.length - 1].role === "user") {
+             messages.value.push({ role: "assistant", content: "" });
+           }
+           const choice = data.choices[0];
+           if (choice && choice.delta.content) {
+             messages.value[messages.value.length - 1].content += choice.delta.content;
+           }
+         },
+         onError: (error) => {
+           messageState.status = "error" /* ERROR */;
+           messageState.errorMsg = errorMessage;
+           console.error("Stream request error:", error);
+         },
+         onDone: () => {
+           messageState.status = "finished" /* FINISHED */;
+         }
+       }
+     );
+   };
+   const chatRequest = async () => {
+     messageState.status = "processing" /* PROCESSING */;
+     messageState.errorMsg = null;
+     abortController = new AbortController();
+     try {
+       if (useStream.value) {
+         await streamChat(abortController);
+       } else {
+         await chat(abortController);
+       }
+       // Don't clobber a terminal state already set by abortRequest or onError.
+       if (!FinalStatus.includes(messageState.status)) {
+         messageState.status = "finished" /* FINISHED */;
+       }
+     } catch (error) {
+       // An aborted request is not an error; keep the "aborted" status.
+       if (messageState.status !== "aborted" /* ABORTED */) {
+         messageState.errorMsg = errorMessage;
+         messageState.status = "error" /* ERROR */;
+         console.error("Send message error:", error);
+       }
+     } finally {
+       abortController = null;
+     }
+   };
+   const sendMessage = async (content = inputMessage.value, clearInput = true) => {
+     if (!content?.trim() || GeneratingStatus.includes(messageState.status)) {
+       return;
+     }
+     const userMessage = {
+       role: "user",
+       content
+     };
+     messages.value.push(userMessage);
+     if (clearInput) {
+       inputMessage.value = "";
+     }
+     await chatRequest();
+   };
+   const abortRequest = () => {
+     if (abortController) {
+       abortController.abort();
+       abortController = null;
+       messageState.status = "aborted" /* ABORTED */;
+     }
+   };
+   const retryRequest = async (msgIndex) => {
+     if (msgIndex === 0 || !messages.value[msgIndex] || messages.value[msgIndex].role === "user") {
+       return;
+     }
+     messages.value.splice(msgIndex);
+     await chatRequest();
+   };
+   const clearMessages = () => {
+     messages.value = [];
+     messageState.errorMsg = null;
+   };
+   const addMessage = (message) => {
+     messages.value.push(message);
+   };
+   return {
+     messages,
+     messageState,
+     inputMessage,
+     useStream,
+     sendMessage,
+     clearMessages,
+     addMessage,
+     abortRequest,
+     retryRequest
+   };
+ }
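A sketch of the composable inside a Vue component's `setup()`; the component wiring is illustrative, and only `client` is required by the options destructured above:

const { defineComponent } = require("vue");
const { AIClient, useMessage, GeneratingStatus } = require("@opentiny/tiny-robot-kit");

module.exports = defineComponent({
  setup() {
    const client = new AIClient({ provider: "openai", apiKey: process.env.OPENAI_API_KEY });
    const { messages, messageState, inputMessage, sendMessage, abortRequest } = useMessage({
      client,
      useStreamByDefault: true,
      initialMessages: [{ role: "system", content: "You are a helpful assistant." }]
    });
    // Derived flag the template can use to disable the send button mid-request.
    const isGenerating = () => GeneratingStatus.includes(messageState.status);
    return { messages, messageState, inputMessage, sendMessage, abortRequest, isGenerating };
  }
});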
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
+   AIClient,
+   BaseModelProvider,
+   ErrorType,
+   FinalStatus,
+   GeneratingStatus,
+   OpenAIProvider,
+   STATUS,
+   StreamEventType,
+   extractTextFromResponse,
+   formatMessages,
+   useMessage
+ });