@vectorx/ai-sdk 1.0.0 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/lib/ai.d.ts +1 -1
  2. package/lib/ai.js +14 -0
  3. package/lib/model-type.d.ts +26 -0
  4. package/lib/models/Chat.d.ts +2 -2
  5. package/lib/models/Default/index.d.ts +1 -0
  6. package/lib/models/Default/index.js +27 -1
  7. package/lib/models/Qwen25T2iPreview/index.d.ts +3 -1
  8. package/lib/models/Qwen25T2iPreview/index.js +49 -0
  9. package/lib/models/Qwen3VlPlus/index.d.ts +2 -0
  10. package/lib/models/Qwen3VlPlus/index.js +66 -2
  11. package/lib/models/QwenCosyVoiceTTS/index.d.ts +8 -0
  12. package/lib/models/QwenCosyVoiceTTS/index.js +178 -0
  13. package/lib/models/QwenDocTurbo/adapters/DashScope.d.ts +2 -0
  14. package/lib/models/QwenDocTurbo/adapters/DashScope.js +86 -11
  15. package/lib/models/QwenDocTurbo/adapters/OpenAICompat.d.ts +2 -0
  16. package/lib/models/QwenDocTurbo/adapters/OpenAICompat.js +102 -3
  17. package/lib/models/QwenImage/index.d.ts +3 -1
  18. package/lib/models/QwenImage/index.js +52 -1
  19. package/lib/models/QwenImage20/index.d.ts +91 -0
  20. package/lib/models/QwenImage20/index.js +244 -0
  21. package/lib/models/QwenImageEdit/index.d.ts +3 -1
  22. package/lib/models/QwenImageEdit/index.js +52 -1
  23. package/lib/models/QwenSketchToImage/index.d.ts +1 -1
  24. package/lib/models/QwenStyleRepaintV1/index.d.ts +1 -1
  25. package/lib/models/QwenStyleRepaintV1/index.js +3 -1
  26. package/lib/models/QwenVlMax/index.d.ts +2 -0
  27. package/lib/models/QwenVlMax/index.js +67 -2
  28. package/lib/models/TTSModel.d.ts +11 -0
  29. package/lib/models/TTSModel.js +75 -0
  30. package/lib/models/Wan26Image/index.d.ts +3 -1
  31. package/lib/models/Wan26Image/index.js +52 -1
  32. package/lib/models/index.d.ts +25 -3
  33. package/lib/models/index.js +49 -2
  34. package/lib/models/react.d.ts +6 -2
  35. package/lib/models/react.js +22 -0
  36. package/lib/stream.js +10 -7
  37. package/lib/tokenManager.js +11 -1
  38. package/package.json +9 -7
@@ -0,0 +1,244 @@
1
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.QwenImage20FastModel = exports.QwenImage20ProModel = exports.QwenImage20Model = void 0;
const stream_1 = require("../../stream");
const utils_1 = require("../../utils");
const Chat_1 = require("../Chat");
const index_1 = require("../index");
/**
 * Chat-style wrapper around the DashScope Qwen-Image 2.0 multimodal
 * generation endpoint. Requests are synchronous (non-streaming) POSTs;
 * doStream() emulates streaming by wrapping the single result in a
 * one-chunk stream.
 */
class QwenImage20Model extends Chat_1.SimpleChatModel {
    constructor(req, baseUrl, tokenManager, model = index_1.modelName[index_1.MultiModalModelName.QwenImage20Pro]) {
        super(req, baseUrl, QwenImage20Model.SUB_URL, tokenManager);
        this.modelName = model;
    }
    /**
     * Maps a raw DashScope multimodal-generation response onto the SDK's
     * standard chat-completion shape. The image URL is taken from
     * output.choices[0].message.content[0].image when present.
     */
    normalizeStandardImageCompletion(res, fallbackModel) {
        const qOutput = (res === null || res === void 0 ? void 0 : res.output) || {};
        const first = qOutput?.choices?.[0] ?? null;
        const message = first?.message ?? {};
        const contentUrl = Array.isArray(message?.content) && message.content[0]?.image
            ? String(message.content[0].image)
            : "";
        const created = res?.created ?? Math.floor(Date.now() / 1000);
        const id = (res?.id) || (res?.request_id) || "";
        // Fix: res.error may be an object ({ code, message }) while the
        // normalized `error` field is expected to be a string — coerce it.
        const rawError = res?.error ?? res?.message ?? "";
        const error = typeof rawError === "string"
            ? rawError
            : String(rawError?.message ?? JSON.stringify(rawError));
        return {
            id,
            object: res?.object ?? "chat.completion",
            created,
            model: res?.model ?? fallbackModel,
            log_id: id,
            error,
            code: typeof (res?.code) === "number" ? res.code : 0,
            choices: [
                {
                    index: 0,
                    message: {
                        id,
                        role: "assistant",
                        type: "image",
                        content: contentUrl || "",
                        reasoning_content: "",
                    },
                    finish_reason: first?.finish_reason ?? "stop",
                },
            ],
            usage: mapUsageToStandard(res?.usage),
        };
    }
    /**
     * Builds the DashScope request body from the SDK's generic request shape.
     * Text/images are read from the first user message (or messages[0]); the
     * flat `image` / `images` / `msg` / `prompt` fields act as fallbacks.
     * Throws when no prompt text is found or more than 3 images are supplied.
     */
    convertToQwenImage20Input(data) {
        const imageUrls = [];
        const texts = [];
        const messages = data.messages || data.history || [];
        if (Array.isArray(messages) && messages.length > 0) {
            const firstUser = messages.find((m) => m?.role === "user") ?? messages[0];
            const c = firstUser?.content;
            if (typeof c === "string" && c.trim()) {
                texts.push(c.trim());
            }
            else if (Array.isArray(c)) {
                for (const part of c) {
                    if (part?.type === "image_url" && part.image_url?.url) {
                        imageUrls.push(String(part.image_url.url));
                    }
                    else if (part?.type === "image" && part.image) {
                        imageUrls.push(String(part.image));
                    }
                    else if (part?.type === "text" && typeof part.text === "string" && part.text.trim()) {
                        texts.push(part.text.trim());
                    }
                }
            }
        }
        if (imageUrls.length === 0 && data.image)
            imageUrls.push(String(data.image));
        if (imageUrls.length === 0 && Array.isArray(data.images))
            imageUrls.push(...data.images.map(String));
        if (texts.length === 0 && data.msg)
            texts.push(String(data.msg));
        if (texts.length === 0 && data.prompt)
            texts.push(String(data.prompt));
        const prompt = texts.join(" ").trim();
        if (!prompt)
            throw new Error("QwenImage20 需要提供一个 text 提示词");
        if (imageUrls.length > 3) {
            throw new Error("QwenImage20 图像编辑最多支持 3 张输入图");
        }
        const content = [];
        imageUrls.forEach((url) => content.push({ image: url }));
        content.push({ text: prompt });
        const userParams = { ...(data.parameters || {}) };
        if (!userParams.negative_prompt && data.negative_prompt) {
            userParams.negative_prompt = String(data.negative_prompt);
        }
        // Caller-supplied parameters override the defaults below.
        const parameters = { watermark: false, prompt_extend: true, n: 1, ...userParams };
        return {
            model: this.modelName,
            input: {
                messages: [{ role: "user", content }],
            },
            parameters,
        };
    }
    /**
     * POSTs the request body to the generation endpoint (non-streaming) and
     * returns the parsed response payload.
     * NOTE(review): options.timeout is accepted but not forwarded to
     * req.fetch — confirm whether the request layer applies it elsewhere.
     */
    async modelRequest(data, options = { timeout: 60 * 1000 }) {
        const fetchHeaders = await this.createAuthHeaders(options?.headers);
        const joinedUrl = `${String(this.baseUrl).replace(/\/+$/, "")}/${String(this.subUrl).replace(/^\/+/, "")}`;
        const { data: responseData, header } = await this.req.fetch({
            url: joinedUrl,
            headers: { ...fetchHeaders, ...(options?.headers || {}) },
            body: JSON.stringify(data),
            method: "post",
            stream: false,
        });
        return (0, utils_1.handleResponseData)(responseData, header);
    }
    /**
     * Heuristic error detection on the raw response: a structured error
     * envelope, or a non-empty string / non-zero numeric `code` without any
     * `output` payload.
     */
    isErrorResponse(obj) {
        if (obj?.error && typeof obj.error === "object" && obj.error.code)
            return true;
        if (typeof obj?.code === "string" && obj.code && !obj?.output)
            return true;
        if (typeof obj?.code === "number" && obj.code !== 0 && !obj?.output)
            return true;
        return false;
    }
    /**
     * Converts an error response into the standard chat-completion shape with
     * a single "error" message and zeroed usage.
     */
    normalizeErrorResponse(res) {
        const errObj = typeof res?.error === "object" ? res.error : {};
        const errorCode = errObj?.code || res?.code || "unknown_error";
        const errorMessage = errObj?.message || res?.message || "Unknown error";
        const id = (res?.id) || (res?.request_id) || "";
        const created = res?.created ?? Math.floor(Date.now() / 1000);
        const text = `[${errorCode}] ${errorMessage}`;
        return {
            id,
            object: "chat.completion",
            created,
            model: res?.model ?? this.modelName,
            log_id: id,
            error: text,
            code: typeof res?.code === "number" ? res.code : -1,
            choices: [
                {
                    index: 0,
                    message: {
                        id,
                        role: "assistant",
                        type: "error",
                        content: text,
                        reasoning_content: "",
                    },
                    finish_reason: "error",
                },
            ],
            usage: {
                prompt_tokens: 0,
                completion_tokens: 0,
                knowledge_tokens: 0,
                reasoning_tokens: 0,
                total_tokens: 0,
            },
        };
    }
    /** Runs one generation request and normalizes success or error output. */
    async doGenerate(data, options) {
        data.model = this.modelName;
        const payload = this.convertToQwenImage20Input(data);
        const res = await this.modelRequest(payload, options);
        if (this.isErrorResponse(res)) {
            return this.normalizeErrorResponse(res);
        }
        return this.normalizeStandardImageCompletion(res, this.modelName);
    }
    /**
     * Image generation has no true streaming mode; delegates to doGenerate()
     * and emits the result as a single chat.completion.chunk.
     */
    async doStream(data, options) {
        const nonStream = await this.doGenerate({ ...data }, options);
        const msg = nonStream.choices?.[0]?.message || {};
        const singleChunk = {
            id: nonStream.id,
            object: "chat.completion.chunk",
            created: nonStream.created,
            model: nonStream.model,
            log_id: nonStream.log_id,
            error: nonStream.error || "",
            code: nonStream.code || 0,
            choices: [
                {
                    index: 0,
                    message: {
                        id: nonStream.id,
                        role: "assistant",
                        type: msg.type || "image",
                        content: msg.content || "",
                        reasoning_content: "",
                    },
                    finish_reason: "stop",
                },
            ],
            usage: nonStream.usage,
        };
        const stream = new stream_1.ReadableStream({
            start(controller) {
                controller.enqueue(singleChunk);
                controller.close();
            },
        });
        return (0, stream_1.createAsyncIterable)(stream);
    }
}
exports.QwenImage20Model = QwenImage20Model;
QwenImage20Model.BASE_URL = "https://dashscope.aliyuncs.com";
QwenImage20Model.SUB_URL = "api/v1/services/aigc/multimodal-generation/generation";
/** Qwen-Image 2.0 Pro variant (explicit model name). */
class QwenImage20ProModel extends QwenImage20Model {
    constructor(req, baseUrl, tokenManager) {
        super(req, baseUrl, tokenManager, index_1.modelName[index_1.MultiModalModelName.QwenImage20Pro]);
    }
}
exports.QwenImage20ProModel = QwenImage20ProModel;
/** Qwen-Image 2.0 fast variant (explicit model name). */
class QwenImage20FastModel extends QwenImage20Model {
    constructor(req, baseUrl, tokenManager) {
        super(req, baseUrl, tokenManager, index_1.modelName[index_1.MultiModalModelName.QwenImage20]);
    }
}
exports.QwenImage20FastModel = QwenImage20FastModel;
236
/**
 * Maps a DashScope usage payload onto the SDK's standard usage shape.
 * Fix: the previous version discarded the payload entirely and always
 * returned zeros. DashScope multimodal responses report `input_tokens` /
 * `output_tokens` (and sometimes `total_tokens`) — TODO confirm field names
 * against the live API; any absent or non-numeric field still defaults to 0,
 * so the old all-zero behavior is preserved for empty payloads.
 */
function mapUsageToStandard(usage) {
    const promptTokens = Number(usage === null || usage === void 0 ? void 0 : usage.input_tokens) || 0;
    const completionTokens = Number(usage === null || usage === void 0 ? void 0 : usage.output_tokens) || 0;
    const totalTokens = Number(usage === null || usage === void 0 ? void 0 : usage.total_tokens) || promptTokens + completionTokens;
    return {
        prompt_tokens: promptTokens,
        completion_tokens: completionTokens,
        knowledge_tokens: 0,
        reasoning_tokens: 0,
        total_tokens: totalTokens,
    };
}
@@ -1,6 +1,6 @@
1
1
  import type { IAbstractRequest } from "@vectorx/ai-types";
2
2
  import type { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions } from "../../model-type";
3
- import { TokenManager } from "../../tokenManager";
3
+ import type { TokenManager } from "../../tokenManager";
4
4
  import { SimpleChatModel } from "../Chat";
5
5
  export interface QwenImageEditParameters {
6
6
  negative_prompt?: string;
@@ -72,6 +72,8 @@ export declare class QwenImageEditModel extends SimpleChatModel {
72
72
  parameters?: QwenImageEditParameters;
73
73
  }): QwenImageEditRequestOptions;
74
74
  protected modelRequest(data: QwenImageEditRequestOptions, options?: ReqOptions): Promise<ReadableStream<Uint8Array> | Promise<unknown>>;
75
+ private isErrorResponse;
76
+ private normalizeErrorResponse;
75
77
  doGenerate(data: ModelRequestOptions, options?: ReqOptions): Promise<DoGenerateOutput>;
76
78
  doStream(data: ModelRequestOptions, options?: ReqOptions): Promise<DoStreamOutput>;
77
79
  }
@@ -51,7 +51,9 @@ class QwenImageEditModel extends Chat_1.SimpleChatModel {
51
51
  }
52
52
  const first = ((_g = (_f = qOutput === null || qOutput === void 0 ? void 0 : qOutput.choices) === null || _f === void 0 ? void 0 : _f[0]) !== null && _g !== void 0 ? _g : null);
53
53
  const message = (_h = first === null || first === void 0 ? void 0 : first.message) !== null && _h !== void 0 ? _h : {};
54
- const contentUrl = Array.isArray(message === null || message === void 0 ? void 0 : message.content) && ((_j = message.content[0]) === null || _j === void 0 ? void 0 : _j.image) ? String(message.content[0].image) : "";
54
+ const contentUrl = Array.isArray(message === null || message === void 0 ? void 0 : message.content) && ((_j = message.content[0]) === null || _j === void 0 ? void 0 : _j.image)
55
+ ? String(message.content[0].image)
56
+ : "";
55
57
  const created = (_k = res === null || res === void 0 ? void 0 : res.created) !== null && _k !== void 0 ? _k : Math.floor(Date.now() / 1000);
56
58
  const id = (res === null || res === void 0 ? void 0 : res.id) || (res === null || res === void 0 ? void 0 : res.request_id) || "";
57
59
  const normalized = {
@@ -145,11 +147,60 @@ class QwenImageEditModel extends Chat_1.SimpleChatModel {
145
147
  return (0, utils_1.handleResponseData)(responseData, header);
146
148
  });
147
149
  }
150
+ isErrorResponse(obj) {
151
+ if ((obj === null || obj === void 0 ? void 0 : obj.error) && typeof obj.error === "object" && obj.error.code)
152
+ return true;
153
+ if (typeof (obj === null || obj === void 0 ? void 0 : obj.code) === "string" && !(obj === null || obj === void 0 ? void 0 : obj.output))
154
+ return true;
155
+ if (typeof (obj === null || obj === void 0 ? void 0 : obj.code) === "number" && obj.code !== 0 && !(obj === null || obj === void 0 ? void 0 : obj.output))
156
+ return true;
157
+ return false;
158
+ }
159
+ normalizeErrorResponse(res) {
160
+ var _a, _b;
161
+ const err = typeof (res === null || res === void 0 ? void 0 : res.error) === "object" ? res.error : {};
162
+ const errorCode = (err === null || err === void 0 ? void 0 : err.code) || (res === null || res === void 0 ? void 0 : res.code) || "unknown_error";
163
+ const errorMessage = (err === null || err === void 0 ? void 0 : err.message) || (res === null || res === void 0 ? void 0 : res.message) || "Unknown error";
164
+ const id = (res === null || res === void 0 ? void 0 : res.id) || (res === null || res === void 0 ? void 0 : res.request_id) || "";
165
+ const created = (_a = res === null || res === void 0 ? void 0 : res.created) !== null && _a !== void 0 ? _a : Math.floor(Date.now() / 1000);
166
+ return {
167
+ id,
168
+ object: "chat.completion",
169
+ created,
170
+ model: (_b = res === null || res === void 0 ? void 0 : res.model) !== null && _b !== void 0 ? _b : this.modelName,
171
+ log_id: id,
172
+ error: `[${errorCode}] ${errorMessage}`,
173
+ code: typeof (res === null || res === void 0 ? void 0 : res.code) === "number" ? res.code : -1,
174
+ choices: [
175
+ {
176
+ index: 0,
177
+ message: {
178
+ id,
179
+ role: "assistant",
180
+ type: "error",
181
+ content: `[${errorCode}] ${errorMessage}`,
182
+ reasoning_content: "",
183
+ },
184
+ finish_reason: "error",
185
+ },
186
+ ],
187
+ usage: {
188
+ prompt_tokens: 0,
189
+ completion_tokens: 0,
190
+ knowledge_tokens: 0,
191
+ reasoning_tokens: 0,
192
+ total_tokens: 0,
193
+ },
194
+ };
195
+ }
148
196
  doGenerate(data, options) {
149
197
  return __awaiter(this, void 0, void 0, function* () {
150
198
  data.model = this.modelName;
151
199
  const payload = this.coverModelRequestToQwenInput(data);
152
200
  const res = (yield this.modelRequest(payload, options));
201
+ if (this.isErrorResponse(res)) {
202
+ return this.normalizeErrorResponse(res);
203
+ }
153
204
  return this.normalizeStandardImageEditCompletion(res, this.modelName);
154
205
  });
155
206
  }
@@ -1,6 +1,6 @@
1
1
  import type { IAbstractRequest } from "@vectorx/ai-types";
2
2
  import type { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions } from "../../model-type";
3
- import { TokenManager } from "../../tokenManager";
3
+ import type { TokenManager } from "../../tokenManager";
4
4
  import { SimpleChatModel } from "../Chat";
5
5
  export interface WanxSketchToImageLiteParameters {
6
6
  size?: "768*768" | "720*1280" | "1280*720" | string;
@@ -1,6 +1,6 @@
1
1
  import type { IAbstractRequest } from "@vectorx/ai-types";
2
2
  import type { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions } from "../../model-type";
3
- import { TokenManager } from "../../tokenManager";
3
+ import type { TokenManager } from "../../tokenManager";
4
4
  import { SimpleChatModel } from "../Chat";
5
5
  export declare const STYLE_INDEX: {
6
6
  readonly CUSTOM: -1;
@@ -78,7 +78,9 @@ class QwenStyleRepaintV1Model extends Chat_1.SimpleChatModel {
78
78
  }
79
79
  const first = ((_g = (_f = qOutput === null || qOutput === void 0 ? void 0 : qOutput.choices) === null || _f === void 0 ? void 0 : _f[0]) !== null && _g !== void 0 ? _g : null);
80
80
  const message = (_h = first === null || first === void 0 ? void 0 : first.message) !== null && _h !== void 0 ? _h : {};
81
- const contentUrl = Array.isArray(message === null || message === void 0 ? void 0 : message.content) && ((_j = message.content[0]) === null || _j === void 0 ? void 0 : _j.image) ? String(message.content[0].image) : "";
81
+ const contentUrl = Array.isArray(message === null || message === void 0 ? void 0 : message.content) && ((_j = message.content[0]) === null || _j === void 0 ? void 0 : _j.image)
82
+ ? String(message.content[0].image)
83
+ : "";
82
84
  const created = (_k = res === null || res === void 0 ? void 0 : res.created) !== null && _k !== void 0 ? _k : Math.floor(Date.now() / 1000);
83
85
  const id = (res === null || res === void 0 ? void 0 : res.id) || (res === null || res === void 0 ? void 0 : res.request_id) || "";
84
86
  const normalized = {
@@ -72,6 +72,8 @@ export declare class QwenVlMax extends SimpleChatModel {
72
72
  constructor(req: IAbstractRequest, baseUrl: string, modelName: MultiModalModelName, tokenManager: TokenManager);
73
73
  protected modelRequest(data: QwenVlMaxAPIInput, options?: ReqOptions): Promise<ReadableStream<Uint8Array> | Promise<unknown>>;
74
74
  protected normalizeResponse(response: QwenVlMaxResponse): DoGenerateOutput;
75
+ private normalizeErrorResponse;
76
+ private isErrorResponse;
75
77
  doGenerate(data: ModelRequestOptions, options?: ReqOptions): Promise<DoGenerateOutput>;
76
78
  doStream(data: ModelRequestOptions, options?: ReqOptions): Promise<DoStreamOutput>;
77
79
  private convertToQwenVlMaxRequestOptions;
@@ -77,11 +77,61 @@ class QwenVlMax extends Chat_1.SimpleChatModel {
77
77
  },
78
78
  };
79
79
  }
80
+ normalizeErrorResponse(res) {
81
+ var _a, _b;
82
+ const err = (res === null || res === void 0 ? void 0 : res.error) || {};
83
+ const errorCode = (err === null || err === void 0 ? void 0 : err.code) || (res === null || res === void 0 ? void 0 : res.code) || "unknown_error";
84
+ const errorMessage = (err === null || err === void 0 ? void 0 : err.message) || (res === null || res === void 0 ? void 0 : res.message) || "Unknown error";
85
+ const id = (res === null || res === void 0 ? void 0 : res.id) || (res === null || res === void 0 ? void 0 : res.request_id) || "";
86
+ const created = (_a = res === null || res === void 0 ? void 0 : res.created) !== null && _a !== void 0 ? _a : Math.floor(Date.now() / 1000);
87
+ return {
88
+ id,
89
+ object: "chat.completion",
90
+ created,
91
+ model: (_b = res === null || res === void 0 ? void 0 : res.model) !== null && _b !== void 0 ? _b : this.modelName,
92
+ log_id: id,
93
+ error: `[${errorCode}] ${errorMessage}`,
94
+ code: typeof (res === null || res === void 0 ? void 0 : res.code) === "number" ? res.code : -1,
95
+ choices: [
96
+ {
97
+ index: 0,
98
+ message: {
99
+ id,
100
+ role: "assistant",
101
+ type: "error",
102
+ content: `[${errorCode}] ${errorMessage}`,
103
+ reasoning_content: "",
104
+ },
105
+ finish_reason: "error",
106
+ },
107
+ ],
108
+ usage: {
109
+ prompt_tokens: 0,
110
+ completion_tokens: 0,
111
+ knowledge_tokens: 0,
112
+ reasoning_tokens: 0,
113
+ total_tokens: 0,
114
+ },
115
+ };
116
+ }
117
+ isErrorResponse(obj) {
118
+ if ((obj === null || obj === void 0 ? void 0 : obj.error) && typeof obj.error === "object" && obj.error.code)
119
+ return true;
120
+ if ((obj === null || obj === void 0 ? void 0 : obj.error) && typeof obj.error === "string" && obj.error.length > 0 && !obj.choices)
121
+ return true;
122
+ if (typeof (obj === null || obj === void 0 ? void 0 : obj.code) === "number" && obj.code !== 0 && !obj.choices)
123
+ return true;
124
+ return false;
125
+ }
80
126
  doGenerate(data, options) {
81
127
  return __awaiter(this, void 0, void 0, function* () {
82
128
  const qwenVlMaxData = this.convertToQwenVlMaxRequestOptions(data);
83
129
  const requestData = Object.assign(Object.assign({}, qwenVlMaxData), { stream: false });
84
130
  const res = yield this.modelRequest(requestData, options);
131
+ const resObj = res;
132
+ if (this.isErrorResponse(resObj)) {
133
+ return this.normalizeErrorResponse(resObj);
134
+ }
85
135
  return this.normalizeResponse(res);
86
136
  });
87
137
  }
@@ -92,7 +142,20 @@ class QwenVlMax extends Chat_1.SimpleChatModel {
92
142
  const _stream = (yield this.modelRequest(requestData, options));
93
143
  const stream = (0, stream_1.toPolyfillReadable)(_stream);
94
144
  const standardStream = (0, stream_1.intoStandardStream)(stream);
95
- return (0, stream_1.createAsyncIterable)(standardStream);
145
+ const self = this;
146
+ const errorNormalizingStream = standardStream.pipeThrough(new stream_1.TransformStream({
147
+ transform(chunk, controller) {
148
+ const raw = chunk;
149
+ if (self.isErrorResponse(raw)) {
150
+ const normalized = self.normalizeErrorResponse(raw);
151
+ controller.enqueue(normalized);
152
+ }
153
+ else {
154
+ controller.enqueue(chunk);
155
+ }
156
+ },
157
+ }));
158
+ return (0, stream_1.createAsyncIterable)(errorNormalizingStream);
96
159
  });
97
160
  }
98
161
  convertToQwenVlMaxRequestOptions(data) {
@@ -100,7 +163,9 @@ class QwenVlMax extends Chat_1.SimpleChatModel {
100
163
  const clamp = (value, min, max, defaultValue) => value !== undefined ? Math.max(min, Math.min(max, value)) : defaultValue;
101
164
  const messages = (data.messages || []).map((msg) => ({
102
165
  role: msg.role,
103
- content: Array.isArray(msg.content) ? (0, model_type_1.filterContentByTypes)(msg.content, ["text", "image_url"]) : msg.content,
166
+ content: Array.isArray(msg.content)
167
+ ? (0, model_type_1.filterContentByTypes)(msg.content, ["text", "image_url"])
168
+ : msg.content,
104
169
  }));
105
170
  return {
106
171
  model: this.modelName,
@@ -0,0 +1,11 @@
1
+ import type { AsyncIterableReadableStream, DoGenerateSpeechOutput, DoStreamSpeechOutput, TTSRequestOptions } from "../model-type";
2
+ import type { TokenManager } from "../tokenManager";
3
+ export declare abstract class SimpleTTSModel {
4
+ protected modelName: string;
5
+ protected tokenManager: TokenManager;
6
+ constructor(tokenManager: TokenManager, modelName: string);
7
+ abstract doStreamSpeech(options: TTSRequestOptions): Promise<DoStreamSpeechOutput>;
8
+ doGenerateSpeech(options: TTSRequestOptions): Promise<DoGenerateSpeechOutput>;
9
+ protected getValidToken(): Promise<string>;
10
+ protected createAsyncIterable<T>(stream: ReadableStream<T>): AsyncIterableReadableStream<T>;
11
+ }
@@ -0,0 +1,75 @@
1
+ "use strict";
2
+ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
3
+ function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
4
+ return new (P || (P = Promise))(function (resolve, reject) {
5
+ function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
6
+ function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
7
+ function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
8
+ step((generator = generator.apply(thisArg, _arguments || [])).next());
9
+ });
10
+ };
11
+ var __asyncValues = (this && this.__asyncValues) || function (o) {
12
+ if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
13
+ var m = o[Symbol.asyncIterator], i;
14
+ return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
15
+ function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
16
+ function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
17
+ };
18
+ Object.defineProperty(exports, "__esModule", { value: true });
19
+ exports.SimpleTTSModel = void 0;
20
+ class SimpleTTSModel {
21
+ constructor(tokenManager, modelName) {
22
+ this.tokenManager = tokenManager;
23
+ this.modelName = modelName;
24
+ }
25
+ doGenerateSpeech(options) {
26
+ return __awaiter(this, void 0, void 0, function* () {
27
+ var _a, e_1, _b, _c;
28
+ const stream = yield this.doStreamSpeech(options);
29
+ const chunks = [];
30
+ try {
31
+ for (var _d = true, stream_1 = __asyncValues(stream), stream_1_1; stream_1_1 = yield stream_1.next(), _a = stream_1_1.done, !_a; _d = true) {
32
+ _c = stream_1_1.value;
33
+ _d = false;
34
+ const chunk = _c;
35
+ if (chunk.audio.length > 0) {
36
+ chunks.push(chunk.audio);
37
+ }
38
+ }
39
+ }
40
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
41
+ finally {
42
+ try {
43
+ if (!_d && !_a && (_b = stream_1.return)) yield _b.call(stream_1);
44
+ }
45
+ finally { if (e_1) throw e_1.error; }
46
+ }
47
+ const charCount = [...options.text].reduce((acc, ch) => acc + (/[\u4e00-\u9fff]/.test(ch) ? 2 : 1), 0);
48
+ return {
49
+ audio: Buffer.concat(chunks),
50
+ usage: { characters: charCount },
51
+ };
52
+ });
53
+ }
54
+ getValidToken() {
55
+ return __awaiter(this, void 0, void 0, function* () {
56
+ return yield this.tokenManager.getValidToken();
57
+ });
58
+ }
59
+ createAsyncIterable(stream) {
60
+ const asyncStream = stream;
61
+ asyncStream[Symbol.asyncIterator] = () => {
62
+ const reader = stream.getReader();
63
+ return {
64
+ next() {
65
+ return __awaiter(this, void 0, void 0, function* () {
66
+ const { done, value } = yield reader.read();
67
+ return done ? { done: true, value: undefined } : { done: false, value };
68
+ });
69
+ },
70
+ };
71
+ };
72
+ return asyncStream;
73
+ }
74
+ }
75
+ exports.SimpleTTSModel = SimpleTTSModel;
@@ -1,6 +1,6 @@
1
1
  import type { IAbstractRequest } from "@vectorx/ai-types";
2
2
  import type { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions } from "../../model-type";
3
- import { TokenManager } from "../../tokenManager";
3
+ import type { TokenManager } from "../../tokenManager";
4
4
  import { SimpleChatModel } from "../Chat";
5
5
  export interface Wan26ImageParameters {
6
6
  negative_prompt?: string;
@@ -91,6 +91,8 @@ export declare class Wan26ImageModel extends SimpleChatModel {
91
91
  parameters?: Wan26ImageParameters;
92
92
  } & Record<string, any>): Wan26ImageRequestOptions;
93
93
  protected modelRequest(data: Wan26ImageRequestOptions, options?: ReqOptions): Promise<ReadableStream<Uint8Array> | Promise<unknown>>;
94
+ private isErrorResponse;
95
+ private normalizeErrorResponse;
94
96
  doGenerate(data: ModelRequestOptions & {
95
97
  parameters?: Wan26ImageParameters;
96
98
  }, options?: ReqOptions): Promise<DoGenerateOutput>;
@@ -157,12 +157,61 @@ class Wan26ImageModel extends Chat_1.SimpleChatModel {
157
157
  return (0, utils_1.handleResponseData)(responseData, header);
158
158
  });
159
159
  }
160
+ isErrorResponse(obj) {
161
+ if ((obj === null || obj === void 0 ? void 0 : obj.error) && typeof obj.error === "object" && obj.error.code)
162
+ return true;
163
+ if (typeof (obj === null || obj === void 0 ? void 0 : obj.code) === "string" && !(obj === null || obj === void 0 ? void 0 : obj.output))
164
+ return true;
165
+ if (typeof (obj === null || obj === void 0 ? void 0 : obj.code) === "number" && obj.code !== 0 && !(obj === null || obj === void 0 ? void 0 : obj.output))
166
+ return true;
167
+ return false;
168
+ }
169
+ normalizeErrorResponse(res) {
170
+ var _a, _b;
171
+ const err = typeof (res === null || res === void 0 ? void 0 : res.error) === "object" ? res.error : {};
172
+ const errorCode = (err === null || err === void 0 ? void 0 : err.code) || (res === null || res === void 0 ? void 0 : res.code) || "unknown_error";
173
+ const errorMessage = (err === null || err === void 0 ? void 0 : err.message) || (res === null || res === void 0 ? void 0 : res.message) || "Unknown error";
174
+ const id = (res === null || res === void 0 ? void 0 : res.id) || (res === null || res === void 0 ? void 0 : res.request_id) || "";
175
+ const created = (_a = res === null || res === void 0 ? void 0 : res.created) !== null && _a !== void 0 ? _a : Math.floor(Date.now() / 1000);
176
+ return {
177
+ id,
178
+ object: "chat.completion",
179
+ created,
180
+ model: (_b = res === null || res === void 0 ? void 0 : res.model) !== null && _b !== void 0 ? _b : this.modelName,
181
+ log_id: id,
182
+ error: `[${errorCode}] ${errorMessage}`,
183
+ code: typeof (res === null || res === void 0 ? void 0 : res.code) === "number" ? res.code : -1,
184
+ choices: [
185
+ {
186
+ index: 0,
187
+ message: {
188
+ id,
189
+ role: "assistant",
190
+ type: "error",
191
+ content: `[${errorCode}] ${errorMessage}`,
192
+ reasoning_content: "",
193
+ },
194
+ finish_reason: "error",
195
+ },
196
+ ],
197
+ usage: {
198
+ prompt_tokens: 0,
199
+ completion_tokens: 0,
200
+ knowledge_tokens: 0,
201
+ reasoning_tokens: 0,
202
+ total_tokens: 0,
203
+ },
204
+ };
205
+ }
160
206
  doGenerate(data, options) {
161
207
  return __awaiter(this, void 0, void 0, function* () {
162
208
  data.model = this.modelName;
163
209
  const body = this.convertToWan26Input(data);
164
210
  const headers = Object.assign(Object.assign({}, ((options === null || options === void 0 ? void 0 : options.headers) || {})), { "X-DashScope-Async": "enable" });
165
211
  const res = (yield this.modelRequest(body, Object.assign(Object.assign({}, options), { headers })));
212
+ if (this.isErrorResponse(res)) {
213
+ return this.normalizeErrorResponse(res);
214
+ }
166
215
  return this.normalizeStandardImageCompletion(res, this.modelName);
167
216
  });
168
217
  }
@@ -231,7 +280,9 @@ function extractFirstImageUrl(res) {
231
280
  return item.url;
232
281
  if (typeof item.image_url === "string" && item.image_url)
233
282
  return item.image_url;
234
- if (item.image_url && typeof item.image_url === "object" && typeof item.image_url.url === "string") {
283
+ if (item.image_url &&
284
+ typeof item.image_url === "object" &&
285
+ typeof item.image_url.url === "string") {
235
286
  return String(item.image_url.url);
236
287
  }
237
288
  }