@vectorx/ai-sdk 0.0.0-beta-20251112071234

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. package/README.md +1 -0
  2. package/lib/agent/index.d.ts +17 -0
  3. package/lib/agent/index.js +69 -0
  4. package/lib/ai.d.ts +16 -0
  5. package/lib/ai.js +90 -0
  6. package/lib/eventsource_parser/index.d.ts +2 -0
  7. package/lib/eventsource_parser/index.js +5 -0
  8. package/lib/eventsource_parser/parse.d.ts +2 -0
  9. package/lib/eventsource_parser/parse.js +124 -0
  10. package/lib/eventsource_parser/stream.d.ts +5 -0
  11. package/lib/eventsource_parser/stream.js +22 -0
  12. package/lib/eventsource_parser/types.d.ts +16 -0
  13. package/lib/eventsource_parser/types.js +2 -0
  14. package/lib/index.d.ts +14 -0
  15. package/lib/index.js +56 -0
  16. package/lib/model-type.d.ts +207 -0
  17. package/lib/model-type.js +24 -0
  18. package/lib/models/Chat.d.ts +14 -0
  19. package/lib/models/Chat.js +36 -0
  20. package/lib/models/Default/index.d.ts +11 -0
  21. package/lib/models/Default/index.js +68 -0
  22. package/lib/models/Qwen25T2iPreview/index.d.ts +76 -0
  23. package/lib/models/Qwen25T2iPreview/index.js +211 -0
  24. package/lib/models/QwenDocTurbo/adapters/DashScope.d.ts +25 -0
  25. package/lib/models/QwenDocTurbo/adapters/DashScope.js +179 -0
  26. package/lib/models/QwenDocTurbo/adapters/OpenAICompat.d.ts +24 -0
  27. package/lib/models/QwenDocTurbo/adapters/OpenAICompat.js +143 -0
  28. package/lib/models/QwenDocTurbo/index.d.ts +16 -0
  29. package/lib/models/QwenDocTurbo/index.js +86 -0
  30. package/lib/models/QwenDocTurbo/types.d.ts +124 -0
  31. package/lib/models/QwenDocTurbo/types.js +2 -0
  32. package/lib/models/QwenImage/index.d.ts +81 -0
  33. package/lib/models/QwenImage/index.js +208 -0
  34. package/lib/models/QwenImageEdit/index.d.ts +77 -0
  35. package/lib/models/QwenImageEdit/index.js +205 -0
  36. package/lib/models/QwenSketchToImage/index.d.ts +35 -0
  37. package/lib/models/QwenSketchToImage/index.js +155 -0
  38. package/lib/models/QwenStyleRepaintV1/index.d.ts +114 -0
  39. package/lib/models/QwenStyleRepaintV1/index.js +213 -0
  40. package/lib/models/QwenVlMax/index.d.ts +78 -0
  41. package/lib/models/QwenVlMax/index.js +121 -0
  42. package/lib/models/index.d.ts +56 -0
  43. package/lib/models/index.js +77 -0
  44. package/lib/models/react.d.ts +8 -0
  45. package/lib/models/react.js +28 -0
  46. package/lib/stream.d.ts +47 -0
  47. package/lib/stream.js +138 -0
  48. package/lib/tokenManager.d.ts +36 -0
  49. package/lib/tokenManager.js +89 -0
  50. package/lib/utils.d.ts +1 -0
  51. package/lib/utils.js +54 -0
  52. package/package.json +49 -0
@@ -0,0 +1,24 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.IAgentEnv = void 0;
4
+ exports.filterContentByTypes = filterContentByTypes;
5
+ exports.hasContentType = hasContentType;
6
+ exports.extractContentByType = extractContentByType;
7
+ function filterContentByTypes(content, supportedTypes) {
8
+ return content.filter((item) => supportedTypes.includes(item.type));
9
+ }
10
+ function hasContentType(content, type) {
11
+ if (typeof content === "string")
12
+ return false;
13
+ return content.some((item) => item.type === type);
14
+ }
15
+ function extractContentByType(content, type) {
16
+ if (typeof content === "string")
17
+ return [];
18
+ return content.filter((item) => item.type === type);
19
+ }
20
+ var IAgentEnv;
21
+ (function (IAgentEnv) {
22
+ IAgentEnv["Production"] = "production";
23
+ IAgentEnv["Development"] = "development";
24
+ })(IAgentEnv || (exports.IAgentEnv = IAgentEnv = {}));
@@ -0,0 +1,14 @@
1
import type { IAbstractRequest } from "@vectorx/ai-types";
import { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions } from "../model-type";
import { TokenManager } from "../tokenManager";
/**
 * Base class for chat-style model clients. Holds the transport (`req`),
 * the endpoint pieces (`baseUrl` + `subUrl`) and an optional token manager,
 * and leaves the actual generate/stream calls to concrete subclasses.
 */
export declare abstract class SimpleChatModel {
    /** Transport used to issue HTTP requests. */
    protected req: IAbstractRequest;
    /** Service origin the endpoint path is appended to. */
    protected baseUrl: string;
    /** Path appended to `baseUrl` for this model's endpoint. */
    protected subUrl: string;
    /** Optional bearer-token provider; required by the auth helpers below. */
    protected tokenManager?: TokenManager;
    constructor(req: IAbstractRequest, baseUrl: string, subUrl: string, tokenManager?: TokenManager);
    /** One-shot (non-streaming) completion. */
    abstract doGenerate(data: ModelRequestOptions, options?: ReqOptions): Promise<DoGenerateOutput>;
    /** Streaming completion. */
    abstract doStream(data: ModelRequestOptions, options?: ReqOptions): Promise<DoStreamOutput>;
    /** Resolves a valid token via `tokenManager`; rejects when none is set. */
    protected getValidToken(): Promise<string>;
    /** Builds JSON + Bearer-auth headers, merged with `additionalHeaders`. */
    protected createAuthHeaders(additionalHeaders?: Record<string, string>): Promise<Record<string, string>>;
}
@@ -0,0 +1,36 @@
1
+ "use strict";
2
+ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
3
+ function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
4
+ return new (P || (P = Promise))(function (resolve, reject) {
5
+ function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
6
+ function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
7
+ function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
8
+ step((generator = generator.apply(thisArg, _arguments || [])).next());
9
+ });
10
+ };
11
+ Object.defineProperty(exports, "__esModule", { value: true });
12
+ exports.SimpleChatModel = void 0;
13
+ class SimpleChatModel {
14
+ constructor(req, baseUrl, subUrl, tokenManager) {
15
+ this.req = req;
16
+ this.baseUrl = baseUrl;
17
+ this.subUrl = subUrl;
18
+ if (tokenManager)
19
+ this.tokenManager = tokenManager;
20
+ }
21
+ getValidToken() {
22
+ return __awaiter(this, void 0, void 0, function* () {
23
+ if (!this.tokenManager) {
24
+ throw new Error("TokenManager is not set");
25
+ }
26
+ return yield this.tokenManager.getValidToken();
27
+ });
28
+ }
29
+ createAuthHeaders() {
30
+ return __awaiter(this, arguments, void 0, function* (additionalHeaders = {}) {
31
+ const token = yield this.getValidToken();
32
+ return Object.assign({ "Content-Type": "application/json", Authorization: `Bearer ${token}` }, additionalHeaders);
33
+ });
34
+ }
35
+ }
36
+ exports.SimpleChatModel = SimpleChatModel;
@@ -0,0 +1,11 @@
1
import type { IAbstractRequest } from "@vectorx/ai-types";
import type { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions } from "../../model-type";
import { SimpleChatModel } from "../Chat";
import type { ModelName } from "../index";
/**
 * Generic chat-completion client for models that need no dedicated adapter.
 * Posts to the shared "conversation/chat" endpoint.
 */
export declare class DefaultSimpleModel extends SimpleChatModel {
    /** Model identifier injected into every request payload. */
    modelName: ModelName;
    constructor(req: IAbstractRequest, baseUrl: string, modelName: ModelName);
    /** Raw POST to the chat endpoint; a byte stream when `data.stream` is set, parsed JSON otherwise. */
    protected modelRequest(data: ModelRequestOptions, options?: ReqOptions): Promise<ReadableStream<Uint8Array> | Promise<unknown>>;
    /** Non-streaming completion (`stream: false` forced). */
    doGenerate(data: ModelRequestOptions, options?: ReqOptions): Promise<DoGenerateOutput>;
    /** Streaming completion; resolves to an async iterable of standardized chunks. */
    doStream(data: ModelRequestOptions, options?: ReqOptions): Promise<DoStreamOutput>;
}
@@ -0,0 +1,68 @@
1
+ "use strict";
2
+ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
3
+ function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
4
+ return new (P || (P = Promise))(function (resolve, reject) {
5
+ function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
6
+ function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
7
+ function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
8
+ step((generator = generator.apply(thisArg, _arguments || [])).next());
9
+ });
10
+ };
11
+ Object.defineProperty(exports, "__esModule", { value: true });
12
+ exports.DefaultSimpleModel = void 0;
13
+ const stream_1 = require("../../stream");
14
+ const utils_1 = require("../../utils");
15
+ const Chat_1 = require("../Chat");
16
+ const defaultOptions = {
17
+ max_tokens: 1000,
18
+ temperature: 0.7,
19
+ top_p: 1,
20
+ frequency_penalty: 0,
21
+ presence_penalty: 0,
22
+ stream: true,
23
+ platform_tools: [],
24
+ knowledge_base: [],
25
+ enable_thinking: false,
26
+ enable_search: false,
27
+ };
28
+ class DefaultSimpleModel extends Chat_1.SimpleChatModel {
29
+ constructor(req, baseUrl, modelName) {
30
+ super(req, baseUrl, "conversation/chat");
31
+ this.modelName = modelName;
32
+ }
33
+ modelRequest(data_1) {
34
+ return __awaiter(this, arguments, void 0, function* (data, options = {
35
+ timeout: 30 * 1000,
36
+ }) {
37
+ const fetchHeaders = Object.assign({ "Content-Type": "application/json" }, ((options === null || options === void 0 ? void 0 : options.headers) || {}));
38
+ data.stream && Object.assign(fetchHeaders, { Accept: "text/event-stream" });
39
+ const { data: responseData, header } = (yield this.req.fetch({
40
+ url: `${this.baseUrl}/${this.subUrl}`,
41
+ headers: Object.assign({}, fetchHeaders),
42
+ body: JSON.stringify(data),
43
+ method: "post",
44
+ stream: Boolean(data.stream),
45
+ }));
46
+ return (0, utils_1.handleResponseData)(responseData, header);
47
+ });
48
+ }
49
+ doGenerate(data, options) {
50
+ return __awaiter(this, void 0, void 0, function* () {
51
+ data.model = this.modelName;
52
+ const payload = Object.assign(Object.assign(Object.assign({}, defaultOptions), data), { stream: false });
53
+ const res = yield this.modelRequest(payload, options);
54
+ return res;
55
+ });
56
+ }
57
+ doStream(data, options) {
58
+ return __awaiter(this, void 0, void 0, function* () {
59
+ data.model = this.modelName;
60
+ const payload = Object.assign(Object.assign(Object.assign({}, defaultOptions), data), { stream: true });
61
+ const _stream = (yield this.modelRequest(payload, options));
62
+ const stream = (0, stream_1.toPolyfillReadable)(_stream);
63
+ const standardStream = (0, stream_1.intoStandardStream)(stream);
64
+ return (0, stream_1.createAsyncIterable)(standardStream);
65
+ });
66
+ }
67
+ }
68
+ exports.DefaultSimpleModel = DefaultSimpleModel;
@@ -0,0 +1,76 @@
1
import type { IAbstractRequest } from "@vectorx/ai-types";
import type { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions } from "../../model-type";
import { TokenManager } from "../../tokenManager";
import { SimpleChatModel } from "../Chat";
/** Generation knobs forwarded to the DashScope image-synthesis endpoint. */
export interface Qwen25T2iPreviewParameters {
    /** Output resolution, e.g. "1280*1280". */
    size?: string;
    /** Number of images to generate. */
    n?: number;
    /** Whether the provider may rewrite/expand the prompt. */
    prompt_extend?: boolean;
    /** Whether to stamp a watermark on the output. */
    watermark?: boolean;
    /** Random seed for reproducibility. */
    seed?: number;
}
/** Native DashScope request envelope for this model. */
export interface Qwen25T2iPreviewAPIInput {
    model: string;
    input: {
        prompt: string;
        negative_prompt?: string;
        /** Source image URLs (image-to-image input). */
        images: string[];
    };
    parameters?: Qwen25T2iPreviewParameters;
}
export type Qwen25T2iPreviewRequestOptions = Qwen25T2iPreviewAPIInput & {
    parameters?: Qwen25T2iPreviewParameters;
};
/**
 * Raw DashScope response. Either an async-task acknowledgement
 * (`output.task_status` / `output.task_id`) or a finished result with
 * `output.choices` / `output.results`.
 */
export interface Qwen25T2iPreviewAPIResponse {
    async?: boolean;
    output: {
        choices?: Array<{
            finish_reason: string;
            message: {
                role: "assistant" | "user";
                content: Array<{
                    image?: string;
                    url?: string;
                }>;
            };
        }>;
        task_status?: string;
        task_id?: string;
        task_metric?: {
            TOTAL: number;
            FAILED: number;
            SUCCEEDED: number;
        };
        results?: Array<{
            url?: string;
            orig_prompt?: string;
            actual_prompt?: string;
            [key: string]: any;
        }>;
    };
    /** Image usage reported in pixels/count rather than tokens. */
    usage?: {
        width?: number;
        height?: number;
        image_count?: number;
    };
    request_id?: string;
    id?: string;
    model?: string;
    created?: number;
    object?: string;
    code?: number;
    error?: string;
}
/**
 * Client for the DashScope "wan2.5-i2i-preview" image-to-image model.
 * Submits asynchronous generation tasks and normalizes provider responses
 * into the standard chat-completion shape.
 */
export declare class Qwen25T2iPreviewModel extends SimpleChatModel {
    static BASE_URL: string;
    static SUB_GENERATION_URL: string;
    modelName: string;
    constructor(req: IAbstractRequest, baseUrl: string, tokenManager: TokenManager);
    /** Maps a raw DashScope response onto the standard completion shape. */
    protected normalizeStandardImageCompletion(res: Qwen25T2iPreviewAPIResponse, fallbackModel: string): DoGenerateOutput;
    /** Extracts image URLs / prompts from generic options into the native envelope. */
    protected coverModelRequestToQwenInput(data: ModelRequestOptions & {
        parameters?: Qwen25T2iPreviewParameters;
    }): Qwen25T2iPreviewRequestOptions;
    /** POSTs the payload as an async DashScope task. */
    protected modelRequest(data: Qwen25T2iPreviewRequestOptions, options?: ReqOptions): Promise<ReadableStream<Uint8Array> | Promise<unknown>>;
    doGenerate(data: ModelRequestOptions, options?: ReqOptions): Promise<DoGenerateOutput>;
    /** Streaming facade: wraps the non-streaming result in a single-chunk stream. */
    doStream(data: ModelRequestOptions, options?: ReqOptions): Promise<DoStreamOutput>;
}
@@ -0,0 +1,211 @@
1
+ "use strict";
2
+ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
3
+ function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
4
+ return new (P || (P = Promise))(function (resolve, reject) {
5
+ function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
6
+ function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
7
+ function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
8
+ step((generator = generator.apply(thisArg, _arguments || [])).next());
9
+ });
10
+ };
11
+ Object.defineProperty(exports, "__esModule", { value: true });
12
+ exports.Qwen25T2iPreviewModel = void 0;
13
+ const stream_1 = require("../../stream");
14
+ const utils_1 = require("../../utils");
15
+ const Chat_1 = require("../Chat");
16
+ class Qwen25T2iPreviewModel extends Chat_1.SimpleChatModel {
17
+ constructor(req, baseUrl, tokenManager) {
18
+ super(req, baseUrl, Qwen25T2iPreviewModel.SUB_GENERATION_URL, tokenManager);
19
+ this.modelName = "wan2.5-i2i-preview";
20
+ }
21
+ normalizeStandardImageCompletion(res, fallbackModel) {
22
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r;
23
+ const qOutput = (res === null || res === void 0 ? void 0 : res.output) || {};
24
+ if ((qOutput === null || qOutput === void 0 ? void 0 : qOutput.task_status) && (qOutput === null || qOutput === void 0 ? void 0 : qOutput.task_id)) {
25
+ const created = (_a = res === null || res === void 0 ? void 0 : res.created) !== null && _a !== void 0 ? _a : Math.floor(Date.now() / 1000);
26
+ const id = (res === null || res === void 0 ? void 0 : res.id) || (res === null || res === void 0 ? void 0 : res.request_id) || "";
27
+ const normalized = {
28
+ id,
29
+ object: (_b = res === null || res === void 0 ? void 0 : res.object) !== null && _b !== void 0 ? _b : "chat.completion",
30
+ created,
31
+ model: (_c = res === null || res === void 0 ? void 0 : res.model) !== null && _c !== void 0 ? _c : fallbackModel,
32
+ log_id: id,
33
+ error: (_d = res === null || res === void 0 ? void 0 : res.error) !== null && _d !== void 0 ? _d : "",
34
+ code: (_e = res === null || res === void 0 ? void 0 : res.code) !== null && _e !== void 0 ? _e : 0,
35
+ choices: [
36
+ {
37
+ index: 0,
38
+ message: {
39
+ id,
40
+ role: "assistant",
41
+ type: "async_task",
42
+ content: JSON.stringify(Object.assign(Object.assign({}, ((res === null || res === void 0 ? void 0 : res.output) || {})), { request_id: (res === null || res === void 0 ? void 0 : res.request_id) || id })),
43
+ reasoning_content: "",
44
+ },
45
+ finish_reason: "stop",
46
+ },
47
+ ],
48
+ usage: mapUsageToStandard(res === null || res === void 0 ? void 0 : res.usage),
49
+ };
50
+ return normalized;
51
+ }
52
+ const first = ((_g = (_f = qOutput === null || qOutput === void 0 ? void 0 : qOutput.choices) === null || _f === void 0 ? void 0 : _f[0]) !== null && _g !== void 0 ? _g : null);
53
+ const message = (_h = first === null || first === void 0 ? void 0 : first.message) !== null && _h !== void 0 ? _h : {};
54
+ const contentUrl = Array.isArray(message === null || message === void 0 ? void 0 : message.content) && ((_j = message.content[0]) === null || _j === void 0 ? void 0 : _j.image)
55
+ ? String(message.content[0].image)
56
+ : Array.isArray(message === null || message === void 0 ? void 0 : message.content) && ((_k = message.content[0]) === null || _k === void 0 ? void 0 : _k.url)
57
+ ? String(message.content[0].url)
58
+ : "";
59
+ const created = (_l = res === null || res === void 0 ? void 0 : res.created) !== null && _l !== void 0 ? _l : Math.floor(Date.now() / 1000);
60
+ const id = (res === null || res === void 0 ? void 0 : res.id) || (res === null || res === void 0 ? void 0 : res.request_id) || "";
61
+ const normalized = {
62
+ id,
63
+ object: (_m = res === null || res === void 0 ? void 0 : res.object) !== null && _m !== void 0 ? _m : "chat.completion",
64
+ created,
65
+ model: (_o = res === null || res === void 0 ? void 0 : res.model) !== null && _o !== void 0 ? _o : fallbackModel,
66
+ log_id: id,
67
+ error: (_p = res === null || res === void 0 ? void 0 : res.error) !== null && _p !== void 0 ? _p : "",
68
+ code: (_q = res === null || res === void 0 ? void 0 : res.code) !== null && _q !== void 0 ? _q : 0,
69
+ choices: [
70
+ {
71
+ index: 0,
72
+ message: {
73
+ id,
74
+ role: "assistant",
75
+ type: "image",
76
+ content: contentUrl || "",
77
+ reasoning_content: "",
78
+ },
79
+ finish_reason: (_r = first === null || first === void 0 ? void 0 : first.finish_reason) !== null && _r !== void 0 ? _r : "stop",
80
+ },
81
+ ],
82
+ usage: mapUsageToStandard(res === null || res === void 0 ? void 0 : res.usage),
83
+ };
84
+ return normalized;
85
+ }
86
+ coverModelRequestToQwenInput(data) {
87
+ var _a, _b;
88
+ const imageUrls = [];
89
+ const texts = [];
90
+ let negativePrompt = "";
91
+ const messages = data.messages || data.history || [];
92
+ if (Array.isArray(messages) && messages.length > 0) {
93
+ const firstUser = (_a = messages.find((m) => (m === null || m === void 0 ? void 0 : m.role) === "user")) !== null && _a !== void 0 ? _a : messages[0];
94
+ const c = firstUser === null || firstUser === void 0 ? void 0 : firstUser.content;
95
+ if (Array.isArray(c)) {
96
+ for (const p of c) {
97
+ if ((p === null || p === void 0 ? void 0 : p.type) === "image_url" && ((_b = p.image_url) === null || _b === void 0 ? void 0 : _b.url)) {
98
+ imageUrls.push(p.image_url.url);
99
+ }
100
+ else if ((p === null || p === void 0 ? void 0 : p.type) === "text" && typeof p.text === "string" && p.text.trim()) {
101
+ texts.push(p.text.trim());
102
+ }
103
+ }
104
+ }
105
+ }
106
+ if (imageUrls.length === 0 && data.image) {
107
+ imageUrls.push(String(data.image));
108
+ }
109
+ if (imageUrls.length === 0 && data.images && Array.isArray(data.images)) {
110
+ imageUrls.push(...data.images);
111
+ }
112
+ if (texts.length === 0 && data.msg) {
113
+ texts.push(String(data.msg));
114
+ }
115
+ if (texts.length === 0 && data.prompt) {
116
+ texts.push(String(data.prompt));
117
+ }
118
+ if (data.negative_prompt) {
119
+ negativePrompt = String(data.negative_prompt);
120
+ }
121
+ if (imageUrls.length === 0)
122
+ throw new Error("Qwen25T2iPreview 需要提供至少一个图片 URL");
123
+ if (texts.length === 0)
124
+ throw new Error("Qwen25T2iPreview 需要提供至少一个文本提示词");
125
+ const prompt = texts.join(" ");
126
+ const parameters = Object.assign({ size: "1280*1280", n: 1, prompt_extend: false, watermark: false }, data.parameters);
127
+ if (negativePrompt) {
128
+ }
129
+ return {
130
+ parameters,
131
+ model: this.modelName,
132
+ input: {
133
+ prompt,
134
+ negative_prompt: negativePrompt || undefined,
135
+ images: imageUrls,
136
+ },
137
+ };
138
+ }
139
+ modelRequest(data_1) {
140
+ return __awaiter(this, arguments, void 0, function* (data, options = { timeout: 30 * 1000 }) {
141
+ const fetchHeaders = yield this.createAuthHeaders(options === null || options === void 0 ? void 0 : options.headers);
142
+ fetchHeaders["X-DashScope-Async"] = "enable";
143
+ const joinedUrl = `${String(this.baseUrl).replace(/\/+$/, "")}/${String(this.subUrl).replace(/^\/+/, "")}`;
144
+ const { data: responseData, header } = (yield this.req.fetch({
145
+ url: joinedUrl,
146
+ headers: Object.assign({}, fetchHeaders),
147
+ body: JSON.stringify(data),
148
+ method: "post",
149
+ stream: false,
150
+ }));
151
+ return (0, utils_1.handleResponseData)(responseData, header);
152
+ });
153
+ }
154
+ doGenerate(data, options) {
155
+ return __awaiter(this, void 0, void 0, function* () {
156
+ data.model = this.modelName;
157
+ const payload = this.coverModelRequestToQwenInput(data);
158
+ const res = (yield this.modelRequest(payload, options));
159
+ return this.normalizeStandardImageCompletion(res, this.modelName);
160
+ });
161
+ }
162
+ doStream(data, options) {
163
+ return __awaiter(this, void 0, void 0, function* () {
164
+ var _a, _b;
165
+ const nonStream = yield this.doGenerate(Object.assign({}, data), options);
166
+ const msg = ((_b = (_a = nonStream.choices) === null || _a === void 0 ? void 0 : _a[0]) === null || _b === void 0 ? void 0 : _b.message) || {};
167
+ const singleChunk = {
168
+ id: nonStream.id,
169
+ object: "chat.completion.chunk",
170
+ created: nonStream.created,
171
+ model: nonStream.model,
172
+ log_id: nonStream.log_id,
173
+ error: nonStream.error || "",
174
+ code: nonStream.code || 0,
175
+ choices: [
176
+ {
177
+ index: 0,
178
+ message: {
179
+ id: nonStream.id,
180
+ role: "assistant",
181
+ type: msg.type || "image",
182
+ content: msg.content || "",
183
+ reasoning_content: "",
184
+ },
185
+ finish_reason: "stop",
186
+ },
187
+ ],
188
+ usage: nonStream.usage,
189
+ };
190
+ const stream = new stream_1.ReadableStream({
191
+ start(controller) {
192
+ controller.enqueue(singleChunk);
193
+ controller.close();
194
+ },
195
+ });
196
+ return (0, stream_1.createAsyncIterable)(stream);
197
+ });
198
+ }
199
+ }
200
+ exports.Qwen25T2iPreviewModel = Qwen25T2iPreviewModel;
201
+ Qwen25T2iPreviewModel.BASE_URL = "https://dashscope.aliyuncs.com";
202
+ Qwen25T2iPreviewModel.SUB_GENERATION_URL = "api/v1/services/aigc/image2image/image-synthesis";
203
+ function mapUsageToStandard(usage) {
204
+ return {
205
+ prompt_tokens: 0,
206
+ completion_tokens: 0,
207
+ knowledge_tokens: 0,
208
+ reasoning_tokens: 0,
209
+ total_tokens: 0,
210
+ };
211
+ }
@@ -0,0 +1,25 @@
1
import type { IAbstractRequest } from "@vectorx/ai-types";
import type { BaseDoStreamOutputChunk, DoGenerateOutput, ModelRequestOptions, ReqOptions } from "../../../model-type";
import type { TokenManager } from "../../../tokenManager";
import type { QwenDocTurboApi } from "../types";
import type { QwenDocTurboAPIInput, QwenDocTurboResponse } from "../types";
/**
 * DashScope-protocol adapter for the QwenDocTurbo model: builds native
 * DashScope payloads, issues the HTTP call (SSE when streaming) and
 * normalizes one-shot responses and SSE streams into the standard
 * chat-completion shapes.
 */
export declare class DashScopeApi implements QwenDocTurboApi {
    private req;
    private baseUrl;
    private subUrl;
    private modelName;
    private tokenManager?;
    constructor(ctx: {
        req: IAbstractRequest;
        baseUrl: string;
        subUrl: string;
        modelName: string;
        tokenManager?: TokenManager;
    });
    /** JSON + Bearer-auth headers; requires a configured tokenManager. */
    private createAuthHeaders;
    /** Clamps a numeric option into [min, max], or returns the default when undefined. */
    private clamp;
    /** Maps generic request options onto the DashScope input/parameters envelope. */
    buildPayload(data: ModelRequestOptions, stream: boolean): QwenDocTurboAPIInput;
    /** POSTs the payload; a byte stream for SSE requests, parsed JSON otherwise. */
    request(payload: QwenDocTurboAPIInput, options?: ReqOptions): Promise<ReadableStream<Uint8Array> | unknown>;
    /** Converts a one-shot DashScope response into a standard completion. */
    normalizeResponse(response: QwenDocTurboResponse): DoGenerateOutput;
    /** Converts a DashScope SSE byte stream into delta chunks (DashScope sends cumulative text). */
    normalizeStream(_stream: ReadableStream<Uint8Array>): ReadableStream<BaseDoStreamOutputChunk>;
}
@@ -0,0 +1,179 @@
1
+ "use strict";
2
+ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
3
+ function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
4
+ return new (P || (P = Promise))(function (resolve, reject) {
5
+ function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
6
+ function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
7
+ function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
8
+ step((generator = generator.apply(thisArg, _arguments || [])).next());
9
+ });
10
+ };
11
+ Object.defineProperty(exports, "__esModule", { value: true });
12
+ exports.DashScopeApi = void 0;
13
+ const model_type_1 = require("../../../model-type");
14
+ const stream_1 = require("../../../stream");
15
+ const utils_1 = require("../../../utils");
16
+ class DashScopeApi {
17
+ constructor(ctx) {
18
+ this.req = ctx.req;
19
+ this.baseUrl = ctx.baseUrl;
20
+ this.subUrl = ctx.subUrl;
21
+ this.modelName = ctx.modelName;
22
+ this.tokenManager = ctx.tokenManager;
23
+ }
24
+ createAuthHeaders() {
25
+ return __awaiter(this, arguments, void 0, function* (additionalHeaders = {}) {
26
+ if (!this.tokenManager)
27
+ throw new Error("TokenManager is not set");
28
+ const token = yield this.tokenManager.getValidToken();
29
+ return Object.assign({ "Content-Type": "application/json", Authorization: `Bearer ${token}` }, additionalHeaders);
30
+ });
31
+ }
32
+ clamp(value, min, max, def) {
33
+ return value !== undefined ? Math.max(min, Math.min(max, value)) : def;
34
+ }
35
+ buildPayload(data, stream) {
36
+ const messages = (data.messages || []).map((msg) => {
37
+ var _a;
38
+ const role = msg.role;
39
+ if (role === "system") {
40
+ return {
41
+ role,
42
+ content: Array.isArray(msg.content)
43
+ ? ((_a = msg.content.find((c) => c.type === "text")) === null || _a === void 0 ? void 0 : _a.text) || ""
44
+ : msg.content,
45
+ };
46
+ }
47
+ return {
48
+ role,
49
+ content: Array.isArray(msg.content) ? (0, model_type_1.filterContentByTypes)(msg.content, ["text", "doc_url"]) : msg.content,
50
+ };
51
+ });
52
+ const parameters = Object.assign({ stream, max_tokens: this.clamp(data.max_tokens, 1, 8000, 2000), temperature: this.clamp(data.temperature, 0, 2, 1.0), top_p: this.clamp(data.top_p, 0, 1, 1.0), frequency_penalty: this.clamp(data.frequency_penalty, -2, 2, 0.0), presence_penalty: this.clamp(data.presence_penalty, -2, 2, 0.0), n: this.clamp(data.n, 1, 10, 1), incremental_output: false }, ((data === null || data === void 0 ? void 0 : data.parameters) || {}));
53
+ return {
54
+ model: this.modelName,
55
+ input: { messages },
56
+ parameters,
57
+ };
58
+ }
59
+ request(payload, options) {
60
+ return __awaiter(this, void 0, void 0, function* () {
61
+ var _a, _b;
62
+ const headers = yield this.createAuthHeaders(options === null || options === void 0 ? void 0 : options.headers);
63
+ const isStreaming = (_b = (_a = payload.parameters) === null || _a === void 0 ? void 0 : _a.stream) !== null && _b !== void 0 ? _b : false;
64
+ if (isStreaming)
65
+ headers["X-DashScope-SSE"] = "enable";
66
+ const response = (yield this.req.fetch({
67
+ method: "post",
68
+ headers: Object.assign({}, headers),
69
+ body: JSON.stringify(payload),
70
+ url: `${this.baseUrl}/${this.subUrl}`,
71
+ stream: isStreaming,
72
+ }));
73
+ const { data, header } = response;
74
+ return (0, utils_1.handleResponseData)(data, header);
75
+ });
76
+ }
77
+ normalizeResponse(response) {
78
+ var _a, _b, _c, _d, _e;
79
+ const output = response.output || {};
80
+ const choice = (_a = output.choices) === null || _a === void 0 ? void 0 : _a[0];
81
+ const requestId = response.request_id || "";
82
+ const created = Math.floor(Date.now() / 1000);
83
+ const content = ((_b = choice === null || choice === void 0 ? void 0 : choice.message) === null || _b === void 0 ? void 0 : _b.content) || output.text || "";
84
+ return {
85
+ id: requestId,
86
+ object: "chat.completion",
87
+ created,
88
+ model: this.modelName,
89
+ log_id: requestId,
90
+ error: "",
91
+ code: 0,
92
+ choices: [
93
+ {
94
+ index: 0,
95
+ message: { id: requestId, role: "assistant", type: "answer", content, reasoning_content: "" },
96
+ finish_reason: (choice === null || choice === void 0 ? void 0 : choice.finish_reason) || "stop",
97
+ },
98
+ ],
99
+ usage: {
100
+ prompt_tokens: ((_c = response.usage) === null || _c === void 0 ? void 0 : _c.input_tokens) || 0,
101
+ completion_tokens: ((_d = response.usage) === null || _d === void 0 ? void 0 : _d.output_tokens) || 0,
102
+ knowledge_tokens: 0,
103
+ reasoning_tokens: 0,
104
+ total_tokens: ((_e = response.usage) === null || _e === void 0 ? void 0 : _e.total_tokens) || 0,
105
+ },
106
+ };
107
+ }
108
+ normalizeStream(_stream) {
109
+ const modelName = this.modelName;
110
+ const stream = (0, stream_1.toPolyfillReadable)(_stream);
111
+ const raw = (0, stream_1.intoStandardStream)(stream);
112
+ let previousContent = "";
113
+ return raw.pipeThrough(new stream_1.TransformStream({
114
+ transform(chunk, controller) {
115
+ var _a, _b, _c, _d, _e;
116
+ const requestId = chunk.request_id || "";
117
+ const created = Math.floor(Date.now() / 1000);
118
+ if ("code" in chunk && chunk.code !== undefined && !("output" in chunk)) {
119
+ const errorData = chunk;
120
+ controller.enqueue({
121
+ id: errorData.request_id || requestId,
122
+ object: "chat.completion.chunk",
123
+ created,
124
+ model: modelName,
125
+ log_id: errorData.request_id || requestId,
126
+ error: errorData.message || String(errorData.code),
127
+ code: typeof errorData.code === "string" ? -1 : errorData.code || -1,
128
+ choices: [],
129
+ usage: {
130
+ prompt_tokens: 0,
131
+ completion_tokens: 0,
132
+ knowledge_tokens: 0,
133
+ reasoning_tokens: 0,
134
+ total_tokens: 0,
135
+ },
136
+ });
137
+ return;
138
+ }
139
+ const output = chunk.output || {};
140
+ const choice = (_a = output.choices) === null || _a === void 0 ? void 0 : _a[0];
141
+ const fullContent = ((_b = choice === null || choice === void 0 ? void 0 : choice.message) === null || _b === void 0 ? void 0 : _b.content) || output.text || "";
142
+ const deltaContent = fullContent.slice(previousContent.length);
143
+ previousContent = fullContent;
144
+ if (!deltaContent && (choice === null || choice === void 0 ? void 0 : choice.finish_reason) !== "stop")
145
+ return;
146
+ controller.enqueue({
147
+ id: requestId,
148
+ object: "chat.completion.chunk",
149
+ created,
150
+ model: modelName,
151
+ log_id: requestId,
152
+ error: "",
153
+ code: 0,
154
+ choices: [
155
+ {
156
+ index: 0,
157
+ message: {
158
+ id: requestId,
159
+ role: "assistant",
160
+ type: "answer",
161
+ content: deltaContent,
162
+ reasoning_content: "",
163
+ },
164
+ finish_reason: (choice === null || choice === void 0 ? void 0 : choice.finish_reason) || null,
165
+ },
166
+ ],
167
+ usage: {
168
+ prompt_tokens: ((_c = chunk.usage) === null || _c === void 0 ? void 0 : _c.input_tokens) || 0,
169
+ completion_tokens: ((_d = chunk.usage) === null || _d === void 0 ? void 0 : _d.output_tokens) || 0,
170
+ knowledge_tokens: 0,
171
+ reasoning_tokens: 0,
172
+ total_tokens: ((_e = chunk.usage) === null || _e === void 0 ? void 0 : _e.total_tokens) || 0,
173
+ },
174
+ });
175
+ },
176
+ }));
177
+ }
178
+ }
179
+ exports.DashScopeApi = DashScopeApi;