@vectorx/ai-sdk 0.1.3 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. package/{types → lib}/agent/index.d.ts +2 -2
  2. package/{types → lib}/ai.d.ts +7 -8
  3. package/lib/ai.js +20 -28
  4. package/{types → lib}/index.d.ts +3 -2
  5. package/lib/index.js +4 -3
  6. package/{types/types.d.ts → lib/model-type.d.ts} +62 -14
  7. package/lib/model-type.js +24 -0
  8. package/lib/models/Chat.d.ts +14 -0
  9. package/lib/models/Chat.js +36 -0
  10. package/lib/models/Default/index.d.ts +11 -0
  11. package/lib/models/{default.js → Default/index.js} +28 -11
  12. package/lib/models/QwenDocTurbo/index.d.ts +84 -0
  13. package/lib/models/QwenDocTurbo/index.js +178 -0
  14. package/lib/models/QwenImage/index.d.ts +81 -0
  15. package/lib/models/QwenImage/index.js +208 -0
  16. package/lib/models/QwenSketchToImage/index.d.ts +35 -0
  17. package/lib/models/QwenSketchToImage/index.js +155 -0
  18. package/lib/models/QwenStyleRepaintV1/index.d.ts +114 -0
  19. package/lib/models/QwenStyleRepaintV1/index.js +213 -0
  20. package/lib/models/QwenVlMax/index.d.ts +78 -0
  21. package/lib/models/QwenVlMax/index.js +121 -0
  22. package/lib/models/index.d.ts +50 -0
  23. package/lib/models/index.js +44 -4
  24. package/{types → lib}/models/react.d.ts +3 -2
  25. package/lib/models/react.js +3 -3
  26. package/{types → lib}/stream.d.ts +1 -8
  27. package/lib/tokenManager.d.ts +36 -0
  28. package/lib/tokenManager.js +89 -0
  29. package/lib/utils.js +2 -3
  30. package/package.json +4 -5
  31. package/lib/models/model-types.js +0 -6
  32. package/lib/types.js +0 -11
  33. package/types/models/default.d.ts +0 -13
  34. package/types/models/index.d.ts +0 -23
  35. package/types/models/model-types.d.ts +0 -131
  36. /package/{types → lib}/eventsource_parser/index.d.ts +0 -0
  37. /package/{types → lib}/eventsource_parser/parse.d.ts +0 -0
  38. /package/{types → lib}/eventsource_parser/stream.d.ts +0 -0
  39. /package/{types → lib}/eventsource_parser/types.d.ts +0 -0
  40. /package/{types → lib}/utils.d.ts +0 -0
@@ -0,0 +1,178 @@
1
+ "use strict";
2
+ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
3
+ function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
4
+ return new (P || (P = Promise))(function (resolve, reject) {
5
+ function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
6
+ function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
7
+ function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
8
+ step((generator = generator.apply(thisArg, _arguments || [])).next());
9
+ });
10
+ };
11
+ Object.defineProperty(exports, "__esModule", { value: true });
12
+ exports.QwenDocTurbo = void 0;
13
+ const model_type_1 = require("../../model-type");
14
+ const stream_1 = require("../../stream");
15
+ const utils_1 = require("../../utils");
16
+ const Chat_1 = require("../Chat");
17
+ class QwenDocTurbo extends Chat_1.SimpleChatModel {
18
+ constructor(req, baseUrl, modelName, tokenManager) {
19
+ super(req, baseUrl, QwenDocTurbo.SUB_URL, tokenManager);
20
+ this.modelName = modelName;
21
+ }
22
+ modelRequest(data, options) {
23
+ return __awaiter(this, void 0, void 0, function* () {
24
+ var _a, _b;
25
+ const fetchHeaders = yield this.createAuthHeaders(options === null || options === void 0 ? void 0 : options.headers);
26
+ const isStreaming = (_b = (_a = data.parameters) === null || _a === void 0 ? void 0 : _a.stream) !== null && _b !== void 0 ? _b : false;
27
+ if (isStreaming) {
28
+ fetchHeaders["X-DashScope-SSE"] = "enable";
29
+ }
30
+ console.log("=== QwenDocTurbo Request ===", {
31
+ method: "post",
32
+ headers: Object.assign({}, fetchHeaders),
33
+ body: JSON.stringify(data),
34
+ url: `${this.baseUrl}/${this.subUrl}`,
35
+ stream: isStreaming,
36
+ });
37
+ const { data: responseData, header } = (yield this.req.fetch({
38
+ method: "post",
39
+ headers: Object.assign({}, fetchHeaders),
40
+ body: JSON.stringify(data),
41
+ url: `${this.baseUrl}/${this.subUrl}`,
42
+ stream: isStreaming,
43
+ }));
44
+ return (0, utils_1.handleResponseData)(responseData, header);
45
+ });
46
+ }
47
+ normalizeResponse(response) {
48
+ var _a, _b, _c, _d, _e;
49
+ const output = response.output || {};
50
+ const choice = (_a = output.choices) === null || _a === void 0 ? void 0 : _a[0];
51
+ const requestId = response.request_id || "";
52
+ const created = Math.floor(Date.now() / 1000);
53
+ const content = ((_b = choice === null || choice === void 0 ? void 0 : choice.message) === null || _b === void 0 ? void 0 : _b.content) || output.text || "";
54
+ return {
55
+ id: requestId,
56
+ object: "chat.completion",
57
+ created: created,
58
+ model: this.modelName,
59
+ log_id: requestId,
60
+ error: "",
61
+ code: 0,
62
+ choices: [
63
+ {
64
+ index: 0,
65
+ message: {
66
+ id: requestId,
67
+ role: "assistant",
68
+ type: "answer",
69
+ content: content,
70
+ reasoning_content: "",
71
+ },
72
+ finish_reason: (choice === null || choice === void 0 ? void 0 : choice.finish_reason) || "stop",
73
+ },
74
+ ],
75
+ usage: {
76
+ prompt_tokens: ((_c = response.usage) === null || _c === void 0 ? void 0 : _c.input_tokens) || 0,
77
+ completion_tokens: ((_d = response.usage) === null || _d === void 0 ? void 0 : _d.output_tokens) || 0,
78
+ knowledge_tokens: 0,
79
+ reasoning_tokens: 0,
80
+ total_tokens: ((_e = response.usage) === null || _e === void 0 ? void 0 : _e.total_tokens) || 0,
81
+ },
82
+ };
83
+ }
84
+ doGenerate(data, options) {
85
+ return __awaiter(this, void 0, void 0, function* () {
86
+ const qwenDocTurboData = this.convertToQwenDocTurboRequestOptions(data, false);
87
+ const res = yield this.modelRequest(qwenDocTurboData, options);
88
+ return this.normalizeResponse(res);
89
+ });
90
+ }
91
+ doStream(data, options) {
92
+ return __awaiter(this, void 0, void 0, function* () {
93
+ const qwenDocTurboData = this.convertToQwenDocTurboRequestOptions(data, true);
94
+ const _stream = (yield this.modelRequest(qwenDocTurboData, options));
95
+ const stream = (0, stream_1.toPolyfillReadable)(_stream);
96
+ const rawStream = (0, stream_1.intoStandardStream)(stream);
97
+ const standardStream = this.normalizeStreamChunks(rawStream);
98
+ return (0, stream_1.createAsyncIterable)(standardStream);
99
+ });
100
+ }
101
+ normalizeStreamChunks(stream) {
102
+ const modelName = this.modelName;
103
+ let previousContent = "";
104
+ return stream.pipeThrough(new stream_1.TransformStream({
105
+ transform(chunk, controller) {
106
+ var _a, _b, _c, _d, _e;
107
+ const output = chunk.output || {};
108
+ const choice = (_a = output.choices) === null || _a === void 0 ? void 0 : _a[0];
109
+ const requestId = chunk.request_id || "";
110
+ const created = Math.floor(Date.now() / 1000);
111
+ const fullContent = ((_b = choice === null || choice === void 0 ? void 0 : choice.message) === null || _b === void 0 ? void 0 : _b.content) || output.text || "";
112
+ const deltaContent = fullContent.slice(previousContent.length);
113
+ previousContent = fullContent;
114
+ if (!deltaContent && (choice === null || choice === void 0 ? void 0 : choice.finish_reason) !== "stop") {
115
+ return;
116
+ }
117
+ const standardChunk = {
118
+ id: requestId,
119
+ object: "chat.completion.chunk",
120
+ created: created,
121
+ model: modelName,
122
+ log_id: requestId,
123
+ error: "",
124
+ code: 0,
125
+ choices: [
126
+ {
127
+ index: 0,
128
+ message: {
129
+ id: requestId,
130
+ role: "assistant",
131
+ type: "answer",
132
+ content: deltaContent,
133
+ reasoning_content: "",
134
+ },
135
+ finish_reason: (choice === null || choice === void 0 ? void 0 : choice.finish_reason) || null,
136
+ },
137
+ ],
138
+ usage: {
139
+ prompt_tokens: ((_c = chunk.usage) === null || _c === void 0 ? void 0 : _c.input_tokens) || 0,
140
+ completion_tokens: ((_d = chunk.usage) === null || _d === void 0 ? void 0 : _d.output_tokens) || 0,
141
+ knowledge_tokens: 0,
142
+ reasoning_tokens: 0,
143
+ total_tokens: ((_e = chunk.usage) === null || _e === void 0 ? void 0 : _e.total_tokens) || 0,
144
+ },
145
+ };
146
+ controller.enqueue(standardChunk);
147
+ },
148
+ }));
149
+ }
150
+ convertToQwenDocTurboRequestOptions(data, stream) {
151
+ const clamp = (value, min, max, defaultValue) => value !== undefined ? Math.max(min, Math.min(max, value)) : defaultValue;
152
+ const messages = (data.messages || []).map((msg) => {
153
+ var _a;
154
+ const role = msg.role;
155
+ if (role === "system") {
156
+ return {
157
+ role,
158
+ content: Array.isArray(msg.content) ? ((_a = msg.content.find((c) => c.type === "text")) === null || _a === void 0 ? void 0 : _a.text) || "" : msg.content,
159
+ };
160
+ }
161
+ return {
162
+ role,
163
+ content: Array.isArray(msg.content) ? (0, model_type_1.filterContentByTypes)(msg.content, ["text", "doc_url"]) : msg.content,
164
+ };
165
+ });
166
+ const parameters = Object.assign({ stream, max_tokens: clamp(data.max_tokens, 1, 8000, 2000), temperature: clamp(data.temperature, 0, 2, 1.0), top_p: clamp(data.top_p, 0, 1, 1.0), frequency_penalty: clamp(data.frequency_penalty, -2, 2, 0.0), presence_penalty: clamp(data.presence_penalty, -2, 2, 0.0), n: clamp(data.n, 1, 10, 1), incremental_output: false }, ((data === null || data === void 0 ? void 0 : data.parameters) || {}));
167
+ return {
168
+ model: this.modelName,
169
+ input: {
170
+ messages,
171
+ },
172
+ parameters,
173
+ };
174
+ }
175
+ }
176
+ exports.QwenDocTurbo = QwenDocTurbo;
177
+ QwenDocTurbo.BASE_URL = "https://dashscope.aliyuncs.com";
178
+ QwenDocTurbo.SUB_URL = "api/v1/services/aigc/text-generation/generation";
@@ -0,0 +1,81 @@
1
import type { IAbstractRequest } from "@vectorx/ai-types";
import type { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions } from "../../model-type";
import { TokenManager } from "../../tokenManager";
import { SimpleChatModel } from "../Chat";
/** Tuning parameters forwarded verbatim to the DashScope image endpoints. */
export interface QwenImageParameters {
    /** Content the model should avoid generating. */
    negative_prompt?: string;
    /** Output resolution as "width*height" — preset ratios or a free-form string. */
    size?: "1664*928" | "1472*1140" | "1328*1328" | "1140*1472" | "928*1664" | string;
    /** Number of images per request; the type only admits 1. */
    n?: 1;
    prompt_extend?: boolean;
    watermark?: boolean;
}
/** Raw request body for the DashScope image API (`model` + `input` envelope). */
export interface QwenImageAPIInput {
    model: string;
    input: {
        /** Chat-style input used by the synchronous generation endpoint. */
        messages?: Array<{
            role: "user";
            content: Array<{
                text: string;
            }>;
        }>;
        /** Plain prompt used instead of `messages` by the async synthesis endpoint. */
        prompt?: string;
    };
    parameters?: QwenImageParameters;
}
export type QwenImageRequestOptions = QwenImageAPIInput & {
    parameters?: QwenImageParameters;
};
/**
 * One content entry of an image response. Field presence varies by endpoint;
 * the index signature absorbs fields not modeled here.
 */
export type QwenImageContentItem = {
    image?: string;
    url?: string;
    image_url?: string | {
        url: string;
    };
    b64_json?: string;
    [key: string]: any;
};
/**
 * Raw DashScope image response: either synchronous `output.choices` with
 * image content, or an async task handle (`output.task_id`/`task_status`).
 */
export interface QwenImageAPIResponse {
    async?: boolean;
    output: {
        choices?: Array<{
            finish_reason: string;
            message: {
                role: "assistant" | "user";
                content: QwenImageContentItem[];
            };
        }>;
        /** Present when the request was accepted as an asynchronous task. */
        task_status?: string;
        task_id?: string;
        task_metric?: {
            TOTAL: number;
            FAILED: number;
            SUCCEEDED: number;
        };
    };
    /** Image endpoints report pixel dimensions/counts, not token usage. */
    usage?: {
        width?: number;
        height?: number;
        image_count?: number;
    };
    request_id?: string;
    id?: string;
    model?: string;
    created?: number;
    object?: string;
    code?: number;
    error?: string;
}
/**
 * Text-to-image model. Routes between the async synthesis endpoint and the
 * synchronous multimodal-generation endpoint depending on the request.
 */
export declare class QwenImageModel extends SimpleChatModel {
    static BASE_URL: string;
    static SUB_SYNTHESIS_URL: string;
    static SUB_GENERATION_URL: string;
    modelName: string;
    constructor(req: IAbstractRequest, baseUrl: string, tokenManager: TokenManager);
    /** Converts a raw API response into the SDK's standard completion shape. */
    protected normalizeStandardImageCompletion(res: QwenImageAPIResponse, fallbackModel: string): DoGenerateOutput;
    /** Extracts the prompt text and builds the DashScope request envelope. */
    protected coverModelRequestToQwenInput(data: ModelRequestOptions & {
        parameters?: QwenImageParameters;
    }): QwenImageRequestOptions;
    protected modelRequest(data: QwenImageRequestOptions, options?: ReqOptions): Promise<ReadableStream<Uint8Array> | Promise<unknown>>;
    doGenerate(data: ModelRequestOptions, options?: ReqOptions): Promise<DoGenerateOutput>;
    /** No true streaming: wraps the doGenerate result in a single-chunk stream. */
    doStream(data: ModelRequestOptions, options?: ReqOptions): Promise<DoStreamOutput>;
}
@@ -0,0 +1,208 @@
1
"use strict";
/**
 * QwenImageModel — text-to-image wrapper for DashScope. Supports two modes:
 * async task submission (text2image/image-synthesis, returns a task handle)
 * and synchronous generation (multimodal-generation/generation, returns an
 * image URL). Responses are normalized to the SDK's standard completion shape.
 */
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.QwenImageModel = void 0;
const stream_1 = require("../../stream");
const utils_1 = require("../../utils");
const Chat_1 = require("../Chat");
const index_1 = require("../index");
class QwenImageModel extends Chat_1.SimpleChatModel {
    constructor(req, baseUrl, tokenManager) {
        super(req, baseUrl, QwenImageModel.SUB_GENERATION_URL, tokenManager);
        this.modelName = index_1.modelName[index_1.MultiModalModelName.QwenImage];
    }
    /**
     * Converts a raw API response to the standard completion shape.
     * Async task acceptances (task_status + task_id present) are serialized
     * into a JSON "async_task" message; synchronous results yield an "image"
     * message whose content is the first image URL.
     */
    normalizeStandardImageCompletion(res, fallbackModel) {
        var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q;
        const qOutput = (res === null || res === void 0 ? void 0 : res.output) || {};
        if ((qOutput === null || qOutput === void 0 ? void 0 : qOutput.task_status) && (qOutput === null || qOutput === void 0 ? void 0 : qOutput.task_id)) {
            const created = (_a = res === null || res === void 0 ? void 0 : res.created) !== null && _a !== void 0 ? _a : Math.floor(Date.now() / 1000);
            const id = (res === null || res === void 0 ? void 0 : res.id) || (res === null || res === void 0 ? void 0 : res.request_id) || "";
            const normalized = {
                id,
                object: (_b = res === null || res === void 0 ? void 0 : res.object) !== null && _b !== void 0 ? _b : "chat.completion",
                created,
                model: (_c = res === null || res === void 0 ? void 0 : res.model) !== null && _c !== void 0 ? _c : fallbackModel,
                log_id: id,
                error: (_d = res === null || res === void 0 ? void 0 : res.error) !== null && _d !== void 0 ? _d : "",
                code: (_e = res === null || res === void 0 ? void 0 : res.code) !== null && _e !== void 0 ? _e : 0,
                choices: [
                    {
                        index: 0,
                        message: {
                            id,
                            role: "assistant",
                            type: "async_task",
                            // Serialize the whole task handle so callers can poll it.
                            content: JSON.stringify(Object.assign(Object.assign({}, ((res === null || res === void 0 ? void 0 : res.output) || {})), { request_id: (res === null || res === void 0 ? void 0 : res.request_id) || id })),
                            reasoning_content: "",
                        },
                        finish_reason: "stop",
                    },
                ],
                usage: mapUsageToStandard(res === null || res === void 0 ? void 0 : res.usage),
            };
            return normalized;
        }
        const first = ((_g = (_f = qOutput === null || qOutput === void 0 ? void 0 : qOutput.choices) === null || _f === void 0 ? void 0 : _f[0]) !== null && _g !== void 0 ? _g : null);
        const message = (_h = first === null || first === void 0 ? void 0 : first.message) !== null && _h !== void 0 ? _h : {};
        const contentUrl = Array.isArray(message === null || message === void 0 ? void 0 : message.content) && ((_j = message.content[0]) === null || _j === void 0 ? void 0 : _j.image) ? String(message.content[0].image) : "";
        const created = (_k = res === null || res === void 0 ? void 0 : res.created) !== null && _k !== void 0 ? _k : Math.floor(Date.now() / 1000);
        const id = (res === null || res === void 0 ? void 0 : res.id) || (res === null || res === void 0 ? void 0 : res.request_id) || "";
        const normalized = {
            id,
            object: (_l = res === null || res === void 0 ? void 0 : res.object) !== null && _l !== void 0 ? _l : "chat.completion",
            created,
            model: (_m = res === null || res === void 0 ? void 0 : res.model) !== null && _m !== void 0 ? _m : fallbackModel,
            log_id: id,
            error: (_o = res === null || res === void 0 ? void 0 : res.error) !== null && _o !== void 0 ? _o : "",
            code: (_p = res === null || res === void 0 ? void 0 : res.code) !== null && _p !== void 0 ? _p : 0,
            choices: [
                {
                    index: 0,
                    message: {
                        id,
                        role: "assistant",
                        type: "image",
                        content: contentUrl || "",
                        reasoning_content: "",
                    },
                    finish_reason: (_q = first === null || first === void 0 ? void 0 : first.finish_reason) !== null && _q !== void 0 ? _q : "stop",
                },
            ],
            usage: mapUsageToStandard(res === null || res === void 0 ? void 0 : res.usage),
        };
        return normalized;
    }
    /**
     * Extracts the prompt text (first user message's text part, or `data.msg`)
     * and builds the endpoint-specific input envelope. Throws when no prompt
     * text can be found.
     */
    coverModelRequestToQwenInput(data) {
        var _a;
        let text = "";
        const messages = data.messages || data.history || [];
        if (Array.isArray(messages) && messages.length > 0) {
            const firstUser = (_a = messages.find((m) => (m === null || m === void 0 ? void 0 : m.role) === "user")) !== null && _a !== void 0 ? _a : messages[0];
            const c = firstUser === null || firstUser === void 0 ? void 0 : firstUser.content;
            if (typeof c === "string" && c.trim()) {
                text = c.trim();
            }
            else if (Array.isArray(c)) {
                for (const p of c) {
                    if ((p === null || p === void 0 ? void 0 : p.type) === "text" && typeof p.text === "string" && p.text.trim()) {
                        text = p.text.trim();
                        break;
                    }
                }
            }
        }
        if (!text && data.msg)
            text = String(data.msg);
        if (!text)
            throw new Error("QwenImage 需要提供一个 text 提示词");
        const isAsync = !!(data === null || data === void 0 ? void 0 : data.async);
        return {
            parameters: data.parameters,
            model: this.modelName,
            // Async synthesis takes a flat prompt; sync generation takes messages.
            input: isAsync
                ? {
                    prompt: text,
                }
                : {
                    messages: [
                        {
                            role: "user",
                            content: [{ text }],
                        },
                    ],
                },
        };
    }
    // POSTs to the currently selected sub-URL; the async endpoint additionally
    // requires the X-DashScope-Async header.
    modelRequest(data_1) {
        return __awaiter(this, arguments, void 0, function* (data, options = { timeout: 30 * 1000 }) {
            const fetchHeaders = yield this.createAuthHeaders(options === null || options === void 0 ? void 0 : options.headers);
            if (this.subUrl === QwenImageModel.SUB_SYNTHESIS_URL) {
                fetchHeaders["X-DashScope-Async"] = "enable";
            }
            const joinedUrl = `${String(this.baseUrl).replace(/\/+$/, "")}/${String(this.subUrl).replace(/^\/+/, "")}`;
            const { data: responseData, header } = (yield this.req.fetch({
                url: joinedUrl,
                headers: Object.assign({}, fetchHeaders),
                body: JSON.stringify(data),
                method: "post",
                stream: false,
            }));
            return (0, utils_1.handleResponseData)(responseData, header);
        });
    }
    /**
     * One-shot generation. Defaults to async-task mode unless the caller
     * explicitly sets `async: false`, and selects the matching endpoint.
     */
    doGenerate(data, options) {
        return __awaiter(this, void 0, void 0, function* () {
            // Work on a shallow copy so the caller-owned request object is not
            // mutated (the original implementation wrote model/async back onto it).
            const req = Object.assign({}, data, { model: this.modelName });
            if (!Object.prototype.hasOwnProperty.call(req, "async")) {
                req.async = true;
            }
            if (req.async) {
                this.subUrl = QwenImageModel.SUB_SYNTHESIS_URL;
            }
            else {
                this.subUrl = QwenImageModel.SUB_GENERATION_URL;
            }
            const payload = this.coverModelRequestToQwenInput(req);
            const res = (yield this.modelRequest(payload, options));
            return this.normalizeStandardImageCompletion(res, this.modelName);
        });
    }
    /**
     * No true streaming exists for this endpoint: performs doGenerate and
     * emits the result as a single chat.completion.chunk.
     */
    doStream(data, options) {
        return __awaiter(this, void 0, void 0, function* () {
            var _a, _b;
            const nonStream = yield this.doGenerate(Object.assign({}, data), options);
            const msg = ((_b = (_a = nonStream.choices) === null || _a === void 0 ? void 0 : _a[0]) === null || _b === void 0 ? void 0 : _b.message) || {};
            const singleChunk = {
                id: nonStream.id,
                object: "chat.completion.chunk",
                created: nonStream.created,
                model: nonStream.model,
                log_id: nonStream.log_id,
                error: nonStream.error || "",
                code: nonStream.code || 0,
                choices: [
                    {
                        index: 0,
                        message: {
                            id: nonStream.id,
                            role: "assistant",
                            type: msg.type || "image",
                            content: msg.content || "",
                            reasoning_content: "",
                        },
                        finish_reason: "stop",
                    },
                ],
                usage: nonStream.usage,
            };
            const stream = new stream_1.ReadableStream({
                start(controller) {
                    controller.enqueue(singleChunk);
                    controller.close();
                },
            });
            return (0, stream_1.createAsyncIterable)(stream);
        });
    }
}
exports.QwenImageModel = QwenImageModel;
QwenImageModel.BASE_URL = "https://dashscope.aliyuncs.com";
QwenImageModel.SUB_SYNTHESIS_URL = "api/v1/services/aigc/text2image/image-synthesis";
QwenImageModel.SUB_GENERATION_URL = "api/v1/services/aigc/multimodal-generation/generation";
// Image endpoints report width/height/image_count rather than token counts
// (see QwenImageAPIResponse), so standard token usage is deliberately zeroed.
function mapUsageToStandard(usage) {
    return {
        prompt_tokens: 0,
        completion_tokens: 0,
        knowledge_tokens: 0,
        reasoning_tokens: 0,
        total_tokens: 0,
    };
}
@@ -0,0 +1,35 @@
1
import type { IAbstractRequest } from "@vectorx/ai-types";
import type { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions } from "../../model-type";
import { TokenManager } from "../../tokenManager";
import { SimpleChatModel } from "../Chat";
/** Tuning parameters for the wanx sketch-to-image endpoint. */
export interface WanxSketchToImageLiteParameters {
    /** Output resolution as "width*height". */
    size?: "768*768" | "720*1280" | "1280*720" | string;
    /** Number of images to generate (the implementation enforces 1-4). */
    n?: number;
    /** How strongly the sketch constrains the output (implementation enforces 0-10). */
    sketch_weight?: number;
    style?: "<auto>" | "<3d cartoon>" | "<anime>" | "<oil painting>" | "<watercolor>" | "<sketch>" | "<chinese painting>" | "<flat illustration>" | string;
    sketch_extraction?: boolean;
    sketch_color?: number[][];
}
/** Raw request body: a sketch image URL plus a text prompt. */
export interface WanxSketchToImageLiteAPIInput {
    model: string;
    input: {
        sketch_image_url: string;
        prompt: string;
    };
    parameters?: WanxSketchToImageLiteParameters;
}
export type WanxSketchToImageLiteRequestOptions = WanxSketchToImageLiteAPIInput & {
    parameters?: WanxSketchToImageLiteParameters;
};
/**
 * Sketch-to-image model. Submits an async DashScope task built from the
 * first user message's image_url + text parts.
 */
export declare class WanxSketchToImageLiteModel extends SimpleChatModel {
    static BASE_URL: string;
    static SUB_URL: string;
    modelName: string;
    constructor(req: IAbstractRequest, baseUrl: string, tokenManager: TokenManager);
    /** Validates the user message and parameter ranges, builds the request body. */
    protected convertToSketchToImageInput(data: ModelRequestOptions & {
        parameters?: WanxSketchToImageLiteParameters;
    }): WanxSketchToImageLiteRequestOptions;
    protected modelRequest(data: WanxSketchToImageLiteRequestOptions, options?: ReqOptions): Promise<ReadableStream<Uint8Array> | Promise<unknown>>;
    doGenerate(data: ModelRequestOptions, options?: ReqOptions): Promise<DoGenerateOutput>;
    /** No true streaming: wraps the doGenerate result in a single-chunk stream. */
    doStream(data: ModelRequestOptions, options?: ReqOptions): Promise<DoStreamOutput>;
}
@@ -0,0 +1,155 @@
1
"use strict";
/**
 * WanxSketchToImageLiteModel — sketch-to-image wrapper for DashScope.
 * Extracts the sketch URL and prompt from the first user message, submits
 * an async image-synthesis task, and normalizes the task acceptance into
 * the SDK's standard completion shape.
 */
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.WanxSketchToImageLiteModel = void 0;
const stream_1 = require("../../stream");
const utils_1 = require("../../utils");
const Chat_1 = require("../Chat");
const index_1 = require("../index");
class WanxSketchToImageLiteModel extends Chat_1.SimpleChatModel {
    constructor(req, baseUrl, tokenManager) {
        super(req, baseUrl, WanxSketchToImageLiteModel.SUB_URL, tokenManager);
        this.modelName = index_1.modelName[index_1.MultiModalModelName.WanxSketchToImageLite];
    }
    /**
     * Validates the request and builds the DashScope body. Requires a user
     * message containing both an image_url part (the sketch) and a text part
     * (the prompt); enforces n in [1,4] and sketch_weight in [0,10].
     * @throws Error when any required part or range check fails
     */
    convertToSketchToImageInput(data) {
        var _a;
        const messages = data.messages || [];
        if (!Array.isArray(messages) || messages.length === 0) {
            throw new Error("wanx-sketch-to-image-lite 请求需要提供 messages");
        }
        const userMessage = messages.find((m) => m.role === "user");
        if (!userMessage || !Array.isArray(userMessage.content)) {
            throw new Error("wanx-sketch-to-image-lite 请求需要提供 user 消息");
        }
        const imageContent = userMessage.content.find((c) => c.type === "image_url");
        if (!((_a = imageContent === null || imageContent === void 0 ? void 0 : imageContent.image_url) === null || _a === void 0 ? void 0 : _a.url)) {
            throw new Error("wanx-sketch-to-image-lite 请求需要提供草图图片 URL");
        }
        const sketchImageUrl = imageContent.image_url.url;
        const textContent = userMessage.content.find((c) => c.type === "text");
        if (!(textContent === null || textContent === void 0 ? void 0 : textContent.text)) {
            throw new Error("wanx-sketch-to-image-lite 请求需要提供文本提示词");
        }
        const prompt = textContent.text;
        const modelParams = data.parameters || {};
        // Caller-supplied parameters override the defaults.
        const parameters = Object.assign({ size: "768*768", n: 4, sketch_weight: 10, style: "<auto>", sketch_extraction: false, sketch_color: [] }, modelParams);
        if (parameters.n < 1 || parameters.n > 4) {
            throw new Error("生成图片数量 n 必须在 1-4 之间");
        }
        if (parameters.sketch_weight < 0 || parameters.sketch_weight > 10) {
            throw new Error("草图权重 sketch_weight 必须在 0-10 之间");
        }
        return {
            model: this.modelName,
            input: {
                sketch_image_url: sketchImageUrl,
                prompt: prompt,
            },
            parameters,
        };
    }
    // POSTs the request body; auth headers come from createAuthHeaders and
    // caller headers are merged on top.
    modelRequest(data_1) {
        return __awaiter(this, arguments, void 0, function* (data, options = {}) {
            const fetchHeaders = yield this.createAuthHeaders(options === null || options === void 0 ? void 0 : options.headers);
            const { data: responseData, header } = (yield this.req.fetch({
                method: "post",
                headers: Object.assign(Object.assign({}, fetchHeaders), ((options === null || options === void 0 ? void 0 : options.headers) || {})),
                body: JSON.stringify(data),
                url: `${this.baseUrl}/${this.subUrl}`,
                stream: false,
            }));
            return (0, utils_1.handleResponseData)(responseData, header);
        });
    }
    /**
     * Submits the async task (X-DashScope-Async header) and returns a
     * normalized completion whose message content is a JSON task handle
     * (task_id/task_status/results) for the caller to poll.
     */
    doGenerate(data, options) {
        return __awaiter(this, void 0, void 0, function* () {
            var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t;
            data.model = this.modelName;
            const body = this.convertToSketchToImageInput(data);
            const headers = Object.assign(Object.assign({}, ((options === null || options === void 0 ? void 0 : options.headers) || {})), { "X-DashScope-Async": "enable" });
            const res = (yield this.modelRequest(body, { headers }));
            const normalized = {
                id: (_b = (_a = res === null || res === void 0 ? void 0 : res.request_id) !== null && _a !== void 0 ? _a : res === null || res === void 0 ? void 0 : res.id) !== null && _b !== void 0 ? _b : "",
                object: (_c = res === null || res === void 0 ? void 0 : res.object) !== null && _c !== void 0 ? _c : "chat.completion",
                created: Math.floor(Date.now() / 1000),
                model: this.modelName,
                log_id: (_e = (_d = res === null || res === void 0 ? void 0 : res.request_id) !== null && _d !== void 0 ? _d : res === null || res === void 0 ? void 0 : res.id) !== null && _e !== void 0 ? _e : "",
                error: "",
                code: Number((_f = res === null || res === void 0 ? void 0 : res.status_code) !== null && _f !== void 0 ? _f : 0) || 0,
                choices: [
                    {
                        index: 0,
                        message: {
                            id: (_h = (_g = res === null || res === void 0 ? void 0 : res.request_id) !== null && _g !== void 0 ? _g : res === null || res === void 0 ? void 0 : res.id) !== null && _h !== void 0 ? _h : "",
                            role: "assistant",
                            type: "async_task",
                            content: JSON.stringify({
                                task_id: (_j = res === null || res === void 0 ? void 0 : res.output) === null || _j === void 0 ? void 0 : _j.task_id,
                                task_status: (_k = res === null || res === void 0 ? void 0 : res.output) === null || _k === void 0 ? void 0 : _k.task_status,
                                results: (_m = (_l = res === null || res === void 0 ? void 0 : res.output) === null || _l === void 0 ? void 0 : _l.results) !== null && _m !== void 0 ? _m : [],
                            }),
                            reasoning_content: "",
                        },
                        finish_reason: "stop",
                    },
                ],
                usage: {
                    prompt_tokens: (_p = (_o = res === null || res === void 0 ? void 0 : res.usage) === null || _o === void 0 ? void 0 : _o.prompt_tokens) !== null && _p !== void 0 ? _p : 0,
                    completion_tokens: (_r = (_q = res === null || res === void 0 ? void 0 : res.usage) === null || _q === void 0 ? void 0 : _q.completion_tokens) !== null && _r !== void 0 ? _r : 0,
                    total_tokens: (_t = (_s = res === null || res === void 0 ? void 0 : res.usage) === null || _s === void 0 ? void 0 : _s.total_tokens) !== null && _t !== void 0 ? _t : 0,
                    knowledge_tokens: 0,
                    reasoning_tokens: 0,
                },
            };
            return normalized;
        });
    }
    /**
     * No true streaming exists for this endpoint: performs doGenerate and
     * emits the result as a single chat.completion.chunk.
     */
    doStream(data, options) {
        return __awaiter(this, void 0, void 0, function* () {
            var _a, _b;
            const nonStream = yield this.doGenerate(data, options);
            const msg = ((_b = (_a = nonStream.choices) === null || _a === void 0 ? void 0 : _a[0]) === null || _b === void 0 ? void 0 : _b.message) || {};
            const singleChunk = {
                id: nonStream.id,
                object: "chat.completion.chunk",
                created: nonStream.created,
                model: nonStream.model,
                log_id: nonStream.log_id,
                error: nonStream.error || "",
                code: nonStream.code || 0,
                choices: [
                    {
                        index: 0,
                        message: {
                            id: nonStream.id,
                            role: "assistant",
                            type: msg.type || "async_task",
                            content: msg.content || "",
                            reasoning_content: "",
                        },
                        finish_reason: "stop",
                    },
                ],
                usage: nonStream.usage,
            };
            // Use the polyfilled ReadableStream from ../../stream (the sibling
            // QwenImageModel does the same); the bare global ReadableStream is
            // not guaranteed to exist in every supported runtime and would throw
            // a ReferenceError there.
            const stream = new stream_1.ReadableStream({
                start(controller) {
                    controller.enqueue(singleChunk);
                    controller.close();
                },
            });
            return (0, stream_1.createAsyncIterable)(stream);
        });
    }
}
exports.WanxSketchToImageLiteModel = WanxSketchToImageLiteModel;
WanxSketchToImageLiteModel.BASE_URL = "https://dashscope.aliyuncs.com";
WanxSketchToImageLiteModel.SUB_URL = "api/v1/services/aigc/image2image/image-synthesis";