@vectorx/ai-sdk 0.7.0 → 0.8.1
This diff shows the contents of publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- package/lib/ai.js +2 -0
- package/lib/models/Wan26Image/index.d.ts +98 -0
- package/lib/models/Wan26Image/index.js +243 -0
- package/lib/models/index.d.ts +4 -1
- package/lib/models/index.js +15 -1
- package/lib/tokenManager.js +4 -0
- package/package.json +2 -2

package/lib/ai.js CHANGED

@@ -66,6 +66,8 @@ class AI {
                 return new models.ReActModel(new models.QwenStyleRepaintV1Model(this.request, models.QwenStyleRepaintV1Model.BASE_URL, this.tokenManager));
             case models.MultiModalModelName.Qwen25T2iPreview:
                 return new models.ReActModel(new models.Qwen25T2iPreviewModel(this.request, models.Qwen25T2iPreviewModel.BASE_URL, this.tokenManager));
+            case models.MultiModalModelName.Wan26Image:
+                return new models.ReActModel(new models.Wan26ImageModel(this.request, models.Wan26ImageModel.BASE_URL, this.tokenManager));
             case models.MultiModalModelName.QwenVlMax:
                 return new models.ReActModel(new models.QwenVlMax(this.request, models.QwenVlMax.BASE_URL, model, this.tokenManager));
             case models.MultiModalModelName.Qwen3VlPlus:
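
The new case mirrors the adjacent wiring: the factory constructs a Wan26ImageModel against its static BASE_URL and wraps it in a ReActModel, exactly as it does for wan2.5-i2i-preview. A minimal consumer-side sketch follows; the diff does not show the name of the method containing this switch, so getModel below is a hypothetical stand-in.

// Hedged sketch: `ai` is an AI instance from the SDK; `getModel` is a
// hypothetical name for the factory method that contains the switch above.
declare const ai: { getModel(model: string): unknown };

const wan26 = ai.getModel("wan2.6-image"); // MultiModalModelName.Wan26Image
// Internally this resolves to:
//   new models.ReActModel(new models.Wan26ImageModel(request, models.Wan26ImageModel.BASE_URL, tokenManager))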

package/lib/models/Wan26Image/index.d.ts ADDED

@@ -0,0 +1,98 @@
+import type { IAbstractRequest } from "@vectorx/ai-types";
+import type { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions } from "../../model-type";
+import { TokenManager } from "../../tokenManager";
+import { SimpleChatModel } from "../Chat";
+export interface Wan26ImageParameters {
+    negative_prompt?: string;
+    size?: string;
+    enable_interleave?: boolean;
+    n?: number;
+    max_images?: number;
+    prompt_extend?: boolean;
+    watermark?: boolean;
+    stream?: boolean;
+    seed?: number;
+}
+export interface Wan26ImageAPIInput {
+    model: string;
+    input: {
+        messages: Array<{
+            role: "user";
+            content: Array<{
+                text?: string;
+                image?: string;
+            }>;
+        }>;
+    };
+    parameters?: Wan26ImageParameters;
+}
+export type Wan26ImageRequestOptions = Wan26ImageAPIInput & {
+    parameters?: Wan26ImageParameters;
+};
+export type Wan26ImageContentItem = {
+    type?: "text" | "image" | string;
+    text?: string;
+    image?: string;
+    url?: string;
+    image_url?: string | {
+        url: string;
+    };
+    b64_json?: string;
+    [key: string]: any;
+};
+export interface Wan26ImageAPIResponse {
+    async?: boolean;
+    output?: {
+        choices?: Array<{
+            finish_reason: string;
+            message: {
+                role: "assistant" | "user";
+                content: Wan26ImageContentItem[];
+            };
+        }>;
+        finished?: boolean;
+        task_status?: string;
+        task_id?: string;
+        task_metric?: {
+            TOTAL: number;
+            FAILED: number;
+            SUCCEEDED: number;
+        };
+        results?: Array<{
+            url?: string;
+            [key: string]: any;
+        }>;
+    };
+    usage?: {
+        image_count?: number;
+        input_tokens?: number;
+        output_tokens?: number;
+        total_tokens?: number;
+        size?: string;
+        width?: number;
+        height?: number;
+    };
+    request_id?: string;
+    id?: string;
+    model?: string;
+    created?: number;
+    object?: string;
+    code?: number;
+    error?: string;
+    message?: string;
+}
+export declare class Wan26ImageModel extends SimpleChatModel {
+    static BASE_URL: string;
+    static SUB_URL: string;
+    modelName: string;
+    constructor(req: IAbstractRequest, baseUrl: string, tokenManager: TokenManager);
+    protected normalizeStandardImageCompletion(res: Wan26ImageAPIResponse, fallbackModel: string): DoGenerateOutput;
+    protected convertToWan26Input(data: ModelRequestOptions & {
+        parameters?: Wan26ImageParameters;
+    } & Record<string, any>): Wan26ImageRequestOptions;
+    protected modelRequest(data: Wan26ImageRequestOptions, options?: ReqOptions): Promise<ReadableStream<Uint8Array> | Promise<unknown>>;
+    doGenerate(data: ModelRequestOptions & {
+        parameters?: Wan26ImageParameters;
+    }, options?: ReqOptions): Promise<DoGenerateOutput>;
+    doStream(data: ModelRequestOptions, options?: ReqOptions): Promise<DoStreamOutput>;
+}
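
These declarations pin down the request shape: a single user message whose content mixes { text } and { image } items, plus optional parameters. A minimal illustrative payload, assuming the published lib/ layout for the import path:

import type { Wan26ImageAPIInput } from "@vectorx/ai-sdk/lib/models/Wan26Image";

const payload: Wan26ImageAPIInput = {
    model: "wan2.6-image",
    input: {
        messages: [{
            role: "user",
            content: [
                { text: "Restyle the photo as a watercolor painting" },
                { image: "https://example.com/reference.jpg" }, // illustrative URL
            ],
        }],
    },
    parameters: {
        enable_interleave: false, // non-interleaved mode: 1-4 reference images, n outputs
        n: 2,
        prompt_extend: true,
        watermark: false,
    },
};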

package/lib/models/Wan26Image/index.js ADDED

@@ -0,0 +1,243 @@
+"use strict";
+var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+    return new (P || (P = Promise))(function (resolve, reject) {
+        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+        step((generator = generator.apply(thisArg, _arguments || [])).next());
+    });
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.Wan26ImageModel = void 0;
+const stream_1 = require("../../stream");
+const utils_1 = require("../../utils");
+const Chat_1 = require("../Chat");
+const index_1 = require("../index");
+class Wan26ImageModel extends Chat_1.SimpleChatModel {
+    constructor(req, baseUrl, tokenManager) {
+        super(req, baseUrl, Wan26ImageModel.SUB_URL, tokenManager);
+        this.modelName = index_1.modelName[index_1.MultiModalModelName.Wan26Image];
+    }
+    normalizeStandardImageCompletion(res, fallbackModel) {
+        var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q;
+        const qOutput = ((res === null || res === void 0 ? void 0 : res.output) || {});
+        if ((qOutput === null || qOutput === void 0 ? void 0 : qOutput.task_status) && (qOutput === null || qOutput === void 0 ? void 0 : qOutput.task_id)) {
+            const created = (_a = res === null || res === void 0 ? void 0 : res.created) !== null && _a !== void 0 ? _a : Math.floor(Date.now() / 1000);
+            const id = (res === null || res === void 0 ? void 0 : res.id) || (res === null || res === void 0 ? void 0 : res.request_id) || "";
+            const normalized = {
+                id,
+                object: (_b = res === null || res === void 0 ? void 0 : res.object) !== null && _b !== void 0 ? _b : "chat.completion",
+                created,
+                model: (_c = res === null || res === void 0 ? void 0 : res.model) !== null && _c !== void 0 ? _c : fallbackModel,
+                log_id: id,
+                error: (_e = (_d = res === null || res === void 0 ? void 0 : res.error) !== null && _d !== void 0 ? _d : res === null || res === void 0 ? void 0 : res.message) !== null && _e !== void 0 ? _e : "",
+                code: (_f = res === null || res === void 0 ? void 0 : res.code) !== null && _f !== void 0 ? _f : 0,
+                choices: [
+                    {
+                        index: 0,
+                        message: {
+                            id,
+                            role: "assistant",
+                            type: "async_task",
+                            content: JSON.stringify(Object.assign(Object.assign({}, ((res === null || res === void 0 ? void 0 : res.output) || {})), { request_id: (res === null || res === void 0 ? void 0 : res.request_id) || id })),
+                            reasoning_content: "",
+                        },
+                        finish_reason: "stop",
+                    },
+                ],
+                usage: mapUsageToStandard(res === null || res === void 0 ? void 0 : res.usage),
+            };
+            return normalized;
+        }
+        const contentUrl = extractFirstImageUrl(res);
+        const created = (_g = res === null || res === void 0 ? void 0 : res.created) !== null && _g !== void 0 ? _g : Math.floor(Date.now() / 1000);
+        const id = (res === null || res === void 0 ? void 0 : res.id) || (res === null || res === void 0 ? void 0 : res.request_id) || "";
+        const finishReason = (_k = (_j = (_h = qOutput === null || qOutput === void 0 ? void 0 : qOutput.choices) === null || _h === void 0 ? void 0 : _h[0]) === null || _j === void 0 ? void 0 : _j.finish_reason) !== null && _k !== void 0 ? _k : "stop";
+        const normalized = {
+            id,
+            object: (_l = res === null || res === void 0 ? void 0 : res.object) !== null && _l !== void 0 ? _l : "chat.completion",
+            created,
+            model: (_m = res === null || res === void 0 ? void 0 : res.model) !== null && _m !== void 0 ? _m : fallbackModel,
+            log_id: id,
+            error: (_p = (_o = res === null || res === void 0 ? void 0 : res.error) !== null && _o !== void 0 ? _o : res === null || res === void 0 ? void 0 : res.message) !== null && _p !== void 0 ? _p : "",
+            code: (_q = res === null || res === void 0 ? void 0 : res.code) !== null && _q !== void 0 ? _q : 0,
+            choices: [
+                {
+                    index: 0,
+                    message: {
+                        id,
+                        role: "assistant",
+                        type: "image",
+                        content: contentUrl || "",
+                        reasoning_content: "",
+                    },
+                    finish_reason: finishReason,
+                },
+            ],
+            usage: mapUsageToStandard(res === null || res === void 0 ? void 0 : res.usage),
+        };
+        return normalized;
+    }
+    convertToWan26Input(data) {
+        var _a, _b, _c;
+        const imageUrls = [];
+        const texts = [];
+        const messages = data.messages || data.history || [];
+        if (Array.isArray(messages) && messages.length > 0) {
+            const firstUser = (_a = messages.find((m) => (m === null || m === void 0 ? void 0 : m.role) === "user")) !== null && _a !== void 0 ? _a : messages[0];
+            const c = firstUser === null || firstUser === void 0 ? void 0 : firstUser.content;
+            if (typeof c === "string" && c.trim()) {
+                texts.push(c.trim());
+            }
+            else if (Array.isArray(c)) {
+                for (const p of c) {
+                    if ((p === null || p === void 0 ? void 0 : p.type) === "image_url" && ((_b = p.image_url) === null || _b === void 0 ? void 0 : _b.url)) {
+                        imageUrls.push(String(p.image_url.url));
+                    }
+                    else if ((p === null || p === void 0 ? void 0 : p.type) === "text" && typeof p.text === "string" && p.text.trim()) {
+                        texts.push(p.text.trim());
+                    }
+                }
+            }
+        }
+        if (imageUrls.length === 0 && data.image)
+            imageUrls.push(String(data.image));
+        if (imageUrls.length === 0 && Array.isArray(data.images))
+            imageUrls.push(...data.images.map(String));
+        if (texts.length === 0 && data.msg)
+            texts.push(String(data.msg));
+        if (texts.length === 0 && data.prompt)
+            texts.push(String(data.prompt));
+        const prompt = texts.join(" ").trim();
+        if (!prompt)
+            throw new Error("wan2.6-image requires a text prompt");
+        const userParams = Object.assign({}, (data.parameters || {}));
+        if (!userParams.negative_prompt && data.negative_prompt) {
+            userParams.negative_prompt = String(data.negative_prompt);
+        }
+        const enableInterleave = (_c = userParams.enable_interleave) !== null && _c !== void 0 ? _c : false;
+        const parameters = Object.assign(Object.assign({ prompt_extend: true, watermark: false, n: 1 }, userParams), { enable_interleave: enableInterleave });
+        const imgCount = imageUrls.length;
+        if (enableInterleave) {
+            if (imgCount > 1)
+                throw new Error("wan2.6-image: at most 1 input image is allowed when enable_interleave=true");
+            parameters.n = 1;
+            if (typeof parameters.max_images !== "number")
+                parameters.max_images = 5;
+        }
+        else {
+            if (imgCount < 1)
+                throw new Error("wan2.6-image: 1-4 reference images are required when enable_interleave=false");
+            if (imgCount > 4)
+                throw new Error("wan2.6-image: at most 4 reference images are allowed when enable_interleave=false");
+            if (typeof parameters.n !== "number")
+                parameters.n = 1;
+            if (parameters.n < 1 || parameters.n > 4)
+                throw new Error("wan2.6-image: n must be in the range 1-4 when enable_interleave=false");
+        }
+        const content = [{ text: prompt }];
+        imageUrls.forEach((url) => content.push({ image: url }));
+        return {
+            model: this.modelName,
+            input: { messages: [{ role: "user", content }] },
+            parameters,
+        };
+    }
+    modelRequest(data_1) {
+        return __awaiter(this, arguments, void 0, function* (data, options = {}) {
+            const fetchHeaders = yield this.createAuthHeaders(options === null || options === void 0 ? void 0 : options.headers);
+            const { data: responseData, header } = (yield this.req.fetch({
+                method: "post",
+                headers: Object.assign(Object.assign({}, fetchHeaders), ((options === null || options === void 0 ? void 0 : options.headers) || {})),
+                body: JSON.stringify(data),
+                url: `${String(this.baseUrl).replace(/\/+$/, "")}/${String(this.subUrl).replace(/^\/+/, "")}`,
+                stream: false,
+            }));
+            return (0, utils_1.handleResponseData)(responseData, header);
+        });
+    }
+    doGenerate(data, options) {
+        return __awaiter(this, void 0, void 0, function* () {
+            data.model = this.modelName;
+            const body = this.convertToWan26Input(data);
+            const headers = Object.assign(Object.assign({}, ((options === null || options === void 0 ? void 0 : options.headers) || {})), { "X-DashScope-Async": "enable" });
+            const res = (yield this.modelRequest(body, Object.assign(Object.assign({}, options), { headers })));
+            return this.normalizeStandardImageCompletion(res, this.modelName);
+        });
+    }
+    doStream(data, options) {
+        return __awaiter(this, void 0, void 0, function* () {
+            var _a, _b;
+            const nonStream = yield this.doGenerate(Object.assign({}, data), options);
+            const msg = ((_b = (_a = nonStream.choices) === null || _a === void 0 ? void 0 : _a[0]) === null || _b === void 0 ? void 0 : _b.message) || {};
+            const singleChunk = {
+                id: nonStream.id,
+                object: "chat.completion.chunk",
+                created: nonStream.created,
+                model: nonStream.model,
+                log_id: nonStream.log_id,
+                error: nonStream.error || "",
+                code: nonStream.code || 0,
+                choices: [
+                    {
+                        index: 0,
+                        message: {
+                            id: nonStream.id,
+                            role: "assistant",
+                            type: msg.type || "async_task",
+                            content: msg.content || "",
+                            reasoning_content: "",
+                        },
+                        finish_reason: "stop",
+                    },
+                ],
+                usage: nonStream.usage,
+            };
+            const stream = new stream_1.ReadableStream({
+                start(controller) {
+                    controller.enqueue(singleChunk);
+                    controller.close();
+                },
+            });
+            return (0, stream_1.createAsyncIterable)(stream);
+        });
+    }
+}
+exports.Wan26ImageModel = Wan26ImageModel;
+Wan26ImageModel.BASE_URL = "https://dashscope.aliyuncs.com";
+Wan26ImageModel.SUB_URL = "api/v1/services/aigc/image-generation/generation";
+function mapUsageToStandard(_usage) {
+    return {
+        prompt_tokens: 0,
+        completion_tokens: 0,
+        knowledge_tokens: 0,
+        reasoning_tokens: 0,
+        total_tokens: 0,
+    };
+}
+function extractFirstImageUrl(res) {
+    var _a, _b, _c;
+    const qOutput = res === null || res === void 0 ? void 0 : res.output;
+    const msg = (_b = (_a = qOutput === null || qOutput === void 0 ? void 0 : qOutput.choices) === null || _a === void 0 ? void 0 : _a[0]) === null || _b === void 0 ? void 0 : _b.message;
+    const content = msg === null || msg === void 0 ? void 0 : msg.content;
+    if (Array.isArray(content)) {
+        for (const item of content) {
+            if (!item)
+                continue;
+            if (typeof item.image === "string" && item.image)
+                return item.image;
+            if (typeof item.url === "string" && item.url)
+                return item.url;
+            if (typeof item.image_url === "string" && item.image_url)
+                return item.image_url;
+            if (item.image_url && typeof item.image_url === "object" && typeof item.image_url.url === "string") {
+                return String(item.image_url.url);
+            }
+        }
+    }
+    const firstResult = (_c = qOutput === null || qOutput === void 0 ? void 0 : qOutput.results) === null || _c === void 0 ? void 0 : _c[0];
+    if (firstResult === null || firstResult === void 0 ? void 0 : firstResult.url)
+        return String(firstResult.url);
+    return "";
+}
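
In short: convertToWan26Input flattens chat-style input (messages, or the image/images/msg/prompt fallbacks) into one user message and enforces the image-count rules; doGenerate submits the payload with the X-DashScope-Async: enable header; and normalizeStandardImageCompletion maps the reply to a standard chat completion, emitting an "async_task" choice (JSON-stringified task payload) for task submissions or an "image" choice (first extracted URL) for synchronous results. doStream simply replays the same normalized result as a single chunk. A hedged usage sketch follows, with the host-supplied request object and token manager declared rather than constructed, and input fields limited to the ones the converter actually reads:

import { Wan26ImageModel } from "@vectorx/ai-sdk/lib/models/Wan26Image";
import type { TokenManager } from "@vectorx/ai-sdk/lib/tokenManager";
import type { IAbstractRequest } from "@vectorx/ai-types";

declare const req: IAbstractRequest;      // supplied by the host app
declare const tokenManager: TokenManager; // supplied by the host app

async function generate() {
    const model = new Wan26ImageModel(req, Wan26ImageModel.BASE_URL, tokenManager);
    const out = await model.doGenerate({
        messages: [{
            role: "user",
            content: [
                { type: "text", text: "Turn this photo into a watercolor" },
                // image parts use the OpenAI-style shape the converter looks for
                { type: "image_url", image_url: { url: "https://example.com/a.jpg" } },
            ],
        }],
        parameters: { enable_interleave: false, n: 1 },
    });
    // For an async submission, message.type is "async_task" and content holds
    // the stringified task payload (task_id, task_status, request_id); polling
    // the task to completion is outside this module.
    console.log(out.choices[0].message.type, out.choices[0].message.content);
}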

package/lib/models/index.d.ts CHANGED

@@ -7,6 +7,7 @@ import { QwenImageEditModel } from "./QwenImageEdit";
 import { WanxSketchToImageLiteModel } from "./QwenSketchToImage";
 import { QwenStyleRepaintV1Model } from "./QwenStyleRepaintV1";
 import { QwenVlMax } from "./QwenVlMax";
+import { Wan26ImageModel } from "./Wan26Image";
 import { ReActModel } from "./react";
 export declare enum ModelName {
     DeepSeekR1 = "deepseek-r1",
@@ -30,6 +31,7 @@ export declare enum MultiModalModelName {
     QwenImageEdit = "qwen-image-edit",
     QwenImageEditPlus = "qwen-image-edit-plus",
     Qwen25T2iPreview = "wan2.5-i2i-preview",
+    Wan26Image = "wan2.6-image",
     WanxSketchToImageLite = "wanx-sketch-to-image-lite",
     QwenStyleRepaintV1 = "wanx-style-repaint-v1"
 }
@@ -52,6 +54,7 @@ export declare const modelName: {
     "qwen-image-edit": string;
     "qwen-image-edit-plus": string;
     "wan2.5-i2i-preview": string;
+    "wan2.6-image": string;
     "wanx-sketch-to-image-lite": string;
     "wanx-style-repaint-v1": string;
 };
@@ -63,4 +66,4 @@ export declare const MODEL_REQUEST_PATTERNS: Map<string, {
     paths: string[];
 }>;
 export declare function isModelRequestUrl(url: string): boolean;
-export { DefaultSimpleModel, QwenDocTurbo, ReActModel, toolMap, WanxSketchToImageLiteModel, QwenImageModel, QwenImageEditModel, Qwen25T2iPreviewModel, Qwen3VlPlus, QwenVlMax, QwenStyleRepaintV1Model, };
+export { DefaultSimpleModel, QwenDocTurbo, ReActModel, toolMap, WanxSketchToImageLiteModel, QwenImageModel, QwenImageEditModel, Qwen25T2iPreviewModel, Wan26ImageModel, Qwen3VlPlus, QwenVlMax, QwenStyleRepaintV1Model, };

package/lib/models/index.js CHANGED

@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.QwenStyleRepaintV1Model = exports.QwenVlMax = exports.Qwen3VlPlus = exports.Qwen25T2iPreviewModel = exports.QwenImageEditModel = exports.QwenImageModel = exports.WanxSketchToImageLiteModel = exports.toolMap = exports.ReActModel = exports.QwenDocTurbo = exports.DefaultSimpleModel = exports.MODEL_REQUEST_PATTERNS = exports.isMultiModalModel = exports.isValidModel = exports.modelName = exports.MultiModalModelName = exports.ModelName = void 0;
+exports.QwenStyleRepaintV1Model = exports.QwenVlMax = exports.Qwen3VlPlus = exports.Wan26ImageModel = exports.Qwen25T2iPreviewModel = exports.QwenImageEditModel = exports.QwenImageModel = exports.WanxSketchToImageLiteModel = exports.toolMap = exports.ReActModel = exports.QwenDocTurbo = exports.DefaultSimpleModel = exports.MODEL_REQUEST_PATTERNS = exports.isMultiModalModel = exports.isValidModel = exports.modelName = exports.MultiModalModelName = exports.ModelName = void 0;
 exports.isModelRequestUrl = isModelRequestUrl;
 const Default_1 = require("./Default");
 Object.defineProperty(exports, "DefaultSimpleModel", { enumerable: true, get: function () { return Default_1.DefaultSimpleModel; } });
@@ -20,6 +20,8 @@ const QwenStyleRepaintV1_1 = require("./QwenStyleRepaintV1");
 Object.defineProperty(exports, "QwenStyleRepaintV1Model", { enumerable: true, get: function () { return QwenStyleRepaintV1_1.QwenStyleRepaintV1Model; } });
 const QwenVlMax_1 = require("./QwenVlMax");
 Object.defineProperty(exports, "QwenVlMax", { enumerable: true, get: function () { return QwenVlMax_1.QwenVlMax; } });
+const Wan26Image_1 = require("./Wan26Image");
+Object.defineProperty(exports, "Wan26ImageModel", { enumerable: true, get: function () { return Wan26Image_1.Wan26ImageModel; } });
 const react_1 = require("./react");
 Object.defineProperty(exports, "ReActModel", { enumerable: true, get: function () { return react_1.ReActModel; } });
 var ModelName;
@@ -46,6 +48,7 @@ var MultiModalModelName;
     MultiModalModelName["QwenImageEdit"] = "qwen-image-edit";
     MultiModalModelName["QwenImageEditPlus"] = "qwen-image-edit-plus";
     MultiModalModelName["Qwen25T2iPreview"] = "wan2.5-i2i-preview";
+    MultiModalModelName["Wan26Image"] = "wan2.6-image";
     MultiModalModelName["WanxSketchToImageLite"] = "wanx-sketch-to-image-lite";
     MultiModalModelName["QwenStyleRepaintV1"] = "wanx-style-repaint-v1";
 })(MultiModalModelName || (exports.MultiModalModelName = MultiModalModelName = {}));
@@ -68,6 +71,7 @@ exports.modelName = {
     [MultiModalModelName.QwenImageEdit]: "qwen-image-edit",
     [MultiModalModelName.QwenImageEditPlus]: "qwen-image-edit-plus",
     [MultiModalModelName.Qwen25T2iPreview]: "wan2.5-i2i-preview",
+    [MultiModalModelName.Wan26Image]: "wan2.6-image",
     [MultiModalModelName.WanxSketchToImageLite]: "wanx-sketch-to-image-lite",
     [MultiModalModelName.QwenStyleRepaintV1]: "wanx-style-repaint-v1",
 };
@@ -122,6 +126,16 @@ exports.MODEL_REQUEST_PATTERNS = new Map([
             paths: ["api/v1/services/aigc/image2image/image-synthesis"],
         },
     ],
+    [
+        "wan2.6-image",
+        {
+            domain: "https://dashscope.aliyuncs.com",
+            paths: [
+                "api/v1/services/aigc/image-generation/generation",
+                "api/v1/services/aigc/multimodal-generation/generation",
+            ],
+        },
+    ],
     [
         "qwen-vl-max",
         {
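
The new registry entry recognizes wan2.6-image traffic on both DashScope generation endpoints. Consulting the map is plain Map access; how isModelRequestUrl matches URLs against these patterns internally is not shown in this diff:

import { MODEL_REQUEST_PATTERNS } from "@vectorx/ai-sdk/lib/models";

const pattern = MODEL_REQUEST_PATTERNS.get("wan2.6-image");
if (pattern) {
    const urls = pattern.paths.map((p) => `${pattern.domain}/${p}`);
    // -> https://dashscope.aliyuncs.com/api/v1/services/aigc/image-generation/generation
    // -> https://dashscope.aliyuncs.com/api/v1/services/aigc/multimodal-generation/generation
    console.log(urls);
}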

package/lib/tokenManager.js CHANGED

@@ -25,6 +25,10 @@ class TokenManager {
     }
     getValidToken() {
         return __awaiter(this, void 0, void 0, function* () {
+            if (process.env && process.env.RED_LOOK_API_KEY_TEMP) {
+                console.log("-- redlook temp key fixed --");
+                return process.env.RED_LOOK_API_KEY_TEMP;
+            }
             const now = Math.floor(Date.now() / 1000);
             if (this.currentToken && this.tokenExpiresAt > now + this.refreshBufferSeconds) {
                 return this.currentToken;
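
The added branch makes RED_LOOK_API_KEY_TEMP an unconditional override: when the variable is set, getValidToken logs a marker and returns its value, bypassing the cached token and its expiry check entirely. A sketch of the observable behavior, with the import path assumed from the published lib/ layout and an illustrative key value:

import type { TokenManager } from "@vectorx/ai-sdk/lib/tokenManager";

declare const tokenManager: TokenManager; // constructed elsewhere by the SDK

async function demo() {
    process.env.RED_LOOK_API_KEY_TEMP = "sk-temp-123"; // illustrative value
    const token = await tokenManager.getValidToken();
    // Logs "-- redlook temp key fixed --" and returns "sk-temp-123" without
    // touching the refresh logic; delete the env var to restore normal refresh.
    delete process.env.RED_LOOK_API_KEY_TEMP;
    return token;
}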

package/package.json CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@vectorx/ai-sdk",
-  "version": "0.7.0",
+  "version": "0.8.1",
   "description": "Cloud AI SDK",
   "main": "lib/index.js",
   "sideEffects": false,
@@ -22,7 +22,7 @@
   },
   "dependencies": {
     "@mattiasbuelens/web-streams-adapter": "^0.1.0",
-    "@vectorx/ai-types": "0.
+    "@vectorx/ai-types": "0.7.1",
     "langfuse": "^3.38.4",
     "openai": "^4.103.0",
     "text-encoding-shim": "^1.0.5",