@vectorx/ai-sdk 0.0.0-beta-20251112071234
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -0
- package/lib/agent/index.d.ts +17 -0
- package/lib/agent/index.js +69 -0
- package/lib/ai.d.ts +16 -0
- package/lib/ai.js +90 -0
- package/lib/eventsource_parser/index.d.ts +2 -0
- package/lib/eventsource_parser/index.js +5 -0
- package/lib/eventsource_parser/parse.d.ts +2 -0
- package/lib/eventsource_parser/parse.js +124 -0
- package/lib/eventsource_parser/stream.d.ts +5 -0
- package/lib/eventsource_parser/stream.js +22 -0
- package/lib/eventsource_parser/types.d.ts +16 -0
- package/lib/eventsource_parser/types.js +2 -0
- package/lib/index.d.ts +14 -0
- package/lib/index.js +56 -0
- package/lib/model-type.d.ts +207 -0
- package/lib/model-type.js +24 -0
- package/lib/models/Chat.d.ts +14 -0
- package/lib/models/Chat.js +36 -0
- package/lib/models/Default/index.d.ts +11 -0
- package/lib/models/Default/index.js +68 -0
- package/lib/models/Qwen25T2iPreview/index.d.ts +76 -0
- package/lib/models/Qwen25T2iPreview/index.js +211 -0
- package/lib/models/QwenDocTurbo/adapters/DashScope.d.ts +25 -0
- package/lib/models/QwenDocTurbo/adapters/DashScope.js +179 -0
- package/lib/models/QwenDocTurbo/adapters/OpenAICompat.d.ts +24 -0
- package/lib/models/QwenDocTurbo/adapters/OpenAICompat.js +143 -0
- package/lib/models/QwenDocTurbo/index.d.ts +16 -0
- package/lib/models/QwenDocTurbo/index.js +86 -0
- package/lib/models/QwenDocTurbo/types.d.ts +124 -0
- package/lib/models/QwenDocTurbo/types.js +2 -0
- package/lib/models/QwenImage/index.d.ts +81 -0
- package/lib/models/QwenImage/index.js +208 -0
- package/lib/models/QwenImageEdit/index.d.ts +77 -0
- package/lib/models/QwenImageEdit/index.js +205 -0
- package/lib/models/QwenSketchToImage/index.d.ts +35 -0
- package/lib/models/QwenSketchToImage/index.js +155 -0
- package/lib/models/QwenStyleRepaintV1/index.d.ts +114 -0
- package/lib/models/QwenStyleRepaintV1/index.js +213 -0
- package/lib/models/QwenVlMax/index.d.ts +78 -0
- package/lib/models/QwenVlMax/index.js +121 -0
- package/lib/models/index.d.ts +56 -0
- package/lib/models/index.js +77 -0
- package/lib/models/react.d.ts +8 -0
- package/lib/models/react.js +28 -0
- package/lib/stream.d.ts +47 -0
- package/lib/stream.js +138 -0
- package/lib/tokenManager.d.ts +36 -0
- package/lib/tokenManager.js +89 -0
- package/lib/utils.d.ts +1 -0
- package/lib/utils.js +54 -0
- package/package.json +49 -0
package/lib/models/QwenImage/index.js
@@ -0,0 +1,208 @@
+"use strict";
+var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+    return new (P || (P = Promise))(function (resolve, reject) {
+        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+        step((generator = generator.apply(thisArg, _arguments || [])).next());
+    });
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.QwenImageModel = void 0;
+const stream_1 = require("../../stream");
+const utils_1 = require("../../utils");
+const Chat_1 = require("../Chat");
+const index_1 = require("../index");
+class QwenImageModel extends Chat_1.SimpleChatModel {
+    constructor(req, baseUrl, tokenManager) {
+        super(req, baseUrl, QwenImageModel.SUB_GENERATION_URL, tokenManager);
+        this.modelName = index_1.modelName[index_1.MultiModalModelName.QwenImage];
+    }
+    normalizeStandardImageCompletion(res, fallbackModel) {
+        var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q;
+        const qOutput = (res === null || res === void 0 ? void 0 : res.output) || {};
+        if ((qOutput === null || qOutput === void 0 ? void 0 : qOutput.task_status) && (qOutput === null || qOutput === void 0 ? void 0 : qOutput.task_id)) {
+            const created = (_a = res === null || res === void 0 ? void 0 : res.created) !== null && _a !== void 0 ? _a : Math.floor(Date.now() / 1000);
+            const id = (res === null || res === void 0 ? void 0 : res.id) || (res === null || res === void 0 ? void 0 : res.request_id) || "";
+            const normalized = {
+                id,
+                object: (_b = res === null || res === void 0 ? void 0 : res.object) !== null && _b !== void 0 ? _b : "chat.completion",
+                created,
+                model: (_c = res === null || res === void 0 ? void 0 : res.model) !== null && _c !== void 0 ? _c : fallbackModel,
+                log_id: id,
+                error: (_d = res === null || res === void 0 ? void 0 : res.error) !== null && _d !== void 0 ? _d : "",
+                code: (_e = res === null || res === void 0 ? void 0 : res.code) !== null && _e !== void 0 ? _e : 0,
+                choices: [
+                    {
+                        index: 0,
+                        message: {
+                            id,
+                            role: "assistant",
+                            type: "async_task",
+                            content: JSON.stringify(Object.assign(Object.assign({}, ((res === null || res === void 0 ? void 0 : res.output) || {})), { request_id: (res === null || res === void 0 ? void 0 : res.request_id) || id })),
+                            reasoning_content: "",
+                        },
+                        finish_reason: "stop",
+                    },
+                ],
+                usage: mapUsageToStandard(res === null || res === void 0 ? void 0 : res.usage),
+            };
+            return normalized;
+        }
+        const first = ((_g = (_f = qOutput === null || qOutput === void 0 ? void 0 : qOutput.choices) === null || _f === void 0 ? void 0 : _f[0]) !== null && _g !== void 0 ? _g : null);
+        const message = (_h = first === null || first === void 0 ? void 0 : first.message) !== null && _h !== void 0 ? _h : {};
+        const contentUrl = Array.isArray(message === null || message === void 0 ? void 0 : message.content) && ((_j = message.content[0]) === null || _j === void 0 ? void 0 : _j.image) ? String(message.content[0].image) : "";
+        const created = (_k = res === null || res === void 0 ? void 0 : res.created) !== null && _k !== void 0 ? _k : Math.floor(Date.now() / 1000);
+        const id = (res === null || res === void 0 ? void 0 : res.id) || (res === null || res === void 0 ? void 0 : res.request_id) || "";
+        const normalized = {
+            id,
+            object: (_l = res === null || res === void 0 ? void 0 : res.object) !== null && _l !== void 0 ? _l : "chat.completion",
+            created,
+            model: (_m = res === null || res === void 0 ? void 0 : res.model) !== null && _m !== void 0 ? _m : fallbackModel,
+            log_id: id,
+            error: (_o = res === null || res === void 0 ? void 0 : res.error) !== null && _o !== void 0 ? _o : "",
+            code: (_p = res === null || res === void 0 ? void 0 : res.code) !== null && _p !== void 0 ? _p : 0,
+            choices: [
+                {
+                    index: 0,
+                    message: {
+                        id,
+                        role: "assistant",
+                        type: "image",
+                        content: contentUrl || "",
+                        reasoning_content: "",
+                    },
+                    finish_reason: (_q = first === null || first === void 0 ? void 0 : first.finish_reason) !== null && _q !== void 0 ? _q : "stop",
+                },
+            ],
+            usage: mapUsageToStandard(res === null || res === void 0 ? void 0 : res.usage),
+        };
+        return normalized;
+    }
+    coverModelRequestToQwenInput(data) {
+        var _a;
+        let text = "";
+        const messages = data.messages || data.history || [];
+        if (Array.isArray(messages) && messages.length > 0) {
+            const firstUser = (_a = messages.find((m) => (m === null || m === void 0 ? void 0 : m.role) === "user")) !== null && _a !== void 0 ? _a : messages[0];
+            const c = firstUser === null || firstUser === void 0 ? void 0 : firstUser.content;
+            if (typeof c === "string" && c.trim()) {
+                text = c.trim();
+            }
+            else if (Array.isArray(c)) {
+                for (const p of c) {
+                    if ((p === null || p === void 0 ? void 0 : p.type) === "text" && typeof p.text === "string" && p.text.trim()) {
+                        text = p.text.trim();
+                        break;
+                    }
+                }
+            }
+        }
+        if (!text && data.msg)
+            text = String(data.msg);
+        if (!text)
+            throw new Error("QwenImage 需要提供一个 text 提示词");
+        const isAsync = !!(data === null || data === void 0 ? void 0 : data.async);
+        return {
+            parameters: data.parameters,
+            model: this.modelName,
+            input: isAsync
+                ? {
+                    prompt: text,
+                }
+                : {
+                    messages: [
+                        {
+                            role: "user",
+                            content: [{ text }],
+                        },
+                    ],
+                },
+        };
+    }
+    modelRequest(data_1) {
+        return __awaiter(this, arguments, void 0, function* (data, options = { timeout: 30 * 1000 }) {
+            const fetchHeaders = yield this.createAuthHeaders(options === null || options === void 0 ? void 0 : options.headers);
+            if (this.subUrl === QwenImageModel.SUB_SYNTHESIS_URL) {
+                fetchHeaders["X-DashScope-Async"] = "enable";
+            }
+            const joinedUrl = `${String(this.baseUrl).replace(/\/+$/, "")}/${String(this.subUrl).replace(/^\/+/, "")}`;
+            const { data: responseData, header } = (yield this.req.fetch({
+                url: joinedUrl,
+                headers: Object.assign({}, fetchHeaders),
+                body: JSON.stringify(data),
+                method: "post",
+                stream: false,
+            }));
+            return (0, utils_1.handleResponseData)(responseData, header);
+        });
+    }
+    doGenerate(data, options) {
+        return __awaiter(this, void 0, void 0, function* () {
+            data.model = this.modelName;
+            if (!Object.prototype.hasOwnProperty.call(data, "async")) {
+                data.async = true;
+            }
+            if (data.async) {
+                this.subUrl = QwenImageModel.SUB_SYNTHESIS_URL;
+            }
+            else {
+                this.subUrl = QwenImageModel.SUB_GENERATION_URL;
+            }
+            const payload = this.coverModelRequestToQwenInput(data);
+            const res = (yield this.modelRequest(payload, options));
+            return this.normalizeStandardImageCompletion(res, this.modelName);
+        });
+    }
+    doStream(data, options) {
+        return __awaiter(this, void 0, void 0, function* () {
+            var _a, _b;
+            const nonStream = yield this.doGenerate(Object.assign({}, data), options);
+            const msg = ((_b = (_a = nonStream.choices) === null || _a === void 0 ? void 0 : _a[0]) === null || _b === void 0 ? void 0 : _b.message) || {};
+            const singleChunk = {
+                id: nonStream.id,
+                object: "chat.completion.chunk",
+                created: nonStream.created,
+                model: nonStream.model,
+                log_id: nonStream.log_id,
+                error: nonStream.error || "",
+                code: nonStream.code || 0,
+                choices: [
+                    {
+                        index: 0,
+                        message: {
+                            id: nonStream.id,
+                            role: "assistant",
+                            type: msg.type || "image",
+                            content: msg.content || "",
+                            reasoning_content: "",
+                        },
+                        finish_reason: "stop",
+                    },
+                ],
+                usage: nonStream.usage,
+            };
+            const stream = new stream_1.ReadableStream({
+                start(controller) {
+                    controller.enqueue(singleChunk);
+                    controller.close();
+                },
+            });
+            return (0, stream_1.createAsyncIterable)(stream);
+        });
+    }
+}
+exports.QwenImageModel = QwenImageModel;
+QwenImageModel.BASE_URL = "https://dashscope.aliyuncs.com";
+QwenImageModel.SUB_SYNTHESIS_URL = "api/v1/services/aigc/text2image/image-synthesis";
+QwenImageModel.SUB_GENERATION_URL = "api/v1/services/aigc/multimodal-generation/generation";
+function mapUsageToStandard(usage) {
+    return {
+        prompt_tokens: 0,
+        completion_tokens: 0,
+        knowledge_tokens: 0,
+        reasoning_tokens: 0,
+        total_tokens: 0,
+    };
+}

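The QwenImage implementation above builds one of two DashScope payloads depending on the `async` flag that `doGenerate` defaults to `true`: a task-based text2image synthesis request (`input.prompt`, sent with the `X-DashScope-Async: enable` header) or a synchronous multimodal-generation request (`input.messages`). The following is a minimal standalone TypeScript sketch of those two payload shapes; the model name "qwen-image", the prompt, and the `size` parameter are illustrative assumptions, not values taken from this package (the real model name is resolved from ../index).

// Sketch of the two request bodies coverModelRequestToQwenInput produces.
type QwenImagePayload = {
    model: string;
    parameters?: Record<string, unknown>;
    input:
        | { prompt: string } // async path -> text2image/image-synthesis
        | { messages: Array<{ role: "user"; content: Array<{ text: string }> }> }; // sync path -> multimodal-generation
};

const prompt = "a watercolor lighthouse at dusk"; // illustrative prompt

// async === true (the default applied in doGenerate): task-based synthesis endpoint.
const asyncPayload: QwenImagePayload = {
    model: "qwen-image", // assumed name; the package reads it from index_1.modelName
    parameters: { size: "1024*1024" }, // hypothetical parameter, passed through untouched
    input: { prompt },
};

// async === false: single-shot multimodal-generation endpoint.
const syncPayload: QwenImagePayload = {
    model: "qwen-image",
    input: { messages: [{ role: "user", content: [{ text: prompt }] }] },
};

console.log(JSON.stringify(asyncPayload, null, 2));
console.log(JSON.stringify(syncPayload, null, 2));
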
package/lib/models/QwenImageEdit/index.d.ts
@@ -0,0 +1,77 @@
+import type { IAbstractRequest } from "@vectorx/ai-types";
+import type { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions } from "../../model-type";
+import { TokenManager } from "../../tokenManager";
+import { SimpleChatModel } from "../Chat";
+export interface QwenImageEditParameters {
+    negative_prompt?: string;
+    watermark?: boolean;
+}
+export interface QwenImageEditAPIInput {
+    model: string;
+    input: {
+        messages: Array<{
+            role: "user";
+            content: Array<{
+                image?: string;
+                text?: string;
+            }>;
+        }>;
+    };
+    parameters?: QwenImageEditParameters;
+}
+export type QwenImageEditRequestOptions = QwenImageEditAPIInput & {
+    parameters?: QwenImageEditParameters;
+};
+export type QwenImageEditContentItem = {
+    image?: string;
+    url?: string;
+    image_url?: string | {
+        url: string;
+    };
+    b64_json?: string;
+    [key: string]: any;
+};
+export interface QwenImageEditAPIResponse {
+    async?: boolean;
+    output: {
+        choices?: Array<{
+            finish_reason: string;
+            message: {
+                role: "assistant" | "user";
+                content: QwenImageEditContentItem[];
+            };
+        }>;
+        task_status?: string;
+        task_id?: string;
+        task_metric?: {
+            TOTAL: number;
+            FAILED: number;
+            SUCCEEDED: number;
+        };
+    };
+    usage?: {
+        width?: number;
+        height?: number;
+        image_count?: number;
+    };
+    request_id?: string;
+    id?: string;
+    model?: string;
+    created?: number;
+    object?: string;
+    code?: number;
+    error?: string;
+}
+export declare class QwenImageEditModel extends SimpleChatModel {
+    static BASE_URL: string;
+    static SUB_GENERATION_URL: string;
+    modelName: string;
+    constructor(req: IAbstractRequest, baseUrl: string, tokenManager: TokenManager);
+    protected normalizeStandardImageEditCompletion(res: QwenImageEditAPIResponse, fallbackModel: string): DoGenerateOutput;
+    protected coverModelRequestToQwenInput(data: ModelRequestOptions & {
+        parameters?: QwenImageEditParameters;
+    }): QwenImageEditRequestOptions;
+    protected modelRequest(data: QwenImageEditRequestOptions, options?: ReqOptions): Promise<ReadableStream<Uint8Array> | Promise<unknown>>;
+    doGenerate(data: ModelRequestOptions, options?: ReqOptions): Promise<DoGenerateOutput>;
+    doStream(data: ModelRequestOptions, options?: ReqOptions): Promise<DoStreamOutput>;
+}

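The declarations above fix the edit payload to a single user message whose content array mixes `image` and `text` items, plus optional `negative_prompt`/`watermark` parameters. Below is a minimal literal that satisfies `QwenImageEditAPIInput`, with the relevant interfaces repeated locally so the snippet compiles on its own; the image URL, instruction text, and parameter values are illustrative assumptions. The ordering (images before texts) mirrors what the implementation that follows produces.

// Local copies of the shapes declared in QwenImageEdit/index.d.ts, so this stands alone.
interface QwenImageEditParameters {
    negative_prompt?: string;
    watermark?: boolean;
}
interface QwenImageEditAPIInput {
    model: string;
    input: {
        messages: Array<{ role: "user"; content: Array<{ image?: string; text?: string }> }>;
    };
    parameters?: QwenImageEditParameters;
}

// Hypothetical edit request: source image first, then the instruction text.
const editRequest: QwenImageEditAPIInput = {
    model: "qwen-image-edit",
    input: {
        messages: [
            {
                role: "user",
                content: [
                    { image: "https://example.com/cat.png" }, // illustrative URL
                    { text: "replace the background with a beach" },
                ],
            },
        ],
    },
    parameters: { negative_prompt: "blurry", watermark: false },
};

console.log(JSON.stringify(editRequest, null, 2));
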
package/lib/models/QwenImageEdit/index.js
@@ -0,0 +1,205 @@
+"use strict";
+var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+    return new (P || (P = Promise))(function (resolve, reject) {
+        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+        step((generator = generator.apply(thisArg, _arguments || [])).next());
+    });
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.QwenImageEditModel = void 0;
+const stream_1 = require("../../stream");
+const utils_1 = require("../../utils");
+const Chat_1 = require("../Chat");
+class QwenImageEditModel extends Chat_1.SimpleChatModel {
+    constructor(req, baseUrl, tokenManager) {
+        super(req, baseUrl, QwenImageEditModel.SUB_GENERATION_URL, tokenManager);
+        this.modelName = "qwen-image-edit";
+    }
+    normalizeStandardImageEditCompletion(res, fallbackModel) {
+        var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q;
+        const qOutput = (res === null || res === void 0 ? void 0 : res.output) || {};
+        if ((qOutput === null || qOutput === void 0 ? void 0 : qOutput.task_status) && (qOutput === null || qOutput === void 0 ? void 0 : qOutput.task_id)) {
+            const created = (_a = res === null || res === void 0 ? void 0 : res.created) !== null && _a !== void 0 ? _a : Math.floor(Date.now() / 1000);
+            const id = (res === null || res === void 0 ? void 0 : res.id) || (res === null || res === void 0 ? void 0 : res.request_id) || "";
+            const normalized = {
+                id,
+                object: (_b = res === null || res === void 0 ? void 0 : res.object) !== null && _b !== void 0 ? _b : "chat.completion",
+                created,
+                model: (_c = res === null || res === void 0 ? void 0 : res.model) !== null && _c !== void 0 ? _c : fallbackModel,
+                log_id: id,
+                error: (_d = res === null || res === void 0 ? void 0 : res.error) !== null && _d !== void 0 ? _d : "",
+                code: (_e = res === null || res === void 0 ? void 0 : res.code) !== null && _e !== void 0 ? _e : 0,
+                choices: [
+                    {
+                        index: 0,
+                        message: {
+                            id,
+                            role: "assistant",
+                            type: "async_task",
+                            content: JSON.stringify(Object.assign(Object.assign({}, ((res === null || res === void 0 ? void 0 : res.output) || {})), { request_id: (res === null || res === void 0 ? void 0 : res.request_id) || id })),
+                            reasoning_content: "",
+                        },
+                        finish_reason: "stop",
+                    },
+                ],
+                usage: mapUsageToStandard(res === null || res === void 0 ? void 0 : res.usage),
+            };
+            return normalized;
+        }
+        const first = ((_g = (_f = qOutput === null || qOutput === void 0 ? void 0 : qOutput.choices) === null || _f === void 0 ? void 0 : _f[0]) !== null && _g !== void 0 ? _g : null);
+        const message = (_h = first === null || first === void 0 ? void 0 : first.message) !== null && _h !== void 0 ? _h : {};
+        const contentUrl = Array.isArray(message === null || message === void 0 ? void 0 : message.content) && ((_j = message.content[0]) === null || _j === void 0 ? void 0 : _j.image) ? String(message.content[0].image) : "";
+        const created = (_k = res === null || res === void 0 ? void 0 : res.created) !== null && _k !== void 0 ? _k : Math.floor(Date.now() / 1000);
+        const id = (res === null || res === void 0 ? void 0 : res.id) || (res === null || res === void 0 ? void 0 : res.request_id) || "";
+        const normalized = {
+            id,
+            object: (_l = res === null || res === void 0 ? void 0 : res.object) !== null && _l !== void 0 ? _l : "chat.completion",
+            created,
+            model: (_m = res === null || res === void 0 ? void 0 : res.model) !== null && _m !== void 0 ? _m : fallbackModel,
+            log_id: id,
+            error: (_o = res === null || res === void 0 ? void 0 : res.error) !== null && _o !== void 0 ? _o : "",
+            code: (_p = res === null || res === void 0 ? void 0 : res.code) !== null && _p !== void 0 ? _p : 0,
+            choices: [
+                {
+                    index: 0,
+                    message: {
+                        id,
+                        role: "assistant",
+                        type: "image",
+                        content: contentUrl || "",
+                        reasoning_content: "",
+                    },
+                    finish_reason: (_q = first === null || first === void 0 ? void 0 : first.finish_reason) !== null && _q !== void 0 ? _q : "stop",
+                },
+            ],
+            usage: mapUsageToStandard(res === null || res === void 0 ? void 0 : res.usage),
+        };
+        return normalized;
+    }
+    coverModelRequestToQwenInput(data) {
+        var _a, _b;
+        const imageUrls = [];
+        const texts = [];
+        const messages = data.messages || data.history || [];
+        if (Array.isArray(messages) && messages.length > 0) {
+            const firstUser = (_a = messages.find((m) => (m === null || m === void 0 ? void 0 : m.role) === "user")) !== null && _a !== void 0 ? _a : messages[0];
+            const c = firstUser === null || firstUser === void 0 ? void 0 : firstUser.content;
+            if (Array.isArray(c)) {
+                for (const p of c) {
+                    if ((p === null || p === void 0 ? void 0 : p.type) === "image_url" && ((_b = p.image_url) === null || _b === void 0 ? void 0 : _b.url)) {
+                        imageUrls.push(p.image_url.url);
+                    }
+                    else if ((p === null || p === void 0 ? void 0 : p.type) === "text" && typeof p.text === "string" && p.text.trim()) {
+                        texts.push(p.text.trim());
+                    }
+                }
+            }
+        }
+        if (imageUrls.length === 0 && data.image) {
+            imageUrls.push(String(data.image));
+        }
+        if (texts.length === 0 && data.msg) {
+            texts.push(String(data.msg));
+        }
+        if (texts.length === 0 && data.prompt) {
+            texts.push(String(data.prompt));
+        }
+        if (imageUrls.length === 0)
+            throw new Error("QwenImageEdit 需要提供至少一个图片 URL");
+        if (texts.length === 0)
+            throw new Error("QwenImageEdit 需要提供至少一个文本提示词");
+        const content = [];
+        imageUrls.forEach((url) => {
+            content.push({ image: url });
+        });
+        texts.forEach((text) => {
+            content.push({ text });
+        });
+        return {
+            parameters: data.parameters,
+            model: this.modelName,
+            input: {
+                messages: [
+                    {
+                        role: "user",
+                        content,
+                    },
+                ],
+            },
+        };
+    }
+    modelRequest(data_1) {
+        return __awaiter(this, arguments, void 0, function* (data, options = { timeout: 30 * 1000 }) {
+            const fetchHeaders = yield this.createAuthHeaders(options === null || options === void 0 ? void 0 : options.headers);
+            const joinedUrl = `${String(this.baseUrl).replace(/\/+$/, "")}/${String(this.subUrl).replace(/^\/+/, "")}`;
+            const { data: responseData, header } = (yield this.req.fetch({
+                url: joinedUrl,
+                headers: Object.assign({}, fetchHeaders),
+                body: JSON.stringify(data),
+                method: "post",
+                stream: false,
+            }));
+            return (0, utils_1.handleResponseData)(responseData, header);
+        });
+    }
+    doGenerate(data, options) {
+        return __awaiter(this, void 0, void 0, function* () {
+            data.model = this.modelName;
+            const payload = this.coverModelRequestToQwenInput(data);
+            const res = (yield this.modelRequest(payload, options));
+            return this.normalizeStandardImageEditCompletion(res, this.modelName);
+        });
+    }
+    doStream(data, options) {
+        return __awaiter(this, void 0, void 0, function* () {
+            var _a, _b;
+            const nonStream = yield this.doGenerate(Object.assign({}, data), options);
+            const msg = ((_b = (_a = nonStream.choices) === null || _a === void 0 ? void 0 : _a[0]) === null || _b === void 0 ? void 0 : _b.message) || {};
+            const singleChunk = {
+                id: nonStream.id,
+                object: "chat.completion.chunk",
+                created: nonStream.created,
+                model: nonStream.model,
+                log_id: nonStream.log_id,
+                error: nonStream.error || "",
+                code: nonStream.code || 0,
+                choices: [
+                    {
+                        index: 0,
+                        message: {
+                            id: nonStream.id,
+                            role: "assistant",
+                            type: msg.type || "image",
+                            content: msg.content || "",
+                            reasoning_content: "",
+                        },
+                        finish_reason: "stop",
+                    },
+                ],
+                usage: nonStream.usage,
+            };
+            const stream = new stream_1.ReadableStream({
+                start(controller) {
+                    controller.enqueue(singleChunk);
+                    controller.close();
+                },
+            });
+            return (0, stream_1.createAsyncIterable)(stream);
+        });
+    }
+}
+exports.QwenImageEditModel = QwenImageEditModel;
+QwenImageEditModel.BASE_URL = "https://dashscope.aliyuncs.com";
+QwenImageEditModel.SUB_GENERATION_URL = "api/v1/services/aigc/multimodal-generation/generation";
+function mapUsageToStandard(usage) {
+    return {
+        prompt_tokens: 0,
+        completion_tokens: 0,
+        knowledge_tokens: 0,
+        reasoning_tokens: 0,
+        total_tokens: 0,
+    };
+}

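`doStream` above does not actually stream: it awaits `doGenerate`, wraps the single normalized completion in a one-chunk `ReadableStream`, and hands that to `createAsyncIterable`. The sketch below reproduces that single-chunk pattern in standalone TypeScript (Node 18+ provides a global `ReadableStream`); the `createAsyncIterable` helper here is a plausible stand-in for the package's ../../stream export, not its actual implementation, and the chunk content is illustrative.

// Stand-in for the package's createAsyncIterable: drain a ReadableStream as an async generator.
async function* createAsyncIterable<T>(stream: ReadableStream<T>): AsyncGenerator<T> {
    const reader = stream.getReader();
    try {
        while (true) {
            const result = await reader.read();
            if (result.done) {
                return;
            }
            yield result.value;
        }
    } finally {
        reader.releaseLock();
    }
}

type Chunk = {
    object: "chat.completion.chunk";
    choices: Array<{ message: { type: string; content: string } }>;
};

// The single chunk doStream enqueues: the whole completion, marked as "stop".
const singleChunk: Chunk = {
    object: "chat.completion.chunk",
    choices: [{ message: { type: "image", content: "https://example.com/result.png" } }], // illustrative URL
};

const stream = new ReadableStream<Chunk>({
    start(controller) {
        controller.enqueue(singleChunk);
        controller.close();
    },
});

(async () => {
    for await (const chunk of createAsyncIterable(stream)) {
        console.log(chunk.choices[0]?.message.content); // logs the image URL once; the stream then ends
    }
})();
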
package/lib/models/QwenSketchToImage/index.d.ts
@@ -0,0 +1,35 @@
+import type { IAbstractRequest } from "@vectorx/ai-types";
+import type { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions } from "../../model-type";
+import { TokenManager } from "../../tokenManager";
+import { SimpleChatModel } from "../Chat";
+export interface WanxSketchToImageLiteParameters {
+    size?: "768*768" | "720*1280" | "1280*720" | string;
+    n?: number;
+    sketch_weight?: number;
+    style?: "<auto>" | "<3d cartoon>" | "<anime>" | "<oil painting>" | "<watercolor>" | "<sketch>" | "<chinese painting>" | "<flat illustration>" | string;
+    sketch_extraction?: boolean;
+    sketch_color?: number[][];
+}
+export interface WanxSketchToImageLiteAPIInput {
+    model: string;
+    input: {
+        sketch_image_url: string;
+        prompt: string;
+    };
+    parameters?: WanxSketchToImageLiteParameters;
+}
+export type WanxSketchToImageLiteRequestOptions = WanxSketchToImageLiteAPIInput & {
+    parameters?: WanxSketchToImageLiteParameters;
+};
+export declare class WanxSketchToImageLiteModel extends SimpleChatModel {
+    static BASE_URL: string;
+    static SUB_URL: string;
+    modelName: string;
+    constructor(req: IAbstractRequest, baseUrl: string, tokenManager: TokenManager);
+    protected convertToSketchToImageInput(data: ModelRequestOptions & {
+        parameters?: WanxSketchToImageLiteParameters;
+    }): WanxSketchToImageLiteRequestOptions;
+    protected modelRequest(data: WanxSketchToImageLiteRequestOptions, options?: ReqOptions): Promise<ReadableStream<Uint8Array> | Promise<unknown>>;
+    doGenerate(data: ModelRequestOptions, options?: ReqOptions): Promise<DoGenerateOutput>;
+    doStream(data: ModelRequestOptions, options?: ReqOptions): Promise<DoStreamOutput>;
+}

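Unlike the chat-style inputs above, the sketch-to-image declarations take a flat `input` of `sketch_image_url` plus `prompt`, with richer tuning parameters. Below is a minimal literal satisfying `WanxSketchToImageLiteAPIInput`, with the interfaces repeated locally so it compiles standalone; the model name "wanx-sketch-to-image-lite" (suggested by the class name), the URL, and all parameter values are illustrative assumptions rather than values read from this package.

// Local copies of the shapes declared in QwenSketchToImage/index.d.ts, so this stands alone.
interface WanxSketchToImageLiteParameters {
    size?: "768*768" | "720*1280" | "1280*720" | string;
    n?: number;
    sketch_weight?: number;
    style?: string;
    sketch_extraction?: boolean;
    sketch_color?: number[][];
}
interface WanxSketchToImageLiteAPIInput {
    model: string;
    input: { sketch_image_url: string; prompt: string };
    parameters?: WanxSketchToImageLiteParameters;
}

// Hypothetical request; every value below is an assumption for illustration only.
const sketchRequest: WanxSketchToImageLiteAPIInput = {
    model: "wanx-sketch-to-image-lite",
    input: {
        sketch_image_url: "https://example.com/sketch.png",
        prompt: "a cozy cabin in a snowy forest",
    },
    parameters: { size: "768*768", n: 1, sketch_weight: 5, style: "<watercolor>" },
};

console.log(JSON.stringify(sketchRequest, null, 2));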