@vectorx/ai-sdk 0.1.3 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/{types → lib}/agent/index.d.ts +2 -2
- package/{types → lib}/ai.d.ts +7 -8
- package/lib/ai.js +18 -28
- package/{types → lib}/index.d.ts +3 -2
- package/lib/index.js +4 -3
- package/{types/types.d.ts → lib/model-type.d.ts} +24 -15
- package/lib/{types.js → model-type.js} +1 -4
- package/lib/models/Chat.d.ts +14 -0
- package/lib/models/Chat.js +36 -0
- package/lib/models/Default/index.d.ts +11 -0
- package/lib/models/{default.js → Default/index.js} +28 -11
- package/lib/models/QwenImage/index.d.ts +81 -0
- package/lib/models/QwenImage/index.js +208 -0
- package/lib/models/QwenSketchToImage/index.d.ts +35 -0
- package/lib/models/QwenSketchToImage/index.js +155 -0
- package/lib/models/QwenStyleRepaintV1/index.d.ts +114 -0
- package/lib/models/QwenStyleRepaintV1/index.js +213 -0
- package/lib/models/QwenVlMax/index.d.ts +85 -0
- package/lib/models/QwenVlMax/index.js +120 -0
- package/lib/models/index.d.ts +47 -0
- package/lib/models/index.js +40 -4
- package/{types → lib}/models/react.d.ts +3 -2
- package/lib/models/react.js +3 -3
- package/{types → lib}/stream.d.ts +1 -8
- package/lib/tokenManager.d.ts +36 -0
- package/lib/tokenManager.js +89 -0
- package/lib/utils.js +2 -3
- package/package.json +4 -5
- package/lib/models/model-types.js +0 -6
- package/types/models/default.d.ts +0 -13
- package/types/models/index.d.ts +0 -23
- package/types/models/model-types.d.ts +0 -131
- /package/{types → lib}/eventsource_parser/index.d.ts +0 -0
- /package/{types → lib}/eventsource_parser/parse.d.ts +0 -0
- /package/{types → lib}/eventsource_parser/stream.d.ts +0 -0
- /package/{types → lib}/eventsource_parser/types.d.ts +0 -0
- /package/{types → lib}/utils.d.ts +0 -0
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
import type { IAbstractRequest } from "@vectorx/ai-types";
import type { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions } from "../../model-type";
import { TokenManager } from "../../tokenManager";
import { SimpleChatModel } from "../Chat";
/**
 * Tuning parameters accepted by the sketch-to-image API.
 * All fields are optional; the implementation supplies defaults.
 */
export interface WanxSketchToImageLiteParameters {
    /** Output resolution as "width*height". Known presets are listed; other strings are passed through to the service. */
    size?: "768*768" | "720*1280" | "1280*720" | string;
    /** Number of images to generate (the implementation validates the range 1-4). */
    n?: number;
    /** How strongly the sketch constrains the result (the implementation validates the range 0-10). */
    sketch_weight?: number;
    /** Rendering style token. Known presets are listed; other strings are passed through to the service. */
    style?: "<auto>" | "<3d cartoon>" | "<anime>" | "<oil painting>" | "<watercolor>" | "<sketch>" | "<chinese painting>" | "<flat illustration>" | string;
    /** Whether the service should first extract sketch lines from the input image. */
    sketch_extraction?: boolean;
    /** Color triples for sketch lines — presumably RGB; TODO confirm against the DashScope API docs. */
    sketch_color?: number[][];
}
/** Raw request body shape expected by the sketch-to-image endpoint. */
export interface WanxSketchToImageLiteAPIInput {
    /** Model identifier sent to the service. */
    model: string;
    input: {
        /** URL of the sketch image that guides generation. */
        sketch_image_url: string;
        /** Text prompt describing the desired output. */
        prompt: string;
    };
    parameters?: WanxSketchToImageLiteParameters;
}
/** Request options = API input plus optional tuning parameters. */
export type WanxSketchToImageLiteRequestOptions = WanxSketchToImageLiteAPIInput & {
    parameters?: WanxSketchToImageLiteParameters;
};
/**
 * Client for the async sketch-to-image endpoint.
 * `doGenerate` submits an asynchronous task and returns the task handle
 * (task_id/task_status/results) serialized in the assistant message content;
 * `doStream` replays that same single result as a one-chunk stream.
 */
export declare class WanxSketchToImageLiteModel extends SimpleChatModel {
    /** Default service origin. */
    static BASE_URL: string;
    /** Endpoint path appended to the base URL. */
    static SUB_URL: string;
    /** Resolved model identifier sent in the request body. */
    modelName: string;
    constructor(req: IAbstractRequest, baseUrl: string, tokenManager: TokenManager);
    /** Maps chat-style messages (text + image_url parts) onto the API's input shape; throws if required pieces are missing. */
    protected convertToSketchToImageInput(data: ModelRequestOptions & {
        parameters?: WanxSketchToImageLiteParameters;
    }): WanxSketchToImageLiteRequestOptions;
    /** POSTs the converted payload with auth headers and normalizes the raw response. */
    protected modelRequest(data: WanxSketchToImageLiteRequestOptions, options?: ReqOptions): Promise<ReadableStream<Uint8Array> | Promise<unknown>>;
    doGenerate(data: ModelRequestOptions, options?: ReqOptions): Promise<DoGenerateOutput>;
    doStream(data: ModelRequestOptions, options?: ReqOptions): Promise<DoStreamOutput>;
}
|
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
"use strict";
/**
 * Compiled (tsc down-leveled) client for the DashScope
 * "wanx-sketch-to-image-lite" model
 * (POST {baseUrl}/api/v1/services/aigc/image2image/image-synthesis).
 *
 * doGenerate submits an asynchronous task ("X-DashScope-Async: enable") and
 * returns the task handle (task_id/task_status/results) JSON-serialized into
 * the assistant message content; doStream replays that single result as a
 * one-chunk stream.
 */
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.WanxSketchToImageLiteModel = void 0;
const stream_1 = require("../../stream");
const utils_1 = require("../../utils");
const Chat_1 = require("../Chat");
const index_1 = require("../index");
class WanxSketchToImageLiteModel extends Chat_1.SimpleChatModel {
    constructor(req, baseUrl, tokenManager) {
        super(req, baseUrl, WanxSketchToImageLiteModel.SUB_URL, tokenManager);
        // Resolved model identifier, looked up from the shared model registry.
        this.modelName = index_1.modelName[index_1.MultiModalModelName.WanxSketchToImageLite];
    }
    /**
     * Converts chat-style messages into the API payload.
     * Requires a user message carrying an image_url part (the sketch) and a
     * text part (the prompt); throws otherwise. Applies parameter defaults
     * and validates n (1-4) and sketch_weight (0-10).
     */
    convertToSketchToImageInput(data) {
        var _a;
        const messages = data.messages || [];
        if (!Array.isArray(messages) || messages.length === 0) {
            throw new Error("wanx-sketch-to-image-lite 请求需要提供 messages");
        }
        const userMessage = messages.find((m) => m.role === "user");
        if (!userMessage || !Array.isArray(userMessage.content)) {
            throw new Error("wanx-sketch-to-image-lite 请求需要提供 user 消息");
        }
        const imageContent = userMessage.content.find((c) => c.type === "image_url");
        if (!((_a = imageContent === null || imageContent === void 0 ? void 0 : imageContent.image_url) === null || _a === void 0 ? void 0 : _a.url)) {
            throw new Error("wanx-sketch-to-image-lite 请求需要提供草图图片 URL");
        }
        const sketchImageUrl = imageContent.image_url.url;
        const textContent = userMessage.content.find((c) => c.type === "text");
        if (!(textContent === null || textContent === void 0 ? void 0 : textContent.text)) {
            throw new Error("wanx-sketch-to-image-lite 请求需要提供文本提示词");
        }
        const prompt = textContent.text;
        const modelParams = data.parameters || {};
        // Caller-supplied parameters override these defaults.
        const parameters = Object.assign({ size: "768*768", n: 4, sketch_weight: 10, style: "<auto>", sketch_extraction: false, sketch_color: [] }, modelParams);
        if (parameters.n < 1 || parameters.n > 4) {
            throw new Error("生成图片数量 n 必须在 1-4 之间");
        }
        if (parameters.sketch_weight < 0 || parameters.sketch_weight > 10) {
            throw new Error("草图权重 sketch_weight 必须在 0-10 之间");
        }
        return {
            model: this.modelName,
            input: {
                sketch_image_url: sketchImageUrl,
                prompt: prompt,
            },
            parameters,
        };
    }
    /**
     * POSTs the payload to the endpoint. Auth headers are spread first so
     * caller-supplied headers can override them; the response is normalized
     * by the shared handleResponseData helper.
     */
    modelRequest(data_1) {
        return __awaiter(this, arguments, void 0, function* (data, options = {}) {
            const fetchHeaders = yield this.createAuthHeaders(options === null || options === void 0 ? void 0 : options.headers);
            const { data: responseData, header } = (yield this.req.fetch({
                method: "post",
                headers: Object.assign(Object.assign({}, fetchHeaders), ((options === null || options === void 0 ? void 0 : options.headers) || {})),
                body: JSON.stringify(data),
                url: `${this.baseUrl}/${this.subUrl}`,
                stream: false,
            }));
            return (0, utils_1.handleResponseData)(responseData, header);
        });
    }
    /**
     * Submits the async generation task and normalizes the service response
     * into the SDK's chat-completion shape. The async task handle
     * (task_id/task_status/results) is JSON-serialized into message.content
     * for the caller to poll with.
     */
    doGenerate(data, options) {
        return __awaiter(this, void 0, void 0, function* () {
            var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t;
            data.model = this.modelName;
            const body = this.convertToSketchToImageInput(data);
            // "X-DashScope-Async: enable" switches the endpoint into async-task mode.
            const headers = Object.assign(Object.assign({}, ((options === null || options === void 0 ? void 0 : options.headers) || {})), { "X-DashScope-Async": "enable" });
            // FIX: forward all caller options (not just { headers }), matching
            // QwenStyleRepaintV1Model.doGenerate; previously every non-header
            // option the caller passed was silently dropped.
            const res = (yield this.modelRequest(body, Object.assign(Object.assign({}, options), { headers })));
            const normalized = {
                id: (_b = (_a = res === null || res === void 0 ? void 0 : res.request_id) !== null && _a !== void 0 ? _a : res === null || res === void 0 ? void 0 : res.id) !== null && _b !== void 0 ? _b : "",
                object: (_c = res === null || res === void 0 ? void 0 : res.object) !== null && _c !== void 0 ? _c : "chat.completion",
                created: Math.floor(Date.now() / 1000),
                model: this.modelName,
                log_id: (_e = (_d = res === null || res === void 0 ? void 0 : res.request_id) !== null && _d !== void 0 ? _d : res === null || res === void 0 ? void 0 : res.id) !== null && _e !== void 0 ? _e : "",
                error: "",
                code: Number((_f = res === null || res === void 0 ? void 0 : res.status_code) !== null && _f !== void 0 ? _f : 0) || 0,
                choices: [
                    {
                        index: 0,
                        message: {
                            id: (_h = (_g = res === null || res === void 0 ? void 0 : res.request_id) !== null && _g !== void 0 ? _g : res === null || res === void 0 ? void 0 : res.id) !== null && _h !== void 0 ? _h : "",
                            role: "assistant",
                            type: "async_task",
                            content: JSON.stringify({
                                task_id: (_j = res === null || res === void 0 ? void 0 : res.output) === null || _j === void 0 ? void 0 : _j.task_id,
                                task_status: (_k = res === null || res === void 0 ? void 0 : res.output) === null || _k === void 0 ? void 0 : _k.task_status,
                                results: (_m = (_l = res === null || res === void 0 ? void 0 : res.output) === null || _l === void 0 ? void 0 : _l.results) !== null && _m !== void 0 ? _m : [],
                            }),
                            reasoning_content: "",
                        },
                        finish_reason: "stop",
                    },
                ],
                usage: {
                    prompt_tokens: (_p = (_o = res === null || res === void 0 ? void 0 : res.usage) === null || _o === void 0 ? void 0 : _o.prompt_tokens) !== null && _p !== void 0 ? _p : 0,
                    completion_tokens: (_r = (_q = res === null || res === void 0 ? void 0 : res.usage) === null || _q === void 0 ? void 0 : _q.completion_tokens) !== null && _r !== void 0 ? _r : 0,
                    total_tokens: (_t = (_s = res === null || res === void 0 ? void 0 : res.usage) === null || _s === void 0 ? void 0 : _s.total_tokens) !== null && _t !== void 0 ? _t : 0,
                    knowledge_tokens: 0,
                    reasoning_tokens: 0,
                },
            };
            return normalized;
        });
    }
    /**
     * Pseudo-streaming: runs doGenerate once and emits its result as a single
     * chat.completion.chunk through an async iterable.
     */
    doStream(data, options) {
        return __awaiter(this, void 0, void 0, function* () {
            var _a, _b;
            const nonStream = yield this.doGenerate(data, options);
            const msg = ((_b = (_a = nonStream.choices) === null || _a === void 0 ? void 0 : _a[0]) === null || _b === void 0 ? void 0 : _b.message) || {};
            const singleChunk = {
                id: nonStream.id,
                object: "chat.completion.chunk",
                created: nonStream.created,
                model: nonStream.model,
                log_id: nonStream.log_id,
                error: nonStream.error || "",
                code: nonStream.code || 0,
                choices: [
                    {
                        index: 0,
                        message: {
                            id: nonStream.id,
                            role: "assistant",
                            type: msg.type || "async_task",
                            content: msg.content || "",
                            reasoning_content: "",
                        },
                        finish_reason: "stop",
                    },
                ],
                usage: nonStream.usage,
            };
            // NOTE(review): this uses the global ReadableStream (Node >= 18),
            // while QwenStyleRepaintV1 uses stream_1.ReadableStream from
            // "../../stream" — confirm which is intended and align the two.
            const stream = new ReadableStream({
                start(controller) {
                    controller.enqueue(singleChunk);
                    controller.close();
                },
            });
            return (0, stream_1.createAsyncIterable)(stream);
        });
    }
}
exports.WanxSketchToImageLiteModel = WanxSketchToImageLiteModel;
WanxSketchToImageLiteModel.BASE_URL = "https://dashscope.aliyuncs.com";
WanxSketchToImageLiteModel.SUB_URL = "api/v1/services/aigc/image2image/image-synthesis";
|
|
@@ -0,0 +1,114 @@
|
|
|
1
|
+
import type { IAbstractRequest } from "@vectorx/ai-types";
import type { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions } from "../../model-type";
import { TokenManager } from "../../tokenManager";
import { SimpleChatModel } from "../Chat";
/**
 * Preset style indices accepted by the style-repaint API.
 * CUSTOM (-1) requires a style_ref_url reference image.
 * Note the gaps (10-13, 16-29): those indices are not defined here.
 */
export declare const STYLE_INDEX: {
    readonly CUSTOM: -1;
    readonly RETRO_COMIC: 0;
    readonly FAIRY_TALE_3D: 1;
    readonly ANIME_2D: 2;
    readonly FRESH_STYLE: 3;
    readonly FUTURE_TECH: 4;
    readonly CHINESE_PAINTING: 5;
    readonly GENERAL_BATTLE: 6;
    readonly COLORFUL_CARTOON: 7;
    readonly ELEGANT_CHINESE: 8;
    readonly NEW_YEAR: 9;
    readonly GONGBI_STYLE: 14;
    readonly NEW_YEAR_GREET: 15;
    readonly FAIRY_WORLD: 30;
    readonly CLAY_WORLD: 31;
    readonly PIXEL_WORLD: 32;
    readonly ADVENTURE_WORLD: 33;
    readonly JAPANESE_ANIME: 34;
    readonly WORLD_3D: 35;
    readonly ANIME_WORLD: 36;
    readonly HAND_DRAWN: 37;
    readonly CRAYON_WORLD: 38;
    readonly FRIDGE_MAGNET: 39;
    readonly BAJIQUAN_WORLD: 40;
};
/** Union of the numeric values in STYLE_INDEX. */
export type StyleIndex = (typeof STYLE_INDEX)[keyof typeof STYLE_INDEX];
/** Tuning parameters for style repaint. */
export interface QwenStyleRepaintV1Parameters {
    /** Style preset to apply; defaults to FRESH_STYLE in the implementation. */
    style_index?: StyleIndex;
    /** Reference-image URL; required when style_index is CUSTOM (-1). */
    style_ref_url?: string;
}
/** Chat-style input shape accepted by the SDK before conversion to the raw API input. */
export interface QwenStyleRepaintV1StandardInput {
    messages: Array<{
        role: "user" | "assistant" | "system";
        content: Array<{
            type: "image_url";
            image_url: {
                url: string;
            };
        } | {
            type: "text";
            text: string;
        }>;
    }>;
    parameters?: QwenStyleRepaintV1Parameters;
}
/** Raw request body shape expected by the style-repaint endpoint. */
export interface QwenStyleRepaintV1APIInput {
    model: string;
    input: {
        /** Source image to repaint. */
        image_url: string;
        style_index: number;
        /** Only sent when style_index is CUSTOM. */
        style_ref_url?: string;
    };
}
export type QwenStyleRepaintV1RequestOptions = QwenStyleRepaintV1APIInput;
/**
 * One item of a choice message's content. The service may return the image
 * under several keys; the implementation reads only `image`.
 */
export type QwenStyleRepaintV1ContentItem = {
    image?: string;
    url?: string;
    image_url?: string | {
        url: string;
    };
    b64_json?: string;
    [key: string]: any;
};
/**
 * Raw service response. Either a synchronous result (output.choices) or an
 * async task handle (output.task_id/task_status) is populated.
 */
export interface QwenStyleRepaintV1APIResponse {
    async?: boolean;
    output: {
        choices?: Array<{
            finish_reason: string;
            message: {
                role: "assistant" | "user";
                content: QwenStyleRepaintV1ContentItem[];
            };
        }>;
        task_status?: string;
        task_id?: string;
        task_metric?: {
            TOTAL: number;
            FAILED: number;
            SUCCEEDED: number;
        };
    };
    /** Image-oriented usage (dimensions/count), not token counts. */
    usage?: {
        width?: number;
        height?: number;
        image_count?: number;
    };
    request_id?: string;
    id?: string;
    model?: string;
    created?: number;
    object?: string;
    code?: number;
    error?: string;
}
/**
 * Client for the style-repaint endpoint. doGenerate submits an async task
 * ("X-DashScope-Async: enable") and normalizes the response; doStream replays
 * the same result as a one-chunk stream.
 */
export declare class QwenStyleRepaintV1Model extends SimpleChatModel {
    /** Default service origin. */
    static BASE_URL: string;
    /** Endpoint path appended to the base URL. */
    static SUB_URL: string;
    /** Resolved model identifier sent in the request body. */
    modelName: string;
    constructor(req: IAbstractRequest, baseUrl: string, tokenManager: TokenManager);
    /** Normalizes either an async task handle or a synchronous image result into the SDK's chat-completion shape. */
    protected normalizeStandardImageCompletion(res: QwenStyleRepaintV1APIResponse, fallbackModel: string): DoGenerateOutput;
    /** Maps chat-style messages onto the raw API input; throws on missing image URL or missing style_ref_url for CUSTOM. */
    protected convertToStyleRepaintInput(data: ModelRequestOptions & {
        parameters?: QwenStyleRepaintV1Parameters;
    }): QwenStyleRepaintV1APIInput;
    /** POSTs the converted payload with auth headers and normalizes the raw response. */
    protected modelRequest(data: QwenStyleRepaintV1APIInput, options?: ReqOptions): Promise<ReadableStream<Uint8Array> | Promise<unknown>>;
    doGenerate(data: ModelRequestOptions & {
        parameters?: QwenStyleRepaintV1Parameters;
    }, options?: ReqOptions): Promise<DoGenerateOutput>;
    doStream(data: ModelRequestOptions, options?: ReqOptions): Promise<DoStreamOutput>;
}
|
|
@@ -0,0 +1,213 @@
|
|
|
1
|
+
"use strict";
/**
 * Compiled (tsc down-leveled) client for the DashScope style-repaint model
 * (POST {baseUrl}/api/v1/services/aigc/image-generation/generation).
 * doGenerate submits an async task ("X-DashScope-Async: enable") and
 * normalizes the response; doStream replays that result as a single chunk.
 */
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.QwenStyleRepaintV1Model = exports.STYLE_INDEX = void 0;
const stream_1 = require("../../stream");
const utils_1 = require("../../utils");
const Chat_1 = require("../Chat");
const index_1 = require("../index");
// Preset style indices; CUSTOM (-1) requires a style_ref_url reference image.
// Indices 10-13 and 16-29 are intentionally absent here.
exports.STYLE_INDEX = {
    CUSTOM: -1,
    RETRO_COMIC: 0,
    FAIRY_TALE_3D: 1,
    ANIME_2D: 2,
    FRESH_STYLE: 3,
    FUTURE_TECH: 4,
    CHINESE_PAINTING: 5,
    GENERAL_BATTLE: 6,
    COLORFUL_CARTOON: 7,
    ELEGANT_CHINESE: 8,
    NEW_YEAR: 9,
    GONGBI_STYLE: 14,
    NEW_YEAR_GREET: 15,
    FAIRY_WORLD: 30,
    CLAY_WORLD: 31,
    PIXEL_WORLD: 32,
    ADVENTURE_WORLD: 33,
    JAPANESE_ANIME: 34,
    WORLD_3D: 35,
    ANIME_WORLD: 36,
    HAND_DRAWN: 37,
    CRAYON_WORLD: 38,
    FRIDGE_MAGNET: 39,
    BAJIQUAN_WORLD: 40,
};
class QwenStyleRepaintV1Model extends Chat_1.SimpleChatModel {
    constructor(req, baseUrl, tokenManager) {
        super(req, baseUrl, QwenStyleRepaintV1Model.SUB_URL, tokenManager);
        // Resolved model identifier, looked up from the shared model registry.
        this.modelName = index_1.modelName[index_1.MultiModalModelName.QwenStyleRepaintV1];
    }
    /**
     * Normalizes a service response into the SDK's chat-completion shape.
     * If the response carries an async task handle (output.task_status +
     * output.task_id), the whole output is JSON-serialized into
     * message.content with type "async_task"; otherwise the first choice's
     * first content item's `image` URL becomes the content with type "image".
     */
    normalizeStandardImageCompletion(res, fallbackModel) {
        var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q;
        const qOutput = (res === null || res === void 0 ? void 0 : res.output) || {};
        // Async-task branch: hand the task handle back for the caller to poll.
        if ((qOutput === null || qOutput === void 0 ? void 0 : qOutput.task_status) && (qOutput === null || qOutput === void 0 ? void 0 : qOutput.task_id)) {
            const created = (_a = res === null || res === void 0 ? void 0 : res.created) !== null && _a !== void 0 ? _a : Math.floor(Date.now() / 1000);
            const id = (res === null || res === void 0 ? void 0 : res.id) || (res === null || res === void 0 ? void 0 : res.request_id) || "";
            const normalized = {
                id,
                object: (_b = res === null || res === void 0 ? void 0 : res.object) !== null && _b !== void 0 ? _b : "chat.completion",
                created,
                model: (_c = res === null || res === void 0 ? void 0 : res.model) !== null && _c !== void 0 ? _c : fallbackModel,
                log_id: id,
                error: (_d = res === null || res === void 0 ? void 0 : res.error) !== null && _d !== void 0 ? _d : "",
                code: (_e = res === null || res === void 0 ? void 0 : res.code) !== null && _e !== void 0 ? _e : 0,
                choices: [
                    {
                        index: 0,
                        message: {
                            id,
                            role: "assistant",
                            type: "async_task",
                            content: JSON.stringify(Object.assign(Object.assign({}, ((res === null || res === void 0 ? void 0 : res.output) || {})), { request_id: (res === null || res === void 0 ? void 0 : res.request_id) || id })),
                            reasoning_content: "",
                        },
                        finish_reason: "stop",
                    },
                ],
                usage: mapUsageToStandard(res === null || res === void 0 ? void 0 : res.usage),
            };
            return normalized;
        }
        // Synchronous branch: extract the image URL from the first choice.
        // Only the `image` key of the first content item is consulted.
        const first = ((_g = (_f = qOutput === null || qOutput === void 0 ? void 0 : qOutput.choices) === null || _f === void 0 ? void 0 : _f[0]) !== null && _g !== void 0 ? _g : null);
        const message = (_h = first === null || first === void 0 ? void 0 : first.message) !== null && _h !== void 0 ? _h : {};
        const contentUrl = Array.isArray(message === null || message === void 0 ? void 0 : message.content) && ((_j = message.content[0]) === null || _j === void 0 ? void 0 : _j.image) ? String(message.content[0].image) : "";
        const created = (_k = res === null || res === void 0 ? void 0 : res.created) !== null && _k !== void 0 ? _k : Math.floor(Date.now() / 1000);
        const id = (res === null || res === void 0 ? void 0 : res.id) || (res === null || res === void 0 ? void 0 : res.request_id) || "";
        const normalized = {
            id,
            object: (_l = res === null || res === void 0 ? void 0 : res.object) !== null && _l !== void 0 ? _l : "chat.completion",
            created,
            model: (_m = res === null || res === void 0 ? void 0 : res.model) !== null && _m !== void 0 ? _m : fallbackModel,
            log_id: id,
            error: (_o = res === null || res === void 0 ? void 0 : res.error) !== null && _o !== void 0 ? _o : "",
            code: (_p = res === null || res === void 0 ? void 0 : res.code) !== null && _p !== void 0 ? _p : 0,
            choices: [
                {
                    index: 0,
                    message: {
                        id,
                        role: "assistant",
                        type: "image",
                        content: contentUrl || "",
                        reasoning_content: "",
                    },
                    finish_reason: (_q = first === null || first === void 0 ? void 0 : first.finish_reason) !== null && _q !== void 0 ? _q : "stop",
                },
            ],
            usage: mapUsageToStandard(res === null || res === void 0 ? void 0 : res.usage),
        };
        return normalized;
    }
    /**
     * Maps chat-style messages onto the raw API input. Requires a user
     * message with an image_url part; defaults style_index to FRESH_STYLE
     * and requires style_ref_url when the CUSTOM (-1) style is selected.
     */
    convertToStyleRepaintInput(data) {
        var _a, _b;
        const messages = data.messages || [];
        if (!Array.isArray(messages) || messages.length === 0) {
            throw new Error("wanx-style-repaint-v1 请求需要提供 messages");
        }
        const userMessage = messages.find((m) => m.role === "user");
        if (!userMessage || !Array.isArray(userMessage.content)) {
            throw new Error("wanx-style-repaint-v1 请求需要提供 user 消息");
        }
        const imageContent = userMessage.content.find((c) => c.type === "image_url");
        if (!((_a = imageContent === null || imageContent === void 0 ? void 0 : imageContent.image_url) === null || _a === void 0 ? void 0 : _a.url)) {
            throw new Error("wanx-style-repaint-v1 请求需要提供图片 URL");
        }
        const imageUrl = imageContent.image_url.url;
        const modelParams = data.parameters || {};
        const styleIndex = (_b = modelParams.style_index) !== null && _b !== void 0 ? _b : exports.STYLE_INDEX.FRESH_STYLE;
        const styleRefUrl = modelParams.style_ref_url;
        if (styleIndex === exports.STYLE_INDEX.CUSTOM && !styleRefUrl) {
            throw new Error("使用自定义风格时(style_index: -1),必须提供 style_ref_url");
        }
        const input = {
            image_url: imageUrl,
            style_index: styleIndex,
        };
        // style_ref_url is only sent for the CUSTOM style.
        if (styleIndex === exports.STYLE_INDEX.CUSTOM && styleRefUrl) {
            input.style_ref_url = styleRefUrl;
        }
        return {
            model: this.modelName,
            input,
        };
    }
    // POSTs the payload; auth headers first so caller-supplied headers can override them.
    modelRequest(data_1) {
        return __awaiter(this, arguments, void 0, function* (data, options = {}) {
            const fetchHeaders = yield this.createAuthHeaders(options === null || options === void 0 ? void 0 : options.headers);
            const { data: responseData, header } = (yield this.req.fetch({
                method: "post",
                headers: Object.assign(Object.assign({}, fetchHeaders), ((options === null || options === void 0 ? void 0 : options.headers) || {})),
                body: JSON.stringify(data),
                url: `${this.baseUrl}/${this.subUrl}`,
                stream: false,
            }));
            return (0, utils_1.handleResponseData)(responseData, header);
        });
    }
    /** Submits the async task and returns the normalized chat-completion result. */
    doGenerate(data, options) {
        return __awaiter(this, void 0, void 0, function* () {
            data.model = this.modelName;
            const body = this.convertToStyleRepaintInput(data);
            // "X-DashScope-Async: enable" switches the endpoint into async-task mode.
            const headers = Object.assign(Object.assign({}, ((options === null || options === void 0 ? void 0 : options.headers) || {})), { "X-DashScope-Async": "enable" });
            const res = (yield this.modelRequest(body, Object.assign(Object.assign({}, options), { headers })));
            return this.normalizeStandardImageCompletion(res, this.modelName);
        });
    }
    /**
     * Pseudo-streaming: runs doGenerate once and emits its result as a single
     * chat.completion.chunk through an async iterable.
     */
    doStream(data, options) {
        return __awaiter(this, void 0, void 0, function* () {
            var _a, _b;
            const nonStream = yield this.doGenerate(data, options);
            const msg = ((_b = (_a = nonStream.choices) === null || _a === void 0 ? void 0 : _a[0]) === null || _b === void 0 ? void 0 : _b.message) || {};
            const singleChunk = {
                id: nonStream.id,
                object: "chat.completion.chunk",
                created: nonStream.created,
                model: nonStream.model,
                log_id: nonStream.log_id,
                error: nonStream.error || "",
                code: nonStream.code || 0,
                choices: [
                    {
                        index: 0,
                        message: {
                            id: nonStream.id,
                            role: "assistant",
                            type: msg.type || "async_task",
                            content: msg.content || "",
                            reasoning_content: "",
                        },
                        finish_reason: "stop",
                    },
                ],
                usage: nonStream.usage,
            };
            // NOTE(review): uses stream_1.ReadableStream from "../../stream",
            // while the sketch-to-image sibling uses the global ReadableStream —
            // confirm "../../stream" exports ReadableStream and align the two.
            const stream = new stream_1.ReadableStream({
                start(controller) {
                    controller.enqueue(singleChunk);
                    controller.close();
                },
            });
            return (0, stream_1.createAsyncIterable)(stream);
        });
    }
}
exports.QwenStyleRepaintV1Model = QwenStyleRepaintV1Model;
QwenStyleRepaintV1Model.BASE_URL = "https://dashscope.aliyuncs.com";
QwenStyleRepaintV1Model.SUB_URL = "api/v1/services/aigc/image-generation/generation";
// NOTE(review): the `usage` argument is ignored — the service reports image
// dimensions/count rather than token counts, so all token fields are zeroed.
// Confirm this is intentional rather than a stub.
function mapUsageToStandard(usage) {
    return {
        prompt_tokens: 0,
        completion_tokens: 0,
        knowledge_tokens: 0,
        reasoning_tokens: 0,
        total_tokens: 0,
    };
}
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
import type { IAbstractRequest } from "@vectorx/ai-types";
import type { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions } from "../../model-type";
import type { TokenManager } from "../../tokenManager";
import { SimpleChatModel } from "../Chat";
import type { MultiModalModelName } from "../index";
/**
 * One chat message for the vision-language model. Content is either plain
 * text or a list of text / image_url parts.
 */
export interface QwenVlMaxMessage {
    role: "system" | "user" | "assistant";
    content: string | Array<{
        type: "text" | "image_url";
        text?: string;
        image_url?: {
            url: string;
            /** Image fidelity hint — presumably OpenAI-compatible semantics; TODO confirm against the service docs. */
            detail?: "auto" | "low" | "high";
        };
    }>;
}
/** Sampling and request-control parameters (OpenAI-compatible field names). */
export interface QwenVlMaxParameters {
    seed?: number;
    temperature?: number;
    top_p?: number;
    max_tokens?: number;
    n?: number;
    stream?: boolean;
    stop?: string | string[];
    frequency_penalty?: number;
    presence_penalty?: number;
    user?: string;
    enable_search?: boolean;
    enable_thinking?: boolean;
}
/** Raw request body: parameters plus the model name and messages. */
export interface QwenVlMaxAPIInput extends QwenVlMaxParameters {
    model: string;
    messages: QwenVlMaxMessage[];
    stream?: boolean;
}
/** Non-streaming response shape returned by the service. */
export interface QwenVlMaxResponse {
    id: string;
    object: string;
    created: number;
    model: string;
    choices: Array<{
        index: number;
        message: {
            role: string;
            content: string;
        };
        finish_reason: string;
    }>;
    usage: {
        prompt_tokens: number;
        completion_tokens: number;
        total_tokens: number;
        /** Alternate token-count field names some responses carry. */
        output_tokens?: number;
        input_tokens?: number;
    };
}
/** One SSE chunk of a streaming response (delta-based). */
export interface QwenVlMaxStreamChunk {
    id: string;
    object: string;
    created: number;
    model: string;
    choices: Array<{
        index: number;
        delta: {
            role?: string;
            content?: string;
        };
        /** null until the final chunk. */
        finish_reason: string | null;
    }>;
    /** Usually present only on the final chunk. */
    usage?: {
        prompt_tokens: number;
        completion_tokens: number;
        total_tokens: number;
    };
}
/**
 * Client for the qwen-vl-max vision-language chat model. Unlike the image
 * models in this package, the concrete model name is injected via the
 * constructor rather than fixed by the class.
 */
export declare class QwenVlMax extends SimpleChatModel {
    /** Default service origin. */
    static BASE_URL: string;
    /** Model identifier sent in the request body; supplied by the constructor. */
    modelName: MultiModalModelName;
    constructor(req: IAbstractRequest, baseUrl: string, modelName: MultiModalModelName, tokenManager: TokenManager);
    /** POSTs the payload; returns a byte stream when streaming, a parsed body otherwise. */
    protected modelRequest(data: QwenVlMaxAPIInput, options?: ReqOptions): Promise<ReadableStream<Uint8Array> | Promise<unknown>>;
    /** Converts a raw non-streaming response into the SDK's chat-completion shape. */
    protected normalizeResponse(response: QwenVlMaxResponse): DoGenerateOutput;
    doGenerate(data: ModelRequestOptions, options?: ReqOptions): Promise<DoGenerateOutput>;
    doStream(data: ModelRequestOptions, options?: ReqOptions): Promise<DoStreamOutput>;
    /** Maps SDK request options onto QwenVlMaxAPIInput. */
    private convertToQwenVlMaxRequestOptions;
}
|