@vectorx/ai-sdk 0.3.0 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/ai.js +2 -0
- package/lib/model-type.d.ts +50 -11
- package/lib/model-type.js +16 -0
- package/lib/models/QwenDocTurbo/index.d.ts +84 -0
- package/lib/models/QwenDocTurbo/index.js +178 -0
- package/lib/models/QwenVlMax/index.d.ts +2 -9
- package/lib/models/QwenVlMax/index.js +2 -1
- package/lib/models/index.d.ts +4 -1
- package/lib/models/index.js +5 -1
- package/package.json +2 -2
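In summary, 0.4.0 adds a QwenDocTurbo document-understanding model (a DashScope text-generation endpoint accepting doc_url inputs), introduces a shared MultiModalContent union plus content-filtering helpers in model-type, reuses those types in QwenVlMax, exposes an optional uploadFile method on IAgent, and adds a parameters passthrough to ModelRequestOptions.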
package/lib/ai.js
CHANGED
@@ -63,6 +63,8 @@ class AI {
                return new models.ReActModel(new models.QwenStyleRepaintV1Model(this.request, models.QwenStyleRepaintV1Model.BASE_URL, this.tokenManager));
            case models.MultiModalModelName.QwenVlMax:
                return new models.ReActModel(new models.QwenVlMax(this.request, models.QwenVlMax.BASE_URL, model, this.tokenManager));
+           case models.MultiModalModelName.QwenDocTurbo:
+               return new models.ReActModel(new models.QwenDocTurbo(this.request, models.QwenDocTurbo.BASE_URL, model, this.tokenManager));
            default:
                return new models.ReActModel(new models.DefaultSimpleModel(this.request, this.baseUrl, model));
        }
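Net effect: requesting "qwen-doc-turbo" now routes to the new QwenDocTurbo backend (wrapped in ReActModel like its siblings) instead of falling through to DefaultSimpleModel. A minimal sketch, assuming the enclosing factory method is exposed as getModel and that AI is importable from the package root; neither is confirmed by this hunk:

// Sketch only: `getModel` is an assumed name, the diff shows just the switch body.
import { AI } from "@vectorx/ai-sdk"; // root export assumed
import { MultiModalModelName } from "@vectorx/ai-sdk/lib/models"; // path taken from this diff

declare const ai: AI; // constructed elsewhere with request/baseUrl/tokenManager
// 0.3.0: fell into the default branch (DefaultSimpleModel);
// 0.4.0: dispatches to QwenDocTurbo via the new case above.
const docModel = ai.getModel(MultiModalModelName.QwenDocTurbo); // hypothetical method name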
package/lib/model-type.d.ts
CHANGED
@@ -1,6 +1,6 @@
-import type { GetAgentInfoResponse, GetConversationsResponse, GetHistoryMessagesParams, GetHistoryMessagesResponse, QueryTasksResponse, SendMessageInput } from "@vectorx/ai-types";
+import type { GetAgentInfoResponse, GetConversationsResponse, GetHistoryMessagesParams, GetHistoryMessagesResponse, QueryTasksResponse, SendMessageInput, UploadFileInput, UploadFileResponse } from "@vectorx/ai-types";
 import type { ModelName } from "./models";
-export type { GetAgentInfoResponse, GetConversationsResponse, GetHistoryMessagesParams, GetHistoryMessagesResponse, SendMessageInput, QueryTasksResponse, };
+export type { GetAgentInfoResponse, GetConversationsResponse, GetHistoryMessagesParams, GetHistoryMessagesResponse, SendMessageInput, QueryTasksResponse, UploadFileInput, UploadFileResponse, };
 export type DoGenerateOutput = BaseDoGenerateOutput;
 export type DoStreamOutput = AsyncIterableReadableStream<BaseDoStreamOutputChunk>;
 export type AsyncIterableReadableStream<T> = ReadableStream<T> & {
@@ -8,6 +8,51 @@ export type AsyncIterableReadableStream<T> = ReadableStream<T> & {
         next(): Promise<IteratorResult<T>>;
     };
 };
+export interface TextContent {
+    type: "text";
+    text: string;
+}
+export interface ImageUrlContent {
+    type: "image_url";
+    image_url: {
+        url: string;
+        detail?: "auto" | "low" | "high";
+    };
+}
+export interface DocUrlContent {
+    type: "doc_url";
+    doc_url: string[];
+    file_parsing_strategy?: "auto" | "text_only";
+}
+export interface VideoUrlContent {
+    type: "video_url";
+    video_url: {
+        url: string;
+    };
+}
+export interface AudioUrlContent {
+    type: "audio_url";
+    audio_url: {
+        url: string;
+    };
+}
+export interface VideoContent {
+    type: "video";
+    video: string[];
+}
+export interface AudioContent {
+    type: "audio";
+    audio: string[];
+}
+export type MultiModalContent = TextContent | ImageUrlContent | DocUrlContent | VideoUrlContent | AudioUrlContent | VideoContent | AudioContent;
+export type SupportedContentType = MultiModalContent["type"];
+export declare function filterContentByTypes<T extends SupportedContentType>(content: MultiModalContent[], supportedTypes: T[]): Extract<MultiModalContent, {
+    type: T;
+}>[];
+export declare function hasContentType(content: string | MultiModalContent[], type: SupportedContentType): boolean;
+export declare function extractContentByType<T extends SupportedContentType>(content: string | MultiModalContent[], type: T): Extract<MultiModalContent, {
+    type: T;
+}>[];
 export interface ModelRequestOptions {
     async?: boolean;
     model: ModelName | string;
@@ -25,15 +70,7 @@ export interface ModelRequestOptions {
     }>;
     messages?: Array<{
         role: string;
-        content: string | Array<{
-            type: "text";
-            text: string;
-        } | {
-            type: "image_url";
-            image_url: {
-                url: string;
-            };
-        }>;
+        content: string | MultiModalContent[];
     }>;
     knowledge_base?: Array<{
         knowledge_base_id: string;
@@ -44,6 +81,7 @@ export interface ModelRequestOptions {
     db_base?: any[];
     enable_thinking?: boolean;
     enable_search?: boolean;
+    parameters?: any;
 }
 export type ModelReq = <T extends ModelRequestOptions>(props: T) => T["stream"] extends true ? Promise<ReadableStream<Uint8Array>> : Promise<Object>;
 export interface IAgentReqInput {
@@ -155,6 +193,7 @@ export interface IAgent {
     getAgentInfo?(): Promise<GetAgentInfoResponse>;
     getConversations?(): Promise<GetConversationsResponse>;
     queryTasks?(task_id?: string): Promise<QueryTasksResponse>;
+    uploadFile?(input: UploadFileInput): Promise<UploadFileResponse>;
 }
 export declare enum IAgentEnv {
     Production = "production",
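The block above centralizes multimodal message parts into one MultiModalContent union, which ModelRequestOptions.messages now reuses (content: string | MultiModalContent[]). A small illustration built directly from those declarations; the deep import path is an assumption:

import type { MultiModalContent } from "@vectorx/ai-sdk/lib/model-type"; // path assumed

const content: MultiModalContent[] = [
    { type: "text", text: "Summarize the attached contract." },
    { type: "doc_url", doc_url: ["https://example.com/contract.pdf"], file_parsing_strategy: "auto" },
    { type: "image_url", image_url: { url: "https://example.com/scan.png", detail: "high" } },
];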
package/lib/model-type.js
CHANGED
@@ -1,6 +1,22 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.IAgentEnv = void 0;
+exports.filterContentByTypes = filterContentByTypes;
+exports.hasContentType = hasContentType;
+exports.extractContentByType = extractContentByType;
+function filterContentByTypes(content, supportedTypes) {
+    return content.filter((item) => supportedTypes.includes(item.type));
+}
+function hasContentType(content, type) {
+    if (typeof content === "string")
+        return false;
+    return content.some((item) => item.type === type);
+}
+function extractContentByType(content, type) {
+    if (typeof content === "string")
+        return [];
+    return content.filter((item) => item.type === type);
+}
 var IAgentEnv;
 (function (IAgentEnv) {
     IAgentEnv["Production"] = "production";
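Usage of the three helpers, matching the implementations above; plain string content is deliberately treated as having no typed parts:

import { extractContentByType, filterContentByTypes, hasContentType } from "@vectorx/ai-sdk/lib/model-type"; // path assumed
import type { MultiModalContent } from "@vectorx/ai-sdk/lib/model-type";

declare const content: MultiModalContent[]; // e.g. the text + doc_url + image_url array sketched earlier

hasContentType(content, "doc_url");                 // true
extractContentByType(content, "text");              // only the TextContent parts
filterContentByTypes(content, ["text", "doc_url"]); // drops the image_url part
hasContentType("plain string content", "text");     // false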
package/lib/models/QwenDocTurbo/index.d.ts
ADDED
@@ -0,0 +1,84 @@
+import type { IAbstractRequest } from "@vectorx/ai-types";
+import type { DoGenerateOutput, DoStreamOutput, DocUrlContent, ModelRequestOptions, ReqOptions, TextContent } from "../../model-type";
+import type { TokenManager } from "../../tokenManager";
+import { SimpleChatModel } from "../Chat";
+import type { MultiModalModelName } from "../index";
+export interface QwenDocTurboMessage {
+    role: "system" | "user" | "assistant";
+    content: string | Array<TextContent | DocUrlContent>;
+}
+export interface QwenDocTurboParameters {
+    temperature?: number;
+    top_p?: number;
+    max_tokens?: number;
+    n?: number;
+    stream?: boolean;
+    stop?: string | string[];
+    frequency_penalty?: number;
+    presence_penalty?: number;
+    user?: string;
+    stream_options?: {
+        include_usage?: boolean;
+    };
+    response_format?: {
+        type: "text" | "json_object";
+    };
+    tools?: Array<object>;
+    tool_choice?: string | object;
+    parallel_tool_calls?: boolean;
+}
+export interface QwenDocTurboAPIInput {
+    model: string;
+    input: {
+        messages: QwenDocTurboMessage[];
+    };
+    parameters?: QwenDocTurboParameters;
+}
+export interface QwenDocTurboResponse {
+    request_id?: string;
+    output?: {
+        text?: string;
+        choices?: Array<{
+            finish_reason: string;
+            message: {
+                role: string;
+                content: string;
+            };
+        }>;
+    };
+    usage?: {
+        input_tokens?: number;
+        output_tokens?: number;
+        total_tokens?: number;
+    };
+}
+export interface QwenDocTurboStreamChunk {
+    request_id?: string;
+    output?: {
+        text?: string;
+        choices?: Array<{
+            finish_reason: string;
+            message: {
+                role?: string;
+                content?: string;
+            };
+        }>;
+    };
+    usage?: {
+        input_tokens?: number;
+        output_tokens?: number;
+        total_tokens?: number;
+    };
+}
+export declare class QwenDocTurbo extends SimpleChatModel {
+    static BASE_URL: string;
+    static SUB_URL: string;
+    modelName: MultiModalModelName;
+    constructor(req: IAbstractRequest, baseUrl: string, modelName: MultiModalModelName, tokenManager: TokenManager);
+    protected modelRequest(data: QwenDocTurboAPIInput, options?: ReqOptions): Promise<ReadableStream<Uint8Array> | Promise<unknown>>;
+    protected normalizeResponse(response: QwenDocTurboResponse): DoGenerateOutput;
+    doGenerate(data: ModelRequestOptions, options?: ReqOptions): Promise<DoGenerateOutput>;
+    doStream(data: ModelRequestOptions, options?: ReqOptions): Promise<DoStreamOutput>;
+    private normalizeStreamChunks;
+    private convertToQwenDocTurboRequestOptions;
+}
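For reference, a request body conforming to the QwenDocTurboAPIInput shape declared above (URLs are placeholders; the deep import path is assumed):

import type { QwenDocTurboAPIInput } from "@vectorx/ai-sdk/lib/models/QwenDocTurbo"; // path assumed

const body: QwenDocTurboAPIInput = {
    model: "qwen-doc-turbo",
    input: {
        messages: [
            { role: "system", content: "You are a careful document analyst." },
            {
                role: "user",
                content: [
                    { type: "text", text: "List the key dates in this report." },
                    { type: "doc_url", doc_url: ["https://example.com/report.pdf"] },
                ],
            },
        ],
    },
    parameters: { stream: false, max_tokens: 2000, temperature: 0.7 },
};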
package/lib/models/QwenDocTurbo/index.js
ADDED
@@ -0,0 +1,178 @@
+"use strict";
+var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+    return new (P || (P = Promise))(function (resolve, reject) {
+        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+        step((generator = generator.apply(thisArg, _arguments || [])).next());
+    });
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.QwenDocTurbo = void 0;
+const model_type_1 = require("../../model-type");
+const stream_1 = require("../../stream");
+const utils_1 = require("../../utils");
+const Chat_1 = require("../Chat");
+class QwenDocTurbo extends Chat_1.SimpleChatModel {
+    constructor(req, baseUrl, modelName, tokenManager) {
+        super(req, baseUrl, QwenDocTurbo.SUB_URL, tokenManager);
+        this.modelName = modelName;
+    }
+    modelRequest(data, options) {
+        return __awaiter(this, void 0, void 0, function* () {
+            var _a, _b;
+            const fetchHeaders = yield this.createAuthHeaders(options === null || options === void 0 ? void 0 : options.headers);
+            const isStreaming = (_b = (_a = data.parameters) === null || _a === void 0 ? void 0 : _a.stream) !== null && _b !== void 0 ? _b : false;
+            if (isStreaming) {
+                fetchHeaders["X-DashScope-SSE"] = "enable";
+            }
+            console.log("=== QwenDocTurbo Request ===", {
+                method: "post",
+                headers: Object.assign({}, fetchHeaders),
+                body: JSON.stringify(data),
+                url: `${this.baseUrl}/${this.subUrl}`,
+                stream: isStreaming,
+            });
+            const { data: responseData, header } = (yield this.req.fetch({
+                method: "post",
+                headers: Object.assign({}, fetchHeaders),
+                body: JSON.stringify(data),
+                url: `${this.baseUrl}/${this.subUrl}`,
+                stream: isStreaming,
+            }));
+            return (0, utils_1.handleResponseData)(responseData, header);
+        });
+    }
+    normalizeResponse(response) {
+        var _a, _b, _c, _d, _e;
+        const output = response.output || {};
+        const choice = (_a = output.choices) === null || _a === void 0 ? void 0 : _a[0];
+        const requestId = response.request_id || "";
+        const created = Math.floor(Date.now() / 1000);
+        const content = ((_b = choice === null || choice === void 0 ? void 0 : choice.message) === null || _b === void 0 ? void 0 : _b.content) || output.text || "";
+        return {
+            id: requestId,
+            object: "chat.completion",
+            created: created,
+            model: this.modelName,
+            log_id: requestId,
+            error: "",
+            code: 0,
+            choices: [
+                {
+                    index: 0,
+                    message: {
+                        id: requestId,
+                        role: "assistant",
+                        type: "answer",
+                        content: content,
+                        reasoning_content: "",
+                    },
+                    finish_reason: (choice === null || choice === void 0 ? void 0 : choice.finish_reason) || "stop",
+                },
+            ],
+            usage: {
+                prompt_tokens: ((_c = response.usage) === null || _c === void 0 ? void 0 : _c.input_tokens) || 0,
+                completion_tokens: ((_d = response.usage) === null || _d === void 0 ? void 0 : _d.output_tokens) || 0,
+                knowledge_tokens: 0,
+                reasoning_tokens: 0,
+                total_tokens: ((_e = response.usage) === null || _e === void 0 ? void 0 : _e.total_tokens) || 0,
+            },
+        };
+    }
+    doGenerate(data, options) {
+        return __awaiter(this, void 0, void 0, function* () {
+            const qwenDocTurboData = this.convertToQwenDocTurboRequestOptions(data, false);
+            const res = yield this.modelRequest(qwenDocTurboData, options);
+            return this.normalizeResponse(res);
+        });
+    }
+    doStream(data, options) {
+        return __awaiter(this, void 0, void 0, function* () {
+            const qwenDocTurboData = this.convertToQwenDocTurboRequestOptions(data, true);
+            const _stream = (yield this.modelRequest(qwenDocTurboData, options));
+            const stream = (0, stream_1.toPolyfillReadable)(_stream);
+            const rawStream = (0, stream_1.intoStandardStream)(stream);
+            const standardStream = this.normalizeStreamChunks(rawStream);
+            return (0, stream_1.createAsyncIterable)(standardStream);
+        });
+    }
+    normalizeStreamChunks(stream) {
+        const modelName = this.modelName;
+        let previousContent = "";
+        return stream.pipeThrough(new stream_1.TransformStream({
+            transform(chunk, controller) {
+                var _a, _b, _c, _d, _e;
+                const output = chunk.output || {};
+                const choice = (_a = output.choices) === null || _a === void 0 ? void 0 : _a[0];
+                const requestId = chunk.request_id || "";
+                const created = Math.floor(Date.now() / 1000);
+                const fullContent = ((_b = choice === null || choice === void 0 ? void 0 : choice.message) === null || _b === void 0 ? void 0 : _b.content) || output.text || "";
+                const deltaContent = fullContent.slice(previousContent.length);
+                previousContent = fullContent;
+                if (!deltaContent && (choice === null || choice === void 0 ? void 0 : choice.finish_reason) !== "stop") {
+                    return;
+                }
+                const standardChunk = {
+                    id: requestId,
+                    object: "chat.completion.chunk",
+                    created: created,
+                    model: modelName,
+                    log_id: requestId,
+                    error: "",
+                    code: 0,
+                    choices: [
+                        {
+                            index: 0,
+                            message: {
+                                id: requestId,
+                                role: "assistant",
+                                type: "answer",
+                                content: deltaContent,
+                                reasoning_content: "",
+                            },
+                            finish_reason: (choice === null || choice === void 0 ? void 0 : choice.finish_reason) || null,
+                        },
+                    ],
+                    usage: {
+                        prompt_tokens: ((_c = chunk.usage) === null || _c === void 0 ? void 0 : _c.input_tokens) || 0,
+                        completion_tokens: ((_d = chunk.usage) === null || _d === void 0 ? void 0 : _d.output_tokens) || 0,
+                        knowledge_tokens: 0,
+                        reasoning_tokens: 0,
+                        total_tokens: ((_e = chunk.usage) === null || _e === void 0 ? void 0 : _e.total_tokens) || 0,
+                    },
+                };
+                controller.enqueue(standardChunk);
+            },
+        }));
+    }
+    convertToQwenDocTurboRequestOptions(data, stream) {
+        const clamp = (value, min, max, defaultValue) => value !== undefined ? Math.max(min, Math.min(max, value)) : defaultValue;
+        const messages = (data.messages || []).map((msg) => {
+            var _a;
+            const role = msg.role;
+            if (role === "system") {
+                return {
+                    role,
+                    content: Array.isArray(msg.content) ? ((_a = msg.content.find((c) => c.type === "text")) === null || _a === void 0 ? void 0 : _a.text) || "" : msg.content,
+                };
+            }
+            return {
+                role,
+                content: Array.isArray(msg.content) ? (0, model_type_1.filterContentByTypes)(msg.content, ["text", "doc_url"]) : msg.content,
+            };
+        });
+        const parameters = Object.assign({ stream, max_tokens: clamp(data.max_tokens, 1, 8000, 2000), temperature: clamp(data.temperature, 0, 2, 1.0), top_p: clamp(data.top_p, 0, 1, 1.0), frequency_penalty: clamp(data.frequency_penalty, -2, 2, 0.0), presence_penalty: clamp(data.presence_penalty, -2, 2, 0.0), n: clamp(data.n, 1, 10, 1), incremental_output: false }, ((data === null || data === void 0 ? void 0 : data.parameters) || {}));
+        return {
+            model: this.modelName,
+            input: {
+                messages,
+            },
+            parameters,
+        };
+    }
+}
+exports.QwenDocTurbo = QwenDocTurbo;
+QwenDocTurbo.BASE_URL = "https://dashscope.aliyuncs.com";
+QwenDocTurbo.SUB_URL = "api/v1/services/aigc/text-generation/generation";
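Two implementation details worth noting: convertToQwenDocTurboRequestOptions pins incremental_output: false, so each DashScope SSE chunk carries the full text generated so far, and normalizeStreamChunks recovers per-chunk deltas by slicing off the previously seen prefix (empty deltas are suppressed unless finish_reason is "stop"). A standalone sketch of that reduction:

// Cumulative snapshots in, incremental deltas out; mirrors the transform above.
let previousContent = "";
function toDelta(fullContent: string): string {
    const delta = fullContent.slice(previousContent.length);
    previousContent = fullContent;
    return delta;
}
toDelta("The report");        // "The report"
toDelta("The report covers"); // " covers"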
package/lib/models/QwenVlMax/index.d.ts
CHANGED
@@ -1,18 +1,11 @@
 import type { IAbstractRequest } from "@vectorx/ai-types";
-import type { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions } from "../../model-type";
+import type { DoGenerateOutput, DoStreamOutput, ImageUrlContent, ModelRequestOptions, ReqOptions, TextContent } from "../../model-type";
 import type { TokenManager } from "../../tokenManager";
 import { SimpleChatModel } from "../Chat";
 import type { MultiModalModelName } from "../index";
 export interface QwenVlMaxMessage {
     role: "system" | "user" | "assistant";
-    content: string | Array<{
-        type: "text" | "image_url";
-        text?: string;
-        image_url?: {
-            url: string;
-            detail?: "auto" | "low" | "high";
-        };
-    }>;
+    content: string | Array<TextContent | ImageUrlContent>;
 }
 export interface QwenVlMaxParameters {
     seed?: number;
package/lib/models/QwenVlMax/index.js
CHANGED
@@ -10,6 +10,7 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
 };
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.QwenVlMax = void 0;
+const model_type_1 = require("../../model-type");
 const stream_1 = require("../../stream");
 const utils_1 = require("../../utils");
 const Chat_1 = require("../Chat");
@@ -99,7 +100,7 @@ class QwenVlMax extends Chat_1.SimpleChatModel {
         const clamp = (value, min, max, defaultValue) => value !== undefined ? Math.max(min, Math.min(max, value)) : defaultValue;
         const messages = (data.messages || []).map((msg) => ({
             role: msg.role,
-            content: msg.content,
+            content: Array.isArray(msg.content) ? (0, model_type_1.filterContentByTypes)(msg.content, ["text", "image_url"]) : msg.content,
         }));
         return {
             model: this.modelName,
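Behavioral note on the QwenVlMax change: array content is now narrowed to the part types the vision endpoint accepts instead of being forwarded verbatim, so unsupported parts are dropped silently. Sketch, reusing the shared helper:

import { filterContentByTypes } from "@vectorx/ai-sdk/lib/model-type"; // path assumed
import type { MultiModalContent } from "@vectorx/ai-sdk/lib/model-type";

declare const mixed: MultiModalContent[]; // text + image_url + doc_url parts
const narrowed = filterContentByTypes(mixed, ["text", "image_url"]);
// doc_url, video, and audio parts never reach the qwen-vl-max request body.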
package/lib/models/index.d.ts
CHANGED
@@ -1,4 +1,5 @@
 import { DefaultSimpleModel } from "./Default";
+import { QwenDocTurbo } from "./QwenDocTurbo";
 import { QwenImageModel } from "./QwenImage";
 import { WanxSketchToImageLiteModel } from "./QwenSketchToImage";
 import { QwenStyleRepaintV1Model } from "./QwenStyleRepaintV1";
@@ -17,6 +18,7 @@ export declare enum ModelName {
 export declare enum MultiModalModelName {
     QvqMaxLatest = "qvq-max-latest",
     QwenVlMax = "qwen-vl-max",
+    QwenDocTurbo = "qwen-doc-turbo",
     QwenOmniTurboRealtime = "qwen-omni-turbo-realtime",
     Wanx21T2iPlus = "wanx2.1-t2i-plus",
     Wanx21T2iTurbo = "wanx2.1-t2i-turbo",
@@ -34,6 +36,7 @@ export declare const modelName: {
     "qwen-vl-ocr": string;
     "qvq-max-latest": string;
     "qwen-vl-max": string;
+    "qwen-doc-turbo": string;
     "qwen-omni-turbo-realtime": string;
     "wanx2.1-t2i-plus": string;
     "wanx2.1-t2i-turbo": string;
@@ -44,4 +47,4 @@ export declare const modelName: {
 export declare const isValidModel: (model: ModelName | MultiModalModelName) => model is ModelName | MultiModalModelName;
 export declare const isMultiModalModel: (model: ModelName | MultiModalModelName) => model is MultiModalModelName;
 declare const toolMap: Map<string, Function>;
-export { DefaultSimpleModel, ReActModel, toolMap, WanxSketchToImageLiteModel, QwenImageModel, QwenVlMax, QwenStyleRepaintV1Model, };
+export { DefaultSimpleModel, QwenDocTurbo, ReActModel, toolMap, WanxSketchToImageLiteModel, QwenImageModel, QwenVlMax, QwenStyleRepaintV1Model, };
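With the enum member and modelName entry registered, "qwen-doc-turbo" now passes both guards exported here:

import { MultiModalModelName, isMultiModalModel, isValidModel } from "@vectorx/ai-sdk/lib/models"; // path assumed

isValidModel(MultiModalModelName.QwenDocTurbo);      // true as of 0.4.0
isMultiModalModel(MultiModalModelName.QwenDocTurbo); // true; takes the multimodal dispatch path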
package/lib/models/index.js
CHANGED
@@ -1,8 +1,10 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.QwenStyleRepaintV1Model = exports.QwenVlMax = exports.QwenImageModel = exports.WanxSketchToImageLiteModel = exports.toolMap = exports.ReActModel = exports.DefaultSimpleModel = exports.isMultiModalModel = exports.isValidModel = exports.modelName = exports.MultiModalModelName = exports.ModelName = void 0;
+exports.QwenStyleRepaintV1Model = exports.QwenVlMax = exports.QwenImageModel = exports.WanxSketchToImageLiteModel = exports.toolMap = exports.ReActModel = exports.QwenDocTurbo = exports.DefaultSimpleModel = exports.isMultiModalModel = exports.isValidModel = exports.modelName = exports.MultiModalModelName = exports.ModelName = void 0;
 const Default_1 = require("./Default");
 Object.defineProperty(exports, "DefaultSimpleModel", { enumerable: true, get: function () { return Default_1.DefaultSimpleModel; } });
+const QwenDocTurbo_1 = require("./QwenDocTurbo");
+Object.defineProperty(exports, "QwenDocTurbo", { enumerable: true, get: function () { return QwenDocTurbo_1.QwenDocTurbo; } });
 const QwenImage_1 = require("./QwenImage");
 Object.defineProperty(exports, "QwenImageModel", { enumerable: true, get: function () { return QwenImage_1.QwenImageModel; } });
 const QwenSketchToImage_1 = require("./QwenSketchToImage");
@@ -28,6 +30,7 @@ var MultiModalModelName;
 (function (MultiModalModelName) {
     MultiModalModelName["QvqMaxLatest"] = "qvq-max-latest";
     MultiModalModelName["QwenVlMax"] = "qwen-vl-max";
+    MultiModalModelName["QwenDocTurbo"] = "qwen-doc-turbo";
     MultiModalModelName["QwenOmniTurboRealtime"] = "qwen-omni-turbo-realtime";
     MultiModalModelName["Wanx21T2iPlus"] = "wanx2.1-t2i-plus";
     MultiModalModelName["Wanx21T2iTurbo"] = "wanx2.1-t2i-turbo";
@@ -45,6 +48,7 @@ exports.modelName = {
     [ModelName.QwenVlOcr]: "qwen-vl-ocr",
     [MultiModalModelName.QvqMaxLatest]: "qvq-max-latest",
     [MultiModalModelName.QwenVlMax]: "qwen-vl-max",
+    [MultiModalModelName.QwenDocTurbo]: "qwen-doc-turbo",
     [MultiModalModelName.QwenOmniTurboRealtime]: "qwen-omni-turbo-realtime",
     [MultiModalModelName.Wanx21T2iPlus]: "wanx2.1-t2i-plus",
     [MultiModalModelName.Wanx21T2iTurbo]: "wanx2.1-t2i-turbo",
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@vectorx/ai-sdk",
-  "version": "0.3.0",
+  "version": "0.4.0",
   "description": "Cloud AI SDK",
   "main": "lib/index.js",
   "sideEffects": false,
@@ -22,7 +22,7 @@
   },
   "dependencies": {
     "@mattiasbuelens/web-streams-adapter": "^0.1.0",
-    "@vectorx/ai-types": "0.
+    "@vectorx/ai-types": "0.4.0",
     "langfuse": "^3.38.4",
     "openai": "^4.103.0",
     "text-encoding-shim": "^1.0.5",