@vectorx/ai-sdk 0.0.0-beta-20251112071234

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52) hide show
  1. package/README.md +1 -0
  2. package/lib/agent/index.d.ts +17 -0
  3. package/lib/agent/index.js +69 -0
  4. package/lib/ai.d.ts +16 -0
  5. package/lib/ai.js +90 -0
  6. package/lib/eventsource_parser/index.d.ts +2 -0
  7. package/lib/eventsource_parser/index.js +5 -0
  8. package/lib/eventsource_parser/parse.d.ts +2 -0
  9. package/lib/eventsource_parser/parse.js +124 -0
  10. package/lib/eventsource_parser/stream.d.ts +5 -0
  11. package/lib/eventsource_parser/stream.js +22 -0
  12. package/lib/eventsource_parser/types.d.ts +16 -0
  13. package/lib/eventsource_parser/types.js +2 -0
  14. package/lib/index.d.ts +14 -0
  15. package/lib/index.js +56 -0
  16. package/lib/model-type.d.ts +207 -0
  17. package/lib/model-type.js +24 -0
  18. package/lib/models/Chat.d.ts +14 -0
  19. package/lib/models/Chat.js +36 -0
  20. package/lib/models/Default/index.d.ts +11 -0
  21. package/lib/models/Default/index.js +68 -0
  22. package/lib/models/Qwen25T2iPreview/index.d.ts +76 -0
  23. package/lib/models/Qwen25T2iPreview/index.js +211 -0
  24. package/lib/models/QwenDocTurbo/adapters/DashScope.d.ts +25 -0
  25. package/lib/models/QwenDocTurbo/adapters/DashScope.js +179 -0
  26. package/lib/models/QwenDocTurbo/adapters/OpenAICompat.d.ts +24 -0
  27. package/lib/models/QwenDocTurbo/adapters/OpenAICompat.js +143 -0
  28. package/lib/models/QwenDocTurbo/index.d.ts +16 -0
  29. package/lib/models/QwenDocTurbo/index.js +86 -0
  30. package/lib/models/QwenDocTurbo/types.d.ts +124 -0
  31. package/lib/models/QwenDocTurbo/types.js +2 -0
  32. package/lib/models/QwenImage/index.d.ts +81 -0
  33. package/lib/models/QwenImage/index.js +208 -0
  34. package/lib/models/QwenImageEdit/index.d.ts +77 -0
  35. package/lib/models/QwenImageEdit/index.js +205 -0
  36. package/lib/models/QwenSketchToImage/index.d.ts +35 -0
  37. package/lib/models/QwenSketchToImage/index.js +155 -0
  38. package/lib/models/QwenStyleRepaintV1/index.d.ts +114 -0
  39. package/lib/models/QwenStyleRepaintV1/index.js +213 -0
  40. package/lib/models/QwenVlMax/index.d.ts +78 -0
  41. package/lib/models/QwenVlMax/index.js +121 -0
  42. package/lib/models/index.d.ts +56 -0
  43. package/lib/models/index.js +77 -0
  44. package/lib/models/react.d.ts +8 -0
  45. package/lib/models/react.js +28 -0
  46. package/lib/stream.d.ts +47 -0
  47. package/lib/stream.js +138 -0
  48. package/lib/tokenManager.d.ts +36 -0
  49. package/lib/tokenManager.js +89 -0
  50. package/lib/utils.d.ts +1 -0
  51. package/lib/utils.js +54 -0
  52. package/package.json +49 -0
@@ -0,0 +1,24 @@
1
+ import type { IAbstractRequest } from "@vectorx/ai-types";
2
+ import type { BaseDoStreamOutputChunk, DoGenerateOutput, ModelRequestOptions, ReqOptions } from "../../../model-type";
3
+ import type { TokenManager } from "../../../tokenManager";
4
+ import type { QwenDocTurboApi } from "../types";
5
+ import type { OpenAICompatibleAPIInput, OpenAICompatibleResponse } from "../types";
6
/**
 * QwenDocTurbo protocol adapter targeting the OpenAI-compatible
 * chat-completions endpoint (used when messages carry `fileid://`
 * references — see QwenDocTurbo.createApi).
 *
 * Implements the QwenDocTurboApi contract: build payload → request →
 * normalize (response or stream) into the package's internal shapes.
 */
export declare class OpenAICompatApi implements QwenDocTurboApi {
    private req;
    private baseUrl;
    private subUrl;
    private modelName;
    private tokenManager?;
    /**
     * @param ctx.req          transport used to perform the HTTP request
     * @param ctx.baseUrl      service origin the subUrl is appended to
     * @param ctx.subUrl       endpoint path (OpenAI-compatible route)
     * @param ctx.modelName    model identifier sent as the `model` field
     * @param ctx.tokenManager token source; must be set before any request
     */
    constructor(ctx: {
        req: IAbstractRequest;
        baseUrl: string;
        subUrl: string;
        modelName: string;
        tokenManager?: TokenManager;
    });
    private createAuthHeaders;
    /** Converts generic model request options into an OpenAI-compatible payload. */
    buildPayload(data: ModelRequestOptions, stream: boolean): OpenAICompatibleAPIInput;
    /** POSTs the payload; resolves to a byte stream when streaming, else parsed JSON. */
    request(payload: OpenAICompatibleAPIInput, options?: ReqOptions): Promise<ReadableStream<Uint8Array> | unknown>;
    /** Maps a non-streaming OpenAI-compatible response onto DoGenerateOutput. */
    normalizeResponse(response: OpenAICompatibleResponse): DoGenerateOutput;
    /** Re-shapes SSE delta chunks into BaseDoStreamOutputChunk objects. */
    normalizeStream(_stream: ReadableStream<Uint8Array>): ReadableStream<BaseDoStreamOutputChunk>;
}
@@ -0,0 +1,143 @@
1
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenAICompatApi = void 0;
const stream_1 = require("../../../stream");
const utils_1 = require("../../../utils");
/**
 * QwenDocTurbo adapter for the OpenAI-compatible chat-completions endpoint.
 *
 * Responsibilities:
 *  - build an OpenAI-style chat payload from the generic request options,
 *  - send it (optionally streamed) with a bearer token from the TokenManager,
 *  - normalize both full responses and SSE stream chunks into the package's
 *    internal chat-completion shape.
 *
 * Note: native async/await is used here; the class already depends on
 * TransformStream/ReadableStream, which require an equally modern runtime.
 */
class OpenAICompatApi {
    /**
     * @param ctx.req          transport used to issue the HTTP request
     * @param ctx.baseUrl      service origin, e.g. https://dashscope.aliyuncs.com
     * @param ctx.subUrl       endpoint path appended to baseUrl
     * @param ctx.modelName    model identifier sent as `model`
     * @param ctx.tokenManager token source; required before any request
     */
    constructor(ctx) {
        this.req = ctx.req;
        this.baseUrl = ctx.baseUrl;
        this.subUrl = ctx.subUrl;
        this.modelName = ctx.modelName;
        this.tokenManager = ctx.tokenManager;
    }
    /**
     * Builds JSON + bearer-token headers, merged with any caller headers.
     * @throws Error when no TokenManager was provided at construction.
     */
    async createAuthHeaders(additionalHeaders = {}) {
        if (!this.tokenManager)
            throw new Error("TokenManager is not set");
        const token = await this.tokenManager.getValidToken();
        return Object.assign({ "Content-Type": "application/json", Authorization: `Bearer ${token}` }, additionalHeaders);
    }
    /**
     * Converts generic request options into an OpenAI-compatible payload.
     * Multi-part message content is flattened to a single string: only `text`
     * parts are kept, joined with newlines (other part types are dropped for
     * this endpoint).
     */
    buildPayload(data, stream) {
        const messages = (data.messages || []).map((msg) => {
            let content = "";
            if (typeof msg.content === "string") {
                content = msg.content;
            }
            else if (Array.isArray(msg.content)) {
                content = msg.content
                    .map((part) => ((part === null || part === void 0 ? void 0 : part.type) === "text" ? part.text : ""))
                    .filter(Boolean)
                    .join("\n");
            }
            return { role: msg.role, content };
        });
        return {
            model: this.modelName,
            messages,
            stream,
            // Ask the service to attach token usage to the final stream chunk.
            stream_options: stream ? { include_usage: true } : undefined,
        };
    }
    /**
     * POSTs the payload to `${baseUrl}/${subUrl}`. Resolves to a byte stream
     * when `payload.stream` is true, otherwise to the parsed JSON body.
     */
    async request(payload, options) {
        var _a;
        const headers = await this.createAuthHeaders(options === null || options === void 0 ? void 0 : options.headers);
        const response = await this.req.fetch({
            method: "post",
            headers: Object.assign({}, headers),
            body: JSON.stringify(payload),
            url: `${this.baseUrl}/${this.subUrl}`,
            stream: (_a = payload.stream) !== null && _a !== void 0 ? _a : false,
        });
        const { data, header } = response;
        return (0, utils_1.handleResponseData)(data, header);
    }
    /** Maps a non-streaming OpenAI-compatible response onto DoGenerateOutput. */
    normalizeResponse(response) {
        var _a, _b, _c, _d, _e;
        const choice = (_a = response.choices) === null || _a === void 0 ? void 0 : _a[0];
        const requestId = response.id || "";
        const created = response.created || Math.floor(Date.now() / 1000);
        const content = ((_b = choice === null || choice === void 0 ? void 0 : choice.message) === null || _b === void 0 ? void 0 : _b.content) || "";
        return {
            id: requestId,
            object: "chat.completion",
            created,
            model: this.modelName,
            log_id: requestId,
            error: "",
            code: 0,
            choices: [
                {
                    index: 0,
                    message: { id: requestId, role: "assistant", type: "answer", content, reasoning_content: "" },
                    finish_reason: (choice === null || choice === void 0 ? void 0 : choice.finish_reason) || "stop",
                },
            ],
            usage: {
                prompt_tokens: ((_c = response.usage) === null || _c === void 0 ? void 0 : _c.prompt_tokens) || 0,
                completion_tokens: ((_d = response.usage) === null || _d === void 0 ? void 0 : _d.completion_tokens) || 0,
                knowledge_tokens: 0,
                reasoning_tokens: 0,
                total_tokens: ((_e = response.usage) === null || _e === void 0 ? void 0 : _e.total_tokens) || 0,
            },
        };
    }
    /**
     * Normalizes the SSE byte stream into BaseDoStreamOutputChunk objects,
     * one per meaningful delta.
     */
    normalizeStream(_stream) {
        const modelName = this.modelName;
        const stream = (0, stream_1.toPolyfillReadable)(_stream);
        const raw = (0, stream_1.intoStandardStream)(stream);
        return raw.pipeThrough(new stream_1.TransformStream({
            transform(chunk, controller) {
                var _a, _b, _c, _d, _e;
                const requestId = chunk.id || "";
                const created = chunk.created || Math.floor(Date.now() / 1000);
                const choice = (_a = chunk.choices) === null || _a === void 0 ? void 0 : _a[0];
                const deltaContent = ((_b = choice === null || choice === void 0 ? void 0 : choice.delta) === null || _b === void 0 ? void 0 : _b.content) || "";
                // Skip keep-alive/empty chunks, but NOT the final usage-only chunk:
                // with stream_options.include_usage the service sends token usage in
                // a trailing chunk whose `choices` array is empty. The previous guard
                // dropped that chunk, losing usage accounting for streamed calls.
                if (!deltaContent && !(choice === null || choice === void 0 ? void 0 : choice.finish_reason) && !chunk.usage)
                    return;
                controller.enqueue({
                    id: requestId,
                    object: "chat.completion.chunk",
                    created,
                    model: modelName,
                    log_id: requestId,
                    error: "",
                    code: 0,
                    choices: [
                        {
                            index: (choice === null || choice === void 0 ? void 0 : choice.index) || 0,
                            message: {
                                id: requestId,
                                role: "assistant",
                                type: "answer",
                                content: deltaContent,
                                reasoning_content: "",
                            },
                            finish_reason: (choice === null || choice === void 0 ? void 0 : choice.finish_reason) || null,
                        },
                    ],
                    usage: {
                        prompt_tokens: ((_c = chunk.usage) === null || _c === void 0 ? void 0 : _c.prompt_tokens) || 0,
                        completion_tokens: ((_d = chunk.usage) === null || _d === void 0 ? void 0 : _d.completion_tokens) || 0,
                        knowledge_tokens: 0,
                        reasoning_tokens: 0,
                        total_tokens: ((_e = chunk.usage) === null || _e === void 0 ? void 0 : _e.total_tokens) || 0,
                    },
                });
            },
        }));
    }
}
exports.OpenAICompatApi = OpenAICompatApi;
@@ -0,0 +1,16 @@
1
+ import type { IAbstractRequest } from "@vectorx/ai-types";
2
+ import type { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions } from "../../model-type";
3
+ import type { TokenManager } from "../../tokenManager";
4
+ import { SimpleChatModel } from "../Chat";
5
+ import type { MultiModalModelName } from "../index";
6
/**
 * Chat model for qwen-doc-turbo (document chat).
 *
 * Per request, one of two wire protocols is selected by a private adapter
 * factory (`createApi`), driven by `hasFileIdProtocol`: requests whose
 * messages reference uploaded files route to the OpenAI-compatible endpoint,
 * others use the native DashScope text-generation endpoint.
 */
export declare class QwenDocTurbo extends SimpleChatModel {
    /** Default DashScope service origin. */
    static BASE_URL: string;
    /** Native DashScope text-generation endpoint path. */
    static SUB_URL: string;
    /** OpenAI-compatible chat-completions endpoint path. */
    static OPENAI_COMPATIBLE_SUB_URL: string;
    modelName: MultiModalModelName;
    constructor(req: IAbstractRequest, baseUrl: string, modelName: MultiModalModelName, tokenManager: TokenManager);
    private hasFileIdProtocol;
    private createApi;
    /** Non-streaming completion: build payload, request, normalize. */
    doGenerate(data: ModelRequestOptions, options?: ReqOptions): Promise<DoGenerateOutput>;
    /** Streaming completion: returns an async iterable of normalized chunks. */
    doStream(data: ModelRequestOptions, options?: ReqOptions): Promise<DoStreamOutput>;
}
@@ -0,0 +1,86 @@
1
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.QwenDocTurbo = void 0;
const stream_1 = require("../../stream");
const Chat_1 = require("../Chat");
const DashScope_1 = require("./adapters/DashScope");
const OpenAICompat_1 = require("./adapters/OpenAICompat");
/**
 * Chat model for qwen-doc-turbo (document chat).
 *
 * Routes each request to one of two protocol adapters:
 *  - messages containing a "fileid://" reference in a system message go to
 *    the OpenAI-compatible endpoint (which understands the file-id protocol),
 *  - everything else uses the native DashScope text-generation endpoint.
 */
class QwenDocTurbo extends Chat_1.SimpleChatModel {
    constructor(req, baseUrl, modelName, tokenManager) {
        super(req, baseUrl, QwenDocTurbo.SUB_URL, tokenManager);
        this.modelName = modelName;
    }
    /**
     * True when any system message embeds a "fileid://" reference, either in
     * a plain-string content or in one of its text parts.
     */
    hasFileIdProtocol(messages) {
        if (!messages)
            return false;
        return messages.some((msg) => {
            if (msg.role !== "system")
                return false;
            const content = msg.content;
            if (typeof content === "string") {
                return content.includes("fileid://");
            }
            if (Array.isArray(content)) {
                return content.some((part) => part.type === "text" &&
                    typeof part.text === "string" &&
                    part.text.includes("fileid://"));
            }
            return false;
        });
    }
    /** Builds the adapter matching the detected protocol. */
    createApi(hasFileId) {
        const ctx = {
            req: this.req,
            baseUrl: this.baseUrl,
            modelName: this.modelName,
            tokenManager: this.tokenManager,
        };
        if (hasFileId) {
            return new OpenAICompat_1.OpenAICompatApi(Object.assign({}, ctx, { subUrl: QwenDocTurbo.OPENAI_COMPATIBLE_SUB_URL }));
        }
        return new DashScope_1.DashScopeApi(Object.assign({}, ctx, { subUrl: QwenDocTurbo.SUB_URL }));
    }
    /**
     * Non-streaming completion: build payload, request, normalize.
     * FIX: removed leftover `console.log` debug statements that dumped the
     * full request payload and response — noisy and potentially leaking
     * sensitive document content in a published package.
     */
    async doGenerate(data, options) {
        const api = this.createApi(this.hasFileIdProtocol(data.messages));
        const payload = api.buildPayload(data, false);
        const res = await api.request(payload, options);
        return api.normalizeResponse(res);
    }
    /** Streaming completion: returns an async iterable of normalized chunks. */
    async doStream(data, options) {
        const api = this.createApi(this.hasFileIdProtocol(data.messages));
        const payload = api.buildPayload(data, true);
        const byteStream = (await api.request(payload, options));
        return (0, stream_1.createAsyncIterable)(api.normalizeStream(byteStream));
    }
}
exports.QwenDocTurbo = QwenDocTurbo;
QwenDocTurbo.BASE_URL = "https://dashscope.aliyuncs.com";
QwenDocTurbo.SUB_URL = "api/v1/services/aigc/text-generation/generation";
QwenDocTurbo.OPENAI_COMPATIBLE_SUB_URL = "compatible-mode/v1/chat/completions";
@@ -0,0 +1,124 @@
1
import type { DocUrlContent, TextContent } from "../../model-type";
/** A chat message for the native DashScope qwen-doc-turbo API. */
export interface QwenDocTurboMessage {
    role: "system" | "user" | "assistant";
    content: string | Array<TextContent | DocUrlContent>;
}
/** Sampling/tuning options forwarded in the DashScope `parameters` field. */
export interface QwenDocTurboParameters {
    temperature?: number;
    top_p?: number;
    max_tokens?: number;
    n?: number;
    stream?: boolean;
    stop?: string | string[];
    frequency_penalty?: number;
    presence_penalty?: number;
    user?: string;
    stream_options?: {
        include_usage?: boolean;
    };
    response_format?: {
        type: "text" | "json_object";
    };
    tools?: Array<object>;
    tool_choice?: string | object;
    parallel_tool_calls?: boolean;
    incremental_output?: boolean;
}
/** Request body for the native DashScope text-generation endpoint. */
export interface QwenDocTurboAPIInput {
    model: string;
    input: {
        messages: QwenDocTurboMessage[];
    };
    parameters?: QwenDocTurboParameters;
}
/** Non-streaming response from the native DashScope endpoint. */
export interface QwenDocTurboResponse {
    request_id?: string;
    output?: {
        text?: string;
        choices?: Array<{
            finish_reason: string;
            message: {
                role: string;
                content: string;
            };
        }>;
    };
    usage?: {
        input_tokens?: number;
        output_tokens?: number;
        total_tokens?: number;
    };
}
/** One SSE chunk from the native DashScope streaming endpoint. */
export interface QwenDocTurboStreamChunk {
    request_id?: string;
    output?: {
        text?: string;
        choices?: Array<{
            finish_reason: string;
            message: {
                role?: string;
                content?: string;
            };
        }>;
    };
    usage?: {
        input_tokens?: number;
        output_tokens?: number;
        total_tokens?: number;
    };
}
/** Request body for the OpenAI-compatible chat-completions endpoint. */
export interface OpenAICompatibleAPIInput {
    model: string;
    messages: Array<{
        role: "system" | "user" | "assistant";
        content: string;
    }>;
    stream?: boolean;
    stream_options?: {
        include_usage?: boolean;
    };
}
/** Non-streaming response from the OpenAI-compatible endpoint. */
export interface OpenAICompatibleResponse {
    id?: string;
    object?: string;
    created?: number;
    model?: string;
    choices?: Array<{
        index?: number;
        message?: {
            role?: string;
            content?: string;
        };
        finish_reason?: string | null;
    }>;
    usage?: {
        prompt_tokens?: number;
        completion_tokens?: number;
        total_tokens?: number;
    };
}
/** One SSE delta chunk from the OpenAI-compatible streaming endpoint. */
export interface OpenAICompatibleStreamChunk {
    id?: string;
    object?: string;
    created?: number;
    model?: string;
    choices?: Array<{
        index?: number;
        delta?: {
            role?: string;
            content?: string;
        };
        finish_reason?: string | null;
    }>;
    usage?: {
        prompt_tokens?: number;
        completion_tokens?: number;
        total_tokens?: number;
    };
}
/**
 * Common contract implemented by both protocol adapters (DashScopeApi and
 * OpenAICompatApi): build payload → request → normalize response/stream.
 * NOTE(review): `any`/`unknown` here is deliberately loose so the two
 * adapters can share one interface despite different payload shapes.
 */
export interface QwenDocTurboApi {
    buildPayload(data: any, stream: boolean): unknown;
    request(payload: unknown, options?: any): Promise<ReadableStream<Uint8Array> | unknown>;
    normalizeResponse(res: unknown): any;
    normalizeStream(stream: ReadableStream<Uint8Array>): ReadableStream<any>;
}
@@ -0,0 +1,2 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
@@ -0,0 +1,81 @@
1
import type { IAbstractRequest } from "@vectorx/ai-types";
import type { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions } from "../../model-type";
import { TokenManager } from "../../tokenManager";
import { SimpleChatModel } from "../Chat";
/** Generation options for the Qwen image model. */
export interface QwenImageParameters {
    negative_prompt?: string;
    /** Output resolution as "width*height"; the listed literals are presets. */
    size?: "1664*928" | "1472*1140" | "1328*1328" | "1140*1472" | "928*1664" | string;
    /** Number of images; typed as the literal 1 — only single-image requests are modeled. */
    n?: 1;
    prompt_extend?: boolean;
    watermark?: boolean;
}
/** Request body for the image endpoint; either `messages` or `prompt` carries the text. */
export interface QwenImageAPIInput {
    model: string;
    input: {
        messages?: Array<{
            role: "user";
            content: Array<{
                text: string;
            }>;
        }>;
        prompt?: string;
    };
    parameters?: QwenImageParameters;
}
export type QwenImageRequestOptions = QwenImageAPIInput & {
    parameters?: QwenImageParameters;
};
/**
 * One content item of an image response. The service may return the image
 * under several keys (`image`, `url`, `image_url`, `b64_json`); the index
 * signature keeps the type open for other variants.
 */
export type QwenImageContentItem = {
    image?: string;
    url?: string;
    image_url?: string | {
        url: string;
    };
    b64_json?: string;
    [key: string]: any;
};
/**
 * Raw image-endpoint response. Covers both the synchronous shape (`choices`)
 * and the asynchronous task shape (`task_status`/`task_id`/`task_metric`).
 */
export interface QwenImageAPIResponse {
    async?: boolean;
    output: {
        choices?: Array<{
            finish_reason: string;
            message: {
                role: "assistant" | "user";
                content: QwenImageContentItem[];
            };
        }>;
        task_status?: string;
        task_id?: string;
        task_metric?: {
            TOTAL: number;
            FAILED: number;
            SUCCEEDED: number;
        };
    };
    usage?: {
        width?: number;
        height?: number;
        image_count?: number;
    };
    request_id?: string;
    id?: string;
    model?: string;
    created?: number;
    object?: string;
    code?: number;
    error?: string;
}
/**
 * Image-generation model built on the chat-model base class.
 * Declaration only — see the matching index.js for behavior.
 */
export declare class QwenImageModel extends SimpleChatModel {
    static BASE_URL: string;
    static SUB_SYNTHESIS_URL: string;
    static SUB_GENERATION_URL: string;
    modelName: string;
    constructor(req: IAbstractRequest, baseUrl: string, tokenManager: TokenManager);
    /** Maps a raw API response onto the standard completion output shape. */
    protected normalizeStandardImageCompletion(res: QwenImageAPIResponse, fallbackModel: string): DoGenerateOutput;
    /** Converts generic model request options into the Qwen image input shape. */
    protected coverModelRequestToQwenInput(data: ModelRequestOptions & {
        parameters?: QwenImageParameters;
    }): QwenImageRequestOptions;
    protected modelRequest(data: QwenImageRequestOptions, options?: ReqOptions): Promise<ReadableStream<Uint8Array> | Promise<unknown>>;
    doGenerate(data: ModelRequestOptions, options?: ReqOptions): Promise<DoGenerateOutput>;
    doStream(data: ModelRequestOptions, options?: ReqOptions): Promise<DoStreamOutput>;
}