@vectorx/ai-sdk 0.1.3 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/{types → lib}/agent/index.d.ts +2 -2
- package/{types → lib}/ai.d.ts +7 -8
- package/lib/ai.js +18 -28
- package/{types → lib}/index.d.ts +3 -2
- package/lib/index.js +4 -3
- package/{types/types.d.ts → lib/model-type.d.ts} +24 -15
- package/lib/{types.js → model-type.js} +1 -4
- package/lib/models/Chat.d.ts +14 -0
- package/lib/models/Chat.js +36 -0
- package/lib/models/Default/index.d.ts +11 -0
- package/lib/models/{default.js → Default/index.js} +28 -11
- package/lib/models/QwenImage/index.d.ts +81 -0
- package/lib/models/QwenImage/index.js +208 -0
- package/lib/models/QwenSketchToImage/index.d.ts +35 -0
- package/lib/models/QwenSketchToImage/index.js +155 -0
- package/lib/models/QwenStyleRepaintV1/index.d.ts +114 -0
- package/lib/models/QwenStyleRepaintV1/index.js +213 -0
- package/lib/models/QwenVlMax/index.d.ts +85 -0
- package/lib/models/QwenVlMax/index.js +120 -0
- package/lib/models/index.d.ts +47 -0
- package/lib/models/index.js +40 -4
- package/{types → lib}/models/react.d.ts +3 -2
- package/lib/models/react.js +3 -3
- package/{types → lib}/stream.d.ts +1 -8
- package/lib/tokenManager.d.ts +36 -0
- package/lib/tokenManager.js +89 -0
- package/lib/utils.js +2 -3
- package/package.json +4 -5
- package/lib/models/model-types.js +0 -6
- package/types/models/default.d.ts +0 -13
- package/types/models/index.d.ts +0 -23
- package/types/models/model-types.d.ts +0 -131
- /package/{types → lib}/eventsource_parser/index.d.ts +0 -0
- /package/{types → lib}/eventsource_parser/parse.d.ts +0 -0
- /package/{types → lib}/eventsource_parser/stream.d.ts +0 -0
- /package/{types → lib}/eventsource_parser/types.d.ts +0 -0
- /package/{types → lib}/utils.d.ts +0 -0
|
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.QwenVlMax = void 0;
const stream_1 = require("../../stream");
const utils_1 = require("../../utils");
const Chat_1 = require("../Chat");
// Baseline request parameters used whenever the caller omits a field.
const defaultParameters = {
    max_tokens: 2000,
    temperature: 1.0,
    top_p: 1.0,
    frequency_penalty: 0.0,
    presence_penalty: 0.0,
    stream: false,
    enable_thinking: false,
    enable_search: false,
    n: 1,
};
/**
 * Chat-model adapter for the OpenAI-compatible DashScope endpoint
 * (`compatible-mode/v1/chat/completions`). Converts generic request
 * options into the provider payload, issues the HTTP call through the
 * injected request object, and normalizes the provider response into
 * the SDK's common output shape.
 */
class QwenVlMax extends Chat_1.SimpleChatModel {
    /**
     * @param req          abstract request object used for HTTP calls
     * @param baseUrl      service origin, e.g. QwenVlMax.BASE_URL
     * @param modelName    provider model identifier (e.g. "qwen-vl-max")
     * @param tokenManager supplies auth tokens via createAuthHeaders (in base class)
     */
    constructor(req, baseUrl, modelName, tokenManager) {
        super(req, baseUrl, "compatible-mode/v1/chat/completions", tokenManager);
        this.modelName = modelName;
    }
    /**
     * POSTs the prepared payload to the endpoint. Streams when
     * `data.stream` is truthy; the response is passed through the shared
     * response-error handler before being returned.
     */
    async modelRequest(data, options) {
        const fetchHeaders = await this.createAuthHeaders(options?.headers);
        const { data: responseData, header } = await this.req.fetch({
            method: "post",
            headers: { ...fetchHeaders },
            body: JSON.stringify(data),
            url: `${this.baseUrl}/${this.subUrl}`,
            stream: Boolean(data.stream),
        });
        return (0, utils_1.handleResponseData)(responseData, header);
    }
    /**
     * Maps the provider's completion response onto the SDK's unified
     * output shape. Only the first choice is surfaced; token counters the
     * provider does not report are filled with zeros.
     */
    normalizeResponse(response) {
        const choice = response.choices?.[0];
        const message = choice?.message;
        const usage = response.usage;
        return {
            id: response.id,
            object: response.object,
            created: response.created,
            model: response.model,
            log_id: response.id,
            error: "",
            code: 0,
            choices: [
                {
                    index: choice?.index || 0,
                    message: {
                        id: response.id,
                        role: message?.role || "assistant",
                        type: "answer",
                        content: message?.content || "",
                        reasoning_content: "",
                    },
                    finish_reason: choice?.finish_reason || "stop",
                },
            ],
            usage: {
                prompt_tokens: usage?.prompt_tokens || 0,
                completion_tokens: usage?.completion_tokens || 0,
                knowledge_tokens: 0,
                reasoning_tokens: 0,
                total_tokens: usage?.total_tokens || 0,
            },
        };
    }
    /** Non-streaming completion: one request, one normalized response. */
    async doGenerate(data, options) {
        const payload = { ...this.convertToQwenVlMaxRequestOptions(data), stream: false };
        const res = await this.modelRequest(payload, options);
        return this.normalizeResponse(res);
    }
    /** Streaming completion: wraps the raw byte stream into an async-iterable standard stream. */
    async doStream(data, options) {
        const payload = { ...this.convertToQwenVlMaxRequestOptions(data), stream: true };
        const rawStream = await this.modelRequest(payload, options);
        const polyfilled = (0, stream_1.toPolyfillReadable)(rawStream);
        const standardStream = (0, stream_1.intoStandardStream)(polyfilled);
        return (0, stream_1.createAsyncIterable)(standardStream);
    }
    /**
     * Builds the provider payload: clamps numeric sampling parameters to
     * the endpoint's documented ranges and fills defaults for anything
     * the caller left undefined.
     */
    convertToQwenVlMaxRequestOptions(data) {
        const clamp = (value, min, max, fallback) => value !== undefined ? Math.max(min, Math.min(max, value)) : fallback;
        // Strip messages down to the two fields the endpoint consumes.
        const messages = (data.messages || []).map(({ role, content }) => ({ role, content }));
        return {
            model: this.modelName,
            messages,
            temperature: clamp(data.temperature, 0, 2, defaultParameters.temperature),
            top_p: clamp(data.top_p, 0, 1, defaultParameters.top_p),
            frequency_penalty: clamp(data.frequency_penalty, -2, 2, defaultParameters.frequency_penalty),
            presence_penalty: clamp(data.presence_penalty, -2, 2, defaultParameters.presence_penalty),
            max_tokens: clamp(data.max_tokens, 1, Number.POSITIVE_INFINITY, defaultParameters.max_tokens),
            n: clamp(data.n, 1, Number.POSITIVE_INFINITY, defaultParameters.n),
            stream: data.stream ?? defaultParameters.stream,
            enable_search: data.enable_search ?? defaultParameters.enable_search,
            enable_thinking: data.enable_thinking ?? defaultParameters.enable_thinking,
        };
    }
}
exports.QwenVlMax = QwenVlMax;
QwenVlMax.BASE_URL = "https://dashscope.aliyuncs.com";
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
import { DefaultSimpleModel } from "./Default";
|
|
2
|
+
import { QwenImageModel } from "./QwenImage";
|
|
3
|
+
import { WanxSketchToImageLiteModel } from "./QwenSketchToImage";
|
|
4
|
+
import { QwenStyleRepaintV1Model } from "./QwenStyleRepaintV1";
|
|
5
|
+
import { QwenVlMax } from "./QwenVlMax";
|
|
6
|
+
import { ReActModel } from "./react";
|
|
7
|
+
export declare enum ModelName {
|
|
8
|
+
DeepSeekR1 = "deepseek-r1",
|
|
9
|
+
DeepSeekV3 = "deepseek-v3",
|
|
10
|
+
QwenMax = "qwen-max",
|
|
11
|
+
QwenLong = "qwen-long",
|
|
12
|
+
QwenPlus = "qwen-plus",
|
|
13
|
+
QwenTurbo = "qwen-turbo",
|
|
14
|
+
QwenVlOcr = "qwen-vl-ocr",
|
|
15
|
+
Wanx21T2iTurbo = "wanx2.1-t2i-turbo"
|
|
16
|
+
}
|
|
17
|
+
export declare enum MultiModalModelName {
|
|
18
|
+
QvqMaxLatest = "qvq-max-latest",
|
|
19
|
+
QwenVlMax = "qwen-vl-max",
|
|
20
|
+
QwenOmniTurboRealtime = "qwen-omni-turbo-realtime",
|
|
21
|
+
Wanx21T2iPlus = "wanx2.1-t2i-plus",
|
|
22
|
+
Wanx21T2iTurbo = "wanx2.1-t2i-turbo",
|
|
23
|
+
QwenImage = "qwen-image",
|
|
24
|
+
WanxSketchToImageLite = "wanx-sketch-to-image-lite",
|
|
25
|
+
QwenStyleRepaintV1 = "wanx-style-repaint-v1"
|
|
26
|
+
}
|
|
27
|
+
export declare const modelName: {
|
|
28
|
+
"deepseek-r1": string;
|
|
29
|
+
"deepseek-v3": string;
|
|
30
|
+
"qwen-max": string;
|
|
31
|
+
"qwen-long": string;
|
|
32
|
+
"qwen-plus": string;
|
|
33
|
+
"qwen-turbo": string;
|
|
34
|
+
"qwen-vl-ocr": string;
|
|
35
|
+
"qvq-max-latest": string;
|
|
36
|
+
"qwen-vl-max": string;
|
|
37
|
+
"qwen-omni-turbo-realtime": string;
|
|
38
|
+
"wanx2.1-t2i-plus": string;
|
|
39
|
+
"wanx2.1-t2i-turbo": string;
|
|
40
|
+
"qwen-image": string;
|
|
41
|
+
"wanx-sketch-to-image-lite": string;
|
|
42
|
+
"wanx-style-repaint-v1": string;
|
|
43
|
+
};
|
|
44
|
+
export declare const isValidModel: (model: ModelName | MultiModalModelName) => model is ModelName | MultiModalModelName;
|
|
45
|
+
export declare const isMultiModalModel: (model: ModelName | MultiModalModelName) => model is MultiModalModelName;
|
|
46
|
+
declare const toolMap: Map<string, Function>;
|
|
47
|
+
export { DefaultSimpleModel, ReActModel, toolMap, WanxSketchToImageLiteModel, QwenImageModel, QwenVlMax, QwenStyleRepaintV1Model, };
|
package/lib/models/index.js
CHANGED
|
@@ -1,10 +1,18 @@
|
|
|
1
1
|
"use strict";
|
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
-
exports.toolMap = exports.
|
|
4
|
-
const
|
|
5
|
-
Object.defineProperty(exports, "DefaultSimpleModel", { enumerable: true, get: function () { return
|
|
3
|
+
exports.QwenStyleRepaintV1Model = exports.QwenVlMax = exports.QwenImageModel = exports.WanxSketchToImageLiteModel = exports.toolMap = exports.ReActModel = exports.DefaultSimpleModel = exports.isMultiModalModel = exports.isValidModel = exports.modelName = exports.MultiModalModelName = exports.ModelName = void 0;
|
|
4
|
+
const Default_1 = require("./Default");
|
|
5
|
+
Object.defineProperty(exports, "DefaultSimpleModel", { enumerable: true, get: function () { return Default_1.DefaultSimpleModel; } });
|
|
6
|
+
const QwenImage_1 = require("./QwenImage");
|
|
7
|
+
Object.defineProperty(exports, "QwenImageModel", { enumerable: true, get: function () { return QwenImage_1.QwenImageModel; } });
|
|
8
|
+
const QwenSketchToImage_1 = require("./QwenSketchToImage");
|
|
9
|
+
Object.defineProperty(exports, "WanxSketchToImageLiteModel", { enumerable: true, get: function () { return QwenSketchToImage_1.WanxSketchToImageLiteModel; } });
|
|
10
|
+
const QwenStyleRepaintV1_1 = require("./QwenStyleRepaintV1");
|
|
11
|
+
Object.defineProperty(exports, "QwenStyleRepaintV1Model", { enumerable: true, get: function () { return QwenStyleRepaintV1_1.QwenStyleRepaintV1Model; } });
|
|
12
|
+
const QwenVlMax_1 = require("./QwenVlMax");
|
|
13
|
+
Object.defineProperty(exports, "QwenVlMax", { enumerable: true, get: function () { return QwenVlMax_1.QwenVlMax; } });
|
|
6
14
|
const react_1 = require("./react");
|
|
7
|
-
Object.defineProperty(exports, "
|
|
15
|
+
Object.defineProperty(exports, "ReActModel", { enumerable: true, get: function () { return react_1.ReActModel; } });
|
|
8
16
|
var ModelName;
|
|
9
17
|
(function (ModelName) {
|
|
10
18
|
ModelName["DeepSeekR1"] = "deepseek-r1";
|
|
@@ -16,6 +24,17 @@ var ModelName;
|
|
|
16
24
|
ModelName["QwenVlOcr"] = "qwen-vl-ocr";
|
|
17
25
|
ModelName["Wanx21T2iTurbo"] = "wanx2.1-t2i-turbo";
|
|
18
26
|
})(ModelName || (exports.ModelName = ModelName = {}));
|
|
27
|
+
var MultiModalModelName;
|
|
28
|
+
(function (MultiModalModelName) {
|
|
29
|
+
MultiModalModelName["QvqMaxLatest"] = "qvq-max-latest";
|
|
30
|
+
MultiModalModelName["QwenVlMax"] = "qwen-vl-max";
|
|
31
|
+
MultiModalModelName["QwenOmniTurboRealtime"] = "qwen-omni-turbo-realtime";
|
|
32
|
+
MultiModalModelName["Wanx21T2iPlus"] = "wanx2.1-t2i-plus";
|
|
33
|
+
MultiModalModelName["Wanx21T2iTurbo"] = "wanx2.1-t2i-turbo";
|
|
34
|
+
MultiModalModelName["QwenImage"] = "qwen-image";
|
|
35
|
+
MultiModalModelName["WanxSketchToImageLite"] = "wanx-sketch-to-image-lite";
|
|
36
|
+
MultiModalModelName["QwenStyleRepaintV1"] = "wanx-style-repaint-v1";
|
|
37
|
+
})(MultiModalModelName || (exports.MultiModalModelName = MultiModalModelName = {}));
|
|
19
38
|
exports.modelName = {
|
|
20
39
|
[ModelName.DeepSeekR1]: "deepseek-r1",
|
|
21
40
|
[ModelName.DeepSeekV3]: "deepseek-v3",
|
|
@@ -24,6 +43,23 @@ exports.modelName = {
|
|
|
24
43
|
[ModelName.QwenPlus]: "qwen-plus",
|
|
25
44
|
[ModelName.QwenTurbo]: "qwen-turbo",
|
|
26
45
|
[ModelName.QwenVlOcr]: "qwen-vl-ocr",
|
|
46
|
+
[MultiModalModelName.QvqMaxLatest]: "qvq-max-latest",
|
|
47
|
+
[MultiModalModelName.QwenVlMax]: "qwen-vl-max",
|
|
48
|
+
[MultiModalModelName.QwenOmniTurboRealtime]: "qwen-omni-turbo-realtime",
|
|
49
|
+
[MultiModalModelName.Wanx21T2iPlus]: "wanx2.1-t2i-plus",
|
|
50
|
+
[MultiModalModelName.Wanx21T2iTurbo]: "wanx2.1-t2i-turbo",
|
|
51
|
+
[MultiModalModelName.QwenImage]: "qwen-image",
|
|
52
|
+
[MultiModalModelName.WanxSketchToImageLite]: "wanx-sketch-to-image-lite",
|
|
53
|
+
[MultiModalModelName.QwenStyleRepaintV1]: "wanx-style-repaint-v1",
|
|
27
54
|
};
|
|
55
|
+
const isValidModel = (model) => {
|
|
56
|
+
const modelNames = Object.values(exports.modelName);
|
|
57
|
+
return modelNames.includes(model);
|
|
58
|
+
};
|
|
59
|
+
exports.isValidModel = isValidModel;
|
|
60
|
+
const isMultiModalModel = (model) => {
|
|
61
|
+
return Object.values(MultiModalModelName).includes(model);
|
|
62
|
+
};
|
|
63
|
+
exports.isMultiModalModel = isMultiModalModel;
|
|
28
64
|
const toolMap = new Map();
|
|
29
65
|
exports.toolMap = toolMap;
|
|
@@ -1,5 +1,6 @@
|
|
|
1
|
-
import type { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions
|
|
2
|
-
|
|
1
|
+
import type { DoGenerateOutput, DoStreamOutput, ModelRequestOptions, ReqOptions } from "../model-type";
|
|
2
|
+
import type { SimpleChatModel } from "./Chat";
|
|
3
|
+
export declare class ReActModel {
|
|
3
4
|
private model;
|
|
4
5
|
constructor(model: SimpleChatModel);
|
|
5
6
|
generateText(_input: ModelRequestOptions, options?: ReqOptions): Promise<DoGenerateOutput>;
|
package/lib/models/react.js
CHANGED
|
@@ -9,8 +9,8 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
|
|
|
9
9
|
});
|
|
10
10
|
};
|
|
11
11
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
12
|
-
exports.
|
|
13
|
-
class
|
|
12
|
+
exports.ReActModel = void 0;
|
|
13
|
+
class ReActModel {
|
|
14
14
|
constructor(model) {
|
|
15
15
|
this.model = model;
|
|
16
16
|
}
|
|
@@ -25,4 +25,4 @@ class ReactModel {
|
|
|
25
25
|
});
|
|
26
26
|
}
|
|
27
27
|
}
|
|
28
|
-
exports.
|
|
28
|
+
exports.ReActModel = ReActModel;
|
|
@@ -1,17 +1,11 @@
|
|
|
1
1
|
import { type ParsedEvent } from "./eventsource_parser";
|
|
2
|
-
import type {
|
|
2
|
+
import type { ChatModelMessage, ModelTool, ToolCallAssistantMessage } from "./model-type";
|
|
3
3
|
export type FunctionTool = {
|
|
4
4
|
name: string;
|
|
5
5
|
description: string;
|
|
6
6
|
fn: CallableFunction;
|
|
7
7
|
parameters: object;
|
|
8
8
|
};
|
|
9
|
-
type RawResponse = {
|
|
10
|
-
rawResponse?: any;
|
|
11
|
-
};
|
|
12
|
-
export type DoGenerateOutput = BaseDoGenerateOutput & RawResponse;
|
|
13
|
-
export type DoStreamOutput = AsyncIterableReadableStream<BaseDoStreamOutputChunk & RawResponse>;
|
|
14
|
-
export type ChatModelConstructor = typeof SimpleChatModel;
|
|
15
9
|
export type AsyncIterableReadableStream<T> = ReadableStream<T> & {
|
|
16
10
|
[Symbol.asyncIterator]: () => {
|
|
17
11
|
next(): Promise<IteratorResult<T>>;
|
|
@@ -51,4 +45,3 @@ export declare function createPromise<T = unknown>(): {
|
|
|
51
45
|
};
|
|
52
46
|
export declare function isToolCallAssistantMessage(message: ChatModelMessage): message is ToolCallAssistantMessage;
|
|
53
47
|
export declare function functionToolToModelTool(tool: FunctionTool): ModelTool;
|
|
54
|
-
export {};
|
|
import type { IAbstractRequest } from "@vectorx/ai-types";
/** Successful payload returned by the token-issuing endpoint. */
export interface TokenResponse {
    code: number;
    msg: string;
    data: {
        token: string;
        // Unix timestamp (seconds); the server may return null here.
        expires_at: number | null;
    };
    success: boolean;
}
/** Error payload returned by the token-issuing endpoint. */
export interface TokenError {
    code: string;
    message: string;
    request_id: string;
}
/**
 * Caches a short-lived agent token fetched from `${baseUrl}/agent/token`
 * and transparently refreshes it when it is close to expiry.
 */
export declare class TokenManager {
    private currentToken;
    private tokenExpiresAt;
    private readonly baseUrl;
    private readonly subUrl;
    private readonly defaultExpireSeconds;
    private readonly refreshBufferSeconds;
    private req;
    constructor(req: IAbstractRequest, baseUrl: string);
    /** Full token endpoint URL: `${baseUrl}/${subUrl}`. */
    get fullUrl(): string;
    /** Returns the cached token when still valid; otherwise refreshes first. */
    getValidToken(): Promise<string>;
    private refreshToken;
    /** True when a token is cached and stays valid past the refresh buffer. */
    isTokenValid(): boolean;
    /** Seconds until the cached token expires; 0 when none is cached. */
    getTokenRemainingTime(): number;
    /** Drops the cached token and resets the stored expiry. */
    clearToken(): void;
    /** Diagnostic snapshot of the cached token state. */
    getTokenInfo(): {
        token: string | null;
        expiresAt: number;
        remainingTime: number;
    };
}
|
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.TokenManager = void 0;
/**
 * Caches a short-lived agent token fetched from `${baseUrl}/agent/token`
 * and transparently refreshes it when it is within the refresh buffer of
 * expiring.
 *
 * Fixes over the previous revision:
 *  - `expires_at` may be null in the server response (see TokenResponse);
 *    storing `expires_at || 0` made a freshly issued token look already
 *    expired, triggering a network round-trip on EVERY getValidToken()
 *    call. We now fall back to `now + expireSeconds` (the expiry we asked
 *    the server for).
 *  - Concurrent getValidToken() calls each fired their own refresh
 *    request; refreshes are now single-flight.
 */
class TokenManager {
    /**
     * @param req     abstract request object providing `post`
     * @param baseUrl service origin the token endpoint is appended to
     */
    constructor(req, baseUrl) {
        this.currentToken = null;
        this.tokenExpiresAt = 0;
        this.subUrl = "agent/token";
        this.defaultExpireSeconds = 600;
        this.refreshBufferSeconds = 300;
        // In-flight refresh promise; non-null while a refresh request is pending.
        this.refreshPromise = null;
        this.req = req;
        this.baseUrl = baseUrl;
    }
    /** Full token endpoint URL: `${baseUrl}/${subUrl}`. */
    get fullUrl() {
        return `${this.baseUrl}/${this.subUrl}`;
    }
    /**
     * Returns the cached token when it stays valid past the refresh
     * buffer; otherwise refreshes (sharing one in-flight request between
     * concurrent callers) and returns the new token.
     */
    async getValidToken() {
        const now = Math.floor(Date.now() / 1000);
        if (this.currentToken && this.tokenExpiresAt > now + this.refreshBufferSeconds) {
            return this.currentToken;
        }
        // Single-flight: concurrent callers await the same refresh request.
        if (!this.refreshPromise) {
            this.refreshPromise = this.refreshToken().finally(() => {
                this.refreshPromise = null;
            });
        }
        await this.refreshPromise;
        return this.currentToken;
    }
    /**
     * Fetches a new token valid for `expireInSeconds` (default 600s) and
     * stores it with its expiry. Throws on non-200 responses or when the
     * payload carries no token; errors are logged and rethrown.
     */
    async refreshToken(expireInSeconds) {
        const expireSeconds = expireInSeconds || this.defaultExpireSeconds;
        const url = `${this.fullUrl}?expire_in_seconds=${expireSeconds}`;
        try {
            const response = await this.req.post({
                url,
                headers: {
                    "Content-Type": "application/json",
                },
            });
            if (response.statusCode !== 200) {
                throw new Error(`获取临时 Token 失败: ${response.data?.msg} (${response.data?.code})`);
            }
            if (!response.data?.data?.token) {
                throw new Error("获取临时 Token 失败: 响应数据无效");
            }
            this.currentToken = response.data.data.token;
            // expires_at may be null: fall back to the expiry we requested so
            // the fresh token is not treated as already expired.
            this.tokenExpiresAt = response.data.data.expires_at ||
                (Math.floor(Date.now() / 1000) + expireSeconds);
        }
        catch (error) {
            console.error("刷新临时 Token 失败:", error);
            throw error;
        }
    }
    /** True when a token is cached and stays valid past the refresh buffer. */
    isTokenValid() {
        if (!this.currentToken) {
            return false;
        }
        const now = Math.floor(Date.now() / 1000);
        return this.tokenExpiresAt > now + this.refreshBufferSeconds;
    }
    /** Seconds until the cached token expires; 0 when none is cached. */
    getTokenRemainingTime() {
        if (!this.currentToken) {
            return 0;
        }
        const now = Math.floor(Date.now() / 1000);
        return Math.max(0, this.tokenExpiresAt - now);
    }
    /** Drops the cached token and resets the stored expiry. */
    clearToken() {
        this.currentToken = null;
        this.tokenExpiresAt = 0;
    }
    /** Diagnostic snapshot of the cached token state. */
    getTokenInfo() {
        return {
            token: this.currentToken,
            expiresAt: this.tokenExpiresAt,
            remainingTime: this.getTokenRemainingTime(),
        };
    }
}
exports.TokenManager = TokenManager;
|
package/lib/utils.js
CHANGED
|
@@ -10,21 +10,20 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
|
|
|
10
10
|
};
|
|
11
11
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
12
12
|
exports.handleResponseData = handleResponseData;
|
|
13
|
-
const GO_TO_AI_TEXT = "请检查调用方式,或前往云开发 AI+ 首页查看文档:https://tcb.cloud.tencent.com/dev#/ai";
|
|
14
13
|
function handleResponseData(responseData, header) {
|
|
15
14
|
return __awaiter(this, void 0, void 0, function* () {
|
|
16
15
|
var _a, _b;
|
|
17
16
|
if (typeof responseData === "object" && responseData && "then" in responseData) {
|
|
18
17
|
const json = (yield responseData);
|
|
19
18
|
if (typeof json === "object" && json && "code" in json && json.code !== 0) {
|
|
20
|
-
throw new Error(
|
|
19
|
+
throw new Error(`ModelRequest 请求出错,错误码:${json.code},错误信息:${json.message}\n${JSON.stringify(json, null, 2)}`);
|
|
21
20
|
}
|
|
22
21
|
return responseData;
|
|
23
22
|
}
|
|
24
23
|
if ((_b = (_a = header === null || header === void 0 ? void 0 : header.get) === null || _a === void 0 ? void 0 : _a.call(header, "content-type")) === null || _b === void 0 ? void 0 : _b.includes("application/json")) {
|
|
25
24
|
const json = yield readableStream2JsonObject(responseData);
|
|
26
25
|
if (typeof json === "object" && json && "code" in json && json.code !== 0) {
|
|
27
|
-
throw new Error(
|
|
26
|
+
throw new Error(`ModelRequest 请求出错,错误码:${json.code},错误信息:${json.message}\n${JSON.stringify(json, null, 2)}`);
|
|
28
27
|
}
|
|
29
28
|
}
|
|
30
29
|
return responseData;
|
package/package.json
CHANGED
|
@@ -1,9 +1,8 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@vectorx/ai-sdk",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.3.0",
|
|
4
4
|
"description": "Cloud AI SDK",
|
|
5
5
|
"main": "lib/index.js",
|
|
6
|
-
"types": "types/index.d.ts",
|
|
7
6
|
"sideEffects": false,
|
|
8
7
|
"files": [
|
|
9
8
|
"lib",
|
|
@@ -22,9 +21,10 @@
|
|
|
22
21
|
"node": ">=18.0.0"
|
|
23
22
|
},
|
|
24
23
|
"dependencies": {
|
|
25
|
-
"@vectorx/ai-types": "0.1.3",
|
|
26
|
-
"@vectorx/agent-runtime": "0.1.3",
|
|
27
24
|
"@mattiasbuelens/web-streams-adapter": "^0.1.0",
|
|
25
|
+
"@vectorx/ai-types": "0.3.0",
|
|
26
|
+
"langfuse": "^3.38.4",
|
|
27
|
+
"openai": "^4.103.0",
|
|
28
28
|
"text-encoding-shim": "^1.0.5",
|
|
29
29
|
"web-streams-polyfill": "^4.1.0",
|
|
30
30
|
"zod": "^3.24.2"
|
|
@@ -36,7 +36,6 @@
|
|
|
36
36
|
"@typescript-eslint/parser": "^7.1.0",
|
|
37
37
|
"eslint": "^8.57.0",
|
|
38
38
|
"jest": "^29.7.0",
|
|
39
|
-
"openai": "^4.103.0",
|
|
40
39
|
"ts-node-dev": "^2.0.0",
|
|
41
40
|
"typescript": "^5.3.3"
|
|
42
41
|
},
|
|
@@ -1,13 +0,0 @@
|
|
|
1
|
-
import type { DoGenerateOutput, DoStreamOutput, ModelReq, ModelRequestOptions, ReqOptions } from "../types";
|
|
2
|
-
import type { ModelName } from "./index";
|
|
3
|
-
export declare abstract class SimpleChatModel {
|
|
4
|
-
abstract doGenerate(data: ModelRequestOptions, options?: ReqOptions): Promise<DoGenerateOutput>;
|
|
5
|
-
abstract doStream(data: ModelRequestOptions, options?: ReqOptions): Promise<DoStreamOutput>;
|
|
6
|
-
}
|
|
7
|
-
export declare class DefaultSimpleModel implements SimpleChatModel {
|
|
8
|
-
private req;
|
|
9
|
-
modelName: ModelName;
|
|
10
|
-
constructor(req: ModelReq, modelName: ModelName);
|
|
11
|
-
doGenerate(data: ModelRequestOptions, options?: ReqOptions): Promise<DoGenerateOutput>;
|
|
12
|
-
doStream(data: ModelRequestOptions, options?: ReqOptions): Promise<DoStreamOutput>;
|
|
13
|
-
}
|
package/types/models/index.d.ts
DELETED
|
@@ -1,23 +0,0 @@
|
|
|
1
|
-
import { DefaultSimpleModel } from "./default";
|
|
2
|
-
import { ReactModel } from "./react";
|
|
3
|
-
export declare enum ModelName {
|
|
4
|
-
DeepSeekR1 = "deepseek-r1",
|
|
5
|
-
DeepSeekV3 = "deepseek-v3",
|
|
6
|
-
QwenMax = "qwen-max",
|
|
7
|
-
QwenLong = "qwen-long",
|
|
8
|
-
QwenPlus = "qwen-plus",
|
|
9
|
-
QwenTurbo = "qwen-turbo",
|
|
10
|
-
QwenVlOcr = "qwen-vl-ocr",
|
|
11
|
-
Wanx21T2iTurbo = "wanx2.1-t2i-turbo"
|
|
12
|
-
}
|
|
13
|
-
export declare const modelName: {
|
|
14
|
-
"deepseek-r1": string;
|
|
15
|
-
"deepseek-v3": string;
|
|
16
|
-
"qwen-max": string;
|
|
17
|
-
"qwen-long": string;
|
|
18
|
-
"qwen-plus": string;
|
|
19
|
-
"qwen-turbo": string;
|
|
20
|
-
"qwen-vl-ocr": string;
|
|
21
|
-
};
|
|
22
|
-
declare const toolMap: Map<string, Function>;
|
|
23
|
-
export { DefaultSimpleModel, ReactModel, toolMap };
|
|
@@ -1,131 +0,0 @@
|
|
|
1
|
-
export declare abstract class SimpleChatModel {
|
|
2
|
-
abstract doGenerate(data: ModelRequestOptions, options?: ReqOptions): Promise<DoGenerateOutput>;
|
|
3
|
-
abstract doStream(data: ModelRequestOptions, options?: ReqOptions): Promise<DoStreamOutput>;
|
|
4
|
-
}
|
|
5
|
-
export interface ModelRequestOptions {
|
|
6
|
-
model: string;
|
|
7
|
-
max_tokens?: number;
|
|
8
|
-
temperature?: number;
|
|
9
|
-
top_p?: number;
|
|
10
|
-
n?: number;
|
|
11
|
-
conversation_id?: string;
|
|
12
|
-
frequency_penalty?: number;
|
|
13
|
-
presence_penalty?: number;
|
|
14
|
-
stream?: boolean;
|
|
15
|
-
platform_tools?: Array<{
|
|
16
|
-
plantform_tool_id: string;
|
|
17
|
-
payload: any;
|
|
18
|
-
}>;
|
|
19
|
-
historys?: Array<{
|
|
20
|
-
role: string;
|
|
21
|
-
content: string;
|
|
22
|
-
}>;
|
|
23
|
-
knowledge_base?: Array<{
|
|
24
|
-
knowledge_base_id: string;
|
|
25
|
-
search_mode?: "vector" | "full_text" | "hybrid";
|
|
26
|
-
limit?: number;
|
|
27
|
-
score_threshold?: number;
|
|
28
|
-
}>;
|
|
29
|
-
db_base?: any[];
|
|
30
|
-
enable_thinking?: boolean;
|
|
31
|
-
enable_search?: boolean;
|
|
32
|
-
}
|
|
33
|
-
type RawResponse = {
|
|
34
|
-
rawResponse?: any;
|
|
35
|
-
};
|
|
36
|
-
export type DoGenerateOutput = BaseDoGenerateOutput & RawResponse;
|
|
37
|
-
export type DoStreamOutput = AsyncIterableReadableStream<BaseDoStreamOutputChunk & RawResponse>;
|
|
38
|
-
export type ChatModelConstructor = typeof SimpleChatModel;
|
|
39
|
-
export type AsyncIterableReadableStream<T> = ReadableStream<T> & {
|
|
40
|
-
[Symbol.asyncIterator]: () => {
|
|
41
|
-
next(): Promise<IteratorResult<T>>;
|
|
42
|
-
};
|
|
43
|
-
};
|
|
44
|
-
export interface IModelReqInput {
|
|
45
|
-
url: string;
|
|
46
|
-
headers?: Record<string, string>;
|
|
47
|
-
data?: Object;
|
|
48
|
-
stream?: boolean;
|
|
49
|
-
timeout?: number;
|
|
50
|
-
}
|
|
51
|
-
export type ModelReq = <T extends IModelReqInput>(props: T) => T["stream"] extends true ? Promise<ReadableStream<Uint8Array>> : Promise<Object>;
|
|
52
|
-
export interface IAgentReqInput {
|
|
53
|
-
url: string;
|
|
54
|
-
method: string;
|
|
55
|
-
headers?: Record<string, string>;
|
|
56
|
-
data?: Object;
|
|
57
|
-
stream?: boolean;
|
|
58
|
-
timeout?: number;
|
|
59
|
-
}
|
|
60
|
-
export interface ReqOptions {
|
|
61
|
-
timeout?: number;
|
|
62
|
-
}
|
|
63
|
-
export type AgentReq = <T extends IAgentReqInput>(props: T) => T["stream"] extends true ? Promise<ReadableStream<Uint8Array>> : Promise<Object>;
|
|
64
|
-
export type UserMessage = {
|
|
65
|
-
role: "user";
|
|
66
|
-
content: string;
|
|
67
|
-
};
|
|
68
|
-
export type SystemMessage = {
|
|
69
|
-
role: "system";
|
|
70
|
-
content: string;
|
|
71
|
-
};
|
|
72
|
-
export type AssistantMessage = PlainAssistantMessage | ToolCallAssistantMessage;
|
|
73
|
-
export type PlainAssistantMessage = {
|
|
74
|
-
role: "assistant";
|
|
75
|
-
content: string;
|
|
76
|
-
};
|
|
77
|
-
export type ToolCallAssistantMessage = {
|
|
78
|
-
role: "assistant";
|
|
79
|
-
tool_calls: Array<ToolCall>;
|
|
80
|
-
content?: string;
|
|
81
|
-
};
|
|
82
|
-
export type ToolMessage = {
|
|
83
|
-
role: "tool";
|
|
84
|
-
tool_call_id: string;
|
|
85
|
-
content: string;
|
|
86
|
-
};
|
|
87
|
-
export type ChatModelMessage = UserMessage | SystemMessage | AssistantMessage | ToolMessage;
|
|
88
|
-
export type FunctionTool = {
|
|
89
|
-
name: string;
|
|
90
|
-
description: string;
|
|
91
|
-
fn: CallableFunction;
|
|
92
|
-
parameters: object;
|
|
93
|
-
};
|
|
94
|
-
export type ModelTool = {
|
|
95
|
-
type: string;
|
|
96
|
-
function: ModelToolFunction;
|
|
97
|
-
};
|
|
98
|
-
export type ModelToolFunction = {
|
|
99
|
-
name: string;
|
|
100
|
-
description: string;
|
|
101
|
-
parameters: object;
|
|
102
|
-
};
|
|
103
|
-
export type ToolCall = {
|
|
104
|
-
id: string;
|
|
105
|
-
type: string;
|
|
106
|
-
function: {
|
|
107
|
-
name: string;
|
|
108
|
-
arguments: string;
|
|
109
|
-
};
|
|
110
|
-
};
|
|
111
|
-
type FinishReason = "tool_calls" | (string & {});
|
|
112
|
-
export type Usage = {
|
|
113
|
-
completion_tokens: number;
|
|
114
|
-
prompt_tokens: number;
|
|
115
|
-
total_tokens: number;
|
|
116
|
-
};
|
|
117
|
-
export interface BaseDoGenerateOutput {
|
|
118
|
-
choices?: Array<{
|
|
119
|
-
finish_reason?: FinishReason;
|
|
120
|
-
message?: ChatModelMessage;
|
|
121
|
-
}>;
|
|
122
|
-
usage?: Usage;
|
|
123
|
-
}
|
|
124
|
-
export interface BaseDoStreamOutputChunk {
|
|
125
|
-
choices?: Array<{
|
|
126
|
-
finish_reason?: FinishReason;
|
|
127
|
-
delta?: ChatModelMessage;
|
|
128
|
-
}>;
|
|
129
|
-
usage?: Usage;
|
|
130
|
-
}
|
|
131
|
-
export {};
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|