@kevisual/ai 0.0.11 → 0.0.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/ai-provider-browser.d.ts +102 -20
- package/dist/ai-provider-browser.js +2056 -7512
- package/dist/ai-provider.d.ts +102 -20
- package/dist/ai-provider.js +220 -5676
- package/package.json +16 -16
- package/src/provider/chat-adapter/kimi.ts +10 -0
- package/src/provider/chat-adapter/ollama.ts +2 -2
- package/src/provider/chat-adapter/siliconflow.ts +1 -1
- package/src/provider/chat-adapter/zhipu.ts +10 -0
- package/src/provider/chat.ts +6 -0
- package/src/provider/core/chat.ts +104 -39
- package/src/provider/core/index.ts +0 -11
- package/src/provider/core/utils/index.ts +192 -0
- package/src/provider/knowledge-adapter/siliconflow.ts +2 -2
- package/src/test/aliyun/test.ts +46 -13
- package/src/provider/utils/chunk.ts +0 -86
package/package.json
CHANGED

@@ -1,12 +1,11 @@
 {
   "name": "@kevisual/ai",
-  "version": "0.0.11",
+  "version": "0.0.13",
   "description": "AI Center Services",
   "main": "index.js",
   "basename": "/root/ai-center-services",
   "app": {
     "entry": "dist/app.mjs",
-    "key": "ai-center-services",
     "type": "system-app"
   },
   "files": [

@@ -28,7 +27,7 @@
   ],
   "author": "abearxiong <xiongxiao@xiongxiao.me> (https://www.xiongxiao.me)",
   "license": "MIT",
-  "packageManager": "pnpm@10.
+  "packageManager": "pnpm@10.24.0",
   "type": "module",
   "publishConfig": {
     "registry": "https://registry.npmjs.org/",

@@ -56,36 +55,37 @@
   "devDependencies": {
     "@kevisual/code-center-module": "0.0.24",
     "@kevisual/mark": "0.0.7",
-    "@kevisual/router": "0.0.
+    "@kevisual/router": "0.0.33",
     "@kevisual/types": "^0.0.10",
-    "@kevisual/use-config": "^1.0.
-    "@types/bun": "^1.3.
+    "@kevisual/use-config": "^1.0.21",
+    "@types/bun": "^1.3.3",
     "@types/crypto-js": "^4.2.2",
     "@types/formidable": "^3.4.6",
     "@types/lodash-es": "^4.17.12",
-    "@types/node": "^24.
+    "@types/node": "^24.10.1",
     "@vitejs/plugin-basic-ssl": "^2.1.0",
-    "cookie": "^1.
+    "cookie": "^1.1.1",
     "cross-env": "^10.1.0",
     "crypto-js": "^4.2.0",
-    "dayjs": "^1.11.
+    "dayjs": "^1.11.19",
     "dotenv": "^17.2.3",
     "formidable": "^3.5.4",
     "ioredis": "^5.8.2",
     "json5": "^2.2.3",
     "lodash-es": "^4.17.21",
-    "openai": "6.
-    "pm2": "^6.0.
-    "rimraf": "^6.
-    "rollup": "^4.
-    "rollup-plugin-dts": "^6.
+    "openai": "6.9.1",
+    "pm2": "^6.0.14",
+    "rimraf": "^6.1.2",
+    "rollup": "^4.53.3",
+    "rollup-plugin-dts": "^6.3.0",
     "sequelize": "^6.37.7",
     "tape": "^5.9.0",
     "tiktoken": "^1.0.22",
     "typescript": "^5.9.3",
-    "vite": "^7.
+    "vite": "^7.2.6"
   },
   "dependencies": {
-    "@kevisual/logger": "^0.0.4"
+    "@kevisual/logger": "^0.0.4",
+    "@kevisual/permission": "^0.0.3"
   }
 }

package/src/provider/chat-adapter/kimi.ts
ADDED

@@ -0,0 +1,10 @@
+import { BaseChat, BaseChatOptions } from '../core/chat.ts';
+
+export type KimiOptions = Partial<BaseChatOptions>;
+export class Kimi extends BaseChat {
+  static BASE_URL = 'https://api.moonshot.cn/v1/';
+  constructor(options: KimiOptions) {
+    const baseURL = options.baseURL || Kimi.BASE_URL;
+    super({ ...(options as BaseChatOptions), baseURL: baseURL });
+  }
+}

package/src/provider/chat-adapter/ollama.ts
CHANGED

@@ -37,11 +37,11 @@ export class Ollama extends BaseChat {
   async listModels(): Promise<{ models: OllamaModel[] }> {
     const _url = new URL(this.baseURL);
     const tagsURL = new URL('/api/tags', _url);
-    return this.
+    return this.get(tagsURL.toString());
   }
   async listRunModels(): Promise<{ models: OllamaModel[] }> {
     const _url = new URL(this.baseURL);
     const tagsURL = new URL('/api/ps', _url);
-    return this.
+    return this.get(tagsURL.toString());
   }
 }
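
Both list methods now route through the new BaseChat.get() helper shown later in this diff. A minimal usage sketch against a local Ollama daemon; the constructor options and the localhost endpoint below are assumptions for illustration, not values taken from this diff:

import { Ollama } from './ollama.ts';

// Assumed construction: Ollama extends BaseChat, so it presumably accepts BaseChatOptions.
const ollama = new Ollama({
  baseURL: 'http://localhost:11434', // assumed local Ollama endpoint
  apiKey: '',                        // Ollama typically needs no key
  model: 'llama3',                   // placeholder model name
});

// listModels() resolves '/api/tags' against baseURL and GETs it via BaseChat.get().
const { models } = await ollama.listModels();
console.log(models);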

package/src/provider/chat-adapter/siliconflow.ts
CHANGED

@@ -31,7 +31,7 @@ export class SiliconFlow extends BaseChat {
     super({ ...(options as BaseChatOptions), baseURL: baseURL });
   }
   async getUsageInfo(): Promise<SiliconFlowUsageResponse> {
-    return this.
+    return this.get('/user/info');
   }
   async chat(messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[], options?: ChatMessageOptions) {
     const res = await super.chat(messages, options);

package/src/provider/chat-adapter/zhipu.ts
ADDED

@@ -0,0 +1,10 @@
+import { BaseChat, BaseChatOptions } from '../core/chat.ts';
+
+export type ZhipuOptions = Partial<BaseChatOptions>;
+export class Zhipu extends BaseChat {
+  static BASE_URL = 'https://open.bigmodel.cn/api/paas/v4/';
+  constructor(options: ZhipuOptions) {
+    const baseURL = options.baseURL || Zhipu.BASE_URL;
+    super({ ...(options as BaseChatOptions), baseURL: baseURL });
+  }
+}
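
Both new adapters are thin wrappers that only pin a default baseURL and otherwise defer to BaseChat. A minimal usage sketch; the API keys and model names below are placeholders, not values from this diff:

import { Kimi } from './kimi.ts';
import { Zhipu } from './zhipu.ts';

// Kimi defaults to https://api.moonshot.cn/v1/, Zhipu to https://open.bigmodel.cn/api/paas/v4/.
const kimi = new Kimi({ apiKey: process.env.MOONSHOT_API_KEY ?? '', model: 'moonshot-v1-8k' }); // placeholder model
const zhipu = new Zhipu({ apiKey: process.env.ZHIPU_API_KEY ?? '', model: 'glm-4' });           // placeholder model

// chat() is inherited from BaseChat and POSTs to `${baseURL}/chat/completions`.
const res = await kimi.chat([{ role: 'user', content: '1+1等于多少?' }]);
console.log(kimi.responseText, res.usage);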
package/src/provider/chat.ts
CHANGED

@@ -8,6 +8,8 @@ import { Volces } from './chat-adapter/volces.ts';
 import { DeepSeek } from './chat-adapter/deepseek.ts';
 import { ModelScope } from './chat-adapter/model-scope.ts';
 import { BailianChat } from './chat-adapter/dashscope.ts';
+import { Zhipu } from './chat-adapter/zhipu.ts';
+import { Kimi } from './chat-adapter/kimi.ts';
 
 import { ChatMessage } from './core/type.ts';
 

@@ -18,6 +20,8 @@ export const VolcesProvider = Volces;
 export const DeepSeekProvider = DeepSeek;
 export const ModelScopeProvider = ModelScope;
 export const BailianProvider = BailianChat;
+export const ZhipuProvider = Zhipu;
+export const KimiProvider = Kimi;
 
 export const ChatProviderMap = {
   Ollama: OllamaProvider,

@@ -28,6 +32,8 @@ export const ChatProviderMap = {
   ModelScope: ModelScopeProvider,
   BaseChat: BaseChat,
   Bailian: BailianProvider,
+  Zhipu: ZhipuProvider,
+  Kimi: KimiProvider,
 };
 
 type ProviderManagerConfig = {
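
With the two new entries, an adapter class can be looked up by name from ChatProviderMap. Whether the surrounding ProviderManager does exactly this is not shown in the diff, so treat the sketch below as illustrative; the credentials and model name are placeholders:

import { ChatProviderMap } from './chat.ts';

// Resolve an adapter class by its registered name; 'Zhipu' and 'Kimi' are the new entries.
const ZhipuClass = ChatProviderMap['Zhipu'];
const client = new ZhipuClass({ apiKey: 'sk-placeholder', model: 'glm-4' }); // placeholder credentials
const res = await client.chat([{ role: 'user', content: 'hello' }]);
console.log(res.choices[0]?.message?.content);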

package/src/provider/core/chat.ts
CHANGED

@@ -9,6 +9,7 @@ import type {
   EmbeddingMessage,
   EmbeddingMessageComplete,
 } from './type.ts';
+import { AIUtils } from './utils/index.ts';
 
 export type BaseChatOptions<T = Record<string, any>> = {
   /**

@@ -32,14 +33,7 @@ export type BaseChatOptions<T = Record<string, any>> = {
    */
   stream?: boolean;
 } & T;
-
-  try {
-    // 检查是否存在window对象
-    return typeof window !== 'undefined' && typeof window.document !== 'undefined';
-  } catch (e) {
-    return false;
-  }
-};
+
 
 export class BaseChat implements BaseChatInterface, BaseChatUsageInterface {
   /**
    * 默认baseURL

@@ -53,67 +47,126 @@ export class BaseChat implements BaseChatInterface, BaseChatUsageInterface {
    * 默认apiKey
    */
   apiKey: string;
-  /**
-   * 是否在浏览器中使用
-   */
-  isBrowser: boolean;
-  /**
-   * openai实例
-   */
-  openai: OpenAI;
-
   prompt_tokens: number;
   total_tokens: number;
   completion_tokens: number;
-
+  responseText: string;
+  utils: AIUtils;
   constructor(options: BaseChatOptions) {
     this.baseURL = options.baseURL;
     this.model = options.model;
     this.apiKey = options.apiKey;
-
-
-
-    this.
-
-
-
+    this.utils = new AIUtils();
+  }
+  post(url = '', opts: { headers?: Record<string, string>, data?: any } = {}) {
+    let _url = url.startsWith('http') ? url : this.baseURL + url;
+    return fetch(_url, {
+      method: 'POST',
+      ...opts,
+      headers: {
+        'Content-Type': 'application/json',
+        Authorization: `Bearer ${this.apiKey}`,
+        ...opts.headers,
+      },
+      body: opts?.data ? JSON.stringify(opts.data) : undefined,
     });
   }
+  async get<T = any>(url = '', opts: { headers?: Record<string, string> } = {}): Promise<T> {
+    let _url = url.startsWith('http') ? url : this.baseURL + url;
+    return fetch(_url, {
+      method: 'GET',
+      ...opts,
+      headers: {
+        'Content-Type': 'application/json',
+        Authorization: `Bearer ${this.apiKey}`,
+        ...opts.headers,
+      },
+    }).then((res) => res.json());
+  }
   /**
    * 聊天
    */
   async chat(messages: ChatMessage[], options?: ChatMessageOptions): Promise<ChatMessageComplete> {
-    const
+    const requestBody = {
       model: this.model,
       messages,
       ...options,
       stream: false,
     };
-
+
+    const response = await this.post(`${this.baseURL}/chat/completions`, { data: requestBody });
+
+    if (!response.ok) {
+      const errorText = await response.text();
+      throw new Error(`Chat API request failed: ${response.status} ${response.statusText} - ${errorText}`);
+    }
+
+    const res = await response.json() as ChatMessageComplete;
+
     this.prompt_tokens = res.usage?.prompt_tokens ?? 0;
     this.total_tokens = res.usage?.total_tokens ?? 0;
     this.completion_tokens = res.usage?.completion_tokens ?? 0;
+    this.responseText = res.choices[0]?.message?.content || '';
     return res;
   }
   async chatStream(messages: ChatMessage[], options?: ChatMessageOptions) {
-
+    if (options?.response_format) {
+      throw new Error('response_format is not supported in stream mode');
+    }
+
+    const requestBody = {
       model: this.model,
       messages,
       ...options,
       stream: true,
     };
-
-
+
+    const response = await this.post(`${this.baseURL}/chat/completions`, { data: requestBody });
+
+    if (!response.ok) {
+      const errorText = await response.text();
+      throw new Error(`Chat Stream API request failed: ${response.status} ${response.statusText} - ${errorText}`);
     }
-    return this.openai.chat.completions.create(createParams) as unknown as ChatStream;
-  }
 
-
-
-
-
-
+    const decoder = new TextDecoder();
+    const reader = response.body?.getReader();
+
+    if (!reader) {
+      throw new Error('Response body is not readable');
+    }
+
+    // 创建一个新的 ReadableStream,使用 decoder 解析数据
+    const stream = new ReadableStream({
+      async start(controller) {
+        try {
+          while (true) {
+            const { done, value } = await reader.read();
+            if (done) {
+              controller.close();
+              break;
+            }
+            // 检查 value 类型,如果是 Uint8Array 才解码,否则直接使用
+            if (typeof value === 'string') {
+              controller.enqueue(value);
+            } else if (value instanceof Uint8Array) {
+              const text = decoder.decode(value, { stream: true });
+              controller.enqueue(text);
+            } else {
+              controller.enqueue(value);
+            }
+          }
+        } catch (error) {
+          controller.error(error);
+        }
+      },
+      cancel() {
+        reader.releaseLock();
+      }
+    });
+
+    return stream as unknown as ChatStream;
   }
+
   /**
    * 获取聊天使用情况
    * @returns

@@ -125,6 +178,7 @@ export class BaseChat implements BaseChatInterface, BaseChatUsageInterface {
       completion_tokens: this.completion_tokens,
     };
   }
+
   getHeaders(headers?: Record<string, string>) {
     return {
       'Content-Type': 'application/json',

@@ -139,12 +193,23 @@
    */
   async generateEmbeddingCore(text: string | string[], options?: EmbeddingMessage): Promise<EmbeddingMessageComplete> {
     const embeddingModel = options?.model || this.model;
-
+
+    const requestBody = {
       model: embeddingModel,
       input: text,
       encoding_format: 'float',
       ...options,
-    }
+    };
+
+    const response = await this.post(`${this.baseURL}/embeddings`, { data: requestBody });
+
+    if (!response.ok) {
+      const errorText = await response.text();
+      throw new Error(`Embedding API request failed: ${response.status} ${response.statusText} - ${errorText}`);
+    }
+
+    const res = await response.json() as EmbeddingMessageComplete;
+
     this.prompt_tokens += res.usage.prompt_tokens;
     this.total_tokens += res.usage.total_tokens;
     return res;
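
To make the new fetch-based flow concrete: chat() now POSTs to `${baseURL}/chat/completions` and caches usage plus responseText, while chatStream() wraps the response body in a ReadableStream of decoded text chunks. A minimal consumption sketch, assuming a runtime where ReadableStream is async-iterable (Node 18+/Bun); the endpoint, key, and model below are placeholders:

import { BaseChat } from './chat.ts';

const chat = new BaseChat({
  baseURL: 'https://api.example.com/v1', // placeholder endpoint
  apiKey: 'sk-placeholder',
  model: 'placeholder-model',
});

// Non-streaming: the parsed ChatMessageComplete is returned and usage fields are cached on the instance.
const res = await chat.chat([{ role: 'user', content: 'Hello' }]);
console.log(res.usage, chat.responseText, chat.prompt_tokens, chat.completion_tokens);

// Streaming: iterate the decoded text chunks of the response body as they arrive.
const stream = await chat.chatStream([{ role: 'user', content: 'Hello' }]);
let buffer = '';
for await (const chunk of stream as unknown as AsyncIterable<string>) {
  buffer += chunk;
}
console.log(buffer);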

package/src/provider/core/index.ts
CHANGED

@@ -2,17 +2,6 @@ import { ChatStream } from './type.ts';
 
 // export type { BaseChat, BaseChatOptions } from './chat.ts';
 export * from './chat.ts'
-// export {
-//   ChatMessage,
-//   ChatMessageOptions, //
-//   ChatMessageComplete,
-//   ChatMessageStream,
-//   BaseChatInterface,
-//   BaseChatUsageInterface,
-//   ChatStream,
-//   EmbeddingMessage,
-//   EmbeddingMessageComplete,
-// } from './type.ts';
 export * from './type.ts'
 /**
  * for await (const chunk of chatStream) {

package/src/provider/core/utils/index.ts
ADDED

@@ -0,0 +1,192 @@
+export class AIUtils {
+  /**
+   * 从 Markdown 代码块中提取 JSON
+   * @param str 包含 JSON 的字符串
+   * @returns 解析后的对象或 null
+   */
+  extractJsonFromMarkdown(str: string): any | null {
+    // Try to extract JSON from ```json ... ```
+    const jsonRegex = /```json\s*([\s\S]*?)\s*```/;
+    const match = str.match(jsonRegex);
+    let jsonStr = match && match[1] ? match[1] : str;
+
+    try {
+      return JSON.parse(jsonStr);
+    } catch {
+      return null;
+    }
+  }
+
+  /**
+   * 从 Markdown 代码块中提取代码
+   * @param str Markdown 字符串
+   * @param language 语言类型,不指定则返回所有代码块
+   * @returns 提取的代码字符串或数组
+   */
+  extractCodeFromMarkdown(str: string, language?: string): string | string[] | null {
+    if (language) {
+      const regex = new RegExp(`\`\`\`${language}\\s*([\\s\\S]*?)\\s*\`\`\``, 'g');
+      const matches = str.match(regex);
+      if (!matches) return null;
+      return matches.map(m => m.replace(new RegExp(`\`\`\`${language}\\s*|\\s*\`\`\``, 'g'), '').trim());
+    }
+
+    const regex = /```[\w]*\s*([\s\S]*?)\s*```/g;
+    const matches = [...str.matchAll(regex)];
+    if (matches.length === 0) return null;
+    return matches.map(m => m[1].trim());
+  }
+
+  /**
+   * 清理 AI 响应中的多余空白和格式
+   * @param str 原始字符串
+   * @returns 清理后的字符串
+   */
+  cleanResponse(str: string): string {
+    return str
+      .trim()
+      .replace(/\n{3,}/g, '\n\n') // 多个换行符替换为两个
+      .replace(/[ \t]+$/gm, ''); // 删除行尾空格
+  }
+
+  /**
+   * 从 AI 响应中提取标签
+   * @param str 响应字符串
+   * @returns 标签数组
+   */
+  extractTags(str: string): string[] {
+    const tagPatterns = [
+      /#(\w+)/g, // #tag 格式
+      /\[(\w+)\]/g, // [tag] 格式
+      /tags?:\s*\[([^\]]+)\]/gi, // tags: [...] 格式
+    ];
+
+    const tags = new Set<string>();
+
+    for (const pattern of tagPatterns) {
+      const matches = str.matchAll(pattern);
+      for (const match of matches) {
+        if (match[1]) {
+          const extracted = match[1].split(/[,;]/).map(t => t.trim()).filter(Boolean);
+          extracted.forEach(tag => tags.add(tag));
+        }
+      }
+    }
+
+    return Array.from(tags);
+  }
+
+  /**
+   * 从文本中提取 URL
+   * @param str 文本字符串
+   * @returns URL 数组
+   */
+  extractUrls(str: string): string[] {
+    const urlRegex = /(https?:\/\/[^\s]+)/g;
+    const matches = str.match(urlRegex);
+    return matches || [];
+  }
+
+  /**
+   * 分割长文本为指定 token 数量的块
+   * @param text 原始文本
+   * @param maxTokens 每块最大 token 数(粗略估算:1 token ≈ 4 字符)
+   * @returns 文本块数组
+   */
+  chunkText(text: string, maxTokens: number = 1000): string[] {
+    const chunkSize = maxTokens * 4; // 粗略估算
+    const chunks: string[] = [];
+
+    // 按段落分割
+    const paragraphs = text.split(/\n\n+/);
+    let currentChunk = '';
+
+    for (const paragraph of paragraphs) {
+      if ((currentChunk + paragraph).length > chunkSize && currentChunk) {
+        chunks.push(currentChunk.trim());
+        currentChunk = paragraph;
+      } else {
+        currentChunk += (currentChunk ? '\n\n' : '') + paragraph;
+      }
+    }
+
+    if (currentChunk) {
+      chunks.push(currentChunk.trim());
+    }
+
+    return chunks;
+  }
+
+  /**
+   * 移除 AI 响应中的思考过程(thinking 标签)
+   * @param str 响应字符串
+   * @returns 清理后的字符串
+   */
+  removeThinkingTags(str: string): string {
+    return str
+      .replace(/<thinking>[\s\S]*?<\/thinking>/gi, '')
+      .replace(/\[thinking\][\s\S]*?\[\/thinking\]/gi, '')
+      .trim();
+  }
+
+  /**
+   * 转义特殊字符用于 AI 提示词
+   * @param str 原始字符串
+   * @returns 转义后的字符串
+   */
+  escapeForPrompt(str: string): string {
+    return str
+      .replace(/\\/g, '\\\\')
+      .replace(/`/g, '\\`')
+      .replace(/\$/g, '\\$');
+  }
+
+  /**
+   * 统计文本的大致 token 数量
+   * @param text 文本
+   * @returns 估算的 token 数量
+   */
+  estimateTokens(text: string): number {
+    // 简单估算:中文约 1.5 字符/token,英文约 4 字符/token
+    const chineseChars = (text.match(/[\u4e00-\u9fa5]/g) || []).length;
+    const otherChars = text.length - chineseChars;
+    return Math.ceil(chineseChars / 1.5 + otherChars / 4);
+  }
+
+  /**
+   * 从响应中提取结构化数据(key: value 格式)
+   * @param str 响应字符串
+   * @returns 键值对对象
+   */
+  extractKeyValuePairs(str: string): Record<string, string> {
+    const result: Record<string, string> = {};
+    const lines = str.split('\n');
+
+    for (const line of lines) {
+      const match = line.match(/^([^::]+)[::]\s*(.+)$/);
+      if (match) {
+        const key = match[1].trim();
+        const value = match[2].trim();
+        result[key] = value;
+      }
+    }
+
+    return result;
+  }
+
+  /**
+   * 验证 AI 响应是否完整(检查截断)
+   * @param str 响应字符串
+   * @returns 是否完整
+   */
+  isResponseComplete(str: string): boolean {
+    const incompleteSigns = [
+      /```[\w]*\s*[\s\S]*?(?<!```)$/, // 未闭合的代码块
+      /\{[\s\S]*(?<!\})$/, // 未闭合的 JSON
+      /\[[\s\S]*(?<!\])$/, // 未闭合的数组
+      /\.{3,}$/, // 结尾省略号
+    ];
+
+    return !incompleteSigns.some(pattern => pattern.test(str.trim()));
+  }
+}
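
These helpers are also attached to every BaseChat instance as chat.utils (see the constructor change above). A small sketch of typical post-processing on a model reply; the reply string here is made up for illustration:

import { AIUtils } from './utils/index.ts';

const utils = new AIUtils();
const reply = '思考过程略。\n```json\n{ "answer": 2, "tags": ["math"] }\n```\n';

const parsed = utils.extractJsonFromMarkdown(reply); // -> { answer: 2, tags: ['math'] }
const cleaned = utils.cleanResponse(reply);          // trims and collapses extra blank lines
const tokens = utils.estimateTokens('你好, world');   // rough estimate: ~1.5 chars/token for CJK, ~4 otherwise
const chunks = utils.chunkText(cleaned, 500);        // paragraph-aware chunks of roughly 500 tokens
console.log(parsed, tokens, chunks.length);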
package/src/test/aliyun/test.ts
CHANGED

@@ -11,16 +11,49 @@ const ai = new BailianProvider({
 })
 
 
-const res = await ai.chat([
-
-
-
-
-
-
-],
-)
-console.log('AI Response:', res);
-const content = res.choices[0].message?.content || ''
-
-console.log(util.inspect(res, { depth: null }))
+// const res = await ai.chat([
+
+//   {
+//     role: 'user',
+//     content: `1+1等于多少?`
+//   },
+
+// ],
+// )
+// // console.log('AI Response:', res);
+// const content = res.choices[0].message?.content || ''
+
+// console.log(util.inspect(res, { depth: null }))
+
+// console.log('responseText', ai.responseText)
+
+
+// const res = await ai.chatStream([
+
+//   {
+//     role: 'user',
+//     content: `1+1等于多少?`
+//   },
+
+// ],
+// )
+// // console.log('AI Response:', res);
+// export const readStream = async (chatStream) => {
+//   let buffer = '';
+//   for await (const chunk of chatStream) {
+//     // chunk 已经是解码后的字符串,直接拼接即可
+//     buffer += chunk;
+//   }
+//   console.log('AI Response:', buffer);
+// };
+
+// await readStream(res);
+
+const embe = await ai.generateEmbeddingCore([
+  '你好,世界!',
+  'Hello, world!',
+], {
+  model: 'text-embedding-v4'
+});
+
+console.log('Embedding Response:', util.inspect(embe, { depth: null }));