@kevisual/ai 0.0.10 → 0.0.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@kevisual/ai",
-  "version": "0.0.10",
+  "version": "0.0.12",
   "description": "AI Center Services",
   "main": "index.js",
   "basename": "/root/ai-center-services",
@@ -28,7 +28,7 @@
   ],
   "author": "abearxiong <xiongxiao@xiongxiao.me> (https://www.xiongxiao.me)",
   "license": "MIT",
-  "packageManager": "pnpm@10.14.0",
+  "packageManager": "pnpm@10.23.0",
   "type": "module",
   "publishConfig": {
     "registry": "https://registry.npmjs.org/",
@@ -56,34 +56,34 @@
   "devDependencies": {
     "@kevisual/code-center-module": "0.0.24",
     "@kevisual/mark": "0.0.7",
-    "@kevisual/router": "0.0.23",
+    "@kevisual/router": "0.0.33",
     "@kevisual/types": "^0.0.10",
     "@kevisual/use-config": "^1.0.19",
-    "@types/bun": "^1.2.19",
+    "@types/bun": "^1.3.3",
     "@types/crypto-js": "^4.2.2",
-    "@types/formidable": "^3.4.5",
+    "@types/formidable": "^3.4.6",
     "@types/lodash-es": "^4.17.12",
-    "@types/node": "^24.2.0",
+    "@types/node": "^24.10.1",
     "@vitejs/plugin-basic-ssl": "^2.1.0",
     "cookie": "^1.0.2",
-    "cross-env": "^10.0.0",
+    "cross-env": "^10.1.0",
     "crypto-js": "^4.2.0",
-    "dayjs": "^1.11.13",
-    "dotenv": "^17.2.1",
+    "dayjs": "^1.11.19",
+    "dotenv": "^17.2.3",
     "formidable": "^3.5.4",
-    "ioredis": "^5.7.0",
+    "ioredis": "^5.8.2",
     "json5": "^2.2.3",
     "lodash-es": "^4.17.21",
-    "openai": "5.12.0",
-    "pm2": "^6.0.8",
-    "rimraf": "^6.0.1",
-    "rollup": "^4.46.2",
-    "rollup-plugin-dts": "^6.2.1",
+    "openai": "6.9.1",
+    "pm2": "^6.0.13",
+    "rimraf": "^6.1.2",
+    "rollup": "^4.53.3",
+    "rollup-plugin-dts": "^6.2.3",
     "sequelize": "^6.37.7",
     "tape": "^5.9.0",
-    "tiktoken": "^1.0.21",
-    "typescript": "^5.9.2",
-    "vite": "^7.1.0"
+    "tiktoken": "^1.0.22",
+    "typescript": "^5.9.3",
+    "vite": "^7.2.4"
   },
   "dependencies": {
     "@kevisual/logger": "^0.0.4"
@@ -37,11 +37,11 @@ export class Ollama extends BaseChat {
   async listModels(): Promise<{ models: OllamaModel[] }> {
     const _url = new URL(this.baseURL);
     const tagsURL = new URL('/api/tags', _url);
-    return this.openai.get(tagsURL.toString());
+    return this.get(tagsURL.toString());
   }
   async listRunModels(): Promise<{ models: OllamaModel[] }> {
     const _url = new URL(this.baseURL);
     const tagsURL = new URL('/api/ps', _url);
-    return this.openai.get(tagsURL.toString());
+    return this.get(tagsURL.toString());
   }
 }
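
Both listing endpoints now go through BaseChat's own fetch-based `get` helper (added further down in this diff), which attaches the Authorization header and parses the JSON response. A minimal usage sketch, assuming a local Ollama server and that `Ollama` accepts the usual `BaseChatOptions` fields (both assumptions, not shown in this diff):

// Hypothetical usage of the listing methods (constructor options and URL are assumptions)
const ollama = new Ollama({ baseURL: 'http://localhost:11434', apiKey: '', model: 'llama3' });
const { models } = await ollama.listModels();  // GET http://localhost:11434/api/tags
const running = await ollama.listRunModels();  // GET http://localhost:11434/api/ps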
@@ -31,7 +31,7 @@ export class SiliconFlow extends BaseChat {
     super({ ...(options as BaseChatOptions), baseURL: baseURL });
   }
   async getUsageInfo(): Promise<SiliconFlowUsageResponse> {
-    return this.openai.get('/user/info');
+    return this.get('/user/info');
   }
   async chat(messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[], options?: ChatMessageOptions) {
     const res = await super.chat(messages, options);
@@ -65,6 +65,7 @@ export class BaseChat implements BaseChatInterface, BaseChatUsageInterface {
   prompt_tokens: number;
   total_tokens: number;
   completion_tokens: number;
+  responseText: string;

   constructor(options: BaseChatOptions) {
     this.baseURL = options.baseURL;
@@ -73,47 +74,121 @@ export class BaseChat implements BaseChatInterface, BaseChatUsageInterface {
     // @ts-ignore
     const DEFAULT_IS_BROWSER = getIsBrowser();
     this.isBrowser = options.isBrowser ?? DEFAULT_IS_BROWSER;
-    this.openai = new OpenAI({
-      apiKey: this.apiKey,
-      baseURL: this.baseURL,
-      dangerouslyAllowBrowser: options?.dangerouslyAllowBrowser ?? this.isBrowser,
+    // this.openai = new OpenAI({
+    //   apiKey: this.apiKey,
+    //   baseURL: this.baseURL,
+    //   dangerouslyAllowBrowser: options?.dangerouslyAllowBrowser ?? this.isBrowser,
+    // });
+  }
+  post(url = '', opts: { headers?: Record<string, string>, data?: any } = {}) {
+    let _url = url.startsWith('http') ? url : this.baseURL + url;
+    return fetch(_url, {
+      method: 'POST',
+      ...opts,
+      headers: {
+        'Content-Type': 'application/json',
+        Authorization: `Bearer ${this.apiKey}`,
+        ...opts.headers,
+      },
+      body: opts?.data ? JSON.stringify(opts.data) : undefined,
     });
   }
+  async get<T = any>(url = '', opts: { headers?: Record<string, string> } = {}): Promise<T> {
+    let _url = url.startsWith('http') ? url : this.baseURL + url;
+    return fetch(_url, {
+      method: 'GET',
+      ...opts,
+      headers: {
+        'Content-Type': 'application/json',
+        Authorization: `Bearer ${this.apiKey}`,
+        ...opts.headers,
+      },
+    }).then((res) => res.json());
+  }
   /**
    * Chat
    */
   async chat(messages: ChatMessage[], options?: ChatMessageOptions): Promise<ChatMessageComplete> {
-    const createParams: OpenAI.Chat.Completions.ChatCompletionCreateParams = {
+    const requestBody = {
       model: this.model,
       messages,
       ...options,
       stream: false,
     };
-    const res = (await this.openai.chat.completions.create(createParams)) as ChatMessageComplete;
+
+    const response = await this.post(`${this.baseURL}/chat/completions`, { data: requestBody });
+
+    if (!response.ok) {
+      const errorText = await response.text();
+      throw new Error(`Chat API request failed: ${response.status} ${response.statusText} - ${errorText}`);
+    }
+
+    const res = await response.json() as ChatMessageComplete;
+
     this.prompt_tokens = res.usage?.prompt_tokens ?? 0;
     this.total_tokens = res.usage?.total_tokens ?? 0;
     this.completion_tokens = res.usage?.completion_tokens ?? 0;
+    this.responseText = res.choices[0]?.message?.content || '';
     return res;
   }
   async chatStream(messages: ChatMessage[], options?: ChatMessageOptions) {
-    const createParams: OpenAI.Chat.Completions.ChatCompletionCreateParams = {
+    if (options?.response_format) {
+      throw new Error('response_format is not supported in stream mode');
+    }
+
+    const requestBody = {
       model: this.model,
       messages,
       ...options,
       stream: true,
     };
-    if (createParams.response_format) {
-      throw new Error('response_format is not supported in stream mode');
+
+    const response = await this.post(`${this.baseURL}/chat/completions`, { data: requestBody });
+
+    if (!response.ok) {
+      const errorText = await response.text();
+      throw new Error(`Chat Stream API request failed: ${response.status} ${response.statusText} - ${errorText}`);
     }
-    return this.openai.chat.completions.create(createParams) as unknown as ChatStream;
-  }

-  /**
-   * Test
-   */
-  test() {
-    return this.chat([{ role: 'user', content: 'Hello, world!' }]);
+    const decoder = new TextDecoder();
+    const reader = response.body?.getReader();
+
+    if (!reader) {
+      throw new Error('Response body is not readable');
+    }
+
+    // Create a new ReadableStream that uses the decoder to parse the data
+    const stream = new ReadableStream({
+      async start(controller) {
+        try {
+          while (true) {
+            const { done, value } = await reader.read();
+            if (done) {
+              controller.close();
+              break;
+            }
+            // Check the value type: decode only if it is a Uint8Array, otherwise use it as-is
+            if (typeof value === 'string') {
+              controller.enqueue(value);
+            } else if (value instanceof Uint8Array) {
+              const text = decoder.decode(value, { stream: true });
+              controller.enqueue(text);
+            } else {
+              controller.enqueue(value);
+            }
+          }
+        } catch (error) {
+          controller.error(error);
+        }
+      },
+      cancel() {
+        reader.releaseLock();
+      }
+    });
+
+    return stream as unknown as ChatStream;
   }
+
   /**
    * Get chat usage
    * @returns
@@ -125,6 +200,7 @@ export class BaseChat implements BaseChatInterface, BaseChatUsageInterface {
       completion_tokens: this.completion_tokens,
     };
   }
+
   getHeaders(headers?: Record<string, string>) {
     return {
       'Content-Type': 'application/json',
@@ -139,12 +215,23 @@ export class BaseChat implements BaseChatInterface, BaseChatUsageInterface {
    */
   async generateEmbeddingCore(text: string | string[], options?: EmbeddingMessage): Promise<EmbeddingMessageComplete> {
     const embeddingModel = options?.model || this.model;
-    const res = await this.openai.embeddings.create({
+
+    const requestBody = {
       model: embeddingModel,
       input: text,
       encoding_format: 'float',
       ...options,
-    });
+    };
+
+    const response = await this.post(`${this.baseURL}/embeddings`, { data: requestBody });
+
+    if (!response.ok) {
+      const errorText = await response.text();
+      throw new Error(`Embedding API request failed: ${response.status} ${response.statusText} - ${errorText}`);
+    }
+
+    const res = await response.json() as EmbeddingMessageComplete;
+
     this.prompt_tokens += res.usage.prompt_tokens;
     this.total_tokens += res.usage.total_tokens;
     return res;
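
Taken together, the BaseChat hunks replace the OpenAI SDK with two thin fetch wrappers: `post` returns the raw `Response` (so callers can branch on `response.ok` and read the error body), while `get` resolves relative paths against `baseURL` and parses JSON. A hedged sketch of the resulting call surface; the endpoint and model ids below are placeholders, not values from this diff:

// Illustrative only: any OpenAI-compatible endpoint; values are placeholders
const chat = new BaseChat({
  apiKey: process.env.API_KEY || '',
  baseURL: 'https://api.example.com/v1',
  model: 'some-chat-model',
});

// Non-streaming chat POSTs to `${baseURL}/chat/completions`, throws on !response.ok,
// updates the token counters, and caches the first choice's text on `responseText`.
const res = await chat.chat([{ role: 'user', content: 'Hello, world!' }]);
console.log(chat.responseText, chat.prompt_tokens, chat.completion_tokens);

// Embeddings follow the same pattern against `${baseURL}/embeddings`;
// note the counters accumulate (+=) here rather than being overwritten.
const emb = await chat.generateEmbeddingCore('Hello, world!', { model: 'some-embedding-model' });

One detail worth noting: `chat` passes the absolute `${this.baseURL}/chat/completions` into `post`, and `post` leaves URLs that start with `http` untouched, so the base URL is not prepended twice.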
@@ -2,17 +2,6 @@ import { ChatStream } from './type.ts';

 // export type { BaseChat, BaseChatOptions } from './chat.ts';
 export * from './chat.ts'
-// export {
-//   ChatMessage,
-//   ChatMessageOptions, //
-//   ChatMessageComplete,
-//   ChatMessageStream,
-//   BaseChatInterface,
-//   BaseChatUsageInterface,
-//   ChatStream,
-//   EmbeddingMessage,
-//   EmbeddingMessageComplete,
-// } from './type.ts';
 export * from './type.ts'
 /**
  * for await (const chunk of chatStream) {
@@ -7,8 +7,8 @@ export class SiliconFlowKnowledge extends KnowledgeBase {
   }

   async rerank(data: RerankOptions) {
-    return this.openai.post('/rerank', {
-      body: data,
+    return this.post('/rerank', {
+      data: data,
     });
   }
 }
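
`rerank` now routes through BaseChat's `post`, whose `data` option is JSON-stringified into the request body (the OpenAI SDK's `body` key no longer applies). Since `post` returns the raw fetch `Response`, a caller would still parse it; a sketch, with `RerankOptions` field names assumed from SiliconFlow's documented rerank API rather than shown in this diff:

// Hypothetical call; field names are assumptions
const response = await knowledge.rerank({
  model: 'BAAI/bge-reranker-v2-m3',
  query: 'What is deep learning?',
  documents: ['Deep learning is a branch of machine learning.', 'Unrelated text.'],
});
const ranked = await response.json();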
@@ -1,6 +1,7 @@
 import { SiliconFlowKnowledge } from './knowledge-adapter/siliconflow.ts';
-import { KnowledgeBase, KnowledgeOptions } from './knowledge-adapter/knowledge-base.ts';
-import { RerankOptions } from './knowledge-adapter/siliconflow.ts';
-export { KnowledgeBase, KnowledgeOptions, RerankOptions };
+import { KnowledgeBase, } from './knowledge-adapter/knowledge-base.ts';
+export type { KnowledgeOptions } from './knowledge-adapter/knowledge-base.ts';
+export type { RerankOptions } from './knowledge-adapter/siliconflow.ts';
+export { KnowledgeBase, };

 export { SiliconFlowKnowledge };
@@ -0,0 +1,59 @@
+import { BailianProvider } from '../../provider/index.ts'
+import dotenv from 'dotenv';
+
+dotenv.config();
+import { App } from '@kevisual/router'
+import util from 'node:util';
+const ai = new BailianProvider({
+  apiKey: process.env.BAILIAN_API_KEY || '',
+  model: 'qwen-turbo-latest',
+  baseURL: 'https://dashscope.aliyuncs.com/compatible-mode/v1'
+})
+
+
+// const res = await ai.chat([
+
+//   {
+//     role: 'user',
+//     content: `What does 1+1 equal?`
+//   },
+
+// ],
+// )
+// // console.log('AI Response:', res);
+// const content = res.choices[0].message?.content || ''
+
+// console.log(util.inspect(res, { depth: null }))
+
+// console.log('responseText', ai.responseText)
+
+
+// const res = await ai.chatStream([
+
+//   {
+//     role: 'user',
+//     content: `What does 1+1 equal?`
+//   },
+
+// ],
+// )
+// // console.log('AI Response:', res);
+// export const readStream = async (chatStream) => {
+//   let buffer = '';
+//   for await (const chunk of chatStream) {
+//     // chunk is already a decoded string, so just concatenate it
+//     buffer += chunk;
+//   }
+//   console.log('AI Response:', buffer);
+// };
+
+// await readStream(res);
+
+const embe = await ai.generateEmbeddingCore([
+  '你好,世界!',
+  'Hello, world!',
+], {
+  model: 'text-embedding-v4'
+});
+
+console.log('Embedding Response:', util.inspect(embe, { depth: null }));
@@ -1,5 +1,5 @@
-import { SiliconFlow } from '../..//provider/chat-adapter/siliconflow.ts';
-import { Ollama } from '../..//provider/chat-adapter/ollama.ts';
+import { SiliconFlow } from '../../provider/chat-adapter/siliconflow.ts';
+import { Ollama } from '../../provider/chat-adapter/ollama.ts';
 import dotenv from 'dotenv';

 dotenv.config();
@@ -1,5 +1,5 @@
-import { ModelScope } from '../..//provider/chat-adapter/model-scope.ts';
-import { log } from '../..//logger/index.ts';
+import { ModelScope } from '../../provider/chat-adapter/model-scope.ts';
+import { logger } from '../../modules/logger.ts';
 import util from 'util';
 import { config } from 'dotenv';
 config();
@@ -14,13 +14,13 @@ const chatMessage = [{ role: 'user', content: 'Hello, world! 1 + 1 equals ?' }];

 const main = async () => {
   const res = await chat.test();
-  log.info('test', res);
+  logger.info('test', res);
 };

 main();
 const mainChat = async () => {
   const res = await chat.chat(chatMessage as any);
-  log.info('chat', res);
+  logger.info('chat', res);
 };

 // mainChat();
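
Because `chatStream` now returns a `ReadableStream` of already-decoded text chunks (the raw streamed response body, not the SDK's parsed delta objects), consumers concatenate strings, as both the index-file doc comment and the commented-out `readStream` helper in the new Bailian test do. A minimal consumption sketch; iterating a web `ReadableStream` with `for await` assumes a runtime that supports async iteration on it (Node 18+ and Bun do):

// Drain the decoded text stream returned by chatStream
const stream = await chat.chatStream([{ role: 'user', content: 'Hello!' }]);
let buffer = '';
for await (const chunk of stream as unknown as AsyncIterable<string>) {
  buffer += chunk; // already a decoded string; may still contain raw SSE framing to parse
}
console.log(buffer);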