@zhin.js/core 1.1.0 → 1.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122)
  1. package/lib/adapter.d.ts +1 -26
  2. package/lib/adapter.d.ts.map +1 -1
  3. package/lib/adapter.js +20 -117
  4. package/lib/adapter.js.map +1 -1
  5. package/lib/built/adapter-process.d.ts +0 -4
  6. package/lib/built/adapter-process.d.ts.map +1 -1
  7. package/lib/built/adapter-process.js +0 -95
  8. package/lib/built/adapter-process.js.map +1 -1
  9. package/lib/built/agent-preset.d.ts +2 -0
  10. package/lib/built/agent-preset.d.ts.map +1 -1
  11. package/lib/built/agent-preset.js +4 -0
  12. package/lib/built/agent-preset.js.map +1 -1
  13. package/lib/built/command.d.ts +4 -0
  14. package/lib/built/command.d.ts.map +1 -1
  15. package/lib/built/command.js +6 -0
  16. package/lib/built/command.js.map +1 -1
  17. package/lib/built/component.d.ts.map +1 -1
  18. package/lib/built/component.js +1 -0
  19. package/lib/built/component.js.map +1 -1
  20. package/lib/built/dispatcher.d.ts.map +1 -1
  21. package/lib/built/dispatcher.js +0 -13
  22. package/lib/built/dispatcher.js.map +1 -1
  23. package/lib/built/message-filter.d.ts +2 -0
  24. package/lib/built/message-filter.d.ts.map +1 -1
  25. package/lib/built/message-filter.js +5 -0
  26. package/lib/built/message-filter.js.map +1 -1
  27. package/lib/built/skill.d.ts +11 -0
  28. package/lib/built/skill.d.ts.map +1 -1
  29. package/lib/built/skill.js +14 -0
  30. package/lib/built/skill.js.map +1 -1
  31. package/lib/built/tool.d.ts +11 -44
  32. package/lib/built/tool.d.ts.map +1 -1
  33. package/lib/built/tool.js +14 -353
  34. package/lib/built/tool.js.map +1 -1
  35. package/lib/plugin.d.ts +1 -25
  36. package/lib/plugin.d.ts.map +1 -1
  37. package/lib/plugin.js +1 -77
  38. package/lib/plugin.js.map +1 -1
  39. package/lib/types.d.ts +0 -25
  40. package/lib/types.d.ts.map +1 -1
  41. package/package.json +10 -7
  42. package/CHANGELOG.md +0 -561
  43. package/REFACTORING_COMPLETE.md +0 -178
  44. package/REFACTORING_STATUS.md +0 -263
  45. package/src/adapter.ts +0 -275
  46. package/src/ai/index.ts +0 -55
  47. package/src/ai/providers/anthropic.ts +0 -379
  48. package/src/ai/providers/base.ts +0 -175
  49. package/src/ai/providers/index.ts +0 -13
  50. package/src/ai/providers/ollama.ts +0 -302
  51. package/src/ai/providers/openai.ts +0 -174
  52. package/src/ai/types.ts +0 -348
  53. package/src/bot.ts +0 -37
  54. package/src/built/adapter-process.ts +0 -177
  55. package/src/built/agent-preset.ts +0 -136
  56. package/src/built/ai-trigger.ts +0 -259
  57. package/src/built/command.ts +0 -108
  58. package/src/built/common-adapter-tools.ts +0 -242
  59. package/src/built/component.ts +0 -130
  60. package/src/built/config.ts +0 -335
  61. package/src/built/cron.ts +0 -156
  62. package/src/built/database.ts +0 -134
  63. package/src/built/dispatcher.ts +0 -496
  64. package/src/built/login-assist.ts +0 -131
  65. package/src/built/message-filter.ts +0 -390
  66. package/src/built/permission.ts +0 -151
  67. package/src/built/schema-feature.ts +0 -190
  68. package/src/built/skill.ts +0 -221
  69. package/src/built/tool.ts +0 -948
  70. package/src/command.ts +0 -87
  71. package/src/component.ts +0 -565
  72. package/src/cron.ts +0 -4
  73. package/src/errors.ts +0 -46
  74. package/src/feature.ts +0 -7
  75. package/src/index.ts +0 -53
  76. package/src/jsx-dev-runtime.ts +0 -2
  77. package/src/jsx-runtime.ts +0 -12
  78. package/src/jsx.ts +0 -135
  79. package/src/message.ts +0 -48
  80. package/src/models/system-log.ts +0 -20
  81. package/src/models/user.ts +0 -15
  82. package/src/notice.ts +0 -98
  83. package/src/plugin.ts +0 -896
  84. package/src/prompt.ts +0 -293
  85. package/src/request.ts +0 -95
  86. package/src/scheduler/index.ts +0 -19
  87. package/src/scheduler/scheduler.ts +0 -372
  88. package/src/scheduler/types.ts +0 -74
  89. package/src/tool-zod.ts +0 -115
  90. package/src/types-generator.ts +0 -78
  91. package/src/types.ts +0 -505
  92. package/src/utils.ts +0 -227
  93. package/tests/adapter.test.ts +0 -638
  94. package/tests/ai/ai-trigger.test.ts +0 -368
  95. package/tests/ai/providers.integration.test.ts +0 -227
  96. package/tests/ai/setup.ts +0 -308
  97. package/tests/ai/tool.test.ts +0 -800
  98. package/tests/bot.test.ts +0 -151
  99. package/tests/command.test.ts +0 -737
  100. package/tests/component-new.test.ts +0 -361
  101. package/tests/config.test.ts +0 -372
  102. package/tests/cron.test.ts +0 -82
  103. package/tests/dispatcher.test.ts +0 -293
  104. package/tests/errors.test.ts +0 -21
  105. package/tests/expression-evaluation.test.ts +0 -258
  106. package/tests/features-builtin.test.ts +0 -191
  107. package/tests/jsx-runtime.test.ts +0 -45
  108. package/tests/jsx.test.ts +0 -319
  109. package/tests/message-filter.test.ts +0 -566
  110. package/tests/message.test.ts +0 -402
  111. package/tests/notice.test.ts +0 -198
  112. package/tests/plugin.test.ts +0 -779
  113. package/tests/prompt.test.ts +0 -78
  114. package/tests/redos-protection.test.ts +0 -198
  115. package/tests/request.test.ts +0 -221
  116. package/tests/schema.test.ts +0 -248
  117. package/tests/skill-feature.test.ts +0 -179
  118. package/tests/test-utils.ts +0 -59
  119. package/tests/tool-feature.test.ts +0 -254
  120. package/tests/types.test.ts +0 -162
  121. package/tests/utils.test.ts +0 -135
  122. package/tsconfig.json +0 -24
@@ -1,302 +0,0 @@
1
- /**
2
- * @zhin.js/ai - Ollama Provider
3
- * 支持本地 Ollama 模型
4
- */
5
-
6
- import { Logger } from '@zhin.js/logger';
7
- import { BaseProvider } from './base.js';
8
- import type {
9
- ProviderConfig,
10
- ChatCompletionRequest,
11
- ChatCompletionResponse,
12
- ChatCompletionChunk,
13
- ChatMessage,
14
- ToolDefinition,
15
- } from '../types.js';
16
-
17
- const logger = new Logger(null, 'Ollama');
18
-
19
- export interface OllamaConfig extends ProviderConfig {
20
- host?: string;
21
- models?: string[];
22
- /** Ollama 上下文窗口大小(token 数),默认 32768。影响多轮对话和技能指令的保持能力 */
23
- num_ctx?: number;
24
- }
25
-
26
- /**
27
- * 转换消息格式
28
- */
29
- function toOllamaMessages(messages: ChatMessage[]): any[] {
30
- return messages.map(msg => {
31
- let content: string;
32
- const images: string[] = [];
33
-
34
- if (typeof msg.content === 'string') {
35
- content = msg.content;
36
- } else {
37
- content = msg.content
38
- .filter(p => p.type === 'text')
39
- .map(p => (p as { type: 'text'; text: string }).text)
40
- .join('');
41
-
42
- for (const part of msg.content) {
43
- if (part.type === 'image_url') {
44
- const url = part.image_url.url;
45
- if (url.startsWith('data:')) {
46
- // 提取 base64 数据
47
- const base64 = url.split(',')[1];
48
- if (base64) images.push(base64);
49
- }
50
- }
51
- }
52
- }
53
-
54
- const result: any = {
55
- role: msg.role === 'tool' ? 'user' : msg.role,
56
- content,
57
- };
58
-
59
- if (images.length > 0) {
60
- result.images = images;
61
- }
62
-
63
- return result;
64
- });
65
- }
66
-
67
- /**
68
- * 转换工具定义
69
- */
70
- function toOllamaTools(tools?: ToolDefinition[]): any[] | undefined {
71
- if (!tools?.length) return undefined;
72
-
73
- return tools.map(tool => ({
74
- type: 'function',
75
- function: {
76
- name: tool.function.name,
77
- description: tool.function.description,
78
- parameters: tool.function.parameters,
79
- },
80
- }));
81
- }
82
-
83
- export class OllamaProvider extends BaseProvider {
84
- name = 'ollama';
85
- models: string[];
86
- contextWindow: number;
87
- capabilities = { vision: true, streaming: true, toolCalling: true, thinking: true };
88
-
89
- private host: string;
90
- private numCtx: number;
91
-
92
- constructor(config: OllamaConfig = {}) {
93
- super(config);
94
- this.host = config.host || config.baseUrl || 'http://localhost:11434';
95
- this.numCtx = config.contextWindow ?? config.num_ctx ?? 32768;
96
- this.contextWindow = this.numCtx;
97
- this.models = config.models?.length ? config.models : [
98
- 'llama3.3',
99
- 'llama3.2',
100
- 'llama3.1',
101
- 'qwen2.5',
102
- 'qwen2.5-coder',
103
- 'deepseek-r1',
104
- 'deepseek-v3',
105
- 'mistral',
106
- 'mixtral',
107
- 'phi4',
108
- 'gemma2',
109
- ];
110
- logger.debug(`初始化完成, host: ${this.host}`);
111
- }
112
-
113
- async chat(request: ChatCompletionRequest): Promise<ChatCompletionResponse> {
114
- const messages = toOllamaMessages(request.messages);
115
-
116
- logger.debug(`请求 ${request.model}, 消息: ${messages.length}`);
117
-
118
- const ollamaRequest: any = {
119
- model: request.model,
120
- messages,
121
- stream: false,
122
- options: {
123
- num_ctx: this.numCtx,
124
- },
125
- };
126
-
127
- // think 参数:控制 qwen3 等模型的思考模式
128
- if (request.think !== undefined) {
129
- ollamaRequest.think = request.think;
130
- }
131
-
132
- if (request.temperature !== undefined) {
133
- ollamaRequest.options.temperature = request.temperature;
134
- }
135
-
136
- if (request.top_p !== undefined) {
137
- ollamaRequest.options.top_p = request.top_p;
138
- }
139
-
140
- if (request.max_tokens !== undefined) {
141
- ollamaRequest.options.num_predict = request.max_tokens;
142
- }
143
-
144
- const tools = toOllamaTools(request.tools);
145
- if (tools) {
146
- ollamaRequest.tools = tools;
147
- }
148
-
149
- const startTime = Date.now();
150
-
151
- const response = await this.fetch<any>(`${this.host}/api/chat`, {
152
- method: 'POST',
153
- json: ollamaRequest,
154
- });
155
-
156
- logger.debug(`响应耗时: ${Date.now() - startTime}ms, 工具调用: ${response.message?.tool_calls?.length || 0}`);
157
-
158
- // 转换响应格式
159
- const toolCalls = response.message?.tool_calls?.map((tc: any, i: number) => ({
160
- id: `call_${i}`,
161
- type: 'function' as const,
162
- function: {
163
- name: tc.function.name,
164
- arguments: JSON.stringify(tc.function.arguments),
165
- },
166
- }));
167
-
168
- return {
169
- id: `ollama-${Date.now()}`,
170
- object: 'chat.completion',
171
- created: Date.now(),
172
- model: response.model,
173
- choices: [{
174
- index: 0,
175
- message: {
176
- role: 'assistant',
177
- content: response.message?.content || '',
178
- tool_calls: toolCalls?.length ? toolCalls : undefined,
179
- },
180
- finish_reason: toolCalls?.length ? 'tool_calls' : 'stop',
181
- }],
182
- usage: {
183
- prompt_tokens: response.prompt_eval_count || 0,
184
- completion_tokens: response.eval_count || 0,
185
- total_tokens: (response.prompt_eval_count || 0) + (response.eval_count || 0),
186
- },
187
- };
188
- }
189
-
190
- async *chatStream(request: ChatCompletionRequest): AsyncIterable<ChatCompletionChunk> {
191
- const messages = toOllamaMessages(request.messages);
192
-
193
- const ollamaRequest: any = {
194
- model: request.model,
195
- messages,
196
- stream: true,
197
- options: {
198
- num_ctx: this.numCtx,
199
- },
200
- };
201
-
202
- if (request.think !== undefined) {
203
- ollamaRequest.think = request.think;
204
- }
205
-
206
- if (request.temperature !== undefined) {
207
- ollamaRequest.options.temperature = request.temperature;
208
- }
209
-
210
- if (request.top_p !== undefined) {
211
- ollamaRequest.options.top_p = request.top_p;
212
- }
213
-
214
- const response = await globalThis.fetch(`${this.host}/api/chat`, {
215
- method: 'POST',
216
- headers: { 'Content-Type': 'application/json' },
217
- body: JSON.stringify(ollamaRequest),
218
- });
219
-
220
- if (!response.ok) {
221
- const error = await response.text();
222
- throw new Error(`Ollama API Error (${response.status}): ${error}`);
223
- }
224
-
225
- if (!response.body) {
226
- throw new Error('Response body is empty');
227
- }
228
-
229
- const reader = response.body.getReader();
230
- const decoder = new TextDecoder();
231
- let buffer = '';
232
- const id = `ollama-${Date.now()}`;
233
-
234
- while (true) {
235
- const { done, value } = await reader.read();
236
- if (done) break;
237
-
238
- buffer += decoder.decode(value, { stream: true });
239
- const lines = buffer.split('\n');
240
- buffer = lines.pop() || '';
241
-
242
- for (const line of lines) {
243
- if (!line.trim()) continue;
244
-
245
- try {
246
- const data = JSON.parse(line);
247
-
248
- yield {
249
- id,
250
- object: 'chat.completion.chunk',
251
- created: Date.now(),
252
- model: data.model || request.model,
253
- choices: [{
254
- index: 0,
255
- delta: data.done
256
- ? {}
257
- : { content: data.message?.content || '' },
258
- finish_reason: data.done ? 'stop' : null,
259
- }],
260
- usage: data.done ? {
261
- prompt_tokens: data.prompt_eval_count || 0,
262
- completion_tokens: data.eval_count || 0,
263
- total_tokens: (data.prompt_eval_count || 0) + (data.eval_count || 0),
264
- } : undefined,
265
- };
266
- } catch {
267
- // 忽略解析错误
268
- }
269
- }
270
- }
271
- }
272
-
273
- async listModels(): Promise<string[]> {
274
- try {
275
- const response = await this.fetch<{ models: { name: string }[] }>(
276
- `${this.host}/api/tags`
277
- );
278
- return response.models.map(m => m.name);
279
- } catch {
280
- return this.models;
281
- }
282
- }
283
-
284
- /**
285
- * 拉取模型
286
- */
287
- async pullModel(model: string): Promise<void> {
288
- await this.fetch(`${this.host}/api/pull`, {
289
- method: 'POST',
290
- json: { name: model },
291
- });
292
- }
293
-
294
- async healthCheck(): Promise<boolean> {
295
- try {
296
- await globalThis.fetch(`${this.host}/api/tags`);
297
- return true;
298
- } catch {
299
- return false;
300
- }
301
- }
302
- }
@@ -1,174 +0,0 @@
1
- /**
2
- * @zhin.js/ai - OpenAI Provider
3
- * 支持 OpenAI API 及兼容接口(DeepSeek、Moonshot 等)
4
- */
5
-
6
- import { BaseProvider } from './base.js';
7
- import type {
8
- ProviderConfig,
9
- ChatCompletionRequest,
10
- ChatCompletionResponse,
11
- ChatCompletionChunk,
12
- } from '../types.js';
13
-
14
- export interface OpenAIConfig extends ProviderConfig {
15
- organization?: string;
16
- }
17
-
18
- export class OpenAIProvider extends BaseProvider {
19
- name = 'openai';
20
- models = [
21
- 'gpt-4o',
22
- 'gpt-4o-mini',
23
- 'gpt-4-turbo',
24
- 'gpt-4',
25
- 'gpt-3.5-turbo',
26
- 'o1',
27
- 'o1-mini',
28
- 'o1-preview',
29
- 'o3-mini',
30
- ];
31
- contextWindow: number;
32
- capabilities = { vision: true, streaming: true, toolCalling: true, thinking: false };
33
-
34
- private baseUrl: string;
35
-
36
- constructor(config: OpenAIConfig = {}) {
37
- super(config);
38
- this.contextWindow = config.contextWindow ?? 128000;
39
- this.baseUrl = config.baseUrl || 'https://api.openai.com/v1';
40
- if (config.models?.length) this.models = config.models;
41
-
42
- if (config.organization) {
43
- this.config.headers = {
44
- ...this.config.headers,
45
- 'OpenAI-Organization': config.organization,
46
- };
47
- }
48
- }
49
-
50
- async chat(request: ChatCompletionRequest): Promise<ChatCompletionResponse> {
51
- return this.fetch<ChatCompletionResponse>(
52
- `${this.baseUrl}/chat/completions`,
53
- {
54
- method: 'POST',
55
- json: {
56
- ...request,
57
- stream: false,
58
- },
59
- }
60
- );
61
- }
62
-
63
- async *chatStream(request: ChatCompletionRequest): AsyncIterable<ChatCompletionChunk> {
64
- const stream = this.fetchStream(
65
- `${this.baseUrl}/chat/completions`,
66
- {
67
- method: 'POST',
68
- json: {
69
- ...request,
70
- stream: true,
71
- stream_options: { include_usage: true },
72
- },
73
- }
74
- );
75
-
76
- for await (const data of stream) {
77
- try {
78
- const chunk = JSON.parse(data) as ChatCompletionChunk;
79
- yield chunk;
80
- } catch {
81
- // 忽略解析错误的行
82
- }
83
- }
84
- }
85
-
86
- async listModels(): Promise<string[]> {
87
- interface ModelList {
88
- data: { id: string }[];
89
- }
90
-
91
- try {
92
- const response = await this.fetch<ModelList>(`${this.baseUrl}/models`);
93
- return response.data
94
- .map((m) => m.id)
95
- .filter((id) => id.includes('gpt') || id.includes('o1') || id.includes('o3'));
96
- } catch {
97
- return this.models;
98
- }
99
- }
100
- }
101
-
102
- /**
103
- * DeepSeek Provider(基于 OpenAI 兼容接口)
104
- */
105
- export class DeepSeekProvider extends OpenAIProvider {
106
- name = 'deepseek';
107
- models = [
108
- 'deepseek-chat',
109
- 'deepseek-coder',
110
- 'deepseek-reasoner',
111
- ];
112
-
113
- constructor(config: ProviderConfig = {}) {
114
- super({
115
- ...config,
116
- baseUrl: config.baseUrl || 'https://api.deepseek.com/v1',
117
- });
118
- if (config.models?.length) this.models = config.models;
119
- }
120
-
121
- async listModels(): Promise<string[]> {
122
- return this.models;
123
- }
124
- }
125
-
126
- /**
127
- * Moonshot Provider(基于 OpenAI 兼容接口)
128
- */
129
- export class MoonshotProvider extends OpenAIProvider {
130
- name = 'moonshot';
131
- models = [
132
- 'moonshot-v1-8k',
133
- 'moonshot-v1-32k',
134
- 'moonshot-v1-128k',
135
- ];
136
-
137
- constructor(config: ProviderConfig = {}) {
138
- super({
139
- ...config,
140
- baseUrl: config.baseUrl || 'https://api.moonshot.cn/v1',
141
- });
142
- if (config.models?.length) this.models = config.models;
143
- }
144
-
145
- async listModels(): Promise<string[]> {
146
- return this.models;
147
- }
148
- }
149
-
150
- /**
151
- * 智谱 AI Provider(基于 OpenAI 兼容接口)
152
- */
153
- export class ZhipuProvider extends OpenAIProvider {
154
- name = 'zhipu';
155
- models = [
156
- 'glm-4-plus',
157
- 'glm-4',
158
- 'glm-4-air',
159
- 'glm-4-flash',
160
- 'glm-4v-plus',
161
- ];
162
-
163
- constructor(config: ProviderConfig = {}) {
164
- super({
165
- ...config,
166
- baseUrl: config.baseUrl || 'https://open.bigmodel.cn/api/paas/v4',
167
- });
168
- if (config.models?.length) this.models = config.models;
169
- }
170
-
171
- async listModels(): Promise<string[]> {
172
- return this.models;
173
- }
174
- }