@zhin.js/core 1.0.57 → 1.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (126)
  1. package/lib/adapter.d.ts +1 -26
  2. package/lib/adapter.d.ts.map +1 -1
  3. package/lib/adapter.js +20 -117
  4. package/lib/adapter.js.map +1 -1
  5. package/lib/ai/index.d.ts +2 -0
  6. package/lib/ai/index.d.ts.map +1 -1
  7. package/lib/ai/index.js +1 -0
  8. package/lib/ai/index.js.map +1 -1
  9. package/lib/built/adapter-process.d.ts +0 -4
  10. package/lib/built/adapter-process.d.ts.map +1 -1
  11. package/lib/built/adapter-process.js +0 -95
  12. package/lib/built/adapter-process.js.map +1 -1
  13. package/lib/built/agent-preset.d.ts +2 -0
  14. package/lib/built/agent-preset.d.ts.map +1 -1
  15. package/lib/built/agent-preset.js +4 -0
  16. package/lib/built/agent-preset.js.map +1 -1
  17. package/lib/built/command.d.ts +4 -0
  18. package/lib/built/command.d.ts.map +1 -1
  19. package/lib/built/command.js +6 -0
  20. package/lib/built/command.js.map +1 -1
  21. package/lib/built/component.d.ts.map +1 -1
  22. package/lib/built/component.js +1 -0
  23. package/lib/built/component.js.map +1 -1
  24. package/lib/built/dispatcher.d.ts.map +1 -1
  25. package/lib/built/dispatcher.js +0 -13
  26. package/lib/built/dispatcher.js.map +1 -1
  27. package/lib/built/message-filter.d.ts +2 -0
  28. package/lib/built/message-filter.d.ts.map +1 -1
  29. package/lib/built/message-filter.js +5 -0
  30. package/lib/built/message-filter.js.map +1 -1
  31. package/lib/built/skill.d.ts +11 -0
  32. package/lib/built/skill.d.ts.map +1 -1
  33. package/lib/built/skill.js +14 -0
  34. package/lib/built/skill.js.map +1 -1
  35. package/lib/built/tool.d.ts +11 -44
  36. package/lib/built/tool.d.ts.map +1 -1
  37. package/lib/built/tool.js +14 -353
  38. package/lib/built/tool.js.map +1 -1
  39. package/lib/plugin.d.ts +1 -25
  40. package/lib/plugin.d.ts.map +1 -1
  41. package/lib/plugin.js +1 -77
  42. package/lib/plugin.js.map +1 -1
  43. package/lib/types.d.ts +0 -25
  44. package/lib/types.d.ts.map +1 -1
  45. package/package.json +10 -7
  46. package/CHANGELOG.md +0 -538
  47. package/REFACTORING_COMPLETE.md +0 -178
  48. package/REFACTORING_STATUS.md +0 -263
  49. package/src/adapter.ts +0 -275
  50. package/src/ai/index.ts +0 -52
  51. package/src/ai/providers/anthropic.ts +0 -379
  52. package/src/ai/providers/base.ts +0 -175
  53. package/src/ai/providers/index.ts +0 -13
  54. package/src/ai/providers/ollama.ts +0 -302
  55. package/src/ai/providers/openai.ts +0 -174
  56. package/src/ai/types.ts +0 -348
  57. package/src/bot.ts +0 -37
  58. package/src/built/adapter-process.ts +0 -177
  59. package/src/built/agent-preset.ts +0 -136
  60. package/src/built/ai-trigger.ts +0 -259
  61. package/src/built/command.ts +0 -108
  62. package/src/built/common-adapter-tools.ts +0 -242
  63. package/src/built/component.ts +0 -130
  64. package/src/built/config.ts +0 -335
  65. package/src/built/cron.ts +0 -156
  66. package/src/built/database.ts +0 -134
  67. package/src/built/dispatcher.ts +0 -496
  68. package/src/built/login-assist.ts +0 -131
  69. package/src/built/message-filter.ts +0 -390
  70. package/src/built/permission.ts +0 -151
  71. package/src/built/schema-feature.ts +0 -190
  72. package/src/built/skill.ts +0 -221
  73. package/src/built/tool.ts +0 -948
  74. package/src/command.ts +0 -87
  75. package/src/component.ts +0 -565
  76. package/src/cron.ts +0 -4
  77. package/src/errors.ts +0 -46
  78. package/src/feature.ts +0 -7
  79. package/src/index.ts +0 -53
  80. package/src/jsx-dev-runtime.ts +0 -2
  81. package/src/jsx-runtime.ts +0 -12
  82. package/src/jsx.ts +0 -135
  83. package/src/message.ts +0 -48
  84. package/src/models/system-log.ts +0 -20
  85. package/src/models/user.ts +0 -15
  86. package/src/notice.ts +0 -98
  87. package/src/plugin.ts +0 -896
  88. package/src/prompt.ts +0 -293
  89. package/src/request.ts +0 -95
  90. package/src/scheduler/index.ts +0 -19
  91. package/src/scheduler/scheduler.ts +0 -372
  92. package/src/scheduler/types.ts +0 -74
  93. package/src/tool-zod.ts +0 -115
  94. package/src/types-generator.ts +0 -78
  95. package/src/types.ts +0 -505
  96. package/src/utils.ts +0 -227
  97. package/tests/adapter.test.ts +0 -638
  98. package/tests/ai/ai-trigger.test.ts +0 -368
  99. package/tests/ai/providers.integration.test.ts +0 -227
  100. package/tests/ai/setup.ts +0 -308
  101. package/tests/ai/tool.test.ts +0 -800
  102. package/tests/bot.test.ts +0 -151
  103. package/tests/command.test.ts +0 -737
  104. package/tests/component-new.test.ts +0 -361
  105. package/tests/config.test.ts +0 -372
  106. package/tests/cron.test.ts +0 -82
  107. package/tests/dispatcher.test.ts +0 -293
  108. package/tests/errors.test.ts +0 -21
  109. package/tests/expression-evaluation.test.ts +0 -258
  110. package/tests/features-builtin.test.ts +0 -191
  111. package/tests/jsx-runtime.test.ts +0 -45
  112. package/tests/jsx.test.ts +0 -319
  113. package/tests/message-filter.test.ts +0 -566
  114. package/tests/message.test.ts +0 -402
  115. package/tests/notice.test.ts +0 -198
  116. package/tests/plugin.test.ts +0 -779
  117. package/tests/prompt.test.ts +0 -78
  118. package/tests/redos-protection.test.ts +0 -198
  119. package/tests/request.test.ts +0 -221
  120. package/tests/schema.test.ts +0 -248
  121. package/tests/skill-feature.test.ts +0 -179
  122. package/tests/test-utils.ts +0 -59
  123. package/tests/tool-feature.test.ts +0 -254
  124. package/tests/types.test.ts +0 -162
  125. package/tests/utils.test.ts +0 -135
  126. package/tsconfig.json +0 -24
@@ -1,302 +0,0 @@
1
- /**
2
- * @zhin.js/ai - Ollama Provider
3
- * 支持本地 Ollama 模型
4
- */
5
-
6
- import { Logger } from '@zhin.js/logger';
7
- import { BaseProvider } from './base.js';
8
- import type {
9
- ProviderConfig,
10
- ChatCompletionRequest,
11
- ChatCompletionResponse,
12
- ChatCompletionChunk,
13
- ChatMessage,
14
- ToolDefinition,
15
- } from '../types.js';
16
-
17
- const logger = new Logger(null, 'Ollama');
18
-
19
- export interface OllamaConfig extends ProviderConfig {
20
- host?: string;
21
- models?: string[];
22
- /** Ollama 上下文窗口大小(token 数),默认 32768。影响多轮对话和技能指令的保持能力 */
23
- num_ctx?: number;
24
- }
25
-
26
- /**
27
- * 转换消息格式
28
- */
29
- function toOllamaMessages(messages: ChatMessage[]): any[] {
30
- return messages.map(msg => {
31
- let content: string;
32
- const images: string[] = [];
33
-
34
- if (typeof msg.content === 'string') {
35
- content = msg.content;
36
- } else {
37
- content = msg.content
38
- .filter(p => p.type === 'text')
39
- .map(p => (p as { type: 'text'; text: string }).text)
40
- .join('');
41
-
42
- for (const part of msg.content) {
43
- if (part.type === 'image_url') {
44
- const url = part.image_url.url;
45
- if (url.startsWith('data:')) {
46
- // 提取 base64 数据
47
- const base64 = url.split(',')[1];
48
- if (base64) images.push(base64);
49
- }
50
- }
51
- }
52
- }
53
-
54
- const result: any = {
55
- role: msg.role === 'tool' ? 'user' : msg.role,
56
- content,
57
- };
58
-
59
- if (images.length > 0) {
60
- result.images = images;
61
- }
62
-
63
- return result;
64
- });
65
- }
66
-
67
- /**
68
- * 转换工具定义
69
- */
70
- function toOllamaTools(tools?: ToolDefinition[]): any[] | undefined {
71
- if (!tools?.length) return undefined;
72
-
73
- return tools.map(tool => ({
74
- type: 'function',
75
- function: {
76
- name: tool.function.name,
77
- description: tool.function.description,
78
- parameters: tool.function.parameters,
79
- },
80
- }));
81
- }
82
-
83
- export class OllamaProvider extends BaseProvider {
84
- name = 'ollama';
85
- models: string[];
86
- contextWindow: number;
87
- capabilities = { vision: true, streaming: true, toolCalling: true, thinking: true };
88
-
89
- private host: string;
90
- private numCtx: number;
91
-
92
- constructor(config: OllamaConfig = {}) {
93
- super(config);
94
- this.host = config.host || config.baseUrl || 'http://localhost:11434';
95
- this.numCtx = config.contextWindow ?? config.num_ctx ?? 32768;
96
- this.contextWindow = this.numCtx;
97
- this.models = config.models?.length ? config.models : [
98
- 'llama3.3',
99
- 'llama3.2',
100
- 'llama3.1',
101
- 'qwen2.5',
102
- 'qwen2.5-coder',
103
- 'deepseek-r1',
104
- 'deepseek-v3',
105
- 'mistral',
106
- 'mixtral',
107
- 'phi4',
108
- 'gemma2',
109
- ];
110
- logger.debug(`初始化完成, host: ${this.host}`);
111
- }
112
-
113
- async chat(request: ChatCompletionRequest): Promise<ChatCompletionResponse> {
114
- const messages = toOllamaMessages(request.messages);
115
-
116
- logger.debug(`请求 ${request.model}, 消息: ${messages.length}`);
117
-
118
- const ollamaRequest: any = {
119
- model: request.model,
120
- messages,
121
- stream: false,
122
- options: {
123
- num_ctx: this.numCtx,
124
- },
125
- };
126
-
127
- // think 参数:控制 qwen3 等模型的思考模式
128
- if (request.think !== undefined) {
129
- ollamaRequest.think = request.think;
130
- }
131
-
132
- if (request.temperature !== undefined) {
133
- ollamaRequest.options.temperature = request.temperature;
134
- }
135
-
136
- if (request.top_p !== undefined) {
137
- ollamaRequest.options.top_p = request.top_p;
138
- }
139
-
140
- if (request.max_tokens !== undefined) {
141
- ollamaRequest.options.num_predict = request.max_tokens;
142
- }
143
-
144
- const tools = toOllamaTools(request.tools);
145
- if (tools) {
146
- ollamaRequest.tools = tools;
147
- }
148
-
149
- const startTime = Date.now();
150
-
151
- const response = await this.fetch<any>(`${this.host}/api/chat`, {
152
- method: 'POST',
153
- json: ollamaRequest,
154
- });
155
-
156
- logger.debug(`响应耗时: ${Date.now() - startTime}ms, 工具调用: ${response.message?.tool_calls?.length || 0}`);
157
-
158
- // 转换响应格式
159
- const toolCalls = response.message?.tool_calls?.map((tc: any, i: number) => ({
160
- id: `call_${i}`,
161
- type: 'function' as const,
162
- function: {
163
- name: tc.function.name,
164
- arguments: JSON.stringify(tc.function.arguments),
165
- },
166
- }));
167
-
168
- return {
169
- id: `ollama-${Date.now()}`,
170
- object: 'chat.completion',
171
- created: Date.now(),
172
- model: response.model,
173
- choices: [{
174
- index: 0,
175
- message: {
176
- role: 'assistant',
177
- content: response.message?.content || '',
178
- tool_calls: toolCalls?.length ? toolCalls : undefined,
179
- },
180
- finish_reason: toolCalls?.length ? 'tool_calls' : 'stop',
181
- }],
182
- usage: {
183
- prompt_tokens: response.prompt_eval_count || 0,
184
- completion_tokens: response.eval_count || 0,
185
- total_tokens: (response.prompt_eval_count || 0) + (response.eval_count || 0),
186
- },
187
- };
188
- }
189
-
190
- async *chatStream(request: ChatCompletionRequest): AsyncIterable<ChatCompletionChunk> {
191
- const messages = toOllamaMessages(request.messages);
192
-
193
- const ollamaRequest: any = {
194
- model: request.model,
195
- messages,
196
- stream: true,
197
- options: {
198
- num_ctx: this.numCtx,
199
- },
200
- };
201
-
202
- if (request.think !== undefined) {
203
- ollamaRequest.think = request.think;
204
- }
205
-
206
- if (request.temperature !== undefined) {
207
- ollamaRequest.options.temperature = request.temperature;
208
- }
209
-
210
- if (request.top_p !== undefined) {
211
- ollamaRequest.options.top_p = request.top_p;
212
- }
213
-
214
- const response = await globalThis.fetch(`${this.host}/api/chat`, {
215
- method: 'POST',
216
- headers: { 'Content-Type': 'application/json' },
217
- body: JSON.stringify(ollamaRequest),
218
- });
219
-
220
- if (!response.ok) {
221
- const error = await response.text();
222
- throw new Error(`Ollama API Error (${response.status}): ${error}`);
223
- }
224
-
225
- if (!response.body) {
226
- throw new Error('Response body is empty');
227
- }
228
-
229
- const reader = response.body.getReader();
230
- const decoder = new TextDecoder();
231
- let buffer = '';
232
- const id = `ollama-${Date.now()}`;
233
-
234
- while (true) {
235
- const { done, value } = await reader.read();
236
- if (done) break;
237
-
238
- buffer += decoder.decode(value, { stream: true });
239
- const lines = buffer.split('\n');
240
- buffer = lines.pop() || '';
241
-
242
- for (const line of lines) {
243
- if (!line.trim()) continue;
244
-
245
- try {
246
- const data = JSON.parse(line);
247
-
248
- yield {
249
- id,
250
- object: 'chat.completion.chunk',
251
- created: Date.now(),
252
- model: data.model || request.model,
253
- choices: [{
254
- index: 0,
255
- delta: data.done
256
- ? {}
257
- : { content: data.message?.content || '' },
258
- finish_reason: data.done ? 'stop' : null,
259
- }],
260
- usage: data.done ? {
261
- prompt_tokens: data.prompt_eval_count || 0,
262
- completion_tokens: data.eval_count || 0,
263
- total_tokens: (data.prompt_eval_count || 0) + (data.eval_count || 0),
264
- } : undefined,
265
- };
266
- } catch {
267
- // 忽略解析错误
268
- }
269
- }
270
- }
271
- }
272
-
273
- async listModels(): Promise<string[]> {
274
- try {
275
- const response = await this.fetch<{ models: { name: string }[] }>(
276
- `${this.host}/api/tags`
277
- );
278
- return response.models.map(m => m.name);
279
- } catch {
280
- return this.models;
281
- }
282
- }
283
-
284
- /**
285
- * 拉取模型
286
- */
287
- async pullModel(model: string): Promise<void> {
288
- await this.fetch(`${this.host}/api/pull`, {
289
- method: 'POST',
290
- json: { name: model },
291
- });
292
- }
293
-
294
- async healthCheck(): Promise<boolean> {
295
- try {
296
- await globalThis.fetch(`${this.host}/api/tags`);
297
- return true;
298
- } catch {
299
- return false;
300
- }
301
- }
302
- }
@@ -1,174 +0,0 @@
1
- /**
2
- * @zhin.js/ai - OpenAI Provider
3
- * 支持 OpenAI API 及兼容接口(DeepSeek、Moonshot 等)
4
- */
5
-
6
- import { BaseProvider } from './base.js';
7
- import type {
8
- ProviderConfig,
9
- ChatCompletionRequest,
10
- ChatCompletionResponse,
11
- ChatCompletionChunk,
12
- } from '../types.js';
13
-
14
- export interface OpenAIConfig extends ProviderConfig {
15
- organization?: string;
16
- }
17
-
18
- export class OpenAIProvider extends BaseProvider {
19
- name = 'openai';
20
- models = [
21
- 'gpt-4o',
22
- 'gpt-4o-mini',
23
- 'gpt-4-turbo',
24
- 'gpt-4',
25
- 'gpt-3.5-turbo',
26
- 'o1',
27
- 'o1-mini',
28
- 'o1-preview',
29
- 'o3-mini',
30
- ];
31
- contextWindow: number;
32
- capabilities = { vision: true, streaming: true, toolCalling: true, thinking: false };
33
-
34
- private baseUrl: string;
35
-
36
- constructor(config: OpenAIConfig = {}) {
37
- super(config);
38
- this.contextWindow = config.contextWindow ?? 128000;
39
- this.baseUrl = config.baseUrl || 'https://api.openai.com/v1';
40
- if (config.models?.length) this.models = config.models;
41
-
42
- if (config.organization) {
43
- this.config.headers = {
44
- ...this.config.headers,
45
- 'OpenAI-Organization': config.organization,
46
- };
47
- }
48
- }
49
-
50
- async chat(request: ChatCompletionRequest): Promise<ChatCompletionResponse> {
51
- return this.fetch<ChatCompletionResponse>(
52
- `${this.baseUrl}/chat/completions`,
53
- {
54
- method: 'POST',
55
- json: {
56
- ...request,
57
- stream: false,
58
- },
59
- }
60
- );
61
- }
62
-
63
- async *chatStream(request: ChatCompletionRequest): AsyncIterable<ChatCompletionChunk> {
64
- const stream = this.fetchStream(
65
- `${this.baseUrl}/chat/completions`,
66
- {
67
- method: 'POST',
68
- json: {
69
- ...request,
70
- stream: true,
71
- stream_options: { include_usage: true },
72
- },
73
- }
74
- );
75
-
76
- for await (const data of stream) {
77
- try {
78
- const chunk = JSON.parse(data) as ChatCompletionChunk;
79
- yield chunk;
80
- } catch {
81
- // 忽略解析错误的行
82
- }
83
- }
84
- }
85
-
86
- async listModels(): Promise<string[]> {
87
- interface ModelList {
88
- data: { id: string }[];
89
- }
90
-
91
- try {
92
- const response = await this.fetch<ModelList>(`${this.baseUrl}/models`);
93
- return response.data
94
- .map((m) => m.id)
95
- .filter((id) => id.includes('gpt') || id.includes('o1') || id.includes('o3'));
96
- } catch {
97
- return this.models;
98
- }
99
- }
100
- }
101
-
102
- /**
103
- * DeepSeek Provider(基于 OpenAI 兼容接口)
104
- */
105
- export class DeepSeekProvider extends OpenAIProvider {
106
- name = 'deepseek';
107
- models = [
108
- 'deepseek-chat',
109
- 'deepseek-coder',
110
- 'deepseek-reasoner',
111
- ];
112
-
113
- constructor(config: ProviderConfig = {}) {
114
- super({
115
- ...config,
116
- baseUrl: config.baseUrl || 'https://api.deepseek.com/v1',
117
- });
118
- if (config.models?.length) this.models = config.models;
119
- }
120
-
121
- async listModels(): Promise<string[]> {
122
- return this.models;
123
- }
124
- }
125
-
126
- /**
127
- * Moonshot Provider(基于 OpenAI 兼容接口)
128
- */
129
- export class MoonshotProvider extends OpenAIProvider {
130
- name = 'moonshot';
131
- models = [
132
- 'moonshot-v1-8k',
133
- 'moonshot-v1-32k',
134
- 'moonshot-v1-128k',
135
- ];
136
-
137
- constructor(config: ProviderConfig = {}) {
138
- super({
139
- ...config,
140
- baseUrl: config.baseUrl || 'https://api.moonshot.cn/v1',
141
- });
142
- if (config.models?.length) this.models = config.models;
143
- }
144
-
145
- async listModels(): Promise<string[]> {
146
- return this.models;
147
- }
148
- }
149
-
150
- /**
151
- * 智谱 AI Provider(基于 OpenAI 兼容接口)
152
- */
153
- export class ZhipuProvider extends OpenAIProvider {
154
- name = 'zhipu';
155
- models = [
156
- 'glm-4-plus',
157
- 'glm-4',
158
- 'glm-4-air',
159
- 'glm-4-flash',
160
- 'glm-4v-plus',
161
- ];
162
-
163
- constructor(config: ProviderConfig = {}) {
164
- super({
165
- ...config,
166
- baseUrl: config.baseUrl || 'https://open.bigmodel.cn/api/paas/v4',
167
- });
168
- if (config.models?.length) this.models = config.models;
169
- }
170
-
171
- async listModels(): Promise<string[]> {
172
- return this.models;
173
- }
174
- }