@dangao/bun-server 1.12.1 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (214)
  1. package/README.md +32 -0
  2. package/dist/ai/ai-module.d.ts +24 -0
  3. package/dist/ai/ai-module.d.ts.map +1 -0
  4. package/dist/ai/decorators.d.ts +25 -0
  5. package/dist/ai/decorators.d.ts.map +1 -0
  6. package/dist/ai/errors.d.ts +39 -0
  7. package/dist/ai/errors.d.ts.map +1 -0
  8. package/dist/ai/index.d.ts +12 -0
  9. package/dist/ai/index.d.ts.map +1 -0
  10. package/dist/ai/providers/anthropic-provider.d.ts +23 -0
  11. package/dist/ai/providers/anthropic-provider.d.ts.map +1 -0
  12. package/dist/ai/providers/google-provider.d.ts +20 -0
  13. package/dist/ai/providers/google-provider.d.ts.map +1 -0
  14. package/dist/ai/providers/ollama-provider.d.ts +17 -0
  15. package/dist/ai/providers/ollama-provider.d.ts.map +1 -0
  16. package/dist/ai/providers/openai-provider.d.ts +28 -0
  17. package/dist/ai/providers/openai-provider.d.ts.map +1 -0
  18. package/dist/ai/service.d.ts +40 -0
  19. package/dist/ai/service.d.ts.map +1 -0
  20. package/dist/ai/tools/tool-executor.d.ts +15 -0
  21. package/dist/ai/tools/tool-executor.d.ts.map +1 -0
  22. package/dist/ai/tools/tool-registry.d.ts +39 -0
  23. package/dist/ai/tools/tool-registry.d.ts.map +1 -0
  24. package/dist/ai/types.d.ts +134 -0
  25. package/dist/ai/types.d.ts.map +1 -0
  26. package/dist/ai-guard/ai-guard-module.d.ts +18 -0
  27. package/dist/ai-guard/ai-guard-module.d.ts.map +1 -0
  28. package/dist/ai-guard/decorators.d.ts +16 -0
  29. package/dist/ai-guard/decorators.d.ts.map +1 -0
  30. package/dist/ai-guard/detectors/content-moderator.d.ts +26 -0
  31. package/dist/ai-guard/detectors/content-moderator.d.ts.map +1 -0
  32. package/dist/ai-guard/detectors/injection-detector.d.ts +13 -0
  33. package/dist/ai-guard/detectors/injection-detector.d.ts.map +1 -0
  34. package/dist/ai-guard/detectors/pii-detector.d.ts +11 -0
  35. package/dist/ai-guard/detectors/pii-detector.d.ts.map +1 -0
  36. package/dist/ai-guard/index.d.ts +8 -0
  37. package/dist/ai-guard/index.d.ts.map +1 -0
  38. package/dist/ai-guard/service.d.ts +21 -0
  39. package/dist/ai-guard/service.d.ts.map +1 -0
  40. package/dist/ai-guard/types.d.ts +59 -0
  41. package/dist/ai-guard/types.d.ts.map +1 -0
  42. package/dist/conversation/conversation-module.d.ts +25 -0
  43. package/dist/conversation/conversation-module.d.ts.map +1 -0
  44. package/dist/conversation/decorators.d.ts +28 -0
  45. package/dist/conversation/decorators.d.ts.map +1 -0
  46. package/dist/conversation/index.d.ts +8 -0
  47. package/dist/conversation/index.d.ts.map +1 -0
  48. package/dist/conversation/service.d.ts +43 -0
  49. package/dist/conversation/service.d.ts.map +1 -0
  50. package/dist/conversation/stores/database-store.d.ts +46 -0
  51. package/dist/conversation/stores/database-store.d.ts.map +1 -0
  52. package/dist/conversation/stores/memory-store.d.ts +17 -0
  53. package/dist/conversation/stores/memory-store.d.ts.map +1 -0
  54. package/dist/conversation/stores/redis-store.d.ts +39 -0
  55. package/dist/conversation/stores/redis-store.d.ts.map +1 -0
  56. package/dist/conversation/types.d.ts +64 -0
  57. package/dist/conversation/types.d.ts.map +1 -0
  58. package/dist/embedding/embedding-module.d.ts +20 -0
  59. package/dist/embedding/embedding-module.d.ts.map +1 -0
  60. package/dist/embedding/index.d.ts +6 -0
  61. package/dist/embedding/index.d.ts.map +1 -0
  62. package/dist/embedding/providers/ollama-embedding-provider.d.ts +18 -0
  63. package/dist/embedding/providers/ollama-embedding-provider.d.ts.map +1 -0
  64. package/dist/embedding/providers/openai-embedding-provider.d.ts +18 -0
  65. package/dist/embedding/providers/openai-embedding-provider.d.ts.map +1 -0
  66. package/dist/embedding/service.d.ts +27 -0
  67. package/dist/embedding/service.d.ts.map +1 -0
  68. package/dist/embedding/types.d.ts +25 -0
  69. package/dist/embedding/types.d.ts.map +1 -0
  70. package/dist/index.d.ts +8 -0
  71. package/dist/index.d.ts.map +1 -1
  72. package/dist/index.js +2638 -1
  73. package/dist/mcp/decorators.d.ts +42 -0
  74. package/dist/mcp/decorators.d.ts.map +1 -0
  75. package/dist/mcp/index.d.ts +6 -0
  76. package/dist/mcp/index.d.ts.map +1 -0
  77. package/dist/mcp/mcp-module.d.ts +22 -0
  78. package/dist/mcp/mcp-module.d.ts.map +1 -0
  79. package/dist/mcp/registry.d.ts +23 -0
  80. package/dist/mcp/registry.d.ts.map +1 -0
  81. package/dist/mcp/server.d.ts +29 -0
  82. package/dist/mcp/server.d.ts.map +1 -0
  83. package/dist/mcp/types.d.ts +60 -0
  84. package/dist/mcp/types.d.ts.map +1 -0
  85. package/dist/prompt/index.d.ts +6 -0
  86. package/dist/prompt/index.d.ts.map +1 -0
  87. package/dist/prompt/prompt-module.d.ts +23 -0
  88. package/dist/prompt/prompt-module.d.ts.map +1 -0
  89. package/dist/prompt/service.d.ts +47 -0
  90. package/dist/prompt/service.d.ts.map +1 -0
  91. package/dist/prompt/stores/file-store.d.ts +36 -0
  92. package/dist/prompt/stores/file-store.d.ts.map +1 -0
  93. package/dist/prompt/stores/memory-store.d.ts +17 -0
  94. package/dist/prompt/stores/memory-store.d.ts.map +1 -0
  95. package/dist/prompt/types.d.ts +68 -0
  96. package/dist/prompt/types.d.ts.map +1 -0
  97. package/dist/rag/chunkers/markdown-chunker.d.ts +11 -0
  98. package/dist/rag/chunkers/markdown-chunker.d.ts.map +1 -0
  99. package/dist/rag/chunkers/text-chunker.d.ts +11 -0
  100. package/dist/rag/chunkers/text-chunker.d.ts.map +1 -0
  101. package/dist/rag/decorators.d.ts +24 -0
  102. package/dist/rag/decorators.d.ts.map +1 -0
  103. package/dist/rag/index.d.ts +7 -0
  104. package/dist/rag/index.d.ts.map +1 -0
  105. package/dist/rag/rag-module.d.ts +23 -0
  106. package/dist/rag/rag-module.d.ts.map +1 -0
  107. package/dist/rag/service.d.ts +36 -0
  108. package/dist/rag/service.d.ts.map +1 -0
  109. package/dist/rag/types.d.ts +56 -0
  110. package/dist/rag/types.d.ts.map +1 -0
  111. package/dist/vector-store/index.d.ts +6 -0
  112. package/dist/vector-store/index.d.ts.map +1 -0
  113. package/dist/vector-store/stores/memory-store.d.ts +17 -0
  114. package/dist/vector-store/stores/memory-store.d.ts.map +1 -0
  115. package/dist/vector-store/stores/pinecone-store.d.ts +27 -0
  116. package/dist/vector-store/stores/pinecone-store.d.ts.map +1 -0
  117. package/dist/vector-store/stores/qdrant-store.d.ts +29 -0
  118. package/dist/vector-store/stores/qdrant-store.d.ts.map +1 -0
  119. package/dist/vector-store/types.d.ts +60 -0
  120. package/dist/vector-store/types.d.ts.map +1 -0
  121. package/dist/vector-store/vector-store-module.d.ts +20 -0
  122. package/dist/vector-store/vector-store-module.d.ts.map +1 -0
  123. package/docs/ai.md +500 -0
  124. package/docs/best-practices.md +83 -8
  125. package/docs/database.md +23 -0
  126. package/docs/guide.md +90 -27
  127. package/docs/migration.md +81 -7
  128. package/docs/security.md +23 -0
  129. package/docs/zh/ai.md +441 -0
  130. package/docs/zh/best-practices.md +43 -0
  131. package/docs/zh/database.md +23 -0
  132. package/docs/zh/guide.md +40 -1
  133. package/docs/zh/migration.md +39 -0
  134. package/docs/zh/security.md +23 -0
  135. package/package.json +2 -2
  136. package/src/ai/ai-module.ts +62 -0
  137. package/src/ai/decorators.ts +30 -0
  138. package/src/ai/errors.ts +71 -0
  139. package/src/ai/index.ts +11 -0
  140. package/src/ai/providers/anthropic-provider.ts +190 -0
  141. package/src/ai/providers/google-provider.ts +179 -0
  142. package/src/ai/providers/ollama-provider.ts +126 -0
  143. package/src/ai/providers/openai-provider.ts +242 -0
  144. package/src/ai/service.ts +155 -0
  145. package/src/ai/tools/tool-executor.ts +38 -0
  146. package/src/ai/tools/tool-registry.ts +91 -0
  147. package/src/ai/types.ts +145 -0
  148. package/src/ai-guard/ai-guard-module.ts +50 -0
  149. package/src/ai-guard/decorators.ts +21 -0
  150. package/src/ai-guard/detectors/content-moderator.ts +80 -0
  151. package/src/ai-guard/detectors/injection-detector.ts +48 -0
  152. package/src/ai-guard/detectors/pii-detector.ts +64 -0
  153. package/src/ai-guard/index.ts +7 -0
  154. package/src/ai-guard/service.ts +100 -0
  155. package/src/ai-guard/types.ts +61 -0
  156. package/src/conversation/conversation-module.ts +63 -0
  157. package/src/conversation/decorators.ts +47 -0
  158. package/src/conversation/index.ts +7 -0
  159. package/src/conversation/service.ts +133 -0
  160. package/src/conversation/stores/database-store.ts +125 -0
  161. package/src/conversation/stores/memory-store.ts +57 -0
  162. package/src/conversation/stores/redis-store.ts +101 -0
  163. package/src/conversation/types.ts +68 -0
  164. package/src/embedding/embedding-module.ts +52 -0
  165. package/src/embedding/index.ts +5 -0
  166. package/src/embedding/providers/ollama-embedding-provider.ts +39 -0
  167. package/src/embedding/providers/openai-embedding-provider.ts +47 -0
  168. package/src/embedding/service.ts +55 -0
  169. package/src/embedding/types.ts +27 -0
  170. package/src/index.ts +10 -0
  171. package/src/mcp/decorators.ts +60 -0
  172. package/src/mcp/index.ts +5 -0
  173. package/src/mcp/mcp-module.ts +58 -0
  174. package/src/mcp/registry.ts +72 -0
  175. package/src/mcp/server.ts +164 -0
  176. package/src/mcp/types.ts +63 -0
  177. package/src/prompt/index.ts +5 -0
  178. package/src/prompt/prompt-module.ts +61 -0
  179. package/src/prompt/service.ts +93 -0
  180. package/src/prompt/stores/file-store.ts +135 -0
  181. package/src/prompt/stores/memory-store.ts +82 -0
  182. package/src/prompt/types.ts +84 -0
  183. package/src/rag/chunkers/markdown-chunker.ts +40 -0
  184. package/src/rag/chunkers/text-chunker.ts +30 -0
  185. package/src/rag/decorators.ts +26 -0
  186. package/src/rag/index.ts +6 -0
  187. package/src/rag/rag-module.ts +78 -0
  188. package/src/rag/service.ts +134 -0
  189. package/src/rag/types.ts +47 -0
  190. package/src/vector-store/index.ts +5 -0
  191. package/src/vector-store/stores/memory-store.ts +69 -0
  192. package/src/vector-store/stores/pinecone-store.ts +123 -0
  193. package/src/vector-store/stores/qdrant-store.ts +147 -0
  194. package/src/vector-store/types.ts +77 -0
  195. package/src/vector-store/vector-store-module.ts +50 -0
  196. package/tests/ai/ai-module.test.ts +46 -0
  197. package/tests/ai/ai-service.test.ts +91 -0
  198. package/tests/ai/tool-registry.test.ts +57 -0
  199. package/tests/ai-guard/ai-guard-module.test.ts +23 -0
  200. package/tests/ai-guard/content-moderator.test.ts +65 -0
  201. package/tests/ai-guard/pii-detector.test.ts +41 -0
  202. package/tests/conversation/conversation-module.test.ts +26 -0
  203. package/tests/conversation/conversation-service.test.ts +64 -0
  204. package/tests/conversation/memory-store.test.ts +68 -0
  205. package/tests/embedding/embedding-service.test.ts +55 -0
  206. package/tests/mcp/mcp-server.test.ts +85 -0
  207. package/tests/prompt/prompt-module.test.ts +30 -0
  208. package/tests/prompt/prompt-service.test.ts +74 -0
  209. package/tests/rag/chunkers.test.ts +58 -0
  210. package/tests/rag/rag-service.test.ts +66 -0
  211. package/tests/vector-store/memory-vector-store.test.ts +84 -0
  212. package/tests/interceptor/perf/interceptor-performance.test.ts +0 -340
  213. package/tests/perf/optimization.test.ts +0 -182
  214. package/tests/perf/regression.test.ts +0 -120
@@ -0,0 +1,242 @@
1
+ import type { LlmProvider, AiRequest, AiResponse, AiMessage } from '../types';
2
+ import { AiProviderError, AiRateLimitError, AiContextLengthError, AiTimeoutError } from '../errors';
3
+
4
/**
 * Configuration for the OpenAI provider.
 */
export interface OpenAIProviderConfig {
  /** OpenAI API key, sent as a Bearer token on every request. */
  apiKey: string;
  /** API root URL (a trailing slash is stripped). Default: https://api.openai.com/v1 */
  baseUrl?: string;
  /** Model used when a request does not specify one. Default: gpt-4o */
  defaultModel?: string;
  /** Pricing per 1M tokens (input/output), in USD, for cost estimation; merged over the built-in defaults. */
  pricing?: Record<string, { input: number; output: number }>;
}
13
+
14
// Built-in USD pricing per 1M tokens, used by cost estimation unless
// overridden via OpenAIProviderConfig.pricing.
// NOTE(review): published prices drift over time — verify against
// OpenAI's current price list before relying on the estimates.
const DEFAULT_PRICING: Record<string, { input: number; output: number }> = {
  'gpt-4o': { input: 2.5, output: 10 },
  'gpt-4o-mini': { input: 0.15, output: 0.6 },
  'gpt-4-turbo': { input: 10, output: 30 },
  'gpt-3.5-turbo': { input: 0.5, output: 1.5 },
};
20
+
21
// Minimal subsets of the OpenAI Chat Completions wire format — only the
// fields this provider actually reads. All fields are optional because the
// payload comes from an external service and is treated defensively.

/** The `function` object inside an assistant message's tool call. */
interface OpenAiToolCallFunction {
  name?: string;
  /** JSON-encoded arguments string (decoded by safeParseToolArguments). */
  arguments?: string;
}

/** One entry of `choices[].message.tool_calls`. */
interface OpenAiToolCall {
  id?: string;
  function?: OpenAiToolCallFunction;
}

/** `choices[].message` of a chat completion response. */
interface OpenAiMessage {
  content?: string | null;
  tool_calls?: OpenAiToolCall[];
}

/** One element of the top-level `choices` array. */
interface OpenAiChoice {
  message?: OpenAiMessage;
  finish_reason?: string | null;
}

/** Token accounting block returned by the API. */
interface OpenAiUsage {
  prompt_tokens?: number;
  completion_tokens?: number;
  total_tokens?: number;
}

/** Top-level shape of a non-streaming chat completion response. */
interface OpenAiChatCompletionResponse {
  choices?: OpenAiChoice[];
  usage?: OpenAiUsage;
}
51
+
52
+ export class OpenAIProvider implements LlmProvider {
53
+ public readonly name = 'openai';
54
+ private readonly apiKey: string;
55
+ private readonly baseUrl: string;
56
+ private readonly defaultModel: string;
57
+ private readonly pricing: Record<string, { input: number; output: number }>;
58
+
59
+ public constructor(config: OpenAIProviderConfig) {
60
+ this.apiKey = config.apiKey;
61
+ this.baseUrl = (config.baseUrl ?? 'https://api.openai.com/v1').replace(/\/$/, '');
62
+ this.defaultModel = config.defaultModel ?? 'gpt-4o';
63
+ this.pricing = { ...DEFAULT_PRICING, ...(config.pricing ?? {}) };
64
+ }
65
+
66
+ public async complete(request: AiRequest): Promise<AiResponse> {
67
+ const model = request.model ?? this.defaultModel;
68
+ const body: Record<string, unknown> = {
69
+ model,
70
+ messages: request.messages,
71
+ temperature: request.temperature,
72
+ max_tokens: request.maxTokens,
73
+ };
74
+
75
+ if (request.tools && request.tools.length > 0) {
76
+ body['tools'] = request.tools.map((t) => ({
77
+ type: 'function',
78
+ function: { name: t.name, description: t.description, parameters: t.parameters },
79
+ }));
80
+ }
81
+
82
+ const response = await this.post('/chat/completions', body);
83
+ const choice = response.choices?.[0];
84
+ const usage = response.usage ?? { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 };
85
+ const message = choice?.message;
86
+
87
+ return {
88
+ content: message?.content ?? '',
89
+ toolCalls: message?.tool_calls?.map((tc) => ({
90
+ id: tc.id ?? '',
91
+ name: tc.function?.name ?? '',
92
+ arguments: this.safeParseToolArguments(tc.function?.arguments),
93
+ })),
94
+ model,
95
+ provider: this.name,
96
+ usage: {
97
+ promptTokens: usage.prompt_tokens ?? 0,
98
+ completionTokens: usage.completion_tokens ?? 0,
99
+ totalTokens: usage.total_tokens ?? 0,
100
+ estimatedCostUsd: this.estimateCost(
101
+ model,
102
+ usage.prompt_tokens ?? 0,
103
+ usage.completion_tokens ?? 0,
104
+ ),
105
+ },
106
+ finishReason: choice?.finish_reason === 'tool_calls' ? 'tool_calls' : 'stop',
107
+ };
108
+ }
109
+
110
+ public stream(request: AiRequest): ReadableStream<Uint8Array> {
111
+ const model = request.model ?? this.defaultModel;
112
+ const body: Record<string, unknown> = {
113
+ model,
114
+ messages: request.messages,
115
+ temperature: request.temperature,
116
+ max_tokens: request.maxTokens,
117
+ stream: true,
118
+ };
119
+
120
+ if (request.tools && request.tools.length > 0) {
121
+ body['tools'] = request.tools.map((t) => ({
122
+ type: 'function',
123
+ function: { name: t.name, description: t.description, parameters: t.parameters },
124
+ }));
125
+ }
126
+
127
+ const encoder = new TextEncoder();
128
+ const apiKey = this.apiKey;
129
+ const baseUrl = this.baseUrl;
130
+
131
+ return new ReadableStream<Uint8Array>({
132
+ async start(controller) {
133
+ try {
134
+ const res = await fetch(`${baseUrl}/chat/completions`, {
135
+ method: 'POST',
136
+ headers: {
137
+ 'Content-Type': 'application/json',
138
+ 'Authorization': `Bearer ${apiKey}`,
139
+ },
140
+ body: JSON.stringify(body),
141
+ });
142
+
143
+ if (!res.ok || !res.body) {
144
+ const err = await res.text();
145
+ controller.enqueue(encoder.encode(`data: ${JSON.stringify({ error: err, done: true })}\n\n`));
146
+ controller.close();
147
+ return;
148
+ }
149
+
150
+ const reader = res.body.getReader();
151
+ const dec = new TextDecoder();
152
+ let buf = '';
153
+
154
+ while (true) {
155
+ const { done, value } = await reader.read();
156
+ if (done) break;
157
+ buf += dec.decode(value, { stream: true });
158
+
159
+ const lines = buf.split('\n');
160
+ buf = lines.pop() ?? '';
161
+
162
+ for (const line of lines) {
163
+ if (line.startsWith('data: ')) {
164
+ const data = line.slice(6).trim();
165
+ if (data === '[DONE]') {
166
+ controller.enqueue(encoder.encode(`data: ${JSON.stringify({ done: true })}\n\n`));
167
+ continue;
168
+ }
169
+ try {
170
+ const parsed = JSON.parse(data);
171
+ const delta = parsed.choices?.[0]?.delta;
172
+ const chunk = { content: delta?.content ?? '', done: false };
173
+ controller.enqueue(encoder.encode(`data: ${JSON.stringify(chunk)}\n\n`));
174
+ } catch {
175
+ // skip malformed lines
176
+ }
177
+ }
178
+ }
179
+ }
180
+ } catch (err) {
181
+ controller.enqueue(encoder.encode(`data: ${JSON.stringify({ error: String(err), done: true })}\n\n`));
182
+ } finally {
183
+ controller.close();
184
+ }
185
+ },
186
+ });
187
+ }
188
+
189
+ public countTokens(messages: AiMessage[]): number {
190
+ // Rough approximation: 1 token ≈ 4 chars
191
+ return Math.ceil(messages.reduce((sum, m) => sum + m.content.length, 0) / 4);
192
+ }
193
+
194
+ private async post(path: string, body: Record<string, unknown>): Promise<OpenAiChatCompletionResponse> {
195
+ const res = await fetch(`${this.baseUrl}${path}`, {
196
+ method: 'POST',
197
+ headers: {
198
+ 'Content-Type': 'application/json',
199
+ 'Authorization': `Bearer ${this.apiKey}`,
200
+ },
201
+ body: JSON.stringify(body),
202
+ });
203
+
204
+ if (res.status === 429) {
205
+ const retryAfter = res.headers.get('retry-after');
206
+ throw new AiRateLimitError(this.name, retryAfter ? Number(retryAfter) * 1000 : undefined);
207
+ }
208
+ if (res.status === 413) {
209
+ throw new AiContextLengthError(this.name);
210
+ }
211
+ if (res.status === 408 || res.status === 504) {
212
+ throw new AiTimeoutError(this.name, 30000);
213
+ }
214
+ if (!res.ok) {
215
+ const text = await res.text();
216
+ throw new AiProviderError(text, this.name, res.status);
217
+ }
218
+
219
+ return await res.json() as OpenAiChatCompletionResponse;
220
+ }
221
+
222
+ private safeParseToolArguments(argumentsJson?: string): Record<string, unknown> {
223
+ if (!argumentsJson) {
224
+ return {};
225
+ }
226
+ try {
227
+ const parsed = JSON.parse(argumentsJson);
228
+ if (typeof parsed === 'object' && parsed !== null) {
229
+ return parsed as Record<string, unknown>;
230
+ }
231
+ return {};
232
+ } catch {
233
+ return {};
234
+ }
235
+ }
236
+
237
+ private estimateCost(model: string, promptTokens: number, completionTokens: number): number {
238
+ const pricing = this.pricing[model];
239
+ if (!pricing) return 0;
240
+ return (promptTokens * pricing.input + completionTokens * pricing.output) / 1_000_000;
241
+ }
242
+ }
@@ -0,0 +1,155 @@
1
+ import { Injectable } from '../di/decorators';
2
+ import type {
3
+ LlmProvider,
4
+ AiRequest,
5
+ AiResponse,
6
+ AiModuleOptions,
7
+ AiMessage,
8
+ } from './types';
9
+ import { AI_MODULE_OPTIONS_TOKEN } from './types';
10
+ import { Inject } from '../di/decorators';
11
+ import { AiNoProviderError, AiAllProvidersFailed, AiTimeoutError } from './errors';
12
+ import type { ToolRegistry } from './tools/tool-registry';
13
+ import { ToolExecutor } from './tools/tool-executor';
14
+
15
+ /**
16
+ * Core AI service — manages multiple LLM providers, fallback, streaming,
17
+ * Tool Calling loop, and cost tracking.
18
+ */
19
+ @Injectable()
20
+ export class AiService {
21
+ private readonly providers = new Map<string, LlmProvider>();
22
+ private defaultProviderName: string | undefined;
23
+ private readonly options: AiModuleOptions;
24
+ private toolExecutor: ToolExecutor | null = null;
25
+
26
+ public constructor(
27
+ @Inject(AI_MODULE_OPTIONS_TOKEN) options: AiModuleOptions,
28
+ ) {
29
+ this.options = options;
30
+ for (const entry of options.providers) {
31
+ const provider = new entry.provider(entry.config);
32
+ this.providers.set(entry.name, provider);
33
+ if (entry.default || !this.defaultProviderName) {
34
+ this.defaultProviderName = entry.name;
35
+ }
36
+ }
37
+ }
38
+
39
+ /**
40
+ * Attach a ToolRegistry so the service can run Tool Calling loops
41
+ */
42
+ public setToolRegistry(registry: ToolRegistry): void {
43
+ this.toolExecutor = new ToolExecutor(registry);
44
+ }
45
+
46
+ /**
47
+ * Non-streaming completion with optional Tool Calling loop
48
+ */
49
+ public async complete(request: AiRequest): Promise<AiResponse> {
50
+ const maxIterations = this.options.tools?.maxIterations ?? 10;
51
+ let messages: AiMessage[] = [...request.messages];
52
+ let iteration = 0;
53
+
54
+ while (iteration < maxIterations) {
55
+ const response = await this.completeSingle({ ...request, messages });
56
+
57
+ // No tool calls — return final response
58
+ if (!response.toolCalls || response.toolCalls.length === 0 || !this.toolExecutor) {
59
+ return response;
60
+ }
61
+
62
+ // Append assistant message with tool calls
63
+ messages = [
64
+ ...messages,
65
+ { role: 'assistant', content: response.content, toolCalls: response.toolCalls },
66
+ ];
67
+
68
+ // Execute tools and append results
69
+ const toolResults = await this.toolExecutor.executeAll(response.toolCalls);
70
+ messages = [...messages, ...toolResults];
71
+
72
+ iteration++;
73
+ }
74
+
75
+ // Reached max iterations — do final pass without tools
76
+ return this.completeSingle({ ...request, messages, tools: [] });
77
+ }
78
+
79
+ /**
80
+ * Streaming completion — returns SSE ReadableStream
81
+ */
82
+ public stream(request: AiRequest): ReadableStream<Uint8Array> {
83
+ const provider = this.getProvider(request.provider);
84
+ return provider.stream(request);
85
+ }
86
+
87
+ /**
88
+ * Estimate token count for messages using the default provider
89
+ */
90
+ public countTokens(messages: AiMessage[]): number {
91
+ const provider = this.getProvider();
92
+ return provider.countTokens(messages);
93
+ }
94
+
95
+ /**
96
+ * Get a provider by name (or default)
97
+ */
98
+ public getProvider(name?: string): LlmProvider {
99
+ const providerName = name ?? this.defaultProviderName;
100
+ if (!providerName) throw new AiNoProviderError();
101
+
102
+ const provider = this.providers.get(providerName);
103
+ if (!provider) throw new AiNoProviderError();
104
+
105
+ return provider;
106
+ }
107
+
108
+ /**
109
+ * List all registered provider names
110
+ */
111
+ public getProviderNames(): string[] {
112
+ return Array.from(this.providers.keys());
113
+ }
114
+
115
+ private async completeSingle(request: AiRequest): Promise<AiResponse> {
116
+ const targetName = request.provider ?? this.defaultProviderName;
117
+ if (!targetName) throw new AiNoProviderError();
118
+
119
+ const fallback = this.options.fallback ?? false;
120
+ const timeout = this.options.timeout ?? 30000;
121
+
122
+ if (!fallback) {
123
+ return this.withTimeout(this.getProvider(targetName).complete(request), timeout, targetName);
124
+ }
125
+
126
+ // Fallback chain: try target first, then others in order
127
+ const names = [
128
+ targetName,
129
+ ...Array.from(this.providers.keys()).filter((n) => n !== targetName),
130
+ ];
131
+
132
+ const errors: string[] = [];
133
+ for (const name of names) {
134
+ try {
135
+ const provider = this.providers.get(name);
136
+ if (!provider) continue;
137
+ return await this.withTimeout(provider.complete({ ...request, provider: name }), timeout, name);
138
+ } catch (err) {
139
+ errors.push(`${name}: ${err instanceof Error ? err.message : String(err)}`);
140
+ }
141
+ }
142
+
143
+ throw new AiAllProvidersFailed(errors);
144
+ }
145
+
146
+ private withTimeout<T>(promise: Promise<T>, ms: number, providerName: string): Promise<T> {
147
+ return new Promise<T>((resolve, reject) => {
148
+ const timer = setTimeout(() => reject(new AiTimeoutError(providerName, ms)), ms);
149
+ promise.then(
150
+ (val) => { clearTimeout(timer); resolve(val); },
151
+ (err) => { clearTimeout(timer); reject(err); },
152
+ );
153
+ });
154
+ }
155
+ }
@@ -0,0 +1,38 @@
1
+ import type { AiMessage, AiToolCall } from '../types';
2
+ import type { ToolRegistry } from './tool-registry';
3
+
4
+ /**
5
+ * Executes tool calls from LLM responses and formats results as AiMessages
6
+ */
7
+ export class ToolExecutor {
8
+ public constructor(private readonly registry: ToolRegistry) {}
9
+
10
+ /**
11
+ * Execute all tool calls in parallel and return result messages
12
+ */
13
+ public async executeAll(toolCalls: AiToolCall[]): Promise<AiMessage[]> {
14
+ const results = await Promise.all(
15
+ toolCalls.map((call) => this.executeOne(call)),
16
+ );
17
+ return results;
18
+ }
19
+
20
+ private async executeOne(call: AiToolCall): Promise<AiMessage> {
21
+ let content: string;
22
+ try {
23
+ const result = await this.registry.execute(call.name, call.arguments);
24
+ content =
25
+ typeof result === 'string'
26
+ ? result
27
+ : JSON.stringify(result, null, 2);
28
+ } catch (err) {
29
+ content = `Error executing tool "${call.name}": ${err instanceof Error ? err.message : String(err)}`;
30
+ }
31
+
32
+ return {
33
+ role: 'tool',
34
+ content,
35
+ toolCallId: call.id,
36
+ };
37
+ }
38
+ }
@@ -0,0 +1,91 @@
1
+ import type { AiToolDefinition } from '../types';
2
+ import { AI_TOOL_METADATA_KEY } from '../types';
3
+
4
/**
 * Registered tool entry: the LLM-facing definition plus a callable
 * implementation.
 */
export interface RegisteredTool extends AiToolDefinition {
  /** Execute function, pre-bound to its owning instance */
  execute(args: Record<string, unknown>): Promise<unknown>;
}
11
+
12
+ /**
13
+ * Registry for all @AiTool()-decorated methods
14
+ */
15
+ export class ToolRegistry {
16
+ private readonly tools = new Map<string, RegisteredTool>();
17
+
18
+ /**
19
+ * Register a tool manually
20
+ */
21
+ public register(tool: RegisteredTool): void {
22
+ this.tools.set(tool.name, tool);
23
+ }
24
+
25
+ /**
26
+ * Scan an object instance for @AiTool() decorated methods and register them
27
+ */
28
+ public scanAndRegister(instance: object): void {
29
+ const proto = Object.getPrototypeOf(instance);
30
+ const methodNames = Object.getOwnPropertyNames(proto).filter(
31
+ (key) => key !== 'constructor',
32
+ );
33
+
34
+ for (const methodName of methodNames) {
35
+ const metadata: AiToolDefinition | undefined = Reflect.getMetadata(
36
+ AI_TOOL_METADATA_KEY,
37
+ proto,
38
+ methodName,
39
+ );
40
+ if (metadata) {
41
+ const method = (instance as Record<string, unknown>)[methodName];
42
+ if (typeof method === 'function') {
43
+ this.tools.set(metadata.name, {
44
+ ...metadata,
45
+ execute: (args: Record<string, unknown>) =>
46
+ (method as (args: Record<string, unknown>) => Promise<unknown>).call(
47
+ instance,
48
+ args,
49
+ ),
50
+ });
51
+ }
52
+ }
53
+ }
54
+ }
55
+
56
+ /**
57
+ * Get all registered tools as definitions (for LLM request)
58
+ */
59
+ public getDefinitions(): AiToolDefinition[] {
60
+ return Array.from(this.tools.values()).map(({ name, description, parameters }) => ({
61
+ name,
62
+ description,
63
+ parameters,
64
+ }));
65
+ }
66
+
67
+ /**
68
+ * Execute a tool by name
69
+ */
70
+ public async execute(name: string, args: Record<string, unknown>): Promise<unknown> {
71
+ const tool = this.tools.get(name);
72
+ if (!tool) {
73
+ throw new Error(`Tool "${name}" not found in registry`);
74
+ }
75
+ return tool.execute(args);
76
+ }
77
+
78
+ /**
79
+ * Check whether a tool exists
80
+ */
81
+ public has(name: string): boolean {
82
+ return this.tools.has(name);
83
+ }
84
+
85
+ /**
86
+ * Number of registered tools
87
+ */
88
+ public get size(): number {
89
+ return this.tools.size;
90
+ }
91
+ }
@@ -0,0 +1,145 @@
1
/**
 * Role of a chat message. 'tool' messages carry a tool-call result back to
 * the model; the others follow the usual chat-completion conventions.
 */
export type AiMessageRole = 'system' | 'user' | 'assistant' | 'tool';

/**
 * Single AI message in a conversation.
 */
export interface AiMessage {
  role: AiMessageRole;
  content: string;
  /** ID of the tool call this message answers (only for role='tool') */
  toolCallId?: string;
  /** Tool calls returned by assistant */
  toolCalls?: AiToolCall[];
}

/**
 * A single tool invocation requested by the LLM.
 */
export interface AiToolCall {
  id: string;
  name: string;
  /** Parsed arguments object (decoded from the provider's JSON string) */
  arguments: Record<string, unknown>;
}

/**
 * AI tool definition advertised to the model (function calling).
 */
export interface AiToolDefinition {
  name: string;
  description: string;
  /** JSON Schema describing the tool's parameters */
  parameters: Record<string, unknown>;
}

/**
 * AI request to an LLM provider.
 */
export interface AiRequest {
  messages: AiMessage[];
  /** Model ID; falls back to the provider's default when omitted */
  model?: string;
  temperature?: number;
  maxTokens?: number;
  /** Tools the model may call */
  tools?: AiToolDefinition[];
  /** Provider name override */
  provider?: string;
}

/**
 * Non-streaming AI response.
 */
export interface AiResponse {
  content: string;
  /** Present when the model requested tool calls */
  toolCalls?: AiToolCall[];
  model: string;
  provider: string;
  usage: AiUsage;
  /** Why generation stopped ('length' = truncated by token limit) */
  finishReason: 'stop' | 'tool_calls' | 'length' | 'error';
}

/**
 * Streaming AI chunk (one SSE event).
 */
export interface AiChunk {
  /** Incremental content delta, if any */
  content?: string;
  /** Incremental tool-call delta, if any */
  toolCallDelta?: {
    index: number;
    id?: string;
    name?: string;
    argumentsDelta?: string;
  };
  /** True on the final chunk of the stream */
  done: boolean;
  model?: string;
  usage?: AiUsage;
}

/**
 * Token usage statistics for a single request.
 */
export interface AiUsage {
  promptTokens: number;
  completionTokens: number;
  totalTokens: number;
  /** Estimated cost in USD (omitted when pricing is unknown) */
  estimatedCostUsd?: number;
}

/**
 * LLM provider abstraction interface implemented by each backend.
 */
export interface LlmProvider {
  /**
   * Non-streaming completion
   */
  complete(request: AiRequest): Promise<AiResponse>;
  /**
   * Streaming completion — returns SSE-encoded ReadableStream
   */
  stream(request: AiRequest): ReadableStream<Uint8Array>;
  /**
   * Approximate token count for messages
   */
  countTokens(messages: AiMessage[]): number;
  /** Provider name (e.g. 'openai') */
  readonly name: string;
}

/**
 * Provider configuration entry: constructor plus its config object.
 */
export interface AiProviderConfig<T = unknown> {
  /** Unique provider name used for lookup and request overrides */
  name: string;
  /** Provider class, instantiated with `config` */
  provider: new (config: T) => LlmProvider;
  config: T;
  /** Use this provider by default */
  default?: boolean;
}

/**
 * AiModule configuration
 */
export interface AiModuleOptions {
  providers: AiProviderConfig[];
  /** Enable provider fallback chain on error */
  fallback?: boolean;
  /** Request timeout in ms (default 30000) */
  timeout?: number;
  /** Tool calling configuration */
  tools?: {
    /** Auto-discover @AiTool() decorated methods */
    autoDiscover?: boolean;
    /** Max tool call iterations per request (default 10) */
    maxIterations?: number;
  };
}

/** DI token for the AiService instance. */
export const AI_SERVICE_TOKEN = Symbol('@dangao/bun-server:ai:service');
/** DI token for the AiModuleOptions value. */
export const AI_MODULE_OPTIONS_TOKEN = Symbol('@dangao/bun-server:ai:options');
/** DI token for the ToolRegistry instance. */
export const AI_TOOL_REGISTRY_TOKEN = Symbol('@dangao/bun-server:ai:tool-registry');

/**
 * Metadata key under which @AiTool stores its AiToolDefinition
 */
export const AI_TOOL_METADATA_KEY = '@dangao/bun-server:ai:tool';