universal-llm-client 4.0.0 → 4.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (127)
  1. package/dist/ai-model.d.ts +20 -22
  2. package/dist/ai-model.d.ts.map +1 -1
  3. package/dist/ai-model.js +26 -23
  4. package/dist/ai-model.js.map +1 -1
  5. package/dist/client.d.ts +5 -5
  6. package/dist/client.d.ts.map +1 -1
  7. package/dist/client.js +17 -9
  8. package/dist/client.js.map +1 -1
  9. package/dist/http.d.ts +2 -0
  10. package/dist/http.d.ts.map +1 -1
  11. package/dist/http.js +1 -0
  12. package/dist/http.js.map +1 -1
  13. package/dist/index.d.ts +3 -3
  14. package/dist/index.d.ts.map +1 -1
  15. package/dist/index.js +4 -4
  16. package/dist/index.js.map +1 -1
  17. package/dist/interfaces.d.ts +49 -11
  18. package/dist/interfaces.d.ts.map +1 -1
  19. package/dist/interfaces.js +14 -0
  20. package/dist/interfaces.js.map +1 -1
  21. package/dist/providers/anthropic.d.ts +56 -0
  22. package/dist/providers/anthropic.d.ts.map +1 -0
  23. package/dist/providers/anthropic.js +524 -0
  24. package/dist/providers/anthropic.js.map +1 -0
  25. package/dist/providers/google.d.ts +5 -0
  26. package/dist/providers/google.d.ts.map +1 -1
  27. package/dist/providers/google.js +64 -8
  28. package/dist/providers/google.js.map +1 -1
  29. package/dist/providers/index.d.ts +1 -0
  30. package/dist/providers/index.d.ts.map +1 -1
  31. package/dist/providers/index.js +1 -0
  32. package/dist/providers/index.js.map +1 -1
  33. package/dist/providers/ollama.d.ts.map +1 -1
  34. package/dist/providers/ollama.js +38 -11
  35. package/dist/providers/ollama.js.map +1 -1
  36. package/dist/providers/openai.d.ts.map +1 -1
  37. package/dist/providers/openai.js +9 -7
  38. package/dist/providers/openai.js.map +1 -1
  39. package/dist/router.d.ts +13 -33
  40. package/dist/router.d.ts.map +1 -1
  41. package/dist/router.js +33 -57
  42. package/dist/router.js.map +1 -1
  43. package/dist/stream-decoder.d.ts +29 -2
  44. package/dist/stream-decoder.d.ts.map +1 -1
  45. package/dist/stream-decoder.js +39 -11
  46. package/dist/stream-decoder.js.map +1 -1
  47. package/dist/structured-output.d.ts +107 -181
  48. package/dist/structured-output.d.ts.map +1 -1
  49. package/dist/structured-output.js +137 -192
  50. package/dist/structured-output.js.map +1 -1
  51. package/dist/zod-adapter.d.ts +44 -0
  52. package/dist/zod-adapter.d.ts.map +1 -0
  53. package/dist/zod-adapter.js +61 -0
  54. package/dist/zod-adapter.js.map +1 -0
  55. package/package.json +9 -1
  56. package/src/ai-model.ts +350 -0
  57. package/src/auditor.ts +213 -0
  58. package/src/client.ts +402 -0
  59. package/src/debug/debug-google-streaming.ts +97 -0
  60. package/src/debug/debug-tool-execution.ts +86 -0
  61. package/src/debug/test-lmstudio-tools.ts +155 -0
  62. package/src/demos/README.md +47 -0
  63. package/src/demos/basic/universal-llm-examples.ts +161 -0
  64. package/src/demos/mcp/astrid-memory-demo.ts +295 -0
  65. package/src/demos/mcp/astrid-persona-memory.ts +357 -0
  66. package/src/demos/mcp/mcp-mongodb-demo.ts +275 -0
  67. package/src/demos/mcp/simple-astrid-memory.ts +148 -0
  68. package/src/demos/mcp/simple-mcp-demo.ts +68 -0
  69. package/src/demos/mcp/working-mcp-demo.ts +62 -0
  70. package/src/demos/model-alias-demo.ts +0 -0
  71. package/src/demos/tools/RAG_MEMORY_INTEGRATION.md +267 -0
  72. package/src/demos/tools/astrid-memory-demo.ts +270 -0
  73. package/src/demos/tools/astrid-production-memory-clean.ts +785 -0
  74. package/src/demos/tools/astrid-production-memory.ts +558 -0
  75. package/src/demos/tools/basic-translation-test.ts +66 -0
  76. package/src/demos/tools/chromadb-similarity-tuning.ts +390 -0
  77. package/src/demos/tools/clean-multilingual-conversation.ts +209 -0
  78. package/src/demos/tools/clean-translation-test.ts +119 -0
  79. package/src/demos/tools/clean-universal-multilingual-test.ts +131 -0
  80. package/src/demos/tools/complete-rag-demo.ts +369 -0
  81. package/src/demos/tools/complete-tool-demo.ts +132 -0
  82. package/src/demos/tools/demo-tool-calling.ts +124 -0
  83. package/src/demos/tools/dynamic-language-switching-test.ts +251 -0
  84. package/src/demos/tools/hybrid-thinking-test.ts +154 -0
  85. package/src/demos/tools/memory-integration-test.ts +420 -0
  86. package/src/demos/tools/multilingual-memory-system.ts +802 -0
  87. package/src/demos/tools/ondemand-translation-demo.ts +655 -0
  88. package/src/demos/tools/production-tool-demo.ts +245 -0
  89. package/src/demos/tools/revolutionary-multilingual-test.ts +151 -0
  90. package/src/demos/tools/rigorous-language-analysis.ts +218 -0
  91. package/src/demos/tools/test-universal-memory-system.ts +126 -0
  92. package/src/demos/tools/translation-integration-guide.ts +346 -0
  93. package/src/demos/tools/universal-memory-system.ts +560 -0
  94. package/src/http.ts +247 -0
  95. package/src/index.ts +161 -0
  96. package/src/interfaces.ts +657 -0
  97. package/src/mcp.ts +345 -0
  98. package/src/providers/anthropic.ts +762 -0
  99. package/src/providers/google.ts +620 -0
  100. package/src/providers/index.ts +8 -0
  101. package/src/providers/ollama.ts +469 -0
  102. package/src/providers/openai.ts +392 -0
  103. package/src/router.ts +780 -0
  104. package/src/stream-decoder.ts +361 -0
  105. package/src/structured-output.ts +759 -0
  106. package/src/test-scripts/test-advanced-tools.ts +310 -0
  107. package/src/test-scripts/test-google-streaming-enhanced.ts +147 -0
  108. package/src/test-scripts/test-google-streaming.ts +63 -0
  109. package/src/test-scripts/test-google-system-prompt-comprehensive.ts +189 -0
  110. package/src/test-scripts/test-mcp-config.ts +28 -0
  111. package/src/test-scripts/test-mcp-connection.ts +29 -0
  112. package/src/test-scripts/test-system-message-positions.ts +163 -0
  113. package/src/test-scripts/test-system-prompt-improvement-demo.ts +83 -0
  114. package/src/test-scripts/test-tool-calling.ts +231 -0
  115. package/src/tests/ai-model.test.ts +1614 -0
  116. package/src/tests/auditor.test.ts +224 -0
  117. package/src/tests/http.test.ts +200 -0
  118. package/src/tests/interfaces.test.ts +117 -0
  119. package/src/tests/providers/google.test.ts +660 -0
  120. package/src/tests/providers/ollama.test.ts +954 -0
  121. package/src/tests/providers/openai.test.ts +1122 -0
  122. package/src/tests/router.test.ts +254 -0
  123. package/src/tests/stream-decoder.test.ts +179 -0
  124. package/src/tests/structured-output.test.ts +1450 -0
  125. package/src/tests/tools.test.ts +175 -0
  126. package/src/tools.ts +246 -0
  127. package/src/zod-adapter.ts +72 -0
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "universal-llm-client",
3
- "version": "4.0.0",
3
+ "version": "4.2.0",
4
4
  "type": "module",
5
5
  "description": "A universal LLM client with transparent provider failover, streaming tool execution, pluggable reasoning, and native observability.",
6
6
  "main": "./dist/index.js",
@@ -33,10 +33,15 @@
33
33
  "./structured-output": {
34
34
  "import": "./dist/structured-output.js",
35
35
  "types": "./dist/structured-output.d.ts"
36
+ },
37
+ "./zod": {
38
+ "import": "./dist/zod-adapter.js",
39
+ "types": "./dist/zod-adapter.d.ts"
36
40
  }
37
41
  },
38
42
  "files": [
39
43
  "dist",
44
+ "src",
40
45
  "README.md",
41
46
  "CHANGELOG.md",
42
47
  "LICENSE"
@@ -89,6 +94,9 @@
89
94
  "peerDependenciesMeta": {
90
95
  "@modelcontextprotocol/sdk": {
91
96
  "optional": true
97
+ },
98
+ "zod": {
99
+ "optional": true
92
100
  }
93
101
  },
94
102
  "engines": {
@@ -0,0 +1,350 @@
1
+ /**
2
+ * Universal LLM Client v3 — AIModel (The Universal Client)
3
+ *
4
+ * The only public-facing class. Developers configure one model with
5
+ * multiple provider backends for transparent failover.
6
+ *
7
+ * Provider classes are internal — the user never imports them.
8
+ */
9
+
10
+ import {
11
+ AIModelApiType,
12
+ type AIModelConfig,
13
+ type ProviderConfig,
14
+ type LLMClientOptions,
15
+ type LLMChatMessage,
16
+ type LLMChatResponse,
17
+ type ChatOptions,
18
+ type ModelMetadata,
19
+ type LLMFunction,
20
+ type ToolHandler,
21
+ } from './interfaces.js';
22
+ import type { DecodedEvent } from './stream-decoder.js';
23
+ import { Router, type RouterConfig, type ProviderStatus } from './router.js';
24
+ import type { Auditor } from './auditor.js';
25
+ import { NoopAuditor } from './auditor.js';
26
+ import { OllamaClient } from './providers/ollama.js';
27
+ import { OpenAICompatibleClient } from './providers/openai.js';
28
+ import { GoogleClient } from './providers/google.js';
29
+ import { AnthropicClient } from './providers/anthropic.js';
30
+ import { BaseLLMClient } from './client.js';
31
+ import {
32
+ type StructuredOutputResult,
33
+ type SchemaConfig,
34
+ } from './structured-output.js';
35
+
36
// ============================================================================
// Default Provider URLs
// ============================================================================

/**
 * Fallback base URLs applied when a ProviderConfig omits `url`.
 * Keys are normalized (lowercased) provider type strings; createClient
 * falls back to '' for types not listed here.
 */
const DEFAULT_URLS: Record<string, string> = {
  ollama: 'http://localhost:11434',
  openai: 'https://api.openai.com',
  llamacpp: 'http://localhost:8080',
  anthropic: 'https://api.anthropic.com',
  // google and vertex build their own URLs internally
};
47
+
48
// ============================================================================
// AIModel — The Universal Client
// ============================================================================

/**
 * The single public-facing client. Wraps a Router over one or more
 * provider backends (Ollama, OpenAI-compatible, Google/Vertex, Anthropic)
 * and delegates every operation to it, so callers get transparent
 * failover without importing provider classes directly.
 */
export class AIModel {
  // Routes each call across the configured providers (retry/failover).
  private router: Router;
  // Observability sink; defaults to NoopAuditor when none is supplied.
  private auditor: Auditor;
  // User-supplied configuration (model name, providers, timeouts, ...).
  private config: AIModelConfig;

  constructor(config: AIModelConfig) {
    this.config = config;
    this.auditor = config.auditor ?? new NoopAuditor();

    const routerConfig: RouterConfig = {
      retriesPerProvider: config.retries ?? 2,
      auditor: this.auditor,
    };
    this.router = new Router(routerConfig);

    // Initialize providers in order. Each provider gets a stable id of the
    // form "<type>-<index>"; unless an explicit priority is configured, its
    // position in the list becomes its priority.
    for (let i = 0; i < config.providers.length; i++) {
      const providerConfig = config.providers[i]!;
      const client = this.createClient(providerConfig);
      const id = `${this.normalizeType(providerConfig.type)}-${i}`;

      this.router.addProvider({
        id,
        client,
        priority: providerConfig.priority ?? i,
        modelOverride: providerConfig.model,
      });
    }
  }

  // ========================================================================
  // Chat
  // ========================================================================

  /** Send a chat request with automatic failover across providers */
  async chat(
    messages: LLMChatMessage[],
    options?: ChatOptions,
  ): Promise<LLMChatResponse> {
    return this.router.chat(messages, options);
  }

  /** Chat with automatic tool execution (multi-turn loop) */
  async chatWithTools(
    messages: LLMChatMessage[],
    options?: ChatOptions & { maxIterations?: number },
  ): Promise<LLMChatResponse> {
    return this.router.chatWithTools(messages, options);
  }

  /** Stream chat response with pluggable decoder strategy */
  async *chatStream(
    messages: LLMChatMessage[],
    options?: ChatOptions,
  ): AsyncGenerator<DecodedEvent, LLMChatResponse | void, unknown> {
    // Delegate the whole generator: yielded events pass through unchanged
    // and the router's return value becomes this generator's return value.
    return yield* this.router.chatStream(messages, options);
  }

  // ========================================================================
  // Structured Output
  // ========================================================================

  /**
   * Generate structured output from the LLM with automatic failover.
   * Validates the response against the provided schema configuration
   * (e.g. one created with `fromZod`).
   * Throws StructuredOutputError on validation failure.
   *
   * @template T The output type
   * @param config Schema configuration (JSON Schema + optional validator)
   * @param messages Chat messages to send
   * @param options Additional options (temperature, maxTokens, etc.)
   * @returns Promise resolving to validated structured output
   * @throws StructuredOutputError if JSON parsing fails or validation fails
   *
   * @example
   * ```typescript
   * import { fromZod } from 'universal-llm-client/zod';
   * const UserConfig = fromZod(z.object({
   *   name: z.string(),
   *   age: z.number(),
   * }));
   *
   * const user = await model.generateStructured(UserConfig, [
   *   { role: 'user', content: 'Generate a user profile' },
   * ]);
   * // user.name: string, user.age: number
   * ```
   */
  async generateStructured<T>(
    config: SchemaConfig<T>,
    messages: LLMChatMessage[],
    options?: ChatOptions,
  ): Promise<T> {
    return this.router.generateStructured(config, messages, options);
  }

  /**
   * Try to generate structured output, returning a result object instead of throwing.
   * Same as generateStructured but returns { ok: true, value } on success
   * and { ok: false, error, rawOutput } on failure.
   *
   * @template T The output type
   * @param config Schema configuration (JSON Schema + optional validator)
   * @param messages Chat messages to send
   * @param options Additional options (temperature, maxTokens, etc.)
   * @returns StructuredOutputResult<T> - either success with value or failure with error
   *
   * @example
   * ```typescript
   * const result = await model.tryParseStructured(config, messages);
   *
   * if (result.ok) {
   *   console.log('User:', result.value.name);
   * } else {
   *   console.log('Error:', result.error.message);
   *   console.log('Raw output:', result.rawOutput);
   * }
   * ```
   */
  async tryParseStructured<T>(
    config: SchemaConfig<T>,
    messages: LLMChatMessage[],
    options?: ChatOptions,
  ): Promise<StructuredOutputResult<T>> {
    return this.router.tryParseStructured(config, messages, options);
  }

  /**
   * Stream structured output with partial validated objects.
   *
   * Yields partial validated objects as JSON generates, then returns the
   * complete validated object on stream completion.
   *
   * For invalid partial JSON, no yield occurs (partial validation is best-effort).
   * On stream completion, if the final JSON fails validation, throws StructuredOutputError.
   *
   * @template T The output type
   * @param config Schema configuration (JSON Schema + optional validator)
   * @param messages Chat messages to send
   * @param options Additional options (temperature, maxTokens, etc.)
   * @yields Partial validated objects as the JSON stream progresses
   * @returns Complete validated object on stream completion
   * @throws StructuredOutputError if final validation fails
   *
   * @example
   * ```typescript
   * import { fromZod } from 'universal-llm-client/zod';
   * const UserConfig = fromZod(z.object({
   *   name: z.string(),
   *   age: z.number(),
   * }));
   *
   * const stream = model.generateStructuredStream(UserConfig, [
   *   { role: 'user', content: 'Generate a user' },
   * ]);
   *
   * for await (const partial of stream) {
   *   console.log('Partial user:', partial);
   * }
   * ```
   */
  async *generateStructuredStream<T>(
    config: SchemaConfig<T>,
    messages: LLMChatMessage[],
    options?: ChatOptions,
  ): AsyncGenerator<T, T, unknown> {
    // Delegating generator: partial objects pass through, the final
    // validated object is returned from the router's generator.
    return yield* this.router.generateStructuredStream(config, messages, options);
  }

  // ========================================================================
  // Embeddings
  // ========================================================================

  /** Generate embedding for a single text */
  async embed(text: string): Promise<number[]> {
    return this.router.embed(text);
  }

  /** Generate embeddings for multiple texts */
  async embedArray(texts: string[]): Promise<number[][]> {
    return this.router.embedArray(texts);
  }

  // ========================================================================
  // Tool Registration
  // ========================================================================

  /** Register a tool callable by the LLM (broadcast to all providers) */
  registerTool(
    name: string,
    description: string,
    parameters: LLMFunction['parameters'],
    handler: ToolHandler,
  ): void {
    this.router.registerTool(name, description, parameters, handler);
  }

  /** Register multiple tools at once */
  registerTools(
    tools: Array<{
      name: string;
      description: string;
      parameters: LLMFunction['parameters'];
      handler: ToolHandler;
    }>,
  ): void {
    this.router.registerTools(tools);
  }

  // ========================================================================
  // Model Management
  // ========================================================================

  /** Get available models from all configured providers */
  async getModels(): Promise<string[]> {
    return this.router.getModels();
  }

  /** Get metadata about the current model (context length, capabilities) */
  async getModelInfo(): Promise<ModelMetadata> {
    return this.router.getModelInfo();
  }

  /** Switch model at runtime (updates all providers) */
  setModel(name: string): void {
    this.config.model = name;
    // The model name change will be picked up by the providers
    // through the router on next request
    // NOTE(review): only this.config.model is mutated here. Each client was
    // constructed with a snapshot of the model name (see createClient), and
    // providers configured with a per-provider model keep their
    // modelOverride — confirm the Router actually re-reads config.model per
    // request, otherwise this setter has no effect on live providers.
  }

  /** Get the current model name */
  get model(): string {
    return this.config.model;
  }

  // ========================================================================
  // Provider Status
  // ========================================================================

  /** Get health/status of all configured providers */
  getProviderStatus(): ProviderStatus[] {
    return this.router.getStatus();
  }

  // ========================================================================
  // Lifecycle
  // ========================================================================

  /** Clean shutdown — flush auditor, disconnect MCP, etc. */
  async dispose(): Promise<void> {
    // NOTE(review): only the auditor is flushed here; the MCP disconnect
    // mentioned above is not performed in this method — confirm it happens
    // elsewhere (e.g. in the router or MCP module) or is still TODO.
    await this.auditor.flush?.();
  }

  // ========================================================================
  // Internal: Provider Factory
  // ========================================================================

  /**
   * Build a provider client from a single ProviderConfig, merging
   * model-level defaults (timeout, retries, debug, thinking, default
   * parameters) with per-provider settings (url, apiKey, region, apiVersion).
   * Throws for unrecognized provider types.
   */
  private createClient(providerConfig: ProviderConfig): BaseLLMClient {
    const type = this.normalizeType(providerConfig.type);
    // Per-provider model wins over the model configured on the AIModel.
    const modelName = providerConfig.model ?? this.config.model;

    const clientOptions: LLMClientOptions = {
      model: modelName,
      url: providerConfig.url ?? DEFAULT_URLS[type] ?? '',
      apiType: type as AIModelApiType,
      apiKey: providerConfig.apiKey,
      timeout: this.config.timeout ?? 30000,
      retries: this.config.retries ?? 2,
      debug: this.config.debug ?? false,
      defaultParameters: this.config.defaultParameters,
      thinking: this.config.thinking ?? false,
      region: providerConfig.region,
      apiVersion: providerConfig.apiVersion,
    };

    switch (type) {
      case 'ollama':
        return new OllamaClient(clientOptions, this.auditor);

      case 'openai':
      case 'llamacpp':
        // llamacpp exposes an OpenAI-compatible HTTP API.
        return new OpenAICompatibleClient(clientOptions, this.auditor);

      case 'google':
      case 'vertex':
        return new GoogleClient(clientOptions, this.auditor);

      case 'anthropic':
        return new AnthropicClient(clientOptions, this.auditor);

      default:
        throw new Error(`Unknown provider type: ${type}`);
    }
  }

  /** Normalize a provider type string to lowercase for lookups and ids. */
  private normalizeType(type: string): string {
    return type.toLowerCase();
  }
}
package/src/auditor.ts ADDED
@@ -0,0 +1,213 @@
1
+ /**
2
+ * Universal LLM Client v3 — Auditor (Observability)
3
+ *
4
+ * Every LLM interaction (request, response, tool call, retry, failover)
5
+ * is recorded through the Auditor interface. Frameworks inject their own
6
+ * Auditor for dashboards, cost tracking, or behavioral scoring.
7
+ */
8
+
9
+ import type { TokenUsageInfo, ToolExecutionResult } from './interfaces.js';
10
+
11
// ============================================================================
// Audit Event
// ============================================================================

/**
 * Discriminator for AuditEvent — one value per LLM lifecycle point:
 * request/response pairs, streaming start/end, tool execution, errors,
 * retry/failover, and structured-output generation/validation.
 */
export type AuditEventType =
  | 'request'
  | 'response'
  | 'stream_start'
  | 'stream_end'
  | 'tool_call'
  | 'tool_result'
  | 'error'
  | 'retry'
  | 'failover'
  | 'structured_request'
  | 'structured_response'
  | 'structured_validation_error';

/**
 * A single observability record. Only `timestamp` and `type` are always
 * present; which optional fields are populated depends on the event type.
 */
export interface AuditEvent {
  /** Unix timestamp in ms */
  timestamp: number;
  /** Event type */
  type: AuditEventType;
  /** Provider that generated this event */
  provider?: string;
  /** Model name */
  model?: string;
  /** Duration in ms (for request/response pairs) */
  duration?: number;
  /** Token usage (for response events) */
  usage?: TokenUsageInfo;
  /** Tool execution details (for tool_call/tool_result events) */
  toolExecution?: ToolExecutionResult;
  /** Error message (for error/retry events) */
  error?: string;
  /** Arbitrary metadata for framework-specific data */
  metadata?: Record<string, unknown>;
  /** Schema name for structured output events */
  schemaName?: string;
  /** Raw output snippet for validation errors */
  rawOutput?: string;
}

// ============================================================================
// Auditor Interface
// ============================================================================

/**
 * Interface for LLM observability.
 *
 * Implement this to capture all LLM lifecycle events.
 * The library calls `record()` at every interaction point.
 */
export interface Auditor {
  /** Record an audit event */
  record(event: AuditEvent): void;
  /** Flush any buffered events (optional) */
  flush?(): Promise<void>;
}
70
+
71
+ // ============================================================================
72
+ // Built-in Auditors
73
+ // ============================================================================
74
+
75
+ /**
76
+ * Zero-overhead auditor that discards all events.
77
+ * Used as the default when no auditor is configured.
78
+ */
79
+ export class NoopAuditor implements Auditor {
80
+ record(_event: AuditEvent): void {
81
+ // Intentionally empty
82
+ }
83
+ }
84
+
85
+ /**
86
+ * Structured console logging auditor.
87
+ * Useful for development and debugging.
88
+ */
89
+ export class ConsoleAuditor implements Auditor {
90
+ private prefix: string;
91
+
92
+ constructor(prefix: string = '[LLM]') {
93
+ this.prefix = prefix;
94
+ }
95
+
96
+ record(event: AuditEvent): void {
97
+ const parts = [
98
+ this.prefix,
99
+ event.type.toUpperCase(),
100
+ event.provider ? `[${event.provider}]` : '',
101
+ event.model ? `(${event.model})` : '',
102
+ ].filter(Boolean);
103
+
104
+ switch (event.type) {
105
+ case 'request':
106
+ console.log(parts.join(' '), '→');
107
+ break;
108
+ case 'response':
109
+ console.log(
110
+ parts.join(' '),
111
+ event.duration ? `${event.duration}ms` : '',
112
+ event.usage ? `${event.usage.totalTokens} tokens` : '',
113
+ );
114
+ break;
115
+ case 'stream_start':
116
+ console.log(parts.join(' '), 'streaming...');
117
+ break;
118
+ case 'stream_end':
119
+ console.log(
120
+ parts.join(' '),
121
+ 'done',
122
+ event.duration ? `${event.duration}ms` : '',
123
+ );
124
+ break;
125
+ case 'tool_call':
126
+ console.log(parts.join(' '), event.toolExecution?.tool_call_id ?? '');
127
+ break;
128
+ case 'tool_result':
129
+ console.log(
130
+ parts.join(' '),
131
+ event.toolExecution?.error ? '❌' : '✅',
132
+ event.toolExecution?.duration ? `${event.toolExecution.duration}ms` : '',
133
+ );
134
+ break;
135
+ case 'error':
136
+ console.error(parts.join(' '), event.error ?? 'Unknown error');
137
+ break;
138
+ case 'retry':
139
+ console.warn(parts.join(' '), event.error ?? '', event.metadata ?? '');
140
+ break;
141
+ case 'failover':
142
+ console.warn(parts.join(' '), '→', event.metadata?.['nextProvider'] ?? '');
143
+ break;
144
+ case 'structured_request':
145
+ console.log(
146
+ parts.join(' '),
147
+ `schema=${event.schemaName ?? 'unknown'}`,
148
+ '→',
149
+ );
150
+ break;
151
+ case 'structured_response':
152
+ console.log(
153
+ parts.join(' '),
154
+ event.duration ? `${event.duration}ms` : '',
155
+ `schema=${event.schemaName ?? 'unknown'}`,
156
+ );
157
+ break;
158
+ case 'structured_validation_error':
159
+ console.error(
160
+ parts.join(' '),
161
+ `schema=${event.schemaName ?? 'unknown'}`,
162
+ event.error ?? 'Validation failed',
163
+ event.rawOutput ? `raw=${event.rawOutput.slice(0, 50)}...` : '',
164
+ );
165
+ break;
166
+ }
167
+ }
168
+ }
169
+
170
+ /**
171
+ * Buffered auditor that collects events for batch processing.
172
+ * Useful for custom sinks (OpenTelemetry, DataDog, databases, etc.)
173
+ */
174
+ export class BufferedAuditor implements Auditor {
175
+ private events: AuditEvent[] = [];
176
+ private maxBufferSize: number;
177
+ private onFlush?: (events: AuditEvent[]) => Promise<void>;
178
+
179
+ constructor(options: {
180
+ maxBufferSize?: number;
181
+ onFlush?: (events: AuditEvent[]) => Promise<void>;
182
+ } = {}) {
183
+ this.maxBufferSize = options.maxBufferSize ?? 1000;
184
+ this.onFlush = options.onFlush;
185
+ }
186
+
187
+ record(event: AuditEvent): void {
188
+ this.events.push(event);
189
+ if (this.events.length >= this.maxBufferSize) {
190
+ // Auto-flush when buffer is full (fire and forget)
191
+ this.flush().catch(() => {});
192
+ }
193
+ }
194
+
195
+ /** Get all buffered events */
196
+ getEvents(): ReadonlyArray<AuditEvent> {
197
+ return this.events;
198
+ }
199
+
200
+ /** Flush buffered events to the configured sink */
201
+ async flush(): Promise<void> {
202
+ if (this.events.length === 0) return;
203
+ const batch = this.events.splice(0);
204
+ if (this.onFlush) {
205
+ await this.onFlush(batch);
206
+ }
207
+ }
208
+
209
+ /** Clear all buffered events without flushing */
210
+ clear(): void {
211
+ this.events.length = 0;
212
+ }
213
+ }