universal-llm-client 4.0.0 → 4.2.0

This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (127)
  1. package/dist/ai-model.d.ts +20 -22
  2. package/dist/ai-model.d.ts.map +1 -1
  3. package/dist/ai-model.js +26 -23
  4. package/dist/ai-model.js.map +1 -1
  5. package/dist/client.d.ts +5 -5
  6. package/dist/client.d.ts.map +1 -1
  7. package/dist/client.js +17 -9
  8. package/dist/client.js.map +1 -1
  9. package/dist/http.d.ts +2 -0
  10. package/dist/http.d.ts.map +1 -1
  11. package/dist/http.js +1 -0
  12. package/dist/http.js.map +1 -1
  13. package/dist/index.d.ts +3 -3
  14. package/dist/index.d.ts.map +1 -1
  15. package/dist/index.js +4 -4
  16. package/dist/index.js.map +1 -1
  17. package/dist/interfaces.d.ts +49 -11
  18. package/dist/interfaces.d.ts.map +1 -1
  19. package/dist/interfaces.js +14 -0
  20. package/dist/interfaces.js.map +1 -1
  21. package/dist/providers/anthropic.d.ts +56 -0
  22. package/dist/providers/anthropic.d.ts.map +1 -0
  23. package/dist/providers/anthropic.js +524 -0
  24. package/dist/providers/anthropic.js.map +1 -0
  25. package/dist/providers/google.d.ts +5 -0
  26. package/dist/providers/google.d.ts.map +1 -1
  27. package/dist/providers/google.js +64 -8
  28. package/dist/providers/google.js.map +1 -1
  29. package/dist/providers/index.d.ts +1 -0
  30. package/dist/providers/index.d.ts.map +1 -1
  31. package/dist/providers/index.js +1 -0
  32. package/dist/providers/index.js.map +1 -1
  33. package/dist/providers/ollama.d.ts.map +1 -1
  34. package/dist/providers/ollama.js +38 -11
  35. package/dist/providers/ollama.js.map +1 -1
  36. package/dist/providers/openai.d.ts.map +1 -1
  37. package/dist/providers/openai.js +9 -7
  38. package/dist/providers/openai.js.map +1 -1
  39. package/dist/router.d.ts +13 -33
  40. package/dist/router.d.ts.map +1 -1
  41. package/dist/router.js +33 -57
  42. package/dist/router.js.map +1 -1
  43. package/dist/stream-decoder.d.ts +29 -2
  44. package/dist/stream-decoder.d.ts.map +1 -1
  45. package/dist/stream-decoder.js +39 -11
  46. package/dist/stream-decoder.js.map +1 -1
  47. package/dist/structured-output.d.ts +107 -181
  48. package/dist/structured-output.d.ts.map +1 -1
  49. package/dist/structured-output.js +137 -192
  50. package/dist/structured-output.js.map +1 -1
  51. package/dist/zod-adapter.d.ts +44 -0
  52. package/dist/zod-adapter.d.ts.map +1 -0
  53. package/dist/zod-adapter.js +61 -0
  54. package/dist/zod-adapter.js.map +1 -0
  55. package/package.json +9 -1
  56. package/src/ai-model.ts +350 -0
  57. package/src/auditor.ts +213 -0
  58. package/src/client.ts +402 -0
  59. package/src/debug/debug-google-streaming.ts +97 -0
  60. package/src/debug/debug-tool-execution.ts +86 -0
  61. package/src/debug/test-lmstudio-tools.ts +155 -0
  62. package/src/demos/README.md +47 -0
  63. package/src/demos/basic/universal-llm-examples.ts +161 -0
  64. package/src/demos/mcp/astrid-memory-demo.ts +295 -0
  65. package/src/demos/mcp/astrid-persona-memory.ts +357 -0
  66. package/src/demos/mcp/mcp-mongodb-demo.ts +275 -0
  67. package/src/demos/mcp/simple-astrid-memory.ts +148 -0
  68. package/src/demos/mcp/simple-mcp-demo.ts +68 -0
  69. package/src/demos/mcp/working-mcp-demo.ts +62 -0
  70. package/src/demos/model-alias-demo.ts +0 -0
  71. package/src/demos/tools/RAG_MEMORY_INTEGRATION.md +267 -0
  72. package/src/demos/tools/astrid-memory-demo.ts +270 -0
  73. package/src/demos/tools/astrid-production-memory-clean.ts +785 -0
  74. package/src/demos/tools/astrid-production-memory.ts +558 -0
  75. package/src/demos/tools/basic-translation-test.ts +66 -0
  76. package/src/demos/tools/chromadb-similarity-tuning.ts +390 -0
  77. package/src/demos/tools/clean-multilingual-conversation.ts +209 -0
  78. package/src/demos/tools/clean-translation-test.ts +119 -0
  79. package/src/demos/tools/clean-universal-multilingual-test.ts +131 -0
  80. package/src/demos/tools/complete-rag-demo.ts +369 -0
  81. package/src/demos/tools/complete-tool-demo.ts +132 -0
  82. package/src/demos/tools/demo-tool-calling.ts +124 -0
  83. package/src/demos/tools/dynamic-language-switching-test.ts +251 -0
  84. package/src/demos/tools/hybrid-thinking-test.ts +154 -0
  85. package/src/demos/tools/memory-integration-test.ts +420 -0
  86. package/src/demos/tools/multilingual-memory-system.ts +802 -0
  87. package/src/demos/tools/ondemand-translation-demo.ts +655 -0
  88. package/src/demos/tools/production-tool-demo.ts +245 -0
  89. package/src/demos/tools/revolutionary-multilingual-test.ts +151 -0
  90. package/src/demos/tools/rigorous-language-analysis.ts +218 -0
  91. package/src/demos/tools/test-universal-memory-system.ts +126 -0
  92. package/src/demos/tools/translation-integration-guide.ts +346 -0
  93. package/src/demos/tools/universal-memory-system.ts +560 -0
  94. package/src/http.ts +247 -0
  95. package/src/index.ts +161 -0
  96. package/src/interfaces.ts +657 -0
  97. package/src/mcp.ts +345 -0
  98. package/src/providers/anthropic.ts +762 -0
  99. package/src/providers/google.ts +620 -0
  100. package/src/providers/index.ts +8 -0
  101. package/src/providers/ollama.ts +469 -0
  102. package/src/providers/openai.ts +392 -0
  103. package/src/router.ts +780 -0
  104. package/src/stream-decoder.ts +361 -0
  105. package/src/structured-output.ts +759 -0
  106. package/src/test-scripts/test-advanced-tools.ts +310 -0
  107. package/src/test-scripts/test-google-streaming-enhanced.ts +147 -0
  108. package/src/test-scripts/test-google-streaming.ts +63 -0
  109. package/src/test-scripts/test-google-system-prompt-comprehensive.ts +189 -0
  110. package/src/test-scripts/test-mcp-config.ts +28 -0
  111. package/src/test-scripts/test-mcp-connection.ts +29 -0
  112. package/src/test-scripts/test-system-message-positions.ts +163 -0
  113. package/src/test-scripts/test-system-prompt-improvement-demo.ts +83 -0
  114. package/src/test-scripts/test-tool-calling.ts +231 -0
  115. package/src/tests/ai-model.test.ts +1614 -0
  116. package/src/tests/auditor.test.ts +224 -0
  117. package/src/tests/http.test.ts +200 -0
  118. package/src/tests/interfaces.test.ts +117 -0
  119. package/src/tests/providers/google.test.ts +660 -0
  120. package/src/tests/providers/ollama.test.ts +954 -0
  121. package/src/tests/providers/openai.test.ts +1122 -0
  122. package/src/tests/router.test.ts +254 -0
  123. package/src/tests/stream-decoder.test.ts +179 -0
  124. package/src/tests/structured-output.test.ts +1450 -0
  125. package/src/tests/tools.test.ts +175 -0
  126. package/src/tools.ts +246 -0
  127. package/src/zod-adapter.ts +72 -0
package/src/providers/openai.ts (new file)
@@ -0,0 +1,392 @@
+ /**
+  * Universal LLM Client v3 — OpenAI-Compatible Provider
+  *
+  * Implements BaseLLMClient for OpenAI-compatible APIs.
+  * Works with: OpenAI, OpenRouter, LM Studio, LlamaCpp, vLLM, Groq, Together.
+  */
+
+ import { BaseLLMClient } from '../client.js';
+ import { httpRequest, httpStream, parseSSE, buildHeaders } from '../http.js';
+ import { StandardChatDecoder } from '../stream-decoder.js';
+ import {
+   normalizeJsonSchema,
+   getJsonSchemaFromConfig,
+   type JSONSchema,
+   type StructuredOutputOptions,
+ } from '../structured-output.js';
+ import type {
+   LLMClientOptions,
+   LLMChatMessage,
+   LLMChatResponse,
+   ChatOptions,
+   OpenAIResponse,
+   OpenAIModelInfo,
+   TokenUsageInfo,
+ } from '../interfaces.js';
+ import type { DecodedEvent } from '../stream-decoder.js';
+ import type { Auditor } from '../auditor.js';
+
+ export class OpenAICompatibleClient extends BaseLLMClient {
+   constructor(options: LLMClientOptions, auditor?: Auditor) {
+     // Ensure URL ends with /v1 for standard endpoints
+     let url = (options.url || 'https://api.openai.com').replace(/\/+$/, '');
+     if (!url.endsWith('/v1')) {
+       url += '/v1';
+     }
+     super({ ...options, url }, auditor);
+   }
+
+   // ========================================================================
+   // Chat
+   // ========================================================================
+
+   async chat(
+     messages: LLMChatMessage[],
+     options?: ChatOptions,
+   ): Promise<LLMChatResponse> {
+     // Structured output and tools can now be used together.
+     // The provider sends both response_format and tools in the request.
+     // The Router handles skipping validation when the response contains tool calls.
+
+     const url = `${this.options.url}/chat/completions`;
+     const tools = options?.tools ?? (Object.keys(this.toolRegistry).length > 0 ? this.getToolDefinitions() : undefined);
+
+     const body: Record<string, unknown> = {
+       model: this.options.model,
+       messages: this.convertMessages(messages),
+       ...this.buildRequestParams(options),
+     };
+
+     // Handle structured output
+     const schemaOptions = this.extractSchemaOptions(options);
+     if (schemaOptions) {
+       body['response_format'] = this.buildResponseFormat(schemaOptions);
+     } else if (options?.responseFormat) {
+       body['response_format'] = options.responseFormat;
+     }
+
+     if (tools?.length) {
+       body['tools'] = tools;
+       if (options?.toolChoice) {
+         body['tool_choice'] = options.toolChoice;
+       }
+     }
+
+     const start = Date.now();
+     this.auditor.record({
+       timestamp: start,
+       type: 'request',
+       provider: 'openai',
+       model: this.options.model,
+     });
+
+     const response = await httpRequest<OpenAIResponse>(url, {
+       method: 'POST',
+       headers: buildHeaders(this.options),
+       body,
+       timeout: this.options.timeout ?? 30000,
+     });
+
+     const data = response.data;
+     const choice = data.choices[0];
+
+     if (!choice) {
+       throw new Error('No choices returned from OpenAI API');
+     }
+
+     const usage: TokenUsageInfo | undefined = data.usage
+       ? {
+           inputTokens: data.usage.prompt_tokens,
+           outputTokens: data.usage.completion_tokens,
+           totalTokens: data.usage.total_tokens,
+           cachedTokens: data.usage.prompt_tokens_details?.cached_tokens,
+         }
+       : undefined;
+
+     // Normalize tool calls (ensure IDs exist)
+     const toolCalls = choice.message.tool_calls?.map(tc => ({
+       ...tc,
+       id: tc.id || this.generateToolCallId(),
+     }));
+
+     // Get content, handling null case
+     const content = choice.message.content || '';
+
+     const result: LLMChatResponse = {
+       message: {
+         role: 'assistant',
+         content,
+         tool_calls: toolCalls,
+       },
+       usage,
+       provider: 'openai',
+     };
+
+     this.auditor.record({
+       timestamp: Date.now(),
+       type: 'response',
+       provider: 'openai',
+       model: this.options.model,
+       duration: Date.now() - start,
+       usage,
+     });
+
+     return result;
+   }
+
+   // ========================================================================
+   // Streaming
+   // ========================================================================
+
+   async *chatStream(
+     messages: LLMChatMessage[],
+     options?: ChatOptions,
+   ): AsyncGenerator<DecodedEvent, LLMChatResponse | void, unknown> {
+     const url = `${this.options.url}/chat/completions`;
+     const tools = options?.tools ?? (Object.keys(this.toolRegistry).length > 0 ? this.getToolDefinitions() : undefined);
+
+     const body: Record<string, unknown> = {
+       model: this.options.model,
+       messages: this.convertMessages(messages),
+       stream: true,
+       ...this.buildRequestParams(options),
+     };
+
+     if (tools?.length) {
+       body['tools'] = tools;
+       if (options?.toolChoice) {
+         body['tool_choice'] = options.toolChoice;
+       }
+     }
+
+     const start = Date.now();
+     this.auditor.record({
+       timestamp: start,
+       type: 'stream_start',
+       provider: 'openai',
+       model: this.options.model,
+     });
+
+     const decoder = new StandardChatDecoder(() => {});
+
+     // Track accumulated tool calls across chunks
+     const toolCallAccum: Map<number, {
+       id: string;
+       type: 'function';
+       function: { name: string; arguments: string };
+     }> = new Map();
+
+     const stream = httpStream(url, {
+       method: 'POST',
+       headers: buildHeaders(this.options),
+       body,
+       timeout: this.options.timeout ?? 120000,
+     });
+
+     let usage: TokenUsageInfo | undefined;
+
+     for await (const { data } of parseSSE(stream)) {
+       try {
+         const parsed = JSON.parse(data) as {
+           choices?: Array<{
+             delta?: {
+               content?: string;
+               tool_calls?: Array<{
+                 index: number;
+                 id?: string;
+                 type?: string;
+                 function?: { name?: string; arguments?: string };
+               }>;
+             };
+             finish_reason?: string;
+           }>;
+           usage?: {
+             prompt_tokens: number;
+             completion_tokens: number;
+             total_tokens: number;
+             prompt_tokens_details?: {
+               cached_tokens?: number;
+             };
+           };
+         };
+
+         if (parsed.usage) {
+           usage = {
+             inputTokens: parsed.usage.prompt_tokens,
+             outputTokens: parsed.usage.completion_tokens,
+             totalTokens: parsed.usage.total_tokens,
+             cachedTokens: parsed.usage.prompt_tokens_details?.cached_tokens,
+           };
+         }
+
+         const delta = parsed.choices?.[0]?.delta;
+         if (!delta) continue;
+
+         if (delta.content) {
+           decoder.push(delta.content);
+           yield { type: 'text', content: delta.content };
+         }
+
+         // Accumulate streamed tool calls
+         if (delta.tool_calls) {
+           for (const tc of delta.tool_calls) {
+             const existing = toolCallAccum.get(tc.index);
+             if (!existing) {
+               toolCallAccum.set(tc.index, {
+                 id: tc.id || this.generateToolCallId(),
+                 type: 'function',
+                 function: {
+                   name: tc.function?.name || '',
+                   arguments: tc.function?.arguments || '',
+                 },
+               });
+             } else {
+               if (tc.function?.arguments) {
+                 existing.function.arguments += tc.function.arguments;
+               }
+               if (tc.function?.name) {
+                 existing.function.name += tc.function.name;
+               }
+             }
+           }
+         }
+
+         // Emit tool calls when stream finishes
+         if (parsed.choices?.[0]?.finish_reason === 'tool_calls' || parsed.choices?.[0]?.finish_reason === 'stop') {
+           if (toolCallAccum.size > 0) {
+             const calls = Array.from(toolCallAccum.values());
+             yield { type: 'tool_call', calls };
+           }
+         }
+       } catch {
+         // Skip unparseable SSE data
+       }
+     }
+
+     decoder.flush();
+
+     this.auditor.record({
+       timestamp: Date.now(),
+       type: 'stream_end',
+       provider: 'openai',
+       model: this.options.model,
+       duration: Date.now() - start,
+       usage,
+     });
+
+     const finalToolCalls = toolCallAccum.size > 0
+       ? Array.from(toolCallAccum.values())
+       : undefined;
+
+     return {
+       message: {
+         role: 'assistant',
+         content: decoder.getCleanContent(),
+         tool_calls: finalToolCalls,
+       },
+       reasoning: decoder.getReasoning(),
+       usage,
+       provider: 'openai',
+     };
+   }
+
+   // ========================================================================
+   // Embeddings
+   // ========================================================================
+
+   async embed(text: string): Promise<number[]> {
+     const url = `${this.options.url}/embeddings`;
+     const response = await httpRequest<{
+       data: Array<{ embedding: number[] }>;
+     }>(url, {
+       method: 'POST',
+       headers: buildHeaders(this.options),
+       body: {
+         model: this.options.model,
+         input: text,
+       },
+       timeout: this.options.timeout ?? 30000,
+     });
+     return response.data.data[0]?.embedding ?? [];
+   }
+
+   // ========================================================================
+   // Model Discovery
+   // ========================================================================
+
+   async getModels(): Promise<string[]> {
+     const url = `${this.options.url}/models`;
+     try {
+       const response = await httpRequest<{
+         data: OpenAIModelInfo[];
+       }>(url, {
+         headers: buildHeaders(this.options),
+         timeout: 5000,
+       });
+       return response.data.data.map(m => m.id);
+     } catch {
+       return [];
+     }
+   }
+
+   // ========================================================================
+   // Internals
+   // ========================================================================
+
+   private convertMessages(messages: LLMChatMessage[]): LLMChatMessage[] {
+     // OpenAI format is our canonical format, minimal conversion needed
+     return messages.map(msg => ({
+       ...msg,
+       // Ensure content is never null/undefined
+       content: msg.content ?? '',
+     }));
+   }
+
+   private buildRequestParams(options?: ChatOptions): Record<string, unknown> {
+     const params: Record<string, unknown> = {
+       ...this.options.defaultParameters,
+       ...options?.parameters,
+     };
+     if (options?.temperature !== undefined) params['temperature'] = options.temperature;
+     if (options?.maxTokens !== undefined) params['max_tokens'] = options.maxTokens;
+     return params;
+   }
+
+   // ========================================================================
+   // Structured Output Helpers
+   // ========================================================================
+
+   /**
+    * Build OpenAI response_format for structured output.
+    */
+   private buildResponseFormat(options: StructuredOutputOptions<unknown> & { strict?: boolean }): Record<string, unknown> {
+     let jsonSchema: JSONSchema;
+     let name: string;
+     let description: string | undefined;
+
+     // Prefer jsonSchema if provided (handles raw JSON Schema case)
+     if (options.jsonSchema) {
+       // Use raw JSON Schema
+       jsonSchema = normalizeJsonSchema(options.jsonSchema);
+       name = options.name || 'response';
+       description = options.description;
+     } else if (options.schemaConfig) {
+       // Use SchemaConfig's embedded JSON Schema
+       jsonSchema = getJsonSchemaFromConfig(options.schemaConfig);
+       name = options.name || options.schemaConfig.name || 'response';
+       description = options.description || options.schemaConfig.description;
+     } else {
+       // Should not happen - we check this in extractSchemaOptions
+       throw new Error('Either schemaConfig or jsonSchema must be provided');
+     }
+
+     // OpenAI strict mode — configurable, defaults to true for reliable structured output
+     return {
+       type: 'json_schema',
+       json_schema: {
+         name,
+         ...(description && { description }),
+         schema: jsonSchema,
+         strict: options.strict ?? true,
+       },
+     };
+   }
+ }
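
For orientation, a minimal usage sketch of the class added above. The package-root import path and the apiKey option name are assumptions (the hunk only shows url, model, timeout, and defaultParameters being read); everything else follows the code in the diff.

import { OpenAICompatibleClient } from 'universal-llm-client'; // assumed export path

const client = new OpenAICompatibleClient({
  url: 'http://localhost:1234', // the constructor normalizes this to http://localhost:1234/v1
  model: 'gpt-4o-mini',
  apiKey: process.env.OPENAI_API_KEY, // assumed option name; buildHeaders() derives auth headers from options
});

const response = await client.chat([
  { role: 'user', content: 'Summarize server-sent events in one sentence.' },
]);
console.log(response.message.content);
console.log('tokens:', response.usage?.totalTokens);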
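
chatStream() yields DecodedEvent values ('text' deltas, plus a 'tool_call' batch when the stream finishes with accumulated calls) and delivers the assembled LLMChatResponse as the generator's return value, which a plain for await loop would discard. A sketch that keeps it, reusing the client from the previous example:

const stream = client.chatStream([{ role: 'user', content: 'Tell me a short story.' }]);
let step = await stream.next();
while (!step.done) {
  if (step.value.type === 'text') process.stdout.write(step.value.content);
  if (step.value.type === 'tool_call') console.log('tool calls:', step.value.calls);
  step = await stream.next();
}
const final = step.value; // LLMChatResponse | void
if (final) {
  console.log('\nreasoning:', final.reasoning);
  console.log('usage:', final.usage);
}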
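
Per-call tools are forwarded verbatim as the request's tools field, and streamed tool-call fragments are stitched back together by index, with IDs generated when the API omits them. A sketch assuming OpenAI's function-tool wire shape, which the pass-through implies but the hunk does not itself define:

const toolResponse = await client.chat(
  [{ role: 'user', content: 'What is 2 + 2?' }],
  {
    tools: [
      {
        type: 'function',
        function: {
          name: 'calculator',
          description: 'Evaluate an arithmetic expression',
          parameters: {
            type: 'object',
            properties: { expression: { type: 'string' } },
            required: ['expression'],
          },
        },
      },
    ],
    toolChoice: 'auto',
  },
);
for (const call of toolResponse.message.tool_calls ?? []) {
  console.log(call.id, call.function.name, call.function.arguments);
}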
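
Structured output has two entry points in chat(): schema options picked up by extractSchemaOptions() (defined in the base client, not in this hunk) and converted by buildResponseFormat(), or a raw options.responseFormat passed through untouched. The pass-through path is fully visible above; a sketch of it, setting strict explicitly (buildResponseFormat() defaults it to true):

const weatherSchema = {
  type: 'object',
  properties: {
    city: { type: 'string' },
    tempC: { type: 'number' },
  },
  required: ['city', 'tempC'],
  additionalProperties: false,
};

const structured = await client.chat(
  [{ role: 'user', content: 'Current weather in Oslo, as JSON.' }],
  {
    responseFormat: {
      type: 'json_schema',
      json_schema: { name: 'weather', schema: weatherSchema, strict: true },
    },
  },
);
const weather = JSON.parse(structured.message.content);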