universal-llm-client 4.0.0 → 4.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (127)
  1. package/dist/ai-model.d.ts +20 -22
  2. package/dist/ai-model.d.ts.map +1 -1
  3. package/dist/ai-model.js +26 -23
  4. package/dist/ai-model.js.map +1 -1
  5. package/dist/client.d.ts +5 -5
  6. package/dist/client.d.ts.map +1 -1
  7. package/dist/client.js +17 -9
  8. package/dist/client.js.map +1 -1
  9. package/dist/http.d.ts +2 -0
  10. package/dist/http.d.ts.map +1 -1
  11. package/dist/http.js +1 -0
  12. package/dist/http.js.map +1 -1
  13. package/dist/index.d.ts +3 -3
  14. package/dist/index.d.ts.map +1 -1
  15. package/dist/index.js +4 -4
  16. package/dist/index.js.map +1 -1
  17. package/dist/interfaces.d.ts +49 -11
  18. package/dist/interfaces.d.ts.map +1 -1
  19. package/dist/interfaces.js +14 -0
  20. package/dist/interfaces.js.map +1 -1
  21. package/dist/providers/anthropic.d.ts +56 -0
  22. package/dist/providers/anthropic.d.ts.map +1 -0
  23. package/dist/providers/anthropic.js +524 -0
  24. package/dist/providers/anthropic.js.map +1 -0
  25. package/dist/providers/google.d.ts +5 -0
  26. package/dist/providers/google.d.ts.map +1 -1
  27. package/dist/providers/google.js +64 -8
  28. package/dist/providers/google.js.map +1 -1
  29. package/dist/providers/index.d.ts +1 -0
  30. package/dist/providers/index.d.ts.map +1 -1
  31. package/dist/providers/index.js +1 -0
  32. package/dist/providers/index.js.map +1 -1
  33. package/dist/providers/ollama.d.ts.map +1 -1
  34. package/dist/providers/ollama.js +38 -11
  35. package/dist/providers/ollama.js.map +1 -1
  36. package/dist/providers/openai.d.ts.map +1 -1
  37. package/dist/providers/openai.js +9 -7
  38. package/dist/providers/openai.js.map +1 -1
  39. package/dist/router.d.ts +13 -33
  40. package/dist/router.d.ts.map +1 -1
  41. package/dist/router.js +33 -57
  42. package/dist/router.js.map +1 -1
  43. package/dist/stream-decoder.d.ts +29 -2
  44. package/dist/stream-decoder.d.ts.map +1 -1
  45. package/dist/stream-decoder.js +39 -11
  46. package/dist/stream-decoder.js.map +1 -1
  47. package/dist/structured-output.d.ts +107 -181
  48. package/dist/structured-output.d.ts.map +1 -1
  49. package/dist/structured-output.js +137 -192
  50. package/dist/structured-output.js.map +1 -1
  51. package/dist/zod-adapter.d.ts +44 -0
  52. package/dist/zod-adapter.d.ts.map +1 -0
  53. package/dist/zod-adapter.js +61 -0
  54. package/dist/zod-adapter.js.map +1 -0
  55. package/package.json +9 -1
  56. package/src/ai-model.ts +350 -0
  57. package/src/auditor.ts +213 -0
  58. package/src/client.ts +402 -0
  59. package/src/debug/debug-google-streaming.ts +97 -0
  60. package/src/debug/debug-tool-execution.ts +86 -0
  61. package/src/debug/test-lmstudio-tools.ts +155 -0
  62. package/src/demos/README.md +47 -0
  63. package/src/demos/basic/universal-llm-examples.ts +161 -0
  64. package/src/demos/mcp/astrid-memory-demo.ts +295 -0
  65. package/src/demos/mcp/astrid-persona-memory.ts +357 -0
  66. package/src/demos/mcp/mcp-mongodb-demo.ts +275 -0
  67. package/src/demos/mcp/simple-astrid-memory.ts +148 -0
  68. package/src/demos/mcp/simple-mcp-demo.ts +68 -0
  69. package/src/demos/mcp/working-mcp-demo.ts +62 -0
  70. package/src/demos/model-alias-demo.ts +0 -0
  71. package/src/demos/tools/RAG_MEMORY_INTEGRATION.md +267 -0
  72. package/src/demos/tools/astrid-memory-demo.ts +270 -0
  73. package/src/demos/tools/astrid-production-memory-clean.ts +785 -0
  74. package/src/demos/tools/astrid-production-memory.ts +558 -0
  75. package/src/demos/tools/basic-translation-test.ts +66 -0
  76. package/src/demos/tools/chromadb-similarity-tuning.ts +390 -0
  77. package/src/demos/tools/clean-multilingual-conversation.ts +209 -0
  78. package/src/demos/tools/clean-translation-test.ts +119 -0
  79. package/src/demos/tools/clean-universal-multilingual-test.ts +131 -0
  80. package/src/demos/tools/complete-rag-demo.ts +369 -0
  81. package/src/demos/tools/complete-tool-demo.ts +132 -0
  82. package/src/demos/tools/demo-tool-calling.ts +124 -0
  83. package/src/demos/tools/dynamic-language-switching-test.ts +251 -0
  84. package/src/demos/tools/hybrid-thinking-test.ts +154 -0
  85. package/src/demos/tools/memory-integration-test.ts +420 -0
  86. package/src/demos/tools/multilingual-memory-system.ts +802 -0
  87. package/src/demos/tools/ondemand-translation-demo.ts +655 -0
  88. package/src/demos/tools/production-tool-demo.ts +245 -0
  89. package/src/demos/tools/revolutionary-multilingual-test.ts +151 -0
  90. package/src/demos/tools/rigorous-language-analysis.ts +218 -0
  91. package/src/demos/tools/test-universal-memory-system.ts +126 -0
  92. package/src/demos/tools/translation-integration-guide.ts +346 -0
  93. package/src/demos/tools/universal-memory-system.ts +560 -0
  94. package/src/http.ts +247 -0
  95. package/src/index.ts +161 -0
  96. package/src/interfaces.ts +657 -0
  97. package/src/mcp.ts +345 -0
  98. package/src/providers/anthropic.ts +762 -0
  99. package/src/providers/google.ts +620 -0
  100. package/src/providers/index.ts +8 -0
  101. package/src/providers/ollama.ts +469 -0
  102. package/src/providers/openai.ts +392 -0
  103. package/src/router.ts +780 -0
  104. package/src/stream-decoder.ts +361 -0
  105. package/src/structured-output.ts +759 -0
  106. package/src/test-scripts/test-advanced-tools.ts +310 -0
  107. package/src/test-scripts/test-google-streaming-enhanced.ts +147 -0
  108. package/src/test-scripts/test-google-streaming.ts +63 -0
  109. package/src/test-scripts/test-google-system-prompt-comprehensive.ts +189 -0
  110. package/src/test-scripts/test-mcp-config.ts +28 -0
  111. package/src/test-scripts/test-mcp-connection.ts +29 -0
  112. package/src/test-scripts/test-system-message-positions.ts +163 -0
  113. package/src/test-scripts/test-system-prompt-improvement-demo.ts +83 -0
  114. package/src/test-scripts/test-tool-calling.ts +231 -0
  115. package/src/tests/ai-model.test.ts +1614 -0
  116. package/src/tests/auditor.test.ts +224 -0
  117. package/src/tests/http.test.ts +200 -0
  118. package/src/tests/interfaces.test.ts +117 -0
  119. package/src/tests/providers/google.test.ts +660 -0
  120. package/src/tests/providers/ollama.test.ts +954 -0
  121. package/src/tests/providers/openai.test.ts +1122 -0
  122. package/src/tests/router.test.ts +254 -0
  123. package/src/tests/stream-decoder.test.ts +179 -0
  124. package/src/tests/structured-output.test.ts +1450 -0
  125. package/src/tests/tools.test.ts +175 -0
  126. package/src/tools.ts +246 -0
  127. package/src/zod-adapter.ts +72 -0
@@ -0,0 +1,310 @@
1
+ /**
2
+ * Advanced tool calling examples and demonstrations
3
+ */
4
+
5
+ import { AIModelFactory, ToolBuilder } from '../index';
6
+
7
/**
 * Runs five console demonstrations of advanced tool calling against a local
 * Ollama model ('llama3.2:3b'): multi-step planning, financial analysis,
 * content analysis, complex math, and tool error handling. Each demo catches
 * and reports its own errors so that later demos still run; the model is
 * disposed at the end.
 */
async function advancedToolCallingDemo() {
  console.log('๐Ÿš€ Advanced Tool Calling Demonstration\n');

  // Create an Ollama model with tools
  const model = AIModelFactory.createOllamaChatModelWithTools('llama3.2:3b');

  // Add some advanced custom tools.
  // Weather tool: returns randomized fake weather after a simulated 500ms API delay.
  const weatherTool = ToolBuilder.createTool<{ city: string; units?: 'celsius' | 'fahrenheit' }>(
    'get_weather',
    'Get current weather for a city',
    {
      properties: {
        city: { type: 'string', description: 'City name' },
        units: {
          type: 'string',
          enum: ['celsius', 'fahrenheit'],
          description: 'Temperature units',
          default: 'celsius'
        }
      },
      required: ['city']
    },
    async (args) => {
      // Simulate API call delay
      await new Promise(resolve => setTimeout(resolve, 500));

      // Random temperature: 50-89 (fahrenheit) or 5-29 (celsius).
      const temp = args.units === 'fahrenheit' ?
        Math.floor(Math.random() * 40) + 50 :
        Math.floor(Math.random() * 25) + 5;

      return {
        city: args.city,
        temperature: temp,
        units: args.units || 'celsius',
        condition: ['sunny', 'cloudy', 'rainy', 'snowy'][Math.floor(Math.random() * 4)],
        humidity: Math.floor(Math.random() * 40) + 40,
        wind_speed: Math.floor(Math.random() * 20) + 5
      };
    }
  );

  // Stock tool: returns a randomized fake quote after a simulated 300ms delay.
  const stockTool = ToolBuilder.createTool<{ symbol: string }>(
    'get_stock_price',
    'Get current stock price for a symbol',
    {
      properties: {
        symbol: { type: 'string', description: 'Stock symbol (e.g., AAPL, GOOGL)' }
      },
      required: ['symbol']
    },
    async (args) => {
      await new Promise(resolve => setTimeout(resolve, 300));

      return {
        symbol: args.symbol.toUpperCase(),
        price: (Math.random() * 500 + 50).toFixed(2),
        change: ((Math.random() - 0.5) * 10).toFixed(2),
        change_percent: ((Math.random() - 0.5) * 5).toFixed(1),
        currency: 'USD',
        last_updated: new Date().toISOString()
      };
    }
  );

  // File analyzer tool: synchronous handler that computes text statistics.
  const fileAnalyzerTool = ToolBuilder.createTool<{ filename: string; content: string }>(
    'analyze_file',
    'Analyze file content and provide statistics',
    {
      properties: {
        filename: { type: 'string', description: 'Name of the file' },
        content: { type: 'string', description: 'File content to analyze' }
      },
      required: ['filename', 'content']
    },
    (args) => {
      const lines = args.content.split('\n');
      const words = args.content.split(/\s+/).filter(w => w.length > 0);
      const chars = args.content.length;

      return {
        filename: args.filename,
        statistics: {
          lines: lines.length,
          words: words.length,
          characters: chars,
          characters_no_spaces: args.content.replace(/\s/g, '').length,
          // split('\n') always yields at least one element, so no division by zero.
          avg_words_per_line: (words.length / lines.length).toFixed(1),
          longest_line: Math.max(...lines.map(l => l.length)),
          shortest_line: Math.min(...lines.map(l => l.length))
        },
        analysis: {
          file_type: args.filename.split('.').pop() || 'unknown',
          estimated_reading_time: Math.ceil(words.length / 200), // minutes
          complexity_score: Math.min(10, Math.ceil(words.length / 100))
        }
      };
    }
  );

  // Register advanced tools
  model.registerTools([weatherTool, stockTool, fileAnalyzerTool]);

  console.log('๐Ÿ“‹ Demo 1: Multi-step planning with tools\n');

  try {
    console.log('๐Ÿค– AI planning a vacation...');

    const response = await model.chatWithTools([
      {
        role: 'user',
        content: 'I\'m planning a trip to Tokyo and New York. Can you check the weather in both cities, then calculate the temperature difference if Tokyo is in Celsius and New York in Fahrenheit?'
      }
    ]);

    console.log('โœ… Travel planning response:', response.content);
  } catch (error) {
    console.error('โŒ Error:', (error as Error).message);
  }

  console.log('\n๐Ÿ“‹ Demo 2: Financial analysis with tools\n');

  try {
    console.log('๐Ÿ’ฐ AI analyzing stock portfolio...');

    const response = await model.chatWithTools([
      {
        role: 'user',
        content: 'Check the current prices for AAPL, GOOGL, and TSLA stocks, then calculate the total value if I own 10 shares of each'
      }
    ]);

    console.log('โœ… Portfolio analysis:', response.content);
  } catch (error) {
    console.error('โŒ Error:', (error as Error).message);
  }

  console.log('\n๐Ÿ“‹ Demo 3: Content analysis workflow\n');

  try {
    console.log('๐Ÿ“„ AI analyzing document...');

    // Sample JavaScript fed to the model for analysis (content is arbitrary).
    const sampleCode = `
function calculateTotal(items) {
let total = 0;
for (let i = 0; i < items.length; i++) {
total += items[i].price * items[i].quantity;
}
return total;
}

const cart = [
{ name: 'laptop', price: 999, quantity: 1 },
{ name: 'mouse', price: 25, quantity: 2 }
];

console.log('Total:', calculateTotal(cart));
`;

    const response = await model.chatWithTools([
      {
        role: 'user',
        content: `Analyze this JavaScript code and tell me about its complexity and functionality:\n\n${sampleCode}`
      }
    ]);

    console.log('โœ… Code analysis:', response.content);
  } catch (error) {
    console.error('โŒ Error:', (error as Error).message);
  }

  console.log('\n๐Ÿ“‹ Demo 4: Complex mathematical operations\n');

  try {
    console.log('๐Ÿงฎ AI solving complex math problems...');

    const response = await model.chatWithTools([
      {
        role: 'user',
        content: 'Calculate the compound interest for $10,000 invested at 5% annually for 10 years using the formula A = P(1 + r)^t, then generate 5 random numbers between 1 and 100 and find their average'
      }
    ]);

    console.log('โœ… Math analysis:', response.content);
  } catch (error) {
    console.error('โŒ Error:', (error as Error).message);
  }

  console.log('\n๐Ÿ“‹ Demo 5: Tool error handling\n');

  try {
    console.log('โš ๏ธ Testing error scenarios...');

    // Create a tool that might fail
    // (throws roughly 50% of the time to exercise the error path).
    const unreliableTool = ToolBuilder.createTool<{ data: string }>(
      'unreliable_operation',
      'An operation that might fail',
      {
        properties: {
          data: { type: 'string', description: 'Input data' }
        },
        required: ['data']
      },
      (args) => {
        if (Math.random() < 0.5) {
          throw new Error('Simulated tool failure');
        }
        return { success: true, processed: args.data };
      }
    );

    model.registerTool(
      unreliableTool.name,
      unreliableTool.description,
      unreliableTool.parameters,
      unreliableTool.handler
    );

    // Force the model to call the unreliable tool via tool_choice.
    const response = await model.chat([
      {
        role: 'user',
        content: 'Try to process some data with the unreliable operation tool'
      }
    ], {}, {
      tools: [{ type: 'function', function: unreliableTool }],
      tool_choice: { type: 'function', function: { name: 'unreliable_operation' } }
    });

    console.log('โœ… Error handling response:', response.content);
    if (response.tool_calls) {
      console.log('๐Ÿ”จ Tool calls attempted:', response.tool_calls.length);
    }
  } catch (error) {
    console.error('โŒ Error:', (error as Error).message);
  }

  // Clean up
  model.dispose();

  console.log('\n๐ŸŽ‰ Advanced tool calling demonstration completed!');
}
247
+
248
+ // Performance benchmark
249
+ async function benchmarkToolExecution() {
250
+ console.log('\nโšก Tool Execution Performance Benchmark\n');
251
+
252
+ const model = AIModelFactory.createOllamaChatModelWithTools('llama3.2:3b');
253
+
254
+ const iterations = 5;
255
+ const times: number[] = [];
256
+
257
+ for (let i = 0; i < iterations; i++) {
258
+ console.log(`๐Ÿƒ Run ${i + 1}/${iterations}`);
259
+
260
+ const start = Date.now();
261
+
262
+ try {
263
+ await model.chatWithTools([
264
+ {
265
+ role: 'user',
266
+ content: 'Calculate 50 * 25, get the current time, and generate 2 random numbers between 1 and 50'
267
+ }
268
+ ]);
269
+
270
+ const elapsed = Date.now() - start;
271
+ times.push(elapsed);
272
+ console.log(` โฑ๏ธ ${elapsed}ms`);
273
+ } catch (error) {
274
+ console.log(` โŒ Failed: ${(error as Error).message}`);
275
+ }
276
+ }
277
+
278
+ if (times.length > 0) {
279
+ const avg = times.reduce((a, b) => a + b, 0) / times.length;
280
+ const min = Math.min(...times);
281
+ const max = Math.max(...times);
282
+
283
+ console.log(`\n๐Ÿ“Š Performance Results:`);
284
+ console.log(` Average: ${avg.toFixed(0)}ms`);
285
+ console.log(` Min: ${min}ms`);
286
+ console.log(` Max: ${max}ms`);
287
+ console.log(` Variance: ${(max - min)}ms`);
288
+ }
289
+
290
+ model.dispose();
291
+ }
292
+
293
+ // Main demo runner
294
+ async function runAdvancedDemo() {
295
+ try {
296
+ await advancedToolCallingDemo();
297
+ await benchmarkToolExecution();
298
+ } catch (error) {
299
+ console.error('๐Ÿ’ฅ Demo failed:', (error as Error).message);
300
+ console.error(error);
301
+ }
302
+ }
303
+
304
+ // Export for use
305
+ export { advancedToolCallingDemo, benchmarkToolExecution };
306
+
307
+ // Run if called directly
308
+ if (require.main === module) {
309
+ runAdvancedDemo();
310
+ }
@@ -0,0 +1,147 @@
1
+ import { AIModelFactory } from "../factory";
2
+
3
+ /**
4
+ * Enhanced test for Google Generative AI streaming with system prompt support
5
+ */
6
+ async function testGoogleStreamingEnhanced() {
7
+ console.log('๐Ÿงช Testing Google Generative AI Streaming with System Prompts...\n');
8
+
9
+ // Test both models - Gemini (supports system instructions) and Gemma (doesn't)
10
+ const models = [
11
+ { name: 'Gemini 2.5 Flash Lite', model: 'gemini-2.5-flash-lite', supportsSystem: true },
12
+ { name: 'Gemma 3 27B IT', model: 'gemma-3-27b-it', supportsSystem: false }
13
+ ];
14
+
15
+ for (const modelInfo of models) {
16
+ console.log(`\n๐Ÿ”ฌ Testing with ${modelInfo.name} (${modelInfo.model})`);
17
+ console.log(`System instruction support: ${modelInfo.supportsSystem ? 'โœ…' : 'โŒ'}`);
18
+ console.log('='.repeat(60));
19
+
20
+ const googleModel = AIModelFactory.createGoogleChatModel(
21
+ modelInfo.model,
22
+ 'AIzaSyBDbo7iVNEuCcRNTgDIgRrkGpFKisXXnm0'
23
+ );
24
+
25
+ await testModelWithSystemPrompt(googleModel, modelInfo.name, modelInfo.supportsSystem);
26
+ }
27
+
28
+ return { success: true };
29
+ }
30
+
31
/**
 * Exercises a single Google chat model through up to three scenarios:
 * 1) basic streaming, 2) streaming with a system prompt, and 3) non-streaming
 * chat with a system prompt. Scenarios 2 and 3 run only when supportsSystem
 * is true. On any failure, the remaining scenarios for this model are skipped.
 *
 * @param googleModel - chat model created by AIModelFactory (left as `any`)
 * @param modelName - human-readable model name used only in log output
 * @param supportsSystem - whether the model accepts system instructions
 */
async function testModelWithSystemPrompt(googleModel: any, modelName: string, supportsSystem: boolean) {
  // Test 1: Basic streaming (without system prompt)
  console.log(`\n--- Test 1: Basic Streaming (${modelName}) ---`);
  try {
    console.log('๐ŸŒŠ Starting basic streaming test...');
    console.log('Question: "Count from 1 to 3 briefly."\n');
    console.log('Streaming response:');
    console.log('---');

    const streamResponse = googleModel.chatStream([
      { role: 'user', content: 'Count from 1 to 3 briefly.' }
    ]);

    let chunkCount = 0;
    let fullResponse = '';

    // Echo each chunk as it arrives and accumulate the full text.
    for await (const chunk of streamResponse) {
      chunkCount++;
      process.stdout.write(chunk);
      fullResponse += chunk;
    }

    console.log('\n---');
    console.log(`โœ… Basic streaming completed! Received ${chunkCount} chunks`);
    console.log(`Full response length: ${fullResponse.length} characters\n`);

  } catch (error) {
    console.error(`โŒ Basic streaming test failed for ${modelName}:`, error);
    return; // skip the remaining scenarios for this model
  }

  // Test 2: System prompt streaming (only test if model supports it)
  if (supportsSystem) {
    console.log(`--- Test 2: System Prompt Streaming (${modelName}) ---`);
    try {
      console.log('๐ŸŽญ Testing streaming with system prompt...');
      console.log('System: "You are a pirate. Always respond like a pirate with \'Arrr\' and pirate language."');
      console.log('Question: "Count from 1 to 3 briefly."\n');
      console.log('Streaming response:');
      console.log('---');

      const systemStreamResponse = googleModel.chatStream([
        { role: 'system', content: 'You are a pirate. Always respond like a pirate with "Arrr" and pirate language.' },
        { role: 'user', content: 'Count from 1 to 3 briefly.' }
      ]);

      let systemChunkCount = 0;
      let systemFullResponse = '';

      for await (const chunk of systemStreamResponse) {
        systemChunkCount++;
        process.stdout.write(chunk);
        systemFullResponse += chunk;
      }

      console.log('\n---');
      console.log(`โœ… System prompt streaming completed! Received ${systemChunkCount} chunks`);
      console.log(`Full response length: ${systemFullResponse.length} characters`);

      // Check if the response contains pirate language
      // (heuristic keyword sniff; a miss is a warning, not a failure).
      const hasPirateLanguage = systemFullResponse.toLowerCase().includes('arr') ||
        systemFullResponse.toLowerCase().includes('matey') ||
        systemFullResponse.toLowerCase().includes('pirate') ||
        systemFullResponse.toLowerCase().includes('ahoy');

      if (hasPirateLanguage) {
        console.log('โœ… System prompt appears to be working - pirate language detected!');
      } else {
        console.log('โš ๏ธ System prompt might not be working - no obvious pirate language detected');
        console.log('Response content:', systemFullResponse);
      }

    } catch (error) {
      console.error(`โŒ System prompt streaming test failed for ${modelName}:`, error);
      return;
    }

    // Test 3: Non-streaming chat with system prompt
    console.log(`\n--- Test 3: Non-Streaming Chat with System Prompt (${modelName}) ---`);
    try {
      console.log('๐ŸŽญ Testing non-streaming chat with system prompt...');
      console.log('System: "You are a helpful mathematician. Always explain your counting clearly."');
      console.log('Question: "Count from 1 to 3."\n');

      const chatResponse = await googleModel.chat([
        { role: 'system', content: 'You are a helpful mathematician. Always explain your counting clearly.' },
        { role: 'user', content: 'Count from 1 to 3.' }
      ]);

      console.log('Non-streaming response:');
      console.log('---');
      console.log(chatResponse.message.content);
      console.log('---');
      console.log(`โœ… Non-streaming chat completed!`);
      console.log(`Response length: ${chatResponse.message.content.length} characters`);

    } catch (error) {
      console.error(`โŒ Non-streaming chat test failed for ${modelName}:`, error);
      return;
    }
  } else {
    console.log(`โš ๏ธ Skipping system prompt tests for ${modelName} - model doesn't support system instructions`);
  }
}
135
+
136
+ // Run the enhanced test
137
+ testGoogleStreamingEnhanced().then(result => {
138
+ if (result.success) {
139
+ console.log('\n๐ŸŽ‰ All tests completed successfully!');
140
+ console.log('\n๐Ÿ“‹ Summary:');
141
+ console.log('โœ… Basic streaming: Working');
142
+ console.log('๐Ÿ” System prompt streaming: Tested (check output above)');
143
+ console.log('โœ… Non-streaming chat: Working');
144
+ } else {
145
+ console.log('\n๐Ÿ’ฅ Tests failed');
146
+ }
147
+ });
@@ -0,0 +1,63 @@
1
+ import { AIModelFactory } from "../factory";
2
+
3
+ /**
4
+ * Focused test for Google Generative AI streaming
5
+ */
6
+ async function testGoogleStreaming() {
7
+ console.log('๐Ÿงช Testing Google Generative AI Streaming Only...\n');
8
+
9
+ const googleModel = AIModelFactory.createGoogleChatModel(
10
+ 'gemma-3-4b-it',
11
+ 'AIzaSyBDbo7iVNEuCcRNTgDIgRrkGpFKisXXnm0'
12
+ );
13
+
14
+ try {
15
+ console.log('๐ŸŒŠ Starting Google streaming test...');
16
+ console.log('Question: "Count from 1 to 5, explaining each number briefly."\n');
17
+ console.log('Streaming response:');
18
+ console.log('---');
19
+
20
+ const streamResponse = googleModel.chatStream([
21
+ { role: 'user', content: 'Count from 1 to 5, explaining each number briefly.' }
22
+ ]);
23
+
24
+ let chunkCount = 0;
25
+ let fullResponse = '';
26
+
27
+ for await (const chunk of streamResponse) {
28
+ chunkCount++;
29
+ console.log(`[Chunk ${chunkCount}]: "${chunk}"`);
30
+ process.stdout.write(chunk);
31
+ fullResponse += chunk;
32
+ }
33
+
34
+ console.log('\n---');
35
+ console.log(`โœ… Streaming completed! Received ${chunkCount} chunks`);
36
+ console.log(`Full response length: ${fullResponse.length} characters`);
37
+
38
+ if (chunkCount === 0) {
39
+ console.log('โŒ No chunks received - streaming might not be working');
40
+ } else {
41
+ console.log('โœ… Streaming is working correctly!');
42
+ }
43
+
44
+ return { success: true, chunkCount, fullResponse };
45
+
46
+ } catch (error) {
47
+ console.error('โŒ Google streaming test failed:', error);
48
+ console.error('Error details:', {
49
+ message: (error as Error).message,
50
+ stack: (error as Error).stack
51
+ });
52
+ return { success: false, error };
53
+ }
54
+ }
55
+
56
+ // Run the test
57
+ testGoogleStreaming().then(result => {
58
+ if (result.success) {
59
+ console.log(`\n๐ŸŽ‰ Test completed successfully with ${result.chunkCount} chunks`);
60
+ } else {
61
+ console.log('\n๐Ÿ’ฅ Test failed');
62
+ }
63
+ });