sambanova 1.0.6 → 1.0.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +121 -0
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -240,6 +240,127 @@ const client = new SambanovaClient('YOUR_API_KEY', {
|
|
|
240
240
|
|
|
241
241
|
---
|
|
242
242
|
|
|
243
|
+
---
|
|
244
|
+
|
|
245
|
+
## One-Shot Example to Test All Features
|
|
246
|
+
|
|
247
|
+
### Note:
|
|
248
|
+
|
|
249
|
+
Add your SambaNova API key and your local image file path. A test online image URL is already in place, but you can replace it as needed.
|
|
250
|
+
|
|
251
|
+
```javascript
|
|
252
|
+
import { SambanovaClient, ChatMessage, ModelType } from 'sambanova';
|
|
253
|
+
|
|
254
|
+
import path from 'path';
|
|
255
|
+
|
|
256
|
+
// Replace with your actual API key
|
|
257
|
+
const API_KEY = 'Your API Key';
|
|
258
|
+
|
|
259
|
+
async function testSambanova() {
|
|
260
|
+
try {
|
|
261
|
+
// Initialize the Sambanova client
|
|
262
|
+
const client = new SambanovaClient(API_KEY);
|
|
263
|
+
|
|
264
|
+
// Test 1: Text completion
|
|
265
|
+
console.log('Starting text completion test...');
|
|
266
|
+
const textMessages: ChatMessage[] = [
|
|
267
|
+
{ role: 'user', content: 'What is the capital of France?' }
|
|
268
|
+
];
|
|
269
|
+
|
|
270
|
+
const textResponse = await client.chat(textMessages, {
|
|
271
|
+
model: 'Meta-Llama-3.2-3B-Instruct' as ModelType
|
|
272
|
+
});
|
|
273
|
+
|
|
274
|
+
console.log('Text Completion Response:');
|
|
275
|
+
console.log(textResponse.choices[0].message.content);
|
|
276
|
+
|
|
277
|
+
// Test 2: Vision analysis with an online image URL
|
|
278
|
+
console.log('Starting vision analysis test with an online image URL...');
|
|
279
|
+
const onlineImageMessages: ChatMessage[] = [
|
|
280
|
+
{
|
|
281
|
+
role: 'user',
|
|
282
|
+
content: [
|
|
283
|
+
{ type: 'text', text: 'What is in this image?' },
|
|
284
|
+
{
|
|
285
|
+
type: 'image_url',
|
|
286
|
+
image_url: { url: 'https://picsum.photos/200/300' } // Online image URL
|
|
287
|
+
}
|
|
288
|
+
]
|
|
289
|
+
}
|
|
290
|
+
];
|
|
291
|
+
|
|
292
|
+
const onlineImageResponse = await client.chat(onlineImageMessages, {
|
|
293
|
+
model: 'Llama-3.2-11B-Vision-Instruct' as ModelType
|
|
294
|
+
});
|
|
295
|
+
|
|
296
|
+
console.log('Vision Analysis (Online Image) Response:');
|
|
297
|
+
console.log(onlineImageResponse.choices[0].message.content);
|
|
298
|
+
|
|
299
|
+
// Test 3: Vision analysis with a local image file
|
|
300
|
+
console.log('Starting vision analysis test with a local image file...');
|
|
301
|
+
const localImagePath = path.resolve(__dirname, 'image.jpg'); // Replace with your local image path
|
|
302
|
+
const localImageMessages: ChatMessage[] = [
|
|
303
|
+
{
|
|
304
|
+
role: 'user',
|
|
305
|
+
content: [
|
|
306
|
+
{ type: 'text', text: 'What is in this image?' },
|
|
307
|
+
{
|
|
308
|
+
type: 'image_url',
|
|
309
|
+
image_url: { url: localImagePath } // Local image file
|
|
310
|
+
}
|
|
311
|
+
]
|
|
312
|
+
}
|
|
313
|
+
];
|
|
314
|
+
|
|
315
|
+
const localImageResponse = await client.chat(localImageMessages, {
|
|
316
|
+
model: 'Llama-3.2-11B-Vision-Instruct' as ModelType
|
|
317
|
+
});
|
|
318
|
+
|
|
319
|
+
console.log('Vision Analysis (Local Image) Response:');
|
|
320
|
+
console.log(localImageResponse.choices[0].message.content);
|
|
321
|
+
|
|
322
|
+
// Test 4: Stream chat
|
|
323
|
+
console.log('Starting text stream...');
|
|
324
|
+
const textMessages2: ChatMessage[] = [
|
|
325
|
+
{ role: 'system', content: 'You are a helpful assistant.' },
|
|
326
|
+
{ role: 'user', content: 'Tell me a story about a brave knight.' }
|
|
327
|
+
];
|
|
328
|
+
|
|
329
|
+
console.log('Streamed Text Response:');
|
|
330
|
+
for await (const chunk of client.streamChat(textMessages2, {
|
|
331
|
+
model: 'Meta-Llama-3.1-8B-Instruct' as ModelType
|
|
332
|
+
})) {
|
|
333
|
+
process.stdout.write(chunk);
|
|
334
|
+
}
|
|
335
|
+
|
|
336
|
+
console.log('\nStarting image stream...');
|
|
337
|
+
const imageMessages: ChatMessage[] = [
|
|
338
|
+
{
|
|
339
|
+
role: 'user',
|
|
340
|
+
content: [
|
|
341
|
+
{ type: 'text', text: 'What do you see in this image?' },
|
|
342
|
+
{ type: 'image_url', image_url: { url: 'https://picsum.photos/200/300' } }
|
|
343
|
+
]
|
|
344
|
+
}
|
|
345
|
+
];
|
|
346
|
+
|
|
347
|
+
console.log('Streamed Image Response:');
|
|
348
|
+
for await (const chunk of client.streamChat(imageMessages, {
|
|
349
|
+
model: 'Llama-3.2-11B-Vision-Instruct' as ModelType
|
|
350
|
+
})) {
|
|
351
|
+
process.stdout.write(chunk);
|
|
352
|
+
}
|
|
353
|
+
} catch (error) {
|
|
354
|
+
console.error('Error:', error);
|
|
355
|
+
}
|
|
356
|
+
}
|
|
357
|
+
|
|
358
|
+
// Run the test
|
|
359
|
+
testSambanova();
|
|
360
|
+
```
|
|
361
|
+
|
|
362
|
+
---
|
|
363
|
+
|
|
243
364
|
## Changelog
|
|
244
365
|
|
|
245
366
|
### v1.0.5
|