viho-llm 0.1.7 → 0.1.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +64 -30
- package/index.js +12 -81
- package/package.json +2 -2
- package/src/models/openai-util.js +8 -62
- package/src/models/openai.js +4 -19
package/README.md
CHANGED

@@ -99,15 +99,16 @@ import { OpenAIAPI } from 'viho-llm';
 const openai = OpenAIAPI({
   apiKey: 'your-openai-api-key',
   baseURL: 'https://api.openai.com/v1', // or your custom endpoint
-  modelID: 'gpt-4o',
-  modelThinking: 'enabled', // 'enabled' or 'disabled' for reasoning models
 });

-// Send a chat message
-const response = await openai.chat(
-
-
-
+// Send a chat message (using native OpenAI API format)
+const response = await openai.chat({
+  model: 'gpt-4o',
+  messages: [
+    { role: 'system', content: 'You are a helpful assistant.' },
+    { role: 'user', content: 'Hello, how are you?' },
+  ],
+});

 console.log(response);
 ```
@@ -156,8 +157,16 @@ OpenAI streaming supports thinking/reasoning content for compatible models:
 ```javascript
 // Send a chat message with streaming (supports thinking mode)
 await openai.chatWithStreaming(
-
-
+  {
+    model: 'deepseek-reasoner',
+    messages: [
+      { role: 'system', content: 'You are a helpful assistant.' },
+      { role: 'user', content: 'Explain how neural networks work' },
+    ],
+    thinking: {
+      type: 'enabled', // Enable reasoning mode
+    },
+  },
   {
     beginCallback: () => {
       console.log('Stream started...');
@@ -413,21 +422,25 @@ Creates a new OpenAI client instance supporting OpenAI and compatible APIs.
 - `options` (Object) - Configuration options
   - `apiKey` (string) **required** - Your OpenAI API key or compatible service key
   - `baseURL` (string) **required** - API base URL (e.g., 'https://api.openai.com/v1')
-  - `modelID` (string) **required** - Model identifier (e.g., 'gpt-4o', 'deepseek-reasoner')
-  - `modelThinking` (string) **required** - Thinking mode: 'enabled' or 'disabled'

 #### Returns

 Returns an OpenAI client object with the following methods:

-##### `client.chat(
+##### `client.chat(chatOptions)`

 Sends a chat request to the OpenAI API.

 **Parameters:**

-- `
-- `
+- `chatOptions` (Object) **required** - Native OpenAI API chat completion options
+  - `model` (string) **required** - Model identifier (e.g., 'gpt-4o', 'deepseek-reasoner')
+  - `messages` (Array) **required** - Array of message objects
+    - `role` (string) - 'system', 'user', or 'assistant'
+    - `content` (string) - Message content
+  - `thinking` (Object) - Optional thinking/reasoning configuration
+    - `type` (string) - 'enabled' or 'disabled'
+  - ...other OpenAI API parameters

 **Returns:**
@@ -436,21 +449,30 @@ Sends a chat request to the OpenAI API.
 **Example:**

 ```javascript
-const response = await openai.chat(
-
-
-
+const response = await openai.chat({
+  model: 'gpt-4o',
+  messages: [
+    { role: 'system', content: 'You are a helpful coding assistant.' },
+    { role: 'user', content: 'Write a Python function to reverse a string' },
+  ],
+});
 console.log(response.content);
 ```

-##### `client.chatWithStreaming(
+##### `client.chatWithStreaming(chatOptions, callbackOptions)`

 Sends a chat request to the OpenAI API with streaming response and thinking support.

 **Parameters:**

-- `
-- `
+- `chatOptions` (Object) **required** - Native OpenAI API chat completion options
+  - `model` (string) **required** - Model identifier (e.g., 'gpt-4o', 'deepseek-reasoner')
+  - `messages` (Array) **required** - Array of message objects
+    - `role` (string) - 'system', 'user', or 'assistant'
+    - `content` (string) - Message content
+  - `thinking` (Object) - Optional thinking/reasoning configuration
+    - `type` (string) - 'enabled' or 'disabled'
+  - ...other OpenAI API parameters (note: `stream` will be automatically set to `true`)

 - `callbackOptions` (Object) **required** - Callback functions for handling stream events
   - `beginCallback` (Function) - Called when the stream begins
@@ -471,17 +493,29 @@ Sends a chat request to the OpenAI API with streaming response and thinking supp
 **Example:**

 ```javascript
-await openai.chatWithStreaming(
-
-
-
-
-
+await openai.chatWithStreaming(
+  {
+    model: 'deepseek-reasoner',
+    messages: [
+      { role: 'system', content: 'You are a math tutor.' },
+      { role: 'user', content: 'Solve: What is 15% of 240?' },
+    ],
+    thinking: {
+      type: 'enabled',
+    },
   },
-
-
+  {
+    thinkingCallback: (thinking) => {
+      console.log('Thinking:', thinking);
+    },
+    contentCallback: (chunk) => {
+      process.stdout.write(chunk);
+    },
+    endCallback: () => {
+      console.log('\nDone!');
+    },
   },
-
+);
 ```

 ## License
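The README changes above reflect the 0.1.9 breaking change: `modelID` and `modelThinking` move out of the `OpenAIAPI(...)` constructor and into each call's `chatOptions`. A minimal sketch of the new call shape, assuming placeholder credentials; per-request model switching is an assumption that follows from `model` now being a per-call option:

```javascript
import { OpenAIAPI } from 'viho-llm';

// One client, configured only with connection details (0.1.9 style).
const openai = OpenAIAPI({
  apiKey: 'your-openai-api-key', // placeholder
  baseURL: 'https://api.openai.com/v1',
});

// Model and thinking config now travel with each request.
const plain = await openai.chat({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Say hello in French.' }],
});

const reasoned = await openai.chat({
  model: 'deepseek-reasoner',
  messages: [{ role: 'user', content: 'What is 15% of 240?' }],
  thinking: { type: 'enabled' },
});

console.log(plain, reasoned);
```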
package/index.js
CHANGED

@@ -370,13 +370,10 @@ const logger$1 = qiao_log_js.Logger('openai-util.js');
 /**
  * chat
  * @param {*} client
- * @param {*} modelID
- * @param {*} modelThinking
- * @param {*} systemPrompt
- * @param {*} userPrompt
+ * @param {*} chatOptions
  * @returns
  */
-const chat = async (client, modelID, modelThinking, systemPrompt, userPrompt) => {
+const chat = async (client, chatOptions) => {
   const methodName = 'chat';

   // check
@@ -384,35 +381,11 @@ const chat = async (client, modelID, modelThinking, systemPrompt, userPrompt) =>
     logger$1.error(methodName, 'need client');
     return;
   }
-  if (!modelID) {
-    logger$1.error(methodName, 'need modelID');
-    return;
-  }
-  if (!modelThinking) {
-    logger$1.error(methodName, 'need modelThinking');
-    return;
-  }
-  if (!systemPrompt) {
-    logger$1.error(methodName, 'need systemPrompt');
-    return;
-  }
-  if (!userPrompt) {
-    logger$1.error(methodName, 'need userPrompt');
+  if (!chatOptions) {
+    logger$1.error(methodName, 'need chatOptions');
     return;
   }

-  // chat
-  const chatOptions = {
-    model: modelID,
-    messages: [
-      { role: 'system', content: systemPrompt },
-      { role: 'user', content: userPrompt },
-    ],
-    thinking: {
-      type: modelThinking,
-    },
-  };
-
   // go
   try {
     const completion = await client.chat.completions.create(chatOptions);
@@ -425,14 +398,11 @@ const chat = async (client, modelID, modelThinking, systemPrompt, userPrompt) =>
 /**
  * chatWithStreaming
  * @param {*} client
- * @param {*} modelID
- * @param {*} modelThinking
- * @param {*} systemPrompt
- * @param {*} userPrompt
+ * @param {*} chatOptions
  * @param {*} callbackOptions
  * @returns
  */
-const chatWithStreaming = async (client, modelID, modelThinking, systemPrompt, userPrompt, callbackOptions) => {
+const chatWithStreaming = async (client, chatOptions, callbackOptions) => {
   const methodName = 'chatWithStreaming';

   // check
@@ -440,20 +410,8 @@ const chatWithStreaming = async (client, modelID, modelThinking, systemPrompt, u
     logger$1.error(methodName, 'need client');
     return;
   }
-  if (!modelID) {
-    logger$1.error(methodName, 'need modelID');
-    return;
-  }
-  if (!modelThinking) {
-    logger$1.error(methodName, 'need modelThinking');
-    return;
-  }
-  if (!systemPrompt) {
-    logger$1.error(methodName, 'need systemPrompt');
-    return;
-  }
-  if (!userPrompt) {
-    logger$1.error(methodName, 'need userPrompt');
+  if (!chatOptions) {
+    logger$1.error(methodName, 'need chatOptions');
     return;
   }
   if (!callbackOptions) {
@@ -470,18 +428,6 @@ const chatWithStreaming = async (client, modelID, modelThinking, systemPrompt, u
   const contentCallback = callbackOptions.contentCallback;
   const firstContentCallback = callbackOptions.firstContentCallback;

-  // chat
-  const chatOptions = {
-    model: modelID,
-    messages: [
-      { role: 'system', content: systemPrompt },
-      { role: 'user', content: userPrompt },
-    ],
-    thinking: {
-      type: modelThinking,
-    },
-  };
-
   // go
   try {
     chatOptions.stream = true;
@@ -546,14 +492,6 @@ const OpenAIAPI = (options) => {
     logger.error(methodName, 'need options.baseURL');
     return;
   }
-  if (!options.modelID) {
-    logger.error(methodName, 'need options.modelID');
-    return;
-  }
-  if (!options.modelThinking) {
-    logger.error(methodName, 'need options.modelThinking');
-    return;
-  }

   // openai
   const openai = {};
@@ -563,20 +501,13 @@ const OpenAIAPI = (options) => {
   });

   // chat
-  openai.chat = async (
-    return await chat(openai.client,
+  openai.chat = async (chatOptions) => {
+    return await chat(openai.client, chatOptions);
   };

   // chat with streaming
-  openai.chatWithStreaming = async (
-    return await chatWithStreaming(
-      openai.client,
-      options.modelID,
-      options.modelThinking,
-      systemPrompt,
-      userPrompt,
-      callbakOptions,
-    );
+  openai.chatWithStreaming = async (chatOptions, callbakOptions) => {
+    return await chatWithStreaming(openai.client, chatOptions, callbakOptions);
   };

   //
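With the refactor above, the bundled `chat` and `chatWithStreaming` helpers validate only `client`, `chatOptions`, and (for streaming) `callbackOptions`; when a check fails they log the reason and return `undefined` rather than throwing. A small defensive wrapper, as a sketch, assuming a client created with `OpenAIAPI` and treating an `undefined` result as an error (the wrapper name and error text are illustrative, not part of the library):

```javascript
// Sketch only: turn viho-llm's "log and return undefined" behavior into a hard failure.
const chatOrThrow = async (openai, chatOptions) => {
  const response = await openai.chat(chatOptions);
  if (response === undefined) {
    // viho-llm already logged the reason (e.g. 'need chatOptions')
    throw new Error('viho-llm chat returned no response; see the logged error');
  }
  return response;
};
```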
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "viho-llm",
-  "version": "0.1.7",
+  "version": "0.1.9",
   "description": "Utility library for working with multiple LLM providers (Google Gemini and OpenAI), providing common tools and helpers for AI interactions",
   "keywords": [
     "llm",
@@ -68,5 +68,5 @@
       }
     }
   },
-  "gitHead": "
+  "gitHead": "c0e05b33cb09f03f3ae41f85e81534fd16b1a5bf"
 }
package/src/models/openai-util.js
CHANGED

@@ -5,13 +5,10 @@ const logger = Logger('openai-util.js');
 /**
  * chat
  * @param {*} client
- * @param {*} modelID
- * @param {*} modelThinking
- * @param {*} systemPrompt
- * @param {*} userPrompt
+ * @param {*} chatOptions
  * @returns
  */
-export const chat = async (client, modelID, modelThinking, systemPrompt, userPrompt) => {
+export const chat = async (client, chatOptions) => {
   const methodName = 'chat';

   // check
@@ -19,34 +16,10 @@ export const chat = async (client, modelID, modelThinking, systemPrompt, userPro
     logger.error(methodName, 'need client');
     return;
   }
-  if (!modelID) {
-    logger.error(methodName, 'need modelID');
+  if (!chatOptions) {
+    logger.error(methodName, 'need chatOptions');
     return;
   }
-  if (!modelThinking) {
-    logger.error(methodName, 'need modelThinking');
-    return;
-  }
-  if (!systemPrompt) {
-    logger.error(methodName, 'need systemPrompt');
-    return;
-  }
-  if (!userPrompt) {
-    logger.error(methodName, 'need userPrompt');
-    return;
-  }
-
-  // chat
-  const chatOptions = {
-    model: modelID,
-    messages: [
-      { role: 'system', content: systemPrompt },
-      { role: 'user', content: userPrompt },
-    ],
-    thinking: {
-      type: modelThinking,
-    },
-  };

   // go
   try {
@@ -60,14 +33,11 @@ export const chat = async (client, modelID, modelThinking, systemPrompt, userPro
 /**
  * chatWithStreaming
  * @param {*} client
- * @param {*} modelID
- * @param {*} modelThinking
- * @param {*} systemPrompt
- * @param {*} userPrompt
+ * @param {*} chatOptions
  * @param {*} callbackOptions
  * @returns
  */
-export const chatWithStreaming = async (client, modelID, modelThinking, systemPrompt, userPrompt, callbackOptions) => {
+export const chatWithStreaming = async (client, chatOptions, callbackOptions) => {
   const methodName = 'chatWithStreaming';

   // check
@@ -75,20 +45,8 @@ export const chatWithStreaming = async (client, modelID, modelThinking, systemPr
     logger.error(methodName, 'need client');
     return;
   }
-  if (!modelID) {
-    logger.error(methodName, 'need modelID');
-    return;
-  }
-  if (!modelThinking) {
-    logger.error(methodName, 'need modelThinking');
-    return;
-  }
-  if (!systemPrompt) {
-    logger.error(methodName, 'need systemPrompt');
-    return;
-  }
-  if (!userPrompt) {
-    logger.error(methodName, 'need userPrompt');
+  if (!chatOptions) {
+    logger.error(methodName, 'need chatOptions');
     return;
   }
   if (!callbackOptions) {
@@ -105,18 +63,6 @@ export const chatWithStreaming = async (client, modelID, modelThinking, systemPr
   const contentCallback = callbackOptions.contentCallback;
   const firstContentCallback = callbackOptions.firstContentCallback;

-  // chat
-  const chatOptions = {
-    model: modelID,
-    messages: [
-      { role: 'system', content: systemPrompt },
-      { role: 'user', content: userPrompt },
-    ],
-    thinking: {
-      type: modelThinking,
-    },
-  };
-
   // go
   try {
     chatOptions.stream = true;
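Because the helpers now pass `chatOptions` straight through to `client.chat.completions.create`, other standard OpenAI chat-completion parameters can ride along untouched, as the README's "...other OpenAI API parameters" bullet suggests. A sketch, with illustrative `temperature` and `max_tokens` values and a client created as in the README:

```javascript
// Sketch: chatOptions is forwarded verbatim, so standard OpenAI parameters work too.
const response = await openai.chat({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Suggest three names for a CLI tool.' }],
  temperature: 0.8, // illustrative value
  max_tokens: 200,  // illustrative value
});
console.log(response.content);
```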
package/src/models/openai.js
CHANGED

@@ -29,14 +29,6 @@ export const OpenAIAPI = (options) => {
     logger.error(methodName, 'need options.baseURL');
     return;
   }
-  if (!options.modelID) {
-    logger.error(methodName, 'need options.modelID');
-    return;
-  }
-  if (!options.modelThinking) {
-    logger.error(methodName, 'need options.modelThinking');
-    return;
-  }

   // openai
   const openai = {};
@@ -46,20 +38,13 @@ export const OpenAIAPI = (options) => {
   });

   // chat
-  openai.chat = async (
-    return await chat(openai.client,
+  openai.chat = async (chatOptions) => {
+    return await chat(openai.client, chatOptions);
   };

   // chat with streaming
-  openai.chatWithStreaming = async (
-    return await chatWithStreaming(
-      openai.client,
-      options.modelID,
-      options.modelThinking,
-      systemPrompt,
-      userPrompt,
-      callbakOptions,
-    );
+  openai.chatWithStreaming = async (chatOptions, callbakOptions) => {
+    return await chatWithStreaming(openai.client, chatOptions, callbakOptions);
   };

   //
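One consequence of dropping the `modelID`/`modelThinking` checks above: `OpenAIAPI` now logs and returns `undefined` only when `apiKey` or `baseURL` is missing. A sketch of guarding client creation, with the helper name and error text being illustrative:

```javascript
import { OpenAIAPI } from 'viho-llm';

// Sketch: OpenAIAPI logs and returns undefined when apiKey or baseURL is missing.
const createClientOrThrow = (apiKey, baseURL) => {
  const client = OpenAIAPI({ apiKey, baseURL });
  if (!client) {
    throw new Error('OpenAIAPI requires apiKey and baseURL');
  }
  return client;
};
```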