viho-llm 0.1.7 → 0.1.8
- package/README.md +47 -29
- package/index.js +40 -53
- package/package.json +2 -2
- package/src/models/openai-util.js +36 -34
- package/src/models/openai.js +4 -19
package/README.md
CHANGED
@@ -99,15 +99,15 @@ import { OpenAIAPI } from 'viho-llm';
 const openai = OpenAIAPI({
   apiKey: 'your-openai-api-key',
   baseURL: 'https://api.openai.com/v1', // or your custom endpoint
-  modelID: 'gpt-4o',
-  modelThinking: 'enabled', // 'enabled' or 'disabled' for reasoning models
 });

 // Send a chat message
-const response = await openai.chat(
-
-  '
-
+const response = await openai.chat({
+  modelID: 'gpt-4o',
+  modelThinking: 'enabled', // 'enabled' or 'disabled' for reasoning models
+  systemPrompt: 'You are a helpful assistant.',
+  userPrompt: 'Hello, how are you?',
+});

 console.log(response);
 ```

@@ -156,8 +156,12 @@ OpenAI streaming supports thinking/reasoning content for compatible models:
 ```javascript
 // Send a chat message with streaming (supports thinking mode)
 await openai.chatWithStreaming(
-
-
+  {
+    modelID: 'deepseek-reasoner',
+    modelThinking: 'enabled',
+    systemPrompt: 'You are a helpful assistant.',
+    userPrompt: 'Explain how neural networks work',
+  },
   {
     beginCallback: () => {
       console.log('Stream started...');

@@ -413,21 +417,22 @@ Creates a new OpenAI client instance supporting OpenAI and compatible APIs.
 - `options` (Object) - Configuration options
   - `apiKey` (string) **required** - Your OpenAI API key or compatible service key
   - `baseURL` (string) **required** - API base URL (e.g., 'https://api.openai.com/v1')
-  - `modelID` (string) **required** - Model identifier (e.g., 'gpt-4o', 'deepseek-reasoner')
-  - `modelThinking` (string) **required** - Thinking mode: 'enabled' or 'disabled'

 #### Returns

 Returns an OpenAI client object with the following methods:

-##### `client.chat(
+##### `client.chat(chatOptions)`

 Sends a chat request to the OpenAI API.

 **Parameters:**

-- `
-- `
+- `chatOptions` (Object) **required** - Chat configuration
+  - `modelID` (string) **required** - Model identifier (e.g., 'gpt-4o', 'deepseek-reasoner')
+  - `modelThinking` (string) **required** - Thinking mode: 'enabled' or 'disabled'
+  - `systemPrompt` (string) **required** - System instruction/context for the model
+  - `userPrompt` (string) **required** - User's message/question

 **Returns:**

@@ -436,21 +441,26 @@ Sends a chat request to the OpenAI API.
 **Example:**

 ```javascript
-const response = await openai.chat(
-
-
-
+const response = await openai.chat({
+  modelID: 'gpt-4o',
+  modelThinking: 'disabled',
+  systemPrompt: 'You are a helpful coding assistant.',
+  userPrompt: 'Write a Python function to reverse a string',
+});
 console.log(response.content);
 ```

-##### `client.chatWithStreaming(
+##### `client.chatWithStreaming(chatOptions, callbackOptions)`

 Sends a chat request to the OpenAI API with streaming response and thinking support.

 **Parameters:**

-- `
-- `
+- `chatOptions` (Object) **required** - Chat configuration
+  - `modelID` (string) **required** - Model identifier (e.g., 'gpt-4o', 'deepseek-reasoner')
+  - `modelThinking` (string) **required** - Thinking mode: 'enabled' or 'disabled'
+  - `systemPrompt` (string) **required** - System instruction/context for the model
+  - `userPrompt` (string) **required** - User's message/question

 - `callbackOptions` (Object) **required** - Callback functions for handling stream events
   - `beginCallback` (Function) - Called when the stream begins

@@ -471,17 +481,25 @@ Sends a chat request to the OpenAI API with streaming response and thinking support.
 **Example:**

 ```javascript
-await openai.chatWithStreaming(
-
-
-
-
-
+await openai.chatWithStreaming(
+  {
+    modelID: 'deepseek-reasoner',
+    modelThinking: 'enabled',
+    systemPrompt: 'You are a math tutor.',
+    userPrompt: 'Solve: What is 15% of 240?',
+  },
   },
-
-
+  {
+    thinkingCallback: (thinking) => {
+      console.log('Thinking:', thinking);
+    },
+    contentCallback: (chunk) => {
+      process.stdout.write(chunk);
+    },
+    endCallback: () => {
+      console.log('\nDone!');
+    },
   },
-
+);
 ```

 ## License
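The practical effect of the README change above: model selection now travels with each call instead of being fixed when the client is created, so one client can serve several models. A minimal sketch against the 0.1.8 API documented above; the prompts, model IDs, and environment variable are illustrative placeholders, not taken from the package:

```javascript
import { OpenAIAPI } from 'viho-llm';

// 0.1.8: the constructor only needs credentials and an endpoint.
const openai = OpenAIAPI({
  apiKey: process.env.OPENAI_API_KEY,
  baseURL: 'https://api.openai.com/v1',
});

// Each call now carries its own modelID/modelThinking, so the same
// client can hit a fast model and a reasoning model side by side.
const quick = await openai.chat({
  modelID: 'gpt-4o',
  modelThinking: 'disabled',
  systemPrompt: 'You are a terse assistant.',
  userPrompt: 'Summarize what changed between viho-llm 0.1.7 and 0.1.8.',
});

const reasoned = await openai.chat({
  modelID: 'deepseek-reasoner',
  modelThinking: 'enabled',
  systemPrompt: 'You are a careful reviewer.',
  userPrompt: 'What are the risks of changing a public function signature?',
});

console.log(quick.content);
console.log(reasoned.content);
```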
package/index.js
CHANGED
@@ -370,13 +370,10 @@ const logger$1 = qiao_log_js.Logger('openai-util.js');
 /**
  * chat
  * @param {*} client
- * @param {*} modelID
- * @param {*} modelThinking
- * @param {*} systemPrompt
- * @param {*} userPrompt
+ * @param {*} options
  * @returns
  */
-const chat = async (client, modelID, modelThinking, systemPrompt, userPrompt) => {
+const chat = async (client, options) => {
   const methodName = 'chat';

   // check
@@ -384,32 +381,36 @@ const chat = async (client, modelID, modelThinking, systemPrompt, userPrompt) => {
     logger$1.error(methodName, 'need client');
     return;
   }
-  if (!modelID) {
-    logger$1.error(methodName, 'need modelID');
+  if (!options) {
+    logger$1.error(methodName, 'need options');
+    return;
+  }
+  if (!options.modelID) {
+    logger$1.error(methodName, 'need options.modelID');
     return;
   }
-  if (!modelThinking) {
-    logger$1.error(methodName, 'need modelThinking');
+  if (!options.modelThinking) {
+    logger$1.error(methodName, 'need options.modelThinking');
     return;
   }
-  if (!systemPrompt) {
-    logger$1.error(methodName, 'need systemPrompt');
+  if (!options.systemPrompt) {
+    logger$1.error(methodName, 'need options.systemPrompt');
     return;
   }
-  if (!userPrompt) {
-    logger$1.error(methodName, 'need userPrompt');
+  if (!options.userPrompt) {
+    logger$1.error(methodName, 'need options.userPrompt');
     return;
   }

   // chat
   const chatOptions = {
-    model: modelID,
+    model: options.modelID,
     messages: [
-      { role: 'system', content: systemPrompt },
-      { role: 'user', content: userPrompt },
+      { role: 'system', content: options.systemPrompt },
+      { role: 'user', content: options.userPrompt },
     ],
     thinking: {
-      type: modelThinking,
+      type: options.modelThinking,
     },
   };

@@ -425,14 +426,11 @@ const chat = async (client, modelID, modelThinking, systemPrompt, userPrompt) => {
 /**
  * chatWithStreaming
  * @param {*} client
- * @param {*} modelID
- * @param {*} modelThinking
- * @param {*} systemPrompt
- * @param {*} userPrompt
+ * @param {*} options
  * @param {*} callbackOptions
  * @returns
  */
-const chatWithStreaming = async (client, modelID, modelThinking, systemPrompt, userPrompt, callbackOptions) => {
+const chatWithStreaming = async (client, options, callbackOptions) => {
   const methodName = 'chatWithStreaming';

   // check
@@ -440,20 +438,24 @@ const chatWithStreaming = async (client, modelID, modelThinking, systemPrompt, userPrompt, callbackOptions) => {
     logger$1.error(methodName, 'need client');
     return;
   }
-  if (!modelID) {
-    logger$1.error(methodName, 'need modelID');
+  if (!options) {
+    logger$1.error(methodName, 'need options');
     return;
   }
-  if (!modelThinking) {
-    logger$1.error(methodName, 'need modelThinking');
+  if (!options.modelID) {
+    logger$1.error(methodName, 'need options.modelID');
     return;
   }
-  if (!systemPrompt) {
-    logger$1.error(methodName, 'need systemPrompt');
+  if (!options.modelThinking) {
+    logger$1.error(methodName, 'need options.modelThinking');
     return;
   }
-  if (!userPrompt) {
-    logger$1.error(methodName, 'need userPrompt');
+  if (!options.systemPrompt) {
+    logger$1.error(methodName, 'need options.systemPrompt');
+    return;
+  }
+  if (!options.userPrompt) {
+    logger$1.error(methodName, 'need options.userPrompt');
     return;
   }
   if (!callbackOptions) {
@@ -472,13 +474,13 @@ const chatWithStreaming = async (client, modelID, modelThinking, systemPrompt, userPrompt, callbackOptions) => {

   // chat
   const chatOptions = {
-    model: modelID,
+    model: options.modelID,
     messages: [
-      { role: 'system', content: systemPrompt },
-      { role: 'user', content: userPrompt },
+      { role: 'system', content: options.systemPrompt },
+      { role: 'user', content: options.userPrompt },
     ],
     thinking: {
-      type: modelThinking,
+      type: options.modelThinking,
     },
   };

@@ -546,14 +548,6 @@ const OpenAIAPI = (options) => {
     logger.error(methodName, 'need options.baseURL');
     return;
   }
-  if (!options.modelID) {
-    logger.error(methodName, 'need options.modelID');
-    return;
-  }
-  if (!options.modelThinking) {
-    logger.error(methodName, 'need options.modelThinking');
-    return;
-  }

   // openai
   const openai = {};
@@ -563,20 +557,13 @@ const OpenAIAPI = (options) => {
   });

   // chat
-  openai.chat = async (systemPrompt, userPrompt) => {
-    return await chat(openai.client, options.modelID, options.modelThinking, systemPrompt, userPrompt);
+  openai.chat = async (chatOptions) => {
+    return await chat(openai.client, chatOptions);
   };

   // chat with streaming
-  openai.chatWithStreaming = async (systemPrompt, userPrompt, callbakOptions) => {
-    return await chatWithStreaming(
-      openai.client,
-      options.modelID,
-      options.modelThinking,
-      systemPrompt,
-      userPrompt,
-      callbakOptions,
-    );
+  openai.chatWithStreaming = async (chatOptions, callbakOptions) => {
+    return await chatWithStreaming(openai.client, chatOptions, callbakOptions);
   };

   //
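In the rebuilt bundle above, chat and chatWithStreaming validate the options object field by field: a missing field is logged as 'need options.<field>' and the function returns early, so the call resolves to undefined instead of throwing. A caller-side sketch of that behavior, assuming nothing else wraps the call; the prompts and environment variable are placeholders:

```javascript
import { OpenAIAPI } from 'viho-llm';

const openai = OpenAIAPI({
  apiKey: process.env.OPENAI_API_KEY,
  baseURL: 'https://api.openai.com/v1',
});

// modelThinking is deliberately omitted: per the checks above, the call
// logs 'need options.modelThinking' and resolves to undefined.
const response = await openai.chat({
  modelID: 'gpt-4o',
  systemPrompt: 'You are a helpful assistant.',
  userPrompt: 'Hello!',
});

if (response === undefined) {
  // Guard the result instead of relying on an exception.
  console.error('chat() rejected the options object; see the logged "need options.*" message');
} else {
  console.log(response.content);
}
```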
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "viho-llm",
-  "version": "0.1.7",
+  "version": "0.1.8",
   "description": "Utility library for working with multiple LLM providers (Google Gemini and OpenAI), providing common tools and helpers for AI interactions",
   "keywords": [
     "llm",
@@ -68,5 +68,5 @@
     }
   }
 },
-  "gitHead": "
+  "gitHead": "7d413880dd77b08d2f09ebff758510a357f0f095"
 }
package/src/models/openai-util.js
CHANGED
@@ -5,13 +5,10 @@ const logger = Logger('openai-util.js');
 /**
  * chat
  * @param {*} client
- * @param {*} modelID
- * @param {*} modelThinking
- * @param {*} systemPrompt
- * @param {*} userPrompt
+ * @param {*} options
  * @returns
  */
-export const chat = async (client, modelID, modelThinking, systemPrompt, userPrompt) => {
+export const chat = async (client, options) => {
   const methodName = 'chat';

   // check
@@ -19,32 +16,36 @@ export const chat = async (client, modelID, modelThinking, systemPrompt, userPrompt) => {
     logger.error(methodName, 'need client');
     return;
   }
-  if (!modelID) {
-    logger.error(methodName, 'need modelID');
+  if (!options) {
+    logger.error(methodName, 'need options');
     return;
   }
-  if (!modelThinking) {
-    logger.error(methodName, 'need modelThinking');
+  if (!options.modelID) {
+    logger.error(methodName, 'need options.modelID');
     return;
   }
-  if (!systemPrompt) {
-    logger.error(methodName, 'need systemPrompt');
+  if (!options.modelThinking) {
+    logger.error(methodName, 'need options.modelThinking');
     return;
   }
-  if (!userPrompt) {
-    logger.error(methodName, 'need userPrompt');
+  if (!options.systemPrompt) {
+    logger.error(methodName, 'need options.systemPrompt');
+    return;
+  }
+  if (!options.userPrompt) {
+    logger.error(methodName, 'need options.userPrompt');
     return;
   }

   // chat
   const chatOptions = {
-    model: modelID,
+    model: options.modelID,
     messages: [
-      { role: 'system', content: systemPrompt },
-      { role: 'user', content: userPrompt },
+      { role: 'system', content: options.systemPrompt },
+      { role: 'user', content: options.userPrompt },
     ],
     thinking: {
-      type: modelThinking,
+      type: options.modelThinking,
     },
   };

@@ -60,14 +61,11 @@ export const chat = async (client, modelID, modelThinking, systemPrompt, userPrompt) => {
 /**
  * chatWithStreaming
  * @param {*} client
- * @param {*} modelID
- * @param {*} modelThinking
- * @param {*} systemPrompt
- * @param {*} userPrompt
+ * @param {*} options
  * @param {*} callbackOptions
  * @returns
  */
-export const chatWithStreaming = async (client, modelID, modelThinking, systemPrompt, userPrompt, callbackOptions) => {
+export const chatWithStreaming = async (client, options, callbackOptions) => {
   const methodName = 'chatWithStreaming';

   // check
@@ -75,20 +73,24 @@ export const chatWithStreaming = async (client, modelID, modelThinking, systemPrompt, userPrompt, callbackOptions) => {
     logger.error(methodName, 'need client');
     return;
   }
-  if (!modelID) {
-    logger.error(methodName, 'need modelID');
+  if (!options) {
+    logger.error(methodName, 'need options');
+    return;
+  }
+  if (!options.modelID) {
+    logger.error(methodName, 'need options.modelID');
     return;
   }
-  if (!modelThinking) {
-    logger.error(methodName, 'need modelThinking');
+  if (!options.modelThinking) {
+    logger.error(methodName, 'need options.modelThinking');
     return;
   }
-  if (!systemPrompt) {
-    logger.error(methodName, 'need systemPrompt');
+  if (!options.systemPrompt) {
+    logger.error(methodName, 'need options.systemPrompt');
     return;
   }
-  if (!userPrompt) {
-    logger.error(methodName, 'need userPrompt');
+  if (!options.userPrompt) {
+    logger.error(methodName, 'need options.userPrompt');
     return;
   }
   if (!callbackOptions) {
@@ -107,13 +109,13 @@ export const chatWithStreaming = async (client, modelID, modelThinking, systemPrompt, userPrompt, callbackOptions) => {

   // chat
   const chatOptions = {
-    model: modelID,
+    model: options.modelID,
     messages: [
-      { role: 'system', content: systemPrompt },
-      { role: 'user', content: userPrompt },
+      { role: 'system', content: options.systemPrompt },
+      { role: 'user', content: options.userPrompt },
    ],
     thinking: {
-      type: modelThinking,
+      type: options.modelThinking,
     },
   };

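chatWithStreaming in openai-util.js now takes the same per-call options object plus a callbackOptions object; the callback names (beginCallback, thinkingCallback, contentCallback, endCallback) come from the README section above. A sketch that collects the streamed reasoning and answer into strings; the endpoint, model ID, key variable, and prompts are illustrative placeholders:

```javascript
import { OpenAIAPI } from 'viho-llm';

const openai = OpenAIAPI({
  apiKey: process.env.LLM_API_KEY,
  baseURL: 'https://api.deepseek.com/v1', // any OpenAI-compatible endpoint
});

// Accumulate the stream into plain strings via the documented callbacks.
let thinkingText = '';
let answerText = '';

await openai.chatWithStreaming(
  {
    modelID: 'deepseek-reasoner',
    modelThinking: 'enabled',
    systemPrompt: 'You are a helpful assistant.',
    userPrompt: 'Why is the sky blue?',
  },
  {
    beginCallback: () => console.log('stream started'),
    thinkingCallback: (thinking) => {
      thinkingText += thinking;
    },
    contentCallback: (chunk) => {
      answerText += chunk;
    },
    endCallback: () => {
      console.log('--- reasoning ---\n' + thinkingText);
      console.log('--- answer ---\n' + answerText);
    },
  },
);
```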
package/src/models/openai.js
CHANGED
@@ -29,14 +29,6 @@ export const OpenAIAPI = (options) => {
     logger.error(methodName, 'need options.baseURL');
     return;
   }
-  if (!options.modelID) {
-    logger.error(methodName, 'need options.modelID');
-    return;
-  }
-  if (!options.modelThinking) {
-    logger.error(methodName, 'need options.modelThinking');
-    return;
-  }

   // openai
   const openai = {};
@@ -46,20 +38,13 @@ export const OpenAIAPI = (options) => {
   });

   // chat
-  openai.chat = async (systemPrompt, userPrompt) => {
-    return await chat(openai.client, options.modelID, options.modelThinking, systemPrompt, userPrompt);
+  openai.chat = async (chatOptions) => {
+    return await chat(openai.client, chatOptions);
   };

   // chat with streaming
-  openai.chatWithStreaming = async (systemPrompt, userPrompt, callbakOptions) => {
-    return await chatWithStreaming(
-      openai.client,
-      options.modelID,
-      options.modelThinking,
-      systemPrompt,
-      userPrompt,
-      callbakOptions,
-    );
+  openai.chatWithStreaming = async (chatOptions, callbakOptions) => {
+    return await chatWithStreaming(openai.client, chatOptions, callbakOptions);
   };

   //
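After this change openai.js is a thin facade: it validates only apiKey and baseURL, builds the underlying client once, and forwards each chatOptions object to the helpers in openai-util.js. Callers coming from 0.1.7 who prefer not to repeat modelID and modelThinking on every call can restore that ergonomics in their own code; the wrapper below is a hypothetical helper, not part of viho-llm:

```javascript
import { OpenAIAPI } from 'viho-llm';

// Hypothetical convenience wrapper (not shipped by the package): bakes
// default model settings back in, so call sites pass only the prompts.
const makeChatter = (clientOptions, defaults) => {
  const openai = OpenAIAPI(clientOptions);
  return {
    chat: (systemPrompt, userPrompt) =>
      openai.chat({ ...defaults, systemPrompt, userPrompt }),
    chatWithStreaming: (systemPrompt, userPrompt, callbacks) =>
      openai.chatWithStreaming({ ...defaults, systemPrompt, userPrompt }, callbacks),
  };
};

// Usage: defaults are set once, prompts vary per call.
const chatter = makeChatter(
  { apiKey: process.env.OPENAI_API_KEY, baseURL: 'https://api.openai.com/v1' },
  { modelID: 'gpt-4o', modelThinking: 'disabled' },
);

const reply = await chatter.chat('You are concise.', 'Name three prime numbers.');
console.log(reply.content);
```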