viho-llm 0.1.6 → 0.1.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +174 -3
- package/index.js +252 -46
- package/package.json +10 -3
- package/src/index.js +1 -0
- package/src/models/gemini-util.js +1 -3
- package/src/models/openai-util.js +160 -0
- package/src/models/openai.js +52 -0
package/README.md
CHANGED

@@ -4,7 +4,7 @@
 
 <h1 align="center">viho-llm</h1>
 
-<p align="center">Utility library for working with Google Gemini
+<p align="center">Utility library for working with multiple LLM providers (Google Gemini and OpenAI), providing common tools and helpers for AI interactions.</p>
 
 ## Installation
 
@@ -14,7 +14,9 @@ npm install viho-llm
 
 ## Prerequisites
 
-This library supports
+This library supports multiple LLM providers:
+
+### Google Gemini AI
 
 1. **Google AI Studio (GeminiAPI)** - For personal development and prototyping
    - Get an API key from [Google AI Studio](https://makersuite.google.com/app/apikey)
@@ -23,6 +25,14 @@ This library supports two ways to access Google Gemini AI:
    - Requires a Google Cloud project with Vertex AI enabled
    - Supports context caching for cost optimization
 
+### OpenAI Compatible APIs
+
+**OpenAI API (OpenAIAPI)** - For OpenAI and compatible services
+
+- Supports official OpenAI API
+- Compatible with OpenAI-like APIs (e.g., DeepSeek, local LLMs)
+- Supports thinking/reasoning mode for compatible models
+
 ## Usage
 
 ### Basic Example with GeminiAPI
@@ -78,9 +88,35 @@ const response = await gemini.chat({
 console.log(response);
 ```
 
+### Basic Example with OpenAI API
+
+Using OpenAI or OpenAI-compatible services:
+
+```javascript
+import { OpenAIAPI } from 'viho-llm';
+
+// Initialize OpenAI client
+const openai = OpenAIAPI({
+  apiKey: 'your-openai-api-key',
+  baseURL: 'https://api.openai.com/v1', // or your custom endpoint
+});
+
+// Send a chat message
+const response = await openai.chat({
+  modelID: 'gpt-4o',
+  modelThinking: 'enabled', // 'enabled' or 'disabled' for reasoning models
+  systemPrompt: 'You are a helpful assistant.',
+  userPrompt: 'Hello, how are you?',
+});
+
+console.log(response);
+```
+
 ### Streaming Example
 
-
+All providers (GeminiAPI, GeminiVertex, and OpenAIAPI) support streaming responses:
+
+#### Gemini Streaming
 
 ```javascript
 // Send a chat message with streaming
@@ -113,6 +149,45 @@ await gemini.chatWithStreaming(
 );
 ```
 
+#### OpenAI Streaming with Thinking Mode
+
+OpenAI streaming supports thinking/reasoning content for compatible models:
+
+```javascript
+// Send a chat message with streaming (supports thinking mode)
+await openai.chatWithStreaming(
+  {
+    modelID: 'deepseek-reasoner',
+    modelThinking: 'enabled',
+    systemPrompt: 'You are a helpful assistant.',
+    userPrompt: 'Explain how neural networks work',
+  },
+  {
+    beginCallback: () => {
+      console.log('Stream started...');
+    },
+    firstThinkingCallback: () => {
+      console.log('\n[Thinking...]');
+    },
+    thinkingCallback: (thinking) => {
+      process.stdout.write(thinking); // Print reasoning process
+    },
+    firstContentCallback: () => {
+      console.log('\n[Response:]');
+    },
+    contentCallback: (content) => {
+      process.stdout.write(content); // Print response content
+    },
+    endCallback: () => {
+      console.log('\nStream ended.');
+    },
+    errorCallback: (error) => {
+      console.error('Error:', error);
+    },
+  },
+);
+```
+
 ### Context Caching Example (Vertex AI Only)
 
 GeminiVertex supports context caching to reduce costs and latency when using large contexts:
@@ -331,6 +406,102 @@ await gemini.cacheUpdate('projects/.../cachedContents/abc123', {
 });
 ```
 
+---
+
+### `OpenAIAPI(options)`
+
+Creates a new OpenAI client instance supporting OpenAI and compatible APIs.
+
+#### Parameters
+
+- `options` (Object) - Configuration options
+  - `apiKey` (string) **required** - Your OpenAI API key or compatible service key
+  - `baseURL` (string) **required** - API base URL (e.g., 'https://api.openai.com/v1')
+
+#### Returns
+
+Returns an OpenAI client object with the following methods:
+
+##### `client.chat(chatOptions)`
+
+Sends a chat request to the OpenAI API.
+
+**Parameters:**
+
+- `chatOptions` (Object) **required** - Chat configuration
+  - `modelID` (string) **required** - Model identifier (e.g., 'gpt-4o', 'deepseek-reasoner')
+  - `modelThinking` (string) **required** - Thinking mode: 'enabled' or 'disabled'
+  - `systemPrompt` (string) **required** - System instruction/context for the model
+  - `userPrompt` (string) **required** - User's message/question
+
+**Returns:**
+
+- (Promise\<Object\>) - Message object with `role` and `content` properties
+
+**Example:**
+
+```javascript
+const response = await openai.chat({
+  modelID: 'gpt-4o',
+  modelThinking: 'disabled',
+  systemPrompt: 'You are a helpful coding assistant.',
+  userPrompt: 'Write a Python function to reverse a string',
+});
+console.log(response.content);
+```
+
+##### `client.chatWithStreaming(chatOptions, callbackOptions)`
+
+Sends a chat request to the OpenAI API with streaming response and thinking support.
+
+**Parameters:**
+
+- `chatOptions` (Object) **required** - Chat configuration
+  - `modelID` (string) **required** - Model identifier (e.g., 'gpt-4o', 'deepseek-reasoner')
+  - `modelThinking` (string) **required** - Thinking mode: 'enabled' or 'disabled'
+  - `systemPrompt` (string) **required** - System instruction/context for the model
+  - `userPrompt` (string) **required** - User's message/question
+
+- `callbackOptions` (Object) **required** - Callback functions for handling stream events
+  - `beginCallback` (Function) - Called when the stream begins
+  - `firstThinkingCallback` (Function) - Called when the first thinking chunk is received (for reasoning models)
+  - `thinkingCallback` (Function) - Called for each thinking/reasoning chunk received
+    - Parameters: `thinking` (string) - The thinking content chunk
+  - `firstContentCallback` (Function) - Called when the first response content chunk is received
+  - `contentCallback` (Function) - Called for each response content chunk received
+    - Parameters: `content` (string) - The text chunk
+  - `endCallback` (Function) - Called when the stream ends successfully
+  - `errorCallback` (Function) - Called if an error occurs
+    - Parameters: `error` (Error) - The error object
+
+**Returns:**
+
+- (Promise\<void\>) - Resolves when streaming completes
+
+**Example:**
+
+```javascript
+await openai.chatWithStreaming(
+  {
+    modelID: 'deepseek-reasoner',
+    modelThinking: 'enabled',
+    systemPrompt: 'You are a math tutor.',
+    userPrompt: 'Solve: What is 15% of 240?',
+  },
+  {
+    thinkingCallback: (thinking) => {
+      console.log('Thinking:', thinking);
+    },
+    contentCallback: (chunk) => {
+      process.stdout.write(chunk);
+    },
+    endCallback: () => {
+      console.log('\nDone!');
+    },
+  },
+);
+```
+
 ## License
 
 MIT
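A note on the two `chat` return shapes documented above: the Gemini helpers resolve to the response text, while the new OpenAI helper resolves to a message object, and to `undefined` when a required option is missing or the request fails, since errors are only logged. A minimal sketch, assuming the 0.1.8 API shown in this diff and reusing the `gemini`/`openai` clients from the examples above; model IDs and prompts are placeholders:

```javascript
// Sketch only; not from the package docs.
const geminiText = await gemini.chat({ contents: 'Hello' }); // resolves to a string (the response text)

const openaiMessage = await openai.chat({
  modelID: 'gpt-4o',
  modelThinking: 'disabled',
  systemPrompt: 'You are a helpful assistant.',
  userPrompt: 'Hello',
});
if (openaiMessage) {
  console.log(openaiMessage.role, openaiMessage.content); // resolves to a { role, content } message object
}
```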
package/index.js
CHANGED

@@ -3,9 +3,10 @@
 var genai = require('@google/genai');
 var mime = require('mime-types');
 var qiao_log_js = require('qiao.log.js');
+var OpenAI = require('openai');
 
 // gemini
-const logger$
+const logger$4 = qiao_log_js.Logger('gemini-util.js');
 
 /**
  * chat
@@ -14,24 +15,24 @@ const logger$2 = qiao_log_js.Logger('gemini-util.js');
  * @param {*} chatOptions
  * @returns
  */
-const chat = async (client, modelName, chatOptions) => {
+const chat$1 = async (client, modelName, chatOptions) => {
   const methodName = 'chat';
 
   // check
   if (!client) {
-    logger$
+    logger$4.error(methodName, 'need client');
     return;
   }
   if (!modelName) {
-    logger$
+    logger$4.error(methodName, 'need modelName');
     return;
   }
   if (!chatOptions) {
-    logger$
+    logger$4.error(methodName, 'need chatOptions');
     return;
   }
   if (!chatOptions.contents) {
-    logger$
+    logger$4.error(methodName, 'need chatOptions.contents');
     return;
   }
 
@@ -47,13 +48,13 @@ const chat = async (client, modelName, chatOptions) => {
     // gen
     const response = await client.models.generateContent(options);
     if (!response || !response.text) {
-      logger$
+      logger$4.error(methodName, 'invalid response');
       return;
     }
 
     return response.text;
   } catch (error) {
-    logger$
+    logger$4.error(methodName, 'error', error);
   }
 };
 
@@ -65,28 +66,28 @@ const chat = async (client, modelName, chatOptions) => {
  * @param {*} callbackOptions
  * @returns
  */
-const chatWithStreaming = async (client, modelName, chatOptions, callbackOptions) => {
+const chatWithStreaming$1 = async (client, modelName, chatOptions, callbackOptions) => {
   const methodName = 'chatWithStreaming';
 
   // check
   if (!client) {
-    logger$
+    logger$4.error(methodName, 'need client');
     return;
   }
   if (!modelName) {
-    logger$
+    logger$4.error(methodName, 'need modelName');
     return;
   }
   if (!chatOptions) {
-    logger$
+    logger$4.error(methodName, 'need chatOptions');
     return;
   }
   if (!chatOptions.contents) {
-    logger$
+    logger$4.error(methodName, 'need chatOptions.contents');
     return;
   }
   if (!callbackOptions) {
-    logger$
+    logger$4.error(methodName, 'need callbackOptions');
     return;
   }
 
@@ -98,9 +99,6 @@ const chatWithStreaming = async (client, modelName, chatOptions, callbackOptions
   const firstContentCallback = callbackOptions.firstContentCallback;
 
   try {
-    // begin
-    if (beginCallback) beginCallback();
-
     // options
     const options = Object.assign(
       {
@@ -111,6 +109,7 @@ const chatWithStreaming = async (client, modelName, chatOptions, callbackOptions
 
     // gen
     const response = await client.models.generateContentStream(options);
+    if (beginCallback) beginCallback();
 
     // go
     let firstContent = true;
@@ -146,38 +145,38 @@ const cacheAdd = async (client, modelName, cacheOptions) => {
 
   // check
   if (!client) {
-    logger$
+    logger$4.error(methodName, 'need client');
     return;
   }
   if (!modelName) {
-    logger$
+    logger$4.error(methodName, 'need modelName');
     return;
   }
   if (!cacheOptions) {
-    logger$
+    logger$4.error(methodName, 'need cacheOptions');
     return;
   }
   if (!cacheOptions.gsPath) {
-    logger$
+    logger$4.error(methodName, 'need cacheOptions.gsPath');
     return;
   }
   if (!cacheOptions.systemPrompt) {
-    logger$
+    logger$4.error(methodName, 'need cacheOptions.systemPrompt');
     return;
   }
   if (!cacheOptions.cacheName) {
-    logger$
+    logger$4.error(methodName, 'need cacheOptions.cacheName');
     return;
   }
   if (!cacheOptions.cacheTTL) {
-    logger$
+    logger$4.error(methodName, 'need cacheOptions.cacheTTL');
     return;
   }
 
   // const
   const mimeType = mime.lookup(cacheOptions.gsPath);
-  logger$
-  logger$
+  logger$4.info(methodName, 'cacheOptions', cacheOptions);
+  logger$4.info(methodName, 'mimeType', mimeType);
 
   try {
     // cache add
@@ -193,7 +192,7 @@ const cacheAdd = async (client, modelName, cacheOptions) => {
 
     return cache;
   } catch (error) {
-    logger$
+    logger$4.error(methodName, 'error', error);
   }
 };
 
@@ -207,7 +206,7 @@ const cacheList = async (client) => {
 
   // check
   if (!client) {
-    logger$
+    logger$4.error(methodName, 'need client');
     return;
   }
 
@@ -221,7 +220,7 @@ const cacheList = async (client) => {
 
     return cacheObjs;
   } catch (error) {
-    logger$
+    logger$4.error(methodName, 'error', error);
   }
 };
 
@@ -237,15 +236,15 @@ const cacheUpdate = async (client, cacheName, cacheOptions) => {
 
   // check
   if (!client) {
-    logger$
+    logger$4.error(methodName, 'need client');
     return;
   }
   if (!cacheName) {
-    logger$
+    logger$4.error(methodName, 'need cacheName');
     return;
   }
   if (!cacheOptions) {
-    logger$
+    logger$4.error(methodName, 'need cacheOptions');
     return;
   }
 
@@ -258,12 +257,12 @@ const cacheUpdate = async (client, cacheName, cacheOptions) => {
 
     return res;
   } catch (error) {
-    logger$
+    logger$4.error(methodName, 'error', error);
   }
 };
 
 // gemini
-const logger$
+const logger$3 = qiao_log_js.Logger('gemini-api.js');
 
 /**
  * GeminiAPI
@@ -275,15 +274,15 @@ const GeminiAPI = (options) => {
 
   // check
   if (!options) {
-    logger$
+    logger$3.error(methodName, 'need options');
     return;
   }
   if (!options.apiKey) {
-    logger$
+    logger$3.error(methodName, 'need options.apiKey');
     return;
   }
   if (!options.modelName) {
-    logger$
+    logger$3.error(methodName, 'need options.modelName');
     return;
   }
 
@@ -295,10 +294,10 @@ const GeminiAPI = (options) => {
 
   // chat
   gemini.chat = async (chatOptions) => {
-    return await chat(gemini.client, options.modelName, chatOptions);
+    return await chat$1(gemini.client, options.modelName, chatOptions);
   };
   gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
-    return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
+    return await chatWithStreaming$1(gemini.client, options.modelName, chatOptions, callbackOptions);
   };
 
   // r
@@ -306,7 +305,7 @@ const GeminiAPI = (options) => {
 };
 
 // gemini
-const logger = qiao_log_js.Logger('viho-llm');
+const logger$2 = qiao_log_js.Logger('viho-llm');
 
 /**
  * GeminiVertex
@@ -318,19 +317,19 @@ const GeminiVertex = (options) => {
 
   // check
   if (!options) {
-    logger.error(methodName, 'need options');
+    logger$2.error(methodName, 'need options');
     return;
   }
   if (!options.projectId) {
-    logger.error(methodName, 'need options.projectId');
+    logger$2.error(methodName, 'need options.projectId');
     return;
   }
   if (!options.location) {
-    logger.error(methodName, 'need options.location');
+    logger$2.error(methodName, 'need options.location');
     return;
   }
   if (!options.modelName) {
-    logger.error(methodName, 'need options.modelName');
+    logger$2.error(methodName, 'need options.modelName');
     return;
   }
 
@@ -344,10 +343,10 @@ const GeminiVertex = (options) => {
 
   // chat
   gemini.chat = async (chatOptions) => {
-    return await chat(gemini.client, options.modelName, chatOptions);
+    return await chat$1(gemini.client, options.modelName, chatOptions);
   };
   gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
-    return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
+    return await chatWithStreaming$1(gemini.client, options.modelName, chatOptions, callbackOptions);
   };
 
   // cache
@@ -365,5 +364,212 @@ const GeminiVertex = (options) => {
   return gemini;
 };
 
+// Logger
+const logger$1 = qiao_log_js.Logger('openai-util.js');
+
+/**
+ * chat
+ * @param {*} client
+ * @param {*} options
+ * @returns
+ */
+const chat = async (client, options) => {
+  const methodName = 'chat';
+
+  // check
+  if (!client) {
+    logger$1.error(methodName, 'need client');
+    return;
+  }
+  if (!options) {
+    logger$1.error(methodName, 'need options');
+    return;
+  }
+  if (!options.modelID) {
+    logger$1.error(methodName, 'need options.modelID');
+    return;
+  }
+  if (!options.modelThinking) {
+    logger$1.error(methodName, 'need options.modelThinking');
+    return;
+  }
+  if (!options.systemPrompt) {
+    logger$1.error(methodName, 'need options.systemPrompt');
+    return;
+  }
+  if (!options.userPrompt) {
+    logger$1.error(methodName, 'need options.userPrompt');
+    return;
+  }
+
+  // chat
+  const chatOptions = {
+    model: options.modelID,
+    messages: [
+      { role: 'system', content: options.systemPrompt },
+      { role: 'user', content: options.userPrompt },
+    ],
+    thinking: {
+      type: options.modelThinking,
+    },
+  };
+
+  // go
+  try {
+    const completion = await client.chat.completions.create(chatOptions);
+    return completion.choices[0]?.message;
+  } catch (error) {
+    logger$1.error(methodName, 'error', error);
+  }
+};
+
+/**
+ * chatWithStreaming
+ * @param {*} client
+ * @param {*} options
+ * @param {*} callbackOptions
+ * @returns
+ */
+const chatWithStreaming = async (client, options, callbackOptions) => {
+  const methodName = 'chatWithStreaming';
+
+  // check
+  if (!client) {
+    logger$1.error(methodName, 'need client');
+    return;
+  }
+  if (!options) {
+    logger$1.error(methodName, 'need options');
+    return;
+  }
+  if (!options.modelID) {
+    logger$1.error(methodName, 'need options.modelID');
+    return;
+  }
+  if (!options.modelThinking) {
+    logger$1.error(methodName, 'need options.modelThinking');
+    return;
+  }
+  if (!options.systemPrompt) {
+    logger$1.error(methodName, 'need options.systemPrompt');
+    return;
+  }
+  if (!options.userPrompt) {
+    logger$1.error(methodName, 'need options.userPrompt');
+    return;
+  }
+  if (!callbackOptions) {
+    logger$1.error(methodName, 'need callbackOptions');
+    return;
+  }
+
+  // callback
+  const beginCallback = callbackOptions.beginCallback;
+  const endCallback = callbackOptions.endCallback;
+  const errorCallback = callbackOptions.errorCallback;
+  const thinkingCallback = callbackOptions.thinkingCallback;
+  const firstThinkingCallback = callbackOptions.firstThinkingCallback;
+  const contentCallback = callbackOptions.contentCallback;
+  const firstContentCallback = callbackOptions.firstContentCallback;
+
+  // chat
+  const chatOptions = {
+    model: options.modelID,
+    messages: [
+      { role: 'system', content: options.systemPrompt },
+      { role: 'user', content: options.userPrompt },
+    ],
+    thinking: {
+      type: options.modelThinking,
+    },
+  };
+
+  // go
+  try {
+    chatOptions.stream = true;
+    const stream = await client.chat.completions.create(chatOptions);
+    if (beginCallback) beginCallback();
+
+    // go
+    let firstThinking = true;
+    let firstContent = true;
+    for await (const part of stream) {
+      // thinking
+      const thinkingContent = part.choices[0]?.delta?.reasoning_content;
+      if (thinkingContent && thinkingCallback) {
+        if (firstThinking && firstThinkingCallback) {
+          firstThinking = false;
+          firstThinkingCallback();
+        }
+
+        thinkingCallback(thinkingContent);
+      }
+
+      // content
+      const content = part.choices[0]?.delta?.content;
+      if (content && contentCallback) {
+        if (firstContent && firstContentCallback) {
+          firstContent = false;
+          firstContentCallback();
+        }
+
+        contentCallback(content);
+      }
+    }
+
+    // end
+    if (endCallback) endCallback();
+  } catch (error) {
+    if (errorCallback) errorCallback(error);
+  }
+};
+
+// openai
+const logger = qiao_log_js.Logger('openai.js');
+
+/**
+ * OpenAI
+ * @param {*} options
+ * @returns
+ */
+const OpenAIAPI = (options) => {
+  const methodName = 'OpenAI';
+
+  // check
+  if (!options) {
+    logger.error(methodName, 'need options');
+    return;
+  }
+  if (!options.apiKey) {
+    logger.error(methodName, 'need options.apiKey');
+    return;
+  }
+  if (!options.baseURL) {
+    logger.error(methodName, 'need options.baseURL');
+    return;
+  }
+
+  // openai
+  const openai = {};
+  openai.client = new OpenAI({
+    apiKey: options.apiKey,
+    baseURL: options.baseURL,
+  });
+
+  // chat
+  openai.chat = async (chatOptions) => {
+    return await chat(openai.client, chatOptions);
+  };
+
+  // chat with streaming
+  openai.chatWithStreaming = async (chatOptions, callbakOptions) => {
+    return await chatWithStreaming(openai.client, chatOptions, callbakOptions);
+  };
+
+  //
+  return openai;
+};
+
 exports.GeminiAPI = GeminiAPI;
 exports.GeminiVertex = GeminiVertex;
+exports.OpenAIAPI = OpenAIAPI;
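One implementation detail worth calling out from the bundled code above: both OpenAI helpers forward a non-standard `thinking` field on the Chat Completions request and, when streaming, read reasoning chunks from `delta.reasoning_content`. Both are provider extensions used by some OpenAI-compatible services (the README's deepseek-reasoner example is the typical case); whether a given endpoint accepts or ignores them depends on the service behind `baseURL`, so treat the sketch below as an illustration of the request shape rather than a guaranteed contract:

```javascript
// Sketch: the request body chat()/chatWithStreaming() in viho-llm 0.1.8 build from their options.
// `thinking` and the streamed `reasoning_content` delta are provider extensions, not official
// OpenAI Chat Completions parameters; support depends on the endpoint behind `baseURL`.
const options = {
  modelID: 'deepseek-reasoner',
  modelThinking: 'enabled',
  systemPrompt: 'You are a helpful assistant.',
  userPrompt: 'Hello',
};

const requestBody = {
  model: options.modelID,
  messages: [
    { role: 'system', content: options.systemPrompt },
    { role: 'user', content: options.userPrompt },
  ],
  thinking: { type: options.modelThinking }, // forwarded verbatim by both helpers
};

// chatWithStreaming() additionally sets:
requestBody.stream = true;

console.log(JSON.stringify(requestBody, null, 2));
```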
package/package.json
CHANGED

@@ -1,17 +1,23 @@
 {
   "name": "viho-llm",
-  "version": "0.1.
-  "description": "Utility library for working with Google Gemini
+  "version": "0.1.8",
+  "description": "Utility library for working with multiple LLM providers (Google Gemini and OpenAI), providing common tools and helpers for AI interactions",
   "keywords": [
     "llm",
     "ai",
     "gemini",
+    "openai",
     "google-ai",
     "google-gemini",
     "genai",
+    "gpt",
+    "chatgpt",
     "ai-tools",
     "language-model",
     "ai-utilities",
+    "reasoning",
+    "thinking",
+    "deepseek",
     "viho"
   ],
   "author": "uikoo9 <uikoo9@qq.com>",
@@ -42,6 +48,7 @@
   "dependencies": {
     "@google/genai": "^1.34.0",
     "mime-types": "^2.1.35",
+    "openai": "^5.23.2",
     "qiao.log.js": "^3.7.5"
   },
   "nx": {
@@ -61,5 +68,5 @@
       }
     }
   },
-  "gitHead": "
+  "gitHead": "7d413880dd77b08d2f09ebff758510a357f0f095"
 }
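Because `openai` is added as a regular dependency (not a peer dependency), upgrading the package is all that is needed to pick up the new client. A quick sanity check after upgrading, as a sketch:

```javascript
// Sketch: confirm the new export after upgrading to viho-llm 0.1.8.
import * as vihoLLM from 'viho-llm';

console.log(Object.keys(vihoLLM)); // expected to include GeminiAPI, GeminiVertex, OpenAIAPI
console.log(typeof vihoLLM.OpenAIAPI); // 'function'
```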
package/src/index.js
CHANGED

package/src/models/gemini-util.js
CHANGED

@@ -99,9 +99,6 @@ export const chatWithStreaming = async (client, modelName, chatOptions, callback
   const firstContentCallback = callbackOptions.firstContentCallback;
 
   try {
-    // begin
-    if (beginCallback) beginCallback();
-
     // options
     const options = Object.assign(
       {
@@ -112,6 +109,7 @@ export const chatWithStreaming = async (client, modelName, chatOptions, callback
 
     // gen
     const response = await client.models.generateContentStream(options);
+    if (beginCallback) beginCallback();
 
     // go
     let firstContent = true;
package/src/models/openai-util.js
ADDED

@@ -0,0 +1,160 @@
+// Logger
+import { Logger } from 'qiao.log.js';
+const logger = Logger('openai-util.js');
+
+/**
+ * chat
+ * @param {*} client
+ * @param {*} options
+ * @returns
+ */
+export const chat = async (client, options) => {
+  const methodName = 'chat';
+
+  // check
+  if (!client) {
+    logger.error(methodName, 'need client');
+    return;
+  }
+  if (!options) {
+    logger.error(methodName, 'need options');
+    return;
+  }
+  if (!options.modelID) {
+    logger.error(methodName, 'need options.modelID');
+    return;
+  }
+  if (!options.modelThinking) {
+    logger.error(methodName, 'need options.modelThinking');
+    return;
+  }
+  if (!options.systemPrompt) {
+    logger.error(methodName, 'need options.systemPrompt');
+    return;
+  }
+  if (!options.userPrompt) {
+    logger.error(methodName, 'need options.userPrompt');
+    return;
+  }
+
+  // chat
+  const chatOptions = {
+    model: options.modelID,
+    messages: [
+      { role: 'system', content: options.systemPrompt },
+      { role: 'user', content: options.userPrompt },
+    ],
+    thinking: {
+      type: options.modelThinking,
+    },
+  };
+
+  // go
+  try {
+    const completion = await client.chat.completions.create(chatOptions);
+    return completion.choices[0]?.message;
+  } catch (error) {
+    logger.error(methodName, 'error', error);
+  }
+};
+
+/**
+ * chatWithStreaming
+ * @param {*} client
+ * @param {*} options
+ * @param {*} callbackOptions
+ * @returns
+ */
+export const chatWithStreaming = async (client, options, callbackOptions) => {
+  const methodName = 'chatWithStreaming';
+
+  // check
+  if (!client) {
+    logger.error(methodName, 'need client');
+    return;
+  }
+  if (!options) {
+    logger.error(methodName, 'need options');
+    return;
+  }
+  if (!options.modelID) {
+    logger.error(methodName, 'need options.modelID');
+    return;
+  }
+  if (!options.modelThinking) {
+    logger.error(methodName, 'need options.modelThinking');
+    return;
+  }
+  if (!options.systemPrompt) {
+    logger.error(methodName, 'need options.systemPrompt');
+    return;
+  }
+  if (!options.userPrompt) {
+    logger.error(methodName, 'need options.userPrompt');
+    return;
+  }
+  if (!callbackOptions) {
+    logger.error(methodName, 'need callbackOptions');
+    return;
+  }
+
+  // callback
+  const beginCallback = callbackOptions.beginCallback;
+  const endCallback = callbackOptions.endCallback;
+  const errorCallback = callbackOptions.errorCallback;
+  const thinkingCallback = callbackOptions.thinkingCallback;
+  const firstThinkingCallback = callbackOptions.firstThinkingCallback;
+  const contentCallback = callbackOptions.contentCallback;
+  const firstContentCallback = callbackOptions.firstContentCallback;
+
+  // chat
+  const chatOptions = {
+    model: options.modelID,
+    messages: [
+      { role: 'system', content: options.systemPrompt },
+      { role: 'user', content: options.userPrompt },
+    ],
+    thinking: {
+      type: options.modelThinking,
+    },
+  };
+
+  // go
+  try {
+    chatOptions.stream = true;
+    const stream = await client.chat.completions.create(chatOptions);
+    if (beginCallback) beginCallback();
+
+    // go
+    let firstThinking = true;
+    let firstContent = true;
+    for await (const part of stream) {
+      // thinking
+      const thinkingContent = part.choices[0]?.delta?.reasoning_content;
+      if (thinkingContent && thinkingCallback) {
+        if (firstThinking && firstThinkingCallback) {
+          firstThinking = false;
+          firstThinkingCallback();
+        }
+
+        thinkingCallback(thinkingContent);
+      }
+
+      // content
+      const content = part.choices[0]?.delta?.content;
+      if (content && contentCallback) {
+        if (firstContent && firstContentCallback) {
+          firstContent = false;
+          firstContentCallback();
+        }
+
+        contentCallback(content);
+      }
+    }
+
+    // end
+    if (endCallback) endCallback();
+  } catch (error) {
+    if (errorCallback) errorCallback(error);
+  }
+};
package/src/models/openai.js
ADDED

@@ -0,0 +1,52 @@
+// openai
+import OpenAI from 'openai';
+
+// util
+import { chat, chatWithStreaming } from './openai-util.js';
+
+// Logger
+import { Logger } from 'qiao.log.js';
+const logger = Logger('openai.js');
+
+/**
+ * OpenAI
+ * @param {*} options
+ * @returns
+ */
+export const OpenAIAPI = (options) => {
+  const methodName = 'OpenAI';
+
+  // check
+  if (!options) {
+    logger.error(methodName, 'need options');
+    return;
+  }
+  if (!options.apiKey) {
+    logger.error(methodName, 'need options.apiKey');
+    return;
+  }
+  if (!options.baseURL) {
+    logger.error(methodName, 'need options.baseURL');
+    return;
+  }
+
+  // openai
+  const openai = {};
+  openai.client = new OpenAI({
+    apiKey: options.apiKey,
+    baseURL: options.baseURL,
+  });
+
+  // chat
+  openai.chat = async (chatOptions) => {
+    return await chat(openai.client, chatOptions);
+  };
+
+  // chat with streaming
+  openai.chatWithStreaming = async (chatOptions, callbakOptions) => {
+    return await chatWithStreaming(openai.client, chatOptions, callbakOptions);
+  };
+
+  //
+  return openai;
+};
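Finally, note how failures surface in the new source modules above: every missing required option is logged via qiao.log.js and the call resolves to `undefined` instead of throwing, and streaming errors are routed to `errorCallback` rather than rejecting the promise. A short usage sketch under those assumptions; the API key, base URL, and model are placeholders:

```javascript
// Sketch based on the 0.1.8 sources shown above; not from the package docs.
import { OpenAIAPI } from 'viho-llm';

const openai = OpenAIAPI({
  apiKey: process.env.OPENAI_API_KEY,
  baseURL: 'https://api.openai.com/v1',
});

// Missing required options: logged, and the promise resolves to undefined (no throw).
const incomplete = await openai.chat({ modelID: 'gpt-4o' });
console.log(incomplete); // undefined

// Streaming errors go to errorCallback; the returned promise still resolves.
await openai.chatWithStreaming(
  { modelID: 'gpt-4o', modelThinking: 'disabled', systemPrompt: 'You are concise.', userPrompt: 'Say hi.' },
  {
    contentCallback: (chunk) => process.stdout.write(chunk),
    endCallback: () => console.log('\ndone'),
    errorCallback: (err) => console.error('stream failed:', err),
  },
);
```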