viho-llm 0.1.5 → 0.1.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +326 -14
- package/index.js +355 -43
- package/package.json +10 -3
- package/src/index.js +1 -0
- package/src/models/gemini-util.js +93 -8
- package/src/models/gemini-vertex.js +7 -1
- package/src/models/openai-util.js +158 -0
- package/src/models/openai.js +67 -0
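Taken together, the diffs below add an OpenAI-compatible client (openai.js, openai-util.js, and the rebuilt bundle) and cache management methods on the Vertex client (gemini-util.js, gemini-vertex.js). A minimal sketch of the new surface, assuming ESM and placeholder credentials (none of the keys, project IDs, or cache names below come from the package):

```javascript
import { GeminiVertex, OpenAIAPI } from 'viho-llm';

// New in 0.1.7: OpenAI / OpenAI-compatible client
const openai = OpenAIAPI({
  apiKey: 'your-key', // placeholder
  baseURL: 'https://api.openai.com/v1',
  modelID: 'gpt-4o',
  modelThinking: 'disabled',
});
console.log((await openai.chat('You are terse.', 'Say hi.'))?.content);

// New in 0.1.7: cache listing and TTL updates on the Vertex client
const gemini = GeminiVertex({
  projectId: 'your-project', // placeholder
  location: 'us-east1',
  modelName: 'gemini-1.5-flash-002',
});
const caches = await gemini.cacheList();
if (caches?.length) await gemini.cacheUpdate(caches[0].name, { ttl: '7200s' });
```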
package/README.md
CHANGED
@@ -1,6 +1,10 @@
-
+<p align="center">
+  <img src="https://static-small.vincentqiao.com/viho/logo.png" alt="viho logo" width="200"/>
+</p>
 
-
+<h1 align="center">viho-llm</h1>
+
+<p align="center">Utility library for working with multiple LLM providers (Google Gemini and OpenAI), providing common tools and helpers for AI interactions.</p>
 
 ## Installation
 
@@ -10,17 +14,36 @@ npm install viho-llm
 
 ## Prerequisites
 
-
+This library supports multiple LLM providers:
+
+### Google Gemini AI
+
+1. **Google AI Studio (GeminiAPI)** - For personal development and prototyping
+   - Get an API key from [Google AI Studio](https://makersuite.google.com/app/apikey)
+
+2. **Vertex AI (GeminiVertex)** - For enterprise applications with advanced features
+   - Requires a Google Cloud project with Vertex AI enabled
+   - Supports context caching for cost optimization
+
+### OpenAI Compatible APIs
+
+**OpenAI API (OpenAIAPI)** - For OpenAI and compatible services
+
+- Supports official OpenAI API
+- Compatible with OpenAI-like APIs (e.g., DeepSeek, local LLMs)
+- Supports thinking/reasoning mode for compatible models
 
 ## Usage
 
-### Basic Example
+### Basic Example with GeminiAPI
+
+Using Google AI Studio API Key (recommended for development):
 
 ```javascript
-import {
+import { GeminiAPI } from 'viho-llm';
 
-// Initialize Gemini client
-const gemini =
+// Initialize Gemini client with API Key
+const gemini = GeminiAPI({
   apiKey: 'your-google-api-key',
   modelName: 'gemini-pro',
 });
@@ -38,17 +61,64 @@ const response = await gemini.chat({
 console.log(response);
 ```
 
-###
+### Basic Example with GeminiVertex
+
+Using Vertex AI (recommended for production):
 
 ```javascript
-import {
+import { GeminiVertex } from 'viho-llm';
 
-// Initialize Gemini client
-const gemini =
-
+// Initialize Gemini client with Vertex AI
+const gemini = GeminiVertex({
+  projectId: 'your-gcp-project-id',
+  location: 'us-east1',
   modelName: 'gemini-pro',
 });
 
+// Send a chat message
+const response = await gemini.chat({
+  contents: [
+    {
+      role: 'user',
+      parts: [{ text: 'Hello, how are you?' }],
+    },
+  ],
+});
+
+console.log(response);
+```
+
+### Basic Example with OpenAI API
+
+Using OpenAI or OpenAI-compatible services:
+
+```javascript
+import { OpenAIAPI } from 'viho-llm';
+
+// Initialize OpenAI client
+const openai = OpenAIAPI({
+  apiKey: 'your-openai-api-key',
+  baseURL: 'https://api.openai.com/v1', // or your custom endpoint
+  modelID: 'gpt-4o',
+  modelThinking: 'enabled', // 'enabled' or 'disabled' for reasoning models
+});
+
+// Send a chat message
+const response = await openai.chat(
+  'You are a helpful assistant.', // system prompt
+  'Hello, how are you?', // user prompt
+);
+
+console.log(response);
+```
+
+### Streaming Example
+
+All providers (GeminiAPI, GeminiVertex, and OpenAIAPI) support streaming responses:
+
+#### Gemini Streaming
+
+```javascript
 // Send a chat message with streaming
 await gemini.chatWithStreaming(
   {
@@ -79,11 +149,79 @@ await gemini.chatWithStreaming(
 );
 ```
 
+#### OpenAI Streaming with Thinking Mode
+
+OpenAI streaming supports thinking/reasoning content for compatible models:
+
+```javascript
+// Send a chat message with streaming (supports thinking mode)
+await openai.chatWithStreaming(
+  'You are a helpful assistant.', // system prompt
+  'Explain how neural networks work', // user prompt
+  {
+    beginCallback: () => {
+      console.log('Stream started...');
+    },
+    firstThinkingCallback: () => {
+      console.log('\n[Thinking...]');
+    },
+    thinkingCallback: (thinking) => {
+      process.stdout.write(thinking); // Print reasoning process
+    },
+    firstContentCallback: () => {
+      console.log('\n[Response:]');
+    },
+    contentCallback: (content) => {
+      process.stdout.write(content); // Print response content
+    },
+    endCallback: () => {
+      console.log('\nStream ended.');
+    },
+    errorCallback: (error) => {
+      console.error('Error:', error);
+    },
+  },
+);
+```
+
+### Context Caching Example (Vertex AI Only)
+
+GeminiVertex supports context caching to reduce costs and latency when using large contexts:
+
+```javascript
+import { GeminiVertex } from 'viho-llm';
+
+const gemini = GeminiVertex({
+  projectId: 'your-gcp-project-id',
+  location: 'us-east1',
+  modelName: 'gemini-1.5-flash-002',
+});
+
+// Add a new cache
+const cache = await gemini.cacheAdd({
+  gsPath: 'gs://your-bucket/large-document.pdf',
+  systemPrompt: 'You are an expert at analyzing technical documents.',
+  cacheName: 'my-document-cache',
+  cacheTTL: '3600s', // 1 hour
+});
+
+console.log('Cache created:', cache.name);
+
+// List all caches
+const caches = await gemini.cacheList();
+console.log('Available caches:', caches);
+
+// Update cache TTL
+await gemini.cacheUpdate(cache.name, {
+  ttl: '7200s', // Extend to 2 hours
+});
+```
+
 ## API Reference
 
-### `
+### `GeminiAPI(options)`
 
-Creates a new Gemini client instance.
+Creates a new Gemini client instance using Google AI Studio API.
 
 #### Parameters
 
@@ -172,6 +310,180 @@ await gemini.chatWithStreaming(
 );
 ```
 
+---
+
+### `GeminiVertex(options)`
+
+Creates a new Gemini client instance using Vertex AI. Includes all features of GeminiAPI plus context caching support.
+
+#### Parameters
+
+- `options` (Object) - Configuration options
+  - `projectId` (string) **required** - Your Google Cloud project ID
+  - `location` (string) **required** - GCP region (e.g., 'us-east1', 'us-central1')
+  - `modelName` (string) **required** - Model name (e.g., 'gemini-1.5-flash-002', 'gemini-1.5-pro-002')
+
+#### Returns
+
+Returns a Gemini client object with the following methods:
+
+##### `client.chat(chatOptions)`
+
+Same as GeminiAPI.chat(). See above for details.
+
+##### `client.chatWithStreaming(chatOptions, callbackOptions)`
+
+Same as GeminiAPI.chatWithStreaming(). See above for details.
+
+##### `client.cacheAdd(cacheOptions)`
+
+Creates a new context cache for frequently used content.
+
+**Parameters:**
+
+- `cacheOptions` (Object)
+  - `gsPath` (string) **required** - Google Cloud Storage path (e.g., 'gs://bucket/file.pdf')
+  - `systemPrompt` (string) **required** - System instruction for the cached context
+  - `cacheName` (string) **required** - Display name for the cache
+  - `cacheTTL` (string) **required** - Time-to-live (e.g., '3600s' for 1 hour)
+
+**Returns:**
+
+- (Promise\<Object\>) - Cache object with name and metadata
+
+**Example:**
+
+```javascript
+const cache = await gemini.cacheAdd({
+  gsPath: 'gs://my-bucket/documentation.pdf',
+  systemPrompt: 'You are a helpful documentation assistant.',
+  cacheName: 'docs-cache',
+  cacheTTL: '3600s',
+});
+```
+
+##### `client.cacheList()`
+
+Lists all available caches in the project.
+
+**Parameters:** None
+
+**Returns:**
+
+- (Promise\<Array\>) - Array of cache objects with `name` and `displayName` properties
+
+**Example:**
+
+```javascript
+const caches = await gemini.cacheList();
+console.log(caches);
+// [{ name: 'projects/.../cachedContents/...', displayName: 'docs-cache' }]
+```
+
+##### `client.cacheUpdate(cacheName, cacheOptions)`
+
+Updates an existing cache configuration.
+
+**Parameters:**
+
+- `cacheName` (string) **required** - The cache name to update
+- `cacheOptions` (Object) **required** - Update configuration
+  - `ttl` (string) - New time-to-live value (e.g., '7200s')
+
+**Returns:**
+
+- (Promise\<Object\>) - Updated cache object
+
+**Example:**
+
+```javascript
+await gemini.cacheUpdate('projects/.../cachedContents/abc123', {
+  ttl: '7200s', // Extend to 2 hours
+});
+```
+
+---
+
+### `OpenAIAPI(options)`
+
+Creates a new OpenAI client instance supporting OpenAI and compatible APIs.
+
+#### Parameters
+
+- `options` (Object) - Configuration options
+  - `apiKey` (string) **required** - Your OpenAI API key or compatible service key
+  - `baseURL` (string) **required** - API base URL (e.g., 'https://api.openai.com/v1')
+  - `modelID` (string) **required** - Model identifier (e.g., 'gpt-4o', 'deepseek-reasoner')
+  - `modelThinking` (string) **required** - Thinking mode: 'enabled' or 'disabled'
+
+#### Returns
+
+Returns an OpenAI client object with the following methods:
+
+##### `client.chat(systemPrompt, userPrompt)`
+
+Sends a chat request to the OpenAI API.
+
+**Parameters:**
+
+- `systemPrompt` (string) **required** - System instruction/context for the model
+- `userPrompt` (string) **required** - User's message/question
+
+**Returns:**
+
+- (Promise\<Object\>) - Message object with `role` and `content` properties
+
+**Example:**
+
+```javascript
+const response = await openai.chat(
+  'You are a helpful coding assistant.',
+  'Write a Python function to reverse a string',
+);
+console.log(response.content);
+```
+
+##### `client.chatWithStreaming(systemPrompt, userPrompt, callbackOptions)`
+
+Sends a chat request to the OpenAI API with streaming response and thinking support.
+
+**Parameters:**
+
+- `systemPrompt` (string) **required** - System instruction/context for the model
+- `userPrompt` (string) **required** - User's message/question
+
+- `callbackOptions` (Object) **required** - Callback functions for handling stream events
+  - `beginCallback` (Function) - Called when the stream begins
+  - `firstThinkingCallback` (Function) - Called when the first thinking chunk is received (for reasoning models)
+  - `thinkingCallback` (Function) - Called for each thinking/reasoning chunk received
+    - Parameters: `thinking` (string) - The thinking content chunk
+  - `firstContentCallback` (Function) - Called when the first response content chunk is received
+  - `contentCallback` (Function) - Called for each response content chunk received
+    - Parameters: `content` (string) - The text chunk
+  - `endCallback` (Function) - Called when the stream ends successfully
+  - `errorCallback` (Function) - Called if an error occurs
+    - Parameters: `error` (Error) - The error object
+
+**Returns:**
+
+- (Promise\<void\>) - Resolves when streaming completes
+
+**Example:**
+
+```javascript
+await openai.chatWithStreaming('You are a math tutor.', 'Solve: What is 15% of 240?', {
+  thinkingCallback: (thinking) => {
+    console.log('Thinking:', thinking);
+  },
+  contentCallback: (chunk) => {
+    process.stdout.write(chunk);
+  },
+  endCallback: () => {
+    console.log('\nDone!');
+  },
+});
+```
+
 ## License
 
 MIT
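The README claims compatibility with OpenAI-like services such as DeepSeek but only shows the official OpenAI endpoint. A sketch of what that claim looks like in practice, assuming a DeepSeek-style configuration (the baseURL, model ID, and environment variable are illustrative, not values taken from this diff):

```javascript
import { OpenAIAPI } from 'viho-llm';

// Assumed OpenAI-compatible endpoint and reasoning model; adjust for your provider.
const deepseek = OpenAIAPI({
  apiKey: process.env.DEEPSEEK_API_KEY,
  baseURL: 'https://api.deepseek.com/v1',
  modelID: 'deepseek-reasoner',
  modelThinking: 'enabled',
});

const reply = await deepseek.chat(
  'You are a concise assistant.',
  'Summarize context caching in one sentence.',
);
console.log(reply?.content);
```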
package/index.js
CHANGED
@@ -3,9 +3,10 @@
 var genai = require('@google/genai');
 var mime = require('mime-types');
 var qiao_log_js = require('qiao.log.js');
+var OpenAI = require('openai');
 
 // gemini
-const logger$
+const logger$4 = qiao_log_js.Logger('gemini-util.js');
 
 /**
  * chat
@@ -14,40 +15,46 @@ const logger$2 = qiao_log_js.Logger('gemini-util.js');
  * @param {*} chatOptions
  * @returns
  */
-const chat = async (client, modelName, chatOptions) => {
+const chat$1 = async (client, modelName, chatOptions) => {
   const methodName = 'chat';
 
   // check
   if (!client) {
-    logger$
+    logger$4.error(methodName, 'need client');
     return;
   }
   if (!modelName) {
-    logger$
+    logger$4.error(methodName, 'need modelName');
     return;
   }
   if (!chatOptions) {
-    logger$
+    logger$4.error(methodName, 'need chatOptions');
     return;
   }
   if (!chatOptions.contents) {
-    logger$
+    logger$4.error(methodName, 'need chatOptions.contents');
    return;
   }
 
   try {
-
-
-
-
+    // options
+    const options = Object.assign(
+      {
+        model: modelName,
+      },
+      chatOptions,
+    );
+
+    // gen
+    const response = await client.models.generateContent(options);
     if (!response || !response.text) {
-      logger$
+      logger$4.error(methodName, 'invalid response');
      return;
    }
 
     return response.text;
   } catch (error) {
-    logger$
+    logger$4.error(methodName, 'error', error);
   }
 };
 
@@ -59,28 +66,28 @@ const chat = async (client, modelName, chatOptions) => {
  * @param {*} callbackOptions
  * @returns
  */
-const chatWithStreaming = async (client, modelName, chatOptions, callbackOptions) => {
+const chatWithStreaming$1 = async (client, modelName, chatOptions, callbackOptions) => {
   const methodName = 'chatWithStreaming';
 
   // check
   if (!client) {
-    logger$
+    logger$4.error(methodName, 'need client');
     return;
   }
   if (!modelName) {
-    logger$
+    logger$4.error(methodName, 'need modelName');
     return;
   }
   if (!chatOptions) {
-    logger$
+    logger$4.error(methodName, 'need chatOptions');
     return;
   }
   if (!chatOptions.contents) {
-    logger$
+    logger$4.error(methodName, 'need chatOptions.contents');
     return;
   }
   if (!callbackOptions) {
-    logger$
+    logger$4.error(methodName, 'need callbackOptions');
     return;
   }
 
@@ -92,11 +99,17 @@ const chatWithStreaming = async (client, modelName, chatOptions, callbackOptions
   const firstContentCallback = callbackOptions.firstContentCallback;
 
   try {
+    // options
+    const options = Object.assign(
+      {
+        model: modelName,
+      },
+      chatOptions,
+    );
+
+    // gen
+    const response = await client.models.generateContentStream(options);
     if (beginCallback) beginCallback();
-    const response = await client.models.generateContentStream({
-      model: modelName,
-      contents: chatOptions.contents,
-    });
 
     // go
     let firstContent = true;
@@ -131,31 +144,39 @@ const cacheAdd = async (client, modelName, cacheOptions) => {
   const methodName = 'cacheAdd';
 
   // check
+  if (!client) {
+    logger$4.error(methodName, 'need client');
+    return;
+  }
+  if (!modelName) {
+    logger$4.error(methodName, 'need modelName');
+    return;
+  }
   if (!cacheOptions) {
-    logger$
+    logger$4.error(methodName, 'need cacheOptions');
     return;
   }
   if (!cacheOptions.gsPath) {
-    logger$
+    logger$4.error(methodName, 'need cacheOptions.gsPath');
     return;
   }
   if (!cacheOptions.systemPrompt) {
-    logger$
+    logger$4.error(methodName, 'need cacheOptions.systemPrompt');
     return;
   }
   if (!cacheOptions.cacheName) {
-    logger$
+    logger$4.error(methodName, 'need cacheOptions.cacheName');
     return;
   }
   if (!cacheOptions.cacheTTL) {
-    logger$
+    logger$4.error(methodName, 'need cacheOptions.cacheTTL');
     return;
   }
 
   // const
   const mimeType = mime.lookup(cacheOptions.gsPath);
-  logger$
-  logger$
+  logger$4.info(methodName, 'cacheOptions', cacheOptions);
+  logger$4.info(methodName, 'mimeType', mimeType);
 
   try {
     // cache add
@@ -171,12 +192,77 @@ const cacheAdd = async (client, modelName, cacheOptions) => {
 
     return cache;
   } catch (error) {
-    logger$
+    logger$4.error(methodName, 'error', error);
+  }
+};
+
+/**
+ * cacheList
+ * @param {*} client
+ * @returns
+ */
+const cacheList = async (client) => {
+  const methodName = 'cacheList';
+
+  // check
+  if (!client) {
+    logger$4.error(methodName, 'need client');
+    return;
+  }
+
+  // cache list
+  try {
+    const cacheList = await client.caches.list();
+    const cacheObjs = cacheList?.pageInternal?.map((contentCache) => ({
+      name: contentCache.name,
+      displayName: contentCache.displayName,
+    }));
+
+    return cacheObjs;
+  } catch (error) {
+    logger$4.error(methodName, 'error', error);
+  }
+};
+
+/**
+ * cacheUpdate
+ * @param {*} client
+ * @param {*} cacheName
+ * @param {*} cacheOptions
+ * @returns
+ */
+const cacheUpdate = async (client, cacheName, cacheOptions) => {
+  const methodName = 'cacheUpdate';
+
+  // check
+  if (!client) {
+    logger$4.error(methodName, 'need client');
+    return;
+  }
+  if (!cacheName) {
+    logger$4.error(methodName, 'need cacheName');
+    return;
+  }
+  if (!cacheOptions) {
+    logger$4.error(methodName, 'need cacheOptions');
+    return;
+  }
+
+  // cache update
+  try {
+    const res = await client.caches.update({
+      name: cacheName,
+      config: cacheOptions,
+    });
+
+    return res;
+  } catch (error) {
+    logger$4.error(methodName, 'error', error);
   }
 };
 
 // gemini
-const logger$
+const logger$3 = qiao_log_js.Logger('gemini-api.js');
 
 /**
  * GeminiAPI
@@ -188,15 +274,15 @@ const GeminiAPI = (options) => {
 
   // check
   if (!options) {
-    logger$
+    logger$3.error(methodName, 'need options');
     return;
   }
   if (!options.apiKey) {
-    logger$
+    logger$3.error(methodName, 'need options.apiKey');
     return;
   }
   if (!options.modelName) {
-    logger$
+    logger$3.error(methodName, 'need options.modelName');
     return;
   }
 
@@ -208,10 +294,10 @@ const GeminiAPI = (options) => {
 
   // chat
   gemini.chat = async (chatOptions) => {
-    return await chat(gemini.client, options.modelName, chatOptions);
+    return await chat$1(gemini.client, options.modelName, chatOptions);
   };
   gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
-    return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
+    return await chatWithStreaming$1(gemini.client, options.modelName, chatOptions, callbackOptions);
   };
 
   // r
@@ -219,7 +305,7 @@ const GeminiAPI = (options) => {
 };
 
 // gemini
-const logger = qiao_log_js.Logger('viho-llm');
+const logger$2 = qiao_log_js.Logger('viho-llm');
 
 /**
  * GeminiVertex
@@ -231,19 +317,19 @@ const GeminiVertex = (options) => {
 
   // check
   if (!options) {
-    logger.error(methodName, 'need options');
+    logger$2.error(methodName, 'need options');
     return;
   }
   if (!options.projectId) {
-    logger.error(methodName, 'need options.projectId');
+    logger$2.error(methodName, 'need options.projectId');
     return;
   }
   if (!options.location) {
-    logger.error(methodName, 'need options.location');
+    logger$2.error(methodName, 'need options.location');
     return;
   }
   if (!options.modelName) {
-    logger.error(methodName, 'need options.modelName');
+    logger$2.error(methodName, 'need options.modelName');
     return;
   }
 
@@ -257,20 +343,246 @@ const GeminiVertex = (options) => {
 
   // chat
   gemini.chat = async (chatOptions) => {
-    return await chat(gemini.client, options.modelName, chatOptions);
+    return await chat$1(gemini.client, options.modelName, chatOptions);
   };
   gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
-    return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
+    return await chatWithStreaming$1(gemini.client, options.modelName, chatOptions, callbackOptions);
   };
 
   // cache
   gemini.cacheAdd = async (cacheOptions) => {
     return await cacheAdd(gemini.client, options.modelName, cacheOptions);
   };
+  gemini.cacheList = async () => {
+    return await cacheList(gemini.client);
+  };
+  gemini.cacheUpdate = async (cacheName, cacheOptions) => {
+    return await cacheUpdate(gemini.client, cacheName, cacheOptions);
+  };
 
   // r
   return gemini;
 };
 
+// Logger
+const logger$1 = qiao_log_js.Logger('openai-util.js');
+
+/**
+ * chat
+ * @param {*} client
+ * @param {*} modelID
+ * @param {*} modelThinking
+ * @param {*} systemPrompt
+ * @param {*} userPrompt
+ * @returns
+ */
+const chat = async (client, modelID, modelThinking, systemPrompt, userPrompt) => {
+  const methodName = 'chat';
+
+  // check
+  if (!client) {
+    logger$1.error(methodName, 'need client');
+    return;
+  }
+  if (!modelID) {
+    logger$1.error(methodName, 'need modelID');
+    return;
+  }
+  if (!modelThinking) {
+    logger$1.error(methodName, 'need modelThinking');
+    return;
+  }
+  if (!systemPrompt) {
+    logger$1.error(methodName, 'need systemPrompt');
+    return;
+  }
+  if (!userPrompt) {
+    logger$1.error(methodName, 'need userPrompt');
+    return;
+  }
+
+  // chat
+  const chatOptions = {
+    model: modelID,
+    messages: [
+      { role: 'system', content: systemPrompt },
+      { role: 'user', content: userPrompt },
+    ],
+    thinking: {
+      type: modelThinking,
+    },
+  };
+
+  // go
+  try {
+    const completion = await client.chat.completions.create(chatOptions);
+    return completion.choices[0]?.message;
+  } catch (error) {
+    logger$1.error(methodName, 'error', error);
+  }
+};
+
+/**
+ * chatWithStreaming
+ * @param {*} client
+ * @param {*} modelID
+ * @param {*} modelThinking
+ * @param {*} systemPrompt
+ * @param {*} userPrompt
+ * @param {*} callbackOptions
+ * @returns
+ */
+const chatWithStreaming = async (client, modelID, modelThinking, systemPrompt, userPrompt, callbackOptions) => {
+  const methodName = 'chatWithStreaming';
+
+  // check
+  if (!client) {
+    logger$1.error(methodName, 'need client');
+    return;
+  }
+  if (!modelID) {
+    logger$1.error(methodName, 'need modelID');
+    return;
+  }
+  if (!modelThinking) {
+    logger$1.error(methodName, 'need modelThinking');
+    return;
+  }
+  if (!systemPrompt) {
+    logger$1.error(methodName, 'need systemPrompt');
+    return;
+  }
+  if (!userPrompt) {
+    logger$1.error(methodName, 'need userPrompt');
+    return;
+  }
+  if (!callbackOptions) {
+    logger$1.error(methodName, 'need callbackOptions');
+    return;
+  }
+
+  // callback
+  const beginCallback = callbackOptions.beginCallback;
+  const endCallback = callbackOptions.endCallback;
+  const errorCallback = callbackOptions.errorCallback;
+  const thinkingCallback = callbackOptions.thinkingCallback;
+  const firstThinkingCallback = callbackOptions.firstThinkingCallback;
+  const contentCallback = callbackOptions.contentCallback;
+  const firstContentCallback = callbackOptions.firstContentCallback;
+
+  // chat
+  const chatOptions = {
+    model: modelID,
+    messages: [
+      { role: 'system', content: systemPrompt },
+      { role: 'user', content: userPrompt },
+    ],
+    thinking: {
+      type: modelThinking,
+    },
+  };
+
+  // go
+  try {
+    chatOptions.stream = true;
+    const stream = await client.chat.completions.create(chatOptions);
+    if (beginCallback) beginCallback();
+
+    // go
+    let firstThinking = true;
+    let firstContent = true;
+    for await (const part of stream) {
+      // thinking
+      const thinkingContent = part.choices[0]?.delta?.reasoning_content;
+      if (thinkingContent && thinkingCallback) {
+        if (firstThinking && firstThinkingCallback) {
+          firstThinking = false;
+          firstThinkingCallback();
+        }
+
+        thinkingCallback(thinkingContent);
+      }
+
+      // content
+      const content = part.choices[0]?.delta?.content;
+      if (content && contentCallback) {
+        if (firstContent && firstContentCallback) {
+          firstContent = false;
+          firstContentCallback();
+        }
+
+        contentCallback(content);
+      }
+    }
+
+    // end
+    if (endCallback) endCallback();
+  } catch (error) {
+    if (errorCallback) errorCallback(error);
+  }
+};
+
+// openai
+const logger = qiao_log_js.Logger('openai.js');
+
+/**
+ * OpenAI
+ * @param {*} options
+ * @returns
+ */
+const OpenAIAPI = (options) => {
+  const methodName = 'OpenAI';
+
+  // check
+  if (!options) {
+    logger.error(methodName, 'need options');
+    return;
+  }
+  if (!options.apiKey) {
+    logger.error(methodName, 'need options.apiKey');
+    return;
+  }
+  if (!options.baseURL) {
+    logger.error(methodName, 'need options.baseURL');
+    return;
+  }
+  if (!options.modelID) {
+    logger.error(methodName, 'need options.modelID');
+    return;
+  }
+  if (!options.modelThinking) {
+    logger.error(methodName, 'need options.modelThinking');
+    return;
+  }
+
+  // openai
+  const openai = {};
+  openai.client = new OpenAI({
+    apiKey: options.apiKey,
+    baseURL: options.baseURL,
+  });
+
+  // chat
+  openai.chat = async (systemPrompt, userPrompt) => {
+    return await chat(openai.client, options.modelID, options.modelThinking, systemPrompt, userPrompt);
+  };
+
+  // chat with streaming
+  openai.chatWithStreaming = async (systemPrompt, userPrompt, callbakOptions) => {
+    return await chatWithStreaming(
+      openai.client,
+      options.modelID,
+      options.modelThinking,
+      systemPrompt,
+      userPrompt,
+      callbakOptions,
+    );
+  };
+
+  //
+  return openai;
+};
+
 exports.GeminiAPI = GeminiAPI;
 exports.GeminiVertex = GeminiVertex;
+exports.OpenAIAPI = OpenAIAPI;
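The rebuilt bundle stays CommonJS (require/exports), so CommonJS consumers pick up the new export without changing their module setup; a sketch (the environment variable name is an assumption):

```javascript
// CommonJS consumption of the 0.1.7 bundle.
const { OpenAIAPI } = require('viho-llm');

const openai = OpenAIAPI({
  apiKey: process.env.OPENAI_API_KEY, // assumed env var
  baseURL: 'https://api.openai.com/v1',
  modelID: 'gpt-4o',
  modelThinking: 'disabled',
});

openai
  .chat('You are a helpful assistant.', 'Say hello in five words.')
  .then((message) => console.log(message?.content));
```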
package/package.json
CHANGED
@@ -1,17 +1,23 @@
 {
   "name": "viho-llm",
-  "version": "0.1.
-  "description": "Utility library for working with Google Gemini
+  "version": "0.1.7",
+  "description": "Utility library for working with multiple LLM providers (Google Gemini and OpenAI), providing common tools and helpers for AI interactions",
   "keywords": [
     "llm",
     "ai",
     "gemini",
+    "openai",
     "google-ai",
     "google-gemini",
     "genai",
+    "gpt",
+    "chatgpt",
     "ai-tools",
     "language-model",
     "ai-utilities",
+    "reasoning",
+    "thinking",
+    "deepseek",
     "viho"
   ],
   "author": "uikoo9 <uikoo9@qq.com>",
@@ -42,6 +48,7 @@
   "dependencies": {
     "@google/genai": "^1.34.0",
     "mime-types": "^2.1.35",
+    "openai": "^5.23.2",
     "qiao.log.js": "^3.7.5"
   },
   "nx": {
@@ -61,5 +68,5 @@
       }
     }
   },
-  "gitHead": "
+  "gitHead": "21b7c541435da99dceb5dcf657ac5a528ed792ba"
 }
package/src/index.js
CHANGED
package/src/models/gemini-util.js
CHANGED
@@ -37,10 +37,16 @@ export const chat = async (client, modelName, chatOptions) => {
   }
 
   try {
-
-
-
-
+    // options
+    const options = Object.assign(
+      {
+        model: modelName,
+      },
+      chatOptions,
+    );
+
+    // gen
+    const response = await client.models.generateContent(options);
     if (!response || !response.text) {
       logger.error(methodName, 'invalid response');
       return;
@@ -93,11 +99,17 @@ export const chatWithStreaming = async (client, modelName, chatOptions, callback
   const firstContentCallback = callbackOptions.firstContentCallback;
 
   try {
+    // options
+    const options = Object.assign(
+      {
+        model: modelName,
+      },
+      chatOptions,
+    );
+
+    // gen
+    const response = await client.models.generateContentStream(options);
     if (beginCallback) beginCallback();
-    const response = await client.models.generateContentStream({
-      model: modelName,
-      contents: chatOptions.contents,
-    });
 
     // go
     let firstContent = true;
@@ -132,6 +144,14 @@ export const cacheAdd = async (client, modelName, cacheOptions) => {
   const methodName = 'cacheAdd';
 
   // check
+  if (!client) {
+    logger.error(methodName, 'need client');
+    return;
+  }
+  if (!modelName) {
+    logger.error(methodName, 'need modelName');
+    return;
+  }
   if (!cacheOptions) {
     logger.error(methodName, 'need cacheOptions');
     return;
@@ -175,3 +195,68 @@ export const cacheAdd = async (client, modelName, cacheOptions) => {
     logger.error(methodName, 'error', error);
   }
 };
+
+/**
+ * cacheList
+ * @param {*} client
+ * @returns
+ */
+export const cacheList = async (client) => {
+  const methodName = 'cacheList';
+
+  // check
+  if (!client) {
+    logger.error(methodName, 'need client');
+    return;
+  }
+
+  // cache list
+  try {
+    const cacheList = await client.caches.list();
+    const cacheObjs = cacheList?.pageInternal?.map((contentCache) => ({
+      name: contentCache.name,
+      displayName: contentCache.displayName,
+    }));
+
+    return cacheObjs;
+  } catch (error) {
+    logger.error(methodName, 'error', error);
+  }
+};
+
+/**
+ * cacheUpdate
+ * @param {*} client
+ * @param {*} cacheName
+ * @param {*} cacheOptions
+ * @returns
+ */
+export const cacheUpdate = async (client, cacheName, cacheOptions) => {
+  const methodName = 'cacheUpdate';
+
+  // check
+  if (!client) {
+    logger.error(methodName, 'need client');
+    return;
+  }
+  if (!cacheName) {
+    logger.error(methodName, 'need cacheName');
+    return;
+  }
+  if (!cacheOptions) {
+    logger.error(methodName, 'need cacheOptions');
+    return;
+  }
+
+  // cache update
+  try {
+    const res = await client.caches.update({
+      name: cacheName,
+      config: cacheOptions,
+    });
+
+    return res;
+  } catch (error) {
+    logger.error(methodName, 'error', error);
+  }
+};
package/src/models/gemini-vertex.js
CHANGED
@@ -2,7 +2,7 @@
 import { GoogleGenAI } from '@google/genai';
 
 // util
-import { chat, chatWithStreaming, cacheAdd } from './gemini-util.js';
+import { chat, chatWithStreaming, cacheAdd, cacheList, cacheUpdate } from './gemini-util.js';
 
 // Logger
 import { Logger } from 'qiao.log.js';
@@ -54,6 +54,12 @@ export const GeminiVertex = (options) => {
   gemini.cacheAdd = async (cacheOptions) => {
     return await cacheAdd(gemini.client, options.modelName, cacheOptions);
   };
+  gemini.cacheList = async () => {
+    return await cacheList(gemini.client);
+  };
+  gemini.cacheUpdate = async (cacheName, cacheOptions) => {
+    return await cacheUpdate(gemini.client, cacheName, cacheOptions);
+  };
 
   // r
   return gemini;
package/src/models/openai-util.js
ADDED
@@ -0,0 +1,158 @@
+// Logger
+import { Logger } from 'qiao.log.js';
+const logger = Logger('openai-util.js');
+
+/**
+ * chat
+ * @param {*} client
+ * @param {*} modelID
+ * @param {*} modelThinking
+ * @param {*} systemPrompt
+ * @param {*} userPrompt
+ * @returns
+ */
+export const chat = async (client, modelID, modelThinking, systemPrompt, userPrompt) => {
+  const methodName = 'chat';
+
+  // check
+  if (!client) {
+    logger.error(methodName, 'need client');
+    return;
+  }
+  if (!modelID) {
+    logger.error(methodName, 'need modelID');
+    return;
+  }
+  if (!modelThinking) {
+    logger.error(methodName, 'need modelThinking');
+    return;
+  }
+  if (!systemPrompt) {
+    logger.error(methodName, 'need systemPrompt');
+    return;
+  }
+  if (!userPrompt) {
+    logger.error(methodName, 'need userPrompt');
+    return;
+  }
+
+  // chat
+  const chatOptions = {
+    model: modelID,
+    messages: [
+      { role: 'system', content: systemPrompt },
+      { role: 'user', content: userPrompt },
+    ],
+    thinking: {
+      type: modelThinking,
+    },
+  };
+
+  // go
+  try {
+    const completion = await client.chat.completions.create(chatOptions);
+    return completion.choices[0]?.message;
+  } catch (error) {
+    logger.error(methodName, 'error', error);
+  }
+};
+
+/**
+ * chatWithStreaming
+ * @param {*} client
+ * @param {*} modelID
+ * @param {*} modelThinking
+ * @param {*} systemPrompt
+ * @param {*} userPrompt
+ * @param {*} callbackOptions
+ * @returns
+ */
+export const chatWithStreaming = async (client, modelID, modelThinking, systemPrompt, userPrompt, callbackOptions) => {
+  const methodName = 'chatWithStreaming';
+
+  // check
+  if (!client) {
+    logger.error(methodName, 'need client');
+    return;
+  }
+  if (!modelID) {
+    logger.error(methodName, 'need modelID');
+    return;
+  }
+  if (!modelThinking) {
+    logger.error(methodName, 'need modelThinking');
+    return;
+  }
+  if (!systemPrompt) {
+    logger.error(methodName, 'need systemPrompt');
+    return;
+  }
+  if (!userPrompt) {
+    logger.error(methodName, 'need userPrompt');
+    return;
+  }
+  if (!callbackOptions) {
+    logger.error(methodName, 'need callbackOptions');
+    return;
+  }
+
+  // callback
+  const beginCallback = callbackOptions.beginCallback;
+  const endCallback = callbackOptions.endCallback;
+  const errorCallback = callbackOptions.errorCallback;
+  const thinkingCallback = callbackOptions.thinkingCallback;
+  const firstThinkingCallback = callbackOptions.firstThinkingCallback;
+  const contentCallback = callbackOptions.contentCallback;
+  const firstContentCallback = callbackOptions.firstContentCallback;
+
+  // chat
+  const chatOptions = {
+    model: modelID,
+    messages: [
+      { role: 'system', content: systemPrompt },
+      { role: 'user', content: userPrompt },
+    ],
+    thinking: {
+      type: modelThinking,
+    },
+  };
+
+  // go
+  try {
+    chatOptions.stream = true;
+    const stream = await client.chat.completions.create(chatOptions);
+    if (beginCallback) beginCallback();
+
+    // go
+    let firstThinking = true;
+    let firstContent = true;
+    for await (const part of stream) {
+      // thinking
+      const thinkingContent = part.choices[0]?.delta?.reasoning_content;
+      if (thinkingContent && thinkingCallback) {
+        if (firstThinking && firstThinkingCallback) {
+          firstThinking = false;
+          firstThinkingCallback();
+        }
+
+        thinkingCallback(thinkingContent);
+      }
+
+      // content
+      const content = part.choices[0]?.delta?.content;
+      if (content && contentCallback) {
+        if (firstContent && firstContentCallback) {
+          firstContent = false;
+          firstContentCallback();
+        }
+
+        contentCallback(content);
+      }
+    }
+
+    // end
+    if (endCallback) endCallback();
+  } catch (error) {
+    if (errorCallback) errorCallback(error);
+  }
+};
package/src/models/openai.js
ADDED
@@ -0,0 +1,67 @@
+// openai
+import OpenAI from 'openai';
+
+// util
+import { chat, chatWithStreaming } from './openai-util.js';
+
+// Logger
+import { Logger } from 'qiao.log.js';
+const logger = Logger('openai.js');
+
+/**
+ * OpenAI
+ * @param {*} options
+ * @returns
+ */
+export const OpenAIAPI = (options) => {
+  const methodName = 'OpenAI';
+
+  // check
+  if (!options) {
+    logger.error(methodName, 'need options');
+    return;
+  }
+  if (!options.apiKey) {
+    logger.error(methodName, 'need options.apiKey');
+    return;
+  }
+  if (!options.baseURL) {
+    logger.error(methodName, 'need options.baseURL');
+    return;
+  }
+  if (!options.modelID) {
+    logger.error(methodName, 'need options.modelID');
+    return;
+  }
+  if (!options.modelThinking) {
+    logger.error(methodName, 'need options.modelThinking');
+    return;
+  }
+
+  // openai
+  const openai = {};
+  openai.client = new OpenAI({
+    apiKey: options.apiKey,
+    baseURL: options.baseURL,
+  });
+
+  // chat
+  openai.chat = async (systemPrompt, userPrompt) => {
+    return await chat(openai.client, options.modelID, options.modelThinking, systemPrompt, userPrompt);
+  };
+
+  // chat with streaming
+  openai.chatWithStreaming = async (systemPrompt, userPrompt, callbakOptions) => {
+    return await chatWithStreaming(
+      openai.client,
+      options.modelID,
+      options.modelThinking,
+      systemPrompt,
+      userPrompt,
+      callbakOptions,
+    );
+  };
+
+  //
+  return openai;
+};