viho-llm 0.1.4 → 0.1.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +173 -14
- package/index.js +262 -84
- package/package.json +2 -2
- package/src/index.js +2 -1
- package/src/models/gemini-api.js +49 -0
- package/src/models/gemini-util.js +264 -0
- package/src/models/gemini-vertex.js +66 -0
- package/src/gemini.js +0 -190
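The consumer-visible change in this release appears to be that the single `Gemini` export is split into two entry points, `GeminiAPI` and `GeminiVertex`. A minimal migration sketch, based only on the exports shown in this diff (the option values are hypothetical placeholders):

```javascript
// Before (0.1.4): one API-key based client
// import { Gemini } from 'viho-llm';
// const gemini = Gemini({ apiKey: 'your-google-api-key', modelName: 'gemini-pro' });

// After (0.1.6): pick the backend explicitly
import { GeminiAPI, GeminiVertex } from 'viho-llm';

// Google AI Studio (API key)
const apiClient = GeminiAPI({ apiKey: 'your-google-api-key', modelName: 'gemini-pro' });

// Vertex AI (project + location); also exposes cacheAdd/cacheList/cacheUpdate
const vertexClient = GeminiVertex({
  projectId: 'your-gcp-project-id',
  location: 'us-east1',
  modelName: 'gemini-1.5-flash-002',
});
```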
package/README.md
CHANGED
@@ -1,6 +1,10 @@
-
+<p align="center">
+  <img src="https://static-small.vincentqiao.com/viho/logo.png" alt="viho logo" width="200"/>
+</p>

-
+<h1 align="center">viho-llm</h1>
+
+<p align="center">Utility library for working with Google Gemini AI, providing common tools and helpers for AI interactions.</p>

## Installation

@@ -10,17 +14,26 @@ npm install viho-llm

## Prerequisites

-
+This library supports two ways to access Google Gemini AI:
+
+1. **Google AI Studio (GeminiAPI)** - For personal development and prototyping
+   - Get an API key from [Google AI Studio](https://makersuite.google.com/app/apikey)
+
+2. **Vertex AI (GeminiVertex)** - For enterprise applications with advanced features
+   - Requires a Google Cloud project with Vertex AI enabled
+   - Supports context caching for cost optimization

## Usage

-### Basic Example
+### Basic Example with GeminiAPI
+
+Using Google AI Studio API Key (recommended for development):

```javascript
-import {
+import { GeminiAPI } from 'viho-llm';

-// Initialize Gemini client
-const gemini =
+// Initialize Gemini client with API Key
+const gemini = GeminiAPI({
  apiKey: 'your-google-api-key',
  modelName: 'gemini-pro',
});
@@ -38,17 +51,38 @@ const response = await gemini.chat({
console.log(response);
```

-###
+### Basic Example with GeminiVertex
+
+Using Vertex AI (recommended for production):

```javascript
-import {
+import { GeminiVertex } from 'viho-llm';

-// Initialize Gemini client
-const gemini =
-
+// Initialize Gemini client with Vertex AI
+const gemini = GeminiVertex({
+  projectId: 'your-gcp-project-id',
+  location: 'us-east1',
  modelName: 'gemini-pro',
});

+// Send a chat message
+const response = await gemini.chat({
+  contents: [
+    {
+      role: 'user',
+      parts: [{ text: 'Hello, how are you?' }],
+    },
+  ],
+});
+
+console.log(response);
+```
+
+### Streaming Example
+
+Both GeminiAPI and GeminiVertex support streaming responses:
+
+```javascript
// Send a chat message with streaming
await gemini.chatWithStreaming(
  {
@@ -79,11 +113,44 @@ await gemini.chatWithStreaming(
);
```

+### Context Caching Example (Vertex AI Only)
+
+GeminiVertex supports context caching to reduce costs and latency when using large contexts:
+
+```javascript
+import { GeminiVertex } from 'viho-llm';
+
+const gemini = GeminiVertex({
+  projectId: 'your-gcp-project-id',
+  location: 'us-east1',
+  modelName: 'gemini-1.5-flash-002',
+});
+
+// Add a new cache
+const cache = await gemini.cacheAdd({
+  gsPath: 'gs://your-bucket/large-document.pdf',
+  systemPrompt: 'You are an expert at analyzing technical documents.',
+  cacheName: 'my-document-cache',
+  cacheTTL: '3600s', // 1 hour
+});
+
+console.log('Cache created:', cache.name);
+
+// List all caches
+const caches = await gemini.cacheList();
+console.log('Available caches:', caches);
+
+// Update cache TTL
+await gemini.cacheUpdate(cache.name, {
+  ttl: '7200s', // Extend to 2 hours
+});
+```
+
## API Reference

-### `
+### `GeminiAPI(options)`

-Creates a new Gemini client instance.
+Creates a new Gemini client instance using Google AI Studio API.

#### Parameters

@@ -172,6 +239,98 @@ await gemini.chatWithStreaming(
);
```

+---
+
+### `GeminiVertex(options)`
+
+Creates a new Gemini client instance using Vertex AI. Includes all features of GeminiAPI plus context caching support.
+
+#### Parameters
+
+- `options` (Object) - Configuration options
+  - `projectId` (string) **required** - Your Google Cloud project ID
+  - `location` (string) **required** - GCP region (e.g., 'us-east1', 'us-central1')
+  - `modelName` (string) **required** - Model name (e.g., 'gemini-1.5-flash-002', 'gemini-1.5-pro-002')
+
+#### Returns
+
+Returns a Gemini client object with the following methods:
+
+##### `client.chat(chatOptions)`
+
+Same as GeminiAPI.chat(). See above for details.
+
+##### `client.chatWithStreaming(chatOptions, callbackOptions)`
+
+Same as GeminiAPI.chatWithStreaming(). See above for details.
+
+##### `client.cacheAdd(cacheOptions)`
+
+Creates a new context cache for frequently used content.
+
+**Parameters:**
+
+- `cacheOptions` (Object)
+  - `gsPath` (string) **required** - Google Cloud Storage path (e.g., 'gs://bucket/file.pdf')
+  - `systemPrompt` (string) **required** - System instruction for the cached context
+  - `cacheName` (string) **required** - Display name for the cache
+  - `cacheTTL` (string) **required** - Time-to-live (e.g., '3600s' for 1 hour)
+
+**Returns:**
+
+- (Promise\<Object\>) - Cache object with name and metadata
+
+**Example:**
+
+```javascript
+const cache = await gemini.cacheAdd({
+  gsPath: 'gs://my-bucket/documentation.pdf',
+  systemPrompt: 'You are a helpful documentation assistant.',
+  cacheName: 'docs-cache',
+  cacheTTL: '3600s',
+});
+```
+
+##### `client.cacheList()`
+
+Lists all available caches in the project.
+
+**Parameters:** None
+
+**Returns:**
+
+- (Promise\<Array\>) - Array of cache objects with `name` and `displayName` properties
+
+**Example:**
+
+```javascript
+const caches = await gemini.cacheList();
+console.log(caches);
+// [{ name: 'projects/.../cachedContents/...', displayName: 'docs-cache' }]
+```
+
+##### `client.cacheUpdate(cacheName, cacheOptions)`
+
+Updates an existing cache configuration.
+
+**Parameters:**
+
+- `cacheName` (string) **required** - The cache name to update
+- `cacheOptions` (Object) **required** - Update configuration
+  - `ttl` (string) - New time-to-live value (e.g., '7200s')
+
+**Returns:**
+
+- (Promise\<Object\>) - Updated cache object
+
+**Example:**
+
+```javascript
+await gemini.cacheUpdate('projects/.../cachedContents/abc123', {
+  ttl: '7200s', // Extend to 2 hours
+});
+```
+
## License

MIT
package/index.js
CHANGED
@@ -5,94 +5,88 @@ var mime = require('mime-types');
var qiao_log_js = require('qiao.log.js');

// gemini
-const logger = qiao_log_js.Logger('
+const logger$2 = qiao_log_js.Logger('gemini-util.js');

/**
- *
- * @param {*}
+ * chat
+ * @param {*} client
+ * @param {*} modelName
+ * @param {*} chatOptions
 * @returns
 */
-const
-  const methodName = '
+const chat = async (client, modelName, chatOptions) => {
+  const methodName = 'chat';

  // check
-  if (!
-    logger.error(methodName, 'need
-    return;
-  }
-  if (!options.apiKey) {
-    logger.error(methodName, 'need options.apiKey');
+  if (!client) {
+    logger$2.error(methodName, 'need client');
    return;
  }
-  if (!
-    logger.error(methodName, 'need
+  if (!modelName) {
+    logger$2.error(methodName, 'need modelName');
    return;
  }
-
-  // gemini
-  const gemini = {};
-  gemini.client = new genai.GoogleGenAI({
-    vertexai: true,
-    apiKey: options.apiKey,
-  });
-
-  // chat
-  gemini.chat = async (chatOptions) => {
-    return await chat(gemini.client, options.modelName, chatOptions);
-  };
-  gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
-    return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
-  };
-
-  // cache
-  gemini.cacheAdd = async (cacheOptions) => {
-    return await cacheAdd(gemini.client, options.modelName, cacheOptions);
-  };
-
-  // r
-  return gemini;
-};
-
-// chat
-async function chat(client, modelName, chatOptions) {
-  const methodName = 'Gemini - chat';
-
-  // check
  if (!chatOptions) {
-    logger.error(methodName, 'need chatOptions');
+    logger$2.error(methodName, 'need chatOptions');
    return;
  }
  if (!chatOptions.contents) {
-    logger.error(methodName, 'need chatOptions.contents');
+    logger$2.error(methodName, 'need chatOptions.contents');
    return;
  }

  try {
-
-
-
-
+    // options
+    const options = Object.assign(
+      {
+        model: modelName,
+      },
+      chatOptions,
+    );
+
+    // gen
+    const response = await client.models.generateContent(options);
    if (!response || !response.text) {
-      logger.error(methodName, 'invalid response');
+      logger$2.error(methodName, 'invalid response');
      return;
    }

    return response.text;
  } catch (error) {
-    logger.error(methodName, 'error', error);
+    logger$2.error(methodName, 'error', error);
  }
-}
+};

-
-
+/**
+ * chatWithStreaming
+ * @param {*} client
+ * @param {*} modelName
+ * @param {*} chatOptions
+ * @param {*} callbackOptions
+ * @returns
+ */
+const chatWithStreaming = async (client, modelName, chatOptions, callbackOptions) => {
+  const methodName = 'chatWithStreaming';

  // check
+  if (!client) {
+    logger$2.error(methodName, 'need client');
+    return;
+  }
+  if (!modelName) {
+    logger$2.error(methodName, 'need modelName');
+    return;
+  }
  if (!chatOptions) {
-    logger.error(methodName, 'need chatOptions');
+    logger$2.error(methodName, 'need chatOptions');
    return;
  }
  if (!chatOptions.contents) {
-    logger.error(methodName, 'need chatOptions.contents');
+    logger$2.error(methodName, 'need chatOptions.contents');
+    return;
+  }
+  if (!callbackOptions) {
+    logger$2.error(methodName, 'need callbackOptions');
    return;
  }

@@ -104,11 +98,19 @@ async function chatWithStreaming(client, modelName, chatOptions, callbackOptions
  const firstContentCallback = callbackOptions.firstContentCallback;

  try {
+    // begin
    if (beginCallback) beginCallback();
-
-
-
-
+
+    // options
+    const options = Object.assign(
+      {
+        model: modelName,
+      },
+      chatOptions,
+    );
+
+    // gen
+    const response = await client.models.generateContentStream(options);

    // go
    let firstContent = true;
@@ -130,52 +132,59 @@ async function chatWithStreaming(client, modelName, chatOptions, callbackOptions
  } catch (error) {
    if (errorCallback) errorCallback(error);
  }
-}
+};

-
-
-
+/**
+ * cacheAdd
+ * @param {*} client
+ * @param {*} modelName
+ * @param {*} cacheOptions
+ * @returns
+ */
+const cacheAdd = async (client, modelName, cacheOptions) => {
+  const methodName = 'cacheAdd';

  // check
+  if (!client) {
+    logger$2.error(methodName, 'need client');
+    return;
+  }
+  if (!modelName) {
+    logger$2.error(methodName, 'need modelName');
+    return;
+  }
  if (!cacheOptions) {
-    logger.error(methodName, 'need cacheOptions');
+    logger$2.error(methodName, 'need cacheOptions');
    return;
  }
-  if (!cacheOptions.
-    logger.error(methodName, 'need cacheOptions.
+  if (!cacheOptions.gsPath) {
+    logger$2.error(methodName, 'need cacheOptions.gsPath');
    return;
  }
  if (!cacheOptions.systemPrompt) {
-    logger.error(methodName, 'need cacheOptions.systemPrompt');
+    logger$2.error(methodName, 'need cacheOptions.systemPrompt');
    return;
  }
  if (!cacheOptions.cacheName) {
-    logger.error(methodName, 'need cacheOptions.cacheName');
+    logger$2.error(methodName, 'need cacheOptions.cacheName');
    return;
  }
  if (!cacheOptions.cacheTTL) {
-    logger.error(methodName, 'need cacheOptions.cacheTTL');
+    logger$2.error(methodName, 'need cacheOptions.cacheTTL');
    return;
  }

  // const
-  const mimeType = mime.lookup(cacheOptions.
-  logger.info(methodName, 'cacheOptions', cacheOptions);
-  logger.info(methodName, 'mimeType', mimeType);
+  const mimeType = mime.lookup(cacheOptions.gsPath);
+  logger$2.info(methodName, 'cacheOptions', cacheOptions);
+  logger$2.info(methodName, 'mimeType', mimeType);

  try {
-    // upload doc
-    const doc = await client.files.upload({
-      file: cacheOptions.filePath,
-      config: { mimeType: mimeType },
-    });
-    logger.info(methodName, 'doc.name', doc.name);
-
    // cache add
    const cache = await client.caches.create({
      model: modelName,
      config: {
-        contents: genai.createUserContent(genai.createPartFromUri(
+        contents: genai.createUserContent(genai.createPartFromUri(cacheOptions.gsPath, mimeType)),
        systemInstruction: cacheOptions.systemPrompt,
        displayName: cacheOptions.cacheName,
        ttl: cacheOptions.cacheTTL,
@@ -184,8 +193,177 @@ async function cacheAdd(client, modelName, cacheOptions) {

    return cache;
  } catch (error) {
-    logger.error(methodName, 'error', error);
+    logger$2.error(methodName, 'error', error);
+  }
+};
+
+/**
+ * cacheList
+ * @param {*} client
+ * @returns
+ */
+const cacheList = async (client) => {
+  const methodName = 'cacheList';
+
+  // check
+  if (!client) {
+    logger$2.error(methodName, 'need client');
+    return;
+  }
+
+  // cache list
+  try {
+    const cacheList = await client.caches.list();
+    const cacheObjs = cacheList?.pageInternal?.map((contentCache) => ({
+      name: contentCache.name,
+      displayName: contentCache.displayName,
+    }));
+
+    return cacheObjs;
+  } catch (error) {
+    logger$2.error(methodName, 'error', error);
  }
-}
+};
+
+/**
+ * cacheUpdate
+ * @param {*} client
+ * @param {*} cacheName
+ * @param {*} cacheOptions
+ * @returns
+ */
+const cacheUpdate = async (client, cacheName, cacheOptions) => {
+  const methodName = 'cacheUpdate';
+
+  // check
+  if (!client) {
+    logger$2.error(methodName, 'need client');
+    return;
+  }
+  if (!cacheName) {
+    logger$2.error(methodName, 'need cacheName');
+    return;
+  }
+  if (!cacheOptions) {
+    logger$2.error(methodName, 'need cacheOptions');
+    return;
+  }
+
+  // cache update
+  try {
+    const res = await client.caches.update({
+      name: cacheName,
+      config: cacheOptions,
+    });
+
+    return res;
+  } catch (error) {
+    logger$2.error(methodName, 'error', error);
+  }
+};
+
+// gemini
+const logger$1 = qiao_log_js.Logger('gemini-api.js');
+
+/**
+ * GeminiAPI
+ * @param {*} options
+ * @returns
+ */
+const GeminiAPI = (options) => {
+  const methodName = 'GeminiAPI';
+
+  // check
+  if (!options) {
+    logger$1.error(methodName, 'need options');
+    return;
+  }
+  if (!options.apiKey) {
+    logger$1.error(methodName, 'need options.apiKey');
+    return;
+  }
+  if (!options.modelName) {
+    logger$1.error(methodName, 'need options.modelName');
+    return;
+  }
+
+  // gemini
+  const gemini = {};
+  gemini.client = new genai.GoogleGenAI({
+    apiKey: options.apiKey,
+  });
+
+  // chat
+  gemini.chat = async (chatOptions) => {
+    return await chat(gemini.client, options.modelName, chatOptions);
+  };
+  gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
+    return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
+  };
+
+  // r
+  return gemini;
+};
+
+// gemini
+const logger = qiao_log_js.Logger('viho-llm');
+
+/**
+ * GeminiVertex
+ * @param {*} options
+ * @returns
+ */
+const GeminiVertex = (options) => {
+  const methodName = 'GeminiVertex';
+
+  // check
+  if (!options) {
+    logger.error(methodName, 'need options');
+    return;
+  }
+  if (!options.projectId) {
+    logger.error(methodName, 'need options.projectId');
+    return;
+  }
+  if (!options.location) {
+    logger.error(methodName, 'need options.location');
+    return;
+  }
+  if (!options.modelName) {
+    logger.error(methodName, 'need options.modelName');
+    return;
+  }
+
+  // gemini
+  const gemini = {};
+  gemini.client = new genai.GoogleGenAI({
+    vertexai: true,
+    project: options.projectId,
+    location: options.location,
+  });
+
+  // chat
+  gemini.chat = async (chatOptions) => {
+    return await chat(gemini.client, options.modelName, chatOptions);
+  };
+  gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
+    return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
+  };
+
+  // cache
+  gemini.cacheAdd = async (cacheOptions) => {
+    return await cacheAdd(gemini.client, options.modelName, cacheOptions);
+  };
+  gemini.cacheList = async () => {
+    return await cacheList(gemini.client);
+  };
+  gemini.cacheUpdate = async (cacheName, cacheOptions) => {
+    return await cacheUpdate(gemini.client, cacheName, cacheOptions);
+  };
+
+  // r
+  return gemini;
+};

-exports.
+exports.GeminiAPI = GeminiAPI;
+exports.GeminiVertex = GeminiVertex;
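Note the breaking change in `cacheAdd` visible above: 0.1.4 took a local `filePath`, uploaded it via `client.files.upload`, and cached the uploaded document, while 0.1.6 takes a `gsPath` pointing at an object already in Google Cloud Storage and caches it directly. A hedged sketch of the before/after call shape (bucket and file names are hypothetical):

```javascript
// 0.1.4: local file, uploaded through the Files API first
// await gemini.cacheAdd({ filePath: './docs/manual.pdf', systemPrompt, cacheName, cacheTTL: '3600s' });

// 0.1.6: file must already live in GCS; no upload step, Vertex client only
await gemini.cacheAdd({
  gsPath: 'gs://my-bucket/manual.pdf',
  systemPrompt: 'You are a helpful documentation assistant.',
  cacheName: 'docs-cache',
  cacheTTL: '3600s',
});
```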
package/package.json
CHANGED
@@ -1,6 +1,6 @@
{
  "name": "viho-llm",
-  "version": "0.1.4",
+  "version": "0.1.6",
  "description": "Utility library for working with Google Gemini AI, providing common tools and helpers for AI interactions",
  "keywords": [
    "llm",
@@ -61,5 +61,5 @@
      }
    }
  },
-  "gitHead": "
+  "gitHead": "d77d2ba692eac3cd5262f55ade7cf74b84172385"
}
package/src/index.js
CHANGED
@@ -1 +1,2 @@
-export * from './gemini.js';
+export * from './models/gemini-api.js';
+export * from './models/gemini-vertex.js';
package/src/models/gemini-api.js
ADDED
@@ -0,0 +1,49 @@
+// gemini
+import { GoogleGenAI } from '@google/genai';
+
+// util
+import { chat, chatWithStreaming } from './gemini-util.js';
+
+// Logger
+import { Logger } from 'qiao.log.js';
+const logger = Logger('gemini-api.js');
+
+/**
+ * GeminiAPI
+ * @param {*} options
+ * @returns
+ */
+export const GeminiAPI = (options) => {
+  const methodName = 'GeminiAPI';
+
+  // check
+  if (!options) {
+    logger.error(methodName, 'need options');
+    return;
+  }
+  if (!options.apiKey) {
+    logger.error(methodName, 'need options.apiKey');
+    return;
+  }
+  if (!options.modelName) {
+    logger.error(methodName, 'need options.modelName');
+    return;
+  }
+
+  // gemini
+  const gemini = {};
+  gemini.client = new GoogleGenAI({
+    apiKey: options.apiKey,
+  });
+
+  // chat
+  gemini.chat = async (chatOptions) => {
+    return await chat(gemini.client, options.modelName, chatOptions);
+  };
+  gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
+    return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
+  };
+
+  // r
+  return gemini;
+};
package/src/models/gemini-util.js
ADDED
@@ -0,0 +1,264 @@
+// gemini
+import { createUserContent, createPartFromUri } from '@google/genai';
+
+// mime
+import mime from 'mime-types';
+
+// Logger
+import { Logger } from 'qiao.log.js';
+const logger = Logger('gemini-util.js');
+
+/**
+ * chat
+ * @param {*} client
+ * @param {*} modelName
+ * @param {*} chatOptions
+ * @returns
+ */
+export const chat = async (client, modelName, chatOptions) => {
+  const methodName = 'chat';
+
+  // check
+  if (!client) {
+    logger.error(methodName, 'need client');
+    return;
+  }
+  if (!modelName) {
+    logger.error(methodName, 'need modelName');
+    return;
+  }
+  if (!chatOptions) {
+    logger.error(methodName, 'need chatOptions');
+    return;
+  }
+  if (!chatOptions.contents) {
+    logger.error(methodName, 'need chatOptions.contents');
+    return;
+  }
+
+  try {
+    // options
+    const options = Object.assign(
+      {
+        model: modelName,
+      },
+      chatOptions,
+    );
+
+    // gen
+    const response = await client.models.generateContent(options);
+    if (!response || !response.text) {
+      logger.error(methodName, 'invalid response');
+      return;
+    }
+
+    return response.text;
+  } catch (error) {
+    logger.error(methodName, 'error', error);
+  }
+};
+
+/**
+ * chatWithStreaming
+ * @param {*} client
+ * @param {*} modelName
+ * @param {*} chatOptions
+ * @param {*} callbackOptions
+ * @returns
+ */
+export const chatWithStreaming = async (client, modelName, chatOptions, callbackOptions) => {
+  const methodName = 'chatWithStreaming';
+
+  // check
+  if (!client) {
+    logger.error(methodName, 'need client');
+    return;
+  }
+  if (!modelName) {
+    logger.error(methodName, 'need modelName');
+    return;
+  }
+  if (!chatOptions) {
+    logger.error(methodName, 'need chatOptions');
+    return;
+  }
+  if (!chatOptions.contents) {
+    logger.error(methodName, 'need chatOptions.contents');
+    return;
+  }
+  if (!callbackOptions) {
+    logger.error(methodName, 'need callbackOptions');
+    return;
+  }
+
+  // callback
+  const beginCallback = callbackOptions.beginCallback;
+  const endCallback = callbackOptions.endCallback;
+  const errorCallback = callbackOptions.errorCallback;
+  const contentCallback = callbackOptions.contentCallback;
+  const firstContentCallback = callbackOptions.firstContentCallback;
+
+  try {
+    // begin
+    if (beginCallback) beginCallback();
+
+    // options
+    const options = Object.assign(
+      {
+        model: modelName,
+      },
+      chatOptions,
+    );
+
+    // gen
+    const response = await client.models.generateContentStream(options);
+
+    // go
+    let firstContent = true;
+    for await (const chunk of response) {
+      // content
+      const content = chunk.text;
+      if (content && contentCallback) {
+        if (firstContent && firstContentCallback) {
+          firstContent = false;
+          firstContentCallback();
+        }
+
+        contentCallback(content);
+      }
+    }
+
+    // end
+    if (endCallback) endCallback();
+  } catch (error) {
+    if (errorCallback) errorCallback(error);
+  }
+};
+
+/**
+ * cacheAdd
+ * @param {*} client
+ * @param {*} modelName
+ * @param {*} cacheOptions
+ * @returns
+ */
+export const cacheAdd = async (client, modelName, cacheOptions) => {
+  const methodName = 'cacheAdd';
+
+  // check
+  if (!client) {
+    logger.error(methodName, 'need client');
+    return;
+  }
+  if (!modelName) {
+    logger.error(methodName, 'need modelName');
+    return;
+  }
+  if (!cacheOptions) {
+    logger.error(methodName, 'need cacheOptions');
+    return;
+  }
+  if (!cacheOptions.gsPath) {
+    logger.error(methodName, 'need cacheOptions.gsPath');
+    return;
+  }
+  if (!cacheOptions.systemPrompt) {
+    logger.error(methodName, 'need cacheOptions.systemPrompt');
+    return;
+  }
+  if (!cacheOptions.cacheName) {
+    logger.error(methodName, 'need cacheOptions.cacheName');
+    return;
+  }
+  if (!cacheOptions.cacheTTL) {
+    logger.error(methodName, 'need cacheOptions.cacheTTL');
+    return;
+  }
+
+  // const
+  const mimeType = mime.lookup(cacheOptions.gsPath);
+  logger.info(methodName, 'cacheOptions', cacheOptions);
+  logger.info(methodName, 'mimeType', mimeType);
+
+  try {
+    // cache add
+    const cache = await client.caches.create({
+      model: modelName,
+      config: {
+        contents: createUserContent(createPartFromUri(cacheOptions.gsPath, mimeType)),
+        systemInstruction: cacheOptions.systemPrompt,
+        displayName: cacheOptions.cacheName,
+        ttl: cacheOptions.cacheTTL,
+      },
+    });
+
+    return cache;
+  } catch (error) {
+    logger.error(methodName, 'error', error);
+  }
+};
+
+/**
+ * cacheList
+ * @param {*} client
+ * @returns
+ */
+export const cacheList = async (client) => {
+  const methodName = 'cacheList';
+
+  // check
+  if (!client) {
+    logger.error(methodName, 'need client');
+    return;
+  }
+
+  // cache list
+  try {
+    const cacheList = await client.caches.list();
+    const cacheObjs = cacheList?.pageInternal?.map((contentCache) => ({
+      name: contentCache.name,
+      displayName: contentCache.displayName,
+    }));
+
+    return cacheObjs;
+  } catch (error) {
+    logger.error(methodName, 'error', error);
+  }
+};
+
+/**
+ * cacheUpdate
+ * @param {*} client
+ * @param {*} cacheName
+ * @param {*} cacheOptions
+ * @returns
+ */
+export const cacheUpdate = async (client, cacheName, cacheOptions) => {
+  const methodName = 'cacheUpdate';
+
+  // check
+  if (!client) {
+    logger.error(methodName, 'need client');
+    return;
+  }
+  if (!cacheName) {
+    logger.error(methodName, 'need cacheName');
+    return;
+  }
+  if (!cacheOptions) {
+    logger.error(methodName, 'need cacheOptions');
+    return;
+  }
+
+  // cache update
+  try {
+    const res = await client.caches.update({
+      name: cacheName,
+      config: cacheOptions,
+    });
+
+    return res;
+  } catch (error) {
+    logger.error(methodName, 'error', error);
+  }
+};
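The `chatWithStreaming` helper above drives five optional callbacks (`beginCallback`, `firstContentCallback`, `contentCallback`, `endCallback`, `errorCallback`). A minimal usage sketch against either client, assuming the client was created as in the README examples:

```javascript
await gemini.chatWithStreaming(
  {
    contents: [{ role: 'user', parts: [{ text: 'Tell me a short story.' }] }],
  },
  {
    beginCallback: () => console.log('stream started'),
    firstContentCallback: () => console.log('first chunk arrived'),
    contentCallback: (text) => process.stdout.write(text),
    endCallback: () => console.log('\nstream finished'),
    errorCallback: (err) => console.error('stream error', err),
  },
);
```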
package/src/models/gemini-vertex.js
ADDED
@@ -0,0 +1,66 @@
+// gemini
+import { GoogleGenAI } from '@google/genai';
+
+// util
+import { chat, chatWithStreaming, cacheAdd, cacheList, cacheUpdate } from './gemini-util.js';
+
+// Logger
+import { Logger } from 'qiao.log.js';
+const logger = Logger('viho-llm');
+
+/**
+ * GeminiVertex
+ * @param {*} options
+ * @returns
+ */
+export const GeminiVertex = (options) => {
+  const methodName = 'GeminiVertex';
+
+  // check
+  if (!options) {
+    logger.error(methodName, 'need options');
+    return;
+  }
+  if (!options.projectId) {
+    logger.error(methodName, 'need options.projectId');
+    return;
+  }
+  if (!options.location) {
+    logger.error(methodName, 'need options.location');
+    return;
+  }
+  if (!options.modelName) {
+    logger.error(methodName, 'need options.modelName');
+    return;
+  }
+
+  // gemini
+  const gemini = {};
+  gemini.client = new GoogleGenAI({
+    vertexai: true,
+    project: options.projectId,
+    location: options.location,
+  });
+
+  // chat
+  gemini.chat = async (chatOptions) => {
+    return await chat(gemini.client, options.modelName, chatOptions);
+  };
+  gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
+    return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
+  };
+
+  // cache
+  gemini.cacheAdd = async (cacheOptions) => {
+    return await cacheAdd(gemini.client, options.modelName, cacheOptions);
+  };
+  gemini.cacheList = async () => {
+    return await cacheList(gemini.client);
+  };
+  gemini.cacheUpdate = async (cacheName, cacheOptions) => {
+    return await cacheUpdate(gemini.client, cacheName, cacheOptions);
+  };
+
+  // r
+  return gemini;
+};
package/src/gemini.js
DELETED
@@ -1,190 +0,0 @@
-// gemini
-import { GoogleGenAI, createUserContent, createPartFromUri } from '@google/genai';
-
-// mime
-import mime from 'mime-types';
-
-// Logger
-import { Logger } from 'qiao.log.js';
-const logger = Logger('viho-llm');
-
-/**
- * Gemini
- * @param {*} options
- * @returns
- */
-export const Gemini = (options) => {
-  const methodName = 'Gemini';
-
-  // check
-  if (!options) {
-    logger.error(methodName, 'need options');
-    return;
-  }
-  if (!options.apiKey) {
-    logger.error(methodName, 'need options.apiKey');
-    return;
-  }
-  if (!options.modelName) {
-    logger.error(methodName, 'need options.modelName');
-    return;
-  }
-
-  // gemini
-  const gemini = {};
-  gemini.client = new GoogleGenAI({
-    vertexai: true,
-    apiKey: options.apiKey,
-  });
-
-  // chat
-  gemini.chat = async (chatOptions) => {
-    return await chat(gemini.client, options.modelName, chatOptions);
-  };
-  gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
-    return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
-  };
-
-  // cache
-  gemini.cacheAdd = async (cacheOptions) => {
-    return await cacheAdd(gemini.client, options.modelName, cacheOptions);
-  };
-
-  // r
-  return gemini;
-};
-
-// chat
-async function chat(client, modelName, chatOptions) {
-  const methodName = 'Gemini - chat';
-
-  // check
-  if (!chatOptions) {
-    logger.error(methodName, 'need chatOptions');
-    return;
-  }
-  if (!chatOptions.contents) {
-    logger.error(methodName, 'need chatOptions.contents');
-    return;
-  }
-
-  try {
-    const response = await client.models.generateContent({
-      model: modelName,
-      contents: chatOptions.contents,
-    });
-    if (!response || !response.text) {
-      logger.error(methodName, 'invalid response');
-      return;
-    }
-
-    return response.text;
-  } catch (error) {
-    logger.error(methodName, 'error', error);
-  }
-}
-
-async function chatWithStreaming(client, modelName, chatOptions, callbackOptions) {
-  const methodName = 'Gemini - chatWithStreaming';
-
-  // check
-  if (!chatOptions) {
-    logger.error(methodName, 'need chatOptions');
-    return;
-  }
-  if (!chatOptions.contents) {
-    logger.error(methodName, 'need chatOptions.contents');
-    return;
-  }
-
-  // callback
-  const beginCallback = callbackOptions.beginCallback;
-  const endCallback = callbackOptions.endCallback;
-  const errorCallback = callbackOptions.errorCallback;
-  const contentCallback = callbackOptions.contentCallback;
-  const firstContentCallback = callbackOptions.firstContentCallback;
-
-  try {
-    if (beginCallback) beginCallback();
-    const response = await client.models.generateContentStream({
-      model: modelName,
-      contents: chatOptions.contents,
-    });
-
-    // go
-    let firstContent = true;
-    for await (const chunk of response) {
-      // content
-      const content = chunk.text;
-      if (content && contentCallback) {
-        if (firstContent && firstContentCallback) {
-          firstContent = false;
-          firstContentCallback();
-        }
-
-        contentCallback(content);
-      }
-    }
-
-    // end
-    if (endCallback) endCallback();
-  } catch (error) {
-    if (errorCallback) errorCallback(error);
-  }
-}
-
-// cache add
-async function cacheAdd(client, modelName, cacheOptions) {
-  const methodName = 'Gemini - cacheAdd';
-
-  // check
-  if (!cacheOptions) {
-    logger.error(methodName, 'need cacheOptions');
-    return;
-  }
-  if (!cacheOptions.filePath) {
-    logger.error(methodName, 'need cacheOptions.filePath');
-    return;
-  }
-  if (!cacheOptions.systemPrompt) {
-    logger.error(methodName, 'need cacheOptions.systemPrompt');
-    return;
-  }
-  if (!cacheOptions.cacheName) {
-    logger.error(methodName, 'need cacheOptions.cacheName');
-    return;
-  }
-  if (!cacheOptions.cacheTTL) {
-    logger.error(methodName, 'need cacheOptions.cacheTTL');
-    return;
-  }
-
-  // const
-  const mimeType = mime.lookup(cacheOptions.filePath);
-  logger.info(methodName, 'cacheOptions', cacheOptions);
-  logger.info(methodName, 'mimeType', mimeType);
-
-  try {
-    // upload doc
-    const doc = await client.files.upload({
-      file: cacheOptions.filePath,
-      config: { mimeType: mimeType },
-    });
-    logger.info(methodName, 'doc.name', doc.name);
-
-    // cache add
-    const cache = await client.caches.create({
-      model: modelName,
-      config: {
-        contents: createUserContent(createPartFromUri(doc.uri, doc.mimeType)),
-        systemInstruction: cacheOptions.systemPrompt,
-        displayName: cacheOptions.cacheName,
-        ttl: cacheOptions.cacheTTL,
-      },
-    });
-
-    return cache;
-  } catch (error) {
-    logger.error(methodName, 'error', error);
-  }
-}