viho-llm 0.1.2 → 0.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +89 -0
- package/index.js +34 -2
- package/package.json +2 -2
- package/src/gemini.js +35 -3
package/README.md
CHANGED
@@ -38,6 +38,47 @@ const response = await gemini.chat({
 console.log(response);
 ```
 
+### Streaming Example
+
+```javascript
+import { Gemini } from 'viho-llm';
+
+// Initialize Gemini client
+const gemini = Gemini({
+  apiKey: 'your-google-api-key',
+  modelName: 'gemini-pro',
+});
+
+// Send a chat message with streaming
+await gemini.chatWithStreaming(
+  {
+    contents: [
+      {
+        role: 'user',
+        parts: [{ text: 'Write a long story about AI' }],
+      },
+    ],
+  },
+  {
+    beginCallback: () => {
+      console.log('Stream started...');
+    },
+    firstContentCallback: () => {
+      console.log('First chunk received!');
+    },
+    contentCallback: (content) => {
+      process.stdout.write(content); // Print each chunk as it arrives
+    },
+    endCallback: () => {
+      console.log('\nStream ended.');
+    },
+    errorCallback: (error) => {
+      console.error('Error:', error);
+    },
+  },
+);
+```
+
 ## API Reference
 
 ### `Gemini(options)`
@@ -83,6 +124,54 @@ const response = await gemini.chat({
 });
 ```
 
+##### `client.chatWithStreaming(chatOptions, callbackOptions)`
+
+Sends a chat request to the Gemini API with a streaming response.
+
+**Parameters:**
+
+- `chatOptions` (Object)
+  - `contents` (Array) **required** - Array of message objects
+    - `role` (string) - Either 'user' or 'model'
+    - `parts` (Array) - Array of content parts
+      - `text` (string) - The text content
+
+- `callbackOptions` (Object) - Callback functions for handling stream events
+  - `beginCallback` (Function) - Called when the stream begins
+  - `contentCallback` (Function) - Called for each content chunk received
+    - Parameters: `content` (string) - The text chunk
+  - `firstContentCallback` (Function) - Called when the first content chunk is received
+  - `endCallback` (Function) - Called when the stream ends successfully
+  - `errorCallback` (Function) - Called if an error occurs
+    - Parameters: `error` (Error) - The error object
+
+**Returns:**
+
+- (Promise\<void\>) - Resolves when streaming completes
+
+**Example:**
+
+```javascript
+await gemini.chatWithStreaming(
+  {
+    contents: [
+      {
+        role: 'user',
+        parts: [{ text: 'Explain quantum computing' }],
+      },
+    ],
+  },
+  {
+    contentCallback: (chunk) => {
+      console.log('Received:', chunk);
+    },
+    endCallback: () => {
+      console.log('Done!');
+    },
+  },
+);
+```
+
 ## License
 
 MIT
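Worth noting for consumers of this new API: `chatWithStreaming` resolves to `Promise<void>` and delivers text only through callbacks, so a caller that also needs the complete response has to accumulate chunks itself. A minimal sketch of that pattern, using only the callbacks documented above (the prompt text is illustrative):

```javascript
import { Gemini } from 'viho-llm';

const gemini = Gemini({
  apiKey: 'your-google-api-key',
  modelName: 'gemini-pro',
});

// Accumulate streamed chunks into a single string while still
// printing them as they arrive.
let fullText = '';
await gemini.chatWithStreaming(
  {
    contents: [{ role: 'user', parts: [{ text: 'Summarize streaming APIs' }] }],
  },
  {
    contentCallback: (chunk) => {
      fullText += chunk; // collect for later use
      process.stdout.write(chunk); // show progress
    },
    endCallback: () => {
      console.log(`\nTotal length: ${fullText.length} characters`);
    },
    errorCallback: (error) => {
      console.error('Stream failed:', error);
    },
  },
);
```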
package/index.js
CHANGED
@@ -39,12 +39,15 @@ const Gemini = (options) => {
   gemini.chat = async (chatOptions) => {
     return await chat(gemini.client, options.modelName, chatOptions);
   };
-
-  // chat with streaming
   gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
     return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
   };
 
+  // cache
+  gemini.cacheAdd = async (systemPrompt, content) => {
+    return await cacheAdd(gemini.client, options.modelName, systemPrompt, content);
+  };
+
   // r
   return gemini;
 };
@@ -128,4 +131,33 @@ async function chatWithStreaming(client, modelName, chatOptions, callbackOptions
   }
 }
 
+// cache add
+async function cacheAdd(client, modelName, systemPrompt, content) {
+  const methodName = 'Gemini - cacheAdd';
+
+  // check
+  if (!systemPrompt) {
+    logger.info(methodName, 'need systemPrompt');
+    return;
+  }
+  if (!content) {
+    logger.info(methodName, 'need content');
+    return;
+  }
+
+  try {
+    const cache = await client.caches.create({
+      model: modelName,
+      config: {
+        systemInstruction: systemPrompt,
+        contents: genai.createUserContent(content),
+      },
+    });
+
+    return cache;
+  } catch (error) {
+    logger.error(methodName, 'error', error);
+  }
+}
+
 exports.Gemini = Gemini;
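The new `cacheAdd` method added here is not documented in the README yet. Based on the code above, a plausible call could look like the sketch below; the prompt and content strings are illustrative, and the returned value is whatever `client.caches.create` resolves to in `@google/genai` (the function resolves to `undefined` when an argument is missing or the API call throws, since the error is only logged):

```javascript
import { Gemini } from 'viho-llm';

const gemini = Gemini({
  apiKey: 'your-google-api-key',
  modelName: 'gemini-pro',
});

// Create a cached context from a system prompt plus a piece of content.
// Resolves to the object returned by client.caches.create, or undefined
// when an argument is missing or the underlying call fails.
const cache = await gemini.cacheAdd(
  'You are a helpful assistant that answers questions about this document.',
  'Full text of a long document to cache...',
);

if (cache) {
  console.log('Cache created:', cache.name); // name field assumed from @google/genai's cache object
} else {
  console.log('Cache was not created; see the logged reason');
}
```

Note that this CJS build calls `genai.createUserContent(content)` while `src/gemini.js` below imports `createUserContent` directly; presumably `genai` is the module namespace in the compiled output.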
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "viho-llm",
-  "version": "0.1.2",
+  "version": "0.1.3",
   "description": "Utility library for working with Google Gemini AI, providing common tools and helpers for AI interactions",
   "keywords": [
     "llm",
@@ -60,5 +60,5 @@
       }
     }
   },
-  "gitHead": "
+  "gitHead": "89ff0ac8912f04b2c8957dedb05d0c8ad058219f"
 }
package/src/gemini.js
CHANGED
@@ -1,5 +1,5 @@
 // gemini
-import { GoogleGenAI } from '@google/genai';
+import { GoogleGenAI, createUserContent } from '@google/genai';
 
 // Logger
 import { Logger } from 'qiao.log.js';
@@ -38,12 +38,15 @@ export const Gemini = (options) => {
   gemini.chat = async (chatOptions) => {
     return await chat(gemini.client, options.modelName, chatOptions);
   };
-
-  // chat with streaming
   gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
     return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
   };
 
+  // cache
+  gemini.cacheAdd = async (systemPrompt, content) => {
+    return await cacheAdd(gemini.client, options.modelName, systemPrompt, content);
+  };
+
   // r
   return gemini;
 };
@@ -126,3 +129,32 @@ async function chatWithStreaming(client, modelName, chatOptions, callbackOptions
     if (errorCallback) errorCallback(error);
   }
 }
+
+// cache add
+async function cacheAdd(client, modelName, systemPrompt, content) {
+  const methodName = 'Gemini - cacheAdd';
+
+  // check
+  if (!systemPrompt) {
+    logger.info(methodName, 'need systemPrompt');
+    return;
+  }
+  if (!content) {
+    logger.info(methodName, 'need content');
+    return;
+  }
+
+  try {
+    const cache = await client.caches.create({
+      model: modelName,
+      config: {
+        systemInstruction: systemPrompt,
+        contents: createUserContent(content),
+      },
+    });
+
+    return cache;
+  } catch (error) {
+    logger.error(methodName, 'error', error);
+  }
+}
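For context on the changed import: `createUserContent` is a helper exported by `@google/genai` that wraps plain text (or an array of parts) into a user-role `Content` object. A rough sketch of what `cacheAdd` ends up sending, assuming the helper behaves as documented upstream (the model name and strings are illustrative; the live caching API may require a versioned model that supports caching):

```javascript
import { GoogleGenAI, createUserContent } from '@google/genai';

const client = new GoogleGenAI({ apiKey: 'your-google-api-key' });

// createUserContent('...') wraps text into a user-role Content object,
// roughly { role: 'user', parts: [{ text: '...' }] }.
const cache = await client.caches.create({
  model: 'gemini-pro', // illustrative; mirrors the README examples
  config: {
    systemInstruction: 'You are a careful summarizer.',
    contents: createUserContent('A long document to cache...'),
  },
});

console.log(cache.name); // e.g. 'cachedContents/...'
```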