viho-llm 0.1.2 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -38,6 +38,47 @@ const response = await gemini.chat({
  console.log(response);
  ```

+ ### Streaming Example
+
+ ```javascript
+ import { Gemini } from 'viho-llm';
+
+ // Initialize Gemini client
+ const gemini = Gemini({
+   apiKey: 'your-google-api-key',
+   modelName: 'gemini-pro',
+ });
+
+ // Send a chat message with streaming
+ await gemini.chatWithStreaming(
+   {
+     contents: [
+       {
+         role: 'user',
+         parts: [{ text: 'Write a long story about AI' }],
+       },
+     ],
+   },
+   {
+     beginCallback: () => {
+       console.log('Stream started...');
+     },
+     firstContentCallback: () => {
+       console.log('First chunk received!');
+     },
+     contentCallback: (content) => {
+       process.stdout.write(content); // Print each chunk as it arrives
+     },
+     endCallback: () => {
+       console.log('\nStream ended.');
+     },
+     errorCallback: (error) => {
+       console.error('Error:', error);
+     },
+   },
+ );
+ ```
+
  ## API Reference

  ### `Gemini(options)`
@@ -83,6 +124,54 @@ const response = await gemini.chat({
  });
  ```

+ ##### `client.chatWithStreaming(chatOptions, callbackOptions)`
+
+ Sends a chat request to the Gemini API with a streaming response.
+
+ **Parameters:**
+
+ - `chatOptions` (Object)
+   - `contents` (Array) **required** - Array of message objects
+     - `role` (string) - Either 'user' or 'model'
+     - `parts` (Array) - Array of content parts
+       - `text` (string) - The text content
+
+ - `callbackOptions` (Object) - Callback functions for handling stream events
+   - `beginCallback` (Function) - Called when the stream begins
+   - `contentCallback` (Function) - Called for each content chunk received
+     - Parameters: `content` (string) - The text chunk
+   - `firstContentCallback` (Function) - Called when the first content chunk is received
+   - `endCallback` (Function) - Called when the stream ends successfully
+   - `errorCallback` (Function) - Called if an error occurs
+     - Parameters: `error` (Error) - The error object
+
+ **Returns:**
+
+ - (Promise\<void\>) - Resolves when streaming completes
+
+ **Example:**
+
+ ```javascript
+ await gemini.chatWithStreaming(
+   {
+     contents: [
+       {
+         role: 'user',
+         parts: [{ text: 'Explain quantum computing' }],
+       },
+     ],
+   },
+   {
+     contentCallback: (chunk) => {
+       console.log('Received:', chunk);
+     },
+     endCallback: () => {
+       console.log('Done!');
+     },
+   },
+ );
+ ```
+
  ## License

  MIT
package/index.js CHANGED
@@ -1,6 +1,7 @@
  'use strict';

  var genai = require('@google/genai');
+ var mime = require('mime-types');
  var qiao_log_js = require('qiao.log.js');

  // gemini
@@ -16,15 +17,15 @@ const Gemini = (options) => {

    // check
    if (!options) {
-     logger.info(methodName, 'need options');
+     logger.error(methodName, 'need options');
      return;
    }
    if (!options.apiKey) {
-     logger.info(methodName, 'need options.apiKey');
+     logger.error(methodName, 'need options.apiKey');
      return;
    }
    if (!options.modelName) {
-     logger.info(methodName, 'need options.modelName');
+     logger.error(methodName, 'need options.modelName');
      return;
    }

@@ -39,12 +40,15 @@ const Gemini = (options) => {
    gemini.chat = async (chatOptions) => {
      return await chat(gemini.client, options.modelName, chatOptions);
    };
-
-   // chat with streaming
    gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
      return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
    };

+   // cache
+   gemini.cacheAdd = async (cacheOptions) => {
+     return await cacheAdd(gemini.client, options.modelName, cacheOptions);
+   };
+
    // r
    return gemini;
  };
@@ -55,11 +59,11 @@ async function chat(client, modelName, chatOptions) {

    // check
    if (!chatOptions) {
-     logger.info(methodName, 'need chatOptions');
+     logger.error(methodName, 'need chatOptions');
      return;
    }
    if (!chatOptions.contents) {
-     logger.info(methodName, 'need chatOptions.contents');
+     logger.error(methodName, 'need chatOptions.contents');
      return;
    }

@@ -84,11 +88,11 @@ async function chatWithStreaming(client, modelName, chatOptions, callbackOptions

    // check
    if (!chatOptions) {
-     logger.info(methodName, 'need chatOptions');
+     logger.error(methodName, 'need chatOptions');
      return;
    }
    if (!chatOptions.contents) {
-     logger.info(methodName, 'need chatOptions.contents');
+     logger.error(methodName, 'need chatOptions.contents');
      return;
    }

@@ -128,4 +132,60 @@ async function chatWithStreaming(client, modelName, chatOptions, callbackOptions
    }
  }

+ // cache add
+ async function cacheAdd(client, modelName, cacheOptions) {
+   const methodName = 'Gemini - cacheAdd';
+
+   // check
+   if (!cacheOptions) {
+     logger.error(methodName, 'need cacheOptions');
+     return;
+   }
+   if (!cacheOptions.filePath) {
+     logger.error(methodName, 'need cacheOptions.filePath');
+     return;
+   }
+   if (!cacheOptions.systemPrompt) {
+     logger.error(methodName, 'need cacheOptions.systemPrompt');
+     return;
+   }
+   if (!cacheOptions.cacheName) {
+     logger.error(methodName, 'need cacheOptions.cacheName');
+     return;
+   }
+   if (!cacheOptions.cacheTTL) {
+     logger.error(methodName, 'need cacheOptions.cacheTTL');
+     return;
+   }
+
+   // const
+   const mimeType = mime.lookup(cacheOptions.filePath);
+   logger.info(methodName, 'cacheOptions', cacheOptions);
+   logger.info(methodName, 'mimeType', mimeType);
+
+   try {
+     // upload doc
+     const doc = await client.files.upload({
+       file: cacheOptions.filePath,
+       config: { mimeType: mimeType },
+     });
+     logger.info(methodName, 'doc.name', doc.name);
+
+     // cache add
+     const cache = await client.caches.create({
+       model: modelName,
+       config: {
+         contents: genai.createUserContent(genai.createPartFromUri(doc.uri, doc.mimeType)),
+         systemInstruction: cacheOptions.systemPrompt,
+         displayName: cacheOptions.cacheName,
+         ttl: cacheOptions.cacheTTL,
+       },
+     });
+
+     return cache;
+   } catch (error) {
+     logger.error(methodName, 'error', error);
+   }
+ }
+
  exports.Gemini = Gemini;
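
Note: the new `cacheAdd` helper resolves the upload MIME type from the file extension via the newly added `mime-types` dependency. A quick sketch of how that lookup behaves, using arbitrary file names (this snippet is illustrative and not part of the package):

```javascript
// Illustrative only: how mime-types resolves a MIME type from a path.
const mime = require('mime-types');

console.log(mime.lookup('handbook.pdf')); // 'application/pdf'
console.log(mime.lookup('notes.txt')); // 'text/plain'
console.log(mime.lookup('data.withoutKnownExt')); // false (no known type for this extension)
```

If `lookup` returns `false`, `cacheAdd` still passes that value through as `config.mimeType` on the upload, so callers should prefer file paths with recognizable extensions.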
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "viho-llm",
-   "version": "0.1.2",
+   "version": "0.1.4",
    "description": "Utility library for working with Google Gemini AI, providing common tools and helpers for AI interactions",
    "keywords": [
      "llm",
@@ -41,6 +41,7 @@
    },
    "dependencies": {
      "@google/genai": "^1.34.0",
+     "mime-types": "^2.1.35",
      "qiao.log.js": "^3.7.5"
    },
    "nx": {
@@ -60,5 +61,5 @@
        }
      }
    },
-   "gitHead": "ddc9883c92c6b3bb4a0521819c79b389b438967f"
+   "gitHead": "38e28266271d8b64e56c5f6d27deca78ce8cdb1b"
  }
package/src/gemini.js CHANGED
@@ -1,5 +1,8 @@
  // gemini
- import { GoogleGenAI } from '@google/genai';
+ import { GoogleGenAI, createUserContent, createPartFromUri } from '@google/genai';
+
+ // mime
+ import mime from 'mime-types';

  // Logger
  import { Logger } from 'qiao.log.js';
@@ -15,15 +18,15 @@ export const Gemini = (options) => {

    // check
    if (!options) {
-     logger.info(methodName, 'need options');
+     logger.error(methodName, 'need options');
      return;
    }
    if (!options.apiKey) {
-     logger.info(methodName, 'need options.apiKey');
+     logger.error(methodName, 'need options.apiKey');
      return;
    }
    if (!options.modelName) {
-     logger.info(methodName, 'need options.modelName');
+     logger.error(methodName, 'need options.modelName');
      return;
    }

@@ -38,12 +41,15 @@ export const Gemini = (options) => {
    gemini.chat = async (chatOptions) => {
      return await chat(gemini.client, options.modelName, chatOptions);
    };
-
-   // chat with streaming
    gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
      return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
    };

+   // cache
+   gemini.cacheAdd = async (cacheOptions) => {
+     return await cacheAdd(gemini.client, options.modelName, cacheOptions);
+   };
+
    // r
    return gemini;
  };
@@ -54,11 +60,11 @@ async function chat(client, modelName, chatOptions) {

    // check
    if (!chatOptions) {
-     logger.info(methodName, 'need chatOptions');
+     logger.error(methodName, 'need chatOptions');
      return;
    }
    if (!chatOptions.contents) {
-     logger.info(methodName, 'need chatOptions.contents');
+     logger.error(methodName, 'need chatOptions.contents');
      return;
    }

@@ -83,11 +89,11 @@ async function chatWithStreaming(client, modelName, chatOptions, callbackOptions

    // check
    if (!chatOptions) {
-     logger.info(methodName, 'need chatOptions');
+     logger.error(methodName, 'need chatOptions');
      return;
    }
    if (!chatOptions.contents) {
-     logger.info(methodName, 'need chatOptions.contents');
+     logger.error(methodName, 'need chatOptions.contents');
      return;
    }

@@ -126,3 +132,59 @@ async function chatWithStreaming(client, modelName, chatOptions, callbackOptions
      if (errorCallback) errorCallback(error);
    }
  }
+
+ // cache add
+ async function cacheAdd(client, modelName, cacheOptions) {
+   const methodName = 'Gemini - cacheAdd';
+
+   // check
+   if (!cacheOptions) {
+     logger.error(methodName, 'need cacheOptions');
+     return;
+   }
+   if (!cacheOptions.filePath) {
+     logger.error(methodName, 'need cacheOptions.filePath');
+     return;
+   }
+   if (!cacheOptions.systemPrompt) {
+     logger.error(methodName, 'need cacheOptions.systemPrompt');
+     return;
+   }
+   if (!cacheOptions.cacheName) {
+     logger.error(methodName, 'need cacheOptions.cacheName');
+     return;
+   }
+   if (!cacheOptions.cacheTTL) {
+     logger.error(methodName, 'need cacheOptions.cacheTTL');
+     return;
+   }
+
+   // const
+   const mimeType = mime.lookup(cacheOptions.filePath);
+   logger.info(methodName, 'cacheOptions', cacheOptions);
+   logger.info(methodName, 'mimeType', mimeType);
+
+   try {
+     // upload doc
+     const doc = await client.files.upload({
+       file: cacheOptions.filePath,
+       config: { mimeType: mimeType },
+     });
+     logger.info(methodName, 'doc.name', doc.name);
+
+     // cache add
+     const cache = await client.caches.create({
+       model: modelName,
+       config: {
+         contents: createUserContent(createPartFromUri(doc.uri, doc.mimeType)),
+         systemInstruction: cacheOptions.systemPrompt,
+         displayName: cacheOptions.cacheName,
+         ttl: cacheOptions.cacheTTL,
+       },
+     });
+
+     return cache;
+   } catch (error) {
+     logger.error(methodName, 'error', error);
+   }
+ }
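
Neither version of the README documents the new `cacheAdd` method. Based on the parameter checks and the `files.upload` / `caches.create` calls shown above, a minimal usage sketch might look like the following; the file path, prompt, cache name, and the `'300s'` TTL are placeholder assumptions rather than values from the package (the Gemini API typically expresses cache TTLs as duration strings such as `'300s'`):

```javascript
import { Gemini } from 'viho-llm';

// Initialize the client as in the README examples
const gemini = Gemini({
  apiKey: 'your-google-api-key',
  modelName: 'gemini-pro',
});

// Upload a local file and create a context cache from it.
// All option values below are placeholders.
const cache = await gemini.cacheAdd({
  filePath: './docs/handbook.pdf', // MIME type is resolved from the extension
  systemPrompt: 'Answer questions using only the attached handbook.',
  cacheName: 'handbook-cache',
  cacheTTL: '300s', // passed through to caches.create as `ttl`
});

// cacheAdd returns the created cache object, or undefined if validation or the API call failed
if (cache) console.log(cache.name);
```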