viho-llm 0.1.1 → 0.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -38,6 +38,47 @@ const response = await gemini.chat({
38
38
  console.log(response);
39
39
  ```
40
40
 
41
+ ### Streaming Example
42
+
43
+ ```javascript
44
+ import { Gemini } from 'viho-llm';
45
+
46
+ // Initialize Gemini client
47
+ const gemini = Gemini({
48
+ apiKey: 'your-google-api-key',
49
+ modelName: 'gemini-pro',
50
+ });
51
+
52
+ // Send a chat message with streaming
53
+ await gemini.chatWithStreaming(
54
+ {
55
+ contents: [
56
+ {
57
+ role: 'user',
58
+ parts: [{ text: 'Write a long story about AI' }],
59
+ },
60
+ ],
61
+ },
62
+ {
63
+ beginCallback: () => {
64
+ console.log('Stream started...');
65
+ },
66
+ firstContentCallback: () => {
67
+ console.log('First chunk received!');
68
+ },
69
+ contentCallback: (content) => {
70
+ process.stdout.write(content); // Print each chunk as it arrives
71
+ },
72
+ endCallback: () => {
73
+ console.log('\nStream ended.');
74
+ },
75
+ errorCallback: (error) => {
76
+ console.error('Error:', error);
77
+ },
78
+ },
79
+ );
80
+ ```
81
+
41
82
  ## API Reference
42
83
 
43
84
  ### `Gemini(options)`
@@ -83,6 +124,54 @@ const response = await gemini.chat({
83
124
  });
84
125
  ```
85
126
 
127
+ ##### `client.chatWithStreaming(chatOptions, callbackOptions)`
128
+
129
+ Sends a chat request to the Gemini API and streams the response incrementally via callbacks.
130
+
131
+ **Parameters:**
132
+
133
+ - `chatOptions` (Object)
134
+ - `contents` (Array) **required** - Array of message objects
135
+ - `role` (string) - Either 'user' or 'model'
136
+ - `parts` (Array) - Array of content parts
137
+ - `text` (string) - The text content
138
+
139
+ - `callbackOptions` (Object) **required** - Callback functions for handling stream events (must be passed, even if empty)
140
+ - `beginCallback` (Function) - Called once, just before the streaming request is sent
141
+ - `contentCallback` (Function) - Called for each content chunk received
142
+ - Parameters: `content` (string) - The text chunk
143
+ - `firstContentCallback` (Function) - Called once, immediately before the first chunk is passed to `contentCallback` (only fires when `contentCallback` is also provided)
144
+ - `endCallback` (Function) - Called when the stream ends successfully
145
+ - `errorCallback` (Function) - Called if an error occurs
146
+ - Parameters: `error` (Error) - The error object
147
+
148
+ **Returns:**
149
+
150
+ - (Promise\<void\>) - Resolves when streaming completes
151
+
152
+ **Example:**
153
+
154
+ ```javascript
155
+ await gemini.chatWithStreaming(
156
+ {
157
+ contents: [
158
+ {
159
+ role: 'user',
160
+ parts: [{ text: 'Explain quantum computing' }],
161
+ },
162
+ ],
163
+ },
164
+ {
165
+ contentCallback: (chunk) => {
166
+ console.log('Received:', chunk);
167
+ },
168
+ endCallback: () => {
169
+ console.log('Done!');
170
+ },
171
+ },
172
+ );
173
+ ```
174
+
86
175
  ## License
87
176
 
88
177
  MIT
package/index.js CHANGED
@@ -39,6 +39,14 @@ const Gemini = (options) => {
39
39
  gemini.chat = async (chatOptions) => {
40
40
  return await chat(gemini.client, options.modelName, chatOptions);
41
41
  };
42
+ gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
43
+ return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
44
+ };
45
+
46
+ // cache
47
+ gemini.cacheAdd = async (systemPrompt, content) => {
48
+ return await cacheAdd(gemini.client, options.modelName, systemPrompt, content);
49
+ };
42
50
 
43
51
  // r
44
52
  return gemini;
@@ -74,4 +82,82 @@ async function chat(client, modelName, chatOptions) {
74
82
  }
75
83
  }
76
84
 
85
+ async function chatWithStreaming(client, modelName, chatOptions, callbackOptions) {
86
+ const methodName = 'Gemini - chatWithStreaming';
87
+
88
+ // check
89
+ if (!chatOptions) {
90
+ logger.info(methodName, 'need chatOptions');
91
+ return;
92
+ }
93
+ if (!chatOptions.contents) {
94
+ logger.info(methodName, 'need chatOptions.contents');
95
+ return;
96
+ }
97
+
98
+ // callback
99
+ const beginCallback = callbackOptions.beginCallback;
100
+ const endCallback = callbackOptions.endCallback;
101
+ const errorCallback = callbackOptions.errorCallback;
102
+ const contentCallback = callbackOptions.contentCallback;
103
+ const firstContentCallback = callbackOptions.firstContentCallback;
104
+
105
+ try {
106
+ if (beginCallback) beginCallback();
107
+ const response = await client.models.generateContentStream({
108
+ model: modelName,
109
+ contents: chatOptions.contents,
110
+ });
111
+
112
+ // go
113
+ let firstContent = true;
114
+ for await (const chunk of response) {
115
+ // content
116
+ const content = chunk.text;
117
+ if (content && contentCallback) {
118
+ if (firstContent && firstContentCallback) {
119
+ firstContent = false;
120
+ firstContentCallback();
121
+ }
122
+
123
+ contentCallback(content);
124
+ }
125
+ }
126
+
127
+ // end
128
+ if (endCallback) endCallback();
129
+ } catch (error) {
130
+ if (errorCallback) errorCallback(error);
131
+ }
132
+ }
133
+
134
+ // cache add
135
+ async function cacheAdd(client, modelName, systemPrompt, content) {
136
+ const methodName = 'Gemini - cacheAdd';
137
+
138
+ // check
139
+ if (!systemPrompt) {
140
+ logger.info(methodName, 'need systemPrompt');
141
+ return;
142
+ }
143
+ if (!content) {
144
+ logger.info(methodName, 'need content');
145
+ return;
146
+ }
147
+
148
+ try {
149
+ const cache = await client.caches.create({
150
+ model: modelName,
151
+ config: {
152
+ systemInstruction: systemPrompt,
153
+ contents: genai.createUserContent(content),
154
+ },
155
+ });
156
+
157
+ return cache;
158
+ } catch (error) {
159
+ logger.error(methodName, 'error', error);
160
+ }
161
+ }
162
+
77
163
  exports.Gemini = Gemini;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "viho-llm",
3
- "version": "0.1.1",
3
+ "version": "0.1.3",
4
4
  "description": "Utility library for working with Google Gemini AI, providing common tools and helpers for AI interactions",
5
5
  "keywords": [
6
6
  "llm",
@@ -60,5 +60,5 @@
60
60
  }
61
61
  }
62
62
  },
63
- "gitHead": "30ba092c8b2ab0e52894506eb93fdd77a26e4e36"
63
+ "gitHead": "89ff0ac8912f04b2c8957dedb05d0c8ad058219f"
64
64
  }
package/src/gemini.js CHANGED
@@ -1,5 +1,5 @@
1
1
  // gemini
2
- import { GoogleGenAI } from '@google/genai';
2
+ import { GoogleGenAI, createUserContent } from '@google/genai';
3
3
 
4
4
  // Logger
5
5
  import { Logger } from 'qiao.log.js';
@@ -38,6 +38,14 @@ export const Gemini = (options) => {
38
38
  gemini.chat = async (chatOptions) => {
39
39
  return await chat(gemini.client, options.modelName, chatOptions);
40
40
  };
41
+ gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
42
+ return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
43
+ };
44
+
45
+ // cache
46
+ gemini.cacheAdd = async (systemPrompt, content) => {
47
+ return await cacheAdd(gemini.client, options.modelName, systemPrompt, content);
48
+ };
41
49
 
42
50
  // r
43
51
  return gemini;
@@ -72,3 +80,81 @@ async function chat(client, modelName, chatOptions) {
72
80
  logger.error(methodName, 'error', error);
73
81
  }
74
82
  }
83
+
84
+ async function chatWithStreaming(client, modelName, chatOptions, callbackOptions) {
85
+ const methodName = 'Gemini - chatWithStreaming';
86
+
87
+ // check
88
+ if (!chatOptions) {
89
+ logger.info(methodName, 'need chatOptions');
90
+ return;
91
+ }
92
+ if (!chatOptions.contents) {
93
+ logger.info(methodName, 'need chatOptions.contents');
94
+ return;
95
+ }
96
+
97
+ // callback
98
+ const beginCallback = callbackOptions.beginCallback;
99
+ const endCallback = callbackOptions.endCallback;
100
+ const errorCallback = callbackOptions.errorCallback;
101
+ const contentCallback = callbackOptions.contentCallback;
102
+ const firstContentCallback = callbackOptions.firstContentCallback;
103
+
104
+ try {
105
+ if (beginCallback) beginCallback();
106
+ const response = await client.models.generateContentStream({
107
+ model: modelName,
108
+ contents: chatOptions.contents,
109
+ });
110
+
111
+ // go
112
+ let firstContent = true;
113
+ for await (const chunk of response) {
114
+ // content
115
+ const content = chunk.text;
116
+ if (content && contentCallback) {
117
+ if (firstContent && firstContentCallback) {
118
+ firstContent = false;
119
+ firstContentCallback();
120
+ }
121
+
122
+ contentCallback(content);
123
+ }
124
+ }
125
+
126
+ // end
127
+ if (endCallback) endCallback();
128
+ } catch (error) {
129
+ if (errorCallback) errorCallback(error);
130
+ }
131
+ }
132
+
133
+ // cache add
134
+ async function cacheAdd(client, modelName, systemPrompt, content) {
135
+ const methodName = 'Gemini - cacheAdd';
136
+
137
+ // check
138
+ if (!systemPrompt) {
139
+ logger.info(methodName, 'need systemPrompt');
140
+ return;
141
+ }
142
+ if (!content) {
143
+ logger.info(methodName, 'need content');
144
+ return;
145
+ }
146
+
147
+ try {
148
+ const cache = await client.caches.create({
149
+ model: modelName,
150
+ config: {
151
+ systemInstruction: systemPrompt,
152
+ contents: createUserContent(content),
153
+ },
154
+ });
155
+
156
+ return cache;
157
+ } catch (error) {
158
+ logger.error(methodName, 'error', error);
159
+ }
160
+ }