viho-llm 0.1.3 → 0.1.5

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
package/index.js CHANGED
@@ -1,68 +1,37 @@
  'use strict';

  var genai = require('@google/genai');
+ var mime = require('mime-types');
  var qiao_log_js = require('qiao.log.js');

  // gemini
- const logger = qiao_log_js.Logger('viho-llm');
+ const logger$2 = qiao_log_js.Logger('gemini-util.js');

  /**
- * Gemini
- * @param {*} options
+ * chat
+ * @param {*} client
+ * @param {*} modelName
+ * @param {*} chatOptions
  * @returns
  */
- const Gemini = (options) => {
- const methodName = 'Gemini';
+ const chat = async (client, modelName, chatOptions) => {
+ const methodName = 'chat';

  // check
- if (!options) {
- logger.info(methodName, 'need options');
+ if (!client) {
+ logger$2.error(methodName, 'need client');
  return;
  }
- if (!options.apiKey) {
- logger.info(methodName, 'need options.apiKey');
+ if (!modelName) {
+ logger$2.error(methodName, 'need modelName');
  return;
  }
- if (!options.modelName) {
- logger.info(methodName, 'need options.modelName');
- return;
- }
-
- // gemini
- const gemini = {};
- gemini.client = new genai.GoogleGenAI({
- vertexai: true,
- apiKey: options.apiKey,
- });
-
- // chat
- gemini.chat = async (chatOptions) => {
- return await chat(gemini.client, options.modelName, chatOptions);
- };
- gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
- return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
- };
-
- // cache
- gemini.cacheAdd = async (systemPrompt, content) => {
- return await cacheAdd(gemini.client, options.modelName, systemPrompt, content);
- };
-
- // r
- return gemini;
- };
-
- // chat
- async function chat(client, modelName, chatOptions) {
- const methodName = 'Gemini - chat';
-
- // check
  if (!chatOptions) {
- logger.info(methodName, 'need chatOptions');
+ logger$2.error(methodName, 'need chatOptions');
  return;
  }
  if (!chatOptions.contents) {
- logger.info(methodName, 'need chatOptions.contents');
+ logger$2.error(methodName, 'need chatOptions.contents');
  return;
  }

@@ -72,26 +41,46 @@ async function chat(client, modelName, chatOptions) {
  contents: chatOptions.contents,
  });
  if (!response || !response.text) {
- logger.error(methodName, 'invalid response');
+ logger$2.error(methodName, 'invalid response');
  return;
  }

  return response.text;
  } catch (error) {
- logger.error(methodName, 'error', error);
+ logger$2.error(methodName, 'error', error);
  }
- }
+ };

- async function chatWithStreaming(client, modelName, chatOptions, callbackOptions) {
- const methodName = 'Gemini - chatWithStreaming';
+ /**
+ * chatWithStreaming
+ * @param {*} client
+ * @param {*} modelName
+ * @param {*} chatOptions
+ * @param {*} callbackOptions
+ * @returns
+ */
+ const chatWithStreaming = async (client, modelName, chatOptions, callbackOptions) => {
+ const methodName = 'chatWithStreaming';

  // check
+ if (!client) {
+ logger$2.error(methodName, 'need client');
+ return;
+ }
+ if (!modelName) {
+ logger$2.error(methodName, 'need modelName');
+ return;
+ }
  if (!chatOptions) {
- logger.info(methodName, 'need chatOptions');
+ logger$2.error(methodName, 'need chatOptions');
  return;
  }
  if (!chatOptions.contents) {
- logger.info(methodName, 'need chatOptions.contents');
+ logger$2.error(methodName, 'need chatOptions.contents');
+ return;
+ }
+ if (!callbackOptions) {
+ logger$2.error(methodName, 'need callbackOptions');
  return;
  }

@@ -129,35 +118,159 @@ async function chatWithStreaming(client, modelName, chatOptions, callbackOptions
  } catch (error) {
  if (errorCallback) errorCallback(error);
  }
- }
+ };

- // cache add
- async function cacheAdd(client, modelName, systemPrompt, content) {
- const methodName = 'Gemini - cacheAdd';
+ /**
+ * cacheAdd
+ * @param {*} client
+ * @param {*} modelName
+ * @param {*} cacheOptions
+ * @returns
+ */
+ const cacheAdd = async (client, modelName, cacheOptions) => {
+ const methodName = 'cacheAdd';

  // check
- if (!systemPrompt) {
- logger.info(methodName, 'need systemPrompt');
+ if (!cacheOptions) {
+ logger$2.error(methodName, 'need cacheOptions');
+ return;
+ }
+ if (!cacheOptions.gsPath) {
+ logger$2.error(methodName, 'need cacheOptions.gsPath');
+ return;
+ }
+ if (!cacheOptions.systemPrompt) {
+ logger$2.error(methodName, 'need cacheOptions.systemPrompt');
  return;
  }
- if (!content) {
- logger.info(methodName, 'need content');
+ if (!cacheOptions.cacheName) {
+ logger$2.error(methodName, 'need cacheOptions.cacheName');
+ return;
+ }
+ if (!cacheOptions.cacheTTL) {
+ logger$2.error(methodName, 'need cacheOptions.cacheTTL');
  return;
  }

+ // const
+ const mimeType = mime.lookup(cacheOptions.gsPath);
+ logger$2.info(methodName, 'cacheOptions', cacheOptions);
+ logger$2.info(methodName, 'mimeType', mimeType);
+
  try {
+ // cache add
  const cache = await client.caches.create({
  model: modelName,
  config: {
- systemInstruction: systemPrompt,
- contents: genai.createUserContent(content),
+ contents: genai.createUserContent(genai.createPartFromUri(cacheOptions.gsPath, mimeType)),
+ systemInstruction: cacheOptions.systemPrompt,
+ displayName: cacheOptions.cacheName,
+ ttl: cacheOptions.cacheTTL,
  },
  });

  return cache;
  } catch (error) {
- logger.error(methodName, 'error', error);
+ logger$2.error(methodName, 'error', error);
+ }
+ };
+
+ // gemini
+ const logger$1 = qiao_log_js.Logger('gemini-api.js');
+
+ /**
+ * GeminiAPI
+ * @param {*} options
+ * @returns
+ */
+ const GeminiAPI = (options) => {
+ const methodName = 'GeminiAPI';
+
+ // check
+ if (!options) {
+ logger$1.error(methodName, 'need options');
+ return;
  }
- }
+ if (!options.apiKey) {
+ logger$1.error(methodName, 'need options.apiKey');
+ return;
+ }
+ if (!options.modelName) {
+ logger$1.error(methodName, 'need options.modelName');
+ return;
+ }
+
+ // gemini
+ const gemini = {};
+ gemini.client = new genai.GoogleGenAI({
+ apiKey: options.apiKey,
+ });
+
+ // chat
+ gemini.chat = async (chatOptions) => {
+ return await chat(gemini.client, options.modelName, chatOptions);
+ };
+ gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
+ return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
+ };
+
+ // r
+ return gemini;
+ };
+
+ // gemini
+ const logger = qiao_log_js.Logger('viho-llm');
+
+ /**
+ * GeminiVertex
+ * @param {*} options
+ * @returns
+ */
+ const GeminiVertex = (options) => {
+ const methodName = 'GeminiVertex';
+
+ // check
+ if (!options) {
+ logger.error(methodName, 'need options');
+ return;
+ }
+ if (!options.projectId) {
+ logger.error(methodName, 'need options.projectId');
+ return;
+ }
+ if (!options.location) {
+ logger.error(methodName, 'need options.location');
+ return;
+ }
+ if (!options.modelName) {
+ logger.error(methodName, 'need options.modelName');
+ return;
+ }
+
+ // gemini
+ const gemini = {};
+ gemini.client = new genai.GoogleGenAI({
+ vertexai: true,
+ project: options.projectId,
+ location: options.location,
+ });
+
+ // chat
+ gemini.chat = async (chatOptions) => {
+ return await chat(gemini.client, options.modelName, chatOptions);
+ };
+ gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
+ return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
+ };
+
+ // cache
+ gemini.cacheAdd = async (cacheOptions) => {
+ return await cacheAdd(gemini.client, options.modelName, cacheOptions);
+ };
+
+ // r
+ return gemini;
+ };

- exports.Gemini = Gemini;
+ exports.GeminiAPI = GeminiAPI;
+ exports.GeminiVertex = GeminiVertex;
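
The bundled entry point now exports GeminiAPI and GeminiVertex in place of the single Gemini factory, so callers pick the client that matches their authentication mode. A minimal migration sketch based on the exports above (not part of the package); the key, project, location, and model name values are placeholders:

  // 0.1.3: one factory, API key plus a vertexai flag
  // const { Gemini } = require('viho-llm');
  // const gemini = Gemini({ apiKey: 'YOUR_API_KEY', modelName: 'YOUR_MODEL' });

  // 0.1.5: choose the factory that matches the auth mode
  const { GeminiAPI, GeminiVertex } = require('viho-llm');

  // Gemini Developer API, authenticated with an API key
  const apiClient = GeminiAPI({ apiKey: 'YOUR_API_KEY', modelName: 'YOUR_MODEL' });

  // Vertex AI, authenticated via project and location (credentials resolved by @google/genai)
  const vertexClient = GeminiVertex({ projectId: 'YOUR_PROJECT', location: 'YOUR_LOCATION', modelName: 'YOUR_MODEL' });

Both factories return an object exposing chat and chatWithStreaming; cacheAdd is only attached to the Vertex client.
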
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "viho-llm",
- "version": "0.1.3",
+ "version": "0.1.5",
  "description": "Utility library for working with Google Gemini AI, providing common tools and helpers for AI interactions",
  "keywords": [
  "llm",
@@ -41,6 +41,7 @@
  },
  "dependencies": {
  "@google/genai": "^1.34.0",
+ "mime-types": "^2.1.35",
  "qiao.log.js": "^3.7.5"
  },
  "nx": {
@@ -60,5 +61,5 @@
  }
  }
  },
- "gitHead": "89ff0ac8912f04b2c8957dedb05d0c8ad058219f"
+ "gitHead": "b37b523239fe354f991017387e95a6314e994981"
  }
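
The new mime-types dependency backs the mime.lookup() call in cacheAdd, which derives a MIME type from the file extension of cacheOptions.gsPath. A small sketch of that lookup; the paths are placeholders:

  const mime = require('mime-types');
  mime.lookup('gs://my-bucket/docs/report.pdf'); // 'application/pdf'
  mime.lookup('gs://my-bucket/media/clip.mp4');  // 'video/mp4'
  mime.lookup('gs://my-bucket/no-extension');    // false when the extension is unknown
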
package/src/index.js CHANGED
@@ -1 +1,2 @@
- export * from './gemini.js';
+ export * from './models/gemini-api.js';
+ export * from './models/gemini-vertex.js';
package/src/models/gemini-api.js ADDED
@@ -0,0 +1,49 @@
+ // gemini
+ import { GoogleGenAI } from '@google/genai';
+
+ // util
+ import { chat, chatWithStreaming } from './gemini-util.js';
+
+ // Logger
+ import { Logger } from 'qiao.log.js';
+ const logger = Logger('gemini-api.js');
+
+ /**
+ * GeminiAPI
+ * @param {*} options
+ * @returns
+ */
+ export const GeminiAPI = (options) => {
+ const methodName = 'GeminiAPI';
+
+ // check
+ if (!options) {
+ logger.error(methodName, 'need options');
+ return;
+ }
+ if (!options.apiKey) {
+ logger.error(methodName, 'need options.apiKey');
+ return;
+ }
+ if (!options.modelName) {
+ logger.error(methodName, 'need options.modelName');
+ return;
+ }
+
+ // gemini
+ const gemini = {};
+ gemini.client = new GoogleGenAI({
+ apiKey: options.apiKey,
+ });
+
+ // chat
+ gemini.chat = async (chatOptions) => {
+ return await chat(gemini.client, options.modelName, chatOptions);
+ };
+ gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
+ return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
+ };
+
+ // r
+ return gemini;
+ };
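
A hedged usage sketch of the GeminiAPI factory added above, assuming the package is consumed as an ES module and that GEMINI_API_KEY holds a valid key; the model name is a placeholder. chatOptions.contents is forwarded unchanged to @google/genai's generateContent:

  import { GeminiAPI } from 'viho-llm';

  const gemini = GeminiAPI({ apiKey: process.env.GEMINI_API_KEY, modelName: 'YOUR_MODEL' });

  // chat() resolves to response.text, or undefined if validation or the request fails
  const text = await gemini.chat({ contents: 'Summarize this release in one sentence.' });
  console.log(text);
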
package/src/models/gemini-util.js ADDED
@@ -0,0 +1,177 @@
+ // gemini
+ import { createUserContent, createPartFromUri } from '@google/genai';
+
+ // mime
+ import mime from 'mime-types';
+
+ // Logger
+ import { Logger } from 'qiao.log.js';
+ const logger = Logger('gemini-util.js');
+
+ /**
+ * chat
+ * @param {*} client
+ * @param {*} modelName
+ * @param {*} chatOptions
+ * @returns
+ */
+ export const chat = async (client, modelName, chatOptions) => {
+ const methodName = 'chat';
+
+ // check
+ if (!client) {
+ logger.error(methodName, 'need client');
+ return;
+ }
+ if (!modelName) {
+ logger.error(methodName, 'need modelName');
+ return;
+ }
+ if (!chatOptions) {
+ logger.error(methodName, 'need chatOptions');
+ return;
+ }
+ if (!chatOptions.contents) {
+ logger.error(methodName, 'need chatOptions.contents');
+ return;
+ }
+
+ try {
+ const response = await client.models.generateContent({
+ model: modelName,
+ contents: chatOptions.contents,
+ });
+ if (!response || !response.text) {
+ logger.error(methodName, 'invalid response');
+ return;
+ }
+
+ return response.text;
+ } catch (error) {
+ logger.error(methodName, 'error', error);
+ }
+ };
+
+ /**
+ * chatWithStreaming
+ * @param {*} client
+ * @param {*} modelName
+ * @param {*} chatOptions
+ * @param {*} callbackOptions
+ * @returns
+ */
+ export const chatWithStreaming = async (client, modelName, chatOptions, callbackOptions) => {
+ const methodName = 'chatWithStreaming';
+
+ // check
+ if (!client) {
+ logger.error(methodName, 'need client');
+ return;
+ }
+ if (!modelName) {
+ logger.error(methodName, 'need modelName');
+ return;
+ }
+ if (!chatOptions) {
+ logger.error(methodName, 'need chatOptions');
+ return;
+ }
+ if (!chatOptions.contents) {
+ logger.error(methodName, 'need chatOptions.contents');
+ return;
+ }
+ if (!callbackOptions) {
+ logger.error(methodName, 'need callbackOptions');
+ return;
+ }
+
+ // callback
+ const beginCallback = callbackOptions.beginCallback;
+ const endCallback = callbackOptions.endCallback;
+ const errorCallback = callbackOptions.errorCallback;
+ const contentCallback = callbackOptions.contentCallback;
+ const firstContentCallback = callbackOptions.firstContentCallback;
+
+ try {
+ if (beginCallback) beginCallback();
+ const response = await client.models.generateContentStream({
+ model: modelName,
+ contents: chatOptions.contents,
+ });
+
+ // go
+ let firstContent = true;
+ for await (const chunk of response) {
+ // content
+ const content = chunk.text;
+ if (content && contentCallback) {
+ if (firstContent && firstContentCallback) {
+ firstContent = false;
+ firstContentCallback();
+ }
+
+ contentCallback(content);
+ }
+ }
+
+ // end
+ if (endCallback) endCallback();
+ } catch (error) {
+ if (errorCallback) errorCallback(error);
+ }
+ };
+
+ /**
+ * cacheAdd
+ * @param {*} client
+ * @param {*} modelName
+ * @param {*} cacheOptions
+ * @returns
+ */
+ export const cacheAdd = async (client, modelName, cacheOptions) => {
+ const methodName = 'cacheAdd';
+
+ // check
+ if (!cacheOptions) {
+ logger.error(methodName, 'need cacheOptions');
+ return;
+ }
+ if (!cacheOptions.gsPath) {
+ logger.error(methodName, 'need cacheOptions.gsPath');
+ return;
+ }
+ if (!cacheOptions.systemPrompt) {
+ logger.error(methodName, 'need cacheOptions.systemPrompt');
+ return;
+ }
+ if (!cacheOptions.cacheName) {
+ logger.error(methodName, 'need cacheOptions.cacheName');
+ return;
+ }
+ if (!cacheOptions.cacheTTL) {
+ logger.error(methodName, 'need cacheOptions.cacheTTL');
+ return;
+ }
+
+ // const
+ const mimeType = mime.lookup(cacheOptions.gsPath);
+ logger.info(methodName, 'cacheOptions', cacheOptions);
+ logger.info(methodName, 'mimeType', mimeType);
+
+ try {
+ // cache add
+ const cache = await client.caches.create({
+ model: modelName,
+ config: {
+ contents: createUserContent(createPartFromUri(cacheOptions.gsPath, mimeType)),
+ systemInstruction: cacheOptions.systemPrompt,
+ displayName: cacheOptions.cacheName,
+ ttl: cacheOptions.cacheTTL,
+ },
+ });
+
+ return cache;
+ } catch (error) {
+ logger.error(methodName, 'error', error);
+ }
+ };
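
chatWithStreaming above requires a callbackOptions object, but each individual hook is optional; contentCallback receives chunk.text for every streamed chunk that carries text, and firstContentCallback fires once before the first of those is delivered. A sketch of the hook shape, reusing a client created as in the earlier sketches; the prompt is a placeholder:

  await gemini.chatWithStreaming(
    { contents: 'Stream a two-line haiku.' },
    {
      beginCallback: () => console.log('request started'),
      firstContentCallback: () => console.log('first chunk received'),
      contentCallback: (content) => process.stdout.write(content),
      endCallback: () => console.log('\nstream finished'),
      errorCallback: (error) => console.error('stream failed', error),
    },
  );
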
package/src/models/gemini-vertex.js ADDED
@@ -0,0 +1,60 @@
+ // gemini
+ import { GoogleGenAI } from '@google/genai';
+
+ // util
+ import { chat, chatWithStreaming, cacheAdd } from './gemini-util.js';
+
+ // Logger
+ import { Logger } from 'qiao.log.js';
+ const logger = Logger('viho-llm');
+
+ /**
+ * GeminiVertex
+ * @param {*} options
+ * @returns
+ */
+ export const GeminiVertex = (options) => {
+ const methodName = 'GeminiVertex';
+
+ // check
+ if (!options) {
+ logger.error(methodName, 'need options');
+ return;
+ }
+ if (!options.projectId) {
+ logger.error(methodName, 'need options.projectId');
+ return;
+ }
+ if (!options.location) {
+ logger.error(methodName, 'need options.location');
+ return;
+ }
+ if (!options.modelName) {
+ logger.error(methodName, 'need options.modelName');
+ return;
+ }
+
+ // gemini
+ const gemini = {};
+ gemini.client = new GoogleGenAI({
+ vertexai: true,
+ project: options.projectId,
+ location: options.location,
+ });
+
+ // chat
+ gemini.chat = async (chatOptions) => {
+ return await chat(gemini.client, options.modelName, chatOptions);
+ };
+ gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
+ return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
+ };
+
+ // cache
+ gemini.cacheAdd = async (cacheOptions) => {
+ return await cacheAdd(gemini.client, options.modelName, cacheOptions);
+ };
+
+ // r
+ return gemini;
+ };
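
cacheAdd, wired onto the Vertex client above, validates a cacheOptions object and forwards it to client.caches.create. A sketch of that shape; the project, location, model, bucket path, prompt, and TTL values are placeholders:

  import { GeminiVertex } from 'viho-llm';

  const gemini = GeminiVertex({ projectId: 'YOUR_PROJECT', location: 'YOUR_LOCATION', modelName: 'YOUR_MODEL' });

  const cache = await gemini.cacheAdd({
    gsPath: 'gs://my-bucket/docs/handbook.pdf',              // MIME type is looked up from this extension
    systemPrompt: 'Answer only from the attached handbook.',
    cacheName: 'handbook-cache',                             // stored as the cache displayName
    cacheTTL: '3600s',                                       // passed through unchanged as config.ttl
  });
  console.log(cache); // the created cache object, or undefined on error
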
package/src/gemini.js DELETED
@@ -1,160 +0,0 @@
- // gemini
- import { GoogleGenAI, createUserContent } from '@google/genai';
-
- // Logger
- import { Logger } from 'qiao.log.js';
- const logger = Logger('viho-llm');
-
- /**
- * Gemini
- * @param {*} options
- * @returns
- */
- export const Gemini = (options) => {
- const methodName = 'Gemini';
-
- // check
- if (!options) {
- logger.info(methodName, 'need options');
- return;
- }
- if (!options.apiKey) {
- logger.info(methodName, 'need options.apiKey');
- return;
- }
- if (!options.modelName) {
- logger.info(methodName, 'need options.modelName');
- return;
- }
-
- // gemini
- const gemini = {};
- gemini.client = new GoogleGenAI({
- vertexai: true,
- apiKey: options.apiKey,
- });
-
- // chat
- gemini.chat = async (chatOptions) => {
- return await chat(gemini.client, options.modelName, chatOptions);
- };
- gemini.chatWithStreaming = async (chatOptions, callbackOptions) => {
- return await chatWithStreaming(gemini.client, options.modelName, chatOptions, callbackOptions);
- };
-
- // cache
- gemini.cacheAdd = async (systemPrompt, content) => {
- return await cacheAdd(gemini.client, options.modelName, systemPrompt, content);
- };
-
- // r
- return gemini;
- };
-
- // chat
- async function chat(client, modelName, chatOptions) {
- const methodName = 'Gemini - chat';
-
- // check
- if (!chatOptions) {
- logger.info(methodName, 'need chatOptions');
- return;
- }
- if (!chatOptions.contents) {
- logger.info(methodName, 'need chatOptions.contents');
- return;
- }
-
- try {
- const response = await client.models.generateContent({
- model: modelName,
- contents: chatOptions.contents,
- });
- if (!response || !response.text) {
- logger.error(methodName, 'invalid response');
- return;
- }
-
- return response.text;
- } catch (error) {
- logger.error(methodName, 'error', error);
- }
- }
-
- async function chatWithStreaming(client, modelName, chatOptions, callbackOptions) {
- const methodName = 'Gemini - chatWithStreaming';
-
- // check
- if (!chatOptions) {
- logger.info(methodName, 'need chatOptions');
- return;
- }
- if (!chatOptions.contents) {
- logger.info(methodName, 'need chatOptions.contents');
- return;
- }
-
- // callback
- const beginCallback = callbackOptions.beginCallback;
- const endCallback = callbackOptions.endCallback;
- const errorCallback = callbackOptions.errorCallback;
- const contentCallback = callbackOptions.contentCallback;
- const firstContentCallback = callbackOptions.firstContentCallback;
-
- try {
- if (beginCallback) beginCallback();
- const response = await client.models.generateContentStream({
- model: modelName,
- contents: chatOptions.contents,
- });
-
- // go
- let firstContent = true;
- for await (const chunk of response) {
- // content
- const content = chunk.text;
- if (content && contentCallback) {
- if (firstContent && firstContentCallback) {
- firstContent = false;
- firstContentCallback();
- }
-
- contentCallback(content);
- }
- }
-
- // end
- if (endCallback) endCallback();
- } catch (error) {
- if (errorCallback) errorCallback(error);
- }
- }
-
- // cache add
- async function cacheAdd(client, modelName, systemPrompt, content) {
- const methodName = 'Gemini - cacheAdd';
-
- // check
- if (!systemPrompt) {
- logger.info(methodName, 'need systemPrompt');
- return;
- }
- if (!content) {
- logger.info(methodName, 'need content');
- return;
- }
-
- try {
- const cache = await client.caches.create({
- model: modelName,
- config: {
- systemInstruction: systemPrompt,
- contents: createUserContent(content),
- },
- });
-
- return cache;
- } catch (error) {
- logger.error(methodName, 'error', error);
- }
- }