@aj-archipelago/cortex 1.1.18 → 1.1.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -9,6 +9,7 @@
9
9
  },
10
10
  "dependencies": {
11
11
  "@azure/storage-blob": "^12.13.0",
12
+ "@google-cloud/storage": "^7.10.0",
12
13
  "axios": "^1.3.6",
13
14
  "busboy": "^1.6.0",
14
15
  "cors": "^2.8.5",
@@ -27,6 +27,106 @@ const publishRequestProgress = async (data) => {
27
27
  }
28
28
  };
29
29
 
30
// Fetch every entry in the "FileStoreMap" Redis hash, JSON-decoding each value.
// Values that fail to parse are kept in their raw string form.
// Returns an empty object when the hash cannot be read at all.
const getAllFileStoreMap = async () => {
    try {
        const entries = await client.hgetall("FileStoreMap");
        for (const [key, raw] of Object.entries(entries)) {
            try {
                entries[key] = JSON.parse(raw);
            } catch (error) {
                // Parsing failed: keep the raw string so the caller still sees the entry.
                console.error(`Error parsing JSON for key ${key}: ${error}`);
            }
        }
        return entries;
    } catch (error) {
        console.error(`Error getting all key-value pairs from FileStoreMap: ${error}`);
        return {}; // empty map signals the read failed
    }
};
50
+
51
// Store `value` under `key` in the "FileStoreMap" Redis hash, stamping it with
// the current ISO-8601 time so stale entries can be expired later.
// Fix: the original mutated the caller's `value` object in place when adding
// the timestamp; serialize a timestamped copy instead.
const setFileStoreMap = async (key, value) => {
    try {
        // Copy before stamping so the caller's object is left untouched.
        const record = { ...value, timestamp: new Date().toISOString() };
        await client.hset("FileStoreMap", key, JSON.stringify(record));
    } catch (error) {
        console.error(`Error setting key in FileStoreMap: ${error}`);
    }
};
60
+
61
// Look up `key` in the "FileStoreMap" Redis hash.
// Returns the JSON-parsed object, the raw string when the stored value is not
// valid JSON, or null when the key is missing or the lookup fails.
const getFileStoreMap = async (key) => {
    try {
        const raw = await client.hget("FileStoreMap", key);
        if (!raw) {
            return raw;
        }
        try {
            return JSON.parse(raw);
        } catch (error) {
            console.error(`Error parsing JSON: ${error}`);
            return raw; // not JSON — hand back the stored string as-is
        }
    } catch (error) {
        console.error(`Error getting key from FileStoreMap: ${error}`);
        return null; // Return null or any default value indicating an error occurred
    }
};
79
+
80
// Delete `key` from the "FileStoreMap" Redis hash, logging whether it existed.
const removeFromFileStoreMap = async (key) => {
    try {
        // hdel reports how many fields were removed; 0 means the key wasn't there.
        const removedCount = await client.hdel("FileStoreMap", key);
        if (removedCount === 0) {
            console.log(`The key ${key} does not exist`);
        } else {
            console.log(`The key ${key} was removed successfully`);
        }
    } catch (error) {
        console.error(`Error removing key from FileStoreMap: ${error}`);
    }
};
95
+
96
// Sweep the "FileStoreMap" Redis hash and remove entries older than `nDays`.
// Returns the removed entries, each augmented with its hash key, or an empty
// array when nothing was removed or the sweep failed.
// Fixes: the original `return` inside `finally` suppressed any rethrow
// (no-unsafe-finally), and `if(!fileStoreMap)` could never fire because
// getAllFileStoreMap returns a (truthy) empty object.
const cleanupRedisFileStoreMap = async (nDays = 1) => {
    const cleaned = [];
    try {
        const fileStoreMap = await getAllFileStoreMap();

        // getAllFileStoreMap yields {} on error or when empty — check for entries.
        if (!fileStoreMap || Object.keys(fileStoreMap).length === 0) {
            console.log("FileStoreMap is empty");
            return cleaned;
        }

        const now = new Date(); // hoisted: one reference time for the whole sweep
        for (const [key, value] of Object.entries(fileStoreMap)) {
            // Age in whole days. Math.ceil rounds partial days up and Math.abs
            // treats future timestamps as old — both preserved from the original
            // expiry behavior.
            const timestamp = new Date(value.timestamp);
            const diffTime = Math.abs(now - timestamp);
            const diffDays = Math.ceil(diffTime / (1000 * 60 * 60 * 24));
            if (diffDays > nDays) {
                await removeFromFileStoreMap(key);
                console.log(`Removed key ${key} from FileStoreMap`);
                cleaned.push({ hash: key, ...value });
            }
        }
    } catch (error) {
        // Log and fall through: callers always get an array, never a throw.
        console.error(`Error cleaning FileStoreMap: ${error}`);
    }
    return cleaned;
};
128
+
129
+
30
130
  export {
31
- publishRequestProgress, connectClient
32
- }
131
+ publishRequestProgress, connectClient, setFileStoreMap, getFileStoreMap, removeFromFileStoreMap, cleanupRedisFileStoreMap
132
+ };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@aj-archipelago/cortex",
3
- "version": "1.1.18",
3
+ "version": "1.1.19",
4
4
  "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
5
5
  "private": false,
6
6
  "repository": {
@@ -21,7 +21,7 @@ export default {
21
21
  const originalTargetLength = args.targetLength;
22
22
 
23
23
  // If targetLength is not provided, execute the prompt once and return the result.
24
- if (originalTargetLength === 0 || originalTargetLength === null) {
24
+ if (!originalTargetLength) {
25
25
  let pathwayResolver = new PathwayResolver({ config, pathway, args });
26
26
  return await pathwayResolver.resolve(args);
27
27
  }
@@ -19,6 +19,8 @@ import OpenAIDallE3Plugin from './plugins/openAiDallE3Plugin.js';
19
19
  import OpenAIVisionPlugin from './plugins/openAiVisionPlugin.js';
20
20
  import GeminiChatPlugin from './plugins/geminiChatPlugin.js';
21
21
  import GeminiVisionPlugin from './plugins/geminiVisionPlugin.js';
22
+ import Gemini15ChatPlugin from './plugins/gemini15ChatPlugin.js';
23
+ import Gemini15VisionPlugin from './plugins/gemini15VisionPlugin.js';
22
24
  import AzureBingPlugin from './plugins/azureBingPlugin.js';
23
25
  import Claude3VertexPlugin from './plugins/claude3VertexPlugin.js';
24
26
 
@@ -82,6 +84,12 @@ class ModelExecutor {
82
84
  case 'GEMINI-VISION':
83
85
  plugin = new GeminiVisionPlugin(pathway, model);
84
86
  break;
87
+ case 'GEMINI-1.5-CHAT':
88
+ plugin = new Gemini15ChatPlugin(pathway, model);
89
+ break;
90
+ case 'GEMINI-1.5-VISION':
91
+ plugin = new Gemini15VisionPlugin(pathway, model);
92
+ break;
85
93
  case 'AZURE-BING':
86
94
  plugin = new AzureBingPlugin(pathway, model);
87
95
  break;
@@ -226,6 +226,7 @@ class PathwayResolver {
226
226
  }
227
227
 
228
228
  logger.warn(`Bad pathway result - retrying pathway. Attempt ${retries + 1} of ${MAX_RETRIES}`);
229
+ this.savedContext = JSON.parse(savedContextStr);
229
230
  }
230
231
 
231
232
  // Update saved context if it has changed, generating a new contextId if necessary
@@ -34,7 +34,7 @@ class AzureCognitivePlugin extends ModelPlugin {
34
34
  async getRequestParameters(text, parameters, prompt, mode, indexName, savedContextId, cortexRequest) {
35
35
  const combinedParameters = { ...this.promptParameters, ...parameters };
36
36
  const { modelPromptText } = this.getCompiledPrompt(text, combinedParameters, prompt);
37
- const { inputVector, calculateInputVector, privateData, filter, docId } = combinedParameters;
37
+ const { inputVector, calculateInputVector, privateData, filter, docId, title, chunkNo } = combinedParameters;
38
38
  const data = {};
39
39
 
40
40
  if (mode == 'delete') {
@@ -82,6 +82,15 @@ class AzureCognitivePlugin extends ModelPlugin {
82
82
  doc.contentVector = inputVector ? inputVector : await this.getInputVector(text);
83
83
  }
84
84
 
85
+
86
+ if(title){
87
+ doc.title = title;
88
+ }
89
+
90
+ if(chunkNo!=null){
91
+ doc.chunkNo = chunkNo;
92
+ }
93
+
85
94
  if(!privateData){ //if public, remove owner
86
95
  delete doc.owner;
87
96
  }
@@ -183,7 +192,22 @@ class AzureCognitivePlugin extends ModelPlugin {
183
192
  const chunkTokenLength = this.promptParameters.inputChunkSize || 1000;
184
193
  const chunks = getSemanticChunks(data, chunkTokenLength);
185
194
 
186
- for (const text of chunks) {
195
+
196
+ //extract filename as the title from file
197
+ try {
198
+ // Extract filename from file
199
+ let filename = file.split("/").pop();
200
+ // Remove everything before and including first underscore
201
+ let title = filename.replace(/^.*?_/, "");
202
+
203
+ parameters.title = title;
204
+ } catch (error) {
205
+ logger.error(`Error extracting title from file ${file}: ${error}`);
206
+ }
207
+
208
+ for (let i = 0; i < chunks.length; i++) {
209
+ const text = chunks[i];
210
+ parameters.chunkNo = i;
187
211
  const { data: singleData } = await this.getRequestParameters(text, parameters, prompt, mode, indexName, savedContextId, cortexRequest)
188
212
  fileData.value.push(singleData.value[0]);
189
213
  }
@@ -0,0 +1,215 @@
1
+ // gemini15ChatPlugin.js
2
+ import ModelPlugin from './modelPlugin.js';
3
+ import logger from '../../lib/logger.js';
4
+
5
// Merge an array of Gemini response/stream chunks into one text result.
// Returns { mergedResult, safetyRatings }: mergedResult is the concatenated
// candidate text, the blocked-response message when the prompt or a candidate
// was blocked, or null when no text was produced.
const mergeResults = (data) => {
    let output = '';
    let safetyRatings = [];
    const RESPONSE_BLOCKED = 'The response was blocked because the input or response potentially violates policies. Try rephrasing the prompt or adjusting the parameter settings.';

    for (let chunk of data) {
        // The Gemini REST API field is camelCase `promptFeedback`; the original
        // all-lowercase `promptfeedback` never matched, so blocked prompts went
        // undetected. Accept both spellings for safety.
        const promptFeedback = chunk.promptFeedback ?? chunk.promptfeedback;
        if (promptFeedback) {
            const { blockReason } = promptFeedback;
            if (blockReason) {
                logger.warn(`Response blocked due to prompt feedback: ${blockReason}`);
                return {mergedResult: RESPONSE_BLOCKED, safetyRatings: safetyRatings};
            }
        }

        const { candidates } = chunk;
        if (!candidates || !candidates.length) {
            continue;
        }

        // If any safety rating on the first candidate is blocked, stop and
        // return the blocked message along with the ratings.
        if (candidates[0].safetyRatings?.some(rating => rating.blocked)) {
            safetyRatings = candidates[0].safetyRatings;
            logger.warn(`Response blocked due to safety ratings: ${JSON.stringify(safetyRatings, null, 2)}`);
            return {mergedResult: RESPONSE_BLOCKED, safetyRatings: safetyRatings};
        }

        // Append the content of the first part of the first candidate to the output
        const message = candidates[0].content.parts[0].text;
        output += message;
    }

    return {mergedResult: output || null, safetyRatings: safetyRatings};
};
39
+
40
// Plugin for Google's Gemini 1.5 chat models via the Vertex AI REST API.
// Fixes vs. original: duplicated `cortexRequest.stream = stream;` removed;
// `temperature` uses `??` so an explicit 0 is honored; 'assistant' role is
// mapped to 'model' as the (previously inaccurate) comment promised; the
// even/odd message-count comment corrected.
class Gemini15ChatPlugin extends ModelPlugin {
    constructor(pathway, model) {
        super(pathway, model);
    }

    // Convert OpenAI- or PaLM-style messages into the Gemini `contents` format.
    // Returns { modifiedMessages, system }, where `system` collects role:'system'
    // messages into a single user-role parts list for systemInstruction.
    convertMessagesToGemini(messages) {
        let modifiedMessages = [];
        let systemParts = [];
        let lastAuthor = '';

        // Messages that already carry a `parts` array are assumed Gemini-format.
        if (messages[0] && Object.prototype.hasOwnProperty.call(messages[0], 'parts')) {
            modifiedMessages = messages;
        } else {
            messages.forEach(message => {
                const { role, author, content } = message;

                // System messages are hoisted into the systemInstruction parts.
                if (role === 'system') {
                    systemParts.push({ text: content });
                    return;
                }

                // Aggregate consecutive same-author messages, appending the content
                if ((role === lastAuthor || author === lastAuthor) && modifiedMessages.length > 0) {
                    modifiedMessages[modifiedMessages.length - 1].parts.push({ text: content });
                }

                // Push messages that are role: 'user' or 'assistant'. Gemini only
                // accepts 'user' and 'model' roles, so map 'assistant' to 'model'.
                else if (role === 'user' || role === 'assistant' || author) {
                    modifiedMessages.push({
                        role: author || (role === 'assistant' ? 'model' : role),
                        parts: [{ text: content }],
                    });
                    lastAuthor = author || role;
                }
            });
        }

        // Gemini expects an odd number of alternating messages (user first and
        // last); when the count is even, drop the oldest message to restore that.
        if (modifiedMessages.length % 2 === 0) {
            modifiedMessages = modifiedMessages.slice(1);
        }

        const system = { role: 'user', parts: systemParts };

        return {
            modifiedMessages,
            system,
        };
    }

    // Build the Gemini request body from the compiled prompt and pathway settings.
    getRequestParameters(text, parameters, prompt, cortexRequest) {
        const { modelPromptText, modelPromptMessages, tokenLength } = this.getCompiledPrompt(text, parameters, prompt);
        const { geminiSafetySettings, geminiTools, max_tokens } = cortexRequest ? cortexRequest.pathway : {};

        // Token budget reserved for the prompt portion of the context window.
        const modelTargetTokenLength = this.getModelMaxTokenLength() * this.getPromptTokenRatio();

        const geminiMessages = this.convertMessagesToGemini(modelPromptMessages || [{ "role": "user", "parts": [{ "text": modelPromptText }]}]);

        let requestMessages = geminiMessages.modifiedMessages;
        let system = geminiMessages.system;

        // Drop oldest messages until the prompt fits the model's token budget.
        if (tokenLength > modelTargetTokenLength) {
            requestMessages = this.truncateMessagesToTargetLength(requestMessages, modelTargetTokenLength);
        }

        if (max_tokens < 0) {
            throw new Error(`Prompt is too long to successfully call the model at ${tokenLength} tokens. The model will not be called.`);
        }

        const requestParameters = {
            contents: requestMessages,
            generationConfig: {
                // `??` (not `||`) so an explicit temperature of 0 is honored.
                temperature: this.temperature ?? 0.7,
                maxOutputTokens: max_tokens || this.getModelMaxReturnTokens(),
                topP: parameters.topP || 0.95,
                topK: parameters.topK || 40,
            },
            safety_settings: geminiSafetySettings || undefined,
            systemInstruction: system,
            tools: geminiTools || undefined
        };

        return requestParameters;
    }

    // Normalize the various response shapes (wrapped contents array, single
    // candidates object, raw chunk array) into a single text result.
    parseResponse(data) {
        let dataToMerge = [];
        if (data && data.contents && Array.isArray(data.contents)) {
            dataToMerge = data.contents;
        } else if (data && data.candidates && Array.isArray(data.candidates)) {
            // Non-streaming response: return the first candidate's text directly.
            return data.candidates[0].content.parts[0].text;
        } else if (Array.isArray(data)) {
            dataToMerge = data;
        } else {
            // Unknown shape — pass through untouched.
            return data;
        }

        return mergeResults(dataToMerge).mergedResult || null;
    }

    // Execute the request against the Gemini generateContent endpoint,
    // attaching a Vertex AI OAuth bearer token.
    async execute(text, parameters, prompt, cortexRequest) {
        const requestParameters = this.getRequestParameters(text, parameters, prompt, cortexRequest);
        const { stream } = parameters;

        cortexRequest.data = { ...(cortexRequest.data || {}), ...requestParameters };
        cortexRequest.params = {}; // query params
        cortexRequest.stream = stream;
        cortexRequest.urlSuffix = cortexRequest.stream ? ':streamGenerateContent?alt=sse' : ':generateContent';

        // Vertex AI authenticates with OAuth bearer tokens, not API keys.
        const gcpAuthTokenHelper = this.config.get('gcpAuthTokenHelper');
        const authToken = await gcpAuthTokenHelper.getAccessToken();
        cortexRequest.headers.Authorization = `Bearer ${authToken}`;

        return this.executeRequest(cortexRequest);
    }

    // Override the logging function to display the messages and responses
    logRequestData(data, responseData, prompt) {
        const messages = data && data.contents;

        if (messages && messages.length > 1) {
            logger.info(`[chat request contains ${messages.length} messages]`);
            messages.forEach((message, index) => {
                // Concatenate only the text parts for the log preview.
                const messageContent = message.parts.reduce((acc, part) => {
                    if (part.text) {
                        return acc + part.text;
                    }
                    return acc;
                }, '');
                const words = messageContent.split(" ");
                const { length, units } = this.getLength(messageContent);
                // Long messages are previewed as the first 20 + last 20 words.
                const preview = words.length < 41 ? messageContent : words.slice(0, 20).join(" ") + " ... " + words.slice(-20).join(" ");

                logger.debug(`message ${index + 1}: role: ${message.role}, ${units}: ${length}, content: "${preview}"`);
            });
        } else if (messages && messages.length === 1) {
            logger.debug(`${messages[0].parts[0].text}`);
        }

        // responseData is a string (parsed), array (raw chunks), or SSE stream.
        if (typeof responseData === 'string') {
            const { length, units } = this.getLength(responseData);
            logger.info(`[response received containing ${length} ${units}]`);
            logger.debug(`${responseData}`);
        } else if (Array.isArray(responseData)) {
            const { mergedResult, safetyRatings } = mergeResults(responseData);
            if (safetyRatings?.length) {
                logger.warn(`!!! response was blocked because the input or response potentially violates policies`);
                logger.debug(`Safety Ratings: ${JSON.stringify(safetyRatings, null, 2)}`);
            }
            const { length, units } = this.getLength(mergedResult);
            logger.info(`[response received containing ${length} ${units}]`);
            logger.debug(`${mergedResult}`);
        } else {
            logger.info(`[response received as an SSE stream]`);
        }

        if (prompt && prompt.debugInfo) {
            prompt.debugInfo += `\n${JSON.stringify(data)}`;
        }
    }

}
214
+
215
+ export default Gemini15ChatPlugin;
@@ -0,0 +1,100 @@
1
+ import Gemini15ChatPlugin from './gemini15ChatPlugin.js';
2
+ import mime from 'mime-types';
3
+ import logger from '../../lib/logger.js';
4
+
5
// Vision-capable variant of Gemini15ChatPlugin: handles multimodal message
// parts (text, gs:// file references, and base64 inline images).
// Fixes vs. original: parts arriving as plain objects (OpenAI vision format)
// no longer fall into the JSON.parse catch; 'assistant' role is mapped to
// 'model'; the even/odd message-count comment corrected.
class Gemini15VisionPlugin extends Gemini15ChatPlugin {

    // Override the convertMessagesToGemini method to handle multimodal vision
    // messages. Accepts messages in Gemini native format or OpenAI's format
    // and converts them to the Gemini `contents` format.
    convertMessagesToGemini(messages) {
        let modifiedMessages = [];
        let lastAuthor = '';
        let systemParts = [];

        // Messages already carrying a `parts` array are assumed Gemini-format.
        if (messages[0] && Object.prototype.hasOwnProperty.call(messages[0], 'parts')) {
            modifiedMessages = messages;
        } else {
            messages.forEach(message => {
                const { role, author, content } = message;

                if (role === 'system') {
                    systemParts.push({ text: content });
                    return;
                }

                // Convert one content part to Gemini format. Parts may arrive
                // as JSON-encoded strings or as plain objects (OpenAI vision
                // format); accept both instead of assuming a string.
                const convertPartToGemini = (partInput) => {
                    try {
                        const part = typeof partInput === 'string' ? JSON.parse(partInput) : partInput;
                        if (typeof part === 'string') {
                            return { text: part };
                        } else if (part.type === 'text') {
                            return { text: part.text };
                        } else if (part.type === 'image_url') {
                            if (part.image_url.url.startsWith('gs://')) {
                                return {
                                    fileData: {
                                        // NOTE(review): mime.lookup returns false for
                                        // unknown extensions — confirm upstream always
                                        // supplies a recognized file extension.
                                        mimeType: mime.lookup(part.image_url.url),
                                        fileUri: part.image_url.url
                                    }
                                };
                            } else {
                                return {
                                    inlineData: {
                                        mimeType: 'image/jpeg', // fixed for now as there's no MIME type in the request
                                        data: part.image_url.url.split('base64,')[1]
                                    }
                                };
                            }
                        }
                    } catch (e) {
                        logger.warn(`Unable to parse part - including as string: ${partInput}`);
                    }
                    return { text: partInput };
                };

                const addPartToMessages = (geminiPart) => {
                    // Gemini requires alternating user:/model: messages — append to
                    // the previous message when the author hasn't changed.
                    if ((role === lastAuthor || author === lastAuthor) && modifiedMessages.length > 0) {
                        modifiedMessages[modifiedMessages.length - 1].parts.push(geminiPart);
                    }
                    // Gemini only supports user: and model: roles — map 'assistant'
                    // to 'model'.
                    else if (role === 'user' || role === 'assistant' || author) {
                        modifiedMessages.push({
                            role: author || (role === 'assistant' ? 'model' : role),
                            parts: [geminiPart],
                        });
                        lastAuthor = author || role;
                    }
                };

                // Content is either the "vision" format (array of parts) or the
                // "chat" format (a single string).
                if (Array.isArray(content)) {
                    content.forEach(part => {
                        addPartToMessages(convertPartToGemini(part));
                    });
                }
                else {
                    addPartToMessages(convertPartToGemini(content));
                }
            });
        }

        // Gemini expects an odd number of alternating messages (user first and
        // last); when the count is even, drop the oldest message.
        if (modifiedMessages.length % 2 === 0) {
            modifiedMessages = modifiedMessages.slice(1);
        }

        const system = { role: 'user', parts: systemParts };

        return {
            modifiedMessages,
            system,
        };
    }

}
99
+
100
+ export default Gemini15VisionPlugin;
@@ -131,11 +131,18 @@ class GeminiChatPlugin extends ModelPlugin {
131
131
  // Parse the response from the new Chat API
132
132
  parseResponse(data) {
133
133
  // If data is not an array, return it directly
134
- if (!Array.isArray(data)) {
134
+ let dataToMerge = [];
135
+ if (data && data.contents && Array.isArray(data.contents)) {
136
+ dataToMerge = data.contents;
137
+ } else if (data && data.candidates && Array.isArray(data.candidates)) {
138
+ return data.candidates[0].content.parts[0].text;
139
+ } else if (Array.isArray(data)) {
140
+ dataToMerge = data;
141
+ } else {
135
142
  return data;
136
143
  }
137
144
 
138
- return mergeResults(data).mergedResult || null;
145
+ return mergeResults(dataToMerge).mergedResult || null;
139
146
 
140
147
  }
141
148
 
@@ -147,7 +154,8 @@ class GeminiChatPlugin extends ModelPlugin {
147
154
  cortexRequest.data = { ...(cortexRequest.data || {}), ...requestParameters };
148
155
  cortexRequest.params = {}; // query params
149
156
  cortexRequest.stream = stream;
150
- cortexRequest.url = cortexRequest.stream ? `${cortexRequest.url}?alt=sse` : cortexRequest.url;
157
+ cortexRequest.stream = stream;
158
+ cortexRequest.urlSuffix = cortexRequest.stream ? ':streamGenerateContent?alt=sse' : ':generateContent';
151
159
 
152
160
  const gcpAuthTokenHelper = this.config.get('gcpAuthTokenHelper');
153
161
  const authToken = await gcpAuthTokenHelper.getAccessToken();
@@ -179,10 +187,12 @@ class GeminiChatPlugin extends ModelPlugin {
179
187
  logger.debug(`${messages[0].parts[0].text}`);
180
188
  }
181
189
 
182
- // check if responseData is an array
183
- if (!Array.isArray(responseData)) {
184
- logger.info(`[response received as an SSE stream]`);
185
- } else {
190
+ // check if responseData is an array or string
191
+ if (typeof responseData === 'string') {
192
+ const { length, units } = this.getLength(responseData);
193
+ logger.info(`[response received containing ${length} ${units}]`);
194
+ logger.debug(`${responseData}`);
195
+ } else if (Array.isArray(responseData)) {
186
196
  const { mergedResult, safetyRatings } = mergeResults(responseData);
187
197
  if (safetyRatings?.length) {
188
198
  logger.warn(`!!! response was blocked because the input or response potentially violates policies`);
@@ -191,6 +201,8 @@ class GeminiChatPlugin extends ModelPlugin {
191
201
  const { length, units } = this.getLength(mergedResult);
192
202
  logger.info(`[response received containing ${length} ${units}]`);
193
203
  logger.debug(`${mergedResult}`);
204
+ } else {
205
+ logger.info(`[response received as an SSE stream]`);
194
206
  }
195
207
 
196
208
  if (prompt && prompt.debugInfo) {
package/server/typeDef.js CHANGED
@@ -1,30 +1,30 @@
1
1
// Map a sample JS value to its GraphQL input type name.
// Arrays are classified by their first element: string arrays -> [String],
// message-shaped objects -> [MultiMessage]/[Message]; other objects use their
// `objName` property as the type name. Everything else defaults to String.
// Fix: typeof null === 'object', so the original dereferenced null via
// `value.objName` and threw a TypeError; null now falls back to String.
const getGraphQlType = (value) => {
    switch (typeof value) {
        case 'boolean':
            return {type: 'Boolean'};
        case 'string':
            return {type: 'String'};
        case 'number':
            return {type: 'Int'};
        case 'object':
            // Guard null before any property access (typeof null === 'object').
            if (value === null) {
                return {type: 'String'};
            }
            if (Array.isArray(value)) {
                if (value.length > 0 && typeof(value[0]) === 'string') {
                    return {type: '[String]'};
                }
                else {
                    // New case for MultiMessage type
                    if (Array.isArray(value[0]?.content)) {
                        return {type: '[MultiMessage]'};
                    }
                    else {
                        return {type: '[Message]'};
                    }
                }
            } else {
                return {type: `[${value.objName}]`};
            }
        default:
            return {type: 'String'};
    }
};
30
30