@aj-archipelago/cortex 1.1.19 → 1.1.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,5 @@
  {
-   "defaultModelName": "oai-td3",
+   "defaultModelName": "oai-gpturbo",
    "models": {
      "azure-translate": {
        "type": "AZURE-TRANSLATE",
@@ -12,19 +12,6 @@
        "requestsPerSecond": 10,
        "maxTokenLength": 2000
      },
-     "oai-td3": {
-       "type": "OPENAI-COMPLETION",
-       "url": "https://api.openai.com/v1/completions",
-       "headers": {
-         "Authorization": "Bearer {{OPENAI_API_KEY}}",
-         "Content-Type": "application/json"
-       },
-       "params": {
-         "model": "text-davinci-003"
-       },
-       "requestsPerSecond": 10,
-       "maxTokenLength": 4096
-     },
      "oai-gpturbo": {
        "type": "OPENAI-CHAT",
        "url": "https://api.openai.com/v1/chat/completions",
@@ -11,6 +11,7 @@ import { join } from "path";
  import { Storage } from "@google-cloud/storage";
  import axios from "axios";
  import { publicFolder, port, ipAddress } from "./start.js";
+ import mime from "mime-types";

  const IMAGE_EXTENSIONS = [
    ".jpg",
@@ -21,6 +22,7 @@ const IMAGE_EXTENSIONS = [
    ".webp",
    ".tiff",
    ".svg",
+   ".pdf"
  ];

  const VIDEO_EXTENSIONS = [
@@ -74,7 +76,7 @@ if (!GCP_PROJECT_ID || !GCP_SERVICE_ACCOUNT) {

  const GCS_BUCKETNAME = process.env.GCS_BUCKETNAME || "cortextempfiles";

- const getBlobClient = () => {
+ const getBlobClient = async () => {
    const connectionString = process.env.AZURE_STORAGE_CONNECTION_STRING;
    const containerName = process.env.AZURE_STORAGE_CONTAINER_NAME;
    if (!connectionString || !containerName) {
@@ -83,15 +85,21 @@ const getBlobClient = () => {
      );
    }

-   const blobServiceClient =
-     BlobServiceClient.fromConnectionString(connectionString);
+   const blobServiceClient = BlobServiceClient.fromConnectionString(connectionString);
+
+   const serviceProperties = await blobServiceClient.getProperties();
+   if(!serviceProperties.defaultServiceVersion) {
+     serviceProperties.defaultServiceVersion = '2020-02-10';
+     await blobServiceClient.setProperties(serviceProperties);
+   }
+
    const containerClient = blobServiceClient.getContainerClient(containerName);

    return { blobServiceClient, containerClient };
  };

  async function saveFileToBlob(chunkPath, requestId) {
-   const { containerClient } = getBlobClient();
+   const { containerClient } = await getBlobClient();
    // Use the filename with a UUID as the blob name
    const blobName = `${requestId}/${uuidv4()}_${path.basename(chunkPath)}`;

@@ -110,7 +118,7 @@ async function saveFileToBlob(chunkPath, requestId) {
  //deletes blob that has the requestId
  async function deleteBlob(requestId) {
    if (!requestId) throw new Error("Missing requestId parameter");
-   const { containerClient } = getBlobClient();
+   const { containerClient } = await getBlobClient();
    // List the blobs in the container with the specified prefix
    const blobs = containerClient.listBlobsFlat({ prefix: `${requestId}/` });

@@ -181,14 +189,20 @@ async function uploadBlob(
        resolve(body); // Resolve the promise
      } else {
        const filename = `${requestId}/${uuidv4()}_${info.filename}`;
-       const { containerClient } = getBlobClient();
+       const { containerClient } = await getBlobClient();
+
+       const contentType = mime.lookup(filename); // content type based on file extension
+       const options = {};
+       if (contentType) {
+         options.blobHTTPHeaders = { blobContentType: contentType };
+       }

        const blockBlobClient = containerClient.getBlockBlobClient(filename);

        const passThroughStream = new PassThrough();
        file.pipe(passThroughStream);

-       await blockBlobClient.uploadStream(passThroughStream);
+       await blockBlobClient.uploadStream(passThroughStream, undefined, undefined, options);

        const message = `File '${filename}' uploaded successfully.`;
        const url = blockBlobClient.url;
@@ -250,7 +264,7 @@ async function uploadBlob(

  // Function to delete files that haven't been used in more than a month
  async function cleanup(urls=null) {
-   const { containerClient } = getBlobClient();
+   const { containerClient } = await getBlobClient();

    if(!urls) {
      const xMonthAgo = new Date();
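Taken together, the hunks above make `getBlobClient` asynchronous (it now reads the Azure blob service properties and pins `defaultServiceVersion` to `2020-02-10` when unset), so every call site awaits it, and uploads now send a `blobContentType` derived from the file extension via the new `mime-types` dependency. A minimal sketch of the content-type part in isolation (the helper name is ours, not the package's):

```js
import mime from "mime-types";

// Build the options object passed as the fourth argument to
// BlockBlobClient.uploadStream(stream, bufferSize, maxConcurrency, options).
function buildUploadOptions(filename) {
  const contentType = mime.lookup(filename); // false when the extension is unknown
  const options = {};
  if (contentType) {
    options.blobHTTPHeaders = { blobContentType: contentType };
  }
  return options;
}

console.log(buildUploadOptions("report.pdf"));          // { blobHTTPHeaders: { blobContentType: "application/pdf" } }
console.log(buildUploadOptions("chunk_without_ext"));   // {} -- no extension, so no header is set
```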
@@ -5,6 +5,7 @@
    "type": "module",
    "scripts": {
      "start": "node start.js",
+     "dev": "node -r dotenv/config start.js",
      "test": "echo \"No tests yet...\""
    },
    "dependencies": {
@@ -3,7 +3,11 @@ import { encode, decode } from '../lib/encodeCache.js';
  import { config } from '../config.js';

  // callPathway - call a pathway from another pathway
- const callPathway = async (pathwayName, args) => {
+ const callPathway = async (pathwayName, inArgs) => {
+
+   // Clone the args object to avoid modifying the original
+   const args = JSON.parse(JSON.stringify(inArgs));
+
    const pathway = config.get(`pathways.${pathwayName}`);
    if (!pathway) {
      throw new Error(`Pathway ${pathwayName} not found`);
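`callPathway` now deep-clones its incoming arguments with a JSON round-trip, so anything the called pathway mutates stays local to that call. A minimal standalone sketch of why that matters (note that JSON cloning drops functions and `undefined` values):

```js
// Caller-side arguments that a pathway might mutate.
const callerArgs = { text: "hello", chatHistory: [{ role: "user", content: "hi" }] };

// The clone the new code takes before doing any work.
const args = JSON.parse(JSON.stringify(callerArgs));
args.chatHistory.push({ role: "assistant", content: "added inside the pathway" });

console.log(callerArgs.chatHistory.length); // 1 -- the caller's object is untouched
console.log(args.chatHistory.length);       // 2
```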
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@aj-archipelago/cortex",
-   "version": "1.1.19",
+   "version": "1.1.20",
    "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
    "private": false,
    "repository": {
@@ -1,172 +1,305 @@
- import OpenAIVisionPlugin from './openAiVisionPlugin.js';
- import logger from '../../lib/logger.js';
+ import OpenAIVisionPlugin from "./openAiVisionPlugin.js";
+ import logger from "../../lib/logger.js";
+ import mime from 'mime-types';

- class Claude3VertexPlugin extends OpenAIVisionPlugin {
+ async function convertContentItem(item) {

-     parseResponse(data)
-     {
-         if (!data) {
-             return data;
-         }
+   let imageUrl = "";
+   let isDataURL = false;
+   let urlData = "";
+   let mimeTypeMatch = "";
+   let mimeType = "";
+   let base64Image = "";
+   const allowedMIMETypes = ['image/jpeg', 'image/png', 'image/gif', 'image/webp'];
+
+   try {
+     switch (typeof item) {
+       case "string":
+         return item ? { type: "text", text: item } : null;
+
+       case "object":
+         switch (item.type) {
+           case "text":
+             return item.text ? { type: "text", text: item.text } : null;
+
+           case "image_url":
+             imageUrl = item.image_url.url || item.image_url;
+             if (!imageUrl) {
+               logger.warn("Could not parse image URL from content - skipping image content.");
+               return null;
+             }

-         const { content } = data;
+             if (!allowedMIMETypes.includes(mime.lookup(imageUrl) || "")) {
+               logger.warn("Unsupported image type - skipping image content.");
+               return null;
+             }

-         // if the response is an array, return the text property of the first item
-         // if the type property is 'text'
-         if (content && Array.isArray(content) && content[0].type === 'text') {
-             return content[0].text;
-         } else {
-             return data;
+             isDataURL = imageUrl.startsWith("data:");
+             urlData = isDataURL ? item.image_url.url : await fetchImageAsDataURL(imageUrl);
+             mimeTypeMatch = urlData.match(/data:([a-zA-Z0-9]+\/[a-zA-Z0-9-.+]+).*,.*/);
+             mimeType = mimeTypeMatch && mimeTypeMatch[1] ? mimeTypeMatch[1] : "image/jpeg";
+             base64Image = urlData.split(",")[1];
+
+             return {
+               type: "image",
+               source: {
+                 type: "base64",
+                 media_type: mimeType,
+                 data: base64Image,
+               },
+             };
+
+           default:
+             return null;
          }
+
+       default:
+         return null;
      }
+   }
+   catch (e) {
+     logger.warn(`Error converting content item: ${e}`);
+     return null;
+   }
+ }

-     // This code converts messages to the format required by the Claude Vertex API
-     convertMessagesToClaudeVertex(messages) {
-         let modifiedMessages = [];
-         let system = '';
-         let lastAuthor = '';
-
-         // Claude needs system messages in a separate field
-         const systemMessages = messages.filter(message => message.role === 'system');
-         if (systemMessages.length > 0) {
-             system = systemMessages.map(message => message.content).join('\n');
-             modifiedMessages = messages.filter(message => message.role !== 'system');
-         } else {
-             modifiedMessages = messages;
-         }
+ // Fetch image and convert to base 64 data URL
+ async function fetchImageAsDataURL(imageUrl) {
+   try {
+     const response = await fetch(imageUrl);

-         // remove any empty messages
-         modifiedMessages = modifiedMessages.filter(message => message.content);
+     if (!response.ok) {
+       throw new Error(`HTTP error! status: ${response.status}`);
+     }

-         // combine any consecutive messages from the same author
-         var combinedMessages = [];
+     const buffer = await response.arrayBuffer();
+     const base64Image = Buffer.from(buffer).toString("base64");
+     const mimeType = mime.lookup(imageUrl) || "image/jpeg";
+     return `data:${mimeType};base64,${base64Image}`;
+   }
+   catch (e) {
+     logger.error(`Failed to fetch image: ${imageUrl}. ${e}`);
+     throw e;
+   }
+ }

-         modifiedMessages.forEach((message) => {
-             if (message.role === lastAuthor) {
-                 combinedMessages[combinedMessages.length - 1].content += '\n' + message.content;
-             } else {
-                 combinedMessages.push(message);
-                 lastAuthor = message.role;
-             }
-         });
+ class Claude3VertexPlugin extends OpenAIVisionPlugin {
+
+   parseResponse(data) {
+     if (!data) {
+       return data;
+     }

-         modifiedMessages = combinedMessages;
+     const { content } = data;

-         // Claude vertex requires an even number of messages
-         if (modifiedMessages.length % 2 === 0) {
-             modifiedMessages = modifiedMessages.slice(1);
-         }
+     // if the response is an array, return the text property of the first item
+     // if the type property is 'text'
+     if (content && Array.isArray(content) && content[0].type === "text") {
+       return content[0].text;
+     } else {
+       return data;
+     }
+   }
+
+   // This code converts messages to the format required by the Claude Vertex API
+   async convertMessagesToClaudeVertex(messages) {
+     let modifiedMessages = [];
+     let system = "";
+     let lastAuthor = "";

+     // Claude needs system messages in a separate field
+     const systemMessages = messages.filter(
+       (message) => message.role === "system"
+     );
+     if (systemMessages.length > 0) {
+       system = systemMessages.map((message) => message.content).join("\n");
+       modifiedMessages = messages.filter(
+         (message) => message.role !== "system"
+       );
+     } else {
+       modifiedMessages = messages;
+     }
+
+     // remove any empty messages
+     modifiedMessages = modifiedMessages.filter((message) => message.content);
+
+     // combine any consecutive messages from the same author
+     var combinedMessages = [];
+
+     modifiedMessages.forEach((message) => {
+       if (message.role === lastAuthor) {
+         combinedMessages[combinedMessages.length - 1].content +=
+           "\n" + message.content;
+       } else {
+         combinedMessages.push(message);
+         lastAuthor = message.role;
+       }
+     });
+
+     modifiedMessages = combinedMessages;
+
+     // Claude vertex requires an even number of messages
+     if (modifiedMessages.length % 2 === 0) {
+       modifiedMessages = modifiedMessages.slice(1);
+     }
+
+     const claude3Messages = await Promise.all(
+       modifiedMessages.map(async (message) => {
+         const contentArray = Array.isArray(message.content) ? message.content : [message.content];
+         const claude3Content = await Promise.all(contentArray.map(convertContentItem));
          return {
-             system,
-             modifiedMessages,
+           role: message.role,
+           content: claude3Content.filter(Boolean),
          };
-     }
+       })
+     );

-     getRequestParameters(text, parameters, prompt, cortexRequest) {
-         const requestParameters = super.getRequestParameters(text, parameters, prompt, cortexRequest);
-         const { system, modifiedMessages } = this.convertMessagesToClaudeVertex(requestParameters.messages);
-         requestParameters.system = system;
-         requestParameters.messages = modifiedMessages;
-         requestParameters.max_tokens = this.getModelMaxReturnTokens();
-         requestParameters.anthropic_version = 'vertex-2023-10-16';
-         return requestParameters;
+     return {
+       system,
+       modifiedMessages: claude3Messages,
+     };
+   }
+
+   async getRequestParameters(text, parameters, prompt, cortexRequest) {
+     const requestParameters = await super.getRequestParameters(
+       text,
+       parameters,
+       prompt,
+       cortexRequest
+     );
+     const { system, modifiedMessages } =
+       await this.convertMessagesToClaudeVertex(requestParameters.messages);
+     requestParameters.system = system;
+     requestParameters.messages = modifiedMessages;
+     requestParameters.max_tokens = this.getModelMaxReturnTokens();
+     requestParameters.anthropic_version = "vertex-2023-10-16";
+     return requestParameters;
+   }
+
+   // Override the logging function to display the messages and responses
+   logRequestData(data, responseData, prompt) {
+     const { stream, messages, system } = data;
+     if (system) {
+       const { length, units } = this.getLength(system);
+       logger.info(`[system messages sent containing ${length} ${units}]`);
+       logger.debug(`${system}`);
      }

-     // Override the logging function to display the messages and responses
-     logRequestData(data, responseData, prompt) {
-         const { stream, messages, system } = data;
-         if (system) {
-             const { length, units } = this.getLength(system);
-             logger.info(`[system messages sent containing ${length} ${units}]`);
-             logger.debug(`${system}`);
-         }
-
-         if (messages && messages.length > 1) {
-             logger.info(`[chat request sent containing ${messages.length} messages]`);
-             let totalLength = 0;
-             let totalUnits;
-             messages.forEach((message, index) => {
-                 //message.content string or array
-                 const content = Array.isArray(message.content) ? message.content.map(item => JSON.stringify(item)).join(', ') : message.content;
-                 const words = content.split(" ");
-                 const { length, units } = this.getLength(content);
-                 const preview = words.length < 41 ? content : words.slice(0, 20).join(" ") + " ... " + words.slice(-20).join(" ");
-
-                 logger.debug(`message ${index + 1}: role: ${message.role}, ${units}: ${length}, content: "${preview}"`);
-                 totalLength += length;
-                 totalUnits = units;
-             });
-             logger.info(`[chat request contained ${totalLength} ${totalUnits}]`);
-         } else {
-             const message = messages[0];
-             const content = Array.isArray(message.content) ? message.content.map(item => JSON.stringify(item)).join(', ') : message.content;
-             const { length, units } = this.getLength(content);
-             logger.info(`[request sent containing ${length} ${units}]`);
-             logger.debug(`${content}`);
-         }
-
-         if (stream) {
-             logger.info(`[response received as an SSE stream]`);
-         } else {
-             const responseText = this.parseResponse(responseData);
-             const { length, units } = this.getLength(responseText);
-             logger.info(`[response received containing ${length} ${units}]`);
-             logger.debug(`${responseText}`);
-         }
+     if (messages && messages.length > 1) {
+       logger.info(`[chat request sent containing ${messages.length} messages]`);
+       let totalLength = 0;
+       let totalUnits;
+       messages.forEach((message, index) => {
+         //message.content string or array
+         const content = Array.isArray(message.content)
+           ? message.content.map((item) => {
+               if (item.source && item.source.type === 'base64') {
+                 item.source.data = '* base64 data truncated for log *';
+               }
+               return JSON.stringify(item);
+             }).join(", ")
+           : message.content;
+         const words = content.split(" ");
+         const { length, units } = this.getLength(content);
+         const preview =
+           words.length < 41
+             ? content
+             : words.slice(0, 20).join(" ") +
+               " ... " +
+               words.slice(-20).join(" ");

-         prompt && prompt.debugInfo && (prompt.debugInfo += `\n${JSON.stringify(data)}`);
+         logger.debug(
+           `message ${index + 1}: role: ${
+             message.role
+           }, ${units}: ${length}, content: "${preview}"`
+         );
+         totalLength += length;
+         totalUnits = units;
+       });
+       logger.info(`[chat request contained ${totalLength} ${totalUnits}]`);
+     } else {
+       const message = messages[0];
+       const content = Array.isArray(message.content)
+         ? message.content.map((item) => JSON.stringify(item)).join(", ")
+         : message.content;
+       const { length, units } = this.getLength(content);
+       logger.info(`[request sent containing ${length} ${units}]`);
+       logger.debug(`${content}`);
      }

-     async execute(text, parameters, prompt, cortexRequest) {
-         const requestParameters = this.getRequestParameters(text, parameters, prompt, cortexRequest);
-         const { stream } = parameters;
+     if (stream) {
+       logger.info(`[response received as an SSE stream]`);
+     } else {
+       const responseText = this.parseResponse(responseData);
+       const { length, units } = this.getLength(responseText);
+       logger.info(`[response received containing ${length} ${units}]`);
+       logger.debug(`${responseText}`);
+     }

-         cortexRequest.data = { ...(cortexRequest.data || {}), ...requestParameters };
-         cortexRequest.params = {}; // query params
-         cortexRequest.stream = stream;
-         cortexRequest.urlSuffix = cortexRequest.stream ? ':streamRawPredict' : ':rawPredict';
+     prompt &&
+       prompt.debugInfo &&
+       (prompt.debugInfo += `\n${JSON.stringify(data)}`);
+   }

-         const gcpAuthTokenHelper = this.config.get('gcpAuthTokenHelper');
-         const authToken = await gcpAuthTokenHelper.getAccessToken();
-         cortexRequest.headers.Authorization = `Bearer ${authToken}`;
+   async execute(text, parameters, prompt, cortexRequest) {
+     const requestParameters = await this.getRequestParameters(
+       text,
+       parameters,
+       prompt,
+       cortexRequest
+     );
+     const { stream } = parameters;

-         return this.executeRequest(cortexRequest);
-     }
+     cortexRequest.data = {
+       ...(cortexRequest.data || {}),
+       ...requestParameters,
+     };
+     cortexRequest.params = {}; // query params
+     cortexRequest.stream = stream;
+     cortexRequest.urlSuffix = cortexRequest.stream
+       ? ":streamRawPredict"
+       : ":rawPredict";

-     processStreamEvent(event, requestProgress) {
-         const eventData = JSON.parse(event.data);
-         switch (eventData.type) {
-             case 'message_start':
-                 requestProgress.data = JSON.stringify(eventData.message);
-                 break;
-             case 'content_block_start':
-                 break;
-             case 'ping':
-                 break;
-             case 'content_block_delta':
-                 if (eventData.delta.type === 'text_delta') {
-                     requestProgress.data = JSON.stringify(eventData.delta.text);
-                 }
-                 break;
-             case 'content_block_stop':
-                 break;
-             case 'message_delta':
-                 break;
-             case 'message_stop':
-                 requestProgress.data = '[DONE]';
-                 requestProgress.progress = 1;
-                 break;
-             case 'error':
-                 requestProgress.data = `\n\n*** ${eventData.error.message || eventData.error} ***`;
-                 requestProgress.progress = 1;
-                 break;
-         }
+     const gcpAuthTokenHelper = this.config.get("gcpAuthTokenHelper");
+     const authToken = await gcpAuthTokenHelper.getAccessToken();
+     cortexRequest.headers.Authorization = `Bearer ${authToken}`;

-         return requestProgress;
+     return this.executeRequest(cortexRequest);
+   }

+   processStreamEvent(event, requestProgress) {
+     const eventData = JSON.parse(event.data);
+     switch (eventData.type) {
+       case "message_start":
+         requestProgress.data = JSON.stringify(eventData.message);
+         break;
+       case "content_block_start":
+         break;
+       case "ping":
+         break;
+       case "content_block_delta":
+         if (eventData.delta.type === "text_delta") {
+           requestProgress.data = JSON.stringify(eventData.delta.text);
+         }
+         break;
+       case "content_block_stop":
+         break;
+       case "message_delta":
+         break;
+       case "message_stop":
+         requestProgress.data = "[DONE]";
+         requestProgress.progress = 1;
+         break;
+       case "error":
+         requestProgress.data = `\n\n*** ${
+           eventData.error.message || eventData.error
+         } ***`;
+         requestProgress.progress = 1;
+         break;
      }

+     return requestProgress;
+   }
  }

  export default Claude3VertexPlugin;
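The rewritten Claude 3 Vertex plugin now accepts multimodal content: OpenAI-style `image_url` items are fetched (unless they are already data URLs), base64-encoded, and emitted as Anthropic-style `image` blocks, while unsupported MIME types are dropped with a warning. An illustrative before/after shape (placeholder values; the real code derives `media_type` and `data` from the fetched bytes):

```js
// OpenAI-style content item as it arrives from a vision-enabled chat request...
const openAiItem = {
  type: "image_url",
  image_url: { url: "https://example.com/photo.jpg" },
};

// ...and the Anthropic/Claude-on-Vertex content block convertContentItem produces from it.
const claudeItem = {
  type: "image",
  source: {
    type: "base64",
    media_type: "image/jpeg",             // taken from the data URL, defaulting to image/jpeg
    data: "<base64-encoded image bytes>", // placeholder for the encoded payload
  },
};

console.log(openAiItem.type, "->", claudeItem.type); // image_url -> image
```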
@@ -25,9 +25,10 @@ class Gemini15VisionPlugin extends Gemini15ChatPlugin {
      }

    // Convert content to Gemini format, trying to maintain compatibility
-   const convertPartToGemini = (partString) => {
+   const convertPartToGemini = (inputPart) => {
      try {
-       const part = JSON.parse(partString);
+       const part = typeof inputPart === 'string' ? JSON.parse(inputPart) : inputPart;
+
        if (typeof part === 'string') {
          return { text: part };
        } else if (part.type === 'text') {
@@ -50,9 +51,9 @@ class Gemini15VisionPlugin extends Gemini15ChatPlugin {
          }
        }
      } catch (e) {
-       logger.warn(`Unable to parse part - including as string: ${partString}`);
+       logger.warn(`Unable to parse part - including as string: ${inputPart}`);
      }
-     return { text: partString };
+     return { text: inputPart };
    };

    const addPartToMessages = (geminiPart) => {
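`convertPartToGemini` now tolerates parts that are already objects instead of JSON strings. A standalone sketch of just that branch (text parts only; the real function also handles image parts):

```js
function convertTextPartSketch(inputPart) {
  try {
    // New behavior: only parse when the part is still a JSON string.
    const part = typeof inputPart === "string" ? JSON.parse(inputPart) : inputPart;
    if (typeof part === "string") return { text: part };
    if (part.type === "text") return { text: part.text };
  } catch (e) {
    // fall through and include the raw input as text, as the plugin does
  }
  return { text: inputPart };
}

console.log(convertTextPartSketch('{"type":"text","text":"hi"}')); // { text: "hi" }
console.log(convertTextPartSketch({ type: "text", text: "hi" }));  // { text: "hi" } -- previously JSON.parse choked on object input
```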
@@ -8,9 +8,15 @@ class OpenAIVisionPlugin extends OpenAIChatPlugin {
    messages.map(message => {
      try {
        // message.content can be array or string
-       if (typeof message.content === 'string') message.content = JSON.parse(message.content);
-       else if (Array.isArray(message.content)) message.content = message.content.map(item => JSON.parse(item));
-
+       if (typeof message.content === 'string') {
+         message.content = JSON.parse(message.content);
+       } else if (Array.isArray(message.content)) {
+         message.content = message.content.map(item => {
+           const parsedItem = JSON.parse(item);
+           const { type, text, image_url, url } = parsedItem;
+           return { type, text, image_url: url || image_url};
+         });
+       }
      } catch (e) {
        return message;
      }
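The vision plugin now parses each array content item and normalizes it so that an image reference supplied under either `url` or `image_url` ends up in the `image_url` field downstream code expects. A minimal sketch of that normalization:

```js
// A content item serialized by a caller that used `url` instead of `image_url`.
const rawItem = JSON.stringify({ type: "image_url", url: "https://example.com/cat.png" });

const { type, text, image_url, url } = JSON.parse(rawItem);
const normalized = { type, text, image_url: url || image_url };

console.log(normalized.image_url); // "https://example.com/cat.png"
```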
@@ -1,5 +1,6 @@
  import { fulfillWithTimeout } from '../lib/promiser.js';
  import { PathwayResolver } from './pathwayResolver.js';
+ import logger from '../lib/logger.js';

  // This resolver uses standard parameters required by Apollo server:
  // (parent, args, contextValue, info)
@@ -16,7 +17,15 @@ const rootResolver = async (parent, args, contextValue, info) => {
    contextValue.pathwayResolver = pathwayResolver;

    // Execute the request with timeout
-   const result = await fulfillWithTimeout(pathway.resolver(parent, args, contextValue, info), pathway.timeout);
+   let result = null;
+
+   try {
+     result = await fulfillWithTimeout(pathway.resolver(parent, args, contextValue, info), pathway.timeout);
+   } catch (error) {
+     logger.error(`Request failed with error: ${error}`);
+     result = error.message || error.toString();
+   }
+
    const { warnings, previousResult, savedContextId, tool } = pathwayResolver;

    // Add request parameters back as debug
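The root resolver now catches failures (including timeouts from `fulfillWithTimeout`), logs them, and returns the error message as the pathway result instead of letting the exception surface as a GraphQL error; the test hunk below was loosened accordingly to accept the message in either location. A minimal sketch of the new behavior in isolation:

```js
// Stand-in for the resolver's new try/catch around the pathway call.
async function resolveWithFallback(runPathway) {
  try {
    return await runPathway();
  } catch (error) {
    console.error(`Request failed with error: ${error}`);
    return error.message || error.toString();
  }
}

// A failing pathway now yields its message as data rather than a thrown error.
resolveWithFallback(() => Promise.reject(new Error("pathway timed out")))
  .then((result) => console.log(result)); // "pathway timed out"
```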
@@ -131,7 +131,7 @@ test('vision multi single long text', async t => {
      },
    });

-   t.is(response.body?.singleResult?.errors?.[0]?.message, 'Unable to process your request as your single message content is too long. Please try again with a shorter message.');
+   t.is(response.body?.singleResult?.errors?.[0]?.message || response.body?.singleResult?.data?.vision?.result, 'Unable to process your request as your single message content is too long. Please try again with a shorter message.');
  });
