@aj-archipelago/cortex 1.1.32 → 1.1.34

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/README.md +98 -1
  2. package/config/dynamicPathwaysConfig.example.json +4 -0
  3. package/config.js +83 -10
  4. package/helper-apps/cortex-autogen/function_app.py +8 -4
  5. package/helper-apps/cortex-autogen/main.py +1 -1
  6. package/helper-apps/cortex-autogen/myautogen.py +187 -28
  7. package/helper-apps/cortex-autogen/prompt_summary.txt +37 -0
  8. package/helper-apps/cortex-autogen/requirements.txt +4 -2
  9. package/helper-apps/cortex-autogen/tools/sasfileuploader.py +66 -0
  10. package/helper-apps/cortex-file-handler/package-lock.json +387 -203
  11. package/helper-apps/cortex-file-handler/package.json +3 -3
  12. package/helper-apps/cortex-whisper-wrapper/.dockerignore +1 -0
  13. package/helper-apps/cortex-whisper-wrapper/app.py +3 -1
  14. package/helper-apps/cortex-whisper-wrapper/requirements.txt +1 -1
  15. package/lib/pathwayManager.js +373 -0
  16. package/lib/pathwayTools.js +52 -7
  17. package/lib/requestExecutor.js +19 -15
  18. package/lib/util.js +4 -2
  19. package/package.json +5 -1
  20. package/pathways/code_human_input.js +47 -0
  21. package/pathways/dynamic/pathways.json +1 -0
  22. package/pathways/flux_image.js +12 -0
  23. package/pathways/index.js +4 -0
  24. package/pathways/styleguide/styleguide.js +1 -0
  25. package/pathways/timeline.js +1 -0
  26. package/server/chunker.js +6 -1
  27. package/server/graphql.js +67 -37
  28. package/server/modelExecutor.js +4 -0
  29. package/server/pathwayResolver.js +9 -5
  30. package/server/plugins/claude3VertexPlugin.js +86 -79
  31. package/server/plugins/gemini15VisionPlugin.js +23 -12
  32. package/server/plugins/geminiVisionPlugin.js +32 -25
  33. package/server/plugins/modelPlugin.js +15 -2
  34. package/server/plugins/openAiChatPlugin.js +1 -1
  35. package/server/plugins/openAiVisionPlugin.js +16 -4
  36. package/server/plugins/runwareAIPlugin.js +81 -0
  37. package/server/rest.js +90 -45
  38. package/server/typeDef.js +33 -15
  39. package/tests/chunkfunction.test.js +15 -1
  40. package/tests/claude3VertexPlugin.test.js +1 -1
  41. package/tests/multimodal_conversion.test.js +328 -0
  42. package/tests/vision.test.js +20 -5
  43. package/helper-apps/cortex-autogen/sasfileuploader.py +0 -93
@@ -3,6 +3,11 @@ import mime from 'mime-types';
3
3
 
4
4
  class GeminiVisionPlugin extends GeminiChatPlugin {
5
5
 
6
+ constructor(pathway, model) {
7
+ super(pathway, model);
8
+ this.isMultiModal = true;
9
+ }
10
+
6
11
  // Override the convertMessagesToGemini method to handle multimodal vision messages
7
12
  // This function can operate on messages in Gemini native format or in OpenAI's format
8
13
  // It will convert the messages to the Gemini format
@@ -15,40 +20,33 @@ class GeminiVisionPlugin extends GeminiChatPlugin {
15
20
  modifiedMessages = messages;
16
21
  } else {
17
22
  messages.forEach(message => {
18
- const { role, author, content } = message;
19
-
20
- // Right now Gemini API has no direct translation for system messages,
21
- // so we insert them as parts of the first user: role message
22
- if (role === 'system') {
23
- modifiedMessages.push({
24
- role: 'user',
25
- parts: [{ text: content }],
26
- });
27
- lastAuthor = 'user';
28
- return;
29
- }
30
-
23
+ let role = message.role;
24
+ const { author, content } = message;
25
+
31
26
  // Convert content to Gemini format, trying to maintain compatibility
32
- const convertPartToGemini = (partString) => {
27
+ const convertPartToGemini = (inputPart) => {
33
28
  try {
34
- const part = JSON.parse(partString);
29
+ const part = typeof inputPart === 'string' ? JSON.parse(inputPart) : inputPart;
30
+ const {type, text, image_url, gcs} = part;
31
+ let fileUrl = gcs || image_url?.url;
32
+
35
33
  if (typeof part === 'string') {
36
- return { text: part };
37
- } else if (part.type === 'text') {
38
- return { text: part.text };
39
- } else if (part.type === 'image_url') {
40
- if (part.image_url.url.startsWith('gs://')) {
34
+ return { text: text };
35
+ } else if (type === 'text') {
36
+ return { text: text };
37
+ } else if (type === 'image_url') {
38
+ if (fileUrl.startsWith('gs://')) {
41
39
  return {
42
40
  fileData: {
43
- mimeType: mime.lookup(part.image_url.url),
44
- fileUri: part.image_url.url
41
+ mimeType: mime.lookup(fileUrl) || 'image/jpeg',
42
+ fileUri: fileUrl
45
43
  }
46
44
  };
47
45
  } else {
48
46
  return {
49
47
  inlineData: {
50
48
  mimeType: 'image/jpeg', // fixed for now as there's no MIME type in the request
51
- data: part.image_url.url.split('base64,')[1]
49
+ data: fileUrl.split('base64,')[1]
52
50
  }
53
51
  };
54
52
  }
@@ -56,10 +54,11 @@ class GeminiVisionPlugin extends GeminiChatPlugin {
56
54
  } catch (e) {
57
55
  // this space intentionally left blank
58
56
  }
59
- return { text: partString };
57
+ return inputPart ? { text: inputPart } : null;
60
58
  };
61
-
59
+
62
60
  const addPartToMessages = (geminiPart) => {
61
+ if (!geminiPart) { return; }
63
62
  // Gemini requires alternating user: and model: messages
64
63
  if ((role === lastAuthor || author === lastAuthor) && modifiedMessages.length > 0) {
65
64
  modifiedMessages[modifiedMessages.length - 1].parts.push(geminiPart);
@@ -74,6 +73,14 @@ class GeminiVisionPlugin extends GeminiChatPlugin {
74
73
  }
75
74
  };
76
75
 
76
+ // Right now Gemini API has no direct translation for system messages,
77
+ // so we insert them as parts of the first user: role message
78
+ if (role === 'system') {
79
+ role = 'user';
80
+ addPartToMessages(convertPartToGemini(content));
81
+ return;
82
+ }
83
+
77
84
  // Content can either be in the "vision" format (array) or in the "chat" format (string)
78
85
  if (Array.isArray(content)) {
79
86
  content.forEach(part => {
@@ -20,6 +20,7 @@ class ModelPlugin {
20
20
  this.pathwayPrompt = pathway.prompt;
21
21
  this.pathwayName = pathway.name;
22
22
  this.promptParameters = {};
23
+ this.isMultiModal = false;
23
24
 
24
25
  // Make all of the parameters defined on the pathway itself available to the prompt
25
26
  for (const [k, v] of Object.entries(pathway)) {
@@ -205,6 +206,15 @@ class ModelPlugin {
205
206
  message.content = '';
206
207
  }
207
208
  });
209
+
210
+ // Flatten content arrays for non-multimodal models
211
+ if (!this.isMultiModal) {
212
+ expandedMessages.forEach(message => {
213
+ if (Array.isArray(message?.content)) {
214
+ message.content = message.content.join("\n");
215
+ }
216
+ });
217
+ }
208
218
 
209
219
  return expandedMessages;
210
220
  }
@@ -288,8 +298,11 @@ class ModelPlugin {
288
298
  return parsedData;
289
299
  } catch (error) {
290
300
  // Log the error and continue
291
- logger.error(error.message || error);
292
- throw error;
301
+ logger.error(`Error in executeRequest for ${this.pathwayName}: ${error.message || error}`);
302
+ if (error.data) {
303
+ logger.error(`Additional error data: ${JSON.stringify(error.data)}`);
304
+ }
305
+ throw new Error(`Execution failed for ${this.pathwayName}: ${error.message || error}`);
293
306
  }
294
307
  }
295
308
 
@@ -112,7 +112,7 @@ class OpenAIChatPlugin extends ModelPlugin {
112
112
  let totalUnits;
113
113
  messages.forEach((message, index) => {
114
114
  //message.content string or array
115
- const content = Array.isArray(message.content) ? message.content.map(item => JSON.stringify(item)).join(', ') : message.content;
115
+ const content = message.content === undefined ? JSON.stringify(message) : (Array.isArray(message.content) ? message.content.map(item => JSON.stringify(item)).join(', ') : message.content);
116
116
  const words = content.split(" ");
117
117
  const { length, units } = this.getLength(content);
118
118
  const preview = words.length < 41 ? content : words.slice(0, 20).join(" ") + " ... " + words.slice(-20).join(" ");
@@ -10,22 +10,34 @@ function safeJsonParse(content) {
10
10
  }
11
11
 
12
12
  class OpenAIVisionPlugin extends OpenAIChatPlugin {
13
+
14
+ constructor(pathway, model) {
15
+ super(pathway, model);
16
+ this.isMultiModal = true;
17
+ }
13
18
 
14
19
  tryParseMessages(messages) {
15
20
  return messages.map(message => {
16
21
  try {
22
+ if (message.role === "tool") {
23
+ return message;
24
+ }
17
25
  if (typeof message.content === 'string') {
18
26
  message.content = safeJsonParse(message.content);
19
27
  }
20
28
  if (Array.isArray(message.content)) {
21
29
  message.content = message.content.map(item => {
22
30
  if (typeof item === 'string') {
23
- return { type: 'text', text: item };
24
- } else {
25
31
  const parsedItem = safeJsonParse(item);
26
- const { type, text, image_url, url } = parsedItem;
27
- return { type, text, image_url: url || image_url };
32
+ return parsedItem.type ? parsedItem : { type: 'text', text: item };
33
+ } else if (typeof item === 'object') {
34
+ const { type, image_url, url } = item;
35
+ if (type === 'image_url') {
36
+ image_url.url = url || image_url.url;
37
+ return { type, image_url };
38
+ }
28
39
  }
40
+ return item;
29
41
  });
30
42
  }
31
43
  } catch (e) {
@@ -0,0 +1,81 @@
1
+ // runwareAIPlugin.js
2
+ import ModelPlugin from "./modelPlugin.js";
3
+ import logger from "../../lib/logger.js";
4
+ import { config } from "../../config.js";
5
+ import { v4 as uuidv4 } from "uuid";
6
+
7
+ class RunwareAiPlugin extends ModelPlugin {
8
+ constructor(pathway, model) {
9
+ super(pathway, model);
10
+ }
11
+
12
+ // Set up parameters specific to the Runware REST API
13
+ getRequestParameters(text, parameters, prompt) {
14
+ const combinedParameters = { ...this.promptParameters, ...parameters };
15
+ const { modelPromptText } = this.getCompiledPrompt(
16
+ text,
17
+ parameters,
18
+ prompt,
19
+ );
20
+
21
+ const requestParameters = {
22
+ data: [
23
+ {
24
+ taskType: "authentication",
25
+ apiKey: config.get("runwareAiApiKey"),
26
+ },
27
+ {
28
+ taskType: "imageInference",
29
+ taskUUID: uuidv4(),
30
+ positivePrompt: modelPromptText,
31
+ width: combinedParameters.width,
32
+ height: combinedParameters.height,
33
+ modelId: "runware:100@1",
34
+ CFGScale: 4.0,
35
+ negative_prompt: combinedParameters.negativePrompt,
36
+ numberResults: combinedParameters.numberResults,
37
+ steps: combinedParameters.steps,
38
+ checkNSFW: false,
39
+ },
40
+ ],
41
+ };
42
+
43
+ return requestParameters;
44
+ }
45
+
46
+ // Execute the request to the Runware REST API
47
+ async execute(text, parameters, prompt, cortexRequest) {
48
+ const requestParameters = this.getRequestParameters(
49
+ text,
50
+ parameters,
51
+ prompt,
52
+ );
53
+
54
+ cortexRequest.data = requestParameters.data;
55
+ cortexRequest.params = requestParameters.params;
56
+
57
+ return this.executeRequest(cortexRequest);
58
+ }
59
+
60
+ // Parse the response from the Runware API
61
+ parseResponse(data) {
62
+ if (data.data) {
63
+ return JSON.stringify(data.data);
64
+ }
65
+ return JSON.stringify(data);
66
+ }
67
+
68
+ // Override the logging function to display the request and response
69
+ logRequestData(data, responseData, prompt) {
70
+ const modelInput = data[1].positivePrompt;
71
+
72
+ logger.verbose(`${modelInput}`);
73
+ logger.verbose(`${this.parseResponse(responseData)}`);
74
+
75
+ prompt &&
76
+ prompt.debugInfo &&
77
+ (prompt.debugInfo += `\n${JSON.stringify(data)}`);
78
+ }
79
+ }
80
+
81
+ export default RunwareAiPlugin;
package/server/rest.js CHANGED
@@ -5,7 +5,20 @@ import pubsub from './pubsub.js';
5
5
  import { requestState } from './requestState.js';
6
6
  import { v4 as uuidv4 } from 'uuid';
7
7
  import logger from '../lib/logger.js';
8
-
8
+ import { getSingleTokenChunks } from './chunker.js';
9
+
10
+ const chunkTextIntoTokens = (() => {
11
+ let partialToken = '';
12
+ return (text, isLast = false, useSingleTokenStream = false) => {
13
+ const tokens = useSingleTokenStream ? getSingleTokenChunks(partialToken + text) : [text];
14
+ if (isLast) {
15
+ partialToken = '';
16
+ return tokens;
17
+ }
18
+ partialToken = useSingleTokenStream ? tokens.pop() : '';
19
+ return tokens;
20
+ };
21
+ })();
9
22
 
10
23
  const processRestRequest = async (server, req, pathway, name, parameterMap = {}) => {
11
24
  const fieldVariableDefs = pathway.typeDef(pathway).restDefinition || [];
@@ -50,7 +63,8 @@ const processRestRequest = async (server, req, pathway, name, parameterMap = {})
50
63
  return resultText;
51
64
  };
52
65
 
53
- const processIncomingStream = (requestId, res, jsonResponse) => {
66
+ const processIncomingStream = (requestId, res, jsonResponse, pathway) => {
67
+ const useSingleTokenStream = pathway.useSingleTokenStream || false;
54
68
 
55
69
  const startStream = (res) => {
56
70
  // Set the headers for streaming
@@ -61,6 +75,14 @@ const processIncomingStream = (requestId, res, jsonResponse) => {
61
75
  }
62
76
 
63
77
  const finishStream = (res, jsonResponse) => {
78
+ // Send the last partial token if it exists
79
+ const lastTokens = chunkTextIntoTokens('', true, useSingleTokenStream);
80
+ if (lastTokens.length > 0) {
81
+ lastTokens.forEach(token => {
82
+ fillJsonResponse(jsonResponse, token, null);
83
+ sendStreamData(jsonResponse);
84
+ });
85
+ }
64
86
 
65
87
  // If we haven't sent the stop message yet, do it now
66
88
  if (jsonResponse.choices?.[0]?.finish_reason !== "stop") {
@@ -85,11 +107,11 @@ const processIncomingStream = (requestId, res, jsonResponse) => {
85
107
  }
86
108
 
87
109
  const sendStreamData = (data) => {
88
- logger.debug(`REST SEND: data: ${JSON.stringify(data)}`);
89
110
  const dataString = (data==='[DONE]') ? data : JSON.stringify(data);
90
111
 
91
112
  if (!res.writableEnded) {
92
113
  res.write(`data: ${dataString}\n\n`);
114
+ logger.debug(`REST SEND: data: ${dataString}`);
93
115
  }
94
116
  }
95
117
 
@@ -115,62 +137,85 @@ const processIncomingStream = (requestId, res, jsonResponse) => {
115
137
  if (subscription) {
116
138
  try {
117
139
  const subPromiseResult = await subscription;
118
- if (subPromiseResult) {
140
+ if (subPromiseResult && pubsub.subscriptions?.[subPromiseResult]) {
119
141
  pubsub.unsubscribe(subPromiseResult);
120
142
  }
121
143
  } catch (error) {
122
- logger.error(`Error unsubscribing from pubsub: ${error}`);
144
+ logger.warn(`Pubsub unsubscribe threw error: ${error}`);
123
145
  }
124
146
  }
125
147
  }
126
148
 
127
- if (data.requestProgress.requestId === requestId) {
128
- logger.debug(`REQUEST_PROGRESS received progress: ${data.requestProgress.progress}, data: ${data.requestProgress.data}`);
129
-
130
- const progress = data.requestProgress.progress;
131
- const progressData = data.requestProgress.data;
132
-
133
- try {
134
- const messageJson = JSON.parse(progressData);
135
- if (messageJson.error) {
136
- logger.error(`Stream error REST: ${messageJson?.error?.message || 'unknown error'}`);
137
- safeUnsubscribe();
138
- finishStream(res, jsonResponse);
139
- return;
140
- } else if (messageJson.choices) {
141
- const { text, delta, finish_reason } = messageJson.choices[0];
142
-
143
- if (messageJson.object === 'text_completion') {
144
- fillJsonResponse(jsonResponse, text, finish_reason);
145
- } else {
146
- fillJsonResponse(jsonResponse, delta.content, finish_reason);
147
- }
148
- } else if (messageJson.candidates) {
149
- const { content, finishReason } = messageJson.candidates[0];
150
- fillJsonResponse(jsonResponse, content.parts[0].text, finishReason);
151
- } else if (messageJson.content) {
152
- const text = messageJson.content?.[0]?.text || '';
153
- const finishReason = messageJson.stop_reason;
154
- fillJsonResponse(jsonResponse, text, finishReason);
155
- } else {
156
- fillJsonResponse(jsonResponse, messageJson, null);
157
- }
158
- } catch (error) {
159
- //logger.info(`progressData not JSON: ${progressData}`);
160
- fillJsonResponse(jsonResponse, progressData, "stop");
161
- }
162
- if (progress === 1 && progressData.trim() === "[DONE]") {
149
+ const processStringData = (stringData) => {
150
+ if (progress === 1 && stringData.trim() === "[DONE]") {
151
+ fillJsonResponse(jsonResponse, stringData, "stop");
163
152
  safeUnsubscribe();
164
153
  finishStream(res, jsonResponse);
165
154
  return;
166
155
  }
167
156
 
168
- sendStreamData(jsonResponse);
157
+ chunkTextIntoTokens(stringData, false, useSingleTokenStream).forEach(token => {
158
+ fillJsonResponse(jsonResponse, token, null);
159
+ sendStreamData(jsonResponse);
160
+ });
169
161
 
170
162
  if (progress === 1) {
171
163
  safeUnsubscribe();
172
164
  finishStream(res, jsonResponse);
173
165
  }
166
+
167
+ }
168
+
169
+ if (data.requestProgress.requestId !== requestId) return;
170
+
171
+ logger.debug(`REQUEST_PROGRESS received progress: ${data.requestProgress.progress}, data: ${data.requestProgress.data}`);
172
+
173
+ const { progress, data: progressData } = data.requestProgress;
174
+
175
+ try {
176
+ const messageJson = JSON.parse(progressData);
177
+
178
+ if (typeof messageJson === 'string') {
179
+ processStringData(messageJson);
180
+ return;
181
+ }
182
+
183
+ if (messageJson.error) {
184
+ logger.error(`Stream error REST: ${messageJson?.error?.message || 'unknown error'}`);
185
+ safeUnsubscribe();
186
+ finishStream(res, jsonResponse);
187
+ return;
188
+ }
189
+
190
+ let content = '';
191
+ if (messageJson.choices) {
192
+ const { text, delta } = messageJson.choices[0];
193
+ content = messageJson.object === 'text_completion' ? text : delta.content;
194
+ } else if (messageJson.candidates) {
195
+ content = messageJson.candidates[0].content.parts[0].text;
196
+ } else if (messageJson.content) {
197
+ content = messageJson.content?.[0]?.text || '';
198
+ } else {
199
+ content = messageJson;
200
+ }
201
+
202
+ chunkTextIntoTokens(content, false, useSingleTokenStream).forEach(token => {
203
+ fillJsonResponse(jsonResponse, token, null);
204
+ sendStreamData(jsonResponse);
205
+ });
206
+ } catch (error) {
207
+ logger.debug(`progressData not JSON: ${progressData}`);
208
+ if (typeof progressData === 'string') {
209
+ processStringData(progressData);
210
+ } else {
211
+ fillJsonResponse(jsonResponse, progressData, "stop");
212
+ sendStreamData(jsonResponse);
213
+ }
214
+ }
215
+
216
+ if (progress === 1) {
217
+ safeUnsubscribe();
218
+ finishStream(res, jsonResponse);
174
219
  }
175
220
  });
176
221
 
@@ -254,7 +299,7 @@ function buildRestEndpoints(pathways, app, server, config) {
254
299
  jsonResponse.choices[0].finish_reason = null;
255
300
  //jsonResponse.object = "text_completion.chunk";
256
301
 
257
- processIncomingStream(resultText, res, jsonResponse);
302
+ processIncomingStream(resultText, res, jsonResponse, pathway);
258
303
  } else {
259
304
  const requestId = uuidv4();
260
305
  jsonResponse.id = `cmpl-${requestId}`;
@@ -306,7 +351,7 @@ function buildRestEndpoints(pathways, app, server, config) {
306
351
  }
307
352
  jsonResponse.object = "chat.completion.chunk";
308
353
 
309
- processIncomingStream(resultText, res, jsonResponse);
354
+ processIncomingStream(resultText, res, jsonResponse, pathway);
310
355
  } else {
311
356
  const requestId = uuidv4();
312
357
  jsonResponse.id = `chatcmpl-${requestId}`;
@@ -346,4 +391,4 @@ function buildRestEndpoints(pathways, app, server, config) {
346
391
  }
347
392
  }
348
393
 
349
- export { buildRestEndpoints };
394
+ export { buildRestEndpoints };
package/server/typeDef.js CHANGED
@@ -28,7 +28,26 @@ const getGraphQlType = (value) => {
28
28
  }
29
29
  };
30
30
 
31
- const typeDef = (pathway) => {
31
+ const getMessageTypeDefs = () => {
32
+ const messageType = `input Message { role: String, content: String, name: String }`;
33
+ const multiMessageType = `input MultiMessage { role: String, content: [String], name: String }`;
34
+
35
+ return `${messageType}\n\n${multiMessageType}`;
36
+ };
37
+
38
+ const getPathwayTypeDef = (name, returnType) => {
39
+ return `type ${name} {
40
+ debug: String
41
+ result: ${returnType}
42
+ previousResult: String
43
+ warnings: [String]
44
+ errors: [String]
45
+ contextId: String
46
+ tool: String
47
+ }`
48
+ };
49
+
50
+ const getPathwayTypeDefAndExtendQuery = (pathway) => {
32
51
  const { name, objName, defaultInputParameters, inputParameters, format } = pathway;
33
52
 
34
53
  const fields = format ? format.match(/\b(\w+)\b/g) : null;
@@ -36,24 +55,13 @@ const typeDef = (pathway) => {
36
55
 
37
56
  const typeName = fields ? `${objName}Result` : `String`;
38
57
 
39
- const messageType = `input Message { role: String, content: String, name: String }`;
40
- const multiMessageType = `input MultiMessage { role: String, content: [String], name: String }`;
41
-
42
58
  const type = fields ? `type ${typeName} {
43
59
  ${fieldsStr}
44
60
  }` : ``;
45
61
 
46
- const resultStr = pathway.list ? `[${typeName}]` : typeName;
62
+ const returnType = pathway.list ? `[${typeName}]` : typeName;
47
63
 
48
- const responseType = `type ${objName} {
49
- debug: String
50
- result: ${resultStr}
51
- previousResult: String
52
- warnings: [String]
53
- errors: [String]
54
- contextId: String
55
- tool: String
56
- }`;
64
+ const responseType = getPathwayTypeDef(objName, returnType);
57
65
 
58
66
  const params = { ...defaultInputParameters, ...inputParameters };
59
67
 
@@ -71,7 +79,7 @@ const typeDef = (pathway) => {
71
79
  };
72
80
  });
73
81
 
74
- const gqlDefinition = `${messageType}\n\n${multiMessageType}\n\n${type}\n\n${responseType}\n\nextend type Query {${name}(${paramsStr}): ${objName}}`;
82
+ const gqlDefinition = `${type}\n\n${responseType}\n\nextend type Query {${name}(${paramsStr}): ${objName}}`;
75
83
 
76
84
  return {
77
85
  gqlDefinition,
@@ -79,6 +87,16 @@ const typeDef = (pathway) => {
79
87
  };
80
88
  };
81
89
 
90
+ const typeDef = (pathway) => {
91
+ return getPathwayTypeDefAndExtendQuery(pathway);
92
+ };
93
+
94
+ const userPathwayInputParameters = `text: String`;
95
+
96
+
82
97
  export {
83
98
  typeDef,
99
+ getMessageTypeDefs,
100
+ getPathwayTypeDef,
101
+ userPathwayInputParameters,
84
102
  };
@@ -1,5 +1,5 @@
1
1
  import test from 'ava';
2
- import { getSemanticChunks, determineTextFormat } from '../server/chunker.js';
2
+ import { getSemanticChunks, determineTextFormat, getSingleTokenChunks } from '../server/chunker.js';
3
3
  import { encode } from '../lib/encodeCache.js';
4
4
 
5
5
  const testText = `Lorem ipsum dolor sit amet, consectetur adipiscing elit. In id erat sem. Phasellus ac dapibus purus, in fermentum nunc. Mauris quis rutrum magna. Quisque rutrum, augue vel blandit posuere, augue magna convallis turpis, nec elementum augue mauris sit amet nunc. Aenean sit amet leo est. Nunc ante ex, blandit et felis ut, iaculis lacinia est. Phasellus dictum orci id libero ullamcorper tempor.
@@ -207,4 +207,18 @@ test('should return identical text that chunker was passed, given weird spaces a
207
207
  t.assert(chunks.every(chunk => encode(chunk).length <= maxChunkToken)); //check chunk size
208
208
  const recomposedText = chunks.reduce((acc, chunk) => acc + chunk, '');
209
209
  t.assert(recomposedText === testTextShortWeirdSpaces); //check recomposition
210
+ });
211
+
212
+ test('should correctly split text into single token chunks', t => {
213
+ const testString = 'Hello, world!';
214
+ const chunks = getSingleTokenChunks(testString);
215
+
216
+ // Check that each chunk is a single token
217
+ t.true(chunks.every(chunk => encode(chunk).length === 1));
218
+
219
+ // Check that joining the chunks recreates the original string
220
+ t.is(chunks.join(''), testString);
221
+
222
+ // Check specific tokens (this may need adjustment based on your tokenizer)
223
+ t.deepEqual(chunks, ['Hello', ',', ' world', '!']);
210
224
  });
@@ -199,7 +199,7 @@ test('convertMessagesToClaudeVertex system message with user message', async (t)
199
199
  test('convertMessagesToClaudeVertex user message with unsupported image type', async (t) => {
200
200
  const plugin = new Claude3VertexPlugin(pathway, model);
201
201
  // Test with unsupported image type
202
- const messages = [{ role: 'user', content: { type: 'image_url', image_url: 'http://example.com/image.svg' } }];
202
+ const messages = [{ role: 'user', content: { type: 'image_url', image_url: 'https://unec.edu.az/application/uploads/2014/12/pdf-sample.pdf' } }];
203
203
  const output = await plugin.convertMessagesToClaudeVertex(messages);
204
204
  t.deepEqual(output, { system: '', modifiedMessages: [{role: 'user', content: [] }] });
205
205
  });