@aj-archipelago/cortex 1.3.35 β†’ 1.3.36

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47) hide show
  1. package/README.md +9 -9
  2. package/config/default.example.json +0 -20
  3. package/config.js +160 -6
  4. package/lib/pathwayTools.js +79 -1
  5. package/lib/requestExecutor.js +3 -1
  6. package/lib/util.js +7 -0
  7. package/package.json +1 -1
  8. package/pathways/basePathway.js +2 -0
  9. package/pathways/call_tools.js +379 -0
  10. package/pathways/system/entity/memory/shared/sys_memory_helpers.js +1 -1
  11. package/pathways/system/entity/memory/sys_search_memory.js +2 -2
  12. package/pathways/system/entity/sys_entity_agent.js +289 -0
  13. package/pathways/system/entity/sys_generator_memory.js +1 -1
  14. package/pathways/system/entity/sys_generator_results.js +1 -1
  15. package/pathways/system/entity/sys_get_entities.js +19 -0
  16. package/pathways/system/entity/tools/shared/sys_entity_tools.js +150 -0
  17. package/pathways/system/entity/tools/sys_tool_bing_search.js +147 -0
  18. package/pathways/system/entity/tools/sys_tool_callmodel.js +62 -0
  19. package/pathways/system/entity/tools/sys_tool_coding.js +53 -0
  20. package/pathways/system/entity/tools/sys_tool_codingagent.js +100 -0
  21. package/pathways/system/entity/tools/sys_tool_cognitive_search.js +231 -0
  22. package/pathways/system/entity/tools/sys_tool_image.js +57 -0
  23. package/pathways/system/entity/tools/sys_tool_readfile.js +119 -0
  24. package/pathways/system/entity/tools/sys_tool_reasoning.js +75 -0
  25. package/pathways/system/entity/tools/sys_tool_remember.js +59 -0
  26. package/pathways/vision.js +1 -1
  27. package/server/modelExecutor.js +4 -12
  28. package/server/pathwayResolver.js +53 -40
  29. package/server/plugins/azureBingPlugin.js +42 -4
  30. package/server/plugins/azureCognitivePlugin.js +40 -12
  31. package/server/plugins/claude3VertexPlugin.js +67 -18
  32. package/server/plugins/modelPlugin.js +3 -2
  33. package/server/plugins/openAiReasoningPlugin.js +3 -3
  34. package/server/plugins/openAiReasoningVisionPlugin.js +48 -0
  35. package/server/plugins/openAiVisionPlugin.js +192 -7
  36. package/tests/agentic.test.js +256 -0
  37. package/tests/call_tools.test.js +216 -0
  38. package/tests/claude3VertexToolConversion.test.js +78 -0
  39. package/tests/mocks.js +11 -3
  40. package/tests/multimodal_conversion.test.js +1 -1
  41. package/tests/openAiToolPlugin.test.js +242 -0
  42. package/pathways/test_palm_chat.js +0 -31
  43. package/server/plugins/palmChatPlugin.js +0 -233
  44. package/server/plugins/palmCodeCompletionPlugin.js +0 -45
  45. package/server/plugins/palmCompletionPlugin.js +0 -135
  46. package/tests/palmChatPlugin.test.js +0 -219
  47. package/tests/palmCompletionPlugin.test.js +0 -58
@@ -0,0 +1,75 @@
1
+ // sys_tool_reasoning.js
2
+ // Entity tool that provides advanced reasoning and planning capabilities
3
+
4
+ import { Prompt } from '../../../../server/prompt.js';
5
+
6
+ export default {
7
+ prompt:
8
+ [
9
+ new Prompt({ messages: [
10
+ {"role": "system", "content": `You are the part of an AI entity named {{aiName}} that provides advanced reasoning and planning capabilities. You excel at breaking down complex problems, creating detailed plans, and providing thorough analysis. Think carefully about the latest request and provide a detailed, well thought out, carefully reviewed response.\n{{renderTemplate AI_DATETIME}}`},
11
+ "{{chatHistory}}",
12
+ ]}),
13
+ ],
14
+ inputParameters: {
15
+ chatHistory: [{role: '', content: []}],
16
+ contextId: ``,
17
+ aiName: "Jarvis",
18
+ language: "English",
19
+ },
20
+ max_tokens: 100000,
21
+ model: 'oai-o3',
22
+ useInputChunking: false,
23
+ enableDuplicateRequests: false,
24
+ timeout: 600,
25
+ toolDefinition: [{
26
+ type: "function",
27
+ icon: "πŸ—ΊοΈ",
28
+ function: {
29
+ name: "Plan",
30
+ description: "Use specifically to create a thorough, well thought out, step by step plan to accomplish a task. You should always use this tool when you're planning to do something complex or something that might require multiple steps.",
31
+ parameters: {
32
+ type: "object",
33
+ properties: {
34
+ detailedInstructions: {
35
+ type: "string",
36
+ description: "Detailed instructions about what you need the tool to do"
37
+ },
38
+ userMessage: {
39
+ type: "string",
40
+ description: "A user-friendly message that describes what you're doing with this tool"
41
+ }
42
+ },
43
+ required: ["detailedInstructions", "userMessage"]
44
+ }
45
+ }
46
+ },
47
+ {
48
+ type: "function",
49
+ icon: "🧠",
50
+ function: {
51
+ name: "Reason",
52
+ description: "Employ for advanced reasoning, scientific analysis, evaluating evidence, strategic planning, problem-solving, logic puzzles, mathematical calculations, or any questions that require careful thought or complex choices.",
53
+ parameters: {
54
+ type: "object",
55
+ properties: {
56
+ detailedInstructions: {
57
+ type: "string",
58
+ description: "Detailed instructions about what you need the tool to do"
59
+ },
60
+ userMessage: {
61
+ type: "string",
62
+ description: "A user-friendly message that describes what you're doing with this tool"
63
+ }
64
+ },
65
+ required: ["detailedInstructions", "userMessage"]
66
+ }
67
+ }
68
+ }],
69
+
70
+ executePathway: async ({args, runAllPrompts, resolver}) => {
71
+ let result = await runAllPrompts({ ...args, stream: false });
72
+ resolver.tool = JSON.stringify({ toolUsed: "reasoning" });
73
+ return result;
74
+ }
75
+ }
@@ -0,0 +1,59 @@
1
+ // sys_tool_remember.js
2
+ // Entity tool that looks for relevant information in the entity's memory
3
+ import { callPathway } from '../../../../lib/pathwayTools.js';
4
+
5
+ export default {
6
+ prompt:
7
+ [],
8
+ model: 'oai-gpt41-mini',
9
+
10
+ toolDefinition: [{
11
+ type: "function",
12
+ icon: "🧩",
13
+ function: {
14
+ name: "Remember",
15
+ description: "Use specifically to search your long term memory for information or details that may not be present in your short term memory. You should always use this tool before you tell the user you don't remember something. If the user asks you a question (like what's your favorite color) and you don't remember the answer, use this tool to search your long term memory for the answer before you tell the user you don't have one.",
16
+ parameters: {
17
+ type: "object",
18
+ properties: {
19
+ detailedInstructions: {
20
+ type: "string",
21
+ description: "Detailed description of what you want to see if you remember"
22
+ },
23
+ userMessage: {
24
+ type: "string",
25
+ description: "A user-friendly message that describes what you're doing with this tool"
26
+ }
27
+ },
28
+ required: ["detailedInstructions", "userMessage"]
29
+ }
30
+ }
31
+ },
32
+ {
33
+ type: "function",
34
+ icon: "🧩",
35
+ function: {
36
+ name: "LoadMemoryContext",
37
+ description: "This tool quickly preloads the memory context for this turn of the conversation. It's typically automatically used by the system, but you can use it if you need to.",
38
+ parameters: {
39
+ type: "object",
40
+ properties: {
41
+ detailedInstructions: {
42
+ type: "string",
43
+ description: "Detailed instructions about what you need the tool to do"
44
+ },
45
+ userMessage: {
46
+ type: "string",
47
+ description: "A user-friendly message that describes what you're doing with this tool"
48
+ }
49
+ },
50
+ required: ["detailedInstructions", "userMessage"]
51
+ }
52
+ }
53
+ }],
54
+
55
+ executePathway: async ({args, runAllPrompts, resolver}) => {
56
+ resolver.tool = JSON.stringify({ toolUsed: "memory" });
57
+ return await callPathway('sys_search_memory', { ...args, stream: false, section: 'memoryAll', updateContext: true });
58
+ }
59
+ }
@@ -13,7 +13,7 @@ export default {
13
13
  contextId: ``,
14
14
  },
15
15
  max_tokens: 1024,
16
- model: 'oai-gpt4o',
16
+ model: 'oai-gpt41',
17
17
  useInputChunking: false,
18
18
  enableDuplicateRequests: false,
19
19
  timeout: 600,
@@ -7,9 +7,6 @@ import AzureTranslatePlugin from './plugins/azureTranslatePlugin.js';
7
7
  import OpenAIWhisperPlugin from './plugins/openAiWhisperPlugin.js';
8
8
  import OpenAIChatExtensionPlugin from './plugins/openAiChatExtensionPlugin.js';
9
9
  import LocalModelPlugin from './plugins/localModelPlugin.js';
10
- import PalmChatPlugin from './plugins/palmChatPlugin.js';
11
- import PalmCompletionPlugin from './plugins/palmCompletionPlugin.js';
12
- import PalmCodeCompletionPlugin from './plugins/palmCodeCompletionPlugin.js';
13
10
  import CohereGeneratePlugin from './plugins/cohereGeneratePlugin.js';
14
11
  import CohereSummarizePlugin from './plugins/cohereSummarizePlugin.js';
15
12
  import AzureCognitivePlugin from './plugins/azureCognitivePlugin.js';
@@ -18,6 +15,7 @@ import OpenAIImagePlugin from './plugins/openAiImagePlugin.js';
18
15
  import OpenAIDallE3Plugin from './plugins/openAiDallE3Plugin.js';
19
16
  import OpenAIVisionPlugin from './plugins/openAiVisionPlugin.js';
20
17
  import OpenAIReasoningPlugin from './plugins/openAiReasoningPlugin.js';
18
+ import OpenAIReasoningVisionPlugin from './plugins/openAiReasoningVisionPlugin.js';
21
19
  import GeminiChatPlugin from './plugins/geminiChatPlugin.js';
22
20
  import GeminiVisionPlugin from './plugins/geminiVisionPlugin.js';
23
21
  import Gemini15ChatPlugin from './plugins/gemini15ChatPlugin.js';
@@ -70,15 +68,6 @@ class ModelExecutor {
70
68
  case 'LOCAL-CPP-MODEL':
71
69
  plugin = new LocalModelPlugin(pathway, model);
72
70
  break;
73
- case 'PALM-CHAT':
74
- plugin = new PalmChatPlugin(pathway, model);
75
- break;
76
- case 'PALM-COMPLETION':
77
- plugin = new PalmCompletionPlugin(pathway, model);
78
- break;
79
- case 'PALM-CODE-COMPLETION':
80
- plugin = new PalmCodeCompletionPlugin(pathway, model);
81
- break;
82
71
  case 'COHERE-GENERATE':
83
72
  plugin = new CohereGeneratePlugin(pathway, model);
84
73
  break;
@@ -91,6 +80,9 @@ class ModelExecutor {
91
80
  case 'OPENAI-REASONING':
92
81
  plugin = new OpenAIReasoningPlugin(pathway, model);
93
82
  break;
83
+ case 'OPENAI-REASONING-VISION':
84
+ plugin = new OpenAIReasoningVisionPlugin(pathway, model);
85
+ break;
94
86
  case 'GEMINI-CHAT':
95
87
  plugin = new GeminiChatPlugin(pathway, model);
96
88
  break;
@@ -73,19 +73,18 @@ class PathwayResolver {
73
73
  this.pathwayPrompt = pathway.prompt;
74
74
  }
75
75
 
76
+ publishNestedRequestProgress(requestProgress) {
77
+ if (requestProgress.progress === 1 && this.rootRequestId) {
78
+ delete requestProgress.progress;
79
+ }
80
+ publishRequestProgress({...requestProgress, info: this.tool || ''});
81
+ }
82
+
76
83
  // This code handles async and streaming responses for either long-running
77
84
  // tasks or streaming model responses
78
85
  async asyncResolve(args) {
79
- let streamErrorOccurred = false;
80
86
  let responseData = null;
81
87
 
82
- const publishNestedRequestProgress = (requestProgress) => {
83
- if (requestProgress.progress === 1 && this.rootRequestId) {
84
- delete requestProgress.progress;
85
- }
86
- publishRequestProgress({...requestProgress, info: this.tool || ''});
87
- }
88
-
89
88
  try {
90
89
  responseData = await this.executePathway(args);
91
90
  }
@@ -102,8 +101,45 @@ class PathwayResolver {
102
101
 
103
102
  // If the response is a stream, handle it as streaming response
104
103
  if (responseData && typeof responseData.on === 'function') {
104
+ await this.handleStream(responseData);
105
+ } else {
106
+ const { completedCount = 1, totalCount = 1 } = requestState[this.requestId];
107
+ requestState[this.requestId].data = responseData;
108
+
109
+ // some models don't support progress updates
110
+ if (!modelTypesExcludedFromProgressUpdates.includes(this.model.type)) {
111
+ this.publishNestedRequestProgress({
112
+ requestId: this.rootRequestId || this.requestId,
113
+ progress: Math.min(completedCount, totalCount) / totalCount,
114
+ // Clients expect these to be strings
115
+ data: JSON.stringify(responseData || ''),
116
+ info: this.tool || ''
117
+ });
118
+ }
119
+ }
120
+ }
121
+
122
+ mergeResults(mergeData) {
123
+ if (mergeData) {
124
+ this.previousResult = mergeData.previousResult ? mergeData.previousResult : this.previousResult;
125
+ this.warnings = [...this.warnings, ...(mergeData.warnings || [])];
126
+ this.errors = [...this.errors, ...(mergeData.errors || [])];
127
+ try {
128
+ const mergeDataTool = typeof mergeData.tool === 'string' ? JSON.parse(mergeData.tool) : mergeData.tool || {};
129
+ const thisTool = typeof this.tool === 'string' ? JSON.parse(this.tool) : this.tool || {};
130
+ this.tool = JSON.stringify({ ...thisTool, ...mergeDataTool });
131
+ } catch (error) {
132
+ logger.warn('Error merging pathway resolver tool objects: ' + error);
133
+ }
134
+ }
135
+ }
136
+
137
+ async handleStream(response) {
138
+ let streamErrorOccurred = false;
139
+
140
+ if (response && typeof response.on === 'function') {
105
141
  try {
106
- const incomingMessage = responseData;
142
+ const incomingMessage = response;
107
143
  let streamEnded = false;
108
144
 
109
145
  const onParse = (event) => {
@@ -133,7 +169,7 @@ class PathwayResolver {
133
169
 
134
170
  try {
135
171
  if (!streamEnded && requestProgress.data) {
136
- publishNestedRequestProgress(requestProgress);
172
+ this.publishNestedRequestProgress(requestProgress);
137
173
  streamEnded = requestProgress.progress === 1;
138
174
  }
139
175
  } catch (error) {
@@ -173,35 +209,6 @@ class PathwayResolver {
173
209
  } else {
174
210
  return;
175
211
  }
176
- } else {
177
- const { completedCount = 1, totalCount = 1 } = requestState[this.requestId];
178
- requestState[this.requestId].data = responseData;
179
-
180
- // some models don't support progress updates
181
- if (!modelTypesExcludedFromProgressUpdates.includes(this.model.type)) {
182
- await publishNestedRequestProgress({
183
- requestId: this.rootRequestId || this.requestId,
184
- progress: Math.min(completedCount, totalCount) / totalCount,
185
- // Clients expect these to be strings
186
- data: JSON.stringify(responseData || ''),
187
- info: this.tool || ''
188
- });
189
- }
190
- }
191
- }
192
-
193
- mergeResults(mergeData) {
194
- if (mergeData) {
195
- this.previousResult = mergeData.previousResult ? mergeData.previousResult : this.previousResult;
196
- this.warnings = [...this.warnings, ...(mergeData.warnings || [])];
197
- this.errors = [...this.errors, ...(mergeData.errors || [])];
198
- try {
199
- const mergeDataTool = typeof mergeData.tool === 'string' ? JSON.parse(mergeData.tool) : mergeData.tool || {};
200
- const thisTool = typeof this.tool === 'string' ? JSON.parse(this.tool) : this.tool || {};
201
- this.tool = JSON.stringify({ ...thisTool, ...mergeDataTool });
202
- } catch (error) {
203
- logger.warn('Error merging pathway resolver tool objects: ' + error);
204
- }
205
212
  }
206
213
  }
207
214
 
@@ -212,7 +219,7 @@ class PathwayResolver {
212
219
  requestState[this.requestId] = {}
213
220
  }
214
221
  this.rootRequestId = args.rootRequestId ?? null;
215
- requestState[this.requestId] = { ...requestState[this.requestId], args, resolver: this.asyncResolve.bind(this) };
222
+ requestState[this.requestId] = { ...requestState[this.requestId], args, resolver: this.asyncResolve.bind(this), pathwayResolver: this };
216
223
  return this.requestId;
217
224
  }
218
225
  else {
@@ -297,6 +304,12 @@ class PathwayResolver {
297
304
  break;
298
305
  }
299
306
 
307
+ // if data is a stream, handle it
308
+ if (data && typeof data.on === 'function') {
309
+ await this.handleStream(data);
310
+ return data;
311
+ }
312
+
300
313
  data = await this.responseParser.parse(data);
301
314
  if (data !== null) {
302
315
  break;
@@ -7,14 +7,42 @@ class AzureBingPlugin extends ModelPlugin {
7
7
  super(pathway, model);
8
8
  }
9
9
 
10
- getRequestParameters(text) {
10
+ getRequestParameters(text, parameters = {}) {
11
+ const {
12
+ q, // Query string (takes precedence over text parameter)
13
+ responseFilter, // Comma-separated list of answer types to include/exclude
14
+ freshness, // 'day', 'week', 'month', or date range 'YYYY-MM-DD..YYYY-MM-DD'
15
+ answerCount, // Number of top answers to return
16
+ promote, // Comma-separated list of answer types to promote
17
+ count, // Number of webpages to return (default 10)
18
+ safeSearch = 'Moderate', // 'Off', 'Moderate', or 'Strict'
19
+ } = parameters;
20
+
11
21
  const requestParameters = {
12
- data: [
13
- ],
22
+ data: [],
14
23
  params: {
15
- q: text,
24
+ q: q || text, // Use q if provided, otherwise fall back to text
16
25
  }
17
26
  };
27
+
28
+ // Add optional parameters if they exist
29
+ if (responseFilter) {
30
+ requestParameters.params.responseFilter = responseFilter;
31
+ }
32
+ if (freshness) {
33
+ requestParameters.params.freshness = freshness;
34
+ }
35
+ if (answerCount) {
36
+ requestParameters.params.answerCount = answerCount;
37
+ }
38
+ if (promote) {
39
+ requestParameters.params.promote = promote;
40
+ }
41
+ if (count) {
42
+ requestParameters.params.count = count;
43
+ }
44
+ requestParameters.params.safeSearch = safeSearch;
45
+
18
46
  return requestParameters;
19
47
  }
20
48
 
@@ -28,6 +56,16 @@ class AzureBingPlugin extends ModelPlugin {
28
56
  cortexRequest.params = requestParameters.params;
29
57
  cortexRequest.method = 'GET';
30
58
 
59
+ // Step 1: Strip any existing endpoint after version number
60
+ cortexRequest.url = cortexRequest.url.replace(/\/v(\d+\.\d+)\/.*$/, '/v$1');
61
+
62
+ // Step 2: Add appropriate endpoint based on searchType
63
+ if (parameters.searchType === 'news') {
64
+ cortexRequest.url += '/news/search';
65
+ } else {
66
+ cortexRequest.url += '/search';
67
+ }
68
+
31
69
  return this.executeRequest(cortexRequest);
32
70
  }
33
71
 
@@ -34,7 +34,7 @@ class AzureCognitivePlugin extends ModelPlugin {
34
34
  async getRequestParameters(text, parameters, prompt, mode, indexName, savedContextId, cortexRequest) {
35
35
  const combinedParameters = { ...this.promptParameters, ...parameters };
36
36
  const { modelPromptText } = this.getCompiledPrompt(text, combinedParameters, prompt);
37
- const { inputVector, calculateInputVector, privateData, filter, docId, title, chunkNo, chatId } = combinedParameters;
37
+ const { inputVector, calculateInputVector, privateData, filter, docId, title, chunkNo, chatId, semanticConfiguration } = combinedParameters;
38
38
  const data = {};
39
39
 
40
40
  if (mode == 'delete') {
@@ -105,18 +105,45 @@ class AzureCognitivePlugin extends ModelPlugin {
105
105
  }
106
106
 
107
107
  //default mode, 'search'
108
- if (inputVector) {
109
- data.vectors = [
110
- {
111
- "value": typeof inputVector === 'string' ? JSON.parse(inputVector) : inputVector,
112
- "fields": "contentVector",
113
- "k": 20
114
- }
115
- ];
108
+ data.search = modelPromptText;
109
+ data.top = parameters.top || 50;
110
+ data.skip = 0;
111
+ data.count = true;
112
+
113
+ // If semanticConfiguration is provided, switch to semantic mode
114
+ if (semanticConfiguration) {
115
+ data.queryType = "semantic";
116
+ data.semanticConfiguration = semanticConfiguration; // Use provided value directly
117
+ data.captions = "extractive";
118
+ data.answers = "extractive|count-3";
119
+ data.queryLanguage = "en-us";
120
+ // Omit top-level queryRewrites as it caused issues before
121
+
122
+ if (inputVector) {
123
+ // Use vectorQueries for semantic hybrid search
124
+ data.vectorQueries = [
125
+ {
126
+ "kind": "text",
127
+ "text": modelPromptText, // Use the search text for the vector query
128
+ "fields": "contentVector", // Ensure this field name is correct for your index
129
+ "k": parameters.k || 20, // Use parameter k or default
130
+ "queryRewrites": "generative" // Add queryRewrites inside vector query
131
+ }
132
+ ];
133
+ delete data.vectors; // Remove the standard vector field
134
+ }
116
135
  } else {
117
- data.search = modelPromptText;
118
- data.top = parameters.top || 50;
119
- data.skip = 0;
136
+ // Standard non-semantic search
137
+ if (inputVector) {
138
+ data.vectors = [
139
+ {
140
+ "value": typeof inputVector === 'string' ? JSON.parse(inputVector) : inputVector,
141
+ "fields": "contentVector",
142
+ "k": parameters.k || 20
143
+ }
144
+ ];
145
+ }
146
+ // Handle titleOnly only for non-semantic search
120
147
  if (parameters.titleOnly) {
121
148
  switch(indexName){
122
149
  case 'indexcortex':
@@ -130,6 +157,7 @@ class AzureCognitivePlugin extends ModelPlugin {
130
157
  }
131
158
  }
132
159
 
160
+ // Apply filters (common to both semantic and non-semantic)
133
161
  filter && (data.filter = filter);
134
162
  if (indexName == 'indexcortex') { //if private, filter by owner via contextId //privateData &&
135
163
  data.filter && (data.filter = data.filter + ' and ');
@@ -115,13 +115,32 @@ class Claude3VertexPlugin extends OpenAIVisionPlugin {
115
115
 
116
116
  const { content } = data;
117
117
 
118
- // if the response is an array, return the text property of the first item
119
- // if the type property is 'text'
120
- if (content && Array.isArray(content) && content[0].type === "text") {
121
- return content[0].text;
122
- } else {
123
- return data;
118
+ // Handle tool use responses from Claude
119
+ if (content && Array.isArray(content)) {
120
+ const toolUses = content.filter(item => item.type === "tool_use");
121
+ if (toolUses.length > 0) {
122
+ return {
123
+ role: "assistant",
124
+ content: "",
125
+ tool_calls: toolUses.map(toolUse => ({
126
+ id: toolUse.id,
127
+ type: "function",
128
+ function: {
129
+ name: toolUse.name,
130
+ arguments: JSON.stringify(toolUse.input)
131
+ }
132
+ }))
133
+ };
134
+ }
135
+
136
+ // Handle regular text responses
137
+ const textContent = content.find(item => item.type === "text");
138
+ if (textContent) {
139
+ return textContent.text;
140
+ }
124
141
  }
142
+
143
+ return data;
125
144
  }
126
145
 
127
146
  // This code converts messages to the format required by the Claude Vertex API
@@ -271,17 +290,44 @@ class Claude3VertexPlugin extends OpenAIVisionPlugin {
271
290
  });
272
291
  }
273
292
 
293
+ if (parameters.tool_choice) {
294
+ // Convert OpenAI tool_choice format to Claude format
295
+ if (typeof parameters.tool_choice === 'string') {
296
+ // Handle string values: auto, required, none
297
+ if (parameters.tool_choice === 'required') {
298
+ requestParameters.tool_choice = { type: 'any' }; // OpenAI's 'required' maps to Claude's 'any'
299
+ } else if (parameters.tool_choice === 'auto') {
300
+ requestParameters.tool_choice = { type: 'auto' };
301
+ } else if (parameters.tool_choice === 'none') {
302
+ requestParameters.tool_choice = { type: 'none' };
303
+ }
304
+ } else if (parameters.tool_choice.type === "function") {
305
+ // Handle function-specific tool choice
306
+ requestParameters.tool_choice = {
307
+ type: "tool",
308
+ name: parameters.tool_choice.function.name
309
+ };
310
+ }
311
+ }
312
+
274
313
  // If there are function calls in messages, generate tools block
275
314
  if (modifiedMessages?.some(msg =>
276
315
  Array.isArray(msg.content) && msg.content.some(item => item.type === 'tool_use')
277
316
  )) {
278
317
  const toolsMap = new Map();
279
318
 
280
- // Collect all unique tool uses from messages
319
+ // First add any existing tools from parameters to the map
320
+ if (requestParameters.tools) {
321
+ requestParameters.tools.forEach(tool => {
322
+ toolsMap.set(tool.name, tool);
323
+ });
324
+ }
325
+
326
+ // Collect all unique tool uses from messages, only adding if not already present
281
327
  modifiedMessages.forEach(msg => {
282
328
  if (Array.isArray(msg.content)) {
283
329
  msg.content.forEach(item => {
284
- if (item.type === 'tool_use') {
330
+ if (item.type === 'tool_use' && !toolsMap.has(item.name)) {
285
331
  toolsMap.set(item.name, {
286
332
  name: item.name,
287
333
  description: `Tool for ${item.name}`,
@@ -302,11 +348,8 @@ class Claude3VertexPlugin extends OpenAIVisionPlugin {
302
348
  }
303
349
  });
304
350
 
305
- if (requestParameters.tools) {
306
- requestParameters.tools.push(...Array.from(toolsMap.values()));
307
- } else {
308
- requestParameters.tools = Array.from(toolsMap.values());
309
- }
351
+ // Update the tools array with the combined unique tools
352
+ requestParameters.tools = Array.from(toolsMap.values());
310
353
  }
311
354
 
312
355
  requestParameters.max_tokens = this.getModelMaxReturnTokens();
@@ -361,11 +404,17 @@ class Claude3VertexPlugin extends OpenAIVisionPlugin {
361
404
  if (stream) {
362
405
  logger.info(`[response received as an SSE stream]`);
363
406
  } else {
364
- const responseText = this.parseResponse(responseData);
365
- const { length, units } = this.getLength(responseText);
366
- logger.info(`[response received containing ${length} ${units}]`);
367
- logger.verbose(`${responseText}`);
368
- }
407
+ const parsedResponse = this.parseResponse(responseData);
408
+
409
+ if (typeof parsedResponse === 'string') {
410
+ const { length, units } = this.getLength(parsedResponse);
411
+ logger.info(`[response received containing ${length} ${units}]`);
412
+ logger.verbose(`${this.shortenContent(parsedResponse)}`);
413
+ } else {
414
+ logger.info(`[response received containing object]`);
415
+ logger.verbose(`${JSON.stringify(parsedResponse)}`);
416
+ }
417
+ }
369
418
 
370
419
  prompt &&
371
420
  prompt.debugInfo &&
@@ -565,11 +565,12 @@ class ModelPlugin {
565
565
  return parsedData;
566
566
  } catch (error) {
567
567
  // Log the error and continue
568
- logger.error(`Error in executeRequest for ${this.pathwayName}: ${error.message || error}`);
568
+ const errorMessage = `${error?.response?.data?.message || error?.response?.data?.error?.message || error?.message || error}`;
569
+ logger.error(`Error in executeRequest for ${this.pathwayName}: ${errorMessage}`);
569
570
  if (error.data) {
570
571
  logger.error(`Additional error data: ${JSON.stringify(error.data)}`);
571
572
  }
572
- throw new Error(`Execution failed for ${this.pathwayName}: ${error.message || error}`);
573
+ throw new Error(`Execution failed for ${this.pathwayName}: ${errorMessage}`);
573
574
  }
574
575
  }
575
576
 
@@ -11,7 +11,7 @@ class OpenAIReasoningPlugin extends OpenAIChatPlugin {
11
11
  role: message.role,
12
12
  content: this.parseContent(message.content)
13
13
  });
14
- }else if(message.role === 'system') {
14
+ } else if (message.role === 'system') {
15
15
  // System messages to developer: https://platform.openai.com/docs/guides/text-generation#messages-and-roles
16
16
  newMessages.push({
17
17
  role: "developer",
@@ -20,8 +20,8 @@ class OpenAIReasoningPlugin extends OpenAIChatPlugin {
20
20
  }
21
21
  }
22
22
 
23
- messages.length = 0;
24
- messages.push(...newMessages);
23
+ // Replace the contents of the original array with the new messages
24
+ messages.splice(0, messages.length, ...newMessages);
25
25
  }
26
26
 
27
27
  parseContent(content) {