@aj-archipelago/cortex 1.3.35 → 1.3.36
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +9 -9
- package/config/default.example.json +0 -20
- package/config.js +160 -6
- package/lib/pathwayTools.js +79 -1
- package/lib/requestExecutor.js +3 -1
- package/lib/util.js +7 -0
- package/package.json +1 -1
- package/pathways/basePathway.js +2 -0
- package/pathways/call_tools.js +379 -0
- package/pathways/system/entity/memory/shared/sys_memory_helpers.js +1 -1
- package/pathways/system/entity/memory/sys_search_memory.js +2 -2
- package/pathways/system/entity/sys_entity_agent.js +289 -0
- package/pathways/system/entity/sys_generator_memory.js +1 -1
- package/pathways/system/entity/sys_generator_results.js +1 -1
- package/pathways/system/entity/sys_get_entities.js +19 -0
- package/pathways/system/entity/tools/shared/sys_entity_tools.js +150 -0
- package/pathways/system/entity/tools/sys_tool_bing_search.js +147 -0
- package/pathways/system/entity/tools/sys_tool_callmodel.js +62 -0
- package/pathways/system/entity/tools/sys_tool_coding.js +53 -0
- package/pathways/system/entity/tools/sys_tool_codingagent.js +100 -0
- package/pathways/system/entity/tools/sys_tool_cognitive_search.js +231 -0
- package/pathways/system/entity/tools/sys_tool_image.js +57 -0
- package/pathways/system/entity/tools/sys_tool_readfile.js +119 -0
- package/pathways/system/entity/tools/sys_tool_reasoning.js +75 -0
- package/pathways/system/entity/tools/sys_tool_remember.js +59 -0
- package/pathways/vision.js +1 -1
- package/server/modelExecutor.js +4 -12
- package/server/pathwayResolver.js +53 -40
- package/server/plugins/azureBingPlugin.js +42 -4
- package/server/plugins/azureCognitivePlugin.js +40 -12
- package/server/plugins/claude3VertexPlugin.js +67 -18
- package/server/plugins/modelPlugin.js +3 -2
- package/server/plugins/openAiReasoningPlugin.js +3 -3
- package/server/plugins/openAiReasoningVisionPlugin.js +48 -0
- package/server/plugins/openAiVisionPlugin.js +192 -7
- package/tests/agentic.test.js +256 -0
- package/tests/call_tools.test.js +216 -0
- package/tests/claude3VertexToolConversion.test.js +78 -0
- package/tests/mocks.js +11 -3
- package/tests/multimodal_conversion.test.js +1 -1
- package/tests/openAiToolPlugin.test.js +242 -0
- package/pathways/test_palm_chat.js +0 -31
- package/server/plugins/palmChatPlugin.js +0 -233
- package/server/plugins/palmCodeCompletionPlugin.js +0 -45
- package/server/plugins/palmCompletionPlugin.js +0 -135
- package/tests/palmChatPlugin.test.js +0 -219
- package/tests/palmCompletionPlugin.test.js +0 -58
|
// sys_tool_callmodel.js
// Entity tool that calls a model to get a response

import { Prompt } from '../../../../server/prompt.js';
import logger from '../../../../lib/logger.js';

export default {
    // Two-message prompt: the caller supplies both the system context and the
    // user task via input parameters (triple-stash keeps the text unescaped).
    prompt: [
        new Prompt({
            messages: [
                { "role": "system", "content": "{{{systemPrompt}}}" },
                { "role": "user", "content": "{{{userPrompt}}}" }
            ]
        }),
    ],

    inputParameters: {
        userPrompt: "",
        systemPrompt: "",
        model: "oai-gpt41"
    },

    // OpenAI-style function definition exposed to the agent.
    toolDefinition: {
        type: "function",
        icon: "🤖",
        function: {
            name: "CallModel",
            description: "Use when you need to call an AI model to get a response. This is typically used to perform some sort of LLM analysis (translate, summarize, ask questions about content, etc.), but can literally do anything you need. You can use this to call any model you have access to and perform any task.",
            parameters: {
                type: "object",
                properties: {
                    systemPrompt: {
                        type: "string",
                        description: "The system prompt to send to the model to set up the context for what you want the model to do."
                    },
                    userPrompt: {
                        type: "string",
                        description: "The complete prompt to send as a user message to the model instructing the model to perform the task you need."
                    },
                    model: {
                        type: "string",
                        description: "The model to use. You currently have the following models available to call: oai-gpt4o, oai-gpt41, oai-o3, oai-o3-mini, claude-35-sonnet-vertex, gemini-flash-20-vision, gemini-pro-25-vision."
                    },
                    userMessage: {
                        type: "string",
                        description: "A user-friendly message that describes what you're doing with this tool"
                    }
                },
                required: ["systemPrompt", "userPrompt", "model", "userMessage"]
            }
        }
    },

    // Runs the prompt against the selected model. Failures are logged and
    // surfaced as a text result rather than rethrown, so the calling agent can
    // read the error message and react.
    executePathway: async ({ args, runAllPrompts, resolver }) => {
        try {
            return await runAllPrompts({ ...args });
        } catch (error) {
            logger.error(error);
            return `Error calling model: ${error.message}`;
        }
    }
}
// sys_tool_coding.js
// Entity tool that provides advanced coding and programming capabilities

import { Prompt } from '../../../../server/prompt.js';

export default {
    // Single prompt: a coding-specialist persona followed by the running chat
    // history, so the model answers the latest request in context.
    prompt: [
        new Prompt({
            messages: [
                {"role": "system", "content": `You are the part of an AI entity named {{aiName}} that provides advanced coding and programming capabilities. You excel at writing, reviewing, and explaining code across various programming languages. You can help with code generation, debugging, optimization, and best practices. Think carefully about the latest request and provide a detailed, well thought out, carefully reviewed response.\n{{renderTemplate AI_DATETIME}}`},
                "{{chatHistory}}",
            ]
        }),
    ],
    inputParameters: {
        chatHistory: [{role: '', content: []}],
        contextId: ``,
        aiName: "Jarvis",
        language: "English",
    },
    max_tokens: 100000,
    // Reasoning-heavy model; long timeout and no chunking/dup suppression
    // because coding answers must be produced in one coherent pass.
    model: 'oai-o3',
    useInputChunking: false,
    enableDuplicateRequests: false,
    timeout: 600,
    toolDefinition: [{
        type: "function",
        icon: "💻",
        function: {
            name: "Code",
            description: "Engage for any programming-related tasks, including writing, modifying, reviewing, or explaining code.",
            parameters: {
                type: "object",
                properties: {
                    detailedInstructions: {
                        type: "string",
                        description: "Detailed instructions about what you need the tool to do"
                    },
                    userMessage: {
                        type: "string",
                        description: "A user-friendly message that describes what you're doing with this tool"
                    }
                },
                required: ["detailedInstructions", "userMessage"]
            }
        }
    }],

    // Runs the coding prompt (streaming disabled), then tags the resolver so
    // downstream consumers know which tool produced the answer.
    executePathway: async ({ args, runAllPrompts, resolver }) => {
        const response = await runAllPrompts({ ...args, stream: false });
        resolver.tool = JSON.stringify({ toolUsed: "coding" });
        return response;
    }
}
// sys_tool_codingagent.js
// Entity tool that provides code execution capabilities through a queue-based system

import { QueueServiceClient } from '@azure/storage-queue';
import logger from '../../../../lib/logger.js';

const connectionString = process.env.AZURE_STORAGE_CONNECTION_STRING;
let queueClient;

// Queue client is created once at module load; if the connection string is
// absent the tool stays loaded but queue operations are disabled.
if (connectionString) {
    const queueName = process.env.AUTOGEN_MESSAGE_QUEUE || "autogen-message-queue";
    const queueClientService = QueueServiceClient.fromConnectionString(connectionString);
    queueClient = queueClientService.getQueueClient(queueName);
} else {
    logger.warn("Azure Storage connection string is not provided. Queue operations will be unavailable.");
}

/**
 * Base64-encodes `data` as JSON and enqueues it for the coding agent.
 * @param {object} data - payload for the coding agent (message, contextId, keywords).
 * @returns {Promise<string|undefined>} the queue message id, or undefined when
 *   no queue client is configured.
 * @throws rethrows any queue send failure after logging it.
 */
async function sendMessageToQueue(data) {
    try {
        if(!queueClient){
            logger.warn("Azure Storage connection string is not provided. Queue operations will be unavailable.");
            return;
        }
        const encodedMessage = Buffer.from(JSON.stringify(data)).toString('base64');
        const result = await queueClient.sendMessage(encodedMessage);
        logger.info(`Message added to queue: ${JSON.stringify(result)}`);
        return result.messageId;
    } catch (error) {
        logger.error("Error sending message:", error);
        throw error;
    }
}

export default {
    inputParameters: {
        chatHistory: [{role: '', content: []}],
        contextId: ``,
        aiName: "Jarvis",
        language: "English",
    },
    max_tokens: 100000,
    model: 'oai-gpt41',
    useInputChunking: false,
    enableDuplicateRequests: false,
    timeout: 600,
    toolDefinition: [{
        type: "function",
        icon: "🤖",
        function: {
            name: "CodeExecution",
            description: "Use when explicitly asked to run or execute code, or when a coding agent is needed to perform specific tasks.",
            parameters: {
                type: "object",
                properties: {
                    codingTask: {
                        type: "string",
                        description: "Detailed task description for the coding agent. Include all necessary information as this is the only message the coding agent receives. Let the agent decide how to solve it without making assumptions about its capabilities."
                    },
                    userMessage: {
                        type: "string",
                        description: "A user-friendly message to notify the user that a coding task is being handled"
                    },
                    codingTaskKeywords: {
                        type: "string",
                        description: "Keywords for Azure Cognitive Search to help the coding agent find relevant code snippets"
                    }
                },
                required: ["codingTask", "userMessage", "codingTaskKeywords"]
            }
        }
    }],

    // Enqueues the coding task and records a tool callback so the caller can
    // correlate the asynchronous result with this request.
    executePathway: async ({args, resolver}) => {
        try {
            const { codingTask, userMessage, codingTaskKeywords } = args;
            const { contextId } = args;

            // Send the task to the queue
            const codeRequestId = await sendMessageToQueue({
                message: codingTask,
                contextId,
                keywords: codingTaskKeywords
            });

            // Bug fix: sendMessageToQueue returns undefined when no queue is
            // configured. Previously we still returned a success message and
            // registered a callback with an undefined id, silently misleading
            // the user. Report unavailability instead.
            if (!codeRequestId) {
                return "Code execution is currently unavailable because no task queue is configured. Please let the user know and suggest trying again later.";
            }

            // Set the tool response
            resolver.tool = JSON.stringify({
                toolUsed: "coding",
                codeRequestId,
                toolCallbackName: "coding",
                toolCallbackId: codeRequestId,
                toolCallbackMessage: userMessage
            });

            return userMessage || "I've started working on your coding task. I'll let you know when it's complete.";
        } catch (error) {
            logger.error("Error in coding agent tool:", error);
            throw error;
        }
    }
};
// sys_tool_cognitive_search.js
// Tool pathway that handles cognitive search across various indexes
import { callPathway } from '../../../../lib/pathwayTools.js';
import { Prompt } from '../../../../server/prompt.js';
import logger from '../../../../lib/logger.js';
import { getSearchResultId } from '../../../../lib/util.js';

export default {
    prompt: [],
    useInputChunking: false,
    enableDuplicateRequests: false,
    inputParameters: {
        text: '',
        filter: '',
        top: 50,
        titleOnly: false,
        stream: false,
        indexName: ''
    },
    timeout: 300,
    // One pathway backs four search tools; the invoked tool name selects the
    // target index in executePathway below.
    // Consistency fix: SearchAJA previously nested `icon` inside `function`,
    // unlike the other three tools — it is now at the tool level like its siblings.
    toolDefinition: [
        {
            type: "function",
            icon: "📂",
            function: {
                name: "SearchPersonal",
                description: "Search through the user's personal documents and uploaded files. Use this for finding information in user-provided content.",
                parameters: {
                    type: "object",
                    properties: {
                        text: {
                            type: "string",
                            description: "The search query to find relevant content in personal documents. Can be a specific phrase or '*' for all documents."
                        },
                        filter: {
                            type: "string",
                            description: "Optional OData filter expression for date filtering (e.g. 'date ge 2024-02-22T00:00:00Z')"
                        },
                        top: {
                            type: "integer",
                            description: "Number of results to return (default is 50)"
                        },
                        titleOnly: {
                            type: "boolean",
                            description: "If true, only return document titles without content"
                        },
                        userMessage: {
                            type: "string",
                            description: "A user-friendly message that describes what you're doing with this tool"
                        }
                    },
                    required: ["text", "userMessage"]
                }
            }
        },
        {
            type: "function",
            icon: "📰",
            function: {
                name: "SearchAJA",
                description: "Search through Al Jazeera Arabic news articles. Use this for finding Arabic news content.",
                parameters: {
                    type: "object",
                    properties: {
                        text: {
                            type: "string",
                            description: "The search query in Arabic to find relevant news articles. Can be a specific phrase or '*' for all articles."
                        },
                        filter: {
                            type: "string",
                            description: "Optional OData filter expression for date filtering (e.g. 'date ge 2024-02-22T00:00:00Z')"
                        },
                        top: {
                            type: "integer",
                            description: "Number of results to return (default is 50)"
                        },
                        titleOnly: {
                            type: "boolean",
                            description: "If true, only return article titles without content"
                        },
                        userMessage: {
                            type: "string",
                            description: "A user-friendly message that describes what you're doing with this tool"
                        }
                    },
                    required: ["text", "userMessage"]
                }
            }
        },
        {
            type: "function",
            icon: "📰",
            function: {
                name: "SearchAJE",
                description: "Search through Al Jazeera English news articles. Use this for finding English news content.",
                parameters: {
                    type: "object",
                    properties: {
                        text: {
                            type: "string",
                            description: "The search query in English to find relevant news articles. Can be a specific phrase or '*' for all articles."
                        },
                        filter: {
                            type: "string",
                            description: "Optional OData filter expression for date filtering (e.g. 'date ge 2024-02-22T00:00:00Z')"
                        },
                        top: {
                            type: "integer",
                            description: "Number of results to return (default is 50)"
                        },
                        titleOnly: {
                            type: "boolean",
                            description: "If true, only return article titles without content"
                        },
                        userMessage: {
                            type: "string",
                            description: "A user-friendly message that describes what you're doing with this tool"
                        }
                    },
                    required: ["text", "userMessage"]
                }
            }
        },
        {
            type: "function",
            icon: "⚡️",
            function: {
                name: "SearchWires",
                description: "Search through news wires from all sources. Use this for finding the latest news and articles.",
                parameters: {
                    type: "object",
                    properties: {
                        text: {
                            type: "string",
                            description: "The search query to find relevant news wires. Can be a specific phrase or '*' for all wires."
                        },
                        filter: {
                            type: "string",
                            description: "Optional OData filter expression for date filtering (e.g. 'date ge 2024-02-22T00:00:00Z')"
                        },
                        top: {
                            type: "integer",
                            description: "Number of results to return (default is 50)"
                        },
                        titleOnly: {
                            type: "boolean",
                            description: "If true, only return wire titles without content"
                        },
                        userMessage: {
                            type: "string",
                            description: "A user-friendly message that describes what you're doing with this tool"
                        }
                    },
                    required: ["text", "userMessage"]
                }
            }
        }
    ],

    // Resolves the invoked tool to an index, runs the cognitive_search
    // pathway, strips vector fields from hits, appends any semantic answers,
    // and returns a JSON SearchResponse string.
    executePathway: async ({args, runAllPrompts, resolver}) => {
        const { text, filter, top, titleOnly, stream, dataSources, indexName, semanticConfiguration } = args;

        // Map tool names to index names
        // NOTE(review): keys are lowercase — assumes args.toolFunction arrives
        // lowercased by the tool dispatcher; verify against the caller.
        const toolToIndex = {
            'searchpersonal': 'indexcortex',
            'searchaja': 'indexucmsaja',
            'searchaje': 'indexucmsaje',
            'searchwires': 'indexwires'
        };

        // Helper function to remove vector fields from search results
        const removeVectorFields = (result) => {
            const { text_vector, image_vector, ...cleanResult } = result;
            return cleanResult;
        };

        // Get the tool name from the function call
        const toolName = args.toolFunction;
        const toolIndexName = indexName || toolToIndex[toolName];

        if (!toolName || !toolIndexName) {
            throw new Error(`Invalid tool name: ${toolName}. Search not allowed.`);
        }

        try {
            // Call the cognitive search pathway
            const response = await callPathway('cognitive_search', {
                ...args,
                text,
                filter,
                top: top || 50,
                titleOnly: titleOnly || false,
                indexName: toolIndexName,
                semanticConfiguration,
                stream: stream || false
            });

            const parsedResponse = JSON.parse(response);

            const combinedResults = [];

            if (parsedResponse.value && Array.isArray(parsedResponse.value)) {
                // Filter out vector fields from each result before adding to combinedResults
                combinedResults.push(...parsedResponse.value.map(result => ({
                    ...removeVectorFields(result),
                    searchResultId: getSearchResultId()
                })));
            }
            // Extract semantic answers
            const answers = parsedResponse["@search.answers"];
            if (answers && Array.isArray(answers)) {
                const formattedAnswers = answers.map(ans => ({
                    // Create a pseudo-document structure for answers
                    searchResultId: getSearchResultId(),
                    title: "", // no title for answers
                    content: ans.text || "", // Use text as content
                    key: ans.key, // Keep the key if needed later
                    score: ans.score, // Keep score
                    source_type: 'answer' // Add a type identifier
                    // url: null - Answers don't have URLs
                }));
                combinedResults.push(...formattedAnswers);
            }

            return JSON.stringify({ _type: "SearchResponse", value: combinedResults });
        } catch (e) {
            logger.error(`Error in cognitive search for index ${indexName}: ${e}`);
            throw e;
        }
    }
};
// sys_tool_image.js
// Entity tool that creates images for the entity to show to the user
import { callPathway } from '../../../../lib/pathwayTools.js';

export default {
    prompt: [],
    useInputChunking: false,
    enableDuplicateRequests: false,
    inputParameters: {
        model: 'oai-gpt4o',
    },
    timeout: 300,
    toolDefinition: {
        type: "function",
        icon: "🎨",
        function: {
            name: "Image",
            description: "Use when asked to create, generate, or revise visual content. Any time the user asks you for a picture, a selfie, artwork, a drawing or if you want to illustrate something for the user, you can use this tool to generate any sort of image from cartoon to photo realistic.",
            parameters: {
                type: "object",
                properties: {
                    detailedInstructions: {
                        type: "string",
                        description: "A very detailed prompt describing the image you want to create. You should be very specific - explaining subject matter, style, and details about the image including things like camera angle, lens types, lighting, photographic techniques, etc. Any details you can provide to the image creation engine will help it create the most accurate and useful images. The more detailed and descriptive the prompt, the better the result."
                    },
                    renderText: {
                        type: "boolean",
                        description: "Set to true if the image should be optimized to show correct text. This is useful when the user asks for a picture of something that includes specific text as it invokes a different image generation model that is optimized for including text."
                    },
                    userMessage: {
                        type: "string",
                        description: "A user-friendly message that describes what you're doing with this tool"
                    }
                },
                required: ["detailedInstructions", "renderText", "userMessage"]
            }
        }
    },

    // Tags the resolver, then delegates generation to the image_flux pathway.
    // On failure, routes through the error-generator pathway so the user gets
    // a friendly message instead of a raw exception.
    executePathway: async ({ args, resolver }) => {
        try {
            const imagePrompt = args.detailedInstructions;
            const imageCount = args.numberResults || 1;
            const avoidPrompt = args.negativePrompt || "";

            resolver.tool = JSON.stringify({ toolUsed: "image" });
            return await callPathway('image_flux', {
                ...args,
                text: imagePrompt,
                negativePrompt: avoidPrompt,
                numberResults: imageCount,
                model: "replicate-flux-11-pro",
                stream: false
            });
        } catch (e) {
            resolver.logError(e.message ?? e);
            return await callPathway('sys_generator_error', { ...args, text: e.message }, resolver);
        }
    }
};
// sys_tool_readfile.js
// Entity tool that reads one or more files and answers questions about them

import { Prompt } from '../../../../server/prompt.js';

export default {
    // Vision-capable persona plus chat history; the files to analyze are
    // expected to already be present in the conversation.
    prompt: [
        new Prompt({
            messages: [
                {"role": "system", "content": `You are the part of an AI entity named {{aiName}} that can view, hear, and understand files of all sorts (images, videos, audio, pdfs, text, etc.) - you provide the capability to view and analyze files that the user provides.\nThe user has provided you with one or more files in this conversation - you should consider them for context when you respond.\nIf you don't see any files, something has gone wrong in the upload and you should inform the user and have them try again.\n{{renderTemplate AI_DATETIME}}`},
                "{{chatHistory}}",
            ]
        }),
    ],
    inputParameters: {
        chatHistory: [{role: '', content: []}],
        contextId: ``,
        aiName: "Jarvis",
        language: "English",
    },
    max_tokens: 8192,
    model: 'gemini-flash-20-vision',
    useInputChunking: false,
    enableDuplicateRequests: false,
    timeout: 600,
    // Four tool entries (PDF/Text/Vision/Video) all route to this one pathway;
    // they differ only in name, icon, and description.
    toolDefinition: [{
        type: "function",
        icon: "📄",
        function: {
            name: "PDF",
            description: "Use specifically for analyzing and answering questions about PDF file content.",
            parameters: {
                type: "object",
                properties: {
                    detailedInstructions: {
                        type: "string",
                        description: "Detailed instructions about what you need the tool to do - questions you need answered about the files, etc."
                    },
                    userMessage: {
                        type: "string",
                        description: "A user-friendly message that describes what you're doing with this tool"
                    }
                },
                required: ["detailedInstructions", "userMessage"]
            }
        }
    },
    {
        type: "function",
        icon: "📝",
        function: {
            name: "Text",
            description: "Use specifically for analyzing and answering questions about text files.",
            parameters: {
                type: "object",
                properties: {
                    detailedInstructions: {
                        type: "string",
                        description: "Detailed instructions about what you need the tool to do - questions you need answered about the files, etc."
                    },
                    userMessage: {
                        type: "string",
                        description: "A user-friendly message that describes what you're doing with this tool"
                    }
                },
                required: ["detailedInstructions", "userMessage"]
            }
        }
    },
    {
        type: "function",
        icon: "🖼️",
        function: {
            name: "Vision",
            description: "Use specifically for analyzing and answering questions about image files (jpg, gif, bmp, png, etc).",
            parameters: {
                type: "object",
                properties: {
                    detailedInstructions: {
                        type: "string",
                        description: "Detailed instructions about what you need the tool to do - questions you need answered about the files, etc."
                    },
                    userMessage: {
                        type: "string",
                        description: "A user-friendly message that describes what you're doing with this tool"
                    }
                },
                required: ["detailedInstructions", "userMessage"]
            }
        }
    },
    {
        type: "function",
        icon: "🎥",
        function: {
            name: "Video",
            description: "Use specifically for analyzing and answering questions about video or audio file content. You MUST use this tool to look at video or audio files.",
            parameters: {
                type: "object",
                properties: {
                    detailedInstructions: {
                        type: "string",
                        description: "Detailed instructions about what you need the tool to do - questions you need answered about the files, etc."
                    },
                    userMessage: {
                        type: "string",
                        description: "A user-friendly message that describes what you're doing with this tool"
                    }
                },
                required: ["detailedInstructions", "userMessage"]
            }
        }
    }],

    // Runs the vision prompt over the conversation, then tags the resolver
    // with the tool marker before handing back the model's answer.
    executePathway: async ({ args, runAllPrompts, resolver }) => {
        const response = await runAllPrompts({ ...args });
        resolver.tool = JSON.stringify({ toolUsed: "vision" });
        return response;
    }
}