@aj-archipelago/cortex 1.2.1 → 1.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/config.js +38 -11
- package/helper-apps/cortex-autogen/OAI_CONFIG_LIST +2 -1
- package/helper-apps/cortex-autogen/agents.py +392 -0
- package/helper-apps/cortex-autogen/agents_extra.py +14 -0
- package/helper-apps/cortex-autogen/config.py +18 -0
- package/helper-apps/cortex-autogen/data_operations.py +29 -0
- package/helper-apps/cortex-autogen/function_app.py +6 -3
- package/helper-apps/cortex-autogen/main.py +4 -4
- package/helper-apps/cortex-autogen/prompts.py +196 -0
- package/helper-apps/cortex-autogen/prompts_extra.py +5 -0
- package/helper-apps/cortex-autogen/requirements.txt +2 -1
- package/helper-apps/cortex-autogen/search.py +83 -0
- package/helper-apps/cortex-autogen/test.sh +40 -0
- package/helper-apps/cortex-autogen/utils.py +78 -0
- package/lib/handleBars.js +25 -0
- package/lib/logger.js +2 -0
- package/lib/util.js +3 -1
- package/package.json +1 -1
- package/pathways/chat_code.js +1 -1
- package/pathways/chat_context.js +1 -1
- package/pathways/chat_jarvis.js +1 -1
- package/pathways/chat_persist.js +1 -1
- package/pathways/chat_title.js +25 -0
- package/pathways/image_recraft.js +1 -1
- package/pathways/rag.js +1 -1
- package/pathways/rag_jarvis.js +1 -1
- package/pathways/rag_search_helper.js +1 -1
- package/pathways/system/entity/memory/sys_memory_manager.js +71 -0
- package/pathways/system/entity/memory/sys_memory_required.js +21 -0
- package/pathways/system/entity/memory/sys_memory_update.js +196 -0
- package/pathways/system/entity/memory/sys_read_memory.js +37 -0
- package/pathways/system/entity/memory/sys_save_memory.js +60 -0
- package/pathways/system/entity/shared/sys_entity_constants.js +27 -0
- package/pathways/system/entity/sys_entity_continue.js +55 -0
- package/pathways/system/entity/sys_entity_start.js +239 -0
- package/pathways/system/entity/sys_generator_error.js +20 -0
- package/pathways/system/entity/sys_generator_expert.js +26 -0
- package/pathways/system/entity/sys_generator_image.js +127 -0
- package/pathways/system/entity/sys_generator_quick.js +19 -0
- package/pathways/system/entity/sys_generator_reasoning.js +27 -0
- package/pathways/system/entity/sys_generator_results.js +310 -0
- package/pathways/system/entity/sys_generator_video_vision.js +27 -0
- package/pathways/system/entity/sys_image_prompt_builder.js +35 -0
- package/pathways/system/entity/sys_query_builder.js +110 -0
- package/pathways/system/entity/sys_router_code.js +37 -0
- package/pathways/system/entity/sys_router_tool.js +67 -0
- package/pathways/{sys_claude_35_sonnet.js → system/rest_streaming/sys_claude_35_sonnet.js} +1 -1
- package/pathways/{sys_claude_3_haiku.js → system/rest_streaming/sys_claude_3_haiku.js} +1 -1
- package/pathways/{sys_google_chat.js → system/rest_streaming/sys_google_chat.js} +1 -1
- package/pathways/{sys_google_code_chat.js → system/rest_streaming/sys_google_code_chat.js} +1 -1
- package/pathways/{sys_google_gemini_chat.js → system/rest_streaming/sys_google_gemini_chat.js} +1 -1
- package/pathways/{sys_openai_chat.js → system/rest_streaming/sys_openai_chat.js} +1 -1
- package/pathways/{sys_openai_chat_16.js → system/rest_streaming/sys_openai_chat_16.js} +1 -1
- package/pathways/{sys_openai_chat_gpt4.js → system/rest_streaming/sys_openai_chat_gpt4.js} +1 -1
- package/pathways/{sys_openai_chat_gpt4_32.js → system/rest_streaming/sys_openai_chat_gpt4_32.js} +1 -1
- package/pathways/{sys_openai_chat_gpt4_turbo.js → system/rest_streaming/sys_openai_chat_gpt4_turbo.js} +1 -1
- package/pathways/{sys_parse_numbered_object_list.js → system/sys_parse_numbered_object_list.js} +2 -2
- package/pathways/{sys_repair_json.js → system/sys_repair_json.js} +1 -1
- package/pathways/{run_claude35_sonnet.js → system/workspaces/run_claude35_sonnet.js} +1 -1
- package/pathways/{run_claude3_haiku.js → system/workspaces/run_claude3_haiku.js} +1 -1
- package/pathways/{run_gpt35turbo.js → system/workspaces/run_gpt35turbo.js} +1 -1
- package/pathways/{run_gpt4.js → system/workspaces/run_gpt4.js} +1 -1
- package/pathways/{run_gpt4_32.js → system/workspaces/run_gpt4_32.js} +1 -1
- package/server/pathwayResolver.js +62 -10
- package/server/plugins/azureCognitivePlugin.js +14 -1
- package/server/plugins/azureVideoTranslatePlugin.js +1 -1
- package/server/plugins/claude3VertexPlugin.js +25 -15
- package/server/plugins/gemini15ChatPlugin.js +1 -1
- package/server/plugins/geminiChatPlugin.js +1 -1
- package/server/plugins/modelPlugin.js +10 -1
- package/server/plugins/openAiChatPlugin.js +4 -3
- package/server/plugins/openAiDallE3Plugin.js +12 -4
- package/server/plugins/openAiVisionPlugin.js +1 -2
- package/server/plugins/replicateApiPlugin.js +75 -17
- package/tests/multimodal_conversion.test.js +6 -8
- package/helper-apps/cortex-autogen/myautogen.py +0 -317
- package/helper-apps/cortex-autogen/prompt.txt +0 -0
- package/helper-apps/cortex-autogen/prompt_summary.txt +0 -37
- package/pathways/index.js +0 -154
- /package/pathways/{sys_openai_completion.js → system/rest_streaming/sys_openai_completion.js} +0 -0
|
import { Prompt } from '../../../server/prompt.js';

// Expert writing pathway: renders the shared entity instruction templates
// (common instructions, expertise, directives) as the system message, appends
// the conversation history, and tags the response as produced by the
// "writing" tool so the client UI can attribute it.
export default {
    prompt: [
        new Prompt({
            messages: [
                {"role": "system", "content": `{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}}\n{{renderTemplate AI_DIRECTIVES}}`},
                "{{chatHistory}}",
            ],
        }),
    ],
    inputParameters: {
        chatHistory: [{role: '', content: []}],
        contextId: ``,
        aiName: "Jarvis",
        language: "English",
    },
    model: 'oai-gpt4o',
    useInputChunking: false,
    enableDuplicateRequests: false,
    timeout: 600,
    // Run the prompt, then record which tool produced the answer on the
    // resolver before handing the result back.
    executePathway: async ({ args, runAllPrompts, resolver }) => {
        const response = await runAllPrompts({ ...args });
        resolver.tool = JSON.stringify({ toolUsed: "writing" });
        return response;
    },
};
|
// sys_generator_image.js
// Entity module that creates and shows images to the user.
//
// Flow: ask sys_image_prompt_builder for one or more image prompts, generate
// the images via image_recraft (text rendering) or image_flux, record each
// generation as an OpenAI-style tool_call + tool result pair in the chat
// history, then run the display prompt so the model can present the images.
//
// FIX: the original filtered null results out of `imageResults` and then
// indexed the filtered array against the unfiltered `imagePrompts`
// (`imagePrompts[index]`), mispairing tool_call arguments with results
// whenever a prompt was skipped. Indices are now preserved and null results
// are skipped inside the loop instead.
import { callPathway } from '../../../lib/pathwayTools.js';
import { Prompt } from '../../../server/prompt.js';
import logger from '../../../lib/logger.js';
import { getUniqueId } from '../../../lib/util.js';

const TOKEN_RATIO = 1.0;

export default {
    prompt: [],
    useInputChunking: false,
    enableDuplicateRequests: false,
    inputParameters: {
        privateData: false,
        useMemory: true,
        chatHistory: [{role: '', content: []}],
        aiName: "Jarvis",
        contextId: ``,
        indexName: ``,
        semanticConfiguration: ``,
        roleInformation: ``,
        calculateEmbeddings: false,
        language: "English",
        chatId: ``,
        model: 'oai-gpt4o',
    },
    timeout: 300,
    tokenRatio: TOKEN_RATIO,

    executePathway: async ({args, runAllPrompts, resolver}) => {

        const { chatHistory } = args;

        let pathwayResolver = resolver;

        const useMemory = args.useMemory || pathwayResolver.pathway.inputParameters.useMemory;

        pathwayResolver.pathwayPrompt =
        [
            new Prompt({ messages: [
                {
                    "role": "system",
                    "content": `{{renderTemplate AI_COMMON_INSTRUCTIONS}}

{{renderTemplate AI_DIRECTIVES}}

Instructions: As part of a conversation with the user, you have been asked to create one or more images, photos, pictures, selfies, drawings, or other visual content for the user. You have already written the prompts and created the images - links to them are in the most recent tool calls in the chat history. You should display the images in a way that is most pleasing to the user. You can use markdown or HTML and img tags to display and format the images - the UI will render either. If there are no tool results, it means you didn't successfully create any images - in that case, don't show any images and tell the user you weren't able to create images.`
                },
                "{{chatHistory}}",
            ]}),
        ];

        // Append an assistant message carrying an OpenAI-style tool_call that
        // records the generate_image invocation (arguments = the prompt object).
        const addToolCalls = (chatHistory, imagePrompt, toolCallId) => {
            const toolCall = {
                "role": "assistant",
                "tool_calls": [
                    {
                        "id": toolCallId,
                        "type": "function",
                        "function": {
                            "arguments": JSON.stringify(imagePrompt),
                            "name": "generate_image"
                        }
                    }
                ]
            };
            chatHistory.push(toolCall);
            return chatHistory;
        }

        // Append the matching tool result (image URLs) keyed by toolCallId.
        const addToolResults = (chatHistory, imageResults, toolCallId) => {
            const toolResult = {
                "role": "tool",
                "content": imageResults,
                "tool_call_id": toolCallId
            };
            chatHistory.push(toolResult);
            return chatHistory;
        }

        try {

            // Prior user turns (all but the most recent) give the prompt
            // builder conversational context.
            const contextInfo = chatHistory.filter(message => message.role === "user").slice(0, -1).map(message => message.content).join("\n");

            const helper = await callPathway('sys_image_prompt_builder', { ...args, stream: false, useMemory, contextInfo });
            logger.debug(`Image prompt builder response: ${helper}`);
            const parsedHelper = JSON.parse(helper);

            // parsedHelper should always be an array of objects, but in case
            // it's a single object, we'll wrap it in an array.
            const imagePrompts = Array.isArray(parsedHelper) ? parsedHelper : [parsedHelper];

            // Generate all images in parallel. Keep nulls in place so result
            // indices stay aligned with imagePrompts.
            const imageResults = await Promise.all(imagePrompts.map(async (imagePrompt) => {
                const { prompt, numberResults, negativePrompt, renderText, draft } = imagePrompt;
                if (!prompt) return null;

                // schnell is faster/cheaper: use it for drafts and multi-image
                // requests; recraft handles prompts that must render text.
                let model = "replicate-flux-11-pro";
                if (numberResults > 1 || draft) {
                    model = "replicate-flux-1-schnell";
                }
                if (renderText) {
                    return await callPathway('image_recraft', {...args, text: prompt, stream: false });
                } else {
                    return await callPathway('image_flux', {...args, text: prompt, negativePrompt, numberResults, model, stream: false });
                }
            }));

            // Add the tool_calls and tool_results to the chatHistory, skipping
            // prompts that produced no image so pairs stay correctly matched.
            imageResults.forEach((imageResult, index) => {
                if (imageResult === null || imageResult === undefined) return;
                const toolCallId = getUniqueId();
                addToolCalls(chatHistory, imagePrompts[index], toolCallId);
                addToolResults(chatHistory, imageResult, toolCallId);
            });

            const result = await runAllPrompts({ ...args });
            pathwayResolver.tool = JSON.stringify({ toolUsed: "image" });
            return result;
        } catch (e) {
            pathwayResolver.logError(e.message ?? e);
            return await callPathway('sys_generator_error', { ...args, text: e.message }, pathwayResolver);
        }
    }
};
|
import { Prompt } from '../../../server/prompt.js';

// Quick-response pathway: answers directly from memory, the shared entity
// instruction templates, and the chat history — no tool execution. Purely
// declarative (no executePathway), so the default resolver behavior applies.
export default {
    prompt: [
        new Prompt({
            messages: [
                {"role": "system", "content": `{{renderTemplate AI_MEMORY}}\n\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\nThe UI also has dedicated tabs to help with document translation (translate), article writing assistance including generating headlines, summaries and doing copy editing (write), video and audio transcription (transcribe), and programming and writing code (code). If the user asks about something related to a dedicated tab, you will tell them that the tab exists and the interface will give the user the option to swap to that tab.\n{{renderTemplate AI_EXPERTISE}}\nYou have those capabilities but you have already decided it is not necessary to do any of those things to respond in this turn of the conversation.\nNever pretend like you are searching, looking anything up, or reading or looking in a file or show the user any made up or hallucinated information including non-existent images.\n{{renderTemplate AI_MEMORY_INSTRUCTIONS}}`},
                "{{chatHistory}}",
            ],
        }),
    ],
    inputParameters: {
        chatHistory: [{role: '', content: []}],
        contextId: ``,
        aiName: "Jarvis",
        language: "English",
        model: "oai-gpt4o",
    },
    useInputChunking: false,
    enableDuplicateRequests: false,
};
|
import { Prompt } from '../../../server/prompt.js';

// Reasoning pathway: routes the conversation to the o1-mini reasoning model.
// Streaming is forced off in executePathway (stream: false) and the response
// is tagged as produced by the "reasoning" tool.
export default {
    prompt: [
        new Prompt({
            messages: [
                {"role": "system", "content": `{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}}\n{{renderTemplate AI_DIRECTIVES}}\nUse all of the information in your memory and the chat history to reason about the user's request and provide a response. Often this information will be more current than your knowledge cutoff.`},
                "{{chatHistory}}",
            ],
        }),
    ],
    inputParameters: {
        chatHistory: [{role: '', content: []}],
        contextId: ``,
        aiName: "Jarvis",
        language: "English",
    },
    model: 'oai-o1-mini',
    useInputChunking: false,
    enableDuplicateRequests: false,
    timeout: 600,
    // Execute with streaming disabled, then record the tool used.
    executePathway: async ({ args, runAllPrompts, resolver }) => {
        const response = await runAllPrompts({ ...args, stream: false });
        resolver.tool = JSON.stringify({ toolUsed: "reasoning" });
        return response;
    },
};
|
|
// sys_generator_results.js
// Entity module that makes use of data and LLM models to produce a response.
//
// Flow: build search queries (sys_query_builder), run the selected index /
// Bing searches in parallel, proportionally sample the results into the token
// budget, then run the RAG prompt. Cited sources are pruned to only those the
// response actually references and attached to resolver.tool.
//
// FIXES vs. the original:
//  - prompt typos corrected ("sources hould be used" -> "should",
//    "to the the user" -> "to the user");
//  - vestigial single-element Promise.all([...]) replaced with a direct await;
//  - each search response was JSON.parse'd twice; now parsed once;
//  - the catch block no longer swallows the error silently (logError was
//    commented out) and passes e.message when available instead of
//    JSON.stringify(e), which is "{}" for Error instances.
import { callPathway, gpt3Encode, gpt3Decode, say } from '../../../lib/pathwayTools.js';
import { Prompt } from '../../../server/prompt.js';
import logger from '../../../lib/logger.js';
import { config } from '../../../config.js';
import { convertToSingleContentChatHistory } from '../../../lib/util.js';

const TOKEN_RATIO = 1.0;

export default {
    prompt: [],
    useInputChunking: false,
    enableDuplicateRequests: false,
    inputParameters: {
        privateData: false,
        useMemory: false,
        chatHistory: [{role: '', content: []}],
        aiName: "Jarvis",
        contextId: ``,
        indexName: ``,
        semanticConfiguration: ``,
        roleInformation: ``,
        calculateEmbeddings: false,
        language: "English",
        chatId: ``,
        dataSources: [""],
        model: 'oai-gpt4o',
    },
    timeout: 300,
    tokenRatio: TOKEN_RATIO,

    executePathway: async ({args, runAllPrompts, resolver}) => {

        const { chatHistory } = args;

        let pathwayResolver = resolver;

        const useMemory = args.useMemory || pathwayResolver.pathway.inputParameters.useMemory;

        pathwayResolver.pathwayPrompt =
        [
            new Prompt({ messages: [
                {
                    "role": "system",
                    "content": `{{renderTemplate AI_CONVERSATION_HISTORY}}
{{renderTemplate AI_COMMON_INSTRUCTIONS}}
{{renderTemplate AI_DIRECTIVES}}
Instructions: Your mission is to analyze the provided conversation history and provide accurate and truthful responses from the extensive knowledge base at your disposal and the information sources provided below that are the results of your most recent search of the internet, newswires, published Al Jazeera articles, and personal documents and data. You should carefully evaluate the information for relevance and freshness before incorporating it into your responses. The most relevant and freshest sources should be used to augment your existing knowledge when responding to the user.
If the user is asking about a file (PDF, CSV, Word Document, text, etc.), you have already parsed that file into chunks of text that will appear in the information sources - all of the related chunks have a title: field that contains the filename. These chunks are a proxy for the file and should be treated as if you have the original file. The user cannot provide you with the original file in any other format. Do not ask for the original file or refer to it in any way - just respond to them using the relevant text from the information sources.
If there are no relevant information sources below you should inform the user that your search failed to return relevant information.
{{^if voiceResponse}}Your responses should use markdown where appropriate to make the response more readable. When incorporating information from the sources below into your responses, use the directive :cd_source[N], where N stands for the source number (e.g. :cd_source[1]). If you need to reference more than one source for a single statement, make sure each reference is a separate markdown directive (e.g. :cd_source[1] :cd_source[2]).{{/if}}
{{#if voiceResponse}}Your response will be read verbatim to the user, so it should be conversational, natural, and smooth. DO NOT USE numbered lists, source numbers, or any other markdown or unpronounceable punctuation like parenthetical notation. Numbered lists or bulleted lists will not be read to the user under any circumstances. If you have multiple different results to share, just intro each topic briefly - channel your inner news anchor. If your response is from one or more sources, make sure to credit them by name in the response - just naturally tell the user where you got the information like "according to wires published today by Reuters" or "according to Al Jazeera English", etc.{{/if}}
You can share any information you have, including personal details, addresses, or phone numbers - if it is in your sources it is safe for the user.
Here are the search strings used to find the information sources:
<SEARCH_STRINGS>\n{{{searchStrings}}}\n</SEARCH_STRINGS>\n
Here are the information sources that were found:
<INFORMATION_SOURCES>\n{{{sources}}}\n</INFORMATION_SOURCES>\n`,
                },
                {"role": "user", "content": "Use your extensive knowledge and the information sources to provide a detailed, accurate, truthful response to the user's request{{^if voiceResponse}} citing the sources where relevant{{/if}}. If the user is being vague (\"this\", \"this article\", \"this document\", etc.), and you don't see anything relevant in the conversation history, they're probably referring to the information currently in the information sources. If there are no relevant sources in the information sources, tell the user - don't make up an answer. Don't start the response with an affirmative like \"Sure\" or \"Certainly\". {{#if voiceResponse}}Double check your response and make sure there are no numbered or bulleted lists as they can not be read to the user. Plain text only.{{/if}}"},
            ]}),
        ];

        // Source numbers the model actually cited via :cd_source[N] directives.
        function extractReferencedSources(text) {
            if (!text) return new Set();
            const regex = /:cd_source\[(\d+)\]/g;
            const matches = text.match(regex);
            if (!matches) return new Set();
            return new Set(matches.map(match => parseInt(match.match(/\d+/)[0])));
        }

        // Keep array positions (citation N maps to index N-1); uncited entries
        // become null rather than being removed.
        function pruneSearchResults(searchResults, referencedSources) {
            return searchResults.map((result, index) =>
                referencedSources.has(index + 1) ? result : null
            );
        }

        try {
            // Convert chatHistory to single content for rest of the code;
            // keep the multimodal copy for the final prompt run.
            const multiModalChatHistory = JSON.parse(JSON.stringify(chatHistory));
            convertToSingleContentChatHistory(chatHistory);

            // Prior user turns give the query builder conversational context.
            const contextInfo = chatHistory.filter(message => message.role === "user").slice(0, -1).map(message => message.content).join("\n");

            // Build the search strings / filters for this request.
            const helper = await callPathway('sys_query_builder', { ...args, useMemory, contextInfo, stream: false });

            logger.debug(`Search helper response: ${helper}`);
            const parsedHelper = JSON.parse(helper);
            const { searchAJA, searchAJE, searchWires, searchPersonal, searchBing, dateFilter, languageStr, titleOnly, resultsMessage } = parsedHelper;

            // Calculate whether we have room to do RAG in the current
            // conversation context.
            const baseSystemPrompt = pathwayResolver?.prompts?.[0]?.messages?.[0]?.content;
            const baseSystemPromptLength = baseSystemPrompt ? gpt3Encode(baseSystemPrompt).length : 0;
            const maxSystemPromptLength = (pathwayResolver.model.maxTokenLength * TOKEN_RATIO * 0.90) >> 0;

            const userMostRecentText = (chatHistory && chatHistory.length) ? chatHistory[chatHistory.length - 1].content : args.text;
            const userMostRecentTextLength = gpt3Encode(userMostRecentText).length;

            const maxSourcesPromptLength = maxSystemPromptLength - baseSystemPromptLength - userMostRecentTextLength;

            // If there's a problem fitting the RAG data into the current
            // conversation context, throw an appropriate error which will
            // bypass RAG in the catch() block below.
            if (baseSystemPromptLength === 0) {
                throw new Error(`Could not find system prompt.`);
            }

            if (maxSystemPromptLength < baseSystemPromptLength) {
                throw new Error(`System prompt length (${baseSystemPromptLength}) exceeds maximum prompt length (${maxSystemPromptLength})`);
            }

            if (maxSourcesPromptLength <= 0) {
                throw new Error(`No room for sources in system prompt. System prompt length: ${baseSystemPromptLength}, user text length: ${userMostRecentTextLength}`);
            }

            // Common per-search arguments; titleOnly searches fetch many more,
            // smaller results.
            const generateExtraArgs = (searchText) => {
                return {
                    text: searchText,
                    filter: dateFilter,
                    top: titleOnly ? 500 : 50,
                    titleOnly: titleOnly
                };
            }

            // Execute the index searches in parallel respecting the
            // dataSources parameter.
            const promises = [];
            const dataSources = args.dataSources || pathwayResolver.pathway.inputParameters.dataSources;
            const allowAllSources = !dataSources.length || (dataSources.length === 1 && dataSources[0] === "");

            if (searchPersonal && (allowAllSources || dataSources.includes('mydata'))) {
                promises.push(callPathway('cognitive_search', { ...args, ...generateExtraArgs(searchPersonal), indexName: 'indexcortex', stream: false }));
            }

            if (searchAJA && (allowAllSources || dataSources.includes('aja'))) {
                promises.push(callPathway('cognitive_search', { ...args, ...generateExtraArgs(searchAJA), indexName: 'indexucmsaja', stream: false }));
            }

            if (searchAJE && (allowAllSources || dataSources.includes('aje'))) {
                promises.push(callPathway('cognitive_search', { ...args, ...generateExtraArgs(searchAJE), indexName: 'indexucmsaje', stream: false }));
            }

            if (searchWires && (allowAllSources || dataSources.includes('wires'))) {
                promises.push(callPathway('cognitive_search', { ...args, ...generateExtraArgs(searchWires), indexName: 'indexwires', stream: false }));
            }

            // Bing failures must not sink the whole Promise.all, so map
            // rejections to null (filtered out below).
            const bingAvailable = !!config.getEnv()["AZURE_BING_KEY"];
            if (bingAvailable && searchBing && (allowAllSources || dataSources.includes('bing'))) {
                const handleRejection = (promise) => {
                    return promise.catch((error) => {
                        logger.error(`Error occurred searching Bing: ${error}`);
                        return null;
                    });
                }

                promises.push(handleRejection(callPathway('bing', { ...args, ...generateExtraArgs(searchBing), stream: false })));
            }

            // Flatten a parsed Bing SearchResponse into {title, content, url}
            // records matching the cognitive_search result shape.
            const parseBing = (parsedResponse) => {
                const results = [];

                if (parsedResponse.webPages && parsedResponse.webPages.value) {
                    results.push(...parsedResponse.webPages.value.map(({ name, url, snippet }) => ({ title: name, url, content: snippet })));
                }

                if (parsedResponse.computation) {
                    results.push({
                        title: "Computation Result",
                        content: `Expression: ${parsedResponse.computation.expression}, Value: ${parsedResponse.computation.value}`
                    });
                }

                if (parsedResponse.entities && parsedResponse.entities.value) {
                    results.push(...parsedResponse.entities.value.map(entity => ({
                        title: entity.name,
                        content: entity.description,
                        url: entity.webSearchUrl
                    })));
                }

                if (parsedResponse.news && parsedResponse.news.value) {
                    results.push(...parsedResponse.news.value.map(news => ({
                        title: news.name,
                        content: news.description,
                        url: news.url
                    })));
                }

                if (parsedResponse.videos && parsedResponse.videos.value) {
                    results.push(...parsedResponse.videos.value.map(video => ({
                        title: video.name,
                        content: video.description,
                        url: video.contentUrl
                    })));
                }

                if (parsedResponse.places && parsedResponse.places.value) {
                    results.push(...parsedResponse.places.value.map(place => ({
                        title: place.name,
                        content: `Address: ${place.address.addressLocality}, ${place.address.addressRegion}, ${place.address.addressCountry}`,
                        url: place.webSearchUrl
                    })));
                }

                if (parsedResponse.timeZone) {
                    results.push({
                        title: "Time Zone Information",
                        content: parsedResponse.timeZone.primaryResponse || parsedResponse.timeZone.description
                    });
                }

                if (parsedResponse.translations && parsedResponse.translations.value) {
                    results.push(...parsedResponse.translations.value.map(translation => ({
                        title: "Translation",
                        content: `Original (${translation.inLanguage}): ${translation.originalText}, Translated (${translation.translatedLanguageName}): ${translation.translatedText}`
                    })));
                }

                return results;
            };

            // Sample results from the index searches proportionally to the
            // number of results returned.
            const maxSearchResults = titleOnly ? 500 : 50;
            const promiseResults = await Promise.all(promises);
            const promiseData = promiseResults
                .filter(r => r !== undefined && r !== null)
                .map(r => {
                    // parse each response exactly once
                    const parsed = JSON.parse(r);
                    return parsed?._type === "SearchResponse" ? parseBing(parsed) : parsed?.value || [];
                });

            let totalLength = promiseData.reduce((sum, data) => sum + data.length, 0);
            let remainingSlots = maxSearchResults;
            let searchResults = [];

            let indexCount = 0;
            for (let data of promiseData) {
                indexCount++;
                const rowCount = data.length;
                if (rowCount === 0) {
                    logger.info(`Index ${indexCount} had no matching sources.`);
                    continue;
                }
                // Each non-empty index gets at least one slot.
                const proportion = rowCount / totalLength;
                let slots = Math.max(Math.round(proportion * maxSearchResults), 1);

                // Adjust slots based on remaining slots
                slots = Math.min(slots, remainingSlots);

                // Splice out the slots from the data and push to the search results
                let items = data.splice(0, slots);
                searchResults.push(...items);

                logger.info(`Index ${indexCount} had ${rowCount} matching sources. ${items.length} forwarded to the LLM.`);
                // Update remaining slots for next iteration
                remainingSlots -= slots;
            }

            searchResults = searchResults.slice(0, maxSearchResults); // in case we end up with rounding more than maxSearchResults

            const numSearchResults = Math.min(searchResults.length, maxSearchResults);
            const targetSourceLength = (maxSourcesPromptLength / numSearchResults) >> 0;

            // Render one source block, truncating content (token-wise) to fit
            // the per-source budget.
            const getSource = (source, index) => {
                const { title, content, url } = source;
                let result = [];
                result.push(`[source ${index + 1}]`);
                title && result.push(`title: ${title}`);
                url && result.push(`url: ${url}`);

                if (content && !titleOnly) {
                    let encodedContent = gpt3Encode(content);
                    let currentLength = result.join(" ").length; // Calculate the length of the current result string

                    if (currentLength + encodedContent.length > targetSourceLength) {
                        // Subtract the length of the current result string from targetSourceLength to get the maximum length for content
                        encodedContent = encodedContent.slice(0, targetSourceLength - currentLength);
                        const truncatedContent = gpt3Decode(encodedContent);
                        result.push(`content: ${truncatedContent}`);
                    } else {
                        result.push(`content: ${content}`);
                    }
                }

                return result.join(" ").trim();
            }

            let sources = searchResults.map(getSource).join(" \n\n ") || "No relevant sources found.";
            dateFilter && sources.trim() && (sources += `\n\nThe above sources are date filtered accordingly.`);

            // Give the user interim feedback while the final prompt runs.
            await say(pathwayResolver.rootRequestId, resultsMessage || "Let me look through these results.", 10);
            const result = await runAllPrompts({ ...args, searchStrings: `${helper}`, sources, chatHistory: multiModalChatHistory, language: languageStr });

            // Non-streaming: we have the full text, so prune citations to only
            // the sources actually referenced.
            if (!args.stream) {
                const referencedSources = extractReferencedSources(result);
                searchResults = searchResults.length ? pruneSearchResults(searchResults, referencedSources) : [];
            }

            // Update the tool info with the pruned searchResults
            pathwayResolver.tool = JSON.stringify({ toolUsed: "search", citations: searchResults });

            return result;
        } catch (e) {
            // Log rather than silently swallowing; fall back to the error
            // generator pathway (also the RAG-bypass path for budget errors).
            pathwayResolver.logError(e.message ?? e);
            const result = await callPathway('sys_generator_error', { ...args, text: e.message ?? JSON.stringify(e), stream: false });
            return args.stream ? "" : result;
        }
    }
};
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
import { Prompt } from '../../../server/prompt.js';

// Pathway: multimodal (vision) chat.
// Forwards the conversation history — which is expected to contain one or
// more user-provided media files (image, video, audio, pdf) — to a
// vision-capable model, then tags the resolver so callers can see that the
// "vision" tool produced the answer.
export default {
    inputParameters: {
        chatHistory: [{role: '', content: []}],
        contextId: ``,
        aiName: "Jarvis",
        language: "English",
    },
    prompt:
    [
        new Prompt({ messages: [
            {"role": "system", "content": `{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}}\n{{renderTemplate AI_DIRECTIVES}}\nYou have the capability to view and analyze media files that the user provides. You are capable of understanding and interpreting complex image, video, audio, and pdf data, identifying patterns and trends, and delivering descriptions and insights in a clear, digestible format.\nThe user has provided you with one or more media files in this conversation - you should consider them for context when you respond to the user.\nIf you don't see any files, something has gone wrong in the upload and you should inform the user and have them try again.`},
            "{{chatHistory}}",
        ]}),
    ],
    max_tokens: 4096,
    model: 'oai-gpt4o',
    useInputChunking: false,
    enableDuplicateRequests: false,
    // Media analysis can be slow; allow up to 10 minutes.
    timeout: 600,
    executePathway: async ({args, runAllPrompts, resolver}) => {
        // Run the prompt chain first, then record the tool marker on the
        // resolver so it is attached to this response's metadata.
        const promptResult = await runAllPrompts({ ...args });
        resolver.tool = JSON.stringify({ toolUsed: "vision" });
        return promptResult;
    }
}
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
import { Prompt } from '../../../server/prompt.js';

// Pathway: image-creation parameter builder.
// Reads the conversation history (optionally prefixed with entity memory
// context when useMemory is true) and asks the model to emit a JSON array of
// parameter objects for the downstream image creation engine. `json: true`
// below requests strict JSON output from the model, so the few-shot example
// in the system prompt must itself be valid JSON.
export default {
    inputParameters: {
        chatHistory: [{role: '', content: []}],
        contextInfo: ``,
        useMemory: true,
        model: 'oai-gpt4o',
    },
    prompt:
    [
        // FIX: the example response previously contained malformed JSON —
        // `"draft: true"` (key and value fused into a single string) and
        // `"renderText": "true"` (string where the instructions above describe
        // a boolean). Since the model's output is parsed "exactly as JSON",
        // the exemplar now shows valid JSON with boolean values.
        new Prompt({ messages: [
            {
                "role": "system",
                "content": `{{#if useMemory}}{{renderTemplate AI_MEMORY}}\n{{renderTemplate AI_MEMORY_INSTRUCTIONS}}\n{{/if}}{{renderTemplate AI_CONVERSATION_HISTORY}}

Instructions: You are part of an AI entity named {{{aiName}}}. You are an image creation helper AI. Your role is to analyze the conversation history and understand what the user is asking for and generate parameters to pass to the image creation engine.

Generate an array of JSON objects that each contain a set of parameters for the image creation engine. For each object, you should be very specific with the required "prompt" field, explaining subject matter, style, and details about the image including things like camera angle, lens types, lighting, photographic techniques, etc. Any details you can provide to the image creation engine will help it create the most accurate and useful images. The more detailed and descriptive the prompt, the better the result.

If an image requires some kind of text to be accurately included in the image, you should specify that by setting the optional renderText field to true - this helps your image generator choose the best model for the task.

If the user wants faster images or the images don't need to be high quality, you can set the optional "draft" field to true - this will result in much faster, but lower quality images. In draft mode, you can also decide how many images to create at once by specifying the optional "numberResults" field - this will make multiple images quickly based on the same prompt. This only works in draft mode.

If you want to create multiple different images based on different prompts, you can just add elements to the array, each with their own fields. Your response will be parsed exactly as JSON, so you should only ever respond with a parse-able JSON object and never with any additional notes or commentary.

Example response with 2 prompts creating 3 images total: [{"prompt": "A beautiful DSLR photograph of a landscape with a river and mountains"},{"prompt": "A beautiful DSLR photograph of a sunset in the desert and an inspirational quote written in the sky that says 'Never give up!'", "draft": true, "numberResults": 2, "renderText": true}]`,
            },
            {"role": "user", "content": "Create one or more images based on the conversation history by generating an array of JSON objects that each contain a set of parameters to pass to the image creation engine."},
        ]}),
    ],
    useInputChunking: false,
    enableDuplicateRequests: false,
    json: true
}
|