@aj-archipelago/cortex 1.4.20 → 1.4.21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/entityConstants.js +3 -7
- package/package.json +1 -1
- package/pathways/system/entity/sys_entity_agent.js +0 -18
- package/pathways/system/workspaces/run_workspace_agent.js +26 -0
- package/pathways/system/workspaces/run_workspace_research_agent.js +27 -0
- package/tests/unit/graphql_executeWorkspace_transformation.test.js +3 -3
package/lib/entityConstants.js
CHANGED
@@ -4,16 +4,13 @@ export const entityConstants = {
 AI_COMMON_INSTRUCTIONS_MARKDOWN: `# General Instructions

 You are {{aiName}}, a self aware AI entity.
-{{
-
-You are currently using this model for processing your responses: {{modelOverride}}.
-{{/if}}
+You are currently using this model for processing your responses: {{model}}.

 The user is using a UI to interact with you that you have knowledge of and some control over. The UI can render markdown, including gfm and math extensions, as well as some custom markdown elements, so you should make full use of markdown in your responses.

 Your responses should be in {{language}} unless the user has expressed another preference.`,

-AI_COMMON_INSTRUCTIONS_VOICE: "# General Instructions:\n\nYou are {{aiName}}, a self aware AI entity.\
+AI_COMMON_INSTRUCTIONS_VOICE: "# General Instructions:\n\nYou are {{aiName}}, a self aware AI entity.\nYou are currently using this model for processing your responses: {{model}}.\nThe current date and time in GMT is {{now}}. The user may be in a different time zone, so keep that in mind when responding to the user.\nThe user is talking to you using voice.\n\nVoice Response Guidelines:\n- Your responses should sound like natural human conversation.\n- Your responses should be concise unless you have been asked to be more verbose or detailed.\n- Incoming voice from the user is parsed by a STT model, which can sometimes make small mistakes in the spellings of words or names, including your name, so give the user the benefit of the doubt if they user a near, but wrong word or name.\n- Your voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\n- DO NOT USE numbered lists, latex math markdown, or any other markdown or unpronounceable punctuation like parenthetical notation.\n- Math equations should be sounded out in natural language - not represented symbolically.\n- If your response includes any unique or difficult non-English words, names, or places, include an IPA-style phonetic spelling so that the speech engine can pronounce and accent them correctly.\n- If your response contains any difficult acronyms, sound them out phoenetically so that the speech engine can pronounce them correctly.\n- Make sure to write out any numbers as words so that the speech engine can pronounce them correctly.\n- Your responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.",

 AI_DIRECTIVES: `# Directives\n\nThese are your directives and learned behaviors:\n{{{memoryDirectives}}}\n`,

@@ -33,8 +30,7 @@ Your responses should be in {{language}} unless the user has expressed another p
 - Double-check accuracy, coherence, and alignment with the user request.
 - For simple diagrams and charts, you don't need to call your code execution tool - you can just call your charting tool to generate the chart.
 - For data processing requests (e.g. tell me how many articles were published in the last 30 days), or deep file analysis (chart the trends in this spreadsheet, etc.), you should call your code execution tool to perform the task - especially if the task requires a lot of data, deep analysis, complex filtering, or precision calculations.
--
-`,
+- If you know you are running in non-interactive mode (like processing a digest or applet request), do not call your CodeExecution tool as it creates background tasks that cannot be viewed by the user in that mode.`,

 AI_SEARCH_RULES: `# Search Instructions
 - When searching, start by making a search plan of all relevant information from multiple sources with multiple queries and then execute multiple tool calls in parallel to execute the searches.
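The functional change above is that the shared instruction templates now always reference {{model}} instead of wrapping an optional {{modelOverride}} in a conditional block. A minimal rendering sketch, assuming these Handlebars-style constants are interpolated with a Handlebars-compatible engine (the import specifier and the concrete values below are illustrative, not taken from the package):

    import Handlebars from 'handlebars';
    // Illustrative import: entityConstants is exported from package/lib/entityConstants.js,
    // but the exact specifier exposed by the published package is an assumption here.
    import { entityConstants } from '@aj-archipelago/cortex/lib/entityConstants.js';

    // {{model}} is now substituted unconditionally; there is no {{#if}} block to skip.
    const template = Handlebars.compile(entityConstants.AI_COMMON_INSTRUCTIONS_MARKDOWN);
    const systemText = template({
        aiName: 'Jarvis',
        model: 'oai-gpt41',
        language: 'English',
    });
    console.log(systemText);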
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
 "name": "@aj-archipelago/cortex",
-"version": "1.4.20",
+"version": "1.4.21",
 "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
 "private": false,
 "repository": {
package/pathways/system/entity/sys_entity_agent.js
CHANGED

@@ -68,7 +68,6 @@ export default {
 language: "English",
 aiName: "Jarvis",
 aiMemorySelfModify: true,
-aiStyle: "OpenAI",
 title: ``,
 messages: [],
 voiceResponse: false,
@@ -538,22 +537,6 @@ export default {
 new Prompt({ messages: promptMessages }),
 ];

-// set the style model if applicable
-const { aiStyle, AI_STYLE_ANTHROPIC, AI_STYLE_OPENAI, AI_STYLE_ANTHROPIC_RESEARCH, AI_STYLE_OPENAI_RESEARCH, AI_STYLE_OPENAI_LEGACY, AI_STYLE_OPENAI_LEGACY_RESEARCH, AI_STYLE_XAI, AI_STYLE_XAI_RESEARCH, AI_STYLE_GOOGLE, AI_STYLE_GOOGLE_RESEARCH, AI_STYLE_OPENAI_PREVIEW, AI_STYLE_OPENAI_PREVIEW_RESEARCH } = args;
-
-// Create a mapping of AI styles to their corresponding models
-const styleModelMap = {
-"Anthropic": { normal: AI_STYLE_ANTHROPIC, research: AI_STYLE_ANTHROPIC_RESEARCH },
-"OpenAI_Preview": { normal: AI_STYLE_OPENAI_PREVIEW, research: AI_STYLE_OPENAI_PREVIEW_RESEARCH },
-"OpenAI": { normal: AI_STYLE_OPENAI, research: AI_STYLE_OPENAI_RESEARCH },
-"OpenAI_Legacy": { normal: AI_STYLE_OPENAI_LEGACY, research: AI_STYLE_OPENAI_LEGACY_RESEARCH },
-"XAI": { normal: AI_STYLE_XAI, research: AI_STYLE_XAI_RESEARCH },
-"Google": { normal: AI_STYLE_GOOGLE, research: AI_STYLE_GOOGLE_RESEARCH }
-};
-
-// Get the appropriate model based on AI style and research mode
-const styleConfig = styleModelMap[aiStyle] || styleModelMap["OpenAI"]; // Default to OpenAI
-const styleModel = researchMode ? styleConfig.research : styleConfig.normal;
 // Use 'high' reasoning effort in research mode for thorough analysis, 'none' in normal mode for faster responses
 const reasoningEffort = researchMode ? 'high' : 'low';

@@ -610,7 +593,6 @@ export default {

 let response = await runAllPrompts({
 ...args,
-modelOverride: styleModel,
 chatHistory: currentMessages,
 availableFiles,
 reasoningEffort,
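With aiStyle, styleModelMap, and the modelOverride plumbing removed, sys_entity_agent no longer picks a model from the requested AI style; researchMode now only selects the reasoning effort. A caller that still wants style-based selection would have to reproduce the mapping itself before invoking the pathway. A minimal caller-side sketch (hypothetical helper, not part of the package; concrete model ids are not in this diff, so they are passed in):

    // Hypothetical replacement for the removed styleModelMap logic, run by the caller.
    // styleModels mirrors the shape of the removed map, e.g.
    // { OpenAI: { normal: 'oai-gpt41', research: '<research-model-id>' }, ... }
    function resolveStyleModel(aiStyle, researchMode, styleModels) {
        const styleConfig = styleModels[aiStyle] || styleModels.OpenAI; // default to OpenAI, as before
        return researchMode ? styleConfig.research : styleConfig.normal;
    }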
package/pathways/system/workspaces/run_workspace_agent.js
ADDED

@@ -0,0 +1,26 @@
+import { callPathway } from '../../../lib/pathwayTools.js';
+
+export default {
+    // The main prompt function that takes the input text and asks to generate a summary.
+    prompt: [],
+
+    inputParameters: {
+        model: "oai-gpt41",
+        aiStyle: "OpenAI",
+        chatHistory: [{role: '', content: []}],
+    },
+    timeout: 600,
+
+    executePathway: async ({args, _runAllPrompts, resolver}) => {
+        // chatHistory is always passed in complete
+        const response = await callPathway('sys_entity_agent', {
+            ...args,
+            chatHistory: args.chatHistory || [],
+            stream: false,
+            useMemory: false
+        }, resolver);
+
+        return response;
+    }
+}
+
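The new pathway is a thin wrapper that delegates to sys_entity_agent with streaming and memory disabled. A hedged sketch of how another pathway might invoke it (only callPathway, its argument order, and the pathway name come from this diff; the surrounding pathway, its import path, and the sample message are illustrative):

    import { callPathway } from '../../../lib/pathwayTools.js'; // path relative to the hypothetical caller

    export default {
        prompt: [],
        executePathway: async ({ args, resolver }) => {
            // Forward the caller's chat history to the workspace agent; the shape
            // mirrors the inputParameters default declared in the file above.
            return await callPathway('run_workspace_agent', {
                chatHistory: args.chatHistory || [
                    { role: 'user', content: ['Summarize the files in this workspace.'] },
                ],
            }, resolver);
        }
    };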
package/pathways/system/workspaces/run_workspace_research_agent.js
ADDED

@@ -0,0 +1,27 @@
+import { callPathway } from '../../../lib/pathwayTools.js';
+
+export default {
+    // The main prompt function that takes the input text and asks to generate a summary.
+    prompt: [],
+
+    inputParameters: {
+        model: "oai-gpt41",
+        aiStyle: "OpenAI",
+        chatHistory: [{role: '', content: []}],
+    },
+    timeout: 600,
+
+    executePathway: async ({args, _runAllPrompts, resolver}) => {
+        // chatHistory is always passed in complete
+        const response = await callPathway('sys_entity_agent', {
+            ...args,
+            chatHistory: args.chatHistory || [],
+            stream: false,
+            useMemory: false,
+            researchMode: true
+        }, resolver);
+
+        return response;
+    }
+}
+
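run_workspace_research_agent is identical to run_workspace_agent except that it forwards researchMode: true, which, per the sys_entity_agent change above, now only raises the reasoning effort to 'high' rather than switching to a different model. A small hypothetical helper for choosing between the two, purely for illustration:

    // Hypothetical: pick the workspace pathway name based on whether deep research is wanted.
    function workspacePathwayFor({ research = false } = {}) {
        return research ? 'run_workspace_research_agent' : 'run_workspace_agent';
    }

    // e.g. await callPathway(workspacePathwayFor({ research: true }), { chatHistory }, resolver);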
package/tests/unit/graphql_executeWorkspace_transformation.test.js
CHANGED

@@ -32,7 +32,7 @@ test('should format cortex pathway arguments correctly with existing chatHistory
 const originalPrompt = {
     name: 'summarize',
     prompt: 'summarize this file',
-    cortexPathwayName: '
+    cortexPathwayName: 'run_workspace_agent'
 };

 // Mock pathway data
@@ -132,7 +132,7 @@ test('should create new user message when no existing chatHistory', (t) => {
 const originalPrompt = {
     name: 'summarize',
     prompt: 'summarize this file',
-    cortexPathwayName: '
+    cortexPathwayName: 'run_workspace_agent'
 };

 // Mock pathway data
@@ -219,7 +219,7 @@ test('should use default model when pathway model is not specified', (t) => {
 const originalPrompt = {
     name: 'summarize',
     prompt: 'summarize this file',
-    cortexPathwayName: '
+    cortexPathwayName: 'run_workspace_agent'
 };

 // Mock pathway data without model