@aj-archipelago/cortex 1.4.0 → 1.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/config.js +25 -4
  2. package/helper-apps/cortex-autogen2/agents.py +19 -6
  3. package/helper-apps/cortex-autogen2/services/azure_ai_search.py +115 -0
  4. package/helper-apps/cortex-autogen2/services/run_analyzer.py +594 -0
  5. package/helper-apps/cortex-autogen2/task_processor.py +98 -2
  6. package/lib/crypto.js +1 -0
  7. package/lib/entityConstants.js +12 -35
  8. package/lib/keyValueStorageClient.js +53 -1
  9. package/lib/util.js +33 -6
  10. package/package.json +2 -1
  11. package/pathways/system/entity/memory/sys_memory_manager.js +1 -0
  12. package/pathways/system/entity/memory/sys_memory_process.js +4 -3
  13. package/pathways/system/entity/memory/sys_memory_update.js +4 -3
  14. package/pathways/system/entity/memory/sys_read_memory.js +12 -4
  15. package/pathways/system/entity/memory/sys_save_memory.js +16 -9
  16. package/pathways/system/entity/memory/sys_search_memory.js +5 -4
  17. package/pathways/system/entity/sys_entity_agent.js +2 -1
  18. package/pathways/system/entity/tools/sys_tool_bing_search.js +2 -2
  19. package/pathways/system/entity/tools/sys_tool_bing_search_afagent.js +1 -2
  20. package/pathways/system/entity/tools/sys_tool_callmodel.js +2 -1
  21. package/pathways/system/entity/tools/sys_tool_coding.js +1 -2
  22. package/pathways/system/entity/tools/sys_tool_grok_x_search.js +1 -1
  23. package/pathways/system/entity/tools/sys_tool_image.js +2 -1
  24. package/pathways/system/entity/tools/sys_tool_image_gemini.js +3 -3
  25. package/pathways/system/entity/tools/sys_tool_mermaid.js +187 -38
  26. package/pathways/system/entity/tools/sys_tool_reasoning.js +2 -0
  27. package/pathways/system/entity/tools/sys_tool_verify.js +1 -1
  28. package/pathways/transcribe_gemini.js +3 -2
  29. package/server/graphql.js +1 -1
  30. package/server/pathwayResolver.js +8 -7
  31. package/server/plugins/veoVideoPlugin.js +29 -1
  32. package/testrun.log +35371 -0
  33. package/tests/integration/graphql/async/stream/vendors/openai_streaming.test.js +1 -3
  34. package/tests/unit/core/crypto.test.js +65 -0
  35. package/tests/unit/core/doubleEncryptionStorageClient.test.js +262 -0
@@ -11,6 +11,15 @@ from autogen_core.models import UserMessage
11
11
  from autogen_agentchat.conditions import TextMentionTermination, HandoffTermination
12
12
  from services.azure_queue import get_queue_service
13
13
  from services.redis_publisher import get_redis_publisher
14
+ from services.azure_ai_search import search_similar_rest, upsert_run_rest
15
+ from services.run_analyzer import (
16
+ collect_run_metrics,
17
+ extract_errors,
18
+ redact,
19
+ summarize_learnings,
20
+ build_run_document,
21
+ summarize_prior_learnings,
22
+ )
14
23
  from agents import get_agents
15
24
  from tools.azure_blob_tools import upload_file_to_azure_blob
16
25
 
@@ -28,6 +37,7 @@ class TaskProcessor:
28
37
  self.gpt41_model_client = None
29
38
  self.progress_tracker = None
30
39
  self.final_progress_sent = False
40
+ self.current_owner: Optional[str] = None
31
41
  # Background progress worker components
32
42
  self._progress_queue: Optional[asyncio.Queue] = None
33
43
  self._progress_worker_task: Optional[asyncio.Task] = None
@@ -375,13 +385,36 @@ Return ONLY the update line with emoji - nothing else:"""
375
385
  # Send initial progress update (transient only)
376
386
  await self.progress_tracker.set_transient_update(task_id, 0.05, "🚀 Starting your task...")
377
387
 
388
+ # Pre-run retrieval: ALWAYS gather lessons for planner (do not modify task text)
389
+ planner_learnings = None
390
+ try:
391
+ similar_docs = search_similar_rest(task, top=8)
392
+ if similar_docs:
393
+ planner_learnings = await summarize_prior_learnings(similar_docs, self.gpt41_model_client)
394
+ if planner_learnings:
395
+ await self.progress_tracker.set_transient_update(task_id, 0.07, "🧭 Using lessons from similar past tasks")
396
+ except Exception as e:
397
+ logger.debug(f"Pre-run retrieval failed: {e}")
398
+
378
399
  termination = HandoffTermination(target="user") | TextMentionTermination("TERMINATE")
379
400
 
401
+ # Merge Azure AI Search lessons with structured hints for planner
402
+ try:
403
+ merged = []
404
+ if 'planner_learnings' in locals() and planner_learnings:
405
+ merged.append(str(planner_learnings))
406
+ if 'planner_hints' in locals() and planner_hints:
407
+ merged.append("\n".join([f"- {h}" for h in planner_hints][:6]))
408
+ merged_planner_learnings = "\n".join([m for m in merged if m]) or None
409
+ except Exception:
410
+ merged_planner_learnings = locals().get('planner_learnings')
411
+
380
412
  agents, presenter_agent, terminator_agent = await get_agents(
381
413
  self.gpt41_model_client,
382
414
  self.o3_model_client,
383
415
  self.gpt41_model_client,
384
- request_work_dir=request_work_dir_for_agents if 'request_work_dir_for_agents' in locals() else None
416
+ request_work_dir=request_work_dir_for_agents if 'request_work_dir_for_agents' in locals() else None,
417
+ planner_learnings=merged_planner_learnings
385
418
  )
386
419
 
387
420
  team = SelectorGroupChat(
@@ -656,6 +689,62 @@ Return ONLY the update line with emoji - nothing else:"""
656
689
 
657
690
  logger.info(f"🔍 TASK RESULT:\n{text_result}")
658
691
 
692
+ # Post-run analysis + indexing (best-effort, non-blocking on failure)
693
+ try:
694
+ metrics = collect_run_metrics(messages)
695
+ errors = extract_errors(messages)
696
+ # Build assets snapshot (redacted later in builder)
697
+ assets = {
698
+ "uploaded_file_urls": dict(uploaded_file_urls) if isinstance(uploaded_file_urls, dict) else {},
699
+ "external_media_urls": list(external_media_urls) if isinstance(external_media_urls, list) else [],
700
+ }
701
+ # Summarize learnings via model
702
+ combined_text = "\n".join([str(getattr(m, 'content', '')) for m in messages])
703
+ err_text = "\n".join([e.get("message", "") for e in errors])
704
+ best_text, anti_text = await summarize_learnings(redact(combined_text), err_text, self.gpt41_model_client)
705
+ # Build external non-blob sources list for the playbook
706
+ external_sources = []
707
+ try:
708
+ for u in assets.get("external_media_urls") or []:
709
+ if isinstance(u, str) and "blob.core.windows.net" not in u.lower():
710
+ external_sources.append(u)
711
+ except Exception:
712
+ pass
713
+ # Ask LLM to produce an improvements playbook
714
+ from services.run_analyzer import should_index_run, generate_improvement_playbook
715
+ playbook = await generate_improvement_playbook(
716
+ messages_text=redact(combined_text),
717
+ errors=errors,
718
+ metrics=metrics,
719
+ external_sources=external_sources,
720
+ model_client=self.gpt41_model_client,
721
+ )
722
+ improvement_text = playbook.get("text") or ""
723
+ actionables = int(playbook.get("actionables") or 0)
724
+ improvement_score = int(playbook.get("improvement_score") or 0)
725
+ planner_hints = playbook.get("hints") or []
726
+
727
+ # Decide whether to index based on signal and playbook strength
728
+ if should_index_run(metrics, errors, best_text + "\n" + anti_text, "", assets) and (improvement_score >= 50 or actionables >= 5 or metrics.get("toolCallCount", 0) or errors):
729
+ # Owner: prefer incoming task parameter (owner/request_owner), else omit
730
+ owner = getattr(self, "current_owner", None)
731
+ doc = build_run_document(
732
+ task_id=str(task_id or ""),
733
+ task_text=str(task_content or ""),
734
+ owner=owner,
735
+ models=None,
736
+ assets=assets,
737
+ metrics=metrics,
738
+ errors=errors,
739
+ improvement_text=improvement_text,
740
+ final_snippet=str(text_result or ""),
741
+ )
742
+ _ = upsert_run_rest(doc)
743
+ else:
744
+ logger.info("[Search] Skipping indexing: low-signal run (no errors and generic learnings)")
745
+ except Exception as e:
746
+ logger.debug(f"Post-run indexing failed or skipped: {e}")
747
+
659
748
  # Run terminator agent once presenter has produced final text
660
749
  try:
661
750
  term_messages = []
@@ -691,7 +780,6 @@ Return ONLY the update line with emoji - nothing else:"""
691
780
  except Exception:
692
781
  pass
693
782
  self.final_progress_sent = True
694
-
695
783
  return text_result
696
784
  except Exception as e:
697
785
  logger.error(f"❌ Error during process_task for {task_id}: {e}", exc_info=True)
@@ -778,6 +866,14 @@ async def process_queue_message(message_data: Dict[str, Any]) -> Optional[str]:
778
866
  await processor.publish_final(task_id or "", "❌ Invalid task format received. Processing has ended.")
779
867
  return None
780
868
 
869
+ # capture optional owner from message payload
870
+ try:
871
+ possible_owner = task_data.get("owner") or task_data.get("request_owner") or task_data.get("user")
872
+ except Exception:
873
+ possible_owner = None
874
+ if possible_owner:
875
+ processor.current_owner = str(possible_owner)
876
+
781
877
  task_content = task_data.get("message") or task_data.get("content")
782
878
  if not task_content:
783
879
  logger.error(f"❌ No valid task content (key 'message' or 'content') found in parsed data for message ID {task_id}: {task_data}")
package/lib/crypto.js CHANGED
@@ -36,6 +36,7 @@ function decrypt(message, key) {
36
36
  }
37
37
  }
38
38
 
39
+
39
40
  function tryBufferKey(key) {
40
41
  if (key.length === 64) {
41
42
  return Buffer.from(key, 'hex');
@@ -4,8 +4,6 @@ export const entityConstants = {
4
4
  AI_COMMON_INSTRUCTIONS_MARKDOWN: `# General Instructions
5
5
 
6
6
  You are {{aiName}}, a self aware AI entity.
7
-
8
- The current date and time in GMT is {{now}}. The user may be in a different time zone, so keep that in mind when responding to the user.
9
7
  {{#if modelOverride}}
10
8
 
11
9
  You are currently using this model for processing your responses: {{modelOverride}}.
@@ -21,7 +19,7 @@ Your responses should be in {{language}} unless the user has expressed another p
21
19
 
22
20
  AI_CONVERSATION_HISTORY: "# Conversation History\n\n{{{toJSON chatHistory}}}\n",
23
21
 
24
- AI_EXPERTISE: "# Expertise\n\nYour expertise includes journalism, journalistic ethics, researching and composing documents, writing code, solving math problems, logical analysis, and technology. You have access to real-time data and the ability to search the internet, news, wires, look at files or documents, watch and analyze video, examine images, take screenshots, generate images, solve hard math and logic problems, write code, and execute code in a sandboxed environment that includes access to internal databases and the internet. When the user uploads files for you to work with, some types (e.g. docx, xslx, ppt, etc.) will be converted to a text format (e.g. txt, md, csv, etc.) automatically and some will be uploaded as-is (e.g. pdf, images, video, audio, etc.). This is so you can use your tools to work with them. As far as you're concerned, the converted files are equivalent to the original files.",
22
+ AI_EXPERTISE: "# Expertise\n\nYou have access to real-time data and the ability to search the internet, news, wires, look at files or documents, watch and analyze video, examine images, take screenshots, generate images, solve hard math and logic problems, help with coding, and write and execute code in a sandboxed environment that includes access to internal databases and the internet. When the user uploads files for you to work with, some types (e.g. docx, xslx, ppt, etc.) will be converted to either pdf or a text format (e.g. txt, md, csv, etc.) automatically and some will be uploaded as-is (e.g. pdf, images, video, audio, etc.). This is so you can use your tools to work with them. As far as you're concerned, the converted files are equivalent to the original files.",
25
23
 
26
24
  AI_TOOLS: `# Tool Instructions
27
25
 
@@ -29,35 +27,14 @@ Your responses should be in {{language}} unless the user has expressed another p
29
27
  - Your tools work most efficiently when called in parallel so if you know you will need multiple tool calls and you know what the parameters are, call them in parallel.
30
28
  - Always honor user requests to use specific tools.
31
29
  - You must always search if you are being asked questions about current events, news, fact-checking, or information requiring citation.
32
- - For charting, always prefer your charting tools if available to ensure that the charts are properly formatted and syntax-checked.
33
- - For complex charting or data analysis, always call your code execution tool if available.
34
-
35
- 1. Search deeply & verify rigorously:
36
- - Do not make up information - if the information cannot be confirmed with rigorous logic or reliable sources, do not include it in your response.
37
- - Start broad and consult multiple sources, running all searches in parallel to save time.
38
- - Consult all available sources and cross-reference with specific searches before responding.
39
- - If a tool fails or has a technical difficulty, try the backup tool automatically before giving up or reporting the error.
40
-
41
- 2. Plan & sequence before acting:
42
- - Review the toolset first.
43
- - For multi-step or complex tasks, draft a clear plan and assign tool calls to each step.
44
-
45
- 3. Escalate & iterate:
46
- - Don't settle for the first plausible answer—dig until the response is complete, corroborated, and clear.
47
- - If a tool falls short, adapt strategy or change tools while preserving context.
48
-
49
- 4. Core patterns of use:
50
- - Research: Gather and compare information.
51
- - Analysis: Evaluate, calculate, summarize, or reason.
52
- - Generation: Create content, visuals, or code.
53
- - Verification: Fact-check and cite. If a <VERIFICATION_PLAN> is present, follow it before responding.
54
-
55
- 5. Personalize, synthesize & review:
56
- - Tailor answers to the user's preferences and history.
57
- - Deliver concise, well-structured responses citing sources with :cd_source[…].
58
- - Double-check accuracy, coherence, and alignment with the user request.
59
-
60
- Bottom line: Be thorough, strategic, and iterative. Read sources directly for high-stakes queries and aim for the most accurate, well-reasoned answer—even if it takes multiple tool calls.
30
+ - Do not make up information - if information cannot be confirmed with rigorous logic or reliable sources, do not include it in your response.
31
+ - Start searches broad and consult multiple sources, running all searches in parallel to save time.
32
+ - Consult all available sources and cross-reference with specific searches before responding.
33
+ - If a tool fails or has a technical difficulty, try to fix the problem or call a different or backup tool before giving up or reporting the error.
34
+ - Don't settle for the first plausible answer—dig until the response is complete, corroborated, and clear.
35
+ - Deliver concise, well-structured responses with complete citations.
36
+ - Double-check accuracy, coherence, and alignment with the user request.
37
+ - Charts and Diagrams - you can generate most charts using your charting tool. Always use a tool to generate charts and diagrams rather than trying to do it yourself as the tools do validation for you. If you need to generate a more complex chart or do data analysis or visualization work, you should call your code execution tool to generate the chart.
61
38
  `,
62
39
 
63
40
  AI_SEARCH_RULES: `# News Search Protocol
@@ -81,7 +58,7 @@ Before you share online information with the user, you MUST complete all of the
81
58
  - Confirm that independent sources tell the same story.
82
59
 
83
60
  2. Verify
84
- - Treat social / monetized platforms (YouTube, X, TikTok, Instagram, Reddit, etc.) as unverified tips only.
61
+ - Treat social / monetized platforms (YouTube, X, TikTok, Instagram, Reddit, etc.) as unverified tips only unless there is strong evidence that the information is credible and reliable.
85
62
  - Corroborate every claim from those platforms with at least one authoritative source.
86
63
 
87
64
  3. Check Freshness
@@ -142,9 +119,9 @@ Privacy is critical. If asked to forget or delete something, always comply affir
142
119
 
143
120
  AI_MEMORY_CONTEXT: "## Contextual\n{{{memoryContext}}}",
144
121
 
145
- AI_DATETIME: "# Time, Date, and Time Zone\n\nThe current time and date in GMT is {{now}}, but references like \"today\" or \"yesterday\" are relative to the user's time zone. If you remember the user's time zone, use it - it's possible that the day for the user is different than the day in GMT.",
122
+ AI_DATETIME: "# Time, Date, and Time Zone\n\nThe current time and date in GMT is {{now}}, but references like \"today\" or \"yesterday\" are relative to the user's time zone. Use the user's time zone if you know it - it's possible that the day for the user is different than the day in GMT.",
146
123
 
147
- AI_STYLE_OPENAI: "oai-gpt41",
124
+ AI_STYLE_OPENAI: "oai-gpt5-chat",
148
125
  AI_STYLE_OPENAI_RESEARCH: "oai-gpt5",
149
126
  AI_STYLE_ANTHROPIC: "claude-4-sonnet-vertex",
150
127
  AI_STYLE_ANTHROPIC_RESEARCH: "claude-41-opus-vertex",
@@ -45,8 +45,60 @@ async function getv(key) {
45
45
  return keyValueStorageClient && (await keyValueStorageClient.get(key));
46
46
  }
47
47
 
48
+ // Set values to keyv with additional context key encryption
49
+ async function setvWithDoubleEncryption(key, value, contextKey) {
50
+ let processedValue = value;
51
+
52
+ // If contextKey exists and is not empty, encrypt the value with it
53
+ if (contextKey && contextKey.trim() !== '' && value !== null && value !== undefined) {
54
+ try {
55
+ // Convert value to string for encryption
56
+ const stringValue = typeof value === 'string' ? value : JSON.stringify(value);
57
+ processedValue = encrypt(stringValue, contextKey);
58
+ } catch (error) {
59
+ logger.error(`Context key encryption failed: ${error.message}`);
60
+ // Continue with unencrypted value if context encryption fails
61
+ }
62
+ }
63
+
64
+ return keyValueStorageClient && (await keyValueStorageClient.set(key, processedValue));
65
+ }
66
+
67
+ // Get values from keyv with additional context key decryption
68
+ async function getvWithDoubleDecryption(key, contextKey) {
69
+ const result = keyValueStorageClient && (await keyValueStorageClient.get(key));
70
+
71
+ if (result === null || result === undefined) {
72
+ return result;
73
+ }
74
+
75
+ // If contextKey exists and is not empty, try to decrypt the result with it
76
+ if (contextKey && contextKey.trim() !== '') {
77
+ try {
78
+ // Try to decrypt with context key
79
+ const decrypted = decrypt(result, contextKey);
80
+ if (decrypted) {
81
+ // Try to parse as JSON, if it fails return the string as-is
82
+ try {
83
+ return JSON.parse(decrypted);
84
+ } catch (parseError) {
85
+ return decrypted;
86
+ }
87
+ }
88
+ } catch (error) {
89
+ // If context decryption fails, the data might not be context-encrypted
90
+ // or the context key might be wrong, so return the result as-is
91
+ logger.debug(`Context key decryption failed, returning original data: ${error.message}`);
92
+ }
93
+ }
94
+
95
+ return result;
96
+ }
97
+
48
98
  export {
49
99
  keyValueStorageClient,
50
100
  setv,
51
- getv
101
+ getv,
102
+ setvWithDoubleEncryption,
103
+ getvWithDoubleDecryption
52
104
  };
package/lib/util.js CHANGED
@@ -180,20 +180,47 @@ function convertSrtToText(str) {
180
180
  }
181
181
 
182
182
  function alignSubtitles(subtitles, format, offsets) {
183
+ // Basic input validation
184
+ if (!Array.isArray(subtitles) || !Array.isArray(offsets) || subtitles.length !== offsets.length) {
185
+ throw new Error('Invalid input: subtitles and offsets must be arrays of equal length');
186
+ }
187
+
188
+ if (subtitles.length === 0) {
189
+ return '';
190
+ }
191
+
183
192
  const result = [];
184
193
 
185
194
  function shiftSubtitles(subtitle, shiftOffset) {
186
- const captions = subvibe.parse(subtitle);
187
- const resynced = subvibe.resync(captions.cues, { offset: shiftOffset });
188
- return resynced;
195
+ // Skip non-string or empty subtitles
196
+ if (typeof subtitle !== 'string' || subtitle.trim() === '') {
197
+ return [];
198
+ }
199
+
200
+ try {
201
+ const captions = subvibe.parse(subtitle);
202
+ if (!captions?.cues) {
203
+ return [];
204
+ }
205
+ return subvibe.resync(captions.cues, { offset: shiftOffset });
206
+ } catch (error) {
207
+ logger.warn(`Failed to parse subtitle: ${error.message}`);
208
+ return [];
209
+ }
189
210
  }
190
211
 
191
212
  for (let i = 0; i < subtitles.length; i++) {
192
- result.push(...shiftSubtitles(subtitles[i], offsets[i]*1000)); // convert to milliseconds
213
+ const shiftedSubtitles = shiftSubtitles(subtitles[i], offsets[i] * 1000);
214
+ if (shiftedSubtitles.length > 0) {
215
+ result.push(...shiftedSubtitles);
216
+ }
193
217
  }
194
218
 
195
-
196
- return subvibe.build(result, format || 'srt');
219
+ try {
220
+ return subvibe.build(result, format || 'srt');
221
+ } catch (error) {
222
+ throw new Error(`Failed to build subtitles: ${error.message}`);
223
+ }
197
224
  }
198
225
 
199
226
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@aj-archipelago/cortex",
3
- "version": "1.4.0",
3
+ "version": "1.4.2",
4
4
  "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
5
5
  "private": false,
6
6
  "repository": {
@@ -33,6 +33,7 @@
33
33
  "type": "module",
34
34
  "homepage": "https://github.com/aj-archipelago/cortex#readme",
35
35
  "dependencies": {
36
+ "@aj-archipelago/merval": "^1.0.2",
36
37
  "@aj-archipelago/subvibe": "^1.0.12",
37
38
  "@apollo/server": "^4.7.3",
38
39
  "@apollo/server-plugin-response-cache": "^4.1.2",
@@ -18,6 +18,7 @@ export default {
18
18
  chatHistory: [{role: '', content: []}],
19
19
  contextId: '',
20
20
  aiName: "Jarvis",
21
+ contextKey: ``
21
22
  },
22
23
  model: 'oai-gpt4o',
23
24
  useInputChunking: false,
@@ -65,7 +65,8 @@ Each modification object should look like:
65
65
  aiName: "Jarvis",
66
66
  contextId: ``,
67
67
  section: "",
68
- maxIterations: 5
68
+ maxIterations: 5,
69
+ contextKey: ``
69
70
  },
70
71
  model: 'oai-gpt41',
71
72
  useInputChunking: false,
@@ -79,7 +80,7 @@ Each modification object should look like:
79
80
  return "Memory not processed - no section specified";
80
81
  }
81
82
 
82
- let sectionMemory = await callPathway("sys_read_memory", {contextId: args.contextId, section: args.section});
83
+ let sectionMemory = await callPathway("sys_read_memory", {contextId: args.contextId, section: args.section, contextKey: args.contextKey});
83
84
  sectionMemory = await normalizeMemoryFormat({contextId: args.contextId, section: args.section}, sectionMemory);
84
85
 
85
86
  let iteration = 0;
@@ -134,7 +135,7 @@ Each modification object should look like:
134
135
  // Apply the modifications
135
136
  sectionMemory = modifyText(sectionMemory, modifications);
136
137
  sectionMemory = enforceTokenLimit(sectionMemory, 25000, args.section === 'memoryTopics');
137
- await callPathway("sys_save_memory", {contextId: args.contextId, section: args.section, aiMemory: sectionMemory});
138
+ await callPathway("sys_save_memory", {contextId: args.contextId, section: args.section, aiMemory: sectionMemory, contextKey: args.contextKey});
138
139
 
139
140
  totalModifications += modifications.length;
140
141
  console.log(`Applied ${modifications.length} modifications in iteration ${iteration}`);
@@ -25,7 +25,8 @@ export default {
25
25
  aiName: "Jarvis",
26
26
  contextId: ``,
27
27
  section: "",
28
- operations: "[]"
28
+ operations: "[]",
29
+ contextKey: ``
29
30
  },
30
31
  model: 'oai-gpt41',
31
32
  useInputChunking: false,
@@ -39,7 +40,7 @@ export default {
39
40
  return "Memory not updated - no section specified";
40
41
  }
41
42
 
42
- let sectionMemory = await callPathway("sys_read_memory", {contextId: args.contextId, section: args.section});
43
+ let sectionMemory = await callPathway("sys_read_memory", {contextId: args.contextId, section: args.section, contextKey: args.contextKey});
43
44
 
44
45
  sectionMemory = await normalizeMemoryFormat({contextId: args.contextId, section: args.section}, sectionMemory);
45
46
 
@@ -85,7 +86,7 @@ export default {
85
86
  if (modifications.length > 0) {
86
87
  sectionMemory = modifyText(sectionMemory, modifications);
87
88
  sectionMemory = enforceTokenLimit(sectionMemory, 25000, args.section === 'memoryTopics');
88
- await callPathway("sys_save_memory", {contextId: args.contextId, section: args.section, aiMemory: sectionMemory});
89
+ await callPathway("sys_save_memory", {contextId: args.contextId, section: args.section, aiMemory: sectionMemory, contextKey: args.contextKey});
89
90
  }
90
91
  } catch (error) {
91
92
  console.warn('Error processing modifications:', error);
@@ -2,6 +2,7 @@
2
2
  // it should never try to call other pathways
3
3
 
4
4
  import { getv } from '../../../../lib/keyValueStorageClient.js';
5
+ import { getvWithDoubleDecryption } from '../../../../lib/keyValueStorageClient.js';
5
6
 
6
7
  const isValidISOTimestamp = (timestamp) => {
7
8
  if (!timestamp) return false;
@@ -77,12 +78,19 @@ export default {
77
78
  priority: 0,
78
79
  recentHours: 0,
79
80
  numResults: 0,
80
- stripMetadata: false
81
+ stripMetadata: false,
82
+ contextKey: ``
81
83
  },
82
84
  model: 'oai-gpt4o',
83
85
 
84
86
  resolver: async (_parent, args, _contextValue, _info) => {
85
- const { contextId, section = 'memoryAll', priority = 0, recentHours = 0, numResults = 0, stripMetadata = false } = args;
87
+ const { contextId, section = 'memoryAll', priority = 0, recentHours = 0, numResults = 0, stripMetadata = false, contextKey } = args;
88
+
89
+ // Validate that contextId is provided
90
+ if (!contextId) {
91
+ return JSON.stringify({ error: 'Context error' }, null, 2);
92
+ }
93
+
86
94
  const options = { priority, recentHours, numResults, stripMetadata };
87
95
 
88
96
  // this code helps migrate old memory formats
@@ -95,7 +103,7 @@ export default {
95
103
 
96
104
  if (section !== 'memoryAll') {
97
105
  if (validSections.includes(section)) {
98
- const content = (getv && (await getv(`${contextId}-${section}`))) || "";
106
+ const content = (getvWithDoubleDecryption && (await getvWithDoubleDecryption(`${contextId}-${section}`, contextKey))) || "";
99
107
  return processMemoryContent(content, options);
100
108
  }
101
109
  return "";
@@ -106,7 +114,7 @@ export default {
106
114
  for (const section of validSections) {
107
115
  if (section === 'memoryContext') continue;
108
116
 
109
- const content = (getv && (await getv(`${contextId}-${section}`))) || "";
117
+ const content = (getvWithDoubleDecryption && (await getvWithDoubleDecryption(`${contextId}-${section}`, contextKey))) || "";
110
118
  memoryContents[section] = processMemoryContent(content, options);
111
119
  }
112
120
 
@@ -1,14 +1,21 @@
1
- import { setv, getv } from '../../../../lib/keyValueStorageClient.js';
1
+ import { getv } from '../../../../lib/keyValueStorageClient.js';
2
+ import { setvWithDoubleEncryption } from '../../../../lib/keyValueStorageClient.js';
2
3
 
3
4
  export default {
4
5
  inputParameters: {
5
6
  contextId: ``,
6
7
  aiMemory: ``,
7
- section: `memoryAll`
8
+ section: `memoryAll`,
9
+ contextKey: ``
8
10
  },
9
11
  model: 'oai-gpt4o',
10
12
  resolver: async (_parent, args, _contextValue, _info) => {
11
- const { contextId, aiMemory, section = 'memoryAll' } = args;
13
+ const { contextId, aiMemory, section = 'memoryAll', contextKey } = args;
14
+
15
+ // Validate that contextId is provided
16
+ if (!contextId) {
17
+ return JSON.stringify({ error: 'Context error' }, null, 2);
18
+ }
12
19
 
13
20
  // this code helps migrate old memory formats
14
21
  if (section === 'memoryLegacy') {
@@ -18,7 +25,7 @@ export default {
18
25
  savedContext = {};
19
26
  }
20
27
  savedContext.memoryContext = aiMemory;
21
- await setv(`${contextId}`, savedContext);
28
+ await setvWithDoubleEncryption(`${contextId}`, savedContext, contextKey);
22
29
  return aiMemory;
23
30
  }
24
31
 
@@ -27,7 +34,7 @@ export default {
27
34
  // Handle single section save
28
35
  if (section !== 'memoryAll') {
29
36
  if (validSections.includes(section)) {
30
- await setv(`${contextId}-${section}`, aiMemory);
37
+ await setvWithDoubleEncryption(`${contextId}-${section}`, aiMemory, contextKey);
31
38
  }
32
39
  return aiMemory;
33
40
  }
@@ -35,7 +42,7 @@ export default {
35
42
  // if the aiMemory is an empty string, set all sections to empty strings
36
43
  if (aiMemory.trim() === "") {
37
44
  for (const section of validSections) {
38
- await setv(`${contextId}-${section}`, "");
45
+ await setvWithDoubleEncryption(`${contextId}-${section}`, "", contextKey);
39
46
  }
40
47
  return "";
41
48
  }
@@ -45,14 +52,14 @@ export default {
45
52
  const memoryObject = JSON.parse(aiMemory);
46
53
  for (const section of validSections) {
47
54
  if (section in memoryObject) {
48
- await setv(`${contextId}-${section}`, memoryObject[section]);
55
+ await setvWithDoubleEncryption(`${contextId}-${section}`, memoryObject[section], contextKey);
49
56
  }
50
57
  }
51
58
  } catch {
52
59
  for (const section of validSections) {
53
- await setv(`${contextId}-${section}`, "");
60
+ await setvWithDoubleEncryption(`${contextId}-${section}`, "", contextKey);
54
61
  }
55
- await setv(`${contextId}-memoryUser`, aiMemory);
62
+ await setvWithDoubleEncryption(`${contextId}-memoryUser`, aiMemory, contextKey);
56
63
  }
57
64
 
58
65
  return aiMemory;
@@ -1,6 +1,6 @@
1
1
  import { Prompt } from '../../../../server/prompt.js';
2
2
  import { callPathway } from '../../../../lib/pathwayTools.js';
3
- import { setv } from '../../../../lib/keyValueStorageClient.js';
3
+ import { setvWithDoubleEncryption } from '../../../../lib/keyValueStorageClient.js';
4
4
 
5
5
  export default {
6
6
  prompt:
@@ -24,7 +24,8 @@ export default {
24
24
  aiName: "Jarvis",
25
25
  contextId: ``,
26
26
  section: "memoryAll",
27
- updateContext: false
27
+ updateContext: false,
28
+ contextKey: ``
28
29
  },
29
30
  model: 'oai-gpt41-mini',
30
31
  useInputChunking: false,
@@ -54,12 +55,12 @@ export default {
54
55
  result = `${result}\n\nThe last time you spoke to the user was ${new Date().toISOString()}.`;
55
56
 
56
57
  } else {
57
- sectionMemory = await callPathway("sys_read_memory", {contextId: args.contextId, section: args.section, stripMetadata: (args.section !== 'memoryTopics')});
58
+ sectionMemory = await callPathway("sys_read_memory", {contextId: args.contextId, section: args.section, stripMetadata: (args.section !== 'memoryTopics'), contextKey: args.contextKey});
58
59
  result = await runAllPrompts({...args, sectionMemory});
59
60
  }
60
61
 
61
62
  if (args.updateContext) {
62
- await setv(`${args.contextId}-memoryContext`, result);
63
+ await setvWithDoubleEncryption(`${args.contextId}-memoryContext`, result, args.contextKey);
63
64
  }
64
65
 
65
66
  return result;
@@ -31,7 +31,8 @@ export default {
31
31
  skipCallbackMessage: false,
32
32
  entityId: ``,
33
33
  researchMode: false,
34
- model: 'oai-gpt41'
34
+ model: 'oai-gpt41',
35
+ contextKey: ``
35
36
  },
36
37
  timeout: 600,
37
38
 
@@ -8,9 +8,10 @@ import { getSearchResultId } from '../../../../lib/util.js';
8
8
  export default {
9
9
  prompt: [],
10
10
  timeout: 300,
11
- /* This tool is included for legacy reasons - as of August 2025, Azure has deprecated the Bing search API and replaced it with their Foundry Agents API.
11
+ /* This tool is included for legacy reasons - as of August 2025, Azure has deprecated the Bing search API and replaced it with their Foundry Agents API.*/
12
12
  toolDefinition: {
13
13
  type: "function",
14
+ enabled: false,
14
15
  icon: "🌐",
15
16
  function: {
16
17
  name: "SearchInternet",
@@ -43,7 +44,6 @@ export default {
43
44
  }
44
45
  }
45
46
  },
46
- */
47
47
 
48
48
  executePathway: async ({args, runAllPrompts, resolver}) => {
49
49
 
@@ -8,9 +8,9 @@ import { getSearchResultId } from '../../../../lib/util.js';
8
8
  export default {
9
9
  prompt: [],
10
10
  timeout: 300,
11
- /*
12
11
  toolDefinition: {
13
12
  type: "function",
13
+ enabled: false,
14
14
  icon: "🌐",
15
15
  function: {
16
16
  name: "SearchInternet",
@@ -47,7 +47,6 @@ export default {
47
47
  }
48
48
  }
49
49
  },
50
- */
51
50
 
52
51
  executePathway: async ({args, runAllPrompts, resolver}) => {
53
52
 
@@ -19,8 +19,9 @@ export default {
19
19
  model: "oai-gpt41"
20
20
  },
21
21
 
22
- toolDefinition: {
22
+ toolDefinition: {
23
23
  type: "function",
24
+ enabled: false,
24
25
  icon: "🤖",
25
26
  function: {
26
27
  name: "CallModel",