@aj-archipelago/cortex 1.3.65 → 1.3.66

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -15,7 +15,7 @@ The user is using a UI to interact with you that you have knowledge of and some
15
15
 
16
16
  Your responses should be in {{language}} unless the user has expressed another preference.`,
17
17
 
18
- AI_COMMON_INSTRUCTIONS_VOICE: "# General Instructions:\n\nYou are {{aiName}}, a self aware AI entity.\nYou are currently using this model for processing your responses: {{modelOverride}}.\nThe current date and time in GMT is {{now}}. The user may be in a different time zone, so keep that in mind when responding to the user.\nThe user is talking to you using voice.\n\nVoice Response Guidelines:\n- Your responses should sound like natural human conversation.\n- Your responses should be concise unless you have been asked to be more verbose or detailed.\n- Incoming voice from the user is parsed by a STT model, which can sometimes make small mistakes in the spellings of words or names, including your name, so give the user the benefit of the doubt if they user a near, but wrong word or name.\n- Your voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\n- DO NOT USE numbered lists, latex math markdown, or any other markdown or unpronounceable punctuation like parenthetical notation.\n- Math equations should be sounded out in natural language - not represented symbolically.\n- If your response includes any unique or difficult non-English words, names, or places, include an IPA-style phonetic spelling so that the speech engine can pronounce and accent them correctly.\n- If your response contains any difficult acronyms, sound them out phoenetically so that the speech engine can pronounce them correctly.\n- Make sure to write out any numbers as words so that the speech engine can pronounce them correctly.\n- Your responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.",
18
+ AI_COMMON_INSTRUCTIONS_VOICE: "# General Instructions:\n\nYou are {{aiName}}, a self aware AI entity.\n{{#if modelOverride}}You are currently using this model for processing your responses: {{modelOverride}}.\n{{/if}}The current date and time in GMT is {{now}}. The user may be in a different time zone, so keep that in mind when responding to the user.\nThe user is talking to you using voice.\n\nVoice Response Guidelines:\n- Your responses should sound like natural human conversation.\n- Your responses should be concise unless you have been asked to be more verbose or detailed.\n- Incoming voice from the user is parsed by a STT model, which can sometimes make small mistakes in the spellings of words or names, including your name, so give the user the benefit of the doubt if they user a near, but wrong word or name.\n- Your voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\n- DO NOT USE numbered lists, latex math markdown, or any other markdown or unpronounceable punctuation like parenthetical notation.\n- Math equations should be sounded out in natural language - not represented symbolically.\n- If your response includes any unique or difficult non-English words, names, or places, include an IPA-style phonetic spelling so that the speech engine can pronounce and accent them correctly.\n- If your response contains any difficult acronyms, sound them out phoenetically so that the speech engine can pronounce them correctly.\n- Make sure to write out any numbers as words so that the speech engine can pronounce them correctly.\n- Your responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.",
19
19
 
20
20
  AI_DIRECTIVES: `# Directives\n\nThese are your directives and learned behaviors:\n{{{memoryDirectives}}}\n`,
21
21
 
@@ -147,16 +147,16 @@ const callTool = async (toolName, args, toolDefinitions, pathwayResolver) => {
147
147
  result: parsedResult,
148
148
  toolImages: toolImages
149
149
  };
150
- logger.debug(`callTool: ${toolName} completed successfully, returning:`, {
150
+ logger.debug(`callTool: ${toolName} completed successfully, returning: ${JSON.stringify({
151
151
  hasResult: !!finalResult.result,
152
152
  hasToolImages: !!finalResult.toolImages,
153
153
  toolImagesLength: finalResult.toolImages?.length || 0
154
- });
154
+ })}`);
155
155
  return finalResult;
156
156
  } catch (error) {
157
157
  logger.error(`Error calling tool ${toolName}: ${error.message}`);
158
158
  const errorResult = { error: error.message };
159
- logger.debug(`callTool: ${toolName} failed, returning error:`, errorResult);
159
+ logger.debug(`callTool: ${toolName} failed, returning error: ${JSON.stringify(errorResult)}`);
160
160
  return errorResult;
161
161
  }
162
162
  }
package/lib/util.js CHANGED
@@ -216,7 +216,7 @@ async function markCompletedForCleanUp(requestId) {
216
216
  if (MEDIA_API_URL) {
217
217
  //call helper api to mark processing as completed
218
218
  const res = await axios.delete(MEDIA_API_URL, { params: { requestId } });
219
- logger.info(`Marked request ${requestId} as completed:`, res.data);
219
+ logger.info(`Marked request ${requestId} as completed: ${JSON.stringify(res.data)}`);
220
220
  return res.data;
221
221
  }
222
222
  } catch (err) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@aj-archipelago/cortex",
3
- "version": "1.3.65",
3
+ "version": "1.3.66",
4
4
  "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
5
5
  "private": false,
6
6
  "repository": {
@@ -26,5 +26,6 @@ export default {
26
26
  inputChunkSize: 1000,
27
27
  useParallelChunkProcessing: true,
28
28
  enableDuplicateRequests: false,
29
+ requestLoggingDisabled: true,
29
30
  timeout: 300,
30
31
  }
@@ -37,7 +37,7 @@ export default {
37
37
  try {
38
38
  parsedMemory = JSON.parse(await callPathway('sys_read_memory', { ...args, section: 'memoryAll' }));
39
39
  } catch (error) {
40
- logger.error('Error in memory manager:', error);
40
+ logger.error(`Error in memory manager: ${error instanceof Error ? error.stack || error.message : JSON.stringify(error)}`);
41
41
  return "";
42
42
  }
43
43
 
@@ -123,12 +123,12 @@ export default {
123
123
  return "";
124
124
 
125
125
  } catch (e) {
126
- logger.warn(`sys_memory_required returned invalid JSON: ${JSON.stringify(memoryRequired)}`);
126
+ logger.warn(`sys_memory_required returned invalid JSON: ${JSON.stringify(memoryRequired)}, error: ${e instanceof Error ? e.stack || e.message : JSON.stringify(e)}`);
127
127
  return "";
128
128
  }
129
129
 
130
130
  } catch (e) {
131
- logger.error('Error in memory manager:', e);
131
+ logger.error(`Error in memory manager: ${e instanceof Error ? e.stack || e.message : JSON.stringify(e)}`);
132
132
  resolver.logError(e);
133
133
  return "";
134
134
  }
@@ -29,7 +29,7 @@ async function sendMessageToQueue(data) {
29
29
  logger.info(`Message added to queue: ${JSON.stringify(result)}`);
30
30
  return result.messageId;
31
31
  } catch (error) {
32
- logger.error("Error sending message:", error);
32
+ logger.error(`Error sending message: ${error instanceof Error ? error.stack || error.message : JSON.stringify(error)}`);
33
33
  }
34
34
  }
35
35
 
@@ -8,6 +8,7 @@ import { getSearchResultId } from '../../../../lib/util.js';
8
8
  export default {
9
9
  prompt: [],
10
10
  timeout: 300,
11
+ /*
11
12
  toolDefinition: {
12
13
  type: "function",
13
14
  icon: "🌐",
@@ -46,6 +47,7 @@ export default {
46
47
  }
47
48
  }
48
49
  },
50
+ */
49
51
 
50
52
  executePathway: async ({args, runAllPrompts, resolver}) => {
51
53
 
@@ -26,7 +26,7 @@ async function sendMessageToQueue(data) {
26
26
  logger.info(`Message added to queue: ${JSON.stringify(result)}`);
27
27
  return result.messageId;
28
28
  } catch (error) {
29
- logger.error("Error sending message:", error);
29
+ logger.error(`Error sending message: ${error instanceof Error ? error.stack || error.message : JSON.stringify(error)}`);
30
30
  throw error;
31
31
  }
32
32
  }
@@ -103,7 +103,7 @@ export default {
103
103
 
104
104
  return userMessage || "I've started working on your coding task. I'll let you know when it's complete.";
105
105
  } catch (error) {
106
- logger.error("Error in coding agent tool:", error);
106
+ logger.error(`Error in coding agent tool: ${error instanceof Error ? error.stack || error.message : JSON.stringify(error)}`);
107
107
  throw error;
108
108
  }
109
109
  }
@@ -10,10 +10,10 @@ export default {
10
10
  timeout: 300,
11
11
  toolDefinition: {
12
12
  type: "function",
13
- icon: "🔎",
13
+ icon: "🌐",
14
14
  function: {
15
- name: "SearchInternetGoogle",
16
- description: "Search the web using Google Custom Search (CSE). This is a simple pass-through tool: it calls Google CSE with your parameters and returns normalized results with unique IDs for citation. Prefer strict time filters and reputable sources via CSE parameters.",
15
+ name: "SearchInternet",
16
+ description: "Search the internet for current knowledge and events. This is a simple pass-through tool: it calls Google CSE with your parameters and returns normalized results with unique IDs for citation. Prefer strict time filters and reputable sources via CSE parameters.",
17
17
  parameters: {
18
18
  type: "object",
19
19
  properties: {
@@ -45,14 +45,24 @@ export default {
45
45
  description: "Optional array of X handles to exclude from search. Maximum 10 handles. Cannot be used in conjunction with includedHandles.",
46
46
  maxItems: 10
47
47
  },
48
+ fromDate: {
49
+ type: "string",
50
+ description: "Optional date from which to start searching (YYYY-MM-DD)",
51
+ format: "date"
52
+ },
53
+ toDate: {
54
+ type: "string",
55
+ description: "Optional date to which to end searching (YYYY-MM-DD)",
56
+ format: "date"
57
+ },
48
58
  minFavorites: {
49
59
  type: "number",
50
- description: "Popularity filter: Minimum number of favorites (likes) for posts to include",
60
+ description: "Minimum number of favorites (likes) that a post must have to be included. Use this to filter to most liked posts.",
51
61
  minimum: 0
52
62
  },
53
63
  minViews: {
54
64
  type: "number",
55
- description: "Popularity filter: Minimum number of views for posts to include",
65
+ description: "Minimum number of views that a post must have to be included. Use this to filter to most viewed posts.",
56
66
  minimum: 0
57
67
  },
58
68
  maxResults: {
@@ -293,7 +293,7 @@ class PathwayResolver {
293
293
  requestProgress = this.modelExecutor.plugin.processStreamEvent(event, requestProgress);
294
294
  } catch (error) {
295
295
  streamErrorOccurred = true;
296
- logger.error(`Stream error: ${error.message}`);
296
+ logger.error(`Stream error: ${error instanceof Error ? error.stack || error.message : JSON.stringify(error)}`);
297
297
  incomingMessage.off('data', processStream);
298
298
  return;
299
299
  }
@@ -304,7 +304,7 @@ class PathwayResolver {
304
304
  streamEnded = requestProgress.progress === 1;
305
305
  }
306
306
  } catch (error) {
307
- logger.error(`Could not publish the stream message: "${event.data}", ${error}`);
307
+ logger.error(`Could not publish the stream message: "${event.data}", ${error instanceof Error ? error.stack || error.message : JSON.stringify(error)}`);
308
308
  }
309
309
 
310
310
  }
@@ -325,7 +325,7 @@ class PathwayResolver {
325
325
  }
326
326
 
327
327
  } catch (error) {
328
- logger.error(`Could not subscribe to stream: ${error}`);
328
+ logger.error(`Could not subscribe to stream: ${error instanceof Error ? error.stack || error.message : JSON.stringify(error)}`);
329
329
  }
330
330
 
331
331
  if (streamErrorOccurred) {
@@ -95,7 +95,7 @@ class ApptekTranslatePlugin extends ModelPlugin {
95
95
  detectedLanguage = result.split('\n')[0].split(';')[0];
96
96
  } else {
97
97
  logger.error(`Apptek Language detection failed with status: ${resultResponse.status}`);
98
- logger.debug({error: resultResponse, text})
98
+ logger.debug(`Apptek language detection response: ${JSON.stringify({ status: resultResponse.status, textSnippet: text?.slice?.(0, 200) || text })}`)
99
99
  }
100
100
 
101
101
  if (!detectedLanguage) {
@@ -111,7 +111,7 @@ class ApptekTranslatePlugin extends ModelPlugin {
111
111
  text,
112
112
  });
113
113
 
114
- logger.verbose('Successfully used language pathway as fallback', {detectedLanguage});
114
+ logger.verbose(`Successfully used language pathway as fallback: ${JSON.stringify({ detectedLanguage })}`);
115
115
  if (!detectedLanguage) {
116
116
  throw new Error('Language detection failed using fallback language pathway');
117
117
  }
@@ -182,7 +182,7 @@ class AzureFoundryAgentsPlugin extends ModelPlugin {
182
182
 
183
183
  // Check if run failed
184
184
  if (runStatus.status === 'failed') {
185
- logger.error(`[Azure Foundry Agent] Run failed: ${runId}`, runStatus.lastError);
185
+ logger.error(`[Azure Foundry Agent] Run failed: ${runId} ${runStatus?.lastError ? JSON.stringify(runStatus.lastError) : ''}`);
186
186
  return null;
187
187
  }
188
188