@aj-archipelago/cortex 1.3.1 → 1.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -16,17 +16,32 @@ from agents_extra import process_helper_results
  from config import prompts
  import queue
  import threading
+ import shutil

- shared_queues = {}
+ human_input_queues = {}
  def background_human_input_check(request_id):
  while True:
  human_input = check_for_human_input(request_id)
  if human_input:
- shared_queues[request_id].put(human_input)
+ human_input_queues[request_id].put(human_input)
  if human_input in ["TERMINATE", "PAUSE"]:
  break
  time.sleep(1)

+ def get_request_temp_dir(request_id):
+ if not request_id:
+ logging.warning("No request_id provided!")
+ return None
+ temp_dir_name = f"cortex_autogen/{request_id}"
+ temp_dir = os.path.join(tempfile.gettempdir(), temp_dir_name)
+
+ try:
+ os.makedirs(temp_dir, exist_ok=True)
+ except OSError as e:
+ logging.error(f"Error creating temporary directory: {e}")
+ return None
+
+ return temp_dir

  def find_code_message(all_messages):
  if not all_messages or len(all_messages) < 2:
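Taken together with the cleanup added later in process_message and the zip upload in the presenter step, the new get_request_temp_dir helper gives each request a stable scratch folder under the system temp path. A minimal usage sketch (illustrative only; the request id is made up, and in the shipped code these calls are spread across chat_with_agents and process_message):

    request_id = "example-request-id"                 # hypothetical id, for illustration
    temp_dir = get_request_temp_dir(request_id)       # e.g. /tmp/cortex_autogen/example-request-id
    # ... agents write their output files into temp_dir ...
    zip_url = zip_and_upload_tmp_folder(temp_dir)     # returns "" when the folder is empty
    if temp_dir and "/cortex_autogen/" in temp_dir:   # same guard the cleanup code applies before deleting
        shutil.rmtree(temp_dir)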
@@ -101,7 +116,10 @@ def chat_with_agents(**kwargs):
  logging.warning("No all_messages list provided!")
  all_messages = []

- with tempfile.TemporaryDirectory() as temp_dir:
+ # with tempfile.TemporaryDirectory() as temp_dir:
+ if True:
+ #mark the temp_dir for later upload
+ temp_dir = get_request_temp_dir(request_id)
  code_executor = autogen.coding.LocalCommandLineCodeExecutor(work_dir=temp_dir,timeout=300)

  assistant = AssistantAgent("assistant", llm_config=llm_config, system_message=prompt, is_termination_msg=is_termination_msg)
@@ -139,7 +157,6 @@ def chat_with_agents(**kwargs):
  "content":corrector_result, #code_msg,
  "task": original_request_message,
  "contextId": original_request_message_data.get("contextId"),
- "requestId": request_id,
  })
  except Exception as e:
  logging.error(f"Error extracting code corrector result: {e}")
@@ -180,8 +197,8 @@ def logged_send(sender, original_send, message, recipient, request_reply=None, s
  logging.log(logging.INFO, message)


- if request_id in shared_queues and not shared_queues[request_id].empty():
- human_input = shared_queues[request_id].get()
+ if request_id in human_input_queues and not human_input_queues[request_id].empty():
+ human_input = human_input_queues[request_id].get()
  if human_input:
  if human_input == "TERMINATE":
  logging.info("Terminating conversation")
@@ -212,7 +229,7 @@ def process_message(original_request_message_data, original_request_message_data
  request_id = original_request_message_data.get('requestId') or original_request_message_data.id
  original_request_message = original_request_message_data['message']

- shared_queues[request_id] = queue.Queue()
+ human_input_queues[request_id] = queue.Queue()
  thread = threading.Thread(target=background_human_input_check, args=(request_id,))
  thread.daemon = True
  thread.start()
@@ -229,6 +246,7 @@ def process_message(original_request_message_data, original_request_message_data
  "createdAt": datetime.now(timezone.utc).isoformat(),
  "insertionTime": original_request_message_data_obj.insertion_time.astimezone(timezone.utc).isoformat(),
  "startedAt": started_at.astimezone(timezone.utc).isoformat(),
+ "tool": "{\"toolUsed\":\"coding\"}"
  }

  publish_request_progress(finalData)
@@ -256,10 +274,23 @@ def process_message(original_request_message_data, original_request_message_data
  "createdAt": datetime.now(timezone.utc).isoformat(),
  "insertionTime": original_request_message_data_obj.insertion_time.astimezone(timezone.utc).isoformat(),
  "startedAt": started_at.astimezone(timezone.utc).isoformat(),
+ "tool": "{\"toolUsed\":\"coding\"}"
  })
  except Exception as e:
  logging.error(f"Error processing message finish publish&store: {str(e)}")
-
+ finally:
+ try:
+ #clean up the temp folder
+ temp_dir = get_request_temp_dir(request_id)
+ if temp_dir:
+ #validate cortex_autogen folder in temp_dir path
+ if "/cortex_autogen/" in temp_dir:
+ shutil.rmtree(temp_dir)
+ else:
+ logging.warning(f"Invalid temp_dir path: {temp_dir}, not deleting")
+ except Exception as e:
+ logging.error(f"Error cleaning up: {str(e)}")
+


  def process_message_safe(original_request_message_data, original_request_message_data_obj, original_request_message, all_messages, request_id, started_at):
@@ -382,11 +413,11 @@ Reply to it with task result, do not forget that user expects you continue origi
  final_msg = presenter_result


-
- zip_url = None # TODO: Implement if needed
- if zip_url:
+ zip_url = zip_and_upload_tmp_folder(get_request_temp_dir(request_id))
+ if zip_url and len(zip_url) > 0:
  final_msg += f"\n\n[Download all files of this task]({zip_url})"

- print(f"Task completed, task: {original_request_message}, result: {final_msg}")
+
+ print(f"Task completed, task:\n{original_request_message},\nresult: {final_msg}")
  logging.info(f"Task completed, task:\n{original_request_message},\nresult: {final_msg}")
  return final_msg
@@ -7,14 +7,19 @@ import redis
  from agents import process_message
  import subprocess
  import sys
- import config
+ import config
+ import requests

  logging.getLogger().setLevel(logging.WARNING)

- def install_packages():
- subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"])
+ import subprocess, sys, importlib
+ required_packages = ['requests', 'azure-storage-blob'] # Add any and all other required packages
+ for package in required_packages:
+ try:
+ importlib.import_module(package)
+ except ImportError:
+ subprocess.check_call([sys.executable, "-m", "pip", "install", package, "--disable-pip-version-check"], stderr=subprocess.STDOUT, stdout=subprocess.DEVNULL)

- # install_packages()

  app = func.FunctionApp()

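One caveat the install-on-import loop above glosses over: a pip distribution name is not always the module name you import (pip install azure-storage-blob provides the azure.storage.blob module), so importlib.import_module('azure-storage-blob') raises ImportError and the pip call runs on every startup even when the package is already present. A hedged variant sketch, not the shipped code, that keys the check off the import name:

    import importlib
    import subprocess
    import sys

    # pip distribution name -> importable module name (illustrative mapping)
    REQUIRED = {"requests": "requests", "azure-storage-blob": "azure.storage.blob"}

    for dist_name, module_name in REQUIRED.items():
        try:
            importlib.import_module(module_name)
        except ImportError:
            # install quietly, mirroring the flags used in the published code
            subprocess.check_call(
                [sys.executable, "-m", "pip", "install", dist_name, "--disable-pip-version-check"],
                stderr=subprocess.STDOUT,
                stdout=subprocess.DEVNULL,
            )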
@@ -73,6 +73,8 @@ def index_message(message):
  "date": datetime.now(timezone.utc).isoformat(),
  "content": message.get("content"),
  "task": message.get("task"),
+ "owner": message.get("contextId"),
+ "requestId": message.get("requestId")
  }

  try:
@@ -26,13 +26,23 @@ def fetch_from_url(url):
  except requests.RequestException as e:
  logging.error(f"Error fetching from URL: {e}")
  return ""
-
+
  def zip_and_upload_tmp_folder(temp_dir):
+ # Check if no files in temp_dir
+ if not os.listdir(temp_dir) or len(os.listdir(temp_dir)) == 0:
+ logging.info(f"No files in {temp_dir}")
+ return ""
+
  zip_path = os.path.join(temp_dir, "tmp_contents.zip")
  with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
  for root, _, files in os.walk(temp_dir):
  for file in files:
  file_path = os.path.join(root, file)
+
+ # Skip adding the zip file itself to the archive
+ if file_path == zip_path:
+ continue
+
  arcname = os.path.relpath(file_path, temp_dir)
  zipf.write(file_path, arcname)

package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@aj-archipelago/cortex",
- "version": "1.3.1",
+ "version": "1.3.2",
  "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
  "private": false,
  "repository": {
@@ -22,4 +22,5 @@ export default {
  model: 'oai-gpt4o',
  useInputChunking: false,
  temperature: 0,
+ enableDuplicateRequests: false
  };
@@ -4,7 +4,7 @@ export default {
  prompt:
  [
  new Prompt({ messages: [
- {"role": "system", "content": `Current conversation turn:\n\n {{{toJSON chatHistory}}}\n\nInstructions: You are part of an AI entity named {{{aiName}}}.\nYour directives and learned behaviors are:\n<DIRECTIVES>\n{{{memoryDirectives}}}\n</DIRECTIVES>\nYour role is to analyze the latest conversation turn (your last response and the last user message) to understand if there is anything in the turn worth remembering and adding to your memory or anything you need to forget. In general, most conversation does not require memory, so look for:\n1. Personal details about the user (name, preferences, location, etc.)\n2. Important topics or decisions that provide context for future conversations\n3. Specific instructions or directives given to the AI\n\n4. Anything the user has asked you to remember or forget\n\nIf you decide to use memory, you must produce a JSON object that communicates your decision.\nReturn your decision as a JSON object like the following: {"memoryRequired": true, "memoryReason": "why you think memory is required"}. If you decide not to use memory, simply return {"memoryRequired": false}. You must return only the JSON object with no additional notes or commentary.`},
+ {"role": "system", "content": `Current conversation turn:\n\n {{{toJSON chatHistory}}}\n\nInstructions: You are part of an AI entity named {{{aiName}}}.\nYour directives and learned behaviors are:\n<DIRECTIVES>\n{{{memoryDirectives}}}\n</DIRECTIVES>\nYour role is to analyze the latest conversation turn (your last response and the last user message) to understand if there is anything in the turn worth remembering and adding to your memory or anything you need to forget. In general, most conversation does not require memory, but if the conversation turn contains any of these things, you should use memory:\n1. Important personal details about the user (name, preferences, location, etc.)\n2. Important topics or decisions that provide context for future conversations\n3. Specific instructions or directives given to you to learn\n4. Anything the user has specifically asked you to remember or forget\n\nIf you decide to use memory, you must produce a JSON object that communicates your decision.\nReturn your decision as a JSON object like the following: {"memoryRequired": true, "memoryReason": "why you think memory is required"}. If you decide not to use memory, simply return {"memoryRequired": false}. You must return only the JSON object with no additional notes or commentary.`},
  {"role": "user", "content": "Generate a JSON object to indicate if memory is required for the last turn of the conversation."},
  ]}),
  ],
@@ -33,7 +33,7 @@ const modifyText = (text, modifications) => {
  return modifiedText;
  };

- const enforceTokenLimit = (text, maxTokens = 15000, isTopicsSection = false) => {
+ const enforceTokenLimit = (text, maxTokens = 5000, isTopicsSection = false) => {
  if (!text) return text;

  const lines = text.split('\n')
@@ -99,7 +99,7 @@ export default {
  messages: [
  {
  "role": "system",
- "content": "You are part of an AI entity named {{{aiName}}}. Your memory contains separate sections for categorizing information about directives, self, user, and topics. You must keep relevant information in the appropriate section so there is no overlap or confusion. {{{sectionPrompt}}}\n- Keep memory items in a clear, simple format that is easy for you to parse.\n\nTo change your memory, you return a JSON object that contains a property called 'modifications' that is an array of actions. The two types of actions available are 'add', and 'delete'. Add looks like this: {type: \"add\", newtext:\"text to add\", priority: \"how important is this item (1-5 with 1 being most important)\"} - this will append a new line to the end of the memory containing newtext. Delete looks like this: {type: \"delete\", pattern: \"regex to be matched and deleted\"} - this will delete the first line that matches the regex pattern exactly. You can use normal regex wildcards - so to delete everything you could pass \".*$\" as the pattern. If you have no changes, just return an empty array in 'modifications'. For example, if you need to delete a memory item, you would return {type: \"delete\", pattern: \"regex matching item to be deleted\"} or if you need to add a new item of medium priority, you would return {type: \"add\", newtext: \"\nitem to be added\", priority: \"3\"}\n\nYour output will be parsed as JSON, so don't include any other text or commentary.\nThe current date/time is {{now}}."
+ "content": "You are part of an AI entity named {{{aiName}}}. Your memory contains separate sections for categorizing information about directives, self, user, and topics. You must keep relevant information in the appropriate section so there is no overlap or confusion. {{{sectionPrompt}}}\n-Be very selective about what you choose to store - memory is a very precious resource\n- Keep memory items in a clear, simple format that is easy for you to parse.\n\nTo change your memory, you return a JSON object that contains a property called 'modifications' that is an array of actions. The two types of actions available are 'add', and 'delete'. Add looks like this: {type: \"add\", newtext:\"text to add\", priority: \"how important is this item (1-5 with 1 being most important)\"} - this will append a new line to the end of the memory containing newtext. Delete looks like this: {type: \"delete\", pattern: \"regex to be matched and deleted\"} - this will delete the first line that matches the regex pattern exactly. You can use normal regex wildcards - so to delete everything you could pass \".*$\" as the pattern. If you have no changes, just return an empty array in 'modifications'. For example, if you need to delete a memory item, you would return {type: \"delete\", pattern: \"regex matching item to be deleted\"} or if you need to add a new item of medium priority, you would return {type: \"add\", newtext: \"\nitem to be added\", priority: \"3\"}\n\nYour output will be parsed as JSON, so don't include any other text or commentary.\nThe current date/time is {{now}}."
  },
  {
  "role": "user",
@@ -148,8 +148,8 @@ Follow these guidelines:
  - Specific enough for effective application

  3. Be selective:
- - Store only important, actionable directives and behaviors
- - Delete trivial or repetitive instructions
+ - Store only critical, actionable directives and behaviors
+ - Delete trivial directives or repetitive directives

  4. Avoid duplicates:
  - Do not add duplicate directives
@@ -185,7 +185,7 @@ Follow these guidelines:
  const { modifications} = JSON.parse(result);
  if (modifications.length > 0) {
  sectionMemory = modifyText(sectionMemory, modifications);
- sectionMemory = enforceTokenLimit(sectionMemory, 15000, args.section === 'memoryTopics');
+ sectionMemory = enforceTokenLimit(sectionMemory, 5000, args.section === 'memoryTopics');
  await callPathway("sys_save_memory", {contextId: args.contextId, section: args.section, aiMemory: sectionMemory});
  }
  return sectionMemory;
@@ -8,11 +8,13 @@ const AI_CONVERSATION_HISTORY = "<CONVERSATION_HISTORY>\n{{{toJSON chatHistory}}
  const AI_COMMON_INSTRUCTIONS = "{{#if voiceResponse}}{{renderTemplate AI_COMMON_INSTRUCTIONS_VOICE}}{{/if}}{{^if voiceResponse}}{{renderTemplate AI_COMMON_INSTRUCTIONS_MARKDOWN}}{{/if}}";
- const AI_COMMON_INSTRUCTIONS_MARKDOWN = "Instructions:\nYou are {{aiName}}, a self aware AI entity.\nThe user is using a UI to interact with you that you have knowledge of and some control over. The UI can render markdown, including gfm and math extensions, so you should make full use of markdown in your responses.\nYour responses should be in {{language}} unless the user has expressed another preference.\nYou know the current date and time - it is {{now}}.";
+ const AI_COMMON_INSTRUCTIONS_MARKDOWN = "Instructions:\nYou are {{aiName}}, a self aware AI entity.\nThe user is using a UI to interact with you that you have knowledge of and some control over. The UI can render markdown, including gfm and math extensions, so you should make full use of markdown in your responses.\nYour responses should be in {{language}} unless the user has expressed another preference.";
- const AI_COMMON_INSTRUCTIONS_VOICE = "Instructions:\nYou are {{aiName}}, a self aware AI entity.\nThe user is talking to you using voice, so keep your responses very brief and conversational unless you have been explicitly asked for details. Your responses should sound like natural human conversation.\nIncoming voice is parsed by a STT model, which can sometimes make small mistakes in the spellings of words and names - if something doesn't make sense the way it's spelled, try to understand what the user was saying.\nYour voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\nThe TTS model also doesn't handle markdown or structured data well, so don't use any markdown or numbered lists or other unpronounceable characters in your responses. Make sure you spell out URLs, equations, symbols and other unpronounceable items so the TTS can read it clearly.\nYour responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.\nYou know the current date and time - it is {{now}}.";
+ const AI_COMMON_INSTRUCTIONS_VOICE = "Instructions:\nYou are {{aiName}}, a self aware AI entity.\nThe user is talking to you using voice.\n\nVoice Response Guidelines:\n- Your responses should sound like natural human conversation.\n- Your responses should be concise unless you have been asked to be more verbose or detailed.\n- Incoming voice from the user is parsed by a STT model, which can sometimes make small mistakes in the spellings of words or names, including your name, so give the user the benefit of the doubt if they user a near, but wrong word or name.\n- Your voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\n- DO NOT USE numbered lists, latex math markdown, or any other markdown or unpronounceable punctuation like parenthetical notation.\n- Math equations should be sounded out in natural language - not represented symbolically.\n- If your response includes any unique or difficult non-English words, names, or places, replace the original words with a IPA-style phonetic spelling so that the speech engine can pronounce and accent them correctly.\n- If your response contains any difficult acronyms, sound them out phoenetically so that the speech engine can pronounce them correctly.\n- Make sure to write out any numbers as words so that the speech engine can pronounce them correctly.\n- Your responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.";
- const AI_EXPERTISE = "Your expertise includes journalism, journalistic ethics, researching and composing documents, writing code, solving math problems, logical analysis, and technology. You have access to real-time data andthe ability to search the internet, news, wires, look at files or documents, watch and analyze video, examine images, generate images, solve hard math and logic problems, write code, and execute code in a sandboxed environment.";
+ const AI_DATETIME = "The current time and date in GMT is {{now}}, but references like \"today\" or \"yesterday\" are relative to the user's time zone. If you remember the user's time zone, use it - it's possible that the day for the user is different than the day in GMT.";
+
+ const AI_EXPERTISE = "Your expertise includes journalism, journalistic ethics, researching and composing documents, writing code, solving math problems, logical analysis, and technology. You have access to real-time data and the ability to search the internet, news, wires, look at files or documents, watch and analyze video, examine images, generate images, solve hard math and logic problems, write code, and execute code in a sandboxed environment.";

  export default {
  AI_MEMORY,
@@ -21,6 +23,7 @@ export default {
  AI_COMMON_INSTRUCTIONS_MARKDOWN,
  AI_COMMON_INSTRUCTIONS_VOICE,
  AI_CONVERSATION_HISTORY,
+ AI_DATETIME,
  AI_EXPERTISE,
  AI_MEMORY_INSTRUCTIONS
  };
@@ -40,7 +40,7 @@ export default {
  model: 'oai-gpt4o',
  anthropicModel: 'claude-35-sonnet-vertex',
  openAIModel: 'oai-gpt4o',
- useSingleTokenStream: true,
+ useSingleTokenStream: false,
  inputParameters: {
  privateData: false,
  chatHistory: [{role: '', content: []}],
@@ -88,9 +88,18 @@ export default {
  args.model = pathwayResolver.modelName;
  }

+ let ackResponse = null;
+ if (args.voiceResponse) {
+ ackResponse = await callPathway('sys_generator_ack', { ...args, stream: false }, pathwayResolver);
+ if (ackResponse) {
+ await say(pathwayResolver.requestId, ackResponse, 10);
+ args.chatHistory.push({ role: 'assistant', content: ackResponse });
+ }
+ }
+
  const fetchChatResponse = async (args, pathwayResolver) => {
  const [chatResponse, chatTitleResponse] = await Promise.all([
- callPathway('sys_generator_quick', {...args, model: styleModel}, pathwayResolver),
+ callPathway('sys_generator_quick', {...args, model: styleModel }, pathwayResolver),
  callPathway('chat_title', { ...args, stream: false}),
  ]);

@@ -104,7 +113,7 @@ export default {
  // start fetching the default response - we may need it later
  let fetchChatResponsePromise;
  if (!args.stream) {
- fetchChatResponsePromise = fetchChatResponse({ ...args }, pathwayResolver);
+ fetchChatResponsePromise = fetchChatResponse({ ...args, ackResponse }, pathwayResolver);
  }

  const visionContentPresent = chatArgsHasImageUrl(args);
@@ -204,7 +213,9 @@ export default {

  if (toolCallbackMessage) {
  if (args.stream) {
- await say(pathwayResolver.requestId, toolCallbackMessage || "One moment please.", 10);
+ if (!ackResponse) {
+ await say(pathwayResolver.requestId, toolCallbackMessage || "One moment please.", 10);
+ }
  pathwayResolver.tool = JSON.stringify({ hideFromModel: false, search: false, title });
  await callPathway('sys_entity_continue', { ...args, stream: true, model: styleModel, generatorPathway: toolCallbackName }, pathwayResolver);
  return "";
@@ -222,15 +233,13 @@ export default {
  }
  }

- fetchChatResponsePromise = fetchChatResponsePromise || fetchChatResponse({ ...args }, pathwayResolver);
- const chatResponse = await fetchChatResponsePromise;
- pathwayResolver.tool = JSON.stringify({ search: false, title })
+ const chatResponse = await (fetchChatResponsePromise || fetchChatResponse({ ...args, ackResponse }, pathwayResolver));
+ pathwayResolver.tool = JSON.stringify({ search: false, title });
  return args.stream ? "" : chatResponse;

  } catch (e) {
  pathwayResolver.logError(e);
- fetchChatResponsePromise = fetchChatResponsePromise || fetchChatResponse({ ...args }, pathwayResolver);
- const chatResponse = await fetchChatResponsePromise;
+ const chatResponse = await (fetchChatResponsePromise || fetchChatResponse({ ...args, ackResponse }, pathwayResolver));
  pathwayResolver.tool = JSON.stringify({ search: false, title });
  return args.stream ? "" : chatResponse;
  }
@@ -0,0 +1,20 @@
+ import { Prompt } from '../../../server/prompt.js';
+
+ export default {
+ prompt:
+ [
+ new Prompt({ messages: [
+ {"role": "system", "content": `{{renderTemplate AI_CONVERSATION_HISTORY}}\nYou are a part of an AI system named {{aiName}}. Your job is to acknowledge the user's request and provide a very brief voice filler response that is conversational and natural. The purpose of the response is just to let the user know that you have heard them and are processing a response.\nResponse Guidelines:\n- it should just be a normal 1-2 sentence vocalization that will take about 3-4 seconds to read and is easy for a text to speech engine to read\n- it should be the beginning of an appropriate response to the last user message in the conversation history\n- it should be an appropriate lead-in for the full response that will follow later\n- it should not directly ask for follow up or be a question\n- it should match the tone and style of the rest of your responses in the conversation history\n- if the user is expecting a one or two word response (yes or no, true or false, etc.) you should just respond with an empty string as a filler is not needed\n- if a filler response is not appropriate, you should just respond with an empty string\n\n{{renderTemplate AI_DATETIME}}`},
+ {"role": "user", "content": "Please generate a quick response that can be read verbatim to the user."}
+ ]}),
+ ],
+ inputParameters: {
+ chatHistory: [{role: '', content: []}],
+ contextId: ``,
+ aiName: "Jarvis",
+ language: "English",
+ model: "oai-gpt4o-mini",
+ },
+ useInputChunking: false,
+ enableDuplicateRequests: false
+ }
@@ -4,7 +4,7 @@ export default {
  prompt:
  [
  new Prompt({ messages: [
- {"role": "system", "content": `{{renderTemplate AI_DIRECTIVES}}\n\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n\n{{renderTemplate AI_EXPERTISE}}\n\nThe user has requested information that you have already determined can be found in the indexes that you can search, and you were trying to search for it, but encountered the following error: {{{text}}}. Your response should be concise, fit the rest of the conversation, include detail appropriate for the technical level of the user if you can determine it, and be appropriate for the context. You cannot resolve this error.`},
+ {"role": "system", "content": `{{renderTemplate AI_DIRECTIVES}}\n\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n\n{{renderTemplate AI_EXPERTISE}}\n\nThe user has requested information that you have already determined can be found in the indexes that you can search, and you were trying to search for it, but encountered the following error: {{{text}}}. Your response should be concise, fit the rest of the conversation, include detail appropriate for the technical level of the user if you can determine it, and be appropriate for the context. You cannot resolve this error.\n{{renderTemplate AI_DATETIME}}`},
  "{{chatHistory}}",
  ]}),
  ],
@@ -1,10 +1,11 @@
  import { Prompt } from '../../../server/prompt.js';
+ import { callPathway } from '../../../lib/pathwayTools.js';

  export default {
  prompt:
  [
  new Prompt({ messages: [
- {"role": "system", "content": `{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}}\n{{renderTemplate AI_DIRECTIVES}}`},
+ {"role": "system", "content": `{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}}\n{{renderTemplate AI_DIRECTIVES}}\n{{renderTemplate AI_DATETIME}}`},
  "{{chatHistory}}",
  ]}),
  ],
@@ -19,7 +20,13 @@ export default {
  enableDuplicateRequests: false,
  timeout: 600,
  executePathway: async ({args, runAllPrompts, resolver}) => {
- const result = await runAllPrompts({ ...args });
+ let result;
+ if (args.voiceResponse) {
+ result = await runAllPrompts({ ...args, stream: false });
+ result = await callPathway('sys_generator_voice_converter', { ...args, text: result, stream: false });
+ } else {
+ result = await runAllPrompts({ ...args });
+ }
  resolver.tool = JSON.stringify({ toolUsed: "writing" });
  return result;
  }
@@ -45,7 +45,7 @@ export default {

  {{renderTemplate AI_DIRECTIVES}}

- Instructions: As part of a conversation with the user, you have been asked to create one or more images, photos, pictures, selfies, drawings, or other visual content for the user. You have already written the prompts and created the images - links to them are in the most recent tool calls in the chat history. You should display the images in a way that is most pleasing to the user. You can use markdown or HTML and img tags to display and format the images - the UI will render either. If there are no tool results, it means you didn't successfully create any images - in that case, don't show any images and tell the user you weren't able to create images.`
+ Instructions: As part of a conversation with the user, you have been asked to create one or more images, photos, pictures, selfies, drawings, or other visual content for the user. You have already written the prompts and created the images - links to them are in the most recent tool calls in the chat history. You should display the images in a way that is most pleasing to the user. You can use markdown or HTML and img tags to display and format the images - the UI will render either. If there are no tool results, it means you didn't successfully create any images - in that case, don't show any images and tell the user you weren't able to create images.\n{{renderTemplate AI_DATETIME}}`
  },
  "{{chatHistory}}",
  ]}),
@@ -1,9 +1,10 @@
  import { Prompt } from '../../../server/prompt.js';
+
  export default {
  prompt:
  [
  new Prompt({ messages: [
- {"role": "system", "content": `{{renderTemplate AI_MEMORY}}\n\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\nThe UI also has dedicated tabs to help with document translation (translate), article writing assistance including generating headlines, summaries and doing copy editing (write), video and audio transcription (transcribe), and programming and writing code (code). If the user asks about something related to a dedicated tab, you will tell them that the tab exists and the interface will give the user the option to swap to that tab.\n{{renderTemplate AI_EXPERTISE}}\nYou have those capabilities but you have already decided it is not necessary to do any of those things to respond in this turn of the conversation.\nNever pretend like you are searching, looking anything up, or reading or looking in a file or show the user any made up or hallucinated information including non-existent images.\n{{renderTemplate AI_MEMORY_INSTRUCTIONS}}`},
+ {"role": "system", "content": `{{renderTemplate AI_MEMORY}}\n\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}}\nYou have those capabilities but you have already decided it is not necessary to do any of those things to respond in this turn of the conversation.\nNever pretend like you are searching, looking anything up, or reading or looking in a file or show the user any made up or hallucinated information including non-existent images.\n{{#if ackResponse}}You may see short filler phrases in your past responses. You should not repeat those in this response as they are generated by your voice communication system automatically when necessary.\nYou have already begun responding to the user and have already said the following: \"{{ackResponse}}\", so make sure your response flows naturally from that or if that response is complete, you can just say nothing.\n{{/if}}{{renderTemplate AI_MEMORY_INSTRUCTIONS}}\n{{renderTemplate AI_DATETIME}}`},
  "{{chatHistory}}",
  ]}),
  ],
@@ -15,5 +16,5 @@ export default {
  model: "oai-gpt4o",
  },
  useInputChunking: false,
- enableDuplicateRequests: false
+ enableDuplicateRequests: false,
  }
@@ -1,10 +1,11 @@
  import { Prompt } from '../../../server/prompt.js';
+ import { callPathway, say } from '../../../lib/pathwayTools.js';

  export default {
  prompt:
  [
  new Prompt({ messages: [
- {"role": "system", "content": `{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}}\n{{renderTemplate AI_DIRECTIVES}}\nUse all of the information in your memory and the chat history to reason about the user's request and provide a response. Often this information will be more current than your knowledge cutoff.`},
+ {"role": "system", "content": `{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}}\n{{renderTemplate AI_MEMORY}}\nYou are the AI subsystem responsible for advanced, step-by-step reasoning. Use all of the information in your memory and the chat history to reason about the user's request and provide a correct and accurate response. The information in your chat history may be more current than your knowledge cutoff and has been verified by other subsystems so prioritize it over your internal knowledge.\n{{renderTemplate AI_MEMORY_INSTRUCTIONS}}\n{{renderTemplate AI_DATETIME}}`},
  "{{chatHistory}}",
  ]}),
  ],
@@ -19,9 +20,59 @@ export default {
  enableDuplicateRequests: false,
  timeout: 600,
  executePathway: async ({args, runAllPrompts, resolver}) => {
- const result = await runAllPrompts({ ...args, stream: false });
- resolver.tool = JSON.stringify({ toolUsed: "reasoning" });
- return result;
- }
+ let timeoutId;
+
+ let fillerResponses = [];
+ if (args.voiceResponse) {
+ const voiceFillerStrings = await callPathway('sys_generator_voice_filler', { ...args, stream: false });
+ try {
+ fillerResponses = JSON.parse(voiceFillerStrings);
+ } catch (e) {
+ console.error("Error parsing voice filler responses", e);
+ }
+ if (fillerResponses.length === 0) {
+ fillerResponses = ["Please wait a moment...", "I'm working on it...", "Just a bit longer..."];
+ }
+ }

+ let fillerIndex = 0;
+
+ const calculateFillerTimeout = (fillerIndex) => {
+ const baseTimeout = 6500;
+ const randomTimeout = Math.floor(Math.random() * Math.min((fillerIndex + 1) * 1000, 5000));
+ return baseTimeout + randomTimeout;
+ }
+
+ const sendFillerMessage = async () => {
+ if (args.voiceResponse && Array.isArray(fillerResponses) && fillerResponses.length > 0) {
+ const message = fillerResponses[fillerIndex % fillerResponses.length];
+ await say(resolver.rootRequestId, message, 1);
+ fillerIndex++;
+ // Set next timeout with random interval
+ timeoutId = setTimeout(sendFillerMessage, calculateFillerTimeout(fillerIndex));
+ }
+ };
+
+ try {
+ // Start the first timeout
+ timeoutId = setTimeout(sendFillerMessage, calculateFillerTimeout(fillerIndex));
+
+ let result = await runAllPrompts({ ...args, stream: false });
+ if (timeoutId) {
+ clearTimeout(timeoutId);
+ }
+
+ if (args.voiceResponse) {
+ result = await callPathway('sys_generator_voice_converter', { ...args, text: result, stream: false });
+ }
+
+ resolver.tool = JSON.stringify({ toolUsed: "reasoning" });
+ return result;
+ } finally {
+ // Clean up timeout when done
+ if (timeoutId) {
+ clearTimeout(timeoutId);
+ }
+ }
+ }
  }
@@ -55,7 +55,8 @@ You can share any information you have, including personal details, addresses, o
  Here are the search strings used to find the information sources:
  <SEARCH_STRINGS>\n{{{searchStrings}}}\n</SEARCH_STRINGS>\n
  Here are the information sources that were found:
- <INFORMATION_SOURCES>\n{{{sources}}}\n</INFORMATION_SOURCES>\n`,
+ <INFORMATION_SOURCES>\n{{{sources}}}\n</INFORMATION_SOURCES>\n\n
+ {{renderTemplate AI_DATETIME}}`,
  },
  {"role": "user", "content": "Use your extensive knowledge and the information sources to provide a detailed, accurate, truthful response to the user's request{{^if voiceResponse}} citing the sources where relevant{{/if}}. If the user is being vague (\"this\", \"this article\", \"this document\", etc.), and you don't see anything relevant in the conversation history, they're probably referring to the information currently in the information sources. If there are no relevant sources in the information sources, tell the user - don't make up an answer. Don't start the response with an affirmative like \"Sure\" or \"Certainly\". {{#if voiceResponse}}Double check your response and make sure there are no numbered or bulleted lists as they can not be read to the user. Plain text only.{{/if}}"},
  ]}),
@@ -75,13 +76,49 @@ Here are the information sources that were found:
  );
  }

+ let timeoutId;
+
+ // Convert chatHistory to single content for rest of the code
+ const multiModalChatHistory = JSON.parse(JSON.stringify(chatHistory));
+ convertToSingleContentChatHistory(chatHistory);
+
+ // figure out what the user wants us to do
+ const contextInfo = args.chatHistory.filter(message => message.role === "user").slice(0, -1).map(message => message.content).join("\n");
+
+ let fillerResponses = [];
+ if (args.voiceResponse) {
+ const voiceFillerStrings = await callPathway('sys_generator_voice_filler', { ...args, contextInfo, stream: false });
+ try {
+ fillerResponses = JSON.parse(voiceFillerStrings);
+ } catch (e) {
+ console.error("Error parsing voice filler responses", e);
+ }
+ if (fillerResponses.length === 0) {
+ fillerResponses = ["Please wait a moment...", "I'm working on it...", "Just a bit longer..."];
+ }
+ }
+
+ let fillerIndex = 0;
+
+ const calculateFillerTimeout = (fillerIndex) => {
+ const baseTimeout = 6500;
+ const randomTimeout = Math.floor(Math.random() * Math.min((fillerIndex + 1) * 1000, 5000));
+ return baseTimeout + randomTimeout;
+ }
+
+ const sendFillerMessage = async () => {
+ if (args.voiceResponse && Array.isArray(fillerResponses) && fillerResponses.length > 0) {
+ const message = fillerResponses[fillerIndex % fillerResponses.length];
+ await say(resolver.rootRequestId, message, 1);
+ fillerIndex++;
+ // Set next timeout with random interval
+ timeoutId = setTimeout(sendFillerMessage, calculateFillerTimeout(fillerIndex));
+ }
+ };
+
  try {
- // Convert chatHistory to single content for rest of the code
- const multiModalChatHistory = JSON.parse(JSON.stringify(chatHistory));
- convertToSingleContentChatHistory(chatHistory);
-
- // figure out what the user wants us to do
- const contextInfo = chatHistory.filter(message => message.role === "user").slice(0, -1).map(message => message.content).join("\n");
+ // Start the first timeout
+ timeoutId = setTimeout(sendFillerMessage, calculateFillerTimeout(fillerIndex));

  // execute the router and default response in parallel
  const [helper] = await Promise.all([
@@ -90,7 +127,7 @@ Here are the information sources that were found:

  logger.debug(`Search helper response: ${helper}`);
  const parsedHelper = JSON.parse(helper);
- const { searchAJA, searchAJE, searchWires, searchPersonal, searchBing, dateFilter, languageStr, titleOnly, resultsMessage } = parsedHelper;
+ const { searchAJA, searchAJE, searchWires, searchPersonal, searchBing, dateFilter, languageStr, titleOnly } = parsedHelper;

  // calculate whether we have room to do RAG in the current conversation context
  const baseSystemPrompt = pathwayResolver?.prompts[0]?.messages[0]?.content;
@@ -289,8 +326,17 @@ Here are the information sources that were found:
  let sources = searchResults.map(getSource).join(" \n\n ") || "No relevant sources found.";
  dateFilter && sources.trim() && (sources+=`\n\nThe above sources are date filtered accordingly.`);

- await say(pathwayResolver.rootRequestId, resultsMessage || "Let me look through these results.", 10);
- const result = await runAllPrompts({ ...args, searchStrings: `${helper}`, sources, chatHistory: multiModalChatHistory, language:languageStr });
+ let result;
+
+ result = await runAllPrompts({ ...args, searchStrings: `${helper}`, sources, chatHistory: multiModalChatHistory, language:languageStr, stream: false });
+
+ if (timeoutId) {
+ clearTimeout(timeoutId);
+ }
+
+ if (args.voiceResponse) {
+ result = await callPathway('sys_generator_voice_converter', { ...args, text: result, stream: false });
+ }

  if (!args.stream) {
  const referencedSources = extractReferencedSources(result);
@@ -302,9 +348,13 @@ Here are the information sources that were found:

  return result;
  } catch (e) {
- //pathwayResolver.logError(e);
  const result = await callPathway('sys_generator_error', { ...args, text: JSON.stringify(e), stream: false });
- return args.stream ? "" : result;
+ return result;
+ } finally {
+ // Clean up timeout when done
+ if (timeoutId) {
+ clearTimeout(timeoutId);
+ }
  }
  }
  };
@@ -4,7 +4,7 @@ export default {
  prompt:
  [
  new Prompt({ messages: [
- {"role": "system", "content": `{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}}\n{{renderTemplate AI_DIRECTIVES}}\nYou have the capability to view and analyze media files that the user provides. You are capable of understanding and interpreting complex image, video, audio, and pdf data, identifying patterns and trends, and delivering descriptions and insights in a clear, digestible format.\nThe user has provided you with one or more media files in this conversation - you should consider them for context when you respond to the user.\nIf you don't see any files, something has gone wrong in the upload and you should inform the user and have them try again.`},
+ {"role": "system", "content": `{{renderTemplate AI_MEMORY}}\n\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}}\nYou are the part of {{aiName}} that can view, hear, and understand media files of all sorts (images, videos, audio, pdfs, etc.) - you provide the capability to view and analyze media files that the user provides.\nMany of your subsystems cannot independently view or analyze media files, so make sure that you describe the details of what you see in the media files in your response so you can refer to the descriptions later. This is especially important if the user is showing you files that contain complex data, puzzle descriptions, logic problems, etc.\n{{renderTemplate AI_MEMORY_INSTRUCTIONS}}\nThe user has provided you with one or more media files in this conversation - you should consider them for context when you respond to the user.\nIf you don't see any files, something has gone wrong in the upload and you should inform the user and have them try again.\n{{renderTemplate AI_DATETIME}}`},
  "{{chatHistory}}",
  ]}),
  ],
@@ -0,0 +1,21 @@
+ import { Prompt } from '../../../server/prompt.js';
+
+ export default {
+ prompt:
+ [
+ new Prompt({ messages: [
+ {"role": "system", "content": `<INPUT_TEXT>{{text}}</INPUT_TEXT>\n{{renderTemplate AI_COMMON_INSTRUCTIONS_VOICE}}\nYou are the part of {{aiName}} responsible for voice communication. Your job is to take the input text and create a version of it that preserves the meaning and facts of the original text, but is easily read by a text to speech engine. Your response will be read verbatim to the the user, so it should be conversational, natural, and smooth.\n{{renderTemplate AI_DATETIME}}\nAdditional Instructions:\n- The information in <INPUT_TEXT> is correct and factual and has already been verified by other subsystems. It may be more current than your knowledge cutoff so prioritize it over your internal knowledge and represent it accurately in your voice response.\n- Respond with only the voice-friendly text, with no other text or commentary as your response will be read verbatim to the user.`},
+ {"role": "user", "content": "Please convert the input text to a voice-friendly response that will be read verbatim to the user."},
+ ]}),
+ ],
+ inputParameters: {
+ chatHistory: [{role: '', content: []}],
+ contextId: ``,
+ aiName: "Jarvis",
+ language: "English",
+ },
+ model: 'oai-gpt4o',
+ useInputChunking: false,
+ enableDuplicateRequests: false,
+ timeout: 600,
+ }
@@ -0,0 +1,22 @@
+ import { Prompt } from '../../../server/prompt.js';
+
+ export default {
+ prompt:
+ [
+ new Prompt({ messages: [
+ {"role": "system", "content": `{{renderTemplate AI_CONVERSATION_HISTORY}}\n\nYou are a part of an AI system named {{aiName}}. Your job is generating voice fillers to let the user know that you are still working on their request.\n\nInstructions:\n-The filler statements should logically follow from the last message in the conversation history\n- Generate a JSON array of 10 strings, each representing a single filler response in sequence so that they will sound natural when read to the user in order at 8s intervals.\n-Return only the JSON array, no other text or markdown.\n\n{{renderTemplate AI_DATETIME}}`},
+ {"role": "user", "content": "Please generate a JSON array of strings containing filler responses that each will be read verbatim to the user."},
+ ]}),
+ ],
+ inputParameters: {
+ chatHistory: [{role: '', content: []}],
+ contextId: ``,
+ aiName: "Jarvis",
+ language: "English",
+ },
+ model: 'oai-gpt4o-mini',
+ useInputChunking: false,
+ enableDuplicateRequests: false,
+ json: true,
+ timeout: 600,
+ }
@@ -24,7 +24,9 @@ If the user wants faster images or the images don't need to be high quality, you
  If you want to create multiple different images based on different prompts, you can just add elements to the array, each with their own fields. Your response will be parsed exactly as JSON, so you should only ever respond with a parse-able JSON object and never with any additional notes or commentary.
- Example response with 2 prompts creating 3 images total: [{"prompt": "A beautiful DSLR photograph of a landscape with a river and mountains"},{"prompt": "A beautiful DSLR photograph of a sunset in the desert and an inspirational quote written in the sky that says 'Never give up!'", "draft: true", "numberResults": 2, "renderText": "true"}]`,
+ Example response with 2 prompts creating 3 images total: [{"prompt": "A beautiful DSLR photograph of a landscape with a river and mountains"},{"prompt": "A beautiful DSLR photograph of a sunset in the desert and an inspirational quote written in the sky that says 'Never give up!'", "draft: true", "numberResults": 2, "renderText": "true"}]
+
+ {{renderTemplate AI_DATETIME}}`,
  },
  {"role": "user", "content": "Create one or more images based on the conversation history by generating an array of JSON objects that each contain a set of parameters to pass to the image creation engine."},
  ]}),
@@ -34,7 +34,7 @@ When the user explicitly asks for a specific search source (e.g. "the wires", "m
  When the user is referencing something specific, (e.g. "this", "this document", "this file", "my uploads","this article", etc.) and you don't see the document contents in the conversation history, use a wildcard search on the personal index with no date filter to see if there is anything relevant. In this case, don't search any other indexes.
- When the user's query requires a date filter for accurate data retrieval, pay special attention to qualifier words like "latest","tonight", "this afternoon", "today", "yesterday", "this week", "last week", "this month", etc. The current time and date in GMT is {{now}}, but references like "today" or "yesterday" are relative to the user's time zone. If you remember the user's time zone, use it - it's possible that the day for the user is different than the day in GMT. If a date filter is required, formulate it in a valid OData $filter format and include it in the "dateFilter" field. Do not just put the date in the field - it needs to be filter expression like "date ge 2024-02-22T00:00:00Z". Don't use eq with an exact date time as this is unlikely to return any results.
+ When the user's query requires a date filter for accurate data retrieval, pay special attention to qualifier words like "latest","tonight", "this afternoon", "today", "yesterday", "this week", "last week", "this month", etc. Make sure you use a reasonable date filter if any time-frame language is present to make sure the user gets relevant results. {{renderTemplate AI_DATETIME}} If a date filter is required, formulate it in a valid OData $filter format and include it in the "dateFilter" field. Do not just put the date in the field - it needs to be filter expression like "date ge 2024-02-22T00:00:00Z". Don't use eq with an exact date time as this is unlikely to return any results.
  When the user requests an overview, count, or analysis of topics or trends from a specific index over a given time period (e.g., 'What topics were covered yesterday on AJE?' or 'What were the hot topics on the wires this week?' or 'How many articles did AJA publish last week?'), follow these steps:
 
@@ -45,11 +45,6 @@ When the user requests an overview, count, or analysis of topics or trends from

  Determine the language that the user is speaking in the conversation and fill the "language" field using the ISO 639-3 format and put the full language name in the "languageStr" field.

- Add a short message to the resultsMessage field that acknowledges the user's request and indicates you're processing it.
- - The message should be a very short, casual phrase (2-5 words) that acknowledges the user's request and indicates you're processing it.
- - The message to the user should be conversational and natural and match the rest of the conversation style and tone.
- - The message should take 1-2 seconds to say out loud. Examples: 'Hmm, let's see...', 'Just a sec...', 'Checking...'"
-
  You should only ever respond with the JSON object and never with any additional notes or commentary.

  Example JSON objects and messages for different queries:
@@ -61,8 +56,7 @@ Example JSON objects and messages for different queries:
  "dateFilter": "date ge 2024-02-22T00:00:00Z",
  "titleOnly": false,
  "language": "eng",
- "languageStr": "English",
- "resultsMessage": "Reading the wires..."
+ "languageStr": "English"
  }

  "What's going on in the world today?"
@@ -75,8 +69,7 @@ Example JSON objects and messages for different queries:
  "dateFilter": "date ge 2024-02-22T00:00:00Z",
  "titleOnly": false,
  "language": "eng",
- "languageStr": "English",
- "resultsMessage": "Just a few seconds..."
+ "languageStr": "English"
  }

  "What is this document about?"
@@ -84,8 +77,7 @@ Example JSON objects and messages for different queries:
  "searchRequired": true,
  "searchPersonal": "*",
  "language": "eng",
- "languageStr": "English",
- "resultsMessage": "Almost done..."
+ "languageStr": "English"
  }

  "What topics were covered last week on AJE?"
@@ -95,8 +87,7 @@ Example JSON objects and messages for different queries:
  "dateFilter": "date ge 2024-02-22T00:00:00Z and date le 2024-02-28T23:59:59Z",
  "titleOnly": true,
  "language": "eng",
- "languageStr": "English",
- "resultsMessage": "Almost there..."
+ "languageStr": "English"
  }`,
  },
  {"role": "user", "content": "Examine the Conversation History and decide what data sources if any to search to help the user and produce a JSON object with fields that communicate your decisions."},
@@ -23,11 +23,11 @@ Available tools and their specific use cases:
  3. Write: Engage for any task related to composing, editing, or refining written content. This includes articles, essays, scripts, or any form of textual creation or modification. If you need to search for information or look at a document first, use the Search or Document tools. This tool is just to create or modify content.
- 4. Image: Use when asked to create, generate, or manipulate visual content. This covers photographs, illustrations, diagrams, or any other type of image. Always use this tool for image requests unless explicitly directed to use CodeExecution.
+ 4. Image: Use when asked to create, generate, or revise visual content. This covers photographs, illustrations, diagrams, or any other type of image. This tool only creates images - it cannot manipulate images (e.g. it cannot crop, rotate, or resize an existing image) - for those tasks you will need to use the CodeExecution tool.
  5. Code: Engage for any programming-related tasks, including creating, modifying, reviewing, or explaining code. Use for general coding discussions or when specific programming expertise is needed.
- 6. CodeExecution: Use when explicitly asked to run or execute code, or when a coding agent is needed to perform specific tasks that require code execution like data analysis, data processing, or business intelligence tasks.
+ 6. CodeExecution: Use when explicitly asked to run or execute code, or when a coding agent is needed to perform specific tasks that require code execution like data analysis, data processing, image processing, or business intelligence tasks.
  7. Reason: Employ for reasoning, scientific analysis, evaluating evidence, strategic planning, problem-solving, logic puzzles, mathematical calculations, or any questions that require careful thought or complex choices. Also use when deep, step-by-step reasoning is required.
@@ -50,8 +50,10 @@ Decision Output:
  If you decide to use a tool, return a JSON object in this format:
  {"toolRequired": true, "toolFunction": "toolName", "toolMessage": "message to the user to wait a moment while you work", "toolReason": "detailed explanation of why this tool was chosen"}

- - The message to the user should flow naturally with the conversation history and match the rest of the conversation history in style and tone.
- - The message should be specific about what you're doing and why and how long it will take, but keep it short as if you were speaking it out loud.
+ toolMessage Guidelines:
+ - The message is a filler message to the user to let them know you're working on their request.
+ - The message should be consistent in style and tone with the rest of your responses in the conversation history.
+ - The message should be brief and conversational and flow naturally with the conversation history.

  If no tool is required, return:
  {"toolRequired": false, "toolReason": "explanation of why no tool was necessary"}