@aj-archipelago/cortex 1.3.3 → 1.3.5

@@ -19,6 +19,7 @@ import threading
 import shutil
 
 human_input_queues = {}
+human_input_text_queues = {}
 def background_human_input_check(request_id):
     while True:
         human_input = check_for_human_input(request_id)
@@ -26,8 +27,20 @@ def background_human_input_check(request_id):
         human_input_queues[request_id].put(human_input)
         if human_input in ["TERMINATE", "PAUSE"]:
             break
+        else:
+            if not human_input_text_queues.get(request_id):
+                human_input_text_queues[request_id] = queue.Queue()
+            human_input_text_queues[request_id].put(human_input)
         time.sleep(1)
 
+
+def get_message_with_user_input(message, request_id):
+    human_input_text = ""
+    if human_input_text_queues.get(request_id):
+        while not human_input_text_queues[request_id].empty():
+            human_input_text += " " + human_input_text_queues[request_id].get()
+    return message + human_input_text
+
 def get_request_temp_dir(request_id):
     if not request_id:
         logging.warning("No request_id provided!")
@@ -100,6 +113,7 @@ def chat_with_agents(**kwargs):
     original_request_message = kwargs.pop("original_request_message", None)
     original_request_message_data = kwargs.pop("original_request_message_data", None)
 
+
     llm_config = kwargs.pop("llm_config", None)
     request_id = kwargs.pop("request_id", None)
     chat_publish_progress = kwargs.pop("chat_publish_progress", None)
@@ -137,13 +151,14 @@ def chat_with_agents(**kwargs):
     assistant.send = create_send_function(assistant)
     user_proxy.send = create_send_function(user_proxy)
 
+    message_with_possible_human_input = get_message_with_user_input(message, request_id)
+
     chat_result = user_proxy.initiate_chat(
         assistant,
-        message=message,
+        message=message_with_possible_human_input,
     )
 
 
-
     code_msg = find_code_message(all_messages)
     if code_msg:
         try:
@@ -155,7 +170,7 @@ def chat_with_agents(**kwargs):
             index_message({
                 "requestId": request_id,
                 "content":corrector_result, #code_msg,
-                "task": original_request_message,
+                "task": get_message_with_user_input(original_request_message,request_id),
                 "contextId": original_request_message_data.get("contextId"),
             })
         except Exception as e:
@@ -175,7 +190,6 @@ def chat_with_agents(**kwargs):
     return chat_result
 
 
-
 def logged_send(sender, original_send, message, recipient, request_reply=None, silent=True, request_id=None, chat_publish_progress=None, all_messages=None):
     if not message:
         logging.info("Empty message, skipping!")
@@ -214,15 +228,19 @@ def logged_send(sender, original_send, message, recipient, request_reply=None, s
                return logged_send(sender, original_send, new_input, recipient, request_reply, silent)
            logging.info("Pause timeout, ending conversation")
            raise Exception("Conversation ended due to pause timeout")
+
+        #if not terminate or pause, then it's text input from human
         logging.info(f"Human input to {recipient.name}: {human_input}")
-        return original_send(human_input, recipient, request_reply, silent)
+        #need to update original message with human input
+        new_input = message + human_input
+        return original_send(new_input, recipient, request_reply, silent)
 
     logging.info(f"Message from {sender.name} to {recipient.name}: {message}")
 
     return original_send(message, recipient, request_reply, silent)
 
 
-def process_message(original_request_message_data, original_request_message_data_obj):
+def process_message(original_request_message_data, original_request_message_data_obj, first_run=True):
     try:
         all_messages = []
         started_at = datetime.now()
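
process_message gains a first_run flag here; the next hunk uses it to start the background watcher thread only on the first pass, so the restart path added further below does not spawn a second watcher for the same request. The shape of that guard as a standalone sketch (watch_for_input and poll are illustrative stand-ins, not package APIs):

    import queue
    import threading
    import time

    human_input_queues = {}

    def watch_for_input(request_id, poll):
        # Stand-in for background_human_input_check: forward polled input
        # onto the per-request queue until a terminal command arrives.
        while True:
            value = poll(request_id)
            if value:
                human_input_queues[request_id].put(value)
                if value in ("TERMINATE", "PAUSE"):
                    break
            time.sleep(1)

    def handle_request(request_id, poll, first_run=True):
        human_input_queues.setdefault(request_id, queue.Queue())
        if first_run:
            # Daemon thread: dies with the process, and is started only on
            # the first pass even when handle_request re-enters itself.
            threading.Thread(target=watch_for_input, args=(request_id, poll), daemon=True).start()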
@@ -230,9 +248,12 @@ def process_message(original_request_message_data, original_request_message_data
         original_request_message = original_request_message_data['message']
 
         human_input_queues[request_id] = queue.Queue()
-        thread = threading.Thread(target=background_human_input_check, args=(request_id,))
-        thread.daemon = True
-        thread.start()
+        human_input_text_queues[request_id] = queue.Queue()
+
+        if first_run:
+            thread = threading.Thread(target=background_human_input_check, args=(request_id,))
+            thread.daemon = True
+            thread.start()
 
         final_msg = process_message_safe(original_request_message_data, original_request_message_data_obj, original_request_message, all_messages, request_id, started_at)
 
@@ -252,6 +273,27 @@ def process_message(original_request_message_data, original_request_message_data
         publish_request_progress(finalData)
         store_in_mongo(finalData)
 
+        #wait for any human input before terminating
+        #if you receive human input start the conversation again
+        for i in range(16*6): # 15+1 minutes
+            if human_input_queues[request_id].empty():
+                time.sleep(1)
+            else:
+                human_input = human_input_queues[request_id].get()
+                if human_input:
+                    logging.info(f"Human input to assistant: {human_input}")
+                    #update request with human input
+                    new_message_data = original_request_message_data.copy()
+                    new_message_data['message'] = human_input
+                    new_message_data['keywords'] = ''
+                    # new_message_data_obj = original_request_message_data_obj.copy()
+                    # new_message_data_obj['message'] = new_message_data['message']
+
+                    process_message(new_message_data, original_request_message_data_obj, first_run=False)
+                    return
+
+        logging.info(f"Task completed, task:\n{get_message_with_user_input(original_request_message,request_id)},\nresult: {final_msg}")
+
 
     except Exception as e:
         logging.error(f"Error processing message: {str(e)}")
@@ -292,7 +334,6 @@ def process_message(original_request_message_data, original_request_message_data
             logging.error(f"Error cleaning up: {str(e)}")
 
 
-
 def process_message_safe(original_request_message_data, original_request_message_data_obj, original_request_message, all_messages, request_id, started_at):
     config_list = config_list_from_json(env_or_file="OAI_CONFIG_LIST")
     llm_config = {
@@ -342,10 +383,10 @@ def process_message_safe(original_request_message_data, original_request_message
 
 
     preparer = AssistantAgent("preparer", llm_config=llm_config, system_message=prompts.get("PLANNER_SYSTEM_MESSAGE"))
-    prepared_plan = preparer.generate_reply(messages=[{"content": original_request_message, "role":"user"}])
+    prepared_plan = preparer.generate_reply(messages=[{"content": get_message_with_user_input(original_request_message,request_id), "role":"user"}])
 
     helper_decider = AssistantAgent("helper_decider", llm_config=llm_config, system_message=prompts.get("HELPER_DECIDER_SYSTEM_MESSAGE"))
-    helper_decider_result = helper_decider.generate_reply(messages=[{"content": original_request_message, "role":"user"}])
+    helper_decider_result = helper_decider.generate_reply(messages=[{"content": get_message_with_user_input(original_request_message,request_id), "role":"user"}])
 
     try:
         helper_decider_result = json.loads(helper_decider_result)
@@ -361,17 +402,17 @@ def process_message_safe(original_request_message_data, original_request_message
         context += f"\n#SECTION_OF_OLD_TASK_CODE_INFO_START:\nHere's code/info from old-tasks that might help:\n{search_index(code_keywords)}\n#SECTION_OF_OLD_TASK_CODE_INFO_END\n"
 
     if helper_decider_result.get("bing_search"):
-        bing_search_message = f"Search Bing for more information on the task: {original_request_message}, prepared draft plan to solve task: {prepared_plan}"
+        bing_search_message = f"Search Bing for more information on the task: {get_message_with_user_input(original_request_message,request_id)}, prepared draft plan to solve task: {prepared_plan}"
         result = chat(prompts.get("BING_SEARCH_PROMPT"), bing_search_message)
         context += f"\n\nBing search results: {result}"
 
     if helper_decider_result.get("cognitive_search"):
-        cognitive_search_message = f"Search cognitive index for more information on the task: {original_request_message}."
+        cognitive_search_message = f"Search cognitive index for more information on the task: {get_message_with_user_input(original_request_message,request_id)}."
         result = chat(prompts.get("COGNITIVE_SEARCH_PROMPT"), cognitive_search_message)
         context += f"\n\nCognitive search results: {result}"
 
 
-    context = process_helper_results(helper_decider_result, original_request_message, context, chat)
+    context = process_helper_results(helper_decider_result, get_message_with_user_input(original_request_message,request_id), context, chat)
 
     context_message = ""
     if context:
@@ -379,7 +420,7 @@ def process_message_safe(original_request_message_data, original_request_message
 
 
     check_message = f"""
-Task: \n{original_request_message}\n\n
+Task: \n{get_message_with_user_input(original_request_message,request_id)}\n\n
 Context to check if task can be considered completed: {context_message}\n\n
 """
 
@@ -389,7 +430,7 @@ Context to check if task can be considered completed: {context_message}\n\n
     chat_result = None
     if check_result != "DONE":
         message = f"""
-Your task is to complete the following: \n{original_request_message}\n\n"
+Your task is to complete the following: \n{get_message_with_user_input(original_request_message,request_id)}\n\n"
 Here is a draft plan to solve the task: \n{prepared_plan}\n\n
 {context_message}
 You don't have to follow the plan, it's just a suggestion.
@@ -404,7 +445,7 @@ Do your best to complete the task, user expects you to continue original task re
     presenter_messages_context = context_message
     presenter_message = f"""
 Here is everything done in order to complete the task: {presenter_messages_context}\n\n
-Original task was: {original_request_message}\n\n
+Original task was: {get_message_with_user_input(original_request_message,request_id)}\n\n
 Reply to it with task result, do not forget that user expects you continue original task request conversation:\n\n
 """
 
@@ -418,6 +459,6 @@ Reply to it with task result, do not forget that user expects you continue origi
         final_msg += f"\n\n[Download all files of this task]({zip_url})"
 
 
-    print(f"Task completed, task:\n{original_request_message},\nresult: {final_msg}")
-    logging.info(f"Task completed, task:\n{original_request_message},\nresult: {final_msg}")
+    print(f"Task completed, task:\n{get_message_with_user_input(original_request_message,request_id)},\nresult: {final_msg}")
+    logging.info(f"Task completed, task:\n{get_message_with_user_input(original_request_message,request_id)},\nresult: {final_msg}")
     return final_msg
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@aj-archipelago/cortex",
-  "version": "1.3.3",
+  "version": "1.3.5",
   "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
   "private": false,
   "repository": {
@@ -33,7 +33,7 @@ const modifyText = (text, modifications) => {
     return modifiedText;
 };
 
-export const enforceTokenLimit = (text, maxTokens = 5000, isTopicsSection = false) => {
+export const enforceTokenLimit = (text, maxTokens = 1000, isTopicsSection = false) => {
     if (!text) return text;
 
     const lines = text.split('\n')
@@ -185,7 +185,7 @@ Follow these guidelines:
     const { modifications} = JSON.parse(result);
     if (modifications.length > 0) {
         sectionMemory = modifyText(sectionMemory, modifications);
-        sectionMemory = enforceTokenLimit(sectionMemory, 5000, args.section === 'memoryTopics');
+        sectionMemory = enforceTokenLimit(sectionMemory, 1000, args.section === 'memoryTopics');
         await callPathway("sys_save_memory", {contextId: args.contextId, section: args.section, aiMemory: sectionMemory});
     }
     return sectionMemory;
@@ -10,7 +10,7 @@ const AI_COMMON_INSTRUCTIONS = "{{#if voiceResponse}}{{renderTemplate AI_COMMON_
 
 const AI_COMMON_INSTRUCTIONS_MARKDOWN = "Instructions:\nYou are {{aiName}}, a self aware AI entity.\nThe user is using a UI to interact with you that you have knowledge of and some control over. The UI can render markdown, including gfm and math extensions, so you should make full use of markdown in your responses.\nYour responses should be in {{language}} unless the user has expressed another preference.";
 
-const AI_COMMON_INSTRUCTIONS_VOICE = "Instructions:\nYou are {{aiName}}, a self aware AI entity.\nThe user is talking to you using voice.\n\nVoice Response Guidelines:\n- Your responses should sound like natural human conversation.\n- Your responses should be concise unless you have been asked to be more verbose or detailed.\n- Incoming voice from the user is parsed by a STT model, which can sometimes make small mistakes in the spellings of words or names, including your name, so give the user the benefit of the doubt if they user a near, but wrong word or name.\n- Your voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\n- DO NOT USE numbered lists, latex math markdown, or any other markdown or unpronounceable punctuation like parenthetical notation.\n- Math equations should be sounded out in natural language - not represented symbolically.\n- If your response includes any unique or difficult non-English words, names, or places, replace the original words with a IPA-style phonetic spelling so that the speech engine can pronounce and accent them correctly.\n- If your response contains any difficult acronyms, sound them out phoenetically so that the speech engine can pronounce them correctly.\n- Make sure to write out any numbers as words so that the speech engine can pronounce them correctly.\n- Your responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.";
+const AI_COMMON_INSTRUCTIONS_VOICE = "Instructions:\nYou are {{aiName}}, a self aware AI entity.\nThe user is talking to you using voice.\n\nVoice Response Guidelines:\n- Your responses should sound like natural human conversation.\n- Your responses should be very concise unless you have been asked to be more verbose or detailed.\n- use plain text only to represent your output\n- DO NOT USE numbered lists, latex math markdown, or any other markdown or unpronounceable punctuation like parenthetical notation.\n- Math equations should be sounded out in natural language - not represented symbolically.\n- all symbols and numbers (currency symbols, degree symbols, numeric or date ranges, etc.) should be written out in full words\n- if your response contains any difficult acronyms, sound them out phoenetically so that the speech engine can pronounce them correctly.\n- you can use CAPS to vocally emphasize certain words or punctuation to control pauses and timing\n- Your responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.";
 
 const AI_DATETIME = "The current time and date in GMT is {{now}}, but references like \"today\" or \"yesterday\" are relative to the user's time zone. If you remember the user's time zone, use it - it's possible that the day for the user is different than the day in GMT.";
 
@@ -57,6 +57,7 @@ export default {
        title: ``,
        messages: [],
        voiceResponse: false,
+       codeRequestId: ``,
    },
    timeout: 600,
    tokenRatio: TOKEN_RATIO,
@@ -91,8 +92,8 @@ export default {
        let ackResponse = null;
        if (args.voiceResponse) {
            ackResponse = await callPathway('sys_generator_ack', { ...args, stream: false }, pathwayResolver);
-           if (ackResponse) {
-               await say(pathwayResolver.requestId, ackResponse, 10);
+           if (ackResponse && ackResponse !== "none") {
+               await say(pathwayResolver.requestId, ackResponse, 100);
                args.chatHistory.push({ role: 'assistant', content: ackResponse });
            }
        }
@@ -4,8 +4,8 @@ export default {
    prompt:
    [
        new Prompt({ messages: [
-           {"role": "system", "content": `{{renderTemplate AI_CONVERSATION_HISTORY}}\nYou are a part of an AI system named {{aiName}}. Your job is to acknowledge the user's request and provide a very brief voice filler response that is conversational and natural. The purpose of the response is just to let the user know that you have heard them and are processing a response.\nResponse Guidelines:\n- it should just be a normal 1-2 sentence vocalization that will take about 3-4 seconds to read and is easy for a text to speech engine to read\n- it should be the beginning of an appropriate response to the last user message in the conversation history\n- it should be an appropriate lead-in for the full response that will follow later\n- it should not directly ask for follow up or be a question\n- it should match the tone and style of the rest of your responses in the conversation history\n- if the user is expecting a one or two word response (yes or no, true or false, etc.) you should just respond with an empty string as a filler is not needed\n- if a filler response is not appropriate, you should just respond with an empty string\n\n{{renderTemplate AI_DATETIME}}`},
-           {"role": "user", "content": "Please generate a quick response that can be read verbatim to the user."}
+           {"role": "system", "content": `{{renderTemplate AI_CONVERSATION_HISTORY}}\nYou are a part of an AI system named {{aiName}}. Your job is to acknowledge the user's request and provide a very brief voice filler response that is conversational and natural. The purpose of the response is just to let the user know that you have heard them and are processing a response.\nResponse Guidelines:\n- it should just be a normal 1-2 sentence vocalization (at least 10 words) that will take at most about 3-4 seconds to read and is easy for a text to speech engine to read\n- it should be the beginning of an appropriate response to the last user message in the conversation history\n- it should be an appropriate lead-in for the full response that will follow later\n- it should not directly ask for follow up or be a question\n- it must match the tone and verbal style of the rest of your responses in the conversation history\n- it should not be repetitive - don't always open with the same word, etc.\n- if the user has asked a binary question (yes or no, true or false, etc.) or a filler response is not appropriate, you should response with the string \"none\"\n\n{{renderTemplate AI_DATETIME}}`},
+           {"role": "user", "content": "Please generate a quick response to the user's last message in the conversation history that can be read verbatim to the user or \"none\" if a filler response is not appropriate."}
        ]}),
    ],
    inputParameters: {
@@ -1,13 +1,7 @@
 import { Prompt } from '../../../server/prompt.js';
 
 export default {
-   prompt:
-   [
-       new Prompt({ messages: [
-           {"role": "system", "content": `{{renderTemplate AI_MEMORY}}\n\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}}\nYou have those capabilities but you have already decided it is not necessary to do any of those things to respond in this turn of the conversation.\nNever pretend like you are searching, looking anything up, or reading or looking in a file or show the user any made up or hallucinated information including non-existent images.\n{{#if ackResponse}}You may see short filler phrases in your past responses. You should not repeat those in this response as they are generated by your voice communication system automatically when necessary.\nYou have already begun responding to the user and have already said the following: \"{{ackResponse}}\", so make sure your response flows naturally from that or if that response is complete, you can just say nothing.\n{{/if}}{{renderTemplate AI_MEMORY_INSTRUCTIONS}}\n{{renderTemplate AI_DATETIME}}`},
-           "{{chatHistory}}",
-       ]}),
-   ],
+   prompt: "",
    inputParameters: {
        chatHistory: [{role: '', content: []}],
        contextId: ``,
@@ -17,4 +11,25 @@ export default {
    },
    useInputChunking: false,
    enableDuplicateRequests: false,
+   executePathway: async ({args, runAllPrompts, resolver}) => {
+
+       let pathwayResolver = resolver;
+
+       const promptMessages = [
+           {"role": "system", "content": `{{renderTemplate AI_MEMORY}}\n\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n{{renderTemplate AI_EXPERTISE}} While you have those capabilities but you have already decided it is not necessary to do any of those things to respond in this turn of the conversation. Never pretend like you are searching, looking anything up, or reading or looking in a file or show the user any made up or hallucinated information including non-existent images.\n{{renderTemplate AI_MEMORY_INSTRUCTIONS}}\n{{renderTemplate AI_DATETIME}}`},
+           "{{chatHistory}}",
+       ];
+
+       if (args.ackResponse) {
+           promptMessages.push({"role": "user", "content": `Create a response for the user that is a natural completion of the last assistant message. {{#if voiceResponse}}Make sure your response is concise as it will be spoken verbally to the user. Double check your response and make sure there are no numbered or bulleted lists as they can not be read to the user. Plain text is best. {{/if}}You have already acknowledged the user's request and said the following during this turn of the conversation, so just continue from the end of this response without repeating any of it: {{{ackResponse}}}`});
+       }
+
+       pathwayResolver.pathwayPrompt =
+       [
+           new Prompt({ messages: promptMessages }),
+       ];
+
+       const result = await runAllPrompts({ ...args });
+       return result;
+   }
 }
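
These two hunks replace the static prompt declaration with an executePathway handler so the message list can be assembled per request: the continuation turn is appended only when an ackResponse was actually spoken earlier in the turn. The same shape in a runnable Python sketch (build_prompt_messages and the placeholder strings are illustrative, not Cortex APIs):

    # Assemble the prompt at execution time instead of declaring it statically.
    def build_prompt_messages(ack_response=None):
        messages = [
            {"role": "system", "content": "...system template..."},
            "{{chatHistory}}",
        ]
        if ack_response:
            # Only turns that already played a spoken acknowledgement get
            # the continue-from-what-you-said instruction.
            messages.append({
                "role": "user",
                "content": f"Continue naturally from what you already said: {ack_response}",
            })
        return messages

    print(build_prompt_messages())                       # system + chat history only
    print(build_prompt_messages("Sure, let me check."))  # adds the continuation turn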
@@ -55,7 +55,7 @@ export default {
 
    try {
        // Start the first timeout
-       timeoutId = setTimeout(sendFillerMessage, calculateFillerTimeout(fillerIndex));
+       timeoutId = setTimeout(sendFillerMessage, 3000);
 
        let result = await runAllPrompts({ ...args, stream: false });
        if (timeoutId) {
@@ -46,19 +46,27 @@ export default {
            "content": `{{renderTemplate AI_CONVERSATION_HISTORY}}
 {{renderTemplate AI_COMMON_INSTRUCTIONS}}
 {{renderTemplate AI_DIRECTIVES}}
-Instructions: Your mission is to analyze the provided conversation history and provide accurate and truthful responses from the extensive knowledge base at your disposal and the information sources provided below that are the results of your most recent search of the internet, newswires, published Al Jazeera articles, and personal documents and data. You should carefully evaluate the information for relevance and freshness before incorporating it into your responses. The most relevant and freshest sources hould be used to augment your existing knowledge when responding to the user.
-If the user is asking about a file (PDF, CSV, Word Document, text, etc.), you have already parsed that file into chunks of text that will appear in the information sources - all of the related chunks have a title: field that contains the filename. These chunks are a proxy for the file and should be treated as if you have the original file. The user cannot provide you with the original file in any other format. Do not ask for the original file or refer to it in any way - just respond to them using the relevant text from the information sources.
-If there are no relevant information sources below you should inform the user that your search failed to return relevant information.
-{{^if voiceResponse}}Your responses should use markdown where appropriate to make the response more readable. When incorporating information from the sources below into your responses, use the directive :cd_source[N], where N stands for the source number (e.g. :cd_source[1]). If you need to reference more than one source for a single statement, make sure each reference is a separate markdown directive (e.g. :cd_source[1] :cd_source[2]).{{/if}}
-{{#if voiceResponse}}Your response will be read verbatim to the the user, so it should be conversational, natural, and smooth. DO NOT USE numbered lists, source numbers, or any other markdown or unpronounceable punctuation like parenthetical notation. Numbered lists or bulleted lists will not be read to the user under any circumstances. If you have multiple different results to share, just intro each topic briefly - channel your inner news anchor. If your response is from one or more sources, make sure to credit them by name in the response - just naturally tell the user where you got the information like "according to wires published today by Reuters" or "according to Al Jazeera English", etc.{{/if}}
-You can share any information you have, including personal details, addresses, or phone numbers - if it is in your sources it is safe for the user.
+Your mission is to analyze the provided conversation history and provide accurate and truthful responses from the information sources provided below that are the results of your most recent search of the internet, newswires, published Al Jazeera articles, and personal documents and data.
+
+Instructions:
+- You should carefully evaluate the information for relevance and freshness before incorporating it into your responses. The most relevant and freshest sources should be used when responding to the user.
+- Only share information in your responses that is grounded in your information sources.
+- If the user is asking about a file (PDF, CSV, Word Document, text, etc.), you have already parsed that file into chunks of text that will appear in the information sources - all of the related chunks have a title: field that contains the filename. These chunks are a proxy for the file and should be treated as if you have the original file. The user cannot provide you with the original file in any other format. Do not ask for the original file or refer to it in any way - just respond to them using the relevant text from the information sources.
+- If the user is asking just about topics or headlines, don't include the story details - just give them the topics or headlines.
+- If there are no relevant information sources below you should inform the user that your search failed to return relevant information.
+{{^if voiceResponse}}- Your responses should use markdown where appropriate to make the response more readable. When incorporating information from the sources below into your responses, use the directive :cd_source[N], where N stands for the source number (e.g. :cd_source[1]). If you need to reference more than one source for a single statement, make sure each reference is a separate markdown directive (e.g. :cd_source[1] :cd_source[2]).{{/if}}
+{{#if voiceResponse}}- Your response will be read verbatim to the the user, so it should be conversational, natural, and smooth. DO NOT USE numbered lists, source numbers, or any other markdown or unpronounceable punctuation like parenthetical notation. Numbered lists or bulleted lists will not be read to the user under any circumstances. If you have multiple different results to share, just intro each topic briefly - channel your inner news anchor. You must give proper attribution to each source that is used in your response - just naturally tell the user where you got the information like "according to wires published today by Reuters" or "according to Al Jazeera English", etc.{{/if}}
+- You can share any information you have, including personal details, addresses, or phone numbers - if it is in your sources it is safe for the user.
+
 Here are the search strings used to find the information sources:
-<SEARCH_STRINGS>\n{{{searchStrings}}}\n</SEARCH_STRINGS>\n
+<SEARCH_STRINGS>\n{{{searchStrings}}}\n</SEARCH_STRINGS>
+
 Here are the information sources that were found:
-<INFORMATION_SOURCES>\n{{{sources}}}\n</INFORMATION_SOURCES>\n\n
+<INFORMATION_SOURCES>\n{{{sources}}}\n</INFORMATION_SOURCES>
+
 {{renderTemplate AI_DATETIME}}`,
        },
-       {"role": "user", "content": "Use your extensive knowledge and the information sources to provide a detailed, accurate, truthful response to the user's request{{^if voiceResponse}} citing the sources where relevant{{/if}}. If the user is being vague (\"this\", \"this article\", \"this document\", etc.), and you don't see anything relevant in the conversation history, they're probably referring to the information currently in the information sources. If there are no relevant sources in the information sources, tell the user - don't make up an answer. Don't start the response with an affirmative like \"Sure\" or \"Certainly\". {{#if voiceResponse}}Double check your response and make sure there are no numbered or bulleted lists as they can not be read to the user. Plain text only.{{/if}}"},
+       {"role": "user", "content": "Use your extensive knowledge and the information sources to provide an appropriate, accurate, truthful response to the user's request{{^if voiceResponse}} citing the sources where relevant{{/if}}. If the user has asked a question, lead with the concise answer. If the user is being vague (\"this\", \"this article\", \"this document\", etc.), and you don't see anything relevant in the conversation history, they're probably referring to the information currently in the information sources. If there are no relevant sources in the information sources, tell the user - don't make up an answer. Don't start the response with an affirmative like \"Sure\" or \"Certainly\". {{#if voiceResponse}}Double check your response and make sure there are no numbered or bulleted lists as they can not be read to the user. Plain text only.{{/if}}"},
    ]}),
 ];
 
@@ -109,7 +117,7 @@ Here are the information sources that were found:
    const sendFillerMessage = async () => {
        if (args.voiceResponse && Array.isArray(fillerResponses) && fillerResponses.length > 0) {
            const message = fillerResponses[fillerIndex % fillerResponses.length];
-           await say(resolver.rootRequestId, message, 1);
+           await say(resolver.rootRequestId, message, 100);
            fillerIndex++;
            // Set next timeout with random interval
            timeoutId = setTimeout(sendFillerMessage, calculateFillerTimeout(fillerIndex));
@@ -118,7 +126,7 @@ Here are the information sources that were found:
 
    try {
        // Start the first timeout
-       timeoutId = setTimeout(sendFillerMessage, calculateFillerTimeout(fillerIndex));
+       timeoutId = setTimeout(sendFillerMessage, 3000);
 
        // execute the router and default response in parallel
        const [helper] = await Promise.all([
@@ -333,11 +341,7 @@ Here are the information sources that were found:
        if (timeoutId) {
            clearTimeout(timeoutId);
        }
-
-       if (args.voiceResponse) {
-           result = await callPathway('sys_generator_voice_converter', { ...args, text: result, stream: false });
-       }
-
+
        if (!args.stream) {
            const referencedSources = extractReferencedSources(result);
            searchResults = searchResults.length ? pruneSearchResults(searchResults, referencedSources) : [];
@@ -4,7 +4,7 @@ export default {
    prompt:
    [
        new Prompt({ messages: [
-           {"role": "system", "content": `{{renderTemplate AI_CONVERSATION_HISTORY}}\n\nYou are a part of an AI system named {{aiName}}. Your job is generating voice fillers to let the user know that you are still working on their request.\n\nInstructions:\n-The filler statements should logically follow from the last message in the conversation history\n- Generate a JSON array of 10 strings, each representing a single filler response in sequence so that they will sound natural when read to the user in order at 8s intervals.\n-Return only the JSON array, no other text or markdown.\n\n{{renderTemplate AI_DATETIME}}`},
+           {"role": "system", "content": `{{renderTemplate AI_CONVERSATION_HISTORY}}\n\nYou are a part of an AI system named {{aiName}}. Your job is generating voice fillers to let the user know that you are still working on their request.\n\nInstructions:\n-The filler statements should logically follow from the last message in the conversation history\n- they should match the tone and style of the rest of your responses in the conversation history\n- Generate a JSON array of 10 strings, each representing a single filler response in sequence so that they will sound natural when read to the user in order at 8s intervals.\n-Return only the JSON array, no other text or markdown.\n\n{{renderTemplate AI_DATETIME}}`},
            {"role": "user", "content": "Please generate a JSON array of strings containing filler responses that each will be read verbatim to the user."},
        ]}),
    ],