@aj-archipelago/cortex 1.2.1 → 1.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. package/config.js +38 -11
  2. package/helper-apps/cortex-autogen/OAI_CONFIG_LIST +2 -1
  3. package/helper-apps/cortex-autogen/agents.py +392 -0
  4. package/helper-apps/cortex-autogen/agents_extra.py +14 -0
  5. package/helper-apps/cortex-autogen/config.py +18 -0
  6. package/helper-apps/cortex-autogen/data_operations.py +29 -0
  7. package/helper-apps/cortex-autogen/function_app.py +6 -3
  8. package/helper-apps/cortex-autogen/main.py +4 -4
  9. package/helper-apps/cortex-autogen/prompts.py +196 -0
  10. package/helper-apps/cortex-autogen/prompts_extra.py +5 -0
  11. package/helper-apps/cortex-autogen/requirements.txt +2 -1
  12. package/helper-apps/cortex-autogen/search.py +83 -0
  13. package/helper-apps/cortex-autogen/test.sh +40 -0
  14. package/helper-apps/cortex-autogen/utils.py +78 -0
  15. package/lib/handleBars.js +25 -0
  16. package/lib/logger.js +2 -0
  17. package/lib/util.js +3 -1
  18. package/package.json +1 -1
  19. package/pathways/chat_code.js +1 -1
  20. package/pathways/chat_context.js +1 -1
  21. package/pathways/chat_jarvis.js +1 -1
  22. package/pathways/chat_persist.js +1 -1
  23. package/pathways/chat_title.js +25 -0
  24. package/pathways/image_recraft.js +1 -1
  25. package/pathways/rag.js +1 -1
  26. package/pathways/rag_jarvis.js +1 -1
  27. package/pathways/rag_search_helper.js +1 -1
  28. package/pathways/system/entity/memory/sys_memory_manager.js +71 -0
  29. package/pathways/system/entity/memory/sys_memory_required.js +21 -0
  30. package/pathways/system/entity/memory/sys_memory_update.js +196 -0
  31. package/pathways/system/entity/memory/sys_read_memory.js +37 -0
  32. package/pathways/system/entity/memory/sys_save_memory.js +60 -0
  33. package/pathways/system/entity/shared/sys_entity_constants.js +27 -0
  34. package/pathways/system/entity/sys_entity_continue.js +55 -0
  35. package/pathways/system/entity/sys_entity_start.js +239 -0
  36. package/pathways/system/entity/sys_generator_error.js +20 -0
  37. package/pathways/system/entity/sys_generator_expert.js +26 -0
  38. package/pathways/system/entity/sys_generator_image.js +127 -0
  39. package/pathways/system/entity/sys_generator_quick.js +19 -0
  40. package/pathways/system/entity/sys_generator_reasoning.js +27 -0
  41. package/pathways/system/entity/sys_generator_results.js +310 -0
  42. package/pathways/system/entity/sys_generator_video_vision.js +27 -0
  43. package/pathways/system/entity/sys_image_prompt_builder.js +35 -0
  44. package/pathways/system/entity/sys_query_builder.js +110 -0
  45. package/pathways/system/entity/sys_router_code.js +37 -0
  46. package/pathways/system/entity/sys_router_tool.js +67 -0
  47. package/pathways/{sys_claude_35_sonnet.js → system/rest_streaming/sys_claude_35_sonnet.js} +1 -1
  48. package/pathways/{sys_claude_3_haiku.js → system/rest_streaming/sys_claude_3_haiku.js} +1 -1
  49. package/pathways/{sys_google_chat.js → system/rest_streaming/sys_google_chat.js} +1 -1
  50. package/pathways/{sys_google_code_chat.js → system/rest_streaming/sys_google_code_chat.js} +1 -1
  51. package/pathways/{sys_google_gemini_chat.js → system/rest_streaming/sys_google_gemini_chat.js} +1 -1
  52. package/pathways/{sys_openai_chat.js → system/rest_streaming/sys_openai_chat.js} +1 -1
  53. package/pathways/{sys_openai_chat_16.js → system/rest_streaming/sys_openai_chat_16.js} +1 -1
  54. package/pathways/{sys_openai_chat_gpt4.js → system/rest_streaming/sys_openai_chat_gpt4.js} +1 -1
  55. package/pathways/{sys_openai_chat_gpt4_32.js → system/rest_streaming/sys_openai_chat_gpt4_32.js} +1 -1
  56. package/pathways/{sys_openai_chat_gpt4_turbo.js → system/rest_streaming/sys_openai_chat_gpt4_turbo.js} +1 -1
  57. package/pathways/{sys_parse_numbered_object_list.js → system/sys_parse_numbered_object_list.js} +2 -2
  58. package/pathways/{sys_repair_json.js → system/sys_repair_json.js} +1 -1
  59. package/pathways/{run_claude35_sonnet.js → system/workspaces/run_claude35_sonnet.js} +1 -1
  60. package/pathways/{run_claude3_haiku.js → system/workspaces/run_claude3_haiku.js} +1 -1
  61. package/pathways/{run_gpt35turbo.js → system/workspaces/run_gpt35turbo.js} +1 -1
  62. package/pathways/{run_gpt4.js → system/workspaces/run_gpt4.js} +1 -1
  63. package/pathways/{run_gpt4_32.js → system/workspaces/run_gpt4_32.js} +1 -1
  64. package/server/pathwayResolver.js +62 -10
  65. package/server/plugins/azureCognitivePlugin.js +14 -1
  66. package/server/plugins/azureVideoTranslatePlugin.js +1 -1
  67. package/server/plugins/claude3VertexPlugin.js +25 -15
  68. package/server/plugins/gemini15ChatPlugin.js +1 -1
  69. package/server/plugins/geminiChatPlugin.js +1 -1
  70. package/server/plugins/modelPlugin.js +10 -1
  71. package/server/plugins/openAiChatPlugin.js +4 -3
  72. package/server/plugins/openAiDallE3Plugin.js +12 -4
  73. package/server/plugins/openAiVisionPlugin.js +1 -2
  74. package/server/plugins/replicateApiPlugin.js +75 -17
  75. package/tests/multimodal_conversion.test.js +6 -8
  76. package/helper-apps/cortex-autogen/myautogen.py +0 -317
  77. package/helper-apps/cortex-autogen/prompt.txt +0 -0
  78. package/helper-apps/cortex-autogen/prompt_summary.txt +0 -37
  79. package/pathways/index.js +0 -154
  80. package/pathways/{sys_openai_completion.js → system/rest_streaming/sys_openai_completion.js} +0 -0
package/config.js CHANGED
@@ -6,6 +6,7 @@ import { fileURLToPath, pathToFileURL } from 'url';
 import GcpAuthTokenHelper from './lib/gcpAuthTokenHelper.js';
 import logger from './lib/logger.js';
 import PathwayManager from './lib/pathwayManager.js';
+import { readdir } from 'fs/promises';
 
 const __dirname = path.dirname(fileURLToPath(import.meta.url));
 
@@ -221,7 +222,7 @@ var config = convict({
         "type": "REPLICATE-API",
         "url": "https://api.replicate.com/v1/models/black-forest-labs/flux-1.1-pro/predictions",
         "headers": {
-            "Prefer": "wait",
+            "Prefer": "wait=60",
             "Authorization": "Token {{REPLICATE_API_KEY}}",
             "Content-Type": "application/json"
         },
@@ -230,7 +231,7 @@ var config = convict({
         "type": "REPLICATE-API",
         "url": "https://api.replicate.com/v1/models/black-forest-labs/flux-schnell/predictions",
         "headers": {
-            "Prefer": "wait",
+            "Prefer": "wait=10",
             "Authorization": "Token {{REPLICATE_API_KEY}}",
             "Content-Type": "application/json"
         },
@@ -411,25 +412,51 @@ const createDynamicPathwayManager = async (config, basePathway) => {
 const buildPathways = async (config) => {
     const { pathwaysPath, corePathwaysPath, basePathwayPath } = config.getProperties();
 
-    const pathwaysURL = pathToFileURL(pathwaysPath).toString();
-    const corePathwaysURL = pathToFileURL(corePathwaysPath).toString();
     const basePathwayURL = pathToFileURL(basePathwayPath).toString();
-
+
     // Load cortex base pathway
     const basePathway = await import(basePathwayURL).then(module => module.default);
 
-    // Load core pathways, default from the Cortex package
-    logger.info(`Loading core pathways from ${corePathwaysPath}`)
-    let loadedPathways = await import(`${corePathwaysURL}/index.js`).then(module => module);
+    // Helper function to recursively load pathway files
+    const loadPathwaysFromDir = async (dirPath) => {
+        const pathways = {};
+        try {
+            const files = await readdir(dirPath, { withFileTypes: true });
+
+            for (const file of files) {
+                const fullPath = path.join(dirPath, file.name);
+                if (file.isDirectory()) {
+                    // Skip the shared directory
+                    if (file.name === 'shared') continue;
+
+                    // Recursively load pathways from other subdirectories
+                    const subPathways = await loadPathwaysFromDir(fullPath);
+                    Object.assign(pathways, subPathways);
+                } else if (file.name.endsWith('.js')) {
+                    // Load individual pathway file
+                    const pathwayURL = pathToFileURL(fullPath).toString();
+                    const pathway = await import(pathwayURL).then(module => module.default || module);
+                    const pathwayName = path.basename(file.name, '.js');
+                    pathways[pathwayName] = pathway;
+                }
+            }
+        } catch (error) {
+            logger.error(`Error loading pathways from ${dirPath}: ${error.message}`);
+        }
+        return pathways;
+    };
+
+    // Load core pathways
+    logger.info(`Loading core pathways from ${corePathwaysPath}`);
+    let loadedPathways = await loadPathwaysFromDir(corePathwaysPath);
 
     // Load custom pathways and override core pathways if same
     if (pathwaysPath && fs.existsSync(pathwaysPath)) {
-        logger.info(`Loading custom pathways from ${pathwaysPath}`)
-        const customPathways = await import(`${pathwaysURL}/index.js`).then(module => module);
+        logger.info(`Loading custom pathways from ${pathwaysPath}`);
+        const customPathways = await loadPathwaysFromDir(pathwaysPath);
         loadedPathways = { ...loadedPathways, ...customPathways };
     }
 
-
     const { DYNAMIC_PATHWAYS_CONFIG_FILE, DYNAMIC_PATHWAYS_CONFIG_JSON } = process.env;
 
     let dynamicPathwayConfig;
package/helper-apps/cortex-autogen/OAI_CONFIG_LIST CHANGED
@@ -1,5 +1,6 @@
 [
     {
-        "model": "claude-3.5-sonnet"
+        "model": "gpt-4o",
+        "price": [0,0]
     }
 ]
package/helper-apps/cortex-autogen/agents.py ADDED
@@ -0,0 +1,392 @@
+from autogen import AssistantAgent, UserProxyAgent, config_list_from_json
+from utils import publish_request_progress, zip_and_upload_tmp_folder
+from prompts import *
+from data_operations import store_in_mongo
+from search import search_index, index_message
+from config import *
+import os
+import logging
+import json
+import tempfile
+import time
+from datetime import datetime, timezone
+import autogen.coding
+from data_operations import check_for_human_input
+from agents_extra import process_helper_results
+from config import prompts
+import queue
+import threading
+
+shared_queues = {}
+def background_human_input_check(request_id):
+    while True:
+        human_input = check_for_human_input(request_id)
+        if human_input:
+            shared_queues[request_id].put(human_input)
+            if human_input in ["TERMINATE", "PAUSE"]:
+                break
+        time.sleep(1)
+
+
+def find_code_message(all_messages):
+    if not all_messages or len(all_messages) < 2:
+        return ""
+
+    failed = False
+    code_message = ""
+
+    for i in range(len(all_messages)):
+        current_message = all_messages[i].get('message') or all_messages[i].get('content', '')
+        failed = failed or "(execution failed)\n" in current_message
+
+        if not failed and "(execution failed)\n" in current_message:
+            failed = True
+
+        if failed:
+            if "exitcode: 0 (execution succeeded)" in current_message:
+                #grap 4 messages including the current one
+                messages = all_messages[i-4:i+1]
+                code_message = "\n".join([(msg['message'] or msg['content']) for msg in messages])
+                return code_message
+    return ""
+
+
+def is_termination_msg(m):
+    content = m.get("content", "").strip()
+    if not content or content.rstrip().endswith("TERMINATE") or content.startswith("exitcode: 0 (execution succeeded)"):
+        return True
+    return False
+
+
+#use this via chat() function
+def chat_with_agents(**kwargs):
+    prompt = kwargs.pop("prompt", None)
+    message = kwargs.pop("message", None)
+
+    if kwargs.pop("add_python_coder_prompt", True):
+        prompt += prompts.get("PYTHON_CODER_SYSTEM_MESSAGE")
+
+    if kwargs.pop("add_never_hallucinate_prompt", True):
+        prompt += prompts.get("NEVER_HALLUCINATE_SYSTEM_MESSAGE")
+
+    if kwargs.pop("add_current_datetime_prompt", True):
+        current_datetime = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
+        CURRENT_DATETIME_PROMPT = f"""
+        You know that current date and time is {current_datetime}.
+        """
+        prompt += CURRENT_DATETIME_PROMPT
+
+    if not message:
+        logging.warning("No message provided! Skipping chat!")
+        return
+    if not prompt:
+        logging.warning("No prompt provided!")
+
+    original_request_message = kwargs.pop("original_request_message", None)
+    original_request_message_data = kwargs.pop("original_request_message_data", None)
+
+    llm_config = kwargs.pop("llm_config", None)
+    request_id = kwargs.pop("request_id", None)
+    chat_publish_progress = kwargs.pop("chat_publish_progress", None)
+
+    request_reply = kwargs.pop("request_reply", None)
+    silent = kwargs.pop("silent", True)
+
+    recipient = kwargs.pop("recipient", None)
+
+    return_type = kwargs.pop("return_type", "last")
+
+    all_messages = kwargs.pop("all_messages", None)
+    if all_messages is None:
+        logging.warning("No all_messages list provided!")
+        all_messages = []
+
+    with tempfile.TemporaryDirectory() as temp_dir:
+        code_executor = autogen.coding.LocalCommandLineCodeExecutor(work_dir=temp_dir,timeout=300)
+
+        assistant = AssistantAgent("assistant", llm_config=llm_config, system_message=prompt, is_termination_msg=is_termination_msg)
+        user_proxy = UserProxyAgent("user_proxy", human_input_mode="NEVER", max_consecutive_auto_reply=20,
+                                    code_execution_config={"executor": code_executor},
+                                    is_termination_msg=is_termination_msg)
+
+        def create_send_function(agent):
+            nonlocal request_reply, silent, request_id, chat_publish_progress, all_messages, return_type, message, recipient, user_proxy, assistant
+            original_send = agent.send
+            def send(message, recipient, request_reply=None, silent=True):
+                return logged_send(agent, original_send, message, recipient, request_reply, silent, request_id, chat_publish_progress, all_messages)
+            return send
+
+        assistant.send = create_send_function(assistant)
+        user_proxy.send = create_send_function(user_proxy)
+
+        chat_result = user_proxy.initiate_chat(
+            assistant,
+            message=message,
+        )
+
+
+
+        code_msg = find_code_message(all_messages)
+        if code_msg:
+            try:
+                corrector = AssistantAgent("code_corrector", llm_config=llm_config, system_message=prompts.get("CODE_CORRECTOR_PROMPTER_SYSTEM_MESSAGE"))
+                corrector_result = corrector.generate_reply(messages=[{"content": code_msg, "role":"user"}])
+
+                logging.info(f"Code corrector result: {corrector_result}")
+
+                index_message({
+                    "requestId": request_id,
+                    "content":corrector_result, #code_msg,
+                    "task": original_request_message,
+                    "contextId": original_request_message_data.get("contextId"),
+                    "requestId": request_id,
+                })
+            except Exception as e:
+                logging.error(f"Error extracting code corrector result: {e}")
+
+        if return_type == "chat_history":
+            return chat_result.chat_history
+        if return_type == "chat_result":
+            return chat_result
+        if return_type == "summary":
+            return chat_result.summary
+        if return_type == "last":
+            return chat_result.chat_history[-1]["content"] or chat_result.chat_history[-2]["content"]
+        if return_type == "all_as_str":
+            return "\n".join([msg['content'] for msg in chat_result.chat_history])
+
+        return chat_result
+
+
+
+def logged_send(sender, original_send, message, recipient, request_reply=None, silent=True, request_id=None, chat_publish_progress=None, all_messages=None):
+    if not message:
+        logging.info("Empty message, skipping!")
+        return
+    if not request_id:
+        logging.warning("No request_id provided!")
+
+    all_messages.append({
+        "sender": sender.name,
+        "message": message
+    })
+
+    if chat_publish_progress:
+        chat_publish_progress({
+            "info": message
+        })
+    else:
+        logging.warning("No chat_publish_progress function provided!")
+        logging.log(logging.INFO, message)
+
+
+    if request_id in shared_queues and not shared_queues[request_id].empty():
+        human_input = shared_queues[request_id].get()
+        if human_input:
+            if human_input == "TERMINATE":
+                logging.info("Terminating conversation")
+                raise Exception("Conversation terminated by user")
+            elif human_input == "PAUSE":
+                logging.info("Pausing conversation")
+                pause_start = time.time()
+                while time.time() - pause_start < 60*15: # 15 minutes pause timeout
+                    time.sleep(10)
+                    new_input = check_for_human_input(request_id)
+                    if new_input:
+                        logging.info(f"Resuming conversation with human input: {new_input}")
+                        return logged_send(sender, original_send, new_input, recipient, request_reply, silent)
+                logging.info("Pause timeout, ending conversation")
+                raise Exception("Conversation ended due to pause timeout")
+            logging.info(f"Human input to {recipient.name}: {human_input}")
+            return original_send(human_input, recipient, request_reply, silent)
+
+    logging.info(f"Message from {sender.name} to {recipient.name}: {message}")
+
+    return original_send(message, recipient, request_reply, silent)
+
+
+def process_message(original_request_message_data, original_request_message_data_obj):
+    try:
+        all_messages = []
+        started_at = datetime.now()
+        request_id = original_request_message_data.get('requestId') or original_request_message_data.id
+        original_request_message = original_request_message_data['message']
+
+        shared_queues[request_id] = queue.Queue()
+        thread = threading.Thread(target=background_human_input_check, args=(request_id,))
+        thread.daemon = True
+        thread.start()
+
+        final_msg = process_message_safe(original_request_message_data, original_request_message_data_obj, original_request_message, all_messages, request_id, started_at)
+
+        finalData = {
+            "requestId": request_id,
+            "requestMessage": original_request_message_data.get("message"),
+            "progress": 1,
+            "data": final_msg,
+            "contextId": original_request_message_data.get("contextId"),
+            "conversation": all_messages,
+            "createdAt": datetime.now(timezone.utc).isoformat(),
+            "insertionTime": original_request_message_data_obj.insertion_time.astimezone(timezone.utc).isoformat(),
+            "startedAt": started_at.astimezone(timezone.utc).isoformat(),
+        }
+
+        publish_request_progress(finalData)
+        store_in_mongo(finalData)
+
+
+    except Exception as e:
+        logging.error(f"Error processing message: {str(e)}")
+        try:
+            if request_id:
+                publish_request_progress({
+                    "requestId": request_id,
+                    "progress": 1,
+                    "error": str(e),
+                    "data": str(e),
+                })
+                store_in_mongo({
+                    "requestId": request_id,
+                    "requestMessage": original_request_message_data.get("message"),
+                    "progress": 1,
+                    "error": str(e),
+                    "data": str(e),
+                    "contextId": original_request_message_data.get("contextId"),
+                    "conversation": all_messages,
+                    "createdAt": datetime.now(timezone.utc).isoformat(),
+                    "insertionTime": original_request_message_data_obj.insertion_time.astimezone(timezone.utc).isoformat(),
+                    "startedAt": started_at.astimezone(timezone.utc).isoformat(),
+                })
+        except Exception as e:
+            logging.error(f"Error processing message finish publish&store: {str(e)}")
+
+
+
+def process_message_safe(original_request_message_data, original_request_message_data_obj, original_request_message, all_messages, request_id, started_at):
+    config_list = config_list_from_json(env_or_file="OAI_CONFIG_LIST")
+    llm_config = {
+        "config_list": config_list,
+        "base_url": os.environ.get("CORTEX_API_BASE_URL"),
+        "api_key": os.environ.get("CORTEX_API_KEY"),
+        "cache_seed": None,
+        "timeout": 600 * 2
+    }
+
+    total_messages = 30 # set this for updates % progress's max just a guess
+    message_count = 0 # updates % progress
+
+    def chat_publish_progress(data):
+        nonlocal message_count
+        message = data.get("message") or data.get("info") or data.get("content")
+        message_count += 1
+        progress = min(message_count / total_messages, 1)
+        publish_request_progress({
+            "requestId": request_id,
+            "progress": progress,
+            "info": message
+        })
+
+    def chat(*args,**kwargs):
+        nonlocal llm_config, request_id, chat_publish_progress, all_messages, original_request_message_data, original_request_message_data_obj, original_request_message
+        def get_arg_or_kwarg(name, pos, args, kwargs):
+            if args and kwargs.get(name):
+                logging.warning(f"Both positional argument and keyword argument given for {name}, using keyword argument")
+            if kwargs.get(name):
+                return kwargs.get(name)
+            if len(args) > pos:
+                return args[pos]
+            return None
+
+        kwargs["prompt"] = get_arg_or_kwarg("prompt", 0, args, kwargs)
+        kwargs["message"] = get_arg_or_kwarg("message", 1, args, kwargs)
+        kwargs["llm_config"] = llm_config
+        kwargs["request_id"] = request_id
+        kwargs["chat_publish_progress"] = chat_publish_progress
+        kwargs["all_messages"] = all_messages
+        kwargs["original_request_message_data"] = original_request_message_data
+        kwargs["original_request_message_data_obj"] = original_request_message_data_obj
+        kwargs["original_request_message"] = original_request_message
+
+        return chat_with_agents(**kwargs)
+
+
+    preparer = AssistantAgent("preparer", llm_config=llm_config, system_message=prompts.get("PLANNER_SYSTEM_MESSAGE"))
+    prepared_plan = preparer.generate_reply(messages=[{"content": original_request_message, "role":"user"}])
+
+    helper_decider = AssistantAgent("helper_decider", llm_config=llm_config, system_message=prompts.get("HELPER_DECIDER_SYSTEM_MESSAGE"))
+    helper_decider_result = helper_decider.generate_reply(messages=[{"content": original_request_message, "role":"user"}])
+
+    try:
+        helper_decider_result = json.loads(helper_decider_result)
+        logging.info(f"Helper decider result: {helper_decider_result}")
+    except Exception as e:
+        logging.error(f"Error parsing helper decider result: {e}")
+        helper_decider_result = {}
+
+    context = ""
+
+    code_keywords = original_request_message_data.get("keywords") or original_request_message_data.get("message")
+    if code_keywords:
+        context += f"\n#SECTION_OF_OLD_TASK_CODE_INFO_START:\nHere's code/info from old-tasks that might help:\n{search_index(code_keywords)}\n#SECTION_OF_OLD_TASK_CODE_INFO_END\n"
+
+    if helper_decider_result.get("bing_search"):
+        bing_search_message = f"Search Bing for more information on the task: {original_request_message}, prepared draft plan to solve task: {prepared_plan}"
+        result = chat(prompts.get("BING_SEARCH_PROMPT"), bing_search_message)
+        context += f"\n\nBing search results: {result}"
+
+    if helper_decider_result.get("cognitive_search"):
+        cognitive_search_message = f"Search cognitive index for more information on the task: {original_request_message}."
+        result = chat(prompts.get("COGNITIVE_SEARCH_PROMPT"), cognitive_search_message)
+        context += f"\n\nCognitive search results: {result}"
+
+
+    context = process_helper_results(helper_decider_result, original_request_message, context, chat)
+
+    context_message = ""
+    if context:
+        context_message = f"\n\nHere is some data from search results and helpful stuff already collected and worked on, use if helpful:\n{context}\n\n"
+
+
+    check_message = f"""
+    Task: \n{original_request_message}\n\n
+    Context to check if task can be considered completed: {context_message}\n\n
+    """
+
+    task_completion_checker = AssistantAgent("task_completion_checker", llm_config=llm_config, system_message=TASK_COMPLETE_CHECKER_SYSTEM_MESSAGE)
+    check_result = task_completion_checker.generate_reply(messages=[{"content": check_message, "role":"user"}])
+
+    chat_result = None
+    if check_result != "DONE":
+        message = f"""
+        Your task is to complete the following: \n{original_request_message}\n\n"
+        Here is a draft plan to solve the task: \n{prepared_plan}\n\n
+        {context_message}
+        You don't have to follow the plan, it's just a suggestion.
+        Do your best to complete the task, user expects you to continue original task request conversation.
+        """
+        chat_result = chat(prompts.get("GENERIC_ASSISTANT_SYSTEM_MESSAGE"), message, return_type="chat_result")
+
+    presenter = AssistantAgent("presenter", llm_config=llm_config, system_message=prompts.get("PRESENTER_SYSTEM_MESSAGE"))
+    if chat_result is not None:
+        presenter_messages_context = "\n\n".join([msg['content'] for msg in chat_result.chat_history])
+    else:
+        presenter_messages_context = context_message
+    presenter_message = f"""
+    Here is everything done in order to complete the task: {presenter_messages_context}\n\n
+    Original task was: {original_request_message}\n\n
+    Reply to it with task result, do not forget that user expects you continue original task request conversation:\n\n
+    """
+
+    presenter_result = presenter.generate_reply(messages=[{"content": presenter_message, "role":"user"}])
+
+    final_msg = presenter_result
+
+
+
+    zip_url = None # TODO: Implement if needed
+    if zip_url:
+        final_msg += f"\n\n[Download all files of this task]({zip_url})"
+
+    print(f"Task completed, task: {original_request_message}, result: {final_msg}")
+    logging.info(f"Task completed, task:\n{original_request_message},\nresult: {final_msg}")
+    return final_msg
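
Note: this diff does not show how task messages are produced. For orientation only, here is a minimal sketch (Python) of a producer that enqueues a task in the dict shape process_message() reads above (requestId, message, contextId, optional keywords). The queue name and the base64-JSON encoding are assumptions inferred from the human-input queue handling elsewhere in this diff, not confirmed by the package.

    # Hypothetical producer sketch -- field names mirror process_message();
    # the queue name and base64-JSON encoding are assumed, not shown in this diff.
    import base64
    import json
    import os
    from azure.storage.queue import QueueClient

    task = {
        "requestId": "example-request-1",    # hypothetical id
        "message": "Plot the last 30 days of AAPL closing prices",
        "contextId": "example-context-1",    # hypothetical context id
        "keywords": "stock plot matplotlib", # seeds the search_index() lookup
    }
    client = QueueClient.from_connection_string(
        os.environ["AZURE_STORAGE_CONNECTION_STRING"],
        "autogen-message-queue",  # assumed queue name
    )
    client.send_message(base64.b64encode(json.dumps(task).encode("utf-8")).decode("utf-8"))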
package/helper-apps/cortex-autogen/agents_extra.py ADDED
@@ -0,0 +1,14 @@
+from config import prompts
+from datetime import datetime
+
+def process_helper_results(helper_decider_result, original_request_message, context, chat):
+    def add_to_context(result, prefix):
+        nonlocal context
+        context += f"\n\n{prefix}: {result}"
+
+    if helper_decider_result.get("sql"):
+        sql_message = f"Use SQL to help solving task, provide any related data and code that may help: {original_request_message}."
+        result = chat(prompts.get("SQL_PROMPT"), sql_message, return_type="all_as_str")
+        add_to_context(result, "SQL results")
+
+    return context
package/helper-apps/cortex-autogen/config.py ADDED
@@ -0,0 +1,18 @@
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+
+AZURE_STORAGE_CONNECTION_STRING = os.environ["AZURE_STORAGE_CONNECTION_STRING"]
+HUMAN_INPUT_QUEUE_NAME = os.environ.get("HUMAN_INPUT_QUEUE_NAME", "autogen-human-input-queue")
+REDIS_CONNECTION_STRING = os.environ['REDIS_CONNECTION_STRING']
+REDIS_CHANNEL = 'requestProgress'
+AZURE_BLOB_CONTAINER = os.environ.get("AZURE_BLOB_CONTAINER", "autogen-uploads")
+
+
+# Prompts
+import prompts
+import prompts_extra
+
+prompts = {**prompts.__dict__, **prompts_extra.__dict__}
+
package/helper-apps/cortex-autogen/data_operations.py ADDED
@@ -0,0 +1,29 @@
+from azure.storage.queue import QueueClient
+import pymongo
+import os
+import logging
+import json
+import base64
+from config import AZURE_STORAGE_CONNECTION_STRING, HUMAN_INPUT_QUEUE_NAME
+
+human_input_queue_client = QueueClient.from_connection_string(AZURE_STORAGE_CONNECTION_STRING, HUMAN_INPUT_QUEUE_NAME)
+
+def store_in_mongo(data):
+    try:
+        if 'MONGO_URI' in os.environ:
+            client = pymongo.MongoClient(os.environ['MONGO_URI'])
+            collection = client.get_default_database()[os.environ.get('MONGO_COLLECTION_NAME', 'autogenruns')]
+            collection.insert_one(data)
+        else:
+            logging.warning("MONGO_URI not found in environment variables")
+    except Exception as e:
+        logging.error(f"An error occurred while storing data in MongoDB: {str(e)}")
+
+def check_for_human_input(request_id):
+    messages = human_input_queue_client.receive_messages()
+    for message in messages:
+        content = json.loads(base64.b64decode(message.content).decode('utf-8'))
+        if content['codeRequestId'] == request_id:
+            human_input_queue_client.delete_message(message)
+            return content['text']
+    return None
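
For context, check_for_human_input() above expects base64-encoded JSON messages carrying codeRequestId and text, where the text values TERMINATE and PAUSE are the control words agents.py reacts to. A minimal companion sketch mirroring the decode path shown in the diff (illustration only, not confirmed tooling):

    # Hypothetical sender sketch for the human-input queue; the payload shape
    # mirrors the decode in check_for_human_input() above.
    import base64
    import json
    import os
    from azure.storage.queue import QueueClient

    client = QueueClient.from_connection_string(
        os.environ["AZURE_STORAGE_CONNECTION_STRING"],
        os.environ.get("HUMAN_INPUT_QUEUE_NAME", "autogen-human-input-queue"),
    )
    payload = {"codeRequestId": "example-request-1", "text": "PAUSE"}  # or "TERMINATE"
    client.send_message(base64.b64encode(json.dumps(payload).encode("utf-8")).decode("utf-8"))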
package/helper-apps/cortex-autogen/function_app.py CHANGED
@@ -4,14 +4,17 @@ import json
 from azure.storage.queue import QueueClient
 import os
 import redis
-from myautogen import process_message
+from agents import process_message
 import subprocess
 import sys
+import config
+
+logging.getLogger().setLevel(logging.WARNING)
 
 def install_packages():
     subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"])
 
-install_packages()
+# install_packages()
 
 app = func.FunctionApp()
 
@@ -31,6 +34,6 @@ def queue_trigger(msg: func.QueueMessage):
         if "requestId" not in message_data:
             message_data['requestId'] = msg.id
         process_message(message_data, msg)
-
+
     except Exception as e:
         logging.error(f"Error processing message: {str(e)}")
package/helper-apps/cortex-autogen/main.py CHANGED
@@ -2,8 +2,9 @@ import os
 from azure.storage.queue import QueueClient
 import base64
 import json
-from myautogen import process_message
 import time
+from agents import process_message
+
 
 def main():
     print("Starting message processing loop")
@@ -13,9 +14,8 @@ def main():
     queue_client = QueueClient.from_connection_string(connection_string, queue_name)
 
     attempts = 0
-    max_attempts = 100
+    max_attempts = 1000
 
-
     while attempts < max_attempts:
         messages = queue_client.receive_messages(messages_per_page=1)
 
@@ -32,7 +32,7 @@ def main():
         attempts += 1
         time.sleep(1) # Wait for 1 second before checking again
 
-    print("No messages received after 100 attempts. Exiting.")
+    print(f"No messages received after {max_attempts} attempts. Exiting.")
 
 if __name__ == "__main__":
     main()