@aj-archipelago/cortex 1.4.2 → 1.4.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. package/README.md +1 -0
  2. package/config.js +1 -1
  3. package/helper-apps/cortex-autogen2/.dockerignore +1 -0
  4. package/helper-apps/cortex-autogen2/Dockerfile +6 -10
  5. package/helper-apps/cortex-autogen2/Dockerfile.worker +2 -0
  6. package/helper-apps/cortex-autogen2/agents.py +203 -2
  7. package/helper-apps/cortex-autogen2/main.py +1 -1
  8. package/helper-apps/cortex-autogen2/pyproject.toml +12 -0
  9. package/helper-apps/cortex-autogen2/requirements.txt +14 -0
  10. package/helper-apps/cortex-autogen2/services/redis_publisher.py +1 -1
  11. package/helper-apps/cortex-autogen2/services/run_analyzer.py +1 -1
  12. package/helper-apps/cortex-autogen2/task_processor.py +431 -229
  13. package/helper-apps/cortex-autogen2/test_entity_fetcher.py +305 -0
  14. package/helper-apps/cortex-autogen2/tests/README.md +240 -0
  15. package/helper-apps/cortex-autogen2/tests/TEST_REPORT.md +342 -0
  16. package/helper-apps/cortex-autogen2/tests/__init__.py +8 -0
  17. package/helper-apps/cortex-autogen2/tests/analysis/__init__.py +1 -0
  18. package/helper-apps/cortex-autogen2/tests/analysis/improvement_suggester.py +224 -0
  19. package/helper-apps/cortex-autogen2/tests/analysis/trend_analyzer.py +211 -0
  20. package/helper-apps/cortex-autogen2/tests/cli/__init__.py +1 -0
  21. package/helper-apps/cortex-autogen2/tests/cli/run_tests.py +296 -0
  22. package/helper-apps/cortex-autogen2/tests/collectors/__init__.py +1 -0
  23. package/helper-apps/cortex-autogen2/tests/collectors/log_collector.py +252 -0
  24. package/helper-apps/cortex-autogen2/tests/collectors/progress_collector.py +182 -0
  25. package/helper-apps/cortex-autogen2/tests/conftest.py +15 -0
  26. package/helper-apps/cortex-autogen2/tests/database/__init__.py +1 -0
  27. package/helper-apps/cortex-autogen2/tests/database/repository.py +501 -0
  28. package/helper-apps/cortex-autogen2/tests/database/schema.sql +108 -0
  29. package/helper-apps/cortex-autogen2/tests/evaluators/__init__.py +1 -0
  30. package/helper-apps/cortex-autogen2/tests/evaluators/llm_scorer.py +294 -0
  31. package/helper-apps/cortex-autogen2/tests/evaluators/prompts.py +250 -0
  32. package/helper-apps/cortex-autogen2/tests/evaluators/wordcloud_validator.py +168 -0
  33. package/helper-apps/cortex-autogen2/tests/metrics/__init__.py +1 -0
  34. package/helper-apps/cortex-autogen2/tests/metrics/collector.py +155 -0
  35. package/helper-apps/cortex-autogen2/tests/orchestrator.py +576 -0
  36. package/helper-apps/cortex-autogen2/tests/test_cases.yaml +279 -0
  37. package/helper-apps/cortex-autogen2/tests/test_data.db +0 -0
  38. package/helper-apps/cortex-autogen2/tests/utils/__init__.py +3 -0
  39. package/helper-apps/cortex-autogen2/tests/utils/connectivity.py +112 -0
  40. package/helper-apps/cortex-autogen2/tools/azure_blob_tools.py +74 -24
  41. package/helper-apps/cortex-autogen2/tools/entity_api_registry.json +38 -0
  42. package/helper-apps/cortex-autogen2/tools/file_tools.py +1 -1
  43. package/helper-apps/cortex-autogen2/tools/search_tools.py +436 -238
  44. package/helper-apps/cortex-file-handler/package-lock.json +2 -2
  45. package/helper-apps/cortex-file-handler/package.json +1 -1
  46. package/helper-apps/cortex-file-handler/scripts/setup-test-containers.js +4 -5
  47. package/helper-apps/cortex-file-handler/src/blobHandler.js +36 -144
  48. package/helper-apps/cortex-file-handler/src/services/FileConversionService.js +5 -3
  49. package/helper-apps/cortex-file-handler/src/services/storage/AzureStorageProvider.js +34 -1
  50. package/helper-apps/cortex-file-handler/src/services/storage/GCSStorageProvider.js +22 -0
  51. package/helper-apps/cortex-file-handler/src/services/storage/LocalStorageProvider.js +28 -1
  52. package/helper-apps/cortex-file-handler/src/services/storage/StorageFactory.js +29 -4
  53. package/helper-apps/cortex-file-handler/src/services/storage/StorageProvider.js +11 -0
  54. package/helper-apps/cortex-file-handler/src/services/storage/StorageService.js +1 -1
  55. package/helper-apps/cortex-file-handler/tests/blobHandler.test.js +3 -2
  56. package/helper-apps/cortex-file-handler/tests/checkHashShortLived.test.js +8 -1
  57. package/helper-apps/cortex-file-handler/tests/containerConversionFlow.test.js +5 -2
  58. package/helper-apps/cortex-file-handler/tests/containerNameParsing.test.js +14 -7
  59. package/helper-apps/cortex-file-handler/tests/containerParameterFlow.test.js +5 -2
  60. package/helper-apps/cortex-file-handler/tests/storage/StorageFactory.test.js +31 -19
  61. package/package.json +1 -1
  62. package/server/modelExecutor.js +4 -0
  63. package/server/plugins/claude4VertexPlugin.js +540 -0
  64. package/server/plugins/openAiWhisperPlugin.js +43 -2
  65. package/tests/integration/rest/vendors/claude_streaming.test.js +121 -0
  66. package/tests/unit/plugins/claude4VertexPlugin.test.js +462 -0
  67. package/tests/unit/plugins/claude4VertexToolConversion.test.js +413 -0
  68. package/helper-apps/cortex-autogen/.funcignore +0 -8
  69. package/helper-apps/cortex-autogen/Dockerfile +0 -10
  70. package/helper-apps/cortex-autogen/OAI_CONFIG_LIST +0 -6
  71. package/helper-apps/cortex-autogen/agents.py +0 -493
  72. package/helper-apps/cortex-autogen/agents_extra.py +0 -14
  73. package/helper-apps/cortex-autogen/config.py +0 -18
  74. package/helper-apps/cortex-autogen/data_operations.py +0 -29
  75. package/helper-apps/cortex-autogen/function_app.py +0 -44
  76. package/helper-apps/cortex-autogen/host.json +0 -15
  77. package/helper-apps/cortex-autogen/main.py +0 -38
  78. package/helper-apps/cortex-autogen/prompts.py +0 -196
  79. package/helper-apps/cortex-autogen/prompts_extra.py +0 -5
  80. package/helper-apps/cortex-autogen/requirements.txt +0 -9
  81. package/helper-apps/cortex-autogen/search.py +0 -85
  82. package/helper-apps/cortex-autogen/test.sh +0 -40
  83. package/helper-apps/cortex-autogen/tools/sasfileuploader.py +0 -66
  84. package/helper-apps/cortex-autogen/utils.py +0 -88
  85. package/helper-apps/cortex-autogen2/DigiCertGlobalRootCA.crt.pem +0 -22
  86. package/helper-apps/cortex-autogen2/poetry.lock +0 -3652
package/helper-apps/cortex-autogen/agents.py (deleted)
@@ -1,493 +0,0 @@
- from autogen import AssistantAgent, UserProxyAgent, config_list_from_json
- from utils import publish_request_progress, zip_and_upload_tmp_folder
- from prompts import *
- from data_operations import store_in_mongo
- from search import search_index, index_message
- from config import *
- import os
- import logging
- import json
- import tempfile
- import time
- from datetime import datetime, timezone
- import autogen.coding
- from data_operations import check_for_human_input
- from agents_extra import process_helper_results
- from config import prompts
- import queue
- import threading
- import shutil
-
- human_input_queues = {}
- human_input_text_queues = {}
- request_stored_message_queues = {}
- def background_human_input_check(request_id):
-     while True:
-         human_input = check_for_human_input(request_id)
-         if human_input:
-             human_input_queues[request_id].put(human_input)
-             if human_input in ["TERMINATE", "PAUSE"]:
-                 break
-             else:
-                 if not human_input_text_queues.get(request_id):
-                     human_input_text_queues[request_id] = queue.Queue()
-                 human_input_text_queues[request_id].put(human_input)
-         time.sleep(1)
-
-
- def get_message_with_user_input(message, request_id):
-     human_input_text = ""
-     if human_input_text_queues.get(request_id):
-         while not human_input_text_queues[request_id].empty():
-             human_input_text += " " + human_input_text_queues[request_id].get()
-     return message + human_input_text
-
- def get_request_temp_dir(request_id):
-     if not request_id:
-         logging.warning("No request_id provided!")
-         return None
-     temp_dir_name = f"cortex_autogen/{request_id}"
-     temp_dir = os.path.join(tempfile.gettempdir(), temp_dir_name)
-
-     try:
-         os.makedirs(temp_dir, exist_ok=True)
-     except OSError as e:
-         logging.error(f"Error creating temporary directory: {e}")
-         return None
-
-     return temp_dir
-
- def find_code_message(all_messages):
-     if not all_messages or len(all_messages) < 2:
-         return ""
-
-     failed = False
-     code_message = ""
-
-     for i in range(len(all_messages)):
-         current_message = all_messages[i].get('message') or all_messages[i].get('content', '')
-         failed = failed or "(execution failed)\n" in current_message
-
-         if not failed and "(execution failed)\n" in current_message:
-             failed = True
-
-         if failed:
-             if "exitcode: 0 (execution succeeded)" in current_message:
-                 #grap 4 messages including the current one
-                 messages = all_messages[i-4:i+1]
-                 code_message = "\n".join([(msg['message'] or msg['content']) for msg in messages])
-                 return code_message
-     return ""
-
-
- def is_termination_msg(m):
-     content = m.get("content", "").strip()
-     if not content or content.rstrip().endswith("TERMINATE") or content.startswith("exitcode: 0 (execution succeeded)"):
-         return True
-     return False
-
-
- #use this via chat() function
- def chat_with_agents(**kwargs):
-     prompt = kwargs.pop("prompt", None)
-     message = kwargs.pop("message", None)
-
-     if kwargs.pop("add_python_coder_prompt", True):
-         prompt += prompts.get("PYTHON_CODER_SYSTEM_MESSAGE")
-
-     if kwargs.pop("add_never_hallucinate_prompt", True):
-         prompt += prompts.get("NEVER_HALLUCINATE_SYSTEM_MESSAGE")
-
-     if kwargs.pop("add_current_datetime_prompt", True):
-         current_datetime = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
-         CURRENT_DATETIME_PROMPT = f"""
-         You know that current date and time is {current_datetime}.
-         """
-         prompt += CURRENT_DATETIME_PROMPT
-
-     if not message:
-         logging.warning("No message provided! Skipping chat!")
-         return
-     if not prompt:
-         logging.warning("No prompt provided!")
-
-     original_request_message = kwargs.pop("original_request_message", None)
-     original_request_message_data = kwargs.pop("original_request_message_data", None)
-
-
-     llm_config = kwargs.pop("llm_config", None)
-     request_id = kwargs.pop("request_id", None)
-     chat_publish_progress = kwargs.pop("chat_publish_progress", None)
-
-     request_reply = kwargs.pop("request_reply", None)
-     silent = kwargs.pop("silent", True)
-
-     recipient = kwargs.pop("recipient", None)
-
-     return_type = kwargs.pop("return_type", "last")
-
-     all_messages = kwargs.pop("all_messages", None)
-     if all_messages is None:
-         logging.warning("No all_messages list provided!")
-         all_messages = []
-
-     # with tempfile.TemporaryDirectory() as temp_dir:
-     if True:
-         #mark the temp_dir for later upload
-         temp_dir = get_request_temp_dir(request_id)
-         code_executor = autogen.coding.LocalCommandLineCodeExecutor(work_dir=temp_dir,timeout=300)
-
-         assistant = AssistantAgent("assistant", llm_config=llm_config, system_message=prompt, is_termination_msg=is_termination_msg)
-         user_proxy = UserProxyAgent("user_proxy", human_input_mode="NEVER", max_consecutive_auto_reply=20,
-                                     code_execution_config={"executor": code_executor},
-                                     is_termination_msg=is_termination_msg)
-
-         def create_send_function(agent):
-             nonlocal request_reply, silent, request_id, chat_publish_progress, all_messages, return_type, message, recipient, user_proxy, assistant
-             original_send = agent.send
-             def send(message, recipient, request_reply=None, silent=True):
-                 return logged_send(agent, original_send, message, recipient, request_reply, silent, request_id, chat_publish_progress, all_messages)
-             return send
-
-         assistant.send = create_send_function(assistant)
-         user_proxy.send = create_send_function(user_proxy)
-
-         message_with_possible_human_input = get_message_with_user_input(message, request_id)
-
-         chat_result = user_proxy.initiate_chat(
-             assistant,
-             message=message_with_possible_human_input,
-         )
-
-
-         code_msg = find_code_message(all_messages)
-         if code_msg:
-             try:
-                 corrector = AssistantAgent("code_corrector", llm_config=llm_config, system_message=prompts.get("CODE_CORRECTOR_PROMPTER_SYSTEM_MESSAGE"))
-                 corrector_result = corrector.generate_reply(messages=[{"content": code_msg, "role":"user"}])
-
-                 logging.info(f"Code corrector result: {corrector_result}")
-
-                 index_message({
-                     "requestId": request_id,
-                     "content":corrector_result, #code_msg,
-                     "task": get_message_with_user_input(original_request_message,request_id),
-                     "contextId": original_request_message_data.get("contextId"),
-                 })
-             except Exception as e:
-                 logging.error(f"Error extracting code corrector result: {e}")
-
-         try:
-             request_stored_message_queues[request_id].put(all_messages[-2]["message"] or all_messages[-2]["content"])
-             request_stored_message_queues[request_id].put(all_messages[-1]["message"] or all_messages[-1]["content"])
-         except Exception as e:
-             logging.error(f"Error storing messages in queue: {e}")
-
-
-
-         if return_type == "chat_history":
-             return chat_result.chat_history
-         if return_type == "chat_result":
-             return chat_result
-         if return_type == "summary":
-             return chat_result.summary
-         if return_type == "last":
-             return chat_result.chat_history[-1]["content"] or chat_result.chat_history[-2]["content"]
-         if return_type == "all_as_str":
-             return "\n".join([msg['content'] for msg in chat_result.chat_history])
-
-         return chat_result
-
-
- def logged_send(sender, original_send, message, recipient, request_reply=None, silent=True, request_id=None, chat_publish_progress=None, all_messages=None):
-     if not message:
-         logging.info("Empty message, skipping!")
-         return
-     if not request_id:
-         logging.warning("No request_id provided!")
-
-     all_messages.append({
-         "sender": sender.name,
-         "message": message
-     })
-
-     if chat_publish_progress:
-         chat_publish_progress({
-             "info": message
-         })
-     else:
-         logging.warning("No chat_publish_progress function provided!")
-         logging.log(logging.INFO, message)
-
-
-     if request_id in human_input_queues and not human_input_queues[request_id].empty():
-         human_input = human_input_queues[request_id].get()
-         if human_input:
-             if human_input == "TERMINATE":
-                 logging.info("Terminating conversation")
-                 raise Exception("Conversation terminated by user")
-             elif human_input == "PAUSE":
-                 logging.info("Pausing conversation")
-                 pause_start = time.time()
-                 while time.time() - pause_start < 60*15: # 15 minutes pause timeout
-                     time.sleep(10)
-                     new_input = check_for_human_input(request_id)
-                     if new_input:
-                         logging.info(f"Resuming conversation with human input: {new_input}")
-                         return logged_send(sender, original_send, new_input, recipient, request_reply, silent)
-                 logging.info("Pause timeout, ending conversation")
-                 raise Exception("Conversation ended due to pause timeout")
-
-             #if not terminate or pause, then it's text input from human
-             logging.info(f"Human input to {recipient.name}: {human_input}")
-             #need to update original message with human input
-             new_input = message + human_input
-             return original_send(new_input, recipient, request_reply, silent)
-
-     logging.info(f"Message from {sender.name} to {recipient.name}: {message}")
-
-     return original_send(message, recipient, request_reply, silent)
-
-
- def process_message(original_request_message_data, original_request_message_data_obj, first_run=True):
-     try:
-         all_messages = []
-         started_at = datetime.now()
-         request_id = original_request_message_data.get('requestId') or original_request_message_data.id
-         original_request_message = original_request_message_data['message']
-
-         human_input_queues[request_id] = queue.Queue()
-         human_input_text_queues[request_id] = queue.Queue()
-         if not request_stored_message_queues.get(request_id):
-             request_stored_message_queues[request_id] = queue.Queue()
-         request_stored_message_queues[request_id].put(original_request_message)
-
-         if first_run:
-             thread = threading.Thread(target=background_human_input_check, args=(request_id,))
-             thread.daemon = True
-             thread.start()
-
-         final_msg = process_message_safe(original_request_message_data, original_request_message_data_obj, original_request_message, all_messages, request_id, started_at)
-
-         finalData = {
-             "requestId": request_id,
-             "requestMessage": original_request_message_data.get("message"),
-             "progress": 1,
-             "data": final_msg,
-             "contextId": original_request_message_data.get("contextId"),
-             "conversation": all_messages,
-             "createdAt": datetime.now(timezone.utc).isoformat(),
-             "insertionTime": original_request_message_data_obj.insertion_time.astimezone(timezone.utc).isoformat(),
-             "startedAt": started_at.astimezone(timezone.utc).isoformat(),
-             "tool": "{\"toolUsed\":\"coding\"}"
-         }
-
-         publish_request_progress(finalData)
-         store_in_mongo(finalData)
-
-         #wait for any human input before terminating
-         #if you receive human input start the conversation again
-         for i in range(31*6): # 30+1 minutes
-             if human_input_queues[request_id].empty():
-                 time.sleep(1)
-             else:
-                 human_input = human_input_queues[request_id].get()
-                 if human_input:
-                     logging.info(f"Human input to assistant: {human_input}")
-                     #update request with human input
-                     new_message_data = original_request_message_data.copy()
-
-                     old_task = original_request_message_data.get("message")
-
-                     #get request_stored_message_queues
-                     old_messages = []
-                     if request_stored_message_queues.get(request_id):
-                         while not request_stored_message_queues[request_id].empty():
-                             old_messages.append(request_stored_message_queues[request_id].get())
-
-
-                     #convert to text, limit to max 2000 characters, keep most recent
-                     old_messages_text = "\n".join(old_messages)
-                     old_messages_text = old_messages_text[-2000:]
-
-
-                     new_message_data['message'] = f"NEW TASK: {human_input}\n\nPREV TASK: {old_task} STUFF DONE IN PREV TASK: {old_messages_text}\n\n{final_msg}\n\n"
-                     new_message_data['keywords'] = ''
-                     # new_message_data_obj = original_request_message_data_obj.copy()
-                     # new_message_data_obj['message'] = new_message_data['message']
-
-
-
-                     process_message(new_message_data, original_request_message_data_obj, first_run=False)
-                     return
-
-         logging.info(f"Task completed, task:\n{get_message_with_user_input(original_request_message,request_id)},\nresult: {final_msg}")
-
-
-     except Exception as e:
-         logging.error(f"Error processing message: {str(e)}")
-         try:
-             if request_id:
-                 publish_request_progress({
-                     "requestId": request_id,
-                     "progress": 1,
-                     "error": str(e),
-                     "data": str(e),
-                 })
-                 store_in_mongo({
-                     "requestId": request_id,
-                     "requestMessage": original_request_message_data.get("message"),
-                     "progress": 1,
-                     "error": str(e),
-                     "data": str(e),
-                     "contextId": original_request_message_data.get("contextId"),
-                     "conversation": all_messages,
-                     "createdAt": datetime.now(timezone.utc).isoformat(),
-                     "insertionTime": original_request_message_data_obj.insertion_time.astimezone(timezone.utc).isoformat(),
-                     "startedAt": started_at.astimezone(timezone.utc).isoformat(),
-                     "tool": "{\"toolUsed\":\"coding\"}"
-                 })
-         except Exception as e:
-             logging.error(f"Error processing message finish publish&store: {str(e)}")
-     finally:
-         try:
-             #clean up the temp folder
-             temp_dir = get_request_temp_dir(request_id)
-             if temp_dir:
-                 #validate cortex_autogen folder in temp_dir path
-                 if "/cortex_autogen/" in temp_dir:
-                     shutil.rmtree(temp_dir)
-                 else:
-                     logging.warning(f"Invalid temp_dir path: {temp_dir}, not deleting")
-         except Exception as e:
-             logging.error(f"Error cleaning up: {str(e)}")
-
-
- def process_message_safe(original_request_message_data, original_request_message_data_obj, original_request_message, all_messages, request_id, started_at):
-     config_list = config_list_from_json(env_or_file="OAI_CONFIG_LIST")
-     llm_config = {
-         "config_list": config_list,
-         "base_url": os.environ.get("CORTEX_API_BASE_URL"),
-         "api_key": os.environ.get("CORTEX_API_KEY"),
-         "cache_seed": None,
-         "timeout": 600 * 2
-     }
-
-     total_messages = 30 # set this for updates % progress's max just a guess
-     message_count = 0 # updates % progress
-
-     def chat_publish_progress(data):
-         nonlocal message_count
-         message = data.get("message") or data.get("info") or data.get("content")
-         message_count += 1
-         progress = min(message_count / total_messages, 1)
-         publish_request_progress({
-             "requestId": request_id,
-             "progress": progress,
-             "info": message
-         })
-
-     def chat(*args,**kwargs):
-         nonlocal llm_config, request_id, chat_publish_progress, all_messages, original_request_message_data, original_request_message_data_obj, original_request_message
-         def get_arg_or_kwarg(name, pos, args, kwargs):
-             if args and kwargs.get(name):
-                 logging.warning(f"Both positional argument and keyword argument given for {name}, using keyword argument")
-             if kwargs.get(name):
-                 return kwargs.get(name)
-             if len(args) > pos:
-                 return args[pos]
-             return None
-
-         kwargs["prompt"] = get_arg_or_kwarg("prompt", 0, args, kwargs)
-         kwargs["message"] = get_arg_or_kwarg("message", 1, args, kwargs)
-         kwargs["llm_config"] = llm_config
-         kwargs["request_id"] = request_id
-         kwargs["chat_publish_progress"] = chat_publish_progress
-         kwargs["all_messages"] = all_messages
-         kwargs["original_request_message_data"] = original_request_message_data
-         kwargs["original_request_message_data_obj"] = original_request_message_data_obj
-         kwargs["original_request_message"] = original_request_message
-
-         return chat_with_agents(**kwargs)
-
-
-     preparer = AssistantAgent("preparer", llm_config=llm_config, system_message=prompts.get("PLANNER_SYSTEM_MESSAGE"))
-     prepared_plan = preparer.generate_reply(messages=[{"content": get_message_with_user_input(original_request_message,request_id), "role":"user"}])
-
-     helper_decider = AssistantAgent("helper_decider", llm_config=llm_config, system_message=prompts.get("HELPER_DECIDER_SYSTEM_MESSAGE"))
-     helper_decider_result = helper_decider.generate_reply(messages=[{"content": get_message_with_user_input(original_request_message,request_id), "role":"user"}])
-
-     try:
-         helper_decider_result = json.loads(helper_decider_result)
-         logging.info(f"Helper decider result: {helper_decider_result}")
-     except Exception as e:
-         logging.error(f"Error parsing helper decider result: {e}")
-         helper_decider_result = {}
-
-     context = ""
-
-     code_keywords = original_request_message_data.get("keywords") or original_request_message_data.get("message")
-     if code_keywords:
-         context += f"\n#SECTION_OF_OLD_TASK_CODE_INFO_START:\nHere's code/info from old-tasks that might help:\n{search_index(code_keywords)}\n#SECTION_OF_OLD_TASK_CODE_INFO_END\n"
-
-     if helper_decider_result.get("bing_search"):
-         bing_search_message = f"Search Bing for more information on the task: {get_message_with_user_input(original_request_message,request_id)}, prepared draft plan to solve task: {prepared_plan}"
-         result = chat(prompts.get("BING_SEARCH_PROMPT"), bing_search_message)
-         context += f"\n\nBing search results: {result}"
-
-     if helper_decider_result.get("cognitive_search"):
-         cognitive_search_message = f"Search cognitive index for more information on the task: {get_message_with_user_input(original_request_message,request_id)}."
-         result = chat(prompts.get("COGNITIVE_SEARCH_PROMPT"), cognitive_search_message)
-         context += f"\n\nCognitive search results: {result}"
-
-
-     context = process_helper_results(helper_decider_result, get_message_with_user_input(original_request_message,request_id), context, chat)
-
-     context_message = ""
-     if context:
-         context_message = f"\n\nHere is some data from search results and helpful stuff already collected and worked on, use if helpful:\n{context}\n\n"
-
-
-     check_message = f"""
-     Task: \n{get_message_with_user_input(original_request_message,request_id)}\n\n
-     Context to check if task can be considered completed: {context_message}\n\n
-     """
-
-     task_completion_checker = AssistantAgent("task_completion_checker", llm_config=llm_config, system_message=TASK_COMPLETE_CHECKER_SYSTEM_MESSAGE)
-     check_result = task_completion_checker.generate_reply(messages=[{"content": check_message, "role":"user"}])
-
-     chat_result = None
-     if check_result != "DONE":
-         message = f"""
-         Your task is to complete the following: \n{get_message_with_user_input(original_request_message,request_id)}\n\n"
-         Here is a draft plan to solve the task: \n{prepared_plan}\n\n
-         {context_message}
-         You don't have to follow the plan, it's just a suggestion.
-         Do your best to complete the task, user expects you to continue original task request conversation.
-         """
-         chat_result = chat(prompts.get("GENERIC_ASSISTANT_SYSTEM_MESSAGE"), message, return_type="chat_result")
-
-     presenter = AssistantAgent("presenter", llm_config=llm_config, system_message=prompts.get("PRESENTER_SYSTEM_MESSAGE"))
-     if chat_result is not None:
-         presenter_messages_context = "\n\n".join([msg['content'] for msg in chat_result.chat_history])
-     else:
-         presenter_messages_context = context_message
-     presenter_message = f"""
-     Here is everything done in order to complete the task: {presenter_messages_context}\n\n
-     Original task was: {get_message_with_user_input(original_request_message,request_id)}\n\n
-     Reply to it with task result, do not forget that user expects you continue original task request conversation:\n\n
-     """
-
-     presenter_result = presenter.generate_reply(messages=[{"content": presenter_message, "role":"user"}])
-
-     final_msg = presenter_result
-
-
-     zip_url = zip_and_upload_tmp_folder(get_request_temp_dir(request_id))
-     if zip_url and len(zip_url) > 0:
-         final_msg += f"\n\n[Download all files of this task]({zip_url})"
-
-
-     print(f"Task completed, task:\n{get_message_with_user_input(original_request_message,request_id)},\nresult: {final_msg}")
-     logging.info(f"Task completed, task:\n{get_message_with_user_input(original_request_message,request_id)},\nresult: {final_msg}")
-     return final_msg
package/helper-apps/cortex-autogen/agents_extra.py (deleted)
@@ -1,14 +0,0 @@
- from config import prompts
- from datetime import datetime
-
- def process_helper_results(helper_decider_result, original_request_message, context, chat):
-     def add_to_context(result, prefix):
-         nonlocal context
-         context += f"\n\n{prefix}: {result}"
-
-     if helper_decider_result.get("sql"):
-         sql_message = f"Use SQL to help solving task, provide any related data and code that may help: {original_request_message}."
-         result = chat(prompts.get("SQL_PROMPT"), sql_message, return_type="all_as_str")
-         add_to_context(result, "SQL results")
-
-     return context
package/helper-apps/cortex-autogen/config.py (deleted)
@@ -1,18 +0,0 @@
- import os
- from dotenv import load_dotenv
-
- load_dotenv()
-
- AZURE_STORAGE_CONNECTION_STRING = os.environ["AZURE_STORAGE_CONNECTION_STRING"]
- HUMAN_INPUT_QUEUE_NAME = os.environ.get("HUMAN_INPUT_QUEUE_NAME", "autogen-human-input-queue")
- REDIS_CONNECTION_STRING = os.environ['REDIS_CONNECTION_STRING']
- REDIS_CHANNEL = 'requestProgress'
- AZURE_BLOB_CONTAINER = os.environ.get("AZURE_BLOB_CONTAINER", "autogen-uploads")
-
-
- # Prompts
- import prompts
- import prompts_extra
-
- prompts = {**prompts.__dict__, **prompts_extra.__dict__}
-
package/helper-apps/cortex-autogen/data_operations.py (deleted)
@@ -1,29 +0,0 @@
- from azure.storage.queue import QueueClient
- import pymongo
- import os
- import logging
- import json
- import base64
- from config import AZURE_STORAGE_CONNECTION_STRING, HUMAN_INPUT_QUEUE_NAME
-
- human_input_queue_client = QueueClient.from_connection_string(AZURE_STORAGE_CONNECTION_STRING, HUMAN_INPUT_QUEUE_NAME)
-
- def store_in_mongo(data):
-     try:
-         if 'MONGO_URI' in os.environ:
-             client = pymongo.MongoClient(os.environ['MONGO_URI'])
-             collection = client.get_default_database()[os.environ.get('MONGO_COLLECTION_NAME', 'autogenruns')]
-             collection.insert_one(data)
-         else:
-             logging.warning("MONGO_URI not found in environment variables")
-     except Exception as e:
-         logging.error(f"An error occurred while storing data in MongoDB: {str(e)}")
-
- def check_for_human_input(request_id):
-     messages = human_input_queue_client.receive_messages()
-     for message in messages:
-         content = json.loads(base64.b64decode(message.content).decode('utf-8'))
-         if content['codeRequestId'] == request_id:
-             human_input_queue_client.delete_message(message)
-             return content['text']
-     return None
package/helper-apps/cortex-autogen/function_app.py (deleted)
@@ -1,44 +0,0 @@
- import azure.functions as func
- import logging
- import json
- from azure.storage.queue import QueueClient
- import os
- import redis
- from agents import process_message
- import subprocess
- import sys
- import config
- import requests
-
- logging.getLogger().setLevel(logging.WARNING)
-
- import subprocess, sys, importlib
- required_packages = ['requests', 'azure-storage-blob'] # Add any and all other required packages
- for package in required_packages:
-     try:
-         importlib.import_module(package)
-     except ImportError:
-         subprocess.check_call([sys.executable, "-m", "pip", "install", package, "--disable-pip-version-check"], stderr=subprocess.STDOUT, stdout=subprocess.DEVNULL)
-
-
- app = func.FunctionApp()
-
- connection_string = os.environ["AZURE_STORAGE_CONNECTION_STRING"]
- queue_name = os.environ.get("QUEUE_NAME", "autogen-message-queue")
- queue_client = QueueClient.from_connection_string(connection_string, queue_name)
-
- redis_client = redis.from_url(os.environ['REDIS_CONNECTION_STRING'])
- channel = 'requestProgress'
-
-
- @app.queue_trigger(arg_name="msg", queue_name=queue_name, connection="AZURE_STORAGE_CONNECTION_STRING")
- def queue_trigger(msg: func.QueueMessage):
-     logging.info(f"Queue trigger Message ID: {msg.id}")
-     try:
-         message_data = json.loads(msg.get_body().decode('utf-8'))
-         if "requestId" not in message_data:
-             message_data['requestId'] = msg.id
-         process_message(message_data, msg)
-
-     except Exception as e:
-         logging.error(f"Error processing message: {str(e)}")
package/helper-apps/cortex-autogen/host.json (deleted)
@@ -1,15 +0,0 @@
- {
-   "version": "2.0",
-   "logging": {
-     "applicationInsights": {
-       "samplingSettings": {
-         "isEnabled": true,
-         "excludedTypes": "Request"
-       }
-     }
-   },
-   "extensionBundle": {
-     "id": "Microsoft.Azure.Functions.ExtensionBundle",
-     "version": "[4.*, 5.0.0)"
-   }
- }
package/helper-apps/cortex-autogen/main.py (deleted)
@@ -1,38 +0,0 @@
- import os
- from azure.storage.queue import QueueClient
- import base64
- import json
- import time
- from agents import process_message
-
-
- def main():
-     print("Starting message processing loop")
-     connection_string = os.environ["AZURE_STORAGE_CONNECTION_STRING"]
-     queue_name = os.environ.get("QUEUE_NAME", "autogen-message-queue")
-
-     queue_client = QueueClient.from_connection_string(connection_string, queue_name)
-
-     attempts = 0
-     max_attempts = 1000
-
-     while attempts < max_attempts:
-         messages = queue_client.receive_messages(messages_per_page=1)
-
-         if messages:
-             for message in messages:
-                 decoded_content = base64.b64decode(message.content).decode('utf-8')
-                 message_data = json.loads(decoded_content)
-                 if "requestId" not in message_data:
-                     message_data['requestId'] = message.id
-                 process_message(message_data, message)
-                 queue_client.delete_message(message)
-             attempts = 0 # Reset attempts if a message was processed
-         else:
-             attempts += 1
-             time.sleep(1) # Wait for 1 second before checking again
-
-     print(f"No messages received after {max_attempts} attempts. Exiting.")
-
- if __name__ == "__main__":
-     main()