@aj-archipelago/cortex 1.3.61 → 1.3.63

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (212) hide show
  1. package/.github/workflows/cortex-file-handler-test.yml +61 -0
  2. package/README.md +31 -7
  3. package/config/default.example.json +15 -0
  4. package/config.js +158 -12
  5. package/helper-apps/cortex-autogen2/DigiCertGlobalRootCA.crt.pem +22 -0
  6. package/helper-apps/cortex-autogen2/Dockerfile +31 -0
  7. package/helper-apps/cortex-autogen2/Dockerfile.worker +41 -0
  8. package/helper-apps/cortex-autogen2/README.md +183 -0
  9. package/helper-apps/cortex-autogen2/__init__.py +1 -0
  10. package/helper-apps/cortex-autogen2/agents.py +131 -0
  11. package/helper-apps/cortex-autogen2/docker-compose.yml +20 -0
  12. package/helper-apps/cortex-autogen2/function_app.py +55 -0
  13. package/helper-apps/cortex-autogen2/host.json +15 -0
  14. package/helper-apps/cortex-autogen2/main.py +126 -0
  15. package/helper-apps/cortex-autogen2/poetry.lock +3652 -0
  16. package/helper-apps/cortex-autogen2/pyproject.toml +36 -0
  17. package/helper-apps/cortex-autogen2/requirements.txt +20 -0
  18. package/helper-apps/cortex-autogen2/send_task.py +105 -0
  19. package/helper-apps/cortex-autogen2/services/__init__.py +1 -0
  20. package/helper-apps/cortex-autogen2/services/azure_queue.py +85 -0
  21. package/helper-apps/cortex-autogen2/services/redis_publisher.py +153 -0
  22. package/helper-apps/cortex-autogen2/task_processor.py +488 -0
  23. package/helper-apps/cortex-autogen2/tools/__init__.py +24 -0
  24. package/helper-apps/cortex-autogen2/tools/azure_blob_tools.py +175 -0
  25. package/helper-apps/cortex-autogen2/tools/azure_foundry_agents.py +601 -0
  26. package/helper-apps/cortex-autogen2/tools/coding_tools.py +72 -0
  27. package/helper-apps/cortex-autogen2/tools/download_tools.py +48 -0
  28. package/helper-apps/cortex-autogen2/tools/file_tools.py +545 -0
  29. package/helper-apps/cortex-autogen2/tools/search_tools.py +646 -0
  30. package/helper-apps/cortex-azure-cleaner/README.md +36 -0
  31. package/helper-apps/cortex-file-converter/README.md +93 -0
  32. package/helper-apps/cortex-file-converter/key_to_pdf.py +104 -0
  33. package/helper-apps/cortex-file-converter/list_blob_extensions.py +89 -0
  34. package/helper-apps/cortex-file-converter/process_azure_keynotes.py +181 -0
  35. package/helper-apps/cortex-file-converter/requirements.txt +1 -0
  36. package/helper-apps/cortex-file-handler/.env.test.azure.ci +7 -0
  37. package/helper-apps/cortex-file-handler/.env.test.azure.sample +1 -1
  38. package/helper-apps/cortex-file-handler/.env.test.gcs.ci +10 -0
  39. package/helper-apps/cortex-file-handler/.env.test.gcs.sample +2 -2
  40. package/helper-apps/cortex-file-handler/INTERFACE.md +41 -0
  41. package/helper-apps/cortex-file-handler/package.json +1 -1
  42. package/helper-apps/cortex-file-handler/scripts/setup-azure-container.js +41 -17
  43. package/helper-apps/cortex-file-handler/scripts/setup-test-containers.js +30 -15
  44. package/helper-apps/cortex-file-handler/scripts/test-azure.sh +32 -6
  45. package/helper-apps/cortex-file-handler/scripts/test-gcs.sh +24 -2
  46. package/helper-apps/cortex-file-handler/scripts/validate-env.js +128 -0
  47. package/helper-apps/cortex-file-handler/src/blobHandler.js +161 -51
  48. package/helper-apps/cortex-file-handler/src/constants.js +3 -0
  49. package/helper-apps/cortex-file-handler/src/fileChunker.js +10 -8
  50. package/helper-apps/cortex-file-handler/src/index.js +116 -9
  51. package/helper-apps/cortex-file-handler/src/redis.js +61 -1
  52. package/helper-apps/cortex-file-handler/src/services/ConversionService.js +11 -8
  53. package/helper-apps/cortex-file-handler/src/services/FileConversionService.js +2 -2
  54. package/helper-apps/cortex-file-handler/src/services/storage/AzureStorageProvider.js +88 -6
  55. package/helper-apps/cortex-file-handler/src/services/storage/GCSStorageProvider.js +58 -0
  56. package/helper-apps/cortex-file-handler/src/services/storage/StorageFactory.js +25 -5
  57. package/helper-apps/cortex-file-handler/src/services/storage/StorageProvider.js +9 -0
  58. package/helper-apps/cortex-file-handler/src/services/storage/StorageService.js +120 -16
  59. package/helper-apps/cortex-file-handler/src/start.js +27 -17
  60. package/helper-apps/cortex-file-handler/tests/FileConversionService.test.js +52 -1
  61. package/helper-apps/cortex-file-handler/tests/blobHandler.test.js +40 -0
  62. package/helper-apps/cortex-file-handler/tests/checkHashShortLived.test.js +553 -0
  63. package/helper-apps/cortex-file-handler/tests/cleanup.test.js +46 -52
  64. package/helper-apps/cortex-file-handler/tests/containerConversionFlow.test.js +451 -0
  65. package/helper-apps/cortex-file-handler/tests/containerNameParsing.test.js +229 -0
  66. package/helper-apps/cortex-file-handler/tests/containerParameterFlow.test.js +392 -0
  67. package/helper-apps/cortex-file-handler/tests/conversionResilience.test.js +7 -2
  68. package/helper-apps/cortex-file-handler/tests/deleteOperations.test.js +348 -0
  69. package/helper-apps/cortex-file-handler/tests/fileChunker.test.js +23 -2
  70. package/helper-apps/cortex-file-handler/tests/fileUpload.test.js +11 -5
  71. package/helper-apps/cortex-file-handler/tests/getOperations.test.js +58 -24
  72. package/helper-apps/cortex-file-handler/tests/postOperations.test.js +11 -4
  73. package/helper-apps/cortex-file-handler/tests/shortLivedUrlConversion.test.js +225 -0
  74. package/helper-apps/cortex-file-handler/tests/start.test.js +8 -12
  75. package/helper-apps/cortex-file-handler/tests/storage/StorageFactory.test.js +80 -0
  76. package/helper-apps/cortex-file-handler/tests/storage/StorageService.test.js +388 -22
  77. package/helper-apps/cortex-file-handler/tests/testUtils.helper.js +74 -0
  78. package/lib/cortexResponse.js +153 -0
  79. package/lib/entityConstants.js +19 -12
  80. package/lib/logger.js +21 -4
  81. package/lib/pathwayTools.js +28 -9
  82. package/lib/util.js +49 -0
  83. package/package.json +1 -1
  84. package/pathways/basePathway.js +1 -0
  85. package/pathways/bing_afagent.js +54 -1
  86. package/pathways/call_tools.js +2 -3
  87. package/pathways/chat_jarvis.js +1 -1
  88. package/pathways/google_cse.js +27 -0
  89. package/pathways/grok_live_search.js +18 -0
  90. package/pathways/system/entity/memory/sys_memory_lookup_required.js +1 -0
  91. package/pathways/system/entity/memory/sys_memory_required.js +1 -0
  92. package/pathways/system/entity/memory/sys_search_memory.js +1 -0
  93. package/pathways/system/entity/sys_entity_agent.js +56 -4
  94. package/pathways/system/entity/sys_generator_quick.js +1 -0
  95. package/pathways/system/entity/tools/sys_tool_bing_search.js +2 -0
  96. package/pathways/system/entity/tools/sys_tool_bing_search_afagent.js +31 -5
  97. package/pathways/system/entity/tools/sys_tool_google_search.js +141 -0
  98. package/pathways/system/entity/tools/sys_tool_grok_x_search.js +237 -0
  99. package/pathways/system/entity/tools/sys_tool_image.js +1 -1
  100. package/pathways/system/rest_streaming/sys_claude_37_sonnet.js +21 -0
  101. package/pathways/system/rest_streaming/sys_claude_41_opus.js +21 -0
  102. package/pathways/system/rest_streaming/sys_claude_4_sonnet.js +21 -0
  103. package/pathways/system/rest_streaming/sys_google_gemini_25_flash.js +25 -0
  104. package/pathways/system/rest_streaming/{sys_google_gemini_chat.js → sys_google_gemini_25_pro.js} +6 -4
  105. package/pathways/system/rest_streaming/sys_grok_4.js +23 -0
  106. package/pathways/system/rest_streaming/sys_grok_4_fast_non_reasoning.js +23 -0
  107. package/pathways/system/rest_streaming/sys_grok_4_fast_reasoning.js +23 -0
  108. package/pathways/system/rest_streaming/sys_openai_chat.js +3 -0
  109. package/pathways/system/rest_streaming/sys_openai_chat_gpt41.js +22 -0
  110. package/pathways/system/rest_streaming/sys_openai_chat_gpt41_mini.js +21 -0
  111. package/pathways/system/rest_streaming/sys_openai_chat_gpt41_nano.js +21 -0
  112. package/pathways/system/rest_streaming/{sys_claude_35_sonnet.js → sys_openai_chat_gpt4_omni.js} +6 -4
  113. package/pathways/system/rest_streaming/sys_openai_chat_gpt4_omni_mini.js +21 -0
  114. package/pathways/system/rest_streaming/{sys_claude_3_haiku.js → sys_openai_chat_gpt5.js} +7 -5
  115. package/pathways/system/rest_streaming/sys_openai_chat_gpt5_chat.js +21 -0
  116. package/pathways/system/rest_streaming/sys_openai_chat_gpt5_mini.js +21 -0
  117. package/pathways/system/rest_streaming/sys_openai_chat_gpt5_nano.js +21 -0
  118. package/pathways/system/rest_streaming/{sys_openai_chat_o1.js → sys_openai_chat_o3.js} +6 -3
  119. package/pathways/system/rest_streaming/sys_openai_chat_o3_mini.js +3 -0
  120. package/pathways/system/workspaces/run_workspace_prompt.js +99 -0
  121. package/pathways/vision.js +1 -1
  122. package/server/graphql.js +1 -1
  123. package/server/modelExecutor.js +8 -0
  124. package/server/pathwayResolver.js +166 -16
  125. package/server/pathwayResponseParser.js +16 -8
  126. package/server/plugins/azureFoundryAgentsPlugin.js +12 -23
  127. package/server/plugins/claude3VertexPlugin.js +193 -45
  128. package/server/plugins/gemini15ChatPlugin.js +21 -0
  129. package/server/plugins/gemini15VisionPlugin.js +360 -0
  130. package/server/plugins/googleCsePlugin.js +94 -0
  131. package/server/plugins/grokVisionPlugin.js +365 -0
  132. package/server/plugins/modelPlugin.js +3 -1
  133. package/server/plugins/openAiChatPlugin.js +106 -13
  134. package/server/plugins/openAiVisionPlugin.js +42 -30
  135. package/server/resolver.js +28 -4
  136. package/server/rest.js +270 -53
  137. package/server/typeDef.js +1 -0
  138. package/tests/{mocks.js → helpers/mocks.js} +5 -2
  139. package/tests/{server.js → helpers/server.js} +2 -2
  140. package/tests/helpers/sseAssert.js +23 -0
  141. package/tests/helpers/sseClient.js +73 -0
  142. package/tests/helpers/subscriptionAssert.js +11 -0
  143. package/tests/helpers/subscriptions.js +113 -0
  144. package/tests/{sublong.srt → integration/features/translate/sublong.srt} +4543 -4543
  145. package/tests/integration/features/translate/translate_chunking_stream.test.js +100 -0
  146. package/tests/{translate_srt.test.js → integration/features/translate/translate_srt.test.js} +2 -2
  147. package/tests/integration/graphql/async/stream/agentic.test.js +477 -0
  148. package/tests/integration/graphql/async/stream/subscription_streaming.test.js +62 -0
  149. package/tests/integration/graphql/async/stream/sys_entity_start_streaming.test.js +71 -0
  150. package/tests/integration/graphql/async/stream/vendors/claude_streaming.test.js +56 -0
  151. package/tests/integration/graphql/async/stream/vendors/gemini_streaming.test.js +66 -0
  152. package/tests/integration/graphql/async/stream/vendors/grok_streaming.test.js +56 -0
  153. package/tests/integration/graphql/async/stream/vendors/openai_streaming.test.js +72 -0
  154. package/tests/integration/graphql/features/google/sysToolGoogleSearch.test.js +96 -0
  155. package/tests/integration/graphql/features/grok/grok.test.js +688 -0
  156. package/tests/integration/graphql/features/grok/grok_x_search_tool.test.js +354 -0
  157. package/tests/{main.test.js → integration/graphql/features/main.test.js} +1 -1
  158. package/tests/{call_tools.test.js → integration/graphql/features/tools/call_tools.test.js} +2 -2
  159. package/tests/{vision.test.js → integration/graphql/features/vision/vision.test.js} +1 -1
  160. package/tests/integration/graphql/subscriptions/connection.test.js +26 -0
  161. package/tests/{openai_api.test.js → integration/rest/oai/openai_api.test.js} +63 -238
  162. package/tests/integration/rest/oai/tool_calling_api.test.js +343 -0
  163. package/tests/integration/rest/oai/tool_calling_streaming.test.js +85 -0
  164. package/tests/integration/rest/vendors/claude_streaming.test.js +47 -0
  165. package/tests/integration/rest/vendors/claude_tool_calling_streaming.test.js +75 -0
  166. package/tests/integration/rest/vendors/gemini_streaming.test.js +47 -0
  167. package/tests/integration/rest/vendors/gemini_tool_calling_streaming.test.js +75 -0
  168. package/tests/integration/rest/vendors/grok_streaming.test.js +55 -0
  169. package/tests/integration/rest/vendors/grok_tool_calling_streaming.test.js +75 -0
  170. package/tests/{azureAuthTokenHelper.test.js → unit/core/azureAuthTokenHelper.test.js} +1 -1
  171. package/tests/{chunkfunction.test.js → unit/core/chunkfunction.test.js} +2 -2
  172. package/tests/{config.test.js → unit/core/config.test.js} +3 -3
  173. package/tests/{encodeCache.test.js → unit/core/encodeCache.test.js} +1 -1
  174. package/tests/{fastLruCache.test.js → unit/core/fastLruCache.test.js} +1 -1
  175. package/tests/{handleBars.test.js → unit/core/handleBars.test.js} +1 -1
  176. package/tests/{memoryfunction.test.js → unit/core/memoryfunction.test.js} +2 -2
  177. package/tests/unit/core/mergeResolver.test.js +952 -0
  178. package/tests/{parser.test.js → unit/core/parser.test.js} +3 -3
  179. package/tests/unit/core/pathwayResolver.test.js +187 -0
  180. package/tests/{requestMonitor.test.js → unit/core/requestMonitor.test.js} +1 -1
  181. package/tests/{requestMonitorDurationEstimator.test.js → unit/core/requestMonitorDurationEstimator.test.js} +1 -1
  182. package/tests/{truncateMessages.test.js → unit/core/truncateMessages.test.js} +3 -3
  183. package/tests/{util.test.js → unit/core/util.test.js} +1 -1
  184. package/tests/{apptekTranslatePlugin.test.js → unit/plugins/apptekTranslatePlugin.test.js} +3 -3
  185. package/tests/{azureFoundryAgents.test.js → unit/plugins/azureFoundryAgents.test.js} +142 -9
  186. package/tests/{claude3VertexPlugin.test.js → unit/plugins/claude3VertexPlugin.test.js} +32 -10
  187. package/tests/{claude3VertexToolConversion.test.js → unit/plugins/claude3VertexToolConversion.test.js} +3 -3
  188. package/tests/unit/plugins/googleCsePlugin.test.js +111 -0
  189. package/tests/unit/plugins/grokVisionPlugin.test.js +1392 -0
  190. package/tests/{modelPlugin.test.js → unit/plugins/modelPlugin.test.js} +3 -3
  191. package/tests/{multimodal_conversion.test.js → unit/plugins/multimodal_conversion.test.js} +4 -4
  192. package/tests/{openAiChatPlugin.test.js → unit/plugins/openAiChatPlugin.test.js} +13 -4
  193. package/tests/{openAiToolPlugin.test.js → unit/plugins/openAiToolPlugin.test.js} +35 -27
  194. package/tests/{tokenHandlingTests.test.js → unit/plugins/tokenHandlingTests.test.js} +5 -5
  195. package/tests/{translate_apptek.test.js → unit/plugins/translate_apptek.test.js} +3 -3
  196. package/tests/{streaming.test.js → unit/plugins.streaming/plugin_stream_events.test.js} +19 -58
  197. package/helper-apps/mogrt-handler/tests/test-files/test.gif +0 -1
  198. package/helper-apps/mogrt-handler/tests/test-files/test.mogrt +0 -1
  199. package/helper-apps/mogrt-handler/tests/test-files/test.mp4 +0 -1
  200. package/pathways/system/rest_streaming/sys_openai_chat_gpt4.js +0 -19
  201. package/pathways/system/rest_streaming/sys_openai_chat_gpt4_32.js +0 -19
  202. package/pathways/system/rest_streaming/sys_openai_chat_gpt4_turbo.js +0 -19
  203. package/pathways/system/workspaces/run_claude35_sonnet.js +0 -21
  204. package/pathways/system/workspaces/run_claude3_haiku.js +0 -20
  205. package/pathways/system/workspaces/run_gpt35turbo.js +0 -20
  206. package/pathways/system/workspaces/run_gpt4.js +0 -20
  207. package/pathways/system/workspaces/run_gpt4_32.js +0 -20
  208. package/tests/agentic.test.js +0 -256
  209. package/tests/pathwayResolver.test.js +0 -78
  210. package/tests/subscription.test.js +0 -387
  211. /package/tests/{subchunk.srt → integration/features/translate/subchunk.srt} +0 -0
  212. /package/tests/{subhorizontal.srt → integration/features/translate/subhorizontal.srt} +0 -0
@@ -0,0 +1,183 @@
1
+ ## Cortex AutoGen: Advanced AI Agent System 🤖
2
+
3
+ Multi-agent task automation with real code execution, Azure Storage Queue ingestion, Azure Blob uploads, and live progress via Redis.
4
+
5
+ ### Highlights
6
+ - **Selector-based orchestration** with `SelectorGroupChat`
7
+ - **Agents**: coder, code executor, cloud file uploader, presenter, terminator
8
+ - **Real execution** in a sandboxed working directory (`CORTEX_WORK_DIR`)
9
+ - **Azure native**: Queue (ingress) + Blob (files)
10
+ - **Live progress** published to Redis (`info`, `progress`, optional `data`)
11
+
12
+ ### Architecture
13
+ - Shared core in `task_processor.py` used by both the long-running worker (`main.py`) and the Azure Functions container (`function_app.py`).
14
+ - Queue messages are Base64-encoded JSON; task text is read from `message` or `content`.
15
+
16
+ ## Quick Start
17
+
18
+ ### Prerequisites
19
+ - Python 3.11+
20
+ - Redis instance
21
+ - Azure Storage account (Queue + Blob)
22
+ - Docker (optional, for containerized Azure Functions local run)
23
+
24
+ ### 1) Set environment variables
25
+ Create a `.env` in the project root:
26
+
27
+ ```dotenv
28
+ # Core
29
+ AZURE_STORAGE_CONNECTION_STRING=...
30
+ AZURE_QUEUE_NAME=autogen-test-message-queue # used by worker (main.py)
31
+ AZURE_BLOB_CONTAINER=autogentempfiles
32
+ REDIS_CONNECTION_STRING=redis://localhost:6379
33
+ REDIS_CHANNEL=requestProgress
34
+
35
+ # Models API
36
+ CORTEX_API_KEY=...
37
+ CORTEX_API_BASE_URL=http://host.docker.internal:4000/v1
38
+
39
+ # Working directory for code execution (must be writable)
40
+ CORTEX_WORK_DIR=/tmp/coding
41
+
42
+ # Azure Functions variant uses QUEUE_NAME (not AZURE_QUEUE_NAME)
43
+ QUEUE_NAME=autogen-test-message-queue
44
+ ```
45
+
46
+ Keep secrets out of version control. You can also configure `local.settings.json` for local Functions.
47
+
48
+ ### 2) Install dependencies
49
+ - Using Poetry:
50
+ ```bash
51
+ poetry install
52
+ ```
53
+ - Or with pip:
54
+ ```bash
55
+ python -m venv .venv && source .venv/bin/activate # project uses .venv
56
+ pip install -r requirements.txt
57
+ ```
58
+
59
+ ### 3) Run the worker locally
60
+ - Activate your virtualenv (`source .venv/bin/activate`) and ensure a clean worker state.
61
+ - Recommended workflow (non-continuous, exits when queue is empty):
62
+ ```bash
63
+ # Kill any previously running worker (module or script form)
64
+ pkill -f "python -m src.cortex_autogen2.main" || true
65
+ pkill -f "python main.py" || true
66
+ CONTINUOUS_MODE=false python -m src.cortex_autogen2.main &
67
+ # Alternative (direct script):
68
+ # CONTINUOUS_MODE=false python main.py &
69
+ ```
70
+ Tip: Use the module path variant if your repository layout exposes `src/cortex_autogen2` on `PYTHONPATH` (e.g., in a monorepo). Otherwise, run `python main.py` directly.
71
+
72
+ Then send a task:
73
+ ```bash
74
+ python send_task.py "create a simple PDF about cats"
75
+ ```
76
+
77
+ Notes:
78
+ - `CONTINUOUS_MODE=false` runs once and exits after the queue is empty.
79
+ - Use background run `&` to keep logs visible in the current terminal.
80
+
81
+ ### 4) Run the worker in Docker (optional)
82
+ Build and run the worker image using `Dockerfile.worker`:
83
+ ```bash
84
+ docker build -f Dockerfile.worker -t cortex-autogen-worker .
85
+ docker run --rm --env-file .env -e CORTEX_WORK_DIR=/app/coding --network host cortex-autogen-worker
86
+ ```
87
+
88
+ ### 5) Run the Azure Functions container locally (optional)
89
+ Use Docker Compose and pass your `.env` so the container gets your variables:
90
+ ```bash
91
+ docker compose --env-file .env up --build
92
+ ```
93
+ This builds `Dockerfile` (Functions) and starts on port `7071` (mapped to container `80`).
94
+
95
+ ## Usage Details
96
+
97
+ ### Sending tasks
98
+ `send_task.py` publishes a Base64-encoded JSON message with `content` to the queue defined by `AZURE_STORAGE_CONNECTION_STRING` and `AZURE_QUEUE_NAME` (or `QUEUE_NAME` for Functions).
99
+
100
+ ```bash
101
+ python send_task.py "list the files in the current directory"
102
+ # Override queue/connection if needed:
103
+ python send_task.py "create a simple PDF about cats" --queue autogen-test-message-queue --connection "<AZURE_STORAGE_CONNECTION_STRING>"
104
+ ```
105
+
106
+ Message format published to the queue (before Base64 encoding):
107
+ ```json
108
+ {
109
+ "request_id": "<uuid>",
110
+ "message_id": "<uuid>",
111
+ "content": "<task text>"
112
+ }
113
+ ```
114
+
115
+ ### Progress updates
116
+ - Channel: set via `REDIS_CHANNEL` (recommend `requestProgress`)
117
+ - Payload fields: `requestId`, `progress` (0-1), `info` (short status), optional `data` (final Markdown)
118
+ - Final result publishes `progress=1.0` with `data` containing the Markdown for UI
119
+
120
+ ### Working directory
121
+ - Code execution uses `CORTEX_WORK_DIR`. Defaults: `/home/site/wwwroot/coding` in Functions container; set to `/app/coding` in worker container; recommend `/tmp/coding` locally. Always use absolute paths within this directory.
122
+
123
+ ## Project Structure
124
+ ```
125
+ cortex-autogen2/
126
+ ├── Dockerfile # Azure Functions container
127
+ ├── Dockerfile.worker # Traditional worker container
128
+ ├── docker-compose.yml # Local Functions container orchestrator
129
+ ├── main.py # Long-running worker
130
+ ├── function_app.py # Azure Functions entry
131
+ ├── task_processor.py # Shared processing logic
132
+ ├── host.json # Azure Functions host config
133
+ ├── local.settings.json # Local Functions settings (do not commit secrets)
134
+ ├── requirements.txt # Functions deps (pip)
135
+ ├── pyproject.toml, poetry.lock # Poetry project config
136
+ ├── send_task.py # Queue task sender
137
+ ├── agents.py # Agent definitions
138
+ ├── services/
139
+ │ ├── azure_queue.py
140
+ │ └── redis_publisher.py
141
+ └── tools/
142
+ ├── azure_blob_tools.py
143
+ ├── coding_tools.py
144
+ ├── download_tools.py
145
+ ├── file_tools.py
146
+ └── search_tools.py
147
+ ```
148
+
149
+ ## Environment variables reference
150
+ | Name | Required | Default | Used by | Description |
151
+ |--------------------------------|----------|---------------------------------|-------------------|-------------|
152
+ | `AZURE_STORAGE_CONNECTION_STRING` | Yes | — | Worker/Functions | Storage account connection string |
153
+ | `AZURE_QUEUE_NAME` | Yes (worker) | — | Worker | Queue name for worker (`main.py`) |
154
+ | `QUEUE_NAME` | Yes (Functions) | `autogen-message-queue` | Functions | Queue name for Functions (`function_app.py`) |
155
+ | `AZURE_BLOB_CONTAINER` | Yes | — | Uploader tool | Blob container for uploaded files |
156
+ | `REDIS_CONNECTION_STRING` | Yes | — | Progress | Redis connection string |
157
+ | `REDIS_CHANNEL` | Yes | `requestProgress` | Progress | Redis pub/sub channel for progress |
158
+ | `CORTEX_API_KEY` | Yes | — | Models | API key for Cortex/OpenAI-style API |
159
+ | `CORTEX_API_BASE_URL` | No | `http://host.docker.internal:4000/v1` | Models | API base URL |
160
+ | `CORTEX_WORK_DIR` | No | `/tmp/coding` or container path | Code executor | Writable work dir for code execution |
161
+
162
+ ## Notes
163
+ - Health endpoint referenced in `docker-compose.yml` is optional; if you add one, expose it under `/api/health` in the Functions app.
164
+ - Do not commit `.env` or `local.settings.json` with secrets.
165
+ - On macOS, Docker's `network_mode: host` is not supported; remove it from `docker-compose.yml` if needed and rely on published ports and `host.docker.internal` for host access.
166
+
167
+ ## Troubleshooting
168
+ - No tasks processed: verify `AZURE_QUEUE_NAME`/`QUEUE_NAME` and that messages are Base64-encoded JSON with `content` or `message`.
169
+ - No progress visible: ensure `REDIS_CONNECTION_STRING` and `REDIS_CHANNEL` (e.g., `requestProgress`) are set, and network access to Redis.
170
+ - Container cannot reach host services: use `--network host` on Linux; on macOS (where host networking is not supported) rely on published ports and `host.docker.internal` URLs inside containers.
171
+
172
+ ## Contributing
173
+ - Open a PR with clear description and include documentation updates when applicable.
174
+
175
+ ## Examples
176
+ - Send a research/report task:
177
+ ```bash
178
+ python send_task.py "Summarize the latest trends in AI agent frameworks with references"
179
+ ```
180
+ - Generate and upload a file:
181
+ ```bash
182
+ python send_task.py "Create a simple PDF about cats with 3 bullet points and upload it"
183
+ ```
@@ -0,0 +1,131 @@
1
+ from autogen_agentchat.agents import AssistantAgent, CodeExecutorAgent
2
+ from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor
3
+ import os
4
+ from autogen_core.tools import FunctionTool
5
+ from tools.azure_blob_tools import upload_file_to_azure_blob
6
+
7
+ #AGENTS
8
+ MAGENTIC_ONE_CODER_DESCRIPTION = "A helpful and general-purpose AI assistant that has strong language skills, Python skills, and Linux command line skills."
9
+
10
+ MAGENTIC_ONE_CODER_SYSTEM_MESSAGE = """You are a helpful AI assistant.
11
+ Solve tasks using your coding and language skills.
12
+ In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute.
13
+ 1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself.
14
+ 2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly.
15
+ Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill.
16
+ When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user.
17
+ Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use the 'print' function for the output when relevant. Check the execution result returned by the user.
18
+ If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.
19
+ When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible."""
20
+
21
+
22
+ async def get_agents(default_model_client, big_model_client, small_model_client):
23
+
24
+ #code executor
25
+ work_dir = os.getenv("CORTEX_WORK_DIR", "/home/site/wwwroot/coding")
26
+ code_executor = LocalCommandLineCodeExecutor(work_dir=work_dir, timeout=300)
27
+
28
+ #TOOLS
29
+ upload_file_to_cloud_tool = FunctionTool(upload_file_to_azure_blob, description="Upload files to the cloud. You must use absolute path to reference local files.")
30
+
31
+ coder_agent = AssistantAgent(
32
+ "coder_agent",
33
+ model_client=default_model_client,
34
+ description=MAGENTIC_ONE_CODER_DESCRIPTION,
35
+ system_message=MAGENTIC_ONE_CODER_SYSTEM_MESSAGE + f"""
36
+ Save remote files images, videos, etc. in order to work with them locally.
37
+ Make sure to log/print everything in code otherwise you will lose the context and cannot debug it.
38
+ Make sure your code is perfect.
39
+ Never ask for user input or user to do anything.
40
+ Never ask questions.
41
+ Your are expert in coding, do wonders.
42
+ If you need to do advanced stuff you can do a project and run it.
43
+ You can split codes, build projects, run anything, coding is your strength in this task.
44
+ Take actionable verifiable small steps if needed.
45
+ Understand that pitfalls and find ways to overcome them.
46
+ Progress is important, do not get stuck in a loop, keep trying but do not repeat the same steps.
47
+ Current directory might be different from the one you think it is, use absolute path to reference files.
48
+ Code executor working directory is: {work_dir}
49
+ So you can only access files in this directory.
50
+ Always use absolute path to reference files as current directory might be different from the one you think it is.
51
+ """,
52
+ )
53
+
54
+ code_executor_agent = CodeExecutorAgent("code_executor", code_executor=code_executor)
55
+
56
+
57
+ terminator_agent = AssistantAgent(
58
+ "terminator_agent",
59
+ model_client=default_model_client,
60
+ description="A helpful assistant that can terminate.",
61
+ system_message="""You are a helpful assistant that can terminate.
62
+ Original task must be completed in order to terminate.
63
+ Only output: TERMINATE, if completed.
64
+ If not completed, give the reason instead of TERMINATE.
65
+ Do not ask questions you are the terminator.
66
+ Always output in single line without any other text.
67
+ Do not give empty response.
68
+ In order to terminate:
69
+ - Task must be completed.
70
+ - All referenced local files must have been uploaded to the cloud and their public URLs retrieved and included in the final output.
71
+ - Presenter must have provided the final output.
72
+ All code must have been executed in order to terminate.
73
+ Deliverables must have been provided in order to terminate.
74
+ If you cannot terminate because of same reason after 3 attempts, terminate.
75
+ Example outputs:
76
+
77
+ TERMINATE
78
+
79
+ TASK NOT COMPLETED: missing missing missing
80
+ """,
81
+ )
82
+
83
+
84
+ presenter_agent = AssistantAgent(
85
+ "presenter_agent",
86
+ model_client=default_model_client,
87
+ description="A highly skilled and creative presentation specialist, responsible for crafting visually stunning and exceptionally informative final deliverables.",
88
+ system_message="""You are a highly skilled and creative presentation specialist, responsible for crafting visually stunning and exceptionally informative final deliverables.
89
+ Your goal is to transform raw task results into engaging, professional, and visually appealing presentations, primarily using Markdown.
90
+
91
+ Here's what makes a great presentation:
92
+ - **Captivating Structure**: Start with a compelling summary, followed by well-organized sections with clear headings and subheadings.
93
+ - **Stunning Visuals**: Integrate relevant images, videos, and even diagrams (if applicable and possible via markdown) seamlessly to enhance understanding and engagement. Ensure visuals are high-quality and directly support the content.
94
+ **IMPORTANT: Actively look for `UPLOADED_FILES_SAS_URLS` provided in the input. If images or other visual assets are available via these URLs, you MUST incorporate them into your Markdown presentation. Describe and explain these visuals to the user, providing context and insights.**
95
+ - **Professional Aesthetics**: Utilize Markdown's full capabilities for formatting, including bolding, italics, lists, and tables, to create a clean, readable, and visually pleasing layout. Think about white space and information hierarchy.
96
+ - **Concise & Impactful Language**: Use persuasive and professional language. Avoid jargon where possible, or explain it clearly. Every word should contribute to clarity and impact.
97
+ - **User-Centric Design**: Remember that your output will be directly displayed in a React application. Focus on great UI/UX, ensuring the presentation is intuitive and easy for the end-user to consume.
98
+ - **Complete & Actionable**: Ensure all necessary information from the task is included, and if appropriate, guide the user towards next steps or key takeaways.
99
+
100
+ Report must be a direct reply to the user's task. You are the final voice to the user, so make it perfect.
101
+ User does not have local access to the files, so you must provide direct URLs for any external resources (e.g., images uploaded to cloud storage).
102
+ When including downloadable assets, always look for and use the `download_url` provided in JSON outputs from the `file_cloud_uploader_agent`. These URLs are public and include necessary SAS tokens for access.
103
+ Crucially, your output should be a final, polished presentation of the task result, suitable for direct display in a user interface.
104
+ **Your report MUST only contain the direct result of the user's task. Absolutely NO explanations of how the task was accomplished, internal agent thought processes, or any operational details should be included. Do not mention which tools or agents were used, or how they were used to achieve the result.**
105
+ **Focus exclusively on delivering the requested information (e.g., image galleries, reports) and only include minimal, essential explanations directly related to the visuals or content.** For example, for an image gallery, provide brief descriptions for each image or a short overview of the gallery structure, but do not explain the steps taken by the agents or the internal workflow.
106
+ **Do not include extensive executive summaries, detailed operational breakdowns, or generic quick-start guides unless explicitly asked for.**
107
+ Do not include raw code snippets, internal thought processes, intermediate data, or any technical logs that are not part of the final, user-friendly deliverable.
108
+ **Absolutely DO NOT include instructions on how to run, save, or modify any code (e.g., "save as .py", "pip install", "python script.py").**
109
+ **DO NOT provide information about packaging, dependencies, or development workflows.**
110
+ Your output is for a non-technical end-user viewing it in a React app.
111
+ **CRITICAL: ONLY use URLs for any files (images, videos, documents, etc.) that are explicitly provided in the `UPLOADED_FILES_SAS_URLS` or directly within the `RESULT` content from other agents, specifically from the `file_cloud_uploader_agent`. If a valid, real URL is not provided, you MUST NOT include any placeholder, fake, or fabricated URLs. NEVER hallucinate or fabricate any links or content.**
112
+ """
113
+ )
114
+
115
+ file_cloud_uploader_agent = AssistantAgent(
116
+ "file_cloud_uploader_agent",
117
+ model_client=default_model_client,
118
+ tools=[upload_file_to_cloud_tool],
119
+ description="A helpful assistant that can upload files to the cloud.",
120
+ system_message=f"""You are a helpful assistant that can upload files to the cloud.
121
+ Upload referenced files to the cloud.
122
+ Use your tool to upload the files.
123
+ User does not have local access to the files so you must upload them to the cloud and provide the url.
124
+ Your current working directory for file operations is: {work_dir}.
125
+ When referencing local files for upload, **always prepend the '{work_dir}/' to the filename** to form the correct absolute path. For example, if a file is named 'test.text' in the working directory, the absolute path should be '{work_dir}/test.txt'.
126
+ """,
127
+ )
128
+
129
+ agents = [coder_agent, code_executor_agent, file_cloud_uploader_agent, presenter_agent, terminator_agent]
130
+
131
+ return agents, presenter_agent
@@ -0,0 +1,20 @@
1
# Compose definition for running the Cortex AutoGen Azure Functions host
# locally, talking to a Cortex API and Redis running on the docker host.
services:
  cortex-autogen-function:
    build:
      context: .
      dockerfile: Dockerfile
      # Pin the build to amd64 so the image matches the Functions base image
      # even on arm64 hosts (e.g. Apple Silicon).
      platforms:
        - linux/amd64
    # NOTE(review): with `network_mode: host` below, Docker ignores published
    # ports, so this 7071:80 mapping has no effect under host networking —
    # confirm whether the port mapping or host networking is the intended mode.
    ports:
      - "7071:80"
    environment:
      # host.docker.internal resolves to the docker host from inside the
      # container (on Docker Desktop; NOTE(review): verify on plain Linux).
      - CORTEX_API_BASE_URL=http://host.docker.internal:4000/v1
      - REDIS_CONNECTION_STRING=redis://host.docker.internal:6379
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:80/api/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      # Give the Functions host a minute to warm up before failures count.
      start_period: 60s
    restart: unless-stopped
    network_mode: host
@@ -0,0 +1,55 @@
1
+ import azure.functions as func
2
+ import logging
3
+ import json
4
+ from azure.storage.queue import QueueClient
5
+ import os
6
+ import redis
7
+ from task_processor import process_queue_message
8
+ # from agents import process_message
9
+
10
# logging.getLogger().setLevel(logging.WARNING)
# Root logger at INFO so the progress lines emitted below are visible.
logging.getLogger().setLevel(logging.INFO)

app = func.FunctionApp()

# Queue name precedence: AZURE_QUEUE_NAME, then QUEUE_NAME, then the default.
# (An AZURE_QUEUE_NAME set to "" also falls through, because `or` tests
# truthiness, not presence.)
connection_string = os.environ["AZURE_STORAGE_CONNECTION_STRING"]
queue_name = os.environ.get("AZURE_QUEUE_NAME") or os.environ.get("QUEUE_NAME", "autogen-message-queue")
logging.info(f"📦 Using Azure Storage Queue name: {queue_name}")
# NOTE(review): queue_client is not referenced anywhere in this module —
# the @app.queue_trigger binding handles dequeueing itself. Confirm it is
# needed before removing.
queue_client = QueueClient.from_connection_string(connection_string, queue_name)

# Redis connection and the pub/sub channel used for progress updates.
# NOTE(review): redis_client/channel also appear unused in this module —
# presumably consumed by task_processor; verify.
redis_client = redis.from_url(os.environ['REDIS_CONNECTION_STRING'])
channel = 'requestProgress'
22
+
23
+
24
@app.queue_trigger(arg_name="msg", queue_name=queue_name, connection="AZURE_STORAGE_CONNECTION_STRING")
def queue_trigger(msg: func.QueueMessage):
    """Queue trigger function to process Cortex AutoGen tasks.

    Decodes the queue message into the dict shape expected by
    ``process_queue_message``, runs the async processor to completion, and
    re-raises on failure so the Functions runtime retries the message
    (subject to its dequeue-count policy).
    """
    logging.info(f"🔍 QUEUE_TRIGGER: Processing message {msg.id}")

    try:
        message_body = msg.get_body().decode('utf-8')
        # pop_receipt is None because the Functions runtime, not our code,
        # owns message visibility and deletion for trigger-bound messages.
        message_data = {
            "id": msg.id,
            "content": message_body,
            "pop_receipt": None,
            "dequeue_count": msg.dequeue_count
        }

        logging.info(f"🔍 QUEUE_TRIGGER: Content: {message_data['content'][:100]}...")

        # asyncio.run creates a fresh event loop, runs the coroutine, and
        # tears the loop down cleanly (including async-generator shutdown).
        # The previous manual new_event_loop/set_event_loop/close sequence
        # left the thread's "current" event loop pointing at a closed loop.
        import asyncio
        result = asyncio.run(process_queue_message(message_data))
        if result:
            logging.info(f"✅ QUEUE_TRIGGER: Message {msg.id} processed successfully")
        else:
            logging.warning(f"⚠️ QUEUE_TRIGGER: Message {msg.id} returned no result")

    except Exception as e:
        logging.error(f"❌ QUEUE_TRIGGER: Error processing message {msg.id}: {e}", exc_info=True)
        raise  # let the Functions runtime retry / dead-letter the message
@@ -0,0 +1,15 @@
1
+ {
2
+ "version": "2.0",
3
+ "logging": {
4
+ "applicationInsights": {
5
+ "samplingSettings": {
6
+ "isEnabled": true,
7
+ "excludedTypes": "Request"
8
+ }
9
+ }
10
+ },
11
+ "extensionBundle": {
12
+ "id": "Microsoft.Azure.Functions.ExtensionBundle",
13
+ "version": "[4.*, 5.0.0)"
14
+ }
15
+ }
@@ -0,0 +1,126 @@
1
import asyncio
import base64
import json
import logging
import os
import sys

# Make the project root importable *before* the project-local imports at the
# bottom of this block. This is crucial when running main.py directly from
# outside the 'src' directory. (Previously this ran *after* the project
# imports had already executed, defeating its own stated purpose.)
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.abspath(os.path.join(current_dir, '..', '..'))
if project_root not in sys.path:
    sys.path.insert(0, project_root)

# Load environment variables before importing project modules, since they
# may read configuration from the environment at import time.
from dotenv import load_dotenv
load_dotenv()

# Configure logging (quiet down Azure SDK request/response noise).
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logging.getLogger("azure.core.pipeline.policies.http_logging_policy").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)

# Project-local imports — these rely on the sys.path fix and dotenv load above.
from services.azure_queue import get_queue_service
from task_processor import TaskProcessor
25
+
26
+
27
async def process_task(task_id: str, task_content: str, processor: TaskProcessor) -> None:
    """Process a single task using the TaskProcessor.

    Thin indirection over ``processor.process_task`` so ``main`` has a single
    seam for per-task handling. Exceptions propagate to the caller, which
    decides whether the worker loop survives the failure.
    """
    await processor.process_task(task_id, task_content)
30
+
31
+
32
+
33
async def main():
    """
    Main function to continuously process tasks from the Azure queue.

    In continuous mode (CONTINUOUS_MODE=true, the default) the loop polls
    forever, sleeping briefly when the queue is empty; otherwise it drains
    the queue once and exits. Unparseable messages are deleted so they do
    not retry forever; processing errors keep the loop alive in continuous
    mode and propagate in non-continuous mode.
    """
    continuous_mode = os.getenv("CONTINUOUS_MODE", "true").lower() == "true"
    logger.info(f"🚀 Starting AutoGen Worker, continuous_mode: {continuous_mode}")

    # Add a small initial delay in non-continuous mode to allow tasks to be enqueued
    if not continuous_mode:
        await asyncio.sleep(1)

    try:
        azure_queue = await get_queue_service()
        processor = TaskProcessor()
        await processor.initialize()

        try:
            while True:  # Continuous loop
                try:
                    message = await azure_queue.get_task()
                    if message:
                        task_id = message.get("id")
                        pop_receipt = message.get("pop_receipt")

                        if not task_id or not pop_receipt:
                            # Fix: the old nested "delete if both present"
                            # branch here was dead code (both cannot be
                            # present inside this guard). Without id AND
                            # receipt the message cannot be deleted; it will
                            # reappear after its visibility timeout.
                            logger.error(f"❌ Invalid message format: {message}")
                            continue

                        raw_content = message.get("content") or message.get("message")
                        if not raw_content:
                            logger.error(f"❌ Message has no content: {message}")
                            await azure_queue.delete_task(task_id, pop_receipt)
                            continue

                        # Producers may enqueue base64-encoded JSON or raw
                        # JSON. validate=True makes b64decode fail fast on
                        # raw JSON instead of silently stripping non-alphabet
                        # characters and decoding a garbage subset.
                        # (binascii.Error and UnicodeDecodeError are
                        # ValueError subclasses, so both hit the fallback.)
                        try:
                            decoded_content = base64.b64decode(raw_content, validate=True).decode('utf-8')
                            task_data = json.loads(decoded_content)
                        except (json.JSONDecodeError, TypeError, ValueError) as e:
                            logger.warning(f"⚠️ Failed to decode as base64, trying as raw JSON: {e}")
                            try:
                                task_data = json.loads(raw_content)
                            except json.JSONDecodeError as e2:
                                logger.error(f"❌ Failed to parse message content: {e2}")
                                # Delete unparseable messages to prevent infinite retry.
                                await azure_queue.delete_task(task_id, pop_receipt)
                                continue

                        # Check message field first, then content field.
                        task_content = task_data.get("message") or task_data.get("content")
                        if not task_content:
                            logger.error(f"❌ No task content found in: {task_data}")
                            await azure_queue.delete_task(task_id, pop_receipt)
                            continue

                        logger.info(f"📩 Received task: {task_content}...")

                        await process_task(task_id, task_content, processor)

                        # Delete only after successful processing so a crash
                        # mid-task lets the message reappear for retry.
                        await azure_queue.delete_task(task_id, pop_receipt)
                        logger.info(f"✅ Task {task_id} processed successfully.")
                    else:
                        if continuous_mode:
                            logger.info("⏳ No tasks in queue. Waiting 3 seconds...")
                            await asyncio.sleep(3)  # Wait before checking again
                        else:
                            logger.info("📭 No tasks in queue. Exiting (non-continuous mode).")
                            break

                except Exception as e:
                    logger.error(f"❌ Error processing task: {e}")
                    if continuous_mode:
                        logger.info("📝 Continuing to next task...")
                        await asyncio.sleep(5)  # Brief pause before retrying
                    else:
                        raise  # Re-raise in non-continuous mode

        finally:
            await processor.close()
            logger.info("🔌 Connections closed. Worker shutting down.")

    except Exception as e:
        logger.error(f"❌ Error in main loop: {e}")
        raise
119
+
120
+
121
if __name__ == "__main__":
    # Script entry point: asyncio.run owns the event-loop lifecycle and runs
    # the worker loop until it exits (non-continuous mode) or is killed.
    asyncio.run(main())
123
+
124
+
125
+
126
+