@aj-archipelago/cortex 1.3.62 → 1.3.63
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/cortex-file-handler-test.yml +61 -0
- package/README.md +31 -7
- package/config/default.example.json +15 -0
- package/config.js +133 -12
- package/helper-apps/cortex-autogen2/DigiCertGlobalRootCA.crt.pem +22 -0
- package/helper-apps/cortex-autogen2/Dockerfile +31 -0
- package/helper-apps/cortex-autogen2/Dockerfile.worker +41 -0
- package/helper-apps/cortex-autogen2/README.md +183 -0
- package/helper-apps/cortex-autogen2/__init__.py +1 -0
- package/helper-apps/cortex-autogen2/agents.py +131 -0
- package/helper-apps/cortex-autogen2/docker-compose.yml +20 -0
- package/helper-apps/cortex-autogen2/function_app.py +55 -0
- package/helper-apps/cortex-autogen2/host.json +15 -0
- package/helper-apps/cortex-autogen2/main.py +126 -0
- package/helper-apps/cortex-autogen2/poetry.lock +3652 -0
- package/helper-apps/cortex-autogen2/pyproject.toml +36 -0
- package/helper-apps/cortex-autogen2/requirements.txt +20 -0
- package/helper-apps/cortex-autogen2/send_task.py +105 -0
- package/helper-apps/cortex-autogen2/services/__init__.py +1 -0
- package/helper-apps/cortex-autogen2/services/azure_queue.py +85 -0
- package/helper-apps/cortex-autogen2/services/redis_publisher.py +153 -0
- package/helper-apps/cortex-autogen2/task_processor.py +488 -0
- package/helper-apps/cortex-autogen2/tools/__init__.py +24 -0
- package/helper-apps/cortex-autogen2/tools/azure_blob_tools.py +175 -0
- package/helper-apps/cortex-autogen2/tools/azure_foundry_agents.py +601 -0
- package/helper-apps/cortex-autogen2/tools/coding_tools.py +72 -0
- package/helper-apps/cortex-autogen2/tools/download_tools.py +48 -0
- package/helper-apps/cortex-autogen2/tools/file_tools.py +545 -0
- package/helper-apps/cortex-autogen2/tools/search_tools.py +646 -0
- package/helper-apps/cortex-azure-cleaner/README.md +36 -0
- package/helper-apps/cortex-file-converter/README.md +93 -0
- package/helper-apps/cortex-file-converter/key_to_pdf.py +104 -0
- package/helper-apps/cortex-file-converter/list_blob_extensions.py +89 -0
- package/helper-apps/cortex-file-converter/process_azure_keynotes.py +181 -0
- package/helper-apps/cortex-file-converter/requirements.txt +1 -0
- package/helper-apps/cortex-file-handler/.env.test.azure.ci +7 -0
- package/helper-apps/cortex-file-handler/.env.test.azure.sample +1 -1
- package/helper-apps/cortex-file-handler/.env.test.gcs.ci +10 -0
- package/helper-apps/cortex-file-handler/.env.test.gcs.sample +2 -2
- package/helper-apps/cortex-file-handler/INTERFACE.md +41 -0
- package/helper-apps/cortex-file-handler/package.json +1 -1
- package/helper-apps/cortex-file-handler/scripts/setup-azure-container.js +41 -17
- package/helper-apps/cortex-file-handler/scripts/setup-test-containers.js +30 -15
- package/helper-apps/cortex-file-handler/scripts/test-azure.sh +32 -6
- package/helper-apps/cortex-file-handler/scripts/test-gcs.sh +24 -2
- package/helper-apps/cortex-file-handler/scripts/validate-env.js +128 -0
- package/helper-apps/cortex-file-handler/src/blobHandler.js +161 -51
- package/helper-apps/cortex-file-handler/src/constants.js +3 -0
- package/helper-apps/cortex-file-handler/src/fileChunker.js +10 -8
- package/helper-apps/cortex-file-handler/src/index.js +116 -9
- package/helper-apps/cortex-file-handler/src/redis.js +61 -1
- package/helper-apps/cortex-file-handler/src/services/ConversionService.js +11 -8
- package/helper-apps/cortex-file-handler/src/services/FileConversionService.js +2 -2
- package/helper-apps/cortex-file-handler/src/services/storage/AzureStorageProvider.js +88 -6
- package/helper-apps/cortex-file-handler/src/services/storage/GCSStorageProvider.js +58 -0
- package/helper-apps/cortex-file-handler/src/services/storage/StorageFactory.js +25 -5
- package/helper-apps/cortex-file-handler/src/services/storage/StorageProvider.js +9 -0
- package/helper-apps/cortex-file-handler/src/services/storage/StorageService.js +120 -16
- package/helper-apps/cortex-file-handler/src/start.js +27 -17
- package/helper-apps/cortex-file-handler/tests/FileConversionService.test.js +52 -1
- package/helper-apps/cortex-file-handler/tests/blobHandler.test.js +40 -0
- package/helper-apps/cortex-file-handler/tests/checkHashShortLived.test.js +553 -0
- package/helper-apps/cortex-file-handler/tests/cleanup.test.js +46 -52
- package/helper-apps/cortex-file-handler/tests/containerConversionFlow.test.js +451 -0
- package/helper-apps/cortex-file-handler/tests/containerNameParsing.test.js +229 -0
- package/helper-apps/cortex-file-handler/tests/containerParameterFlow.test.js +392 -0
- package/helper-apps/cortex-file-handler/tests/conversionResilience.test.js +7 -2
- package/helper-apps/cortex-file-handler/tests/deleteOperations.test.js +348 -0
- package/helper-apps/cortex-file-handler/tests/fileChunker.test.js +23 -2
- package/helper-apps/cortex-file-handler/tests/fileUpload.test.js +11 -5
- package/helper-apps/cortex-file-handler/tests/getOperations.test.js +58 -24
- package/helper-apps/cortex-file-handler/tests/postOperations.test.js +11 -4
- package/helper-apps/cortex-file-handler/tests/shortLivedUrlConversion.test.js +225 -0
- package/helper-apps/cortex-file-handler/tests/start.test.js +8 -12
- package/helper-apps/cortex-file-handler/tests/storage/StorageFactory.test.js +80 -0
- package/helper-apps/cortex-file-handler/tests/storage/StorageService.test.js +388 -22
- package/helper-apps/cortex-file-handler/tests/testUtils.helper.js +74 -0
- package/lib/cortexResponse.js +153 -0
- package/lib/entityConstants.js +21 -3
- package/lib/logger.js +21 -4
- package/lib/pathwayTools.js +28 -9
- package/lib/util.js +49 -0
- package/package.json +1 -1
- package/pathways/basePathway.js +1 -0
- package/pathways/bing_afagent.js +54 -1
- package/pathways/call_tools.js +2 -3
- package/pathways/chat_jarvis.js +1 -1
- package/pathways/google_cse.js +27 -0
- package/pathways/grok_live_search.js +18 -0
- package/pathways/system/entity/memory/sys_memory_lookup_required.js +1 -0
- package/pathways/system/entity/memory/sys_memory_required.js +1 -0
- package/pathways/system/entity/memory/sys_search_memory.js +1 -0
- package/pathways/system/entity/sys_entity_agent.js +56 -4
- package/pathways/system/entity/sys_generator_quick.js +1 -0
- package/pathways/system/entity/tools/sys_tool_bing_search_afagent.js +26 -0
- package/pathways/system/entity/tools/sys_tool_google_search.js +141 -0
- package/pathways/system/entity/tools/sys_tool_grok_x_search.js +237 -0
- package/pathways/system/entity/tools/sys_tool_image.js +1 -1
- package/pathways/system/rest_streaming/sys_claude_37_sonnet.js +21 -0
- package/pathways/system/rest_streaming/sys_claude_41_opus.js +21 -0
- package/pathways/system/rest_streaming/sys_claude_4_sonnet.js +21 -0
- package/pathways/system/rest_streaming/sys_google_gemini_25_flash.js +25 -0
- package/pathways/system/rest_streaming/{sys_google_gemini_chat.js → sys_google_gemini_25_pro.js} +6 -4
- package/pathways/system/rest_streaming/sys_grok_4.js +23 -0
- package/pathways/system/rest_streaming/sys_grok_4_fast_non_reasoning.js +23 -0
- package/pathways/system/rest_streaming/sys_grok_4_fast_reasoning.js +23 -0
- package/pathways/system/rest_streaming/sys_openai_chat.js +3 -0
- package/pathways/system/rest_streaming/sys_openai_chat_gpt41.js +22 -0
- package/pathways/system/rest_streaming/sys_openai_chat_gpt41_mini.js +21 -0
- package/pathways/system/rest_streaming/sys_openai_chat_gpt41_nano.js +21 -0
- package/pathways/system/rest_streaming/{sys_claude_35_sonnet.js → sys_openai_chat_gpt4_omni.js} +6 -4
- package/pathways/system/rest_streaming/sys_openai_chat_gpt4_omni_mini.js +21 -0
- package/pathways/system/rest_streaming/{sys_claude_3_haiku.js → sys_openai_chat_gpt5.js} +7 -5
- package/pathways/system/rest_streaming/sys_openai_chat_gpt5_chat.js +21 -0
- package/pathways/system/rest_streaming/sys_openai_chat_gpt5_mini.js +21 -0
- package/pathways/system/rest_streaming/sys_openai_chat_gpt5_nano.js +21 -0
- package/pathways/system/rest_streaming/{sys_openai_chat_o1.js → sys_openai_chat_o3.js} +6 -3
- package/pathways/system/rest_streaming/sys_openai_chat_o3_mini.js +3 -0
- package/pathways/system/workspaces/run_workspace_prompt.js +99 -0
- package/pathways/vision.js +1 -1
- package/server/graphql.js +1 -1
- package/server/modelExecutor.js +8 -0
- package/server/pathwayResolver.js +166 -16
- package/server/pathwayResponseParser.js +16 -8
- package/server/plugins/azureFoundryAgentsPlugin.js +1 -1
- package/server/plugins/claude3VertexPlugin.js +193 -45
- package/server/plugins/gemini15ChatPlugin.js +21 -0
- package/server/plugins/gemini15VisionPlugin.js +360 -0
- package/server/plugins/googleCsePlugin.js +94 -0
- package/server/plugins/grokVisionPlugin.js +365 -0
- package/server/plugins/modelPlugin.js +3 -1
- package/server/plugins/openAiChatPlugin.js +106 -13
- package/server/plugins/openAiVisionPlugin.js +42 -30
- package/server/resolver.js +28 -4
- package/server/rest.js +270 -53
- package/server/typeDef.js +1 -0
- package/tests/{mocks.js → helpers/mocks.js} +5 -2
- package/tests/{server.js → helpers/server.js} +2 -2
- package/tests/helpers/sseAssert.js +23 -0
- package/tests/helpers/sseClient.js +73 -0
- package/tests/helpers/subscriptionAssert.js +11 -0
- package/tests/helpers/subscriptions.js +113 -0
- package/tests/{sublong.srt → integration/features/translate/sublong.srt} +4543 -4543
- package/tests/integration/features/translate/translate_chunking_stream.test.js +100 -0
- package/tests/{translate_srt.test.js → integration/features/translate/translate_srt.test.js} +2 -2
- package/tests/integration/graphql/async/stream/agentic.test.js +477 -0
- package/tests/integration/graphql/async/stream/subscription_streaming.test.js +62 -0
- package/tests/integration/graphql/async/stream/sys_entity_start_streaming.test.js +71 -0
- package/tests/integration/graphql/async/stream/vendors/claude_streaming.test.js +56 -0
- package/tests/integration/graphql/async/stream/vendors/gemini_streaming.test.js +66 -0
- package/tests/integration/graphql/async/stream/vendors/grok_streaming.test.js +56 -0
- package/tests/integration/graphql/async/stream/vendors/openai_streaming.test.js +72 -0
- package/tests/integration/graphql/features/google/sysToolGoogleSearch.test.js +96 -0
- package/tests/integration/graphql/features/grok/grok.test.js +688 -0
- package/tests/integration/graphql/features/grok/grok_x_search_tool.test.js +354 -0
- package/tests/{main.test.js → integration/graphql/features/main.test.js} +1 -1
- package/tests/{call_tools.test.js → integration/graphql/features/tools/call_tools.test.js} +2 -2
- package/tests/{vision.test.js → integration/graphql/features/vision/vision.test.js} +1 -1
- package/tests/integration/graphql/subscriptions/connection.test.js +26 -0
- package/tests/{openai_api.test.js → integration/rest/oai/openai_api.test.js} +63 -238
- package/tests/integration/rest/oai/tool_calling_api.test.js +343 -0
- package/tests/integration/rest/oai/tool_calling_streaming.test.js +85 -0
- package/tests/integration/rest/vendors/claude_streaming.test.js +47 -0
- package/tests/integration/rest/vendors/claude_tool_calling_streaming.test.js +75 -0
- package/tests/integration/rest/vendors/gemini_streaming.test.js +47 -0
- package/tests/integration/rest/vendors/gemini_tool_calling_streaming.test.js +75 -0
- package/tests/integration/rest/vendors/grok_streaming.test.js +55 -0
- package/tests/integration/rest/vendors/grok_tool_calling_streaming.test.js +75 -0
- package/tests/{azureAuthTokenHelper.test.js → unit/core/azureAuthTokenHelper.test.js} +1 -1
- package/tests/{chunkfunction.test.js → unit/core/chunkfunction.test.js} +2 -2
- package/tests/{config.test.js → unit/core/config.test.js} +3 -3
- package/tests/{encodeCache.test.js → unit/core/encodeCache.test.js} +1 -1
- package/tests/{fastLruCache.test.js → unit/core/fastLruCache.test.js} +1 -1
- package/tests/{handleBars.test.js → unit/core/handleBars.test.js} +1 -1
- package/tests/{memoryfunction.test.js → unit/core/memoryfunction.test.js} +2 -2
- package/tests/unit/core/mergeResolver.test.js +952 -0
- package/tests/{parser.test.js → unit/core/parser.test.js} +3 -3
- package/tests/unit/core/pathwayResolver.test.js +187 -0
- package/tests/{requestMonitor.test.js → unit/core/requestMonitor.test.js} +1 -1
- package/tests/{requestMonitorDurationEstimator.test.js → unit/core/requestMonitorDurationEstimator.test.js} +1 -1
- package/tests/{truncateMessages.test.js → unit/core/truncateMessages.test.js} +3 -3
- package/tests/{util.test.js → unit/core/util.test.js} +1 -1
- package/tests/{apptekTranslatePlugin.test.js → unit/plugins/apptekTranslatePlugin.test.js} +3 -3
- package/tests/{azureFoundryAgents.test.js → unit/plugins/azureFoundryAgents.test.js} +136 -1
- package/tests/{claude3VertexPlugin.test.js → unit/plugins/claude3VertexPlugin.test.js} +32 -10
- package/tests/{claude3VertexToolConversion.test.js → unit/plugins/claude3VertexToolConversion.test.js} +3 -3
- package/tests/unit/plugins/googleCsePlugin.test.js +111 -0
- package/tests/unit/plugins/grokVisionPlugin.test.js +1392 -0
- package/tests/{modelPlugin.test.js → unit/plugins/modelPlugin.test.js} +3 -3
- package/tests/{multimodal_conversion.test.js → unit/plugins/multimodal_conversion.test.js} +4 -4
- package/tests/{openAiChatPlugin.test.js → unit/plugins/openAiChatPlugin.test.js} +13 -4
- package/tests/{openAiToolPlugin.test.js → unit/plugins/openAiToolPlugin.test.js} +35 -27
- package/tests/{tokenHandlingTests.test.js → unit/plugins/tokenHandlingTests.test.js} +5 -5
- package/tests/{translate_apptek.test.js → unit/plugins/translate_apptek.test.js} +3 -3
- package/tests/{streaming.test.js → unit/plugins.streaming/plugin_stream_events.test.js} +19 -58
- package/helper-apps/mogrt-handler/tests/test-files/test.gif +0 -1
- package/helper-apps/mogrt-handler/tests/test-files/test.mogrt +0 -1
- package/helper-apps/mogrt-handler/tests/test-files/test.mp4 +0 -1
- package/pathways/system/rest_streaming/sys_openai_chat_gpt4.js +0 -19
- package/pathways/system/rest_streaming/sys_openai_chat_gpt4_32.js +0 -19
- package/pathways/system/rest_streaming/sys_openai_chat_gpt4_turbo.js +0 -19
- package/pathways/system/workspaces/run_claude35_sonnet.js +0 -21
- package/pathways/system/workspaces/run_claude3_haiku.js +0 -20
- package/pathways/system/workspaces/run_gpt35turbo.js +0 -20
- package/pathways/system/workspaces/run_gpt4.js +0 -20
- package/pathways/system/workspaces/run_gpt4_32.js +0 -20
- package/tests/agentic.test.js +0 -256
- package/tests/pathwayResolver.test.js +0 -78
- package/tests/subscription.test.js +0 -387
- /package/tests/{subchunk.srt → integration/features/translate/subchunk.srt} +0 -0
- /package/tests/{subhorizontal.srt → integration/features/translate/subhorizontal.srt} +0 -0
|
@@ -0,0 +1,488 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import json
|
|
3
|
+
import base64
|
|
4
|
+
import logging
|
|
5
|
+
import os
|
|
6
|
+
from typing import Optional, Dict, Any, List
|
|
7
|
+
from autogen_ext.models.openai import OpenAIChatCompletionClient
|
|
8
|
+
from autogen_core.models import ModelInfo # Import ModelInfo
|
|
9
|
+
from autogen_agentchat.teams import SelectorGroupChat
|
|
10
|
+
from autogen_core.models import UserMessage
|
|
11
|
+
from autogen_agentchat.conditions import TextMentionTermination, HandoffTermination
|
|
12
|
+
from services.azure_queue import get_queue_service
|
|
13
|
+
from services.redis_publisher import get_redis_publisher
|
|
14
|
+
from agents import get_agents
|
|
15
|
+
from tools.azure_blob_tools import upload_file_to_azure_blob
|
|
16
|
+
|
|
17
|
+
logger = logging.getLogger(__name__)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class TaskProcessor:
|
|
21
|
+
"""
|
|
22
|
+
Core task processing logic that can be used by both worker and Azure Function App.
|
|
23
|
+
"""
|
|
24
|
+
|
|
25
|
+
def __init__(self):
    """Set up empty slots; the real wiring happens in initialize()."""
    # The three model clients share one lifecycle and start unset.
    self.o3_model_client = None
    self.o4_mini_model_client = None
    self.gpt41_model_client = None
    self.progress_tracker = None       # redis publisher, assigned in initialize()
    self.final_progress_sent = False   # guards the terminal (1.0) update so it fires once
|
|
31
|
+
|
|
32
|
+
async def initialize(self):
    """Initialize model clients and the Redis progress publisher.

    Loads a local .env (if present), reads CORTEX_API_KEY /
    CORTEX_API_BASE_URL from the environment, and builds one
    OpenAIChatCompletionClient per model this processor uses.
    """
    # dotenv is a runtime dependency of the worker; kept as a local
    # import so the module can be imported without it installed.
    from dotenv import load_dotenv
    load_dotenv()

    api_key = os.getenv("CORTEX_API_KEY")
    base_url = os.getenv("CORTEX_API_BASE_URL", "http://host.docker.internal:4000/v1")

    def _make_client(model: str, display_name: str, max_tokens: int) -> OpenAIChatCompletionClient:
        # All Cortex-proxied models share the same capability flags;
        # cost_per_token is a placeholder (billing is handled upstream).
        info = ModelInfo(
            model=model,
            name=display_name,
            max_tokens=max_tokens,
            cost_per_token=0.0,
            vision=False,
            function_calling=True,
            json_output=False,
            family="openai",
            structured_output=False,
        )
        return OpenAIChatCompletionClient(
            model=model,
            api_key=api_key,
            base_url=base_url,
            timeout=600,  # long timeout: agent turns can be slow
            model_info=info,
        )

    self.o3_model_client = _make_client("o3", "Cortex o3", 8192)
    self.o4_mini_model_client = _make_client("o4-mini", "Cortex o4-mini", 128000)
    self.gpt41_model_client = _make_client("gpt-4.1", "Cortex gpt-4.1", 8192)

    # Redis-backed publisher used for all user-facing progress updates.
    self.progress_tracker = await get_redis_publisher()
|
|
71
|
+
|
|
72
|
+
async def summarize_progress(self, content: str, message_type: Optional[str] = None, source: Optional[str] = None) -> Optional[str]:
    """Summarize progress content for display with intelligent filtering.

    Returns a short, emoji-prefixed progress line produced by the
    gpt-4.1 client, or None when the content is filtered out or any
    error occurs (best-effort: progress must never break the task).
    """
    try:
        # Filter out technical/internal messages that shouldn't be shown to users
        if self._should_skip_progress_update(content, message_type, source):
            return None

        # Clean and prepare content for summarization
        cleaned_content = self._clean_content_for_progress(content, message_type, source)
        if not cleaned_content:
            return None

        prompt = f"""Generate a concise, engaging, and user-friendly progress update (5-15 words) that clearly indicates what the AI is currently working on. Include an appropriate emoji.

Context: This is for a user-facing progress indicator in a React app.

Current Activity: {cleaned_content}
Agent Source: {source if source else "Unknown"}

Requirements:
- Be positive and professional
- Focus on what the user will benefit from
- Avoid technical jargon
- Use engaging, action-oriented language
- Include a relevant emoji
- Consider the agent source to provide context (e.g., coder_agent = coding, presenter_agent = creating presentation)

Examples of good updates:
- "🔍 Researching the latest trends"
- "📊 Analyzing data patterns"
- "🎨 Creating visual content"
- "📝 Compiling your report"
- "🚀 Finalizing results"
- "💻 Writing code for your request"
- "☁️ Uploading files to cloud storage"

Bad examples (avoid):
- "Task terminated"
- "Processing internal data"
- "Executing tool calls"
- "TERMINATE"

Generate only the progress update:"""

        messages = [UserMessage(content=str(prompt), source="summarize_progress_function")]

        response = await self.gpt41_model_client.create(messages=messages)
        return response.content.strip()
    except Exception as e:
        # Use the module logger (not the root logger) for consistency
        # with the rest of this module.
        logger.error(f"Error in summarize_progress: {e}")
        return None
|
|
123
|
+
|
|
124
|
+
def _should_skip_progress_update(self, content: str, message_type: Optional[str] = None, source: Optional[str] = None) -> bool:
    """Return True when a progress update should not be surfaced to the user.

    Skips empty content, termination markers, raw tool-execution events,
    terminator-agent chatter, and content that parses as JSON (treated
    as technical payload rather than user-facing text).
    """
    if not content:
        return True

    content_str = str(content).strip().upper()

    # After strip(), an all-whitespace input is already the empty string,
    # so a single emptiness check suffices.
    if not content_str:
        return True

    # Skip anything carrying the termination marker (covers the exact
    # "TERMINATE" message as well as messages that embed it).
    if "TERMINATE" in content_str:
        return True

    # Skip technical tool execution messages
    if message_type == "ToolCallExecutionEvent":
        return True

    # Skip messages from terminator agent
    if source == "terminator_agent":
        return True

    # Skip JSON responses that are just data. NOTE: this also skips bare
    # numbers like "42", which are valid JSON scalars.
    try:
        json.loads(content_str)
        return True
    except json.JSONDecodeError:
        # Not JSON — fall through and allow the update.
        pass

    return False
|
|
156
|
+
|
|
157
|
+
def _clean_content_for_progress(self, content: str, message_type: str = None, source: str = None) -> str:
    """Strip technical marker strings from *content* before summarization.

    Returns the cleaned text, or None when nothing meaningful remains.
    """
    if not content:
        return None

    # Markers that indicate internal/technical chatter rather than
    # user-relevant activity; each is removed wherever it appears.
    markers = (
        "TERMINATE",
        "TASK NOT COMPLETED:",
        "Error:",
        "Warning:",
        "DEBUG:",
        "INFO:",
        "Tool call:",
        "Function call:",
    )

    text = str(content)
    for marker in markers:
        # Drop the marker everywhere, trimming whitespace after each pass.
        text = text.replace(marker, "").strip()

    # Anything shorter than 10 characters is too thin to summarize.
    return text if len(text) >= 10 else None
|
|
185
|
+
|
|
186
|
+
async def handle_progress_update(self, task_id: str, percentage: float, content: str, message_type: str = None, source: str = None):
    """Summarize *content* and, when it yields something meaningful, publish it."""
    summary = await self.summarize_progress(content, message_type, source)
    if not summary:
        # Nothing user-facing to report; stay silent.
        return
    await self.progress_tracker.publish_progress(task_id, percentage, summary)
|
|
193
|
+
|
|
194
|
+
async def publish_final(self, task_id: str, message: str, data: Any = None) -> None:
    """Emit the terminal (1.0) progress update exactly once; failures are logged."""
    if self.final_progress_sent:
        return  # terminal update already delivered
    try:
        if not self.progress_tracker:
            return
        # When no explicit payload is supplied, reuse the message itself.
        payload = data if data is not None else message
        await self.progress_tracker.publish_progress(task_id, 1.0, message, data=payload)
        self.final_progress_sent = True
    except Exception as e:
        logger.error(f"❌ Failed to publish final progress for task_id={task_id}: {e}")
|
|
205
|
+
|
|
206
|
+
async def process_task(self, task_id: str, task_content: str) -> str:
    """Run a full agent-team session for one task and return the presenter's final text.

    Flow: publish an initial progress update, stream a SelectorGroupChat run
    while mining messages for uploaded-file URLs, optionally auto-upload recent
    deliverables from the work dir, then have the presenter agent format the
    final Markdown answer and publish the terminal progress update.

    Re-raises any exception after publishing a terminal failure message.
    """
    try:
        # Progress starts at 5% and creeps up 1% per streamed message below.
        task_completed_percentage = 0.05
        task = task_content

        # Send initial progress update
        await self.progress_tracker.publish_progress(task_id, 0.05, "🚀 Starting your task...")

        # Stop when an agent hands off to the user or says TERMINATE.
        termination = HandoffTermination(target="user") | TextMentionTermination("TERMINATE")

        agents, presenter_agent = await get_agents(
            self.gpt41_model_client,
            self.o3_model_client,
            self.gpt41_model_client
        )

        team = SelectorGroupChat(
            participants=agents,
            model_client=self.gpt41_model_client,
            termination_condition=termination,
            max_turns=10000
        )

        messages = []
        uploaded_file_urls = {}       # blob_name -> download_url harvested from the stream
        final_result_content = []     # raw agent output, fed to the presenter later

        # NOTE(review): detailed_task is built but never used — team.run_stream()
        # below receives the raw `task`. Confirm whether detailed_task was
        # intended to be passed instead.
        detailed_task = f"""
Accomplish and present your task to the user in a great way, Markdown, it ll be shown in a React app that supports markdown.
Task:
{task}
"""

        stream = team.run_stream(task=task)
        async for message in stream:
            messages.append(message)
            # Stream items are heterogeneous; probe attributes defensively.
            source = message.source if hasattr(message, 'source') else None
            content = message.content if hasattr(message, 'content') else None
            created_at = message.created_at if hasattr(message, 'created_at') else None
            logger.info(f"\n\n#SOURCE: {source}\n#CONTENT: {content}\n#CREATED_AT: {created_at}\n")

            # Advance progress per message, capped below 1.0 so the terminal
            # update is always the first 1.0 the client sees.
            task_completed_percentage += 0.01
            if task_completed_percentage >= 1.0:
                task_completed_percentage = 0.99

            if content:
                processed_content_for_progress = content
                # For tool-execution events, surface error outputs if any,
                # otherwise the stringified result list.
                # NOTE(review): message.type is accessed without a hasattr
                # guard, unlike source/content above — confirm all stream
                # item types expose .type.
                if message.type == "ToolCallExecutionEvent" and hasattr(message, 'content') and isinstance(message.content, list):
                    error_contents = [res.content for res in message.content if hasattr(res, 'is_error') and res.is_error]
                    if error_contents:
                        processed_content_for_progress = "\n".join(error_contents)
                    else:
                        processed_content_for_progress = str(message.content)

                # Harvest upload results (from the blob-upload tool) out of any
                # JSON-shaped message content: either a single object or a list.
                if isinstance(content, str):
                    try:
                        json_content = json.loads(content)
                        if isinstance(json_content, dict):
                            if "download_url" in json_content and "blob_name" in json_content:
                                uploaded_file_urls[json_content["blob_name"]] = json_content["download_url"]
                                final_result_content.append(f"Uploaded file: [{json_content['blob_name']}]({json_content['download_url']})")
                        elif isinstance(json_content, list):
                            for item in json_content:
                                if isinstance(item, dict) and "download_url" in item and "blob_name" in item:
                                    uploaded_file_urls[item["blob_name"]] = item["download_url"]
                                    final_result_content.append(f"Uploaded file: [{item['blob_name']}]({item['download_url']})")
                        # otherwise, ignore scalars like numbers/strings
                    except json.JSONDecodeError:
                        pass

                final_result_content.append(str(content))
                # Fire-and-forget: progress summarization must not block the
                # stream. NOTE(review): the Task reference is not retained;
                # per asyncio docs an unreferenced task may be garbage-collected
                # before completion — confirm this is acceptable here.
                asyncio.create_task(self.handle_progress_update(task_id, task_completed_percentage, processed_content_for_progress, message.type, source))

        await self.progress_tracker.publish_progress(task_id, 0.95, "✨ Finalizing your results...")

        # Targeted auto-upload: if no URLs yet, opportunistically upload recent deliverables created in this run.
        # Fast, non-recursive, and limited to known dirs and extensions.
        try:
            if not uploaded_file_urls:
                import time
                now = time.time()
                max_age_seconds = 15 * 60  # last 15 minutes
                deliverable_exts = {".pptx", ".ppt", ".csv", ".png", ".jpg", ".jpeg", ".pdf"}
                candidate_dirs: List[str] = []
                try:
                    wd = os.getenv("CORTEX_WORK_DIR", "/tmp/coding")
                    # In Azure Functions, prefer /tmp for write access
                    if os.getenv("WEBSITE_INSTANCE_ID") and wd.startswith("/app/"):
                        wd = "/tmp/coding"
                    candidate_dirs.append(wd)
                except Exception:
                    pass
                # Fallback dir; may duplicate wd, which is harmless (same files
                # just get re-listed).
                candidate_dirs.append("/tmp/coding")

                recent_files: List[str] = []
                for d in candidate_dirs:
                    if not d:
                        continue
                    # Ensure directory exists if possible
                    try:
                        os.makedirs(d, exist_ok=True)
                    except Exception:
                        pass
                    if not os.path.isdir(d):
                        continue
                    try:
                        for name in os.listdir(d):
                            fp = os.path.join(d, name)
                            if not os.path.isfile(fp):
                                continue
                            _, ext = os.path.splitext(name)
                            if ext.lower() not in deliverable_exts:
                                continue
                            try:
                                mtime = os.path.getmtime(fp)
                                if now - mtime <= max_age_seconds:
                                    recent_files.append(fp)
                            except Exception:
                                continue
                    except Exception:
                        continue

                # Sort newest first and cap to a few uploads to keep fast
                recent_files.sort(key=lambda p: os.path.getmtime(p), reverse=True)
                recent_files = recent_files[:5]

                for fp in recent_files:
                    try:
                        # upload_file_to_azure_blob returns a JSON string
                        # describing the uploaded blob (per its usage here).
                        up_json = upload_file_to_azure_blob(fp, blob_name=None)
                        up = json.loads(up_json)
                        if "download_url" in up and "blob_name" in up:
                            uploaded_file_urls[up["blob_name"]] = up["download_url"]
                            final_result_content.append(f"Uploaded file: [{up['blob_name']}]({up['download_url']})")
                    except Exception:
                        continue
        except Exception:
            # Auto-upload is strictly best-effort; never fail the task for it.
            pass

        result_limited_to_fit = "\n".join(final_result_content)

        presenter_task = f"""
Present the task result in a great way, Markdown, it'll be shown in a React app that supports markdown that doesn't have access to your local files.
Make sure to use all the info you have, do not miss any info.
Make sure to have images, videos, etc. users love them.
UI must be professional that is really important.

TASK:

{task}

RAW_AGENT_COMMUNICATIONS:

{result_limited_to_fit}

UPLOADED_FILES_SAS_URLS:

{json.dumps(uploaded_file_urls, indent=2)}

**CRITICAL INSTRUCTION: Analyze the RAW_AGENT_COMMUNICATIONS above. Your ONLY goal is to extract and present the final, user-facing result requested in the TASK. Absolutely DO NOT include any code, internal agent thought processes, tool calls, technical logs, or descriptions of how the task was accomplished. Focus solely on delivering the ANSWER to the user's original request in a clear, professional, and visually appealing Markdown format. If the task was to create a file, you MUST ONLY use download URLs found in UPLOADED_FILES_SAS_URLS. DO NOT fabricate, guess, or link to any external or placeholder URLs. If no uploaded URLs exist, say so and present the results without a download link. Remove all extraneous information.**
"""

        presenter_stream = presenter_agent.run_stream(task=presenter_task)
        presenter_messages = []
        async for message in presenter_stream:
            logger.info(f"#PRESENTER MESSAGE: {message.content if hasattr(message, 'content') else ''}")
            presenter_messages.append(message)

        # The last streamed item is the run result; its .messages holds the
        # conversation, whose final entry carries the presenter's answer.
        task_result = presenter_messages[-1]
        last_message = task_result.messages[-1]
        text_result = last_message.content if hasattr(last_message, 'content') else None

        # Safety check: if presenter fabricated an external link while uploaded_file_urls is empty, replace with explicit notice
        try:
            if not uploaded_file_urls and isinstance(text_result, str):
                # naive pattern for http links
                import re
                if re.search(r"https?://", text_result):
                    logger.warning("Presenter output contains a link but no uploaded URLs exist. Rewriting to prevent hallucinated links.")
                    # Only Markdown-style "(url)" targets are rewritten; bare
                    # URLs outside parentheses are left as-is.
                    text_result = re.sub(r"\(https?://[^)]+\)", "(Download not available)", text_result)
        except Exception:
            pass

        logger.info(f"🔍 TASK RESULT:\n{text_result}")
        final_data = text_result or "🎉 Your task is complete!"
        await self.progress_tracker.publish_progress(task_id, 1.0, "🎉 Your task is complete!", data=final_data)
        self.final_progress_sent = True

        return text_result
    except Exception as e:
        logger.error(f"❌ Error during process_task for {task_id}: {e}", exc_info=True)
        # Guaranteed terminal update so clients stop waiting, then re-raise
        # for the caller/queue to handle.
        await self.publish_final(task_id, "❌ We hit an issue while working on your request. Processing has ended.")
        raise
|
|
399
|
+
|
|
400
|
+
async def close(self):
|
|
401
|
+
"""Close all connections gracefully."""
|
|
402
|
+
clients_to_close = [
|
|
403
|
+
self.o3_model_client,
|
|
404
|
+
self.o4_mini_model_client,
|
|
405
|
+
self.gpt41_model_client
|
|
406
|
+
]
|
|
407
|
+
|
|
408
|
+
for client in clients_to_close:
|
|
409
|
+
model_name = "unknown_model"
|
|
410
|
+
try:
|
|
411
|
+
if hasattr(client, 'model'):
|
|
412
|
+
model_name = client.model
|
|
413
|
+
elif hasattr(client, '__class__'):
|
|
414
|
+
model_name = client.__class__.__name__
|
|
415
|
+
except Exception:
|
|
416
|
+
pass
|
|
417
|
+
|
|
418
|
+
try:
|
|
419
|
+
logger.info(f"🔌 Attempting to close client session for {model_name}.")
|
|
420
|
+
if client:
|
|
421
|
+
await client.close()
|
|
422
|
+
logger.info(f"🔌 Successfully closed client session for {model_name}.")
|
|
423
|
+
except Exception as e:
|
|
424
|
+
logger.error(f"❌ Error closing client session for {model_name}: {e}")
|
|
425
|
+
|
|
426
|
+
if self.progress_tracker:
|
|
427
|
+
await self.progress_tracker.close()
|
|
428
|
+
logger.info("🔌 Connections closed.")
|
|
429
|
+
|
|
430
|
+
|
|
431
|
+
async def process_queue_message(message_data: Dict[str, Any]) -> Optional[str]:
    """
    Process a single queue message and return the result.
    This is the main entry point for Azure Function App.
    """
    processor = TaskProcessor()
    try:
        task_id = message_data.get("id")
        await processor.initialize()

        # The payload may arrive under either key depending on the sender.
        raw_content = message_data.get("content") or message_data.get("message")
        if not raw_content:
            logger.error(f"❌ Message has no content: {message_data}")
            # Ensure terminal progress on empty content
            await processor.publish_final(task_id or "", "⚠️ Received an empty task. Processing has ended.")
            return None

        logger.debug(f"🔍 DEBUG: process_queue_message - Raw content received (first 100 chars): {raw_content[:100]}...")

        # Preferred shape: base64-encoded JSON (Azure Storage Queue default).
        try:
            task_data = json.loads(base64.b64decode(raw_content).decode('utf-8'))
            logger.debug(f"🔍 DEBUG: process_queue_message - Successfully base64 decoded and JSON parsed. Keys: {list(task_data.keys())}")
        except (json.JSONDecodeError, TypeError, ValueError) as decode_err:
            # Fallback shape: the payload is already plain JSON text.
            logger.warning(f"⚠️ Failed to decode as base64, trying as raw JSON: {decode_err}")
            try:
                task_data = json.loads(raw_content)
                logger.debug(f"🔍 DEBUG: process_queue_message - Successfully JSON parsed raw content. Keys: {list(task_data.keys())}")
            except json.JSONDecodeError as parse_err:
                logger.error(f"❌ Failed to parse message content as JSON after both attempts for message ID {task_id}: {parse_err}", exc_info=True)
                await processor.publish_final(task_id or "", "❌ Invalid task format received. Processing has ended.")
                return None

        # Again, accept either key for the actual task text.
        task_content = task_data.get("message") or task_data.get("content")
        if not task_content:
            logger.error(f"❌ No valid task content (key 'message' or 'content') found in parsed data for message ID {task_id}: {task_data}")
            await processor.publish_final(task_id or "", "⚠️ No actionable task content found. Processing has ended.")
            return None

        logger.debug(f"🔍 DEBUG: process_queue_message - Extracted task_content (first 100 chars): {task_content[:100]}...")
        logger.info(f"📩 Processing task: {task_content[:100]}...")

        return await processor.process_task(task_id, task_content)

    except Exception as e:
        logger.error(f"❌ Error processing task: {e}", exc_info=True)
        # Try to ensure a final progress is published even if initialization or processing failed
        try:
            if processor.progress_tracker is None:
                processor.progress_tracker = await get_redis_publisher()
            await processor.publish_final(message_data.get("id") or "", "❌ Task ended due to an unexpected error.")
        except Exception as publish_error:
            logger.error(f"❌ Failed to publish final error progress in exception handler: {publish_error}")
        raise
    finally:
        await processor.close()
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Tools package for Cortex-AutoGen2
|
|
3
|
+
Contains various tool modules for agent capabilities.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from .search_tools import web_search, image_search, combined_search, fetch_webpage, collect_task_images
|
|
7
|
+
from .coding_tools import execute_code
|
|
8
|
+
from .azure_blob_tools import upload_file_to_azure_blob
|
|
9
|
+
from .file_tools import list_files_in_work_dir, read_file_from_work_dir, get_file_info, create_file, download_image
|
|
10
|
+
|
|
11
|
+
__all__ = [
|
|
12
|
+
"web_search",
|
|
13
|
+
"image_search",
|
|
14
|
+
"combined_search",
|
|
15
|
+
"fetch_webpage",
|
|
16
|
+
"collect_task_images",
|
|
17
|
+
"execute_code",
|
|
18
|
+
"upload_file_to_azure_blob",
|
|
19
|
+
"list_files_in_work_dir",
|
|
20
|
+
"read_file_from_work_dir",
|
|
21
|
+
"get_file_info",
|
|
22
|
+
"create_file",
|
|
23
|
+
"download_image",
|
|
24
|
+
]
|