camel-ai 0.2.73a4__py3-none-any.whl → 0.2.80a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -1
- camel/agents/_utils.py +38 -0
- camel/agents/chat_agent.py +2217 -519
- camel/agents/mcp_agent.py +30 -27
- camel/configs/__init__.py +15 -0
- camel/configs/aihubmix_config.py +88 -0
- camel/configs/amd_config.py +70 -0
- camel/configs/cometapi_config.py +104 -0
- camel/configs/minimax_config.py +93 -0
- camel/configs/nebius_config.py +103 -0
- camel/data_collectors/alpaca_collector.py +15 -6
- camel/datasets/base_generator.py +39 -10
- camel/environments/single_step.py +28 -3
- camel/environments/tic_tac_toe.py +1 -1
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/docker/Dockerfile +3 -12
- camel/interpreters/e2b_interpreter.py +34 -1
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/loaders/__init__.py +11 -2
- camel/loaders/chunkr_reader.py +9 -0
- camel/memories/agent_memories.py +48 -4
- camel/memories/base.py +26 -0
- camel/memories/blocks/chat_history_block.py +122 -4
- camel/memories/context_creators/score_based.py +25 -384
- camel/memories/records.py +88 -8
- camel/messages/base.py +153 -34
- camel/models/__init__.py +10 -0
- camel/models/aihubmix_model.py +83 -0
- camel/models/aiml_model.py +1 -16
- camel/models/amd_model.py +101 -0
- camel/models/anthropic_model.py +6 -19
- camel/models/aws_bedrock_model.py +2 -33
- camel/models/azure_openai_model.py +114 -89
- camel/models/base_audio_model.py +3 -1
- camel/models/base_model.py +32 -14
- camel/models/cohere_model.py +1 -16
- camel/models/cometapi_model.py +83 -0
- camel/models/crynux_model.py +1 -16
- camel/models/deepseek_model.py +1 -16
- camel/models/fish_audio_model.py +6 -0
- camel/models/gemini_model.py +36 -18
- camel/models/groq_model.py +1 -17
- camel/models/internlm_model.py +1 -16
- camel/models/litellm_model.py +1 -16
- camel/models/lmstudio_model.py +1 -17
- camel/models/minimax_model.py +83 -0
- camel/models/mistral_model.py +1 -16
- camel/models/model_factory.py +27 -1
- camel/models/modelscope_model.py +1 -16
- camel/models/moonshot_model.py +105 -24
- camel/models/nebius_model.py +83 -0
- camel/models/nemotron_model.py +0 -5
- camel/models/netmind_model.py +1 -16
- camel/models/novita_model.py +1 -16
- camel/models/nvidia_model.py +1 -16
- camel/models/ollama_model.py +4 -19
- camel/models/openai_compatible_model.py +62 -41
- camel/models/openai_model.py +62 -57
- camel/models/openrouter_model.py +1 -17
- camel/models/ppio_model.py +1 -16
- camel/models/qianfan_model.py +1 -16
- camel/models/qwen_model.py +1 -16
- camel/models/reka_model.py +1 -16
- camel/models/samba_model.py +34 -47
- camel/models/sglang_model.py +64 -31
- camel/models/siliconflow_model.py +1 -16
- camel/models/stub_model.py +0 -4
- camel/models/togetherai_model.py +1 -16
- camel/models/vllm_model.py +1 -16
- camel/models/volcano_model.py +0 -17
- camel/models/watsonx_model.py +1 -16
- camel/models/yi_model.py +1 -16
- camel/models/zhipuai_model.py +60 -16
- camel/parsers/__init__.py +18 -0
- camel/parsers/mcp_tool_call_parser.py +176 -0
- camel/retrievers/auto_retriever.py +1 -0
- camel/runtimes/daytona_runtime.py +11 -12
- camel/societies/__init__.py +2 -0
- camel/societies/workforce/__init__.py +2 -0
- camel/societies/workforce/events.py +122 -0
- camel/societies/workforce/prompts.py +146 -66
- camel/societies/workforce/role_playing_worker.py +15 -11
- camel/societies/workforce/single_agent_worker.py +302 -65
- camel/societies/workforce/structured_output_handler.py +30 -18
- camel/societies/workforce/task_channel.py +163 -27
- camel/societies/workforce/utils.py +107 -13
- camel/societies/workforce/workflow_memory_manager.py +772 -0
- camel/societies/workforce/workforce.py +1949 -579
- camel/societies/workforce/workforce_callback.py +74 -0
- camel/societies/workforce/workforce_logger.py +168 -145
- camel/societies/workforce/workforce_metrics.py +33 -0
- camel/storages/key_value_storages/json.py +15 -2
- camel/storages/key_value_storages/mem0_cloud.py +48 -47
- camel/storages/object_storages/google_cloud.py +1 -1
- camel/storages/vectordb_storages/oceanbase.py +13 -13
- camel/storages/vectordb_storages/qdrant.py +3 -3
- camel/storages/vectordb_storages/tidb.py +8 -6
- camel/tasks/task.py +4 -3
- camel/toolkits/__init__.py +20 -7
- camel/toolkits/aci_toolkit.py +45 -0
- camel/toolkits/base.py +6 -4
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/context_summarizer_toolkit.py +684 -0
- camel/toolkits/dappier_toolkit.py +5 -1
- camel/toolkits/dingtalk.py +1135 -0
- camel/toolkits/edgeone_pages_mcp_toolkit.py +11 -31
- camel/toolkits/excel_toolkit.py +1 -1
- camel/toolkits/{file_write_toolkit.py → file_toolkit.py} +430 -36
- camel/toolkits/function_tool.py +13 -3
- camel/toolkits/github_toolkit.py +104 -17
- camel/toolkits/gmail_toolkit.py +1839 -0
- camel/toolkits/google_calendar_toolkit.py +38 -4
- camel/toolkits/google_drive_mcp_toolkit.py +12 -31
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +15 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +77 -8
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +884 -88
- camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +5 -612
- camel/toolkits/hybrid_browser_toolkit/ts/package.json +0 -1
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +959 -89
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +9 -2
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +281 -213
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +23 -3
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +72 -7
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +582 -132
- camel/toolkits/hybrid_browser_toolkit_py/actions.py +158 -0
- camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +55 -8
- camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +43 -0
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +321 -8
- camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +10 -4
- camel/toolkits/hybrid_browser_toolkit_py/unified_analyzer.js +45 -4
- camel/toolkits/{openai_image_toolkit.py → image_generation_toolkit.py} +151 -53
- camel/toolkits/klavis_toolkit.py +5 -1
- camel/toolkits/markitdown_toolkit.py +27 -1
- camel/toolkits/math_toolkit.py +64 -10
- camel/toolkits/mcp_toolkit.py +366 -71
- camel/toolkits/memory_toolkit.py +5 -1
- camel/toolkits/message_integration.py +18 -13
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/note_taking_toolkit.py +19 -10
- camel/toolkits/notion_mcp_toolkit.py +16 -26
- camel/toolkits/openbb_toolkit.py +5 -1
- camel/toolkits/origene_mcp_toolkit.py +8 -49
- camel/toolkits/playwright_mcp_toolkit.py +12 -31
- camel/toolkits/resend_toolkit.py +168 -0
- camel/toolkits/search_toolkit.py +264 -91
- camel/toolkits/slack_toolkit.py +64 -10
- camel/toolkits/terminal_toolkit/__init__.py +18 -0
- camel/toolkits/terminal_toolkit/terminal_toolkit.py +957 -0
- camel/toolkits/terminal_toolkit/utils.py +532 -0
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +17 -11
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/toolkits/zapier_toolkit.py +5 -1
- camel/types/__init__.py +2 -2
- camel/types/enums.py +274 -7
- camel/types/openai_types.py +2 -2
- camel/types/unified_model_type.py +15 -0
- camel/utils/commons.py +36 -5
- camel/utils/constants.py +3 -0
- camel/utils/context_utils.py +1003 -0
- camel/utils/mcp.py +138 -4
- camel/utils/token_counting.py +43 -20
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/METADATA +223 -83
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/RECORD +170 -141
- camel/loaders/pandas_reader.py +0 -368
- camel/toolkits/openai_agent_toolkit.py +0 -135
- camel/toolkits/terminal_toolkit.py +0 -1550
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/licenses/LICENSE +0 -0
camel/societies/workforce/structured_output_handler.py

@@ -19,8 +19,8 @@ from pydantic import BaseModel, ValidationError
 
 from camel.logger import get_logger
 from camel.societies.workforce.utils import (
-    RecoveryDecision,
     RecoveryStrategy,
+    TaskAnalysisResult,
     TaskAssignResult,
     WorkerConf,
 )
@@ -65,9 +65,9 @@ class StructuredOutputHandler:
                 r'description.*?:\s*"([^"]+)"'
             ),
         ],
-        'RecoveryDecision': [
-            r'"strategy"\s*:\s*"([^"]+)".*?"reasoning"\s*:\s*"([^"]+)"',
-            r'strategy.*?:\s*"([^"]+)".*?reasoning.*?:\s*"([^"]+)"',
+        'TaskAnalysisResult': [
+            r'"recovery_strategy"\s*:\s*"([^"]+)".*?"reasoning"\s*:\s*"([^"]+)"',
+            r'recovery_strategy.*?:\s*"([^"]+)".*?reasoning.*?:\s*"([^"]+)"',
         ],
     }
 
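
The two fallback patterns registered above let the handler salvage a decision from model output that is not valid JSON. A minimal standalone sketch of the first pattern in action (the sample reply string is hypothetical, not CAMEL code):

import re

# First 'TaskAnalysisResult' fallback pattern, verbatim from the hunk above.
PATTERN = r'"recovery_strategy"\s*:\s*"([^"]+)".*?"reasoning"\s*:\s*"([^"]+)"'

# A truncated reply that json.loads() would reject.
reply = '{"recovery_strategy": "retry", "reasoning": "transient tool error", "modi'

match = re.search(PATTERN, reply, re.DOTALL | re.IGNORECASE)
assert match is not None
print(match.group(1))  # -> retry
print(match.group(2))  # -> transient tool error
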
@@ -239,12 +239,12 @@ Ensure the JSON is valid and properly formatted.
                     except (IndexError, AttributeError):
                         continue
 
-        elif schema_name == 'RecoveryDecision':
+        elif schema_name == 'TaskAnalysisResult':
             for pattern in patterns:
                 match = re.search(pattern, text, re.DOTALL | re.IGNORECASE)
                 if match:
                     try:
-                        strategy = match.group(1)
+                        recovery_strategy = match.group(1)
                         reasoning = match.group(2)
                         # Look for modified_task_content
                         content_match = re.search(
@@ -252,14 +252,25 @@ Ensure the JSON is valid and properly formatted.
                             text,
                             re.IGNORECASE,
                         )
+                        # Look for quality_score (for quality evaluation)
+                        score_match = re.search(
+                            r'"quality_score"\s*:\s*(\d+)',
+                            text,
+                            re.IGNORECASE,
+                        )
                         return {
-                            'strategy': strategy,
+                            'recovery_strategy': recovery_strategy,
                             'reasoning': reasoning,
                             'modified_task_content': (
                                 content_match.group(1)
                                 if content_match
                                 else None
                             ),
+                            'quality_score': (
+                                int(score_match.group(1))
+                                if score_match
+                                else None
+                            ),
                         }
                     except (IndexError, AttributeError):
                         continue
@@ -370,21 +381,22 @@ Ensure the JSON is valid and properly formatted.
                 else:
                     assignment['dependencies'] = []
 
-        elif schema_name == 'RecoveryDecision':
-            # Ensure strategy is valid
-            if 'strategy' in fixed_data:
-                strategy = fixed_data['strategy'].lower()
+        elif schema_name == 'TaskAnalysisResult':
+            # Ensure recovery_strategy is valid
+            if 'recovery_strategy' in fixed_data:
+                strategy = fixed_data['recovery_strategy'].lower()
                 valid_strategies = [
                     'retry',
                     'replan',
                     'decompose',
                     'create_worker',
+                    'reassign',
                 ]
                 if strategy not in valid_strategies:
                     # Try to match partial
                     for valid in valid_strategies:
                         if valid.startswith(strategy) or strategy in valid:
-                            fixed_data['strategy'] = valid
+                            fixed_data['recovery_strategy'] = valid
                             break
 
         return fixed_data
@@ -410,10 +422,10 @@ Ensure the JSON is valid and properly formatted.
                 sys_msg="You are a helpful assistant.",
                 description="A general-purpose worker",
             )
-        elif schema_name == 'RecoveryDecision':
-            return RecoveryDecision(
-                strategy=RecoveryStrategy.RETRY,
+        elif schema_name == 'TaskAnalysisResult':
+            return TaskAnalysisResult(
                 reasoning="Unable to parse response, defaulting to retry",
+                recovery_strategy=RecoveryStrategy.RETRY,
                 modified_task_content=None,
             )
         else:
@@ -482,11 +494,11 @@ Ensure the JSON is valid and properly formatted.
                 description=f"Fallback worker for task: {task_content}...",
             )
 
-        elif schema_name == 'RecoveryDecision':
+        elif schema_name == 'TaskAnalysisResult':
             # Default to retry strategy
-            return RecoveryDecision(
-                strategy=RecoveryStrategy.RETRY,
+            return TaskAnalysisResult(
                 reasoning=f"Fallback decision due to: {error_message}",
+                recovery_strategy=RecoveryStrategy.RETRY,
                 modified_task_content=None,
             )
 
camel/societies/workforce/task_channel.py

@@ -12,11 +12,15 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import asyncio
+from collections import defaultdict, deque
 from enum import Enum
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, Set
 
+from camel.logger import get_logger
 from camel.tasks import Task
 
+logger = get_logger(__name__)
+
 
 class PacketStatus(Enum):
     r"""The status of a packet. The packet can be in one of the following
@@ -79,27 +83,92 @@ class Packet:
 
 
 class TaskChannel:
-    r"""An internal class used by Workforce to manage tasks."""
+    r"""An internal class used by Workforce to manage tasks.
+
+    This implementation uses a hybrid data structure approach:
+    - Hash map (_task_dict) for O(1) task lookup by ID
+    - Status-based index (_task_by_status) for efficient filtering by status
+    - Assignee/publisher queues for ordered task processing
+    """
 
     def __init__(self) -> None:
         self._condition = asyncio.Condition()
         self._task_dict: Dict[str, Packet] = {}
 
+        self._task_by_status: Dict[PacketStatus, Set[str]] = defaultdict(set)
+
+        # task by assignee store which are sent to
+        self._task_by_assignee: Dict[str, deque[str]] = defaultdict(deque)
+
+        self._task_by_publisher: Dict[str, deque[str]] = defaultdict(deque)
+
+    def _update_task_status(
+        self, task_id: str, new_status: PacketStatus
+    ) -> None:
+        r"""Helper method to properly update task status in all indexes."""
+        if task_id not in self._task_dict:
+            return
+
+        packet = self._task_dict[task_id]
+        old_status = packet.status
+
+        if old_status in self._task_by_status:
+            self._task_by_status[old_status].discard(task_id)
+
+        packet.status = new_status
+
+        self._task_by_status[new_status].add(task_id)
+
+    def _cleanup_task_from_indexes(self, task_id: str) -> None:
+        r"""Helper method to remove a task from all indexes.
+
+        Args:
+            task_id (str): The ID of the task to remove from indexes.
+        """
+        if task_id not in self._task_dict:
+            return
+
+        packet = self._task_dict[task_id]
+
+        if packet.status in self._task_by_status:
+            self._task_by_status[packet.status].discard(task_id)
+
+        if packet.assignee_id and packet.assignee_id in self._task_by_assignee:
+            assignee_queue = self._task_by_assignee[packet.assignee_id]
+            self._task_by_assignee[packet.assignee_id] = deque(
+                task for task in assignee_queue if task != task_id
+            )
+
+        if packet.publisher_id in self._task_by_publisher:
+            publisher_queue = self._task_by_publisher[packet.publisher_id]
+            self._task_by_publisher[packet.publisher_id] = deque(
+                task for task in publisher_queue if task != task_id
+            )
+
     async def get_returned_task_by_publisher(self, publisher_id: str) -> Task:
         r"""Get a task from the channel that has been returned by the
         publisher.
         """
         async with self._condition:
             while True:
-
-
-
-
-
-
-
-
-
+                task_ids = self._task_by_publisher[publisher_id]
+
+                if task_ids:
+                    task_id = task_ids.popleft()
+
+                    if task_id in self._task_dict:
+                        packet = self._task_dict[task_id]
+
+                        if (
+                            packet.status == PacketStatus.RETURNED
+                            and packet.publisher_id == publisher_id
+                        ):
+                            # Clean up all indexes before removing
+                            self._cleanup_task_from_indexes(task_id)
+                            del self._task_dict[task_id]
+                            self._condition.notify_all()
+                            return packet.task
+
                 await self._condition.wait()
 
     async def get_assigned_task_by_assignee(self, assignee_id: str) -> Task:
@@ -109,15 +178,26 @@ class TaskChannel:
         """
         async with self._condition:
             while True:
-
-
-
-
-                )
-
-
-                self.
-
+                task_ids = self._task_by_assignee.get(assignee_id, deque())
+
+                # Process all available tasks until we find a valid one
+                while task_ids:
+                    task_id = task_ids.popleft()
+
+                    if task_id in self._task_dict:
+                        packet = self._task_dict[task_id]
+
+                        if (
+                            packet.status == PacketStatus.SENT
+                            and packet.assignee_id == assignee_id
+                        ):
+                            # Use helper method to properly update status
+                            self._update_task_status(
+                                task_id, PacketStatus.PROCESSING
+                            )
+                            self._condition.notify_all()
+                            return packet.task
+
                 await self._condition.wait()
 
     async def post_task(
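
Both consumer methods above use the standard asyncio.Condition pattern: acquire the lock, scan the per-worker queue, and wait() when nothing is eligible, relying on producers such as post_task to call notify_all(). A self-contained sketch of that pattern (a simplified stand-in, not the CAMEL class):

import asyncio
from collections import deque


class MiniChannel:
    """Minimal condition-guarded queue mirroring TaskChannel's wait loop."""

    def __init__(self) -> None:
        self._condition = asyncio.Condition()
        self._queue: deque = deque()

    async def post(self, item: str) -> None:
        async with self._condition:
            self._queue.append(item)
            self._condition.notify_all()  # wake any blocked consumer

    async def get(self) -> str:
        async with self._condition:
            while True:
                if self._queue:
                    return self._queue.popleft()
                # Releases the lock while waiting; reacquires on wake-up.
                await self._condition.wait()


async def main() -> None:
    channel = MiniChannel()
    consumer = asyncio.create_task(channel.get())
    await channel.post("task-1")
    print(await consumer)  # -> task-1


asyncio.run(main())
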
@@ -128,6 +208,8 @@ class TaskChannel:
         async with self._condition:
             packet = Packet(task, publisher_id, assignee_id)
             self._task_dict[packet.task.id] = packet
+            self._task_by_status[PacketStatus.SENT].add(packet.task.id)
+            self._task_by_assignee[assignee_id].append(packet.task.id)
             self._condition.notify_all()
 
     async def post_dependency(
@@ -140,6 +222,7 @@ class TaskChannel:
                 dependency, publisher_id, status=PacketStatus.ARCHIVED
             )
             self._task_dict[packet.task.id] = packet
+            self._task_by_status[PacketStatus.ARCHIVED].add(packet.task.id)
             self._condition.notify_all()
 
     async def return_task(self, task_id: str) -> None:
@@ -148,7 +231,12 @@ class TaskChannel:
         async with self._condition:
             if task_id in self._task_dict:
                 packet = self._task_dict[task_id]
-
+                # Only add to publisher queue if not already returned
+                if packet.status != PacketStatus.RETURNED:
+                    self._update_task_status(task_id, PacketStatus.RETURNED)
+                    self._task_by_publisher[packet.publisher_id].append(
+                        packet.task.id
+                    )
             self._condition.notify_all()
 
     async def archive_task(self, task_id: str) -> None:
@@ -156,7 +244,17 @@ class TaskChannel:
         async with self._condition:
             if task_id in self._task_dict:
                 packet = self._task_dict[task_id]
-
+                # Remove from assignee queue before archiving
+                if (
+                    packet.assignee_id
+                    and packet.assignee_id in self._task_by_assignee
+                ):
+                    assignee_queue = self._task_by_assignee[packet.assignee_id]
+                    self._task_by_assignee[packet.assignee_id] = deque(
+                        task for task in assignee_queue if task != task_id
+                    )
+                # Update status (keeps in status index for dependencies)
+                self._update_task_status(task_id, PacketStatus.ARCHIVED)
             self._condition.notify_all()
 
     async def remove_task(self, task_id: str) -> None:
@@ -164,17 +262,55 @@ class TaskChannel:
         async with self._condition:
             # Check if task ID exists before removing
             if task_id in self._task_dict:
+                # Clean up all indexes before removing
+                self._cleanup_task_from_indexes(task_id)
                 del self._task_dict[task_id]
                 self._condition.notify_all()
 
     async def get_dependency_ids(self) -> List[str]:
         r"""Get the IDs of all dependencies in the channel."""
         async with self._condition:
-
-
-
-
-
+            return list(self._task_by_status[PacketStatus.ARCHIVED])
+
+    async def get_in_flight_tasks(self, publisher_id: str) -> List[Task]:
+        r"""Get all tasks that are currently in-flight (SENT, RETURNED
+        or PROCESSING) published by the given publisher.
+
+        Args:
+            publisher_id (str): The ID of the publisher whose
+                in-flight tasks to retrieve.
+
+        Returns:
+            List[Task]: List of tasks that are currently in-flight.
+        """
+        async with self._condition:
+            in_flight_tasks = []
+            seen_task_ids = set()  # Track seen IDs for duplicate detection
+
+            # Get tasks with SENT, RETURNED or PROCESSING
+            # status published by this publisher
+            for status in [
+                PacketStatus.SENT,
+                PacketStatus.PROCESSING,
+                PacketStatus.RETURNED,
+            ]:
+                for task_id in self._task_by_status[status]:
+                    if task_id in self._task_dict:
+                        packet = self._task_dict[task_id]
+                        if packet.publisher_id == publisher_id:
+                            # Defensive check: detect if task appears in
+                            # multiple status sets (should never happen)
+                            if task_id in seen_task_ids:
+                                logger.warning(
+                                    f"Task {task_id} found in multiple "
+                                    f"status sets. This indicates a bug in "
+                                    f"status management."
+                                )
+                                continue
+                            in_flight_tasks.append(packet.task)
+                            seen_task_ids.add(task_id)
+
+            return in_flight_tasks
 
     async def get_task_by_id(self, task_id: str) -> Task:
         r"""Get a task from the channel by its ID."""
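
The class docstring above summarizes the design: one authoritative dict owns the packets, and every status change is mirrored into a status-to-ids index, so reads like get_dependency_ids and get_in_flight_tasks become set lookups instead of full scans. A stripped-down sketch of that invariant (hypothetical stand-in types, not the CAMEL classes):

from collections import defaultdict
from dataclasses import dataclass
from enum import Enum


class Status(Enum):
    SENT = "sent"
    PROCESSING = "processing"
    ARCHIVED = "archived"


@dataclass
class MiniPacket:
    task_id: str
    status: Status


tasks: dict = {}
by_status: defaultdict = defaultdict(set)


def update_status(task_id: str, new_status: Status) -> None:
    # Mirrors TaskChannel._update_task_status: two O(1) set operations
    # keep the index consistent with the authoritative dict.
    packet = tasks.get(task_id)
    if packet is None:
        return
    by_status[packet.status].discard(task_id)
    packet.status = new_status
    by_status[new_status].add(task_id)


tasks["t1"] = MiniPacket("t1", Status.SENT)
by_status[Status.SENT].add("t1")
update_status("t1", Status.PROCESSING)

assert "t1" in by_status[Status.PROCESSING]
assert "t1" not in by_status[Status.SENT]
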
camel/societies/workforce/utils.py

@@ -38,7 +38,38 @@ class TaskResult(BaseModel):
 
     content: str = Field(description="The result of the task.")
     failed: bool = Field(
-
+        default=False,
+        description="Flag indicating whether the task processing failed.",
+    )
+
+
+class QualityEvaluation(BaseModel):
+    r"""Quality evaluation result for a completed task.
+
+    .. deprecated::
+        Use :class:`TaskAnalysisResult` instead. This class is kept for
+        backward compatibility.
+    """
+
+    quality_sufficient: bool = Field(
+        description="Whether the task result meets quality standards."
+    )
+    quality_score: int = Field(
+        description="Quality score from 0 to 100.", ge=0, le=100
+    )
+    issues: List[str] = Field(
+        default_factory=list,
+        description="List of quality issues found in the result.",
+    )
+    recovery_strategy: Optional[str] = Field(
+        default=None,
+        description="Recommended recovery strategy if quality is "
+        "insufficient: "
+        "'retry', 'reassign', 'replan', or 'decompose'.",
+    )
+    modified_task_content: Optional[str] = Field(
+        default=None,
+        description="Modified task content for replan strategy.",
     )
 
 
@@ -52,7 +83,8 @@ class TaskAssignment(BaseModel):
     dependencies: List[str] = Field(
         default_factory=list,
         description="List of task IDs that must complete before this task. "
-        "This is critical for the task decomposition and execution.",
+        "This is critical for the task decomposition and "
+        "execution.",
     )
 
     # Allow LLMs to output dependencies as a comma-separated string or empty
@@ -60,7 +92,8 @@ class TaskAssignment(BaseModel):
     # downstream logic does not break with validation errors.
     @staticmethod
     def _split_and_strip(dep_str: str) -> List[str]:
-        r"""Utility to split a comma separated string and strip whitespace."""
+        r"""Utility to split a comma separated string and strip
+        whitespace."""
         return [d.strip() for d in dep_str.split(',') if d.strip()]
 
     @field_validator("dependencies", mode="before")
@@ -74,7 +107,8 @@ class TaskAssignment(BaseModel):
 
 
 class TaskAssignResult(BaseModel):
-    r"""The result of task assignment for both single and batch assignments."""
+    r"""The result of task assignment for both single and batch
+    assignments."""
 
     assignments: List[TaskAssignment] = Field(
         description="List of task assignments."
@@ -88,6 +122,7 @@ class RecoveryStrategy(str, Enum):
     REPLAN = "replan"
     DECOMPOSE = "decompose"
     CREATE_WORKER = "create_worker"
+    REASSIGN = "reassign"
 
     def __str__(self):
         return self.value
@@ -116,17 +151,75 @@ class FailureContext(BaseModel):
     )
 
 
-class RecoveryDecision(BaseModel):
-    r"""
+class TaskAnalysisResult(BaseModel):
+    r"""Unified result for task failure analysis and quality evaluation.
+
+    This model combines both failure recovery decisions and quality evaluation
+    results into a single structure. For failure analysis, only the recovery
+    strategy and reasoning fields are populated. For quality evaluation, all
+    fields including quality_score and issues are populated.
+    """
+
+    # Common fields - always populated
+    reasoning: str = Field(
+        description="Explanation for the analysis result or recovery "
+        "decision"
+    )
 
-
-
+    recovery_strategy: Optional[RecoveryStrategy] = Field(
+        default=None,
+        description="Recommended recovery strategy: 'retry', 'replan', "
+        "'decompose', 'create_worker', or 'reassign'. None indicates no "
+        "recovery needed (quality sufficient).",
     )
-
+
     modified_task_content: Optional[str] = Field(
-        default=None,
+        default=None,
+        description="Modified task content if strategy requires replan",
     )
 
+    # Quality-specific fields - populated only for quality evaluation
+    quality_score: Optional[int] = Field(
+        default=None,
+        description="Quality score from 0 to 100 (only for quality "
+        "evaluation). "
+        "None indicates this is a failure analysis, "
+        "not quality evaluation.",
+        ge=0,
+        le=100,
+    )
+
+    issues: List[str] = Field(
+        default_factory=list,
+        description="List of issues found. For failures: error details. "
+        "For quality evaluation: quality issues.",
+    )
+
+    @property
+    def is_quality_evaluation(self) -> bool:
+        r"""Check if this is a quality evaluation result.
+
+        Returns:
+            bool: True if this is a quality evaluation (has quality_score),
+                False if this is a failure analysis.
+        """
+        return self.quality_score is not None
+
+    @property
+    def quality_sufficient(self) -> bool:
+        r"""For quality evaluations, check if quality meets standards.
+
+        Returns:
+            bool: True if quality is sufficient (score >= 70 and no recovery
+                strategy recommended), False otherwise. Always False for
+                failure analysis results.
+        """
+        return (
+            self.quality_score is not None
+            and self.quality_score >= 70
+            and self.recovery_strategy is None
+        )
+
 
 def check_if_running(
     running: bool,
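
Assuming the definitions in this hunk, the dual-purpose model can be exercised as below (a sketch; the import path is the one used by structured_output_handler.py in the first hunk of this diff):

from camel.societies.workforce.utils import (
    RecoveryStrategy,
    TaskAnalysisResult,
)

# Failure analysis: no quality_score, so is_quality_evaluation is False.
failure = TaskAnalysisResult(
    reasoning="Worker timed out; the task input itself looks valid.",
    recovery_strategy=RecoveryStrategy.RETRY,
)
assert not failure.is_quality_evaluation

# Quality evaluation: a score is present and no recovery strategy is
# recommended, so quality_sufficient is True (the property's threshold
# is a score of at least 70).
evaluation = TaskAnalysisResult(
    reasoning="Output is complete and well formatted.",
    quality_score=85,
)
assert evaluation.is_quality_evaluation
assert evaluation.quality_sufficient
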
@@ -178,7 +271,7 @@ def check_if_running
             if retries < max_retries:
                 logger.warning(
                     f"{error_msg} Retrying in {retry_delay}s... "
-                    f"(Attempt {retries+1}/{max_retries})"
+                    f"(Attempt {retries + 1}/{max_retries})"
                 )
                 time.sleep(retry_delay)
                 retries += 1
@@ -200,7 +293,7 @@ def check_if_running
                 logger.warning(
                     f"Exception in {func.__name__}: {e}. "
                     f"Retrying in {retry_delay}s... "
-                    f"(Attempt {retries+1}/{max_retries})"
+                    f"(Attempt {retries + 1}/{max_retries})"
                 )
                 time.sleep(retry_delay)
                 retries += 1
@@ -218,7 +311,8 @@ def check_if_running
         # This should not be reached, but just in case
         if handle_exceptions:
             logger.error(
-                f"Unexpected failure in {func.__name__}: {last_exception}"
+                f"Unexpected failure in {func.__name__}: "
+                f"{last_exception}"
             )
             return None
         else: