camel-ai 0.2.67__py3-none-any.whl → 0.2.80a2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (224)
  1. camel/__init__.py +1 -1
  2. camel/agents/_types.py +6 -2
  3. camel/agents/_utils.py +38 -0
  4. camel/agents/chat_agent.py +4014 -410
  5. camel/agents/mcp_agent.py +30 -27
  6. camel/agents/repo_agent.py +2 -1
  7. camel/benchmarks/browsecomp.py +6 -6
  8. camel/configs/__init__.py +15 -0
  9. camel/configs/aihubmix_config.py +88 -0
  10. camel/configs/amd_config.py +70 -0
  11. camel/configs/cometapi_config.py +104 -0
  12. camel/configs/minimax_config.py +93 -0
  13. camel/configs/nebius_config.py +103 -0
  14. camel/configs/vllm_config.py +2 -0
  15. camel/data_collectors/alpaca_collector.py +15 -6
  16. camel/datagen/self_improving_cot.py +1 -1
  17. camel/datasets/base_generator.py +39 -10
  18. camel/environments/__init__.py +12 -0
  19. camel/environments/rlcards_env.py +860 -0
  20. camel/environments/single_step.py +28 -3
  21. camel/environments/tic_tac_toe.py +1 -1
  22. camel/interpreters/__init__.py +2 -0
  23. camel/interpreters/docker/Dockerfile +4 -16
  24. camel/interpreters/docker_interpreter.py +3 -2
  25. camel/interpreters/e2b_interpreter.py +34 -1
  26. camel/interpreters/internal_python_interpreter.py +51 -2
  27. camel/interpreters/microsandbox_interpreter.py +395 -0
  28. camel/loaders/__init__.py +11 -2
  29. camel/loaders/base_loader.py +85 -0
  30. camel/loaders/chunkr_reader.py +9 -0
  31. camel/loaders/firecrawl_reader.py +4 -4
  32. camel/logger.py +1 -1
  33. camel/memories/agent_memories.py +84 -1
  34. camel/memories/base.py +34 -0
  35. camel/memories/blocks/chat_history_block.py +122 -4
  36. camel/memories/blocks/vectordb_block.py +8 -1
  37. camel/memories/context_creators/score_based.py +29 -237
  38. camel/memories/records.py +88 -8
  39. camel/messages/base.py +166 -40
  40. camel/messages/func_message.py +32 -5
  41. camel/models/__init__.py +10 -0
  42. camel/models/aihubmix_model.py +83 -0
  43. camel/models/aiml_model.py +1 -16
  44. camel/models/amd_model.py +101 -0
  45. camel/models/anthropic_model.py +117 -18
  46. camel/models/aws_bedrock_model.py +2 -33
  47. camel/models/azure_openai_model.py +205 -91
  48. camel/models/base_audio_model.py +3 -1
  49. camel/models/base_model.py +189 -24
  50. camel/models/cohere_model.py +5 -17
  51. camel/models/cometapi_model.py +83 -0
  52. camel/models/crynux_model.py +1 -16
  53. camel/models/deepseek_model.py +6 -16
  54. camel/models/fish_audio_model.py +6 -0
  55. camel/models/gemini_model.py +71 -20
  56. camel/models/groq_model.py +1 -17
  57. camel/models/internlm_model.py +1 -16
  58. camel/models/litellm_model.py +49 -32
  59. camel/models/lmstudio_model.py +1 -17
  60. camel/models/minimax_model.py +83 -0
  61. camel/models/mistral_model.py +1 -16
  62. camel/models/model_factory.py +27 -1
  63. camel/models/model_manager.py +24 -6
  64. camel/models/modelscope_model.py +1 -16
  65. camel/models/moonshot_model.py +185 -19
  66. camel/models/nebius_model.py +83 -0
  67. camel/models/nemotron_model.py +0 -5
  68. camel/models/netmind_model.py +1 -16
  69. camel/models/novita_model.py +1 -16
  70. camel/models/nvidia_model.py +1 -16
  71. camel/models/ollama_model.py +4 -19
  72. camel/models/openai_compatible_model.py +171 -46
  73. camel/models/openai_model.py +205 -77
  74. camel/models/openrouter_model.py +1 -17
  75. camel/models/ppio_model.py +1 -16
  76. camel/models/qianfan_model.py +1 -16
  77. camel/models/qwen_model.py +1 -16
  78. camel/models/reka_model.py +1 -16
  79. camel/models/samba_model.py +34 -47
  80. camel/models/sglang_model.py +64 -31
  81. camel/models/siliconflow_model.py +1 -16
  82. camel/models/stub_model.py +0 -4
  83. camel/models/togetherai_model.py +1 -16
  84. camel/models/vllm_model.py +1 -16
  85. camel/models/volcano_model.py +0 -17
  86. camel/models/watsonx_model.py +1 -16
  87. camel/models/yi_model.py +1 -16
  88. camel/models/zhipuai_model.py +60 -16
  89. camel/parsers/__init__.py +18 -0
  90. camel/parsers/mcp_tool_call_parser.py +176 -0
  91. camel/retrievers/auto_retriever.py +1 -0
  92. camel/runtimes/configs.py +11 -11
  93. camel/runtimes/daytona_runtime.py +15 -16
  94. camel/runtimes/docker_runtime.py +6 -6
  95. camel/runtimes/remote_http_runtime.py +5 -5
  96. camel/services/agent_openapi_server.py +380 -0
  97. camel/societies/__init__.py +2 -0
  98. camel/societies/role_playing.py +26 -28
  99. camel/societies/workforce/__init__.py +2 -0
  100. camel/societies/workforce/events.py +122 -0
  101. camel/societies/workforce/prompts.py +249 -38
  102. camel/societies/workforce/role_playing_worker.py +82 -20
  103. camel/societies/workforce/single_agent_worker.py +634 -34
  104. camel/societies/workforce/structured_output_handler.py +512 -0
  105. camel/societies/workforce/task_channel.py +169 -23
  106. camel/societies/workforce/utils.py +176 -9
  107. camel/societies/workforce/worker.py +77 -23
  108. camel/societies/workforce/workflow_memory_manager.py +772 -0
  109. camel/societies/workforce/workforce.py +3168 -478
  110. camel/societies/workforce/workforce_callback.py +74 -0
  111. camel/societies/workforce/workforce_logger.py +203 -175
  112. camel/societies/workforce/workforce_metrics.py +33 -0
  113. camel/storages/__init__.py +4 -0
  114. camel/storages/key_value_storages/json.py +15 -2
  115. camel/storages/key_value_storages/mem0_cloud.py +48 -47
  116. camel/storages/object_storages/google_cloud.py +1 -1
  117. camel/storages/vectordb_storages/__init__.py +6 -0
  118. camel/storages/vectordb_storages/chroma.py +731 -0
  119. camel/storages/vectordb_storages/oceanbase.py +13 -13
  120. camel/storages/vectordb_storages/pgvector.py +349 -0
  121. camel/storages/vectordb_storages/qdrant.py +3 -3
  122. camel/storages/vectordb_storages/surreal.py +365 -0
  123. camel/storages/vectordb_storages/tidb.py +8 -6
  124. camel/tasks/task.py +244 -27
  125. camel/toolkits/__init__.py +46 -8
  126. camel/toolkits/aci_toolkit.py +64 -19
  127. camel/toolkits/arxiv_toolkit.py +6 -6
  128. camel/toolkits/base.py +63 -5
  129. camel/toolkits/code_execution.py +28 -1
  130. camel/toolkits/context_summarizer_toolkit.py +684 -0
  131. camel/toolkits/craw4ai_toolkit.py +93 -0
  132. camel/toolkits/dappier_toolkit.py +10 -6
  133. camel/toolkits/dingtalk.py +1135 -0
  134. camel/toolkits/edgeone_pages_mcp_toolkit.py +49 -0
  135. camel/toolkits/excel_toolkit.py +901 -67
  136. camel/toolkits/file_toolkit.py +1402 -0
  137. camel/toolkits/function_tool.py +30 -6
  138. camel/toolkits/github_toolkit.py +107 -20
  139. camel/toolkits/gmail_toolkit.py +1839 -0
  140. camel/toolkits/google_calendar_toolkit.py +38 -4
  141. camel/toolkits/google_drive_mcp_toolkit.py +54 -0
  142. camel/toolkits/human_toolkit.py +34 -10
  143. camel/toolkits/hybrid_browser_toolkit/__init__.py +18 -0
  144. camel/toolkits/hybrid_browser_toolkit/config_loader.py +185 -0
  145. camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +246 -0
  146. camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +1973 -0
  147. camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
  148. camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +3749 -0
  149. camel/toolkits/hybrid_browser_toolkit/ts/package.json +32 -0
  150. camel/toolkits/hybrid_browser_toolkit/ts/src/browser-scripts.js +125 -0
  151. camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +1815 -0
  152. camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +233 -0
  153. camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +590 -0
  154. camel/toolkits/hybrid_browser_toolkit/ts/src/index.ts +7 -0
  155. camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
  156. camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
  157. camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
  158. camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +130 -0
  159. camel/toolkits/hybrid_browser_toolkit/ts/tsconfig.json +26 -0
  160. camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +319 -0
  161. camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +1032 -0
  162. camel/toolkits/hybrid_browser_toolkit_py/__init__.py +17 -0
  163. camel/toolkits/hybrid_browser_toolkit_py/actions.py +575 -0
  164. camel/toolkits/hybrid_browser_toolkit_py/agent.py +311 -0
  165. camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +787 -0
  166. camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +490 -0
  167. camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +2390 -0
  168. camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +233 -0
  169. camel/toolkits/hybrid_browser_toolkit_py/stealth_script.js +0 -0
  170. camel/toolkits/hybrid_browser_toolkit_py/unified_analyzer.js +1043 -0
  171. camel/toolkits/image_generation_toolkit.py +390 -0
  172. camel/toolkits/jina_reranker_toolkit.py +3 -4
  173. camel/toolkits/klavis_toolkit.py +5 -1
  174. camel/toolkits/markitdown_toolkit.py +104 -0
  175. camel/toolkits/math_toolkit.py +64 -10
  176. camel/toolkits/mcp_toolkit.py +370 -45
  177. camel/toolkits/memory_toolkit.py +5 -1
  178. camel/toolkits/message_agent_toolkit.py +608 -0
  179. camel/toolkits/message_integration.py +724 -0
  180. camel/toolkits/minimax_mcp_toolkit.py +195 -0
  181. camel/toolkits/note_taking_toolkit.py +277 -0
  182. camel/toolkits/notion_mcp_toolkit.py +224 -0
  183. camel/toolkits/openbb_toolkit.py +5 -1
  184. camel/toolkits/origene_mcp_toolkit.py +56 -0
  185. camel/toolkits/playwright_mcp_toolkit.py +12 -31
  186. camel/toolkits/pptx_toolkit.py +25 -12
  187. camel/toolkits/resend_toolkit.py +168 -0
  188. camel/toolkits/screenshot_toolkit.py +213 -0
  189. camel/toolkits/search_toolkit.py +437 -142
  190. camel/toolkits/slack_toolkit.py +104 -50
  191. camel/toolkits/sympy_toolkit.py +1 -1
  192. camel/toolkits/task_planning_toolkit.py +3 -3
  193. camel/toolkits/terminal_toolkit/__init__.py +18 -0
  194. camel/toolkits/terminal_toolkit/terminal_toolkit.py +957 -0
  195. camel/toolkits/terminal_toolkit/utils.py +532 -0
  196. camel/toolkits/thinking_toolkit.py +1 -1
  197. camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
  198. camel/toolkits/video_analysis_toolkit.py +106 -26
  199. camel/toolkits/video_download_toolkit.py +17 -14
  200. camel/toolkits/web_deploy_toolkit.py +1219 -0
  201. camel/toolkits/wechat_official_toolkit.py +483 -0
  202. camel/toolkits/zapier_toolkit.py +5 -1
  203. camel/types/__init__.py +2 -2
  204. camel/types/agents/tool_calling_record.py +4 -1
  205. camel/types/enums.py +316 -40
  206. camel/types/openai_types.py +2 -2
  207. camel/types/unified_model_type.py +31 -4
  208. camel/utils/commons.py +36 -5
  209. camel/utils/constants.py +3 -0
  210. camel/utils/context_utils.py +1003 -0
  211. camel/utils/mcp.py +138 -4
  212. camel/utils/mcp_client.py +45 -1
  213. camel/utils/message_summarizer.py +148 -0
  214. camel/utils/token_counting.py +43 -20
  215. camel/utils/tool_result.py +44 -0
  216. {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/METADATA +296 -85
  217. {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/RECORD +219 -146
  218. camel/loaders/pandas_reader.py +0 -368
  219. camel/toolkits/dalle_toolkit.py +0 -175
  220. camel/toolkits/file_write_toolkit.py +0 -444
  221. camel/toolkits/openai_agent_toolkit.py +0 -135
  222. camel/toolkits/terminal_toolkit.py +0 -1037
  223. {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/WHEEL +0 -0
  224. {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/licenses/LICENSE +0 -0
--- a/camel/societies/workforce/task_channel.py
+++ b/camel/societies/workforce/task_channel.py
@@ -12,17 +12,23 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import asyncio
+from collections import defaultdict, deque
 from enum import Enum
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, Set
 
+from camel.logger import get_logger
 from camel.tasks import Task
 
+logger = get_logger(__name__)
+
 
 class PacketStatus(Enum):
     r"""The status of a packet. The packet can be in one of the following
     states:
 
     - ``SENT``: The packet has been sent to a worker.
+    - ``PROCESSING``: The packet has been claimed by a worker and is being
+      processed.
     - ``RETURNED``: The packet has been returned by the worker, meaning that
       the status of the task inside has been updated.
     - ``ARCHIVED``: The packet has been archived, meaning that the content of
@@ -31,6 +37,7 @@ class PacketStatus(Enum):
     """
 
     SENT = "SENT"
+    PROCESSING = "PROCESSING"
     RETURNED = "RETURNED"
     ARCHIVED = "ARCHIVED"
 
@@ -76,38 +83,121 @@ class Packet:
 
 
 class TaskChannel:
-    r"""An internal class used by Workforce to manage tasks."""
+    r"""An internal class used by Workforce to manage tasks.
+
+    This implementation uses a hybrid data structure approach:
+    - Hash map (_task_dict) for O(1) task lookup by ID
+    - Status-based index (_task_by_status) for efficient filtering by status
+    - Assignee/publisher queues for ordered task processing
+    """
 
     def __init__(self) -> None:
         self._condition = asyncio.Condition()
         self._task_dict: Dict[str, Packet] = {}
 
+        self._task_by_status: Dict[PacketStatus, Set[str]] = defaultdict(set)
+
+        # task by assignee store which are sent to
+        self._task_by_assignee: Dict[str, deque[str]] = defaultdict(deque)
+
+        self._task_by_publisher: Dict[str, deque[str]] = defaultdict(deque)
+
+    def _update_task_status(
+        self, task_id: str, new_status: PacketStatus
+    ) -> None:
+        r"""Helper method to properly update task status in all indexes."""
+        if task_id not in self._task_dict:
+            return
+
+        packet = self._task_dict[task_id]
+        old_status = packet.status
+
+        if old_status in self._task_by_status:
+            self._task_by_status[old_status].discard(task_id)
+
+        packet.status = new_status
+
+        self._task_by_status[new_status].add(task_id)
+
+    def _cleanup_task_from_indexes(self, task_id: str) -> None:
+        r"""Helper method to remove a task from all indexes.
+
+        Args:
+            task_id (str): The ID of the task to remove from indexes.
+        """
+        if task_id not in self._task_dict:
+            return
+
+        packet = self._task_dict[task_id]
+
+        if packet.status in self._task_by_status:
+            self._task_by_status[packet.status].discard(task_id)
+
+        if packet.assignee_id and packet.assignee_id in self._task_by_assignee:
+            assignee_queue = self._task_by_assignee[packet.assignee_id]
+            self._task_by_assignee[packet.assignee_id] = deque(
+                task for task in assignee_queue if task != task_id
+            )
+
+        if packet.publisher_id in self._task_by_publisher:
+            publisher_queue = self._task_by_publisher[packet.publisher_id]
+            self._task_by_publisher[packet.publisher_id] = deque(
+                task for task in publisher_queue if task != task_id
+            )
+
     async def get_returned_task_by_publisher(self, publisher_id: str) -> Task:
         r"""Get a task from the channel that has been returned by the
         publisher.
         """
         async with self._condition:
             while True:
-                for packet in self._task_dict.values():
-                    if packet.publisher_id != publisher_id:
-                        continue
-                    if packet.status != PacketStatus.RETURNED:
-                        continue
-                    return packet.task
+                task_ids = self._task_by_publisher[publisher_id]
+
+                if task_ids:
+                    task_id = task_ids.popleft()
+
+                    if task_id in self._task_dict:
+                        packet = self._task_dict[task_id]
+
+                        if (
+                            packet.status == PacketStatus.RETURNED
+                            and packet.publisher_id == publisher_id
+                        ):
+                            # Clean up all indexes before removing
+                            self._cleanup_task_from_indexes(task_id)
+                            del self._task_dict[task_id]
+                            self._condition.notify_all()
+                            return packet.task
+
                 await self._condition.wait()
 
     async def get_assigned_task_by_assignee(self, assignee_id: str) -> Task:
-        r"""Get a task from the channel that has been assigned to the
-        assignee.
+        r"""Atomically get and claim a task from the channel that has been
+        assigned to the assignee. This prevents race conditions where multiple
+        concurrent calls might retrieve the same task.
         """
         async with self._condition:
             while True:
-                for packet in self._task_dict.values():
-                    if (
-                        packet.status == PacketStatus.SENT
-                        and packet.assignee_id == assignee_id
-                    ):
-                        return packet.task
+                task_ids = self._task_by_assignee.get(assignee_id, deque())
+
+                # Process all available tasks until we find a valid one
+                while task_ids:
+                    task_id = task_ids.popleft()
+
+                    if task_id in self._task_dict:
+                        packet = self._task_dict[task_id]
+
+                        if (
+                            packet.status == PacketStatus.SENT
+                            and packet.assignee_id == assignee_id
+                        ):
+                            # Use helper method to properly update status
+                            self._update_task_status(
+                                task_id, PacketStatus.PROCESSING
+                            )
+                            self._condition.notify_all()
+                            return packet.task
+
                 await self._condition.wait()
 
     async def post_task(
@@ -118,6 +208,8 @@ class TaskChannel:
         async with self._condition:
             packet = Packet(task, publisher_id, assignee_id)
             self._task_dict[packet.task.id] = packet
+            self._task_by_status[PacketStatus.SENT].add(packet.task.id)
+            self._task_by_assignee[assignee_id].append(packet.task.id)
             self._condition.notify_all()
 
     async def post_dependency(
@@ -130,6 +222,7 @@ class TaskChannel:
                 dependency, publisher_id, status=PacketStatus.ARCHIVED
             )
             self._task_dict[packet.task.id] = packet
+            self._task_by_status[PacketStatus.ARCHIVED].add(packet.task.id)
             self._condition.notify_all()
 
     async def return_task(self, task_id: str) -> None:
@@ -138,7 +231,12 @@ class TaskChannel:
         async with self._condition:
             if task_id in self._task_dict:
                 packet = self._task_dict[task_id]
-                packet.status = PacketStatus.RETURNED
+                # Only add to publisher queue if not already returned
+                if packet.status != PacketStatus.RETURNED:
+                    self._update_task_status(task_id, PacketStatus.RETURNED)
+                    self._task_by_publisher[packet.publisher_id].append(
+                        packet.task.id
+                    )
             self._condition.notify_all()
 
     async def archive_task(self, task_id: str) -> None:
@@ -146,7 +244,17 @@ class TaskChannel:
         async with self._condition:
             if task_id in self._task_dict:
                 packet = self._task_dict[task_id]
-                packet.status = PacketStatus.ARCHIVED
+                # Remove from assignee queue before archiving
+                if (
+                    packet.assignee_id
+                    and packet.assignee_id in self._task_by_assignee
+                ):
+                    assignee_queue = self._task_by_assignee[packet.assignee_id]
+                    self._task_by_assignee[packet.assignee_id] = deque(
+                        task for task in assignee_queue if task != task_id
+                    )
+                # Update status (keeps in status index for dependencies)
+                self._update_task_status(task_id, PacketStatus.ARCHIVED)
             self._condition.notify_all()
 
     async def remove_task(self, task_id: str) -> None:
@@ -154,17 +262,55 @@ class TaskChannel:
         async with self._condition:
            # Check if task ID exists before removing
            if task_id in self._task_dict:
+                # Clean up all indexes before removing
+                self._cleanup_task_from_indexes(task_id)
                del self._task_dict[task_id]
            self._condition.notify_all()
 
     async def get_dependency_ids(self) -> List[str]:
         r"""Get the IDs of all dependencies in the channel."""
         async with self._condition:
-            dependency_ids = []
-            for task_id, packet in self._task_dict.items():
-                if packet.status == PacketStatus.ARCHIVED:
-                    dependency_ids.append(task_id)
-            return dependency_ids
+            return list(self._task_by_status[PacketStatus.ARCHIVED])
+
+    async def get_in_flight_tasks(self, publisher_id: str) -> List[Task]:
+        r"""Get all tasks that are currently in-flight (SENT, RETURNED
+        or PROCESSING) published by the given publisher.
+
+        Args:
+            publisher_id (str): The ID of the publisher whose
+                in-flight tasks to retrieve.
+
+        Returns:
+            List[Task]: List of tasks that are currently in-flight.
+        """
+        async with self._condition:
+            in_flight_tasks = []
+            seen_task_ids = set()  # Track seen IDs for duplicate detection
+
+            # Get tasks with SENT, RETURNED or PROCESSING
+            # status published by this publisher
+            for status in [
+                PacketStatus.SENT,
+                PacketStatus.PROCESSING,
+                PacketStatus.RETURNED,
+            ]:
+                for task_id in self._task_by_status[status]:
+                    if task_id in self._task_dict:
+                        packet = self._task_dict[task_id]
+                        if packet.publisher_id == publisher_id:
+                            # Defensive check: detect if task appears in
+                            # multiple status sets (should never happen)
+                            if task_id in seen_task_ids:
+                                logger.warning(
+                                    f"Task {task_id} found in multiple "
+                                    f"status sets. This indicates a bug in "
+                                    f"status management."
+                                )
+                                continue
+                            in_flight_tasks.append(packet.task)
+                            seen_task_ids.add(task_id)
+
+            return in_flight_tasks
 
     async def get_task_by_id(self, task_id: str) -> Task:
         r"""Get a task from the channel by its ID."""
--- a/camel/societies/workforce/utils.py
+++ b/camel/societies/workforce/utils.py
@@ -11,10 +11,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from enum import Enum
 from functools import wraps
-from typing import Callable, List
+from typing import Callable, List, Optional
 
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, field_validator
 
 
 class WorkerConf(BaseModel):
@@ -37,7 +38,38 @@ class TaskResult(BaseModel):
 
     content: str = Field(description="The result of the task.")
     failed: bool = Field(
-        description="Flag indicating whether the task processing failed."
+        default=False,
+        description="Flag indicating whether the task processing failed.",
+    )
+
+
+class QualityEvaluation(BaseModel):
+    r"""Quality evaluation result for a completed task.
+
+    .. deprecated::
+        Use :class:`TaskAnalysisResult` instead. This class is kept for
+        backward compatibility.
+    """
+
+    quality_sufficient: bool = Field(
+        description="Whether the task result meets quality standards."
+    )
+    quality_score: int = Field(
+        description="Quality score from 0 to 100.", ge=0, le=100
+    )
+    issues: List[str] = Field(
+        default_factory=list,
+        description="List of quality issues found in the result.",
+    )
+    recovery_strategy: Optional[str] = Field(
+        default=None,
+        description="Recommended recovery strategy if quality is "
+        "insufficient: "
+        "'retry', 'reassign', 'replan', or 'decompose'.",
+    )
+    modified_task_content: Optional[str] = Field(
+        default=None,
+        description="Modified task content for replan strategy.",
     )
 
 
@@ -50,18 +82,145 @@ class TaskAssignment(BaseModel):
     )
     dependencies: List[str] = Field(
         default_factory=list,
-        description="List of task IDs that must complete before this task.",
+        description="List of task IDs that must complete before this task. "
+        "This is critical for the task decomposition and "
+        "execution.",
     )
 
+    # Allow LLMs to output dependencies as a comma-separated string or empty
+    # string. This validator converts such cases into a list[str] so that
+    # downstream logic does not break with validation errors.
+    @staticmethod
+    def _split_and_strip(dep_str: str) -> List[str]:
+        r"""Utility to split a comma separated string and strip
+        whitespace."""
+        return [d.strip() for d in dep_str.split(',') if d.strip()]
+
+    @field_validator("dependencies", mode="before")
+    def validate_dependencies(cls, v) -> List[str]:
+        if v is None:
+            return []
+        # Handle empty string or comma-separated string from LLM
+        if isinstance(v, str):
+            return TaskAssignment._split_and_strip(v)
+        return v
+
 
 class TaskAssignResult(BaseModel):
-    r"""The result of task assignment for both single and batch assignments."""
+    r"""The result of task assignment for both single and batch
+    assignments."""
 
     assignments: List[TaskAssignment] = Field(
         description="List of task assignments."
     )
 
 
+class RecoveryStrategy(str, Enum):
+    r"""Strategies for handling failed tasks."""
+
+    RETRY = "retry"
+    REPLAN = "replan"
+    DECOMPOSE = "decompose"
+    CREATE_WORKER = "create_worker"
+    REASSIGN = "reassign"
+
+    def __str__(self):
+        return self.value
+
+    def __repr__(self):
+        return f"RecoveryStrategy.{self.name}"
+
+
+class FailureContext(BaseModel):
+    r"""Context information about a task failure."""
+
+    task_id: str = Field(description="ID of the failed task")
+    task_content: str = Field(description="Content of the failed task")
+    failure_count: int = Field(
+        description="Number of times this task has failed"
+    )
+    error_message: str = Field(description="Detailed error message")
+    worker_id: Optional[str] = Field(
+        default=None, description="ID of the worker that failed"
+    )
+    task_depth: int = Field(
+        description="Depth of the task in the decomposition hierarchy"
+    )
+    additional_info: Optional[str] = Field(
+        default=None, description="Additional context about the task"
+    )
+
+
+class TaskAnalysisResult(BaseModel):
+    r"""Unified result for task failure analysis and quality evaluation.
+
+    This model combines both failure recovery decisions and quality evaluation
+    results into a single structure. For failure analysis, only the recovery
+    strategy and reasoning fields are populated. For quality evaluation, all
+    fields including quality_score and issues are populated.
+    """
+
+    # Common fields - always populated
+    reasoning: str = Field(
+        description="Explanation for the analysis result or recovery "
+        "decision"
+    )
+
+    recovery_strategy: Optional[RecoveryStrategy] = Field(
+        default=None,
+        description="Recommended recovery strategy: 'retry', 'replan', "
+        "'decompose', 'create_worker', or 'reassign'. None indicates no "
+        "recovery needed (quality sufficient).",
+    )
+
+    modified_task_content: Optional[str] = Field(
+        default=None,
+        description="Modified task content if strategy requires replan",
+    )
+
+    # Quality-specific fields - populated only for quality evaluation
+    quality_score: Optional[int] = Field(
+        default=None,
+        description="Quality score from 0 to 100 (only for quality "
+        "evaluation). "
+        "None indicates this is a failure analysis, "
+        "not quality evaluation.",
+        ge=0,
+        le=100,
+    )
+
+    issues: List[str] = Field(
+        default_factory=list,
+        description="List of issues found. For failures: error details. "
+        "For quality evaluation: quality issues.",
+    )
+
+    @property
+    def is_quality_evaluation(self) -> bool:
+        r"""Check if this is a quality evaluation result.
+
+        Returns:
+            bool: True if this is a quality evaluation (has quality_score),
+                False if this is a failure analysis.
+        """
+        return self.quality_score is not None
+
+    @property
+    def quality_sufficient(self) -> bool:
+        r"""For quality evaluations, check if quality meets standards.
+
+        Returns:
+            bool: True if quality is sufficient (score >= 70 and no recovery
+                strategy recommended), False otherwise. Always False for
+                failure analysis results.
+        """
+        return (
+            self.quality_score is not None
+            and self.quality_score >= 70
+            and self.recovery_strategy is None
+        )
+
+
 def check_if_running(
     running: bool,
     max_retries: int = 3,
@@ -112,7 +271,7 @@ def check_if_running(
                     if retries < max_retries:
                         logger.warning(
                             f"{error_msg} Retrying in {retry_delay}s... "
-                            f"(Attempt {retries+1}/{max_retries})"
+                            f"(Attempt {retries + 1}/{max_retries})"
                         )
                         time.sleep(retry_delay)
                         retries += 1
@@ -134,7 +293,7 @@ def check_if_running(
                         logger.warning(
                             f"Exception in {func.__name__}: {e}. "
                             f"Retrying in {retry_delay}s... "
-                            f"(Attempt {retries+1}/{max_retries})"
+                            f"(Attempt {retries + 1}/{max_retries})"
                         )
                         time.sleep(retry_delay)
                         retries += 1
@@ -152,11 +311,19 @@ def check_if_running(
             # This should not be reached, but just in case
             if handle_exceptions:
                 logger.error(
-                    f"Unexpected failure in {func.__name__}: {last_exception}"
+                    f"Unexpected failure in {func.__name__}: "
+                    f"{last_exception}"
                 )
                 return None
             else:
-                raise last_exception
+                raise (
+                    last_exception
+                    if last_exception
+                    else RuntimeError(
+                        f"Unexpected failure in {func.__name__} "
+                        "with no exception captured."
+                    )
+                )
 
         return wrapper
 
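Two behavioral details in these hunks are easy to miss: the mode="before" validator means a model that emits dependencies as a comma-separated string still validates, and quality_sufficient requires both a score of at least 70 and no recommended recovery strategy. Here is a short sketch illustrating both, not taken from the package; the task_id and assignee_id field names on TaskAssignment are assumed, since the hunk only shows its dependencies field.

    from camel.societies.workforce.utils import (
        TaskAnalysisResult,
        TaskAssignment,
    )

    # A comma-separated string from the LLM is coerced into a list.
    a = TaskAssignment(
        task_id="t3",            # assumed field name
        assignee_id="worker-1",  # assumed field name
        dependencies="t1, t2",
    )
    assert a.dependencies == ["t1", "t2"]

    # An empty string (or None) becomes an empty dependency list.
    b = TaskAssignment(task_id="t4", assignee_id="worker-1", dependencies="")
    assert b.dependencies == []

    # Quality passes only with score >= 70 AND no recovery strategy.
    ok = TaskAnalysisResult(
        reasoning="meets acceptance criteria", quality_score=85
    )
    assert ok.is_quality_evaluation and ok.quality_sufficient

    # The string "retry" coerces to RecoveryStrategy.RETRY (str Enum),
    # which makes quality_sufficient False even with a score present.
    bad = TaskAnalysisResult(
        reasoning="output truncated",
        quality_score=40,
        recovery_strategy="retry",
    )
    assert not bad.quality_sufficient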
--- a/camel/societies/workforce/worker.py
+++ b/camel/societies/workforce/worker.py
@@ -13,9 +13,10 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from __future__ import annotations
 
+import asyncio
 import logging
 from abc import ABC, abstractmethod
-from typing import List, Optional
+from typing import List, Optional, Set
 
 from colorama import Fore
 
@@ -43,6 +44,7 @@ class Worker(BaseNode, ABC):
         node_id: Optional[str] = None,
     ) -> None:
         super().__init__(description, node_id=node_id)
+        self._active_task_ids: Set[str] = set()
 
     def __repr__(self):
         return f"Worker node {self.node_id} ({self.description})"
@@ -60,7 +62,7 @@ class Worker(BaseNode, ABC):
         pass
 
     async def _get_assigned_task(self) -> Task:
-        r"""Get the task assigned to this node from the channel."""
+        r"""Get a task assigned to this node from the channel."""
         return await self._channel.get_assigned_task_by_assignee(self.node_id)
 
     @staticmethod
@@ -77,38 +79,90 @@ class Worker(BaseNode, ABC):
     def set_channel(self, channel: TaskChannel):
         self._channel = channel
 
-    @check_if_running(False)
-    async def _listen_to_channel(self):
-        """Continuously listen to the channel, process the task that are
-        assigned to this node, and update the result and status of the task.
-
-        This method should be run in an event loop, as it will run
-        indefinitely.
-        """
-        self._running = True
-        logger.info(f"{self} started.")
-
-        while True:
-            # Get the earliest task assigned to this node
-            task = await self._get_assigned_task()
+    async def _process_single_task(self, task: Task) -> None:
+        r"""Process a single task and handle its completion/failure."""
+        try:
+            self._active_task_ids.add(task.id)
             print(
                 f"{Fore.YELLOW}{self} get task {task.id}: {task.content}"
                 f"{Fore.RESET}"
             )
-            # Get the Task instance of dependencies
-            dependency_ids = await self._channel.get_dependency_ids()
-            task_dependencies = [
-                await self._channel.get_task_by_id(dep_id)
-                for dep_id in dependency_ids
-            ]
 
             # Process the task
-            task_state = await self._process_task(task, task_dependencies)
+            task_state = await self._process_task(task, task.dependencies)
 
             # Update the result and status of the task
             task.set_state(task_state)
 
             await self._channel.return_task(task.id)
+        except Exception as e:
+            logger.error(f"Error processing task {task.id}: {e}")
+            # Store error information in task result
+            task.result = f"{type(e).__name__}: {e!s}"
+            task.set_state(TaskState.FAILED)
+            await self._channel.return_task(task.id)
+        finally:
+            self._active_task_ids.discard(task.id)
+
+    @check_if_running(False)
+    async def _listen_to_channel(self):
+        r"""Continuously listen to the channel and process assigned tasks.
+
+        This method supports parallel task execution without artificial limits.
+        """
+        self._running = True
+        logger.info(f"{self} started.")
+
+        # Keep track of running task coroutines
+        running_tasks: Set[asyncio.Task] = set()
+
+        while self._running:
+            try:
+                # Clean up completed tasks
+                completed_tasks = [t for t in running_tasks if t.done()]
+                for completed_task in completed_tasks:
+                    running_tasks.remove(completed_task)
+                    # Check for exceptions in completed tasks
+                    try:
+                        await completed_task
+                    except Exception as e:
+                        logger.error(f"Task processing failed: {e}")
+
+                # Try to get a new task (with short timeout to avoid blocking)
+                try:
+                    task = await asyncio.wait_for(
+                        self._get_assigned_task(), timeout=1.0
+                    )
+
+                    # Create and start processing task
+                    task_coroutine = asyncio.create_task(
+                        self._process_single_task(task)
+                    )
+                    running_tasks.add(task_coroutine)
+
+                except asyncio.TimeoutError:
+                    # No tasks available, continue loop
+                    if not running_tasks:
+                        # No tasks running and none available, short sleep
+                        await asyncio.sleep(0.1)
+                    continue
+
+            except Exception as e:
+                logger.error(
+                    f"Error in worker {self.node_id} listen loop: {e}"
+                )
+                await asyncio.sleep(0.1)
+                continue
+
+        # Wait for all remaining tasks to complete when stopping
+        if running_tasks:
+            logger.info(
+                f"{self} stopping, waiting for {len(running_tasks)} "
+                f"tasks to complete..."
+            )
+            await asyncio.gather(*running_tasks, return_exceptions=True)
+
+        logger.info(f"{self} stopped.")
 
     @check_if_running(False)
     async def start(self):
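
The rewritten _listen_to_channel turns the worker from a one-task-at-a-time loop into a dispatcher: it reaps finished coroutines, polls the channel with a one-second timeout so the loop stays responsive, spawns each claimed task with asyncio.create_task, and drains in-flight work when _running goes false. Below is a standalone sketch of that pattern, not taken from the package, using a plain asyncio.Queue in place of the TaskChannel.

    import asyncio
    from typing import Set

    async def handle(item: str) -> None:
        await asyncio.sleep(0.2)  # simulated work
        print(f"done: {item}")

    async def listen(queue: asyncio.Queue, running: asyncio.Event) -> None:
        in_flight: Set[asyncio.Task] = set()
        while running.is_set():
            # Reap completed jobs and surface their exceptions.
            for t in [t for t in in_flight if t.done()]:
                in_flight.remove(t)
                try:
                    await t
                except Exception as e:
                    print(f"job failed: {e}")
            try:
                # Short timeout keeps the loop responsive while idle.
                item = await asyncio.wait_for(queue.get(), timeout=1.0)
                in_flight.add(asyncio.create_task(handle(item)))
            except asyncio.TimeoutError:
                if not in_flight:
                    await asyncio.sleep(0.1)
        # Drain remaining jobs on shutdown, like the gather() above.
        await asyncio.gather(*in_flight, return_exceptions=True)

    async def main() -> None:
        queue: asyncio.Queue = asyncio.Queue()
        running = asyncio.Event()
        running.set()
        listener = asyncio.create_task(listen(queue, running))
        for i in range(3):
            await queue.put(f"task-{i}")
        await asyncio.sleep(1.5)
        running.clear()
        await listener

    asyncio.run(main())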