camel-ai 0.2.75a5__py3-none-any.whl → 0.2.76a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (47)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +298 -130
  3. camel/configs/__init__.py +6 -0
  4. camel/configs/amd_config.py +70 -0
  5. camel/configs/nebius_config.py +103 -0
  6. camel/interpreters/__init__.py +2 -0
  7. camel/interpreters/microsandbox_interpreter.py +395 -0
  8. camel/models/__init__.py +4 -0
  9. camel/models/amd_model.py +101 -0
  10. camel/models/model_factory.py +4 -0
  11. camel/models/nebius_model.py +83 -0
  12. camel/models/ollama_model.py +3 -3
  13. camel/models/openai_model.py +0 -6
  14. camel/runtimes/daytona_runtime.py +11 -12
  15. camel/societies/workforce/task_channel.py +120 -27
  16. camel/societies/workforce/workforce.py +35 -3
  17. camel/toolkits/__init__.py +5 -3
  18. camel/toolkits/code_execution.py +28 -1
  19. camel/toolkits/function_tool.py +6 -1
  20. camel/toolkits/github_toolkit.py +104 -17
  21. camel/toolkits/hybrid_browser_toolkit/config_loader.py +8 -0
  22. camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +12 -0
  23. camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +33 -14
  24. camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +135 -40
  25. camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +2 -0
  26. camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +43 -207
  27. camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
  28. camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +231 -0
  29. camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
  30. camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +39 -6
  31. camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +248 -58
  32. camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +5 -1
  33. camel/toolkits/{openai_image_toolkit.py → image_generation_toolkit.py} +98 -31
  34. camel/toolkits/math_toolkit.py +64 -10
  35. camel/toolkits/mcp_toolkit.py +39 -14
  36. camel/toolkits/minimax_mcp_toolkit.py +195 -0
  37. camel/toolkits/search_toolkit.py +13 -2
  38. camel/toolkits/terminal_toolkit.py +12 -2
  39. camel/toolkits/video_analysis_toolkit.py +16 -10
  40. camel/types/enums.py +42 -0
  41. camel/types/unified_model_type.py +5 -0
  42. camel/utils/commons.py +2 -0
  43. camel/utils/mcp.py +136 -2
  44. {camel_ai-0.2.75a5.dist-info → camel_ai-0.2.76a0.dist-info}/METADATA +5 -11
  45. {camel_ai-0.2.75a5.dist-info → camel_ai-0.2.76a0.dist-info}/RECORD +47 -38
  46. {camel_ai-0.2.75a5.dist-info → camel_ai-0.2.76a0.dist-info}/WHEEL +0 -0
  47. {camel_ai-0.2.75a5.dist-info → camel_ai-0.2.76a0.dist-info}/licenses/LICENSE +0 -0
camel/models/amd_model.py (new file)
@@ -0,0 +1,101 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+ import os
+ from typing import Any, Dict, Optional, Union
+
+ from camel.configs import AMD_API_PARAMS, AMDConfig
+ from camel.models.openai_compatible_model import OpenAICompatibleModel
+ from camel.types import ModelType
+ from camel.utils import BaseTokenCounter, api_keys_required
+
+
+ class AMDModel(OpenAICompatibleModel):
+     r"""AMD API in a unified OpenAICompatibleModel interface.
+
+     Args:
+         model_type (Union[ModelType, str]): Model for which a backend is
+             created, one of AMD series.
+         model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+             that will be fed into:obj:`openai.ChatCompletion.create()`. If
+             :obj:`None`, :obj:`AMDConfig().as_dict()` will be used.
+             (default: :obj:`None`)
+         api_key (Optional[str], optional): The API key for authenticating with
+             the AMD service. (default: :obj:`None`)
+         url (Optional[str], optional): The url to the AMD service.
+             (default: :obj:`None`)
+         token_counter (Optional[BaseTokenCounter], optional): Token counter to
+             use for the model. If not provided, :obj:`OpenAITokenCounter(
+             ModelType.GPT_4)` will be used.
+             (default: :obj:`None`)
+         timeout (Optional[float], optional): The timeout value in seconds for
+             API calls. If not provided, will fall back to the MODEL_TIMEOUT
+             environment variable or default to 180 seconds.
+             (default: :obj:`None`)
+         max_retries (int, optional): Maximum number of retries for API calls.
+             (default: :obj:`3`)
+         **kwargs (Any): Additional arguments to pass to the client
+             initialization.
+     """
+
+     @api_keys_required(
+         [
+             ("api_key", "AMD_API_KEY"),
+         ]
+     )
+     def __init__(
+         self,
+         model_type: Union[ModelType, str],
+         model_config_dict: Optional[Dict[str, Any]] = None,
+         api_key: Optional[str] = None,
+         url: Optional[str] = None,
+         token_counter: Optional[BaseTokenCounter] = None,
+         timeout: Optional[float] = None,
+         max_retries: int = 3,
+         **kwargs: Any,
+     ) -> None:
+         if model_config_dict is None:
+             model_config_dict = AMDConfig().as_dict()
+         api_key = api_key or os.environ.get("AMD_API_KEY")
+         url = url or os.environ.get(
+             "AMD_API_BASE_URL", "https://llm-api.amd.com"
+         )
+         headers = {'Ocp-Apim-Subscription-Key': api_key}
+         kwargs["default_headers"] = headers
+         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+         super().__init__(
+             model_type=model_type,
+             model_config_dict=model_config_dict,
+             api_key=api_key,
+             url=url,
+             token_counter=token_counter,
+             timeout=timeout,
+             max_retries=max_retries,
+             **kwargs,
+         )
+
+     def check_model_config(self):
+         r"""Check whether the model configuration contains any
+         unexpected arguments to AMD API.
+
+         Raises:
+             ValueError: If the model configuration dictionary contains any
+                 unexpected arguments to AMD API.
+         """
+         for param in self.model_config_dict:
+             if param not in AMD_API_PARAMS:
+                 raise ValueError(
+                     f"Unexpected argument `{param}` is "
+                     "input into AMD model backend."
+                 )
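
For orientation, a minimal usage sketch of the new backend; the model id string and the key value are placeholders, not taken from this diff:

    import os

    from camel.models.amd_model import AMDModel

    os.environ["AMD_API_KEY"] = "sk-placeholder"  # placeholder, not a real key

    # Uses AMDConfig defaults and https://llm-api.amd.com unless overridden;
    # the key is also sent as the 'Ocp-Apim-Subscription-Key' header, per the
    # constructor above.
    model = AMDModel(model_type="Llama-3.1-8B-Instruct")  # placeholder model id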
camel/models/model_factory.py
@@ -16,6 +16,7 @@ import os
  from typing import ClassVar, Dict, Optional, Type, Union
 
  from camel.models.aiml_model import AIMLModel
+ from camel.models.amd_model import AMDModel
  from camel.models.anthropic_model import AnthropicModel
  from camel.models.aws_bedrock_model import AWSBedrockModel
  from camel.models.azure_openai_model import AzureOpenAIModel
@@ -31,6 +32,7 @@ from camel.models.lmstudio_model import LMStudioModel
  from camel.models.mistral_model import MistralModel
  from camel.models.modelscope_model import ModelScopeModel
  from camel.models.moonshot_model import MoonshotModel
+ from camel.models.nebius_model import NebiusModel
  from camel.models.netmind_model import NetmindModel
  from camel.models.novita_model import NovitaModel
  from camel.models.nvidia_model import NvidiaModel
@@ -76,6 +78,7 @@ class ModelFactory:
          ModelPlatformType.AWS_BEDROCK: AWSBedrockModel,
          ModelPlatformType.NVIDIA: NvidiaModel,
          ModelPlatformType.SILICONFLOW: SiliconFlowModel,
+         ModelPlatformType.AMD: AMDModel,
          ModelPlatformType.AIML: AIMLModel,
          ModelPlatformType.VOLCANO: VolcanoModel,
          ModelPlatformType.NETMIND: NetmindModel,
@@ -83,6 +86,7 @@ class ModelFactory:
          ModelPlatformType.AZURE: AzureOpenAIModel,
          ModelPlatformType.ANTHROPIC: AnthropicModel,
          ModelPlatformType.GROQ: GroqModel,
+         ModelPlatformType.NEBIUS: NebiusModel,
          ModelPlatformType.LMSTUDIO: LMStudioModel,
          ModelPlatformType.OPENROUTER: OpenRouterModel,
          ModelPlatformType.ZHIPU: ZhipuAIModel,
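
With these registrations, both new platforms become reachable through the usual ModelFactory entry point; a sketch, where the model-type strings are placeholders:

    from camel.models import ModelFactory
    from camel.types import ModelPlatformType

    # Placeholder model ids; any AMD- or Nebius-served model id works here.
    amd_model = ModelFactory.create(
        model_platform=ModelPlatformType.AMD,
        model_type="Llama-3.1-8B-Instruct",
    )
    nebius_model = ModelFactory.create(
        model_platform=ModelPlatformType.NEBIUS,
        model_type="deepseek-ai/DeepSeek-V3",
    )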
camel/models/nebius_model.py (new file)
@@ -0,0 +1,83 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ import os
+ from typing import Any, Dict, Optional, Union
+
+ from camel.configs import NebiusConfig
+ from camel.models.openai_compatible_model import OpenAICompatibleModel
+ from camel.types import ModelType
+ from camel.utils import (
+     BaseTokenCounter,
+     api_keys_required,
+ )
+
+
+ class NebiusModel(OpenAICompatibleModel):
+     r"""LLM API served by Nebius AI Studio in a unified OpenAICompatibleModel
+     interface.
+
+     Args:
+         model_type (Union[ModelType, str]): Model for which a backend is
+             created.
+         model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+             that will be fed into:obj:`openai.ChatCompletion.create()`.
+             If:obj:`None`, :obj:`NebiusConfig().as_dict()` will be used.
+             (default: :obj:`None`)
+         api_key (Optional[str], optional): The API key for authenticating
+             with the Nebius AI Studio service. (default: :obj:`None`).
+         url (Optional[str], optional): The url to the Nebius AI Studio service.
+             (default: :obj:`None`)
+         token_counter (Optional[BaseTokenCounter], optional): Token counter to
+             use for the model. If not provided, :obj:`OpenAITokenCounter(
+             ModelType.GPT_4O_MINI)` will be used.
+             (default: :obj:`None`)
+         timeout (Optional[float], optional): The timeout value in seconds for
+             API calls. If not provided, will fall back to the MODEL_TIMEOUT
+             environment variable or default to 180 seconds.
+             (default: :obj:`None`)
+         max_retries (int, optional): Maximum number of retries for API calls.
+             (default: :obj:`3`)
+         **kwargs (Any): Additional arguments to pass to the client
+             initialization.
+     """
+
+     @api_keys_required([("api_key", "NEBIUS_API_KEY")])
+     def __init__(
+         self,
+         model_type: Union[ModelType, str],
+         model_config_dict: Optional[Dict[str, Any]] = None,
+         api_key: Optional[str] = None,
+         url: Optional[str] = None,
+         token_counter: Optional[BaseTokenCounter] = None,
+         timeout: Optional[float] = None,
+         max_retries: int = 3,
+         **kwargs: Any,
+     ) -> None:
+         if model_config_dict is None:
+             model_config_dict = NebiusConfig().as_dict()
+         api_key = api_key or os.environ.get("NEBIUS_API_KEY")
+         url = url or os.environ.get(
+             "NEBIUS_API_BASE_URL", "https://api.studio.nebius.com/v1"
+         )
+         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+         super().__init__(
+             model_type=model_type,
+             model_config_dict=model_config_dict,
+             api_key=api_key,
+             url=url,
+             token_counter=token_counter,
+             timeout=timeout,
+             max_retries=max_retries,
+             **kwargs,
+         )
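
As with the AMD backend, configuration can come entirely from the environment; a sketch, with placeholder key and model id:

    import os

    from camel.models.nebius_model import NebiusModel

    os.environ["NEBIUS_API_KEY"] = "nb-placeholder"  # placeholder key
    # NEBIUS_API_BASE_URL is optional; https://api.studio.nebius.com/v1
    # is the default, per the constructor above.

    model = NebiusModel(model_type="deepseek-ai/DeepSeek-V3")  # placeholder id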
camel/models/ollama_model.py
@@ -35,8 +35,8 @@ class OllamaModel(OpenAICompatibleModel):
          If:obj:`None`, :obj:`OllamaConfig().as_dict()` will be used.
          (default: :obj:`None`)
      api_key (Optional[str], optional): The API key for authenticating with
-         the model service. Ollama doesn't need API key, it would be
-         ignored if set. (default: :obj:`None`)
+         the model service. Required for Ollama cloud services. If not
+         provided, defaults to "Not_Provided". (default: :obj:`None`)
      url (Optional[str], optional): The url to the model service.
          (default: :obj:`None`)
      token_counter (Optional[BaseTokenCounter], optional): Token counter to
@@ -79,7 +79,7 @@ class OllamaModel(OpenAICompatibleModel):
      super().__init__(
          model_type=self._model_type,
          model_config_dict=model_config_dict,
-         api_key="Not_Used",
+         api_key=api_key or "Not_Provided",
          url=self._url,
          token_counter=token_counter,
          timeout=timeout,
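
The practical effect of this change is that a caller-supplied key is now forwarded to the OpenAI-compatible client instead of being discarded; a sketch, with placeholder key and endpoint:

    from camel.models.ollama_model import OllamaModel

    # Local server: no key needed; the client falls back to "Not_Provided".
    local = OllamaModel(
        model_type="llama3.1",
        url="http://localhost:11434/v1",
    )

    # Ollama cloud: the key is now passed through rather than ignored.
    cloud = OllamaModel(
        model_type="llama3.1",
        api_key="ollama-cloud-key",           # placeholder
        url="https://ollama.example.com/v1",  # placeholder endpoint
    )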
camel/models/openai_model.py
@@ -303,9 +303,6 @@ class OpenAIModel(BaseModelBackend):
          is_streaming = self.model_config_dict.get("stream", False)
 
          if response_format:
-             result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = (
-                 self._request_parse(messages, response_format, tools)
-             )
              if is_streaming:
                  # Use streaming parse for structured output
                  return self._request_stream_parse(
@@ -377,9 +374,6 @@ class OpenAIModel(BaseModelBackend):
          is_streaming = self.model_config_dict.get("stream", False)
 
          if response_format:
-             result: Union[
-                 ChatCompletion, AsyncStream[ChatCompletionChunk]
-             ] = await self._arequest_parse(messages, response_format, tools)
              if is_streaming:
                  # Use streaming parse for structured output
                  return await self._arequest_stream_parse(
camel/runtimes/daytona_runtime.py
@@ -16,7 +16,7 @@ import inspect
  import json
  import os
  from functools import wraps
- from typing import Any, Dict, List, Optional, Union
+ from typing import Any, Callable, Dict, List, Optional, Union
 
  from pydantic import BaseModel
 
@@ -49,7 +49,7 @@ class DaytonaRuntime(BaseRuntime):
          api_url: Optional[str] = None,
          language: Optional[str] = "python",
      ):
-         from daytona_sdk import Daytona, DaytonaConfig
+         from daytona_sdk import Daytona, DaytonaConfig, Sandbox
 
          super().__init__()
          self.api_key = api_key or os.environ.get('DAYTONA_API_KEY')
@@ -57,7 +57,7 @@ class DaytonaRuntime(BaseRuntime):
          self.language = language
          self.config = DaytonaConfig(api_key=self.api_key, api_url=self.api_url)
          self.daytona = Daytona(self.config)
-         self.sandbox = None
+         self.sandbox: Optional[Sandbox] = None
          self.entrypoint: Dict[str, str] = dict()
 
      def build(self) -> "DaytonaRuntime":
@@ -66,10 +66,10 @@ class DaytonaRuntime(BaseRuntime):
          Returns:
              DaytonaRuntime: The current runtime.
          """
-         from daytona_sdk import CreateSandboxParams
+         from daytona_sdk import CreateSandboxBaseParams
 
          try:
-             params = CreateSandboxParams(language=self.language)
+             params = CreateSandboxBaseParams(language=self.language)
              self.sandbox = self.daytona.create(params)
              if self.sandbox is None:
                  raise RuntimeError("Failed to create sandbox.")
@@ -83,7 +83,7 @@ class DaytonaRuntime(BaseRuntime):
          r"""Clean up the sandbox when exiting."""
          if self.sandbox:
              try:
-                 self.daytona.remove(self.sandbox)
+                 self.daytona.delete(self.sandbox)
                  logger.info(f"Sandbox {self.sandbox.id} removed")
                  self.sandbox = None
              except Exception as e:
@@ -112,7 +112,7 @@ class DaytonaRuntime(BaseRuntime):
          if arguments is not None:
              entrypoint += json.dumps(arguments, ensure_ascii=False)
 
-         def make_wrapper(inner_func, func_name, func_code):
+         def make_wrapper(inner_func: Callable, func_name: str, func_code: str):
              r"""Creates a wrapper for a function to execute it in the
              Daytona sandbox.
 
@@ -208,12 +208,11 @@ class DaytonaRuntime(BaseRuntime):
              RuntimeError: If the sandbox is not initialized.
          """
          if self.sandbox is None:
-             raise RuntimeError("Failed to create sandbox.")
-         info = self.sandbox.info()
+             raise RuntimeError("Sandbox not initialized.")
          return (
-             f"Sandbox {info.name}:\n"
-             f"State: {info.state}\n"
-             f"Resources: {info.resources.cpu} CPU, {info.resources.memory} RAM"
+             f"Sandbox {self.sandbox.id}:\n"
+             f"State: {self.sandbox.state}\n"
+             f"Resources: {self.sandbox.cpu} CPU, {self.sandbox.memory} RAM"
          )
 
      def __del__(self):
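
These hunks track a daytona_sdk API migration (CreateSandboxParams to CreateSandboxBaseParams, remove() to delete(), sandbox attributes read directly instead of via info()). Based only on the constructor and build() shown above, usage would look roughly like this; the import path and the key value are assumptions, not confirmed by the diff:

    import os

    from camel.runtimes import DaytonaRuntime  # import path assumed

    os.environ["DAYTONA_API_KEY"] = "dtn_placeholder"  # placeholder key

    # build() creates the sandbox via CreateSandboxBaseParams, per the diff.
    runtime = DaytonaRuntime(language="python").build()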
camel/societies/workforce/task_channel.py
@@ -12,8 +12,9 @@
  # limitations under the License.
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
  import asyncio
+ from collections import defaultdict, deque
  from enum import Enum
- from typing import Dict, List, Optional
+ from typing import Dict, List, Optional, Set
 
  from camel.tasks import Task
 
@@ -79,27 +80,92 @@ class Packet:
 
 
  class TaskChannel:
-     r"""An internal class used by Workforce to manage tasks."""
+     r"""An internal class used by Workforce to manage tasks.
+
+     This implementation uses a hybrid data structure approach:
+     - Hash map (_task_dict) for O(1) task lookup by ID
+     - Status-based index (_task_by_status) for efficient filtering by status
+     - Assignee/publisher queues for ordered task processing
+     """
 
      def __init__(self) -> None:
          self._condition = asyncio.Condition()
          self._task_dict: Dict[str, Packet] = {}
 
+         self._task_by_status: Dict[PacketStatus, Set[str]] = defaultdict(set)
+
+         # task by assignee store which are sent to
+         self._task_by_assignee: Dict[str, deque[str]] = defaultdict(deque)
+
+         self._task_by_publisher: Dict[str, deque[str]] = defaultdict(deque)
+
+     def _update_task_status(
+         self, task_id: str, new_status: PacketStatus
+     ) -> None:
+         r"""Helper method to properly update task status in all indexes."""
+         if task_id not in self._task_dict:
+             return
+
+         packet = self._task_dict[task_id]
+         old_status = packet.status
+
+         if old_status in self._task_by_status:
+             self._task_by_status[old_status].discard(task_id)
+
+         packet.status = new_status
+
+         self._task_by_status[new_status].add(task_id)
+
+     def _cleanup_task_from_indexes(self, task_id: str) -> None:
+         r"""Helper method to remove a task from all indexes.
+
+         Args:
+             task_id (str): The ID of the task to remove from indexes.
+         """
+         if task_id not in self._task_dict:
+             return
+
+         packet = self._task_dict[task_id]
+
+         if packet.status in self._task_by_status:
+             self._task_by_status[packet.status].discard(task_id)
+
+         if packet.assignee_id and packet.assignee_id in self._task_by_assignee:
+             assignee_queue = self._task_by_assignee[packet.assignee_id]
+             self._task_by_assignee[packet.assignee_id] = deque(
+                 task for task in assignee_queue if task != task_id
+             )
+
+         if packet.publisher_id in self._task_by_publisher:
+             publisher_queue = self._task_by_publisher[packet.publisher_id]
+             self._task_by_publisher[packet.publisher_id] = deque(
+                 task for task in publisher_queue if task != task_id
+             )
+
      async def get_returned_task_by_publisher(self, publisher_id: str) -> Task:
          r"""Get a task from the channel that has been returned by the
          publisher.
          """
          async with self._condition:
              while True:
-                 for task_id, packet in list(self._task_dict.items()):
-                     if packet.publisher_id != publisher_id:
-                         continue
-                     if packet.status != PacketStatus.RETURNED:
-                         continue
-                     # Remove the task to prevent returning it again
-                     del self._task_dict[task_id]
-                     self._condition.notify_all()
-                     return packet.task
+                 task_ids = self._task_by_publisher[publisher_id]
+
+                 if task_ids:
+                     task_id = task_ids.popleft()
+
+                     if task_id in self._task_dict:
+                         packet = self._task_dict[task_id]
+
+                         if (
+                             packet.status == PacketStatus.RETURNED
+                             and packet.publisher_id == publisher_id
+                         ):
+                             # Clean up all indexes before removing
+                             self._cleanup_task_from_indexes(task_id)
+                             del self._task_dict[task_id]
+                             self._condition.notify_all()
+                             return packet.task
+
                  await self._condition.wait()
 
      async def get_assigned_task_by_assignee(self, assignee_id: str) -> Task:
@@ -109,15 +175,26 @@ class TaskChannel:
          """
          async with self._condition:
              while True:
-                 for packet in self._task_dict.values():
-                     if (
-                         packet.status == PacketStatus.SENT
-                         and packet.assignee_id == assignee_id
-                     ):
-                         # Atomically claim the task by changing its status
-                         packet.status = PacketStatus.PROCESSING
-                         self._condition.notify_all()
-                         return packet.task
+                 task_ids = self._task_by_assignee.get(assignee_id, deque())
+
+                 # Process all available tasks until we find a valid one
+                 while task_ids:
+                     task_id = task_ids.popleft()
+
+                     if task_id in self._task_dict:
+                         packet = self._task_dict[task_id]
+
+                         if (
+                             packet.status == PacketStatus.SENT
+                             and packet.assignee_id == assignee_id
+                         ):
+                             # Use helper method to properly update status
+                             self._update_task_status(
+                                 task_id, PacketStatus.PROCESSING
+                             )
+                             self._condition.notify_all()
+                             return packet.task
+
                  await self._condition.wait()
 
      async def post_task(
@@ -128,6 +205,8 @@ class TaskChannel:
          async with self._condition:
              packet = Packet(task, publisher_id, assignee_id)
              self._task_dict[packet.task.id] = packet
+             self._task_by_status[PacketStatus.SENT].add(packet.task.id)
+             self._task_by_assignee[assignee_id].append(packet.task.id)
              self._condition.notify_all()
 
      async def post_dependency(
@@ -140,6 +219,7 @@ class TaskChannel:
                  dependency, publisher_id, status=PacketStatus.ARCHIVED
              )
              self._task_dict[packet.task.id] = packet
+             self._task_by_status[PacketStatus.ARCHIVED].add(packet.task.id)
              self._condition.notify_all()
 
      async def return_task(self, task_id: str) -> None:
@@ -148,7 +228,12 @@ class TaskChannel:
          async with self._condition:
              if task_id in self._task_dict:
                  packet = self._task_dict[task_id]
-                 packet.status = PacketStatus.RETURNED
+                 # Only add to publisher queue if not already returned
+                 if packet.status != PacketStatus.RETURNED:
+                     self._update_task_status(task_id, PacketStatus.RETURNED)
+                     self._task_by_publisher[packet.publisher_id].append(
+                         packet.task.id
+                     )
                  self._condition.notify_all()
 
      async def archive_task(self, task_id: str) -> None:
@@ -156,7 +241,17 @@ class TaskChannel:
          async with self._condition:
              if task_id in self._task_dict:
                  packet = self._task_dict[task_id]
-                 packet.status = PacketStatus.ARCHIVED
+                 # Remove from assignee queue before archiving
+                 if (
+                     packet.assignee_id
+                     and packet.assignee_id in self._task_by_assignee
+                 ):
+                     assignee_queue = self._task_by_assignee[packet.assignee_id]
+                     self._task_by_assignee[packet.assignee_id] = deque(
+                         task for task in assignee_queue if task != task_id
+                     )
+                 # Update status (keeps in status index for dependencies)
+                 self._update_task_status(task_id, PacketStatus.ARCHIVED)
                  self._condition.notify_all()
 
      async def remove_task(self, task_id: str) -> None:
@@ -164,17 +259,15 @@ class TaskChannel:
          async with self._condition:
              # Check if task ID exists before removing
              if task_id in self._task_dict:
+                 # Clean up all indexes before removing
+                 self._cleanup_task_from_indexes(task_id)
                  del self._task_dict[task_id]
              self._condition.notify_all()
 
      async def get_dependency_ids(self) -> List[str]:
          r"""Get the IDs of all dependencies in the channel."""
          async with self._condition:
-             dependency_ids = []
-             for task_id, packet in self._task_dict.items():
-                 if packet.status == PacketStatus.ARCHIVED:
-                     dependency_ids.append(task_id)
-             return dependency_ids
+             return list(self._task_by_status[PacketStatus.ARCHIVED])
 
      async def get_task_by_id(self, task_id: str) -> Task:
          r"""Get a task from the channel by its ID."""
camel/societies/workforce/workforce.py
@@ -84,7 +84,7 @@ logger = get_logger(__name__)
  # Constants for configuration values
  MAX_TASK_RETRIES = 3
  MAX_PENDING_TASKS_LIMIT = 20
- TASK_TIMEOUT_SECONDS = 480.0
+ TASK_TIMEOUT_SECONDS = 600.0
  DEFAULT_WORKER_POOL_SIZE = 10
 
 
@@ -1497,6 +1497,9 @@ class Workforce(BaseNode):
                  start_coroutine, self._loop
              )
              self._child_listening_tasks.append(child_task)
+         else:
+             # Close the coroutine to prevent RuntimeWarning
+             start_coroutine.close()
      else:
          # Close the coroutine to prevent RuntimeWarning
          start_coroutine.close()
@@ -2310,6 +2313,9 @@ class Workforce(BaseNode):
          r"""Get the task that's published by this node and just get returned
          from the assignee. Includes timeout handling to prevent indefinite
          waiting.
+
+         Raises:
+             asyncio.TimeoutError: If waiting for task exceeds timeout
          """
          try:
              # Add timeout to prevent indefinite waiting
@@ -2317,6 +2323,17 @@ class Workforce(BaseNode):
                  self._channel.get_returned_task_by_publisher(self.node_id),
                  timeout=self.task_timeout_seconds,
              )
+         except asyncio.TimeoutError:
+             # Re-raise timeout errors to be handled by caller
+             # This prevents hanging when tasks are stuck
+             logger.warning(
+                 f"Timeout waiting for task return in workforce "
+                 f"{self.node_id}. "
+                 f"Timeout: {self.task_timeout_seconds}s, "
+                 f"Pending tasks: {len(self._pending_tasks)}, "
+                 f"In-flight tasks: {self._in_flight_tasks}"
+             )
+             raise
          except Exception as e:
              error_msg = (
                  f"Error getting returned task {e} in "
@@ -2835,9 +2852,24 @@ class Workforce(BaseNode):
              self._last_snapshot_time = time.time()
 
              # Get returned task
-             returned_task = await self._get_returned_task()
+             try:
+                 returned_task = await self._get_returned_task()
+             except asyncio.TimeoutError:
+                 # Handle timeout - check if we have tasks stuck in flight
+                 if self._in_flight_tasks > 0:
+                     logger.warning(
+                         f"Timeout waiting for {self._in_flight_tasks} "
+                         f"in-flight tasks. Breaking to prevent hanging."
+                     )
+                     # Break the loop to prevent indefinite hanging
+                     # The finally block will handle cleanup
+                     break
+                 else:
+                     # No tasks in flight, safe to continue
+                     await self._post_ready_tasks()
+                     continue
 
-             # If no task was returned, continue
+             # If no task was returned (other errors), continue
              if returned_task is None:
                  logger.debug(
                      f"No task returned in workforce {self.node_id}. "
camel/toolkits/__init__.py
@@ -23,7 +23,7 @@ from .open_api_specs.security_config import openapi_security_config
  from .math_toolkit import MathToolkit
  from .search_toolkit import SearchToolkit
  from .weather_toolkit import WeatherToolkit
- from .openai_image_toolkit import OpenAIImageToolkit
+ from .image_generation_toolkit import ImageGenToolkit, OpenAIImageToolkit
  from .ask_news_toolkit import AskNewsToolkit, AsyncAskNewsToolkit
  from .linkedin_toolkit import LinkedInToolkit
  from .reddit_toolkit import RedditToolkit
@@ -88,6 +88,7 @@ from .web_deploy_toolkit import WebDeployToolkit
  from .screenshot_toolkit import ScreenshotToolkit
  from .message_integration import ToolkitMessageIntegration
  from .notion_mcp_toolkit import NotionMCPToolkit
+ from .minimax_mcp_toolkit import MinimaxMCPToolkit
 
  __all__ = [
      'BaseToolkit',
@@ -102,7 +103,7 @@ __all__ = [
      'SearchToolkit',
      'SlackToolkit',
      'WhatsAppToolkit',
-     'OpenAIImageToolkit',
+     'ImageGenToolkit',
      'TwitterToolkit',
      'WeatherToolkit',
      'RetrievalToolkit',
@@ -151,7 +152,7 @@ __all__ = [
      'PlaywrightMCPToolkit',
      'WolframAlphaToolkit',
      'BohriumToolkit',
-     'OpenAIImageToolkit',
+     'OpenAIImageToolkit',  # Backward compatibility
      'TaskPlanningToolkit',
      'HybridBrowserToolkit',
      'EdgeOnePagesMCPToolkit',
@@ -165,4 +166,5 @@ __all__ = [
      'RegisteredAgentToolkit',
      'ToolkitMessageIntegration',
      'NotionMCPToolkit',
+     'MinimaxMCPToolkit',
  ]
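
The rename keeps the old name importable: since the renamed module is imported as `ImageGenToolkit, OpenAIImageToolkit` above, image_generation_toolkit.py most likely re-exports the old class name as an alias. A sketch of that pattern; the actual file contents are not shown in this diff beyond the combined import:

    # Rename-with-alias pattern (sketch; not the file's verbatim contents).
    class ImageGenToolkit:  # stand-in for the real toolkit class
        ...


    # Old name kept importable so existing code and __all__ entries keep working.
    OpenAIImageToolkit = ImageGenToolkit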