camel-ai 0.2.76a9__py3-none-any.whl → 0.2.76a12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

@@ -277,6 +277,7 @@ class Workforce(BaseNode):
         self._pause_event = asyncio.Event()
         self._pause_event.set()  # Initially not paused
         self._stop_requested = False
+        self._skip_requested = False
         self._snapshots: List[WorkforceSnapshot] = []
         self._completed_tasks: List[Task] = []
         self._loop: Optional[asyncio.AbstractEventLoop] = None
@@ -449,10 +450,6 @@ class Workforce(BaseNode):
                 "better context continuity during task handoffs."
             )
 
-    # ------------------------------------------------------------------
-    # Helper for propagating pause control to externally supplied agents
-    # ------------------------------------------------------------------
-
     def _validate_agent_compatibility(
         self, agent: ChatAgent, agent_context: str = "agent"
     ) -> None:
@@ -489,6 +486,9 @@ class Workforce(BaseNode):
                 "the Workforce."
             )
 
+    # ------------------------------------------------------------------
+    # Helper for propagating pause control to externally supplied agents
+    # ------------------------------------------------------------------
     def _attach_pause_event_to_agent(self, agent: ChatAgent) -> None:
         r"""Ensure the given ChatAgent shares this workforce's pause_event.
 
@@ -981,6 +981,39 @@ class Workforce(BaseNode):
                 f"(event-loop not yet started)."
             )
 
+    async def _async_skip_gracefully(self) -> None:
+        r"""Async implementation of skip_gracefully to run on the event
+        loop.
+        """
+        self._skip_requested = True
+        if self._pause_event.is_set() is False:
+            self._pause_event.set()  # Resume if paused to process skip
+        logger.info(f"Workforce {self.node_id} skip requested.")
+
+    def skip_gracefully(self) -> None:
+        r"""Request workforce to skip current pending tasks and move to next
+        main task from the queue. If no main tasks exist, acts like
+        stop_gracefully.
+
+        This method clears the current pending subtasks and moves to the next
+        main task in the queue if available. Works both when the internal
+        event-loop is alive and when it has not yet been started.
+        """
+
+        if self._loop and not self._loop.is_closed():
+            self._submit_coro_to_loop(self._async_skip_gracefully())
+        else:
+            # Loop not yet created, set the flag synchronously so later
+            # startup will respect it.
+            self._skip_requested = True
+            # Ensure any pending pause is released so that when the loop does
+            # start it can see the skip request and exit.
+            self._pause_event.set()
+            logger.info(
+                f"Workforce {self.node_id} skip requested "
+                f"(event-loop not yet started)."
+            )
+
     def save_snapshot(self, description: str = "") -> None:
         r"""Save current state as a snapshot."""
         snapshot = WorkforceSnapshot(
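
The two methods added above give callers a cooperative "skip" control next to the existing stop_gracefully()/pause flow. A minimal usage sketch, with assumed setup (the workforce construction, task content, and sleep are placeholders, not taken from the package's examples):

    import asyncio

    from camel.societies.workforce import Workforce
    from camel.tasks import Task

    async def main() -> None:
        workforce = Workforce("demo workforce")
        task = Task(content="Summarize the quarterly report", id="main_0")

        # Run the task in the background so control methods can be called.
        run = asyncio.create_task(workforce.process_task_async(task))
        await asyncio.sleep(5)  # let some subtasks get scheduled

        # Drop the remaining pending subtasks and move to the next main task;
        # with nothing else queued this behaves like stop_gracefully().
        workforce.skip_gracefully()
        await run

    asyncio.run(main())
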
@@ -1040,36 +1073,148 @@ class Workforce(BaseNode):
         content: str,
         task_id: Optional[str] = None,
         additional_info: Optional[Dict[str, Any]] = None,
+        as_subtask: bool = False,
         insert_position: int = -1,
     ) -> Task:
-        r"""Add a new task to the pending queue."""
-        new_task = Task(
+        r"""Add a new task to the workforce.
+
+        By default, this method adds a main task that will be decomposed into
+        subtasks. Set `as_subtask=True` to add a task directly to the pending
+        subtask queue without decomposition.
+
+        Args:
+            content (str): The content of the task.
+            task_id (Optional[str], optional): Optional ID for the task.
+                If not provided, a unique ID will be generated.
+            additional_info (Optional[Dict[str, Any]], optional): Optional
+                additional metadata for the task.
+            as_subtask (bool, optional): If True, adds the task directly to
+                the pending subtask queue. If False, adds as a main task that
+                will be decomposed. Defaults to False.
+            insert_position (int, optional): Position to insert the task in
+                the pending queue. Only applies when as_subtask=True.
+                Defaults to -1 (append to end).
+
+        Returns:
+            Task: The created task object.
+        """
+        if as_subtask:
+            new_task = Task(
+                content=content,
+                id=task_id or f"human_added_{len(self._pending_tasks)}",
+                additional_info=additional_info,
+            )
+
+            # Add directly to current pending subtasks
+            if insert_position == -1:
+                self._pending_tasks.append(new_task)
+            else:
+                # Convert deque to list, insert, then back to deque
+                tasks_list = list(self._pending_tasks)
+                tasks_list.insert(insert_position, new_task)
+                self._pending_tasks = deque(tasks_list)
+
+            logger.info(f"New subtask added to pending queue: {new_task.id}")
+            return new_task
+        else:
+            # Add as main task that needs decomposition
+            # Use additional_info to mark this task needs decomposition
+            # Make a copy to avoid modifying user's dict
+            info = additional_info.copy() if additional_info else {}
+            info['_needs_decomposition'] = True
+
+            task_count = sum(
+                1
+                for t in self._pending_tasks
+                if t.additional_info
+                and t.additional_info.get('_needs_decomposition')
+            )
+
+            new_task = Task(
+                content=content,
+                id=task_id or f"main_task_{task_count}",
+                additional_info=info,
+            )
+
+            self._pending_tasks.append(new_task)
+            logger.info(f"New main task added to pending queue: {new_task.id}")
+            return new_task
+
+    def add_main_task(
+        self,
+        content: str,
+        task_id: Optional[str] = None,
+        additional_info: Optional[Dict[str, Any]] = None,
+    ) -> Task:
+        r"""Add a new main task that will be decomposed into subtasks.
+
+        This is an alias for :meth:`add_task` with `as_subtask=False`.
+
+        Args:
+            content (str): The content of the main task.
+            task_id (Optional[str], optional): Optional ID for the task.
+            additional_info (Optional[Dict[str, Any]], optional): Optional
+                additional metadata.
+
+        Returns:
+            Task: The created main task object.
+        """
+        return self.add_task(
             content=content,
-            id=task_id or f"human_added_{len(self._pending_tasks)}",
+            task_id=task_id,
             additional_info=additional_info,
+            as_subtask=False,
         )
-        if insert_position == -1:
-            self._pending_tasks.append(new_task)
-        else:
-            # Convert deque to list, insert, then back to deque
-            tasks_list = list(self._pending_tasks)
-            tasks_list.insert(insert_position, new_task)
-            self._pending_tasks = deque(tasks_list)
 
-        logger.info(f"New task added: {new_task.id}")
-        return new_task
+    def add_subtask(
+        self,
+        content: str,
+        task_id: Optional[str] = None,
+        additional_info: Optional[Dict[str, Any]] = None,
+        insert_position: int = -1,
+    ) -> Task:
+        r"""Add a new subtask to the current pending queue.
+
+        This is an alias for :meth:`add_task` with `as_subtask=True`.
+
+        Args:
+            content (str): The content of the subtask.
+            task_id (Optional[str], optional): Optional ID for the task.
+            additional_info (Optional[Dict[str, Any]], optional): Optional
+                additional metadata.
+            insert_position (int, optional): Position to insert the task.
+                Defaults to -1 (append to end).
+
+        Returns:
+            Task: The created subtask object.
+        """
+        return self.add_task(
+            content=content,
+            task_id=task_id,
+            additional_info=additional_info,
+            as_subtask=True,
+            insert_position=insert_position,
+        )
 
     def remove_task(self, task_id: str) -> bool:
-        r"""Remove a task from the pending queue."""
-        # Convert to list to find and remove
-        tasks_list = list(self._pending_tasks)
-        for i, task in enumerate(tasks_list):
+        r"""Remove a task from the pending queue or main task queue.
+
+        Args:
+            task_id (str): The ID of the task to remove.
+
+        Returns:
+            bool: True if task was found and removed, False otherwise.
+        """
+        # Check main task queue first
+        pending_tasks_list = list(self._pending_tasks)
+        for i, task in enumerate(pending_tasks_list):
             if task.id == task_id:
-                tasks_list.pop(i)
-                self._pending_tasks = deque(tasks_list)
-                logger.info(f"Task {task_id} removed.")
+                pending_tasks_list.pop(i)
+                self._pending_tasks = deque(pending_tasks_list)
+                logger.info(f"Task {task_id} removed from pending queue.")
                 return True
-        logger.warning(f"Task {task_id} not found in pending tasks.")
+
+        logger.warning(f"Task {task_id} not found in any task queue.")
         return False
 
     def reorder_tasks(self, task_ids: List[str]) -> bool:
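
For orientation, a brief sketch of how the reworked queue API above might be driven from user code (the workforce instance and task contents are assumed placeholders):

    # Queue a main task; it is tagged with '_needs_decomposition' in
    # additional_info and will be decomposed when the loop picks it up.
    report = workforce.add_main_task(content="Write a market analysis report")

    # Queue a concrete subtask directly, bypassing decomposition, and put it
    # at the front of the pending queue.
    workforce.add_subtask(
        content="Collect revenue figures for 2023",
        insert_position=0,
    )

    # add_task() remains the single underlying entry point for both paths.
    workforce.add_task(content="Proofread the final draft", as_subtask=True)

    # Tasks can still be withdrawn by id while they sit in the pending queue.
    workforce.remove_task(report.id)
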
@@ -1184,26 +1329,21 @@ class Workforce(BaseNode):
             "main_task_id": self._task.id if self._task else None,
         }
 
-    @check_if_running(False)
-    async def process_task_async(
-        self, task: Task, interactive: bool = False
-    ) -> Task:
-        r"""Main entry point to process a task asynchronously.
+    async def handle_decompose_append_task(
+        self, task: Task, reset: bool = True
+    ) -> List[Task]:
+        r"""Handle task decomposition and validation with
+        workforce environment functions. Then append to
+        pending tasks if decomposition happened.
 
         Args:
             task (Task): The task to be processed.
-            interactive (bool, optional): If True, enables human-intervention
-                workflow (pause/resume/snapshot). Defaults to False, which
-                runs the task in a blocking one-shot manner.
+            reset (Bool): Should trigger workforce reset (Workforce must not
+                be running). Default: True
 
         Returns:
-            Task: The updated task.
+            List[Task]: The decomposed subtasks or the original task.
         """
-        # Delegate to intervention pipeline when requested to keep
-        # backward-compat.
-        if interactive:
-            return await self._process_task_with_snapshot(task)
-
         if not validate_task_content(task.content, task.id):
             task.state = TaskState.FAILED
             task.result = "Task failed: Invalid or empty content provided"
@@ -1211,10 +1351,16 @@ class Workforce(BaseNode):
                 f"Task {task.id} rejected: Invalid or empty content. "
                 f"Content preview: '{task.content}'"
             )
-            return task
+            return [task]
+
+        if reset and self._state != WorkforceState.RUNNING:
+            self.reset()
+            logger.info("Workforce reset before handling task.")
 
-        self.reset()
+        # Focus on the new task
         self._task = task
+        task.state = TaskState.FAILED
+
         if self.metrics_logger:
             self.metrics_logger.log_task_created(
                 task_id=task.id,
@@ -1222,7 +1368,6 @@ class Workforce(BaseNode):
                 task_type=task.type,
                 metadata=task.additional_info,
             )
-        task.state = TaskState.FAILED
         # The agent tend to be overconfident on the whole task, so we
         # decompose the task into subtasks first
         subtasks_result = self._decompose_task(task)
@@ -1248,14 +1393,40 @@ class Workforce(BaseNode):
                     task_type=subtask.type,
                     metadata=subtask.additional_info,
                 )
+
         if subtasks:
-            # If decomposition happened, the original task becomes a container.
-            # We only execute its subtasks.
+            # _pending_tasks will contain both undecomposed
+            # and decomposed tasks, so we use additional_info
+            # to mark the tasks that need decomposition instead
             self._pending_tasks.extendleft(reversed(subtasks))
         else:
             # If no decomposition, execute the original task.
             self._pending_tasks.append(task)
 
+        return subtasks
+
+    @check_if_running(False)
+    async def process_task_async(
+        self, task: Task, interactive: bool = False
+    ) -> Task:
+        r"""Main entry point to process a task asynchronously.
+
+        Args:
+            task (Task): The task to be processed.
+            interactive (bool, optional): If True, enables human-intervention
+                workflow (pause/resume/snapshot). Defaults to False, which
+                runs the task in a blocking one-shot manner.
+
+        Returns:
+            Task: The updated task.
+        """
+        # Delegate to intervention pipeline when requested to keep
+        # backward-compat.
+        if interactive:
+            return await self._process_task_with_snapshot(task)
+
+        subtasks = await self.handle_decompose_append_task(task)
+
         self.set_channel(TaskChannel())
 
         await self.start()
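
With the refactor above, both entry points funnel through handle_decompose_append_task; a rough sketch of the resulting call shapes, assuming workforce and task objects built elsewhere:

    # One-shot, non-interactive processing: validate, decompose, run.
    result = await workforce.process_task_async(task)

    # Interactive mode still delegates to the snapshot/intervention pipeline.
    result = await workforce.process_task_async(task, interactive=True)

    # The decomposition step is now callable on its own, e.g. to pre-fill
    # the pending queue without resetting an already running workforce.
    subtasks = await workforce.handle_decompose_append_task(task, reset=False)
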
@@ -1337,39 +1508,8 @@ class Workforce(BaseNode):
            Task: The updated task.
        """
 
-        if not validate_task_content(task.content, task.id):
-            task.state = TaskState.FAILED
-            task.result = "Task failed: Invalid or empty content provided"
-            logger.warning(
-                f"Task {task.id} rejected: Invalid or empty content. "
-                f"Content preview: '{task.content}'"
-            )
-            return task
-
-        self.reset()
-        self._task = task
-        self._state = WorkforceState.RUNNING
-        task.state = TaskState.FAILED  # TODO: Add logic for OPEN
-
-        # Decompose the task into subtasks first
-        subtasks_result = self._decompose_task(task)
+        await self.handle_decompose_append_task(task)
 
-        # Handle both streaming and non-streaming results
-        if isinstance(subtasks_result, Generator):
-            # This is a generator (streaming mode)
-            subtasks = []
-            for new_tasks in subtasks_result:
-                subtasks.extend(new_tasks)
-        else:
-            # This is a regular list (non-streaming mode)
-            subtasks = subtasks_result
-        if subtasks:
-            # If decomposition happened, the original task becomes a container.
-            # We only execute its subtasks.
-            self._pending_tasks.extendleft(reversed(subtasks))
-        else:
-            # If no decomposition, execute the original task.
-            self._pending_tasks.append(task)
         self.set_channel(TaskChannel())
 
         # Save initial snapshot
@@ -1710,6 +1850,7 @@ class Workforce(BaseNode):
         # Reset intervention state
         self._state = WorkforceState.IDLE
         self._stop_requested = False
+        self._skip_requested = False
         # Handle asyncio.Event in a thread-safe way
         if self._loop and not self._loop.is_closed():
             # If we have a loop, use it to set the event safely
@@ -2412,7 +2553,15 @@ class Workforce(BaseNode):
         tasks_to_assign = [
             task
             for task in self._pending_tasks
-            if task.id not in self._task_dependencies
+            if (
+                task.id not in self._task_dependencies
+                and (
+                    task.additional_info is None
+                    or not task.additional_info.get(
+                        "_needs_decomposition", False
+                    )
+                )
+            )
         ]
         if tasks_to_assign:
             logger.debug(
@@ -2448,6 +2597,27 @@ class Workforce(BaseNode):
         for task in self._pending_tasks:
             # A task must be assigned to be considered for posting
             if task.id in self._task_dependencies:
+                # Skip if task has already been posted to prevent duplicates
+                try:
+                    task_from_channel = await self._channel.get_task_by_id(
+                        task.id
+                    )
+                    # Check if task is already assigned to a worker
+                    if (
+                        task_from_channel
+                        and task_from_channel.assigned_worker_id
+                    ):
+                        logger.debug(
+                            f"Task {task.id} already assigned to "
+                            f"{task_from_channel.assigned_worker_id}, "
+                            f"skipping to prevent duplicate"
+                        )
+                        continue
+                except Exception as e:
+                    logger.info(
+                        f"Task {task.id} non existent in channel. "
+                        f"Assigning task: {e}"
+                    )
                 dependencies = self._task_dependencies[task.id]
                 # Check if all dependencies for this task are in the completed
                 # set and their state is DONE
@@ -2870,6 +3040,124 @@ class Workforce(BaseNode):
         # Use logger.info or print, consistent with existing style
         logger.info(f"Workforce logs dumped to {file_path}")
 
+    async def _handle_skip_task(self) -> bool:
+        r"""Handle skip request by marking pending and in-flight tasks
+        as completed.
+
+        Returns:
+            bool: True if workforce should stop (no independent tasks),
+                False to continue.
+        """
+        logger.info("Skip requested, processing skip logic.")
+
+        # Mark all pending tasks as completed instead of just clearing
+        pending_tasks_to_complete = list(self._pending_tasks)
+        if pending_tasks_to_complete:
+            logger.info(
+                f"Marking {len(pending_tasks_to_complete)} pending tasks "
+                f"as completed."
+            )
+            for task in pending_tasks_to_complete:
+                # Don't remove tasks that need decomposition
+                if task.additional_info and task.additional_info.get(
+                    '_needs_decomposition', False
+                ):
+                    continue
+                # Set task state to DONE and add a completion message
+                task.state = TaskState.DONE
+                task.result = "Task marked as completed due to skip request"
+
+                # Use the existing handle completed task function
+                await self._handle_completed_task(task)
+
+        # Handle in-flight tasks if they exist
+        if self._in_flight_tasks > 0:
+            logger.info(
+                f"Found {self._in_flight_tasks} in-flight tasks. "
+                f"Retrieving and completing them."
+            )
+            try:
+                # Get all in-flight tasks for this publisher from the channel
+                in_flight_tasks = await self._channel.get_in_flight_tasks(
+                    self.node_id
+                )
+                logger.info(
+                    f"Retrieved {len(in_flight_tasks)} in-flight "
+                    f"tasks from channel."
+                )
+
+                for task in in_flight_tasks:
+                    # Set task state to DONE and add a completion message
+                    task.state = TaskState.DONE
+                    task.result = (
+                        "Task marked as completed due to skip request"
+                    )
+
+                    # Remove the task from the channel to avoid hanging
+                    await self._channel.remove_task(task.id)
+
+                    # Decrement in-flight counter
+                    self._decrement_in_flight_tasks(
+                        task.id, "skip request - removed from channel"
+                    )
+
+                    # Handle as completed task to update dependencies
+                    await self._handle_completed_task(task)
+
+                    logger.info(
+                        f"Completed in-flight task {task.id} due "
+                        f"to skip request."
+                    )
+
+            except Exception as e:
+                logger.error(
+                    f"Error handling in-flight tasks during skip: {e}",
+                    exc_info=True,
+                )
+                # Reset in-flight counter to prevent hanging
+                self._in_flight_tasks = 0
+
+        # Check if there are any pending tasks (including those needing
+        # decomposition)
+        if self._pending_tasks:
+            # Check if the first pending task needs decomposition
+            next_task = self._pending_tasks[0]
+            if next_task.additional_info and next_task.additional_info.get(
+                '_needs_decomposition'
+            ):
+                logger.info(
+                    f"Decomposing main task {next_task.id} after skip request."
+                )
+                try:
+                    # Remove the decomposition flag to avoid re-decomposition
+                    next_task.additional_info['_needs_decomposition'] = False
+
+                    # Decompose the task and append subtasks to _pending_tasks
+                    await self.handle_decompose_append_task(
+                        next_task, reset=False
+                    )
+
+                    # Mark the main task as completed and remove from pending
+                    await self._handle_completed_task(next_task)
+                    logger.info(
+                        f"Main task {next_task.id} decomposed after "
+                        f"skip request"
+                    )
+                except Exception as e:
+                    logger.error(
+                        f"Error decomposing main task {next_task.id} "
+                        f"after skip: {e}",
+                        exc_info=True,
+                    )
+
+            logger.info("Pending tasks available after skip, continuing.")
+            await self._post_ready_tasks()
+            return False  # Continue processing
+        else:
+            # No pending tasks available, act like stop
+            logger.info("No pending tasks available, acting like stop.")
+            return True  # Stop processing
+
     @check_if_running(False)
     async def _listen_to_channel(self) -> None:
         r"""Continuously listen to the channel, post task to the channel and
@@ -2898,6 +3186,75 @@ class Workforce(BaseNode):
                 logger.info("Stop requested, breaking execution loop.")
                 break
 
+            # Check for skip request after potential pause
+            if self._skip_requested:
+                should_stop = await self._handle_skip_task()
+                if should_stop:
+                    self._stop_requested = True
+                    break
+
+                # Reset skip flag
+                self._skip_requested = False
+                continue
+
+            # Check if we should decompose a main task
+            # Only decompose when no tasks are in flight and pending queue
+            # is empty
+            if not self._pending_tasks and self._in_flight_tasks == 0:
+                # All tasks completed, will exit loop
+                break
+
+            # Check if the first pending task needs decomposition
+            # This happens when add_task(as_subtask=False) was called
+            if self._pending_tasks and self._in_flight_tasks == 0:
+                next_task = self._pending_tasks[0]
+                if (
+                    next_task.additional_info
+                    and next_task.additional_info.get(
+                        '_needs_decomposition'
+                    )
+                ):
+                    logger.info(f"Decomposing main task: {next_task.id}")
+                    try:
+                        # Remove the decomposition flag to avoid
+                        # re-decomposition
+                        next_task.additional_info[
+                            '_needs_decomposition'
+                        ] = False
+
+                        # Decompose the task and append subtasks to
+                        # _pending_tasks
+                        await self.handle_decompose_append_task(
+                            next_task, reset=False
+                        )
+
+                        # Mark the main task as completed (decomposition
+                        # successful) and Remove it from pending tasks
+                        await self._handle_completed_task(next_task)
+                        logger.info(
+                            f"Main task {next_task.id} decomposed and "
+                            f"ready for processing"
+                        )
+                    except Exception as e:
+                        logger.error(
+                            f"Error decomposing main task {next_task.id}: "
+                            f"{e}",
+                            exc_info=True,
+                        )
+                        # Revert back to the queue for retry later if
+                        # decomposition failed
+                        if not self._pending_tasks:
+                            self._pending_tasks.appendleft(next_task)
+                        else:
+                            logger.warning(
+                                "Pending tasks exist after decomposition "
+                                "error."
+                            )
+
+                    # Immediately assign and post the transferred tasks
+                    await self._post_ready_tasks()
+                    continue
+
             # Save snapshot before processing next task
             if self._pending_tasks:
                 current_task = self._pending_tasks[0]
@@ -74,7 +74,6 @@ class OceanBaseStorage(BaseVectorStorage):
             ObVecClient,
         )
         from pyobvector.client.index_param import (
-            IndexParam,
            IndexParams,
        )
        from pyobvector.schema import VECTOR
camel/tasks/task.py CHANGED
@@ -237,8 +237,9 @@ class Task(BaseModel):
            (default: :obj:`[]`)
        additional_info (Optional[Dict[str, Any]]): Additional information for
            the task. (default: :obj:`None`)
-        image_list (Optional[List[Image.Image]]): Optional list of PIL Image
-            objects associated with the task. (default: :obj:`None`)
+        image_list (Optional[List[Union[Image.Image, str]]]): Optional list
+            of PIL Image objects or image URLs (strings) associated with the
+            task. (default: :obj:`None`)
        image_detail (Literal["auto", "low", "high"]): Detail level of the
            images associated with the task. (default: :obj:`auto`)
        video_bytes (Optional[bytes]): Optional bytes of a video associated
@@ -271,7 +272,7 @@ class Task(BaseModel):
 
     additional_info: Optional[Dict[str, Any]] = None
 
-    image_list: Optional[List[Image.Image]] = None
+    image_list: Optional[List[Union[Image.Image, str]]] = None
 
     image_detail: Literal["auto", "low", "high"] = "auto"
 
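
The task.py change above lets image_list mix PIL images with plain URL strings. A small hedged example of constructing such a task (the file path and URL are placeholders):

    from PIL import Image

    from camel.tasks import Task

    task = Task(
        content="Describe what the two images have in common",
        id="img_task_0",
        image_list=[
            Image.open("local_chart.png"),           # PIL.Image object
            "https://example.com/remote_photo.jpg",  # image URL as a string
        ],
        image_detail="high",
    )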