camel-ai 0.2.77__py3-none-any.whl → 0.2.79a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +321 -325
- camel/datasets/base_generator.py +39 -10
- camel/environments/single_step.py +28 -3
- camel/memories/__init__.py +1 -2
- camel/memories/blocks/chat_history_block.py +2 -17
- camel/models/aws_bedrock_model.py +1 -17
- camel/models/moonshot_model.py +102 -5
- camel/societies/workforce/events.py +122 -0
- camel/societies/workforce/single_agent_worker.py +164 -34
- camel/societies/workforce/workforce.py +417 -156
- camel/societies/workforce/workforce_callback.py +74 -0
- camel/societies/workforce/workforce_logger.py +144 -140
- camel/societies/workforce/workforce_metrics.py +33 -0
- camel/toolkits/excel_toolkit.py +1 -1
- camel/toolkits/file_toolkit.py +3 -2
- camel/utils/context_utils.py +53 -0
- {camel_ai-0.2.77.dist-info → camel_ai-0.2.79a0.dist-info}/METADATA +23 -13
- {camel_ai-0.2.77.dist-info → camel_ai-0.2.79a0.dist-info}/RECORD +21 -18
- {camel_ai-0.2.77.dist-info → camel_ai-0.2.79a0.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.77.dist-info → camel_ai-0.2.79a0.dist-info}/licenses/LICENSE +0 -0
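The headline change in this pre-release is a pluggable observer layer for `Workforce`: the new `events.py`, `workforce_callback.py`, and `workforce_metrics.py` modules introduce typed lifecycle events, a `WorkforceCallback` handler interface, and a `WorkforceMetrics` interface, and `workforce.py` replaces its hard-wired `self.metrics_logger = WorkforceLogger(...)` with a user-supplied `callbacks` list (with `WorkforceLogger` retained as the default metrics implementation). It also adds an async `save_workflow_memories_async()` path and UUID-based deduplication for shared memory records.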
@@ -37,6 +37,9 @@ from typing import (
     cast,
 )

+from .workforce_callback import WorkforceCallback
+from .workforce_metrics import WorkforceMetrics
+
 if TYPE_CHECKING:
     from camel.utils.context_utils import ContextUtility

@@ -89,6 +92,16 @@ from camel.toolkits import (
 from camel.types import ModelPlatformType, ModelType
 from camel.utils import dependencies_required

+from .events import (
+    AllTasksCompletedEvent,
+    TaskAssignedEvent,
+    TaskCompletedEvent,
+    TaskCreatedEvent,
+    TaskDecomposedEvent,
+    TaskFailedEvent,
+    TaskStartedEvent,
+    WorkerCreatedEvent,
+)
 from .workforce_logger import WorkforceLogger

 if os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
@@ -205,6 +218,17 @@ class Workforce(BaseNode):
             support native structured output. When disabled, the workforce
             uses the native response_format parameter.
             (default: :obj:`True`)
+        callbacks (Optional[List[WorkforceCallback]], optional): A list of
+            callback handlers to observe and record workforce lifecycle events
+            and metrics (e.g., task creation/assignment/start/completion/
+            failure, worker creation/deletion, all-tasks-completed). All
+            items must be instances of :class:`WorkforceCallback`, otherwise
+            a :class:`ValueError` is raised. If none of the provided
+            callbacks implement :class:`WorkforceMetrics`, a built-in
+            :class:`WorkforceLogger` (implements both callback and metrics)
+            is added automatically. If at least one provided callback
+            implements :class:`WorkforceMetrics`, no default logger is added.
+            (default: :obj:`None`)

     Example:
         >>> import asyncio
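Going by the signature and docstring above, wiring in a custom observer might look like the following sketch. The import paths are assumed from the file layout in this diff, `PrintingCallback` is hypothetical, and whether `WorkforceCallback` provides no-op defaults for unimplemented hooks is not visible here:

```python
from camel.societies.workforce import Workforce
from camel.societies.workforce.events import TaskCompletedEvent, TaskFailedEvent
from camel.societies.workforce.workforce_callback import WorkforceCallback


class PrintingCallback(WorkforceCallback):
    # Hypothetical handler: echo task outcomes. Event field names
    # (task_id, worker_id, error_message) are taken from this diff.
    def log_task_completed(self, event: TaskCompletedEvent) -> None:
        print(f"completed: {event.task_id} by {event.worker_id}")

    def log_task_failed(self, event: TaskFailedEvent) -> None:
        print(f"failed: {event.task_id}: {event.error_message}")


# PrintingCallback does not implement WorkforceMetrics, so a default
# WorkforceLogger is appended automatically per the docstring above.
workforce = Workforce("My Team", callbacks=[PrintingCallback()])
```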
@@ -257,6 +281,7 @@ class Workforce(BaseNode):
         share_memory: bool = False,
         use_structured_output_handler: bool = True,
         task_timeout_seconds: Optional[float] = None,
+        callbacks: Optional[List[WorkforceCallback]] = None,
     ) -> None:
         super().__init__(description)
         self._child_listening_tasks: Deque[
@@ -272,7 +297,6 @@ class Workforce(BaseNode):
         )
         if self.use_structured_output_handler:
             self.structured_handler = StructuredOutputHandler()
-        self.metrics_logger = WorkforceLogger(workforce_id=self.node_id)
         self._task: Optional[Task] = None
         self._pending_tasks: Deque[Task] = deque()
         self._task_dependencies: Dict[str, List[str]] = {}
@@ -295,15 +319,9 @@ class Workforce(BaseNode):
         self._last_snapshot_time: float = 0.0
         # Minimum seconds between automatic snapshots
         self.snapshot_interval: float = 30.0
-
-
-
-            role_or_desc = child.description
-            self.metrics_logger.log_worker_created(
-                worker_id=child.node_id,
-                worker_type=worker_type,
-                role=role_or_desc,
-            )
+        # Shared memory UUID tracking to prevent re-sharing duplicates
+        self._shared_memory_uuids: Set[str] = set()
+        self._initialize_callbacks(callbacks)

         # Set up coordinator agent with default system message
         coord_agent_sys_msg = BaseMessage.make_assistant_message(
@@ -463,20 +481,75 @@ class Workforce(BaseNode):
     # Helper for propagating pause control to externally supplied agents
     # ------------------------------------------------------------------

-    def
+    def _initialize_callbacks(
+        self, callbacks: Optional[List[WorkforceCallback]]
+    ) -> None:
+        r"""Validate, register, and prime workforce callbacks."""
+        self._callbacks: List[WorkforceCallback] = []
+
+        if callbacks:
+            for cb in callbacks:
+                if isinstance(cb, WorkforceCallback):
+                    self._callbacks.append(cb)
+                else:
+                    raise ValueError(
+                        "All callbacks must be instances of WorkforceCallback"
+                    )
+
+        has_metrics_callback = any(
+            isinstance(cb, WorkforceMetrics) for cb in self._callbacks
+        )
+
+        if not has_metrics_callback:
+            self._callbacks.append(WorkforceLogger(workforce_id=self.node_id))
+        else:
+            logger.info(
+                "WorkforceMetrics implementation detected. Skipping default "
+                "WorkforceLogger addition."
+            )
+
+        for child in self._children:
+            self._notify_worker_created(child)
+
+    def _notify_worker_created(
+        self,
+        worker_node: BaseNode,
+        *,
+        worker_type: Optional[str] = None,
+        role: Optional[str] = None,
+        metadata: Optional[Dict[str, Any]] = None,
+    ) -> None:
+        r"""Emit a worker-created event to all registered callbacks."""
+        event = WorkerCreatedEvent(
+            worker_id=worker_node.node_id,
+            worker_type=worker_type or type(worker_node).__name__,
+            role=role or worker_node.description,
+            metadata=metadata,
+        )
+        for cb in self._callbacks:
+            cb.log_worker_created(event)
+
+    def _get_or_create_shared_context_utility(
+        self,
+        session_id: Optional[str] = None,
+    ) -> "ContextUtility":
         r"""Get or create the shared context utility for workflow management.

         This method creates the context utility only when needed, avoiding
         unnecessary session folder creation during initialization.

+        Args:
+            session_id (Optional[str]): Custom session ID to use. If None,
+                auto-generates a timestamped session ID. (default: :obj:`None`)
+
         Returns:
             ContextUtility: The shared context utility instance.
         """
         if self._shared_context_utility is None:
             from camel.utils.context_utils import ContextUtility

-            self._shared_context_utility = (
-
+            self._shared_context_utility = ContextUtility.get_workforce_shared(
+                session_id=session_id
             )
         return self._shared_context_utility

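A short sketch of the registration rules `_initialize_callbacks` enforces (same assumed imports as the previous example; plain `Workforce("My Team")` construction is taken from the docstring's own usage):

```python
# Anything that is not a WorkforceCallback is rejected up front.
try:
    Workforce("My Team", callbacks=["not a callback"])  # type: ignore[list-item]
except ValueError as exc:
    print(exc)  # "All callbacks must be instances of WorkforceCallback"

# With no callbacks at all, a WorkforceLogger (callback + metrics) is
# appended, so the KPI and log accessors keep working out of the box.
wf = Workforce("My Team")

# A callback that also implements WorkforceMetrics instead takes the
# logged "Skipping default WorkforceLogger addition." path.
```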
@@ -644,14 +717,29 @@ class Workforce(BaseNode):
             )
             return

-        #
+        # Filter out already-shared records to prevent re-sharing
+        # This prevents exponential growth of duplicate records
+        new_records = []
         for record in memory_records:
+            record_uuid = str(record.uuid)
+            if record_uuid not in self._shared_memory_uuids:
+                new_records.append(record)
+                self._shared_memory_uuids.add(record_uuid)
+
+        if not new_records:
+            logger.debug(
+                "No new records to share (all were already shared)"
+            )
+            return
+
+        # Share with coordinator agent
+        for record in new_records:
             # Only add records from other agents to avoid duplication
             if record.agent_id != self.coordinator_agent.agent_id:
                 self.coordinator_agent.memory.write_record(record)

         # Share with task agent
-        for record in
+        for record in new_records:
             if record.agent_id != self.task_agent.agent_id:
                 self.task_agent.memory.write_record(record)

@@ -663,12 +751,12 @@ class Workforce(BaseNode):
         ]

         for worker in single_agent_workers:
-            for record in
+            for record in new_records:
                 if record.agent_id != worker.worker.agent_id:
                     worker.worker.memory.write_record(record)

         logger.info(
-            f"Shared {len(
+            f"Shared {len(new_records)} new memory records across "
             f"{len(single_agent_workers) + 2} agents in workforce "
             f"{self.node_id}"
         )
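The `_shared_memory_uuids` filter above is a plain seen-set. A standalone sketch of the pattern (the `uuid` attribute mirrors the `str(record.uuid)` conversion in this hunk; `Record` is a stand-in for CAMEL's memory record type):

```python
from dataclasses import dataclass, field
from typing import List, Set
from uuid import UUID, uuid4


@dataclass
class Record:
    uuid: UUID = field(default_factory=uuid4)


seen: Set[str] = set()


def filter_new(records: List[Record]) -> List[Record]:
    """Keep only records whose uuid has not been shared before."""
    new = []
    for record in records:
        key = str(record.uuid)
        if key not in seen:
            new.append(record)
            seen.add(key)
    return new


batch = [Record(), Record()]
assert filter_new(batch) == batch  # first pass: everything is new
assert filter_new(batch) == []     # second pass: fully deduplicated
```

Without this filter, every sharing pass re-broadcasts the full history into each agent's memory, which is the "exponential growth of duplicate records" the new comment warns about.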
@@ -1091,19 +1179,23 @@ class Workforce(BaseNode):
         else:
             subtasks = subtasks_result

-        if
-
+        if subtasks:
+            task_decomposed_event = TaskDecomposedEvent(
                 parent_task_id=task.id,
                 subtask_ids=[st.id for st in subtasks],
             )
+            for cb in self._callbacks:
+                cb.log_task_decomposed(task_decomposed_event)
             for subtask in subtasks:
-
+                task_created_event = TaskCreatedEvent(
                     task_id=subtask.id,
                     description=subtask.content,
                     parent_task_id=task.id,
                     task_type=subtask.type,
                     metadata=subtask.additional_info,
                 )
+                for cb in self._callbacks:
+                    cb.log_task_created(task_created_event)

         # Insert subtasks at the head of the queue
         self._pending_tasks.extendleft(reversed(subtasks))
@@ -1616,13 +1708,15 @@ class Workforce(BaseNode):
         self._task = task
         task.state = TaskState.FAILED

-
-
-
-
-
-
-
+        task_created_event = TaskCreatedEvent(
+            task_id=task.id,
+            description=task.content,
+            task_type=task.type,
+            metadata=task.additional_info,
+        )
+        for cb in self._callbacks:
+            cb.log_task_created(task_created_event)
+
         # The agent tend to be overconfident on the whole task, so we
         # decompose the task into subtasks first
         subtasks_result = self._decompose_task(task)
@@ -1636,18 +1730,23 @@ class Workforce(BaseNode):
         else:
             # This is a regular list (non-streaming mode)
             subtasks = subtasks_result
-        if
-
-                parent_task_id=task.id,
+        if subtasks:
+            task_decomposed_event = TaskDecomposedEvent(
+                parent_task_id=task.id,
+                subtask_ids=[st.id for st in subtasks],
             )
+            for cb in self._callbacks:
+                cb.log_task_decomposed(task_decomposed_event)
             for subtask in subtasks:
-
+                task_created_event = TaskCreatedEvent(
                     task_id=subtask.id,
                     description=subtask.content,
                     parent_task_id=task.id,
                     task_type=subtask.type,
                     metadata=subtask.additional_info,
                 )
+                for cb in self._callbacks:
+                    cb.log_task_created(task_created_event)

         if subtasks:
             # _pending_tasks will contain both undecomposed
@@ -1966,12 +2065,10 @@ class Workforce(BaseNode):
         # If workforce is paused, start the worker's listening task
         self._start_child_node_when_paused(worker_node.start())

-
-
-
-
-            role=worker_node.description,
-        )
+        self._notify_worker_created(
+            worker_node,
+            worker_type='SingleAgentWorker',
+        )
         return self

     def add_role_playing_worker(
@@ -2045,12 +2142,10 @@ class Workforce(BaseNode):
         # If workforce is paused, start the worker's listening task
         self._start_child_node_when_paused(worker_node.start())

-
-
-
-
-            role=worker_node.description,
-        )
+        self._notify_worker_created(
+            worker_node,
+            worker_type='RolePlayingWorker',
+        )
         return self

     def add_workforce(self, workforce: Workforce) -> Workforce:
@@ -2127,21 +2222,36 @@ class Workforce(BaseNode):
             # No active loop, directly set the event
             self._pause_event.set()

-
-
-
-        self.metrics_logger = WorkforceLogger(workforce_id=self.node_id)
+        for cb in self._callbacks:
+            if isinstance(cb, WorkforceMetrics):
+                cb.reset_task_data()

-    def save_workflow_memories(
+    def save_workflow_memories(
+        self,
+        session_id: Optional[str] = None,
+    ) -> Dict[str, str]:
         r"""Save workflow memories for all SingleAgentWorker instances in the
         workforce.

+        .. deprecated:: 0.2.80
+            This synchronous method processes workers sequentially, which can
+            be slow for multiple agents. Use
+            :meth:`save_workflow_memories_async`
+            instead for parallel processing and significantly better
+            performance.
+
         This method iterates through all child workers and triggers workflow
         saving for SingleAgentWorker instances using their
         save_workflow_memories()
         method.
         Other worker types are skipped.

+        Args:
+            session_id (Optional[str]): Custom session ID to use for saving
+                workflows. If None, auto-generates a timestamped session ID.
+                Useful for organizing workflows by project or context.
+                (default: :obj:`None`)
+
         Returns:
             Dict[str, str]: Dictionary mapping worker node IDs to save results.
                 Values are either file paths (success) or error messages
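Since the sync method now emits a `DeprecationWarning` (which Python hides by default outside `__main__`), callers can surface or test for it explicitly; a minimal sketch:

```python
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    results = workforce.save_workflow_memories()  # sequential, deprecated
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```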
@@ -2150,15 +2260,41 @@ class Workforce(BaseNode):
         Example:
             >>> workforce = Workforce("My Team")
             >>> # ... add workers and process tasks ...
-            >>>
+            >>> # save with auto-generated session id
+            >>> results = workforce.save_workflow_memories()
             >>> print(results)
-            {'worker_123': '/path/to/
+            {'worker_123': '/path/to/developer_agent_workflow.md',
             'worker_456': 'error: No conversation context available'}
+            >>> # save with custom project id
+            >>> results = workforce.save_workflow_memories(
+            ...     session_id="project_123"
+            ... )
+
+        Note:
+            For better performance with multiple workers, use the async
+            version::
+
+                results = await workforce.save_workflow_memories_async()
+
+        See Also:
+            :meth:`save_workflow_memories_async`: Async version with parallel
+            processing for significantly better performance.
         """
+        import warnings
+
+        warnings.warn(
+            "save_workflow_memories() is slow for multiple workers. "
+            "Consider using save_workflow_memories_async() for parallel "
+            "processing and ~4x faster performance.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
         results = {}

         # Get or create shared context utility for this save operation
-        shared_context_utility = self._get_or_create_shared_context_utility(
+        shared_context_utility = self._get_or_create_shared_context_utility(
+            session_id=session_id
+        )

         for child in self._children:
             if isinstance(child, SingleAgentWorker):
@@ -2191,6 +2327,116 @@ class Workforce(BaseNode):
         logger.info(f"Workflow save completed for {len(results)} workers")
         return results

+    async def save_workflow_memories_async(
+        self,
+        session_id: Optional[str] = None,
+    ) -> Dict[str, str]:
+        r"""Asynchronously save workflow memories for all SingleAgentWorker
+        instances in the workforce.
+
+        This is the async version of save_workflow_memories() that parallelizes
+        LLM summarization calls across all workers using asyncio.gather(),
+        significantly reducing total save time.
+
+        This method iterates through all child workers and triggers workflow
+        saving for SingleAgentWorker instances using their
+        save_workflow_memories_async() method in parallel.
+        Other worker types are skipped.
+
+        Args:
+            session_id (Optional[str]): Custom session ID to use for saving
+                workflows. If None, auto-generates a timestamped session ID.
+                Useful for organizing workflows by project or context.
+                (default: :obj:`None`)
+
+        Returns:
+            Dict[str, str]: Dictionary mapping worker node IDs to save results.
+                Values are either file paths (success) or error messages
+                (failure).
+
+        Example:
+            >>> workforce = Workforce("My Team")
+            >>> # ... add workers and process tasks ...
+            >>> # save with parallel summarization (faster)
+            >>> results = await workforce.save_workflow_memories_async()
+            >>> print(results)
+            {'worker_123': '/path/to/developer_agent_workflow.md',
+            'worker_456': '/path/to/search_agent_workflow.md',
+            'worker_789': '/path/to/document_agent_workflow.md'}
+        """
+        import asyncio
+
+        results = {}
+
+        # Get or create shared context utility for this save operation
+        shared_context_utility = self._get_or_create_shared_context_utility(
+            session_id=session_id
+        )
+
+        # Prepare tasks for parallel execution
+        async def save_single_worker(
+            child: BaseNode,
+        ) -> tuple[str, str]:
+            """Save workflow for a single worker, then return (node_id,
+            result)."""
+            if isinstance(child, SingleAgentWorker):
+                try:
+                    # Set the shared context utility for this operation
+                    child._shared_context_utility = shared_context_utility
+                    child.worker.set_context_utility(shared_context_utility)
+
+                    result = await child.save_workflow_memories_async()
+                    if result.get("status") == "success":
+                        return (
+                            child.node_id,
+                            result.get("file_path", "unknown_path"),
+                        )
+                    else:
+                        # Error: check if there's a separate message field,
+                        # otherwise use the status itself
+                        error_msg = result.get(
+                            "message", result.get("status", "Unknown error")
+                        )
+                        return (child.node_id, f"error: {error_msg}")
+
+                except Exception as e:
+                    return (child.node_id, f"error: {e!s}")
+            else:
+                # Skip non-SingleAgentWorker types
+                return (
+                    child.node_id,
+                    f"skipped: {type(child).__name__} not supported",
+                )
+
+        # Create tasks for all workers
+        tasks = [save_single_worker(child) for child in self._children]
+
+        # Execute all tasks in parallel using asyncio.gather()
+        parallel_results = await asyncio.gather(*tasks, return_exceptions=True)
+
+        # Process results
+        for result in parallel_results:
+            if isinstance(result, Exception):
+                # Handle any unexpected exceptions
+                logger.error(
+                    f"Unexpected error during workflow save: {result}"
+                )
+                results["unknown"] = f"error: {result!s}"
+            elif isinstance(result, tuple) and len(result) == 2:
+                # Successfully got (node_id, save_result) tuple
+                node_id, save_result = result
+                results[node_id] = save_result
+            else:
+                # Unexpected result format
+                logger.error(f"Unexpected result format: {result}")
+                results["unknown"] = "error: unexpected result format"
+
+        logger.info(
+            f"Workflow save completed for {len(results)} workers "
+            f"(parallelized)"
+        )
+        return results
+
     def load_workflow_memories(
         self,
         max_files_to_load: int = 3,
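A usage sketch for the new parallel path (method name and `session_id` parameter as added above; the import path is assumed from this diff's file layout):

```python
import asyncio

from camel.societies.workforce import Workforce  # assumed import path


async def main() -> None:
    workforce = Workforce("My Team")
    # ... add workers and process tasks ...
    results = await workforce.save_workflow_memories_async(
        session_id="project_123"
    )
    for worker_id, outcome in results.items():
        # a file path on success, or an "error: ..." / "skipped: ..." string
        print(worker_id, outcome)


asyncio.run(main())
```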
@@ -2807,10 +3053,11 @@ class Workforce(BaseNode):

         task.assigned_worker_id = assignee_id

-
-
-
-
+        task_started_event = TaskStartedEvent(
+            task_id=task.id, worker_id=assignee_id
+        )
+        for cb in self._callbacks:
+            cb.log_task_started(task_started_event)

         try:
             await self._channel.post_task(task, self.node_id, assignee_id)
@@ -2954,13 +3201,13 @@ class Workforce(BaseNode):
         print(f"{Fore.CYAN}{new_node} created.{Fore.RESET}")

         self._children.append(new_node)
-
-
-
-
-
-
-
+
+        self._notify_worker_created(
+            new_node,
+            worker_type='SingleAgentWorker',
+            role=new_node_conf.role,
+            metadata={'description': new_node_conf.description},
+        )
         self._child_listening_tasks.append(
             asyncio.create_task(new_node.start())
         )
@@ -3061,22 +3308,24 @@ class Workforce(BaseNode):
         batch_result = await self._find_assignee(tasks_to_assign)
         logger.debug(
             f"Coordinator returned assignments:\n"
-            f"{json.dumps(batch_result.
+            f"{json.dumps(batch_result.model_dump(), indent=2)}"
         )
         for assignment in batch_result.assignments:
             self._task_dependencies[assignment.task_id] = (
                 assignment.dependencies
             )
             self._assignees[assignment.task_id] = assignment.assignee_id
-
+
+            task_assigned_event = TaskAssignedEvent(
+                task_id=assignment.task_id,
+                worker_id=assignment.assignee_id,
+                dependencies=assignment.dependencies,
+                queue_time_seconds=None,
+            )
+            for cb in self._callbacks:
                 # queue_time_seconds can be derived by logger if task
                 # creation time is logged
-
-                task_id=assignment.task_id,
-                worker_id=assignment.assignee_id,
-                dependencies=assignment.dependencies,
-                queue_time_seconds=None,
-            )
+                cb.log_task_assigned(task_assigned_event)

         # Step 2: Iterate through all pending tasks and post those that are
         # ready
@@ -3193,21 +3442,19 @@ class Workforce(BaseNode):
                 )

                 # Log the failure to metrics
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    },
-                )
+                task_failed_event = TaskFailedEvent(
+                    task_id=task.id,
+                    worker_id=task.assigned_worker_id or "unknown",
+                    error_message=task.result,
+                    metadata={
+                        'failure_reason': 'dependency_failure',
+                        'failed_dependencies': (
+                            permanently_failed_deps
+                        ),
+                    },
+                )
+                for cb in self._callbacks:
+                    cb.log_task_failed(task_failed_event)

                 self._completed_tasks.append(task)
                 self._cleanup_task_tracking(task.id)
@@ -3259,17 +3506,18 @@ class Workforce(BaseNode):
             f"{failure_reason}{Fore.RESET}"
         )

-
-
-
-
-
-
-
-
-
-
-
+        task_failed_event = TaskFailedEvent(
+            task_id=task.id,
+            worker_id=worker_id,
+            error_message=detailed_error,
+            metadata={
+                'failure_count': task.failure_count,
+                'task_content': task.content,
+                'result_length': len(task.result) if task.result else 0,
+            },
+        )
+        for cb in self._callbacks:
+            cb.log_task_failed(task_failed_event)

         # Check for immediate halt conditions
         if task.failure_count >= MAX_TASK_RETRIES:
@@ -3360,61 +3608,60 @@ class Workforce(BaseNode):
             return False

     async def _handle_completed_task(self, task: Task) -> None:
-
-
-
-        token_usage = None
-
-        # Get processing time from task start time or additional info
-        if task.id in self._task_start_times:
-            processing_time_seconds = (
-                time.time() - self._task_start_times[task.id]
-            )
-            self._cleanup_task_tracking(task.id)
-        elif (
-            task.additional_info is not None
-            and 'processing_time_seconds' in task.additional_info
-        ):
-            processing_time_seconds = task.additional_info[
-                'processing_time_seconds'
-            ]
+        worker_id = task.assigned_worker_id or "unknown"
+        processing_time_seconds = None
+        token_usage = None

-
-
-
-
-
-        )
-
-
-
-
-
-
-
-                for child in self._children
-                if child.node_id == worker_id
-            ),
-            None,
-        )
-        if isinstance(assignee_node, SingleAgentWorker):
-            try:
-                _, total_tokens = (
-                    assignee_node.worker.memory.get_context()
-                )
-                token_usage = {'total_tokens': total_tokens}
-            except Exception:
-                token_usage = None
+        # Get processing time from task start time or additional info
+        if task.id in self._task_start_times:
+            processing_time_seconds = (
+                time.time() - self._task_start_times[task.id]
+            )
+            self._cleanup_task_tracking(task.id)
+        elif (
+            task.additional_info is not None
+            and 'processing_time_seconds' in task.additional_info
+        ):
+            processing_time_seconds = task.additional_info[
+                'processing_time_seconds'
+            ]

-
-
-
-
-
-
-
-
+        # Get token usage from task additional info (preferred - actual
+        # usage)
+        if (
+            task.additional_info is not None
+            and 'token_usage' in task.additional_info
+        ):
+            token_usage = task.additional_info['token_usage']
+        else:
+            # Fallback: Try to get token usage from SingleAgentWorker
+            # memory
+            assignee_node = next(
+                (
+                    child
+                    for child in self._children
+                    if child.node_id == worker_id
+                ),
+                None,
             )
+            if isinstance(assignee_node, SingleAgentWorker):
+                try:
+                    _, total_tokens = assignee_node.worker.memory.get_context()
+                    token_usage = {'total_tokens': total_tokens}
+                except Exception:
+                    token_usage = None
+
+        # Log the completed task
+        task_completed_event = TaskCompletedEvent(
+            task_id=task.id,
+            worker_id=worker_id,
+            result_summary=task.result if task.result else "Completed",
+            processing_time_seconds=processing_time_seconds,
+            token_usage=token_usage,
+            metadata={'current_state': task.state.value},
+        )
+        for cb in self._callbacks:
+            cb.log_task_completed(task_completed_event)

         # Find and remove the completed task from pending tasks
         tasks_list = list(self._pending_tasks)
@@ -3534,15 +3781,23 @@ class Workforce(BaseNode):
         r"""Returns an ASCII tree representation of the task hierarchy and
         worker status.
         """
-
-
-
+        metrics_cb: List[WorkforceMetrics] = [
+            cb for cb in self._callbacks if isinstance(cb, WorkforceMetrics)
+        ]
+        if len(metrics_cb) == 0:
+            return "Metrics Callback not initialized."
+        else:
+            return metrics_cb[0].get_ascii_tree_representation()

     def get_workforce_kpis(self) -> Dict[str, Any]:
         r"""Returns a dictionary of key performance indicators."""
-
-
-
+        metrics_cb: List[WorkforceMetrics] = [
+            cb for cb in self._callbacks if isinstance(cb, WorkforceMetrics)
+        ]
+        if len(metrics_cb) == 0:
+            return {"error": "Metrics Callback not initialized."}
+        else:
+            return metrics_cb[0].get_kpis()

     def dump_workforce_logs(self, file_path: str) -> None:
         r"""Dumps all collected logs to a JSON file.
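Both accessors now delegate to the first registered `WorkforceMetrics` callback instead of a dedicated logger attribute, so existing call sites keep working; a sketch using only the two defs visible above (the name of the ASCII-tree method on `Workforce` itself is not shown in this hunk):

```python
wf = Workforce("My Team")
# ... run some tasks ...
kpis = wf.get_workforce_kpis()  # served by the default WorkforceLogger;
print(kpis)                     # the error dict is only a defensive fallback
wf.dump_workforce_logs("workforce_logs.json")
```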
@@ -3550,10 +3805,13 @@ class Workforce(BaseNode):
         Args:
             file_path (str): The path to the JSON file.
         """
-
+        metrics_cb: List[WorkforceMetrics] = [
+            cb for cb in self._callbacks if isinstance(cb, WorkforceMetrics)
+        ]
+        if len(metrics_cb) == 0:
             print("Logger not initialized. Cannot dump logs.")
             return
-
+        metrics_cb[0].dump_to_json(file_path)
         # Use logger.info or print, consistent with existing style
         logger.info(f"Workforce logs dumped to {file_path}")

@@ -4001,6 +4259,9 @@ class Workforce(BaseNode):
         elif not self._pending_tasks and self._in_flight_tasks == 0:
             self._state = WorkforceState.IDLE
             logger.info("All tasks completed.")
+            all_tasks_completed_event = AllTasksCompletedEvent()
+            for cb in self._callbacks:
+                cb.log_all_tasks_completed(all_tasks_completed_event)

             # shut down the whole workforce tree
             self.stop()