camel-ai 0.2.78__py3-none-any.whl → 0.2.79a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +304 -12
- camel/datasets/base_generator.py +39 -10
- camel/environments/single_step.py +28 -3
- camel/memories/__init__.py +1 -2
- camel/memories/blocks/chat_history_block.py +2 -17
- camel/models/aws_bedrock_model.py +1 -17
- camel/models/moonshot_model.py +102 -5
- camel/societies/workforce/events.py +122 -0
- camel/societies/workforce/single_agent_worker.py +164 -34
- camel/societies/workforce/workforce.py +417 -156
- camel/societies/workforce/workforce_callback.py +74 -0
- camel/societies/workforce/workforce_logger.py +144 -140
- camel/societies/workforce/workforce_metrics.py +33 -0
- camel/utils/context_utils.py +53 -0
- {camel_ai-0.2.78.dist-info → camel_ai-0.2.79a0.dist-info}/METADATA +14 -13
- {camel_ai-0.2.78.dist-info → camel_ai-0.2.79a0.dist-info}/RECORD +19 -16
- {camel_ai-0.2.78.dist-info → camel_ai-0.2.79a0.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.78.dist-info → camel_ai-0.2.79a0.dist-info}/licenses/LICENSE +0 -0
camel/models/moonshot_model.py
CHANGED
@@ -12,6 +12,7 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
+import copy
 import os
 from typing import Any, Dict, List, Optional, Type, Union
 
@@ -19,6 +20,7 @@ from openai import AsyncStream
 from pydantic import BaseModel
 
 from camel.configs import MoonshotConfig
+from camel.logger import get_logger
 from camel.messages import OpenAIMessage
 from camel.models._utils import try_modify_message_with_format
 from camel.models.openai_compatible_model import OpenAICompatibleModel
@@ -34,6 +36,8 @@ from camel.utils import (
     update_langfuse_trace,
 )
 
+logger = get_logger(__name__)
+
 if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
     try:
         from langfuse.decorators import observe
@@ -84,7 +88,7 @@ class MoonshotModel(OpenAICompatibleModel):
         model_type: Union[ModelType, str],
         model_config_dict: Optional[Dict[str, Any]] = None,
         api_key: Optional[str] = None,
-        url: Optional[str] =
+        url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
         timeout: Optional[float] = None,
         max_retries: int = 3,
@@ -93,7 +97,12 @@ class MoonshotModel(OpenAICompatibleModel):
         if model_config_dict is None:
             model_config_dict = MoonshotConfig().as_dict()
         api_key = api_key or os.environ.get("MOONSHOT_API_KEY")
-
+        # Preserve default URL if not provided
+        if url is None:
+            url = (
+                os.environ.get("MOONSHOT_API_BASE_URL")
+                or "https://api.moonshot.ai/v1"
+            )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
             model_type=model_type,
@@ -125,12 +134,12 @@ class MoonshotModel(OpenAICompatibleModel):
         Returns:
             Dict[str, Any]: The prepared request configuration.
         """
-        import copy
-
         request_config = copy.deepcopy(self.model_config_dict)
 
         if tools:
-
+            # Clean tools to remove null types (Moonshot API incompatibility)
+            cleaned_tools = self._clean_tool_schemas(tools)
+            request_config["tools"] = cleaned_tools
         elif response_format:
             # Use the same approach as DeepSeek for structured output
             try_modify_message_with_format(messages[-1], response_format)
@@ -138,6 +147,94 @@ class MoonshotModel(OpenAICompatibleModel):
 
         return request_config
 
+    def _clean_tool_schemas(
+        self, tools: List[Dict[str, Any]]
+    ) -> List[Dict[str, Any]]:
+        r"""Clean tool schemas to remove null types for Moonshot compatibility.
+
+        Moonshot API doesn't accept {"type": "null"} in anyOf schemas.
+        This method removes null type definitions from parameters.
+
+        Args:
+            tools (List[Dict[str, Any]]): Original tool schemas.
+
+        Returns:
+            List[Dict[str, Any]]: Cleaned tool schemas.
+        """
+
+        def remove_null_from_schema(schema: Any) -> Any:
+            """Recursively remove null types from schema."""
+            if isinstance(schema, dict):
+                # Create a copy to avoid modifying the original
+                result = {}
+
+                for key, value in schema.items():
+                    if key == 'type' and isinstance(value, list):
+                        # Handle type arrays like ["string", "null"]
+                        filtered_types = [t for t in value if t != 'null']
+                        if len(filtered_types) == 1:
+                            # Single type remains, convert to string
+                            result[key] = filtered_types[0]
+                        elif len(filtered_types) > 1:
+                            # Multiple types remain, keep as array
+                            result[key] = filtered_types
+                        else:
+                            # All were null, use string as fallback
+                            logger.warning(
+                                "All types in tool schema type array "
+                                "were null, falling back to 'string' "
+                                "type for Moonshot API compatibility. "
+                                "Original tool schema may need review."
+                            )
+                            result[key] = 'string'
+                    elif key == 'anyOf':
+                        # Handle anyOf with null types
+                        filtered = [
+                            item
+                            for item in value
+                            if not (
+                                isinstance(item, dict)
+                                and item.get('type') == 'null'
+                            )
+                        ]
+                        if len(filtered) == 1:
+                            # If only one type remains, flatten it
+                            return remove_null_from_schema(filtered[0])
+                        elif len(filtered) > 1:
+                            result[key] = [
+                                remove_null_from_schema(item)
+                                for item in filtered
+                            ]
+                        else:
+                            # All were null, return string type as fallback
+                            logger.warning(
+                                "All types in tool schema anyOf were null, "
+                                "falling back to 'string' type for "
+                                "Moonshot API compatibility. Original "
+                                "tool schema may need review."
+                            )
+                            return {"type": "string"}
+                    else:
+                        # Recursively process other values
+                        result[key] = remove_null_from_schema(value)
+
+                return result
+            elif isinstance(schema, list):
+                return [remove_null_from_schema(item) for item in schema]
+            else:
+                return schema
+
+        cleaned_tools = copy.deepcopy(tools)
+        for tool in cleaned_tools:
+            if 'function' in tool and 'parameters' in tool['function']:
+                params = tool['function']['parameters']
+                if 'properties' in params:
+                    params['properties'] = remove_null_from_schema(
+                        params['properties']
+                    )
+
+        return cleaned_tools
+
     @observe()
     async def _arun(
         self,
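The `if tools:` branch now routes tool schemas through `_clean_tool_schemas` before the request is sent to Moonshot. A minimal sketch of the intended before/after shape; the tool definition below is hypothetical and not taken from the diff:

```python
# Hypothetical tool schema illustrating what the null-stripping above is
# meant to do; neither dict is copied from the camel-ai source.
before = {
    "type": "function",
    "function": {
        "name": "lookup_user",
        "parameters": {
            "type": "object",
            "properties": {
                # Optional[str] fields are often emitted as anyOf with a null branch
                "nickname": {"anyOf": [{"type": "string"}, {"type": "null"}]},
                # some schema generators emit type arrays instead
                "age": {"type": ["integer", "null"]},
            },
        },
    },
}

# Expected result after cleaning: null branches are dropped, single-element
# anyOf lists are flattened, and type arrays collapse to a single string.
after = {
    "type": "function",
    "function": {
        "name": "lookup_user",
        "parameters": {
            "type": "object",
            "properties": {
                "nickname": {"type": "string"},
                "age": {"type": "integer"},
            },
        },
    },
}
```

Dropping the null branch loses the "this parameter may be omitted" signal at the schema level, which is why the helper only falls back to `"string"` with a logged warning when nothing else remains.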
camel/societies/workforce/events.py
ADDED
@@ -0,0 +1,122 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from __future__ import annotations
+
+from datetime import datetime, timezone
+from typing import Any, Dict, List, Literal, Optional, Union
+
+from pydantic import BaseModel, ConfigDict, Field
+
+
+class WorkforceEventBase(BaseModel):
+    model_config = ConfigDict(frozen=True, extra='forbid')
+    event_type: Literal[
+        "task_decomposed",
+        "task_created",
+        "task_assigned",
+        "task_started",
+        "task_completed",
+        "task_failed",
+        "worker_created",
+        "worker_deleted",
+        "queue_status",
+        "all_tasks_completed",
+    ]
+    metadata: Optional[Dict[str, Any]] = None
+    timestamp: datetime = Field(
+        default_factory=lambda: datetime.now(timezone.utc)
+    )
+
+
+class WorkerCreatedEvent(WorkforceEventBase):
+    event_type: Literal["worker_created"] = "worker_created"
+    worker_id: str
+    worker_type: str
+    role: str
+
+
+class WorkerDeletedEvent(WorkforceEventBase):
+    event_type: Literal["worker_deleted"] = "worker_deleted"
+    worker_id: str
+    reason: Optional[str] = None
+
+
+class TaskDecomposedEvent(WorkforceEventBase):
+    event_type: Literal["task_decomposed"] = "task_decomposed"
+    parent_task_id: str
+    subtask_ids: List[str]
+
+
+class TaskCreatedEvent(WorkforceEventBase):
+    event_type: Literal["task_created"] = "task_created"
+    task_id: str
+    description: str
+    parent_task_id: Optional[str] = None
+    task_type: Optional[str] = None
+
+
+class TaskAssignedEvent(WorkforceEventBase):
+    event_type: Literal["task_assigned"] = "task_assigned"
+    task_id: str
+    worker_id: str
+    queue_time_seconds: Optional[float] = None
+    dependencies: Optional[List[str]] = None
+
+
+class TaskStartedEvent(WorkforceEventBase):
+    event_type: Literal["task_started"] = "task_started"
+    task_id: str
+    worker_id: str
+
+
+class TaskCompletedEvent(WorkforceEventBase):
+    event_type: Literal["task_completed"] = "task_completed"
+    task_id: str
+    worker_id: str
+    result_summary: Optional[str] = None
+    processing_time_seconds: Optional[float] = None
+    token_usage: Optional[Dict[str, int]] = None
+
+
+class TaskFailedEvent(WorkforceEventBase):
+    event_type: Literal["task_failed"] = "task_failed"
+    task_id: str
+    error_message: str
+    worker_id: Optional[str] = None
+
+
+class AllTasksCompletedEvent(WorkforceEventBase):
+    event_type: Literal["all_tasks_completed"] = "all_tasks_completed"
+
+
+class QueueStatusEvent(WorkforceEventBase):
+    event_type: Literal["queue_status"] = "queue_status"
+    queue_name: str
+    length: int
+    pending_task_ids: Optional[List[str]] = None
+    metadata: Optional[Dict[str, Any]] = None
+
+
+WorkforceEvent = Union[
+    TaskDecomposedEvent,
+    TaskCreatedEvent,
+    TaskAssignedEvent,
+    TaskStartedEvent,
+    TaskCompletedEvent,
+    TaskFailedEvent,
+    WorkerCreatedEvent,
+    WorkerDeletedEvent,
+    AllTasksCompletedEvent,
+    QueueStatusEvent,
+]
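These event models are plain frozen pydantic classes, so a downstream callback can dispatch on the `event_type` discriminant. A small sketch, assuming the import path `camel.societies.workforce.events` implied by the file list above:

```python
# Sketch only: constructing events and branching on their Literal discriminant.
from camel.societies.workforce.events import (
    TaskCompletedEvent,
    TaskFailedEvent,
    WorkforceEvent,
)


def handle_event(event: WorkforceEvent) -> None:
    # event_type is a Literal field, so it works as a simple discriminant
    if event.event_type == "task_completed":
        print(f"{event.task_id} done by {event.worker_id}")
    elif event.event_type == "task_failed":
        print(f"{event.task_id} failed: {event.error_message}")


handle_event(
    TaskCompletedEvent(
        task_id="t1",
        worker_id="w1",
        processing_time_seconds=2.5,
    )
)
handle_event(TaskFailedEvent(task_id="t2", error_message="timeout"))
```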
camel/societies/workforce/single_agent_worker.py
CHANGED
@@ -80,6 +80,7 @@ class AgentPool:
         self._in_use_agents: set = set()
         self._agent_last_used: dict = {}
         self._lock = asyncio.Lock()
+        self._condition = asyncio.Condition(self._lock)
 
         # Statistics
         self._total_borrows = 0
@@ -105,36 +106,31 @@ class AgentPool:
 
     async def get_agent(self) -> ChatAgent:
         r"""Get an agent from the pool, creating one if necessary."""
-        async with self.
+        async with self._condition:
             self._total_borrows += 1
 
-
-
-                self._in_use_agents.add(id(agent))
-                self._pool_hits += 1
-                return agent
-
-            # Check if we can create a new agent
-            if len(self._in_use_agents) < self.max_size or self.auto_scale:
-                agent = self._create_fresh_agent()
-                self._in_use_agents.add(id(agent))
-                return agent
-
-        # Wait for available agent
-        while True:
-            async with self._lock:
+            # Try to get available agent or create new one
+            while True:
                 if self._available_agents:
                     agent = self._available_agents.popleft()
                     self._in_use_agents.add(id(agent))
                     self._pool_hits += 1
                     return agent
-
+
+                # Check if we can create a new agent
+                if len(self._in_use_agents) < self.max_size or self.auto_scale:
+                    agent = self._create_fresh_agent()
+                    self._in_use_agents.add(id(agent))
+                    return agent
+
+                # Wait for an agent to be returned
+                await self._condition.wait()
 
     async def return_agent(self, agent: ChatAgent) -> None:
         r"""Return an agent to the pool."""
         agent_id = id(agent)
 
-        async with self.
+        async with self._condition:
             if agent_id not in self._in_use_agents:
                 return
 
@@ -145,6 +141,8 @@ class AgentPool:
                 agent.reset()
                 self._agent_last_used[agent_id] = time.time()
                 self._available_agents.append(agent)
+                # Notify one waiting coroutine that an agent is available
+                self._condition.notify()
             else:
                 # Remove tracking for agents not returned to pool
                 self._agent_last_used.pop(agent_id, None)
@@ -154,7 +152,7 @@ class AgentPool:
         if not self.auto_scale:
             return
 
-        async with self.
+        async with self._condition:
             if not self._available_agents:
                 return
 
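The hunks above replace the pool's bare `asyncio.Lock` plus retry loop with an `asyncio.Condition`, so an exhausted borrower sleeps until `return_agent` calls `notify()`. A standalone sketch of that borrow/wait/notify pattern, with illustrative names rather than the camel-ai API:

```python
import asyncio
from collections import deque


class SimplePool:
    """Minimal borrow/return pool built on asyncio.Condition."""

    def __init__(self, max_size: int = 2) -> None:
        self.max_size = max_size
        self._available: deque = deque()
        self._in_use: set = set()
        self._condition = asyncio.Condition()

    async def get_item(self) -> object:
        async with self._condition:
            while True:
                if self._available:
                    item = self._available.popleft()
                    self._in_use.add(id(item))
                    return item
                if len(self._in_use) < self.max_size:
                    item = object()  # stand-in for creating a fresh agent
                    self._in_use.add(id(item))
                    return item
                # Pool exhausted: release the lock and sleep until notified.
                await self._condition.wait()

    async def return_item(self, item: object) -> None:
        async with self._condition:
            self._in_use.discard(id(item))
            self._available.append(item)
            self._condition.notify()  # wake one waiting borrower


async def main() -> None:
    pool = SimplePool(max_size=1)
    first = await pool.get_item()

    async def return_later() -> None:
        await asyncio.sleep(0.1)
        await pool.return_item(first)

    asyncio.create_task(return_later())
    second = await pool.get_item()  # blocks until return_later() notifies
    print(first is second)  # True: the returned item is reused


asyncio.run(main())
```

Pairing `wait()` with `notify()` under one condition removes the need for the second `async with self._lock:` polling loop the old code used.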
@@ -428,6 +426,7 @@ class SingleAgentWorker(Worker):
                 "usage"
             ) or final_response.info.get("token_usage")
         else:
+            final_response = response
             usage_info = response.info.get("usage") or response.info.get(
                 "token_usage"
             )
@@ -562,10 +561,11 @@ class SingleAgentWorker(Worker):
         while True:
             try:
                 # Fixed interval cleanup
-                await asyncio.sleep(self.agent_pool.cleanup_interval)
-
                 if self.agent_pool:
+                    await asyncio.sleep(self.agent_pool.cleanup_interval)
                     await self.agent_pool.cleanup_idle_agents()
+                else:
+                    break
             except asyncio.CancelledError:
                 break
             except Exception as e:
@@ -581,9 +581,14 @@ class SingleAgentWorker(Worker):
         r"""Save the worker's current workflow memories using agent
         summarization.
 
+        .. deprecated:: 0.2.80
+            Use :meth:`save_workflow_memories_async` for async/await support
+            and better integration with parallel workflow saving.
+
         This method generates a workflow summary from the worker agent's
         conversation history and saves it to a markdown file. The filename
-        is based on the worker's
+        is based on either the worker's explicit role_name or the generated
+        task_title from the summary.
 
         Returns:
             Dict[str, Any]: Result dictionary with keys:
@@ -591,7 +596,19 @@ class SingleAgentWorker(Worker):
                 - summary (str): Generated workflow summary
                 - file_path (str): Path to saved file
                 - worker_description (str): Worker description used
+
+        See Also:
+            :meth:`save_workflow_memories_async`: Async version for better
+                performance in parallel workflows.
         """
+        import warnings
+
+        warnings.warn(
+            "save_workflow_memories() is synchronous. Consider using "
+            "save_workflow_memories_async() for async/await support.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
         try:
             # validate requirements
             validation_error = self._validate_workflow_save_requirements()
@@ -603,13 +620,31 @@ class SingleAgentWorker(Worker):
             self.worker.set_context_utility(context_util)
 
             # prepare workflow summarization components
-            filename = self._generate_workflow_filename()
             structured_prompt = self._prepare_workflow_prompt()
             agent_to_summarize = self._select_agent_for_summarization(
                 context_util
             )
 
+            # check if we should use role_name or let summarize extract
+            # task_title
+            role_name = getattr(self.worker, 'role_name', 'assistant')
+            use_role_name_for_filename = role_name.lower() not in {
+                'assistant',
+                'agent',
+                'user',
+                'system',
+            }
+
             # generate and save workflow summary
+            # if role_name is explicit, use it for filename
+            # if role_name is generic, pass none to let summarize use
+            # task_title
+            filename = (
+                self._generate_workflow_filename()
+                if use_role_name_for_filename
+                else None
+            )
+
             result = agent_to_summarize.summarize(
                 filename=filename,
                 summary_prompt=structured_prompt,
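The filename decision introduced here keys off whether the worker carries an explicit `role_name`; generic names fall through so the summary's `task_title` can name the file instead. A compact sketch of that rule, where the lower/underscore sanitization is only a stand-in for `ContextUtility.sanitize_workflow_filename`:

```python
from typing import Optional

GENERIC_ROLE_NAMES = {"assistant", "agent", "user", "system"}


def workflow_filename(role_name: str) -> Optional[str]:
    # stand-in for ContextUtility.sanitize_workflow_filename
    clean = role_name.strip().lower().replace(" ", "_")
    if clean in GENERIC_ROLE_NAMES:
        # let summarize() derive the filename from the generated task_title
        return None
    return f"{clean}_workflow"


print(workflow_filename("Data Analyst"))  # data_analyst_workflow
print(workflow_filename("assistant"))     # None
```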
@@ -636,6 +671,84 @@ class SingleAgentWorker(Worker):
                 "message": f"Failed to save workflow memories: {e!s}",
             }
 
+    async def save_workflow_memories_async(self) -> Dict[str, Any]:
+        r"""Asynchronously save the worker's current workflow memories using
+        agent summarization.
+
+        This is the async version of save_workflow_memories() that uses
+        asummarize() for non-blocking LLM calls, enabling parallel
+        summarization of multiple workers.
+
+        Returns:
+            Dict[str, Any]: Result dictionary with keys:
+                - status (str): "success" or "error"
+                - summary (str): Generated workflow summary
+                - file_path (str): Path to saved file
+                - worker_description (str): Worker description used
+        """
+        try:
+            # validate requirements
+            validation_error = self._validate_workflow_save_requirements()
+            if validation_error:
+                return validation_error
+
+            # setup context utility and agent
+            context_util = self._get_context_utility()
+            self.worker.set_context_utility(context_util)
+
+            # prepare workflow summarization components
+            structured_prompt = self._prepare_workflow_prompt()
+            agent_to_summarize = self._select_agent_for_summarization(
+                context_util
+            )
+
+            # check if we should use role_name or let summarize extract
+            # task_title
+            role_name = getattr(self.worker, 'role_name', 'assistant')
+            use_role_name_for_filename = role_name.lower() not in {
+                'assistant',
+                'agent',
+                'user',
+                'system',
+            }
+
+            # generate and save workflow summary
+            # if role_name is explicit, use it for filename
+            # if role_name is generic, pass none to let summarize use
+            # task_title
+            filename = (
+                self._generate_workflow_filename()
+                if use_role_name_for_filename
+                else None
+            )
+
+            # **KEY CHANGE**: Using asummarize() instead of summarize()
+            result = await agent_to_summarize.asummarize(
+                filename=filename,
+                summary_prompt=structured_prompt,
+                response_format=WorkflowSummary,
+            )
+
+            # add worker metadata and cleanup
+            result["worker_description"] = self.description
+            if self._conversation_accumulator is not None:
+                logger.info(
+                    "Cleaning up conversation accumulator after workflow "
+                    "summarization"
+                )
+                self._conversation_accumulator = None
+
+            return result
+
+        except Exception as e:
+            return {
+                "status": "error",
+                "summary": "",
+                "file_path": None,
+                "worker_description": self.description,
+                "message": f"Failed to save workflow memories: {e!s}",
+            }
+
     def load_workflow_memories(
         self,
         pattern: Optional[str] = None,
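Because the new method awaits `asummarize()` rather than the blocking `summarize()`, several workers can be summarized concurrently. A hedged usage sketch; `workers` is assumed to be an existing list of `SingleAgentWorker` instances from a configured workforce:

```python
import asyncio


async def save_all_workflows(workers) -> list:
    # Overlap the per-worker LLM calls instead of running them one by one.
    results = await asyncio.gather(
        *(worker.save_workflow_memories_async() for worker in workers),
        return_exceptions=True,
    )
    for worker, result in zip(workers, results):
        status = result.get("status") if isinstance(result, dict) else "error"
        print(f"{worker.description}: {status}")
    return results


# asyncio.run(save_all_workflows(workers))
```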
@@ -716,12 +829,23 @@ class SingleAgentWorker(Worker):
             )
             return []
 
-        # generate filename-safe search pattern from worker
+        # generate filename-safe search pattern from worker role name
         if pattern is None:
-
-
-
-
+            from camel.utils.context_utils import ContextUtility
+
+            # get role_name (always available, defaults to "assistant")
+            role_name = getattr(self.worker, 'role_name', 'assistant')
+            clean_name = ContextUtility.sanitize_workflow_filename(role_name)
+
+            # check if role_name is generic
+            generic_names = {'assistant', 'agent', 'user', 'system'}
+            if clean_name in generic_names:
+                # for generic role names, search for all workflow files
+                # since filename is based on task_title
+                pattern = "*_workflow*.md"
+            else:
+                # for explicit role names, search for role-specific files
+                pattern = f"{clean_name}_workflow*.md"
 
         # Get the base workforce_workflows directory
         camel_workdir = os.environ.get("CAMEL_WORKDIR")
@@ -816,15 +940,21 @@ class SingleAgentWorker(Worker):
             return None
 
     def _generate_workflow_filename(self) -> str:
-        r"""Generate a filename for the workflow based on worker
+        r"""Generate a filename for the workflow based on worker role name.
+
+        Uses the worker's explicit role_name when available.
 
         Returns:
-            str: Sanitized filename without timestamp
-
+            str: Sanitized filename without timestamp and without .md
+                extension. Format: {role_name}_workflow
         """
-
-
-
+        from camel.utils.context_utils import ContextUtility
+
+        # get role_name (always available, defaults to "assistant"/"Assistant")
+        role_name = getattr(self.worker, 'role_name', 'assistant')
+        clean_name = ContextUtility.sanitize_workflow_filename(role_name)
+
+        return f"{clean_name}_workflow"
 
     def _prepare_workflow_prompt(self) -> str:
         r"""Prepare the structured prompt for workflow summarization.