camel-ai 0.2.79a0__py3-none-any.whl → 0.2.79a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- camel/__init__.py +1 -1
- camel/agents/_utils.py +38 -0
- camel/agents/chat_agent.py +788 -255
- camel/memories/agent_memories.py +34 -0
- camel/memories/base.py +26 -0
- camel/memories/blocks/chat_history_block.py +115 -0
- camel/memories/context_creators/score_based.py +25 -384
- camel/messages/base.py +26 -0
- camel/models/azure_openai_model.py +113 -67
- camel/models/model_factory.py +17 -1
- camel/models/openai_compatible_model.py +62 -32
- camel/models/openai_model.py +61 -35
- camel/models/samba_model.py +34 -15
- camel/models/sglang_model.py +41 -11
- camel/societies/workforce/__init__.py +2 -0
- camel/societies/workforce/role_playing_worker.py +15 -11
- camel/societies/workforce/single_agent_worker.py +86 -364
- camel/societies/workforce/utils.py +2 -1
- camel/societies/workforce/workflow_memory_manager.py +772 -0
- camel/societies/workforce/workforce.py +96 -32
- camel/storages/vectordb_storages/oceanbase.py +5 -4
- camel/toolkits/file_toolkit.py +166 -0
- camel/toolkits/message_integration.py +15 -13
- camel/toolkits/terminal_toolkit/terminal_toolkit.py +112 -79
- camel/types/enums.py +1 -0
- camel/utils/context_utils.py +148 -2
- {camel_ai-0.2.79a0.dist-info → camel_ai-0.2.79a1.dist-info}/METADATA +1 -1
- {camel_ai-0.2.79a0.dist-info → camel_ai-0.2.79a1.dist-info}/RECORD +30 -29
- {camel_ai-0.2.79a0.dist-info → camel_ai-0.2.79a1.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.79a0.dist-info → camel_ai-0.2.79a1.dist-info}/licenses/LICENSE +0 -0
camel/memories/agent_memories.py
CHANGED
@@ -129,6 +129,16 @@ class ChatHistoryMemory(AgentMemory):
         # Save the modified records back to storage
         self._chat_history_block.storage.save(record_dicts)
 
+    def pop_records(self, count: int) -> List[MemoryRecord]:
+        r"""Removes the most recent records from chat history memory."""
+        return self._chat_history_block.pop_records(count)
+
+    def remove_records_by_indices(
+        self, indices: List[int]
+    ) -> List[MemoryRecord]:
+        r"""Removes records at specified indices from chat history memory."""
+        return self._chat_history_block.remove_records_by_indices(indices)
+
 
 class VectorDBMemory(AgentMemory):
     r"""An agent memory wrapper of :obj:`VectorDBBlock`. This memory queries
@@ -193,6 +203,20 @@ class VectorDBMemory(AgentMemory):
         r"""Removes all records from the vector database memory."""
         self._vectordb_block.clear()
 
+    def pop_records(self, count: int) -> List[MemoryRecord]:
+        r"""Rolling back is unsupported for vector database memory."""
+        raise NotImplementedError(
+            "VectorDBMemory does not support removing historical records."
+        )
+
+    def remove_records_by_indices(
+        self, indices: List[int]
+    ) -> List[MemoryRecord]:
+        r"""Removing by indices is unsupported for vector database memory."""
+        raise NotImplementedError(
+            "VectorDBMemory does not support removing records by indices."
+        )
+
 
 class LongtermAgentMemory(AgentMemory):
     r"""An implementation of the :obj:`AgentMemory` abstract base class for
@@ -277,3 +301,13 @@ class LongtermAgentMemory(AgentMemory):
         r"""Removes all records from the memory."""
         self.chat_history_block.clear()
         self.vector_db_block.clear()
+
+    def pop_records(self, count: int) -> List[MemoryRecord]:
+        r"""Removes recent chat history records while leaving vector memory."""
+        return self.chat_history_block.pop_records(count)
+
+    def remove_records_by_indices(
+        self, indices: List[int]
+    ) -> List[MemoryRecord]:
+        r"""Removes records at specified indices from chat history."""
+        return self.chat_history_block.remove_records_by_indices(indices)
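For orientation, a minimal usage sketch of the new rollback helpers on ChatHistoryMemory (not part of the diff; the constructor arguments, model choice, and message helpers are assumptions based on the existing camel-ai API):

from camel.memories import ChatHistoryMemory, MemoryRecord, ScoreBasedContextCreator
from camel.messages import BaseMessage
from camel.types import ModelType, OpenAIBackendRole
from camel.utils import OpenAITokenCounter

# Assumed setup: a chat history memory backed by the default storage.
memory = ChatHistoryMemory(
    context_creator=ScoreBasedContextCreator(
        OpenAITokenCounter(ModelType.GPT_4O_MINI), token_limit=8192
    ),
)
memory.write_records([
    MemoryRecord(
        message=BaseMessage.make_user_message("User", "hello"),
        role_at_backend=OpenAIBackendRole.USER,
    ),
    MemoryRecord(
        message=BaseMessage.make_assistant_message("Assistant", "hi there"),
        role_at_backend=OpenAIBackendRole.ASSISTANT,
    ),
])

# New in 0.2.79a1: undo the most recent record ...
last = memory.pop_records(1)
# ... or drop specific entries by 0-based position. Both calls return the
# removed MemoryRecord objects in their original order.
memory.remove_records_by_indices([0])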
camel/memories/base.py
CHANGED
@@ -45,6 +45,32 @@ class MemoryBlock(ABC):
         """
         self.write_records([record])
 
+    def pop_records(self, count: int) -> List[MemoryRecord]:
+        r"""Removes records from the memory and returns the removed records.
+
+        Args:
+            count (int): Number of records to remove.
+
+        Returns:
+            List[MemoryRecord]: The records that were removed from the memory
+                in their original order.
+        """
+        raise NotImplementedError
+
+    def remove_records_by_indices(
+        self, indices: List[int]
+    ) -> List[MemoryRecord]:
+        r"""Removes records at specified indices from the memory.
+
+        Args:
+            indices (List[int]): List of indices to remove. Indices should be
+                valid positions in the current record list.
+
+        Returns:
+            List[MemoryRecord]: The removed records in their original order.
+        """
+        raise NotImplementedError
+
     @abstractmethod
     def clear(self) -> None:
         r"""Clears all messages from the memory."""
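Because the new methods are plain (non-abstract) hooks whose defaults raise NotImplementedError, existing MemoryBlock subclasses keep working unchanged, and a custom block only overrides what it can support. A hypothetical sketch (the block class below is invented for illustration):

from typing import List

from camel.memories.base import MemoryBlock
from camel.memories.records import MemoryRecord


class AppendOnlyBlock(MemoryBlock):
    """Hypothetical block: supports pop_records but not removal by index."""

    def __init__(self) -> None:
        self._records: List[MemoryRecord] = []

    def write_records(self, records: List[MemoryRecord]) -> None:
        self._records.extend(records)

    def clear(self) -> None:
        self._records.clear()

    def pop_records(self, count: int) -> List[MemoryRecord]:
        # Clamp, slice off the tail, and return it in original order.
        count = min(max(count, 0), len(self._records))
        popped = self._records[len(self._records) - count:]
        del self._records[len(self._records) - count:]
        return popped

    # remove_records_by_indices is inherited and raises NotImplementedError.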
camel/memories/blocks/chat_history_block.py
CHANGED
@@ -167,3 +167,118 @@ class ChatHistoryBlock(MemoryBlock):
     def clear(self) -> None:
         r"""Clears all chat messages from the memory."""
         self.storage.clear()
+
+    def pop_records(self, count: int) -> List[MemoryRecord]:
+        r"""Removes the most recent records from the memory.
+
+        Args:
+            count (int): Number of records to remove from the end of the
+                conversation history. A value of 0 results in no changes.
+
+        Returns:
+            List[MemoryRecord]: The removed records in chronological order.
+        """
+        if not isinstance(count, int):
+            raise TypeError("`count` must be an integer.")
+        if count < 0:
+            raise ValueError("`count` must be non-negative.")
+        if count == 0:
+            return []
+
+        record_dicts = self.storage.load()
+        if not record_dicts:
+            return []
+
+        # Preserve initial system/developer instruction if present.
+        protected_prefix = (
+            1
+            if (
+                record_dicts
+                and record_dicts[0]['role_at_backend']
+                in {
+                    OpenAIBackendRole.SYSTEM.value,
+                    OpenAIBackendRole.DEVELOPER.value,
+                }
+            )
+            else 0
+        )
+
+        removable_count = max(len(record_dicts) - protected_prefix, 0)
+        if removable_count == 0:
+            return []
+
+        pop_count = min(count, removable_count)
+        split_index = len(record_dicts) - pop_count
+
+        popped_dicts = record_dicts[split_index:]
+        remaining_dicts = record_dicts[:split_index]
+
+        self.storage.clear()
+        if remaining_dicts:
+            self.storage.save(remaining_dicts)
+
+        return [MemoryRecord.from_dict(record) for record in popped_dicts]
+
+    def remove_records_by_indices(
+        self, indices: List[int]
+    ) -> List[MemoryRecord]:
+        r"""Removes records at specified indices from the memory.
+
+        Args:
+            indices (List[int]): List of indices to remove. Indices are
+                positions in the current record list (0-based).
+                System/developer messages at index 0 are protected and will
+                not be removed.
+
+        Returns:
+            List[MemoryRecord]: The removed records in their original order.
+        """
+        if not indices:
+            return []
+
+        record_dicts = self.storage.load()
+        if not record_dicts:
+            return []
+
+        # Preserve initial system/developer instruction if present.
+        protected_prefix = (
+            1
+            if (
+                record_dicts
+                and record_dicts[0]['role_at_backend']
+                in {
+                    OpenAIBackendRole.SYSTEM.value,
+                    OpenAIBackendRole.DEVELOPER.value,
+                }
+            )
+            else 0
+        )
+
+        # Filter out protected indices and invalid ones
+        valid_indices = sorted(
+            {
+                idx
+                for idx in indices
+                if idx >= protected_prefix and idx < len(record_dicts)
+            }
+        )
+
+        if not valid_indices:
+            return []
+
+        # Extract records to remove (in original order)
+        removed_records = [record_dicts[idx] for idx in valid_indices]
+
+        # Build remaining records by excluding removed indices
+        remaining_dicts = [
+            record
+            for idx, record in enumerate(record_dicts)
+            if idx not in valid_indices
+        ]
+
+        # Save back to storage
+        self.storage.clear()
+        if remaining_dicts:
+            self.storage.save(remaining_dicts)
+
+        return [MemoryRecord.from_dict(record) for record in removed_records]
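A sketch of the protection rule implemented above: when the first stored record is a system or developer message, it is not removed by pop_records or remove_records_by_indices. Import paths, the default storage, and the message helpers are assumptions based on the rest of the library:

from camel.memories.blocks import ChatHistoryBlock
from camel.memories.records import MemoryRecord
from camel.messages import BaseMessage
from camel.types import OpenAIBackendRole

block = ChatHistoryBlock()  # assumed to default to in-memory key-value storage
block.write_records([
    MemoryRecord(
        message=BaseMessage.make_system_message("You are terse."),
        role_at_backend=OpenAIBackendRole.SYSTEM,
    ),
    MemoryRecord(
        message=BaseMessage.make_user_message("User", "hi"),
        role_at_backend=OpenAIBackendRole.USER,
    ),
])

# Asking for more than is removable only pops the unprotected tail:
# the user turn comes back, the leading system record stays in storage.
popped = block.pop_records(5)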
camel/memories/context_creators/score_based.py
CHANGED
@@ -11,41 +11,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-from collections import defaultdict
-from typing import Dict, List, Optional, Tuple
 
-from
+from typing import List, Optional, Tuple
 
-from camel.logger import get_logger
 from camel.memories.base import BaseContextCreator
 from camel.memories.records import ContextRecord
-from camel.messages import
+from camel.messages import OpenAIMessage
 from camel.types.enums import OpenAIBackendRole
 from camel.utils import BaseTokenCounter
 
-logger = get_logger(__name__)
-
-
-class _ContextUnit(BaseModel):
-    idx: int
-    record: ContextRecord
-    num_tokens: int
-
 
 class ScoreBasedContextCreator(BaseContextCreator):
-    r"""A
-    from :obj:`BaseContextCreator`.
-
-    This class provides a strategy to generate a conversational context from
-    a list of chat history records while ensuring the total token count of
-    the context does not exceed a specified limit. It prunes messages based
-    on their score if the total token count exceeds the limit.
+    r"""A context creation strategy that orders records chronologically.
 
     Args:
-        token_counter (BaseTokenCounter):
-
-        token_limit (int):
-
+        token_counter (BaseTokenCounter): Token counter instance used to
+            compute the combined token count of the returned messages.
+        token_limit (int): Retained for API compatibility. No longer used to
+            filter records.
     """
 
     def __init__(
@@ -66,376 +49,34 @@ class ScoreBasedContextCreator(BaseContextCreator):
         self,
         records: List[ContextRecord],
     ) -> Tuple[List[OpenAIMessage], int]:
-
-        token limits.
-
-        Key strategies:
-        1. System message is always prioritized and preserved
-        2. Truncation removes low-score messages first
-        3. Final output maintains chronological order and in history memory,
-           the score of each message decreases according to keep_rate. The
-           newer the message, the higher the score.
-        4. Tool calls and their responses are kept together to maintain
-           API compatibility
-
-        Args:
-            records (List[ContextRecord]): List of context records with scores
-                and timestamps.
-
-        Returns:
-            Tuple[List[OpenAIMessage], int]:
-                - Ordered list of OpenAI messages
-                - Total token count of the final context
-
-        Raises:
-            RuntimeError: If system message alone exceeds token limit
-        """
-        # ======================
-        # 1. System Message Handling
-        # ======================
-        system_unit, regular_units = self._extract_system_message(records)
-        system_tokens = system_unit.num_tokens if system_unit else 0
+        """Returns messages sorted by timestamp and their total token count."""
 
-
-
-            raise RuntimeError(
-                f"System message alone exceeds token limit"
-                f": {system_tokens} > {self.token_limit}",
-                system_tokens,
-            )
+        system_record: Optional[ContextRecord] = None
+        remaining_records: List[ContextRecord] = []
 
-
-        # 2. Deduplication & Initial Processing
-        # ======================
-        seen_uuids = set()
-        if system_unit:
-            seen_uuids.add(system_unit.record.memory_record.uuid)
-
-        # Process non-system messages with deduplication
-        for idx, record in enumerate(records):
+        for record in records:
             if (
-
+                system_record is None
+                and record.memory_record.role_at_backend
                 == OpenAIBackendRole.SYSTEM
             ):
+                system_record = record
                 continue
-
-                continue
-            seen_uuids.add(record.memory_record.uuid)
-
-            token_count = self.token_counter.count_tokens_from_messages(
-                [record.memory_record.to_openai_message()]
-            )
-            regular_units.append(
-                _ContextUnit(
-                    idx=idx,
-                    record=record,
-                    num_tokens=token_count,
-                )
-            )
-
-        # ======================
-        # 3. Tool Call Relationship Mapping
-        # ======================
-        tool_call_groups = self._group_tool_calls_and_responses(regular_units)
-
-        # ======================
-        # 4. Token Calculation
-        # ======================
-        total_tokens = system_tokens + sum(u.num_tokens for u in regular_units)
-
-        # ======================
-        # 5. Early Return if Within Limit
-        # ======================
-        if total_tokens <= self.token_limit:
-            sorted_units = sorted(
-                regular_units, key=self._conversation_sort_key
-            )
-            return self._assemble_output(sorted_units, system_unit)
-
-        # ======================
-        # 6. Truncation Logic with Tool Call Awareness
-        # ======================
-        remaining_units = self._truncate_with_tool_call_awareness(
-            regular_units, tool_call_groups, system_tokens
-        )
-
-        # Log only after truncation is actually performed so that both
-        # the original and the final token counts are visible.
-        tokens_after = system_tokens + sum(
-            u.num_tokens for u in remaining_units
-        )
-        logger.warning(
-            "Context truncation performed: "
-            f"before={total_tokens}, after={tokens_after}, "
-            f"limit={self.token_limit}"
-        )
-
-        # ======================
-        # 7. Output Assembly
-        # ======================
-
-        # In case system message is the only message in memory when sorted
-        # units are empty, raise an error
-        if system_unit and len(remaining_units) == 0 and len(records) > 1:
-            raise RuntimeError(
-                "System message and current message exceeds token limit ",
-                total_tokens,
-            )
-
-        # Sort remaining units chronologically
-        final_units = sorted(remaining_units, key=self._conversation_sort_key)
-        return self._assemble_output(final_units, system_unit)
-
-    def _group_tool_calls_and_responses(
-        self, units: List[_ContextUnit]
-    ) -> Dict[str, List[_ContextUnit]]:
-        r"""Groups tool calls with their corresponding responses based on
-        `tool_call_id`.
-
-        This improved logic robustly gathers all messages (assistant requests
-        and tool responses, including chunks) that share a `tool_call_id`.
-
-        Args:
-            units (List[_ContextUnit]): List of context units to analyze.
-
-        Returns:
-            Dict[str, List[_ContextUnit]]: Mapping from `tool_call_id` to a
-                list of related units.
-        """
-        tool_call_groups: Dict[str, List[_ContextUnit]] = defaultdict(list)
-
-        for unit in units:
-            # FunctionCallingMessage stores tool_call_id.
-            message = unit.record.memory_record.message
-            tool_call_id = getattr(message, 'tool_call_id', None)
-
-            if tool_call_id:
-                tool_call_groups[tool_call_id].append(unit)
-
-        # Filter out empty or incomplete groups if necessary,
-        # though defaultdict and getattr handle this gracefully.
-        return dict(tool_call_groups)
-
-    def _truncate_with_tool_call_awareness(
-        self,
-        regular_units: List[_ContextUnit],
-        tool_call_groups: Dict[str, List[_ContextUnit]],
-        system_tokens: int,
-    ) -> List[_ContextUnit]:
-        r"""Truncates messages while preserving tool call-response pairs.
-        This method implements a more sophisticated truncation strategy:
-        1. It treats tool call groups (request + responses) and standalone
-           messages as individual items to be included.
-        2. It sorts all items by score and greedily adds them to the context.
-        3. **Partial Truncation**: If a complete tool group is too large to
-           fit,it attempts to add the request message and as many of the most
-           recent response chunks as the token budget allows.
-
-        Args:
-            regular_units (List[_ContextUnit]): All regular message units.
-            tool_call_groups (Dict[str, List[_ContextUnit]]): Grouped tool
-                calls.
-            system_tokens (int): Tokens used by the system message.
-
-        Returns:
-            List[_ContextUnit]: A list of units that fit within the token
-                limit.
-        """
-
-        # Create a set for quick lookup of units belonging to any tool call
-        tool_call_unit_ids = {
-            unit.record.memory_record.uuid
-            for group in tool_call_groups.values()
-            for unit in group
-        }
-
-        # Separate standalone units from tool call groups
-        standalone_units = [
-            u
-            for u in regular_units
-            if u.record.memory_record.uuid not in tool_call_unit_ids
-        ]
-
-        # Prepare all items (standalone units and groups) for sorting
-        all_potential_items: List[Dict] = []
-        for unit in standalone_units:
-            all_potential_items.append(
-                {
-                    "type": "standalone",
-                    "score": unit.record.score,
-                    "timestamp": unit.record.timestamp,
-                    "tokens": unit.num_tokens,
-                    "item": unit,
-                }
-            )
-        for group in tool_call_groups.values():
-            all_potential_items.append(
-                {
-                    "type": "group",
-                    "score": max(u.record.score for u in group),
-                    "timestamp": max(u.record.timestamp for u in group),
-                    "tokens": sum(u.num_tokens for u in group),
-                    "item": group,
-                }
-            )
-
-        # Sort all potential items by score (high to low), then timestamp
-        all_potential_items.sort(key=lambda x: (-x["score"], -x["timestamp"]))
-
-        remaining_units: List[_ContextUnit] = []
-        current_tokens = system_tokens
-
-        for item_dict in all_potential_items:
-            item_type = item_dict["type"]
-            item = item_dict["item"]
-            item_tokens = item_dict["tokens"]
-
-            if current_tokens + item_tokens <= self.token_limit:
-                # The whole item (standalone or group) fits, so add it
-                if item_type == "standalone":
-                    remaining_units.append(item)
-                else:  # item_type == "group"
-                    remaining_units.extend(item)
-                current_tokens += item_tokens
-
-            elif item_type == "group":
-                # The group does not fit completely; try partial inclusion.
-                request_unit: Optional[_ContextUnit] = None
-                response_units: List[_ContextUnit] = []
-
-                for unit in item:
-                    # Assistant msg with `args` is the request
-                    if (
-                        isinstance(
-                            unit.record.memory_record.message,
-                            FunctionCallingMessage,
-                        )
-                        and unit.record.memory_record.message.args is not None
-                    ):
-                        request_unit = unit
-                    else:
-                        response_units.append(unit)
-
-                # A group must have a request to be considered for inclusion.
-                if request_unit is None:
-                    continue
-
-                # Check if we can at least fit the request.
-                if (
-                    current_tokens + request_unit.num_tokens
-                    <= self.token_limit
-                ):
-                    units_to_add = [request_unit]
-                    tokens_to_add = request_unit.num_tokens
-
-                    # Sort responses by timestamp to add newest chunks first
-                    response_units.sort(
-                        key=lambda u: u.record.timestamp, reverse=True
-                    )
+            remaining_records.append(record)
 
-
-                        if (
-                            current_tokens
-                            + tokens_to_add
-                            + resp_unit.num_tokens
-                            <= self.token_limit
-                        ):
-                            units_to_add.append(resp_unit)
-                            tokens_to_add += resp_unit.num_tokens
+        remaining_records.sort(key=lambda record: record.timestamp)
 
-
-
-
-                    current_tokens += tokens_to_add
+        messages: List[OpenAIMessage] = []
+        if system_record is not None:
+            messages.append(system_record.memory_record.to_openai_message())
 
-
-
-
-        self, records: List[ContextRecord]
-    ) -> Tuple[Optional[_ContextUnit], List[_ContextUnit]]:
-        r"""Extracts the system message from records and validates it.
-
-        Args:
-            records (List[ContextRecord]): List of context records
-                representing conversation history.
-
-        Returns:
-            Tuple[Optional[_ContextUnit], List[_ContextUnit]]: containing:
-                - The system message as a `_ContextUnit`, if valid; otherwise,
-                  `None`.
-                - An empty list, serving as the initial container for regular
-                  messages.
-        """
-        if not records:
-            return None, []
-
-        first_record = records[0]
-        if (
-            first_record.memory_record.role_at_backend
-            != OpenAIBackendRole.SYSTEM
-        ):
-            return None, []
-
-        message = first_record.memory_record.to_openai_message()
-        tokens = self.token_counter.count_tokens_from_messages([message])
-        system_message_unit = _ContextUnit(
-            idx=0,
-            record=first_record,
-            num_tokens=tokens,
+        messages.extend(
+            record.memory_record.to_openai_message()
+            for record in remaining_records
         )
-        return system_message_unit, []
-
-    def _conversation_sort_key(
-        self, unit: _ContextUnit
-    ) -> Tuple[float, float]:
-        r"""Defines the sorting key for assembling the final output.
-
-        Sorting priority:
-        - Primary: Sort by timestamp in ascending order (chronological order).
-        - Secondary: Sort by score in descending order (higher scores first
-          when timestamps are equal).
-
-        Args:
-            unit (_ContextUnit): A `_ContextUnit` representing a conversation
-                record.
-
-        Returns:
-            Tuple[float, float]:
-                - Timestamp for chronological sorting.
-                - Negative score for descending order sorting.
-        """
-        return (unit.record.timestamp, -unit.record.score)
-
-    def _assemble_output(
-        self,
-        context_units: List[_ContextUnit],
-        system_unit: Optional[_ContextUnit],
-    ) -> Tuple[List[OpenAIMessage], int]:
-        r"""Assembles final message list with proper ordering and token count.
-
-        Args:
-            context_units (List[_ContextUnit]): Sorted list of regular message
-                units.
-            system_unit (Optional[_ContextUnit]): System message unit (if
-                present).
-
-        Returns:
-            Tuple[List[OpenAIMessage], int]: Tuple of (ordered messages, total
-                tokens)
-        """
-        messages = []
-        total_tokens = 0
-
-        # Add system message first if present
-        if system_unit:
-            messages.append(
-                system_unit.record.memory_record.to_openai_message()
-            )
-            total_tokens += system_unit.num_tokens
 
-
-
-            messages.append(unit.record.memory_record.to_openai_message())
-            total_tokens += unit.num_tokens
+        if not messages:
+            return [], 0
 
+        total_tokens = self.token_counter.count_tokens_from_messages(messages)
         return messages, total_tokens
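The practical effect: the creator now returns the full history in timestamp order (system message first when present) and reports the token count instead of pruning to token_limit. A sketch of how this surfaces through an agent memory, with constructor arguments and helpers assumed from the existing camel-ai API:

from camel.memories import ChatHistoryMemory, MemoryRecord, ScoreBasedContextCreator
from camel.messages import BaseMessage
from camel.types import ModelType, OpenAIBackendRole
from camel.utils import OpenAITokenCounter

creator = ScoreBasedContextCreator(
    token_counter=OpenAITokenCounter(ModelType.GPT_4O_MINI),
    token_limit=1024,  # kept for compatibility; no longer drops records
)
memory = ChatHistoryMemory(context_creator=creator)
memory.write_record(
    MemoryRecord(
        message=BaseMessage.make_user_message("User", "hello"),
        role_at_backend=OpenAIBackendRole.USER,
    )
)

# Delegates to ScoreBasedContextCreator.create_context(); callers that
# relied on automatic truncation may now need to trim the history
# themselves, e.g. with the new pop_records / remove_records_by_indices.
messages, total_tokens = memory.get_context()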
camel/messages/base.py
CHANGED
@@ -178,6 +178,32 @@ class BaseMessage:
                 OpenAIVisionDetailType(video_detail).value,
             )
 
+    @classmethod
+    def make_system_message(
+        cls,
+        content: str,
+        role_name: str = "System",
+        meta_dict: Optional[Dict[str, str]] = None,
+    ) -> "BaseMessage":
+        r"""Create a new system message.
+
+        Args:
+            content (str): The content of the system message.
+            role_name (str): The name of the system role.
+                (default: :obj:`"System"`)
+            meta_dict (Optional[Dict[str, str]]): Additional metadata
+                dictionary for the message.
+
+        Returns:
+            BaseMessage: The new system message.
+        """
+        return cls(
+            role_name,
+            RoleType.SYSTEM,
+            meta_dict,
+            content,
+        )
+
     def create_new_instance(self, content: str) -> "BaseMessage":
         r"""Create a new instance of the :obj:`BaseMessage` with updated
         content.