camel-ai 0.2.76a13__py3-none-any.whl → 0.2.77__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +591 -171
- camel/data_collectors/alpaca_collector.py +15 -6
- camel/societies/workforce/prompts.py +131 -50
- camel/societies/workforce/single_agent_worker.py +390 -11
- camel/societies/workforce/structured_output_handler.py +30 -18
- camel/societies/workforce/utils.py +105 -12
- camel/societies/workforce/workforce.py +818 -224
- camel/societies/workforce/workforce_logger.py +24 -5
- camel/toolkits/context_summarizer_toolkit.py +9 -8
- camel/toolkits/terminal_toolkit/utils.py +106 -154
- camel/types/enums.py +4 -4
- camel/utils/context_utils.py +379 -22
- {camel_ai-0.2.76a13.dist-info → camel_ai-0.2.77.dist-info}/METADATA +2 -1
- {camel_ai-0.2.76a13.dist-info → camel_ai-0.2.77.dist-info}/RECORD +17 -17
- {camel_ai-0.2.76a13.dist-info → camel_ai-0.2.77.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.76a13.dist-info → camel_ai-0.2.77.dist-info}/licenses/LICENSE +0 -0
camel/agents/chat_agent.py
CHANGED
@@ -14,15 +14,23 @@
 from __future__ import annotations
 
 import asyncio
+import atexit
+import base64
 import concurrent.futures
+import hashlib
+import inspect
 import json
-import
-import
+import math
+import os
 import random
+import re
+import tempfile
 import textwrap
 import threading
 import time
 import uuid
+import warnings
+from dataclasses import dataclass
 from datetime import datetime
 from pathlib import Path
 from typing import (
@@ -101,10 +109,24 @@ if TYPE_CHECKING:
 
 logger = get_logger(__name__)
 
+# Cleanup temp files on exit
+_temp_files: Set[str] = set()
+_temp_files_lock = threading.Lock()
+
+
+def _cleanup_temp_files():
+    with _temp_files_lock:
+        for path in _temp_files:
+            try:
+                os.unlink(path)
+            except Exception:
+                pass
+
+
+atexit.register(_cleanup_temp_files)
+
 # AgentOps decorator setting
 try:
-    import os
-
     if os.getenv("AGENTOPS_API_KEY") is not None:
         from agentops import track_agent
     else:
@@ -138,6 +160,53 @@ SIMPLE_FORMAT_PROMPT = TextPrompt(
 )
 
 
+@dataclass
+class _ToolOutputHistoryEntry:
+    tool_name: str
+    tool_call_id: str
+    result_text: str
+    record_uuids: List[str]
+    record_timestamps: List[float]
+    preview_text: str
+    cached: bool = False
+    cache_id: Optional[str] = None
+
+
+class _ToolOutputCacheManager:
+    r"""Minimal persistent store for caching verbose tool outputs."""
+
+    def __init__(self, base_dir: Union[str, Path]) -> None:
+        self.base_dir = Path(base_dir).expanduser().resolve()
+        self.base_dir.mkdir(parents=True, exist_ok=True)
+
+    def save(
+        self,
+        tool_name: str,
+        tool_call_id: str,
+        content: str,
+    ) -> Tuple[str, Path]:
+        cache_id = uuid.uuid4().hex
+        filename = f"{cache_id}.txt"
+        path = self.base_dir / filename
+        header = (
+            f"# Cached tool output\n"
+            f"tool_name: {tool_name}\n"
+            f"tool_call_id: {tool_call_id}\n"
+            f"cache_id: {cache_id}\n"
+            f"---\n"
+        )
+        path.write_text(f"{header}{content}", encoding="utf-8")
+        return cache_id, path
+
+    def load(self, cache_id: str) -> str:
+        path = self.base_dir / f"{cache_id}.txt"
+        if not path.exists():
+            raise FileNotFoundError(
+                f"Cached tool output {cache_id} not found at {path}"
+            )
+        return path.read_text(encoding="utf-8")
+
+
 class StreamContentAccumulator:
     r"""Manages content accumulation across streaming responses to ensure
     all responses contain complete cumulative content."""
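The `_ToolOutputCacheManager` above is a small file-backed store. A minimal sketch of the round-trip it provides, assuming camel-ai 0.2.77 is installed (the class is private, so the direct import is for illustration only):

from camel.agents.chat_agent import _ToolOutputCacheManager

manager = _ToolOutputCacheManager("tool_cache")
# save() writes "<cache_id>.txt" containing a short header (tool_name,
# tool_call_id, cache_id) followed by the raw content, and returns both
# the hex cache_id and the file path.
cache_id, path = manager.save(
    tool_name="search_web",           # illustrative tool name
    tool_call_id="call_123",          # illustrative call id
    content="...very long tool output...",
)
# load() returns header plus content, or raises FileNotFoundError for an
# unknown cache_id.
print(manager.load(cache_id))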
@@ -194,13 +263,10 @@ class StreamingChatAgentResponse:
     def _ensure_latest_response(self):
         r"""Ensure we have the latest response by consuming the generator."""
         if not self._consumed:
-            try:
-                for response in self._generator:
-                    self._responses.append(response)
-                    self._current_response = response
-                self._consumed = True
-            except StopIteration:
-                self._consumed = True
+            for response in self._generator:
+                self._responses.append(response)
+                self._current_response = response
+            self._consumed = True
 
     @property
     def msgs(self) -> List[BaseMessage]:
@@ -241,14 +307,11 @@ class StreamingChatAgentResponse:
             yield from self._responses
         else:
             # If not consumed, consume and yield
-            try:
-                for response in self._generator:
-                    self._responses.append(response)
-                    self._current_response = response
-                    yield response
-                self._consumed = True
-            except StopIteration:
-                self._consumed = True
+            for response in self._generator:
+                self._responses.append(response)
+                self._current_response = response
+                yield response
+            self._consumed = True
 
     def __getattr__(self, name):
         r"""Forward any other attribute access to the latest response."""
@@ -279,13 +342,10 @@ class AsyncStreamingChatAgentResponse:
     async def _ensure_latest_response(self):
         r"""Ensure the latest response by consuming the async generator."""
         if not self._consumed:
-            try:
-                async for response in self._async_generator:
-                    self._responses.append(response)
-                    self._current_response = response
-                self._consumed = True
-            except StopAsyncIteration:
-                self._consumed = True
+            async for response in self._async_generator:
+                self._responses.append(response)
+                self._current_response = response
+            self._consumed = True
 
     async def _get_final_response(self) -> ChatAgentResponse:
         r"""Get the final response after consuming the entire stream."""
@@ -311,14 +371,11 @@ class AsyncStreamingChatAgentResponse:
         else:
             # If not consumed, consume and yield
             async def _consume_and_yield():
-                try:
-                    async for response in self._async_generator:
-                        self._responses.append(response)
-                        self._current_response = response
-                        yield response
-                    self._consumed = True
-                except StopAsyncIteration:
-                    self._consumed = True
+                async for response in self._async_generator:
+                    self._responses.append(response)
+                    self._current_response = response
+                    yield response
+                self._consumed = True
 
             return _consume_and_yield()
 
@@ -386,14 +443,28 @@ class ChatAgent(BaseAgent):
             for individual tool execution. If None, wait indefinitely.
         mask_tool_output (Optional[bool]): Whether to return a sanitized
             placeholder instead of the raw tool output. (default: :obj:`False`)
-        pause_event (Optional[asyncio.Event]): Event to
-            agent's operation. When clear, the agent will
-
+        pause_event (Optional[Union[threading.Event, asyncio.Event]]): Event to
+            signal pause of the agent's operation. When clear, the agent will
+            pause its execution. Use threading.Event for sync operations or
+            asyncio.Event for async operations. (default: :obj:`None`)
         prune_tool_calls_from_memory (bool): Whether to clean tool
             call messages from memory after response generation to save token
             usage. When enabled, removes FUNCTION/TOOL role messages and
             ASSISTANT messages with tool_calls after each step.
             (default: :obj:`False`)
+        enable_tool_output_cache (bool, optional): Whether to offload verbose
+            historical tool outputs to a local cache and replace them with
+            lightweight references in memory. Only older tool results whose
+            payload length exceeds ``tool_output_cache_threshold`` are cached.
+            (default: :obj:`False`)
+        tool_output_cache_threshold (int, optional): Minimum character length
+            of a tool result before it becomes eligible for caching. Values
+            below or equal to zero disable caching regardless of the toggle.
+            (default: :obj:`2000`)
+        tool_output_cache_dir (Optional[Union[str, Path]], optional): Target
+            directory for cached tool outputs. When omitted, a ``tool_cache``
+            directory relative to the current working directory is used.
+            (default: :obj:`None`)
         retry_attempts (int, optional): Maximum number of retry attempts for
             rate limit errors. (default: :obj:`3`)
         retry_delay (float, optional): Initial delay in seconds between
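A hedged sketch of how the new constructor options compose; model configuration is elided (library defaults assumed) and the system message is illustrative:

import threading

from camel.agents import ChatAgent

pause_event = threading.Event()
pause_event.set()  # set = running; clear it to pause the agent

agent = ChatAgent(
    system_message="You are a research assistant.",
    pause_event=pause_event,             # now accepts threading.Event too
    enable_tool_output_cache=True,       # offload verbose older tool outputs
    tool_output_cache_threshold=2000,    # cache results of >= 2000 characters
    tool_output_cache_dir="tool_cache",  # defaults to ./tool_cache when None
)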
@@ -443,8 +514,11 @@
         stop_event: Optional[threading.Event] = None,
         tool_execution_timeout: Optional[float] = None,
         mask_tool_output: bool = False,
-        pause_event: Optional[asyncio.Event] = None,
+        pause_event: Optional[Union[threading.Event, asyncio.Event]] = None,
         prune_tool_calls_from_memory: bool = False,
+        enable_tool_output_cache: bool = False,
+        tool_output_cache_threshold: int = 2000,
+        tool_output_cache_dir: Optional[Union[str, Path]] = None,
         retry_attempts: int = 3,
         retry_delay: float = 1.0,
         step_timeout: Optional[float] = None,
@@ -464,6 +538,28 @@
         # Assign unique ID
         self.agent_id = agent_id if agent_id else str(uuid.uuid4())
 
+        self._tool_output_cache_enabled = (
+            enable_tool_output_cache and tool_output_cache_threshold > 0
+        )
+        self._tool_output_cache_threshold = max(0, tool_output_cache_threshold)
+        self._tool_output_cache_dir: Optional[Path]
+        self._tool_output_cache_manager: Optional[_ToolOutputCacheManager]
+        if self._tool_output_cache_enabled:
+            cache_dir = (
+                Path(tool_output_cache_dir).expanduser()
+                if tool_output_cache_dir is not None
+                else Path("tool_cache")
+            )
+            self._tool_output_cache_dir = cache_dir
+            self._tool_output_cache_manager = _ToolOutputCacheManager(
+                cache_dir
+            )
+        else:
+            self._tool_output_cache_dir = None
+            self._tool_output_cache_manager = None
+        self._tool_output_history: List[_ToolOutputHistoryEntry] = []
+        self._cache_lookup_tool_name = "retrieve_cached_tool_output"
+
         # Set up memory
         context_creator = ScoreBasedContextCreator(
             self.model_backend.token_counter,
@@ -510,6 +606,8 @@
                 convert_to_function_tool(tool) for tool in (tools or [])
             ]
         }
+        if self._tool_output_cache_enabled:
+            self._ensure_tool_cache_lookup_tool()
 
         # Register agent with toolkits that have RegisteredAgentToolkit mixin
         if toolkits_to_register_agent:
@@ -532,6 +630,7 @@
         self.tool_execution_timeout = tool_execution_timeout
         self.mask_tool_output = mask_tool_output
         self._secure_result_store: Dict[str, Any] = {}
+        self._secure_result_store_lock = threading.Lock()
         self.pause_event = pause_event
         self.prune_tool_calls_from_memory = prune_tool_calls_from_memory
         self.retry_attempts = max(1, retry_attempts)
@@ -545,6 +644,8 @@
         r"""Resets the :obj:`ChatAgent` to its initial state."""
         self.terminated = False
         self.init_messages()
+        if self._tool_output_cache_enabled:
+            self._tool_output_history.clear()
         for terminator in self.response_terminators:
             terminator.reset()
 
@@ -728,6 +829,20 @@
         # Ensure the new memory has the system message
         self.init_messages()
 
+    def set_context_utility(
+        self, context_utility: Optional[ContextUtility]
+    ) -> None:
+        r"""Set the context utility for the agent.
+
+        This allows external components (like SingleAgentWorker) to provide
+        a shared context utility instance for workflow management.
+
+        Args:
+            context_utility (ContextUtility, optional): The context utility
+                to use. If None, the agent will create its own when needed.
+        """
+        self._context_utility = context_utility
+
     def _get_full_tool_schemas(self) -> List[Dict[str, Any]]:
         r"""Returns a list of tool schemas of all tools, including internal
         and external tools.
@@ -751,6 +866,178 @@
         for tool in tools:
             self.add_tool(tool)
 
+    def retrieve_cached_tool_output(self, cache_id: str) -> str:
+        r"""Load a cached tool output by its cache identifier.
+
+        Args:
+            cache_id (str): Identifier provided in cached tool messages.
+
+        Returns:
+            str: The cached content or an explanatory error message.
+        """
+        if not self._tool_output_cache_manager:
+            return "Tool output caching is disabled for this agent instance."
+
+        normalized_cache_id = cache_id.strip()
+        if not normalized_cache_id:
+            return "Please provide a non-empty cache_id."
+
+        try:
+            return self._tool_output_cache_manager.load(normalized_cache_id)
+        except FileNotFoundError:
+            return (
+                f"Cache entry '{normalized_cache_id}' was not found. "
+                "Verify the identifier and try again."
+            )
+
+    def _ensure_tool_cache_lookup_tool(self) -> None:
+        if not self._tool_output_cache_enabled:
+            return
+        lookup_name = self._cache_lookup_tool_name
+        if lookup_name in self._internal_tools:
+            return
+        lookup_tool = convert_to_function_tool(
+            self.retrieve_cached_tool_output
+        )
+        self._internal_tools[lookup_tool.get_function_name()] = lookup_tool
+
+    def _serialize_tool_result(self, result: Any) -> str:
+        if isinstance(result, str):
+            return result
+        try:
+            return json.dumps(result, ensure_ascii=False)
+        except (TypeError, ValueError):
+            return str(result)
+
+    def _summarize_tool_result(self, text: str, limit: int = 160) -> str:
+        normalized = re.sub(r"\s+", " ", text).strip()
+        if len(normalized) <= limit:
+            return normalized
+        return normalized[: max(0, limit - 3)].rstrip() + "..."
+
+    def _register_tool_output_for_cache(
+        self,
+        func_name: str,
+        tool_call_id: str,
+        result_text: str,
+        records: List[MemoryRecord],
+    ) -> None:
+        if not records:
+            return
+
+        entry = _ToolOutputHistoryEntry(
+            tool_name=func_name,
+            tool_call_id=tool_call_id,
+            result_text=result_text,
+            record_uuids=[str(record.uuid) for record in records],
+            record_timestamps=[record.timestamp for record in records],
+            preview_text=self._summarize_tool_result(result_text),
+        )
+        self._tool_output_history.append(entry)
+        self._process_tool_output_cache()
+
+    def _process_tool_output_cache(self) -> None:
+        if (
+            not self._tool_output_cache_enabled
+            or not self._tool_output_history
+            or self._tool_output_cache_manager is None
+        ):
+            return
+
+        # Only cache older results; keep the latest expanded for immediate use.
+        for entry in self._tool_output_history[:-1]:
+            if entry.cached:
+                continue
+            if len(entry.result_text) < self._tool_output_cache_threshold:
+                continue
+            self._cache_tool_output_entry(entry)
+
+    def _cache_tool_output_entry(self, entry: _ToolOutputHistoryEntry) -> None:
+        if self._tool_output_cache_manager is None or not entry.record_uuids:
+            return
+
+        try:
+            cache_id, cache_path = self._tool_output_cache_manager.save(
+                entry.tool_name,
+                entry.tool_call_id,
+                entry.result_text,
+            )
+        except Exception as exc:  # pragma: no cover - defensive
+            logger.warning(
+                "Failed to persist cached tool output for %s (%s): %s",
+                entry.tool_name,
+                entry.tool_call_id,
+                exc,
+            )
+            return
+
+        timestamp = (
+            entry.record_timestamps[0]
+            if entry.record_timestamps
+            else time.time_ns() / 1_000_000_000
+        )
+        reference_message = FunctionCallingMessage(
+            role_name=self.role_name,
+            role_type=self.role_type,
+            meta_dict={
+                "cache_id": cache_id,
+                "cached_preview": entry.preview_text,
+                "cached_tool_output_path": str(cache_path),
+            },
+            content="",
+            func_name=entry.tool_name,
+            result=self._build_cache_reference_text(entry, cache_id),
+            tool_call_id=entry.tool_call_id,
+        )
+
+        chat_history_block = getattr(self.memory, "_chat_history_block", None)
+        storage = getattr(chat_history_block, "storage", None)
+        if storage is None:
+            return
+
+        existing_records = storage.load()
+        updated_records = [
+            record
+            for record in existing_records
+            if record["uuid"] not in entry.record_uuids
+        ]
+        new_record = MemoryRecord(
+            message=reference_message,
+            role_at_backend=OpenAIBackendRole.FUNCTION,
+            timestamp=timestamp,
+            agent_id=self.agent_id,
+        )
+        updated_records.append(new_record.to_dict())
+        updated_records.sort(key=lambda record: record["timestamp"])
+        storage.clear()
+        storage.save(updated_records)
+
+        logger.info(
+            "Cached tool output '%s' (%s) to %s with cache_id=%s",
+            entry.tool_name,
+            entry.tool_call_id,
+            cache_path,
+            cache_id,
+        )
+
+        entry.cached = True
+        entry.cache_id = cache_id
+        entry.record_uuids = [str(new_record.uuid)]
+        entry.record_timestamps = [timestamp]
+
+    def _build_cache_reference_text(
+        self, entry: _ToolOutputHistoryEntry, cache_id: str
+    ) -> str:
+        preview = entry.preview_text or "[no preview available]"
+        return (
+            "[cached tool output]\n"
+            f"tool: {entry.tool_name}\n"
+            f"cache_id: {cache_id}\n"
+            f"preview: {preview}\n"
+            f"Use `{self._cache_lookup_tool_name}` with this cache_id to "
+            "retrieve the full content."
+        )
+
     def add_external_tool(
         self, tool: Union[FunctionTool, Callable, Dict[str, Any]]
     ) -> None:
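Once an older tool result crosses the threshold, its memory record is rewritten into the lightweight reference built by `_build_cache_reference_text`. A sketch of that reference's shape and the retrieval path, reusing `agent` from the earlier sketch and an illustrative cache id:

reference_text = (
    "[cached tool output]\n"
    "tool: search_web\n"
    "cache_id: 3f2a9c...\n"    # illustrative id
    "preview: first ~160 characters of the result...\n"
    "Use `retrieve_cached_tool_output` with this cache_id to "
    "retrieve the full content."
)
# The model restores the payload by calling the auto-registered lookup tool:
full_text = agent.retrieve_cached_tool_output("3f2a9c...")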
@@ -795,7 +1082,8 @@
         message: BaseMessage,
         role: OpenAIBackendRole,
         timestamp: Optional[float] = None,
-    ) -> None:
+        return_records: bool = False,
+    ) -> Optional[List[MemoryRecord]]:
         r"""Updates the agent memory with a new message.
 
         If the single *message* exceeds the model's context window, it will
@@ -815,24 +1103,29 @@
             timestamp (Optional[float], optional): Custom timestamp for the
                 memory record. If `None`, the current time will be used.
                 (default: :obj:`None`)
-
+            return_records (bool, optional): When ``True`` the method returns
+                the list of :class:`MemoryRecord` objects written to memory.
+                (default: :obj:`False`)
+
+        Returns:
+            Optional[List[MemoryRecord]]: The records that were written when
+                ``return_records`` is ``True``; otherwise ``None``.
         """
-
-
-        import uuid as _uuid
+
+        written_records: List[MemoryRecord] = []
 
         # 1. Helper to write a record to memory
         def _write_single_record(
             message: BaseMessage, role: OpenAIBackendRole, timestamp: float
         ):
-            self.memory.write_record(
-                MemoryRecord(
-                    message=message,
-                    role_at_backend=role,
-                    timestamp=timestamp,
-                    agent_id=self.agent_id,
-                )
+            record = MemoryRecord(
+                message=message,
+                role_at_backend=role,
+                timestamp=timestamp,
+                agent_id=self.agent_id,
             )
+            written_records.append(record)
+            self.memory.write_record(record)
 
         base_ts = (
             timestamp
@@ -847,14 +1140,13 @@
             token_limit = context_creator.token_limit
         except AttributeError:
             _write_single_record(message, role, base_ts)
-            return
+            return written_records if return_records else None
 
         # 3. Check if slicing is necessary
         try:
             current_tokens = token_counter.count_tokens_from_messages(
                 [message.to_openai_message(role)]
             )
-            import warnings
 
             with warnings.catch_warnings():
                 warnings.filterwarnings("ignore", category=EmptyMemoryWarning)
@@ -864,14 +1156,14 @@
 
             if current_tokens <= remaining_budget:
                 _write_single_record(message, role, base_ts)
-                return
+                return written_records if return_records else None
         except Exception as e:
             logger.warning(
                 f"Token calculation failed before chunking, "
                 f"writing message as-is. Error: {e}"
             )
             _write_single_record(message, role, base_ts)
-            return
+            return written_records if return_records else None
 
         # 4. Perform slicing
         logger.warning(
@@ -892,18 +1184,18 @@
 
         if not text_to_chunk or not text_to_chunk.strip():
             _write_single_record(message, role, base_ts)
-            return
+            return written_records if return_records else None
         # Encode the entire text to get a list of all token IDs
         try:
             all_token_ids = token_counter.encode(text_to_chunk)
         except Exception as e:
             logger.error(f"Failed to encode text for chunking: {e}")
             _write_single_record(message, role, base_ts)  # Fallback
-            return
+            return written_records if return_records else None
 
         if not all_token_ids:
             _write_single_record(message, role, base_ts)  # Nothing to chunk
-            return
+            return written_records if return_records else None
 
         # 1. Base chunk size: one-tenth of the smaller of (a) total token
         # limit and (b) current remaining budget. This prevents us from
@@ -928,7 +1220,7 @@
 
         # 4. Calculate how many chunks we will need with this body size.
         num_chunks = math.ceil(len(all_token_ids) / chunk_body_limit)
-        group_id = str(_uuid.uuid4())
+        group_id = str(uuid.uuid4())
 
         for i in range(num_chunks):
             start_idx = i * chunk_body_limit
@@ -969,6 +1261,8 @@
             # Increment timestamp slightly to maintain order
             _write_single_record(new_msg, role, base_ts + i * 1e-6)
 
+        return written_records if return_records else None
+
     def load_memory(self, memory: AgentMemory) -> None:
         r"""Load the provided memory into the agent.
 
@@ -1050,6 +1344,7 @@
         self,
         filename: Optional[str] = None,
         summary_prompt: Optional[str] = None,
+        response_format: Optional[Type[BaseModel]] = None,
         working_directory: Optional[Union[str, Path]] = None,
     ) -> Dict[str, Any]:
         r"""Summarize the agent's current conversation context and persist it
@@ -1062,13 +1357,18 @@
             summary_prompt (Optional[str]): Custom prompt for the summarizer.
                 When omitted, a default prompt highlighting key decisions,
                 action items, and open questions is used.
+            response_format (Optional[Type[BaseModel]]): A Pydantic model
+                defining the expected structure of the response. If provided,
+                the summary will be generated as structured output and included
+                in the result.
             working_directory (Optional[str|Path]): Optional directory to save
                 the markdown summary file. If provided, overrides the default
                 directory used by ContextUtility.
 
         Returns:
             Dict[str, Any]: A dictionary containing the summary text, file
-                path, and
+                path, status message, and optionally structured_summary if
+                response_format was provided.
         """
 
         result: Dict[str, Any] = {
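A sketch of the new structured-summary path, assuming the method shown above is exposed as ChatAgent.summarize; the Pydantic model and filename are hypothetical, and `agent` is the instance from the earlier sketch:

from typing import List

from pydantic import BaseModel


class MeetingSummary(BaseModel):
    key_decisions: List[str]
    action_items: List[str]
    open_questions: List[str]


result = agent.summarize(
    filename="session_summary",
    response_format=MeetingSummary,
)
# When the response parses, result["structured_summary"] holds the
# MeetingSummary instance and the saved markdown is rendered from it.
print(result["structured_summary"])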
@@ -1078,6 +1378,7 @@
         }
 
         try:
+            # Use external context if set, otherwise create local one
             if self._context_utility is None:
                 if working_directory is not None:
                     self._context_utility = ContextUtility(
@@ -1085,6 +1386,7 @@
                     )
                 else:
                     self._context_utility = ContextUtility()
+            context_util = self._context_utility
 
             # Get conversation directly from agent's memory
             messages, _ = self.memory.get_context()
@@ -1101,7 +1403,58 @@
             for message in messages:
                 role = message.get('role', 'unknown')
                 content = message.get('content', '')
-                if content:
+
+                # Handle tool call messages (assistant calling tools)
+                tool_calls = message.get('tool_calls')
+                if tool_calls and isinstance(tool_calls, (list, tuple)):
+                    for tool_call in tool_calls:
+                        # Handle both dict and object formats
+                        if isinstance(tool_call, dict):
+                            func_name = tool_call.get('function', {}).get(
+                                'name', 'unknown_tool'
+                            )
+                            func_args_str = tool_call.get('function', {}).get(
+                                'arguments', '{}'
+                            )
+                        else:
+                            # Handle object format (Pydantic or similar)
+                            func_name = getattr(
+                                getattr(tool_call, 'function', None),
+                                'name',
+                                'unknown_tool',
+                            )
+                            func_args_str = getattr(
+                                getattr(tool_call, 'function', None),
+                                'arguments',
+                                '{}',
+                            )
+
+                        # Parse and format arguments for readability
+                        try:
+                            import json
+
+                            args_dict = json.loads(func_args_str)
+                            args_formatted = ', '.join(
+                                f"{k}={v}" for k, v in args_dict.items()
+                            )
+                        except (json.JSONDecodeError, ValueError, TypeError):
+                            args_formatted = func_args_str
+
+                        conversation_lines.append(
+                            f"[TOOL CALL] {func_name}({args_formatted})"
+                        )
+
+                # Handle tool response messages
+                elif role == 'tool':
+                    tool_name = message.get('name', 'unknown_tool')
+                    if not content:
+                        content = str(message.get('content', ''))
+                    conversation_lines.append(
+                        f"[TOOL RESULT] {tool_name} → {content}"
+                    )
+
+                # Handle regular content messages (user/assistant/system)
+                elif content:
                     conversation_lines.append(f"{role}: {content}")
 
             conversation_text = "\n".join(conversation_lines).strip()
@@ -1117,7 +1470,7 @@
                 self._context_summary_agent = ChatAgent(
                     system_message=(
                         "You are a helpful assistant that summarizes "
-                        "conversations
+                        "conversations"
                     ),
                     model=self.model_backend,
                     agent_id=f"{self.agent_id}_context_summarizer",
@@ -1128,7 +1481,8 @@
             if summary_prompt:
                 prompt_text = (
                     f"{summary_prompt.rstrip()}\n\n"
-                    f"
+                    f"AGENT CONVERSATION TO BE SUMMARIZED:\n"
+                    f"{conversation_text}"
                 )
             else:
                 prompt_text = (
@@ -1138,7 +1492,13 @@
                 )
 
             try:
-                response = self._context_summary_agent.step(prompt_text)
+                # Use structured output if response_format is provided
+                if response_format:
+                    response = self._context_summary_agent.step(
+                        prompt_text, response_format=response_format
+                    )
+                else:
+                    response = self._context_summary_agent.step(prompt_text)
             except Exception as step_exc:
                 error_message = (
                     f"Failed to generate summary using model: {step_exc}"
@@ -1167,7 +1527,7 @@
             )
             base_filename = Path(base_filename).with_suffix("").name
 
-            metadata =
+            metadata = context_util.get_session_metadata()
             metadata.update(
                 {
                     "agent_id": self.agent_id,
@@ -1175,25 +1535,38 @@
                 }
             )
 
-            save_status = self._context_utility.save_markdown_file(
+            # Handle structured output if response_format was provided
+            structured_output = None
+            if response_format and response.msgs[-1].parsed:
+                structured_output = response.msgs[-1].parsed
+                # Convert structured output to custom markdown
+                summary_content = context_util.structured_output_to_markdown(
+                    structured_data=structured_output, metadata=metadata
+                )
+
+            # Save the markdown (either custom structured or default)
+            save_status = context_util.save_markdown_file(
                 base_filename,
                 summary_content,
-                title="Conversation Summary"
-
+                title="Conversation Summary"
+                if not structured_output
+                else None,
+                metadata=metadata if not structured_output else None,
             )
 
             file_path = (
-                self._context_utility.get_working_directory()
-                / f"{base_filename}.md"
+                context_util.get_working_directory() / f"{base_filename}.md"
             )
 
-            result.update(
-                {
-                    "summary": summary_content,
-                    "file_path": str(file_path),
-                    "status": save_status,
-                }
-            )
+            # Prepare result dictionary
+            result_dict = {
+                "summary": summary_content,
+                "file_path": str(file_path),
+                "status": save_status,
+                "structured_summary": structured_output,
+            }
+
+            result.update(result_dict)
             logger.info("Conversation summary saved to %s", file_path)
             return result
 
@@ -1210,6 +1583,8 @@
             None
         """
         self.memory.clear()
+        if self._tool_output_cache_enabled:
+            self._tool_output_history.clear()
 
         if self.system_message is not None:
             self.update_memory(self.system_message, OpenAIBackendRole.SYSTEM)
@@ -1246,8 +1621,6 @@
         r"""Initializes the stored messages list with the current system
         message.
         """
-        import time
-
         self.memory.clear()
         # avoid UserWarning: The `ChatHistoryMemory` is empty.
         if self.system_message is not None:
@@ -1260,6 +1633,17 @@
             )
         )
 
+    def reset_to_original_system_message(self) -> None:
+        r"""Reset system message to original, removing any appended context.
+
+        This method reverts the agent's system message back to its original
+        state, removing any workflow context or other modifications that may
+        have been appended. Useful for resetting agent state in multi-turn
+        scenarios.
+        """
+        self._system_message = self._original_system_message
+        self.init_messages()
+
     def record_message(self, message: BaseMessage) -> None:
         r"""Records the externally provided message into the agent memory as if
         it were an answer of the :obj:`ChatAgent` from the backend. Currently,
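A brief sketch of when the new reset helper is useful, reusing `agent` from the earlier sketch:

# Drop any workflow context appended to the system prompt between tasks,
# then clear conversation state for the next run.
agent.reset_to_original_system_message()
agent.reset()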
@@ -1321,7 +1705,7 @@
 
         # Create a prompt based on the schema
         format_instruction = (
-            "\n\nPlease respond in the following JSON format:\n
+            "\n\nPlease respond in the following JSON format:\n{\n"
         )
 
         properties = schema.get("properties", {})
@@ -1420,8 +1804,6 @@
         Returns:
             bool: True if called from a RegisteredAgentToolkit, False otherwise
         """
-        import inspect
-
         from camel.toolkits.base import RegisteredAgentToolkit
 
         try:
@@ -1453,7 +1835,6 @@
         try:
             # Try to extract JSON from the response content
             import json
-            import re
 
             from pydantic import ValidationError
 
@@ -1492,8 +1873,7 @@
 
             if not message.parsed:
                 logger.warning(
-                    f"Failed to parse JSON from response: "
-                    f"{content}"
+                    f"Failed to parse JSON from response: {content}"
                 )
 
         except Exception as e:
@@ -1665,8 +2045,13 @@
 
         while True:
             if self.pause_event is not None and not self.pause_event.is_set():
-                while not self.pause_event.is_set():
-                    time.sleep(0.001)
+                # Use efficient blocking wait for threading.Event
+                if isinstance(self.pause_event, threading.Event):
+                    self.pause_event.wait()
+                else:
+                    # Fallback for asyncio.Event in sync context
+                    while not self.pause_event.is_set():
+                        time.sleep(0.001)
 
             try:
                 openai_messages, num_tokens = self.memory.get_context()
@@ -1698,7 +2083,7 @@
             if self.stop_event and self.stop_event.is_set():
                 # Use the _step_terminate to terminate the agent with reason
                 logger.info(
-                    f"Termination triggered at iteration
+                    f"Termination triggered at iteration {iteration_count}"
                 )
                 return self._step_terminate(
                     accumulated_context_tokens,
@@ -1721,8 +2106,11 @@
                     self.pause_event is not None
                     and not self.pause_event.is_set()
                 ):
-                    while not self.pause_event.is_set():
-                        time.sleep(0.001)
+                    if isinstance(self.pause_event, threading.Event):
+                        self.pause_event.wait()
+                    else:
+                        while not self.pause_event.is_set():
+                            time.sleep(0.001)
                 result = self._execute_tool(tool_call_request)
                 tool_call_records.append(result)
 
@@ -1879,7 +2267,12 @@
         prev_num_openai_messages: int = 0
         while True:
             if self.pause_event is not None and not self.pause_event.is_set():
-                await self.pause_event.wait()
+                if isinstance(self.pause_event, asyncio.Event):
+                    await self.pause_event.wait()
+                elif isinstance(self.pause_event, threading.Event):
+                    # For threading.Event in async context, run in executor
+                    loop = asyncio.get_event_loop()
+                    await loop.run_in_executor(None, self.pause_event.wait)
             try:
                 openai_messages, num_tokens = self.memory.get_context()
                 accumulated_context_tokens += num_tokens
@@ -1909,7 +2302,7 @@
             if self.stop_event and self.stop_event.is_set():
                 # Use the _step_terminate to terminate the agent with reason
                 logger.info(
-                    f"Termination triggered at iteration
+                    f"Termination triggered at iteration {iteration_count}"
                 )
                 return self._step_terminate(
                     accumulated_context_tokens,
@@ -1932,7 +2325,13 @@
                     self.pause_event is not None
                     and not self.pause_event.is_set()
                 ):
-                    await self.pause_event.wait()
+                    if isinstance(self.pause_event, asyncio.Event):
+                        await self.pause_event.wait()
+                    elif isinstance(self.pause_event, threading.Event):
+                        loop = asyncio.get_event_loop()
+                        await loop.run_in_executor(
+                            None, self.pause_event.wait
+                        )
                 tool_call_record = await self._aexecute_tool(
                     tool_call_request
                 )
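The pause contract implemented above: a *clear* event means "paused", a *set* event means "running". A minimal sketch with a threading.Event, matching the earlier constructor sketch:

import threading
import time

pause = threading.Event()
pause.set()  # agent runs while the event is set

def pause_briefly():
    pause.clear()  # agent blocks at its next pause checkpoint
    time.sleep(2)
    pause.set()    # agent resumes

threading.Thread(target=pause_briefly, daemon=True).start()
# ... agent.step(...) on the main thread observes the pause window ...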
@@ -2185,11 +2584,6 @@
         Returns:
             List[OpenAIMessage]: The sanitized OpenAI messages.
         """
-        import hashlib
-        import os
-        import re
-        import tempfile
-
         # Create a copy of messages for logging to avoid modifying the
         # original messages
         sanitized_messages = []
@@ -2230,7 +2624,14 @@
 
                         # Save image to temp directory for viewing
                         try:
-                            img_filename = f"image_{img_hash}.{img_format}"
+                            # Sanitize img_format to prevent path
+                            # traversal
+                            safe_format = re.sub(
+                                r'[^a-zA-Z0-9]', '', img_format
+                            )[:10]
+                            img_filename = (
+                                f"image_{img_hash}.{safe_format}"
+                            )
 
                             temp_dir = tempfile.gettempdir()
                             img_path = os.path.join(
@@ -2245,6 +2646,9 @@
                                     base64_data
                                 )
                             )
+                            # Register for cleanup
+                            with _temp_files_lock:
+                                _temp_files.add(img_path)
 
                             # Create a file:// URL that can be
                             # opened
@@ -2497,7 +2901,8 @@
         try:
             raw_result = tool(**args)
             if self.mask_tool_output:
-                self._secure_result_store[tool_call_id] = raw_result
+                with self._secure_result_store_lock:
+                    self._secure_result_store[tool_call_id] = raw_result
                 result = (
                     "[The tool has been executed successfully, but the output"
                     " from the tool is masked. You can move forward]"
@@ -2555,7 +2960,7 @@
             # Capture the error message to prevent framework crash
             error_msg = f"Error executing async tool '{func_name}': {e!s}"
             result = f"Tool execution failed: {error_msg}"
-
+            logger.warning(error_msg)
         return self._record_tool_calling(func_name, args, result, tool_call_id)
 
     def _record_tool_calling(
@@ -2606,20 +3011,22 @@
         # This ensures the assistant message (tool call) always appears before
         # the function message (tool result) in the conversation context
         # Use time.time_ns() for nanosecond precision to avoid collisions
-        import time
-
         current_time_ns = time.time_ns()
         base_timestamp = current_time_ns / 1_000_000_000  # Convert to seconds
 
         self.update_memory(
-            assist_msg,
+            assist_msg,
+            OpenAIBackendRole.ASSISTANT,
+            timestamp=base_timestamp,
+            return_records=self._tool_output_cache_enabled,
         )
 
         # Add minimal increment to ensure function message comes after
-        self.update_memory(
+        func_records = self.update_memory(
            func_msg,
            OpenAIBackendRole.FUNCTION,
            timestamp=base_timestamp + 1e-6,
+           return_records=self._tool_output_cache_enabled,
        )
 
        # Record information about this tool call
@@ -2630,6 +3037,20 @@
             tool_call_id=tool_call_id,
         )
 
+        if (
+            self._tool_output_cache_enabled
+            and not mask_output
+            and func_records
+            and self._tool_output_cache_manager is not None
+        ):
+            serialized_result = self._serialize_tool_result(result)
+            self._register_tool_output_for_cache(
+                func_name,
+                tool_call_id,
+                serialized_result,
+                cast(List[MemoryRecord], func_records),
+            )
+
         return tool_record
 
     def _stream(
@@ -2698,7 +3119,7 @@
             # Check termination condition
             if self.stop_event and self.stop_event.is_set():
                 logger.info(
-                    f"Termination triggered at iteration
+                    f"Termination triggered at iteration {iteration_count}"
                 )
                 yield self._step_terminate(
                     num_tokens, tool_call_records, "termination_triggered"
@@ -3067,72 +3488,70 @@
         accumulated_tool_calls: Dict[str, Any],
         tool_call_records: List[ToolCallingRecord],
     ) -> Generator[ChatAgentResponse, None, None]:
-        r"""Execute multiple tools synchronously with
-
-        non-blocking status streaming."""
-
-        def tool_worker(result_queue, tool_call_data):
-            try:
-                tool_call_record = self._execute_tool_from_stream_data(
-                    tool_call_data
-                )
-                result_queue.put(tool_call_record)
-            except Exception as e:
-                logger.error(f"Error in threaded tool execution: {e}")
-                result_queue.put(None)
+        r"""Execute multiple tools synchronously with proper content
+        accumulation, using ThreadPoolExecutor for better timeout handling."""
 
         tool_calls_to_execute = []
         for _tool_call_index, tool_call_data in accumulated_tool_calls.items():
             if tool_call_data.get('complete', False):
                 tool_calls_to_execute.append(tool_call_data)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                f"Calling function: {function_name} with arguments: {args}"
-            )
-
-            # wait for tool thread to finish with optional timeout
-            thread.join(self.tool_execution_timeout)
+        if not tool_calls_to_execute:
+            # No tools to execute, return immediately
+            return
+            yield  # Make this a generator
+
+        # Execute tools using ThreadPoolExecutor for proper timeout handling
+        # Use max_workers=len() for parallel execution, with min of 1
+        with concurrent.futures.ThreadPoolExecutor(
+            max_workers=max(1, len(tool_calls_to_execute))
+        ) as executor:
+            # Submit all tools first (parallel execution)
+            futures_map = {}
+            for tool_call_data in tool_calls_to_execute:
+                function_name = tool_call_data['function']['name']
+                try:
+                    args = json.loads(tool_call_data['function']['arguments'])
+                except json.JSONDecodeError:
+                    args = tool_call_data['function']['arguments']
 
-
-
-
-                logger.warning(
-                    f"Function '{function_name}' timed out after "
-                    f"{self.tool_execution_timeout} seconds"
+                # Log debug info
+                logger.info(
+                    f"Calling function: {function_name} with arguments: {args}"
                 )
 
-                #
-
-
-
-
-
-
-
-
+                # Submit tool execution (non-blocking)
+                future = executor.submit(
+                    self._execute_tool_from_stream_data, tool_call_data
+                )
+                futures_map[future] = (function_name, tool_call_data)
+
+            # Wait for all futures to complete (or timeout)
+            for future in concurrent.futures.as_completed(
+                futures_map.keys(),
+                timeout=self.tool_execution_timeout
+                if self.tool_execution_timeout
+                else None,
+            ):
+                function_name, tool_call_data = futures_map[future]
 
-
-
-
-
-
+                try:
+                    tool_call_record = future.result()
+                    if tool_call_record:
+                        tool_call_records.append(tool_call_record)
+                        logger.info(
+                            f"Function output: {tool_call_record.result}"
+                        )
+                except concurrent.futures.TimeoutError:
+                    logger.warning(
+                        f"Function '{function_name}' timed out after "
+                        f"{self.tool_execution_timeout} seconds"
+                    )
+                    future.cancel()
+                except Exception as e:
+                    logger.error(
+                        f"Error executing tool '{function_name}': {e}"
+                    )
 
         # Ensure this function remains a generator (required by type signature)
         return
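A standalone sketch of the executor pattern the rewrite uses: submit every call up front, then bound the wait with as_completed(timeout=...). Note the timeout is an overall deadline for the batch, not a per-tool limit:

import concurrent.futures
import time

def slow_tool(name: str, delay: float) -> str:
    time.sleep(delay)
    return f"{name} done"

calls = [("a", 0.1), ("b", 0.2), ("c", 5.0)]
with concurrent.futures.ThreadPoolExecutor(max_workers=len(calls)) as pool:
    futures = {pool.submit(slow_tool, n, d): n for n, d in calls}
    try:
        for fut in concurrent.futures.as_completed(futures, timeout=1.0):
            print(futures[fut], "->", fut.result())
    except concurrent.futures.TimeoutError:
        # Raised once the deadline passes; unfinished futures keep running
        # unless cancelled.
        print("timed out waiting for remaining tools")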
@@ -3177,8 +3596,6 @@
 
         # Record both messages with precise timestamps to ensure
         # correct ordering
-        import time
-
         current_time_ns = time.time_ns()
         base_timestamp = (
             current_time_ns / 1_000_000_000
@@ -3207,7 +3624,7 @@
                 f"Error executing tool '{function_name}': {e!s}"
             )
             result = {"error": error_msg}
-
+            logger.warning(error_msg)
 
         # Record error response
         func_msg = FunctionCallingMessage(
@@ -3302,8 +3719,6 @@
 
         # Record both messages with precise timestamps to ensure
         # correct ordering
-        import time
-
         current_time_ns = time.time_ns()
         base_timestamp = (
             current_time_ns / 1_000_000_000
@@ -3332,7 +3747,7 @@
                 f"Error executing async tool '{function_name}': {e!s}"
             )
             result = {"error": error_msg}
-
+            logger.warning(error_msg)
 
         # Record error response
         func_msg = FunctionCallingMessage(
@@ -3442,7 +3857,7 @@
             # Check termination condition
             if self.stop_event and self.stop_event.is_set():
                 logger.info(
-                    f"Termination triggered at iteration
+                    f"Termination triggered at iteration {iteration_count}"
                 )
                 yield self._step_terminate(
                     num_tokens, tool_call_records, "termination_triggered"
@@ -3954,10 +4369,12 @@
             configuration.
         """
         # Create a new instance with the same configuration
-        # If with_memory is True, set system_message to None
-        #
+        # If with_memory is True, set system_message to None (it will be
+        # copied from memory below, including any workflow context)
+        # If with_memory is False, use the current system message
+        # (which may include appended workflow context)
         # To avoid duplicated system memory.
-        system_message = None if with_memory else self.
+        system_message = None if with_memory else self._system_message
 
         # Clone tools and collect toolkits that need registration
         cloned_tools, toolkits_to_register = self._clone_tools()
@@ -3985,6 +4402,9 @@
             tool_execution_timeout=self.tool_execution_timeout,
             pause_event=self.pause_event,
             prune_tool_calls_from_memory=self.prune_tool_calls_from_memory,
+            enable_tool_output_cache=self._tool_output_cache_enabled,
+            tool_output_cache_threshold=self._tool_output_cache_threshold,
+            tool_output_cache_dir=self._tool_output_cache_dir,
             stream_accumulate=self.stream_accumulate,
         )