camel-ai 0.2.76a14__py3-none-any.whl → 0.2.78__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -14,15 +14,22 @@
 from __future__ import annotations
 
 import asyncio
+import atexit
+import base64
 import concurrent.futures
+import hashlib
+import inspect
 import json
-import logging
-import queue
+import math
+import os
 import random
+import re
+import tempfile
 import textwrap
 import threading
 import time
 import uuid
+import warnings
 from datetime import datetime
 from pathlib import Path
 from typing import (
@@ -101,10 +108,24 @@ if TYPE_CHECKING:
 
 logger = get_logger(__name__)
 
+# Cleanup temp files on exit
+_temp_files: Set[str] = set()
+_temp_files_lock = threading.Lock()
+
+
+def _cleanup_temp_files():
+    with _temp_files_lock:
+        for path in _temp_files:
+            try:
+                os.unlink(path)
+            except Exception:
+                pass
+
+
+atexit.register(_cleanup_temp_files)
+
 # AgentOps decorator setting
 try:
-    import os
-
     if os.getenv("AGENTOPS_API_KEY") is not None:
         from agentops import track_agent
     else:
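
The new module-level registry above collects temp files written during image logging so they are deleted at interpreter exit. A minimal standalone sketch of the same pattern (names mirror the diff; the __main__ demo is illustrative):

import atexit
import os
import tempfile
import threading
from typing import Set

_temp_files: Set[str] = set()
_temp_files_lock = threading.Lock()


def _cleanup_temp_files() -> None:
    # Runs once at interpreter exit; files already removed are ignored.
    with _temp_files_lock:
        for path in _temp_files:
            try:
                os.unlink(path)
            except Exception:
                pass


atexit.register(_cleanup_temp_files)

if __name__ == "__main__":
    # Any thread may register a file; the lock guards the shared set.
    fd, path = tempfile.mkstemp(suffix=".png")
    os.close(fd)
    with _temp_files_lock:
        _temp_files.add(path)
    print(f"{path} will be removed at exit")
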
@@ -194,13 +215,10 @@ class StreamingChatAgentResponse:
     def _ensure_latest_response(self):
         r"""Ensure we have the latest response by consuming the generator."""
         if not self._consumed:
-            try:
-                for response in self._generator:
-                    self._responses.append(response)
-                    self._current_response = response
-                self._consumed = True
-            except StopIteration:
-                self._consumed = True
+            for response in self._generator:
+                self._responses.append(response)
+                self._current_response = response
+            self._consumed = True
 
     @property
     def msgs(self) -> List[BaseMessage]:
@@ -241,14 +259,11 @@ class StreamingChatAgentResponse:
             yield from self._responses
         else:
             # If not consumed, consume and yield
-            try:
-                for response in self._generator:
-                    self._responses.append(response)
-                    self._current_response = response
-                    yield response
-                self._consumed = True
-            except StopIteration:
-                self._consumed = True
+            for response in self._generator:
+                self._responses.append(response)
+                self._current_response = response
+                yield response
+            self._consumed = True
 
     def __getattr__(self, name):
         r"""Forward any other attribute access to the latest response."""
@@ -279,13 +294,10 @@ class AsyncStreamingChatAgentResponse:
     async def _ensure_latest_response(self):
         r"""Ensure the latest response by consuming the async generator."""
         if not self._consumed:
-            try:
-                async for response in self._async_generator:
-                    self._responses.append(response)
-                    self._current_response = response
-                self._consumed = True
-            except StopAsyncIteration:
-                self._consumed = True
+            async for response in self._async_generator:
+                self._responses.append(response)
+                self._current_response = response
+            self._consumed = True
 
     async def _get_final_response(self) -> ChatAgentResponse:
         r"""Get the final response after consuming the entire stream."""
@@ -311,14 +323,11 @@ class AsyncStreamingChatAgentResponse:
         else:
             # If not consumed, consume and yield
             async def _consume_and_yield():
-                try:
-                    async for response in self._async_generator:
-                        self._responses.append(response)
-                        self._current_response = response
-                        yield response
-                    self._consumed = True
-                except StopAsyncIteration:
-                    self._consumed = True
+                async for response in self._async_generator:
+                    self._responses.append(response)
+                    self._current_response = response
+                    yield response
+                self._consumed = True
 
             return _consume_and_yield()
 
@@ -386,9 +395,10 @@ class ChatAgent(BaseAgent):
             for individual tool execution. If None, wait indefinitely.
         mask_tool_output (Optional[bool]): Whether to return a sanitized
             placeholder instead of the raw tool output. (default: :obj:`False`)
-        pause_event (Optional[asyncio.Event]): Event to signal pause of the
-            agent's operation. When clear, the agent will pause its execution.
-            (default: :obj:`None`)
+        pause_event (Optional[Union[threading.Event, asyncio.Event]]): Event to
+            signal pause of the agent's operation. When clear, the agent will
+            pause its execution. Use threading.Event for sync operations or
+            asyncio.Event for async operations. (default: :obj:`None`)
         prune_tool_calls_from_memory (bool): Whether to clean tool
             call messages from memory after response generation to save token
             usage. When enabled, removes FUNCTION/TOOL role messages and
@@ -443,7 +453,7 @@ class ChatAgent(BaseAgent):
         stop_event: Optional[threading.Event] = None,
         tool_execution_timeout: Optional[float] = None,
         mask_tool_output: bool = False,
-        pause_event: Optional[asyncio.Event] = None,
+        pause_event: Optional[Union[threading.Event, asyncio.Event]] = None,
         prune_tool_calls_from_memory: bool = False,
         retry_attempts: int = 3,
         retry_delay: float = 1.0,
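
With this signature change, a synchronous run can be paused with a plain threading.Event instead of busy-waiting on an asyncio.Event. A hedged usage sketch (model setup elided; treat the constructor arguments as illustrative):

import threading

from camel.agents import ChatAgent

pause_event = threading.Event()
pause_event.set()  # set = running; clear = paused

agent = ChatAgent(
    system_message="You are a helpful assistant.",
    pause_event=pause_event,
)

# From a controller thread:
pause_event.clear()  # the agent blocks in pause_event.wait() before its next step
# ... later ...
pause_event.set()    # the agent resumes
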
@@ -532,6 +542,7 @@ class ChatAgent(BaseAgent):
         self.tool_execution_timeout = tool_execution_timeout
         self.mask_tool_output = mask_tool_output
         self._secure_result_store: Dict[str, Any] = {}
+        self._secure_result_store_lock = threading.Lock()
         self.pause_event = pause_event
         self.prune_tool_calls_from_memory = prune_tool_calls_from_memory
         self.retry_attempts = max(1, retry_attempts)
@@ -728,6 +739,20 @@ class ChatAgent(BaseAgent):
         # Ensure the new memory has the system message
         self.init_messages()
 
+    def set_context_utility(
+        self, context_utility: Optional[ContextUtility]
+    ) -> None:
+        r"""Set the context utility for the agent.
+
+        This allows external components (like SingleAgentWorker) to provide
+        a shared context utility instance for workflow management.
+
+        Args:
+            context_utility (ContextUtility, optional): The context utility
+                to use. If None, the agent will create its own when needed.
+        """
+        self._context_utility = context_utility
+
     def _get_full_tool_schemas(self) -> List[Dict[str, Any]]:
         r"""Returns a list of tool schemas of all tools, including internal
         and external tools.
@@ -817,9 +842,6 @@ class ChatAgent(BaseAgent):
             (default: :obj:`None`)
             (default: obj:`None`)
         """
-        import math
-        import time
-        import uuid as _uuid
 
         # 1. Helper to write a record to memory
         def _write_single_record(
@@ -854,7 +876,6 @@ class ChatAgent(BaseAgent):
             current_tokens = token_counter.count_tokens_from_messages(
                 [message.to_openai_message(role)]
             )
-            import warnings
 
             with warnings.catch_warnings():
                 warnings.filterwarnings("ignore", category=EmptyMemoryWarning)
@@ -928,7 +949,7 @@ class ChatAgent(BaseAgent):
 
         # 4. Calculate how many chunks we will need with this body size.
         num_chunks = math.ceil(len(all_token_ids) / chunk_body_limit)
-        group_id = str(_uuid.uuid4())
+        group_id = str(uuid.uuid4())
 
         for i in range(num_chunks):
             start_idx = i * chunk_body_limit
@@ -1050,6 +1071,7 @@ class ChatAgent(BaseAgent):
         self,
         filename: Optional[str] = None,
         summary_prompt: Optional[str] = None,
+        response_format: Optional[Type[BaseModel]] = None,
         working_directory: Optional[Union[str, Path]] = None,
     ) -> Dict[str, Any]:
         r"""Summarize the agent's current conversation context and persist it
@@ -1062,13 +1084,18 @@ class ChatAgent(BaseAgent):
             summary_prompt (Optional[str]): Custom prompt for the summarizer.
                 When omitted, a default prompt highlighting key decisions,
                 action items, and open questions is used.
+            response_format (Optional[Type[BaseModel]]): A Pydantic model
+                defining the expected structure of the response. If provided,
+                the summary will be generated as structured output and included
+                in the result.
             working_directory (Optional[str|Path]): Optional directory to save
                 the markdown summary file. If provided, overrides the default
                 directory used by ContextUtility.
 
         Returns:
             Dict[str, Any]: A dictionary containing the summary text, file
-                path, and status message.
+                path, status message, and optionally structured_summary if
+                response_format was provided.
         """
 
         result: Dict[str, Any] = {
@@ -1078,6 +1105,7 @@ class ChatAgent(BaseAgent):
         }
 
         try:
+            # Use external context if set, otherwise create local one
             if self._context_utility is None:
                 if working_directory is not None:
                     self._context_utility = ContextUtility(
@@ -1085,6 +1113,7 @@ class ChatAgent(BaseAgent):
                     )
                 else:
                     self._context_utility = ContextUtility()
+            context_util = self._context_utility
 
             # Get conversation directly from agent's memory
             messages, _ = self.memory.get_context()
@@ -1101,7 +1130,58 @@ class ChatAgent(BaseAgent):
             for message in messages:
                 role = message.get('role', 'unknown')
                 content = message.get('content', '')
-                if content:
+
+                # Handle tool call messages (assistant calling tools)
+                tool_calls = message.get('tool_calls')
+                if tool_calls and isinstance(tool_calls, (list, tuple)):
+                    for tool_call in tool_calls:
+                        # Handle both dict and object formats
+                        if isinstance(tool_call, dict):
+                            func_name = tool_call.get('function', {}).get(
+                                'name', 'unknown_tool'
+                            )
+                            func_args_str = tool_call.get('function', {}).get(
+                                'arguments', '{}'
+                            )
+                        else:
+                            # Handle object format (Pydantic or similar)
+                            func_name = getattr(
+                                getattr(tool_call, 'function', None),
+                                'name',
+                                'unknown_tool',
+                            )
+                            func_args_str = getattr(
+                                getattr(tool_call, 'function', None),
+                                'arguments',
+                                '{}',
+                            )
+
+                        # Parse and format arguments for readability
+                        try:
+                            import json
+
+                            args_dict = json.loads(func_args_str)
+                            args_formatted = ', '.join(
+                                f"{k}={v}" for k, v in args_dict.items()
+                            )
+                        except (json.JSONDecodeError, ValueError, TypeError):
+                            args_formatted = func_args_str
+
+                        conversation_lines.append(
+                            f"[TOOL CALL] {func_name}({args_formatted})"
+                        )
+
+                # Handle tool response messages
+                elif role == 'tool':
+                    tool_name = message.get('name', 'unknown_tool')
+                    if not content:
+                        content = str(message.get('content', ''))
+                    conversation_lines.append(
+                        f"[TOOL RESULT] {tool_name} → {content}"
+                    )
+
+                # Handle regular content messages (user/assistant/system)
+                elif content:
                     conversation_lines.append(f"{role}: {content}")
 
             conversation_text = "\n".join(conversation_lines).strip()
@@ -1117,7 +1197,7 @@ class ChatAgent(BaseAgent):
                 self._context_summary_agent = ChatAgent(
                     system_message=(
                         "You are a helpful assistant that summarizes "
-                        "conversations into concise markdown bullet lists."
+                        "conversations"
                     ),
                     model=self.model_backend,
                     agent_id=f"{self.agent_id}_context_summarizer",
@@ -1128,7 +1208,8 @@ class ChatAgent(BaseAgent):
             if summary_prompt:
                 prompt_text = (
                     f"{summary_prompt.rstrip()}\n\n"
-                    f"Context information:\n{conversation_text}"
+                    f"AGENT CONVERSATION TO BE SUMMARIZED:\n"
+                    f"{conversation_text}"
                 )
             else:
                 prompt_text = (
@@ -1138,7 +1219,13 @@ class ChatAgent(BaseAgent):
                 )
 
             try:
-                response = self._context_summary_agent.step(prompt_text)
+                # Use structured output if response_format is provided
+                if response_format:
+                    response = self._context_summary_agent.step(
+                        prompt_text, response_format=response_format
+                    )
+                else:
+                    response = self._context_summary_agent.step(prompt_text)
             except Exception as step_exc:
                 error_message = (
                     f"Failed to generate summary using model: {step_exc}"
@@ -1167,7 +1254,7 @@ class ChatAgent(BaseAgent):
                 )
                 base_filename = Path(base_filename).with_suffix("").name
 
-            metadata = self._context_utility.get_session_metadata()
+            metadata = context_util.get_session_metadata()
             metadata.update(
                 {
                     "agent_id": self.agent_id,
@@ -1175,25 +1262,38 @@ class ChatAgent(BaseAgent):
                 }
             )
 
-            save_status = self._context_utility.save_markdown_file(
+            # Handle structured output if response_format was provided
+            structured_output = None
+            if response_format and response.msgs[-1].parsed:
+                structured_output = response.msgs[-1].parsed
+                # Convert structured output to custom markdown
+                summary_content = context_util.structured_output_to_markdown(
+                    structured_data=structured_output, metadata=metadata
+                )
+
+            # Save the markdown (either custom structured or default)
+            save_status = context_util.save_markdown_file(
                 base_filename,
                 summary_content,
-                title="Conversation Summary",
-                metadata=metadata,
+                title="Conversation Summary"
+                if not structured_output
+                else None,
+                metadata=metadata if not structured_output else None,
             )
 
             file_path = (
-                self._context_utility.get_working_directory()
-                / f"{base_filename}.md"
+                context_util.get_working_directory() / f"{base_filename}.md"
             )
 
-            result.update(
-                {
-                    "summary": summary_content,
-                    "file_path": str(file_path),
-                    "status": save_status,
-                }
-            )
+            # Prepare result dictionary
+            result_dict = {
+                "summary": summary_content,
+                "file_path": str(file_path),
+                "status": save_status,
+                "structured_summary": structured_output,
+            }
+
+            result.update(result_dict)
             logger.info("Conversation summary saved to %s", file_path)
             return result
 
@@ -1246,8 +1346,6 @@ class ChatAgent(BaseAgent):
         r"""Initializes the stored messages list with the current system
         message.
         """
-        import time
-
         self.memory.clear()
         # avoid UserWarning: The `ChatHistoryMemory` is empty.
         if self.system_message is not None:
@@ -1260,6 +1358,17 @@ class ChatAgent(BaseAgent):
                 )
             )
 
+    def reset_to_original_system_message(self) -> None:
+        r"""Reset system message to original, removing any appended context.
+
+        This method reverts the agent's system message back to its original
+        state, removing any workflow context or other modifications that may
+        have been appended. Useful for resetting agent state in multi-turn
+        scenarios.
+        """
+        self._system_message = self._original_system_message
+        self.init_messages()
+
     def record_message(self, message: BaseMessage) -> None:
         r"""Records the externally provided message into the agent memory as if
         it were an answer of the :obj:`ChatAgent` from the backend. Currently,
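
A short illustration of the new reset helper in a multi-turn loop, assuming workflow context was appended to the system message earlier in the session:

# After a task that appended workflow context to the system prompt:
agent.reset_to_original_system_message()
# Memory is re-initialized with the original system message only;
# the appended context is discarded.
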
@@ -1321,7 +1430,7 @@ class ChatAgent(BaseAgent):
 
         # Create a prompt based on the schema
         format_instruction = (
-            "\n\nPlease respond in the following JSON format:\n" "{\n"
+            "\n\nPlease respond in the following JSON format:\n{\n"
         )
 
         properties = schema.get("properties", {})
@@ -1420,8 +1529,6 @@ class ChatAgent(BaseAgent):
         Returns:
             bool: True if called from a RegisteredAgentToolkit, False otherwise
         """
-        import inspect
-
         from camel.toolkits.base import RegisteredAgentToolkit
 
         try:
@@ -1453,7 +1560,6 @@ class ChatAgent(BaseAgent):
         try:
             # Try to extract JSON from the response content
             import json
-            import re
 
             from pydantic import ValidationError
 
@@ -1492,8 +1598,7 @@ class ChatAgent(BaseAgent):
 
             if not message.parsed:
                 logger.warning(
-                    f"Failed to parse JSON from response: "
-                    f"{content}"
+                    f"Failed to parse JSON from response: {content}"
                 )
 
         except Exception as e:
@@ -1665,8 +1770,13 @@ class ChatAgent(BaseAgent):
 
         while True:
             if self.pause_event is not None and not self.pause_event.is_set():
-                while not self.pause_event.is_set():
-                    time.sleep(0.001)
+                # Use efficient blocking wait for threading.Event
+                if isinstance(self.pause_event, threading.Event):
+                    self.pause_event.wait()
+                else:
+                    # Fallback for asyncio.Event in sync context
+                    while not self.pause_event.is_set():
+                        time.sleep(0.001)
 
             try:
                 openai_messages, num_tokens = self.memory.get_context()
@@ -1698,7 +1808,7 @@ class ChatAgent(BaseAgent):
             if self.stop_event and self.stop_event.is_set():
                 # Use the _step_terminate to terminate the agent with reason
                 logger.info(
-                    f"Termination triggered at iteration " f"{iteration_count}"
+                    f"Termination triggered at iteration {iteration_count}"
                 )
                 return self._step_terminate(
                     accumulated_context_tokens,
@@ -1721,8 +1831,11 @@ class ChatAgent(BaseAgent):
                     self.pause_event is not None
                     and not self.pause_event.is_set()
                 ):
-                    while not self.pause_event.is_set():
-                        time.sleep(0.001)
+                    if isinstance(self.pause_event, threading.Event):
+                        self.pause_event.wait()
+                    else:
+                        while not self.pause_event.is_set():
+                            time.sleep(0.001)
                 result = self._execute_tool(tool_call_request)
                 tool_call_records.append(result)
 
@@ -1879,7 +1992,12 @@ class ChatAgent(BaseAgent):
         prev_num_openai_messages: int = 0
         while True:
             if self.pause_event is not None and not self.pause_event.is_set():
-                await self.pause_event.wait()
+                if isinstance(self.pause_event, asyncio.Event):
+                    await self.pause_event.wait()
+                elif isinstance(self.pause_event, threading.Event):
+                    # For threading.Event in async context, run in executor
+                    loop = asyncio.get_event_loop()
+                    await loop.run_in_executor(None, self.pause_event.wait)
             try:
                 openai_messages, num_tokens = self.memory.get_context()
                 accumulated_context_tokens += num_tokens
@@ -1909,7 +2027,7 @@ class ChatAgent(BaseAgent):
             if self.stop_event and self.stop_event.is_set():
                 # Use the _step_terminate to terminate the agent with reason
                 logger.info(
-                    f"Termination triggered at iteration " f"{iteration_count}"
+                    f"Termination triggered at iteration {iteration_count}"
                 )
                 return self._step_terminate(
                     accumulated_context_tokens,
@@ -1932,7 +2050,13 @@ class ChatAgent(BaseAgent):
                     self.pause_event is not None
                     and not self.pause_event.is_set()
                 ):
-                    await self.pause_event.wait()
+                    if isinstance(self.pause_event, asyncio.Event):
+                        await self.pause_event.wait()
+                    elif isinstance(self.pause_event, threading.Event):
+                        loop = asyncio.get_event_loop()
+                        await loop.run_in_executor(
+                            None, self.pause_event.wait
+                        )
                 tool_call_record = await self._aexecute_tool(
                     tool_call_request
                 )
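
On the async path an asyncio.Event is awaited directly, while a threading.Event is pushed to an executor so the event loop is never blocked. A hedged sketch of pausing and resuming an async run (astep() is camel's async step API; the task layout is illustrative):

import asyncio

from camel.agents import ChatAgent


async def main() -> None:
    pause_event = asyncio.Event()
    pause_event.set()  # start unpaused

    agent = ChatAgent(
        system_message="You are a helpful assistant.",
        pause_event=pause_event,
    )
    task = asyncio.create_task(agent.astep("Plan a three-step experiment."))

    pause_event.clear()       # the agent parks on `await pause_event.wait()`
    await asyncio.sleep(1.0)  # ... do other work while paused ...
    pause_event.set()         # resume

    response = await task
    print(response.msgs[0].content)


asyncio.run(main())
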
@@ -2185,11 +2309,6 @@ class ChatAgent(BaseAgent):
         Returns:
             List[OpenAIMessage]: The sanitized OpenAI messages.
         """
-        import hashlib
-        import os
-        import re
-        import tempfile
-
         # Create a copy of messages for logging to avoid modifying the
         # original messages
         sanitized_messages = []
@@ -2230,7 +2349,14 @@ class ChatAgent(BaseAgent):
 
                         # Save image to temp directory for viewing
                         try:
-                            import base64
+                            # Sanitize img_format to prevent path
+                            # traversal
+                            safe_format = re.sub(
+                                r'[^a-zA-Z0-9]', '', img_format
+                            )[:10]
+                            img_filename = (
+                                f"image_{img_hash}.{safe_format}"
+                            )
 
                             temp_dir = tempfile.gettempdir()
                             img_path = os.path.join(
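
The sanitization step in isolation, showing why stripping non-alphanumerics and capping the length keeps a hostile format string from escaping the temp directory (the inputs are made up):

import re

for img_format in ("png", "jpeg", "../../etc/passwd", "png/../secret"):
    safe_format = re.sub(r'[^a-zA-Z0-9]', '', img_format)[:10]
    print(f"image_abc123.{safe_format}")
# image_abc123.png
# image_abc123.jpeg
# image_abc123.etcpasswd
# image_abc123.pngsecret
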
@@ -2245,6 +2371,9 @@ class ChatAgent(BaseAgent):
                                     base64_data
                                 )
                             )
+                            # Register for cleanup
+                            with _temp_files_lock:
+                                _temp_files.add(img_path)
 
                             # Create a file:// URL that can be
                             # opened
@@ -2497,7 +2626,8 @@ class ChatAgent(BaseAgent):
         try:
             raw_result = tool(**args)
             if self.mask_tool_output:
-                self._secure_result_store[tool_call_id] = raw_result
+                with self._secure_result_store_lock:
+                    self._secure_result_store[tool_call_id] = raw_result
                 result = (
                     "[The tool has been executed successfully, but the output"
                     " from the tool is masked. You can move forward]"
@@ -2555,7 +2685,7 @@ class ChatAgent(BaseAgent):
             # Capture the error message to prevent framework crash
             error_msg = f"Error executing async tool '{func_name}': {e!s}"
             result = f"Tool execution failed: {error_msg}"
-            logging.warning(error_msg)
+            logger.warning(error_msg)
         return self._record_tool_calling(func_name, args, result, tool_call_id)
 
     def _record_tool_calling(
@@ -2606,8 +2736,6 @@ class ChatAgent(BaseAgent):
 
         # This ensures the assistant message (tool call) always appears before
         # the function message (tool result) in the conversation context
         # Use time.time_ns() for nanosecond precision to avoid collisions
-        import time
-
         current_time_ns = time.time_ns()
         base_timestamp = current_time_ns / 1_000_000_000  # Convert to seconds
@@ -2698,7 +2826,7 @@ class ChatAgent(BaseAgent):
             # Check termination condition
             if self.stop_event and self.stop_event.is_set():
                 logger.info(
-                    f"Termination triggered at iteration " f"{iteration_count}"
+                    f"Termination triggered at iteration {iteration_count}"
                 )
                 yield self._step_terminate(
                     num_tokens, tool_call_records, "termination_triggered"
@@ -3067,72 +3195,70 @@ class ChatAgent(BaseAgent):
         accumulated_tool_calls: Dict[str, Any],
         tool_call_records: List[ToolCallingRecord],
     ) -> Generator[ChatAgentResponse, None, None]:
-        r"""Execute multiple tools synchronously with
-        proper content accumulation, using threads+queue for
-        non-blocking status streaming."""
-
-        def tool_worker(result_queue, tool_call_data):
-            try:
-                tool_call_record = self._execute_tool_from_stream_data(
-                    tool_call_data
-                )
-                result_queue.put(tool_call_record)
-            except Exception as e:
-                logger.error(f"Error in threaded tool execution: {e}")
-                result_queue.put(None)
+        r"""Execute multiple tools synchronously with proper content
+        accumulation, using ThreadPoolExecutor for better timeout handling."""
 
         tool_calls_to_execute = []
         for _tool_call_index, tool_call_data in accumulated_tool_calls.items():
             if tool_call_data.get('complete', False):
                 tool_calls_to_execute.append(tool_call_data)
 
-        # Phase 2: Execute tools in threads and yield status while waiting
-        for tool_call_data in tool_calls_to_execute:
-            function_name = tool_call_data['function']['name']
-            try:
-                args = json.loads(tool_call_data['function']['arguments'])
-            except json.JSONDecodeError:
-                args = tool_call_data['function']['arguments']
-            result_queue: queue.Queue[Optional[ToolCallingRecord]] = (
-                queue.Queue()
-            )
-            thread = threading.Thread(
-                target=tool_worker,
-                args=(result_queue, tool_call_data),
-            )
-            thread.start()
-
-            # Log debug info instead of adding to content
-            logger.info(
-                f"Calling function: {function_name} with arguments: {args}"
-            )
-
-            # wait for tool thread to finish with optional timeout
-            thread.join(self.tool_execution_timeout)
+        if not tool_calls_to_execute:
+            # No tools to execute, return immediately
+            return
+            yield  # Make this a generator
+
+        # Execute tools using ThreadPoolExecutor for proper timeout handling
+        # Use max_workers=len() for parallel execution, with min of 1
+        with concurrent.futures.ThreadPoolExecutor(
+            max_workers=max(1, len(tool_calls_to_execute))
+        ) as executor:
+            # Submit all tools first (parallel execution)
+            futures_map = {}
+            for tool_call_data in tool_calls_to_execute:
+                function_name = tool_call_data['function']['name']
+                try:
+                    args = json.loads(tool_call_data['function']['arguments'])
+                except json.JSONDecodeError:
+                    args = tool_call_data['function']['arguments']
 
-            # If timeout occurred, mark as error and continue
-            if thread.is_alive():
-                # Log timeout info instead of adding to content
-                logger.warning(
-                    f"Function '{function_name}' timed out after "
-                    f"{self.tool_execution_timeout} seconds"
+                # Log debug info
+                logger.info(
+                    f"Calling function: {function_name} with arguments: {args}"
                 )
 
-                # Detach thread (it may still finish later). Skip recording.
-                continue
-
-            # Tool finished, get result
-            tool_call_record = result_queue.get()
-            if tool_call_record:
-                tool_call_records.append(tool_call_record)
-                raw_result = tool_call_record.result
-                result_str = str(raw_result)
+                # Submit tool execution (non-blocking)
+                future = executor.submit(
+                    self._execute_tool_from_stream_data, tool_call_data
+                )
+                futures_map[future] = (function_name, tool_call_data)
+
+            # Wait for all futures to complete (or timeout)
+            for future in concurrent.futures.as_completed(
+                futures_map.keys(),
+                timeout=self.tool_execution_timeout
+                if self.tool_execution_timeout
+                else None,
+            ):
+                function_name, tool_call_data = futures_map[future]
 
-                # Log debug info instead of adding to content
-                logger.info(f"Function output: {result_str}")
-            else:
-                # Error already logged
-                continue
+                try:
+                    tool_call_record = future.result()
+                    if tool_call_record:
+                        tool_call_records.append(tool_call_record)
+                        logger.info(
+                            f"Function output: {tool_call_record.result}"
+                        )
+                except concurrent.futures.TimeoutError:
+                    logger.warning(
+                        f"Function '{function_name}' timed out after "
+                        f"{self.tool_execution_timeout} seconds"
+                    )
+                    future.cancel()
+                except Exception as e:
+                    logger.error(
+                        f"Error executing tool '{function_name}': {e}"
+                    )
 
         # Ensure this function remains a generator (required by type signature)
         return
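
The executor pattern the release switches to, reduced to a runnable sketch: submit every call up front, then drain results as they finish under one overall deadline passed to as_completed. The worker function and timings are stand-ins:

import concurrent.futures
import time


def slow_tool(name: str, delay: float) -> str:
    time.sleep(delay)
    return f"{name} done"


calls = [("search", 0.2), ("fetch", 0.5), ("parse", 0.1)]

with concurrent.futures.ThreadPoolExecutor(max_workers=len(calls)) as pool:
    futures = {pool.submit(slow_tool, n, d): n for n, d in calls}
    try:
        for fut in concurrent.futures.as_completed(futures, timeout=2.0):
            print(futures[fut], "->", fut.result())
    except concurrent.futures.TimeoutError:
        # Raised by the as_completed iterator once the deadline passes;
        # still-pending futures are simply never drained.
        print("overall timeout reached")

Note that with this API the TimeoutError comes from the as_completed iterator itself rather than from future.result(), so the deadline is global to the batch, not per call.
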
@@ -3177,8 +3303,6 @@ class ChatAgent(BaseAgent):
 
         # Record both messages with precise timestamps to ensure
         # correct ordering
-        import time
-
         current_time_ns = time.time_ns()
         base_timestamp = (
             current_time_ns / 1_000_000_000
@@ -3207,7 +3331,7 @@ class ChatAgent(BaseAgent):
                 f"Error executing tool '{function_name}': {e!s}"
             )
             result = {"error": error_msg}
-            logging.warning(error_msg)
+            logger.warning(error_msg)
 
             # Record error response
             func_msg = FunctionCallingMessage(
@@ -3302,8 +3426,6 @@ class ChatAgent(BaseAgent):
 
         # Record both messages with precise timestamps to ensure
         # correct ordering
-        import time
-
         current_time_ns = time.time_ns()
         base_timestamp = (
             current_time_ns / 1_000_000_000
@@ -3332,7 +3454,7 @@ class ChatAgent(BaseAgent):
                 f"Error executing async tool '{function_name}': {e!s}"
             )
             result = {"error": error_msg}
-            logging.warning(error_msg)
+            logger.warning(error_msg)
 
             # Record error response
             func_msg = FunctionCallingMessage(
@@ -3442,7 +3564,7 @@ class ChatAgent(BaseAgent):
             # Check termination condition
             if self.stop_event and self.stop_event.is_set():
                 logger.info(
-                    f"Termination triggered at iteration " f"{iteration_count}"
+                    f"Termination triggered at iteration {iteration_count}"
                 )
                 yield self._step_terminate(
                     num_tokens, tool_call_records, "termination_triggered"
@@ -3954,10 +4076,12 @@ class ChatAgent(BaseAgent):
             configuration.
         """
         # Create a new instance with the same configuration
-        # If with_memory is True, set system_message to None
-        # If with_memory is False, use the original system message
+        # If with_memory is True, set system_message to None (it will be
+        # copied from memory below, including any workflow context)
+        # If with_memory is False, use the current system message
+        # (which may include appended workflow context)
         # To avoid duplicated system memory.
-        system_message = None if with_memory else self._original_system_message
+        system_message = None if with_memory else self._system_message
 
         # Clone tools and collect toolkits that need registration
         cloned_tools, toolkits_to_register = self._clone_tools()
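
The net effect of the clone() change, as a hedged two-liner against an existing ChatAgent instance named agent (construction details elided):

# Keeps the current, possibly context-augmented, system message:
fresh = agent.clone(with_memory=False)
# Copies full conversation memory, system message included:
twin = agent.clone(with_memory=True)
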