agno 2.1.2__py3-none-any.whl → 2.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
agno/team/team.py CHANGED
@@ -1232,7 +1232,6 @@ class Team:
  dependencies: Optional[Dict[str, Any]] = None,
  response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
  stream_intermediate_steps: bool = False,
- workflow_context: Optional[Dict] = None,
  yield_run_response: bool = False,
  debug_mode: Optional[bool] = None,
  **kwargs: Any,
@@ -1244,10 +1243,11 @@ class Team:
  2. Prepare run messages
  3. Reason about the task(s) if reasoning is enabled
  4. Get a response from the model
- 5. Add RunOutput to Team Session
- 6. Calculate session metrics
- 7. Update Team Memory
- 8. Save session to storage
+ 5. Add the run to Team Session
+ 6. Update Team Memory
+ 7. Create the run completed event
+ 8. Calculate session metrics
+ 9. Save session to storage
  """
  # Register run for cancellation tracking
  register_run(run_response.run_id) # type: ignore
@@ -1286,7 +1286,6 @@ class Team:
  videos=run_input.videos,
  audio=run_input.audios,
  files=run_input.files,
- workflow_context=workflow_context,
  debug_mode=debug_mode,
  add_history_to_context=add_history_to_context,
  add_session_state_to_context=add_session_state_to_context,
@@ -1322,7 +1321,7 @@ class Team:
  try:
  # Start the Run by yielding a RunStarted event
  if stream_intermediate_steps:
- yield self._handle_event(create_team_run_started_event(run_response), run_response, workflow_context)
+ yield self._handle_event(create_team_run_started_event(run_response), run_response)

  # 3. Reason about the task(s) if reasoning is enabled
  yield from self._handle_reasoning_stream(
@@ -1341,7 +1340,6 @@ class Team:
  run_messages=run_messages,
  response_format=response_format,
  stream_intermediate_steps=stream_intermediate_steps,
- workflow_context=workflow_context,
  ):
  raise_if_cancelled(run_response.run_id) # type: ignore
  yield event
@@ -1352,7 +1350,6 @@ class Team:
  run_messages=run_messages,
  response_format=response_format,
  stream_intermediate_steps=stream_intermediate_steps,
- workflow_context=workflow_context,
  ):
  raise_if_cancelled(run_response.run_id) # type: ignore
  from agno.run.team import IntermediateRunContentEvent, RunContentEvent
@@ -1371,7 +1368,6 @@ class Team:
  run_response=run_response,
  run_messages=run_messages,
  stream_intermediate_steps=stream_intermediate_steps,
- workflow_context=workflow_context,
  ):
  raise_if_cancelled(run_response.run_id) # type: ignore
  yield event
@@ -1395,26 +1391,26 @@ class Team:
  # 5. Add the run to Team Session
  session.upsert_run(run_response=run_response)

- # 6. Calculate session metrics
- self._update_session_metrics(session=session)
+ # 6. Update Team Memory
+ yield from self._make_memories_and_summaries(
+ run_response=run_response,
+ run_messages=run_messages,
+ session=session,
+ user_id=user_id,
+ )

+ # 7. Create the run completed event
  completed_event = self._handle_event(
  create_team_run_completed_event(
  from_run_response=run_response,
  ),
  run_response,
- workflow_context,
  )

- # 7. Update Team Memory
- yield from self._make_memories_and_summaries(
- run_response=run_response,
- run_messages=run_messages,
- session=session,
- user_id=user_id,
- )
+ # 8. Calculate session metrics
+ self._update_session_metrics(session=session)

- # 8. Save session to storage
+ # 9. Save session to storage
  self.save_session(session=session)

  if stream_intermediate_steps:
@@ -1438,7 +1434,6 @@ class Team:
  yield self._handle_event(
  create_team_run_cancelled_event(from_run_response=run_response, reason=str(e)),
  run_response,
- workflow_context,
  )

  # Add the RunOutput to Team Session even when cancelled
@@ -1584,9 +1579,6 @@ class Team:
  )
  add_history = add_history_to_context if add_history_to_context is not None else self.add_history_to_context

- # Extract workflow context from kwargs if present
- workflow_context = kwargs.pop("workflow_context", None)
-
  # Initialize Knowledge Filters
  effective_filters = knowledge_filters

@@ -1665,7 +1657,6 @@ class Team:
  dependencies=run_dependencies,
  response_format=response_format,
  stream_intermediate_steps=stream_intermediate_steps,
- workflow_context=workflow_context,
  yield_run_response=yield_run_response,
  debug_mode=debug_mode,
  **kwargs,
@@ -1756,7 +1747,6 @@ class Team:
  metadata: Optional[Dict[str, Any]] = None,
  response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
  dependencies: Optional[Dict[str, Any]] = None,
- workflow_context: Optional[Dict] = None,
  debug_mode: Optional[bool] = None,
  **kwargs: Any,
  ) -> TeamRunOutput:
@@ -1812,7 +1802,6 @@ class Team:
  videos=run_input.videos,
  audio=run_input.audios,
  files=run_input.files,
- workflow_context=workflow_context,
  debug_mode=debug_mode,
  add_history_to_context=add_history_to_context,
  add_dependencies_to_context=add_dependencies_to_context,
@@ -1943,7 +1932,6 @@ class Team:
  response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
  dependencies: Optional[Dict[str, Any]] = None,
  stream_intermediate_steps: bool = False,
- workflow_context: Optional[Dict] = None,
  yield_run_response: bool = False,
  debug_mode: Optional[bool] = None,
  **kwargs: Any,
@@ -1955,10 +1943,11 @@ class Team:
  2. Prepare run messages
  3. Reason about the task(s) if reasoning is enabled
  4. Get a response from the model
- 5. Update Team Memory
- 6. Add RunOutput to Team Session
- 7. Calculate session metrics
- 8. Save session to storage
+ 5. Add the run to Team Session
+ 6. Update Team Memory
+ 7. Create the run completed event
+ 8. Calculate session metrics
+ 9. Save session to storage
  """

  # 1. Resolve callable dependencies if present
@@ -1998,7 +1987,6 @@ class Team:
  videos=run_input.videos,
  audio=run_input.audios,
  files=run_input.files,
- workflow_context=workflow_context,
  debug_mode=debug_mode,
  add_history_to_context=add_history_to_context,
  add_dependencies_to_context=add_dependencies_to_context,
@@ -2036,7 +2024,7 @@ class Team:
  # Start the Run by yielding a RunStarted event
  if stream_intermediate_steps:
  yield self._handle_event(
- create_team_run_started_event(from_run_response=run_response), run_response, workflow_context
+ create_team_run_started_event(from_run_response=run_response), run_response
  )

  # 3. Reason about the task(s) if reasoning is enabled
@@ -2055,7 +2043,6 @@ class Team:
  run_messages=run_messages,
  response_format=response_format,
  stream_intermediate_steps=stream_intermediate_steps,
- workflow_context=workflow_context,
  ):
  raise_if_cancelled(run_response.run_id) # type: ignore
  yield event
@@ -2066,7 +2053,6 @@ class Team:
  run_messages=run_messages,
  response_format=response_format,
  stream_intermediate_steps=stream_intermediate_steps,
- workflow_context=workflow_context,
  ):
  raise_if_cancelled(run_response.run_id) # type: ignore
  from agno.run.team import IntermediateRunContentEvent, RunContentEvent
@@ -2085,7 +2071,6 @@ class Team:
  run_response=run_response,
  run_messages=run_messages,
  stream_intermediate_steps=stream_intermediate_steps,
- workflow_context=workflow_context,
  ):
  raise_if_cancelled(run_response.run_id) # type: ignore
  yield event
@@ -2117,14 +2102,15 @@ class Team:
  ):
  yield event

- # 7. Calculate session metrics
- self._update_session_metrics(session=session)
-
+ # 7. Create the run completed event
  completed_event = self._handle_event(
- create_team_run_completed_event(from_run_response=run_response), run_response, workflow_context
+ create_team_run_completed_event(from_run_response=run_response), run_response
  )

- # 8. Save session to storage
+ # 8. Calculate session metrics
+ self._update_session_metrics(session=session)
+
+ # 9. Save session to storage
  self.save_session(session=session)

  if stream_intermediate_steps:
@@ -2148,7 +2134,6 @@ class Team:
  yield self._handle_event(
  create_team_run_cancelled_event(from_run_response=run_response, reason=str(e)),
  run_response,
- workflow_context,
  )

  # Add the RunOutput to Team Session even when cancelled
@@ -2289,9 +2274,6 @@ class Team:
  )
  add_history = add_history_to_context if add_history_to_context is not None else self.add_history_to_context

- # Extract workflow context from kwargs if present
- workflow_context = kwargs.pop("workflow_context", None)
-
  effective_filters = knowledge_filters
  # When filters are passed manually
  if self.knowledge_filters or knowledge_filters:
@@ -2367,7 +2349,6 @@ class Team:
  response_format=response_format,
  dependencies=run_dependencies,
  stream_intermediate_steps=stream_intermediate_steps,
- workflow_context=workflow_context,
  yield_run_response=yield_run_response,
  debug_mode=debug_mode,
  **kwargs,
@@ -2386,7 +2367,6 @@ class Team:
  metadata=metadata,
  response_format=response_format,
  dependencies=run_dependencies,
- workflow_context=workflow_context,
  debug_mode=debug_mode,
  **kwargs,
  )
@@ -2526,7 +2506,6 @@ class Team:
  run_messages: RunMessages,
  response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
  stream_intermediate_steps: bool = False,
- workflow_context: Optional[Dict] = None,
  ) -> Iterator[Union[TeamRunOutputEvent, RunOutputEvent]]:
  self.model = cast(Model, self.model)

@@ -2559,7 +2538,6 @@ class Team:
  reasoning_state=reasoning_state,
  stream_intermediate_steps=stream_intermediate_steps,
  parse_structured_output=self.should_parse_structured_output,
- workflow_context=workflow_context,
  )

  # 3. Update TeamRunOutput
@@ -2608,7 +2586,6 @@ class Team:
  run_messages: RunMessages,
  response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
  stream_intermediate_steps: bool = False,
- workflow_context: Optional[Dict] = None,
  ) -> AsyncIterator[Union[TeamRunOutputEvent, RunOutputEvent]]:
  self.model = cast(Model, self.model)

@@ -2642,7 +2619,6 @@ class Team:
  reasoning_state=reasoning_state,
  stream_intermediate_steps=stream_intermediate_steps,
  parse_structured_output=self.should_parse_structured_output,
- workflow_context=workflow_context,
  ):
  yield event

@@ -2695,7 +2671,6 @@ class Team:
  reasoning_state: Optional[Dict[str, Any]] = None,
  stream_intermediate_steps: bool = False,
  parse_structured_output: bool = False,
- workflow_context: Optional[Dict] = None,
  ) -> Iterator[Union[TeamRunOutputEvent, RunOutputEvent]]:
  if isinstance(model_response_event, tuple(get_args(RunOutputEvent))) or isinstance(
  model_response_event, tuple(get_args(TeamRunOutputEvent))
@@ -2829,7 +2804,6 @@ class Team:
  image=model_response_event.images[-1] if model_response_event.images else None,
  ),
  run_response,
- workflow_context=workflow_context,
  )
  else:
  yield self._handle_event(
@@ -2839,7 +2813,6 @@ class Team:
  content_type=content_type,
  ),
  run_response,
- workflow_context=workflow_context,
  )

  # If the model response is a tool_call_started, add the tool call to the run_response
@@ -3285,7 +3258,6 @@ class Team:
  run_response: TeamRunOutput,
  run_messages: RunMessages,
  stream_intermediate_steps: bool = False,
- workflow_context: Optional[Dict] = None,
  ):
  """Parse the model response using the output model stream."""
  from agno.utils.events import (
@@ -3308,7 +3280,6 @@ class Team:
  run_response=run_response,
  full_model_response=model_response,
  model_response_event=model_response_event,
- workflow_context=workflow_context,
  )

  # Update the TeamRunResponse content
@@ -3341,7 +3312,6 @@ class Team:
  run_response: TeamRunOutput,
  run_messages: RunMessages,
  stream_intermediate_steps: bool = False,
- workflow_context: Optional[Dict] = None,
  ):
  """Parse the model response using the output model stream."""
  from agno.utils.events import (
@@ -3364,7 +3334,6 @@ class Team:
  run_response=run_response,
  full_model_response=model_response,
  model_response_event=model_response_event,
- workflow_context=workflow_context,
  ):
  yield event

@@ -3385,15 +3354,7 @@ class Team:
  self,
  event: Union[RunOutputEvent, TeamRunOutputEvent],
  run_response: TeamRunOutput,
- workflow_context: Optional[Dict] = None,
  ):
- if workflow_context:
- event.workflow_id = workflow_context.get("workflow_id")
- event.workflow_run_id = workflow_context.get("workflow_run_id")
- event.step_id = workflow_context.get("step_id")
- event.step_name = workflow_context.get("step_name")
- event.step_index = workflow_context.get("step_index")
-
  # We only store events that are not run_response_content events
  events_to_skip = [event.value for event in self.events_to_skip] if self.events_to_skip else []
  if self.store_events and event.event not in events_to_skip:
@@ -4529,7 +4490,6 @@ class Team:
  videos: Optional[Sequence[Video]] = None,
  audio: Optional[Sequence[Audio]] = None,
  files: Optional[Sequence[File]] = None,
- workflow_context: Optional[Dict] = None,
  debug_mode: Optional[bool] = None,
  add_history_to_context: Optional[bool] = None,
  dependencies: Optional[Dict[str, Any]] = None,
@@ -4623,7 +4583,6 @@ class Team:
  files=files, # type: ignore
  knowledge_filters=knowledge_filters,
  add_history_to_context=add_history_to_context,
- workflow_context=workflow_context,
  dependencies=dependencies,
  add_dependencies_to_context=add_dependencies_to_context,
  add_session_state_to_context=add_session_state_to_context,
@@ -5834,7 +5793,6 @@ class Team:
  files: Optional[List[File]] = None,
  knowledge_filters: Optional[Dict[str, Any]] = None,
  add_history_to_context: Optional[bool] = None,
- workflow_context: Optional[Dict] = None,
  dependencies: Optional[Dict[str, Any]] = None,
  add_dependencies_to_context: Optional[bool] = None,
  add_session_state_to_context: Optional[bool] = None,
@@ -5989,7 +5947,6 @@ class Team:
  stream=True,
  stream_intermediate_steps=stream_intermediate_steps,
  debug_mode=debug_mode,
- workflow_context=workflow_context,
  dependencies=dependencies,
  add_dependencies_to_context=add_dependencies_to_context,
  metadata=metadata,
@@ -6029,7 +5986,6 @@ class Team:
  files=files,
  stream=False,
  debug_mode=debug_mode,
- workflow_context=workflow_context,
  dependencies=dependencies,
  add_dependencies_to_context=add_dependencies_to_context,
  add_session_state_to_context=add_session_state_to_context,
@@ -6121,7 +6077,6 @@ class Team:
  add_dependencies_to_context=add_dependencies_to_context,
  add_session_state_to_context=add_session_state_to_context,
  metadata=metadata,
- workflow_context=workflow_context,
  knowledge_filters=knowledge_filters
  if not member_agent.knowledge_filters and member_agent.knowledge
  else None,
@@ -6157,7 +6112,6 @@ class Team:
  files=files,
  stream=False,
  debug_mode=debug_mode,
- workflow_context=workflow_context,
  dependencies=dependencies,
  add_dependencies_to_context=add_dependencies_to_context,
  add_session_state_to_context=add_session_state_to_context,
@@ -6234,7 +6188,6 @@ class Team:
  files=files,
  stream=True,
  stream_intermediate_steps=stream_intermediate_steps,
- workflow_context=workflow_context,
  knowledge_filters=knowledge_filters
  if not member_agent.knowledge_filters and member_agent.knowledge
  else None,
@@ -6275,7 +6228,6 @@ class Team:
  audio=audio,
  files=files,
  stream=False,
- workflow_context=workflow_context,
  knowledge_filters=knowledge_filters
  if not member_agent.knowledge_filters and member_agent.knowledge
  else None,
@@ -6352,7 +6304,6 @@ class Team:
  files=files,
  stream=True,
  stream_intermediate_steps=stream_intermediate_steps,
- workflow_context=workflow_context,
  debug_mode=debug_mode,
  knowledge_filters=knowledge_filters
  if not member_agent.knowledge_filters and member_agent.knowledge
@@ -6433,7 +6384,6 @@ class Team:
  stream=False,
  stream_intermediate_steps=stream_intermediate_steps,
  debug_mode=debug_mode,
- workflow_context=workflow_context,
  knowledge_filters=knowledge_filters
  if not member_agent.knowledge_filters and member_agent.knowledge
  else None,
agno/tools/file.py CHANGED
@@ -75,7 +75,9 @@ class FileTools(Toolkit):
  """
  try:
  log_info(f"Reading files in : {self.base_dir}")
- return json.dumps([str(file_path) for file_path in self.base_dir.iterdir()], indent=4)
+ return json.dumps(
+ [str(file_path.relative_to(self.base_dir)) for file_path in self.base_dir.iterdir()], indent=4
+ )
  except Exception as e:
  log_error(f"Error reading files: {e}")
  return f"Error reading files: {e}"
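This hunk, together with the one below, changes FileTools to return paths relative to base_dir instead of absolute paths. A minimal standalone sketch of the difference (the base_dir value is hypothetical):

    from pathlib import Path

    base_dir = Path("/tmp/workspace")            # hypothetical base directory
    file_path = base_dir / "notes" / "a.txt"

    print(str(file_path))                        # old output: "/tmp/workspace/notes/a.txt"
    print(str(file_path.relative_to(base_dir)))  # new output: "notes/a.txt"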
@@ -93,7 +95,7 @@ class FileTools(Toolkit):
  log_debug(f"Searching files in {self.base_dir} with pattern {pattern}")
  matching_files = list(self.base_dir.glob(pattern))

- file_paths = [str(file_path) for file_path in matching_files]
+ file_paths = [str(file_path.relative_to(self.base_dir)) for file_path in matching_files]

  result = {
  "pattern": pattern,
agno/tools/function.py CHANGED
@@ -473,6 +473,7 @@ class Function(BaseModel):

  def _get_cache_key(self, entrypoint_args: Dict[str, Any], call_args: Optional[Dict[str, Any]] = None) -> str:
  """Generate a cache key based on function name and arguments."""
+ import json
  from hashlib import md5

  copy_entrypoint_args = entrypoint_args.copy()
@@ -493,7 +494,8 @@ class Function(BaseModel):
  del copy_entrypoint_args["files"]
  if "dependencies" in copy_entrypoint_args:
  del copy_entrypoint_args["dependencies"]
- args_str = str(copy_entrypoint_args)
+ # Use json.dumps with sort_keys=True to ensure consistent ordering regardless of dict key order
+ args_str = json.dumps(copy_entrypoint_args, sort_keys=True, default=str)

  kwargs_str = str(sorted((call_args or {}).items()))
  key_str = f"{self.name}:{args_str}:{kwargs_str}"
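The cache-key change replaces str(copy_entrypoint_args) with json.dumps(..., sort_keys=True, default=str), so two calls whose argument dicts differ only in key insertion order now produce the same key. A small standalone illustration:

    import json

    a = {"x": 1, "y": 2}
    b = {"y": 2, "x": 1}  # same arguments, different insertion order

    print(str(a) == str(b))                                                # False: str() keeps insertion order
    print(json.dumps(a, sort_keys=True) == json.dumps(b, sort_keys=True))  # True: keys are sorted first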
@@ -799,6 +801,9 @@ class FunctionCall(BaseModel):
  return FunctionExecutionResult(status="success", result=cached_result)

  # Execute function
+ execution_result = None
+ exception_to_raise = None
+
  try:
  # Build and execute the nested chain of hooks
  if self.function.tool_hooks is not None:
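This hunk and the ones that follow (for both the sync and async execution paths) reshape the body into a try/except/finally: the outcome is captured in execution_result or exception_to_raise, the post-hook runs in the finally block so it is no longer skipped on failure, and an AgentRunException is re-raised only after the post-hook has run. A generic, standalone sketch of that control flow (stand-in names, not the agno implementation):

    def run_with_post_hook(fn, post_hook):
        execution_result = None
        exception_to_raise = None
        try:
            execution_result = {"status": "success", "result": fn()}
        except ValueError as e:            # stand-in for AgentRunException: re-raised after the hook
            exception_to_raise = e
        except Exception as e:             # any other failure becomes a failure result
            execution_result = {"status": "failure", "error": str(e)}
        finally:
            post_hook()                    # always runs, even when we re-raise below

        if exception_to_raise is not None:
            raise exception_to_raise
        return execution_result

    print(run_with_post_hook(lambda: 42, lambda: print("post-hook ran")))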
@@ -822,22 +827,27 @@ class FunctionCall(BaseModel):
  cache_file = self.function._get_cache_file_path(cache_key)
  self.function._save_to_cache(cache_file, self.result)

+ execution_result = FunctionExecutionResult(
+ status="success", result=self.result, updated_session_state=updated_session_state
+ )
+
  except AgentRunException as e:
  log_debug(f"{e.__class__.__name__}: {e}")
  self.error = str(e)
- raise
+ exception_to_raise = e
  except Exception as e:
  log_warning(f"Could not run function {self.get_call_str()}")
  log_exception(e)
  self.error = str(e)
- return FunctionExecutionResult(status="failure", error=str(e))
+ execution_result = FunctionExecutionResult(status="failure", error=str(e))

- # Execute post-hook if it exists
- self._handle_post_hook()
+ finally:
+ self._handle_post_hook()

- return FunctionExecutionResult(
- status="success", result=self.result, updated_session_state=updated_session_state
- )
+ if exception_to_raise is not None:
+ raise exception_to_raise
+
+ return execution_result # type: ignore[return-value]

  async def _handle_pre_hook_async(self):
  """Handles the async pre-hook for the function call."""
@@ -991,6 +1001,9 @@ class FunctionCall(BaseModel):
  return FunctionExecutionResult(status="success", result=cached_result)

  # Execute function
+ execution_result = None
+ exception_to_raise = None
+
  try:
  # Build and execute the nested chain of hooks
  if self.function.tool_hooks is not None:
@@ -1017,25 +1030,30 @@ class FunctionCall(BaseModel):
  if entrypoint_args.get("session_state") is not None:
  updated_session_state = entrypoint_args.get("session_state")

+ execution_result = FunctionExecutionResult(
+ status="success", result=self.result, updated_session_state=updated_session_state
+ )
+
  except AgentRunException as e:
  log_debug(f"{e.__class__.__name__}: {e}")
  self.error = str(e)
- raise
+ exception_to_raise = e
  except Exception as e:
  log_warning(f"Could not run function {self.get_call_str()}")
  log_exception(e)
  self.error = str(e)
- return FunctionExecutionResult(status="failure", error=str(e))
+ execution_result = FunctionExecutionResult(status="failure", error=str(e))

- # Execute post-hook if it exists
- if iscoroutinefunction(self.function.post_hook):
- await self._handle_post_hook_async()
- else:
- self._handle_post_hook()
+ finally:
+ if iscoroutinefunction(self.function.post_hook):
+ await self._handle_post_hook_async()
+ else:
+ self._handle_post_hook()

- return FunctionExecutionResult(
- status="success", result=self.result, updated_session_state=updated_session_state
- )
+ if exception_to_raise is not None:
+ raise exception_to_raise
+
+ return execution_result # type: ignore[return-value]


  class ToolResult(BaseModel):
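In the async path the finally block additionally dispatches on the hook type: if the configured post_hook is a coroutine function, the async handler is awaited, otherwise the sync handler is called directly. A standalone sketch of that dispatch (hook names are hypothetical):

    import asyncio
    from inspect import iscoroutinefunction

    def sync_hook():
        print("sync post-hook ran")

    async def async_hook():
        print("async post-hook ran")

    async def run_post_hook(hook):
        # Await coroutine hooks, call plain functions directly.
        if iscoroutinefunction(hook):
            await hook()
        else:
            hook()

    asyncio.run(run_post_hook(sync_hook))
    asyncio.run(run_post_hook(async_hook))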