nvidia-nat 1.3.dev0__py3-none-any.whl → 1.3.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (246)
  1. aiq/__init__.py +66 -0
  2. nat/agent/base.py +40 -14
  3. nat/agent/dual_node.py +9 -4
  4. nat/agent/prompt_optimizer/prompt.py +68 -0
  5. nat/agent/prompt_optimizer/register.py +149 -0
  6. nat/agent/react_agent/agent.py +96 -57
  7. nat/agent/react_agent/prompt.py +4 -1
  8. nat/agent/react_agent/register.py +41 -21
  9. nat/agent/reasoning_agent/reasoning_agent.py +11 -9
  10. nat/agent/register.py +1 -1
  11. nat/agent/rewoo_agent/agent.py +332 -150
  12. nat/agent/rewoo_agent/prompt.py +22 -22
  13. nat/agent/rewoo_agent/register.py +49 -28
  14. nat/agent/tool_calling_agent/agent.py +156 -29
  15. nat/agent/tool_calling_agent/register.py +57 -38
  16. nat/authentication/api_key/api_key_auth_provider.py +2 -2
  17. nat/authentication/credential_validator/bearer_token_validator.py +557 -0
  18. nat/authentication/http_basic_auth/http_basic_auth_provider.py +1 -1
  19. nat/authentication/interfaces.py +5 -2
  20. nat/authentication/oauth2/oauth2_auth_code_flow_provider.py +40 -20
  21. nat/authentication/oauth2/oauth2_resource_server_config.py +124 -0
  22. nat/authentication/register.py +0 -1
  23. nat/builder/builder.py +56 -24
  24. nat/builder/component_utils.py +9 -5
  25. nat/builder/context.py +46 -11
  26. nat/builder/eval_builder.py +16 -11
  27. nat/builder/framework_enum.py +1 -0
  28. nat/builder/front_end.py +1 -1
  29. nat/builder/function.py +378 -8
  30. nat/builder/function_base.py +3 -3
  31. nat/builder/function_info.py +6 -8
  32. nat/builder/user_interaction_manager.py +2 -2
  33. nat/builder/workflow.py +13 -1
  34. nat/builder/workflow_builder.py +281 -76
  35. nat/cli/cli_utils/config_override.py +2 -2
  36. nat/cli/commands/evaluate.py +1 -1
  37. nat/cli/commands/info/info.py +16 -6
  38. nat/cli/commands/info/list_channels.py +1 -1
  39. nat/cli/commands/info/list_components.py +7 -8
  40. nat/cli/commands/mcp/__init__.py +14 -0
  41. nat/cli/commands/mcp/mcp.py +986 -0
  42. nat/cli/commands/object_store/__init__.py +14 -0
  43. nat/cli/commands/object_store/object_store.py +227 -0
  44. nat/cli/commands/optimize.py +90 -0
  45. nat/cli/commands/registry/publish.py +2 -2
  46. nat/cli/commands/registry/pull.py +2 -2
  47. nat/cli/commands/registry/remove.py +2 -2
  48. nat/cli/commands/registry/search.py +15 -17
  49. nat/cli/commands/start.py +16 -5
  50. nat/cli/commands/uninstall.py +1 -1
  51. nat/cli/commands/workflow/templates/config.yml.j2 +0 -1
  52. nat/cli/commands/workflow/templates/pyproject.toml.j2 +4 -1
  53. nat/cli/commands/workflow/templates/register.py.j2 +0 -1
  54. nat/cli/commands/workflow/workflow_commands.py +9 -13
  55. nat/cli/entrypoint.py +8 -10
  56. nat/cli/register_workflow.py +38 -4
  57. nat/cli/type_registry.py +79 -10
  58. nat/control_flow/__init__.py +0 -0
  59. nat/control_flow/register.py +20 -0
  60. nat/control_flow/router_agent/__init__.py +0 -0
  61. nat/control_flow/router_agent/agent.py +329 -0
  62. nat/control_flow/router_agent/prompt.py +48 -0
  63. nat/control_flow/router_agent/register.py +91 -0
  64. nat/control_flow/sequential_executor.py +166 -0
  65. nat/data_models/agent.py +34 -0
  66. nat/data_models/api_server.py +10 -10
  67. nat/data_models/authentication.py +23 -9
  68. nat/data_models/common.py +1 -1
  69. nat/data_models/component.py +2 -0
  70. nat/data_models/component_ref.py +11 -0
  71. nat/data_models/config.py +41 -17
  72. nat/data_models/dataset_handler.py +1 -1
  73. nat/data_models/discovery_metadata.py +4 -4
  74. nat/data_models/evaluate.py +4 -1
  75. nat/data_models/function.py +34 -0
  76. nat/data_models/function_dependencies.py +14 -6
  77. nat/data_models/gated_field_mixin.py +242 -0
  78. nat/data_models/intermediate_step.py +3 -3
  79. nat/data_models/optimizable.py +119 -0
  80. nat/data_models/optimizer.py +149 -0
  81. nat/data_models/swe_bench_model.py +1 -1
  82. nat/data_models/temperature_mixin.py +44 -0
  83. nat/data_models/thinking_mixin.py +86 -0
  84. nat/data_models/top_p_mixin.py +44 -0
  85. nat/embedder/azure_openai_embedder.py +46 -0
  86. nat/embedder/nim_embedder.py +1 -1
  87. nat/embedder/openai_embedder.py +2 -3
  88. nat/embedder/register.py +1 -1
  89. nat/eval/config.py +3 -1
  90. nat/eval/dataset_handler/dataset_handler.py +71 -7
  91. nat/eval/evaluate.py +86 -31
  92. nat/eval/evaluator/base_evaluator.py +1 -1
  93. nat/eval/evaluator/evaluator_model.py +13 -0
  94. nat/eval/intermediate_step_adapter.py +1 -1
  95. nat/eval/rag_evaluator/evaluate.py +2 -2
  96. nat/eval/rag_evaluator/register.py +3 -3
  97. nat/eval/register.py +4 -1
  98. nat/eval/remote_workflow.py +3 -3
  99. nat/eval/runtime_evaluator/__init__.py +14 -0
  100. nat/eval/runtime_evaluator/evaluate.py +123 -0
  101. nat/eval/runtime_evaluator/register.py +100 -0
  102. nat/eval/swe_bench_evaluator/evaluate.py +6 -6
  103. nat/eval/trajectory_evaluator/evaluate.py +1 -1
  104. nat/eval/trajectory_evaluator/register.py +1 -1
  105. nat/eval/tunable_rag_evaluator/evaluate.py +4 -7
  106. nat/eval/utils/eval_trace_ctx.py +89 -0
  107. nat/eval/utils/weave_eval.py +18 -9
  108. nat/experimental/decorators/experimental_warning_decorator.py +27 -7
  109. nat/experimental/test_time_compute/functions/plan_select_execute_function.py +7 -3
  110. nat/experimental/test_time_compute/functions/ttc_tool_orchestration_function.py +3 -3
  111. nat/experimental/test_time_compute/functions/ttc_tool_wrapper_function.py +1 -1
  112. nat/experimental/test_time_compute/models/strategy_base.py +5 -4
  113. nat/experimental/test_time_compute/register.py +0 -1
  114. nat/experimental/test_time_compute/selection/llm_based_output_merging_selector.py +1 -3
  115. nat/front_ends/console/authentication_flow_handler.py +82 -30
  116. nat/front_ends/console/console_front_end_plugin.py +8 -5
  117. nat/front_ends/fastapi/auth_flow_handlers/websocket_flow_handler.py +52 -17
  118. nat/front_ends/fastapi/dask_client_mixin.py +65 -0
  119. nat/front_ends/fastapi/fastapi_front_end_config.py +36 -5
  120. nat/front_ends/fastapi/fastapi_front_end_controller.py +4 -4
  121. nat/front_ends/fastapi/fastapi_front_end_plugin.py +135 -4
  122. nat/front_ends/fastapi/fastapi_front_end_plugin_worker.py +481 -281
  123. nat/front_ends/fastapi/job_store.py +518 -99
  124. nat/front_ends/fastapi/main.py +11 -19
  125. nat/front_ends/fastapi/message_handler.py +13 -14
  126. nat/front_ends/fastapi/message_validator.py +17 -19
  127. nat/front_ends/fastapi/response_helpers.py +4 -4
  128. nat/front_ends/fastapi/step_adaptor.py +2 -2
  129. nat/front_ends/fastapi/utils.py +57 -0
  130. nat/front_ends/mcp/introspection_token_verifier.py +73 -0
  131. nat/front_ends/mcp/mcp_front_end_config.py +10 -1
  132. nat/front_ends/mcp/mcp_front_end_plugin.py +45 -13
  133. nat/front_ends/mcp/mcp_front_end_plugin_worker.py +116 -8
  134. nat/front_ends/mcp/tool_converter.py +44 -14
  135. nat/front_ends/register.py +0 -1
  136. nat/front_ends/simple_base/simple_front_end_plugin_base.py +3 -1
  137. nat/llm/aws_bedrock_llm.py +24 -12
  138. nat/llm/azure_openai_llm.py +57 -0
  139. nat/llm/litellm_llm.py +69 -0
  140. nat/llm/nim_llm.py +20 -8
  141. nat/llm/openai_llm.py +14 -6
  142. nat/llm/register.py +5 -1
  143. nat/llm/utils/env_config_value.py +2 -3
  144. nat/llm/utils/thinking.py +215 -0
  145. nat/meta/pypi.md +9 -9
  146. nat/object_store/models.py +2 -0
  147. nat/object_store/register.py +0 -1
  148. nat/observability/exporter/base_exporter.py +3 -3
  149. nat/observability/exporter/file_exporter.py +1 -1
  150. nat/observability/exporter/processing_exporter.py +309 -81
  151. nat/observability/exporter/span_exporter.py +1 -1
  152. nat/observability/exporter_manager.py +7 -7
  153. nat/observability/mixin/file_mixin.py +7 -7
  154. nat/observability/mixin/redaction_config_mixin.py +42 -0
  155. nat/observability/mixin/tagging_config_mixin.py +62 -0
  156. nat/observability/mixin/type_introspection_mixin.py +420 -107
  157. nat/observability/processor/batching_processor.py +5 -7
  158. nat/observability/processor/falsy_batch_filter_processor.py +55 -0
  159. nat/observability/processor/processor.py +3 -0
  160. nat/observability/processor/processor_factory.py +70 -0
  161. nat/observability/processor/redaction/__init__.py +24 -0
  162. nat/observability/processor/redaction/contextual_redaction_processor.py +125 -0
  163. nat/observability/processor/redaction/contextual_span_redaction_processor.py +66 -0
  164. nat/observability/processor/redaction/redaction_processor.py +177 -0
  165. nat/observability/processor/redaction/span_header_redaction_processor.py +92 -0
  166. nat/observability/processor/span_tagging_processor.py +68 -0
  167. nat/observability/register.py +6 -4
  168. nat/profiler/calc/calc_runner.py +3 -4
  169. nat/profiler/callbacks/agno_callback_handler.py +1 -1
  170. nat/profiler/callbacks/langchain_callback_handler.py +14 -7
  171. nat/profiler/callbacks/llama_index_callback_handler.py +3 -3
  172. nat/profiler/callbacks/semantic_kernel_callback_handler.py +3 -3
  173. nat/profiler/data_frame_row.py +1 -1
  174. nat/profiler/decorators/framework_wrapper.py +62 -13
  175. nat/profiler/decorators/function_tracking.py +160 -3
  176. nat/profiler/forecasting/models/forecasting_base_model.py +3 -1
  177. nat/profiler/inference_optimization/bottleneck_analysis/simple_stack_analysis.py +1 -1
  178. nat/profiler/inference_optimization/data_models.py +3 -3
  179. nat/profiler/inference_optimization/experimental/prefix_span_analysis.py +7 -8
  180. nat/profiler/inference_optimization/token_uniqueness.py +1 -1
  181. nat/profiler/parameter_optimization/__init__.py +0 -0
  182. nat/profiler/parameter_optimization/optimizable_utils.py +93 -0
  183. nat/profiler/parameter_optimization/optimizer_runtime.py +67 -0
  184. nat/profiler/parameter_optimization/parameter_optimizer.py +153 -0
  185. nat/profiler/parameter_optimization/parameter_selection.py +107 -0
  186. nat/profiler/parameter_optimization/pareto_visualizer.py +380 -0
  187. nat/profiler/parameter_optimization/prompt_optimizer.py +384 -0
  188. nat/profiler/parameter_optimization/update_helpers.py +66 -0
  189. nat/profiler/profile_runner.py +14 -9
  190. nat/profiler/utils.py +4 -2
  191. nat/registry_handlers/local/local_handler.py +2 -2
  192. nat/registry_handlers/package_utils.py +1 -2
  193. nat/registry_handlers/pypi/pypi_handler.py +23 -26
  194. nat/registry_handlers/register.py +3 -4
  195. nat/registry_handlers/rest/rest_handler.py +12 -13
  196. nat/retriever/milvus/retriever.py +2 -2
  197. nat/retriever/nemo_retriever/retriever.py +1 -1
  198. nat/retriever/register.py +0 -1
  199. nat/runtime/loader.py +2 -2
  200. nat/runtime/runner.py +3 -2
  201. nat/runtime/session.py +43 -8
  202. nat/settings/global_settings.py +16 -5
  203. nat/tool/chat_completion.py +5 -2
  204. nat/tool/code_execution/local_sandbox/local_sandbox_server.py +3 -3
  205. nat/tool/datetime_tools.py +49 -9
  206. nat/tool/document_search.py +2 -2
  207. nat/tool/github_tools.py +450 -0
  208. nat/tool/nvidia_rag.py +1 -1
  209. nat/tool/register.py +2 -9
  210. nat/tool/retriever.py +3 -2
  211. nat/utils/callable_utils.py +70 -0
  212. nat/utils/data_models/schema_validator.py +3 -3
  213. nat/utils/exception_handlers/automatic_retries.py +104 -51
  214. nat/utils/exception_handlers/schemas.py +1 -1
  215. nat/utils/io/yaml_tools.py +2 -2
  216. nat/utils/log_levels.py +25 -0
  217. nat/utils/reactive/base/observable_base.py +2 -2
  218. nat/utils/reactive/base/observer_base.py +1 -1
  219. nat/utils/reactive/observable.py +2 -2
  220. nat/utils/reactive/observer.py +4 -4
  221. nat/utils/reactive/subscription.py +1 -1
  222. nat/utils/settings/global_settings.py +6 -8
  223. nat/utils/type_converter.py +4 -3
  224. nat/utils/type_utils.py +9 -5
  225. {nvidia_nat-1.3.dev0.dist-info → nvidia_nat-1.3.0rc1.dist-info}/METADATA +49 -21
  226. {nvidia_nat-1.3.dev0.dist-info → nvidia_nat-1.3.0rc1.dist-info}/RECORD +233 -189
  227. {nvidia_nat-1.3.dev0.dist-info → nvidia_nat-1.3.0rc1.dist-info}/entry_points.txt +1 -0
  228. nvidia_nat-1.3.0rc1.dist-info/licenses/LICENSE-3rd-party.txt +5478 -0
  229. {nvidia_nat-1.3.dev0.dist-info → nvidia_nat-1.3.0rc1.dist-info}/top_level.txt +1 -0
  230. nat/cli/commands/info/list_mcp.py +0 -304
  231. nat/tool/github_tools/create_github_commit.py +0 -133
  232. nat/tool/github_tools/create_github_issue.py +0 -87
  233. nat/tool/github_tools/create_github_pr.py +0 -106
  234. nat/tool/github_tools/get_github_file.py +0 -106
  235. nat/tool/github_tools/get_github_issue.py +0 -166
  236. nat/tool/github_tools/get_github_pr.py +0 -256
  237. nat/tool/github_tools/update_github_issue.py +0 -100
  238. nat/tool/mcp/exceptions.py +0 -142
  239. nat/tool/mcp/mcp_client.py +0 -255
  240. nat/tool/mcp/mcp_tool.py +0 -96
  241. nat/utils/exception_handlers/mcp.py +0 -211
  242. nvidia_nat-1.3.dev0.dist-info/licenses/LICENSE-3rd-party.txt +0 -3686
  243. /nat/{tool/github_tools → agent/prompt_optimizer}/__init__.py +0 -0
  244. /nat/{tool/mcp → authentication/credential_validator}/__init__.py +0 -0
  245. {nvidia_nat-1.3.dev0.dist-info → nvidia_nat-1.3.0rc1.dist-info}/WHEEL +0 -0
  246. {nvidia_nat-1.3.dev0.dist-info → nvidia_nat-1.3.0rc1.dist-info}/licenses/LICENSE.md +0 -0
nat/agent/rewoo_agent/prompt.py
@@ -18,33 +18,29 @@ For the following task, make plans that can solve the problem step by step. For
  which external tool together with tool input to retrieve evidence. You can store the evidence into a \
  placeholder #E that can be called by later tools. (Plan, #E1, Plan, #E2, Plan, ...)

- You may ask the human to the following tools:
+ The following tools and respective requirements are available to you:

  {tools}

- The tools should be one of the following: [{tool_names}]
+ The tool calls you make should be one of the following: [{tool_names}]

  You are not required to use all the tools listed. Choose only the ones that best fit the needs of each plan step.

- Your output must be a JSON array where each element represents one planning step. Each step must be an object with
-
+ Your output must be a JSON array where each element represents one planning step. Each step must be an object with \
  exactly two keys:

  1. "plan": A string that describes in detail the action or reasoning for that step.

- 2. "evidence": An object representing the external tool call associated with that plan step. This object must have the
+ 2. "evidence": An object representing the external tool call associated with that plan step. This object must have the \
  following keys:

- -"placeholder": A string that identifies the evidence placeholder (e.g., "#E1", "#E2", etc.). The numbering should
- be sequential based on the order of steps.
+ -"placeholder": A string that identifies the evidence placeholder ("#E1", "#E2", ...). The numbering should \
+ be sequential based on the order of steps.

  -"tool": A string specifying the name of the external tool used.

- -"tool_input": The input to the tool. This can be a string, array, or object, depending on the requirements of the
- tool.
-
- Do not include any additional keys or characters in your output, and do not wrap your response with markdown formatting.
- Your output must be strictly valid JSON.
+ -"tool_input": The input to the tool. This can be a string, array, or object, depending on the requirements of the \
+ tool. Be careful about type assumptions because the output of former tools might contain noise.

  Important instructions:

@@ -58,27 +54,28 @@ Here is an example of how a valid JSON output should look:

  [
  \'{{
- "plan": "Calculate the result of 2023 minus 25.",
+ "plan": "Find Alex's schedule on Sep 25, 2025",
  "evidence": \'{{
  "placeholder": "#E1",
- "tool": "calculator_subtract",
- "tool_input": [2023, 25]
+ "tool": "search_calendar",
+ "tool_input": ("Alex", "09/25/2025")
  }}\'
  }}\',
  \'{{
- "plan": "Retrieve the year represented by the result stored in #E1.",
+ "plan": "Find Bill's schedule on sep 25, 2025",
  "evidence": \'{{
  "placeholder": "#E2",
- "tool": "haystack_chitchat_agent",
- "tool_input": "Response with the result number contained in #E1"
+ "tool": "search_calendar",
+ "tool_input": ("Bill", "09/25/2025")
  }}\'
  }}\',
  \'{{
- "plan": "Search for the CEO of Golden State Warriors in the year stored in #E2.",
+ "plan": "Suggest a time for 1-hour meeting given Alex's and Bill's schedule.",
  "evidence": \'{{
  "placeholder": "#E3",
- "tool": "internet_search",
- "tool_input": "Who was the CEO of Golden State Warriors in the year #E2?"
+ "tool": "llm_chat",
+ "tool_input": "Find a common 1-hour time slot for Alex and Bill given their schedules. \
+ Alex's schedule: #E1; Bill's schedule: #E2?"
  }}\'
  }}\'
  ]
@@ -87,11 +84,14 @@ Begin!
  """

  PLANNER_USER_PROMPT = """
+ Previous conversation history:
+ {chat_history}
+
  task: {task}
  """

  SOLVER_SYSTEM_PROMPT = """
- Solve the following task or problem. To solve the problem, we have made step-by-step Plan and \
+ Solve the following task or problem. To solve the problem, we have made some Plans ahead and \
  retrieved corresponding Evidence to each Plan. Use them with caution since long evidence might \
  contain irrelevant information.

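To see how the reworked planner prompts fit together, here is a minimal sketch (not part of the package) of assembling and filling the planner prompt the way the register module does; the tool listing, history, and task strings below are invented placeholders.

```python
# Illustrative only: build the planner prompt the same way rewoo_agent/register.py does,
# then fill in the template variables. The concrete values below are made up.
from langchain_core.prompts import ChatPromptTemplate

from nat.agent.rewoo_agent.prompt import PLANNER_SYSTEM_PROMPT
from nat.agent.rewoo_agent.prompt import PLANNER_USER_PROMPT

planner_prompt = ChatPromptTemplate([("system", PLANNER_SYSTEM_PROMPT), ("user", PLANNER_USER_PROMPT)])

messages = planner_prompt.format_messages(
    tools="search_calendar: look up a person's calendar for a given date",  # hypothetical tool listing
    tool_names="search_calendar, llm_chat",                                 # hypothetical tool names
    chat_history="",                                # new in this version: prior conversation turns
    task="Suggest a 1-hour meeting time for Alex and Bill on Sep 25, 2025",
)
print(messages[0].content)  # rendered system prompt with the tool list substituted
```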
nat/agent/rewoo_agent/register.py
@@ -17,40 +17,41 @@ import logging

  from pydantic import AliasChoices
  from pydantic import Field
+ from pydantic import PositiveInt

  from nat.builder.builder import Builder
  from nat.builder.framework_enum import LLMFrameworkEnum
  from nat.builder.function_info import FunctionInfo
  from nat.cli.register_workflow import register_function
+ from nat.data_models.agent import AgentBaseConfig
  from nat.data_models.api_server import ChatRequest
  from nat.data_models.api_server import ChatResponse
+ from nat.data_models.component_ref import FunctionGroupRef
  from nat.data_models.component_ref import FunctionRef
- from nat.data_models.component_ref import LLMRef
- from nat.data_models.function import FunctionBaseConfig
  from nat.utils.type_converter import GlobalTypeConverter

  logger = logging.getLogger(__name__)


- class ReWOOAgentWorkflowConfig(FunctionBaseConfig, name="rewoo_agent"):
+ class ReWOOAgentWorkflowConfig(AgentBaseConfig, name="rewoo_agent"):
  """
  Defines a NAT function that uses a ReWOO Agent performs reasoning inbetween tool calls, and utilizes the
  tool names and descriptions to select the optimal tool.
  """
-
- tool_names: list[FunctionRef] = Field(default_factory=list,
- description="The list of tools to provide to the rewoo agent.")
- llm_name: LLMRef = Field(description="The LLM model to use with the rewoo agent.")
- verbose: bool = Field(default=False, description="Set the verbosity of the rewoo agent's logging.")
+ description: str = Field(default="ReWOO Agent Workflow", description="The description of this functions use.")
+ tool_names: list[FunctionRef | FunctionGroupRef] = Field(
+ default_factory=list, description="The list of tools to provide to the rewoo agent.")
  include_tool_input_schema_in_tool_description: bool = Field(
  default=True, description="Specify inclusion of tool input schemas in the prompt.")
- description: str = Field(default="ReWOO Agent Workflow", description="The description of this functions use.")
  planner_prompt: str | None = Field(
  default=None,
  description="Provides the PLANNER_PROMPT to use with the agent") # defaults to PLANNER_PROMPT in prompt.py
  solver_prompt: str | None = Field(
  default=None,
  description="Provides the SOLVER_PROMPT to use with the agent") # defaults to SOLVER_PROMPT in prompt.py
+ tool_call_max_retries: PositiveInt = Field(default=3,
+ description="The number of retries before raising a tool call error.",
+ ge=1)
  max_history: int = Field(default=15, description="Maximum number of messages to keep in the conversation history.")
  use_openai_api: bool = Field(default=False,
  description=("Use OpenAI API for the input/output types to the function. "
@@ -62,15 +63,19 @@ class ReWOOAgentWorkflowConfig(FunctionBaseConfig, name="rewoo_agent"):
  additional_solver_instructions: str | None = Field(
  default=None,
  description="Additional instructions to provide to the agent in addition to the base solver prompt.")
+ raise_tool_call_error: bool = Field(default=True,
+ description="Whether to raise a exception immediately if a tool"
+ "call fails. If set to False, the tool call error message will be included in"
+ "the tool response and passed to the next tool.")


  @register_function(config_type=ReWOOAgentWorkflowConfig, framework_wrappers=[LLMFrameworkEnum.LANGCHAIN])
  async def rewoo_agent_workflow(config: ReWOOAgentWorkflowConfig, builder: Builder):
- from langchain.schema import BaseMessage
  from langchain_core.messages import trim_messages
+ from langchain_core.messages.base import BaseMessage
  from langchain_core.messages.human import HumanMessage
  from langchain_core.prompts import ChatPromptTemplate
- from langgraph.graph.graph import CompiledGraph
+ from langgraph.graph.state import CompiledStateGraph

  from nat.agent.rewoo_agent.prompt import PLANNER_SYSTEM_PROMPT
  from nat.agent.rewoo_agent.prompt import PLANNER_USER_PROMPT
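The config class above gains tool_call_max_retries and raise_tool_call_error, while the common agent fields (llm_name, verbose, log_response_max_chars) now come from the shared AgentBaseConfig. A rough sketch of setting these options programmatically is shown below; the LLM and tool names are hypothetical, and in practice the values come from the workflow YAML rather than direct construction.

```python
# Sketch only, not from the package: exercising the new ReWOO config fields.
# "calendar_llm" and the tool names are hypothetical component references.
from nat.agent.rewoo_agent.register import ReWOOAgentWorkflowConfig

config = ReWOOAgentWorkflowConfig(
    llm_name="calendar_llm",                     # inherited from AgentBaseConfig
    tool_names=["search_calendar", "llm_chat"],  # FunctionRef values
    tool_call_max_retries=3,                     # new: retries before a tool call error is raised
    raise_tool_call_error=False,                 # new: embed tool errors in the tool response instead of raising
    max_history=15,
)
print(config.model_dump(include={"tool_call_max_retries", "raise_tool_call_error"}))
```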
@@ -86,7 +91,7 @@ async def rewoo_agent_workflow(config: ReWOOAgentWorkflowConfig, builder: Builde
  if config.additional_planner_instructions:
  planner_system_prompt += f"{config.additional_planner_instructions}"
  if not ReWOOAgentGraph.validate_planner_prompt(planner_system_prompt):
- logger.exception("Invalid planner prompt")
+ logger.error("Invalid planner prompt")
  raise ValueError("Invalid planner prompt")
  planner_prompt = ChatPromptTemplate([("system", planner_system_prompt), ("user", PLANNER_USER_PROMPT)])

@@ -94,7 +99,7 @@ async def rewoo_agent_workflow(config: ReWOOAgentWorkflowConfig, builder: Builde
  if config.additional_solver_instructions:
  solver_system_prompt += f"{config.additional_solver_instructions}"
  if not ReWOOAgentGraph.validate_solver_prompt(solver_system_prompt):
- logger.exception("Invalid solver prompt")
+ logger.error("Invalid solver prompt")
  raise ValueError("Invalid solver prompt")
  solver_prompt = ChatPromptTemplate([("system", solver_system_prompt), ("user", SOLVER_USER_PROMPT)])

@@ -103,19 +108,34 @@ async def rewoo_agent_workflow(config: ReWOOAgentWorkflowConfig, builder: Builde

  # the agent can run any installed tool, simply install the tool and add it to the config file
  # the sample tool provided can easily be copied or changed
- tools = builder.get_tools(tool_names=config.tool_names, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
+ tools = await builder.get_tools(tool_names=config.tool_names, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
  if not tools:
  raise ValueError(f"No tools specified for ReWOO Agent '{config.llm_name}'")

  # construct the ReWOO Agent Graph from the configured llm, prompt, and tools
- graph: CompiledGraph = await ReWOOAgentGraph(llm=llm,
- planner_prompt=planner_prompt,
- solver_prompt=solver_prompt,
- tools=tools,
- use_tool_schema=config.include_tool_input_schema_in_tool_description,
- detailed_logs=config.verbose).build_graph()
+ graph: CompiledStateGraph = await ReWOOAgentGraph(
+ llm=llm,
+ planner_prompt=planner_prompt,
+ solver_prompt=solver_prompt,
+ tools=tools,
+ use_tool_schema=config.include_tool_input_schema_in_tool_description,
+ detailed_logs=config.verbose,
+ log_response_max_chars=config.log_response_max_chars,
+ tool_call_max_retries=config.tool_call_max_retries,
+ raise_tool_call_error=config.raise_tool_call_error).build_graph()

  async def _response_fn(input_message: ChatRequest) -> ChatResponse:
+ """
+ Main workflow entry function for the ReWOO Agent.
+
+ This function invokes the ReWOO Agent Graph and returns the response.
+
+ Args:
+ input_message (ChatRequest): The input message to process
+
+ Returns:
+ ChatResponse: The response from the agent or error message
+ """
  try:
  # initialize the starting state with the user query
  messages: list[BaseMessage] = trim_messages(messages=[m.model_dump() for m in input_message.messages],
@@ -124,23 +144,24 @@ async def rewoo_agent_workflow(config: ReWOOAgentWorkflowConfig, builder: Builde
  token_counter=len,
  start_on="human",
  include_system=True)
- task = HumanMessage(content=messages[0].content)
- state = ReWOOGraphState(task=task)
+
+ task = HumanMessage(content=messages[-1].content)
+ state = ReWOOGraphState(messages=messages, task=task)

  # run the ReWOO Agent Graph
  state = await graph.ainvoke(state)

  # get and return the output from the state
  state = ReWOOGraphState(**state)
- output_message = state.result.content # pylint: disable=E1101
+ output_message = state.result.content
+ # Ensure output_message is a string
+ if isinstance(output_message, list | dict):
+ output_message = str(output_message)
  return ChatResponse.from_string(output_message)

  except Exception as ex:
- logger.exception("ReWOO Agent failed with exception: %s", ex, exc_info=ex)
- # here, we can implement custom error messages
- if config.verbose:
- return ChatResponse.from_string(str(ex))
- return ChatResponse.from_string("I seem to be having a problem.")
+ logger.exception("ReWOO Agent failed with exception: %s", ex)
+ raise RuntimeError

  if (config.use_openai_api):
  yield FunctionInfo.from_fn(_response_fn, description=config.description)
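The entry function above now trims chat history with trim_messages using token_counter=len, which makes max_tokens behave as a message count rather than a token count, and it takes the most recent human message as the task. A small standalone sketch of that behavior, with invented messages, follows.

```python
# Standalone sketch (invented messages) of the trimming used above:
# with token_counter=len, max_tokens is effectively "keep at most N messages".
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, trim_messages

history = [
    SystemMessage(content="You are a scheduling assistant."),
    HumanMessage(content="What did we decide last week?"),
    AIMessage(content="You agreed to meet on Thursdays."),
    HumanMessage(content="Find a 1-hour slot for Alex and Bill on Sep 25."),
]

trimmed = trim_messages(
    messages=history,
    max_tokens=3,         # counted in messages because token_counter=len
    strategy="last",      # keep the most recent messages
    token_counter=len,
    start_on="human",     # the kept window must start on a human message
    include_system=True,  # always retain the leading system message
)

task = trimmed[-1].content  # the workflow now uses the last (most recent) human message as the task
```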
nat/agent/tool_calling_agent/agent.py
@@ -13,14 +13,19 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- # pylint: disable=R0917
  import logging
+ import typing

  from langchain_core.callbacks.base import AsyncCallbackHandler
  from langchain_core.language_models import BaseChatModel
+ from langchain_core.messages import SystemMessage
+ from langchain_core.messages import ToolMessage
  from langchain_core.messages.base import BaseMessage
- from langchain_core.runnables import RunnableConfig
+ from langchain_core.runnables import RunnableLambda
+ from langchain_core.runnables.config import RunnableConfig
  from langchain_core.tools import BaseTool
+ from langgraph.graph import StateGraph
+ from langgraph.graph.state import CompiledStateGraph
  from langgraph.prebuilt import ToolNode
  from pydantic import BaseModel
  from pydantic import Field
@@ -30,6 +35,9 @@ from nat.agent.base import AGENT_LOG_PREFIX
  from nat.agent.base import AgentDecision
  from nat.agent.dual_node import DualNodeAgent

+ if typing.TYPE_CHECKING:
+ from nat.agent.tool_calling_agent.register import ToolCallAgentWorkflowConfig
+
  logger = logging.getLogger(__name__)


@@ -43,22 +51,58 @@ class ToolCallAgentGraph(DualNodeAgent):
  A tool Calling Agent utilizes the tool input parameters to select the optimal tool. Supports handling tool errors.
  Argument "detailed_logs" toggles logging of inputs, outputs, and intermediate steps."""

- def __init__(self,
- llm: BaseChatModel,
- tools: list[BaseTool],
- callbacks: list[AsyncCallbackHandler] = None,
- detailed_logs: bool = False,
- handle_tool_errors: bool = True):
- super().__init__(llm=llm, tools=tools, callbacks=callbacks, detailed_logs=detailed_logs)
+ def __init__(
+ self,
+ llm: BaseChatModel,
+ tools: list[BaseTool],
+ prompt: str | None = None,
+ callbacks: list[AsyncCallbackHandler] = None,
+ detailed_logs: bool = False,
+ log_response_max_chars: int = 1000,
+ handle_tool_errors: bool = True,
+ return_direct: list[BaseTool] | None = None,
+ ):
+ super().__init__(llm=llm,
+ tools=tools,
+ callbacks=callbacks,
+ detailed_logs=detailed_logs,
+ log_response_max_chars=log_response_max_chars)
+
+ # some LLMs support tool calling
+ # these models accept the tool's input schema and decide when to use a tool based on the input's relevance
+ try:
+ # in tool calling agents, we bind the tools to the LLM, to pass the tools' input schemas at runtime
+ self.bound_llm = llm.bind_tools(tools)
+ except NotImplementedError as ex:
+ logger.error("%s Failed to bind tools: %s", AGENT_LOG_PREFIX, ex)
+ raise
+
+ if prompt is not None:
+ system_prompt = SystemMessage(content=prompt)
+ prompt_runnable = RunnableLambda(
+ lambda state: [system_prompt] + state.get("messages", []),
+ name="SystemPrompt",
+ )
+ else:
+ prompt_runnable = RunnableLambda(
+ lambda state: state.get("messages", []),
+ name="PromptPassthrough",
+ )
+
+ self.agent = prompt_runnable | self.bound_llm
  self.tool_caller = ToolNode(tools, handle_tool_errors=handle_tool_errors)
+ self.return_direct = [tool.name for tool in return_direct] if return_direct else []
  logger.debug("%s Initialized Tool Calling Agent Graph", AGENT_LOG_PREFIX)

  async def agent_node(self, state: ToolCallAgentGraphState):
  try:
- logger.debug('%s Starting the Tool Calling Agent Node', AGENT_LOG_PREFIX)
+ logger.debug("%s Starting the Tool Calling Agent Node", AGENT_LOG_PREFIX)
  if len(state.messages) == 0:
  raise RuntimeError('No input received in state: "messages"')
- response = await self.llm.ainvoke(state.messages, config=RunnableConfig(callbacks=self.callbacks))
+ response = await self.agent.ainvoke(
+ {"messages": state.messages},
+ config=RunnableConfig(callbacks=self.callbacks),
+ )
  if self.detailed_logs:
  agent_input = "\n".join(str(message.content) for message in state.messages)
  logger.info(AGENT_CALL_LOG_MESSAGE, agent_input, response)
@@ -66,8 +110,8 @@ class ToolCallAgentGraph(DualNodeAgent):
  state.messages += [response]
  return state
  except Exception as ex:
- logger.exception("%s Failed to call agent_node: %s", AGENT_LOG_PREFIX, ex, exc_info=True)
- raise ex
+ logger.error("%s Failed to call agent_node: %s", AGENT_LOG_PREFIX, ex)
+ raise

  async def conditional_edge(self, state: ToolCallAgentGraphState):
  try:
@@ -75,16 +119,13 @@ class ToolCallAgentGraph(DualNodeAgent):
  last_message = state.messages[-1]
  if last_message.tool_calls:
  # the agent wants to call a tool
- logger.debug('%s Agent is calling a tool', AGENT_LOG_PREFIX)
+ logger.debug("%s Agent is calling a tool", AGENT_LOG_PREFIX)
  return AgentDecision.TOOL
  if self.detailed_logs:
  logger.debug("%s Final answer:\n%s", AGENT_LOG_PREFIX, state.messages[-1].content)
  return AgentDecision.END
  except Exception as ex:
- logger.exception("%s Failed to determine whether agent is calling a tool: %s",
- AGENT_LOG_PREFIX,
- ex,
- exc_info=True)
+ logger.exception("%s Failed to determine whether agent is calling a tool: %s", AGENT_LOG_PREFIX, ex)
  logger.warning("%s Ending graph traversal", AGENT_LOG_PREFIX)
  return AgentDecision.END

@@ -92,28 +133,114 @@ class ToolCallAgentGraph(DualNodeAgent):
  try:
  logger.debug("%s Starting Tool Node", AGENT_LOG_PREFIX)
  tool_calls = state.messages[-1].tool_calls
- tools = [tool.get('name') for tool in tool_calls]
+ tools = [tool.get("name") for tool in tool_calls]
  tool_input = state.messages[-1]
- tool_response = await self.tool_caller.ainvoke(input={"messages": [tool_input]},
- config=RunnableConfig(callbacks=self.callbacks,
- configurable={}))
+ tool_response = await self.tool_caller.ainvoke(
+ input={"messages": [tool_input]},
+ config=RunnableConfig(callbacks=self.callbacks, configurable={}),
+ )
  # this configurable = {} argument is needed due to a bug in LangGraph PreBuilt ToolNode ^

- for response in tool_response.get('messages'):
+ for response in tool_response.get("messages"):
  if self.detailed_logs:
  self._log_tool_response(str(tools), str(tool_input), response.content)
  state.messages += [response]

  return state
  except Exception as ex:
- logger.exception("%s Failed to call tool_node: %s", AGENT_LOG_PREFIX, ex, exc_info=ex)
- raise ex
+ logger.error("%s Failed to call tool_node: %s", AGENT_LOG_PREFIX, ex)
+ raise
+
+ async def tool_conditional_edge(self, state: ToolCallAgentGraphState) -> AgentDecision:
+ """
+ Determines whether to continue to the agent or end graph execution after a tool call.
+
+ Args:
+ state: The current state of the Tool Calling Agent graph containing messages and tool responses.
+
+ Returns:
+ AgentDecision: TOOL to continue to agent for processing, or END to terminate graph execution.
+ Returns END if the tool is in return_direct list, otherwise returns TOOL to continue processing.
+ """
+ try:
+ logger.debug("%s Starting the Tool Conditional Edge", AGENT_LOG_PREFIX)
+ if not state.messages:
+ logger.debug("%s No messages in state; routing to agent", AGENT_LOG_PREFIX)
+ return AgentDecision.TOOL
+
+ last_message = state.messages[-1]
+ # Return directly if this tool is in the return_direct set
+ if (self.return_direct and isinstance(last_message, ToolMessage) and last_message.name
+ and last_message.name in self.return_direct):
+ # Return directly if this tool is in the return_direct list
+ logger.debug("%s Tool %s is set to return directly", AGENT_LOG_PREFIX, last_message.name)
+ return AgentDecision.END
+ else:
+ # Continue to agent for processing
+ logger.debug("%s Tool response will be processed by agent", AGENT_LOG_PREFIX)
+ return AgentDecision.TOOL
+ except Exception as ex:
+ logger.exception("%s Failed to determine tool conditional edge: %s", AGENT_LOG_PREFIX, ex)
+ logger.warning("%s Continuing to agent for processing", AGENT_LOG_PREFIX)
+ return AgentDecision.TOOL

- async def build_graph(self):
+ async def _build_graph(self, state_schema: type) -> CompiledStateGraph:
  try:
- await super()._build_graph(state_schema=ToolCallAgentGraphState)
+ logger.debug("%s Building and compiling the Tool Calling Agent Graph", AGENT_LOG_PREFIX)
+
+ graph = StateGraph(state_schema)
+ graph.add_node("agent", self.agent_node)
+ graph.add_node("tool", self.tool_node)
+
+ if self.return_direct:
+ # go to end of graph if tool is set to return directly
+ tool_conditional_edge_possible_outputs = {AgentDecision.END: "__end__", AgentDecision.TOOL: "agent"}
+ graph.add_conditional_edges("tool", self.tool_conditional_edge, tool_conditional_edge_possible_outputs)
+ else:
+ # otherwise return to agent after tool call
+ graph.add_edge("tool", "agent")
+
+ conditional_edge_possible_outputs = {AgentDecision.TOOL: "tool", AgentDecision.END: "__end__"}
+ graph.add_conditional_edges("agent", self.conditional_edge, conditional_edge_possible_outputs)
+
+ graph.set_entry_point("agent")
+ self.graph = graph.compile()
+
+ return self.graph
+ except Exception as ex:
+ logger.error("%s Failed to build Tool Calling Agent Graph: %s", AGENT_LOG_PREFIX, ex)
+ raise
+
+ async def build_graph(self) -> CompiledStateGraph:
+ try:
+ await self._build_graph(state_schema=ToolCallAgentGraphState)
  logger.debug("%s Tool Calling Agent Graph built and compiled successfully", AGENT_LOG_PREFIX)
  return self.graph
  except Exception as ex:
- logger.exception("%s Failed to build Tool Calling Agent Graph: %s", AGENT_LOG_PREFIX, ex, exc_info=ex)
- raise ex
+ logger.error("%s Failed to build Tool Calling Agent Graph: %s", AGENT_LOG_PREFIX, ex)
+ raise
+
+
+ def create_tool_calling_agent_prompt(config: "ToolCallAgentWorkflowConfig") -> str | None:
+ """
+ Create a Tool Calling Agent prompt from the config.
+
+ Args:
+ config (ToolCallAgentWorkflowConfig): The config to use for the prompt.
+
+ Returns:
+ ChatPromptTemplate: The Tool Calling Agent prompt.
+ """
+ # the Tool Calling Agent prompt can be customized via config option system_prompt and additional_instructions.
+
+ if config.system_prompt:
+ prompt_str = config.system_prompt
+ else:
+ prompt_str = ""
+
+ if config.additional_instructions:
+ prompt_str += f" {config.additional_instructions}"
+
+ if len(prompt_str) > 0:
+ return prompt_str
+ return None
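Two ideas in the agent.py changes above are worth isolating: the optional system prompt is injected by piping a RunnableLambda in front of the tool-bound LLM, and return_direct is a list of tool names that end the graph right after the tool runs. The sketch below reproduces only the prompt composition with plain LangChain pieces; the system prompt text and tool name are invented, and the bound LLM is elided.

```python
# Minimal sketch (invented values) of the prompt composition used by ToolCallAgentGraph:
# a RunnableLambda prepends the configured system prompt to the state's messages,
# and the tool-bound LLM is piped after it (agent = prompt_runnable | llm.bind_tools(tools)).
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.runnables import RunnableLambda

system_prompt = SystemMessage(content="You are a scheduling assistant.")  # stands in for config.system_prompt

prompt_runnable = RunnableLambda(
    lambda state: [system_prompt] + state.get("messages", []),
    name="SystemPrompt",
)

state = {"messages": [HumanMessage(content="What is on Alex's calendar on Sep 25?")]}
print(prompt_runnable.invoke(state))
# -> [SystemMessage(...), HumanMessage(...)]; with return_direct, a ToolMessage whose name is
#    in that list would instead end the graph without another LLM pass.
```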
nat/agent/tool_calling_agent/register.py
@@ -21,66 +21,87 @@ from nat.builder.builder import Builder
  from nat.builder.framework_enum import LLMFrameworkEnum
  from nat.builder.function_info import FunctionInfo
  from nat.cli.register_workflow import register_function
+ from nat.data_models.agent import AgentBaseConfig
+ from nat.data_models.api_server import ChatRequest
+ from nat.data_models.component_ref import FunctionGroupRef
  from nat.data_models.component_ref import FunctionRef
- from nat.data_models.component_ref import LLMRef
- from nat.data_models.function import FunctionBaseConfig

  logger = logging.getLogger(__name__)


- class ToolCallAgentWorkflowConfig(FunctionBaseConfig, name="tool_calling_agent"):
+ class ToolCallAgentWorkflowConfig(AgentBaseConfig, name="tool_calling_agent"):
  """
  A Tool Calling Agent requires an LLM which supports tool calling. A tool Calling Agent utilizes the tool
  input parameters to select the optimal tool. Supports handling tool errors.
  """
-
- tool_names: list[FunctionRef] = Field(default_factory=list,
- description="The list of tools to provide to the tool calling agent.")
- llm_name: LLMRef = Field(description="The LLM model to use with the tool calling agent.")
- verbose: bool = Field(default=False, description="Set the verbosity of the tool calling agent's logging.")
- handle_tool_errors: bool = Field(default=True, description="Specify ability to handle tool calling errors.")
  description: str = Field(default="Tool Calling Agent Workflow", description="Description of this functions use.")
+ tool_names: list[FunctionRef | FunctionGroupRef] = Field(
+ default_factory=list, description="The list of tools to provide to the tool calling agent.")
+ handle_tool_errors: bool = Field(default=True, description="Specify ability to handle tool calling errors.")
  max_iterations: int = Field(default=15, description="Number of tool calls before stoping the tool calling agent.")
+ max_history: int = Field(default=15, description="Maximum number of messages to keep in the conversation history.")
+
+ system_prompt: str | None = Field(default=None, description="Provides the system prompt to use with the agent.")
+ additional_instructions: str | None = Field(default=None,
+ description="Additional instructions appended to the system prompt.")
+ return_direct: list[FunctionRef] | None = Field(
+ default=None, description="List of tool names that should return responses directly without LLM processing.")


  @register_function(config_type=ToolCallAgentWorkflowConfig, framework_wrappers=[LLMFrameworkEnum.LANGCHAIN])
  async def tool_calling_agent_workflow(config: ToolCallAgentWorkflowConfig, builder: Builder):
- from langchain_core.messages.human import HumanMessage
- from langgraph.graph.graph import CompiledGraph
+ from langchain_core.messages import trim_messages
+ from langchain_core.messages.base import BaseMessage
+ from langgraph.graph.state import CompiledStateGraph

  from nat.agent.base import AGENT_LOG_PREFIX
+ from nat.agent.tool_calling_agent.agent import ToolCallAgentGraph
+ from nat.agent.tool_calling_agent.agent import ToolCallAgentGraphState
+ from nat.agent.tool_calling_agent.agent import create_tool_calling_agent_prompt

- from .agent import ToolCallAgentGraph
- from .agent import ToolCallAgentGraphState
-
+ prompt = create_tool_calling_agent_prompt(config)
  # we can choose an LLM for the ReAct agent in the config file
  llm = await builder.get_llm(config.llm_name, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
  # the agent can run any installed tool, simply install the tool and add it to the config file
  # the sample tools provided can easily be copied or changed
- tools = builder.get_tools(tool_names=config.tool_names, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
+ tools = await builder.get_tools(tool_names=config.tool_names, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
  if not tools:
  raise ValueError(f"No tools specified for Tool Calling Agent '{config.llm_name}'")

- # some LLMs support tool calling
- # these models accept the tool's input schema and decide when to use a tool based on the input's relevance
- try:
- # in tool calling agents, we bind the tools to the LLM, to pass the tools' input schemas at runtime
- llm = llm.bind_tools(tools)
- except NotImplementedError as ex:
- logger.error("%s Failed to bind tools: %s", AGENT_LOG_PREFIX, ex, exc_info=True)
- raise ex
+ # convert return_direct FunctionRef objects to BaseTool objects
+ return_direct_tools = await builder.get_tools(
+ tool_names=config.return_direct, wrapper_type=LLMFrameworkEnum.LANGCHAIN) if config.return_direct else None

  # construct the Tool Calling Agent Graph from the configured llm, and tools
- graph: CompiledGraph = await ToolCallAgentGraph(llm=llm,
- tools=tools,
- detailed_logs=config.verbose,
- handle_tool_errors=config.handle_tool_errors).build_graph()
-
- async def _response_fn(input_message: str) -> str:
+ graph: CompiledStateGraph = await ToolCallAgentGraph(llm=llm,
+ tools=tools,
+ prompt=prompt,
+ detailed_logs=config.verbose,
+ log_response_max_chars=config.log_response_max_chars,
+ handle_tool_errors=config.handle_tool_errors,
+ return_direct=return_direct_tools).build_graph()
+
+ async def _response_fn(input_message: ChatRequest) -> str:
+ """
+ Main workflow entry function for the Tool Calling Agent.
+
+ This function invokes the Tool Calling Agent Graph and returns the response.
+
+ Args:
+ input_message (ChatRequest): The input message to process
+
+ Returns:
+ str: The response from the agent or error message
+ """
  try:
  # initialize the starting state with the user query
- input_message = HumanMessage(content=input_message)
- state = ToolCallAgentGraphState(messages=[input_message])
+ messages: list[BaseMessage] = trim_messages(messages=[m.model_dump() for m in input_message.messages],
+ max_tokens=config.max_history,
+ strategy="last",
+ token_counter=len,
+ start_on="human",
+ include_system=True)
+ state = ToolCallAgentGraphState(messages=messages)

  # run the Tool Calling Agent Graph
  state = await graph.ainvoke(state, config={'recursion_limit': (config.max_iterations + 1) * 2})
@@ -90,17 +111,15 @@ async def tool_calling_agent_workflow(config: ToolCallAgentWorkflowConfig, build

  # get and return the output from the state
  state = ToolCallAgentGraphState(**state)
- output_message = state.messages[-1] # pylint: disable=E1136
- return output_message.content
+ output_message = state.messages[-1]
+ return str(output_message.content)
  except Exception as ex:
- logger.exception("%s Tool Calling Agent failed with exception: %s", AGENT_LOG_PREFIX, ex, exc_info=ex)
- if config.verbose:
- return str(ex)
- return "I seem to be having a problem."
+ logger.exception("%s Tool Calling Agent failed with exception: %s", AGENT_LOG_PREFIX, ex)
+ raise RuntimeError

  try:
  yield FunctionInfo.from_fn(_response_fn, description=config.description)
  except GeneratorExit:
- logger.exception("%s Workflow exited early!", AGENT_LOG_PREFIX, exc_info=True)
+ logger.exception("%s Workflow exited early!", AGENT_LOG_PREFIX)
  finally:
  logger.debug("%s Cleaning up react_agent workflow.", AGENT_LOG_PREFIX)