nvidia-nat 1.3.0a20250910__py3-none-any.whl → 1.3.0a20250922__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114)
  1. nat/agent/base.py +9 -4
  2. nat/agent/prompt_optimizer/prompt.py +68 -0
  3. nat/agent/prompt_optimizer/register.py +149 -0
  4. nat/agent/react_agent/agent.py +1 -1
  5. nat/agent/react_agent/register.py +17 -14
  6. nat/agent/reasoning_agent/reasoning_agent.py +9 -7
  7. nat/agent/register.py +1 -0
  8. nat/agent/rewoo_agent/agent.py +9 -2
  9. nat/agent/rewoo_agent/register.py +16 -12
  10. nat/agent/tool_calling_agent/agent.py +69 -7
  11. nat/agent/tool_calling_agent/register.py +14 -13
  12. nat/authentication/credential_validator/__init__.py +14 -0
  13. nat/authentication/credential_validator/bearer_token_validator.py +557 -0
  14. nat/authentication/oauth2/oauth2_resource_server_config.py +124 -0
  15. nat/builder/builder.py +27 -4
  16. nat/builder/component_utils.py +7 -3
  17. nat/builder/context.py +28 -6
  18. nat/builder/function.py +313 -0
  19. nat/builder/function_info.py +1 -1
  20. nat/builder/workflow.py +5 -0
  21. nat/builder/workflow_builder.py +215 -16
  22. nat/cli/commands/optimize.py +90 -0
  23. nat/cli/commands/workflow/templates/config.yml.j2 +0 -1
  24. nat/cli/commands/workflow/workflow_commands.py +4 -7
  25. nat/cli/entrypoint.py +4 -9
  26. nat/cli/register_workflow.py +38 -4
  27. nat/cli/type_registry.py +71 -0
  28. nat/control_flow/__init__.py +0 -0
  29. nat/control_flow/register.py +20 -0
  30. nat/control_flow/router_agent/__init__.py +0 -0
  31. nat/control_flow/router_agent/agent.py +329 -0
  32. nat/control_flow/router_agent/prompt.py +48 -0
  33. nat/control_flow/router_agent/register.py +91 -0
  34. nat/control_flow/sequential_executor.py +167 -0
  35. nat/data_models/agent.py +34 -0
  36. nat/data_models/authentication.py +38 -0
  37. nat/data_models/component.py +2 -0
  38. nat/data_models/component_ref.py +11 -0
  39. nat/data_models/config.py +40 -16
  40. nat/data_models/function.py +34 -0
  41. nat/data_models/function_dependencies.py +8 -0
  42. nat/data_models/optimizable.py +119 -0
  43. nat/data_models/optimizer.py +149 -0
  44. nat/data_models/temperature_mixin.py +4 -3
  45. nat/data_models/top_p_mixin.py +4 -3
  46. nat/embedder/nim_embedder.py +1 -1
  47. nat/embedder/openai_embedder.py +1 -1
  48. nat/eval/config.py +1 -1
  49. nat/eval/evaluate.py +5 -1
  50. nat/eval/register.py +4 -0
  51. nat/eval/runtime_evaluator/__init__.py +14 -0
  52. nat/eval/runtime_evaluator/evaluate.py +123 -0
  53. nat/eval/runtime_evaluator/register.py +100 -0
  54. nat/experimental/test_time_compute/functions/plan_select_execute_function.py +5 -1
  55. nat/front_ends/fastapi/dask_client_mixin.py +65 -0
  56. nat/front_ends/fastapi/fastapi_front_end_config.py +18 -3
  57. nat/front_ends/fastapi/fastapi_front_end_plugin.py +134 -3
  58. nat/front_ends/fastapi/fastapi_front_end_plugin_worker.py +243 -228
  59. nat/front_ends/fastapi/job_store.py +518 -99
  60. nat/front_ends/fastapi/main.py +11 -19
  61. nat/front_ends/fastapi/utils.py +57 -0
  62. nat/front_ends/mcp/introspection_token_verifier.py +73 -0
  63. nat/front_ends/mcp/mcp_front_end_config.py +5 -1
  64. nat/front_ends/mcp/mcp_front_end_plugin.py +37 -11
  65. nat/front_ends/mcp/mcp_front_end_plugin_worker.py +111 -3
  66. nat/front_ends/mcp/tool_converter.py +3 -0
  67. nat/llm/aws_bedrock_llm.py +14 -3
  68. nat/llm/nim_llm.py +14 -3
  69. nat/llm/openai_llm.py +8 -1
  70. nat/observability/exporter/processing_exporter.py +29 -55
  71. nat/observability/mixin/redaction_config_mixin.py +5 -4
  72. nat/observability/mixin/tagging_config_mixin.py +26 -14
  73. nat/observability/mixin/type_introspection_mixin.py +420 -107
  74. nat/observability/processor/processor.py +3 -0
  75. nat/observability/processor/redaction/__init__.py +24 -0
  76. nat/observability/processor/redaction/contextual_redaction_processor.py +125 -0
  77. nat/observability/processor/redaction/contextual_span_redaction_processor.py +66 -0
  78. nat/observability/processor/redaction/redaction_processor.py +177 -0
  79. nat/observability/processor/redaction/span_header_redaction_processor.py +92 -0
  80. nat/observability/processor/span_tagging_processor.py +21 -14
  81. nat/profiler/decorators/framework_wrapper.py +9 -6
  82. nat/profiler/parameter_optimization/__init__.py +0 -0
  83. nat/profiler/parameter_optimization/optimizable_utils.py +93 -0
  84. nat/profiler/parameter_optimization/optimizer_runtime.py +67 -0
  85. nat/profiler/parameter_optimization/parameter_optimizer.py +153 -0
  86. nat/profiler/parameter_optimization/parameter_selection.py +108 -0
  87. nat/profiler/parameter_optimization/pareto_visualizer.py +380 -0
  88. nat/profiler/parameter_optimization/prompt_optimizer.py +384 -0
  89. nat/profiler/parameter_optimization/update_helpers.py +66 -0
  90. nat/profiler/utils.py +3 -1
  91. nat/tool/chat_completion.py +4 -1
  92. nat/tool/github_tools.py +450 -0
  93. nat/tool/register.py +2 -7
  94. nat/utils/callable_utils.py +70 -0
  95. nat/utils/exception_handlers/automatic_retries.py +103 -48
  96. nat/utils/log_levels.py +25 -0
  97. nat/utils/type_utils.py +4 -0
  98. {nvidia_nat-1.3.0a20250910.dist-info → nvidia_nat-1.3.0a20250922.dist-info}/METADATA +10 -1
  99. {nvidia_nat-1.3.0a20250910.dist-info → nvidia_nat-1.3.0a20250922.dist-info}/RECORD +105 -76
  100. {nvidia_nat-1.3.0a20250910.dist-info → nvidia_nat-1.3.0a20250922.dist-info}/entry_points.txt +1 -0
  101. nat/observability/processor/header_redaction_processor.py +0 -123
  102. nat/observability/processor/redaction_processor.py +0 -77
  103. nat/tool/github_tools/create_github_commit.py +0 -133
  104. nat/tool/github_tools/create_github_issue.py +0 -87
  105. nat/tool/github_tools/create_github_pr.py +0 -106
  106. nat/tool/github_tools/get_github_file.py +0 -106
  107. nat/tool/github_tools/get_github_issue.py +0 -166
  108. nat/tool/github_tools/get_github_pr.py +0 -256
  109. nat/tool/github_tools/update_github_issue.py +0 -100
  110. /nat/{tool/github_tools → agent/prompt_optimizer}/__init__.py +0 -0
  111. {nvidia_nat-1.3.0a20250910.dist-info → nvidia_nat-1.3.0a20250922.dist-info}/WHEEL +0 -0
  112. {nvidia_nat-1.3.0a20250910.dist-info → nvidia_nat-1.3.0a20250922.dist-info}/licenses/LICENSE-3rd-party.txt +0 -0
  113. {nvidia_nat-1.3.0a20250910.dist-info → nvidia_nat-1.3.0a20250922.dist-info}/licenses/LICENSE.md +0 -0
  114. {nvidia_nat-1.3.0a20250910.dist-info → nvidia_nat-1.3.0a20250922.dist-info}/top_level.txt +0 -0
nat/agent/base.py CHANGED
@@ -27,6 +27,7 @@ from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import AIMessage
 from langchain_core.messages import BaseMessage
 from langchain_core.messages import ToolMessage
+from langchain_core.runnables import Runnable
 from langchain_core.runnables import RunnableConfig
 from langchain_core.tools import BaseTool
 from langgraph.graph.state import CompiledStateGraph
@@ -107,21 +108,25 @@ class BaseAgent(ABC):
 
         return AIMessage(content=output_message)
 
-    async def _call_llm(self, messages: list[BaseMessage]) -> AIMessage:
+    async def _call_llm(self, llm: Runnable, inputs: dict[str, Any], config: RunnableConfig | None = None) -> AIMessage:
         """
         Call the LLM directly. Retry logic is handled automatically by the underlying LLM client.
 
         Parameters
         ----------
-        messages : list[BaseMessage]
-            The messages to send to the LLM
+        llm : Runnable
+            The LLM runnable (prompt | llm or similar)
+        inputs : dict[str, Any]
+            The inputs to pass to the runnable
+        config : RunnableConfig | None
+            The config to pass to the runnable (should include callbacks)
 
         Returns
         -------
         AIMessage
             The LLM response
         """
-        response = await self.llm.ainvoke(messages)
+        response = await llm.ainvoke(inputs, config=config)
         return AIMessage(content=str(response.content))
 
     async def _call_tool(self,
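The _call_llm signature change moves ownership of the runnable to the caller: an agent now hands over a composed prompt | llm chain plus template inputs and a callback-carrying config, instead of a pre-built message list. A minimal sketch of the new call shape, assuming an agent instance exposing the llm and callbacks attributes that BaseAgent holds (the prompt content and helper name are illustrative, not code from the package):

    from langchain_core.prompts import ChatPromptTemplate
    from langchain_core.runnables import RunnableConfig

    async def ask(agent, user_message: str) -> str:
        # Compose the runnable on the caller's side, per the new signature.
        prompt = ChatPromptTemplate.from_messages([("system", "You are a helpful agent."),
                                                   ("human", "{input}")])
        # The agent invokes prompt | llm with the template inputs; callbacks now
        # travel through the RunnableConfig instead of a pre-built message list.
        response = await agent._call_llm(llm=prompt | agent.llm,
                                         inputs={"input": user_message},
                                         config=RunnableConfig(callbacks=agent.callbacks))
        return response.content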
nat/agent/prompt_optimizer/prompt.py ADDED
@@ -0,0 +1,68 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# flake8: noqa W291
+
+mutator_prompt = """
+
+## CORE DIRECTIVES
+- **Preserve the original objective and task.** Do not change what the prompt is meant to accomplish.
+- **Keep the intent intact.** The improved prompt must solve the same problem as the original.
+- **Do not invent new goals.** Only improve clarity, structure, constraints, and usability.
+- **Do not drop critical instructions.** Everything essential from the original prompt must remain.
+- **Return only the mutated prompt text.** No rationale, no diffs, no explanations.
+- **Be creative within bounds.** You may rephrase, reorganize, and enhance, but not alter meaning.
+- **DO NOT use curly braces in your prompt** for anything other than existing variables in the prompt as the string
+will be treated as an f-string.
+- **Examples are a good idea** if the original prompt lacks them. They help clarify expected output.
+
+---
+
+## IMPROVEMENT HINTS
+When modifying, apply these principles:
+1. **Clarity & Precision** – remove vague language, strengthen directives.
+2. **Structure & Flow** – order sections as: *Objective → Constraints → Tools → Steps → Output Schema → Examples*.
+3. **Schema Adherence** – enforce a single canonical output schema (JSON/XML) with `schema_version`.
+4. **Tool Governance** – clarify when/how tools are used, their inputs/outputs, and fallback behavior.
+5. **Error Handling** – specify behavior if tools fail or inputs are insufficient.
+6. **Budget Awareness** – minimize verbosity, respect token/latency limits.
+7. **Safety** – include refusals for unsafe requests, enforce compliance with rules.
+8. **Consistency** – avoid format drift; always maintain the same schema.
+9. **Integrity** – confirm the task, objective, and intent are preserved.
+
+---
+
+## MUTATION OPERATORS
+You may:
+- **Tighten** (remove fluff, redundancies)
+- **Reorder** (improve logical flow)
+- **Constrain** (add explicit rules/limits)
+- **Harden** (improve error handling/fallbacks)
+- **Defuse** (replace ambiguous verbs with measurable actions)
+- **Format-lock** (wrap outputs in JSON/XML fenced blocks)
+- **Example-ify** (add examples if missing or weak)
+
+---
+
+## INPUT
+Here is the prompt to mutate:
+{original_prompt}
+
+## OBJECTIVE
+The prompt must achieve the following objective:
+{objective}
+
+The modified prompt is: \n
+
+"""
nat/agent/prompt_optimizer/register.py ADDED
@@ -0,0 +1,149 @@
+# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from pydantic import Field
+
+from nat.builder.builder import Builder
+from nat.builder.framework_enum import LLMFrameworkEnum
+from nat.builder.function_info import FunctionInfo
+from nat.cli.register_workflow import register_function
+from nat.data_models.component_ref import LLMRef
+from nat.data_models.function import FunctionBaseConfig
+from nat.profiler.parameter_optimization.prompt_optimizer import PromptOptimizerInputSchema
+
+
+class PromptOptimizerConfig(FunctionBaseConfig, name="prompt_init"):
+
+    optimizer_llm: LLMRef = Field(description="LLM to use for prompt optimization")
+    optimizer_prompt: str = Field(
+        description="Prompt template for the optimizer",
+        default=(
+            "You are an expert at optimizing prompts for LLMs. "
+            "Your task is to take a given prompt and suggest an optimized version of it. "
+            "Note that the prompt might be a template with variables and curly braces. Remember to always keep the "
+            "variables and curly braces in the prompt the same. Only modify the instructions in the prompt that are "
+            "not variables. The system is meant to achieve the following objective\n"
+            "{system_objective}\n Of which, the prompt is one part. The details of the prompt and context are below.\n"))
+    system_objective: str = Field(description="Objective of the workflow")
+
+
+@register_function(config_type=PromptOptimizerConfig, framework_wrappers=[LLMFrameworkEnum.LANGCHAIN])
+async def prompt_optimizer_function(config: PromptOptimizerConfig, builder: Builder):
+    """
+    Function to optimize prompts for LLMs.
+    """
+
+    try:
+        from langchain_core.prompts import PromptTemplate
+
+        from .prompt import mutator_prompt
+    except ImportError as exc:
+        raise ImportError("langchain-core is not installed. Please install it to use the prompt optimizer.\n"
+                          "This error can be resolved by installing nvidia-nat[langchain]") from exc
+
+    llm = await builder.get_llm(config.optimizer_llm, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
+
+    template = PromptTemplate(template=config.optimizer_prompt,
+                              input_variables=["system_objective"],
+                              validate_template=True)
+
+    base_prompt: str = (await template.ainvoke(input={"system_objective": config.system_objective})).to_string()
+    prompt_extension_template = PromptTemplate(template=mutator_prompt,
+                                               input_variables=["original_prompt", "objective"],
+                                               validate_template=True)
+
+    async def _inner(input_message: PromptOptimizerInputSchema) -> str:
+        """
+        Optimize the prompt using the provided LLM.
+        """
+
+        original_prompt = input_message.original_prompt
+        prompt_objective = input_message.objective
+
+        prompt_extension = (await prompt_extension_template.ainvoke(input={
+            "original_prompt": original_prompt,
+            "objective": prompt_objective,
+        })).to_string()
+
+        prompt = f"{base_prompt}\n\n{prompt_extension}"
+
+        optimized_prompt = await llm.ainvoke(prompt)
+        return optimized_prompt.content
+
+    yield FunctionInfo.from_fn(
+        fn=_inner,
+        description="Optimize prompts for LLMs using a feedback LLM.",
+    )
+
+
+class PromptRecombinerConfig(FunctionBaseConfig, name="prompt_recombiner"):
+
+    optimizer_llm: LLMRef = Field(description="LLM to use for prompt recombination")
+    optimizer_prompt: str = Field(
+        description="Prompt template for the recombiner",
+        default=("You are an expert at combining prompt instructions for LLMs. "
+                 "Your task is to merge two prompts for the same objective into a single, stronger prompt. "
+                 "Do not introduce new variables or modify existing placeholders."),
+    )
+    system_objective: str = Field(description="Objective of the workflow")
+
+
+@register_function(config_type=PromptRecombinerConfig, framework_wrappers=[LLMFrameworkEnum.LANGCHAIN])
+async def prompt_recombiner_function(config: PromptRecombinerConfig, builder: Builder):
+    """
+    Function to recombine two parent prompts into a child prompt using the optimizer LLM.
+    Uses the same base template and objective instructions.
+    """
+
+    try:
+        from langchain_core.prompts import PromptTemplate
+    except ImportError as exc:
+        raise ImportError("langchain-core is not installed. Please install it to use the prompt recombiner.\n"
+                          "This error can be resolved by installing nvidia-nat[langchain].") from exc
+
+    llm = await builder.get_llm(config.optimizer_llm, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
+
+    template = PromptTemplate(template=config.optimizer_prompt,
+                              input_variables=["system_objective"],
+                              validate_template=True)
+
+    base_prompt: str = (await template.ainvoke(input={"system_objective": config.system_objective})).to_string()
+
+    class RecombineSchema(PromptOptimizerInputSchema):
+        parent_b: str | None = None
+
+    async def _inner(input_message: RecombineSchema) -> str:
+        parent_a = input_message.original_prompt
+        parent_b = input_message.parent_b or ""
+        prompt_objective = input_message.objective
+
+        prompt = (
+            f"{base_prompt}\n\n"
+            "We are performing genetic recombination between two prompts that satisfy the same objective.\n"
+            f"Objective: {prompt_objective}\n\n"
+            f"Parent A:\n{parent_a}\n\n"
+            f"Parent B:\n{parent_b}\n\n"
+            "Combine the strongest instructions and phrasing from both parents to produce a single, coherent child "
+            "prompt.\n"
+            "Maintain variables and placeholders unchanged.\n"
+            "Return only the child prompt text, with no additional commentary.")
+
+        child_prompt = await llm.ainvoke(prompt)
+        return child_prompt.content
+
+    yield FunctionInfo.from_fn(
+        fn=_inner,
+        description="Recombine two prompts into a stronger child prompt.",
+    )
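Both registered functions consume a PromptOptimizerInputSchema; the recombiner's RecombineSchema merely adds parent_b on top. A sketch of the payloads, with field names inferred from the attribute accesses in the two _inner functions above and illustrative values:

    from nat.profiler.parameter_optimization.prompt_optimizer import PromptOptimizerInputSchema

    # prompt_init: mutate one prompt toward the stated objective.
    mutate_request = PromptOptimizerInputSchema(
        original_prompt="You are a support agent. Answer the user's question.",
        objective="Resolve customer tickets accurately and concisely.",
    )

    # prompt_recombiner: RecombineSchema extends the same schema with parent_b,
    # so a second parent prompt rides along in the request.
    recombine_payload = {
        "original_prompt": "Parent A prompt text",
        "objective": "Resolve customer tickets accurately and concisely.",
        "parent_b": "Parent B prompt text",
    }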
nat/agent/react_agent/agent.py CHANGED
@@ -360,7 +360,7 @@ class ReActAgentGraph(DualNodeAgent):
         if errors:
             error_text = "\n".join(errors)
             logger.error("%s %s", AGENT_LOG_PREFIX, error_text)
-            raise ValueError(error_text)
+            return False
         return True
 
 
nat/agent/react_agent/register.py CHANGED
@@ -17,32 +17,32 @@ import logging
 
 from pydantic import AliasChoices
 from pydantic import Field
-from pydantic import PositiveInt
 
 from nat.builder.builder import Builder
 from nat.builder.framework_enum import LLMFrameworkEnum
 from nat.builder.function_info import FunctionInfo
 from nat.cli.register_workflow import register_function
+from nat.data_models.agent import AgentBaseConfig
 from nat.data_models.api_server import ChatRequest
 from nat.data_models.api_server import ChatResponse
+from nat.data_models.component_ref import FunctionGroupRef
 from nat.data_models.component_ref import FunctionRef
-from nat.data_models.component_ref import LLMRef
-from nat.data_models.function import FunctionBaseConfig
+from nat.data_models.optimizable import OptimizableField
+from nat.data_models.optimizable import OptimizableMixin
+from nat.data_models.optimizable import SearchSpace
 from nat.utils.type_converter import GlobalTypeConverter
 
 logger = logging.getLogger(__name__)
 
 
-class ReActAgentWorkflowConfig(FunctionBaseConfig, name="react_agent"):
+class ReActAgentWorkflowConfig(AgentBaseConfig, OptimizableMixin, name="react_agent"):
     """
     Defines a NAT function that uses a ReAct Agent to perform reasoning between tool calls, and utilizes the
     tool names and descriptions to select the optimal tool.
     """
-
-    tool_names: list[FunctionRef] = Field(default_factory=list,
-                                          description="The list of tools to provide to the react agent.")
-    llm_name: LLMRef = Field(description="The LLM model to use with the react agent.")
-    verbose: bool = Field(default=False, description="Set the verbosity of the react agent's logging.")
+    description: str = Field(default="ReAct Agent Workflow", description="The description of this function's use.")
+    tool_names: list[FunctionRef | FunctionGroupRef] = Field(
+        default_factory=list, description="The list of tools to provide to the react agent.")
     retry_agent_response_parsing_errors: bool = Field(
         default=True,
         validation_alias=AliasChoices("retry_agent_response_parsing_errors", "retry_parsing_errors"),
@@ -61,7 +61,6 @@ class ReActAgentWorkflowConfig(FunctionBaseConfig, name="react_agent"):
         description="Whether to pass tool call errors to agent. If False, failed tool calls will raise an exception.")
     include_tool_input_schema_in_tool_description: bool = Field(
         default=True, description="Specify inclusion of tool input schemas in the prompt.")
-    description: str = Field(default="ReAct Agent Workflow", description="The description of this functions use.")
     normalize_tool_input_quotes: bool = Field(
         default=True,
         description="Whether to replace single quotes with double quotes in the tool input. "
@@ -70,13 +69,17 @@ class ReActAgentWorkflowConfig(FunctionBaseConfig, name="react_agent"):
         default=None,
         description="Provides the SYSTEM_PROMPT to use with the agent")  # defaults to SYSTEM_PROMPT in prompt.py
     max_history: int = Field(default=15, description="Maximum number of messages to keep in the conversation history.")
-    log_response_max_chars: PositiveInt = Field(
-        default=1000, description="Maximum number of characters to display in logs when logging tool responses.")
     use_openai_api: bool = Field(default=False,
                                  description=("Use OpenAI API for the input/output types to the function. "
                                               "If False, strings will be used."))
-    additional_instructions: str | None = Field(
-        default=None, description="Additional instructions to provide to the agent in addition to the base prompt.")
+    additional_instructions: str | None = OptimizableField(
+        default=None,
+        description="Additional instructions to provide to the agent in addition to the base prompt.",
+        space=SearchSpace(
+            is_prompt=True,
+            prompt="No additional instructions.",
+            prompt_purpose="Additional instructions to provide to the agent in addition to the base prompt.",
+        ))
 
 
 @register_function(config_type=ReActAgentWorkflowConfig, framework_wrappers=[LLMFrameworkEnum.LANGCHAIN])
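The OptimizableField/SearchSpace pattern shown above is how a config field opts into prompt optimization while remaining an ordinary pydantic field at runtime. A condensed sketch of declaring one on a hypothetical config (MyAgentConfig and its fields are invented; the imports and the OptimizableField keyword arguments mirror the hunk above):

    from pydantic import Field

    from nat.data_models.function import FunctionBaseConfig
    from nat.data_models.optimizable import OptimizableField
    from nat.data_models.optimizable import OptimizableMixin
    from nat.data_models.optimizable import SearchSpace

    class MyAgentConfig(FunctionBaseConfig, OptimizableMixin, name="my_agent"):
        # A plain Field stays fixed during optimization.
        greeting: str = Field(default="Hello", description="Static greeting.")
        # An OptimizableField advertises a search space; is_prompt=True marks it
        # as text the prompt optimizer may rewrite toward prompt_purpose.
        instructions: str | None = OptimizableField(
            default=None,
            description="Instructions the optimizer is allowed to rewrite.",
            space=SearchSpace(
                is_prompt=True,
                prompt="Be concise.",
                prompt_purpose="Extra guidance appended to the base prompt.",
            ))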
nat/agent/reasoning_agent/reasoning_agent.py CHANGED
@@ -23,25 +23,22 @@ from nat.builder.builder import Builder
 from nat.builder.framework_enum import LLMFrameworkEnum
 from nat.builder.function_info import FunctionInfo
 from nat.cli.register_workflow import register_function
+from nat.data_models.agent import AgentBaseConfig
 from nat.data_models.api_server import ChatRequest
 from nat.data_models.component_ref import FunctionRef
-from nat.data_models.component_ref import LLMRef
-from nat.data_models.function import FunctionBaseConfig
 
 logger = logging.getLogger(__name__)
 
 
-class ReasoningFunctionConfig(FunctionBaseConfig, name="reasoning_agent"):
+class ReasoningFunctionConfig(AgentBaseConfig, name="reasoning_agent"):
     """
     Defines a NAT function that performs reasoning on the input data.
     Output is passed to the next function in the workflow.
 
     Designed to be used with an InterceptingFunction.
     """
-
-    llm_name: LLMRef = Field(description="The name of the LLM to use for reasoning.")
+    description: str = Field(default="Reasoning Agent", description="The description of this function's use.")
     augmented_fn: FunctionRef = Field(description="The name of the function to reason on.")
-    verbose: bool = Field(default=False, description="Whether to log detailed information.")
     reasoning_prompt_template: str = Field(
         default=("You are an expert reasoning model tasked with creating a detailed execution plan"
                  " for a system that has the following description:\n\n"
@@ -113,7 +110,12 @@ async def build_reasoning_function(config: ReasoningFunctionConfig, builder: Bui
                                        f"function without a description.")
 
     # Get the function dependencies of the augmented function
-    function_used_tools = builder.get_function_dependencies(config.augmented_fn).functions
+    function_dependencies = builder.get_function_dependencies(config.augmented_fn)
+    function_used_tools = set()
+    function_used_tools.update(function_dependencies.functions)
+    for function_group in function_dependencies.function_groups:
+        function_used_tools.update(builder.get_function_group_dependencies(function_group).functions)
+
     tool_names_with_desc: list[tuple[str, str]] = []
 
     for tool in function_used_tools:
nat/agent/register.py CHANGED
@@ -16,6 +16,7 @@
 # flake8: noqa
 
 # Import any workflows which need to be automatically registered here
+from .prompt_optimizer import register as prompt_optimizer
 from .react_agent import register as react_agent
 from .reasoning_agent import reasoning_agent
 from .rewoo_agent import register as rewoo_agent
nat/agent/rewoo_agent/agent.py CHANGED
@@ -68,7 +68,9 @@ class ReWOOAgentGraph(BaseAgent):
                  use_tool_schema: bool = True,
                  callbacks: list[AsyncCallbackHandler] | None = None,
                  detailed_logs: bool = False,
-                 log_response_max_chars: int = 1000):
+                 log_response_max_chars: int = 1000,
+                 tool_call_max_retries: int = 3,
+                 raise_tool_call_error: bool = True):
         super().__init__(llm=llm,
                          tools=tools,
                          callbacks=callbacks,
@@ -94,6 +96,8 @@ class ReWOOAgentGraph(BaseAgent):
         self.planner_prompt = planner_prompt.partial(tools=tool_names_and_descriptions, tool_names=tool_names)
         self.solver_prompt = solver_prompt
         self.tools_dict = {tool.name: tool for tool in tools}
+        self.tool_call_max_retries = tool_call_max_retries
+        self.raise_tool_call_error = raise_tool_call_error
 
         logger.debug("%s Initialized ReWOO Agent Graph", AGENT_LOG_PREFIX)
 
@@ -269,11 +273,14 @@ class ReWOOAgentGraph(BaseAgent):
         tool_response = await self._call_tool(requested_tool,
                                               tool_input_parsed,
                                               RunnableConfig(callbacks=self.callbacks),
-                                              max_retries=3)
+                                              max_retries=self.tool_call_max_retries)
 
         if self.detailed_logs:
             self._log_tool_response(requested_tool.name, tool_input_parsed, str(tool_response))
 
+        if self.raise_tool_call_error and tool_response.status == "error":
+            raise RuntimeError(f"Tool call failed: {tool_response.content}")
+
         intermediate_results[placeholder] = tool_response
         return {"intermediate_results": intermediate_results}
 
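From the caller's perspective the two new constructor knobs replace the previously hard-coded retry count and make failure handling explicit. An illustrative construction (llm, tools, and the two prompt objects are placeholders; the parameter names come from the __init__ change above):

    # Build a ReWOO graph that retries each tool call up to five times and
    # raises as soon as a ToolMessage comes back with status == "error".
    graph = await ReWOOAgentGraph(llm=llm,
                                  planner_prompt=planner_prompt,
                                  solver_prompt=solver_prompt,
                                  tools=tools,
                                  tool_call_max_retries=5,
                                  raise_tool_call_error=True).build_graph()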
nat/agent/rewoo_agent/register.py CHANGED
@@ -23,38 +23,36 @@ from nat.builder.builder import Builder
 from nat.builder.framework_enum import LLMFrameworkEnum
 from nat.builder.function_info import FunctionInfo
 from nat.cli.register_workflow import register_function
+from nat.data_models.agent import AgentBaseConfig
 from nat.data_models.api_server import ChatRequest
 from nat.data_models.api_server import ChatResponse
+from nat.data_models.component_ref import FunctionGroupRef
 from nat.data_models.component_ref import FunctionRef
-from nat.data_models.component_ref import LLMRef
-from nat.data_models.function import FunctionBaseConfig
 from nat.utils.type_converter import GlobalTypeConverter
 
 logger = logging.getLogger(__name__)
 
 
-class ReWOOAgentWorkflowConfig(FunctionBaseConfig, name="rewoo_agent"):
+class ReWOOAgentWorkflowConfig(AgentBaseConfig, name="rewoo_agent"):
     """
     Defines a NAT function that uses a ReWOO Agent to perform reasoning between tool calls, and utilizes the
     tool names and descriptions to select the optimal tool.
     """
-
-    tool_names: list[FunctionRef] = Field(default_factory=list,
-                                          description="The list of tools to provide to the rewoo agent.")
-    llm_name: LLMRef = Field(description="The LLM model to use with the rewoo agent.")
-    verbose: bool = Field(default=False, description="Set the verbosity of the rewoo agent's logging.")
+    description: str = Field(default="ReWOO Agent Workflow", description="The description of this function's use.")
+    tool_names: list[FunctionRef | FunctionGroupRef] = Field(
+        default_factory=list, description="The list of tools to provide to the rewoo agent.")
     include_tool_input_schema_in_tool_description: bool = Field(
         default=True, description="Specify inclusion of tool input schemas in the prompt.")
-    description: str = Field(default="ReWOO Agent Workflow", description="The description of this functions use.")
     planner_prompt: str | None = Field(
         default=None,
         description="Provides the PLANNER_PROMPT to use with the agent")  # defaults to PLANNER_PROMPT in prompt.py
     solver_prompt: str | None = Field(
         default=None,
         description="Provides the SOLVER_PROMPT to use with the agent")  # defaults to SOLVER_PROMPT in prompt.py
+    tool_call_max_retries: PositiveInt = Field(default=3,
+                                               description="The number of retries before raising a tool call error.",
+                                               ge=1)
     max_history: int = Field(default=15, description="Maximum number of messages to keep in the conversation history.")
-    log_response_max_chars: PositiveInt = Field(
-        default=1000, description="Maximum number of characters to display in logs when logging tool responses.")
     use_openai_api: bool = Field(default=False,
                                  description=("Use OpenAI API for the input/output types to the function. "
                                               "If False, strings will be used."))
@@ -65,6 +63,10 @@ class ReWOOAgentWorkflowConfig(FunctionBaseConfig, name="rewoo_agent"):
     additional_solver_instructions: str | None = Field(
         default=None,
         description="Additional instructions to provide to the agent in addition to the base solver prompt.")
+    raise_tool_call_error: bool = Field(default=True,
+                                        description="Whether to raise an exception immediately if a tool "
+                                        "call fails. If set to False, the tool call error message will be included in "
+                                        "the tool response and passed to the next tool.")
 
 
 @register_function(config_type=ReWOOAgentWorkflowConfig, framework_wrappers=[LLMFrameworkEnum.LANGCHAIN])
@@ -118,7 +120,9 @@ async def rewoo_agent_workflow(config: ReWOOAgentWorkflowConfig, builder: Builde
                                   tools=tools,
                                   use_tool_schema=config.include_tool_input_schema_in_tool_description,
                                   detailed_logs=config.verbose,
-                                  log_response_max_chars=config.log_response_max_chars).build_graph()
+                                  log_response_max_chars=config.log_response_max_chars,
+                                  tool_call_max_retries=config.tool_call_max_retries,
+                                  raise_tool_call_error=config.raise_tool_call_error).build_graph()
 
     async def _response_fn(input_message: ChatRequest) -> ChatResponse:
         try:
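At the config layer the same knobs surface as fields on ReWOOAgentWorkflowConfig. A sketch of constructing it programmatically; llm_name is assumed to be inherited from AgentBaseConfig (the diff removes it from each agent config, which suggests the base class now carries it), and all values are illustrative:

    # Tolerate tool failures: with raise_tool_call_error=False the error text is
    # placed in the tool response and flows to the next planned tool instead of
    # aborting the run.
    config = ReWOOAgentWorkflowConfig(llm_name="nim_llm",
                                      tool_names=["search", "calculator"],
                                      tool_call_max_retries=2,
                                      raise_tool_call_error=False)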
nat/agent/tool_calling_agent/agent.py CHANGED
@@ -19,10 +19,13 @@ import typing
 from langchain_core.callbacks.base import AsyncCallbackHandler
 from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import SystemMessage
+from langchain_core.messages import ToolMessage
 from langchain_core.messages.base import BaseMessage
 from langchain_core.runnables import RunnableLambda
 from langchain_core.runnables.config import RunnableConfig
 from langchain_core.tools import BaseTool
+from langgraph.graph import StateGraph
+from langgraph.graph.state import CompiledStateGraph
 from langgraph.prebuilt import ToolNode
 from pydantic import BaseModel
 from pydantic import Field
@@ -57,12 +60,14 @@ class ToolCallAgentGraph(DualNodeAgent):
                  detailed_logs: bool = False,
                  log_response_max_chars: int = 1000,
                  handle_tool_errors: bool = True,
+                 return_direct: list[BaseTool] | None = None,
                  ):
         super().__init__(llm=llm,
                          tools=tools,
                          callbacks=callbacks,
                          detailed_logs=detailed_logs,
                          log_response_max_chars=log_response_max_chars)
+
         # some LLMs support tool calling
         # these models accept the tool's input schema and decide when to use a tool based on the input's relevance
         try:
@@ -85,8 +90,8 @@ class ToolCallAgentGraph(DualNodeAgent):
         )
 
         self.agent = prompt_runnable | self.bound_llm
-
         self.tool_caller = ToolNode(tools, handle_tool_errors=handle_tool_errors)
+        self.return_direct = [tool.name for tool in return_direct] if return_direct else []
         logger.debug("%s Initialized Tool Calling Agent Graph", AGENT_LOG_PREFIX)
 
     async def agent_node(self, state: ToolCallAgentGraphState):
@@ -146,13 +151,70 @@ class ToolCallAgentGraph(DualNodeAgent):
             logger.error("%s Failed to call tool_node: %s", AGENT_LOG_PREFIX, ex)
             raise
 
-    async def build_graph(self):
+    async def tool_conditional_edge(self, state: ToolCallAgentGraphState) -> AgentDecision:
+        """
+        Determines whether to continue to the agent or end graph execution after a tool call.
+
+        Args:
+            state: The current state of the Tool Calling Agent graph containing messages and tool responses.
+
+        Returns:
+            AgentDecision: TOOL to continue to agent for processing, or END to terminate graph execution.
+                Returns END if the tool is in the return_direct list, otherwise returns TOOL to continue processing.
+        """
         try:
-            await super()._build_graph(state_schema=ToolCallAgentGraphState)
-            logger.debug(
-                "%s Tool Calling Agent Graph built and compiled successfully",
-                AGENT_LOG_PREFIX,
-            )
+            logger.debug("%s Starting the Tool Conditional Edge", AGENT_LOG_PREFIX)
+            if not state.messages:
+                logger.debug("%s No messages in state; routing to agent", AGENT_LOG_PREFIX)
+                return AgentDecision.TOOL
+
+            last_message = state.messages[-1]
+            # Return directly if this tool is in the return_direct list
+            if (self.return_direct and isinstance(last_message, ToolMessage) and last_message.name
+                    and last_message.name in self.return_direct):
+                logger.debug("%s Tool %s is set to return directly", AGENT_LOG_PREFIX, last_message.name)
+                return AgentDecision.END
+            else:
+                # Continue to agent for processing
+                logger.debug("%s Tool response will be processed by agent", AGENT_LOG_PREFIX)
+                return AgentDecision.TOOL
+        except Exception as ex:
+            logger.exception("%s Failed to determine tool conditional edge: %s", AGENT_LOG_PREFIX, ex)
+            logger.warning("%s Continuing to agent for processing", AGENT_LOG_PREFIX)
+            return AgentDecision.TOOL
+
+    async def _build_graph(self, state_schema: type) -> CompiledStateGraph:
+        try:
+            logger.debug("%s Building and compiling the Tool Calling Agent Graph", AGENT_LOG_PREFIX)
+
+            graph = StateGraph(state_schema)
+            graph.add_node("agent", self.agent_node)
+            graph.add_node("tool", self.tool_node)
+
+            if self.return_direct:
+                # go to end of graph if tool is set to return directly
+                tool_conditional_edge_possible_outputs = {AgentDecision.END: "__end__", AgentDecision.TOOL: "agent"}
+                graph.add_conditional_edges("tool", self.tool_conditional_edge, tool_conditional_edge_possible_outputs)
+            else:
+                # otherwise return to agent after tool call
+                graph.add_edge("tool", "agent")
+
+            conditional_edge_possible_outputs = {AgentDecision.TOOL: "tool", AgentDecision.END: "__end__"}
+            graph.add_conditional_edges("agent", self.conditional_edge, conditional_edge_possible_outputs)
+
+            graph.set_entry_point("agent")
+            self.graph = graph.compile()
+
+            return self.graph
+        except Exception as ex:
+            logger.error("%s Failed to build Tool Calling Agent Graph: %s", AGENT_LOG_PREFIX, ex)
+            raise
+
+    async def build_graph(self) -> CompiledStateGraph:
+        try:
+            await self._build_graph(state_schema=ToolCallAgentGraphState)
+            logger.debug("%s Tool Calling Agent Graph built and compiled successfully", AGENT_LOG_PREFIX)
             return self.graph
         except Exception as ex:
            logger.error("%s Failed to build Tool Calling Agent Graph: %s", AGENT_LOG_PREFIX, ex)
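Taken together, return_direct gives the Tool Calling Agent a short-circuit path: when the last ToolMessage names a tool in the list, tool_conditional_edge routes to __end__ instead of looping back to the agent node. A sketch of wiring it up (llm and the two tool objects are placeholders; the constructor parameters match the changes above):

    # lookup_order's output is returned to the user verbatim, skipping a final
    # LLM pass; send_email results still flow back through the agent node.
    agent = ToolCallAgentGraph(llm=llm,
                               tools=[lookup_order, send_email],
                               return_direct=[lookup_order])
    graph = await agent.build_graph()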