nvidia-nat 1.3.0a20250910__py3-none-any.whl → 1.4.0a20251112__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nat/agent/base.py +13 -8
- nat/agent/prompt_optimizer/prompt.py +68 -0
- nat/agent/prompt_optimizer/register.py +149 -0
- nat/agent/react_agent/agent.py +6 -5
- nat/agent/react_agent/register.py +49 -39
- nat/agent/reasoning_agent/reasoning_agent.py +17 -15
- nat/agent/register.py +2 -0
- nat/agent/responses_api_agent/__init__.py +14 -0
- nat/agent/responses_api_agent/register.py +126 -0
- nat/agent/rewoo_agent/agent.py +304 -117
- nat/agent/rewoo_agent/prompt.py +19 -22
- nat/agent/rewoo_agent/register.py +51 -38
- nat/agent/tool_calling_agent/agent.py +75 -17
- nat/agent/tool_calling_agent/register.py +46 -23
- nat/authentication/api_key/api_key_auth_provider.py +6 -11
- nat/authentication/api_key/api_key_auth_provider_config.py +8 -5
- nat/authentication/credential_validator/__init__.py +14 -0
- nat/authentication/credential_validator/bearer_token_validator.py +557 -0
- nat/authentication/http_basic_auth/http_basic_auth_provider.py +1 -1
- nat/authentication/interfaces.py +5 -2
- nat/authentication/oauth2/oauth2_auth_code_flow_provider.py +69 -36
- nat/authentication/oauth2/oauth2_auth_code_flow_provider_config.py +2 -1
- nat/authentication/oauth2/oauth2_resource_server_config.py +125 -0
- nat/builder/builder.py +55 -23
- nat/builder/component_utils.py +9 -5
- nat/builder/context.py +54 -15
- nat/builder/eval_builder.py +14 -9
- nat/builder/framework_enum.py +1 -0
- nat/builder/front_end.py +1 -1
- nat/builder/function.py +370 -0
- nat/builder/function_info.py +1 -1
- nat/builder/intermediate_step_manager.py +38 -2
- nat/builder/workflow.py +5 -0
- nat/builder/workflow_builder.py +306 -54
- nat/cli/cli_utils/config_override.py +1 -1
- nat/cli/commands/info/info.py +16 -6
- nat/cli/commands/mcp/__init__.py +14 -0
- nat/cli/commands/mcp/mcp.py +986 -0
- nat/cli/commands/optimize.py +90 -0
- nat/cli/commands/start.py +1 -1
- nat/cli/commands/workflow/templates/config.yml.j2 +14 -13
- nat/cli/commands/workflow/templates/register.py.j2 +2 -2
- nat/cli/commands/workflow/templates/workflow.py.j2 +35 -21
- nat/cli/commands/workflow/workflow_commands.py +60 -18
- nat/cli/entrypoint.py +15 -11
- nat/cli/main.py +3 -0
- nat/cli/register_workflow.py +38 -4
- nat/cli/type_registry.py +72 -1
- nat/control_flow/__init__.py +0 -0
- nat/control_flow/register.py +20 -0
- nat/control_flow/router_agent/__init__.py +0 -0
- nat/control_flow/router_agent/agent.py +329 -0
- nat/control_flow/router_agent/prompt.py +48 -0
- nat/control_flow/router_agent/register.py +91 -0
- nat/control_flow/sequential_executor.py +166 -0
- nat/data_models/agent.py +34 -0
- nat/data_models/api_server.py +199 -69
- nat/data_models/authentication.py +23 -9
- nat/data_models/common.py +47 -0
- nat/data_models/component.py +2 -0
- nat/data_models/component_ref.py +11 -0
- nat/data_models/config.py +41 -17
- nat/data_models/dataset_handler.py +4 -3
- nat/data_models/function.py +34 -0
- nat/data_models/function_dependencies.py +8 -0
- nat/data_models/intermediate_step.py +9 -1
- nat/data_models/llm.py +15 -1
- nat/data_models/openai_mcp.py +46 -0
- nat/data_models/optimizable.py +208 -0
- nat/data_models/optimizer.py +161 -0
- nat/data_models/span.py +41 -3
- nat/data_models/thinking_mixin.py +2 -2
- nat/embedder/azure_openai_embedder.py +2 -1
- nat/embedder/nim_embedder.py +3 -2
- nat/embedder/openai_embedder.py +3 -2
- nat/eval/config.py +1 -1
- nat/eval/dataset_handler/dataset_downloader.py +3 -2
- nat/eval/dataset_handler/dataset_filter.py +34 -2
- nat/eval/evaluate.py +10 -3
- nat/eval/evaluator/base_evaluator.py +1 -1
- nat/eval/rag_evaluator/evaluate.py +7 -4
- nat/eval/register.py +4 -0
- nat/eval/runtime_evaluator/__init__.py +14 -0
- nat/eval/runtime_evaluator/evaluate.py +123 -0
- nat/eval/runtime_evaluator/register.py +100 -0
- nat/eval/swe_bench_evaluator/evaluate.py +1 -1
- nat/eval/trajectory_evaluator/register.py +1 -1
- nat/eval/tunable_rag_evaluator/evaluate.py +1 -1
- nat/eval/usage_stats.py +2 -0
- nat/eval/utils/output_uploader.py +3 -2
- nat/eval/utils/weave_eval.py +17 -3
- nat/experimental/decorators/experimental_warning_decorator.py +27 -7
- nat/experimental/test_time_compute/functions/execute_score_select_function.py +1 -1
- nat/experimental/test_time_compute/functions/plan_select_execute_function.py +7 -3
- nat/experimental/test_time_compute/functions/ttc_tool_orchestration_function.py +1 -1
- nat/experimental/test_time_compute/functions/ttc_tool_wrapper_function.py +3 -3
- nat/experimental/test_time_compute/models/strategy_base.py +2 -2
- nat/experimental/test_time_compute/selection/llm_based_output_merging_selector.py +1 -1
- nat/front_ends/console/authentication_flow_handler.py +82 -30
- nat/front_ends/console/console_front_end_plugin.py +19 -7
- nat/front_ends/fastapi/auth_flow_handlers/http_flow_handler.py +1 -1
- nat/front_ends/fastapi/auth_flow_handlers/websocket_flow_handler.py +52 -17
- nat/front_ends/fastapi/dask_client_mixin.py +65 -0
- nat/front_ends/fastapi/fastapi_front_end_config.py +25 -3
- nat/front_ends/fastapi/fastapi_front_end_plugin.py +140 -3
- nat/front_ends/fastapi/fastapi_front_end_plugin_worker.py +445 -265
- nat/front_ends/fastapi/job_store.py +518 -99
- nat/front_ends/fastapi/main.py +11 -19
- nat/front_ends/fastapi/message_handler.py +69 -44
- nat/front_ends/fastapi/message_validator.py +8 -7
- nat/front_ends/fastapi/utils.py +57 -0
- nat/front_ends/mcp/introspection_token_verifier.py +73 -0
- nat/front_ends/mcp/mcp_front_end_config.py +71 -3
- nat/front_ends/mcp/mcp_front_end_plugin.py +85 -21
- nat/front_ends/mcp/mcp_front_end_plugin_worker.py +248 -29
- nat/front_ends/mcp/memory_profiler.py +320 -0
- nat/front_ends/mcp/tool_converter.py +78 -25
- nat/front_ends/simple_base/simple_front_end_plugin_base.py +3 -1
- nat/llm/aws_bedrock_llm.py +21 -8
- nat/llm/azure_openai_llm.py +14 -5
- nat/llm/litellm_llm.py +80 -0
- nat/llm/nim_llm.py +23 -9
- nat/llm/openai_llm.py +19 -7
- nat/llm/register.py +4 -0
- nat/llm/utils/thinking.py +1 -1
- nat/observability/exporter/base_exporter.py +1 -1
- nat/observability/exporter/processing_exporter.py +29 -55
- nat/observability/exporter/span_exporter.py +43 -15
- nat/observability/exporter_manager.py +2 -2
- nat/observability/mixin/redaction_config_mixin.py +5 -4
- nat/observability/mixin/tagging_config_mixin.py +26 -14
- nat/observability/mixin/type_introspection_mixin.py +420 -107
- nat/observability/processor/batching_processor.py +1 -1
- nat/observability/processor/processor.py +3 -0
- nat/observability/processor/redaction/__init__.py +24 -0
- nat/observability/processor/redaction/contextual_redaction_processor.py +125 -0
- nat/observability/processor/redaction/contextual_span_redaction_processor.py +66 -0
- nat/observability/processor/redaction/redaction_processor.py +177 -0
- nat/observability/processor/redaction/span_header_redaction_processor.py +92 -0
- nat/observability/processor/span_tagging_processor.py +21 -14
- nat/observability/register.py +16 -0
- nat/profiler/callbacks/langchain_callback_handler.py +32 -7
- nat/profiler/callbacks/llama_index_callback_handler.py +36 -2
- nat/profiler/callbacks/token_usage_base_model.py +2 -0
- nat/profiler/decorators/framework_wrapper.py +61 -9
- nat/profiler/decorators/function_tracking.py +35 -3
- nat/profiler/forecasting/models/linear_model.py +1 -1
- nat/profiler/forecasting/models/random_forest_regressor.py +1 -1
- nat/profiler/inference_optimization/bottleneck_analysis/nested_stack_analysis.py +1 -1
- nat/profiler/inference_optimization/experimental/prefix_span_analysis.py +1 -1
- nat/profiler/parameter_optimization/__init__.py +0 -0
- nat/profiler/parameter_optimization/optimizable_utils.py +93 -0
- nat/profiler/parameter_optimization/optimizer_runtime.py +67 -0
- nat/profiler/parameter_optimization/parameter_optimizer.py +189 -0
- nat/profiler/parameter_optimization/parameter_selection.py +107 -0
- nat/profiler/parameter_optimization/pareto_visualizer.py +460 -0
- nat/profiler/parameter_optimization/prompt_optimizer.py +384 -0
- nat/profiler/parameter_optimization/update_helpers.py +66 -0
- nat/profiler/utils.py +3 -1
- nat/registry_handlers/pypi/register_pypi.py +5 -3
- nat/registry_handlers/rest/register_rest.py +5 -3
- nat/retriever/milvus/retriever.py +1 -1
- nat/retriever/nemo_retriever/register.py +2 -1
- nat/runtime/loader.py +1 -1
- nat/runtime/runner.py +111 -6
- nat/runtime/session.py +49 -3
- nat/settings/global_settings.py +2 -2
- nat/tool/chat_completion.py +4 -1
- nat/tool/code_execution/code_sandbox.py +3 -6
- nat/tool/code_execution/local_sandbox/Dockerfile.sandbox +19 -32
- nat/tool/code_execution/local_sandbox/local_sandbox_server.py +6 -1
- nat/tool/code_execution/local_sandbox/sandbox.requirements.txt +2 -0
- nat/tool/code_execution/local_sandbox/start_local_sandbox.sh +10 -4
- nat/tool/datetime_tools.py +1 -1
- nat/tool/github_tools.py +450 -0
- nat/tool/memory_tools/add_memory_tool.py +3 -3
- nat/tool/memory_tools/delete_memory_tool.py +3 -4
- nat/tool/memory_tools/get_memory_tool.py +4 -4
- nat/tool/register.py +2 -7
- nat/tool/server_tools.py +15 -2
- nat/utils/__init__.py +76 -0
- nat/utils/callable_utils.py +70 -0
- nat/utils/data_models/schema_validator.py +1 -1
- nat/utils/decorators.py +210 -0
- nat/utils/exception_handlers/automatic_retries.py +278 -72
- nat/utils/io/yaml_tools.py +73 -3
- nat/utils/log_levels.py +25 -0
- nat/utils/responses_api.py +26 -0
- nat/utils/string_utils.py +16 -0
- nat/utils/type_converter.py +12 -3
- nat/utils/type_utils.py +6 -2
- nvidia_nat-1.4.0a20251112.dist-info/METADATA +197 -0
- {nvidia_nat-1.3.0a20250910.dist-info → nvidia_nat-1.4.0a20251112.dist-info}/RECORD +199 -165
- {nvidia_nat-1.3.0a20250910.dist-info → nvidia_nat-1.4.0a20251112.dist-info}/entry_points.txt +1 -0
- nat/cli/commands/info/list_mcp.py +0 -461
- nat/data_models/temperature_mixin.py +0 -43
- nat/data_models/top_p_mixin.py +0 -43
- nat/observability/processor/header_redaction_processor.py +0 -123
- nat/observability/processor/redaction_processor.py +0 -77
- nat/tool/code_execution/test_code_execution_sandbox.py +0 -414
- nat/tool/github_tools/create_github_commit.py +0 -133
- nat/tool/github_tools/create_github_issue.py +0 -87
- nat/tool/github_tools/create_github_pr.py +0 -106
- nat/tool/github_tools/get_github_file.py +0 -106
- nat/tool/github_tools/get_github_issue.py +0 -166
- nat/tool/github_tools/get_github_pr.py +0 -256
- nat/tool/github_tools/update_github_issue.py +0 -100
- nvidia_nat-1.3.0a20250910.dist-info/METADATA +0 -373
- /nat/{tool/github_tools → agent/prompt_optimizer}/__init__.py +0 -0
- {nvidia_nat-1.3.0a20250910.dist-info → nvidia_nat-1.4.0a20251112.dist-info}/WHEEL +0 -0
- {nvidia_nat-1.3.0a20250910.dist-info → nvidia_nat-1.4.0a20251112.dist-info}/licenses/LICENSE-3rd-party.txt +0 -0
- {nvidia_nat-1.3.0a20250910.dist-info → nvidia_nat-1.4.0a20251112.dist-info}/licenses/LICENSE.md +0 -0
- {nvidia_nat-1.3.0a20250910.dist-info → nvidia_nat-1.4.0a20251112.dist-info}/top_level.txt +0 -0
nat/agent/base.py
CHANGED

@@ -27,6 +27,7 @@ from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import AIMessage
 from langchain_core.messages import BaseMessage
 from langchain_core.messages import ToolMessage
+from langchain_core.runnables import Runnable
 from langchain_core.runnables import RunnableConfig
 from langchain_core.tools import BaseTool
 from langgraph.graph.state import CompiledStateGraph
@@ -101,27 +102,31 @@ class BaseAgent(ABC):
         AIMessage
             The LLM response
         """
-        output_message =
+        output_message = []
         async for event in runnable.astream(inputs, config=config):
-            output_message
+            output_message.append(event.content)

-        return AIMessage(content=output_message)
+        return AIMessage(content="".join(output_message))

-    async def _call_llm(self,
+    async def _call_llm(self, llm: Runnable, inputs: dict[str, Any], config: RunnableConfig | None = None) -> AIMessage:
         """
         Call the LLM directly. Retry logic is handled automatically by the underlying LLM client.

         Parameters
         ----------
-
-        The
+        llm : Runnable
+            The LLM runnable (prompt | llm or similar)
+        inputs : dict[str, Any]
+            The inputs to pass to the runnable
+        config : RunnableConfig | None
+            The config to pass to the runnable (should include callbacks)

         Returns
         -------
         AIMessage
             The LLM response
         """
-        response = await
+        response = await llm.ainvoke(inputs, config=config)
         return AIMessage(content=str(response.content))

     async def _call_tool(self,
@@ -187,7 +192,7 @@ class BaseAgent(ABC):
                 await asyncio.sleep(sleep_time)

         # All retries exhausted, return error message
-        error_content = "Tool call failed after all retry attempts. Last error:
+        error_content = f"Tool call failed after all retry attempts. Last error: {str(last_exception)}"
         logger.error("%s %s", AGENT_LOG_PREFIX, error_content, exc_info=True)
         return ToolMessage(name=tool.name, tool_call_id=tool.name, content=error_content, status="error")
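Note: the reworked `_call_llm` now takes a pre-composed Runnable rather than building one internally. A minimal sketch of driving it, assuming a `BaseAgent` subclass instance and any LangChain chat model (the model name and template below are placeholders, not part of the package):

```python
# Illustrative sketch only -- `agent` is assumed to be a BaseAgent subclass instance.
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI  # any LangChain chat model works here

async def ask(agent) -> str:
    prompt = ChatPromptTemplate.from_template("Summarize in one line: {topic}")
    runnable = prompt | ChatOpenAI(model="gpt-4o-mini")  # "prompt | llm or similar"
    # config may carry callbacks for intermediate-step tracking, per the docstring
    message = await agent._call_llm(runnable, {"topic": "LangGraph agents"}, config=None)
    return str(message.content)
```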
nat/agent/prompt_optimizer/prompt.py
ADDED

@@ -0,0 +1,68 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# flake8: noqa W291
+
+mutator_prompt = """
+
+## CORE DIRECTIVES
+- **Preserve the original objective and task.** Do not change what the prompt is meant to accomplish.
+- **Keep the intent intact.** The improved prompt must solve the same problem as the original.
+- **Do not invent new goals.** Only improve clarity, structure, constraints, and usability.
+- **Do not drop critical instructions.** Everything essential from the original prompt must remain.
+- **Return only the mutated prompt text.** No rationale, no diffs, no explanations.
+- **Be Creative within bounds.** You may rephrase, reorganize, and enhance, but not alter meaning.
+- **DO NOT use curly braces in your prompt** for anything other than existing variables in the prompt as the string
+will be treated as an f-string.
+- **Examples are a good idea** if the original prompt lacks them. They help clarify expected output.
+
+---
+
+## IMPROVEMENT HINTS
+When modifying, apply these principles:
+1. **Clarity & Precision** – remove vague language, strengthen directives.
+2. **Structure & Flow** – order sections as: *Objective → Constraints → Tools → Steps → Output Schema → Examples*.
+3. **Schema Adherence** – enforce a single canonical output schema (JSON/XML) with `schema_version`.
+4. **Tool Governance** – clarify when/how tools are used, their inputs/outputs, and fallback behavior.
+5. **Error Handling** – specify behavior if tools fail or inputs are insufficient.
+6. **Budget Awareness** – minimize verbosity, respect token/latency limits.
+7. **Safety** – include refusals for unsafe requests, enforce compliance with rules.
+8. **Consistency** – avoid format drift; always maintain the same schema.
+9. **Integrity** – confirm the task, objective, and intent are preserved.
+
+---
+
+## MUTATION OPERATORS
+You may:
+- **Tighten** (remove fluff, redundancies)
+- **Reorder** (improve logical flow)
+- **Constrain** (add explicit rules/limits)
+- **Harden** (improve error handling/fallbacks)
+- **Defuse** (replace ambiguous verbs with measurable actions)
+- **Format-lock** (wrap outputs in JSON/XML fenced blocks)
+- **Example-ify** (add examples if missing or weak)
+
+---
+
+## INPUT
+Here is the prompt to mutate:
+{original_prompt}
+
+## OBJECTIVE
+The prompt must acheive the following objective:
+{objective}
+
+The modified prompt is: \n
+
+"""
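The template above exposes exactly two placeholders, `{original_prompt}` and `{objective}`; all other braces are avoided because the string is rendered as a validated template. A small rendering sketch (sample values invented; the `PromptTemplate` call mirrors `register.py` below):

```python
from langchain_core.prompts import PromptTemplate

from nat.agent.prompt_optimizer.prompt import mutator_prompt

# validate_template=True fails fast if a stray brace sneaks into the template text
template = PromptTemplate(template=mutator_prompt,
                          input_variables=["original_prompt", "objective"],
                          validate_template=True)
rendered = template.invoke({"original_prompt": "You are a helpful assistant.",
                            "objective": "Answer billing questions accurately."}).to_string()
print(rendered)
```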
nat/agent/prompt_optimizer/register.py
ADDED

@@ -0,0 +1,149 @@
+# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from pydantic import Field
+
+from nat.builder.builder import Builder
+from nat.builder.framework_enum import LLMFrameworkEnum
+from nat.builder.function_info import FunctionInfo
+from nat.cli.register_workflow import register_function
+from nat.data_models.component_ref import LLMRef
+from nat.data_models.function import FunctionBaseConfig
+from nat.profiler.parameter_optimization.prompt_optimizer import PromptOptimizerInputSchema
+
+
+class PromptOptimizerConfig(FunctionBaseConfig, name="prompt_init"):
+
+    optimizer_llm: LLMRef = Field(description="LLM to use for prompt optimization")
+    optimizer_prompt: str = Field(
+        description="Prompt template for the optimizer",
+        default=(
+            "You are an expert at optimizing prompts for LLMs. "
+            "Your task is to take a given prompt and suggest an optimized version of it. "
+            "Note that the prompt might be a template with variables and curly braces. Remember to always keep the "
+            "variables and curly braces in the prompt the same. Only modify the instructions in the prompt that are"
+            "not variables. The system is meant to achieve the following objective\n"
+            "{system_objective}\n Of which, the prompt is one part. The details of the prompt and context as below.\n"))
+    system_objective: str = Field(description="Objective of the workflow")
+
+
+@register_function(config_type=PromptOptimizerConfig, framework_wrappers=[LLMFrameworkEnum.LANGCHAIN])
+async def prompt_optimizer_function(config: PromptOptimizerConfig, builder: Builder):
+    """
+    Function to optimize prompts for LLMs.
+    """
+
+    try:
+        from langchain_core.prompts import PromptTemplate
+
+        from .prompt import mutator_prompt
+    except ImportError as exc:
+        raise ImportError("langchain-core is not installed. Please install it to use MultiLLMPlanner.\n"
+                          "This error can be resolve by installing \"nvidia-nat[langchain]\".") from exc
+
+    llm = await builder.get_llm(config.optimizer_llm, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
+
+    template = PromptTemplate(template=config.optimizer_prompt,
+                              input_variables=["system_objective"],
+                              validate_template=True)
+
+    base_prompt: str = (await template.ainvoke(input={"system_objective": config.system_objective})).to_string()
+    prompt_extension_template = PromptTemplate(template=mutator_prompt,
+                                               input_variables=["original_prompt", "objective"],
+                                               validate_template=True)
+
+    async def _inner(input_message: PromptOptimizerInputSchema) -> str:
+        """
+        Optimize the prompt using the provided LLM.
+        """
+
+        original_prompt = input_message.original_prompt
+        prompt_objective = input_message.objective
+
+        prompt_extension = (await prompt_extension_template.ainvoke(input={
+            "original_prompt": original_prompt,
+            "objective": prompt_objective,
+        })).to_string()
+
+        prompt = f"{base_prompt}\n\n{prompt_extension}"
+
+        optimized_prompt = await llm.ainvoke(prompt)
+        return optimized_prompt.content
+
+    yield FunctionInfo.from_fn(
+        fn=_inner,
+        description="Optimize prompts for LLMs using a feedback LLM.",
+    )
+
+
+class PromptRecombinerConfig(FunctionBaseConfig, name="prompt_recombiner"):
+
+    optimizer_llm: LLMRef = Field(description="LLM to use for prompt recombination")
+    optimizer_prompt: str = Field(
+        description="Prompt template for the recombiner",
+        default=("You are an expert at combining prompt instructions for LLMs. "
+                 "Your task is to merge two prompts for the same objective into a single, stronger prompt. "
+                 "Do not introduce new variables or modify existing placeholders."),
+    )
+    system_objective: str = Field(description="Objective of the workflow")
+
+
+@register_function(config_type=PromptRecombinerConfig, framework_wrappers=[LLMFrameworkEnum.LANGCHAIN])
+async def prompt_recombiner_function(config: PromptRecombinerConfig, builder: Builder):
+    """
+    Function to recombine two parent prompts into a child prompt using the optimizer LLM.
+    Uses the same base template and objective instructions.
+    """
+
+    try:
+        from langchain_core.prompts import PromptTemplate
+    except ImportError as exc:
+        raise ImportError("langchain-core is not installed. Please install it to use MultiLLMPlanner.\n"
+                          "This error can be resolve by installing \"nvidia-nat[langchain]\".") from exc
+
+    llm = await builder.get_llm(config.optimizer_llm, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
+
+    template = PromptTemplate(template=config.optimizer_prompt,
+                              input_variables=["system_objective"],
+                              validate_template=True)
+
+    base_prompt: str = (await template.ainvoke(input={"system_objective": config.system_objective})).to_string()
+
+    class RecombineSchema(PromptOptimizerInputSchema):
+        parent_b: str | None = None
+
+    async def _inner(input_message: RecombineSchema) -> str:
+        parent_a = input_message.original_prompt
+        parent_b = input_message.parent_b or ""
+        prompt_objective = input_message.objective
+
+        prompt = (
+            f"{base_prompt}\n\n"
+            "We are performing genetic recombination between two prompts that satisfy the same objective.\n"
+            f"Objective: {prompt_objective}\n\n"
+            f"Parent A:\n{parent_a}\n\n"
+            f"Parent B:\n{parent_b}\n\n"
+            "Combine the strongest instructions and phrasing from both parents to produce a single, coherent child "
+            "prompt.\n"
+            "Maintain variables and placeholders unchanged.\n"
+            "Return only the child prompt text, with no additional commentary.")
+
+        child_prompt = await llm.ainvoke(prompt)
+        return child_prompt.content
+
+    yield FunctionInfo.from_fn(
+        fn=_inner,
+        description="Recombine two prompts into a stronger child prompt.",
+    )
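Both functions accept `PromptOptimizerInputSchema`-shaped inputs. A hedged sketch of the payloads, assuming `original_prompt` and `objective` are the only required fields (they are the only ones the code above reads):

```python
from nat.profiler.parameter_optimization.prompt_optimizer import PromptOptimizerInputSchema

# Input for prompt_init (mutation): one parent prompt plus the workflow objective.
# Sample values are invented for illustration.
mutate_input = PromptOptimizerInputSchema(
    original_prompt="You are a helpful assistant. Answer the user's question.",
    objective="Answer customer billing questions accurately and politely.",
)

# prompt_recombiner extends the same schema with an optional second parent
# (RecombineSchema above), enabling a mutate-then-recombine optimization loop.
```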
nat/agent/react_agent/agent.py
CHANGED

@@ -59,6 +59,7 @@ class ReActGraphState(BaseModel):
     messages: list[BaseMessage] = Field(default_factory=list)  # input and output of the ReAct Agent
     agent_scratchpad: list[AgentAction] = Field(default_factory=list)  # agent thoughts / intermediate steps
     tool_responses: list[BaseMessage] = Field(default_factory=list)  # the responses from any tool calls
+    final_answer: str | None = Field(default=None)  # the final answer from the ReAct Agent


 class ReActAgentGraph(DualNodeAgent):
@@ -204,6 +205,7 @@ class ReActAgentGraph(DualNodeAgent):
             # this is where we handle the final output of the Agent, we can clean-up/format/postprocess here
             # the final answer goes in the "messages" state channel
             state.messages += [AIMessage(content=final_answer)]
+            state.final_answer = final_answer
         else:
             # the agent wants to call a tool, ensure the thoughts are preserved for the next agentic cycle
             agent_output.log = output_message.content
@@ -242,10 +244,9 @@ class ReActAgentGraph(DualNodeAgent):
     async def conditional_edge(self, state: ReActGraphState):
         try:
             logger.debug("%s Starting the ReAct Conditional Edge", AGENT_LOG_PREFIX)
-            if
-                # the ReAct Agent has finished executing
-
-                logger.debug("%s Final answer:\n%s", AGENT_LOG_PREFIX, last_message_content)
+            if state.final_answer:
+                # the ReAct Agent has finished executing
+                logger.debug("%s Final answer:\n%s", AGENT_LOG_PREFIX, state.final_answer)
                 return AgentDecision.END
             # else the agent wants to call a tool
             agent_output = state.agent_scratchpad[-1]
@@ -360,7 +361,7 @@ class ReActAgentGraph(DualNodeAgent):
         if errors:
             error_text = "\n".join(errors)
             logger.error("%s %s", AGENT_LOG_PREFIX, error_text)
-
+            return False
         return True
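The new `final_answer` channel means routing no longer depends on re-parsing the last message. A reduced sketch of the same decision (state trimmed to two fields; only the routing check is taken from the diff):

```python
from pydantic import BaseModel, Field

class MiniReActState(BaseModel):
    messages: list[str] = Field(default_factory=list)
    final_answer: str | None = None  # set once the agent produces its final answer

def conditional_edge(state: MiniReActState) -> str:
    # Mirrors the new check: an explicit flag, not inspection of messages[-1]
    return "END" if state.final_answer else "TOOL"

assert conditional_edge(MiniReActState(final_answer="42")) == "END"
assert conditional_edge(MiniReActState(messages=["thought"])) == "TOOL"
```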
nat/agent/react_agent/register.py
CHANGED

@@ -17,32 +17,34 @@ import logging

 from pydantic import AliasChoices
 from pydantic import Field
-from pydantic import PositiveInt

 from nat.builder.builder import Builder
 from nat.builder.framework_enum import LLMFrameworkEnum
 from nat.builder.function_info import FunctionInfo
 from nat.cli.register_workflow import register_function
+from nat.data_models.agent import AgentBaseConfig
 from nat.data_models.api_server import ChatRequest
+from nat.data_models.api_server import ChatRequestOrMessage
 from nat.data_models.api_server import ChatResponse
+from nat.data_models.api_server import Usage
+from nat.data_models.component_ref import FunctionGroupRef
 from nat.data_models.component_ref import FunctionRef
-from nat.data_models.
-from nat.data_models.
+from nat.data_models.optimizable import OptimizableField
+from nat.data_models.optimizable import OptimizableMixin
+from nat.data_models.optimizable import SearchSpace
 from nat.utils.type_converter import GlobalTypeConverter

 logger = logging.getLogger(__name__)


-class ReActAgentWorkflowConfig(
+class ReActAgentWorkflowConfig(AgentBaseConfig, OptimizableMixin, name="react_agent"):
     """
     Defines a NAT function that uses a ReAct Agent performs reasoning inbetween tool calls, and utilizes the
     tool names and descriptions to select the optimal tool.
     """
-
-    tool_names: list[FunctionRef] = Field(
-
-    llm_name: LLMRef = Field(description="The LLM model to use with the react agent.")
-    verbose: bool = Field(default=False, description="Set the verbosity of the react agent's logging.")
+    description: str = Field(default="ReAct Agent Workflow", description="The description of this functions use.")
+    tool_names: list[FunctionRef | FunctionGroupRef] = Field(
+        default_factory=list, description="The list of tools to provide to the react agent.")
     retry_agent_response_parsing_errors: bool = Field(
         default=True,
         validation_alias=AliasChoices("retry_agent_response_parsing_errors", "retry_parsing_errors"),
@@ -61,7 +63,6 @@ class ReActAgentWorkflowConfig(FunctionBaseConfig, name="react_agent"):
         description="Whether to pass tool call errors to agent. If False, failed tool calls will raise an exception.")
     include_tool_input_schema_in_tool_description: bool = Field(
         default=True, description="Specify inclusion of tool input schemas in the prompt.")
-    description: str = Field(default="ReAct Agent Workflow", description="The description of this functions use.")
     normalize_tool_input_quotes: bool = Field(
         default=True,
         description="Whether to replace single quotes with double quotes in the tool input. "
@@ -70,13 +71,14 @@ class ReActAgentWorkflowConfig(FunctionBaseConfig, name="react_agent"):
         default=None,
         description="Provides the SYSTEM_PROMPT to use with the agent")  # defaults to SYSTEM_PROMPT in prompt.py
     max_history: int = Field(default=15, description="Maximum number of messages to keep in the conversation history.")
-
-        default=
-
-
-
-
+    additional_instructions: str | None = OptimizableField(
+        default=None,
+        description="Additional instructions to provide to the agent in addition to the base prompt.",
+        space=SearchSpace(
+            is_prompt=True,
+            prompt="No additional instructions.",
+            prompt_purpose="Additional instructions to provide to the agent in addition to the base prompt.",
+        ))


 @register_function(config_type=ReActAgentWorkflowConfig, framework_wrappers=[LLMFrameworkEnum.LANGCHAIN])
@@ -96,7 +98,7 @@ async def react_agent_workflow(config: ReActAgentWorkflowConfig, builder: Builde
     llm = await builder.get_llm(config.llm_name, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
     # the agent can run any installed tool, simply install the tool and add it to the config file
     # the sample tool provided can easily be copied or changed
-    tools = builder.get_tools(tool_names=config.tool_names, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
+    tools = await builder.get_tools(tool_names=config.tool_names, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
     if not tools:
         raise ValueError(f"No tools specified for ReAct Agent '{config.llm_name}'")
     # configure callbacks, for sending intermediate steps
@@ -114,10 +116,23 @@ async def react_agent_workflow(config: ReActAgentWorkflowConfig, builder: Builde
         pass_tool_call_errors_to_agent=config.pass_tool_call_errors_to_agent,
         normalize_tool_input_quotes=config.normalize_tool_input_quotes).build_graph()

-    async def _response_fn(
+    async def _response_fn(chat_request_or_message: ChatRequestOrMessage) -> ChatResponse | str:
+        """
+        Main workflow entry function for the ReAct Agent.
+
+        This function invokes the ReAct Agent Graph and returns the response.
+
+        Args:
+            chat_request_or_message (ChatRequestOrMessage): The input message to process
+
+        Returns:
+            ChatResponse | str: The response from the agent or error message
+        """
         try:
+            message = GlobalTypeConverter.get().convert(chat_request_or_message, to_type=ChatRequest)
+
             # initialize the starting state with the user query
-            messages: list[BaseMessage] = trim_messages(messages=[m.model_dump() for m in
+            messages: list[BaseMessage] = trim_messages(messages=[m.model_dump() for m in message.messages],
                                                         max_tokens=config.max_history,
                                                         strategy="last",
                                                         token_counter=len,
@@ -135,24 +150,19 @@ async def react_agent_workflow(config: ReActAgentWorkflowConfig, builder: Builde
             # get and return the output from the state
             state = ReActGraphState(**state)
             output_message = state.messages[-1]
-
-
+            content = str(output_message.content)
+
+            # Create usage statistics for the response
+            prompt_tokens = sum(len(str(msg.content).split()) for msg in message.messages)
+            completion_tokens = len(content.split()) if content else 0
+            total_tokens = prompt_tokens + completion_tokens
+            usage = Usage(prompt_tokens=prompt_tokens, completion_tokens=completion_tokens, total_tokens=total_tokens)
+            response = ChatResponse.from_string(content, usage=usage)
+            if chat_request_or_message.is_string:
+                return GlobalTypeConverter.get().convert(response, to_type=str)
+            return response
         except Exception as ex:
-            logger.
-
-            if config.verbose:
-                return ChatResponse.from_string(str(ex))
-            return ChatResponse.from_string("I seem to be having a problem.")
-
-    if (config.use_openai_api):
-        yield FunctionInfo.from_fn(_response_fn, description=config.description)
-    else:
-
-        async def _str_api_fn(input_message: str) -> str:
-            oai_input = GlobalTypeConverter.get().try_convert(input_message, to_type=ChatRequest)
-
-            oai_output = await _response_fn(oai_input)
-
-            return GlobalTypeConverter.get().try_convert(oai_output, to_type=str)
+            logger.error("%s ReAct Agent failed with exception: %s", AGENT_LOG_PREFIX, str(ex))
+            raise

-
+    yield FunctionInfo.from_fn(_response_fn, description=config.description)
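Worth noting: the usage figures returned above are approximate, counting whitespace-separated words rather than model tokens. The same arithmetic in isolation:

```python
# Standalone sketch of the word-count-based usage accounting used above.
def approx_usage(prompt_messages: list[str], completion: str) -> dict[str, int]:
    prompt_tokens = sum(len(m.split()) for m in prompt_messages)
    completion_tokens = len(completion.split()) if completion else 0
    return {"prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": prompt_tokens + completion_tokens}

# 5 words in, 5 words out -> 10 "tokens" total
assert approx_usage(["What is 2 + 2?"], "2 + 2 equals 4.")["total_tokens"] == 10
```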
nat/agent/reasoning_agent/reasoning_agent.py
CHANGED

@@ -23,25 +23,22 @@ from nat.builder.builder import Builder
 from nat.builder.framework_enum import LLMFrameworkEnum
 from nat.builder.function_info import FunctionInfo
 from nat.cli.register_workflow import register_function
+from nat.data_models.agent import AgentBaseConfig
 from nat.data_models.api_server import ChatRequest
 from nat.data_models.component_ref import FunctionRef
-from nat.data_models.component_ref import LLMRef
-from nat.data_models.function import FunctionBaseConfig

 logger = logging.getLogger(__name__)


-class ReasoningFunctionConfig(
+class ReasoningFunctionConfig(AgentBaseConfig, name="reasoning_agent"):
     """
     Defines a NAT function that performs reasoning on the input data.
     Output is passed to the next function in the workflow.

     Designed to be used with an InterceptingFunction.
     """
-
-    llm_name: LLMRef = Field(description="The name of the LLM to use for reasoning.")
+    description: str = Field(default="Reasoning Agent", description="The description of this function's use.")
     augmented_fn: FunctionRef = Field(description="The name of the function to reason on.")
-    verbose: bool = Field(default=False, description="Whether to log detailed information.")
     reasoning_prompt_template: str = Field(
         default=("You are an expert reasoning model task with creating a detailed execution plan"
                  " for a system that has the following description:\n\n"
@@ -102,7 +99,7 @@ async def build_reasoning_function(config: ReasoningFunctionConfig, builder: Bui
     llm: BaseChatModel = await builder.get_llm(config.llm_name, wrapper_type=LLMFrameworkEnum.LANGCHAIN)

     # Get the augmented function's description
-    augmented_function = builder.get_function(config.augmented_fn)
+    augmented_function = await builder.get_function(config.augmented_fn)

     # For now, we rely on runtime checking for type conversion

@@ -113,11 +110,16 @@ async def build_reasoning_function(config: ReasoningFunctionConfig, builder: Bui
                          f"function without a description.")

     # Get the function dependencies of the augmented function
-
+    function_dependencies = builder.get_function_dependencies(config.augmented_fn)
+    function_used_tools = set()
+    function_used_tools.update(function_dependencies.functions)
+    for function_group in function_dependencies.function_groups:
+        function_used_tools.update(builder.get_function_group_dependencies(function_group).functions)

     tool_names_with_desc: list[tuple[str, str]] = []

     for tool in function_used_tools:
-        tool_impl = builder.get_function(tool)
+        tool_impl = await builder.get_function(tool)
         tool_names_with_desc.append((tool, tool_impl.description if hasattr(tool_impl, "description") else ""))

     # Draft the reasoning prompt for the augmented function
@@ -155,12 +157,12 @@ async def build_reasoning_function(config: ReasoningFunctionConfig, builder: Bui
         prompt = prompt.to_string()

         # Get the reasoning output from the LLM
-        reasoning_output =
+        reasoning_output = []

         async for chunk in llm.astream(prompt):
-            reasoning_output
+            reasoning_output.append(chunk.content)

-        reasoning_output = remove_r1_think_tags(reasoning_output)
+        reasoning_output = remove_r1_think_tags("".join(reasoning_output))

         output = await downstream_template.ainvoke(input={
             "input_text": input_text, "reasoning_output": reasoning_output
@@ -198,12 +200,12 @@ async def build_reasoning_function(config: ReasoningFunctionConfig, builder: Bui
         prompt = prompt.to_string()

         # Get the reasoning output from the LLM
-        reasoning_output =
+        reasoning_output = []

         async for chunk in llm.astream(prompt):
-            reasoning_output
+            reasoning_output.append(chunk.content)

-        reasoning_output = remove_r1_think_tags(reasoning_output)
+        reasoning_output = remove_r1_think_tags("".join(reasoning_output))

         output = await downstream_template.ainvoke(input={
             "input_text": input_text, "reasoning_output": reasoning_output
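Both hunks above apply the same fix seen in base.py: accumulate streamed chunks in a list and join once, instead of repeated string concatenation (which is quadratic in total output length). The pattern in isolation:

```python
# Minimal sketch of the stream-accumulation pattern, assuming any LangChain
# chat model (`astream` yields chunks with a .content attribute).
async def collect_stream(llm, prompt: str) -> str:
    chunks: list[str] = []
    async for chunk in llm.astream(prompt):
        chunks.append(chunk.content)
    return "".join(chunks)
```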
nat/agent/register.py
CHANGED

@@ -16,7 +16,9 @@
 # flake8: noqa

 # Import any workflows which need to be automatically registered here
+from .prompt_optimizer import register as prompt_optimizer
 from .react_agent import register as react_agent
 from .reasoning_agent import reasoning_agent
+from .responses_api_agent import register as responses_api_agent
 from .rewoo_agent import register as rewoo_agent
 from .tool_calling_agent import register as tool_calling_agent

nat/agent/responses_api_agent/__init__.py
ADDED

@@ -0,0 +1,14 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.