nvidia-nat 1.3.0.dev2__py3-none-any.whl → 1.3.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (242)
  1. aiq/__init__.py +2 -2
  2. nat/agent/base.py +24 -15
  3. nat/agent/dual_node.py +9 -4
  4. nat/agent/prompt_optimizer/prompt.py +68 -0
  5. nat/agent/prompt_optimizer/register.py +149 -0
  6. nat/agent/react_agent/agent.py +79 -47
  7. nat/agent/react_agent/register.py +41 -21
  8. nat/agent/reasoning_agent/reasoning_agent.py +11 -9
  9. nat/agent/register.py +1 -1
  10. nat/agent/rewoo_agent/agent.py +326 -148
  11. nat/agent/rewoo_agent/prompt.py +19 -22
  12. nat/agent/rewoo_agent/register.py +46 -26
  13. nat/agent/tool_calling_agent/agent.py +84 -28
  14. nat/agent/tool_calling_agent/register.py +51 -28
  15. nat/authentication/api_key/api_key_auth_provider.py +2 -2
  16. nat/authentication/credential_validator/bearer_token_validator.py +557 -0
  17. nat/authentication/http_basic_auth/http_basic_auth_provider.py +1 -1
  18. nat/authentication/interfaces.py +5 -2
  19. nat/authentication/oauth2/oauth2_auth_code_flow_provider.py +40 -20
  20. nat/authentication/oauth2/oauth2_resource_server_config.py +124 -0
  21. nat/authentication/register.py +0 -1
  22. nat/builder/builder.py +56 -24
  23. nat/builder/component_utils.py +9 -5
  24. nat/builder/context.py +46 -11
  25. nat/builder/eval_builder.py +16 -11
  26. nat/builder/framework_enum.py +1 -0
  27. nat/builder/front_end.py +1 -1
  28. nat/builder/function.py +378 -8
  29. nat/builder/function_base.py +3 -3
  30. nat/builder/function_info.py +6 -8
  31. nat/builder/user_interaction_manager.py +2 -2
  32. nat/builder/workflow.py +13 -1
  33. nat/builder/workflow_builder.py +281 -76
  34. nat/cli/cli_utils/config_override.py +2 -2
  35. nat/cli/commands/evaluate.py +1 -1
  36. nat/cli/commands/info/info.py +16 -6
  37. nat/cli/commands/info/list_channels.py +1 -1
  38. nat/cli/commands/info/list_components.py +7 -8
  39. nat/cli/commands/mcp/__init__.py +14 -0
  40. nat/cli/commands/mcp/mcp.py +986 -0
  41. nat/cli/commands/object_store/__init__.py +14 -0
  42. nat/cli/commands/object_store/object_store.py +227 -0
  43. nat/cli/commands/optimize.py +90 -0
  44. nat/cli/commands/registry/publish.py +2 -2
  45. nat/cli/commands/registry/pull.py +2 -2
  46. nat/cli/commands/registry/remove.py +2 -2
  47. nat/cli/commands/registry/search.py +15 -17
  48. nat/cli/commands/start.py +16 -5
  49. nat/cli/commands/uninstall.py +1 -1
  50. nat/cli/commands/workflow/templates/config.yml.j2 +0 -1
  51. nat/cli/commands/workflow/templates/pyproject.toml.j2 +4 -1
  52. nat/cli/commands/workflow/templates/register.py.j2 +0 -1
  53. nat/cli/commands/workflow/workflow_commands.py +9 -13
  54. nat/cli/entrypoint.py +8 -10
  55. nat/cli/register_workflow.py +38 -4
  56. nat/cli/type_registry.py +75 -6
  57. nat/control_flow/__init__.py +0 -0
  58. nat/control_flow/register.py +20 -0
  59. nat/control_flow/router_agent/__init__.py +0 -0
  60. nat/control_flow/router_agent/agent.py +329 -0
  61. nat/control_flow/router_agent/prompt.py +48 -0
  62. nat/control_flow/router_agent/register.py +91 -0
  63. nat/control_flow/sequential_executor.py +166 -0
  64. nat/data_models/agent.py +34 -0
  65. nat/data_models/api_server.py +10 -10
  66. nat/data_models/authentication.py +23 -9
  67. nat/data_models/common.py +1 -1
  68. nat/data_models/component.py +2 -0
  69. nat/data_models/component_ref.py +11 -0
  70. nat/data_models/config.py +41 -17
  71. nat/data_models/dataset_handler.py +1 -1
  72. nat/data_models/discovery_metadata.py +4 -4
  73. nat/data_models/evaluate.py +4 -1
  74. nat/data_models/function.py +34 -0
  75. nat/data_models/function_dependencies.py +14 -6
  76. nat/data_models/gated_field_mixin.py +242 -0
  77. nat/data_models/intermediate_step.py +3 -3
  78. nat/data_models/optimizable.py +119 -0
  79. nat/data_models/optimizer.py +149 -0
  80. nat/data_models/swe_bench_model.py +1 -1
  81. nat/data_models/temperature_mixin.py +44 -0
  82. nat/data_models/thinking_mixin.py +86 -0
  83. nat/data_models/top_p_mixin.py +44 -0
  84. nat/embedder/nim_embedder.py +1 -1
  85. nat/embedder/openai_embedder.py +1 -1
  86. nat/embedder/register.py +0 -1
  87. nat/eval/config.py +3 -1
  88. nat/eval/dataset_handler/dataset_handler.py +71 -7
  89. nat/eval/evaluate.py +86 -31
  90. nat/eval/evaluator/base_evaluator.py +1 -1
  91. nat/eval/evaluator/evaluator_model.py +13 -0
  92. nat/eval/intermediate_step_adapter.py +1 -1
  93. nat/eval/rag_evaluator/evaluate.py +2 -2
  94. nat/eval/rag_evaluator/register.py +3 -3
  95. nat/eval/register.py +4 -1
  96. nat/eval/remote_workflow.py +3 -3
  97. nat/eval/runtime_evaluator/__init__.py +14 -0
  98. nat/eval/runtime_evaluator/evaluate.py +123 -0
  99. nat/eval/runtime_evaluator/register.py +100 -0
  100. nat/eval/swe_bench_evaluator/evaluate.py +6 -6
  101. nat/eval/trajectory_evaluator/evaluate.py +1 -1
  102. nat/eval/trajectory_evaluator/register.py +1 -1
  103. nat/eval/tunable_rag_evaluator/evaluate.py +4 -7
  104. nat/eval/utils/eval_trace_ctx.py +89 -0
  105. nat/eval/utils/weave_eval.py +18 -9
  106. nat/experimental/decorators/experimental_warning_decorator.py +27 -7
  107. nat/experimental/test_time_compute/functions/plan_select_execute_function.py +7 -3
  108. nat/experimental/test_time_compute/functions/ttc_tool_orchestration_function.py +3 -3
  109. nat/experimental/test_time_compute/functions/ttc_tool_wrapper_function.py +1 -1
  110. nat/experimental/test_time_compute/models/strategy_base.py +5 -4
  111. nat/experimental/test_time_compute/register.py +0 -1
  112. nat/experimental/test_time_compute/selection/llm_based_output_merging_selector.py +1 -3
  113. nat/front_ends/console/authentication_flow_handler.py +82 -30
  114. nat/front_ends/console/console_front_end_plugin.py +8 -5
  115. nat/front_ends/fastapi/auth_flow_handlers/websocket_flow_handler.py +52 -17
  116. nat/front_ends/fastapi/dask_client_mixin.py +65 -0
  117. nat/front_ends/fastapi/fastapi_front_end_config.py +36 -5
  118. nat/front_ends/fastapi/fastapi_front_end_controller.py +4 -4
  119. nat/front_ends/fastapi/fastapi_front_end_plugin.py +135 -4
  120. nat/front_ends/fastapi/fastapi_front_end_plugin_worker.py +481 -281
  121. nat/front_ends/fastapi/job_store.py +518 -99
  122. nat/front_ends/fastapi/main.py +11 -19
  123. nat/front_ends/fastapi/message_handler.py +13 -14
  124. nat/front_ends/fastapi/message_validator.py +17 -19
  125. nat/front_ends/fastapi/response_helpers.py +4 -4
  126. nat/front_ends/fastapi/step_adaptor.py +2 -2
  127. nat/front_ends/fastapi/utils.py +57 -0
  128. nat/front_ends/mcp/introspection_token_verifier.py +73 -0
  129. nat/front_ends/mcp/mcp_front_end_config.py +10 -1
  130. nat/front_ends/mcp/mcp_front_end_plugin.py +45 -13
  131. nat/front_ends/mcp/mcp_front_end_plugin_worker.py +116 -8
  132. nat/front_ends/mcp/tool_converter.py +44 -14
  133. nat/front_ends/register.py +0 -1
  134. nat/front_ends/simple_base/simple_front_end_plugin_base.py +3 -1
  135. nat/llm/aws_bedrock_llm.py +24 -12
  136. nat/llm/azure_openai_llm.py +13 -6
  137. nat/llm/litellm_llm.py +69 -0
  138. nat/llm/nim_llm.py +20 -8
  139. nat/llm/openai_llm.py +14 -6
  140. nat/llm/register.py +4 -1
  141. nat/llm/utils/env_config_value.py +2 -3
  142. nat/llm/utils/thinking.py +215 -0
  143. nat/meta/pypi.md +9 -9
  144. nat/object_store/register.py +0 -1
  145. nat/observability/exporter/base_exporter.py +3 -3
  146. nat/observability/exporter/file_exporter.py +1 -1
  147. nat/observability/exporter/processing_exporter.py +309 -81
  148. nat/observability/exporter/span_exporter.py +1 -1
  149. nat/observability/exporter_manager.py +7 -7
  150. nat/observability/mixin/file_mixin.py +7 -7
  151. nat/observability/mixin/redaction_config_mixin.py +42 -0
  152. nat/observability/mixin/tagging_config_mixin.py +62 -0
  153. nat/observability/mixin/type_introspection_mixin.py +420 -107
  154. nat/observability/processor/batching_processor.py +5 -7
  155. nat/observability/processor/falsy_batch_filter_processor.py +55 -0
  156. nat/observability/processor/processor.py +3 -0
  157. nat/observability/processor/processor_factory.py +70 -0
  158. nat/observability/processor/redaction/__init__.py +24 -0
  159. nat/observability/processor/redaction/contextual_redaction_processor.py +125 -0
  160. nat/observability/processor/redaction/contextual_span_redaction_processor.py +66 -0
  161. nat/observability/processor/redaction/redaction_processor.py +177 -0
  162. nat/observability/processor/redaction/span_header_redaction_processor.py +92 -0
  163. nat/observability/processor/span_tagging_processor.py +68 -0
  164. nat/observability/register.py +6 -4
  165. nat/profiler/calc/calc_runner.py +3 -4
  166. nat/profiler/callbacks/agno_callback_handler.py +1 -1
  167. nat/profiler/callbacks/langchain_callback_handler.py +6 -6
  168. nat/profiler/callbacks/llama_index_callback_handler.py +3 -3
  169. nat/profiler/callbacks/semantic_kernel_callback_handler.py +3 -3
  170. nat/profiler/data_frame_row.py +1 -1
  171. nat/profiler/decorators/framework_wrapper.py +62 -13
  172. nat/profiler/decorators/function_tracking.py +160 -3
  173. nat/profiler/forecasting/models/forecasting_base_model.py +3 -1
  174. nat/profiler/inference_optimization/bottleneck_analysis/simple_stack_analysis.py +1 -1
  175. nat/profiler/inference_optimization/data_models.py +3 -3
  176. nat/profiler/inference_optimization/experimental/prefix_span_analysis.py +7 -8
  177. nat/profiler/inference_optimization/token_uniqueness.py +1 -1
  178. nat/profiler/parameter_optimization/__init__.py +0 -0
  179. nat/profiler/parameter_optimization/optimizable_utils.py +93 -0
  180. nat/profiler/parameter_optimization/optimizer_runtime.py +67 -0
  181. nat/profiler/parameter_optimization/parameter_optimizer.py +153 -0
  182. nat/profiler/parameter_optimization/parameter_selection.py +107 -0
  183. nat/profiler/parameter_optimization/pareto_visualizer.py +380 -0
  184. nat/profiler/parameter_optimization/prompt_optimizer.py +384 -0
  185. nat/profiler/parameter_optimization/update_helpers.py +66 -0
  186. nat/profiler/profile_runner.py +14 -9
  187. nat/profiler/utils.py +4 -2
  188. nat/registry_handlers/local/local_handler.py +2 -2
  189. nat/registry_handlers/package_utils.py +1 -2
  190. nat/registry_handlers/pypi/pypi_handler.py +23 -26
  191. nat/registry_handlers/register.py +3 -4
  192. nat/registry_handlers/rest/rest_handler.py +12 -13
  193. nat/retriever/milvus/retriever.py +2 -2
  194. nat/retriever/nemo_retriever/retriever.py +1 -1
  195. nat/retriever/register.py +0 -1
  196. nat/runtime/loader.py +2 -2
  197. nat/runtime/runner.py +3 -2
  198. nat/runtime/session.py +43 -8
  199. nat/settings/global_settings.py +16 -5
  200. nat/tool/chat_completion.py +5 -2
  201. nat/tool/code_execution/local_sandbox/local_sandbox_server.py +3 -3
  202. nat/tool/datetime_tools.py +49 -9
  203. nat/tool/document_search.py +2 -2
  204. nat/tool/github_tools.py +450 -0
  205. nat/tool/nvidia_rag.py +1 -1
  206. nat/tool/register.py +2 -9
  207. nat/tool/retriever.py +3 -2
  208. nat/utils/callable_utils.py +70 -0
  209. nat/utils/data_models/schema_validator.py +3 -3
  210. nat/utils/exception_handlers/automatic_retries.py +104 -51
  211. nat/utils/exception_handlers/schemas.py +1 -1
  212. nat/utils/io/yaml_tools.py +2 -2
  213. nat/utils/log_levels.py +25 -0
  214. nat/utils/reactive/base/observable_base.py +2 -2
  215. nat/utils/reactive/base/observer_base.py +1 -1
  216. nat/utils/reactive/observable.py +2 -2
  217. nat/utils/reactive/observer.py +4 -4
  218. nat/utils/reactive/subscription.py +1 -1
  219. nat/utils/settings/global_settings.py +6 -8
  220. nat/utils/type_converter.py +4 -3
  221. nat/utils/type_utils.py +9 -5
  222. {nvidia_nat-1.3.0.dev2.dist-info → nvidia_nat-1.3.0rc1.dist-info}/METADATA +42 -16
  223. {nvidia_nat-1.3.0.dev2.dist-info → nvidia_nat-1.3.0rc1.dist-info}/RECORD +230 -189
  224. {nvidia_nat-1.3.0.dev2.dist-info → nvidia_nat-1.3.0rc1.dist-info}/entry_points.txt +1 -0
  225. nat/cli/commands/info/list_mcp.py +0 -304
  226. nat/tool/github_tools/create_github_commit.py +0 -133
  227. nat/tool/github_tools/create_github_issue.py +0 -87
  228. nat/tool/github_tools/create_github_pr.py +0 -106
  229. nat/tool/github_tools/get_github_file.py +0 -106
  230. nat/tool/github_tools/get_github_issue.py +0 -166
  231. nat/tool/github_tools/get_github_pr.py +0 -256
  232. nat/tool/github_tools/update_github_issue.py +0 -100
  233. nat/tool/mcp/exceptions.py +0 -142
  234. nat/tool/mcp/mcp_client.py +0 -255
  235. nat/tool/mcp/mcp_tool.py +0 -96
  236. nat/utils/exception_handlers/mcp.py +0 -211
  237. /nat/{tool/github_tools → agent/prompt_optimizer}/__init__.py +0 -0
  238. /nat/{tool/mcp → authentication/credential_validator}/__init__.py +0 -0
  239. {nvidia_nat-1.3.0.dev2.dist-info → nvidia_nat-1.3.0rc1.dist-info}/WHEEL +0 -0
  240. {nvidia_nat-1.3.0.dev2.dist-info → nvidia_nat-1.3.0rc1.dist-info}/licenses/LICENSE-3rd-party.txt +0 -0
  241. {nvidia_nat-1.3.0.dev2.dist-info → nvidia_nat-1.3.0rc1.dist-info}/licenses/LICENSE.md +0 -0
  242. {nvidia_nat-1.3.0.dev2.dist-info → nvidia_nat-1.3.0rc1.dist-info}/top_level.txt +0 -0
nat/data_models/optimizer.py ADDED
@@ -0,0 +1,149 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from pathlib import Path
+
+ from pydantic import BaseModel
+ from pydantic import Field
+
+
+ class OptimizerMetric(BaseModel):
+     """
+     Parameters used by the workflow optimizer to define a metric to optimize.
+     """
+     evaluator_name: str = Field(description="Name of the metric to optimize.")
+     direction: str = Field(description="Direction of the optimization. Can be 'maximize' or 'minimize'.")
+     weight: float = Field(description="Weight of the metric in the optimization process.", default=1.0)
+
+
+ class NumericOptimizationConfig(BaseModel):
+     """
+     Configuration for numeric/enum optimization (Optuna).
+     """
+     enabled: bool = Field(default=True, description="Enable numeric optimization")
+     n_trials: int = Field(description="Number of trials for numeric optimization.", default=20)
+
+
+ class PromptGAOptimizationConfig(BaseModel):
+     """
+     Configuration for prompt optimization using a Genetic Algorithm.
+     """
+     enabled: bool = Field(default=False, description="Enable GA-based prompt optimization")
+
+     # Prompt optimization function hooks
+     prompt_population_init_function: str | None = Field(
+         default=None,
+         description="Optional function name to initialize/mutate candidate prompts.",
+     )
+     prompt_recombination_function: str | None = Field(
+         default=None,
+         description="Optional function name to recombine two parent prompts into a child.",
+     )
+
+     # Genetic algorithm configuration
+     ga_population_size: int = Field(
+         description="Population size for genetic algorithm prompt optimization.",
+         default=24,
+     )
+     ga_generations: int = Field(
+         description="Number of generations to evolve in GA prompt optimization.",
+         default=15,
+     )
+     ga_offspring_size: int | None = Field(
+         description="Number of offspring to produce per generation. Defaults to population_size - elitism.",
+         default=None,
+     )
+     ga_crossover_rate: float = Field(
+         description="Probability of applying crossover during reproduction.",
+         default=0.8,
+         ge=0.0,
+         le=1.0,
+     )
+     ga_mutation_rate: float = Field(
+         description="Probability of mutating a child after crossover.",
+         default=0.3,
+         ge=0.0,
+         le=1.0,
+     )
+     ga_elitism: int = Field(
+         description="Number of top individuals carried over unchanged each generation.",
+         default=2,
+     )
+     ga_selection_method: str = Field(
+         description="Parent selection strategy: 'tournament' or 'roulette'.",
+         default="tournament",
+     )
+     ga_tournament_size: int = Field(
+         description="Tournament size when using tournament selection.",
+         default=3,
+     )
+     ga_parallel_evaluations: int = Field(
+         description="Max number of individuals to evaluate concurrently per generation.",
+         default=8,
+     )
+     ga_diversity_lambda: float = Field(
+         description="Strength of diversity penalty (0 disables). Penalizes identical/near-identical prompts.",
+         default=0.0,
+         ge=0.0,
+     )
+
+
+ class OptimizerConfig(BaseModel):
+     """
+     Parameters used by the workflow optimizer.
+     """
+     output_path: Path | None = Field(
+         default=None,
+         description="Path to the output directory where the results will be saved.",
+     )
+
+     eval_metrics: dict[str, OptimizerMetric] | None = Field(
+         description="List of evaluation metrics to optimize.",
+         default=None,
+     )
+
+     reps_per_param_set: int = Field(
+         default=3,
+         description="Number of repetitions per parameter set for the optimization.",
+     )
+
+     target: float | None = Field(
+         description=(
+             "Target value for the optimization. If set, the optimization will stop when this value is reached."),
+         default=None,
+     )
+
+     multi_objective_combination_mode: str = Field(
+         description="Method to combine multiple objectives into a single score.",
+         default="harmonic",
+     )
+
+     # Nested configs
+     numeric: NumericOptimizationConfig = NumericOptimizationConfig()
+     prompt: PromptGAOptimizationConfig = PromptGAOptimizationConfig()
+
+
+ class OptimizerRunConfig(BaseModel):
+     """
+     Parameters used for an Optimizer run.
+     """
+     # Eval parameters
+
+     config_file: Path | BaseModel  # allow for instantiated configs to be passed in
+     dataset: str | Path | None  # dataset file path can be specified in the config file
+     result_json_path: str = "$"
+     endpoint: str | None = None  # only used when running the workflow remotely
+     endpoint_timeout: int = 300
+     override: tuple[tuple[str, str], ...] = ()
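
The models above are plain Pydantic classes, so they can be constructed directly. Below is a minimal sketch of assembling an OptimizerConfig with two metrics and a small GA prompt search, assuming this hunk is the new nat/data_models/optimizer.py listed above (entry 79). The metric names are made up (evaluator_name presumably matches an evaluator defined in the eval config), and how the new optimize command (nat/cli/commands/optimize.py) actually consumes this object is not shown in this diff.

from nat.data_models.optimizer import OptimizerConfig
from nat.data_models.optimizer import OptimizerMetric
from nat.data_models.optimizer import PromptGAOptimizationConfig

# Hypothetical metric names for illustration only.
config = OptimizerConfig(
    eval_metrics={
        "accuracy": OptimizerMetric(evaluator_name="accuracy", direction="maximize", weight=1.0),
        "latency": OptimizerMetric(evaluator_name="latency", direction="minimize", weight=0.5),
    },
    reps_per_param_set=3,
    multi_objective_combination_mode="harmonic",
    prompt=PromptGAOptimizationConfig(enabled=True, ga_population_size=12, ga_generations=5),
)
print(config.model_dump_json(indent=2))
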
nat/data_models/swe_bench_model.py CHANGED
@@ -39,7 +39,7 @@ class SWEBenchInput(BaseModel):
 
      # Handle improperly formatted JSON strings for list fields
      @field_validator("FAIL_TO_PASS", "PASS_TO_PASS", mode="before")
-     def parse_list_fields(cls, value):  # pylint: disable=no-self-argument
+     def parse_list_fields(cls, value):
          if isinstance(value, str):
              # Attempt to parse the string as a list
              return json.loads(value)
nat/data_models/temperature_mixin.py ADDED
@@ -0,0 +1,44 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import re
+
+ from pydantic import BaseModel
+
+ from nat.data_models.gated_field_mixin import GatedFieldMixin
+ from nat.data_models.optimizable import OptimizableField
+ from nat.data_models.optimizable import SearchSpace
+
+
+ class TemperatureMixin(
+         BaseModel,
+         GatedFieldMixin,
+         field_name="temperature",
+         default_if_supported=0.0,
+         keys=("model_name", "model", "azure_deployment"),
+         unsupported=(re.compile(r"gpt-?5", re.IGNORECASE), ),
+ ):
+     """
+     Mixin class for temperature configuration. Unsupported on models like gpt-5.
+
+     Attributes:
+         temperature: Sampling temperature in [0, 1]. Defaults to 0.0 when supported on the model.
+     """
+     temperature: float | None = OptimizableField(
+         default=None,
+         ge=0.0,
+         le=1.0,
+         description="Sampling temperature in [0, 1]. Defaults to 0.0 when supported on the model.",
+         space=SearchSpace(high=0.9, low=0.1, step=0.2))
nat/data_models/thinking_mixin.py ADDED
@@ -0,0 +1,86 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import re
+
+ from pydantic import BaseModel
+ from pydantic import Field
+
+ from nat.data_models.gated_field_mixin import GatedFieldMixin
+
+ # Currently the control logic for thinking is only implemented for Nemotron models
+ _NEMOTRON_REGEX = re.compile(r"^nvidia/(llama|nvidia).*nemotron", re.IGNORECASE)
+ # The keys are the fields that are used to determine if the model supports thinking
+ _MODEL_KEYS = ("model_name", "model", "azure_deployment")
+
+
+ class ThinkingMixin(
+         BaseModel,
+         GatedFieldMixin,
+         field_name="thinking",
+         default_if_supported=None,
+         keys=_MODEL_KEYS,
+         supported=(_NEMOTRON_REGEX, ),
+ ):
+     """
+     Mixin class for thinking configuration. Only supported on Nemotron models.
+
+     Attributes:
+         thinking: Whether to enable thinking. Defaults to None when supported on the model.
+     """
+     thinking: bool | None = Field(
+         default=None,
+         description="Whether to enable thinking. Defaults to None when supported on the model.",
+     )
+
+     @property
+     def thinking_system_prompt(self) -> str | None:
+         """
+         Returns the system prompt to use for thinking.
+         For NVIDIA Nemotron, returns "/think" if enabled, else "/no_think".
+         For Llama Nemotron v1.5, returns "/think" if enabled, else "/no_think".
+         For Llama Nemotron v1.0, returns "detailed thinking on" if enabled, else "detailed thinking off".
+         If thinking is not supported on the model, returns None.
+
+         Returns:
+             str | None: The system prompt to use for thinking.
+         """
+         if self.thinking is None:
+             return None
+
+         for key in _MODEL_KEYS:
+             model = getattr(self, key, None)
+             if not isinstance(model, str) or model is None:
+                 continue
+
+             # Normalize name to reduce checks
+             model = model.lower().translate(str.maketrans("_.", "--"))
+
+             if model.startswith("nvidia/nvidia"):
+                 return "/think" if self.thinking else "/no_think"
+
+             if model.startswith("nvidia/llama"):
+                 if "v1-0" in model or "v1-1" in model:
+                     return f"detailed thinking {'on' if self.thinking else 'off'}"
+
+                 if "v1-5" in model:
+                     # v1.5 models are updated to use the /think and /no_think system prompts
+                     return "/think" if self.thinking else "/no_think"
+
+                 # Assume any other model is a newer model that uses the /think and /no_think system prompts
+                 return "/think" if self.thinking else "/no_think"
+
+         # Unknown model
+         return None
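
For reference, the prompt-selection rules in thinking_system_prompt can be restated as a small standalone function. This is only an illustration of the branching above, not the mixin itself (it skips the _MODEL_KEYS lookup), and the model identifiers used below are illustrative examples rather than an authoritative list.

def nemotron_thinking_prompt(model: str, thinking: bool) -> str | None:
    """Restates the branching of ThinkingMixin.thinking_system_prompt for a single model string."""
    model = model.lower().translate(str.maketrans("_.", "--"))
    if model.startswith("nvidia/nvidia"):
        return "/think" if thinking else "/no_think"
    if model.startswith("nvidia/llama"):
        if "v1-0" in model or "v1-1" in model:
            return f"detailed thinking {'on' if thinking else 'off'}"
        # v1.5 and newer Llama Nemotron models use the /think and /no_think prompts
        return "/think" if thinking else "/no_think"
    return None  # not recognized as a Nemotron model


# Illustrative model identifiers:
print(nemotron_thinking_prompt("nvidia/llama-3.1-nemotron-nano-8b-v1.0", True))     # detailed thinking on
print(nemotron_thinking_prompt("nvidia/llama-3.3-nemotron-super-49b-v1.5", False))  # /no_think
print(nemotron_thinking_prompt("nvidia/nvidia-nemotron-nano-9b-v2", True))          # /think
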
nat/data_models/top_p_mixin.py ADDED
@@ -0,0 +1,44 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import re
+
+ from pydantic import BaseModel
+
+ from nat.data_models.gated_field_mixin import GatedFieldMixin
+ from nat.data_models.optimizable import OptimizableField
+ from nat.data_models.optimizable import SearchSpace
+
+
+ class TopPMixin(
+         BaseModel,
+         GatedFieldMixin,
+         field_name="top_p",
+         default_if_supported=1.0,
+         keys=("model_name", "model", "azure_deployment"),
+         unsupported=(re.compile(r"gpt-?5", re.IGNORECASE), ),
+ ):
+     """
+     Mixin class for top-p configuration. Unsupported on models like gpt-5.
+
+     Attributes:
+         top_p: Top-p for distribution sampling. Defaults to 1.0 when supported on the model.
+     """
+     top_p: float | None = OptimizableField(
+         default=None,
+         ge=0.0,
+         le=1.0,
+         description="Top-p for distribution sampling. Defaults to 1.0 when supported on the model.",
+         space=SearchSpace(high=1.0, low=0.5, step=0.1))
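
Both TemperatureMixin and TopPMixin gate their field on the same model-name keys and the same unsupported pattern. The snippet below only illustrates how that regex classifies a few names; the actual gating, including the 0.0 and 1.0 defaults when supported, is implemented by GatedFieldMixin in nat/data_models/gated_field_mixin.py, whose resolution logic is not part of this diff, so the substring-style matching shown here is an assumption.

import re

# The "unsupported" pattern shared by TemperatureMixin and TopPMixin above.
_GPT5 = re.compile(r"gpt-?5", re.IGNORECASE)

for name in ("gpt-4o", "gpt-5", "GPT5-mini", "meta/llama-3.1-70b-instruct"):
    gated_off = _GPT5.search(name) is not None  # assumption: search-style matching
    print(f"{name}: temperature/top_p {'gated off' if gated_off else 'available'}")
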
nat/embedder/nim_embedder.py CHANGED
@@ -50,7 +50,7 @@ class NIMEmbedderModelConfig(EmbedderBaseConfig, RetryMixin, name="nim"):
                               description=("The truncation strategy if the input on the "
                                            "server side if it's too large."))
 
-     model_config = ConfigDict(protected_namespaces=())
+     model_config = ConfigDict(protected_namespaces=(), extra="allow")
 
 
  @register_embedder_provider(config_type=NIMEmbedderModelConfig)
nat/embedder/openai_embedder.py CHANGED
@@ -27,7 +27,7 @@ from nat.data_models.retry_mixin import RetryMixin
  class OpenAIEmbedderModelConfig(EmbedderBaseConfig, RetryMixin, name="openai"):
      """An OpenAI LLM provider to be used with an LLM client."""
 
-     model_config = ConfigDict(protected_namespaces=())
+     model_config = ConfigDict(protected_namespaces=(), extra="allow")
 
      api_key: str | None = Field(default=None, description="OpenAI API key to interact with hosted model.")
      base_url: str | None = Field(default=None, description="Base url to the hosted model.")
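
The only change to both embedder configs is extra="allow" in model_config. In Pydantic v2 this means unrecognized keys are kept on the instance instead of raising a validation error, so provider-specific options can be passed through the config. A stand-in model (not the real EmbedderBaseConfig) is enough to show the effect:

from pydantic import BaseModel
from pydantic import ConfigDict


class EmbedderConfigSketch(BaseModel):
    """Stand-in for illustration only; the real configs also inherit EmbedderBaseConfig and RetryMixin."""
    model_config = ConfigDict(protected_namespaces=(), extra="allow")

    api_key: str | None = None
    base_url: str | None = None


cfg = EmbedderConfigSketch(api_key="dummy-key", dimensions=256)  # 'dimensions' is not a declared field
print(cfg.model_extra)  # {'dimensions': 256}
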
nat/embedder/register.py CHANGED
@@ -13,7 +13,6 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
 
- # pylint: disable=unused-import
  # flake8: noqa
  # isort:skip_file
 
nat/eval/config.py CHANGED
@@ -27,7 +27,7 @@ class EvaluationRunConfig(BaseModel):
      """
      Parameters used for a single evaluation run.
      """
-     config_file: Path
+     config_file: Path | BaseModel
      dataset: str | None = None  # dataset file path can be specified in the config file
      result_json_path: str = "$"
      skip_workflow: bool = False
@@ -44,6 +44,8 @@
      # number of passes at each concurrency, if 0 the dataset is adjusted to a multiple of the
      # concurrency. This is only used if adjust_dataset_size is true
      num_passes: int = 0
+     # timeout for waiting for trace export tasks to complete
+     export_timeout: float = 60.0
 
 
  class EvaluationRunOutput(BaseModel):
nat/eval/dataset_handler/dataset_handler.py CHANGED
@@ -13,6 +13,7 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
 
+ import importlib
  import json
  import math
  from pathlib import Path
@@ -41,7 +42,8 @@ class DatasetHandler:
                   reps: int,
                   concurrency: int,
                   num_passes: int = 1,
-                  adjust_dataset_size: bool = False):
+                  adjust_dataset_size: bool = False,
+                  custom_pre_eval_process_function: str | None = None):
          from nat.eval.intermediate_step_adapter import IntermediateStepAdapter
 
          self.dataset_config = dataset_config
@@ -53,6 +55,9 @@ class DatasetHandler:
          self.num_passes = num_passes
          self.adjust_dataset_size = adjust_dataset_size
 
+         # Custom pre-evaluation process function
+         self.custom_pre_eval_process_function = custom_pre_eval_process_function
+
          # Helpers
          self.intermediate_step_adapter = IntermediateStepAdapter()
 
@@ -146,13 +151,12 @@
              # When num_passes is specified, always use concurrency * num_passes
              # This respects the user's intent for exact number of passes
              target_size = self.concurrency * self.num_passes
+         # When num_passes = 0, use the largest multiple of concurrency <= original_size
+         # If original_size < concurrency, we need at least concurrency rows
+         elif original_size >= self.concurrency:
+             target_size = (original_size // self.concurrency) * self.concurrency
          else:
-             # When num_passes = 0, use the largest multiple of concurrency <= original_size
-             # If original_size < concurrency, we need at least concurrency rows
-             if original_size >= self.concurrency:
-                 target_size = (original_size // self.concurrency) * self.concurrency
-             else:
-                 target_size = self.concurrency
+             target_size = self.concurrency
 
          if target_size == 0:
              raise ValueError("Input dataset too small for even one batch at given concurrency.")
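
The refactor above flattens the nested if/else into an elif without changing the result. Restated outside the class as a worked example (the enclosing guard on num_passes is not visible in this hunk and is assumed to be "num_passes is non-zero"):

def target_size(original_size: int, concurrency: int, num_passes: int) -> int:
    if num_passes:  # assumed guard; the hunk only shows "when num_passes is specified"
        return concurrency * num_passes
    if original_size >= concurrency:
        return (original_size // concurrency) * concurrency
    return concurrency


assert target_size(10, concurrency=4, num_passes=0) == 8  # trimmed to the largest multiple of 4
assert target_size(3, concurrency=4, num_passes=0) == 4   # padded up to one full batch
assert target_size(10, concurrency=4, num_passes=2) == 8  # num_passes wins: 4 * 2
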
@@ -331,6 +335,66 @@
          filtered_steps = self.intermediate_step_adapter.filter_intermediate_steps(intermediate_steps, event_filter)
          return self.intermediate_step_adapter.serialize_intermediate_steps(filtered_steps)
 
+     def pre_eval_process_eval_input(self, eval_input: EvalInput) -> EvalInput:
+         """
+         Pre-evaluation process the eval input using custom function if provided.
+
+         The custom pre-evaluation process function should have the signature:
+             def custom_pre_eval_process(item: EvalInputItem) -> EvalInputItem
+
+         The framework will iterate through all items and call this function on each one.
+
+         Args:
+             eval_input: The EvalInput object to pre-evaluation process
+
+         Returns:
+             The pre-evaluation processed EvalInput object
+         """
+         if self.custom_pre_eval_process_function:
+             try:
+                 custom_function = self._load_custom_pre_eval_process_function()
+                 processed_items = []
+
+                 for item in eval_input.eval_input_items:
+                     processed_item = custom_function(item)
+                     if not isinstance(processed_item, EvalInputItem):
+                         raise TypeError(f"Custom pre-evaluation '{self.custom_pre_eval_process_function}' must return "
+                                         f"EvalInputItem, got {type(processed_item)}")
+                     processed_items.append(processed_item)
+
+                 return EvalInput(eval_input_items=processed_items)
+             except Exception as e:
+                 raise RuntimeError(f"Error calling custom pre-evaluation process function "
+                                    f"'{self.custom_pre_eval_process_function}': {e}") from e
+
+         return eval_input
+
+     def _load_custom_pre_eval_process_function(self):
+         """
+         Import and return the custom pre-evaluation process function using standard Python import path.
+
+         The function should process individual EvalInputItem objects.
+         """
+         # Split the function path to get module and function name
+         if "." not in self.custom_pre_eval_process_function:
+             raise ValueError(f"Invalid custom_pre_eval_process_function '{self.custom_pre_eval_process_function}'. "
+                              "Expected format: '<module_path>.<function_name>'")
+         module_path, function_name = self.custom_pre_eval_process_function.rsplit(".", 1)
+
+         # Import the module
+         module = importlib.import_module(module_path)
+
+         # Get the function from the module
+         if not hasattr(module, function_name):
+             raise AttributeError(f"Function '{function_name}' not found in module '{module_path}'")
+
+         custom_function = getattr(module, function_name)
+
+         if not callable(custom_function):
+             raise ValueError(f"'{self.custom_pre_eval_process_function}' is not callable")
+
+         return custom_function
+
      def publish_eval_input(self,
                             eval_input,
                             workflow_output_step_filter: list[IntermediateStepType] | None = None) -> str:
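
A custom pre-evaluation hook matching the documented signature might look like the sketch below. The EvalInputItem import path and the output_obj field name are assumptions based on the existing eval models (see nat/eval/evaluator/evaluator_model.py in the file list); the hook is referenced by its dotted path, for example custom_pre_eval_process_function="my_project.pre_eval.strip_whitespace".

# my_project/pre_eval.py (hypothetical module)
from nat.eval.evaluator.evaluator_model import EvalInputItem  # assumed import path


def strip_whitespace(item: EvalInputItem) -> EvalInputItem:
    """Normalize the generated output before evaluators score it."""
    if isinstance(item.output_obj, str):  # 'output_obj' field name is an assumption
        item.output_obj = item.output_obj.strip()
    return item
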