nvidia-nat 1.2.0a20250813-py3-none-any.whl → 1.2.0rc6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116)
  1. nat/agent/react_agent/register.py +1 -1
  2. nat/agent/reasoning_agent/reasoning_agent.py +3 -3
  3. nat/agent/rewoo_agent/prompt.py +11 -12
  4. nat/agent/rewoo_agent/register.py +30 -32
  5. nat/authentication/http_basic_auth/http_basic_auth_provider.py +1 -1
  6. nat/authentication/interfaces.py +1 -1
  7. nat/authentication/oauth2/oauth2_auth_code_flow_provider.py +2 -2
  8. nat/builder/component_utils.py +5 -5
  9. nat/builder/context.py +5 -5
  10. nat/builder/front_end.py +6 -6
  11. nat/builder/function_base.py +4 -4
  12. nat/builder/function_info.py +1 -1
  13. nat/builder/intermediate_step_manager.py +4 -4
  14. nat/builder/user_interaction_manager.py +3 -3
  15. nat/builder/workflow_builder.py +2 -2
  16. nat/cli/cli_utils/validation.py +1 -1
  17. nat/cli/commands/configure/channel/add.py +1 -1
  18. nat/cli/commands/configure/channel/channel.py +1 -3
  19. nat/cli/commands/configure/channel/remove.py +1 -1
  20. nat/cli/commands/configure/channel/update.py +1 -1
  21. nat/cli/commands/configure/configure.py +2 -2
  22. nat/cli/commands/evaluate.py +1 -1
  23. nat/cli/commands/info/info.py +2 -4
  24. nat/cli/commands/info/list_components.py +2 -2
  25. nat/cli/commands/info/list_mcp.py +9 -9
  26. nat/cli/commands/registry/publish.py +3 -3
  27. nat/cli/commands/registry/pull.py +3 -3
  28. nat/cli/commands/registry/registry.py +1 -3
  29. nat/cli/commands/registry/remove.py +3 -3
  30. nat/cli/commands/registry/search.py +3 -3
  31. nat/cli/commands/start.py +4 -4
  32. nat/cli/commands/workflow/templates/pyproject.toml.j2 +4 -4
  33. nat/cli/commands/workflow/workflow_commands.py +6 -6
  34. nat/data_models/api_server.py +38 -31
  35. nat/data_models/component_ref.py +9 -9
  36. nat/data_models/dataset_handler.py +56 -10
  37. nat/data_models/discovery_metadata.py +21 -50
  38. nat/data_models/evaluate.py +2 -2
  39. nat/data_models/intermediate_step.py +4 -4
  40. nat/embedder/register.py +0 -2
  41. nat/eval/dataset_handler/dataset_handler.py +118 -5
  42. nat/eval/evaluate.py +1 -1
  43. nat/eval/evaluator/evaluator_model.py +3 -3
  44. nat/eval/rag_evaluator/evaluate.py +1 -1
  45. nat/eval/swe_bench_evaluator/evaluate.py +2 -2
  46. nat/experimental/test_time_compute/editing/motivation_aware_summarization.py +1 -1
  47. nat/experimental/test_time_compute/functions/plan_select_execute_function.py +4 -4
  48. nat/experimental/test_time_compute/functions/ttc_tool_wrapper_function.py +1 -1
  49. nat/experimental/test_time_compute/scoring/llm_based_agent_scorer.py +1 -1
  50. nat/experimental/test_time_compute/scoring/llm_based_plan_scorer.py +1 -1
  51. nat/experimental/test_time_compute/search/multi_llm_planner.py +1 -1
  52. nat/experimental/test_time_compute/search/multi_query_retrieval_search.py +1 -1
  53. nat/experimental/test_time_compute/search/single_shot_multi_plan_planner.py +1 -1
  54. nat/experimental/test_time_compute/selection/llm_based_agent_output_selector.py +1 -1
  55. nat/experimental/test_time_compute/selection/llm_based_output_merging_selector.py +1 -1
  56. nat/experimental/test_time_compute/selection/llm_based_plan_selector.py +1 -1
  57. nat/front_ends/console/console_front_end_config.py +1 -1
  58. nat/front_ends/fastapi/fastapi_front_end_config.py +6 -7
  59. nat/front_ends/fastapi/fastapi_front_end_plugin.py +4 -4
  60. nat/front_ends/fastapi/fastapi_front_end_plugin_worker.py +13 -13
  61. nat/front_ends/fastapi/intermediate_steps_subscriber.py +1 -1
  62. nat/front_ends/fastapi/main.py +6 -6
  63. nat/front_ends/fastapi/message_handler.py +14 -3
  64. nat/front_ends/fastapi/message_validator.py +6 -8
  65. nat/front_ends/fastapi/response_helpers.py +3 -3
  66. nat/front_ends/mcp/mcp_front_end_config.py +3 -2
  67. nat/front_ends/mcp/mcp_front_end_plugin_worker.py +4 -4
  68. nat/front_ends/mcp/tool_converter.py +15 -16
  69. nat/memory/__init__.py +2 -2
  70. nat/meta/pypi.md +8 -8
  71. nat/object_store/__init__.py +2 -2
  72. nat/observability/exporter/base_exporter.py +1 -1
  73. nat/observability/exporter/raw_exporter.py +1 -1
  74. nat/observability/exporter_manager.py +1 -1
  75. nat/profiler/callbacks/agno_callback_handler.py +4 -4
  76. nat/profiler/callbacks/llama_index_callback_handler.py +2 -2
  77. nat/profiler/callbacks/semantic_kernel_callback_handler.py +1 -1
  78. nat/profiler/decorators/function_tracking.py +1 -1
  79. nat/profiler/forecasting/models/linear_model.py +3 -2
  80. nat/profiler/forecasting/models/random_forest_regressor.py +3 -2
  81. nat/profiler/inference_optimization/bottleneck_analysis/nested_stack_analysis.py +1 -1
  82. nat/profiler/inference_optimization/experimental/prefix_span_analysis.py +1 -1
  83. nat/profiler/profile_runner.py +1 -1
  84. nat/registry_handlers/local/local_handler.py +5 -5
  85. nat/registry_handlers/local/register_local.py +1 -1
  86. nat/registry_handlers/package_utils.py +17 -17
  87. nat/registry_handlers/pypi/pypi_handler.py +5 -5
  88. nat/registry_handlers/pypi/register_pypi.py +3 -3
  89. nat/registry_handlers/registry_handler_base.py +8 -8
  90. nat/registry_handlers/rest/register_rest.py +4 -4
  91. nat/registry_handlers/rest/rest_handler.py +6 -6
  92. nat/registry_handlers/schemas/package.py +3 -3
  93. nat/registry_handlers/schemas/publish.py +4 -4
  94. nat/registry_handlers/schemas/pull.py +4 -4
  95. nat/registry_handlers/schemas/search.py +7 -7
  96. nat/runtime/loader.py +21 -20
  97. nat/runtime/runner.py +2 -2
  98. nat/runtime/session.py +1 -1
  99. nat/settings/global_settings.py +2 -2
  100. nat/tool/chat_completion.py +1 -1
  101. nat/tool/code_execution/README.md +2 -2
  102. nat/tool/code_execution/test_code_execution_sandbox.py +1 -1
  103. nat/tool/mcp/exceptions.py +1 -1
  104. nat/tool/mcp/mcp_tool.py +1 -1
  105. nat/tool/retriever.py +3 -3
  106. nat/tool/server_tools.py +11 -11
  107. nat/utils/settings/global_settings.py +1 -1
  108. {nvidia_nat-1.2.0a20250813.dist-info → nvidia_nat-1.2.0rc6.dist-info}/METADATA +9 -9
  109. {nvidia_nat-1.2.0a20250813.dist-info → nvidia_nat-1.2.0rc6.dist-info}/RECORD +114 -116
  110. nat/embedder/langchain_client.py +0 -41
  111. nat/meta/module_to_distro.json +0 -4
  112. {nvidia_nat-1.2.0a20250813.dist-info → nvidia_nat-1.2.0rc6.dist-info}/WHEEL +0 -0
  113. {nvidia_nat-1.2.0a20250813.dist-info → nvidia_nat-1.2.0rc6.dist-info}/entry_points.txt +0 -0
  114. {nvidia_nat-1.2.0a20250813.dist-info → nvidia_nat-1.2.0rc6.dist-info}/licenses/LICENSE-3rd-party.txt +0 -0
  115. {nvidia_nat-1.2.0a20250813.dist-info → nvidia_nat-1.2.0rc6.dist-info}/licenses/LICENSE.md +0 -0
  116. {nvidia_nat-1.2.0a20250813.dist-info → nvidia_nat-1.2.0rc6.dist-info}/top_level.txt +0 -0
nat/data_models/discovery_metadata.py CHANGED
@@ -15,12 +15,10 @@
 
 import importlib.metadata
 import inspect
-import json
 import logging
 import typing
 from enum import Enum
 from functools import lru_cache
-from pathlib import Path
 from types import ModuleType
 from typing import TYPE_CHECKING
 
@@ -56,11 +54,11 @@ class DiscoveryMetadata(BaseModel):
     """A data model representing metadata about each registered component to faciliate its discovery.
 
     Args:
-        package (str): The name of the package containing the AIQ Toolkit component.
-        version (str): The version number of the package containing the AIQ Toolkit component.
-        component_type (AIQComponentEnum): The type of AIQ Toolkit component this metadata represents.
-        component_name (str): The registered name of the AIQ Toolkit component.
-        description (str): Description of the AIQ Toolkit component pulled from its config objects docstrings.
+        package (str): The name of the package containing the NAT component.
+        version (str): The version number of the package containing the NAT component.
+        component_type (ComponentEnum): The type of NAT component this metadata represents.
+        component_name (str): The registered name of the NAT component.
+        description (str): Description of the NAT component pulled from its config objects docstrings.
         developer_notes (str): Other notes to a developers to aid in the use of the component.
         status (DiscoveryStatusEnum): Provides the status of the metadata discovery process.
     """
@@ -95,27 +93,12 @@ class DiscoveryMetadata(BaseModel):
         mapping = importlib.metadata.packages_distributions()
         try:
             distro_names = mapping.get(root_package_name, [None])
-            distro_name = DiscoveryMetadata.get_preferred_item(distro_names, "aiqtoolkit")
+            distro_name = DiscoveryMetadata.get_preferred_item(distro_names, "nvidia-nat")
         except KeyError:
             return root_package_name
 
         return distro_name if distro_name else root_package_name
 
-    @staticmethod
-    @lru_cache
-    def get_distribution_name_from_private_data(root_package: str) -> str | None:
-        # Locate distibution mapping stored in the packages private data
-        module = __import__(root_package)
-        for path in module.__path__:
-            package_dir = Path(path).resolve()
-            distinfo_path = package_dir / "meta" / "module_to_distro.json"
-
-            if distinfo_path.exists():
-                with distinfo_path.open("r") as f:
-                    data = json.load(f)
-                return data.get(root_package, None)
-        return None
-
     @staticmethod
     @lru_cache
     def get_distribution_name_from_module(module: ModuleType | None) -> str:
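For context, the deleted private-data lookup (which read a bundled module_to_distro.json) is superseded by stdlib metadata resolution, which the surviving code path above relies on. A minimal sketch of that mechanism, with an illustrative result assuming the nvidia-nat wheel is installed:

    import importlib.metadata

    # packages_distributions() (Python >= 3.10) maps top-level module names to the
    # distributions that provide them, so the "nat" module resolves to its wheel.
    mapping = importlib.metadata.packages_distributions()
    distro_names = mapping.get("nat", [None])
    print(distro_names)  # e.g. ['nvidia-nat']; get_preferred_item() then prefers "nvidia-nat"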
@@ -125,22 +108,22 @@ class DiscoveryMetadata(BaseModel):
         Args:
             module (ModuleType): A registered component's module.
 
         Returns:
-            str: The distribution name of the AIQ Toolkit component.
+            str: The distribution name of the NAT component.
         """
         from nat.runtime.loader import get_all_entrypoints_distro_mapping
 
         if module is None:
-            return "aiqtoolkit"
+            return "nvidia-nat"
 
         # Get the mapping of module names to distro names
         mapping = get_all_entrypoints_distro_mapping()
         module_package = module.__package__
 
         if module_package is None:
-            return "aiqtoolkit"
+            return "nvidia-nat"
 
         # Traverse the module package parts in reverse order to find the distro name
-        # This is because the module package is the root package for the AIQ Toolkit component
+        # This is because the module package is the root package for the NAT component
         # and the distro name is the name of the package that contains the component
         module_package_parts = module_package.split(".")
         for part_idx in range(len(module_package_parts), 0, -1):
@@ -149,7 +132,7 @@ class DiscoveryMetadata(BaseModel):
             if candidate_distro_name is not None:
                 return candidate_distro_name
 
-        return "aiqtoolkit"
+        return "nvidia-nat"
 
     @staticmethod
     @lru_cache
@@ -160,32 +143,20 @@ class DiscoveryMetadata(BaseModel):
             config_type (type[TypedBaseModelT]): A registered component's configuration object.
 
         Returns:
-            str: The distribution name of the AIQ Toolkit component.
+            str: The distribution name of the NAT component.
         """
         module = inspect.getmodule(config_type)
         return DiscoveryMetadata.get_distribution_name_from_module(module)
 
-    @staticmethod
-    @lru_cache
-    def get_distribution_name(root_package: str) -> str:
-        """
-        The aiq library packages use a distro name 'aiqtoolkit[]' and
-        root package name 'aiq'. They provide mapping in a metadata file
-        for optimized installation.
-        """
-
-        distro_name = DiscoveryMetadata.get_distribution_name_from_private_data(root_package)
-        return distro_name if distro_name else root_package
-
     @staticmethod
     def from_config_type(config_type: type["TypedBaseModelT"],
                          component_type: ComponentEnum = ComponentEnum.UNDEFINED) -> "DiscoveryMetadata":
-        """Generates discovery metadata from an AIQ Toolkit config object.
+        """Generates discovery metadata from a NAT config object.
 
         Args:
             config_type (type[TypedBaseModelT]): A registered component's configuration object.
-            component_type (AIQComponentEnum, optional): The type of the registered component. Defaults to
-                AIQComponentEnum.UNDEFINED.
+            component_type (ComponentEnum, optional): The type of the registered component. Defaults to
+                ComponentEnum.UNDEFINED.
 
         Returns:
             DiscoveryMetadata: A an object containing component metadata to facilitate discovery and reuse.
@@ -228,8 +199,8 @@ class DiscoveryMetadata(BaseModel):
             wrapper_type (LLMFrameworkEnum): The wrapper to apply to the callable to faciliate inter-framwork
                 interoperability.
 
-            component_type (AIQComponentEnum, optional): The type of the registered component. Defaults to
-                AIQComponentEnum.TOOL_WRAPPER.
+            component_type (ComponentEnum, optional): The type of the registered component. Defaults to
+                ComponentEnum.TOOL_WRAPPER.
 
         Returns:
             DiscoveryMetadata: A an object containing component metadata to facilitate discovery and reuse.
@@ -263,7 +234,7 @@ class DiscoveryMetadata(BaseModel):
         """Generates discovery metadata from an installed package name.
 
         Args:
-            package_name (str): The name of the AIQ Toolkit plugin package containing registered components.
+            package_name (str): The name of the NAT plugin package containing registered components.
             package_version (str, optional): The version of the package, Defaults to None.
 
         Returns:
@@ -302,9 +273,9 @@ class DiscoveryMetadata(BaseModel):
             wrapper_type (LLMFrameworkEnum | str): The wrapper to apply to the callable to faciliate inter-framwork
                 interoperability.
 
-            provider_type (AIQComponentEnum): The type of provider the registered component supports.
-            component_type (AIQComponentEnum, optional): The type of the registered component. Defaults to
-                AIQComponentEnum.UNDEFINED.
+            provider_type (ComponentEnum): The type of provider the registered component supports.
+            component_type (ComponentEnum, optional): The type of the registered component. Defaults to
+                ComponentEnum.UNDEFINED.
 
         Returns:
             DiscoveryMetadata: A an object containing component metadata to facilitate discovery and reuse.
nat/data_models/evaluate.py CHANGED
@@ -54,7 +54,7 @@ class JobManagementConfig(BaseModel):
 
 class EvalOutputConfig(BaseModel):
     # Output directory for the workflow and evaluation results
-    dir: Path = Path("/tmp/aiq/examples/default/")
+    dir: Path = Path("./.tmp/nat/examples/default/")
     # S3 prefix for the workflow and evaluation results
     remote_dir: str | None = None
     # Custom scripts to run after the workflow and evaluation results are saved
@@ -77,7 +77,7 @@ class EvalGeneralConfig(BaseModel):
     workflow_alias: str | None = None
 
     # Output directory for the workflow and evaluation results
-    output_dir: Path = Path("/tmp/aiq/examples/default/")
+    output_dir: Path = Path("./.tmp/nat/examples/default/")
 
     # If present overrides output_dir
     output: EvalOutputConfig | None = None
nat/data_models/intermediate_step.py CHANGED
@@ -66,7 +66,7 @@ class IntermediateStepState(str, Enum):
 
 class StreamEventData(BaseModel):
     """
-    AIQStreamEventData is a data model that represents the data field in an streaming event.
+    StreamEventData is a data model that represents the data field in an streaming event.
     """
 
     # Allow extra fields in the model_config to support derived models
@@ -121,7 +121,7 @@ class TraceMetadata(BaseModel):
 
 class IntermediateStepPayload(BaseModel):
     """
-    AIQIntermediateStep is a data model that represents an intermediate step in the AIQ Toolkit. Intermediate steps are
+    IntermediateStep is a data model that represents an intermediate step in the NAT. Intermediate steps are
     captured while a request is running and can be used to show progress or to evaluate the path a workflow took to get
     a response.
     """
@@ -226,7 +226,7 @@ class IntermediateStepPayload(BaseModel):
 
 class IntermediateStep(BaseModel):
     """
-    AIQIntermediateStep is a data model that represents an intermediate step in the AIQ Toolkit. Intermediate steps are
+    IntermediateStep is a data model that represents an intermediate step in the NAT. Intermediate steps are
     captured while a request is running and can be used to show progress or to evaluate the path a workflow took to get
     a response.
     """
@@ -243,7 +243,7 @@ class IntermediateStep(BaseModel):
 
     function_ancestry: InvocationNode
     """
-    The function ancestry for the current step showing the current AIQ function that was being executed when the step
+    The function ancestry for the current step showing the current NAT function that was being executed when the step
     was created.
    """
 
nat/embedder/register.py CHANGED
@@ -20,5 +20,3 @@
 # Import any providers which need to be automatically registered here
 from . import nim_embedder
 from . import openai_embedder
-# Import any clients which need to be automatically registered here
-from . import langchain_client
nat/eval/dataset_handler/dataset_handler.py CHANGED
@@ -15,10 +15,12 @@
 
 import json
 import math
+from pathlib import Path
 
 import pandas as pd
 
 from nat.data_models.dataset_handler import EvalDatasetConfig
+from nat.data_models.dataset_handler import EvalDatasetCustomConfig
 from nat.data_models.dataset_handler import EvalDatasetJsonConfig
 from nat.data_models.intermediate_step import IntermediateStep
 from nat.data_models.intermediate_step import IntermediateStepType
@@ -38,7 +40,7 @@ class DatasetHandler:
                  dataset_config: EvalDatasetConfig,
                  reps: int,
                  concurrency: int,
-                 num_passes: int | None = None,
+                 num_passes: int = 1,
                  adjust_dataset_size: bool = False):
         from nat.eval.intermediate_step_adapter import IntermediateStepAdapter
 
@@ -184,6 +186,10 @@ class DatasetHandler:
         # if a dataset file has been provided in the command line, use that
         dataset_config = EvalDatasetJsonConfig(file_path=dataset) if dataset else self.dataset_config
 
+        # Handle custom dataset type with special processing
+        if isinstance(self.dataset_config, EvalDatasetCustomConfig):
+            return self._handle_custom_dataset(dataset)
+
         # Download the dataset if it is remote
         downloader = DatasetDownloader(dataset_config=dataset_config)
         downloader.download_dataset()
@@ -192,6 +198,19 @@ class DatasetHandler:
         # Parse the dataset into a DataFrame
         input_df = parser(dataset_config.file_path, **kwargs)
 
+        # Apply standard preprocessing and convert to EvalInput
+        return self._preprocess_eval_dataframe(input_df)
+
+    def _preprocess_dataframe(self, input_df: pd.DataFrame) -> pd.DataFrame:
+        """
+        Apply standard preprocessing to a DataFrame: filters, deduplication, repetitions, and size adjustment.
+
+        Args:
+            input_df: DataFrame to preprocess
+
+        Returns:
+            Preprocessed DataFrame
+        """
         # Apply filters and deduplicate
         input_df = self.dataset_filter.apply_filters(input_df)
         input_df.drop_duplicates(subset=[self.dataset_config.id_key], inplace=True)
@@ -205,12 +224,104 @@ class DatasetHandler:
         elif self.adjust_dataset_size:
             input_df = self.adjust_dataset(input_df)
 
-        # Convert the DataFrame to a list of EvalInput objects
-        return self.get_eval_input_from_df(input_df)
+        return input_df
+
+    def _preprocess_eval_dataframe(self, input_df: pd.DataFrame) -> EvalInput:
+        """
+        Apply standard preprocessing to a DataFrame and convert to EvalInput.
+
+        Args:
+            input_df: DataFrame to preprocess
+
+        Returns:
+            Preprocessed EvalInput object
+        """
+        processed_df = self._preprocess_dataframe(input_df)
+        return self.get_eval_input_from_df(processed_df)
+
+    def _preprocess_eval_input(self, eval_input: EvalInput) -> EvalInput:
+        """
+        Apply standard preprocessing to an EvalInput object.
+
+        Thin wrapper that converts EvalInput to DataFrame, processes it, and converts back.
+
+        Args:
+            eval_input: EvalInput object to preprocess
+
+        Returns:
+            Preprocessed EvalInput object
+        """
+        if not eval_input.eval_input_items:
+            return eval_input
+
+        input_df = self._eval_input_to_dataframe(eval_input)
+        return self._preprocess_eval_dataframe(input_df)
+
+    def _handle_custom_dataset(self, dataset: str | None) -> EvalInput:
+        """
+        Handle custom dataset type by calling the user-defined function
+        and applying standard preprocessing to the result.
+
+        Args:
+            dataset: Optional dataset file path from command line
+
+        Returns:
+            Preprocessed EvalInput object
+        """
+        # Determine input path - use command line dataset or config file_path
+        input_path = Path(dataset) if dataset else Path(self.dataset_config.file_path)
+
+        # Download the dataset if it is remote (for custom datasets too)
+        downloader = DatasetDownloader(dataset_config=self.dataset_config)
+        downloader.download_dataset()
+
+        # Load and call custom function
+        custom_function, kwargs = self.dataset_config.parser()
+
+        try:
+            # Call the custom function with file_path and kwargs
+            eval_input = custom_function(file_path=input_path, **kwargs)
+
+            if not isinstance(eval_input, EvalInput):
+                raise ValueError(f"Custom function must return an EvalInput object, "
+                                 f"but returned {type(eval_input)}")
+
+        except Exception as e:
+            raise RuntimeError(f"Error calling custom dataset function: {e}") from e
+
+        # Apply standard preprocessing (filters, deduplication, repetitions)
+        return self._preprocess_eval_input(eval_input)
+
+    def _eval_input_to_dataframe(self, eval_input: EvalInput) -> pd.DataFrame:
+        """
+        Convert an EvalInput object to a pandas DataFrame for processing.
+
+        Args:
+            eval_input: EvalInput object to convert
+
+        Returns:
+            DataFrame representation of the EvalInput
+        """
+        data = []
+        for item in eval_input.eval_input_items:
+            row = item.full_dataset_entry.copy() if item.full_dataset_entry else {}
+
+            # Ensure key fields are present
+            row[self.id_key] = item.id
+            if self.is_structured_input():
+                row[self.question_key] = item.input_obj
+                row[self.answer_key] = item.expected_output_obj
+            row[self.generated_answer_key] = item.output_obj
+            row[self.trajectory_key] = item.trajectory
+            row[self.expected_trajectory_key] = item.expected_trajectory
+
+            data.append(row)
+
+        return pd.DataFrame(data)
 
     def filter_intermediate_steps(self,
                                   intermediate_steps: list[IntermediateStep],
-                                  event_filter: list[IntermediateStepType] = None) -> list[dict]:
+                                  event_filter: list[IntermediateStepType] | None = None) -> list[dict]:
         """
         Filter out the intermediate steps that are not relevant for evaluation.
         The output is written with with the intention of re-running the evaluation using the original config file.
@@ -220,7 +331,9 @@ class DatasetHandler:
         filtered_steps = self.intermediate_step_adapter.filter_intermediate_steps(intermediate_steps, event_filter)
         return self.intermediate_step_adapter.serialize_intermediate_steps(filtered_steps)
 
-    def publish_eval_input(self, eval_input, workflow_output_step_filter: list[IntermediateStepType] = None) -> str:
+    def publish_eval_input(self,
+                           eval_input,
+                           workflow_output_step_filter: list[IntermediateStepType] | None = None) -> str:
        """
         Convert the EvalInput object to a JSON output for storing in a file. Use the orginal keys to
         allow re-running evaluation using the orignal config file and '--skip_workflow' option.
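For reference, a hypothetical custom dataset function matching the contract enforced by _handle_custom_dataset above: it is invoked as custom_function(file_path=..., **kwargs) and must return an EvalInput. This is a sketch only; the import path and the eval_input_items constructor field are assumptions based on the evaluator_model.py hunk further below.

    from pathlib import Path

    from nat.eval.evaluator.evaluator_model import EvalInput  # assumed location
    from nat.eval.evaluator.evaluator_model import EvalInputItem


    def load_tsv_dataset(file_path: Path, delimiter: str = "\t") -> EvalInput:
        # Hypothetical parser: one "question<TAB>answer" pair per line.
        items = []
        for idx, line in enumerate(file_path.read_text(encoding="utf-8").splitlines()):
            question, answer = line.split(delimiter, maxsplit=1)
            items.append(
                EvalInputItem(id=idx,
                              input_obj=question,
                              expected_output_obj=answer,
                              full_dataset_entry={"id": idx, "question": question, "answer": answer}))
        return EvalInput(eval_input_items=items)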
nat/eval/evaluate.py CHANGED
@@ -397,7 +397,7 @@ class EvaluationRun:  # pylint: disable=too-many-public-methods
             return self.eval_config.general.workflow_alias
 
         if not workflow_type or workflow_type == "EmptyFunctionConfig":
-            return "aiqtoolkit-eval"
+            return "nat-eval"
 
         return workflow_type
 
nat/eval/evaluator/evaluator_model.py CHANGED
@@ -24,9 +24,9 @@ class EvalInputItem(BaseModel):
     id: typing.Any
     input_obj: typing.Any
     expected_output_obj: typing.Any
-    output_obj: typing.Any
-    expected_trajectory: list[IntermediateStep]
-    trajectory: list[IntermediateStep]
+    output_obj: typing.Any = None  # populated by the workflow
+    expected_trajectory: list[IntermediateStep] = []
+    trajectory: list[IntermediateStep] = []  # populated by the workflow
     full_dataset_entry: typing.Any
 
 
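With these defaults, an EvalInputItem can be constructed from dataset fields alone and completed by the workflow later. A small illustrative sketch (values are made up; pydantic copies the list defaults per instance):

    item = EvalInputItem(id=1,
                         input_obj="What is the wheel renamed to?",
                         expected_output_obj="nvidia-nat",
                         full_dataset_entry={"id": 1})
    assert item.output_obj is None
    assert item.trajectory == [] and item.expected_trajectory == []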
nat/eval/rag_evaluator/evaluate.py CHANGED
@@ -99,7 +99,7 @@ class RAGEvaluator:
         return EvaluationDataset(samples=samples)
 
     def ragas_to_eval_output(self, eval_input: EvalInput, results_dataset: EvaluationResult | None) -> EvalOutput:
-        """Converts the ragas EvaluationResult to aiq EvalOutput"""
+        """Converts the ragas EvaluationResult to nat EvalOutput"""
 
         if not results_dataset:
             logger.error("Ragas evaluation failed with no results")
nat/eval/swe_bench_evaluator/evaluate.py CHANGED
@@ -123,7 +123,7 @@ class SweBenchEvaluator:
                            for s in swebench_inputs if s not in supported_inputs})
 
         # Write SWEBenchInput to file
-        workflow_input_file = self.output_dir / "aiq_workflow_input.json"
+        workflow_input_file = self.output_dir / "nat_workflow_input.json"
         workflow_input_file.parent.mkdir(parents=True, exist_ok=True)
         Path(workflow_input_file).write_text(json.dumps([swebench.model_dump() for swebench in supported_inputs],
                                                         indent=2),
@@ -139,7 +139,7 @@ class SweBenchEvaluator:
             return None, None
 
         # Write SWEBenchOutput to file
-        workflow_output_file = self.output_dir / "aiq_workflow_output.json"
+        workflow_output_file = self.output_dir / "nat_workflow_output.json"
         Path(workflow_output_file).write_text(json.dumps([output.model_dump() for output in filtered_outputs],
                                                          indent=2),
                                               encoding="utf-8")
nat/experimental/test_time_compute/editing/motivation_aware_summarization.py CHANGED
@@ -65,7 +65,7 @@ class MotivationAwareSummarization(StrategyBase):
             from langchain_core.prompts import PromptTemplate
         except ImportError:
             raise ImportError("langchain-core is required for MultiQueryRetrievalSearch. "
-                              "Install aiqtoolkit-langchain or similar.")
+                              "Install nvidia-nat-langchain or similar.")
 
         new_ttc_items: list[TTCItem] = []
 
nat/experimental/test_time_compute/functions/plan_select_execute_function.py CHANGED
@@ -35,7 +35,7 @@ logger = logging.getLogger(__name__)
 
 class PlanSelectExecuteFunctionConfig(FunctionBaseConfig, name="plan_select_execute_function"):
     """
-    Defines an aiqtoolkit function that performs reasoning on the input data.
+    Defines a NAT function that performs reasoning on the input data.
     Output is passed to the next function in the workflow.
 
     Designed to be used with an InterceptingFunction.
@@ -83,7 +83,7 @@ async def plan_select_execute_function(config: PlanSelectExecuteFunctionConfig,
         from langchain_core.prompts import PromptTemplate
     except ImportError:
         raise ImportError("langchain-core is not installed. Please install it to use SingleShotMultiPlanPlanner.\n"
-                          "This error can be resolved by installing aiqtoolkit-langchain.")
+                          "This error can be resolved by installing nvidia-nat-langchain.")
 
     # Get the augmented function's description
     augmented_function = builder.get_function(config.augmented_fn)
@@ -159,7 +159,7 @@ async def plan_select_execute_function(config: PlanSelectExecuteFunctionConfig,
         Perform reasoning on the input text.
 
         Args:
-            input_message (AIQChatRequest): The input text to reason on.
+            input_message (ChatRequest): The input text to reason on.
         """
 
         input_text = "".join([str(message.model_dump()) + "\n" for message in input_message.messages])
@@ -192,7 +192,7 @@ async def plan_select_execute_function(config: PlanSelectExecuteFunctionConfig,
         Perform reasoning on the input text.
 
         Args:
-            input_message (AIQChatRequest): The input text to reason on.
+            input_message (ChatRequest): The input text to reason on.
         """
 
         input_text = "".join([str(message.model_dump()) + "\n" for message in input_message.messages])
nat/experimental/test_time_compute/functions/ttc_tool_wrapper_function.py CHANGED
@@ -78,7 +78,7 @@ async def register_ttc_tool_wrapper_function(
        from langchain_core.prompts import PromptTemplate
    except ImportError:
        raise ImportError("langchain-core is not installed. Please install it to use SingleShotMultiPlanPlanner.\n"
-                         "This error can be resolved by installing aiqtoolkit-langchain.")
+                         "This error can be resolved by installing nvidia-nat-langchain.")
 
    augmented_function: Function = builder.get_function(config.augmented_fn)
    input_llm: BaseChatModel = await builder.get_llm(config.input_llm, wrapper_type=LLMFrameworkEnum.LANGCHAIN)
nat/experimental/test_time_compute/scoring/llm_based_agent_scorer.py CHANGED
@@ -67,7 +67,7 @@ class LLMBasedAgentScorer(StrategyBase):
             from langchain_core.prompts import PromptTemplate
         except ImportError:
             raise ImportError("langchain-core is not installed. Please install it to use SingleShotMultiPlanPlanner.\n"
-                              "This error can be resolved by installing aiqtoolkit-langchain.")
+                              "This error can be resolved by installing nvidia-nat-langchain.")
 
         if not isinstance(self.llm_bound, BaseChatModel):
             raise ValueError("The `scoring_llm` must be an instance of `BaseChatModel`.")
nat/experimental/test_time_compute/scoring/llm_based_plan_scorer.py CHANGED
@@ -67,7 +67,7 @@ class LLMBasedPlanScorer(StrategyBase):
             from langchain_core.prompts import PromptTemplate
         except ImportError:
             raise ImportError("langchain-core is not installed. Please install it to use SingleShotMultiPlanPlanner.\n"
-                              "This error can be resolved by installing aiqtoolkit-langchain.")
+                              "This error can be resolved by installing nvidia-nat-langchain.")
 
         if not isinstance(self.llm_bound, BaseChatModel):
             raise ValueError("The `scoring_llm` must be an instance of `BaseChatModel`.")
nat/experimental/test_time_compute/search/multi_llm_planner.py CHANGED
@@ -96,7 +96,7 @@ class MultiLLMPlanner(StrategyBase):
             from langchain_core.prompts import PromptTemplate
         except ImportError:
             raise ImportError("langchain-core is not installed. Please install it to use MultiLLMPlanner.\n"
-                              "This error can be resolve by installing aiqtoolkit-langchain.")
+                              "This error can be resolve by installing nvidia-nat-langchain.")
 
         # Create a single PromptTemplate
         planning_template = PromptTemplate(template=self.config.planning_template,
nat/experimental/test_time_compute/search/multi_query_retrieval_search.py CHANGED
@@ -68,7 +68,7 @@ class MultiQueryRetrievalSearch(StrategyBase):
             from langchain_core.prompts import PromptTemplate
         except ImportError:
             raise ImportError("langchain-core is required for MultiQueryRetrievalSearch. "
-                              "Install aiqtoolkit-langchain or similar.")
+                              "Install nvidia-nat-langchain or similar.")
 
         new_ttc_items: list[TTCItem] = []
 
nat/experimental/test_time_compute/search/single_shot_multi_plan_planner.py CHANGED
@@ -64,7 +64,7 @@ class SingleShotMultiPlanPlanner(StrategyBase):
             from langchain_core.prompts import PromptTemplate
         except ImportError:
             raise ImportError("langchain-core is not installed. Please install it to use SingleShotMultiPlanPlanner.\n"
-                              "This error can be resolve by installing aiqtoolkit-langchain.")
+                              "This error can be resolve by installing nvidia-nat-langchain.")
 
         planning_template = PromptTemplate(template=self.config.planning_template,
                                            input_variables=["context", "prompt"],
nat/experimental/test_time_compute/selection/llm_based_agent_output_selector.py CHANGED
@@ -70,7 +70,7 @@ class LLMBasedAgentOutputSelector(StrategyBase):
             from langchain_core.prompts import PromptTemplate
         except ImportError:
             raise ImportError("langchain-core is not installed. Please install it to use SingleShotMultiPlanPlanner.\n"
-                              "This error can be resolved by installing aiqtoolkit-langchain.")
+                              "This error can be resolved by installing nvidia-nat-langchain.")
 
         from pydantic import BaseModel
 
nat/experimental/test_time_compute/selection/llm_based_output_merging_selector.py CHANGED
@@ -69,7 +69,7 @@ class LLMBasedOutputMergingSelector(StrategyBase):
             from langchain_core.prompts import PromptTemplate
         except ImportError:
             raise ImportError("langchain-core is not installed. Please install it to use SingleShotMultiPlanPlanner.\n"
-                              "This error can be resolved by installing aiqtoolkit-langchain.")
+                              "This error can be resolved by installing nvidia-nat-langchain.")
 
         from typing import Callable
 
nat/experimental/test_time_compute/selection/llm_based_plan_selector.py CHANGED
@@ -70,7 +70,7 @@ class LLMBasedPlanSelector(StrategyBase):
             from langchain_core.prompts import PromptTemplate
         except ImportError:
             raise ImportError("langchain-core is not installed. Please install it to use SingleShotMultiPlanPlanner.\n"
-                              "This error can be resolved by installing aiqtoolkit-langchain.")
+                              "This error can be resolved by installing nvidia-nat-langchain.")
 
         if not isinstance(self.llm_bound, BaseChatModel):
             raise ValueError("The `selection_llm` must be an instance of `BaseChatModel`.")
nat/front_ends/console/console_front_end_config.py CHANGED
@@ -22,7 +22,7 @@ from nat.data_models.front_end import FrontEndBaseConfig
 
 class ConsoleFrontEndConfig(FrontEndBaseConfig, name="console"):
     """
-    A front end that allows an AIQ Toolkit workflow to be run from the console.
+    A front end that allows a NAT workflow to be run from the console.
     """
 
     input_query: list[str] | None = Field(default=None,
nat/front_ends/fastapi/fastapi_front_end_config.py CHANGED
@@ -119,7 +119,7 @@ class AsyncGenerationStatusResponse(BaseAsyncStatusResponse):
 
 class FastApiFrontEndConfig(FrontEndBaseConfig, name="fastapi"):
     """
-    A FastAPI based front end that allows an NAT workflow to be served as a microservice.
+    A FastAPI based front end that allows a NAT workflow to be served as a microservice.
     """
 
     class EndpointBase(BaseModel):
@@ -192,7 +192,7 @@ class FastApiFrontEndConfig(FrontEndBaseConfig, name="fastapi"):
192
192
  websocket_path="/websocket",
193
193
  openai_api_path="/chat",
194
194
  openai_api_v1_path="/v1/chat/completions",
195
- description="Executes the default AIQ Toolkit workflow from the loaded configuration ",
195
+ description="Executes the default NAT workflow from the loaded configuration ",
196
196
  )
197
197
 
198
198
  evaluate: typing.Annotated[EndpointBase, Field(description="Endpoint for evaluating workflows.")] = EndpointBase(
@@ -207,9 +207,8 @@ class FastApiFrontEndConfig(FrontEndBaseConfig, name="fastapi"):
207
207
 
208
208
  endpoints: list[Endpoint] = Field(
209
209
  default_factory=list,
210
- description=(
211
- "Additional endpoints to add to the FastAPI app which run functions within the AIQ Toolkit configuration. "
212
- "Each endpoint must have a unique path."))
210
+ description=("Additional endpoints to add to the FastAPI app which run functions within the NAT configuration. "
211
+ "Each endpoint must have a unique path."))
213
212
 
214
213
  cors: CrossOriginResourceSharing = Field(
215
214
  default_factory=CrossOriginResourceSharing,
@@ -221,8 +220,8 @@ class FastApiFrontEndConfig(FrontEndBaseConfig, name="fastapi"):
221
220
  )
222
221
  runner_class: str | None = Field(
223
222
  default=None,
224
- description=("The AIQ Toolkit runner class to use when launching the FastAPI app from multiple processes. "
225
- "Each runner is responsible for loading and running the AIQ Toolkit workflow. "
223
+ description=("The NAT runner class to use when launching the FastAPI app from multiple processes. "
224
+ "Each runner is responsible for loading and running the NAT workflow. "
226
225
  "Note: This is different from the worker class used by Gunicorn."),
227
226
  )
228
227