nvidia-nat 1.3.0a20250827__py3-none-any.whl → 1.3.0a20250829__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. nat/agent/base.py +12 -7
  2. nat/agent/dual_node.py +7 -2
  3. nat/agent/react_agent/agent.py +15 -14
  4. nat/agent/react_agent/register.py +5 -1
  5. nat/agent/rewoo_agent/agent.py +23 -32
  6. nat/agent/rewoo_agent/register.py +8 -4
  7. nat/agent/tool_calling_agent/agent.py +15 -20
  8. nat/agent/tool_calling_agent/register.py +6 -2
  9. nat/builder/context.py +7 -2
  10. nat/builder/eval_builder.py +2 -2
  11. nat/builder/function.py +8 -8
  12. nat/builder/workflow_builder.py +21 -24
  13. nat/cli/cli_utils/config_override.py +1 -1
  14. nat/cli/commands/info/list_channels.py +1 -1
  15. nat/cli/commands/object_store/__init__.py +14 -0
  16. nat/cli/commands/object_store/object_store.py +227 -0
  17. nat/cli/commands/registry/publish.py +2 -2
  18. nat/cli/commands/registry/pull.py +2 -2
  19. nat/cli/commands/registry/remove.py +2 -2
  20. nat/cli/commands/registry/search.py +1 -1
  21. nat/cli/commands/start.py +1 -1
  22. nat/cli/commands/uninstall.py +1 -1
  23. nat/cli/commands/workflow/workflow_commands.py +4 -4
  24. nat/cli/entrypoint.py +3 -1
  25. nat/data_models/discovery_metadata.py +4 -4
  26. nat/data_models/gated_field_mixin.py +12 -14
  27. nat/data_models/temperature_mixin.py +1 -1
  28. nat/data_models/thinking_mixin.py +68 -0
  29. nat/data_models/top_p_mixin.py +1 -1
  30. nat/eval/evaluate.py +6 -6
  31. nat/eval/intermediate_step_adapter.py +1 -1
  32. nat/eval/rag_evaluator/evaluate.py +2 -2
  33. nat/eval/rag_evaluator/register.py +1 -1
  34. nat/eval/remote_workflow.py +3 -3
  35. nat/eval/swe_bench_evaluator/evaluate.py +5 -5
  36. nat/eval/trajectory_evaluator/evaluate.py +1 -1
  37. nat/eval/tunable_rag_evaluator/evaluate.py +3 -3
  38. nat/experimental/test_time_compute/functions/ttc_tool_orchestration_function.py +2 -2
  39. nat/front_ends/fastapi/fastapi_front_end_controller.py +4 -4
  40. nat/front_ends/fastapi/fastapi_front_end_plugin.py +1 -1
  41. nat/front_ends/fastapi/fastapi_front_end_plugin_worker.py +3 -3
  42. nat/front_ends/fastapi/message_handler.py +2 -2
  43. nat/front_ends/fastapi/message_validator.py +8 -10
  44. nat/front_ends/fastapi/response_helpers.py +4 -4
  45. nat/front_ends/fastapi/step_adaptor.py +1 -1
  46. nat/llm/aws_bedrock_llm.py +10 -9
  47. nat/llm/azure_openai_llm.py +9 -1
  48. nat/llm/nim_llm.py +2 -1
  49. nat/llm/openai_llm.py +2 -1
  50. nat/llm/utils/thinking.py +215 -0
  51. nat/observability/exporter/base_exporter.py +1 -1
  52. nat/observability/exporter/processing_exporter.py +8 -9
  53. nat/observability/exporter_manager.py +5 -5
  54. nat/observability/mixin/file_mixin.py +7 -7
  55. nat/observability/processor/batching_processor.py +4 -6
  56. nat/observability/processor/falsy_batch_filter_processor.py +55 -0
  57. nat/observability/processor/processor_factory.py +70 -0
  58. nat/profiler/calc/calc_runner.py +3 -4
  59. nat/profiler/callbacks/agno_callback_handler.py +1 -1
  60. nat/profiler/callbacks/langchain_callback_handler.py +5 -5
  61. nat/profiler/callbacks/llama_index_callback_handler.py +3 -3
  62. nat/profiler/callbacks/semantic_kernel_callback_handler.py +2 -2
  63. nat/profiler/decorators/function_tracking.py +125 -0
  64. nat/profiler/profile_runner.py +1 -1
  65. nat/profiler/utils.py +1 -1
  66. nat/registry_handlers/local/local_handler.py +2 -2
  67. nat/registry_handlers/package_utils.py +1 -1
  68. nat/registry_handlers/pypi/pypi_handler.py +3 -3
  69. nat/registry_handlers/rest/rest_handler.py +4 -4
  70. nat/retriever/milvus/retriever.py +1 -1
  71. nat/retriever/nemo_retriever/retriever.py +1 -1
  72. nat/runtime/loader.py +1 -1
  73. nat/runtime/runner.py +2 -2
  74. nat/settings/global_settings.py +1 -1
  75. nat/tool/code_execution/local_sandbox/local_sandbox_server.py +1 -1
  76. nat/tool/nvidia_rag.py +1 -1
  77. nat/tool/retriever.py +3 -2
  78. nat/utils/io/yaml_tools.py +1 -1
  79. nat/utils/reactive/observer.py +2 -2
  80. nat/utils/settings/global_settings.py +2 -2
  81. {nvidia_nat-1.3.0a20250827.dist-info → nvidia_nat-1.3.0a20250829.dist-info}/METADATA +3 -1
  82. {nvidia_nat-1.3.0a20250827.dist-info → nvidia_nat-1.3.0a20250829.dist-info}/RECORD +87 -81
  83. {nvidia_nat-1.3.0a20250827.dist-info → nvidia_nat-1.3.0a20250829.dist-info}/WHEEL +0 -0
  84. {nvidia_nat-1.3.0a20250827.dist-info → nvidia_nat-1.3.0a20250829.dist-info}/entry_points.txt +0 -0
  85. {nvidia_nat-1.3.0a20250827.dist-info → nvidia_nat-1.3.0a20250829.dist-info}/licenses/LICENSE-3rd-party.txt +0 -0
  86. {nvidia_nat-1.3.0a20250827.dist-info → nvidia_nat-1.3.0a20250829.dist-info}/licenses/LICENSE.md +0 -0
  87. {nvidia_nat-1.3.0a20250827.dist-info → nvidia_nat-1.3.0a20250829.dist-info}/top_level.txt +0 -0

nat/llm/utils/thinking.py

@@ -0,0 +1,215 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import functools
+ import inspect
+ import logging
+ import types
+ from abc import abstractmethod
+ from collections.abc import AsyncGenerator
+ from collections.abc import Iterable
+ from dataclasses import dataclass
+ from typing import Any
+ from typing import Callable
+ from typing import TypeVar
+
+ ModelType = TypeVar("ModelType")
+ MessagesType = TypeVar("MessagesType")
+
+ logger = logging.getLogger(__name__)
+
+
+ class FunctionArgumentWrapper:
+     """
+     Wrapper for the arguments and keyword arguments of a function.
+
+     The arguments and keyword arguments are stored in the args and kwargs attributes, respectively.
+     """
+
+     def __init__(self, *args: Any, **kwargs: Any):
+         """
+         Initialize the FunctionArgumentWrapper.
+
+         Args:
+             args: The arguments to the function.
+             kwargs: The keyword arguments to the function.
+         """
+         self.args = args
+         self.kwargs = kwargs
+
+     def __repr__(self) -> str:
+         return f"FunctionArgumentWrapper(args={self.args}, kwargs={self.kwargs})"
+
+
+ @dataclass
+ class BaseThinkingInjector:
+     """
+     Base class for thinking injectors.
+
+     Args:
+         system_prompt: The system prompt to inject.
+         function_names: The function names to inject the system prompt into.
+     """
+
+     system_prompt: str
+     function_names: list[str]
+
+     @abstractmethod
+     def inject(self, *args, **kwargs) -> FunctionArgumentWrapper:
+         """
+         Inject the system prompt into the arguments.
+
+         Args:
+             args: The arguments to inject the system prompt into.
+             kwargs: The keyword arguments to inject the system prompt into.
+
+         Returns:
+             FunctionArgumentWrapper: An object that contains the transformed args and kwargs.
+         """
+         pass
+
+
+ def _make_thinking_decorator(injector: BaseThinkingInjector):
+
+     def decorate(fn: Callable[..., Any]) -> Callable[..., Any]:
+
+         async def _call_async(obj: object, *call_args, **call_kwargs) -> Any:
+             new_args = injector.inject(*call_args, **call_kwargs)
+             return await fn(obj, *new_args.args, **new_args.kwargs)
+
+         async def _agen(obj: object, *call_args, **call_kwargs) -> AsyncGenerator[Any, None]:
+             new_args = injector.inject(*call_args, **call_kwargs)
+             async for item in fn(obj, *new_args.args, **new_args.kwargs):
+                 yield item
+
+         def _gen(obj: object, *call_args, **call_kwargs) -> Iterable[Any]:
+             new_args = injector.inject(*call_args, **call_kwargs)
+             yield from fn(obj, *new_args.args, **new_args.kwargs)
+             return
+
+         def _sync(obj: object, *call_args, **call_kwargs) -> Any:
+             new_args = injector.inject(*call_args, **call_kwargs)
+             return fn(obj, *new_args.args, **new_args.kwargs)
+
+         # Decide which wrapper to return
+         if inspect.iscoroutinefunction(fn):
+             wrapper = _call_async
+         elif inspect.isasyncgenfunction(fn):
+             wrapper = _agen
+         elif inspect.isgeneratorfunction(fn):
+             wrapper = _gen
+         else:
+             wrapper = _sync
+
+         return functools.wraps(fn)(wrapper)
+
+     return decorate
+
+
+ def patch_with_thinking(obj: ModelType, injector: BaseThinkingInjector) -> ModelType:
+     """
+     Patch the given object with a decorator that injects a system prompt into the supplied messages.
+     There is an assumption that the first non-object argument is the messages.
+
+     Args:
+         obj: The object to patch.
+         injector: The injector to use.
+
+     Returns:
+         The patched object.
+
+     Examples:
+         >>> from nat.llm.utils.thinking import BaseThinkingInjector
+         >>> from nat.llm.utils.thinking import FunctionArgumentWrapper
+         >>> from nat.llm.utils.thinking import patch_with_thinking
+         >>>
+         >>> class MockClass:
+         ...     def sync_method(self, *args, **kwargs):
+         ...         return (args, kwargs)
+         ...
+         >>> mock_obj_1 = MockClass()
+         >>> class AddThinking(BaseThinkingInjector):
+         ...     def inject(self, x: str, *args, **kwargs) -> FunctionArgumentWrapper:
+         ...         return FunctionArgumentWrapper(("thinking " + x), *args, **kwargs)
+         >>>
+         >>> patched_obj = patch_with_thinking(mock_obj_1, AddThinking(
+         ...     system_prompt="thinking",
+         ...     function_names=["sync_method"],
+         ... ))
+         >>> patched_obj.sync_method("test", 1, 2, 3, foo="bar")
+         (('thinking test', 1, 2, 3), {'foo': 'bar'})
+         >>>
+         >>> mock_obj_2 = MockClass()
+         >>> class AddThinkingWithArgs(BaseThinkingInjector):
+         ...     def inject(self, *args, **kwargs) -> FunctionArgumentWrapper:
+         ...         return FunctionArgumentWrapper("thinking", *args, **kwargs)
+         >>>
+         >>> patched_obj = patch_with_thinking(mock_obj_2, AddThinkingWithArgs(
+         ...     system_prompt="thinking",
+         ...     function_names=["sync_method"],
+         ... ))
+         >>> patched_obj.sync_method("test", 1, 2, 3, foo="bar")
+         (('thinking', 'test', 1, 2, 3), {'foo': 'bar'})
+         >>>
+         >>> mock_obj_3 = MockClass()
+         >>> class AddThinkingWithKwargs(BaseThinkingInjector):
+         ...     def inject(self, *args, **kwargs) -> FunctionArgumentWrapper:
+         ...         return FunctionArgumentWrapper(*args, thinking=True, **kwargs)
+         >>>
+         >>> patched_obj = patch_with_thinking(mock_obj_3, AddThinkingWithKwargs(
+         ...     system_prompt="thinking",
+         ...     function_names=["sync_method"],
+         ... ))
+         >>> patched_obj.sync_method("test", 1, 2, 3, foo="bar")
+         (('test', 1, 2, 3), {'thinking': True, 'foo': 'bar'})
+     """
+
+     decorator = _make_thinking_decorator(injector)
+
+     cls = obj if inspect.isclass(obj) else type(obj)
+     cls_name = getattr(cls, "__name__", str(cls))
+
+     for name, _ in inspect.getmembers(cls, callable):
+         if name not in injector.function_names:
+             continue
+
+         descriptor = inspect.getattr_static(cls, name)
+         original = descriptor.__func__ if isinstance(descriptor, types.MethodType) else descriptor
+         wrapped = decorator(original)
+
+         try:  # instance-level first
+             if not inspect.isclass(obj):
+                 object.__setattr__(obj, name, types.MethodType(wrapped, obj))
+                 continue
+         except Exception as exc:
+             logger.info(
+                 "Instance-level patch failed for %s.%s (%s); "
+                 "falling back to class-level patch.",
+                 cls_name,
+                 name,
+                 exc,
+             )
+
+         try:  # class-level fallback
+             setattr(cls, name, wrapped)
+         except Exception as exc:
+             logger.info(
+                 "Cannot patch method %s.%s with thinking: %s",
+                 cls_name,
+                 name,
+                 exc,
+             )
+
+     return obj
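
The doctests above only exercise a synchronous method, but _make_thinking_decorator also special-cases coroutine, async-generator and generator functions. As an illustrative sketch only (the client class, its ainvoke method and the injector subclass below are hypothetical, not part of the package), patching an async chat method so a system prompt is prepended to the message list would look like this:

import asyncio

from nat.llm.utils.thinking import BaseThinkingInjector
from nat.llm.utils.thinking import FunctionArgumentWrapper
from nat.llm.utils.thinking import patch_with_thinking


class FakeAsyncClient:
    """Hypothetical stand-in for an async chat client."""

    async def ainvoke(self, messages, **kwargs):
        return messages


class PrependSystemPrompt(BaseThinkingInjector):
    """Assumes the first positional argument is the message list."""

    def inject(self, messages, *args, **kwargs) -> FunctionArgumentWrapper:
        system_message = {"role": "system", "content": self.system_prompt}
        return FunctionArgumentWrapper([system_message] + list(messages), *args, **kwargs)


client = patch_with_thinking(
    FakeAsyncClient(),
    PrependSystemPrompt(system_prompt="Think step by step.", function_names=["ainvoke"]),
)
print(asyncio.run(client.ainvoke([{"role": "user", "content": "hi"}])))
# [{'role': 'system', 'content': 'Think step by step.'}, {'role': 'user', 'content': 'hi'}]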

nat/observability/exporter/base_exporter.py

@@ -375,7 +375,7 @@ class BaseExporter(Exporter):
  except asyncio.TimeoutError:
  logger.warning("%s: Some tasks did not complete within %s seconds", self.name, timeout)
  except Exception as e:
- logger.error("%s: Error while waiting for tasks: %s", self.name, e, exc_info=True)
+ logger.exception("%s: Error while waiting for tasks: %s", self.name, e)

  @override
  async def stop(self):
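
Most of the hunks that follow apply a single pattern: inside an except block, logger.error(msg, *args, exc_info=True) becomes logger.exception(msg, *args), which logs at ERROR level and attaches the active exception's traceback automatically; a handful of calls on paths that immediately re-raise go the other way and drop the traceback from the log call. A minimal standalone illustration of the equivalence (not taken from the package):

import logging

logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger("demo")

try:
    1 / 0
except ZeroDivisionError as e:
    # Inside an except block, these two calls emit equivalent records:
    # an ERROR-level message followed by the traceback of the active exception.
    logger.error("division failed: %s", e, exc_info=True)
    logger.exception("division failed: %s", e)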

nat/observability/exporter/processing_exporter.py

@@ -175,7 +175,7 @@ class ProcessingExporter(Generic[PipelineInputT, PipelineOutputT], BaseExporter,
  try:
  processed_item = await processor.process(processed_item)
  except Exception as e:
- logger.error("Error in processor %s: %s", processor.__class__.__name__, e, exc_info=True)
+ logger.exception("Error in processor %s: %s", processor.__class__.__name__, e)
  # Continue with unprocessed item rather than failing
  return processed_item

@@ -214,7 +214,7 @@ class ProcessingExporter(Generic[PipelineInputT, PipelineOutputT], BaseExporter,
  try:
  source_index = self._processors.index(source_processor)
  except ValueError:
- logger.error("Source processor %s not found in pipeline", source_processor.__class__.__name__)
+ logger.exception("Source processor %s not found in pipeline", source_processor.__class__.__name__)
  return

  # Process through remaining processors (skip the source processor)
@@ -225,10 +225,9 @@ class ProcessingExporter(Generic[PipelineInputT, PipelineOutputT], BaseExporter,
  await self._export_final_item(processed_item)

  except Exception as e:
- logger.error("Failed to continue pipeline processing after %s: %s",
- source_processor.__class__.__name__,
- e,
- exc_info=True)
+ logger.exception("Failed to continue pipeline processing after %s: %s",
+ source_processor.__class__.__name__,
+ e)

  async def _export_with_processing(self, item: PipelineInputT) -> None:
  """Export an item after processing it through the pipeline.
@@ -248,7 +247,7 @@ class ProcessingExporter(Generic[PipelineInputT, PipelineOutputT], BaseExporter,
  await self._export_final_item(final_item, raise_on_invalid=True)

  except Exception as e:
- logger.error("Failed to export item '%s': %s", item, e, exc_info=True)
+ logger.error("Failed to export item '%s': %s", item, e)
  raise

  @override
@@ -293,7 +292,7 @@ class ProcessingExporter(Generic[PipelineInputT, PipelineOutputT], BaseExporter,
  task.add_done_callback(self._tasks.discard)

  except Exception as e:
- logger.error("%s: Failed to create task: %s", self.name, e, exc_info=True)
+ logger.error("%s: Failed to create task: %s", self.name, e)
  raise

  @override
@@ -316,7 +315,7 @@ class ProcessingExporter(Generic[PipelineInputT, PipelineOutputT], BaseExporter,
  await asyncio.gather(*shutdown_tasks, return_exceptions=True)
  logger.debug("Successfully shut down %d processors", len(shutdown_tasks))
  except Exception as e:
- logger.error("Error shutting down processors: %s", e, exc_info=True)
+ logger.exception("Error shutting down processors: %s", e)

  # Call parent cleanup
  await super()._cleanup()

nat/observability/exporter_manager.py

@@ -177,7 +177,7 @@ class ExporterManager:
  else:
  logger.debug("Skipping cleanup for non-isolated exporter '%s'", name)
  except Exception as e:
- logger.error("Error preparing cleanup for isolated exporter '%s': %s", name, e)
+ logger.exception("Error preparing cleanup for isolated exporter '%s': %s", name, e)

  if cleanup_tasks:
  # Run cleanup tasks concurrently with timeout
@@ -195,7 +195,7 @@ class ExporterManager:
  logger.debug("Stopping isolated exporter '%s'", name)
  await exporter.stop()
  except Exception as e:
- logger.error("Error stopping isolated exporter '%s': %s", name, e)
+ logger.exception("Error stopping isolated exporter '%s': %s", name, e)

  @asynccontextmanager
  async def start(self, context_state: ContextState | None = None):
@@ -251,7 +251,7 @@ class ExporterManager:
  try:
  await self._cleanup_isolated_exporters()
  except Exception as e:
- logger.error("Error during isolated exporter cleanup: %s", e)
+ logger.exception("Error during isolated exporter cleanup: %s", e)

  # Then stop the manager tasks
  await self.stop()
@@ -275,7 +275,7 @@ class ExporterManager:
  logger.info("Stopped exporter '%s'", name)
  raise
  except Exception as e:
- logger.error("Failed to run exporter '%s': %s", name, str(e), exc_info=True)
+ logger.error("Failed to run exporter '%s': %s", name, str(e))
  # Re-raise the exception to ensure it's properly handled
  raise

@@ -307,7 +307,7 @@ class ExporterManager:
  except asyncio.CancelledError:
  logger.debug("Exporter '%s' task cancelled", name)
  except Exception as e:
- logger.error("Failed to stop exporter '%s': %s", name, str(e))
+ logger.exception("Failed to stop exporter '%s': %s", name, str(e))

  if stuck_tasks:
  logger.warning("Exporters did not shut down in time: %s", ", ".join(stuck_tasks))

nat/observability/mixin/file_mixin.py

@@ -103,7 +103,7 @@ class FileExportMixin(ResourceConflictMixin):
  self._current_file_path.unlink()
  logger.info("Cleaned up existing file: %s", self._current_file_path)
  except OSError as e:
- logger.error("Error removing existing file %s: %s", self._current_file_path, e)
+ logger.exception("Error removing existing file %s: %s", self._current_file_path, e)

  def _get_resource_identifiers(self) -> dict[str, Any]:
  """Return the file resources this instance will use.
@@ -154,10 +154,10 @@ class FileExportMixin(ResourceConflictMixin):
  old_file.unlink()
  logger.info("Cleaned up old log file during init: %s", old_file)
  except OSError as e:
- logger.error("Error removing old file %s: %s", old_file, e)
+ logger.exception("Error removing old file %s: %s", old_file, e)

  except Exception as e:
- logger.error("Error during initialization cleanup: %s", e)
+ logger.exception("Error during initialization cleanup: %s", e)

  async def _should_roll_file(self) -> bool:
  """Check if the current file should be rolled based on size."""
@@ -191,7 +191,7 @@ class FileExportMixin(ResourceConflictMixin):
  await self._cleanup_old_files()

  except OSError as e:
- logger.error("Error rolling file %s: %s", self._current_file_path, e)
+ logger.exception("Error rolling file %s: %s", self._current_file_path, e)

  async def _cleanup_old_files(self) -> None:
  """Remove old rolled files beyond the maximum count."""
@@ -209,10 +209,10 @@ class FileExportMixin(ResourceConflictMixin):
  old_file.unlink()
  logger.info("Cleaned up old log file: %s", old_file)
  except OSError as e:
- logger.error("Error removing old file %s: %s", old_file, e)
+ logger.exception("Error removing old file %s: %s", old_file, e)

  except Exception as e:
- logger.error("Error during cleanup: %s", e)
+ logger.exception("Error during cleanup: %s", e)

  async def export_processed(self, item: str | list[str]) -> None:
  """Export a processed string or list of strings.
@@ -248,7 +248,7 @@ class FileExportMixin(ResourceConflictMixin):
  await f.write("\n")

  except Exception as e:
- logger.error("Error exporting event: %s", e, exc_info=True)
+ logger.exception("Error exporting event: %s", e)

  def get_current_file_path(self) -> Path:
  """Get the current file path being written to.

nat/observability/processor/batching_processor.py

@@ -193,14 +193,14 @@ class BatchingProcessor(CallbackProcessor[T, list[T]], Generic[T]):
  await self._done_callback(batch)
  logger.debug("Scheduled flush routed batch of %d items through pipeline", len(batch))
  except Exception as e:
- logger.error("Error routing scheduled batch through pipeline: %s", e, exc_info=True)
+ logger.exception("Error routing scheduled batch through pipeline: %s", e)
  else:
  logger.warning("Scheduled flush created batch of %d items but no pipeline callback set",
  len(batch))
  except asyncio.CancelledError:
  pass
  except Exception as e:
- logger.error("Error in scheduled flush: %s", e, exc_info=True)
+ logger.exception("Error in scheduled flush: %s", e)

  async def _create_batch(self) -> list[T]:
  """Create a batch from the current queue."""
@@ -271,9 +271,7 @@ class BatchingProcessor(CallbackProcessor[T, list[T]], Generic[T]):
  "Successfully flushed final batch of %d items through pipeline during shutdown",
  len(final_batch))
  except Exception as e:
- logger.error("Error routing final batch through pipeline during shutdown: %s",
- e,
- exc_info=True)
+ logger.exception("Error routing final batch through pipeline during shutdown: %s", e)
  else:
  logger.warning("Final batch of %d items created during shutdown but no pipeline callback set",
  len(final_batch))
@@ -285,7 +283,7 @@ class BatchingProcessor(CallbackProcessor[T, list[T]], Generic[T]):
  logger.debug("BatchingProcessor shutdown completed successfully")

  except Exception as e:
- logger.error("Error during BatchingProcessor shutdown: %s", e, exc_info=True)
+ logger.exception("Error during BatchingProcessor shutdown: %s", e)
  self._shutdown_complete = True
  self._shutdown_complete_event.set()


nat/observability/processor/falsy_batch_filter_processor.py

@@ -0,0 +1,55 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import logging
+ from typing import TypeVar
+
+ from nat.observability.processor.processor import Processor
+ from nat.utils.type_utils import override
+
+ logger = logging.getLogger(__name__)
+
+ FalsyT = TypeVar("FalsyT")
+
+
+ class FalsyBatchFilterProcessor(Processor[list[FalsyT], list[FalsyT]]):
+     """Processor that filters out falsy items from a batch."""
+
+     @override
+     async def process(self, item: list[FalsyT]) -> list[FalsyT]:
+         """Filter out falsy items from a batch.
+
+         Args:
+             item (list[FalsyT]): The batch of items to filter.
+
+         Returns:
+             list[FalsyT]: The filtered batch.
+         """
+         return [i for i in item if i]
+
+
+ class DictBatchFilterProcessor(FalsyBatchFilterProcessor[dict]):
+     """Processor that filters out empty dict items from a batch."""
+     pass
+
+
+ class ListBatchFilterProcessor(FalsyBatchFilterProcessor[list]):
+     """Processor that filters out empty list items from a batch."""
+     pass
+
+
+ class SetBatchFilterProcessor(FalsyBatchFilterProcessor[set]):
+     """Processor that filters out empty set items from a batch."""
+     pass
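
A minimal usage sketch (not part of the diff; it assumes Processor only requires process to be implemented) showing the dict variant dropping empty entries from a batch:

import asyncio

from nat.observability.processor.falsy_batch_filter_processor import DictBatchFilterProcessor


async def main():
    batch = [{"span": "a"}, {}, {"span": "b"}]
    # Empty dicts are falsy, so they are removed from the batch.
    print(await DictBatchFilterProcessor().process(batch))  # [{'span': 'a'}, {'span': 'b'}]


asyncio.run(main())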

nat/observability/processor/processor_factory.py

@@ -0,0 +1,70 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import Any
+
+ from nat.observability.processor.processor import Processor
+
+
+ def processor_factory(processor_class: type, from_type: type[Any], to_type: type[Any]) -> type[Processor]:
+     """Create a concrete processor class from a processor class and types.
+
+     Args:
+         processor_class (type): The processor class to create a concrete instance of
+         from_type (type[Any]): The type of the input data
+         to_type (type[Any]): The type of the output data
+
+     Returns:
+         type[Processor]: The concrete processor class
+     """
+
+     class ConcreteProcessor(processor_class[from_type, to_type]): # type: ignore
+         pass
+
+     return ConcreteProcessor
+
+
+ def processor_factory_from_type(processor_class: type, from_type: type[Any]) -> type[Processor]:
+     """Create a concrete processor class from a processor class and input type.
+
+     Args:
+         processor_class (type): The processor class to create a concrete instance of
+         from_type (type[Any]): The type of the input data
+
+     Returns:
+         type[Processor]: The concrete processor class
+     """
+
+     class ConcreteProcessor(processor_class[from_type]): # type: ignore
+         pass
+
+     return ConcreteProcessor
+
+
+ def processor_factory_to_type(processor_class: type, to_type: type[Any]) -> type[Processor]:
+     """Create a concrete processor class from a processor class and output type.
+
+     Args:
+         processor_class (type): The processor class to create a concrete instance of
+         to_type (type[Any]): The type of the output data
+
+     Returns:
+         type[Processor]: The concrete processor class
+     """
+
+     class ConcreteProcessor(processor_class[to_type]): # type: ignore
+         pass
+
+     return ConcreteProcessor
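
These factories turn a still-generic processor class into a concrete, subscripted subclass at runtime. As a hypothetical sketch (this usage is not shown in the diff), applying processor_factory_from_type to the single-parameter FalsyBatchFilterProcessor yields a class equivalent to the hand-written DictBatchFilterProcessor above:

from nat.observability.processor.falsy_batch_filter_processor import FalsyBatchFilterProcessor
from nat.observability.processor.processor_factory import processor_factory_from_type

# Build a concrete filter for dict batches at runtime instead of declaring a subclass by hand.
DictFilter = processor_factory_from_type(FalsyBatchFilterProcessor, dict)

# The generated class subclasses FalsyBatchFilterProcessor[dict].
print(issubclass(DictFilter, FalsyBatchFilterProcessor))  # True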

nat/profiler/calc/calc_runner.py

@@ -442,7 +442,7 @@ class CalcRunner:
  runtime_fit=self.linear_analyzer.wf_runtime_fit # May be None
  )
  except Exception as e:
- logger.exception("Failed to plot concurrency vs. time metrics: %s", e, exc_info=True)
+ logger.exception("Failed to plot concurrency vs. time metrics: %s", e)
  logger.warning("Skipping plot of concurrency vs. time metrics")

  def write_output(self, output_dir: Path, calc_runner_output: CalcRunnerOutput):
@@ -506,11 +506,10 @@ class CalcRunner:
  continue
  try:
  calc_output = CalcRunnerOutput.model_validate_json(calc_runner_output_path.read_text())
- except ValidationError as e:
+ except ValidationError:
  logger.exception("Failed to validate calc runner output file %s. Skipping job %s.",
  calc_runner_output_path,
- e,
- exc_info=True)
+ job_dir.name)
  continue

  # Extract sizing metrics from calc_data

nat/profiler/callbacks/agno_callback_handler.py

@@ -144,7 +144,7 @@ class AgnoProfilerHandler(BaseProfilerCallback):
  return result

  except Exception as e:
- logger.exception("Tool execution error: %s", e)
+ logger.error("Tool execution error: %s", e)
  raise

  return wrapped_tool_execute

nat/profiler/callbacks/langchain_callback_handler.py

@@ -106,7 +106,7 @@ class LangchainProfilerHandler(AsyncCallbackHandler, BaseProfilerCallback):
  try:
  model_name = kwargs.get("metadata")["ls_model_name"]
  except Exception as e:
- logger.exception("Error getting model name: %s", e, exc_info=True)
+ logger.exception("Error getting model name: %s", e)

  run_id = str(kwargs.get("run_id", str(uuid4())))
  self._run_id_to_model_name[run_id] = model_name
@@ -144,7 +144,7 @@ class LangchainProfilerHandler(AsyncCallbackHandler, BaseProfilerCallback):
  try:
  model_name = metadata["ls_model_name"] if metadata else kwargs.get("metadata")["ls_model_name"]
  except Exception as e:
- logger.exception("Error getting model name: %s", e, exc_info=True)
+ logger.exception("Error getting model name: %s", e)

  run_id = str(run_id)
  self._run_id_to_model_name[run_id] = model_name
@@ -173,13 +173,13 @@ class LangchainProfilerHandler(AsyncCallbackHandler, BaseProfilerCallback):
  try:
  model_name = self._run_id_to_model_name.get(str(kwargs.get("run_id", "")), "")
  except Exception as e:
- logger.exception("Error getting model name: %s", e, exc_info=True)
+ logger.exception("Error getting model name: %s", e)

  usage_metadata = {}
  try:
  usage_metadata = kwargs.get("chunk").message.usage_metadata if kwargs.get("chunk") else {}
  except Exception as e:
- logger.exception("Error getting usage metadata: %s", e, exc_info=True)
+ logger.exception("Error getting usage metadata: %s", e)

  stats = IntermediateStepPayload(
  event_type=IntermediateStepType.LLM_NEW_TOKEN,
@@ -206,7 +206,7 @@ class LangchainProfilerHandler(AsyncCallbackHandler, BaseProfilerCallback):
  try:
  model_name = self._run_id_to_model_name.get(str(kwargs.get("run_id", "")), "")
  except Exception as e_inner:
- logger.exception("Error getting model name: %s from outer error %s", e_inner, e, exc_info=True)
+ logger.exception("Error getting model name: %s from outer error %s", e_inner, e)

  try:
  generation = response.generations[0][0]

nat/profiler/callbacks/llama_index_callback_handler.py

@@ -94,7 +94,7 @@ class LlamaIndexProfilerHandler(BaseCallbackHandler, BaseProfilerCallback):
  try:
  model_name = payload.get(EventPayload.SERIALIZED)['model']
  except Exception as e:
- logger.exception("Error getting model name: %s", e, exc_info=True)
+ logger.exception("Error getting model name: %s", e)

  llm_text_input = " ".join(prompts_or_messages) if prompts_or_messages else ""

@@ -159,13 +159,13 @@ class LlamaIndexProfilerHandler(BaseCallbackHandler, BaseProfilerCallback):
  for block in response.message.blocks:
  llm_text_output += block.text
  except Exception as e:
- logger.exception("Error getting LLM text output: %s", e, exc_info=True)
+ logger.exception("Error getting LLM text output: %s", e)

  model_name = ""
  try:
  model_name = response.raw.model
  except Exception as e:
- logger.exception("Error getting model name: %s", e, exc_info=True)
+ logger.exception("Error getting model name: %s", e)

  # Append usage data to NAT usage stats
  with self._lock:

nat/profiler/callbacks/semantic_kernel_callback_handler.py

@@ -132,7 +132,7 @@ class SemanticKernelProfilerHandler(BaseProfilerCallback):
  if "text" in item:
  model_input += item["text"]
  except Exception as e:
- logger.exception("Error in getting model input: %s", e, exc_info=True)
+ logger.exception("Error in getting model input: %s", e)

  input_stats = IntermediateStepPayload(event_type=IntermediateStepType.LLM_START,
  framework=LLMFrameworkEnum.SEMANTIC_KERNEL,
@@ -232,7 +232,7 @@ class SemanticKernelProfilerHandler(BaseProfilerCallback):
  return result

  except Exception as e:
- logger.exception("ToolUsage._use error: %s", e)
+ logger.error("ToolUsage._use error: %s", e)
  raise

  return patched_tool_call